function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def create_poster(self, show_obj):
    # type: (sickbeard.tv.TVShow) -> bool
    """Create and save a poster for the show when one is missing.

    Returns True if a poster was written, False otherwise.
    """
    if not (self.poster and show_obj) or self._has_poster(show_obj):
        return False
    logger.debug(u'Metadata provider %s creating poster for %s' % (self.name, show_obj.unique_name))
    return self.save_poster(show_obj)
574,
83,
574,
2,
1415773777
] |
def create_episode_thumb(self, ep_obj):
    # type: (sickbeard.tv.TVEpisode) -> bool
    """Create and save an episode thumbnail when one is missing."""
    if not (self.episode_thumbnails and ep_obj) or self.has_episode_thumb(ep_obj):
        return False
    logger.log(u"Metadata provider " + self.name + " creating episode thumbnail for " + ep_obj.pretty_name(),
               logger.DEBUG)
    return self.save_thumbnail(ep_obj)
574,
83,
574,
2,
1415773777
] |
def create_season_banners(self, show_obj):
    # type: (sickbeard.tv.TVShow) -> bool
    """Create banners for every season that is missing one.

    Returns True when every attempted save succeeded (vacuously True if no
    season needed a banner); False when disabled or no show was given.
    """
    if not (self.season_banners and show_obj):
        return False
    outcomes = []
    for season in show_obj.sxe_ep_obj:
        if self._has_season_banner(show_obj, season):
            continue
        logger.debug(u'Metadata provider %s creating season banners for %s' % (
            self.name, show_obj.unique_name))
        outcomes.append(self.save_season_banners(show_obj, season))
    return all(outcomes)
574,
83,
574,
2,
1415773777
] |
def create_season_all_banner(self, show_obj):
    # type: (sickbeard.tv.TVShow) -> bool
    """Create the 'season all' banner when one is missing."""
    if not (self.season_all_banner and show_obj) or self._has_season_all_banner(show_obj):
        return False
    logger.debug(u'Metadata provider %s creating season all banner for %s' % (
        self.name, show_obj.unique_name))
    return self.save_season_all_banner(show_obj)
574,
83,
574,
2,
1415773777
] |
def _get_episode_thumb_url(ep_obj):
    # type: (sickbeard.tv.TVEpisode) -> Optional[AnyStr]
    """
    Returns the URL to use for downloading an episode's thumbnail. Uses
    theTVDB.com and TVRage.com data.
    :param ep_obj: a TVEpisode object for which to grab the thumb URL
    :return: URL to thumb
    """
    # consider the episode plus any related (multi-episode file) episodes
    ep_obj_list = [ep_obj] + ep_obj.related_ep_obj
    # validate show
    from .. import helpers
    if not helpers.validate_show(ep_obj.show_obj):
        return None
    # try all included episodes in case some have thumbs and others don't
    for cur_ep_obj in ep_obj_list:
        if TVINFO_TVDB == cur_ep_obj.show_obj.tvid:
            show_lang = cur_ep_obj.show_obj.lang
            try:
                tvinfo_config = sickbeard.TVInfoAPI(TVINFO_TVDB).api_params.copy()
                tvinfo_config['dvdorder'] = 0 != cur_ep_obj.show_obj.dvdorder
                tvinfo_config['no_dummy'] = True
                # only override the API language for non-English shows
                if show_lang and not 'en' == show_lang:
                    tvinfo_config['language'] = show_lang
                t = sickbeard.TVInfoAPI(TVINFO_TVDB).setup(**tvinfo_config)
                ep_info = t[cur_ep_obj.show_obj.prodid][cur_ep_obj.season][cur_ep_obj.episode]
            except (BaseTVinfoEpisodenotfound, BaseTVinfoSeasonnotfound, TypeError):
                ep_info = None
        else:
            ep_info = helpers.validate_show(cur_ep_obj.show_obj, cur_ep_obj.season, cur_ep_obj.episode)
        if not ep_info:
            continue
        # ep_info may expose 'filename' as an attribute or as a dict key
        thumb_url = getattr(ep_info, 'filename', None) \
            or (isinstance(ep_info, dict) and ep_info.get('filename', None))
        if thumb_url not in (None, False, ''):
            return thumb_url
    return None
574,
83,
574,
2,
1415773777
] |
def write_ep_file(self, ep_obj):
    # type: (sickbeard.tv.TVEpisode) -> bool
    """Generate and write the metadata file for *ep_obj*.

    The destination comes from get_episode_file_path(), which appends
    _ep_nfo_extension. Expects _ep_data() to return an ElementTree object;
    override this method if _ep_data() produces another format.
    """
    ep_data = self._ep_data(ep_obj)
    if not ep_data:
        return False
    nfo_file_path = self.get_episode_file_path(ep_obj)
    logger.log(u'Writing episode metadata file: %s' % nfo_file_path, logger.DEBUG)
    return sg_helpers.write_file(nfo_file_path, ep_data, xmltree=True, utf8=True)
574,
83,
574,
2,
1415773777
] |
def save_fanart(self, show_obj, which=None):
    # type: (sickbeard.tv.TVShow, Optional[AnyStr]) -> bool
    """Download fanart and save it under the default fanart name in the
    show's root folder."""
    dest_path = self.get_fanart_path(show_obj)
    img = self._retrieve_show_image('fanart', show_obj, which,
                                    img_cache_type=sickbeard.image_cache.ImageCache.FANART)
    if img:
        return self._write_image(img, dest_path)
    logger.log(u"No fanart image was retrieved, unable to write fanart", logger.DEBUG)
    return False
574,
83,
574,
2,
1415773777
] |
def save_banner(self, show_obj, which=None):
    # type: (sickbeard.tv.TVShow, Optional[AnyStr]) -> bool
    """Download a banner and save it under the default banner name in the
    show's root folder."""
    dest_path = self.get_banner_path(show_obj)
    img = self._retrieve_show_image('banner', show_obj, which,
                                    img_cache_type=sickbeard.image_cache.ImageCache.BANNER)
    if img:
        return self._write_image(img, dest_path)
    logger.log(u"No show banner image was retrieved, unable to write banner", logger.DEBUG)
    return False
574,
83,
574,
2,
1415773777
] |
def save_season_banners(self, show_obj, season):
    # type: (sickbeard.tv.TVShow, int) -> bool
    """
    Saves all season banners to disk for the given show.
    show_obj: a TVShow object for which to save the season thumbs
    Cycles through all seasons and saves the season banners if possible.
    """
    season_dict = self._season_image_dict(show_obj, season, 'seasonwides')
    result = []
    # Returns a nested dictionary of season art with the season
    # number as primary key. It's really overkill but gives the option
    # to present to user via ui to pick down the road.
    for cur_season in season_dict:
        cur_season_art = season_dict[cur_season]
        if 0 == len(cur_season_art):
            continue
        # Just grab whatever's there for now (art_id is unused)
        art_id, season_url = cur_season_art.popitem()
        season_banner_file_path = self.get_season_banner_path(show_obj, cur_season)
        if not season_banner_file_path:
            logger.log(u'Path for season ' + str(cur_season) + ' came back blank, skipping this season',
                       logger.DEBUG)
            continue
        season_data = metadata_helpers.getShowImage(season_url, show_name=show_obj.name)
        if not season_data:
            logger.log(u'No season banner data available, skipping this season', logger.DEBUG)
            continue
        result = result + [self._write_image(season_data, season_banner_file_path)]
    # True only when at least one banner was attempted and all writes succeeded
    if result:
        return all(result)
    return False
574,
83,
574,
2,
1415773777
] |
def save_season_all_banner(self, show_obj, which=None):
    # type: (sickbeard.tv.TVShow, Optional[AnyStr]) -> bool
    """Download the 'season all' banner and save it under the default name."""
    dest_path = self.get_season_all_banner_path(show_obj)
    img = self._retrieve_show_image('banner', show_obj, which,
                                    img_cache_type=sickbeard.image_cache.ImageCache.BANNER)
    if img:
        return self._write_image(img, dest_path)
    logger.log(u"No show banner image was retrieved, unable to write season all banner", logger.DEBUG)
    return False
574,
83,
574,
2,
1415773777
] |
def _write_image(image_data, image_path, force=False):
    # type: (bytes, AnyStr, bool) -> bool
    """
    Saves the data in image_data to the location image_path. Returns True/False
    to represent success or failure.
    image_data: binary image data to write to file
    image_path: file location to save the image to
    force: overwrite an existing file when True
    """
    # don't bother overwriting it
    if not force and ek.ek(os.path.isfile, image_path):
        logger.log(u"Image already exists, not downloading", logger.DEBUG)
        return False
    if not image_data:
        logger.log(u"Unable to retrieve image, skipping", logger.WARNING)
        return False
    image_dir = ek.ek(os.path.dirname, image_path)
    try:
        if not ek.ek(os.path.isdir, image_dir):
            logger.log(u"Metadata dir didn't exist, creating it at " + image_dir, logger.DEBUG)
            ek.ek(os.makedirs, image_dir)
            sg_helpers.chmod_as_parent(image_dir)
        # context manager guarantees the handle is closed even if write() raises
        # (previous open/close pair leaked the handle on IOError)
        with ek.ek(open, image_path, 'wb') as out_file:
            out_file.write(image_data)
        sg_helpers.chmod_as_parent(image_path)
    except IOError as e:
        logger.log(
            u"Unable to write image to " + image_path + " - are you sure the show folder is writable? " + ex(e),
            logger.ERROR)
        return False
    return True
574,
83,
574,
2,
1415773777
] |
def gen_show_infos_dict(show_obj):
    # type: (TVShow) -> ShowInfosDict
    """Build a ShowInfosDict mapping tv source id -> show info (eager for the
    show's own source, a lazy fetcher callable for secondary sources)."""
    show_infos = ShowInfosDict()

    def _get_show_info(tv_id):
        # fetch show info (with fanart) from the given tv info source
        try:
            show_lang = show_obj.lang
            # There's gotta be a better way of doing this but we don't wanna
            # change the language value elsewhere
            tvinfo_config = sickbeard.TVInfoAPI(tv_id).api_params.copy()
            tvinfo_config['fanart'] = True
            tvinfo_config['posters'] = True
            tvinfo_config['banners'] = True
            tvinfo_config['dvdorder'] = 0 != show_obj.dvdorder
            if show_lang and not 'en' == show_lang:
                tvinfo_config['language'] = show_lang
            t = sickbeard.TVInfoAPI(tv_id).setup(**tvinfo_config)
            # NOTE(review): tv_src is the loop variable below and binds late;
            # when this closure is stored and invoked after the loop finishes
            # it sees the final tv_src value - confirm this is intended
            return t.get_show((show_obj.ids[tv_id]['id'], show_obj.prodid)[tv_src == show_obj.tvid],
                              load_episodes=False, banners=False, posters=False, fanart=True)
        except (BaseTVinfoError, IOError) as e:
            # returns None implicitly on lookup failure
            logger.log(u"Unable to look up show on " + sickbeard.TVInfoAPI(
                tv_id).name + ", not downloading images: " + ex(e), logger.WARNING)
    # todo: when tmdb is added as tv source remove the hardcoded TVINFO_TMDB
    for tv_src in list(OrderedDict.fromkeys([show_obj.tvid] + list_keys(sickbeard.TVInfoAPI().search_sources) +
                                            [TVINFO_TMDB])):
        # skip secondary sources for which the show has no mapped id
        if tv_src != show_obj.tvid and not show_obj.ids.get(tv_src, {}).get('id'):
            continue
        if tv_src == show_obj.tvid:
            # primary source: resolve eagerly
            show_infos[tv_src] = _get_show_info(tv_src)
        else:
            # secondary sources: store the fetcher for lazy resolution
            show_infos[tv_src] = _get_show_info
    return show_infos
574,
83,
574,
2,
1415773777
] |
def build_url(s_o, image_mode):
    """Return ``[primary_urls, alternate_urls]`` for the given image mode.

    The primary url is rewritten to the TVDB cache path; an alternate host
    variant (www <-> artworks) is derived when the url matches known
    thetvdb banner layouts.
    """
    found = [[], []]
    src_url = s_o[image_mode]
    if not (src_url and src_url.startswith('http')):
        return found
    if 'poster' == image_mode:
        src_url = re.sub('posters', '_cache/posters', src_url)
    elif 'banner' == image_mode:
        src_url = re.sub('graphical', '_cache/graphical', src_url)
    found[0].append(src_url)
    try:
        alt_url = '%swww.%s%s' % re.findall(
            r'(https?://)(?:artworks\.)?(thetvdb\.[^/]+/banners/[^\d]+[^.]+)(?:_t)(.*)', src_url)[0][0:3]
        if alt_url not in found[0]:
            found[1].append(alt_url)
    except (IndexError, Exception):
        try:
            alt_url = '%sartworks.%s_t%s' % re.findall(
                r'(https?://)(?:www\.)?(thetvdb\.[^/]+/banners/[^\d]+[^.]+)(.*)', src_url)[0][0:3]
            if alt_url not in found[0]:
                found[1].append(alt_url)
        except (IndexError, Exception):
            pass
    return found
574,
83,
574,
2,
1415773777
] |
def _de_dupe(images_list):
    # type:(Union[List[AnyStr], AnyStr]) -> Optional[Union[List[AnyStr], AnyStr]]
    """Filter out images already recorded in the shared ``de_dupe`` set.

    Accepts a single item or a list; returns the same shape (None when a
    single item was already seen). Newly seen items are added to the set.
    """
    is_list = isinstance(images_list, list)
    candidates = images_list if is_list else [images_list]
    fresh = [i for i in candidates if i not in de_dupe]
    de_dupe.update(fresh)
    if is_list:
        return fresh
    return fresh[0] if fresh else None
574,
83,
574,
2,
1415773777
] |
def _retrieve_show_image(self,
                         image_type,  # type: AnyStr
                         show_obj,  # type: sickbeard.tv.TVShow
                         which=None,  # type: int
                         return_links=False,  # type: bool
                         show_infos=None,  # type: ShowInfosDict
                         img_cache_type=None  # type: int
                         ):
    # type: (...) -> Optional[bytes, List[AnyStr]]
    """
    Gets an image URL from theTVDB.com, fanart.tv and TMDB.com, downloads it and returns the data.
    If type is fanart, multiple image src urls are returned instead of a single data image.
    image_type: type of image to retrieve (currently supported: fanart, poster, banner, poster_thumb, banner_thumb)
    show_obj: a TVShow object to use when searching for the image
    which: optional, a specific numbered poster to look for
    Returns: the binary image data if available, or else None
    """
    if not show_infos:
        show_infos = self.gen_show_infos_dict(show_obj)
    # 'fanart_all' is an alias: fetch fanart but return the url list
    if 'fanart_all' == image_type:
        return_links = True
        image_type = 'fanart'
    if image_type not in ('poster', 'banner', 'fanart', 'poster_thumb', 'banner_thumb'):
        logger.log(u"Invalid image type " + str(image_type) + ", couldn't find it in the " + sickbeard.TVInfoAPI(
            show_obj.tvid).name + " object", logger.ERROR)
        return
    image_urls = self._retrieve_image_urls(show_obj, image_type, show_infos)
    if image_urls:
        if return_links:
            return image_urls
        else:
            img_data = None
            image_cache = sickbeard.image_cache.ImageCache()
            # download candidates in order until one passes the type check
            for image_url in image_urls or []:
                if image_type in ('poster', 'banner'):
                    # poster/banner urls may arrive as (url, ...) tuples
                    if isinstance(image_url, tuple):
                        image_url = image_url[0]
                img_data = metadata_helpers.getShowImage(image_url, which, show_obj.name)
                # reject data whose detected cache type doesn't match the request
                if img_cache_type and img_cache_type != image_cache.which_type(img_data, is_binary=True):
                    img_data = None
                    continue
                if None is not img_data:
                    break
            if None is not img_data:
                return img_data
    # implicitly returns None when nothing usable was retrieved
574,
83,
574,
2,
1415773777
] |
def retrieveShowMetadata(self, folder):
    # type: (AnyStr) -> Union[Tuple[int, int, AnyStr], Tuple[None, None, None]]
    """
    Used only when mass adding Existing Shows,
    using previously generated Show metadata to reduce the need to query TVDB.

    Returns (tvid, prodid, show_name), or (None, None, None) on any failure.
    """
    from sickbeard.indexers.indexer_config import TVINFO_TVDB
    empty_return = (None, None, None)
    metadata_path = ek.ek(os.path.join, folder, self._show_metadata_filename)
    if not ek.ek(os.path.isdir, folder) or not ek.ek(os.path.isfile, metadata_path):
        logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG)
        return empty_return
    logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG)
    try:
        with ek.ek(io.open, metadata_path, 'r', encoding='utf8') as xmlFileObj:
            showXML = etree.ElementTree(file=xmlFileObj)
        # a usable nfo needs a title plus at least one id-bearing element
        if None is showXML.findtext('title') \
                or all(None is _f for _f in (showXML.find('//uniqueid[@type]'),
                                             showXML.findtext('tvdbid'),
                                             showXML.findtext('id'),
                                             showXML.findtext('indexer'))):
            logger.log(u"Invalid info in tvshow.nfo (missing name or id):"
                       + str(showXML.findtext('title')) + ' '
                       + str(showXML.findtext('indexer')) + ' '
                       + str(showXML.findtext('tvdbid')) + ' '
                       + str(showXML.findtext('id')))
            return empty_return
        name = showXML.findtext('title')
        try:
            tvid = int(showXML.findtext('indexer'))
        except (BaseException, Exception):
            tvid = None
        # handle v2 format of .nfo file
        default_source = showXML.find('//uniqueid[@default="true"]')
        if None is not default_source:
            use_tvid = default_source.attrib.get('type') or tvid
            if isinstance(use_tvid, string_types):
                # map a slug like 'tvdb' to its numeric source id
                use_tvid = {sickbeard.TVInfoAPI(x).config['slug']: x
                            for x, _ in iteritems(sickbeard.TVInfoAPI().all_sources)}.get(use_tvid)
            prodid = sg_helpers.try_int(default_source.text, None)
            if use_tvid and None is not prodid:
                return use_tvid, prodid, name
        # v1 fallbacks, in priority order: uniqueid[@type="tvdb"], <tvdbid>, <id>
        prodid = showXML.find('//uniqueid[@type="tvdb"]')
        if None is not prodid:
            prodid = int(prodid.text)
            tvid = TVINFO_TVDB
        elif None is not showXML.findtext('tvdbid'):
            prodid = int(showXML.findtext('tvdbid'))
            tvid = TVINFO_TVDB
        elif None is not showXML.findtext('id'):
            prodid = int(showXML.findtext('id'))
            try:
                # a bare <id> is assumed to be TVDB when any element mentions thetvdb.com
                tvid = TVINFO_TVDB if [s for s in showXML.findall('.//*')
                                       if s.text and -1 != s.text.find('thetvdb.com')] else tvid
            except (BaseException, Exception):
                pass
        else:
            logger.log(u"Empty <id> or <tvdbid> field in NFO, unable to find a ID", logger.WARNING)
            return empty_return
        if None is prodid:
            logger.log(u"Invalid Show ID (%s), not using metadata file" % prodid, logger.WARNING)
            return empty_return
    except (BaseException, Exception) as e:
        logger.log(
            u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e),
            logger.WARNING)
        return empty_return
    return tvid, prodid, name
574,
83,
574,
2,
1415773777
] |
def _fanart_urls(tvdb_id, image_type='banner', lang='en', thumb=False):
    # type: (int, AnyStr, AnyStr, bool) -> Optional[List[int, int, AnyStr]]
    """Query fanart.tv for artwork of the given type.

    Returns a list of ``[art_id, likes, url]`` entries sorted by likes then
    id (descending), or None when no tvdb_id is given. Exceptions from the
    fanart API propagate to the caller (the previous ``except: raise``
    wrapper was a no-op and has been removed).
    """
    types = {'poster': fanart.TYPE.TV.POSTER,
             'banner': fanart.TYPE.TV.BANNER,
             'fanart': fanart.TYPE.TV.BACKGROUND,
             'poster_thumb': fanart.TYPE.TV.POSTER,
             'banner_thumb': fanart.TYPE.TV.BANNER}
    if not tvdb_id:
        return None
    request = fanartRequest(apikey=sickbeard.FANART_API_KEY, tvdb_id=tvdb_id, types=types[image_type])
    resp = request.response()
    itemlist = []
    dedupe = []
    for art in filter_iter(lambda i: 10 < len(i.get('url', '')) and (lang == i.get('lang', '')[0:2]),
                           # remove "[0:2]" ... to strictly use only data where "en" is at source
                           resp[types[image_type]]):  # type: dict
        try:
            # thumb requests swap the full-size path for the preview path
            url = (art['url'], art['url'].replace('/fanart/', '/preview/'))[thumb]
            if url not in dedupe:
                dedupe += [url]
                itemlist += [
                    [int(art['id']), int(art['likes']), url]
                ]
        except (BaseException, Exception):
            continue
    itemlist.sort(key=lambda a: (a[1], a[0]), reverse=True)
    return itemlist
574,
83,
574,
2,
1415773777
] |
def __init__(self, *args, **kwargs):
    """
    Extract prefetches and default fields from Meta
    """
    # TODO: move this to a meta class, to evaluate once when defining the
    # class
    # TODO: This is not efficient - 2016-01-20
    serializer_class = self.get_serializer_class()
    serializer = serializer_class()  # need an instance to extract fields
    model = serializer_class.Meta.model
    assert issubclass(
        serializer_class, DynamicFieldsModelSerializer
    ) or issubclass(serializer_class, DynamicFieldsSerializer), (
        "serializer class must be an instance of \
DynamicFieldsModelSerializer " "instead got %s"
    ) % (serializer_class.__name__,)
    # names of all serializer fields, used as the default field selection
    self.serializer_fields = serializer.fields.keys()
    # model relations eligible for select_related()
    self.select_related_fields = [
        field.name for field in model._meta.fields
        if isinstance(field, (ForeignKey, OneToOneField))
    ]
    # NOTE(review): this iterates *serializer* fields but tests them against
    # Django model field classes (ForeignKey/OneToOneField); serializer
    # fields are unlikely to be instances of those, so this mapping may
    # always be empty - confirm the intended classes
    self.field_source_mapping = {
        field.field_name: field.source
        for field in serializer.fields.values()
        if isinstance(
            field, (ForeignKey, OneToOneField)
        )
    }
34,
29,
34,
27,
1378915584
] |
def filter_queryset(self, queryset, *args, **kwargs):
    """
    Prefetches based on 'fields' GET arg.

    Also validates remaining query params against the declared filters and
    deliberately breaks ``filter_class`` on an unknown name so the request
    fails loudly instead of silently ignoring it.
    """
    filter_fields = copy.deepcopy(self.request.query_params)
    # strip reserved, non-filter query params (was a chain of repeated ifs)
    for reserved in ('fields', 'format', 'page', 'page_size', 'ordering',
                     'q', 'q_fields'):
        if reserved in filter_fields:
            filter_fields.pop(reserved)
    for filter_field in filter_fields:
        try:
            # membership test replaces the manual found-flag loop
            if filter_field not in self.filter_class.declared_filters:
                # make error in the code to fail
                # if input wrong filter name.
                setattr(self, 'filter_class', 'No Filter Class')
                break
        except AttributeError:
            # no filter_class declared on the view; nothing to validate
            pass
    fields = self._get_query_fields(*args, **kwargs)
    if not fields:
        fields = self.serializer_fields
    # only select_related() relations that are actually requested
    select_related_fields = list(set(
        self.select_related_fields
    ) & set(fields))
    if select_related_fields:
        queryset = queryset.select_related(*select_related_fields)
    for field in fields:
        # TODO: Hook this up in the view - 2016-01-15
        if hasattr(queryset, 'prefetch_%s' % field):
            queryset = getattr(queryset, 'prefetch_%s' % field)()
    queryset = super(DynamicView, self).filter_queryset(
        queryset, *args, **kwargs
    )
    return queryset
34,
29,
34,
27,
1378915584
] |
def get(self, request, *args, **kwargs):
    """Handle GET by delegating to DRF's retrieve()."""
    return self.retrieve(request, *args, **kwargs)
34,
29,
34,
27,
1378915584
] |
def put(self, request, *args, **kwargs):
    """Handle PUT by delegating to DRF's update()."""
    return self.update(request, *args, **kwargs)
34,
29,
34,
27,
1378915584
] |
def __init__(self, mail_service):
    """Wire this Twisted resource to the shared mail service."""
    self._mail_service = mail_service
    Resource.__init__(self)
161,
78,
161,
100,
1406844682
] |
def __init__(self, mail_service):
    """Wire this Twisted resource to the shared mail service."""
    self._mail_service = mail_service
    Resource.__init__(self)
161,
78,
161,
100,
1406844682
] |
def __init__(self, mail_service):
    """Wire this Twisted resource to the shared mail service."""
    self._mail_service = mail_service
    Resource.__init__(self)
161,
78,
161,
100,
1406844682
] |
def response_failed(failure):
    # errback: log the underlying failure, then make sure the pending HTTP
    # request (captured from the enclosing scope) is finished
    log.error('something failed: %s' % failure.getErrorMessage())
    request.finish()
161,
78,
161,
100,
1406844682
] |
def __init__(self, mail_service):
    """Wire this Twisted resource to the shared mail service."""
    self._mail_service = mail_service
    Resource.__init__(self)
161,
78,
161,
100,
1406844682
] |
def __init__(self, mail_service):
    """Wire this Twisted resource to the shared mail service."""
    self._mail_service = mail_service
    Resource.__init__(self)
161,
78,
161,
100,
1406844682
] |
def _register_smtp_error_handler(self):
    """Subscribe to SMTP send failures and surface each one as an inbox mail."""
    def on_error(event, content):
        # build a delivery-error notification addressed from the failed recipient
        # NOTE(review): reads event.content rather than the 'content' argument
        # - confirm which one the events API actually populates
        delivery_error_mail = InputMail.delivery_error_template(delivery_address=event.content)
        self._mail_service.mailboxes.inbox.add(delivery_error_mail)
    events.register(events.catalog.SMTP_SEND_MESSAGE_ERROR, callback=on_error)
161,
78,
161,
100,
1406844682
] |
def getChild(self, action, request):
    """Route /mails/<action> to the matching bulk-action resource.

    Unknown actions fall through to None, exactly as before.
    """
    _mail_service = self.mail_service(request)
    routes = {
        'delete': MailsDeleteResource,
        'recover': MailsRecoverResource,
        'archive': MailsArchiveResource,
        'read': MailsReadResource,
        'unread': MailsUnreadResource,
    }
    resource_cls = routes.get(action)
    if resource_cls is not None:
        return resource_cls(_mail_service)
161,
78,
161,
100,
1406844682
] |
def render_GET(self, request):
    """Serve a paginated, filtered mail listing as JSON (asynchronously)."""
    _mail_service = self.mail_service(request)
    # q = search query, w = window (page) size, p = page number
    query, window_size, page = request.args.get('q')[0], request.args.get('w')[0], request.args.get('p')[0]
    unicode_query = to_unicode(query)
    d = _mail_service.mails(unicode_query, window_size, page)
    d.addCallback(self._build_mails_response)
    d.addCallback(lambda res: respond_json_deferred(res, request))

    def error_handler(error):
        # was a bare py2 `print error` (a syntax error under py3); route
        # through the logger like the other errbacks in this module
        log.error('error handling mails request: %s' % error.getErrorMessage())
    d.addErrback(error_handler)
    return NOT_DONE_YET
161,
78,
161,
100,
1406844682
] |
def onError(error):
    # errback: SMTP-unavailable maps to 503; any other failure is logged
    # and reported to the client as a generic 422
    if isinstance(error.value, SMTPDownException):
        respond_json_deferred({'message': str(error.value)}, request, status_code=503)
    else:
        log.error('error occurred while sending: %s' % error.getErrorMessage())
        respond_json_deferred({'message': 'an error occurred while sending'}, request, status_code=422)
161,
78,
161,
100,
1406844682
] |
def render_PUT(self, request):
    """Persist a draft asynchronously; reply 422 when saving fails."""
    deferred = self._handle_put(request)

    def on_save_error(failure):
        log.error('error saving draft: %s' % failure.getErrorMessage())
        respond_json_deferred("", request, status_code=422)
    deferred.addErrback(on_save_error)
    return server.NOT_DONE_YET
161,
78,
161,
100,
1406844682
] |
def _fetch_attachment_contents(self, content_dict, _mail_service):
    # deferred-generator (inlineCallbacks style): resolve each attachment
    # ident to its raw content before the mail is sent
    attachments = content_dict.get('attachments', []) if content_dict else []
    for attachment in attachments:
        retrieved_attachment = yield _mail_service.attachment(attachment['ident'])
        attachment['raw'] = retrieved_attachment['content']
    # NOTE(review): a None content_dict survives the loop (attachments == [])
    # but fails on this assignment - confirm callers never pass None
    content_dict['attachments'] = attachments
    defer.returnValue(content_dict)
161,
78,
161,
100,
1406844682
] |
def _handle_post(self, request):
    # deferred-generator: build and send a mail from the JSON request body
    _mail_service = self.mail_service(request)
    content_dict = json.loads(request.content.read())
    # inline attachment contents before handing off to the mail service
    with_attachment_content = yield self._fetch_attachment_contents(content_dict, _mail_service)
    sent_mail = yield _mail_service.send_mail(with_attachment_content)
    # 201: the mail was created and sent
    respond_json_deferred(sent_mail.as_dict(), request, status_code=201)
161,
78,
161,
100,
1406844682
] |
def allowed(self, request, project):
    """Allow rescoping to any project the user has a role on (their
    authorized_tenants), except the one they are currently scoped to."""
    for tenant in request.user.authorized_tenants:
        if tenant.id == project.id and project.id != request.user.project_id:
            return True
    return False
48,
10,
48,
2,
1400873234
] |
def get_link_url(self, project):
    """Link to the project workflow, opened at the members step."""
    params = urlencode({"step": 'update_members'})
    return "?".join([reverse(self.url, args=[project.id]), params])
48,
10,
48,
2,
1400873234
] |
def allowed(self, request, project):
    """Only available against a Keystone v3+ endpoint."""
    return 3 <= api.keystone.VERSIONS.active
48,
10,
48,
2,
1400873234
] |
def allowed(self, request, project):
    """Restricted to superuser accounts."""
    return request.user.is_superuser
48,
10,
48,
2,
1400873234
] |
def allowed(self, request, project):
    """Enabled only when Keystone permits project edits."""
    return api.keystone.keystone_can_edit_project()
48,
10,
48,
2,
1400873234
] |
def allowed(self, request, project):
    """Enabled only when Keystone permits project edits."""
    return api.keystone.keystone_can_edit_project()
48,
10,
48,
2,
1400873234
] |
def get_link_url(self, project):
    """Link to the project workflow, opened at the quotas step."""
    params = urlencode({"step": 'update_quotas'})
    return "?".join([reverse(self.url, args=[project.id]), params])
48,
10,
48,
2,
1400873234
] |
def action_present(count):
    """Present-tense action label, pluralised by count."""
    return ungettext_lazy(u"Delete Project", u"Delete Projects", count)
48,
10,
48,
2,
1400873234
] |
def action_past(count):
    """Past-tense action label, pluralised by count."""
    return ungettext_lazy(u"Deleted Project", u"Deleted Projects", count)
48,
10,
48,
2,
1400873234
] |
def allowed(self, request, project):
    """Enabled only when Keystone permits project edits."""
    return api.keystone.keystone_can_edit_project()
48,
10,
48,
2,
1400873234
] |
def handle(self, table, request, obj_ids):
    """Delete the tenants, then invalidate the token's cached project list."""
    result = super(DeleteTenantsAction, self).handle(table, request, obj_ids)
    auth_utils.remove_project_cache(request.user.token.id)
    return result
48,
10,
48,
2,
1400873234
] |
def filter(self, table, tenants, filter_string):
    """Really naive case-insensitive search."""
    # FIXME(gabriel): This should be smarter. Written for demo purposes.
    needle = filter_string.lower()

    def matches(tenant):
        return needle in tenant.name.lower()
    return filter(matches, tenants)
48,
10,
48,
2,
1400873234
] |
def get_data(self, request, project_id):
    """Fetch a single project (admin scope) for this table row."""
    return api.keystone.tenant_get(request, project_id,
                                   admin=True)
48,
10,
48,
2,
1400873234
] |
def allowed(self, request, project, cell):
    """Allow inline edits, except toggling 'enabled' on the current project."""
    policy_rule = (("identity", "identity:update_project"),)
    # never let a user disable the project their own token is scoped to
    if cell.column.name == 'enabled' and \
            request.user.token.project['id'] == cell.datum.id:
        return False
    return (api.keystone.keystone_can_edit_project() and
            policy.check(policy_rule, request))
48,
10,
48,
2,
1400873234
] |
def script_args(f):
    """single decorator for adding script args

    Applies the shared %%script option set (--out/--err/--bg/--proc) to *f*
    so every script magic exposes a consistent argument surface.
    """
    args = [
        magic_arguments.argument(
            '--out', type=str,
            help="""The variable in which to store stdout from the script.
If the script is backgrounded, this will be the stdout *pipe*,
instead of the stderr text itself.
"""
        ),
        magic_arguments.argument(
            '--err', type=str,
            help="""The variable in which to store stderr from the script.
If the script is backgrounded, this will be the stderr *pipe*,
instead of the stderr text itself.
"""
        ),
        magic_arguments.argument(
            '--bg', action="store_true",
            help="""Whether to run the script in the background.
If given, the only way to see the output of the command is
with --out/err.
"""
        ),
        magic_arguments.argument(
            '--proc', type=str,
            help="""The variable in which to store Popen instance.
This is used only when --bg option is given.
"""
        ),
    ]
    # apply each argparse-style decorator to the wrapped function
    for arg in args:
        f = arg(f)
    return f
1,
5,
1,
10,
1496432585
] |
def _script_magics_default(self):
"""default to a common list of programs""" | unnikrishnankgs/va | [
1,
5,
1,
10,
1496432585
] |
def __init__(self, shell=None):
    """Set up script magics, background-job tracking and exit-time cleanup."""
    super(ScriptMagics, self).__init__(shell=shell)
    self._generate_script_magics()
    self.bg_processes = []
    self.job_manager = BackgroundJobManager()
    atexit.register(self.kill_bg_processes)
1,
5,
1,
10,
1496432585
] |
def _generate_script_magics(self):
    """Register a cell magic for every configured script program."""
    registry = self.magics['cell']
    for magic_name in self.script_magics:
        registry[magic_name] = self._make_script_magic(magic_name)
1,
5,
1,
10,
1496432585
] |
def _make_script_magic(self, name):
"""make a named magic, that calls %%script with a particular program"""
# expand to explicit path if necessary:
script = self.script_paths.get(name, name) | unnikrishnankgs/va | [
1,
5,
1,
10,
1496432585
] |
def named_script_magic(line, cell):
    # closure: `script` (the resolved program path) and `self` come from
    # the enclosing _make_script_magic scope
    # if line, add it as cl-flags
    if line:
        line = "%s %s" % (script, line)
    else:
        line = script
    return self.shebang(line, cell)
1,
5,
1,
10,
1496432585
] |
def shebang(self, line, cell):
"""Run a cell via a shell command | unnikrishnankgs/va | [
1,
5,
1,
10,
1496432585
] |
def _run_script(self, p, cell):
"""callback for running the script in the background"""
p.stdin.write(cell)
p.stdin.close()
p.wait() | unnikrishnankgs/va | [
1,
5,
1,
10,
1496432585
] |
def killbgscripts(self, _nouse_=''):
    """Kill all BG processes started by %%script and its family."""
    # _nouse_ absorbs the (ignored) magic line argument
    self.kill_bg_processes()
    print("All background processes were killed.")
1,
5,
1,
10,
1496432585
] |
def sample_template_checks(study_id, user, check_exists=False):
    """Performs different checks and raises errors if any of the checks fail
    Parameters
    ----------
    study_id : int
        The study id
    user : qiita_db.user.User
        The user trying to access the study
    check_exists : bool, optional
        If true, check if the sample template exists
    Raises
    ------
    HTTPError
        404 if the study does not exist
        403 if the user does not have access to the study
        404 if check_exists == True and the sample template doesn't exist
    """
    try:
        study = Study(int(study_id))
    except QiitaDBUnknownIDError:
        raise HTTPError(404, reason='Study does not exist')
    if not study.has_access(user):
        raise HTTPError(403, reason='User does not have access to study')
    # Check if the sample template exists
    if check_exists and not SampleTemplate.exists(study_id):
        raise HTTPError(404, reason="Study %s doesn't have sample information"
                        % study_id)
110,
74,
110,
128,
1380221443
] |
def sample_template_handler_patch_request(user, req_op, req_path,
                                          req_value=None, req_from=None,
                                          direct_upload=False):
    """Patches the sample template
    Parameters
    ----------
    user: qiita_db.user.User
        The user performing the request
    req_op : str
        The operation to perform on the sample template
    req_path : str
        The path to the attribute to patch
    req_value : str, optional
        The new value
    req_from : str, optional
        The original path of the element
    direct_upload : boolean, optional
        If the file being uploaded comes from a direct upload (True)
    Returns
    -------
    Raises
    ------
    HTTPError
        400 If the path parameter doens't follow the expected format
        400 If the given operation is not supported
    """
    # NOTE(review): req_from is accepted but never used below - kept for
    # API symmetry with the patch protocol; confirm before removing
    req_path = [v for v in req_path.split('/') if v]
    # At this point we know the path should be at least length 2
    if len(req_path) < 2:
        raise HTTPError(400, reason='Incorrect path parameter')
    study_id = int(req_path[0])
    # Check if the current user has access to the study and if the sample
    # template exists
    sample_template_checks(study_id, user, check_exists=True)
    if req_op == 'remove':
        # Path format
        # column: study_id/columns/column_name
        # sample: study_id/samples/sample_id
        if len(req_path) != 3:
            raise HTTPError(400, reason='Incorrect path parameter')
        attribute = req_path[1]
        attr_id = req_path[2]
        # the deletion runs as an async plugin job
        qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
        cmd = qiita_plugin.get_command('delete_sample_or_column')
        params = Parameters.load(
            cmd, values_dict={'obj_class': 'SampleTemplate',
                              'obj_id': study_id,
                              'sample_or_col': attribute,
                              'name': attr_id})
        job = ProcessingJob.create(user, params, True)
        # Store the job id attaching it to the sample template id
        r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
                     dumps({'job_id': job.id}))
        job.submit()
        return {'job': job.id}
    elif req_op == 'replace':
        # WARNING: Although the patch operation is a replace, is not a full
        # true replace. A replace is in theory equivalent to a remove + add.
        # In this case, the replace operation doesn't necessarily removes
        # anything (e.g. when only new columns/samples are being added to the)
        # sample information.
        # Path format: study_id/data
        # Forcing to specify data for extensibility. In the future we may want
        # to use this function to replace other elements of the sample
        # information
        if len(req_path) != 2:
            raise HTTPError(400, reason='Incorrect path parameter')
        attribute = req_path[1]
        if attribute == 'data':
            # Update the sample information
            if req_value is None:
                raise HTTPError(400, reason="Value is required when updating "
                                "sample information")
            if direct_upload:
                # We can assume that the file exist as it was generated by
                # the system
                filepath = req_value
                if not exists(filepath):
                    reason = ('Upload file not found (%s), please report to '
                              'qiita.help@gmail.com' % filepath)
                    raise HTTPError(404, reason=reason)
            else:
                # Check if the file exists
                fp_rsp = check_fp(study_id, req_value)
                if fp_rsp['status'] != 'success':
                    raise HTTPError(404, reason='Filepath not found')
                filepath = fp_rsp['file']
            # the update runs as an async plugin job
            qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
            cmd = qiita_plugin.get_command('update_sample_template')
            params = Parameters.load(
                cmd, values_dict={'study': study_id,
                                  'template_fp': filepath})
            job = ProcessingJob.create(user, params, True)
            # Store the job id attaching it to the sample template id
            r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
                         dumps({'job_id': job.id}))
            job.submit()
            return {'job': job.id}
        else:
            raise HTTPError(404, reason='Attribute %s not found' % attribute)
    else:
        raise HTTPError(400, reason='Operation %s not supported. Current '
                        'supported operations: remove, replace' % req_op)
110,
74,
110,
128,
1380221443
] |
def get(self):
        """Render the sample-information summary page for one study."""
        sid = self.get_argument('study_id')
        # Raises if the study does not exist or the user lacks access.
        sample_template_checks(sid, self.current_user)
        self.render('study_ajax/sample_summary.html', study_id=sid)
110,
74,
110,
128,
1380221443
] |
def post(self):
        """Create or update a study's sample information.

        With ``direct_upload=true`` the file body comes in the request
        itself and is spooled to a temp file; otherwise ``filepath``
        names a previously uploaded file.
        """
        sid = int(self.get_argument('study_id'))
        filepath = self.get_argument('filepath')
        data_type = self.get_argument('data_type')
        direct_upload = self.get_argument('direct_upload', False)
        if direct_upload == 'true':
            direct_upload = True
            # delete=False: the processing job reads this file later.
            with NamedTemporaryFile(suffix='.txt', delete=False) as tmp:
                tmp.write(self.request.files['theFile'][0]['body'])
                filepath = tmp.name
        self.write(sample_template_handler_post_request(
            sid, self.current_user, filepath, data_type=data_type,
            direct_upload=direct_upload))
110,
74,
110,
128,
1380221443
] |
def patch(self):
        """Apply a JSON-PATCH-style operation to sample information."""
        op = self.get_argument('op')
        path = self.get_argument('path')
        value = self.get_argument('value', None)
        from_arg = self.get_argument('from', None)
        direct_upload = self.get_argument('direct_upload', False)
        if direct_upload == 'true':
            direct_upload = True
            # delete=False: the processing job reads this file later.
            with NamedTemporaryFile(suffix='.txt', delete=False) as tmp:
                tmp.write(self.request.files['value'][0]['body'])
                value = tmp.name
        self.write(sample_template_handler_patch_request(
            self.current_user, op, path, value, from_arg,
            direct_upload))
110,
74,
110,
128,
1380221443
] |
def delete(self):
        """Delete the sample information of a study."""
        sid = int(self.get_argument('study_id'))
        response = sample_template_handler_delete_request(
            sid, self.current_user)
        self.write(response)
110,
74,
110,
128,
1380221443
] |
def get(self):
        """Return overview information for a study's sample template."""
        sid = int(self.get_argument('study_id'))
        overview = sample_template_overview_handler_get_request(
            sid, self.current_user)
        self.write(overview)
110,
74,
110,
128,
1380221443
] |
def get(self):
        """Send the values of one (or every) sample-template column."""
        sid = int(self.get_argument('study_id'))
        col = self.get_argument('column', None)
        values = sample_template_columns_get_req(sid, col, self.current_user)
        # Wrapped in a dict because tornado expects a dict for JSON output.
        self.write({'values': values})
110,
74,
110,
128,
1380221443
] |
def get(self):
        """Show the sample/prep summary page for a study."""
        sid = int(self.get_argument('study_id'))
        email = self.current_user.id
        res = sample_template_meta_cats_get_req(sid, email)
        if res['status'] == 'error':
            # Map the error message onto the matching HTTP status code.
            message = res['message']
            if 'does not exist' in message:
                raise HTTPError(404, reason=message)
            if 'User does not have access to study' in message:
                raise HTTPError(403, reason=message)
            raise HTTPError(500, reason=message)
        columns, rows = _build_sample_summary(sid, email)
        _, alert_type, alert_msg = get_sample_template_processing_status(sid)
        self.render('study_ajax/sample_prep_summary.html',
                    rows=rows, columns=columns,
                    categories=res['categories'],
                    study_id=sid, alert_type=alert_type,
                    alert_message=alert_msg,
                    user_can_edit=Study(sid).can_edit(self.current_user))
110,
74,
110,
128,
1380221443
] |
def find_by_name(name):
    """Look up and return the DataObject subclass registered under *name*.

    *name* is a bare class name with no module prefix. Raises `KeyError`
    when no class has been registered under that name.
    """
    return classes_by_name[name]
2,
2,
2,
1,
1295756319
] |
def __new__(cls, name, bases, attrs):
        """Creates and returns a new `DataObject` class with its declared
        fields and name.

        Collects field declarations from all `DataObjectMetaclass` bases,
        merges in this class's own Field attributes, and registers the
        resulting class in the module-level ``classes_by_name`` registry.
        """
        fields = {}
        new_fields = {}
        new_properties = {}
        # Inherit all the parent DataObject classes' fields.
        for base in bases:
            if isinstance(base, DataObjectMetaclass):
                fields.update(base.fields)
        # Move all the class's attributes that are Fields to the fields set.
        for attrname, field in attrs.items():
            # NOTE(review): a Field presumably also isinstance-matches
            # Property, so it lands in both new_properties and new_fields --
            # confirm against remoteobjects.fields before relying on this.
            if isinstance(field, remoteobjects.fields.Property):
                new_properties[attrname] = field
            if isinstance(field, remoteobjects.fields.Field):
                new_fields[attrname] = field
            elif attrname in fields:
                # Throw out any parent fields that the subclass defined as
                # something other than a Field.
                del fields[attrname]
        fields.update(new_fields)
        attrs['fields'] = fields
        obj_cls = super(DataObjectMetaclass, cls).__new__(cls, name, bases, attrs)
        # Install the declared properties on the finished class object.
        for field, value in new_properties.items():
            obj_cls.add_to_class(field, value)
        # Register the new class so Object fields can have forward-referenced it.
        classes_by_name[name] = obj_cls
        # Tell this class's fields what this class is, so they can find their
        # forward references later.
        for field in new_properties.values():
            field.of_cls = obj_cls
        return obj_cls
2,
2,
2,
1,
1295756319
] |
def __init__(self, **kwargs):
        """Initialize a new `DataObject`, seeding attributes from *kwargs*.

        Values are written straight into the instance dict (bypassing any
        descriptors), exactly as keyword-provided field values.
        """
        self.api_data = {}
        vars(self).update(kwargs)
2,
2,
2,
1,
1295756319
] |
def __ne__(self, other):
        """Return True when the two instances differ.

        Defined as the strict inverse of the equivalence computed by
        `__eq__()`.
        """
        are_equal = (self == other)
        return not are_equal
2,
2,
2,
1,
1295756319
] |
def statefields(cls):
        """Return the attribute names that make up this object's state:
        every declared field name plus the raw ``api_data`` payload.

        Bug fix: ``cls.fields.keys() + [...]`` only works on Python 2,
        where ``keys()`` returns a list; on Python 3 it is a view and the
        concatenation raises TypeError. Materialize with ``list()`` so
        the expression works on both.
        """
        return list(cls.fields.keys()) + ['api_data']
2,
2,
2,
1,
1295756319
] |
def get(self, attr, *args):
        """Dictionary-style attribute lookup.

        Any extra positional argument acts as the default returned when
        *attr* is missing, mirroring the builtin ``getattr`` contract.
        """
        lookup_args = (self, attr) + args
        return getattr(*lookup_args)
2,
2,
2,
1,
1295756319
] |
def to_dict(self):
        """Encode this DataObject as a plain dictionary.

        Starts from the last payload received from the API
        (``api_data``), overlays the current value of every declared
        field, then drops any key whose value would be ``None`` so that
        absent fields are omitted from the result entirely.
        """
        # Start with the last set of data we got from the API.
        data = deepcopy(self.api_data)
        # Overlay the object's current field values; a missing/None field
        # marks its key for removal below.
        for field_name, field in self.fields.items():
            value = getattr(self, field.attrname, None)
            if value is None:
                data[field.api_name] = None
            else:
                data[field.api_name] = field.encode(value)
        # Bug fix: the original used Python-2-only iteritems() and deleted
        # from data while iterating data.keys(), which raises RuntimeError
        # on Python 3. Build the filtered dict instead.
        return dict((k, v) for k, v in data.items() if v is not None)
2,
2,
2,
1,
1295756319
] |
def from_dict(cls, data):
        """Decode *data* (a dictionary) into a new instance of *cls*."""
        instance = cls()
        instance.update_from_dict(data)
        return instance
2,
2,
2,
1,
1295756319
] |
def forwards(self, orm): | canvasnetworks/canvas | [
56,
15,
56,
3,
1447125133
] |
def backwards(self, orm): | canvasnetworks/canvas | [
56,
15,
56,
3,
1447125133
] |
def __init__(
self,
data,
dist=stats.norm,
fit=False,
distargs=(),
a=0,
loc=0,
scale=1, | statsmodels/statsmodels | [
8254,
2672,
8254,
2534,
1307898290
] |
def theoretical_percentiles(self):
        """Theoretical percentiles (plotting positions) for the sample."""
        nobs = self.nobs
        alpha = self.a
        return plotting_pos(nobs, alpha)
8254,
2672,
8254,
2534,
1307898290
] |
def theoretical_quantiles(self):
        """Theoretical quantiles: the distribution's ppf evaluated at the
        theoretical percentiles.

        Raises
        ------
        TypeError
            If the distribution requires shape parameters that were not
            supplied, so its ``ppf`` cannot be evaluated; the message
            names the distribution.
        """
        try:
            return self.dist.ppf(self.theoretical_percentiles)
        except TypeError:
            # Bug fix: the original mixed %-style and str.format
            # ("%s ...".format(...)), leaving a literal "%s" in the
            # message instead of the distribution name.
            msg = "{0} requires more parameters to compute ppf".format(
                self.dist.name,
            )
            raise TypeError(msg)
        except Exception as exc:
            msg = "failed to compute the ppf of {0}".format(self.dist.name)
            raise type(exc)(msg)
8254,
2672,
8254,
2534,
1307898290
] |
def sorted_data(self):
        """The sample data, copied into a new ascending-order array.

        The original ``self.data`` is never modified.
        """
        return np.sort(np.array(self.data, copy=True))
8254,
2672,
8254,
2534,
1307898290
] |
def sample_quantiles(self):
        """Sample quantiles, standardized by the fitted loc/scale if any.

        NOTE(review): standardization requires *both* loc != 0 and
        scale != 1; loc == 0 with scale != 1 is returned unstandardized.
        Presumably intentional -- verify against upstream behavior.
        """
        if self.fit and self.loc != 0 and self.scale != 1:
            return (self.sorted_data - self.loc) / self.scale
        return self.sorted_data
8254,
2672,
8254,
2534,
1307898290
] |
def sample_percentiles(self):
        """Sample percentiles: the distribution's cdf at the sorted data.

        A frozen distribution is applied directly; otherwise the data are
        standardized with the fitted loc/scale (last two fit params).
        """
        _check_for(self.dist, "cdf")
        if self._is_frozen:
            return self.dist.cdf(self.sorted_data)
        loc = self.fit_params[-2]
        scale = self.fit_params[-1]
        standardized = (self.sorted_data - loc) / scale
        return self.dist.cdf(standardized)
8254,
2672,
8254,
2534,
1307898290
] |
def qqplot(
self,
xlabel=None,
ylabel=None,
line=None,
other=None,
ax=None,
swap: bool = False,
**plotkwargs, | statsmodels/statsmodels | [
8254,
2672,
8254,
2534,
1307898290
] |
def probplot(
self,
xlabel=None,
ylabel=None,
line=None,
exceed=False,
ax=None,
**plotkwargs, | statsmodels/statsmodels | [
8254,
2672,
8254,
2534,
1307898290
] |
def qqplot(
data,
dist=stats.norm,
distargs=(),
a=0,
loc=0,
scale=1,
fit=False,
line=None,
ax=None,
**plotkwargs, | statsmodels/statsmodels | [
8254,
2672,
8254,
2534,
1307898290
] |
def qqplot_2samples(
data1, data2, xlabel=None, ylabel=None, line=None, ax=None | statsmodels/statsmodels | [
8254,
2672,
8254,
2534,
1307898290
] |
def qqline(ax, line, x=None, y=None, dist=None, fmt="r-", **lineoptions):
    """
    Plot a reference line for a qqplot.
    Parameters
    ----------
    ax : matplotlib axes instance
        The axes on which to plot the line
    line : str {"45","r","s","q"}
        Options for the reference line to which the data is compared.:
        - "45" - 45-degree line
        - "s" - standardized line, the expected order statistics are scaled by
          the standard deviation of the given sample and have the mean
          added to them
        - "r" - A regression line is fit
        - "q" - A line is fit through the quartiles.
        - None - By default no reference line is added to the plot.
    x : ndarray
        X data for plot. Not needed if line is "45".
    y : ndarray
        Y data for plot. Not needed if line is "45".
    dist : scipy.stats.distribution
        A scipy.stats distribution, needed if line is "q".
    fmt : str, optional
        Line format string passed to `plot`.
    **lineoptions
        Additional arguments to be passed to the `plot` command.
    Notes
    -----
    There is no return value. The line is plotted on the given `ax`.
    Examples
    --------
    Import the food expenditure dataset. Plot annual food expenditure on x-axis
    and household income on y-axis. Use qqline to add regression line into the
    plot.
    >>> import statsmodels.api as sm
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from statsmodels.graphics.gofplots import qqline
    >>> foodexp = sm.datasets.engel.load()
    >>> x = foodexp.exog
    >>> y = foodexp.endog
    >>> ax = plt.subplot(111)
    >>> plt.scatter(x, y)
    >>> ax.set_xlabel(foodexp.exog_name[0])
    >>> ax.set_ylabel(foodexp.endog_name)
    >>> qqline(ax, "r", x, y)
    >>> plt.show()
    .. plot:: plots/graphics_gofplots_qqplot_qqline.py
    """
    # Copy so the caller's dict is never mutated.
    lineoptions = lineoptions.copy()
    # Split the matplotlib fmt shorthand into explicit keyword options.
    # setdefault ensures explicitly passed lineoptions always win.
    # First: extract a linestyle token, longest patterns checked via order.
    for ls in ("-", "--", "-.", ":"):
        if ls in fmt:
            lineoptions.setdefault("linestyle", ls)
            fmt = fmt.replace(ls, "")
            break
    # Next: extract a marker character from fmt.
    for marker in (
        ".",
        ",",
        "o",
        "v",
        "^",
        "<",
        ">",
        "1",
        "2",
        "3",
        "4",
        "8",
        "s",
        "p",
        "P",
        "*",
        "h",
        "H",
        "+",
        "x",
        "X",
        "D",
        "d",
        "|",
        "_",
    ):
        if marker in fmt:
            lineoptions.setdefault("marker", marker)
            fmt = fmt.replace(marker, "")
            break
    # Whatever is left of fmt is treated as a color specifier.
    if fmt:
        lineoptions.setdefault("color", fmt)
    if line == "45":
        # Draw the identity line spanning the current axes limits and make
        # both axes share the same limits so the line is at 45 degrees.
        end_pts = lzip(ax.get_xlim(), ax.get_ylim())
        end_pts[0] = min(end_pts[0])
        end_pts[1] = max(end_pts[1])
        ax.plot(end_pts, end_pts, **lineoptions)
        ax.set_xlim(end_pts)
        ax.set_ylim(end_pts)
        return  # does this have any side effects?
    if x is None or y is None:
        raise ValueError("If line is not 45, x and y cannot be None.")
    x = np.array(x)
    y = np.array(y)
    if line == "r":
        # Plot the OLS fit of y on x (with intercept).
        # could use ax.lines[0].get_xdata(), get_ydata(),
        # but don't know axes are "clean"
        y = OLS(y, add_constant(x)).fit().fittedvalues
        ax.plot(x, y, **lineoptions)
    elif line == "s":
        # Standardized line: slope = std(y), intercept = mean(y).
        m, b = np.std(y), np.mean(y)
        ref_line = x * m + b
        ax.plot(x, ref_line, **lineoptions)
    elif line == "q":
        # Line through the first and third sample quartiles, mapped onto
        # the distribution's theoretical quartiles.
        _check_for(dist, "ppf")
        q25 = stats.scoreatpercentile(y, 25)
        q75 = stats.scoreatpercentile(y, 75)
        theoretical_quartiles = dist.ppf([0.25, 0.75])
        m = (q75 - q25) / np.diff(theoretical_quartiles)
        b = q25 - m * theoretical_quartiles[0]
        ax.plot(x, m * x + b, **lineoptions)
8254,
2672,
8254,
2534,
1307898290
] |
def plotting_pos(nobs, a=0.0, b=None):
    """
    Generate the sequence of plotting positions.

    Parameters
    ----------
    nobs : int
        Number of probability points to plot
    a : float, default 0.0
        alpha parameter for the plotting position of an expected order
        statistic
    b : float, default None
        beta parameter for the plotting position of an expected order
        statistic. If None, then b is set to a.

    Returns
    -------
    ndarray
        The plotting positions, computed as (i - a)/(nobs + 1 - a - b)
        for i in range(1, nobs + 1).

    See Also
    --------
    scipy.stats.mstats.plotting_positions
        Additional information on alpha and beta
    """
    if b is None:
        b = a
    positions = np.arange(1.0, nobs + 1)
    positions -= a
    positions /= nobs + 1 - a - b
    return positions
8254,
2672,
8254,
2534,
1307898290
] |
def _do_plot(
x, y, dist=None, line=None, ax=None, fmt="b", step=False, **kwargs | statsmodels/statsmodels | [
8254,
2672,
8254,
2534,
1307898290
] |
def __init__(self, name_):
        """Wrap the bare string *name_* as this object's name."""
        self.name = name_
21,
16,
21,
3,
1435959644
] |
def __eq__(self, other):
        """Compare equal to another named object or to a bare string."""
        other_name = other if isinstance(other, str) else other.name
        return self.name == other_name
21,
16,
21,
3,
1435959644
] |
def __hash__(self):
        """Hash on the string form of the object."""
        key = str(self)
        return hash(key)
21,
16,
21,
3,
1435959644
] |
def is_BASE(U, UISC, UGC):
    """Base character: an independent base category, or a Lo letter whose
    Indic syllabic category is one of the dependent-ish classes below.

    Tuple membership (sequential ==) is kept deliberately; the category
    constants' hashing semantics are not guaranteed here.
    """
    base_categories = (Number, Consonant, Consonant_Head_Letter,
                       #SPEC-DRAFT Consonant_Placeholder,
                       Tone_Letter,
                       Vowel_Independent)  #SPEC-DRAFT
    lo_categories = (Avagraha, Bindu, Consonant_Final, Consonant_Medial,
                     Consonant_Subjoined, Vowel, Vowel_Dependent)
    if UISC in base_categories:
        return True
    return UGC == Lo and UISC in lo_categories
21,
16,
21,
3,
1435959644
] |
def is_BASE_NUM(U, UISC, UGC):
    """Base number: Brahmi joining numbers only."""
    return Brahmi_Joining_Number == UISC
21,
16,
21,
3,
1435959644
] |
def is_CGJ(U, UISC, UGC):
    """COMBINING GRAPHEME JOINER, matched purely by code point U+034F."""
    return 0x034F == U
21,
16,
21,
3,
1435959644
] |
def is_CONS_FINAL_MOD(U, UISC, UGC):
    """Final consonant modifier: currently only Syllable_Modifier."""
    #SPEC-DRAFT return UISC in [Consonant_Final_Modifier, Syllable_Modifier]
    return Syllable_Modifier == UISC
21,
16,
21,
3,
1435959644
] |
def is_CONS_MOD(U, UISC, UGC):
    """Consonant modifier: nukta, gemination mark, or consonant killer."""
    return UISC in (Nukta, Gemination_Mark, Consonant_Killer)
21,
16,
21,
3,
1435959644
] |
def is_CONS_WITH_STACKER(U, UISC, UGC):
    """Consonant that stacks the following consonant below/after it."""
    return Consonant_With_Stacker == UISC
21,
16,
21,
3,
1435959644
] |
def is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC):
    # Hard-coded code points; background:
    # https://github.com/harfbuzz/harfbuzz/issues/1102
    # https://github.com/harfbuzz/harfbuzz/issues/1379
    return U in (0x11046, 0x1134D)
21,
16,
21,
3,
1435959644
] |
def is_ZWNJ(U, UISC, UGC):
    """ZERO WIDTH NON-JOINER, identified by its Indic syllabic category."""
    return Non_Joiner == UISC
21,
16,
21,
3,
1435959644
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.