repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
nephila/djangocms-blog
|
djangocms_blog/cms_menus.py
|
BlogNavModifier.modify
|
python
|
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
app = None
config = None
if getattr(request, 'current_page', None) and request.current_page.application_urls:
app = apphook_pool.get_apphook(request.current_page.application_urls)
if app and app.app_config:
namespace = resolve(request.path).namespace
if not self._config.get(namespace, False):
self._config[namespace] = app.get_config(namespace)
config = self._config[namespace]
try:
if config and (
not isinstance(config, BlogConfig) or
config.menu_structure != MENU_TYPE_CATEGORIES
):
return nodes
except AttributeError: # pragma: no cover
# in case `menu_structure` is not present in config
return nodes
if post_cut:
return nodes
current_post = getattr(request, get_setting('CURRENT_POST_IDENTIFIER'), None)
category = None
if current_post and current_post.__class__ == Post:
category = current_post.categories.first()
if not category:
return nodes
for node in nodes:
if '{0}-{1}'.format(category.__class__.__name__, category.pk) == node.id:
node.selected = True
return nodes
|
Actual modifier function
:param request: request
:param nodes: complete list of nodes
:param namespace: Menu namespace
:param root_id: eventual root_id
:param post_cut: flag for modifier stage
:param breadcrumb: flag for modifier stage
:return: nodeslist
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/cms_menus.py#L133-L175
|
[
"def get_setting(name):\n from django.conf import settings\n from django.utils.translation import ugettext_lazy as _\n from meta import settings as meta_settings\n\n PERMALINKS = (\n ('full_date', _('Full date')),\n ('short_date', _('Year / Month')),\n ('category', _('Category')),\n ('slug', _('Just slug')),\n )\n PERMALINKS_URLS = {\n 'full_date': r'^(?P<year>\\d{4})/(?P<month>\\d{1,2})/(?P<day>\\d{1,2})/(?P<slug>\\w[-\\w]*)/$',\n 'short_date': r'^(?P<year>\\d{4})/(?P<month>\\d{1,2})/(?P<slug>\\w[-\\w]*)/$',\n 'category': r'^(?P<category>\\w[-\\w]*)/(?P<slug>\\w[-\\w]*)/$',\n 'slug': r'^(?P<slug>\\w[-\\w]*)/$',\n }\n MENU_TYPES = (\n (MENU_TYPE_COMPLETE, _('Categories and posts')),\n (MENU_TYPE_CATEGORIES, _('Categories only')),\n (MENU_TYPE_POSTS, _('Posts only')),\n (MENU_TYPE_NONE, _('None')),\n )\n SITEMAP_CHANGEFREQ_LIST = (\n ('always', _('always')),\n ('hourly', _('hourly')),\n ('daily', _('daily')),\n ('weekly', _('weekly')),\n ('monthly', _('monthly')),\n ('yearly', _('yearly')),\n ('never', _('never')),\n )\n default = {\n 'BLOG_IMAGE_THUMBNAIL_SIZE': getattr(settings, 'BLOG_IMAGE_THUMBNAIL_SIZE', {\n 'size': '120x120',\n 'crop': True,\n 'upscale': False\n }),\n\n 'BLOG_IMAGE_FULL_SIZE': getattr(settings, 'BLOG_IMAGE_FULL_SIZE', {\n 'size': '640x120',\n 'crop': True,\n 'upscale': False\n }),\n\n 'BLOG_URLCONF': getattr(settings, 'BLOG_URLCONF', 'djangocms_blog.urls'),\n 'BLOG_PAGINATION': getattr(settings, 'BLOG_PAGINATION', 10),\n 'BLOG_LATEST_POSTS': getattr(settings, 'BLOG_LATEST_POSTS', 5),\n 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT': getattr(\n settings, 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT', 100\n ),\n 'BLOG_META_DESCRIPTION_LENGTH': getattr(\n settings, 'BLOG_META_DESCRIPTION_LENGTH', 320\n ),\n 'BLOG_META_TITLE_LENGTH': getattr(\n settings, 'BLOG_META_TITLE_LENGTH', 70\n ),\n 'BLOG_MENU_TYPES': MENU_TYPES,\n 'BLOG_MENU_EMPTY_CATEGORIES': getattr(settings, 'MENU_EMPTY_CATEGORIES', True),\n 'BLOG_TYPE': getattr(settings, 'BLOG_TYPE', 'Article'),\n 
'BLOG_TYPES': meta_settings.OBJECT_TYPES,\n 'BLOG_FB_TYPE': getattr(settings, 'BLOG_FB_TYPE', 'Article'),\n 'BLOG_FB_TYPES': getattr(settings, 'BLOG_FB_TYPES', meta_settings.FB_TYPES),\n 'BLOG_FB_APPID': getattr(settings, 'BLOG_FB_APPID', meta_settings.FB_APPID),\n 'BLOG_FB_PROFILE_ID': getattr(settings, 'BLOG_FB_PROFILE_ID', meta_settings.FB_PROFILE_ID),\n 'BLOG_FB_PUBLISHER': getattr(settings, 'BLOG_FB_PUBLISHER', meta_settings.FB_PUBLISHER),\n 'BLOG_FB_AUTHOR_URL': getattr(settings, 'BLOG_FB_AUTHOR_URL', 'get_author_url'),\n 'BLOG_FB_AUTHOR': getattr(settings, 'BLOG_FB_AUTHOR', 'get_author_name'),\n 'BLOG_TWITTER_TYPE': getattr(settings, 'BLOG_TWITTER_TYPE', 'summary'),\n 'BLOG_TWITTER_TYPES': getattr(settings, 'BLOG_TWITTER_TYPES', meta_settings.TWITTER_TYPES),\n 'BLOG_TWITTER_SITE': getattr(settings, 'BLOG_TWITTER_SITE', meta_settings.TWITTER_SITE),\n 'BLOG_TWITTER_AUTHOR': getattr(settings, 'BLOG_TWITTER_AUTHOR', 'get_author_twitter'),\n 'BLOG_GPLUS_TYPE': getattr(settings, 'BLOG_GPLUS_TYPE', 'Blog'),\n 'BLOG_GPLUS_TYPES': getattr(settings, 'BLOG_GPLUS_TYPES', meta_settings.GPLUS_TYPES),\n 'BLOG_GPLUS_AUTHOR': getattr(settings, 'BLOG_GPLUS_AUTHOR', 'get_author_gplus'),\n 'BLOG_ENABLE_COMMENTS': getattr(settings, 'BLOG_ENABLE_COMMENTS', True),\n 'BLOG_USE_ABSTRACT': getattr(settings, 'BLOG_USE_ABSTRACT', True),\n 'BLOG_USE_PLACEHOLDER': getattr(settings, 'BLOG_USE_PLACEHOLDER', True),\n 'BLOG_USE_RELATED': getattr(settings, 'BLOG_USE_RELATED', True),\n 'BLOG_MULTISITE': getattr(settings, 'BLOG_MULTISITE', True),\n 'BLOG_AUTHOR_DEFAULT': getattr(settings, 'BLOG_AUTHOR_DEFAULT', True),\n 'BLOG_DEFAULT_PUBLISHED': getattr(settings, 'BLOG_DEFAULT_PUBLISHED', False),\n 'BLOG_ADMIN_POST_FIELDSET_FILTER': getattr(\n settings, 'BLOG_ADMIN_POST_FIELDSET_FILTER', False),\n 'BLOG_AVAILABLE_PERMALINK_STYLES': getattr(\n settings, 'BLOG_AVAILABLE_PERMALINK_STYLES', PERMALINKS\n ),\n 'BLOG_PERMALINK_URLS': getattr(settings, 'BLOG_PERMALINK_URLS', PERMALINKS_URLS),\n 
'BLOG_DEFAULT_OBJECT_NAME': getattr(settings, 'BLOG_DEFAULT_OBJECT_NAME', 'Article'),\n\n 'BLOG_AUTO_SETUP': getattr(settings, 'BLOG_AUTO_SETUP', True),\n 'BLOG_AUTO_HOME_TITLE': getattr(settings, 'BLOG_AUTO_HOME_TITLE', 'Home'),\n 'BLOG_AUTO_BLOG_TITLE': getattr(settings, 'BLOG_AUTO_BLOG_TITLE', 'Blog'),\n 'BLOG_AUTO_APP_TITLE': getattr(settings, 'BLOG_AUTO_APP_TITLE', 'Blog'),\n 'BLOG_AUTO_NAMESPACE': getattr(settings, 'BLOG_AUTO_NAMESPACE', 'Blog'),\n\n 'BLOG_SITEMAP_PRIORITY_DEFAULT': getattr(settings, 'BLOG_SITEMAP_PRIORITY_DEFAULT', '0.5'),\n 'BLOG_SITEMAP_CHANGEFREQ': getattr(\n settings, 'BLOG_SITEMAP_CHANGEFREQ', SITEMAP_CHANGEFREQ_LIST\n ),\n 'BLOG_SITEMAP_CHANGEFREQ_DEFAULT': getattr(\n settings, 'BLOG_SITEMAP_CHANGEFREQ_DEFAULT', 'monthly'\n ),\n\n 'BLOG_ENABLE_SEARCH': getattr(settings, 'BLOG_ENABLE_SEARCH', True),\n 'BLOG_CURRENT_POST_IDENTIFIER': getattr(\n settings, 'BLOG_CURRENT_POST_IDENTIFIER', 'djangocms_post_current'),\n 'BLOG_CURRENT_NAMESPACE': getattr(\n settings, 'BLOG_CURRENT_NAMESPACE', 'djangocms_post_current_config'),\n 'BLOG_ENABLE_THROUGH_TOOLBAR_MENU': getattr(\n settings, 'BLOG_ENABLE_THROUGH_TOOLBAR_MENU', False),\n\n 'BLOG_PLUGIN_MODULE_NAME': getattr(settings, 'BLOG_PLUGIN_MODULE_NAME', _('Blog')),\n 'BLOG_LATEST_ENTRIES_PLUGIN_NAME': getattr(\n settings, 'BLOG_LATEST_ENTRIES_PLUGIN_NAME', _('Latest Blog Articles')),\n 'BLOG_LATEST_ENTRIES_PLUGIN_NAME_CACHED': getattr(\n settings, 'BLOG_LATEST_ENTRIES_PLUGIN_NAME_CACHED', _('Latest Blog Articles - Cache')),\n 'BLOG_AUTHOR_POSTS_PLUGIN_NAME': getattr(\n settings, 'BLOG_AUTHOR_POSTS_PLUGIN_NAME', _('Author Blog Articles')),\n 'BLOG_TAGS_PLUGIN_NAME': getattr(\n settings, 'BLOG_TAGS_PLUGIN_NAME', _('Tags')),\n 'BLOG_CATEGORY_PLUGIN_NAME': getattr(\n settings, 'BLOG_CATEGORY_PLUGIN_NAME', _('Categories')),\n 'BLOG_ARCHIVE_PLUGIN_NAME': getattr(\n settings, 'BLOG_ARCHIVE_PLUGIN_NAME', _('Archive')),\n 'BLOG_FEED_CACHE_TIMEOUT': getattr(\n settings, 'BLOG_FEED_CACHE_TIMEOUT', 3600),\n 
'BLOG_FEED_INSTANT_ITEMS': getattr(\n settings, 'BLOG_FEED_INSTANT_ITEMS', 50),\n 'BLOG_FEED_LATEST_ITEMS': getattr(\n settings, 'BLOG_FEED_LATEST_ITEMS', 10),\n 'BLOG_FEED_TAGS_ITEMS': getattr(\n settings, 'BLOG_FEED_TAGS_ITEMS', 10),\n 'BLOG_LIVEBLOG_PLUGINS': getattr(\n settings, 'BLOG_LIVEBLOG_PLUGINS', ('LiveblogPlugin',)),\n\n 'BLOG_PLUGIN_TEMPLATE_FOLDERS': getattr(\n settings, 'BLOG_PLUGIN_TEMPLATE_FOLDERS', (('plugins', _('Default template')),)),\n\n }\n return default['BLOG_%s' % name]\n"
] |
class BlogNavModifier(Modifier):
"""
This navigation modifier makes sure that when
a particular blog post is viewed,
a corresponding category is selected in menu
"""
_config = {}
|
nephila/djangocms-blog
|
djangocms_blog/managers.py
|
TaggedFilterItem.tagged
|
python
|
def tagged(self, other_model=None, queryset=None):
tags = self._taglist(other_model, queryset)
return self.get_queryset().filter(tags__in=tags).distinct()
|
Restituisce una queryset di elementi del model taggati,
o con gli stessi tag di un model o un queryset
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/managers.py#L16-L22
|
[
"def _taglist(self, other_model=None, queryset=None):\n \"\"\"\n Restituisce una lista di id di tag comuni al model corrente e al model\n o queryset passati come argomento\n \"\"\"\n from taggit.models import TaggedItem\n filter = None\n if queryset is not None:\n filter = set()\n for item in queryset.all():\n filter.update(item.tags.all())\n filter = set([tag.id for tag in filter])\n elif other_model is not None:\n filter = set(TaggedItem.objects.filter(\n content_type__model=other_model.__name__.lower()\n ).values_list('tag_id', flat=True))\n tags = set(TaggedItem.objects.filter(\n content_type__model=self.model.__name__.lower()\n ).values_list('tag_id', flat=True))\n if filter is not None:\n tags = tags.intersection(filter)\n return list(tags)\n"
] |
class TaggedFilterItem(object):
def _taglist(self, other_model=None, queryset=None):
"""
Restituisce una lista di id di tag comuni al model corrente e al model
o queryset passati come argomento
"""
from taggit.models import TaggedItem
filter = None
if queryset is not None:
filter = set()
for item in queryset.all():
filter.update(item.tags.all())
filter = set([tag.id for tag in filter])
elif other_model is not None:
filter = set(TaggedItem.objects.filter(
content_type__model=other_model.__name__.lower()
).values_list('tag_id', flat=True))
tags = set(TaggedItem.objects.filter(
content_type__model=self.model.__name__.lower()
).values_list('tag_id', flat=True))
if filter is not None:
tags = tags.intersection(filter)
return list(tags)
def tag_list(self, other_model=None, queryset=None):
"""
Restituisce un queryset di tag comuni al model corrente e
al model o queryset passati come argomento
"""
from taggit.models import Tag
return Tag.objects.filter(id__in=self._taglist(other_model, queryset))
def tag_list_slug(self, other_model=None, queryset=None):
queryset = self.tag_list(other_model, queryset)
return queryset.values('slug')
def tag_cloud(self, other_model=None, queryset=None, published=True, on_site=False):
from taggit.models import TaggedItem
if on_site:
queryset = queryset.on_site()
tag_ids = self._taglist(other_model, queryset)
kwargs = {}
if published:
kwargs = TaggedItem.bulk_lookup_kwargs(self.model.objects.published())
kwargs['tag_id__in'] = tag_ids
counted_tags = dict(TaggedItem.objects
.filter(**kwargs)
.values('tag')
.annotate(count=models.Count('tag'))
.values_list('tag', 'count'))
tags = TaggedItem.tag_model().objects.filter(pk__in=counted_tags.keys())
for tag in tags:
tag.count = counted_tags[tag.pk]
return sorted(tags, key=lambda x: -x.count)
|
nephila/djangocms-blog
|
djangocms_blog/managers.py
|
TaggedFilterItem._taglist
|
python
|
def _taglist(self, other_model=None, queryset=None):
from taggit.models import TaggedItem
filter = None
if queryset is not None:
filter = set()
for item in queryset.all():
filter.update(item.tags.all())
filter = set([tag.id for tag in filter])
elif other_model is not None:
filter = set(TaggedItem.objects.filter(
content_type__model=other_model.__name__.lower()
).values_list('tag_id', flat=True))
tags = set(TaggedItem.objects.filter(
content_type__model=self.model.__name__.lower()
).values_list('tag_id', flat=True))
if filter is not None:
tags = tags.intersection(filter)
return list(tags)
|
Restituisce una lista di id di tag comuni al model corrente e al model
o queryset passati come argomento
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/managers.py#L24-L45
| null |
class TaggedFilterItem(object):
def tagged(self, other_model=None, queryset=None):
"""
Restituisce una queryset di elementi del model taggati,
o con gli stessi tag di un model o un queryset
"""
tags = self._taglist(other_model, queryset)
return self.get_queryset().filter(tags__in=tags).distinct()
def tag_list(self, other_model=None, queryset=None):
"""
Restituisce un queryset di tag comuni al model corrente e
al model o queryset passati come argomento
"""
from taggit.models import Tag
return Tag.objects.filter(id__in=self._taglist(other_model, queryset))
def tag_list_slug(self, other_model=None, queryset=None):
queryset = self.tag_list(other_model, queryset)
return queryset.values('slug')
def tag_cloud(self, other_model=None, queryset=None, published=True, on_site=False):
from taggit.models import TaggedItem
if on_site:
queryset = queryset.on_site()
tag_ids = self._taglist(other_model, queryset)
kwargs = {}
if published:
kwargs = TaggedItem.bulk_lookup_kwargs(self.model.objects.published())
kwargs['tag_id__in'] = tag_ids
counted_tags = dict(TaggedItem.objects
.filter(**kwargs)
.values('tag')
.annotate(count=models.Count('tag'))
.values_list('tag', 'count'))
tags = TaggedItem.tag_model().objects.filter(pk__in=counted_tags.keys())
for tag in tags:
tag.count = counted_tags[tag.pk]
return sorted(tags, key=lambda x: -x.count)
|
nephila/djangocms-blog
|
djangocms_blog/managers.py
|
TaggedFilterItem.tag_list
|
python
|
def tag_list(self, other_model=None, queryset=None):
from taggit.models import Tag
return Tag.objects.filter(id__in=self._taglist(other_model, queryset))
|
Restituisce un queryset di tag comuni al model corrente e
al model o queryset passati come argomento
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/managers.py#L47-L53
|
[
"def _taglist(self, other_model=None, queryset=None):\n \"\"\"\n Restituisce una lista di id di tag comuni al model corrente e al model\n o queryset passati come argomento\n \"\"\"\n from taggit.models import TaggedItem\n filter = None\n if queryset is not None:\n filter = set()\n for item in queryset.all():\n filter.update(item.tags.all())\n filter = set([tag.id for tag in filter])\n elif other_model is not None:\n filter = set(TaggedItem.objects.filter(\n content_type__model=other_model.__name__.lower()\n ).values_list('tag_id', flat=True))\n tags = set(TaggedItem.objects.filter(\n content_type__model=self.model.__name__.lower()\n ).values_list('tag_id', flat=True))\n if filter is not None:\n tags = tags.intersection(filter)\n return list(tags)\n"
] |
class TaggedFilterItem(object):
def tagged(self, other_model=None, queryset=None):
"""
Restituisce una queryset di elementi del model taggati,
o con gli stessi tag di un model o un queryset
"""
tags = self._taglist(other_model, queryset)
return self.get_queryset().filter(tags__in=tags).distinct()
def _taglist(self, other_model=None, queryset=None):
"""
Restituisce una lista di id di tag comuni al model corrente e al model
o queryset passati come argomento
"""
from taggit.models import TaggedItem
filter = None
if queryset is not None:
filter = set()
for item in queryset.all():
filter.update(item.tags.all())
filter = set([tag.id for tag in filter])
elif other_model is not None:
filter = set(TaggedItem.objects.filter(
content_type__model=other_model.__name__.lower()
).values_list('tag_id', flat=True))
tags = set(TaggedItem.objects.filter(
content_type__model=self.model.__name__.lower()
).values_list('tag_id', flat=True))
if filter is not None:
tags = tags.intersection(filter)
return list(tags)
def tag_list_slug(self, other_model=None, queryset=None):
queryset = self.tag_list(other_model, queryset)
return queryset.values('slug')
def tag_cloud(self, other_model=None, queryset=None, published=True, on_site=False):
from taggit.models import TaggedItem
if on_site:
queryset = queryset.on_site()
tag_ids = self._taglist(other_model, queryset)
kwargs = {}
if published:
kwargs = TaggedItem.bulk_lookup_kwargs(self.model.objects.published())
kwargs['tag_id__in'] = tag_ids
counted_tags = dict(TaggedItem.objects
.filter(**kwargs)
.values('tag')
.annotate(count=models.Count('tag'))
.values_list('tag', 'count'))
tags = TaggedItem.tag_model().objects.filter(pk__in=counted_tags.keys())
for tag in tags:
tag.count = counted_tags[tag.pk]
return sorted(tags, key=lambda x: -x.count)
|
nephila/djangocms-blog
|
djangocms_blog/managers.py
|
GenericDateTaggedManager.get_months
|
python
|
def get_months(self, queryset=None, current_site=True):
if queryset is None:
queryset = self.get_queryset()
if current_site:
queryset = queryset.on_site()
dates_qs = queryset.values_list(queryset.start_date_field, queryset.fallback_date_field)
dates = []
for blog_dates in dates_qs:
if blog_dates[0]:
current_date = blog_dates[0]
else:
current_date = blog_dates[1]
dates.append((current_date.year, current_date.month,))
date_counter = Counter(dates)
dates = set(dates)
dates = sorted(dates, reverse=True)
return [{'date': now().replace(year=year, month=month, day=1),
'count': date_counter[year, month]} for year, month in dates]
|
Get months with aggregate count (how much posts is in the month).
Results are ordered by date.
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/managers.py#L163-L184
| null |
class GenericDateTaggedManager(TaggedFilterItem, AppHookConfigTranslatableManager):
use_for_related_fields = True
queryset_class = GenericDateQuerySet
def get_queryset(self, *args, **kwargs):
return super(GenericDateTaggedManager, self).get_queryset(*args, **kwargs)
def published(self, current_site=True):
return self.get_queryset().published(current_site)
def available(self, current_site=True):
return self.get_queryset().available(current_site)
def archived(self, current_site=True):
return self.get_queryset().archived(current_site)
def published_future(self, current_site=True):
return self.get_queryset().published_future(current_site)
def filter_by_language(self, language, current_site=True):
return self.get_queryset().filter_by_language(language, current_site)
def on_site(self, site=None):
return self.get_queryset().on_site(site)
|
nephila/djangocms-blog
|
djangocms_blog/liveblog/consumers.py
|
liveblog_connect
|
python
|
def liveblog_connect(message, apphook, lang, post):
try:
post = Post.objects.namespace(apphook).language(lang).active_translations(slug=post).get()
except Post.DoesNotExist:
message.reply_channel.send({
'text': json.dumps({'error': 'no_post'}),
})
return
Group(post.liveblog_group).add(message.reply_channel)
message.reply_channel.send({"accept": True})
|
Connect users to the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/liveblog/consumers.py#L11-L30
| null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import json
from channels import Group
from djangocms_blog.models import Post
def liveblog_disconnect(message, apphook, lang, post):
"""
Disconnect users to the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug
"""
try:
post = Post.objects.namespace(apphook).language(lang).active_translations(slug=post).get()
except Post.DoesNotExist:
message.reply_channel.send({
'text': json.dumps({'error': 'no_post'}),
})
return
Group(post.liveblog_group).discard(message.reply_channel)
|
nephila/djangocms-blog
|
djangocms_blog/liveblog/consumers.py
|
liveblog_disconnect
|
python
|
def liveblog_disconnect(message, apphook, lang, post):
try:
post = Post.objects.namespace(apphook).language(lang).active_translations(slug=post).get()
except Post.DoesNotExist:
message.reply_channel.send({
'text': json.dumps({'error': 'no_post'}),
})
return
Group(post.liveblog_group).discard(message.reply_channel)
|
Disconnect users to the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug
|
train
|
https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/liveblog/consumers.py#L33-L51
| null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import json
from channels import Group
from djangocms_blog.models import Post
def liveblog_connect(message, apphook, lang, post):
"""
Connect users to the group of the given post according to the given language
Return with an error message if a post cannot be found
:param message: channel connect message
:param apphook: apphook config namespace
:param lang: language
:param post: post slug
"""
try:
post = Post.objects.namespace(apphook).language(lang).active_translations(slug=post).get()
except Post.DoesNotExist:
message.reply_channel.send({
'text': json.dumps({'error': 'no_post'}),
})
return
Group(post.liveblog_group).add(message.reply_channel)
message.reply_channel.send({"accept": True})
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
login
|
python
|
def login(password, phone=None, email=None, rememberLogin=True):
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
|
登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L218-L245
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
user_play_list
|
python
|
def user_play_list(uid, offset=0, limit=1000):
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
|
获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L248-L261
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
user_dj
|
python
|
def user_dj(uid, offset=0, limit=30):
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
|
获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L264-L279
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
search
|
python
|
def search(keyword, type=1, offset=0, limit=30):
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
|
搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L282-L302
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
user_follows
|
python
|
def user_follows(uid, offset='0', limit=30):
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
|
获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L305-L320
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
user_event
|
python
|
def user_event(uid):
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
|
获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L345-L358
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
user_record
|
python
|
def user_record(uid, type=0):
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
|
获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L361-L374
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
    """
    The :class:`NCloudBot` object. It carries out all functionality of
    NCloudBot
    Recommended interface is with the NCloudBot`s functions.
    """
    # Class-level requests session, shared by every NCloudBot instance.
    req = requests.Session()
    username = None
    # Logical method name -> NetEase Cloud Music API path.
    # Paths containing ``%s`` are interpolated from ``self.params`` in send().
    _METHODS = {
        # Login via cellphone number
        'LOGIN': '/weapi/login/cellphone?csrf_token=',
        # Login via email
        'EMAIL_LOGIN': '/weapi/login?csrf_token=',
        # Get the current user's info
        'USER_INFO': '/weapi/subcount',
        # Get a user's playlists (incl. collected ones) by user id; no login required
        'USER_PLAY_LIST': '/weapi/user/playlist',
        # Get a user's DJ radio programs
        'USER_DJ': '/weapi/dj/program/%s',
        # Get the list of users a user follows
        'USER_FOLLOWS': '/weapi/user/getfollows/%s',
        # Get a user's followers
        'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
        # Get a user's activity feed
        'USER_EVENT': '/weapi/event/get/%s',
        # Get a user's play records
        'USER_RECORD': '/weapi/v1/play/record',
        # Get friends' feed (shared videos, music, photos, ...), matching the
        # "friends" page of the web client
        'EVENT': '/weapi/v1/event/get',
        # Get high-quality (featured) playlists
        'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
        # Get all tracks of a playlist, by playlist id
        'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
        # Get a song's URL, by song id
        'MUSIC_URL': '/weapi/song/enhance/player/url',
        # Search for songs by keyword
        'SEARCH': '/api/search/get/',
        # Get a song's lyrics, by song id
        'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
        # Get all comments of a song (song id + limit)
        'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
        # Get all comments of an album (album id + limit)
        'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
        # Like a comment (resource id such as a song/MV id, plus comment id)
        'LIKE_COMMENT': '/weapi/v1/comment/%s',
        # Get song details, by song id
        'SONG_DETAIL': '/weapi/v3/song/detail',
        # Get album content
        'ALBUM': '/weapi/v1/album/%s',
        # Personal FM (login required)
        'PERSONAL_FM': '/weapi/v1/radio/get'
    }
    __NETEAST_HOST = 'http://music.163.com'
    def __init__(self):
        # Logical API method name; validated against _METHODS by __setattr__.
        self.method = None
        # POST body fields (encrypted for weapi endpoints in send()).
        self.data = {}
        # URL interpolation values for endpoints whose path contains %s.
        self.params = {}
        self.response = Response()
    def __repr__(self):
        return '<NCloudBot [%s]>' % (self.method)
    def __setattr__(self, name, value):
        # Reject unknown method names as early as assignment time.
        if (name == 'method') and (value):
            if value not in self._METHODS.keys():
                raise InvalidMethod()
        object.__setattr__(self, name, value)
    def _get_webapi_requests(self):
        """Update headers of webapi for Requests."""
        headers = {
            'Accept':
            '*/*',
            'Accept-Language':
            'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
            'Connection':
            'keep-alive',
            'Content-Type':
            'application/x-www-form-urlencoded',
            'Referer':
            'http://music.163.com',
            'Host':
            'music.163.com',
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
        }
        NCloudBot.req.headers.update(headers)
        return NCloudBot.req
    def _get_requests(self):
        """Update headers for the plain (non-weapi) API and return the session."""
        headers = {
            'Referer':
            self.__NETEAST_HOST,
            'Cookie':
            'appver=2.0.2;',
            'Content-Type':
            'application/x-www-form-urlencoded',
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
        }
        NCloudBot.req.headers.update(headers)
        return NCloudBot.req
    def _build_response(self, resp):
        """Build internal Response object from given response."""
        # rememberLogin
        # if self.method is 'LOGIN' and resp.json().get('code') == 200:
        # cookiesJar.save_cookies(resp, NCloudBot.username)
        self.response.content = resp.content
        self.response.status_code = resp.status_code
        self.response.headers = resp.headers
    def send(self):
        """Send the request described by ``self.method`` / ``self.data`` and
        populate ``self.response``.  Exceptions are captured into
        ``self.response.error`` rather than raised."""
        success = False  # NOTE(review): unused local, kept as-is
        if self.method is None:
            raise ParamsError()
        try:
            if self.method == 'SEARCH':
                # SEARCH goes through the plain API with an unencrypted body.
                req = self._get_requests()
                _url = self.__NETEAST_HOST + self._METHODS[self.method]
                resp = req.post(_url, data=self.data)
                self._build_response(resp)
                self.response.ok = True
            else:
                if isinstance(self.data, dict):
                    # weapi endpoints expect an encrypted form body
                    # (see util.encrypt.encrypted_request).
                    data = encrypted_request(self.data)
                req = self._get_webapi_requests()
                _url = self.__NETEAST_HOST + self._METHODS[self.method]
                if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
                    _url = _url % self.params['uid']
                if self.method in ('LYRIC', 'MUSIC_COMMENT'):
                    _url = _url % self.params['id']
                # NOTE(review): ALBUM_COMMENT / LIKE_COMMENT / ALBUM paths also
                # contain '%s' but are never interpolated here — confirm.
                # GET
                # NOTE(review): ('LYRIC') is a plain string, not a 1-tuple, so
                # this is a substring test; ('LYRIC',) was probably intended.
                if self.method in ('LYRIC'):
                    resp = req.get(_url)
                else:
                    resp = req.post(_url, data=data)
                self._build_response(resp)
                self.response.ok = True
        except Exception as why:
            traceback.print_exc()
            print 'Requests Exception', why
            # self._build_response(why)
            self.response.error = why
class Response(object):
    """
    The :class:`Response` object. All :class:`NCloudBot` objects contain a
    :class:`NCloudBot.response <response>` attribute.
    """
    def __init__(self):
        # Raw body bytes of the HTTP response (None until a request ran).
        self.content = None
        # Response headers mapping, used for charset detection in json().
        self.headers = None
        self.status_code = None
        # True once a request completed without raising.
        self.ok = False
        # Exception captured by NCloudBot.send(), if any.
        self.error = None
    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)
    def raise_for_status(self):
        """Re-raise the exception captured during send(), if there was one."""
        if self.error:
            raise self.error
    def json(self):
        """Returns the json-encoded content of a response, if any."""
        # BUG FIX: the original tested ``not self.headers``, so it called
        # get_encoding_from_headers(None) exactly when no headers were
        # available (and crashed on ``len(None)`` for empty responses).
        # Charset detection only makes sense when headers are present.
        if self.headers and self.content and len(self.content) > 3:
            encoding = get_encoding_from_headers(self.headers)
            if encoding is not None:
                return json.loads(self.content.decode(encoding))
        return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
    """Log in and return a :class:`Response` object.

    :param password: plain-text password; it is MD5-hashed before sending,
        as the API expects.
    :param phone: (optional) phone-number login
    :param email: (optional) email login
    :param rememberLogin: (optional) remember the login, default True
    :raises ParamsError: if neither phone nor email is given, or password
        is None.
    """
    if (phone is None) and (email is None):
        raise ParamsError()
    if password is None:
        raise ParamsError()
    r = NCloudBot()
    # r.username = phone or email
    # The API expects the MD5 hex digest of the password, not the plain text.
    md5 = hashlib.md5()
    if isinstance(password, bytes):
        md5.update(password)
    else:
        # hashlib only accepts bytes; encode unicode input explicitly.
        md5.update(password.encode('utf-8'))
    password = md5.hexdigest()
    # SECURITY FIX: the original did ``print password`` here, leaking the
    # (hashed) credential to stdout — removed.
    r.data = {'password': password, 'rememberLogin': rememberLogin}
    if phone is not None:
        r.data['phone'] = phone
        r.method = 'LOGIN'
    else:
        r.data['username'] = email
        r.method = 'EMAIL_LOGIN'
    r.send()
    return r.response
def user_play_list(uid, offset=0, limit=1000):
    """Fetch a user's playlists, including collected ones.

    :param uid: user id, obtained from login or other endpoints
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum rows returned, default 1000
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_PLAY_LIST'
    bot.data = {
        'offset': offset,
        'uid': uid,
        'limit': limit,
        'csrf_token': '',
    }
    bot.send()
    return bot.response
def user_dj(uid, offset=0, limit=30):
    """Fetch a user's DJ radio programs.

    :param uid: user id, obtained from login or other endpoints
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum rows returned, default 30
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_DJ'
    bot.params = {'uid': uid}
    bot.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
    bot.send()
    return bot.response
def search(keyword, type=1, offset=0, limit=30):
    """Search songs, artists, albums, playlists or users.

    :param keyword: search keyword
    :param type: (optional) search type — 1: song, 100: artist,
        1000: playlist, 1002: user
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum rows returned, default 30
    """
    if keyword is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'SEARCH'
    # The plain search endpoint takes string-valued form fields.
    bot.data = {
        's': keyword,
        'limit': str(limit),
        'type': str(type),
        'offset': str(offset),
    }
    bot.send()
    return bot.response
def user_follows(uid, offset='0', limit=30):
    """Fetch the list of users that *uid* follows.

    :param uid: user id, obtained from login or other endpoints
    :param offset: (optional) paging start position, default '0'
    :param limit: (optional) maximum rows returned, default 30
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_FOLLOWS'
    bot.params = {'uid': uid}
    bot.data = {'offset': offset, 'limit': limit, 'order': True}
    bot.send()
    return bot.response
def user_followeds(uid, offset='0', limit=30):
    """Fetch a user's followers.

    :param uid: user id, obtained from login or other endpoints
    :param offset: (optional) paging start position, default '0'
    :param limit: (optional) maximum rows returned, default 30
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_FOLLOWEDS'
    bot.data = {
        'userId': uid,
        'offset': offset,
        'limit': limit,
        "csrf_token": "",
    }
    bot.send()
    return bot.response
def user_event(uid):
    """Fetch a user's activity feed.

    :param uid: user id, obtained from login or other endpoints
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_EVENT'
    bot.params = {'uid': uid}
    bot.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
    bot.send()
    return bot.response
def event():
    """Fetch friends' feed: shared videos, music, activities and so on."""
    bot = NCloudBot()
    bot.method = 'EVENT'
    bot.data = {"csrf_token": ""}
    bot.send()
    return bot.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
    """Fetch NetEase Cloud Music's high-quality (featured) playlists.

    :param cat: (optional) playlist category, default '全部' (all)
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum rows returned, default 20
    """
    bot = NCloudBot()
    bot.method = 'TOP_PLAYLIST_HIGHQUALITY'
    bot.data = {'cat': cat, 'offset': offset, 'limit': limit}
    bot.send()
    return bot.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
    """Fetch every track of a playlist by its id (the featured-playlist
    listing only exposes playlist names and ids, hence this endpoint).

    :param id: playlist id
    :param limit: (optional) maximum rows returned, default 20
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'PLAY_LIST_DETAIL'
    bot.data = {'id': id, 'limit': limit, "csrf_token": ""}
    bot.send()
    return bot.response
# MUSIC_URL
def music_url(ids=None):
    """Get song download/stream URLs by song ids.

    :param ids: list of song ids; defaults to an empty list
    :raises ParamsError: if *ids* is not a list.
    """
    # FIX: the original used a mutable default argument (ids=[]), a shared
    # object across calls; None + normalization is the safe equivalent.
    if ids is None:
        ids = []
    if not isinstance(ids, list):
        raise ParamsError()
    r = NCloudBot()
    r.method = 'MUSIC_URL'
    r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
    r.send()
    return r.response
# LYRIC
def lyric(id):
    """Fetch the lyrics of a song by its id.

    :param id: song id
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'LYRIC'
    bot.params = {'id': id}
    bot.send()
    return bot.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
    """Fetch the comment list of a song.

    :param id: song id
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum rows returned, default 20
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'MUSIC_COMMENT'
    bot.params = {'id': id}
    bot.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
    bot.send()
    return bot.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
    """Fetch the comment list of an album.

    :param id: album id
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum rows returned, default 20
    """
    # NOTE(review): the ALBUM_COMMENT path contains '%s', but NCloudBot.send()
    # only interpolates params for other methods — verify this endpoint works.
    if id is None:
        raise ParamsError()
    r = NCloudBot()
    r.method = 'ALBUM_COMMENT'
    r.params = {'id': id}
    r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
    r.send()
    return r.response
# SONG_DETAIL
def song_detail(ids):
    """Fetch detailed information for songs by their ids.

    :param ids: list of song ids
    :raises ParamsError: if *ids* is not a list.
    """
    if not isinstance(ids, list):
        raise ParamsError()
    payload = [{'id': song_id} for song_id in ids]
    bot = NCloudBot()
    bot.method = 'SONG_DETAIL'
    bot.data = {'c': json.dumps(payload), 'ids': payload, "csrf_token": ""}
    bot.send()
    return bot.response
# PERSONAL_FM
def personal_fm():
    """Fetch the personal FM feed; requires a prior login()."""
    bot = NCloudBot()
    bot.method = 'PERSONAL_FM'
    bot.data = {"csrf_token": ""}
    bot.send()
    return bot.response
class NCloudBotException(Exception):
    """Base exception raised while NCloudBot processes a request."""
class ParamsError(NCloudBotException):
    """Raised when a required parameter is missing or of the wrong type."""
class InvalidMethod(NCloudBotException):
    """Raised when an unsupported API method name is assigned."""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
event
|
python
|
def event():
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
|
获取好友的动态,包括分享视频、音乐、动态等
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L377-L386
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
top_playlist_highquality
|
python
|
def top_playlist_highquality(cat='全部', offset=0, limit=20):
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
|
获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L390-L402
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
    """Log in to NetEase Cloud Music and return the :class:`Response`.

    :param password: account password (plain text; hashed before sending)
    :param phone: (optional) log in with a phone number
    :param email: (optional) log in with an email address
    :param rememberLogin: (optional) remember the session, default True
    :raises ParamsError: if password is missing, or neither phone nor email given
    """
    if (phone is None) and (email is None):
        raise ParamsError()
    if password is None:
        raise ParamsError()
    r = NCloudBot()
    # r.username = phone or email
    # The API expects the MD5 hex digest of the password, never plain text.
    # Security fix: removed the debug ``print password`` that leaked the
    # password hash to stdout.
    password = hashlib.md5(password.encode('utf-8')).hexdigest()
    r.data = {'password': password, 'rememberLogin': rememberLogin}
    if phone is not None:
        r.data['phone'] = phone
        r.method = 'LOGIN'
    else:
        r.data['username'] = email
        r.method = 'EMAIL_LOGIN'
    r.send()
    return r.response
def user_play_list(uid, offset=0, limit=1000):
    """Fetch a user's playlists, including the ones they collected.

    :param uid: user ID, obtainable from login or other endpoints
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum number of rows, default 1000
    :raises ParamsError: if *uid* is None
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_PLAY_LIST'
    bot.data = {'uid': uid, 'offset': offset, 'limit': limit, 'csrf_token': ''}
    bot.send()
    return bot.response
def user_dj(uid, offset=0, limit=30):
    """Fetch a user's DJ (radio) programs.

    :param uid: user ID, obtainable from login or other endpoints
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum number of rows, default 30
    :raises ParamsError: if *uid* is None
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_DJ'
    bot.params = {'uid': uid}
    bot.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
    bot.send()
    return bot.response
def search(keyword, type=1, offset=0, limit=30):
    """Search NetEase Cloud Music for songs, artists, albums, etc.

    :param keyword: the search term
    :param type: (optional) search type — 1: song, 100: artist,
        1000: playlist, 1002: user
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum number of rows, default 30
    :raises ParamsError: if *keyword* is None
    """
    if keyword is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'SEARCH'
    # The search endpoint expects all numeric fields as strings.
    bot.data = {
        's': keyword,
        'type': str(type),
        'offset': str(offset),
        'limit': str(limit),
    }
    bot.send()
    return bot.response
def user_follows(uid, offset='0', limit=30):
    """Fetch the list of accounts a user follows.

    :param uid: user ID, obtainable from login or other endpoints
    :param offset: (optional) paging start position, default '0'
    :param limit: (optional) maximum number of rows, default 30
    :raises ParamsError: if *uid* is None
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_FOLLOWS'
    bot.params = {'uid': uid}
    bot.data = {'order': True, 'offset': offset, 'limit': limit}
    bot.send()
    return bot.response
def user_followeds(uid, offset='0', limit=30):
    """Fetch a user's followers.

    :param uid: user ID, obtainable from login or other endpoints
    :param offset: (optional) paging start position, default '0'
    :param limit: (optional) maximum number of rows, default 30
    :raises ParamsError: if *uid* is None
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_FOLLOWEDS'
    bot.data = {
        'userId': uid,
        'offset': offset,
        'limit': limit,
        "csrf_token": "",
    }
    bot.send()
    return bot.response
def user_event(uid):
    """Fetch a user's activity feed (events).

    :param uid: user ID, obtainable from login or other endpoints
    :raises ParamsError: if *uid* is None
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_EVENT'
    bot.params = {'uid': uid}
    bot.data = {'getcounts': True, 'time': -1, "csrf_token": ""}
    bot.send()
    return bot.response
def user_record(uid, type=0):
    """Fetch a user's play history; requires a prior login.

    :param uid: user ID, obtainable from login or other endpoints
    :param type: (optional) 0: all records, 1: weekly data
    :raises ParamsError: if *uid* is None
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_RECORD'
    bot.data = {'uid': uid, 'type': type, "csrf_token": ""}
    bot.send()
    return bot.response
def event():
    """Fetch friends' activity: shared videos, music, status updates, etc."""
    bot = NCloudBot()
    bot.method = 'EVENT'
    bot.data = {"csrf_token": ""}
    bot.send()
    return bot.response
# TOP_PLAYLIST_HIGHQUALITY
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
    """Fetch every track in a playlist by its ID.

    The high-quality playlist listing only exposes names and IDs, so this
    endpoint is needed to resolve a playlist into its songs.

    :param id: playlist ID
    :param limit: (optional) maximum number of rows, default 20
    :raises ParamsError: if *id* is None
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'PLAY_LIST_DETAIL'
    bot.data = {'id': id, 'limit': limit, "csrf_token": ""}
    bot.send()
    return bot.response
# MUSIC_URL
def music_url(ids=None):
    """Resolve download URLs for songs by their IDs.

    :param ids: list of song IDs (defaults to an empty list)
    :raises ParamsError: if *ids* is not a list
    """
    # Bug fix: the original default was the mutable literal ``ids=[]``,
    # shared across calls.  ``None`` sentinel keeps the behavior identical
    # for callers while avoiding the shared-state pitfall.
    if ids is None:
        ids = []
    if not isinstance(ids, list):
        raise ParamsError()
    r = NCloudBot()
    r.method = 'MUSIC_URL'
    r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
    r.send()
    return r.response
# LYRIC
def lyric(id):
    """Fetch the lyric of a song by its ID.

    :param id: song ID
    :raises ParamsError: if *id* is None
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'LYRIC'
    bot.params = {'id': id}
    bot.send()
    return bot.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
    """Fetch the comment list of a song.

    :param id: song ID
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum number of rows, default 20
    :raises ParamsError: if *id* is None
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'MUSIC_COMMENT'
    bot.params = {'id': id}
    bot.data = {'rid': id, 'offset': offset, 'limit': limit, "csrf_token": ""}
    bot.send()
    return bot.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
    """Fetch the comment list of an album.

    :param id: album ID
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum number of rows, default 20
    :raises ParamsError: if *id* is None
    """
    if id is None:
        raise ParamsError()
    r = NCloudBot()
    r.method = 'ALBUM_COMMENT'
    r.params = {'id': id}
    r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
    r.send()
    return r.response
# SONG_DETAIL
def song_detail(ids):
    """Fetch detailed information for songs by their IDs.

    :param ids: list of song IDs
    :raises ParamsError: if *ids* is not a list
    """
    if not isinstance(ids, list):
        raise ParamsError()
    # The endpoint wants both a JSON-encoded and a raw list of {'id': ...}.
    payload = [{'id': song_id} for song_id in ids]
    bot = NCloudBot()
    bot.method = 'SONG_DETAIL'
    bot.data = {'c': json.dumps(payload), 'ids': payload, "csrf_token": ""}
    bot.send()
    return bot.response
# PERSONAL_FM
def personal_fm():
    """Fetch the personal FM stream; must be called after a successful login."""
    bot = NCloudBot()
    bot.method = 'PERSONAL_FM'
    bot.data = {"csrf_token": ""}
    bot.send()
    return bot.response
class NCloudBotException(Exception):
    """Base exception raised while NCloudBot is handling a request."""
class ParamsError(NCloudBotException):
    """Raised when a required parameter is missing or has the wrong type."""
class InvalidMethod(NCloudBotException):
    """Raised when an unsupported API method name is assigned."""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
play_list_detail
|
python
|
def play_list_detail(id, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
|
获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L406-L420
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
    def __setattr__(self, name, value):
        # Validate ``method`` at assignment time so an unsupported API name
        # fails fast (InvalidMethod) instead of at request time.
        if (name == 'method') and (value):
            if value not in self._METHODS.keys():
                raise InvalidMethod()
        object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
    """Fetch NetEase Cloud Music's curated high-quality playlists.

    :param cat: (optional) playlist category, default '全部' (all),
        e.g. Mandarin, Western, etc.
    :param offset: (optional) paging start position, default 0
    :param limit: (optional) maximum number of rows, default 20
    """
    bot = NCloudBot()
    bot.method = 'TOP_PLAYLIST_HIGHQUALITY'
    bot.data = {'cat': cat, 'offset': offset, 'limit': limit}
    bot.send()
    return bot.response
# PLAY_LIST_DETAIL
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
music_url
|
python
|
def music_url(ids=[]):
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
|
通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L424-L436
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
lyric
|
python
|
def lyric(id):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
|
通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L440-L452
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
music_comment
|
python
|
def music_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
|
获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L456-L471
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
# PERSONAL_FM
def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
class NCloudBotException(Exception):
""" 这是 NCloudBot 当处理请求时候的异常"""
class ParamsError(NCloudBotException):
""" 参数错误 """
class InvalidMethod(NCloudBotException):
""" 不支持的方法被调用"""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
song_detail
|
python
|
def song_detail(ids):
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response
|
通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L488-L503
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
class Response(object):
"""
The :class:`Response` object. All :class:`NCloudBot` objects contain a
:class:`NCloudBot.response <response>` attribute.
"""
def __init__(self):
self.content = None
self.headers = None
self.status_code = None
self.ok = False
self.error = None
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def raise_for_status(self):
if self.error:
raise self.error
def json(self):
"""Returns the json-encoded content of a response, if any."""
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
    """Fetch a user's activity feed.

    :param uid: user id, obtainable via login or other endpoints
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_EVENT'
    bot.params = {'uid': uid}
    bot.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
    bot.send()
    return bot.response
def user_record(uid, type=0):
    """Fetch a user's play records; requires a prior login.

    :param uid: user id, obtainable via login or other endpoints
    :param type: (optional) 0: all records, 1: weekly data only
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_RECORD'
    bot.data = {'type': type, 'uid': uid, "csrf_token": ""}
    bot.send()
    return bot.response
def event():
    """Fetch friends' activity feed (shared videos, music, posts, ...)."""
    bot = NCloudBot()
    bot.method = 'EVENT'
    bot.data = {"csrf_token": ""}
    bot.send()
    return bot.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
    """Fetch NetEase's curated high-quality playlists.

    :param cat: (optional) playlist category, defaults to '全部' (all)
    :param offset: (optional) paging offset, defaults to 0
    :param limit: (optional) maximum number of rows, defaults to 20
    """
    bot = NCloudBot()
    bot.method = 'TOP_PLAYLIST_HIGHQUALITY'
    bot.data = {'cat': cat, 'offset': offset, 'limit': limit}
    bot.send()
    return bot.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
    """Fetch every track in a playlist by playlist ID.

    The high-quality playlist listing only exposes names and IDs, so this
    endpoint resolves a playlist ID into the music it contains.

    :param id: playlist ID
    :param limit: (optional) maximum number of rows, defaults to 20
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'PLAY_LIST_DETAIL'
    bot.data = {'id': id, 'limit': limit, "csrf_token": ""}
    bot.send()
    return bot.response
# MUSIC_URL
def music_url(ids=None):
    """Fetch playable/download URLs for songs by their IDs.

    :param ids: list of song IDs (defaults to an empty list)
    :raises ParamsError: if ``ids`` is not a list
    """
    # Use None as the default instead of the original mutable ``ids=[]``,
    # which is shared across every call of the function.
    if ids is None:
        ids = []
    if not isinstance(ids, list):
        raise ParamsError()
    r = NCloudBot()
    r.method = 'MUSIC_URL'
    r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
    r.send()
    return r.response
# LYRIC
def lyric(id):
    """Fetch the lyric resource of a song by its ID.

    :param id: song ID
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'LYRIC'
    bot.params = {'id': id}
    bot.send()
    return bot.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
    """Fetch the comment list of a song.

    :param id: song ID
    :param offset: (optional) paging offset, defaults to 0
    :param limit: (optional) maximum number of rows, defaults to 20
    """
    if id is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'MUSIC_COMMENT'
    bot.params = {'id': id}
    bot.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
    bot.send()
    return bot.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
    """Fetch the comment list of an album.

    :param id: album ID
    :param offset: (optional) paging offset, defaults to 0
    :param limit: (optional) maximum number of rows, defaults to 20
    """
    if id is None:
        raise ParamsError()
    r = NCloudBot()
    r.method = 'ALBUM_COMMENT'
    r.params = {'id': id}
    r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
    r.send()
    return r.response
# SONG_DETAIL
# PERSONAL_FM
def personal_fm():
    """Fetch the personal FM feed; must be called after login()."""
    bot = NCloudBot()
    bot.method = 'PERSONAL_FM'
    bot.data = {"csrf_token": ""}
    bot.send()
    return bot.response
class NCloudBotException(Exception):
    """Base exception raised while NCloudBot processes a request."""
class ParamsError(NCloudBotException):
    """Raised when a required parameter is missing or invalid."""
class InvalidMethod(NCloudBotException):
    """Raised when an unsupported API method name is assigned."""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
personal_fm
|
python
|
def personal_fm():
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response
|
个人的 FM ,必须在登录之后调用,即 login 之后调用
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L507-L514
|
[
"def send(self):\n \"\"\"Sens the request.\"\"\"\n success = False\n if self.method is None:\n raise ParamsError()\n try:\n if self.method == 'SEARCH':\n req = self._get_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n resp = req.post(_url, data=self.data)\n self._build_response(resp)\n self.response.ok = True\n else:\n if isinstance(self.data, dict):\n data = encrypted_request(self.data)\n\n req = self._get_webapi_requests()\n _url = self.__NETEAST_HOST + self._METHODS[self.method]\n\n if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):\n _url = _url % self.params['uid']\n\n if self.method in ('LYRIC', 'MUSIC_COMMENT'):\n _url = _url % self.params['id']\n # GET\n if self.method in ('LYRIC'):\n resp = req.get(_url)\n else:\n resp = req.post(_url, data=data)\n self._build_response(resp)\n self.response.ok = True\n except Exception as why:\n traceback.print_exc()\n print 'Requests Exception', why\n # self._build_response(why)\n self.response.error = why\n"
] |
# coding:utf-8
"""
NCloudBot.core
~~~~~~~~~~~~~~
This module implements the main NCloudBot system.
:copyright: (c) 2017 by xiyouMc.
:license: ISC, see LICENSE for more details.
"""
import hashlib
import requests
import json
import cookielib
import traceback
from .util.encrypt import encrypted_request
from .util import cookiesJar
from utils import get_encoding_from_headers
__title__ = 'ncmbot'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = 'XiyouMc'
__license__ = 'ISC'
__copyright__ = 'Copyright 2017 XiyouMc'
__all__ = [
'NCloudBot', 'Response', 'login', 'user_play_list', 'user_dj', 'search',
'user_follows', 'user_followeds', 'user_event', 'event',
'top_playlist_highquality', 'play_list_detail', 'music_url', 'lyric',
'music_comment', 'song_detail', 'personal_fm', 'user_record'
]
class NCloudBot(object):
    """
    The :class:`NCloudBot` object. It carries out all functionality of
    NCloudBot.

    Recommended interface is with the module-level functions.
    """

    # Shared session so cookies persist across calls.
    req = requests.Session()
    username = None

    # Logical API names -> NetEase endpoint paths. Entries containing
    # ``%s`` are filled in from ``self.params`` by send().
    _METHODS = {
        # Login (cellphone)
        'LOGIN': '/weapi/login/cellphone?csrf_token=',
        # Login (email)
        'EMAIL_LOGIN': '/weapi/login?csrf_token=',
        # User account info
        'USER_INFO': '/weapi/subcount',
        # A user's playlists (incl. collected ones); no login required
        'USER_PLAY_LIST': '/weapi/user/playlist',
        # A user's radio programmes
        'USER_DJ': '/weapi/dj/program/%s',
        # Accounts a user follows
        'USER_FOLLOWS': '/weapi/user/getfollows/%s',
        # A user's followers
        'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
        # A user's activity feed
        'USER_EVENT': '/weapi/event/get/%s',
        # A user's play records
        'USER_RECORD': '/weapi/v1/play/record',
        # Friends' activity feed (shared videos, music, photos, ...)
        'EVENT': '/weapi/v1/event/get',
        # Curated high-quality playlists
        'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
        # All tracks in a playlist, by playlist ID
        'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
        # Song URL by song ID
        'MUSIC_URL': '/weapi/song/enhance/player/url',
        # Song search by keyword
        'SEARCH': '/api/search/get/',
        # Lyric by song ID
        'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
        # All comments of a song, by song ID plus limit
        'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
        # All comments of an album, by album ID plus limit
        'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
        # Like a comment (resource ID such as song/MV ID, plus comment ID)
        'LIKE_COMMENT': '/weapi/v1/comment/%s',
        # Song details by song ID
        'SONG_DETAIL': '/weapi/v3/song/detail',
        # Album contents
        'ALBUM': '/weapi/v1/album/%s',
        # Personal FM (login required)
        'PERSONAL_FM': '/weapi/v1/radio/get'
    }

    __NETEAST_HOST = 'http://music.163.com'

    def __init__(self):
        self.method = None        # one of the _METHODS keys
        self.data = {}            # POST payload (encrypted for weapi calls)
        self.params = {}          # values substituted into %s endpoints
        self.response = Response()

    def __repr__(self):
        return '<NCloudBot [%s]>' % (self.method)

    def __setattr__(self, name, value):
        # Validate API method names as they are assigned.
        if (name == 'method') and (value):
            if value not in self._METHODS.keys():
                raise InvalidMethod()
        object.__setattr__(self, name, value)

    def _get_webapi_requests(self):
        """Update headers of webapi for Requests."""
        headers = {
            'Accept':
            '*/*',
            'Accept-Language':
            'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
            'Connection':
            'keep-alive',
            'Content-Type':
            'application/x-www-form-urlencoded',
            'Referer':
            'http://music.163.com',
            'Host':
            'music.163.com',
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
        }
        NCloudBot.req.headers.update(headers)
        return NCloudBot.req

    def _get_requests(self):
        """Update headers for the plain (non-weapi) API."""
        headers = {
            'Referer':
            self.__NETEAST_HOST,
            'Cookie':
            'appver=2.0.2;',
            'Content-Type':
            'application/x-www-form-urlencoded',
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
        }
        NCloudBot.req.headers.update(headers)
        return NCloudBot.req

    def _build_response(self, resp):
        """Build internal Response object from given response."""
        self.response.content = resp.content
        self.response.status_code = resp.status_code
        self.response.headers = resp.headers

    def send(self):
        """Send the request and populate ``self.response``."""
        if self.method is None:
            raise ParamsError()
        try:
            if self.method == 'SEARCH':
                # SEARCH uses the plain /api endpoint, no encryption.
                req = self._get_requests()
                _url = self.__NETEAST_HOST + self._METHODS[self.method]
                resp = req.post(_url, data=self.data)
                self._build_response(resp)
                self.response.ok = True
            else:
                if isinstance(self.data, dict):
                    data = encrypted_request(self.data)
                else:
                    # Bug fix: the original left ``data`` unbound
                    # (NameError) for non-dict payloads.
                    data = self.data

                req = self._get_webapi_requests()
                _url = self.__NETEAST_HOST + self._METHODS[self.method]

                if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
                    _url = _url % self.params['uid']
                # Bug fix: ALBUM_COMMENT also carries a %s placeholder and
                # album_comment() supplies params['id'], but it was never
                # substituted. (LIKE_COMMENT/ALBUM are still unhandled here.)
                if self.method in ('LYRIC', 'MUSIC_COMMENT', 'ALBUM_COMMENT'):
                    _url = _url % self.params['id']
                # LYRIC is the only GET endpoint; the original tested
                # ``in ('LYRIC')`` — a string, not a tuple.
                if self.method == 'LYRIC':
                    resp = req.get(_url)
                else:
                    resp = req.post(_url, data=data)
                self._build_response(resp)
                self.response.ok = True
        except Exception as why:
            traceback.print_exc()
            # Parenthesized so this also parses under Python 3.
            print('Requests Exception: %s' % why)
            self.response.error = why
class Response(object):
    """
    The :class:`Response` object. All :class:`NCloudBot` objects contain a
    :class:`NCloudBot.response <response>` attribute.
    """

    def __init__(self):
        self.content = None      # raw response body (bytes/str)
        self.headers = None      # response headers, if any
        self.status_code = None  # HTTP status code
        self.ok = False          # True once a request completed
        self.error = None        # exception raised during send(), if any

    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)

    def raise_for_status(self):
        """Re-raise the error stored by send(), if one occurred."""
        if self.error:
            raise self.error

    def json(self):
        """Returns the json-encoded content of a response, if any."""
        # Bug fix: the original tested ``not self.headers`` and then
        # dereferenced the absent headers, crashing on header-less
        # responses. Only consult headers for a charset when present.
        if self.headers and len(self.content) > 3:
            encoding = get_encoding_from_headers(self.headers)
            if encoding is not None:
                return json.loads(self.content.decode(encoding))
        return json.loads(self.content)
def login(password, phone=None, email=None, rememberLogin=True):
    """Log in and return a :class:`Response` object.

    :param password: NetEase cloud music password (MD5-hashed before
        it is sent)
    :param phone: (optional) log in with a phone number
    :param email: (optional) log in with an email address
    :param rememberLogin: (optional) remember the login, defaults to True
    :raises ParamsError: if the password, or both identifiers, are missing
    """
    if (phone is None) and (email is None):
        raise ParamsError()
    if password is None:
        raise ParamsError()
    r = NCloudBot()
    # The web API expects the MD5 hex digest of the password.
    # Security fix: the original also printed the digest to stdout.
    md5 = hashlib.md5()
    md5.update(password)
    password = md5.hexdigest()
    r.data = {'password': password, 'rememberLogin': rememberLogin}
    if phone is not None:
        r.data['phone'] = phone
        r.method = 'LOGIN'
    else:
        r.data['username'] = email
        r.method = 'EMAIL_LOGIN'
    r.send()
    return r.response
def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response
def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response
def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response
def user_followeds(uid, offset='0', limit=30):
"""获取用户粉丝列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWEDS'
r.data = {
'userId': uid,
'offset': offset,
'limit': limit,
"csrf_token": ""
}
r.send()
return r.response
def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response
# TOP_PLAYLIST_HIGHQUALITY
def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response
# PLAY_LIST_DETAIL
def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response
# MUSIC_URL
def music_url(ids=None):
    """Fetch playable/download URLs for songs by their IDs.

    :param ids: list of song IDs (defaults to an empty list)
    :raises ParamsError: if ``ids`` is not a list
    """
    # Use None as the default instead of the original mutable ``ids=[]``,
    # which is shared across every call of the function.
    if ids is None:
        ids = []
    if not isinstance(ids, list):
        raise ParamsError()
    r = NCloudBot()
    r.method = 'MUSIC_URL'
    r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
    r.send()
    return r.response
# LYRIC
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
# MUSIC_COMMENT
def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# ALBUM_COMMENT
def album_comment(id, offset=0, limit=20):
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'ALBUM_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response
# SONG_DETAIL
def song_detail(ids):
    """Fetch detailed information for songs by their IDs.

    :param ids: list of song IDs
    """
    if not isinstance(ids, list):
        raise ParamsError()
    payload = [{'id': song_id} for song_id in ids]
    bot = NCloudBot()
    bot.method = 'SONG_DETAIL'
    bot.data = {'c': json.dumps(payload), 'ids': payload, "csrf_token": ""}
    bot.send()
    return bot.response
# PERSONAL_FM
class NCloudBotException(Exception):
    """Base exception raised while NCloudBot processes a request."""
class ParamsError(NCloudBotException):
    """Raised when a required parameter is missing or invalid."""
class InvalidMethod(NCloudBotException):
    """Raised when an unsupported API method name is assigned."""
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
NCloudBot._get_webapi_requests
|
python
|
def _get_webapi_requests(self):
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
|
Update headers of webapi for Requests.
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L104-L124
| null |
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
NCloudBot._build_response
|
python
|
def _build_response(self, resp):
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
|
Build internal Response object from given response.
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L141-L148
| null |
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
NCloudBot.send
|
python
|
def send(self):
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why
|
Sens the request.
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L150-L185
|
[
"def encrypted_request(text):\n text = json.dumps(text)\n secKey = createSecretKey(16)\n encText = aesEncrypt(aesEncrypt(text, nonce), secKey)\n encSecKey = rsaEncrypt(secKey, pubKey, modulus)\n data = {\n 'params': encText,\n 'encSecKey': encSecKey\n }\n return data",
"def _get_webapi_requests(self):\n \"\"\"Update headers of webapi for Requests.\"\"\"\n\n headers = {\n 'Accept':\n '*/*',\n 'Accept-Language':\n 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',\n 'Connection':\n 'keep-alive',\n 'Content-Type':\n 'application/x-www-form-urlencoded',\n 'Referer':\n 'http://music.163.com',\n 'Host':\n 'music.163.com',\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'\n }\n NCloudBot.req.headers.update(headers)\n return NCloudBot.req\n",
"def _get_requests(self):\n headers = {\n 'Referer':\n self.__NETEAST_HOST,\n 'Cookie':\n 'appver=2.0.2;',\n 'Content-Type':\n 'application/x-www-form-urlencoded',\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'\n }\n\n NCloudBot.req.headers.update(headers)\n return NCloudBot.req\n",
"def _build_response(self, resp):\n \"\"\"Build internal Response object from given response.\"\"\"\n # rememberLogin\n # if self.method is 'LOGIN' and resp.json().get('code') == 200:\n # cookiesJar.save_cookies(resp, NCloudBot.username)\n self.response.content = resp.content\n self.response.status_code = resp.status_code\n self.response.headers = resp.headers\n"
] |
class NCloudBot(object):
"""
The :class:`NCloudBot` object. It carries out all functionality of
NCloudBot
Recommended interface is with the NCloudBot`s functions.
"""
req = requests.Session()
username = None
_METHODS = {
# 登录模块
'LOGIN': '/weapi/login/cellphone?csrf_token=',
# 邮箱登录
'EMAIL_LOGIN': '/weapi/login?csrf_token=',
# 获取用户信息
'USER_INFO': '/weapi/subcount',
# 获取用户歌单,收藏的歌单 , 指定 UserId , 不需要登录
'USER_PLAY_LIST': '/weapi/user/playlist',
# 获取用户电台
'USER_DJ': '/weapi/dj/program/%s',
# 获取用户关注列表
'USER_FOLLOWS': '/weapi/user/getfollows/%s',
# 获取用户粉丝
'USER_FOLLOWEDS': '/weapi/user/getfolloweds/',
# 获取用户动态
'USER_EVENT': '/weapi/event/get/%s',
# 获取用户播放记录
'USER_RECORD': '/weapi/v1/play/record',
# 获取各种动态,对应网页版网易云,朋友界面的各种动态消息,如分享的视频、音乐、照片等
'EVENT': '/weapi/v1/event/get',
# 获取精品歌单
'TOP_PLAYLIST_HIGHQUALITY': '/weapi/playlist/highquality/list',
# 传入歌单ID 获取对应歌单内的所有音乐
'PLAY_LIST_DETAIL': '/weapi/v3/playlist/detail',
# 传入音乐ID ,获取对应音乐的URL
'MUSIC_URL': '/weapi/song/enhance/player/url',
# 传入关键词,获取歌曲列表
'SEARCH': '/api/search/get/',
# 传入音乐ID,获取对应音乐的歌词
'LYRIC': '/api/song/lyric?os=osx&id=%s&lv=-1&kv=-1&tv=-1',
# 传入音乐ID 和 limit 参数,可获取该音乐的所有评论
'MUSIC_COMMENT': '/weapi/v1/resource/comments/R_SO_4_%s/?csrf_token=',
# 传入专辑ID 和 limit 参数,可获取该专辑的所有评论
'ALBUM_COMMENT': '/weapi/v1/resource/comments/R_AL_3_%s/?csrf_token=',
# 给评论点赞,入参是资源ID,如歌曲ID,MV iD 和 评论ID
'LIKE_COMMENT': '/weapi/v1/comment/%s',
# 传入音乐ID,获取歌曲详情
'SONG_DETAIL': '/weapi/v3/song/detail',
# 获取专辑内容
'ALBUM': '/weapi/v1/album/%s',
# 私人 FM (需要登录)
'PERSONAL_FM': '/weapi/v1/radio/get'
}
__NETEAST_HOST = 'http://music.163.com'
def __init__(self):
self.method = None
self.data = {}
self.params = {}
self.response = Response()
def __repr__(self):
return '<NCloudBot [%s]>' % (self.method)
def __setattr__(self, name, value):
if (name == 'method') and (value):
if value not in self._METHODS.keys():
raise InvalidMethod()
object.__setattr__(self, name, value)
def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _get_requests(self):
headers = {
'Referer':
self.__NETEAST_HOST,
'Cookie':
'appver=2.0.2;',
'Content-Type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req
def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers
|
xiyouMc/ncmbot
|
ncmbot/core.py
|
Response.json
|
python
|
def json(self):
if not self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content)
|
Returns the json-encoded content of a response, if any.
|
train
|
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L208-L215
|
[
"def get_encoding_from_headers(headers):\n \"\"\"Returns encodings from given HTTP Header Dict.\n\n :param headers: dictionary to extract encoding from.\n \"\"\"\n\n content_type = headers.get('content-type')\n\n if not content_type:\n return None\n\n content_type, params = cgi.parse_header(content_type)\n\n if 'charset' in params:\n return params['charset'].strip(\"'\\\"\")\n\n if 'text' in content_type:\n return 'ISO-8859-1'"
] |
class Response(object):
    """
    The :class:`Response` object. All :class:`NCloudBot` objects carry a
    :class:`NCloudBot.response <response>` attribute holding one of these.
    """

    def __init__(self):
        self.content = None      # raw response body
        self.headers = None      # response headers, if any
        self.status_code = None  # HTTP status code
        self.ok = False          # True once a request completed
        self.error = None        # exception raised during send(), if any

    def __repr__(self):
        return '<Response [%s]>' % self.status_code

    def raise_for_status(self):
        """Re-raise the stored transport error, if one occurred."""
        if self.error:
            raise self.error
mgedmin/findimports
|
findimports.py
|
adjust_lineno
|
python
|
def adjust_lineno(filename, lineno, name):
line = linecache.getline(filename, lineno)
# Hack warning: might be fooled by comments
rx = re.compile(r'\b%s\b' % re.escape(name) if name != '*' else '[*]')
while line and not rx.search(line):
lineno += 1
line = linecache.getline(filename, lineno)
return lineno
|
Adjust the line number of an import.
Needed because import statements can span multiple lines, and our lineno
is always the first line number.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L89-L101
| null |
#!/usr/bin/python
"""
FindImports is a script that processes Python module dependencies. Currently
it can be used for finding unused imports and graphing module dependencies
(with graphviz).
Syntax: findimports.py [options] [filename|dirname ...]
Options:
-h, --help This help message
-i, --imports Print dependency graph (default action).
-d, --dot Print dependency graph in dot (graphviz) format.
-n, --names Print dependency graph with all imported names.
-u, --unused Print unused imports.
-a, --all Print unused imports even if there's a comment.
--duplicate Print duplicate imports.
-v Print more information (currently only affects --duplicate).
-N, --noext Omit external dependencies.
-p, --packages Convert the module graph to a package graph.
-l N, --level N Collapse subpackages deeper than the Nth level.
-c, --collapse Collapse dependency cycles.
-T, --tests Collapse packages named 'tests' and 'ftests' with parent
packages.
FindImports requires Python 2.6 or later.
Notes:
findimports processes doctest blocks inside docstrings.
findimports.py -u will not complain about import statements that have
a comment on the same line, e.g.:
from somewhereelse import somename # reexport
findimports.py -u -a will ignore comments and print these statements also.
Caching:
If you want to produce several different reports from the same dataset,
you can do it as follows:
findimports.py --write-cache foo.importcache dirname
findimports.py foo.importcache -d -T > graph1.dot
findimports.py foo.importcache -d -N -c -p -l 2 > graph2.dot
Copyright (c) 2003--2015 Marius Gedminas <marius@pov.lt>
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 675 Mass
Ave, Cambridge, MA 02139, USA.
"""
from __future__ import print_function
import ast
import doctest
import linecache
import optparse
import os
import pickle
import re
import sys
import zipfile
from operator import attrgetter
__version__ = '1.5.2.dev0'
__author__ = 'Marius Gedminas <marius@gedmin.as>'
__licence__ = 'GPL v2 or later'
__url__ = 'https://github.com/mgedmin/findimports'
class ImportInfo(object):
    """A record of a name and the location of the import statement."""

    def __init__(self, name, filename, lineno, level):
        self.name = name
        self.filename = filename
        self.lineno = lineno
        self.level = level

    def __repr__(self):
        cls = self.__class__.__name__
        return '%s(%r, %r, %r, %r)' % (cls, self.name, self.filename,
                                       self.lineno, self.level)
class ImportFinder(ast.NodeVisitor):
    """AST visitor that collects all imported names in its imports attribute.
    For example, the following import statements in the AST tree
    import a, b.c, d as e
    from q.w.e import x, y as foo, z
    from woof import *
    will cause ``imports`` to contain
    a
    b.c
    d
    q.w.e.x
    q.w.e.y
    q.w.e.z
    woof.*
    """

    lineno_offset = 0  # needed when recursively parsing docstrings

    def __init__(self, filename):
        # filename is used for the ImportInfo records and for looking up
        # source lines when adjusting line numbers.
        self.imports = []
        self.filename = filename

    def processImport(self, name, imported_as, full_name, level, node):
        """Record one imported name as an ImportInfo object."""
        # Multi-line import statements report the statement's first line;
        # scan forward to the line where this particular name appears.
        lineno = adjust_lineno(self.filename,
                               self.lineno_offset + node.lineno,
                               name)
        info = ImportInfo(full_name, self.filename, lineno, level)
        self.imports.append(info)

    def visit_Import(self, node):
        # Handles "import a, b.c, d as e".
        for alias in node.names:
            self.processImport(alias.name, alias.asname, alias.name, None, node)

    def visit_ImportFrom(self, node):
        # Handles "from q.w.e import x, y as foo, z"; __future__ imports
        # are not real dependencies, so they are skipped.
        if node.module == '__future__':
            return
        for alias in node.names:
            name = alias.name
            imported_as = alias.asname
            # node.module is None for relative imports like "from . import x".
            fullname = '%s.%s' % (node.module, name) if node.module else name
            self.processImport(name, imported_as, fullname, node.level, node)

    def visitSomethingWithADocstring(self, node):
        # Docstrings may contain doctests, which may contain imports.
        # ClassDef and FunctionDef have a 'lineno' attribute, Module doesn't.
        lineno = getattr(node, 'lineno', None)
        self.processDocstring(ast.get_docstring(node, clean=False), lineno)
        self.generic_visit(node)

    visit_Module = visitSomethingWithADocstring
    visit_ClassDef = visitSomethingWithADocstring
    visit_FunctionDef = visitSomethingWithADocstring

    def processDocstring(self, docstring, lineno):
        """Parse doctest examples in *docstring* and visit their ASTs too."""
        if not docstring:
            return
        if lineno is None:
            # Module nodes don't have a lineno
            lineno = 0
        dtparser = doctest.DocTestParser()
        try:
            examples = dtparser.get_examples(docstring)
        except Exception:
            # NOTE(review): the 'filename' format argument is unused; the
            # literal "(unknown)" looks like it replaced "{filename}" --
            # confirm against upstream findimports.
            print("(unknown):{lineno}: error while parsing doctest".format(
                filename=self.filename, lineno=lineno), file=sys.stderr)
            raise
        for example in examples:
            try:
                source = example.source
                if not isinstance(source, str):
                    source = source.encode('UTF-8')
                node = ast.parse(source, filename='<docstring>')
            except SyntaxError:
                print("(unknown):{lineno}: syntax error in doctest".format(
                    filename=self.filename, lineno=lineno), file=sys.stderr)
            else:
                # Offset line numbers so imports inside the doctest are
                # reported relative to the real file, then restore.
                self.lineno_offset += lineno + example.lineno
                self.visit(node)
                self.lineno_offset -= lineno + example.lineno
class Scope(object):
    """A namespace: tracks names imported in it and which are still unused."""

    def __init__(self, parent=None, name=None):
        self.parent = parent
        self.name = name
        self.imports = {}
        self.unused_names = {}

    def haveImport(self, name):
        """True if *name* was imported in this scope or an enclosing one."""
        if name in self.imports:
            return True
        return bool(self.parent) and self.parent.haveImport(name)

    def whereImported(self, name):
        """Return the ImportInfo for *name*, searching enclosing scopes."""
        try:
            return self.imports[name]
        except KeyError:
            return self.parent.whereImported(name)

    def addImport(self, name, filename, level, lineno):
        """Record an import; it counts as unused until useName() is called."""
        info = ImportInfo(name, filename, lineno, level)
        self.imports[name] = info
        self.unused_names[name] = info

    def useName(self, name):
        """Mark *name* as used in this scope and every enclosing scope."""
        self.unused_names.pop(name, None)
        if self.parent:
            self.parent.useName(name)
class ImportFinderAndNameTracker(ImportFinder):
    """ImportFinder that also keeps track on used names."""

    warn_about_duplicates = False
    verbose = False

    def __init__(self, filename):
        ImportFinder.__init__(self, filename)
        # Scopes form a stack; the top-level scope collects module globals.
        self.scope = self.top_level = Scope(name=filename)
        self.scope_stack = []
        self.unused_names = []

    def newScope(self, parent, name=None):
        # Push the current scope and enter a fresh child scope.
        self.scope_stack.append(self.scope)
        self.scope = Scope(parent, name)

    def leaveScope(self):
        # Whatever is still unused when the scope closes gets reported.
        self.unused_names += self.scope.unused_names.values()
        self.scope = self.scope_stack.pop()

    def leaveAllScopes(self):
        # newScope()/leaveScope() calls are always balanced so scope_stack
        # should be empty at this point
        assert not self.scope_stack
        self.unused_names += self.scope.unused_names.values()
        self.unused_names.sort(key=attrgetter('lineno'))

    def processDocstring(self, docstring, lineno):
        # Doctest code gets its own scope so its imports don't leak out.
        self.newScope(self.top_level, 'docstring')
        ImportFinder.processDocstring(self, docstring, lineno)
        self.leaveScope()

    def visit_FunctionDef(self, node):
        # Each function body is a separate namespace.
        self.newScope(self.scope, 'function %s' % node.name)
        ImportFinder.visit_FunctionDef(self, node)
        self.leaveScope()

    def processImport(self, name, imported_as, full_name, level, node):
        ImportFinder.processImport(self, name, imported_as, full_name, level, node)
        if not imported_as:
            imported_as = name
        if imported_as != "*":
            lineno = self.lineno_offset + node.lineno
            if (self.warn_about_duplicates and
                    self.scope.haveImport(imported_as)):
                where = self.scope.whereImported(imported_as).lineno
                line = linecache.getline(self.filename, lineno)
                # A comment on the same line suppresses the warning.
                if '#' not in line:
                    # NOTE(review): "(unknown)" with an unused 'filename'
                    # format argument looks like an anonymized "{filename}";
                    # confirm against upstream findimports.
                    print("(unknown):{lineno}: {name} imported again".format(
                        filename=self.filename, lineno=lineno, name=imported_as), file=sys.stderr)
                    if self.verbose:
                        print("(unknown):{lineno}: (location of previous import)".format(
                            filename=self.filename, lineno=where), file=sys.stderr)
            else:
                self.scope.addImport(imported_as, self.filename, level, lineno)

    def visit_Name(self, node):
        self.scope.useName(node.id)

    def visit_Attribute(self, node):
        # Reconstruct the dotted name (a.b.c) bottom-up; when the chain is
        # rooted in a plain Name, mark every dotted prefix as used.
        full_name = [node.attr]
        parent = node.value
        while isinstance(parent, ast.Attribute):
            full_name.append(parent.attr)
            parent = parent.value
        if isinstance(parent, ast.Name):
            full_name.append(parent.id)
            full_name.reverse()
            name = ""
            for part in full_name:
                if name:
                    name = '%s.%s' % (name, part)
                else:
                    name += part
                self.scope.useName(name)
        self.generic_visit(node)
def find_imports(filename):
    """Find all imported names in a given file.

    Returns a list of ImportInfo objects.
    """
    with open(filename) as source_file:
        tree = ast.parse(source_file.read(), filename)
    finder = ImportFinder(filename)
    finder.visit(tree)
    return finder.imports
def find_imports_and_track_names(filename, warn_about_duplicates=False,
                                 verbose=False):
    """Find all imported names in a given file.

    Returns ``(imports, unused)``. Both are lists of ImportInfo objects.
    """
    with open(filename) as source_file:
        tree = ast.parse(source_file.read(), filename)
    tracker = ImportFinderAndNameTracker(filename)
    tracker.warn_about_duplicates = warn_about_duplicates
    tracker.verbose = verbose
    tracker.visit(tree)
    tracker.leaveAllScopes()
    return tracker.imports, tracker.unused_names
class Module(object):
    """Node in a module dependency graph.

    Packages may also be represented as Module objects.

    ``imports`` is a set of module names this module depends on.

    ``imported_names`` is a list of all names that were imported from other
    modules (actually, ImportInfo objects).

    ``unused_names`` is a list of names that were imported, but are not used
    (actually, ImportInfo objects).
    """

    def __init__(self, modname, filename):
        self.modname = modname
        self.label = modname
        self.filename = filename
        self.imports = set()
        self.imported_names = ()
        self.unused_names = ()

    def __repr__(self):
        return '<{0}: {1}>'.format(type(self).__name__, self.modname)
class ModuleCycle(object):
    """Node in a condensed module dependency graph.

    Represents a strongly-connected component of one or more
    modules/packages; the first module name doubles as the node's name.
    """

    def __init__(self, modnames):
        self.modnames = modnames
        self.modname = modnames[0]
        self.label = "\n".join(modnames)
        self.imports = set()
class ModuleGraph(object):
    """Module graph.

    Maps module names to Module nodes; knows how to parse source trees,
    cache results, and report/transform the dependency graph.
    """

    # Behaviour flags, normally set by main() from command-line options.
    trackUnusedNames = False
    all_unused = False
    warn_about_duplicates = False
    verbose = False
    external_dependencies = True

    # some builtin modules do not exist as separate .so files on disk
    builtin_modules = sys.builtin_module_names

    def __init__(self):
        self.modules = {}
        self.path = sys.path
        # Caches (dotted_name, extrapath) -> module name lookups.
        self._module_cache = {}
        self._warned_about = set()
        self._stderr = sys.stderr
        self._exts = ('.py', '.so', '.dll')
        if hasattr(sys, '_multiarch'):  # pragma: nocover
            # Ubuntu 14.04 LTS renames
            # /usr/lib/python2.7/lib-dynload/datetime.so to
            # /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
            # (https://github.com/mgedmin/findimports/issues/3)
            self._exts += ('.%s.so' % sys._multiarch, )

    def warn(self, about, message, *args):
        """Print *message* (with %-args) once per *about* key."""
        if about in self._warned_about:
            return
        if args:
            message = message % args
        print(message, file=self._stderr)
        self._warned_about.add(about)

    def parsePathname(self, pathname):
        """Parse one or more source files.

        ``pathname`` may be a file name or a directory name.
        """
        if os.path.isdir(pathname):
            for root, dirs, files in os.walk(pathname):
                # Sort for deterministic traversal order.
                dirs.sort()
                files.sort()
                for fn in files:
                    # ignore emacsish junk
                    if fn.endswith('.py') and not fn.startswith('.#'):
                        self.parseFile(os.path.join(root, fn))
        elif pathname.endswith('.importcache'):
            self.readCache(pathname)
        else:
            self.parseFile(pathname)

    def writeCache(self, filename):
        """Write the graph to a cache file."""
        with open(filename, 'wb') as f:
            pickle.dump(self.modules, f)

    def readCache(self, filename):
        """Load the graph from a cache file."""
        # NOTE(review): pickle.load on an untrusted cache file can execute
        # arbitrary code; only load caches you created yourself.
        with open(filename, 'rb') as f:
            self.modules = pickle.load(f)

    def parseFile(self, filename):
        """Parse a single file and register its Module node."""
        modname = self.filenameToModname(filename)
        module = Module(modname, filename)
        self.modules[modname] = module
        if self.trackUnusedNames:
            module.imported_names, module.unused_names = \
                find_imports_and_track_names(filename,
                                             self.warn_about_duplicates,
                                             self.verbose)
        else:
            module.imported_names = find_imports(filename)
            module.unused_names = None
        dir = os.path.dirname(filename)
        # Resolve each imported name to the module that provides it.
        module.imports = set(
            [self.findModuleOfName(imp.name, imp.level, filename, dir)
             for imp in module.imported_names])

    def filenameToModname(self, filename):
        """Convert a filename to a dotted module name."""
        for ext in reversed(self._exts):
            if filename.endswith(ext):
                filename = filename[:-len(ext)]
                break
        else:
            self.warn(filename, '%s: unknown file name extension', filename)
        filename = os.path.abspath(filename)
        elements = filename.split(os.path.sep)
        modname = []
        # Walk up the directory tree while __init__.py files mark packages.
        while elements:
            modname.append(elements[-1])
            del elements[-1]
            if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
                break
        modname.reverse()
        modname = ".".join(modname)
        return modname

    def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
        """Given a fully qualified name, find what module contains it."""
        if dotted_name.endswith('.*'):
            return dotted_name[:-2]
        name = dotted_name
        # extrapath is None only in a couple of test cases; in real life it's
        # always present
        if level and level > 1 and extrapath:
            # strip trailing path bits for each extra level to account for
            # relative imports
            # from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
            # from .. import X has level == 2 and one trailing path component must go
            # from ... import X has level == 3 and two trailing path components must go
            extrapath = extrapath.split(os.path.sep)
            level -= 1
            extrapath = extrapath[0:-level]
            extrapath = os.path.sep.join(extrapath)
        # Try progressively shorter prefixes of the dotted name.
        while name:
            candidate = self.isModule(name, extrapath)
            if candidate:
                return candidate
            candidate = self.isPackage(name, extrapath)
            if candidate:
                return candidate
            name = name[:name.rfind('.')]
        self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
        return dotted_name

    def isModule(self, dotted_name, extrapath=None):
        """Is ``dotted_name`` the name of a module?

        Returns the resolved module name, or None when not found.
        """
        try:
            return self._module_cache[(dotted_name, extrapath)]
        except KeyError:
            pass
        if dotted_name in sys.modules or dotted_name in self.builtin_modules:
            return dotted_name
        filename = dotted_name.replace('.', os.path.sep)
        if extrapath:
            for ext in self._exts:
                candidate = os.path.join(extrapath, filename) + ext
                if os.path.exists(candidate):
                    modname = self.filenameToModname(candidate)
                    self._module_cache[(dotted_name, extrapath)] = modname
                    return modname
        try:
            return self._module_cache[(dotted_name, None)]
        except KeyError:
            pass
        # Fall back to searching sys.path (directories and zip files).
        for dir in self.path:
            if os.path.isfile(dir):
                if dir.endswith('.egg-info'):
                    # distribute creates a setuptools-blah-blah.egg-info
                    # that ends up in sys.path
                    continue
                try:
                    zf = zipfile.ZipFile(dir)
                except zipfile.BadZipfile:
                    self.warn(dir, "%s: not a directory or zip file", dir)
                    continue
                names = zf.namelist()
                for ext in self._exts:
                    candidate = filename + ext
                    if candidate in names:
                        modname = filename.replace(os.path.sep, '.')
                        self._module_cache[(dotted_name, extrapath)] = modname
                        self._module_cache[(dotted_name, None)] = modname
                        return modname
            else:
                for ext in self._exts:
                    candidate = os.path.join(dir, filename) + ext
                    if os.path.exists(candidate):
                        modname = self.filenameToModname(candidate)
                        self._module_cache[(dotted_name, extrapath)] = modname
                        self._module_cache[(dotted_name, None)] = modname
                        return modname
        return None

    def isPackage(self, dotted_name, extrapath=None):
        """Is ``dotted_name`` the name of a package?"""
        candidate = self.isModule(dotted_name + '.__init__', extrapath)
        if candidate:
            candidate = candidate[:-len(".__init__")]
        return candidate

    def packageOf(self, dotted_name, packagelevel=None):
        """Determine the package that contains ``dotted_name``."""
        if '.' not in dotted_name:
            return dotted_name
        if not self.isPackage(dotted_name):
            dotted_name = '.'.join(dotted_name.split('.')[:-1])
        if packagelevel:
            # Collapse to the topmost *packagelevel* name components.
            dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
        return dotted_name

    def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
        """Remove tests subpackages from dotted_name."""
        result = []
        for name in dotted_name.split('.'):
            if name in pkgnames:
                break
            result.append(name)
        if not result:  # empty names are baad
            return dotted_name
        return '.'.join(result)

    def listModules(self):
        """Return an alphabetical list of all modules."""
        modules = list(self.modules.items())
        modules.sort()
        return [module for name, module in modules]

    def packageGraph(self, packagelevel=None):
        """Convert a module graph to a package graph."""
        packages = {}
        for module in self.listModules():
            package_name = self.packageOf(module.modname, packagelevel)
            if package_name not in packages:
                dirname = os.path.dirname(module.filename)
                packages[package_name] = Module(package_name, dirname)
            package = packages[package_name]
            for name in module.imports:
                package_name = self.packageOf(name, packagelevel)
                if package_name != package.modname:  # no loops
                    package.imports.add(package_name)
        graph = ModuleGraph()
        graph.modules = packages
        return graph

    def collapseTests(self, pkgnames=['tests', 'ftests']):
        """Collapse test packages with parent packages.

        Works only with package graphs.
        """
        packages = {}
        # First pass: keep only nodes that are not test packages.
        for module in self.listModules():
            package_name = self.removeTestPackage(module.modname, pkgnames)
            if package_name == module.modname:
                packages[package_name] = Module(package_name, module.filename)
        # Second pass: fold edges of test packages into their parents.
        for module in self.listModules():
            package_name = self.removeTestPackage(module.modname, pkgnames)
            package = packages[package_name]
            for name in module.imports:
                package_name = self.removeTestPackage(name, pkgnames)
                if package_name != package.modname:  # no loops
                    package.imports.add(package_name)
        graph = ModuleGraph()
        graph.modules = packages
        return graph

    def collapseCycles(self):
        """Create a graph with cycles collapsed.

        Collapse modules participating in a cycle to a single node.
        """
        # This algorithm determines Strongly Connected Components. Look it up.
        # It is adapted to suit our data structures.
        # Phase 0: prepare the graph
        imports = {}
        for u in self.modules:
            imports[u] = set()
            for v in self.modules[u].imports:
                if v in self.modules:  # skip external dependencies
                    imports[u].add(v)
        # Phase 1: order the vertices
        visited = {}
        for u in self.modules:
            visited[u] = False
        order = []

        def visit1(u):
            visited[u] = True
            for v in imports[u]:
                if not visited[v]:
                    visit1(v)
            order.append(u)

        for u in self.modules:
            if not visited[u]:
                visit1(u)
        order.reverse()
        # Phase 2: compute the inverse graph
        revimports = {}
        for u in self.modules:
            revimports[u] = set()
        for u in self.modules:
            for v in imports[u]:
                revimports[v].add(u)
        # Phase 3: determine the strongly connected components
        components = {}
        component_of = {}
        for u in self.modules:
            visited[u] = False

        def visit2(u):
            visited[u] = True
            component.append(u)
            for v in revimports[u]:
                if not visited[v]:
                    visit2(v)

        for u in order:
            if not visited[u]:
                component = []
                visit2(u)
                component.sort()
                node = ModuleCycle(component)
                components[node.modname] = node
                for modname in component:
                    component_of[modname] = node
        # Phase 4: construct the condensed graph
        for node in components.values():
            for modname in node.modnames:
                for impname in imports[modname]:
                    other = component_of[impname].modname
                    if other != node.modname:
                        node.imports.add(other)
        graph = ModuleGraph()
        graph.modules = components
        return graph

    def printImportedNames(self):
        """Produce a report of imported names."""
        for module in self.listModules():
            print("%s:" % module.modname)
            print(" %s" % "\n ".join(imp.name for imp in module.imported_names))

    def printImports(self):
        """Produce a report of dependencies."""
        for module in self.listModules():
            print("%s:" % module.label)
            if self.external_dependencies:
                imports = list(module.imports)
            else:
                imports = [modname for modname in module.imports
                           if modname in self.modules]
            imports.sort()
            print(" %s" % "\n ".join(imports))

    def printUnusedImports(self):
        """Produce a report of unused imports."""
        for module in self.listModules():
            names = [(unused.lineno, unused.name)
                     for unused in module.unused_names]
            names.sort()
            for lineno, name in names:
                if not self.all_unused:
                    line = linecache.getline(module.filename, lineno)
                    if '#' in line:
                        # assume there's a comment explaining why it's not used
                        continue
                print("%s:%s: %s not used" % (module.filename, lineno, name))

    def printDot(self):
        """Produce a dependency graph in dot format."""
        print("digraph ModuleDependencies {")
        print(" node[shape=box];")
        allNames = set()
        nameDict = {}
        for n, module in enumerate(self.listModules()):
            module._dot_name = "mod%d" % n
            nameDict[module.modname] = module._dot_name
            print(" %s[label=\"%s\"];" % (module._dot_name,
                                          quote(module.label)))
            allNames |= module.imports
        print(" node[style=dotted];")
        if self.external_dependencies:
            myNames = set(self.modules)
            extNames = list(allNames - myNames)
            extNames.sort()
            for n, name in enumerate(extNames):
                # NOTE(review): 'id' shadows the builtin here.
                nameDict[name] = id = "extmod%d" % n
                print(" %s[label=\"%s\"];" % (id, name))
        for modname, module in sorted(self.modules.items()):
            for other in sorted(module.imports):
                if other in nameDict:
                    print(" %s -> %s;" % (nameDict[module.modname],
                                          nameDict[other]))
        print("}")
def quote(s):
    """Quote a string for graphviz.

    This function is probably incomplete.
    """
    # Backslashes must be escaped first, then quotes and newlines.
    for old, new in (("\\", "\\\\"), ('"', '\\"'), ('\n', '\\n')):
        s = s.replace(old, new)
    return s
def main(argv=None):
    """Command-line entry point.

    *argv* defaults to sys.argv when None. Returns a process exit status.
    """
    progname = os.path.basename(argv[0]) if argv else None
    # First paragraph of the module docstring serves as the description.
    description = __doc__.strip().split('\n\n')[0]
    parser = optparse.OptionParser('%prog [options] [filename|dirname ...]',
                                   prog=progname, description=description)
    parser.add_option('-i', '--imports', action='store_const',
                      dest='action', const='printImports',
                      default='printImports',
                      help='print dependency graph (default action)')
    parser.add_option('-d', '--dot', action='store_const',
                      dest='action', const='printDot',
                      help='print dependency graph in dot (graphviz) format')
    parser.add_option('-n', '--names', action='store_const',
                      dest='action', const='printImportedNames',
                      help='print dependency graph with all imported names')
    parser.add_option('-u', '--unused', action='store_const',
                      dest='action', const='printUnusedImports',
                      help='print unused imports')
    parser.add_option('-a', '--all', action='store_true',
                      dest='all_unused',
                      help="don't ignore unused imports when there's a comment on the same line (only affects -u)")
    parser.add_option('--duplicate', action='store_true',
                      dest='warn_about_duplicates',
                      help='warn about duplicate imports')
    parser.add_option('-v', '--verbose', action='store_true',
                      help='print more information (currently only affects --duplicate)')
    parser.add_option('-N', '--noext', action='store_true',
                      help='omit external dependencies')
    parser.add_option('-p', '--packages', action='store_true',
                      dest='condense_to_packages',
                      help='convert the module graph to a package graph')
    parser.add_option('-l', '--level', type='int',
                      dest='packagelevel',
                      help='collapse subpackages to the topmost Nth levels')
    parser.add_option('-c', '--collapse', action='store_true',
                      dest='collapse_cycles',
                      help='collapse dependency cycles')
    parser.add_option('-T', '--tests', action='store_true',
                      dest='collapse_tests',
                      help="collapse packages named 'tests' and 'ftests' with parent packages")
    parser.add_option('-w', '--write-cache', metavar='FILE',
                      help="write a pickle cache of parsed imports; provide the cache filename as the only non-option argument to load it back")
    try:
        opts, args = parser.parse_args(args=argv[1:] if argv else None)
    except SystemExit as e:
        # optparse exits on --help/errors; convert to a return value.
        return e.code
    if not args:
        args = ['.']
    # Build the graph, then apply the requested transformations in order.
    g = ModuleGraph()
    g.all_unused = opts.all_unused
    g.warn_about_duplicates = opts.warn_about_duplicates
    g.verbose = opts.verbose
    g.trackUnusedNames = (opts.action == 'printUnusedImports')
    for fn in args:
        g.parsePathname(fn)
    if opts.write_cache:
        g.writeCache(opts.write_cache)
    if opts.condense_to_packages:
        g = g.packageGraph(opts.packagelevel)
    if opts.collapse_tests:
        g = g.collapseTests()
    if opts.collapse_cycles:
        g = g.collapseCycles()
    g.external_dependencies = not opts.noext
    # Dispatch to the selected report method by name.
    getattr(g, opts.action)()
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':  # pragma: nocover
    sys.exit(main())
|
mgedmin/findimports
|
findimports.py
|
find_imports
|
python
|
def find_imports(filename):
with open(filename) as f:
root = ast.parse(f.read(), filename)
visitor = ImportFinder(filename)
visitor.visit(root)
return visitor.imports
|
Find all imported names in a given file.
Returns a list of ImportInfo objects.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L316-L325
| null |
#!/usr/bin/python
"""
FindImports is a script that processes Python module dependencies. Currently
it can be used for finding unused imports and graphing module dependencies
(with graphviz).
Syntax: findimports.py [options] [filename|dirname ...]
Options:
-h, --help This help message
-i, --imports Print dependency graph (default action).
-d, --dot Print dependency graph in dot (graphviz) format.
-n, --names Print dependency graph with all imported names.
-u, --unused Print unused imports.
-a, --all Print unused imports even if there's a comment.
--duplicate Print duplicate imports.
-v Print more information (currently only affects --duplicate).
-N, --noext Omit external dependencies.
-p, --packages Convert the module graph to a package graph.
-l N, --level N Collapse subpackages deeper than the Nth level.
-c, --collapse Collapse dependency cycles.
-T, --tests Collapse packages named 'tests' and 'ftests' with parent
packages.
FindImports requires Python 2.6 or later.
Notes:
findimports processes doctest blocks inside docstrings.
findimports.py -u will not complain about import statements that have
a comment on the same line, e.g.:
from somewhereelse import somename # reexport
findimports.py -u -a will ignore comments and print these statements also.
Caching:
If you want to produce several different reports from the same dataset,
you can do it as follows:
findimports.py --write-cache foo.importcache dirname
findimports.py foo.importcache -d -T > graph1.dot
findimports.py foo.importcache -d -N -c -p -l 2 > graph2.dot
Copyright (c) 2003--2015 Marius Gedminas <marius@pov.lt>
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 675 Mass
Ave, Cambridge, MA 02139, USA.
"""
from __future__ import print_function
import ast
import doctest
import linecache
import optparse
import os
import pickle
import re
import sys
import zipfile
from operator import attrgetter
__version__ = '1.5.2.dev0'
__author__ = 'Marius Gedminas <marius@gedmin.as>'
__licence__ = 'GPL v2 or later'
__url__ = 'https://github.com/mgedmin/findimports'
def adjust_lineno(filename, lineno, name):
    """Adjust the line number of an import.

    Needed because import statements can span multiple lines, and our
    lineno is always the first line number.
    """
    # '*' has no word boundaries, so it must be matched literally.
    pattern = re.compile('[*]' if name == '*' else r'\b%s\b' % re.escape(name))
    current = linecache.getline(filename, lineno)
    # Scan forward until the imported name shows up on the line.
    # Hack warning: might be fooled by comments.
    while current and not pattern.search(current):
        lineno += 1
        current = linecache.getline(filename, lineno)
    return lineno
class ImportInfo(object):
    """A record of a name and the location of the import statement."""

    def __init__(self, name, filename, lineno, level):
        self.name = name
        self.filename = filename
        self.lineno = lineno
        self.level = level

    def __repr__(self):
        cls = self.__class__.__name__
        return '%s(%r, %r, %r, %r)' % (cls, self.name, self.filename,
                                       self.lineno, self.level)
class ImportFinder(ast.NodeVisitor):
    """AST visitor that collects all imported names in its imports attribute.
    For example, the following import statements in the AST tree
    import a, b.c, d as e
    from q.w.e import x, y as foo, z
    from woof import *
    will cause ``imports`` to contain
    a
    b.c
    d
    q.w.e.x
    q.w.e.y
    q.w.e.z
    woof.*
    """

    lineno_offset = 0  # needed when recursively parsing docstrings

    def __init__(self, filename):
        # filename is used for the ImportInfo records and for looking up
        # source lines when adjusting line numbers.
        self.imports = []
        self.filename = filename

    def processImport(self, name, imported_as, full_name, level, node):
        """Record one imported name as an ImportInfo object."""
        # Multi-line import statements report the statement's first line;
        # scan forward to the line where this particular name appears.
        lineno = adjust_lineno(self.filename,
                               self.lineno_offset + node.lineno,
                               name)
        info = ImportInfo(full_name, self.filename, lineno, level)
        self.imports.append(info)

    def visit_Import(self, node):
        # Handles "import a, b.c, d as e".
        for alias in node.names:
            self.processImport(alias.name, alias.asname, alias.name, None, node)

    def visit_ImportFrom(self, node):
        # Handles "from q.w.e import x, y as foo, z"; __future__ imports
        # are not real dependencies, so they are skipped.
        if node.module == '__future__':
            return
        for alias in node.names:
            name = alias.name
            imported_as = alias.asname
            # node.module is None for relative imports like "from . import x".
            fullname = '%s.%s' % (node.module, name) if node.module else name
            self.processImport(name, imported_as, fullname, node.level, node)

    def visitSomethingWithADocstring(self, node):
        # Docstrings may contain doctests, which may contain imports.
        # ClassDef and FunctionDef have a 'lineno' attribute, Module doesn't.
        lineno = getattr(node, 'lineno', None)
        self.processDocstring(ast.get_docstring(node, clean=False), lineno)
        self.generic_visit(node)

    visit_Module = visitSomethingWithADocstring
    visit_ClassDef = visitSomethingWithADocstring
    visit_FunctionDef = visitSomethingWithADocstring

    def processDocstring(self, docstring, lineno):
        """Parse doctest examples in *docstring* and visit their ASTs too."""
        if not docstring:
            return
        if lineno is None:
            # Module nodes don't have a lineno
            lineno = 0
        dtparser = doctest.DocTestParser()
        try:
            examples = dtparser.get_examples(docstring)
        except Exception:
            # NOTE(review): the 'filename' format argument is unused; the
            # literal "(unknown)" looks like it replaced "{filename}" --
            # confirm against upstream findimports.
            print("(unknown):{lineno}: error while parsing doctest".format(
                filename=self.filename, lineno=lineno), file=sys.stderr)
            raise
        for example in examples:
            try:
                source = example.source
                if not isinstance(source, str):
                    source = source.encode('UTF-8')
                node = ast.parse(source, filename='<docstring>')
            except SyntaxError:
                print("(unknown):{lineno}: syntax error in doctest".format(
                    filename=self.filename, lineno=lineno), file=sys.stderr)
            else:
                # Offset line numbers so imports inside the doctest are
                # reported relative to the real file, then restore.
                self.lineno_offset += lineno + example.lineno
                self.visit(node)
                self.lineno_offset -= lineno + example.lineno
class Scope(object):
    """A namespace: tracks names imported in it and which are still unused."""

    def __init__(self, parent=None, name=None):
        self.parent = parent
        self.name = name
        self.imports = {}
        self.unused_names = {}

    def haveImport(self, name):
        """True if *name* was imported in this scope or an enclosing one."""
        if name in self.imports:
            return True
        return bool(self.parent) and self.parent.haveImport(name)

    def whereImported(self, name):
        """Return the ImportInfo for *name*, searching enclosing scopes."""
        try:
            return self.imports[name]
        except KeyError:
            return self.parent.whereImported(name)

    def addImport(self, name, filename, level, lineno):
        """Record an import; it counts as unused until useName() is called."""
        info = ImportInfo(name, filename, lineno, level)
        self.imports[name] = info
        self.unused_names[name] = info

    def useName(self, name):
        """Mark *name* as used in this scope and every enclosing scope."""
        self.unused_names.pop(name, None)
        if self.parent:
            self.parent.useName(name)
class ImportFinderAndNameTracker(ImportFinder):
    """ImportFinder that also keeps track on used names."""

    warn_about_duplicates = False
    verbose = False

    def __init__(self, filename):
        ImportFinder.__init__(self, filename)
        # Scopes form a stack; the top-level scope collects module globals.
        self.scope = self.top_level = Scope(name=filename)
        self.scope_stack = []
        self.unused_names = []

    def newScope(self, parent, name=None):
        # Push the current scope and enter a fresh child scope.
        self.scope_stack.append(self.scope)
        self.scope = Scope(parent, name)

    def leaveScope(self):
        # Whatever is still unused when the scope closes gets reported.
        self.unused_names += self.scope.unused_names.values()
        self.scope = self.scope_stack.pop()

    def leaveAllScopes(self):
        # newScope()/leaveScope() calls are always balanced so scope_stack
        # should be empty at this point
        assert not self.scope_stack
        self.unused_names += self.scope.unused_names.values()
        self.unused_names.sort(key=attrgetter('lineno'))

    def processDocstring(self, docstring, lineno):
        # Doctest code gets its own scope so its imports don't leak out.
        self.newScope(self.top_level, 'docstring')
        ImportFinder.processDocstring(self, docstring, lineno)
        self.leaveScope()

    def visit_FunctionDef(self, node):
        # Each function body is a separate namespace.
        self.newScope(self.scope, 'function %s' % node.name)
        ImportFinder.visit_FunctionDef(self, node)
        self.leaveScope()

    def processImport(self, name, imported_as, full_name, level, node):
        ImportFinder.processImport(self, name, imported_as, full_name, level, node)
        if not imported_as:
            imported_as = name
        if imported_as != "*":
            lineno = self.lineno_offset + node.lineno
            if (self.warn_about_duplicates and
                    self.scope.haveImport(imported_as)):
                where = self.scope.whereImported(imported_as).lineno
                line = linecache.getline(self.filename, lineno)
                # A comment on the same line suppresses the warning.
                if '#' not in line:
                    # NOTE(review): "(unknown)" with an unused 'filename'
                    # format argument looks like an anonymized "{filename}";
                    # confirm against upstream findimports.
                    print("(unknown):{lineno}: {name} imported again".format(
                        filename=self.filename, lineno=lineno, name=imported_as), file=sys.stderr)
                    if self.verbose:
                        print("(unknown):{lineno}: (location of previous import)".format(
                            filename=self.filename, lineno=where), file=sys.stderr)
            else:
                self.scope.addImport(imported_as, self.filename, level, lineno)

    def visit_Name(self, node):
        self.scope.useName(node.id)

    def visit_Attribute(self, node):
        # Reconstruct the dotted name (a.b.c) bottom-up; when the chain is
        # rooted in a plain Name, mark every dotted prefix as used.
        full_name = [node.attr]
        parent = node.value
        while isinstance(parent, ast.Attribute):
            full_name.append(parent.attr)
            parent = parent.value
        if isinstance(parent, ast.Name):
            full_name.append(parent.id)
            full_name.reverse()
            name = ""
            for part in full_name:
                if name:
                    name = '%s.%s' % (name, part)
                else:
                    name += part
                self.scope.useName(name)
        self.generic_visit(node)
def find_imports_and_track_names(filename, warn_about_duplicates=False,
                                 verbose=False):
    """Find all imported names in a given file.

    Returns ``(imports, unused)``. Both are lists of ImportInfo objects.
    """
    with open(filename) as source_file:
        tree = ast.parse(source_file.read(), filename)
    tracker = ImportFinderAndNameTracker(filename)
    tracker.warn_about_duplicates = warn_about_duplicates
    tracker.verbose = verbose
    tracker.visit(tree)
    tracker.leaveAllScopes()
    return tracker.imports, tracker.unused_names
class Module(object):
    """Node in a module dependency graph.

    Packages may also be represented as Module objects.

    ``imports`` is a set of module names this module depends on.

    ``imported_names`` is a list of all names that were imported from other
    modules (actually, ImportInfo objects).

    ``unused_names`` is a list of names that were imported, but are not used
    (actually, ImportInfo objects).
    """

    def __init__(self, modname, filename):
        self.modname = modname
        # graph output uses ``label``; it starts out equal to the module name
        self.label = modname
        self.filename = filename
        self.imports = set()
        self.imported_names = ()
        self.unused_names = ()

    def __repr__(self):
        return '<{0}: {1}>'.format(self.__class__.__name__, self.modname)
class ModuleCycle(object):
    """Node in a condensed module dependency graph.

    A strongly-connected component of one or more modules/packages.
    """

    def __init__(self, modnames):
        self.modnames = modnames
        # the first module name stands in for the whole cycle
        self.modname = modnames[0]
        # multi-line label listing every member of the cycle
        self.label = "\n".join(modnames)
        self.imports = set()
class ModuleGraph(object):
    """Module graph."""
    # Configuration flags; main() sets these from command-line options.
    trackUnusedNames = False
    all_unused = False
    warn_about_duplicates = False
    verbose = False
    external_dependencies = True
    # some builtin modules do not exist as separate .so files on disk
    builtin_modules = sys.builtin_module_names
    def __init__(self):
        # modname -> Module, filled in by parseFile()
        self.modules = {}
        self.path = sys.path
        # (dotted_name, extrapath) -> resolved module name; memoizes isModule()
        self._module_cache = {}
        self._warned_about = set()
        self._stderr = sys.stderr
        self._exts = ('.py', '.so', '.dll')
        if hasattr(sys, '_multiarch'): # pragma: nocover
            # Ubuntu 14.04 LTS renames
            # /usr/lib/python2.7/lib-dynload/datetime.so to
            # /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
            # (https://github.com/mgedmin/findimports/issues/3)
            self._exts += ('.%s.so' % sys._multiarch, )
    def warn(self, about, message, *args):
        """Print ``message`` (%-formatted with args) once per ``about`` key."""
        if about in self._warned_about:
            return
        if args:
            message = message % args
        print(message, file=self._stderr)
        self._warned_about.add(about)
    def parsePathname(self, pathname):
        """Parse one or more source files.

        ``pathname`` may be a file name or a directory name.
        """
        if os.path.isdir(pathname):
            for root, dirs, files in os.walk(pathname):
                # sort for deterministic traversal order
                dirs.sort()
                files.sort()
                for fn in files:
                    # ignore emacsish junk
                    if fn.endswith('.py') and not fn.startswith('.#'):
                        self.parseFile(os.path.join(root, fn))
        elif pathname.endswith('.importcache'):
            self.readCache(pathname)
        else:
            self.parseFile(pathname)
    def writeCache(self, filename):
        """Write the graph to a cache file."""
        with open(filename, 'wb') as f:
            pickle.dump(self.modules, f)
    def readCache(self, filename):
        """Load the graph from a cache file."""
        # NOTE(review): pickle.load can execute arbitrary code; only load
        # cache files you created yourself.
        with open(filename, 'rb') as f:
            self.modules = pickle.load(f)
    def parseFile(self, filename):
        """Parse a single file."""
        modname = self.filenameToModname(filename)
        module = Module(modname, filename)
        self.modules[modname] = module
        if self.trackUnusedNames:
            module.imported_names, module.unused_names = \
                find_imports_and_track_names(filename,
                                             self.warn_about_duplicates,
                                             self.verbose)
        else:
            module.imported_names = find_imports(filename)
            module.unused_names = None
        dir = os.path.dirname(filename)
        # resolve each imported name to the module that provides it
        module.imports = set(
            [self.findModuleOfName(imp.name, imp.level, filename, dir)
             for imp in module.imported_names])
    def filenameToModname(self, filename):
        """Convert a filename to a module name."""
        for ext in reversed(self._exts):
            if filename.endswith(ext):
                filename = filename[:-len(ext)]
                break
        else:
            self.warn(filename, '%s: unknown file name extension', filename)
        filename = os.path.abspath(filename)
        elements = filename.split(os.path.sep)
        modname = []
        # climb the directory tree while __init__.py files mark packages
        while elements:
            modname.append(elements[-1])
            del elements[-1]
            if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
                break
        modname.reverse()
        modname = ".".join(modname)
        return modname
    def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
        """Given a fully qualified name, find what module contains it."""
        if dotted_name.endswith('.*'):
            return dotted_name[:-2]
        name = dotted_name
        # extrapath is None only in a couple of test cases; in real life it's
        # always present
        if level and level > 1 and extrapath:
            # strip trailing path bits for each extra level to account for
            # relative imports
            # from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
            # from .. import X has level == 2 and one trailing path component must go
            # from ... import X has level == 3 and two trailing path components must go
            extrapath = extrapath.split(os.path.sep)
            level -= 1
            extrapath = extrapath[0:-level]
            extrapath = os.path.sep.join(extrapath)
        # try progressively shorter prefixes of the dotted name, since the
        # tail may be an attribute rather than a submodule
        while name:
            candidate = self.isModule(name, extrapath)
            if candidate:
                return candidate
            candidate = self.isPackage(name, extrapath)
            if candidate:
                return candidate
            name = name[:name.rfind('.')]
        self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
        return dotted_name
    def isModule(self, dotted_name, extrapath=None):
        """Is ``dotted_name`` the name of a module?"""
        try:
            return self._module_cache[(dotted_name, extrapath)]
        except KeyError:
            pass
        if dotted_name in sys.modules or dotted_name in self.builtin_modules:
            return dotted_name
        filename = dotted_name.replace('.', os.path.sep)
        if extrapath:
            # first look next to the importing file (relative imports)
            for ext in self._exts:
                candidate = os.path.join(extrapath, filename) + ext
                if os.path.exists(candidate):
                    modname = self.filenameToModname(candidate)
                    self._module_cache[(dotted_name, extrapath)] = modname
                    return modname
        try:
            return self._module_cache[(dotted_name, None)]
        except KeyError:
            pass
        # then scan sys.path, which may contain directories or zip files
        for dir in self.path:
            if os.path.isfile(dir):
                if dir.endswith('.egg-info'):
                    # distribute creates a setuptools-blah-blah.egg-info
                    # that ends up in sys.path
                    continue
                try:
                    zf = zipfile.ZipFile(dir)
                except zipfile.BadZipfile:
                    self.warn(dir, "%s: not a directory or zip file", dir)
                    continue
                names = zf.namelist()
                for ext in self._exts:
                    candidate = filename + ext
                    if candidate in names:
                        modname = filename.replace(os.path.sep, '.')
                        self._module_cache[(dotted_name, extrapath)] = modname
                        self._module_cache[(dotted_name, None)] = modname
                        return modname
            else:
                for ext in self._exts:
                    candidate = os.path.join(dir, filename) + ext
                    if os.path.exists(candidate):
                        modname = self.filenameToModname(candidate)
                        self._module_cache[(dotted_name, extrapath)] = modname
                        self._module_cache[(dotted_name, None)] = modname
                        return modname
        return None
    def isPackage(self, dotted_name, extrapath=None):
        """Is ``dotted_name`` the name of a package?"""
        candidate = self.isModule(dotted_name + '.__init__', extrapath)
        if candidate:
            candidate = candidate[:-len(".__init__")]
        return candidate
    def packageOf(self, dotted_name, packagelevel=None):
        """Determine the package that contains ``dotted_name``."""
        if '.' not in dotted_name:
            return dotted_name
        if not self.isPackage(dotted_name):
            dotted_name = '.'.join(dotted_name.split('.')[:-1])
        if packagelevel:
            dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
        return dotted_name
    def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
        """Remove tests subpackages from dotted_name."""
        result = []
        for name in dotted_name.split('.'):
            if name in pkgnames:
                break
            result.append(name)
        if not result: # empty names are baad
            return dotted_name
        return '.'.join(result)
    def listModules(self):
        """Return an alphabetical list of all modules."""
        modules = list(self.modules.items())
        modules.sort()
        return [module for name, module in modules]
    def packageGraph(self, packagelevel=None):
        """Convert a module graph to a package graph."""
        packages = {}
        for module in self.listModules():
            package_name = self.packageOf(module.modname, packagelevel)
            if package_name not in packages:
                dirname = os.path.dirname(module.filename)
                packages[package_name] = Module(package_name, dirname)
            package = packages[package_name]
            for name in module.imports:
                package_name = self.packageOf(name, packagelevel)
                if package_name != package.modname: # no loops
                    package.imports.add(package_name)
        graph = ModuleGraph()
        graph.modules = packages
        return graph
    def collapseTests(self, pkgnames=['tests', 'ftests']):
        """Collapse test packages with parent packages.

        Works only with package graphs.
        """
        packages = {}
        # first pass: keep only modules that are not themselves test packages
        for module in self.listModules():
            package_name = self.removeTestPackage(module.modname, pkgnames)
            if package_name == module.modname:
                packages[package_name] = Module(package_name, module.filename)
        # second pass: fold every module's imports into its parent package
        for module in self.listModules():
            package_name = self.removeTestPackage(module.modname, pkgnames)
            package = packages[package_name]
            for name in module.imports:
                package_name = self.removeTestPackage(name, pkgnames)
                if package_name != package.modname: # no loops
                    package.imports.add(package_name)
        graph = ModuleGraph()
        graph.modules = packages
        return graph
    def collapseCycles(self):
        """Create a graph with cycles collapsed.

        Collapse modules participating in a cycle to a single node.
        """
        # This algorithm determines Strongly Connected Components. Look it up.
        # It is adapted to suit our data structures.
        # Phase 0: prepare the graph
        imports = {}
        for u in self.modules:
            imports[u] = set()
            for v in self.modules[u].imports:
                if v in self.modules: # skip external dependencies
                    imports[u].add(v)
        # Phase 1: order the vertices
        visited = {}
        for u in self.modules:
            visited[u] = False
        order = []
        def visit1(u):
            # depth-first search recording post-order finish times
            visited[u] = True
            for v in imports[u]:
                if not visited[v]:
                    visit1(v)
            order.append(u)
        for u in self.modules:
            if not visited[u]:
                visit1(u)
        order.reverse()
        # Phase 2: compute the inverse graph
        revimports = {}
        for u in self.modules:
            revimports[u] = set()
        for u in self.modules:
            for v in imports[u]:
                revimports[v].add(u)
        # Phase 3: determine the strongly connected components
        components = {}
        component_of = {}
        for u in self.modules:
            visited[u] = False
        def visit2(u):
            # DFS on the reversed graph collects one component
            visited[u] = True
            component.append(u)
            for v in revimports[u]:
                if not visited[v]:
                    visit2(v)
        for u in order:
            if not visited[u]:
                component = []
                visit2(u)
                component.sort()
                node = ModuleCycle(component)
                components[node.modname] = node
                for modname in component:
                    component_of[modname] = node
        # Phase 4: construct the condensed graph
        for node in components.values():
            for modname in node.modnames:
                for impname in imports[modname]:
                    other = component_of[impname].modname
                    if other != node.modname:
                        node.imports.add(other)
        graph = ModuleGraph()
        graph.modules = components
        return graph
    def printImportedNames(self):
        """Produce a report of imported names."""
        for module in self.listModules():
            print("%s:" % module.modname)
            print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
    def printImports(self):
        """Produce a report of dependencies."""
        for module in self.listModules():
            print("%s:" % module.label)
            if self.external_dependencies:
                imports = list(module.imports)
            else:
                # keep only modules that are part of this graph
                imports = [modname for modname in module.imports
                           if modname in self.modules]
            imports.sort()
            print(" %s" % "\n ".join(imports))
    def printUnusedImports(self):
        """Produce a report of unused imports."""
        for module in self.listModules():
            names = [(unused.lineno, unused.name)
                     for unused in module.unused_names]
            names.sort()
            for lineno, name in names:
                if not self.all_unused:
                    line = linecache.getline(module.filename, lineno)
                    if '#' in line:
                        # assume there's a comment explaining why it's not used
                        continue
                print("%s:%s: %s not used" % (module.filename, lineno, name))
    def printDot(self):
        """Produce a dependency graph in dot format."""
        print("digraph ModuleDependencies {")
        print(" node[shape=box];")
        allNames = set()
        nameDict = {}
        # declare one node per module known to the graph
        for n, module in enumerate(self.listModules()):
            module._dot_name = "mod%d" % n
            nameDict[module.modname] = module._dot_name
            print(" %s[label=\"%s\"];" % (module._dot_name,
                                          quote(module.label)))
            allNames |= module.imports
        print(" node[style=dotted];")
        if self.external_dependencies:
            # external modules get dotted nodes
            myNames = set(self.modules)
            extNames = list(allNames - myNames)
            extNames.sort()
            for n, name in enumerate(extNames):
                nameDict[name] = id = "extmod%d" % n
                print(" %s[label=\"%s\"];" % (id, name))
        for modname, module in sorted(self.modules.items()):
            for other in sorted(module.imports):
                if other in nameDict:
                    print(" %s -> %s;" % (nameDict[module.modname],
                                          nameDict[other]))
        print("}")
def quote(s):
    """Escape *s* for use inside a double-quoted graphviz string.

    This function is probably incomplete.
    """
    # backslash must be escaped first so later escapes aren't double-escaped
    for plain, escaped in (("\\", "\\\\"), ('"', '\\"'), ('\n', '\\n')):
        s = s.replace(plain, escaped)
    return s
def main(argv=None):
    """Command-line entry point.

    Parses options, builds the module graph from the given files or
    directories, applies the requested transformations, and prints the
    selected report.  Returns the process exit status.
    """
    progname = os.path.basename(argv[0]) if argv else None
    # first paragraph of the module docstring doubles as the --help blurb
    description = __doc__.strip().split('\n\n')[0]
    parser = optparse.OptionParser('%prog [options] [filename|dirname ...]',
                                   prog=progname, description=description)
    parser.add_option('-i', '--imports', action='store_const',
                      dest='action', const='printImports',
                      default='printImports',
                      help='print dependency graph (default action)')
    parser.add_option('-d', '--dot', action='store_const',
                      dest='action', const='printDot',
                      help='print dependency graph in dot (graphviz) format')
    parser.add_option('-n', '--names', action='store_const',
                      dest='action', const='printImportedNames',
                      help='print dependency graph with all imported names')
    parser.add_option('-u', '--unused', action='store_const',
                      dest='action', const='printUnusedImports',
                      help='print unused imports')
    parser.add_option('-a', '--all', action='store_true',
                      dest='all_unused',
                      help="don't ignore unused imports when there's a comment on the same line (only affects -u)")
    parser.add_option('--duplicate', action='store_true',
                      dest='warn_about_duplicates',
                      help='warn about duplicate imports')
    parser.add_option('-v', '--verbose', action='store_true',
                      help='print more information (currently only affects --duplicate)')
    parser.add_option('-N', '--noext', action='store_true',
                      help='omit external dependencies')
    parser.add_option('-p', '--packages', action='store_true',
                      dest='condense_to_packages',
                      help='convert the module graph to a package graph')
    parser.add_option('-l', '--level', type='int',
                      dest='packagelevel',
                      help='collapse subpackages to the topmost Nth levels')
    parser.add_option('-c', '--collapse', action='store_true',
                      dest='collapse_cycles',
                      help='collapse dependency cycles')
    parser.add_option('-T', '--tests', action='store_true',
                      dest='collapse_tests',
                      help="collapse packages named 'tests' and 'ftests' with parent packages")
    parser.add_option('-w', '--write-cache', metavar='FILE',
                      help="write a pickle cache of parsed imports; provide the cache filename as the only non-option argument to load it back")
    try:
        opts, args = parser.parse_args(args=argv[1:] if argv else None)
    except SystemExit as e:
        # optparse calls sys.exit() for --help and option errors; turn that
        # into a return value so main() can be called programmatically
        return e.code
    if not args:
        args = ['.']
    g = ModuleGraph()
    g.all_unused = opts.all_unused
    g.warn_about_duplicates = opts.warn_about_duplicates
    g.verbose = opts.verbose
    # tracking used names is more expensive; only do it for the unused report
    g.trackUnusedNames = (opts.action == 'printUnusedImports')
    for fn in args:
        g.parsePathname(fn)
    if opts.write_cache:
        g.writeCache(opts.write_cache)
    if opts.condense_to_packages:
        g = g.packageGraph(opts.packagelevel)
    if opts.collapse_tests:
        g = g.collapseTests()
    if opts.collapse_cycles:
        g = g.collapseCycles()
    g.external_dependencies = not opts.noext
    # dispatch to the selected print* report method by name
    getattr(g, opts.action)()
    return 0
# Script entry point: run main() and propagate its exit status.
if __name__ == '__main__': # pragma: nocover
    sys.exit(main())
|
mgedmin/findimports
|
findimports.py
|
find_imports_and_track_names
|
python
|
def find_imports_and_track_names(filename, warn_about_duplicates=False,
verbose=False):
with open(filename) as f:
root = ast.parse(f.read(), filename)
visitor = ImportFinderAndNameTracker(filename)
visitor.warn_about_duplicates = warn_about_duplicates
visitor.verbose = verbose
visitor.visit(root)
visitor.leaveAllScopes()
return visitor.imports, visitor.unused_names
|
Find all imported names in a given file.
Returns ``(imports, unused)``. Both are lists of ImportInfo objects.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L328-L341
|
[
"def leaveAllScopes(self):\n # newScope()/leaveScope() calls are always balanced so scope_stack\n # should be empty at this point\n assert not self.scope_stack\n self.unused_names += self.scope.unused_names.values()\n self.unused_names.sort(key=attrgetter('lineno'))\n"
] |
#!/usr/bin/python
"""
FindImports is a script that processes Python module dependencies. Currently
it can be used for finding unused imports and graphing module dependencies
(with graphviz).
Syntax: findimports.py [options] [filename|dirname ...]
Options:
-h, --help This help message
-i, --imports Print dependency graph (default action).
-d, --dot Print dependency graph in dot (graphviz) format.
-n, --names Print dependency graph with all imported names.
-u, --unused Print unused imports.
-a, --all Print unused imports even if there's a comment.
--duplicate Print duplicate imports.
-v Print more information (currently only affects --duplicate).
-N, --noext Omit external dependencies.
-p, --packages Convert the module graph to a package graph.
-l N, --level N Collapse subpackages deeper than the Nth level.
-c, --collapse Collapse dependency cycles.
-T, --tests Collapse packages named 'tests' and 'ftests' with parent
packages.
FindImports requires Python 2.6 or later.
Notes:
findimports processes doctest blocks inside docstrings.
findimports.py -u will not complain about import statements that have
a comment on the same line, e.g.:
from somewhereelse import somename # reexport
findimports.py -u -a will ignore comments and print these statements also.
Caching:
If you want to produce several different reports from the same dataset,
you can do it as follows:
findimports.py --write-cache foo.importcache dirname
findimports.py foo.importcache -d -T > graph1.dot
findimports.py foo.importcache -d -N -c -p -l 2 > graph2.dot
Copyright (c) 2003--2015 Marius Gedminas <marius@pov.lt>
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 675 Mass
Ave, Cambridge, MA 02139, USA.
"""
from __future__ import print_function
import ast
import doctest
import linecache
import optparse
import os
import pickle
import re
import sys
import zipfile
from operator import attrgetter
# Package metadata.
__version__ = '1.5.2.dev0'
__author__ = 'Marius Gedminas <marius@gedmin.as>'
__licence__ = 'GPL v2 or later'
__url__ = 'https://github.com/mgedmin/findimports'
def adjust_lineno(filename, lineno, name):
    """Adjust the line number of an import.

    Needed because import statements can span multiple lines, and our lineno
    is always the first line number.
    """
    if name == '*':
        pattern = re.compile('[*]')
    else:
        pattern = re.compile(r'\b%s\b' % re.escape(name))
    # Hack warning: might be fooled by comments
    while True:
        line = linecache.getline(filename, lineno)
        # stop at end of file ('' from linecache) or on the matching line
        if not line or pattern.search(line):
            return lineno
        lineno += 1
class ImportInfo(object):
    """A record of a name and the location of the import statement."""

    def __init__(self, name, filename, lineno, level):
        self.name = name
        self.filename = filename
        self.lineno = lineno
        self.level = level      # relative-import level (None for plain import)

    def __repr__(self):
        return '{0}({1!r}, {2!r}, {3!r}, {4!r})'.format(
            self.__class__.__name__, self.name, self.filename,
            self.lineno, self.level)
class ImportFinder(ast.NodeVisitor):
    """AST visitor that collects all imported names in its imports attribute.

    For example, the following import statements in the AST tree

        import a, b.c, d as e
        from q.w.e import x, y as foo, z
        from woof import *

    will cause ``imports`` to contain

        a
        b.c
        d
        q.w.e.x
        q.w.e.y
        q.w.e.z
        woof.*
    """

    lineno_offset = 0 # needed when recursively parsing docstrings

    def __init__(self, filename):
        self.imports = []       # ImportInfo records, in source order
        self.filename = filename

    def processImport(self, name, imported_as, full_name, level, node):
        """Record one imported name."""
        # import statements can span lines; find the line that actually
        # mentions ``name``
        lineno = adjust_lineno(self.filename,
                               self.lineno_offset + node.lineno,
                               name)
        info = ImportInfo(full_name, self.filename, lineno, level)
        self.imports.append(info)

    def visit_Import(self, node):
        # handles: import a, b.c, d as e
        for alias in node.names:
            self.processImport(alias.name, alias.asname, alias.name, None, node)

    def visit_ImportFrom(self, node):
        # handles: from q.w.e import x, y as foo
        if node.module == '__future__':
            # __future__ imports are compiler directives, not dependencies
            return
        for alias in node.names:
            name = alias.name
            imported_as = alias.asname
            fullname = '%s.%s' % (node.module, name) if node.module else name
            self.processImport(name, imported_as, fullname, node.level, node)

    def visitSomethingWithADocstring(self, node):
        # ClassDef and FunctionDef have a 'lineno' attribute, Module doesn't.
        lineno = getattr(node, 'lineno', None)
        self.processDocstring(ast.get_docstring(node, clean=False), lineno)
        self.generic_visit(node)

    visit_Module = visitSomethingWithADocstring
    visit_ClassDef = visitSomethingWithADocstring
    visit_FunctionDef = visitSomethingWithADocstring

    def processDocstring(self, docstring, lineno):
        """Scan doctest examples in ``docstring`` for import statements."""
        if not docstring:
            return
        if lineno is None:
            # Module nodes don't have a lineno
            lineno = 0
        dtparser = doctest.DocTestParser()
        try:
            examples = dtparser.get_examples(docstring)
        except Exception:
            # BUG FIX: the format string contained a literal "(unknown)"
            # where the {filename} placeholder belongs (the filename=
            # keyword argument was passed but never used).
            print("{filename}:{lineno}: error while parsing doctest".format(
                filename=self.filename, lineno=lineno), file=sys.stderr)
            raise
        for example in examples:
            try:
                source = example.source
                if not isinstance(source, str):
                    source = source.encode('UTF-8')
                node = ast.parse(source, filename='<docstring>')
            except SyntaxError:
                # BUG FIX: same {filename} placeholder restoration as above.
                print("{filename}:{lineno}: syntax error in doctest".format(
                    filename=self.filename, lineno=lineno), file=sys.stderr)
            else:
                # shift line numbers so they refer to the enclosing file
                self.lineno_offset += lineno + example.lineno
                self.visit(node)
                self.lineno_offset -= lineno + example.lineno
class Scope(object):
    """A namespace: tracks imported names and which of them were used."""

    def __init__(self, parent=None, name=None):
        self.parent = parent        # enclosing Scope, or None at top level
        self.name = name            # human-readable description
        self.imports = {}           # imported name -> ImportInfo
        self.unused_names = {}      # subset of imports not yet referenced

    def haveImport(self, name):
        """Is ``name`` imported in this scope or any enclosing one?"""
        scope = self
        while scope is not None:
            if name in scope.imports:
                return True
            scope = scope.parent
        return False

    def whereImported(self, name):
        """Return the ImportInfo for ``name`` (it must be imported)."""
        scope = self
        while name not in scope.imports:
            scope = scope.parent
        return scope.imports[name]

    def addImport(self, name, filename, level, lineno):
        """Record an import of ``name``; it starts out unused."""
        info = ImportInfo(name, filename, lineno, level)
        self.imports[name] = info
        self.unused_names[name] = info

    def useName(self, name):
        """Mark ``name`` as used here and in every enclosing scope."""
        scope = self
        while scope is not None:
            scope.unused_names.pop(name, None)
            scope = scope.parent
class ImportFinderAndNameTracker(ImportFinder):
    """ImportFinder that also keeps track on used names."""

    warn_about_duplicates = False
    verbose = False

    def __init__(self, filename):
        ImportFinder.__init__(self, filename)
        self.scope = self.top_level = Scope(name=filename)
        self.scope_stack = []       # enclosing scopes, innermost last
        self.unused_names = []      # ImportInfo records collected on scope exit

    def newScope(self, parent, name=None):
        """Push a new child scope of ``parent``."""
        self.scope_stack.append(self.scope)
        self.scope = Scope(parent, name)

    def leaveScope(self):
        """Pop the current scope, collecting its unused imports."""
        self.unused_names += self.scope.unused_names.values()
        self.scope = self.scope_stack.pop()

    def leaveAllScopes(self):
        """Collect unused imports from the top-level scope; call when done."""
        # newScope()/leaveScope() calls are always balanced so scope_stack
        # should be empty at this point
        assert not self.scope_stack
        self.unused_names += self.scope.unused_names.values()
        self.unused_names.sort(key=attrgetter('lineno'))

    def processDocstring(self, docstring, lineno):
        # doctest imports live in their own scope so they don't leak
        self.newScope(self.top_level, 'docstring')
        ImportFinder.processDocstring(self, docstring, lineno)
        self.leaveScope()

    def visit_FunctionDef(self, node):
        # function-local imports are tracked in a nested scope
        self.newScope(self.scope, 'function %s' % node.name)
        ImportFinder.visit_FunctionDef(self, node)
        self.leaveScope()

    def processImport(self, name, imported_as, full_name, level, node):
        """Record the import and remember the name it binds."""
        ImportFinder.processImport(self, name, imported_as, full_name, level, node)
        if not imported_as:
            imported_as = name
        if imported_as != "*":
            # star imports bind an unknowable set of names; don't track them
            lineno = self.lineno_offset + node.lineno
            if (self.warn_about_duplicates and
                    self.scope.haveImport(imported_as)):
                where = self.scope.whereImported(imported_as).lineno
                line = linecache.getline(self.filename, lineno)
                if '#' not in line:
                    # BUG FIX: the format strings contained a literal
                    # "(unknown)" where the {filename} placeholder belongs
                    # (the filename= keyword was passed but never used).
                    print("{filename}:{lineno}: {name} imported again".format(
                        filename=self.filename, lineno=lineno, name=imported_as), file=sys.stderr)
                    if self.verbose:
                        print("{filename}:{lineno}: (location of previous import)".format(
                            filename=self.filename, lineno=where), file=sys.stderr)
            else:
                self.scope.addImport(imported_as, self.filename, level, lineno)

    def visit_Name(self, node):
        # a bare name reference marks that name as used
        self.scope.useName(node.id)

    def visit_Attribute(self, node):
        """Record a dotted-name use such as ``a.b.c``."""
        full_name = [node.attr]
        parent = node.value
        while isinstance(parent, ast.Attribute):
            full_name.append(parent.attr)
            parent = parent.value
        if isinstance(parent, ast.Name):
            full_name.append(parent.id)
        full_name.reverse()
        name = ""
        for part in full_name:
            if name:
                name = '%s.%s' % (name, part)
            else:
                name += part
        self.scope.useName(name)
        self.generic_visit(node)
def find_imports(filename):
    """Find all imported names in a given file.

    Returns a list of ImportInfo objects.
    """
    with open(filename) as source:
        tree = ast.parse(source.read(), filename)
    finder = ImportFinder(filename)
    finder.visit(tree)
    return finder.imports
class Module(object):
    """Node in a module dependency graph.

    Packages may also be represented as Module objects.

    ``imports`` is a set of module names this module depends on.

    ``imported_names`` is a list of all names that were imported from other
    modules (actually, ImportInfo objects).

    ``unused_names`` is a list of names that were imported, but are not used
    (actually, ImportInfo objects).
    """

    def __init__(self, modname, filename):
        self.modname = modname
        # graph output uses ``label``; it starts out equal to the module name
        self.label = modname
        self.filename = filename
        self.imports = set()
        self.imported_names = ()
        self.unused_names = ()

    def __repr__(self):
        return '<{0}: {1}>'.format(self.__class__.__name__, self.modname)
class ModuleCycle(object):
    """Node in a condensed module dependency graph.

    A strongly-connected component of one or more modules/packages.
    """

    def __init__(self, modnames):
        self.modnames = modnames
        # the first module name stands in for the whole cycle
        self.modname = modnames[0]
        # multi-line label listing every member of the cycle
        self.label = "\n".join(modnames)
        self.imports = set()
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
    """Convert a module graph to a package graph.

    Each module is folded into its containing package (truncated to
    ``packagelevel`` components when given); import edges are re-routed
    between packages, dropping self-loops.
    """
    packages = {}
    for mod in self.listModules():
        pkg_name = self.packageOf(mod.modname, packagelevel)
        if pkg_name not in packages:
            packages[pkg_name] = Module(pkg_name,
                                        os.path.dirname(mod.filename))
        pkg = packages[pkg_name]
        for imported in mod.imports:
            target = self.packageOf(imported, packagelevel)
            if target != pkg.modname:  # no loops
                pkg.imports.add(target)
    graph = ModuleGraph()
    graph.modules = packages
    return graph
def collapseTests(self, pkgnames=('tests', 'ftests')):
    """Collapse test packages with parent packages.

    Works only with package graphs.

    The default for ``pkgnames`` is an immutable tuple instead of a
    mutable list (mutable-default pitfall); any iterable still works.
    """
    packages = {}
    # First pass: keep only nodes whose names survive the stripping --
    # these are the parent packages the test packages collapse into.
    for module in self.listModules():
        package_name = self.removeTestPackage(module.modname, pkgnames)
        if package_name == module.modname:
            packages[package_name] = Module(package_name, module.filename)
    # Second pass: re-route every import edge to the collapsed nodes.
    for module in self.listModules():
        package_name = self.removeTestPackage(module.modname, pkgnames)
        package = packages[package_name]
        for name in module.imports:
            package_name = self.removeTestPackage(name, pkgnames)
            if package_name != package.modname:  # no loops
                package.imports.add(package_name)
    graph = ModuleGraph()
    graph.modules = packages
    return graph
def collapseCycles(self):
    """Create a graph with cycles collapsed.

    Collapse modules participating in a cycle to a single node.
    """
    # This algorithm determines Strongly Connected Components. Look it up.
    # It is adapted to suit our data structures.
    # (Kosaraju-style: one DFS pass to order the vertices by finish time,
    # a second DFS pass over the reversed graph to harvest components.)
    # Phase 0: prepare the graph
    imports = {}
    for u in self.modules:
        imports[u] = set()
        for v in self.modules[u].imports:
            if v in self.modules:  # skip external dependencies
                imports[u].add(v)
    # Phase 1: order the vertices
    visited = {}
    for u in self.modules:
        visited[u] = False
    order = []

    def visit1(u):
        # Post-order DFS: u is appended only after all its successors.
        visited[u] = True
        for v in imports[u]:
            if not visited[v]:
                visit1(v)
        order.append(u)

    for u in self.modules:
        if not visited[u]:
            visit1(u)
    order.reverse()
    # Phase 2: compute the inverse graph
    revimports = {}
    for u in self.modules:
        revimports[u] = set()
    for u in self.modules:
        for v in imports[u]:
            revimports[v].add(u)
    # Phase 3: determine the strongly connected components
    components = {}
    component_of = {}
    for u in self.modules:
        visited[u] = False

    def visit2(u):
        # DFS over the reversed edges; collects one whole component.
        visited[u] = True
        component.append(u)
        for v in revimports[u]:
            if not visited[v]:
                visit2(v)

    for u in order:
        if not visited[u]:
            component = []
            visit2(u)
            component.sort()
            node = ModuleCycle(component)
            components[node.modname] = node
            for modname in component:
                component_of[modname] = node
    # Phase 4: construct the condensed graph
    for node in components.values():
        for modname in node.modnames:
            for impname in imports[modname]:
                other = component_of[impname].modname
                if other != node.modname:
                    node.imports.add(other)
    graph = ModuleGraph()
    graph.modules = components
    return graph
def printImportedNames(self):
    """Produce a report of imported names."""
    for mod in self.listModules():
        names = [imp.name for imp in mod.imported_names]
        print("%s:" % mod.modname)
        print(" %s" % "\n ".join(names))
def printImports(self):
    """Produce a report of dependencies."""
    for mod in self.listModules():
        print("%s:" % mod.label)
        if self.external_dependencies:
            deps = sorted(mod.imports)
        else:
            # Restrict the report to modules we actually parsed.
            deps = sorted(name for name in mod.imports
                          if name in self.modules)
        print(" %s" % "\n ".join(deps))
def printUnusedImports(self):
    """Produce a report of unused imports."""
    for mod in self.listModules():
        ordered = sorted((unused.lineno, unused.name)
                         for unused in mod.unused_names)
        for lineno, name in ordered:
            if not self.all_unused:
                src = linecache.getline(mod.filename, lineno)
                if '#' in src:
                    # assume there's a comment explaining why it's not used
                    continue
            print("%s:%s: %s not used" % (mod.filename, lineno, name))
def printDot(self):
    """Produce a dependency graph in dot format."""
    print("digraph ModuleDependencies {")
    print(" node[shape=box];")
    allNames = set()
    nameDict = {}
    # Declare one box-shaped node per parsed module; remember its dot id.
    for n, module in enumerate(self.listModules()):
        module._dot_name = "mod%d" % n
        nameDict[module.modname] = module._dot_name
        print(" %s[label=\"%s\"];" % (module._dot_name,
                                      quote(module.label)))
        allNames |= module.imports
    # Nodes declared from here on (external dependencies) are dotted.
    print(" node[style=dotted];")
    if self.external_dependencies:
        myNames = set(self.modules)
        extNames = list(allNames - myNames)
        extNames.sort()
        for n, name in enumerate(extNames):
            nameDict[name] = id = "extmod%d" % n
            print(" %s[label=\"%s\"];" % (id, name))
    # Emit the edges; targets without a declared node are skipped
    # (external names when external_dependencies is off).
    for modname, module in sorted(self.modules.items()):
        for other in sorted(module.imports):
            if other in nameDict:
                print(" %s -> %s;" % (nameDict[module.modname],
                                      nameDict[other]))
    print("}")
def quote(s):
    """Quote a string for use in a graphviz label.

    This function is probably incomplete.
    """
    # The backslash must be escaped first, or it would re-escape the
    # backslashes introduced by the later replacements.
    replacements = (("\\", "\\\\"), ('"', '\\"'), ('\n', '\\n'))
    for old, new in replacements:
        s = s.replace(old, new)
    return s
def main(argv=None):
    """Command-line entry point.

    ``argv`` is a full argument vector (including the program name);
    when None, optparse falls back to ``sys.argv``.  Returns a process
    exit code.
    """
    progname = os.path.basename(argv[0]) if argv else None
    # The first paragraph of the module docstring doubles as --help text.
    description = __doc__.strip().split('\n\n')[0]
    parser = optparse.OptionParser('%prog [options] [filename|dirname ...]',
                                   prog=progname, description=description)
    # The -i/-d/-n/-u options all store the name of a ModuleGraph report
    # method into opts.action; it is dispatched via getattr() below.
    parser.add_option('-i', '--imports', action='store_const',
                      dest='action', const='printImports',
                      default='printImports',
                      help='print dependency graph (default action)')
    parser.add_option('-d', '--dot', action='store_const',
                      dest='action', const='printDot',
                      help='print dependency graph in dot (graphviz) format')
    parser.add_option('-n', '--names', action='store_const',
                      dest='action', const='printImportedNames',
                      help='print dependency graph with all imported names')
    parser.add_option('-u', '--unused', action='store_const',
                      dest='action', const='printUnusedImports',
                      help='print unused imports')
    parser.add_option('-a', '--all', action='store_true',
                      dest='all_unused',
                      help="don't ignore unused imports when there's a comment on the same line (only affects -u)")
    parser.add_option('--duplicate', action='store_true',
                      dest='warn_about_duplicates',
                      help='warn about duplicate imports')
    parser.add_option('-v', '--verbose', action='store_true',
                      help='print more information (currently only affects --duplicate)')
    parser.add_option('-N', '--noext', action='store_true',
                      help='omit external dependencies')
    parser.add_option('-p', '--packages', action='store_true',
                      dest='condense_to_packages',
                      help='convert the module graph to a package graph')
    parser.add_option('-l', '--level', type='int',
                      dest='packagelevel',
                      help='collapse subpackages to the topmost Nth levels')
    parser.add_option('-c', '--collapse', action='store_true',
                      dest='collapse_cycles',
                      help='collapse dependency cycles')
    parser.add_option('-T', '--tests', action='store_true',
                      dest='collapse_tests',
                      help="collapse packages named 'tests' and 'ftests' with parent packages")
    parser.add_option('-w', '--write-cache', metavar='FILE',
                      help="write a pickle cache of parsed imports; provide the cache filename as the only non-option argument to load it back")
    try:
        opts, args = parser.parse_args(args=argv[1:] if argv else None)
    except SystemExit as e:
        # optparse exits for --help and for usage errors; turn that into
        # a return code so main() can be called as a function.
        return e.code
    if not args:
        args = ['.']
    g = ModuleGraph()
    g.all_unused = opts.all_unused
    g.warn_about_duplicates = opts.warn_about_duplicates
    g.verbose = opts.verbose
    # Tracking unused names is more expensive; enable only when needed.
    g.trackUnusedNames = (opts.action == 'printUnusedImports')
    for fn in args:
        g.parsePathname(fn)
    if opts.write_cache:
        g.writeCache(opts.write_cache)
    # Optional graph transformations, applied in a fixed order.
    if opts.condense_to_packages:
        g = g.packageGraph(opts.packagelevel)
    if opts.collapse_tests:
        g = g.collapseTests()
    if opts.collapse_cycles:
        g = g.collapseCycles()
    g.external_dependencies = not opts.noext
    # Dispatch to the selected report method by name.
    getattr(g, opts.action)()
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':  # pragma: nocover
    sys.exit(main())
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.parsePathname
|
python
|
def parsePathname(self, pathname):
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
|
Parse one or more source files.
``pathname`` may be a file name or a directory name.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L417-L433
| null |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
    self.modules = {}            # modname -> Module
    self.path = sys.path         # directories/zip files searched for modules
    self._module_cache = {}      # (dotted_name, extrapath) -> modname
    self._warned_about = set()   # subjects already reported by warn()
    self._stderr = sys.stderr    # warning sink, replaceable for testing
    # File extensions recognized as importable modules.
    self._exts = ('.py', '.so', '.dll')
    if hasattr(sys, '_multiarch'):  # pragma: nocover
        # Ubuntu 14.04 LTS renames
        # /usr/lib/python2.7/lib-dynload/datetime.so to
        # /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
        # (https://github.com/mgedmin/findimports/issues/3)
        self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
    """Print a warning, at most once per ``about`` subject.

    ``message`` is %-formatted with ``args`` when any are given.
    """
    if about in self._warned_about:
        return  # this subject was already reported
    self._warned_about.add(about)
    if args:
        message %= args
    print(message, file=self._stderr)
def writeCache(self, filename):
    """Write the graph's module table to a pickle cache file."""
    with open(filename, 'wb') as cache_file:
        pickle.dump(self.modules, cache_file)
def readCache(self, filename):
    """Load the graph's module table from a pickle cache file.

    NOTE(review): pickle trusts its input entirely -- only load cache
    files this program wrote itself.
    """
    with open(filename, 'rb') as cache_file:
        self.modules = pickle.load(cache_file)
def parseFile(self, filename):
    """Parse a single source file and register it in the graph.

    Fills in the new Module's ``imported_names``, ``unused_names``
    (populated only when ``trackUnusedNames`` is enabled) and the
    resolved ``imports`` set.
    """
    modname = self.filenameToModname(filename)
    module = Module(modname, filename)
    self.modules[modname] = module
    if self.trackUnusedNames:
        # The name-tracking parser is slower, so it runs only on demand.
        module.imported_names, module.unused_names = \
            find_imports_and_track_names(filename,
                                         self.warn_about_duplicates,
                                         self.verbose)
    else:
        module.imported_names = find_imports(filename)
        module.unused_names = None
    # Renamed from ``dir`` to avoid shadowing the builtin.
    dirname = os.path.dirname(filename)
    module.imports = {
        self.findModuleOfName(imp.name, imp.level, filename, dirname)
        for imp in module.imported_names}
def filenameToModname(self, filename):
    """Convert a filename to a dotted module name.

    Walks up the directory tree as long as __init__.py files are
    present, so ``pkg/sub/mod.py`` maps to ``pkg.sub.mod``.
    """
    # Try the extensions in reverse order so that longer, more specific
    # suffixes (e.g. multiarch '.x86_64-linux-gnu.so') win over '.so'.
    matched = next((ext for ext in reversed(self._exts)
                    if filename.endswith(ext)), None)
    if matched is None:
        self.warn(filename, '%s: unknown file name extension', filename)
        filename = os.path.abspath(filename)
    else:
        filename = filename[:-len(matched)]
    parts = filename.split(os.path.sep)
    dotted = []
    while parts:
        dotted.append(parts.pop())
        # Stop once the remaining prefix is no longer a package.
        if not os.path.exists(os.path.sep.join(parts + ['__init__.py'])):
            break
    return '.'.join(reversed(dotted))
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
    """Given a fully qualified name, find what module contains it.

    ``level`` is the relative-import level from the import statement;
    ``filename`` is the importing file (used only in the warning);
    ``extrapath`` is the directory of the importing file.
    """
    if dotted_name.endswith('.*'):
        # 'from pkg import *' -- the package itself is the dependency.
        return dotted_name[:-2]
    name = dotted_name
    # extrapath is None only in a couple of test cases; in real life it's
    # always present
    if level and level > 1 and extrapath:
        # strip trailing path bits for each extra level to account for
        # relative imports
        # from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
        # from .. import X has level == 2 and one trailing path component must go
        # from ... import X has level == 3 and two trailing path components must go
        extrapath = extrapath.split(os.path.sep)
        level -= 1
        extrapath = extrapath[0:-level]
        extrapath = os.path.sep.join(extrapath)
    while name:
        # Try the longest prefix first, then chop components off the
        # end: trailing components may be names defined in the module.
        candidate = self.isModule(name, extrapath)
        if candidate:
            return candidate
        candidate = self.isPackage(name, extrapath)
        if candidate:
            return candidate
        # NOTE(review): when no dot is left, rfind returns -1 and the
        # slice drops one *character* per iteration until the name is
        # empty -- harmless, but quirky.
        name = name[:name.rfind('.')]
    self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
    return dotted_name
def isModule(self, dotted_name, extrapath=None):
    """Is ``dotted_name`` the name of a module?

    Returns the module's canonical dotted name when it can be located
    (next to ``extrapath``, among loaded/builtin modules, or anywhere
    on ``self.path`` including zip files), otherwise None.  Results
    are memoized in ``self._module_cache``.
    """
    try:
        return self._module_cache[(dotted_name, extrapath)]
    except KeyError:
        pass
    if dotted_name in sys.modules or dotted_name in self.builtin_modules:
        return dotted_name
    filename = dotted_name.replace('.', os.path.sep)
    if extrapath:
        # Look next to the importing file first (relative imports).
        for ext in self._exts:
            candidate = os.path.join(extrapath, filename) + ext
            if os.path.exists(candidate):
                modname = self.filenameToModname(candidate)
                self._module_cache[(dotted_name, extrapath)] = modname
                return modname
    # Fall back to the extrapath-independent cache entry.
    try:
        return self._module_cache[(dotted_name, None)]
    except KeyError:
        pass
    for dir in self.path:
        if os.path.isfile(dir):
            if dir.endswith('.egg-info'):
                # distribute creates a setuptools-blah-blah.egg-info
                # that ends up in sys.path
                continue
            # A file on sys.path is presumably a zip archive (egg).
            try:
                zf = zipfile.ZipFile(dir)
            except zipfile.BadZipfile:
                self.warn(dir, "%s: not a directory or zip file", dir)
                continue
            names = zf.namelist()
            for ext in self._exts:
                candidate = filename + ext
                if candidate in names:
                    modname = filename.replace(os.path.sep, '.')
                    self._module_cache[(dotted_name, extrapath)] = modname
                    self._module_cache[(dotted_name, None)] = modname
                    return modname
        else:
            for ext in self._exts:
                candidate = os.path.join(dir, filename) + ext
                if os.path.exists(candidate):
                    modname = self.filenameToModname(candidate)
                    self._module_cache[(dotted_name, extrapath)] = modname
                    self._module_cache[(dotted_name, None)] = modname
                    return modname
    return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.writeCache
|
python
|
def writeCache(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
|
Write the graph to a cache file.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L435-L438
| null |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
    """Parse one or more source files.

    ``pathname`` may be a file name or a directory name.
    """
    if os.path.isdir(pathname):
        for root, dirs, files in os.walk(pathname):
            # In-place sort keeps the directory traversal deterministic.
            dirs.sort()
            for name in sorted(files):
                # Skip non-Python files and emacsish junk (.#foo.py).
                if not name.endswith('.py') or name.startswith('.#'):
                    continue
                self.parseFile(os.path.join(root, name))
        return
    if pathname.endswith('.importcache'):
        self.readCache(pathname)
    else:
        self.parseFile(pathname)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.readCache
|
python
|
def readCache(self, filename):
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
|
Load the graph from a cache file.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L440-L443
| null |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.parseFile
|
python
|
def parseFile(self, filename):
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
|
Parse a single file.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L445-L461
|
[
"def find_imports(filename):\n \"\"\"Find all imported names in a given file.\n\n Returns a list of ImportInfo objects.\n \"\"\"\n with open(filename) as f:\n root = ast.parse(f.read(), filename)\n visitor = ImportFinder(filename)\n visitor.visit(root)\n return visitor.imports\n",
"def find_imports_and_track_names(filename, warn_about_duplicates=False,\n verbose=False):\n \"\"\"Find all imported names in a given file.\n\n Returns ``(imports, unused)``. Both are lists of ImportInfo objects.\n \"\"\"\n with open(filename) as f:\n root = ast.parse(f.read(), filename)\n visitor = ImportFinderAndNameTracker(filename)\n visitor.warn_about_duplicates = warn_about_duplicates\n visitor.verbose = verbose\n visitor.visit(root)\n visitor.leaveAllScopes()\n return visitor.imports, visitor.unused_names\n",
"def filenameToModname(self, filename):\n \"\"\"Convert a filename to a module name.\"\"\"\n for ext in reversed(self._exts):\n if filename.endswith(ext):\n filename = filename[:-len(ext)]\n break\n else:\n self.warn(filename, '%s: unknown file name extension', filename)\n filename = os.path.abspath(filename)\n elements = filename.split(os.path.sep)\n modname = []\n while elements:\n modname.append(elements[-1])\n del elements[-1]\n if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):\n break\n modname.reverse()\n modname = \".\".join(modname)\n return modname\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.filenameToModname
|
python
|
def filenameToModname(self, filename):
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
|
Convert a filename to a module name.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L463-L481
|
[
"def warn(self, about, message, *args):\n if about in self._warned_about:\n return\n if args:\n message = message % args\n print(message, file=self._stderr)\n self._warned_about.add(about)\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.findModuleOfName
|
python
|
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
|
Given a fully qualified name, find what module contains it.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L483-L511
| null |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.isModule
|
python
|
def isModule(self, dotted_name, extrapath=None):
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
|
Is ``dotted_name`` the name of a module?
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L513-L560
|
[
"def filenameToModname(self, filename):\n \"\"\"Convert a filename to a module name.\"\"\"\n for ext in reversed(self._exts):\n if filename.endswith(ext):\n filename = filename[:-len(ext)]\n break\n else:\n self.warn(filename, '%s: unknown file name extension', filename)\n filename = os.path.abspath(filename)\n elements = filename.split(os.path.sep)\n modname = []\n while elements:\n modname.append(elements[-1])\n del elements[-1]\n if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):\n break\n modname.reverse()\n modname = \".\".join(modname)\n return modname\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.isPackage
|
python
|
def isPackage(self, dotted_name, extrapath=None):
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
|
Is ``dotted_name`` the name of a package?
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L562-L567
|
[
"def isModule(self, dotted_name, extrapath=None):\n \"\"\"Is ``dotted_name`` the name of a module?\"\"\"\n try:\n return self._module_cache[(dotted_name, extrapath)]\n except KeyError:\n pass\n if dotted_name in sys.modules or dotted_name in self.builtin_modules:\n return dotted_name\n filename = dotted_name.replace('.', os.path.sep)\n if extrapath:\n for ext in self._exts:\n candidate = os.path.join(extrapath, filename) + ext\n if os.path.exists(candidate):\n modname = self.filenameToModname(candidate)\n self._module_cache[(dotted_name, extrapath)] = modname\n return modname\n try:\n return self._module_cache[(dotted_name, None)]\n except KeyError:\n pass\n for dir in self.path:\n if os.path.isfile(dir):\n if dir.endswith('.egg-info'):\n # distribute creates a setuptools-blah-blah.egg-info\n # that ends up in sys.path\n continue\n try:\n zf = zipfile.ZipFile(dir)\n except zipfile.BadZipfile:\n self.warn(dir, \"%s: not a directory or zip file\", dir)\n continue\n names = zf.namelist()\n for ext in self._exts:\n candidate = filename + ext\n if candidate in names:\n modname = filename.replace(os.path.sep, '.')\n self._module_cache[(dotted_name, extrapath)] = modname\n self._module_cache[(dotted_name, None)] = modname\n return modname\n else:\n for ext in self._exts:\n candidate = os.path.join(dir, filename) + ext\n if os.path.exists(candidate):\n modname = self.filenameToModname(candidate)\n self._module_cache[(dotted_name, extrapath)] = modname\n self._module_cache[(dotted_name, None)] = modname\n return modname\n return None\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.packageOf
|
python
|
def packageOf(self, dotted_name, packagelevel=None):
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
|
Determine the package that contains ``dotted_name``.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L569-L577
|
[
"def isPackage(self, dotted_name, extrapath=None):\n \"\"\"Is ``dotted_name`` the name of a package?\"\"\"\n candidate = self.isModule(dotted_name + '.__init__', extrapath)\n if candidate:\n candidate = candidate[:-len(\".__init__\")]\n return candidate\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.listModules
|
python
|
def listModules(self):
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
|
Return an alphabetical list of all modules.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L590-L594
| null |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.packageGraph
|
python
|
def packageGraph(self, packagelevel=None):
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
|
Convert a module graph to a package graph.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L596-L611
|
[
"def packageOf(self, dotted_name, packagelevel=None):\n \"\"\"Determine the package that contains ``dotted_name``.\"\"\"\n if '.' not in dotted_name:\n return dotted_name\n if not self.isPackage(dotted_name):\n dotted_name = '.'.join(dotted_name.split('.')[:-1])\n if packagelevel:\n dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])\n return dotted_name\n",
"def listModules(self):\n \"\"\"Return an alphabetical list of all modules.\"\"\"\n modules = list(self.modules.items())\n modules.sort()\n return [module for name, module in modules]\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.collapseCycles
|
python
|
def collapseCycles(self):
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
|
Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L634-L699
|
[
"def visit1(u):\n visited[u] = True\n for v in imports[u]:\n if not visited[v]:\n visit1(v)\n order.append(u)\n",
"def visit2(u):\n visited[u] = True\n component.append(u)\n for v in revimports[u]:\n if not visited[v]:\n visit2(v)\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.printImportedNames
|
python
|
def printImportedNames(self):
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
|
Produce a report of imported names.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L701-L705
|
[
"def listModules(self):\n \"\"\"Return an alphabetical list of all modules.\"\"\"\n modules = list(self.modules.items())\n modules.sort()\n return [module for name, module in modules]\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.printImports
|
python
|
def printImports(self):
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
|
Produce a report of dependencies.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L707-L717
|
[
"def listModules(self):\n \"\"\"Return an alphabetical list of all modules.\"\"\"\n modules = list(self.modules.items())\n modules.sort()\n return [module for name, module in modules]\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.printUnusedImports
|
python
|
def printUnusedImports(self):
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
|
Produce a report of unused imports.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L719-L731
|
[
"def listModules(self):\n \"\"\"Return an alphabetical list of all modules.\"\"\"\n modules = list(self.modules.items())\n modules.sort()\n return [module for name, module in modules]\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printDot(self):
"""Produce a dependency graph in dot format."""
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
mgedmin/findimports
|
findimports.py
|
ModuleGraph.printDot
|
python
|
def printDot(self):
print("digraph ModuleDependencies {")
print(" node[shape=box];")
allNames = set()
nameDict = {}
for n, module in enumerate(self.listModules()):
module._dot_name = "mod%d" % n
nameDict[module.modname] = module._dot_name
print(" %s[label=\"%s\"];" % (module._dot_name,
quote(module.label)))
allNames |= module.imports
print(" node[style=dotted];")
if self.external_dependencies:
myNames = set(self.modules)
extNames = list(allNames - myNames)
extNames.sort()
for n, name in enumerate(extNames):
nameDict[name] = id = "extmod%d" % n
print(" %s[label=\"%s\"];" % (id, name))
for modname, module in sorted(self.modules.items()):
for other in sorted(module.imports):
if other in nameDict:
print(" %s -> %s;" % (nameDict[module.modname],
nameDict[other]))
print("}")
|
Produce a dependency graph in dot format.
|
train
|
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L733-L758
|
[
"def quote(s):\n \"\"\"Quote a string for graphviz.\n\n This function is probably incomplete.\n \"\"\"\n return s.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"').replace('\\n', '\\\\n')\n",
"def listModules(self):\n \"\"\"Return an alphabetical list of all modules.\"\"\"\n modules = list(self.modules.items())\n modules.sort()\n return [module for name, module in modules]\n"
] |
class ModuleGraph(object):
"""Module graph."""
trackUnusedNames = False
all_unused = False
warn_about_duplicates = False
verbose = False
external_dependencies = True
# some builtin modules do not exist as separate .so files on disk
builtin_modules = sys.builtin_module_names
def __init__(self):
self.modules = {}
self.path = sys.path
self._module_cache = {}
self._warned_about = set()
self._stderr = sys.stderr
self._exts = ('.py', '.so', '.dll')
if hasattr(sys, '_multiarch'): # pragma: nocover
# Ubuntu 14.04 LTS renames
# /usr/lib/python2.7/lib-dynload/datetime.so to
# /usr/lib/python2.7/lib-dynload/datetime.x86_64-linux-gnu.so
# (https://github.com/mgedmin/findimports/issues/3)
self._exts += ('.%s.so' % sys._multiarch, )
def warn(self, about, message, *args):
if about in self._warned_about:
return
if args:
message = message % args
print(message, file=self._stderr)
self._warned_about.add(about)
def parsePathname(self, pathname):
"""Parse one or more source files.
``pathname`` may be a file name or a directory name.
"""
if os.path.isdir(pathname):
for root, dirs, files in os.walk(pathname):
dirs.sort()
files.sort()
for fn in files:
# ignore emacsish junk
if fn.endswith('.py') and not fn.startswith('.#'):
self.parseFile(os.path.join(root, fn))
elif pathname.endswith('.importcache'):
self.readCache(pathname)
else:
self.parseFile(pathname)
def writeCache(self, filename):
"""Write the graph to a cache file."""
with open(filename, 'wb') as f:
pickle.dump(self.modules, f)
def readCache(self, filename):
"""Load the graph from a cache file."""
with open(filename, 'rb') as f:
self.modules = pickle.load(f)
def parseFile(self, filename):
"""Parse a single file."""
modname = self.filenameToModname(filename)
module = Module(modname, filename)
self.modules[modname] = module
if self.trackUnusedNames:
module.imported_names, module.unused_names = \
find_imports_and_track_names(filename,
self.warn_about_duplicates,
self.verbose)
else:
module.imported_names = find_imports(filename)
module.unused_names = None
dir = os.path.dirname(filename)
module.imports = set(
[self.findModuleOfName(imp.name, imp.level, filename, dir)
for imp in module.imported_names])
def filenameToModname(self, filename):
"""Convert a filename to a module name."""
for ext in reversed(self._exts):
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
else:
self.warn(filename, '%s: unknown file name extension', filename)
filename = os.path.abspath(filename)
elements = filename.split(os.path.sep)
modname = []
while elements:
modname.append(elements[-1])
del elements[-1]
if not os.path.exists(os.path.sep.join(elements + ['__init__.py'])):
break
modname.reverse()
modname = ".".join(modname)
return modname
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name
def isModule(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a module?"""
try:
return self._module_cache[(dotted_name, extrapath)]
except KeyError:
pass
if dotted_name in sys.modules or dotted_name in self.builtin_modules:
return dotted_name
filename = dotted_name.replace('.', os.path.sep)
if extrapath:
for ext in self._exts:
candidate = os.path.join(extrapath, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
return modname
try:
return self._module_cache[(dotted_name, None)]
except KeyError:
pass
for dir in self.path:
if os.path.isfile(dir):
if dir.endswith('.egg-info'):
# distribute creates a setuptools-blah-blah.egg-info
# that ends up in sys.path
continue
try:
zf = zipfile.ZipFile(dir)
except zipfile.BadZipfile:
self.warn(dir, "%s: not a directory or zip file", dir)
continue
names = zf.namelist()
for ext in self._exts:
candidate = filename + ext
if candidate in names:
modname = filename.replace(os.path.sep, '.')
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
else:
for ext in self._exts:
candidate = os.path.join(dir, filename) + ext
if os.path.exists(candidate):
modname = self.filenameToModname(candidate)
self._module_cache[(dotted_name, extrapath)] = modname
self._module_cache[(dotted_name, None)] = modname
return modname
return None
def isPackage(self, dotted_name, extrapath=None):
"""Is ``dotted_name`` the name of a package?"""
candidate = self.isModule(dotted_name + '.__init__', extrapath)
if candidate:
candidate = candidate[:-len(".__init__")]
return candidate
def packageOf(self, dotted_name, packagelevel=None):
"""Determine the package that contains ``dotted_name``."""
if '.' not in dotted_name:
return dotted_name
if not self.isPackage(dotted_name):
dotted_name = '.'.join(dotted_name.split('.')[:-1])
if packagelevel:
dotted_name = '.'.join(dotted_name.split('.')[:packagelevel])
return dotted_name
def removeTestPackage(self, dotted_name, pkgnames=['tests', 'ftests']):
"""Remove tests subpackages from dotted_name."""
result = []
for name in dotted_name.split('.'):
if name in pkgnames:
break
result.append(name)
if not result: # empty names are baad
return dotted_name
return '.'.join(result)
def listModules(self):
"""Return an alphabetical list of all modules."""
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules]
def packageGraph(self, packagelevel=None):
"""Convert a module graph to a package graph."""
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseTests(self, pkgnames=['tests', 'ftests']):
"""Collapse test packages with parent packages.
Works only with package graphs.
"""
packages = {}
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
if package_name == module.modname:
packages[package_name] = Module(package_name, module.filename)
for module in self.listModules():
package_name = self.removeTestPackage(module.modname, pkgnames)
package = packages[package_name]
for name in module.imports:
package_name = self.removeTestPackage(name, pkgnames)
if package_name != package.modname: # no loops
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
def collapseCycles(self):
"""Create a graph with cycles collapsed.
Collapse modules participating in a cycle to a single node.
"""
# This algorithm determines Strongly Connected Components. Look it up.
# It is adapted to suit our data structures.
# Phase 0: prepare the graph
imports = {}
for u in self.modules:
imports[u] = set()
for v in self.modules[u].imports:
if v in self.modules: # skip external dependencies
imports[u].add(v)
# Phase 1: order the vertices
visited = {}
for u in self.modules:
visited[u] = False
order = []
def visit1(u):
visited[u] = True
for v in imports[u]:
if not visited[v]:
visit1(v)
order.append(u)
for u in self.modules:
if not visited[u]:
visit1(u)
order.reverse()
# Phase 2: compute the inverse graph
revimports = {}
for u in self.modules:
revimports[u] = set()
for u in self.modules:
for v in imports[u]:
revimports[v].add(u)
# Phase 3: determine the strongly connected components
components = {}
component_of = {}
for u in self.modules:
visited[u] = False
def visit2(u):
visited[u] = True
component.append(u)
for v in revimports[u]:
if not visited[v]:
visit2(v)
for u in order:
if not visited[u]:
component = []
visit2(u)
component.sort()
node = ModuleCycle(component)
components[node.modname] = node
for modname in component:
component_of[modname] = node
# Phase 4: construct the condensed graph
for node in components.values():
for modname in node.modnames:
for impname in imports[modname]:
other = component_of[impname].modname
if other != node.modname:
node.imports.add(other)
graph = ModuleGraph()
graph.modules = components
return graph
def printImportedNames(self):
"""Produce a report of imported names."""
for module in self.listModules():
print("%s:" % module.modname)
print(" %s" % "\n ".join(imp.name for imp in module.imported_names))
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports))
def printUnusedImports(self):
"""Produce a report of unused imports."""
for module in self.listModules():
names = [(unused.lineno, unused.name)
for unused in module.unused_names]
names.sort()
for lineno, name in names:
if not self.all_unused:
line = linecache.getline(module.filename, lineno)
if '#' in line:
# assume there's a comment explaining why it's not used
continue
print("%s:%s: %s not used" % (module.filename, lineno, name))
|
brendonh/pyth
|
pyth/plugins/rtf15/reader.py
|
Rtf15Reader.read
|
python
|
def read(self, source, errors='strict', clean_paragraphs=True):
reader = Rtf15Reader(source, errors, clean_paragraphs)
return reader.go()
|
source: A list of P objects.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rtf15/reader.py#L80-L86
|
[
"def go(self):\n self.source.seek(0)\n\n if self.source.read(5) != r\"{\\rtf\":\n from pyth.errors import WrongFileType\n raise WrongFileType(\"Doesn't look like an RTF file\")\n\n self.source.seek(0)\n\n self.charsetTable = None\n self.charset = 'cp1252'\n self.group = Group(self)\n self.stack = [self.group]\n self.parse()\n return self.build()\n"
] |
class Rtf15Reader(PythReader):
    """Reads RTF 1.5 data from a seekable binary source into a pyth document."""

    # NOTE(review): a stray @classmethod decorator sat directly on __init__
    # here (left over from an extracted `read` classmethod); it made
    # instantiation impossible (the class object was bound as `self`), so it
    # has been removed.
    def __init__(self, source, errors='strict', clean_paragraphs=True):
        """
        source: seekable file-like object containing the RTF bytes.
        errors: codec error-handling mode for character decoding.
        clean_paragraphs: whether DocBuilder should compress/strip runs.
        """
        self.source = source
        self.errors = errors
        self.clean_paragraphs = clean_paragraphs
        self.document = document.Document

    def go(self):
        """Validate the header, parse the input, and return the built document.

        Raises WrongFileType if the source does not start with "{\\rtf".
        """
        self.source.seek(0)
        if self.source.read(5) != r"{\rtf":
            from pyth.errors import WrongFileType
            raise WrongFileType("Doesn't look like an RTF file")
        self.source.seek(0)
        self.charsetTable = None
        # Default charset until a font table says otherwise.
        self.charset = 'cp1252'
        self.group = Group(self)
        self.stack = [self.group]
        self.parse()
        return self.build()

    def parse(self):
        """Tokenize the RTF stream, maintaining the group stack."""
        while True:
            ch = self.source.read(1)
            if not ch:
                break
            if ch in '\r\n':
                continue
            if ch == '{':
                # Open a nested group; it inherits the skip flag.
                subGroup = Group(self, self.group, self.charsetTable)
                self.stack.append(subGroup)
                subGroup.skip = self.group.skip
                self.group.flushChars()
                self.group = subGroup
            elif ch == '}':
                # Close the current group and fold it into its parent.
                subGroup = self.stack.pop()
                self.group = self.stack[-1]
                subGroup.finalize()
                if subGroup.specialMeaning == 'FONT_TABLE':
                    self.charsetTable = subGroup.charsetTable
                self.group.content.append(subGroup)
            elif self.group.skip:
                # Avoid crashing on stuff we can't handle
                # inside groups we don't care about anyway
                continue
            elif ch == '\\':
                control, digits = self.getControl()
                self.group.handle(control, digits)
            else:
                self.group.char(ch)

    def getControl(self):
        """Read one control word/symbol after a backslash.

        Returns (control, digits) as joined strings; special cases are
        mapped to the pseudo-controls "control_symbol", "par" and
        "ansi_escape".
        """
        chars = []
        digits = []
        current = chars
        first = True
        while True:
            ch = self.source.read(1)
            if not ch:
                break
            if first and ch in '\\{}':
                chars.extend("control_symbol")
                digits.append(ch)
                break
            if first and ch in '\r\n':
                # Special-cased in RTF, equivalent to a \par
                chars.extend("par")
                break
            first = False
            if ch == "'":
                # ANSI escape, takes two hex digits
                chars.extend("ansi_escape")
                digits.extend(self.source.read(2))
                break
            if ch == ' ':
                # Don't rewind, the space is just a delimiter
                break
            if ch not in _CONTROLCHARS:
                # Rewind, it's a meaningful character
                self.source.seek(-1, 1)
                break
            if ch in _DIGITS:
                # Once a digit is seen, the rest belongs to the argument.
                current = digits
            current.append(ch)
        return "".join(chars), "".join(digits)

    def build(self):
        """Replay the flattened token stream through a DocBuilder."""
        doc = document.Document()
        ctx = DocBuilder(doc, self.clean_paragraphs)
        for bit in self.group.flatten():
            # Dispatch on the token's type name: handle_Push, handle_Para, ...
            typeName = type(bit).__name__
            getattr(ctx, "handle_%s" % typeName)(bit)
        ctx.flushParagraph()
        return doc
|
brendonh/pyth
|
pyth/plugins/rtf15/reader.py
|
DocBuilder.cleanParagraph
|
python
|
def cleanParagraph(self):
runs = self.block.content
if not runs:
self.block = None
return
if not self.clean_paragraphs:
return
joinedRuns = []
hasContent = False
for run in runs:
if run.content[0]:
hasContent = True
else:
continue
# For whitespace-only groups, remove any property stuff,
# to avoid extra markup in output
if not run.content[0].strip():
run.properties = {}
# Join runs only if their properties match
if joinedRuns and (run.properties == joinedRuns[-1].properties):
joinedRuns[-1].content[0] += run.content[0]
else:
joinedRuns.append(run)
if hasContent:
# Strip beginning of paragraph
joinedRuns[0].content[0] = joinedRuns[0].content[0].lstrip()
# And then strip the end
joinedRuns[-1].content[0] = joinedRuns[-1].content[0].rstrip()
self.block.content = joinedRuns
else:
self.block = None
|
Compress text runs, remove whitespace at start and end,
skip empty blocks, etc
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rtf15/reader.py#L241-L284
| null |
class DocBuilder(object):
    """Builds a pyth document from the token stream emitted by Rtf15Reader.

    handle_* methods are dispatched by token type name in Rtf15Reader.build().
    """

    def __init__(self, doc, clean_paragraphs=True):
        self.run = []               # text fragments of the current run
        self.propStack = [{}]       # stack of property dicts; top is current
        self.block = None           # paragraph currently being filled
        self.isImage = False        # next flushed run holds image data
        self.listLevel = None       # current list nesting level
        self.listStack = [doc]      # innermost open container is listStack[-1]
        self.clean_paragraphs = clean_paragraphs

    def flushRun(self):
        """Append the accumulated run to the current block and reset it."""
        if self.block is None:
            self.block = document.Paragraph()
        if self.isImage:
            self.block.content.append(
                document.Image(self.propStack[-1].copy(),
                               [str("".join(self.run))]))
            self.isImage = False
        else:
            self.block.content.append(
                document.Text(self.propStack[-1].copy(),
                              [u"".join(self.run)]))
        self.run[:] = []

    def flushParagraph(self):
        """Finish the current paragraph and attach it to the open container."""
        self.flushRun()
        if self.block.content:
            # cleanParagraph is defined elsewhere in this class; it may set
            # self.block to None for empty paragraphs — TODO confirm.
            self.cleanParagraph()
        if self.block is not None:
            self.listStack[-1].append(self.block)

    def handle_unicode(self, bit):
        # Plain text fragment: accumulate into the current run.
        self.run.append(bit)

    def handle_Push(self, _):
        # Enter a group: duplicate the property state.
        self.propStack.append(self.propStack[-1].copy())

    def handle_Pop(self, _):
        # Leave a group: flush pending text, then discard its properties.
        self.flushRun()
        self.propStack.pop()

    def handle_Para(self, para):
        """End the paragraph and adjust list nesting to para.listLevel."""
        self.flushParagraph()
        prevListLevel = self.listLevel
        self.listLevel = para.listLevel
        if self.listLevel > prevListLevel:
            l = document.List()
            self.listStack.append(l)
        elif self.listLevel < prevListLevel:
            l = self.listStack.pop()
            self.listStack[-1].append(l)
        self.block = None

    def handle_Pict(self, pict):
        # Marks the following run as image data.
        self.flushRun()
        self.isImage = True

    def handle_Reset(self, _):
        # Clear all character properties for the current group.
        self.flushRun()
        self.propStack[-1].clear()

    def handle_ReadableMarker(self, marker):
        """Toggle a named character property (bold, italic, url, ...)."""
        self.flushRun()
        if marker.val:
            # RTF needs underline markers for hyperlinks,
            # but nothing else does. If we're in a hyperlink,
            # ignore underlines.
            if 'url' in self.propStack[-1] and marker.name == 'underline':
                return
            self.propStack[-1][marker.name] = marker.val
        else:
            if marker.name in self.propStack[-1]:
                del self.propStack[-1][marker.name]

    def handle_ImageMarker(self, marker):
        """Set, toggle off, or default-enable an image property."""
        if marker.val:
            self.propStack[-1][marker.name] = marker.val
        else:
            if marker.name in self.propStack[-1]:
                # Is there any toggle that is applied to images?
                del self.propStack[-1][marker.name]
            else:
                self.propStack[-1][marker.name] = True
|
brendonh/pyth
|
pyth/plugins/xhtml/css.py
|
CSS.parse_css
|
python
|
def parse_css(self, css):
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
rule = Rule(self.parse_selector(selector))
rule.properties = self.parse_declarations(declarations)
self.rules.append(rule)
|
Parse a css style sheet into the CSS object.
For the moment this will only work for very simple css
documents. It works by using regular expression matching css
syntax. This is not bullet proof.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L73-L85
|
[
"def parse_declarations(self, declarations):\n \"\"\"\n parse a css declaration list\n \"\"\"\n declarations = self.declaration_re.findall(declarations)\n return dict(declarations)\n",
"def parse_selector(self, selector):\n \"\"\"\n parse a css selector\n \"\"\"\n tag, klass = self.selector_re.match(selector).groups()\n return Selector(tag, klass)\n"
] |
class CSS(object):
    """
    Represents a css document
    """

    # The regular expressions used to parse the css document
    # match a rule e.g: '.imp {font-weight: bold; color: blue}'
    ruleset_re = re.compile(r'\s*(.+?)\s+\{(.*?)\}')
    # match a property declaration, e.g: 'font-weight = bold'
    declaration_re = re.compile(r'\s*(.+?):\s*(.+?)\s*?(?:;|$)')
    # match a selector
    selector_re = re.compile(r'(.*?)(?:\.(.*))?$')

    def __init__(self, source=None):
        self.rules = []
        if source:
            self.parse_css(source)

    def __repr__(self):
        return repr(self.rules)

    def parse_css(self, css):
        """
        Parse a css style sheet into the CSS object.
        Only very simple documents are supported: each ruleset is
        matched with a regular expression, which is not bullet proof.
        """
        for selector, declarations in self.ruleset_re.findall(css):
            rule = Rule(self.parse_selector(selector))
            rule.properties = self.parse_declarations(declarations)
            self.rules.append(rule)

    def parse_declarations(self, declarations):
        """parse a css declaration list into a dict"""
        return dict(self.declaration_re.findall(declarations))

    def parse_selector(self, selector):
        """parse a css selector"""
        tag, klass = self.selector_re.match(selector).groups()
        return Selector(tag, klass)

    def get_properties(self, node):
        """
        return a dict of all the properties of a given BeautifulSoup
        node found by applying the css style.
        """
        props = {}
        # Apply every matching rule, later rules winning.
        for rule in self.rules:
            if rule.selector(node):
                props.update(rule.properties)
        # Inline 'style' attributes on ancestors override stylesheet rules.
        for parent in node.findParents(attrs={'style': True}):
            props.update(self.parse_declarations(parent.get('style')))
        return props

    def is_bold(self, node):
        """
        convenience method equivalent to
        self.get_properties(node).get('font-weight', None) == 'bold'
        """
        return self.get_properties(node).get('font-weight') == 'bold'

    def is_italic(self, node):
        return self.get_properties(node).get('font-style') == 'italic'

    def is_sub(self, node):
        return self.get_properties(node).get('vertical-align') == 'sub'

    def is_super(self, node):
        return self.get_properties(node).get('vertical-align') == 'super'
brendonh/pyth
|
pyth/plugins/xhtml/css.py
|
CSS.parse_declarations
|
python
|
def parse_declarations(self, declarations):
declarations = self.declaration_re.findall(declarations)
return dict(declarations)
|
parse a css declaration list
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L87-L92
| null |
class CSS(object):
"""
Represents a css document
"""
# The regular expressions used to parse the css document
# match a rule e.g: '.imp {font-weight: bold; color: blue}'
ruleset_re = re.compile(r'\s*(.+?)\s+\{(.*?)\}')
# match a property declaration, e.g: 'font-weight = bold'
declaration_re = re.compile(r'\s*(.+?):\s*(.+?)\s*?(?:;|$)')
# match a selector
selector_re = re.compile(r'(.*?)(?:\.(.*))?$')
def __init__(self, source=None):
self.rules = []
if source:
self.parse_css(source)
def __repr__(self):
return repr(self.rules)
def parse_css(self, css):
"""
Parse a css style sheet into the CSS object.
For the moment this will only work for very simple css
documents. It works by using regular expression matching css
syntax. This is not bullet proof.
"""
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
rule = Rule(self.parse_selector(selector))
rule.properties = self.parse_declarations(declarations)
self.rules.append(rule)
def parse_selector(self, selector):
"""
parse a css selector
"""
tag, klass = self.selector_re.match(selector).groups()
return Selector(tag, klass)
def get_properties(self, node):
"""
return a dict of all the properties of a given BeautifulSoup
node found by applying the css style.
"""
ret = {}
# Try all the rules one by one
for rule in self.rules:
if rule.selector(node):
ret.update(rule.properties)
# Also search for direct 'style' arguments in the html doc
for style_node in node.findParents(attrs={'style': True}):
style = style_node.get('style')
properties = self.parse_declarations(style)
ret.update(properties)
return ret
def is_bold(self, node):
"""
convenience method equivalent to
self.get_properties(node).get('font-weight', None) == 'bold'
"""
properties = self.get_properties(node)
return properties.get('font-weight') == 'bold'
def is_italic(self, node):
properties = self.get_properties(node)
return properties.get('font-style') == 'italic'
def is_sub(self, node):
properties = self.get_properties(node)
return properties.get('vertical-align') == 'sub'
def is_super(self, node):
properties = self.get_properties(node)
return properties.get('vertical-align') == 'super'
|
brendonh/pyth
|
pyth/plugins/xhtml/css.py
|
CSS.parse_selector
|
python
|
def parse_selector(self, selector):
tag, klass = self.selector_re.match(selector).groups()
return Selector(tag, klass)
|
parse a css selector
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L94-L99
| null |
class CSS(object):
"""
Represents a css document
"""
# The regular expressions used to parse the css document
# match a rule e.g: '.imp {font-weight: bold; color: blue}'
ruleset_re = re.compile(r'\s*(.+?)\s+\{(.*?)\}')
# match a property declaration, e.g: 'font-weight = bold'
declaration_re = re.compile(r'\s*(.+?):\s*(.+?)\s*?(?:;|$)')
# match a selector
selector_re = re.compile(r'(.*?)(?:\.(.*))?$')
def __init__(self, source=None):
self.rules = []
if source:
self.parse_css(source)
def __repr__(self):
return repr(self.rules)
def parse_css(self, css):
"""
Parse a css style sheet into the CSS object.
For the moment this will only work for very simple css
documents. It works by using regular expression matching css
syntax. This is not bullet proof.
"""
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
rule = Rule(self.parse_selector(selector))
rule.properties = self.parse_declarations(declarations)
self.rules.append(rule)
def parse_declarations(self, declarations):
"""
parse a css declaration list
"""
declarations = self.declaration_re.findall(declarations)
return dict(declarations)
def get_properties(self, node):
"""
return a dict of all the properties of a given BeautifulSoup
node found by applying the css style.
"""
ret = {}
# Try all the rules one by one
for rule in self.rules:
if rule.selector(node):
ret.update(rule.properties)
# Also search for direct 'style' arguments in the html doc
for style_node in node.findParents(attrs={'style': True}):
style = style_node.get('style')
properties = self.parse_declarations(style)
ret.update(properties)
return ret
def is_bold(self, node):
"""
convenience method equivalent to
self.get_properties(node).get('font-weight', None) == 'bold'
"""
properties = self.get_properties(node)
return properties.get('font-weight') == 'bold'
def is_italic(self, node):
properties = self.get_properties(node)
return properties.get('font-style') == 'italic'
def is_sub(self, node):
properties = self.get_properties(node)
return properties.get('vertical-align') == 'sub'
def is_super(self, node):
properties = self.get_properties(node)
return properties.get('vertical-align') == 'super'
|
brendonh/pyth
|
pyth/plugins/xhtml/css.py
|
CSS.get_properties
|
python
|
def get_properties(self, node):
ret = {}
# Try all the rules one by one
for rule in self.rules:
if rule.selector(node):
ret.update(rule.properties)
# Also search for direct 'style' arguments in the html doc
for style_node in node.findParents(attrs={'style': True}):
style = style_node.get('style')
properties = self.parse_declarations(style)
ret.update(properties)
return ret
|
return a dict of all the properties of a given BeautifulSoup
node found by applying the css style.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/css.py#L101-L116
|
[
"def parse_declarations(self, declarations):\n \"\"\"\n parse a css declaration list\n \"\"\"\n declarations = self.declaration_re.findall(declarations)\n return dict(declarations)\n"
] |
class CSS(object):
"""
Represents a css document
"""
# The regular expressions used to parse the css document
# match a rule e.g: '.imp {font-weight: bold; color: blue}'
ruleset_re = re.compile(r'\s*(.+?)\s+\{(.*?)\}')
# match a property declaration, e.g: 'font-weight = bold'
declaration_re = re.compile(r'\s*(.+?):\s*(.+?)\s*?(?:;|$)')
# match a selector
selector_re = re.compile(r'(.*?)(?:\.(.*))?$')
def __init__(self, source=None):
self.rules = []
if source:
self.parse_css(source)
def __repr__(self):
return repr(self.rules)
def parse_css(self, css):
"""
Parse a css style sheet into the CSS object.
For the moment this will only work for very simple css
documents. It works by using regular expression matching css
syntax. This is not bullet proof.
"""
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
rule = Rule(self.parse_selector(selector))
rule.properties = self.parse_declarations(declarations)
self.rules.append(rule)
def parse_declarations(self, declarations):
"""
parse a css declaration list
"""
declarations = self.declaration_re.findall(declarations)
return dict(declarations)
def parse_selector(self, selector):
"""
parse a css selector
"""
tag, klass = self.selector_re.match(selector).groups()
return Selector(tag, klass)
def is_bold(self, node):
"""
convenience method equivalent to
self.get_properties(node).get('font-weight', None) == 'bold'
"""
properties = self.get_properties(node)
return properties.get('font-weight') == 'bold'
def is_italic(self, node):
properties = self.get_properties(node)
return properties.get('font-style') == 'italic'
def is_sub(self, node):
properties = self.get_properties(node)
return properties.get('vertical-align') == 'sub'
def is_super(self, node):
properties = self.get_properties(node)
return properties.get('vertical-align') == 'super'
|
brendonh/pyth
|
pyth/__init__.py
|
namedModule
|
python
|
def namedModule(name):
topLevel = __import__(name)
packages = name.split(".")[1:]
m = topLevel
for p in packages:
m = getattr(m, p)
return m
|
Return a module given its name.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/__init__.py#L37-L44
| null |
"""
Pyth -- Python text markup and conversion
"""
import os.path
__version__ = '0.5.6'
writerMap = {
'.rtf': 'pyth.plugins.rtf15.writer.Rtf15Writer',
'.html': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.xhtml': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.txt': 'pyth.plugins.plaintext.writer.PlaintextWriter',
'.pdf': 'pyth.plugins.pdf.writer.PDFWriter',
}
mimeMap = {
'.rtf': 'application/rtf',
'.html': 'text/html',
'.xhtml': 'application/xhtml+xml',
'.txt': 'text/plain',
}
def write(doc, filename):
    """Render *doc* with the writer registered for filename's extension.

    Returns a (rewound buffer, mime type) pair.
    """
    suffix = os.path.splitext(filename)[1]
    writer = namedObject(writerMap[suffix])
    buff = writer.write(doc)
    buff.seek(0)
    return (buff, mimeMap[suffix])
# Stolen from twisted.python.reflect
def namedObject(name):
    """Get a fully named module-global object."""
    # Split "pkg.mod.Attr" into its module path and final attribute.
    module_name, _, attr = name.rpartition('.')
    return getattr(namedModule(module_name), attr)
|
brendonh/pyth
|
pyth/__init__.py
|
namedObject
|
python
|
def namedObject(name):
classSplit = name.split('.')
module = namedModule('.'.join(classSplit[:-1]))
return getattr(module, classSplit[-1])
|
Get a fully named module-global object.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/__init__.py#L47-L52
|
[
"def namedModule(name):\n \"\"\"Return a module given its name.\"\"\"\n topLevel = __import__(name)\n packages = name.split(\".\")[1:]\n m = topLevel\n for p in packages:\n m = getattr(m, p)\n return m\n"
] |
"""
Pyth -- Python text markup and conversion
"""
import os.path
__version__ = '0.5.6'
writerMap = {
'.rtf': 'pyth.plugins.rtf15.writer.Rtf15Writer',
'.html': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.xhtml': 'pyth.plugins.xhtml.writer.XHTMLWriter',
'.txt': 'pyth.plugins.plaintext.writer.PlaintextWriter',
'.pdf': 'pyth.plugins.pdf.writer.PDFWriter',
}
mimeMap = {
'.rtf': 'application/rtf',
'.html': 'text/html',
'.xhtml': 'application/xhtml+xml',
'.txt': 'text/plain',
}
def write(doc, filename):
ext = os.path.splitext(filename)[1]
writer = namedObject(writerMap[ext])
buff = writer.write(doc)
buff.seek(0)
return (buff, mimeMap[ext])
# Stolen from twisted.python.reflect
def namedModule(name):
    """Return a module given its dotted name."""
    # __import__ returns the top-level package; walk down to the leaf.
    module = __import__(name)
    for part in name.split(".")[1:]:
        module = getattr(module, part)
    return module
|
brendonh/pyth
|
pyth/plugins/rst/writer.py
|
RSTWriter.text
|
python
|
def text(self, text):
ret = u"".join(text.content)
if 'url' in text.properties:
return u"`%s`_" % ret
if 'bold' in text.properties:
return u"**%s**" % ret
if 'italic' in text.properties:
return u"*%s*" % ret
if 'sub' in text.properties:
return ur"\ :sub:`%s`\ " % ret
if 'super' in text.properties:
return ur"\ :sup:`%s`\ " % ret
return ret
|
process a pyth text and return the formatted string
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rst/writer.py#L40-L55
| null |
class RSTWriter(PythWriter):
    """Writes a pyth document out as reStructuredText."""

    @classmethod
    def write(klass, document, target=None):
        """Render *document* into *target* (a fresh StringIO if None)."""
        if target is None:
            target = StringIO()
        writer = RSTWriter(document, target)
        return writer.go()

    def __init__(self, doc, target):
        self.document = doc
        self.target = target
        # Starts at -1 so the first list() call brings the indent to 0.
        self.indent = -1
        self.paragraphDispatch = {document.List: self.list,
                                  document.Paragraph: self.paragraph}

    def go(self):
        """Walk document content, dispatching each block on its class."""
        for (i, paragraph) in enumerate(self.document.content):
            handler = self.paragraphDispatch[paragraph.__class__]
            handler(paragraph)
            self.target.write("\n")
        # Heh heh, remove final paragraph spacing
        # NOTE(review): relative seek on a text buffer — works on the py2
        # StringIO this was written for; io.StringIO rejects it — confirm.
        self.target.seek(-2, 1)
        self.target.truncate()
        return self.target

    def paragraph(self, paragraph, prefix=""):
        """
        process a pyth paragraph into the target
        """
        content = []
        for text in paragraph.content:
            # self.text is defined elsewhere in this class and formats a
            # single run (bold/italic/url/sub/super) as RST markup.
            content.append(self.text(text))
        content = u"".join(content).encode("utf-8")
        for line in content.split("\n"):
            self.target.write("  " * self.indent)
            self.target.write(prefix)
            self.target.write(line)
            self.target.write("\n")
            if prefix:
                # Continuation lines of a list item are indented, not bulleted.
                prefix = "  "
        # handle the links
        if any('url' in text.properties for text in paragraph.content):
            self.target.write("\n")
            for text in paragraph.content:
                if 'url' in text.properties:
                    string = u"".join(text.content)
                    url = text.properties['url']
                    self.target.write(".. _%s: %s\n" % (string, url))

    def list(self, list, prefix=None):
        """
        Process a pyth list into the target
        """
        self.indent += 1
        for (i, entry) in enumerate(list.content):
            for (j, paragraph) in enumerate(entry.content):
                # Bullet only on the first paragraph of each entry.
                prefix = "- " if j == 0 else "  "
                handler = self.paragraphDispatch[paragraph.__class__]
                handler(paragraph, prefix)
        self.target.write("\n")
        self.indent -= 1
|
brendonh/pyth
|
pyth/plugins/rst/writer.py
|
RSTWriter.paragraph
|
python
|
def paragraph(self, paragraph, prefix=""):
content = []
for text in paragraph.content:
content.append(self.text(text))
content = u"".join(content).encode("utf-8")
for line in content.split("\n"):
self.target.write(" " * self.indent)
self.target.write(prefix)
self.target.write(line)
self.target.write("\n")
if prefix:
prefix = " "
# handle the links
if any('url' in text.properties for text in paragraph.content):
self.target.write("\n")
for text in paragraph.content:
if 'url' in text.properties:
string = u"".join(text.content)
url = text.properties['url']
self.target.write(".. _%s: %s\n" % (string, url))
|
process a pyth paragraph into the target
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rst/writer.py#L57-L81
|
[
"def text(self, text):\n \"\"\"\n process a pyth text and return the formatted string\n \"\"\"\n ret = u\"\".join(text.content)\n if 'url' in text.properties:\n return u\"`%s`_\" % ret\n if 'bold' in text.properties:\n return u\"**%s**\" % ret\n if 'italic' in text.properties:\n return u\"*%s*\" % ret\n if 'sub' in text.properties:\n return ur\"\\ :sub:`%s`\\ \" % ret\n if 'super' in text.properties:\n return ur\"\\ :sup:`%s`\\ \" % ret\n return ret\n"
] |
class RSTWriter(PythWriter):
@classmethod
def write(klass, document, target=None):
if target is None:
target = StringIO()
writer = RSTWriter(document, target)
return writer.go()
def __init__(self, doc, target):
self.document = doc
self.target = target
self.indent = -1
self.paragraphDispatch = {document.List: self.list,
document.Paragraph: self.paragraph}
def go(self):
for (i, paragraph) in enumerate(self.document.content):
handler = self.paragraphDispatch[paragraph.__class__]
handler(paragraph)
self.target.write("\n")
# Heh heh, remove final paragraph spacing
self.target.seek(-2, 1)
self.target.truncate()
return self.target
def text(self, text):
"""
process a pyth text and return the formatted string
"""
ret = u"".join(text.content)
if 'url' in text.properties:
return u"`%s`_" % ret
if 'bold' in text.properties:
return u"**%s**" % ret
if 'italic' in text.properties:
return u"*%s*" % ret
if 'sub' in text.properties:
return ur"\ :sub:`%s`\ " % ret
if 'super' in text.properties:
return ur"\ :sup:`%s`\ " % ret
return ret
def list(self, list, prefix=None):
"""
Process a pyth list into the target
"""
self.indent += 1
for (i, entry) in enumerate(list.content):
for (j, paragraph) in enumerate(entry.content):
prefix = "- " if j == 0 else " "
handler = self.paragraphDispatch[paragraph.__class__]
handler(paragraph, prefix)
self.target.write("\n")
self.indent -= 1
|
brendonh/pyth
|
pyth/plugins/rst/writer.py
|
RSTWriter.list
|
python
|
def list(self, list, prefix=None):
self.indent += 1
for (i, entry) in enumerate(list.content):
for (j, paragraph) in enumerate(entry.content):
prefix = "- " if j == 0 else " "
handler = self.paragraphDispatch[paragraph.__class__]
handler(paragraph, prefix)
self.target.write("\n")
self.indent -= 1
|
Process a pyth list into the target
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/rst/writer.py#L83-L94
| null |
class RSTWriter(PythWriter):
@classmethod
def write(klass, document, target=None):
if target is None:
target = StringIO()
writer = RSTWriter(document, target)
return writer.go()
def __init__(self, doc, target):
self.document = doc
self.target = target
self.indent = -1
self.paragraphDispatch = {document.List: self.list,
document.Paragraph: self.paragraph}
def go(self):
for (i, paragraph) in enumerate(self.document.content):
handler = self.paragraphDispatch[paragraph.__class__]
handler(paragraph)
self.target.write("\n")
# Heh heh, remove final paragraph spacing
self.target.seek(-2, 1)
self.target.truncate()
return self.target
def text(self, text):
"""
process a pyth text and return the formatted string
"""
ret = u"".join(text.content)
if 'url' in text.properties:
return u"`%s`_" % ret
if 'bold' in text.properties:
return u"**%s**" % ret
if 'italic' in text.properties:
return u"*%s*" % ret
if 'sub' in text.properties:
return ur"\ :sub:`%s`\ " % ret
if 'super' in text.properties:
return ur"\ :sup:`%s`\ " % ret
return ret
def paragraph(self, paragraph, prefix=""):
"""
process a pyth paragraph into the target
"""
content = []
for text in paragraph.content:
content.append(self.text(text))
content = u"".join(content).encode("utf-8")
for line in content.split("\n"):
self.target.write(" " * self.indent)
self.target.write(prefix)
self.target.write(line)
self.target.write("\n")
if prefix:
prefix = " "
# handle the links
if any('url' in text.properties for text in paragraph.content):
self.target.write("\n")
for text in paragraph.content:
if 'url' in text.properties:
string = u"".join(text.content)
url = text.properties['url']
self.target.write(".. _%s: %s\n" % (string, url))
|
brendonh/pyth
|
pyth/plugins/xhtml/reader.py
|
XHTMLReader.format
|
python
|
def format(self, soup):
# Remove all the newline characters before a closing tag.
for node in soup.findAll(text=True):
if node.rstrip(" ").endswith("\n"):
node.replaceWith(node.rstrip(" ").rstrip("\n"))
# Join the block elements lines into a single long line
for tag in ['p', 'li']:
for node in soup.findAll(tag):
text = unicode(node)
lines = [x.strip() for x in text.splitlines()]
text = ' '.join(lines)
node.replaceWith(BeautifulSoup.BeautifulSoup(text))
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
# replace all <br/> tag by newline character
for node in soup.findAll('br'):
node.replaceWith("\n")
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
return soup
|
format a BeautifulSoup document
This will transform the block elements content from
multi-lines text into single line.
This allow us to avoid having to deal with further text
rendering once this step has been done.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/reader.py#L40-L65
| null |
class XHTMLReader(PythReader):
    """Reads an XHTML document (with optional CSS) into a pyth document."""

    # NOTE(review): declared a classmethod but its first parameter is named
    # `self` — it actually receives the class. Kept as-is; only a naming quirk.
    @classmethod
    def read(self, source, css_source=None, encoding="utf-8", link_callback=None):
        reader = XHTMLReader(source, css_source, encoding, link_callback)
        return reader.go()

    def __init__(self, source, css_source=None, encoding="utf-8", link_callback=None):
        self.source = source                  # HTML markup (string / file-like)
        self.css_source = css_source          # optional CSS stylesheet text
        self.encoding = encoding              # encoding hint for BeautifulSoup
        self.link_callback = link_callback    # optional href rewriter

    def go(self):
        """Parse the source and return the populated pyth Document."""
        soup = BeautifulSoup.BeautifulSoup(self.source,
                                           convertEntities=BeautifulSoup.BeautifulSoup.HTML_ENTITIES,
                                           fromEncoding=self.encoding,
                                           smartQuotesTo=None)
        # Make sure the document content doesn't use multi-lines
        # (self.format is defined elsewhere in this class).
        soup = self.format(soup)
        doc = document.Document()
        if self.css_source:
            self.css = CSS(self.css_source)
        else:
            self.css = CSS()  # empty css
        self.process_into(soup, doc)
        return doc

    def is_bold(self, node):
        """
        Return true if the BeautifulSoup node needs to be rendered as
        bold.
        """
        return (node.findParent(['b', 'strong']) is not None or
                self.css.is_bold(node))

    def is_italic(self, node):
        """
        Return true if the BeautifulSoup node needs to be rendered as
        italic.
        """
        return (node.findParent(['em', 'i']) is not None
                or self.css.is_italic(node))

    def is_sub(self, node):
        """
        Return true if the BeautifulSoup node needs to be rendered as
        sub.
        """
        return (node.findParent(['sub']) is not None
                or self.css.is_sub(node))

    def is_super(self, node):
        """
        Return true if the BeautifulSoup node needs to be rendered as
        super.
        """
        return (node.findParent(['sup']) is not None
                or self.css.is_super(node))

    def url(self, node):
        """
        return the url of a BeautifulSoup node or None if there is no
        url.
        """
        a_node = node.findParent('a')
        if not a_node:
            return None
        if self.link_callback is None:
            return a_node.get('href')
        else:
            return self.link_callback(a_node.get('href'))

    def process_text(self, node):
        """
        Return a pyth Text object from a BeautifulSoup node or None if
        the text is empty.
        """
        text = node.string.strip()
        if not text:
            return
        # Set all the properties
        properties = dict()
        if self.is_bold(node):
            properties['bold'] = True
        if self.is_italic(node):
            properties['italic'] = True
        if self.url(node):
            properties['url'] = self.url(node)
        if self.is_sub(node):
            properties['sub'] = True
        if self.is_super(node):
            properties['super'] = True
        content = [node.string]
        return document.Text(properties, content)

    def process_into(self, node, obj):
        """
        Process a BeautifulSoup node and fill its elements into a pyth
        base object.
        """
        if isinstance(node, BeautifulSoup.NavigableString):
            text = self.process_text(node)
            if text:
                obj.append(text)
            return
        if node.name == 'p':
            # add a new paragraph into the pyth object
            new_obj = document.Paragraph()
            obj.append(new_obj)
            obj = new_obj
        elif node.name == 'ul':
            # add a new list
            new_obj = document.List()
            obj.append(new_obj)
            obj = new_obj
        elif node.name == 'li':
            # add a new list entry
            new_obj = document.ListEntry()
            obj.append(new_obj)
            obj = new_obj
        # Recurse into children, filling the (possibly new) container.
        for child in node:
            self.process_into(child, obj)
|
brendonh/pyth
|
pyth/plugins/xhtml/reader.py
|
XHTMLReader.url
|
python
|
def url(self, node):
a_node = node.findParent('a')
if not a_node:
return None
if self.link_callback is None:
return a_node.get('href')
else:
return self.link_callback(a_node.get('href'))
|
return the url of a BeautifulSoup node or None if there is no
url.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/reader.py#L99-L111
| null |
class XHTMLReader(PythReader):
@classmethod
def read(self, source, css_source=None, encoding="utf-8", link_callback=None):
reader = XHTMLReader(source, css_source, encoding, link_callback)
return reader.go()
def __init__(self, source, css_source=None, encoding="utf-8", link_callback=None):
self.source = source
self.css_source = css_source
self.encoding = encoding
self.link_callback = link_callback
def go(self):
soup = BeautifulSoup.BeautifulSoup(self.source,
convertEntities=BeautifulSoup.BeautifulSoup.HTML_ENTITIES,
fromEncoding=self.encoding,
smartQuotesTo=None)
# Make sure the document content doesn't use multi-lines
soup = self.format(soup)
doc = document.Document()
if self.css_source:
self.css = CSS(self.css_source)
else:
self.css = CSS() # empty css
self.process_into(soup, doc)
return doc
def format(self, soup):
"""format a BeautifulSoup document
This will transform the block elements content from
multi-lines text into single line.
This allow us to avoid having to deal with further text
rendering once this step has been done.
"""
# Remove all the newline characters before a closing tag.
for node in soup.findAll(text=True):
if node.rstrip(" ").endswith("\n"):
node.replaceWith(node.rstrip(" ").rstrip("\n"))
# Join the block elements lines into a single long line
for tag in ['p', 'li']:
for node in soup.findAll(tag):
text = unicode(node)
lines = [x.strip() for x in text.splitlines()]
text = ' '.join(lines)
node.replaceWith(BeautifulSoup.BeautifulSoup(text))
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
# replace all <br/> tag by newline character
for node in soup.findAll('br'):
node.replaceWith("\n")
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
return soup
def is_bold(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
bold.
"""
return (node.findParent(['b', 'strong']) is not None or
self.css.is_bold(node))
def is_italic(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
italic.
"""
return (node.findParent(['em', 'i']) is not None
or self.css.is_italic(node))
def is_sub(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
sub.
"""
return (node.findParent(['sub']) is not None
or self.css.is_sub(node))
def is_super(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
super.
"""
return (node.findParent(['sup']) is not None
or self.css.is_super(node))
def process_text(self, node):
"""
Return a pyth Text object from a BeautifulSoup node or None if
the text is empty.
"""
text = node.string.strip()
if not text:
return
# Set all the properties
properties=dict()
if self.is_bold(node):
properties['bold'] = True
if self.is_italic(node):
properties['italic'] = True
if self.url(node):
properties['url'] = self.url(node)
if self.is_sub(node):
properties['sub'] = True
if self.is_super(node):
properties['super'] = True
content=[node.string]
return document.Text(properties, content)
def process_into(self, node, obj):
"""
Process a BeautifulSoup node and fill its elements into a pyth
base object.
"""
if isinstance(node, BeautifulSoup.NavigableString):
text = self.process_text(node)
if text:
obj.append(text)
return
if node.name == 'p':
# add a new paragraph into the pyth object
new_obj = document.Paragraph()
obj.append(new_obj)
obj = new_obj
elif node.name == 'ul':
# add a new list
new_obj = document.List()
obj.append(new_obj)
obj = new_obj
elif node.name == 'li':
# add a new list entry
new_obj = document.ListEntry()
obj.append(new_obj)
obj = new_obj
for child in node:
self.process_into(child, obj)
|
brendonh/pyth
|
pyth/plugins/xhtml/reader.py
|
XHTMLReader.process_text
|
python
|
def process_text(self, node):
    """
    Return a pyth Text object built from a BeautifulSoup text node,
    or None if the node contains only whitespace.

    Formatting properties (bold, italic, url, sub, super) are derived
    from the node's ancestors and the attached CSS helpers.
    """
    text = node.string.strip()
    if not text:
        return

    # Collect all the rendering properties for this run of text.
    properties = dict()
    if self.is_bold(node):
        properties['bold'] = True
    if self.is_italic(node):
        properties['italic'] = True
    # Compute the url once instead of twice (the original called
    # self.url(node) for both the test and the assignment, which also
    # invoked any link_callback a second time).
    url = self.url(node)
    if url:
        properties['url'] = url
    if self.is_sub(node):
        properties['sub'] = True
    if self.is_super(node):
        properties['super'] = True

    content = [node.string]

    return document.Text(properties, content)
|
Return a pyth Text object from a BeautifulSoup node or None if
the text is empty.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/reader.py#L113-L137
|
[
"def is_bold(self, node):\n \"\"\"\n Return true if the BeautifulSoup node needs to be rendered as\n bold.\n \"\"\"\n return (node.findParent(['b', 'strong']) is not None or\n self.css.is_bold(node))\n",
"def is_italic(self, node):\n \"\"\"\n Return true if the BeautifulSoup node needs to be rendered as\n italic.\n \"\"\"\n return (node.findParent(['em', 'i']) is not None\n or self.css.is_italic(node))\n",
"def url(self, node):\n \"\"\"\n return the url of a BeautifulSoup node or None if there is no\n url.\n \"\"\"\n a_node = node.findParent('a')\n if not a_node:\n return None\n\n if self.link_callback is None:\n return a_node.get('href')\n else:\n return self.link_callback(a_node.get('href'))\n"
] |
class XHTMLReader(PythReader):
@classmethod
def read(self, source, css_source=None, encoding="utf-8", link_callback=None):
reader = XHTMLReader(source, css_source, encoding, link_callback)
return reader.go()
def __init__(self, source, css_source=None, encoding="utf-8", link_callback=None):
self.source = source
self.css_source = css_source
self.encoding = encoding
self.link_callback = link_callback
def go(self):
soup = BeautifulSoup.BeautifulSoup(self.source,
convertEntities=BeautifulSoup.BeautifulSoup.HTML_ENTITIES,
fromEncoding=self.encoding,
smartQuotesTo=None)
# Make sure the document content doesn't use multi-lines
soup = self.format(soup)
doc = document.Document()
if self.css_source:
self.css = CSS(self.css_source)
else:
self.css = CSS() # empty css
self.process_into(soup, doc)
return doc
def format(self, soup):
"""format a BeautifulSoup document
This will transform the block elements content from
multi-lines text into single line.
This allow us to avoid having to deal with further text
rendering once this step has been done.
"""
# Remove all the newline characters before a closing tag.
for node in soup.findAll(text=True):
if node.rstrip(" ").endswith("\n"):
node.replaceWith(node.rstrip(" ").rstrip("\n"))
# Join the block elements lines into a single long line
for tag in ['p', 'li']:
for node in soup.findAll(tag):
text = unicode(node)
lines = [x.strip() for x in text.splitlines()]
text = ' '.join(lines)
node.replaceWith(BeautifulSoup.BeautifulSoup(text))
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
# replace all <br/> tag by newline character
for node in soup.findAll('br'):
node.replaceWith("\n")
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
return soup
def is_bold(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
bold.
"""
return (node.findParent(['b', 'strong']) is not None or
self.css.is_bold(node))
def is_italic(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
italic.
"""
return (node.findParent(['em', 'i']) is not None
or self.css.is_italic(node))
def is_sub(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
sub.
"""
return (node.findParent(['sub']) is not None
or self.css.is_sub(node))
def is_super(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
super.
"""
return (node.findParent(['sup']) is not None
or self.css.is_super(node))
def url(self, node):
"""
return the url of a BeautifulSoup node or None if there is no
url.
"""
a_node = node.findParent('a')
if not a_node:
return None
if self.link_callback is None:
return a_node.get('href')
else:
return self.link_callback(a_node.get('href'))
def process_into(self, node, obj):
"""
Process a BeautifulSoup node and fill its elements into a pyth
base object.
"""
if isinstance(node, BeautifulSoup.NavigableString):
text = self.process_text(node)
if text:
obj.append(text)
return
if node.name == 'p':
# add a new paragraph into the pyth object
new_obj = document.Paragraph()
obj.append(new_obj)
obj = new_obj
elif node.name == 'ul':
# add a new list
new_obj = document.List()
obj.append(new_obj)
obj = new_obj
elif node.name == 'li':
# add a new list entry
new_obj = document.ListEntry()
obj.append(new_obj)
obj = new_obj
for child in node:
self.process_into(child, obj)
|
brendonh/pyth
|
pyth/plugins/xhtml/reader.py
|
XHTMLReader.process_into
|
python
|
def process_into(self, node, obj):
    """
    Recursively translate a BeautifulSoup node, filling the resulting
    pyth elements into *obj*.
    """
    if isinstance(node, BeautifulSoup.NavigableString):
        text = self.process_text(node)
        if text:
            obj.append(text)
        return
    # Map container tags to the pyth element they open; anything else
    # keeps appending into the current object.
    containers = {
        'p': document.Paragraph,
        'ul': document.List,
        'li': document.ListEntry,
    }
    factory = containers.get(node.name)
    if factory is not None:
        opened = factory()
        obj.append(opened)
        obj = opened
    for child in node:
        self.process_into(child, obj)
|
Process a BeautifulSoup node and fill its elements into a pyth
base object.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/xhtml/reader.py#L139-L165
|
[
"def append(self, item):\n \"\"\"\n Try to add an item to this element.\n\n If the item is of the wrong type, and if this element has a sub-type,\n then try to create such a sub-type and insert the item into that, instead.\n\n This happens recursively, so (in python-markup):\n L [ u'Foo' ]\n actually creates:\n L [ LE [ P [ T [ u'Foo' ] ] ] ]\n\n If that doesn't work, raise a TypeError.\n \"\"\"\n\n okay = True\n if not isinstance(item, self.contentType):\n if hasattr(self.contentType, 'contentType'):\n try:\n item = self.contentType(content=[item])\n except TypeError:\n okay = False\n else:\n okay = False\n\n if not okay:\n raise TypeError(\"Wrong content type for %s: %s (%s)\" % (\n self.__class__.__name__, repr(type(item)), repr(item)))\n\n self.content.append(item)\n",
"def process_text(self, node):\n \"\"\"\n Return a pyth Text object from a BeautifulSoup node or None if\n the text is empty.\n \"\"\"\n text = node.string.strip()\n if not text:\n return\n\n # Set all the properties\n properties=dict()\n if self.is_bold(node):\n properties['bold'] = True\n if self.is_italic(node):\n properties['italic'] = True\n if self.url(node):\n properties['url'] = self.url(node)\n if self.is_sub(node):\n properties['sub'] = True\n if self.is_super(node):\n properties['super'] = True\n\n content=[node.string]\n\n return document.Text(properties, content)\n",
"def process_into(self, node, obj):\n \"\"\"\n Process a BeautifulSoup node and fill its elements into a pyth\n base object.\n \"\"\"\n if isinstance(node, BeautifulSoup.NavigableString):\n text = self.process_text(node)\n if text:\n obj.append(text)\n return\n if node.name == 'p':\n # add a new paragraph into the pyth object\n new_obj = document.Paragraph()\n obj.append(new_obj)\n obj = new_obj\n elif node.name == 'ul':\n # add a new list\n new_obj = document.List()\n obj.append(new_obj)\n obj = new_obj\n elif node.name == 'li':\n # add a new list entry\n new_obj = document.ListEntry()\n obj.append(new_obj)\n obj = new_obj\n for child in node:\n self.process_into(child, obj)\n"
] |
class XHTMLReader(PythReader):
@classmethod
def read(self, source, css_source=None, encoding="utf-8", link_callback=None):
reader = XHTMLReader(source, css_source, encoding, link_callback)
return reader.go()
def __init__(self, source, css_source=None, encoding="utf-8", link_callback=None):
self.source = source
self.css_source = css_source
self.encoding = encoding
self.link_callback = link_callback
def go(self):
soup = BeautifulSoup.BeautifulSoup(self.source,
convertEntities=BeautifulSoup.BeautifulSoup.HTML_ENTITIES,
fromEncoding=self.encoding,
smartQuotesTo=None)
# Make sure the document content doesn't use multi-lines
soup = self.format(soup)
doc = document.Document()
if self.css_source:
self.css = CSS(self.css_source)
else:
self.css = CSS() # empty css
self.process_into(soup, doc)
return doc
def format(self, soup):
"""format a BeautifulSoup document
This will transform the block elements content from
multi-lines text into single line.
This allow us to avoid having to deal with further text
rendering once this step has been done.
"""
# Remove all the newline characters before a closing tag.
for node in soup.findAll(text=True):
if node.rstrip(" ").endswith("\n"):
node.replaceWith(node.rstrip(" ").rstrip("\n"))
# Join the block elements lines into a single long line
for tag in ['p', 'li']:
for node in soup.findAll(tag):
text = unicode(node)
lines = [x.strip() for x in text.splitlines()]
text = ' '.join(lines)
node.replaceWith(BeautifulSoup.BeautifulSoup(text))
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
# replace all <br/> tag by newline character
for node in soup.findAll('br'):
node.replaceWith("\n")
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
return soup
def is_bold(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
bold.
"""
return (node.findParent(['b', 'strong']) is not None or
self.css.is_bold(node))
def is_italic(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
italic.
"""
return (node.findParent(['em', 'i']) is not None
or self.css.is_italic(node))
def is_sub(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
sub.
"""
return (node.findParent(['sub']) is not None
or self.css.is_sub(node))
def is_super(self, node):
"""
Return true if the BeautifulSoup node needs to be rendered as
super.
"""
return (node.findParent(['sup']) is not None
or self.css.is_super(node))
def url(self, node):
"""
return the url of a BeautifulSoup node or None if there is no
url.
"""
a_node = node.findParent('a')
if not a_node:
return None
if self.link_callback is None:
return a_node.get('href')
else:
return self.link_callback(a_node.get('href'))
def process_text(self, node):
"""
Return a pyth Text object from a BeautifulSoup node or None if
the text is empty.
"""
text = node.string.strip()
if not text:
return
# Set all the properties
properties=dict()
if self.is_bold(node):
properties['bold'] = True
if self.is_italic(node):
properties['italic'] = True
if self.url(node):
properties['url'] = self.url(node)
if self.is_sub(node):
properties['sub'] = True
if self.is_super(node):
properties['super'] = True
content=[node.string]
return document.Text(properties, content)
|
brendonh/pyth
|
pyth/document.py
|
_PythBase.append
|
python
|
def append(self, item):
    """
    Add *item* to this element's content.

    If the item is not of this element's content type but the content
    type itself has a sub-type, wrap the item in that sub-type (this
    recurses, so e.g. L [ u'Foo' ] builds L [ LE [ P [ T [ u'Foo' ] ] ] ]).
    Raise TypeError when no such coercion is possible.
    """
    wrapped = item
    acceptable = isinstance(wrapped, self.contentType)
    if not acceptable and hasattr(self.contentType, 'contentType'):
        try:
            wrapped = self.contentType(content=[wrapped])
            acceptable = True
        except TypeError:
            acceptable = False

    if not acceptable:
        raise TypeError("Wrong content type for %s: %s (%s)" % (
            self.__class__.__name__, repr(type(item)), repr(item)))

    self.content.append(wrapped)
|
Try to add an item to this element.
If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that, instead.
This happens recursively, so (in python-markup):
L [ u'Foo' ]
actually creates:
L [ LE [ P [ T [ u'Foo' ] ] ] ]
If that doesn't work, raise a TypeError.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/document.py#L30-L59
| null |
class _PythBase(object):
def __init__(self, properties={}, content=[]):
self.properties = {}
self.content = []
for (k,v) in properties.iteritems():
self[k] = v
for item in content:
self.append(item)
def __setitem__(self, key, value):
if key not in self.validProperties:
raise ValueError("Invalid %s property: %s" % (self.__class__.__name__, repr(key)))
self.properties[key] = value
def __getitem__(self, key):
if key not in self.validProperties:
raise ValueError("Invalid %s property: %s" %
(self.__class__.__name__, repr(key)))
return self.properties.get(key)
def append(self, item):
"""
Try to add an item to this element.
If the item is of the wrong type, and if this element has a sub-type,
then try to create such a sub-type and insert the item into that, instead.
This happens recursively, so (in python-markup):
L [ u'Foo' ]
actually creates:
L [ LE [ P [ T [ u'Foo' ] ] ] ]
If that doesn't work, raise a TypeError.
"""
okay = True
if not isinstance(item, self.contentType):
if hasattr(self.contentType, 'contentType'):
try:
item = self.contentType(content=[item])
except TypeError:
okay = False
else:
okay = False
if not okay:
raise TypeError("Wrong content type for %s: %s (%s)" % (
self.__class__.__name__, repr(type(item)), repr(item)))
self.content.append(item)
|
brendonh/pyth
|
pyth/plugins/python/reader.py
|
_MetaPythonBase
|
python
|
def _MetaPythonBase():
class MagicGetItem(type):
def __new__(mcs, name, bases, dict):
klass = type.__new__(mcs, name, bases, dict)
mcs.__getitem__ = lambda _, k: klass()[k]
return klass
return MagicGetItem
|
Return a metaclass which implements __getitem__,
allowing e.g. P[...] instead of P()[...]
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/python/reader.py#L40-L52
| null |
"""
Write Pyth documents straight in Python, a la Nevow's Stan.
"""
from pyth.format import PythReader
from pyth.document import *
def _convert(content):
if isinstance(content, _PythonBase):
return content.toPyth()
return content
class PythonReader(PythReader):
@classmethod
def read(self, source):
"""
source: A list of P objects.
"""
return Document(content=[_convert(c) for c in source])
class _Shortcut(object):
def __init__(self, key):
self.key = key
def asDict(self):
return dict(((self.key, True),))
BOLD = _Shortcut("bold")
ITALIC = _Shortcut("italic")
UNDERLINE = _Shortcut("underline")
SUPER = _Shortcut("super")
SUB = _Shortcut("sub")
def _MetaPythonBase():
"""
Return a metaclass which implements __getitem__,
allowing e.g. P[...] instead of P()[...]
"""
class MagicGetItem(type):
def __new__(mcs, name, bases, dict):
klass = type.__new__(mcs, name, bases, dict)
mcs.__getitem__ = lambda _, k: klass()[k]
return klass
return MagicGetItem
class _PythonBase(object):
"""
Base class for Python markup objects, providing
stan-ish interface
"""
def __init__(self, *shortcuts, **properties):
self.properties = properties.copy()
for shortcut in shortcuts:
self.properties.update(shortcut.asDict())
self.content = []
def toPyth(self):
return self.pythType(self.properties,
[_convert(c) for c in self.content])
def __getitem__(self, item):
if isinstance(item, (tuple, list)):
for i in item: self [i]
elif isinstance(item, int):
return self.content[item]
else:
self.content.append(item)
return self
def __str__(self):
return "%s(%s) [ %s ]" % (
self.__class__.__name__,
", ".join("%s=%s" % (k, repr(v)) for (k,v) in self.properties.iteritems()),
", ".join(repr(x) for x in self.content))
class P(_PythonBase):
__metaclass__ = _MetaPythonBase()
pythType = Paragraph
class LE(_PythonBase):
__metaclass__ = _MetaPythonBase()
pythType = ListEntry
class L(_PythonBase):
__metaclass__ = _MetaPythonBase()
pythType = List
class T(_PythonBase):
__metaclass__ = _MetaPythonBase()
__repr__ = _PythonBase.__str__
pythType = Text
def toPyth(self):
return Text(self.properties, self.content)
|
brendonh/pyth
|
pyth/plugins/latex/writer.py
|
LatexWriter.write
|
python
|
def write(klass, document, target=None, stylesheet=""):
writer = LatexWriter(document, target, stylesheet)
return writer.go()
|
convert a pyth document to a latex document
we can specify a stylesheet as a latex document fragment that
will be inserted after the headers. This way we can override
the default style.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/latex/writer.py#L19-L28
|
[
"def go(self):\n rst = RSTWriter.write(self.document).getvalue()\n settings = dict(input_encoding=\"UTF-8\",\n output_encoding=\"UTF-8\",\n stylesheet=\"stylesheet.tex\")\n latex = docutils.core.publish_string(rst,\n writer_name=\"latex\",\n settings_overrides=settings)\n # We don't want to keep an \\input command in the latex file\n latex = latex.replace(r\"\\input{stylesheet.tex}\",\n self.full_stylesheet)\n self.target.write(latex)\n return self.target\n"
] |
class LatexWriter(PythWriter):
@classmethod
def __init__(self, doc, target=None, stylesheet=""):
"""Create a writer that produce a latex document
we can specify a stylesheet as a latex document fragment that
will be inserted after the headers. This way we can override
the default style.
"""
self.document = doc
self.stylesheet = stylesheet
self.target = target if target is not None else StringIO()
@property
def full_stylesheet(self):
"""
Return the style sheet that will ultimately be inserted into
the latex document.
This is the user given style sheet plus some additional parts
to add the meta data.
"""
latex_fragment = r"""
\usepackage[colorlinks=true,linkcolor=blue,urlcolor=blue]{hyperref}
\hypersetup{
pdftitle={%s},
pdfauthor={%s},
pdfsubject={%s}
}
""" % (self.document.properties.get("title"),
self.document.properties.get("author"),
self.document.properties.get("subject"))
return latex_fragment + self.stylesheet
def go(self):
rst = RSTWriter.write(self.document).getvalue()
settings = dict(input_encoding="UTF-8",
output_encoding="UTF-8",
stylesheet="stylesheet.tex")
latex = docutils.core.publish_string(rst,
writer_name="latex",
settings_overrides=settings)
# We don't want to keep an \input command in the latex file
latex = latex.replace(r"\input{stylesheet.tex}",
self.full_stylesheet)
self.target.write(latex)
return self.target
|
brendonh/pyth
|
pyth/plugins/latex/writer.py
|
LatexWriter.full_stylesheet
|
python
|
def full_stylesheet(self):
    """
    Style sheet that is ultimately inserted into the latex document.

    Combines a generated hyperref/metadata fragment (title, author and
    subject taken from the document properties) with the user supplied
    stylesheet, in that order.
    """
    props = self.document.properties
    latex_fragment = r"""
\usepackage[colorlinks=true,linkcolor=blue,urlcolor=blue]{hyperref}
\hypersetup{
    pdftitle={%s},
    pdfauthor={%s},
    pdfsubject={%s}
}
""" % (props.get("title"), props.get("author"), props.get("subject"))
    return latex_fragment + self.stylesheet
|
Return the style sheet that will ultimately be inserted into
the latex document.
This is the user given style sheet plus some additional parts
to add the meta data.
|
train
|
https://github.com/brendonh/pyth/blob/f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b/pyth/plugins/latex/writer.py#L42-L60
| null |
class LatexWriter(PythWriter):
@classmethod
def write(klass, document, target=None, stylesheet=""):
"""
convert a pyth document to a latex document
we can specify a stylesheet as a latex document fragment that
will be inserted after the headers. This way we can override
the default style.
"""
writer = LatexWriter(document, target, stylesheet)
return writer.go()
def __init__(self, doc, target=None, stylesheet=""):
"""Create a writer that produce a latex document
we can specify a stylesheet as a latex document fragment that
will be inserted after the headers. This way we can override
the default style.
"""
self.document = doc
self.stylesheet = stylesheet
self.target = target if target is not None else StringIO()
@property
def go(self):
rst = RSTWriter.write(self.document).getvalue()
settings = dict(input_encoding="UTF-8",
output_encoding="UTF-8",
stylesheet="stylesheet.tex")
latex = docutils.core.publish_string(rst,
writer_name="latex",
settings_overrides=settings)
# We don't want to keep an \input command in the latex file
latex = latex.replace(r"\input{stylesheet.tex}",
self.full_stylesheet)
self.target.write(latex)
return self.target
|
cloudsigma/cgroupspy
|
cgroupspy/trees.py
|
BaseTree._build_tree
|
python
|
def _build_tree(self):
    """
    Build a full or a partial tree, depending on the groups/sub-groups
    specified: explicit groups when configured, otherwise every child
    directory of the root path.
    """
    selected = self._groups if self._groups else self.get_children_paths(self.root_path)
    for group_name in selected:
        child = Node(name=group_name, parent=self.root)
        self.root.children.append(child)
        self._init_sub_groups(child)
|
Build a full or a partial tree, depending on the groups/sub-groups specified.
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/trees.py#L71-L80
|
[
"def _init_sub_groups(self, parent):\n \"\"\"\n Initialise sub-groups, and create any that do not already exist.\n \"\"\"\n\n if self._sub_groups:\n for sub_group in self._sub_groups:\n for component in split_path_components(sub_group):\n fp = os.path.join(parent.full_path, component)\n if os.path.exists(fp):\n node = Node(name=component, parent=parent)\n parent.children.append(node)\n else:\n node = parent.create_cgroup(component)\n parent = node\n self._init_children(node)\n else:\n self._init_children(parent)\n",
"def get_children_paths(self, parent_full_path):\n for dir_name in os.listdir(parent_full_path):\n if os.path.isdir(os.path.join(parent_full_path, dir_name)):\n yield dir_name\n"
] |
class BaseTree(object):
""" A basic cgroup node tree. An exact representation of the filesystem tree, provided by cgroups. """
def __init__(self, root_path="/sys/fs/cgroup/", groups=None, sub_groups=None):
self.root_path = root_path
self._groups = groups or []
self._sub_groups = sub_groups or []
self.root = Node(root_path)
self._build_tree()
@property
def groups(self):
return self._groups
def _init_sub_groups(self, parent):
"""
Initialise sub-groups, and create any that do not already exist.
"""
if self._sub_groups:
for sub_group in self._sub_groups:
for component in split_path_components(sub_group):
fp = os.path.join(parent.full_path, component)
if os.path.exists(fp):
node = Node(name=component, parent=parent)
parent.children.append(node)
else:
node = parent.create_cgroup(component)
parent = node
self._init_children(node)
else:
self._init_children(parent)
def _init_children(self, parent):
"""
Initialise each node's children - essentially build the tree.
"""
for dir_name in self.get_children_paths(parent.full_path):
child = Node(name=dir_name, parent=parent)
parent.children.append(child)
self._init_children(child)
def get_children_paths(self, parent_full_path):
for dir_name in os.listdir(parent_full_path):
if os.path.isdir(os.path.join(parent_full_path, dir_name)):
yield dir_name
def walk(self, root=None):
"""Walk through each each node - pre-order depth-first"""
if root is None:
root = self.root
return walk_tree(root)
def walk_up(self, root=None):
"""Walk through each each node - post-order depth-first"""
if root is None:
root = self.root
return walk_up_tree(root)
|
cloudsigma/cgroupspy
|
cgroupspy/trees.py
|
BaseTree._init_sub_groups
|
python
|
def _init_sub_groups(self, parent):
    """
    Initialise the configured sub-groups under *parent*, creating any
    path component that does not already exist on disk, then populate
    children below the deepest node reached for each sub-group.
    """
    if not self._sub_groups:
        self._init_children(parent)
        return
    for sub_group in self._sub_groups:
        for component in split_path_components(sub_group):
            component_path = os.path.join(parent.full_path, component)
            if os.path.exists(component_path):
                current = Node(name=component, parent=parent)
                parent.children.append(current)
            else:
                current = parent.create_cgroup(component)
            # Descend: the node just handled becomes the parent of the
            # next component (and, as in the original, carries over
            # into the next sub-group iteration).
            parent = current
        self._init_children(current)
|
Initialise sub-groups, and create any that do not already exist.
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/trees.py#L82-L99
|
[
"def split_path_components(path):\n components=[]\n while True:\n path, component = os.path.split(path)\n if component != \"\":\n components.append(component)\n else:\n if path != \"\":\n components.append(path)\n break\n components.reverse()\n return components\n",
"def create_cgroup(self, name):\n \"\"\"\n Create a cgroup by name and attach it under this node.\n \"\"\"\n node = Node(name, parent=self)\n if node in self.children:\n raise RuntimeError('Node {} already exists under {}'.format(name, self.path))\n\n name = name.encode()\n fp = os.path.join(self.full_path, name)\n os.mkdir(fp)\n self.children.append(node)\n return node\n",
"def _init_children(self, parent):\n \"\"\"\n Initialise each node's children - essentially build the tree.\n \"\"\"\n\n for dir_name in self.get_children_paths(parent.full_path):\n child = Node(name=dir_name, parent=parent)\n parent.children.append(child)\n self._init_children(child)\n"
] |
class BaseTree(object):
""" A basic cgroup node tree. An exact representation of the filesystem tree, provided by cgroups. """
def __init__(self, root_path="/sys/fs/cgroup/", groups=None, sub_groups=None):
self.root_path = root_path
self._groups = groups or []
self._sub_groups = sub_groups or []
self.root = Node(root_path)
self._build_tree()
@property
def groups(self):
return self._groups
def _build_tree(self):
"""
Build a full or a partial tree, depending on the groups/sub-groups specified.
"""
groups = self._groups or self.get_children_paths(self.root_path)
for group in groups:
node = Node(name=group, parent=self.root)
self.root.children.append(node)
self._init_sub_groups(node)
def _init_children(self, parent):
"""
Initialise each node's children - essentially build the tree.
"""
for dir_name in self.get_children_paths(parent.full_path):
child = Node(name=dir_name, parent=parent)
parent.children.append(child)
self._init_children(child)
def get_children_paths(self, parent_full_path):
for dir_name in os.listdir(parent_full_path):
if os.path.isdir(os.path.join(parent_full_path, dir_name)):
yield dir_name
def walk(self, root=None):
"""Walk through each each node - pre-order depth-first"""
if root is None:
root = self.root
return walk_tree(root)
def walk_up(self, root=None):
"""Walk through each each node - post-order depth-first"""
if root is None:
root = self.root
return walk_up_tree(root)
|
cloudsigma/cgroupspy
|
cgroupspy/trees.py
|
BaseTree._init_children
|
python
|
def _init_children(self, parent):
    """
    Recursively populate *parent*'s children from the filesystem -
    essentially build the tree below that node.
    """
    for dir_name in self.get_children_paths(parent.full_path):
        node = Node(name=dir_name, parent=parent)
        parent.children.append(node)
        self._init_children(node)
|
Initialise each node's children - essentially build the tree.
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/trees.py#L101-L109
| null |
class BaseTree(object):
""" A basic cgroup node tree. An exact representation of the filesystem tree, provided by cgroups. """
def __init__(self, root_path="/sys/fs/cgroup/", groups=None, sub_groups=None):
self.root_path = root_path
self._groups = groups or []
self._sub_groups = sub_groups or []
self.root = Node(root_path)
self._build_tree()
@property
def groups(self):
return self._groups
def _build_tree(self):
"""
Build a full or a partial tree, depending on the groups/sub-groups specified.
"""
groups = self._groups or self.get_children_paths(self.root_path)
for group in groups:
node = Node(name=group, parent=self.root)
self.root.children.append(node)
self._init_sub_groups(node)
def _init_sub_groups(self, parent):
"""
Initialise sub-groups, and create any that do not already exist.
"""
if self._sub_groups:
for sub_group in self._sub_groups:
for component in split_path_components(sub_group):
fp = os.path.join(parent.full_path, component)
if os.path.exists(fp):
node = Node(name=component, parent=parent)
parent.children.append(node)
else:
node = parent.create_cgroup(component)
parent = node
self._init_children(node)
else:
self._init_children(parent)
def get_children_paths(self, parent_full_path):
for dir_name in os.listdir(parent_full_path):
if os.path.isdir(os.path.join(parent_full_path, dir_name)):
yield dir_name
def walk(self, root=None):
"""Walk through each each node - pre-order depth-first"""
if root is None:
root = self.root
return walk_tree(root)
def walk_up(self, root=None):
"""Walk through each each node - post-order depth-first"""
if root is None:
root = self.root
return walk_up_tree(root)
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
Node.full_path
|
python
|
def full_path(self):
    """Absolute system path to the node (root node's name when no parent)."""
    if not self.parent:
        return self.name
    return os.path.join(self.parent.full_path, self.name)
|
Absolute system path to the node
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L87-L92
| null |
class Node(object):
"""
Basic cgroup tree node. Provides means to link it to a parent and set a controller, depending on the cgroup the node
exists in.
"""
NODE_ROOT = b"root"
NODE_CONTROLLER_ROOT = b"controller_root"
NODE_SLICE = b"slice"
NODE_SCOPE = b"scope"
NODE_CGROUP = b"cgroup"
CONTROLLERS = {
b"memory": MemoryController,
b"cpuset": CpuSetController,
b"cpu": CpuController,
b"cpuacct": CpuAcctController,
b"devices": DevicesController,
b"blkio": BlkIOController,
b"net_cls": NetClsController,
b"net_prio": NetPrioController,
}
def __init__(self, name, parent=None):
try:
name = name.encode()
except AttributeError:
pass
self.name = name
self.verbose_name = name
try:
self.parent = parent.encode()
except AttributeError:
self.parent = parent
self.children = []
self.node_type = self._get_node_type()
self.controller_type = self._get_controller_type()
self.controller = self._get_controller()
def __eq__(self, other):
if isinstance(other, self.__class__) and self.full_path == other.full_path:
return True
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.path.decode())
@property
@property
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
def _get_node_type(self):
"""Returns the current node's type"""
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
def _get_controller_type(self):
"""Returns the current node's controller type"""
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
def _get_controller(self):
"""Returns the current node's controller"""
if self.controller_type:
return self.CONTROLLERS[self.controller_type](self)
return None
def create_cgroup(self, name):
"""
Create a cgroup by name and attach it under this node.
"""
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
def delete_cgroup(self, name):
"""
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
"""
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
for child in self.children:
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
def walk(self):
"""Walk through this node and its children - pre-order depth-first"""
return walk_tree(self)
def walk_up(self):
"""Walk through this node and its children - post-order depth-first"""
return walk_up_tree(self)
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
Node.path
|
python
|
def path(self):
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
|
Node's relative path from the root node
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L95-L104
| null |
class Node(object):
"""
Basic cgroup tree node. Provides means to link it to a parent and set a controller, depending on the cgroup the node
exists in.
"""
NODE_ROOT = b"root"
NODE_CONTROLLER_ROOT = b"controller_root"
NODE_SLICE = b"slice"
NODE_SCOPE = b"scope"
NODE_CGROUP = b"cgroup"
CONTROLLERS = {
b"memory": MemoryController,
b"cpuset": CpuSetController,
b"cpu": CpuController,
b"cpuacct": CpuAcctController,
b"devices": DevicesController,
b"blkio": BlkIOController,
b"net_cls": NetClsController,
b"net_prio": NetPrioController,
}
def __init__(self, name, parent=None):
try:
name = name.encode()
except AttributeError:
pass
self.name = name
self.verbose_name = name
try:
self.parent = parent.encode()
except AttributeError:
self.parent = parent
self.children = []
self.node_type = self._get_node_type()
self.controller_type = self._get_controller_type()
self.controller = self._get_controller()
def __eq__(self, other):
if isinstance(other, self.__class__) and self.full_path == other.full_path:
return True
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.path.decode())
@property
def full_path(self):
"""Absolute system path to the node"""
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
@property
def _get_node_type(self):
"""Returns the current node's type"""
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
def _get_controller_type(self):
"""Returns the current node's controller type"""
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
def _get_controller(self):
"""Returns the current node's controller"""
if self.controller_type:
return self.CONTROLLERS[self.controller_type](self)
return None
def create_cgroup(self, name):
"""
Create a cgroup by name and attach it under this node.
"""
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
def delete_cgroup(self, name):
"""
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
"""
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
for child in self.children:
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
def walk(self):
"""Walk through this node and its children - pre-order depth-first"""
return walk_tree(self)
def walk_up(self):
"""Walk through this node and its children - post-order depth-first"""
return walk_up_tree(self)
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
Node._get_node_type
|
python
|
def _get_node_type(self):
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
|
Returns the current node's type
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L106-L118
| null |
class Node(object):
"""
Basic cgroup tree node. Provides means to link it to a parent and set a controller, depending on the cgroup the node
exists in.
"""
NODE_ROOT = b"root"
NODE_CONTROLLER_ROOT = b"controller_root"
NODE_SLICE = b"slice"
NODE_SCOPE = b"scope"
NODE_CGROUP = b"cgroup"
CONTROLLERS = {
b"memory": MemoryController,
b"cpuset": CpuSetController,
b"cpu": CpuController,
b"cpuacct": CpuAcctController,
b"devices": DevicesController,
b"blkio": BlkIOController,
b"net_cls": NetClsController,
b"net_prio": NetPrioController,
}
def __init__(self, name, parent=None):
try:
name = name.encode()
except AttributeError:
pass
self.name = name
self.verbose_name = name
try:
self.parent = parent.encode()
except AttributeError:
self.parent = parent
self.children = []
self.node_type = self._get_node_type()
self.controller_type = self._get_controller_type()
self.controller = self._get_controller()
def __eq__(self, other):
if isinstance(other, self.__class__) and self.full_path == other.full_path:
return True
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.path.decode())
@property
def full_path(self):
"""Absolute system path to the node"""
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
@property
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
def _get_controller_type(self):
"""Returns the current node's controller type"""
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
def _get_controller(self):
"""Returns the current node's controller"""
if self.controller_type:
return self.CONTROLLERS[self.controller_type](self)
return None
def create_cgroup(self, name):
"""
Create a cgroup by name and attach it under this node.
"""
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
def delete_cgroup(self, name):
"""
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
"""
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
for child in self.children:
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
def walk(self):
"""Walk through this node and its children - pre-order depth-first"""
return walk_tree(self)
def walk_up(self):
"""Walk through this node and its children - post-order depth-first"""
return walk_up_tree(self)
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
Node._get_controller_type
|
python
|
def _get_controller_type(self):
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
|
Returns the current node's controller type
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L120-L128
| null |
class Node(object):
"""
Basic cgroup tree node. Provides means to link it to a parent and set a controller, depending on the cgroup the node
exists in.
"""
NODE_ROOT = b"root"
NODE_CONTROLLER_ROOT = b"controller_root"
NODE_SLICE = b"slice"
NODE_SCOPE = b"scope"
NODE_CGROUP = b"cgroup"
CONTROLLERS = {
b"memory": MemoryController,
b"cpuset": CpuSetController,
b"cpu": CpuController,
b"cpuacct": CpuAcctController,
b"devices": DevicesController,
b"blkio": BlkIOController,
b"net_cls": NetClsController,
b"net_prio": NetPrioController,
}
def __init__(self, name, parent=None):
try:
name = name.encode()
except AttributeError:
pass
self.name = name
self.verbose_name = name
try:
self.parent = parent.encode()
except AttributeError:
self.parent = parent
self.children = []
self.node_type = self._get_node_type()
self.controller_type = self._get_controller_type()
self.controller = self._get_controller()
def __eq__(self, other):
if isinstance(other, self.__class__) and self.full_path == other.full_path:
return True
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.path.decode())
@property
def full_path(self):
"""Absolute system path to the node"""
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
@property
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
def _get_node_type(self):
"""Returns the current node's type"""
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
def _get_controller(self):
"""Returns the current node's controller"""
if self.controller_type:
return self.CONTROLLERS[self.controller_type](self)
return None
def create_cgroup(self, name):
"""
Create a cgroup by name and attach it under this node.
"""
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
def delete_cgroup(self, name):
"""
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
"""
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
for child in self.children:
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
def walk(self):
"""Walk through this node and its children - pre-order depth-first"""
return walk_tree(self)
def walk_up(self):
"""Walk through this node and its children - post-order depth-first"""
return walk_up_tree(self)
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
Node.create_cgroup
|
python
|
def create_cgroup(self, name):
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
|
Create a cgroup by name and attach it under this node.
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L137-L149
| null |
class Node(object):
"""
Basic cgroup tree node. Provides means to link it to a parent and set a controller, depending on the cgroup the node
exists in.
"""
NODE_ROOT = b"root"
NODE_CONTROLLER_ROOT = b"controller_root"
NODE_SLICE = b"slice"
NODE_SCOPE = b"scope"
NODE_CGROUP = b"cgroup"
CONTROLLERS = {
b"memory": MemoryController,
b"cpuset": CpuSetController,
b"cpu": CpuController,
b"cpuacct": CpuAcctController,
b"devices": DevicesController,
b"blkio": BlkIOController,
b"net_cls": NetClsController,
b"net_prio": NetPrioController,
}
def __init__(self, name, parent=None):
try:
name = name.encode()
except AttributeError:
pass
self.name = name
self.verbose_name = name
try:
self.parent = parent.encode()
except AttributeError:
self.parent = parent
self.children = []
self.node_type = self._get_node_type()
self.controller_type = self._get_controller_type()
self.controller = self._get_controller()
def __eq__(self, other):
if isinstance(other, self.__class__) and self.full_path == other.full_path:
return True
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.path.decode())
@property
def full_path(self):
"""Absolute system path to the node"""
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
@property
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
def _get_node_type(self):
"""Returns the current node's type"""
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
def _get_controller_type(self):
"""Returns the current node's controller type"""
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
def _get_controller(self):
"""Returns the current node's controller"""
if self.controller_type:
return self.CONTROLLERS[self.controller_type](self)
return None
def delete_cgroup(self, name):
"""
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
"""
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
for child in self.children:
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
def walk(self):
"""Walk through this node and its children - pre-order depth-first"""
return walk_tree(self)
def walk_up(self):
"""Walk through this node and its children - post-order depth-first"""
return walk_up_tree(self)
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
Node.delete_cgroup
|
python
|
def delete_cgroup(self, name):
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
|
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L151-L164
| null |
class Node(object):
"""
Basic cgroup tree node. Provides means to link it to a parent and set a controller, depending on the cgroup the node
exists in.
"""
NODE_ROOT = b"root"
NODE_CONTROLLER_ROOT = b"controller_root"
NODE_SLICE = b"slice"
NODE_SCOPE = b"scope"
NODE_CGROUP = b"cgroup"
CONTROLLERS = {
b"memory": MemoryController,
b"cpuset": CpuSetController,
b"cpu": CpuController,
b"cpuacct": CpuAcctController,
b"devices": DevicesController,
b"blkio": BlkIOController,
b"net_cls": NetClsController,
b"net_prio": NetPrioController,
}
def __init__(self, name, parent=None):
try:
name = name.encode()
except AttributeError:
pass
self.name = name
self.verbose_name = name
try:
self.parent = parent.encode()
except AttributeError:
self.parent = parent
self.children = []
self.node_type = self._get_node_type()
self.controller_type = self._get_controller_type()
self.controller = self._get_controller()
def __eq__(self, other):
if isinstance(other, self.__class__) and self.full_path == other.full_path:
return True
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.path.decode())
@property
def full_path(self):
"""Absolute system path to the node"""
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
@property
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
def _get_node_type(self):
"""Returns the current node's type"""
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
def _get_controller_type(self):
"""Returns the current node's controller type"""
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
def _get_controller(self):
"""Returns the current node's controller"""
if self.controller_type:
return self.CONTROLLERS[self.controller_type](self)
return None
def create_cgroup(self, name):
"""
Create a cgroup by name and attach it under this node.
"""
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
def delete_empty_children(self):
"""
Walk through the children of this node and delete any that are empty.
"""
for child in self.children:
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
def walk(self):
"""Walk through this node and its children - pre-order depth-first"""
return walk_tree(self)
def walk_up(self):
"""Walk through this node and its children - post-order depth-first"""
return walk_up_tree(self)
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
Node.delete_empty_children
|
python
|
def delete_empty_children(self):
for child in self.children:
child.delete_empty_children()
try:
if os.path.exists(child.full_path):
os.rmdir(child.full_path)
except OSError: pass
else: self.children.remove(child)
|
Walk through the children of this node and delete any that are empty.
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L166-L176
| null |
class Node(object):
"""
Basic cgroup tree node. Provides means to link it to a parent and set a controller, depending on the cgroup the node
exists in.
"""
NODE_ROOT = b"root"
NODE_CONTROLLER_ROOT = b"controller_root"
NODE_SLICE = b"slice"
NODE_SCOPE = b"scope"
NODE_CGROUP = b"cgroup"
CONTROLLERS = {
b"memory": MemoryController,
b"cpuset": CpuSetController,
b"cpu": CpuController,
b"cpuacct": CpuAcctController,
b"devices": DevicesController,
b"blkio": BlkIOController,
b"net_cls": NetClsController,
b"net_prio": NetPrioController,
}
def __init__(self, name, parent=None):
try:
name = name.encode()
except AttributeError:
pass
self.name = name
self.verbose_name = name
try:
self.parent = parent.encode()
except AttributeError:
self.parent = parent
self.children = []
self.node_type = self._get_node_type()
self.controller_type = self._get_controller_type()
self.controller = self._get_controller()
def __eq__(self, other):
if isinstance(other, self.__class__) and self.full_path == other.full_path:
return True
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.path.decode())
@property
def full_path(self):
"""Absolute system path to the node"""
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
@property
def path(self):
"""Node's relative path from the root node"""
if self.parent:
try:
parent_path = self.parent.path.encode()
except AttributeError:
parent_path = self.parent.path
return os.path.join(parent_path, self.name)
return b"/"
def _get_node_type(self):
"""Returns the current node's type"""
if self.parent is None:
return self.NODE_ROOT
elif self.parent.node_type == self.NODE_ROOT:
return self.NODE_CONTROLLER_ROOT
elif b".slice" in self.name or b'.partition' in self.name:
return self.NODE_SLICE
elif b".scope" in self.name:
return self.NODE_SCOPE
else:
return self.NODE_CGROUP
def _get_controller_type(self):
"""Returns the current node's controller type"""
if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS:
return self.name
elif self.parent:
return self.parent.controller_type
else:
return None
def _get_controller(self):
"""Returns the current node's controller"""
if self.controller_type:
return self.CONTROLLERS[self.controller_type](self)
return None
def create_cgroup(self, name):
"""
Create a cgroup by name and attach it under this node.
"""
node = Node(name, parent=self)
if node in self.children:
raise RuntimeError('Node {} already exists under {}'.format(name, self.path))
name = name.encode()
fp = os.path.join(self.full_path, name)
os.mkdir(fp)
self.children.append(node)
return node
def delete_cgroup(self, name):
"""
Delete a cgroup by name and detach it from this node.
Raises OSError if the cgroup is not empty.
"""
name = name.encode()
fp = os.path.join(self.full_path, name)
if os.path.exists(fp):
os.rmdir(fp)
node = Node(name, parent=self)
try:
self.children.remove(node)
except ValueError:
return
def walk(self):
"""Walk through this node and its children - pre-order depth-first"""
return walk_tree(self)
def walk_up(self):
"""Walk through this node and its children - post-order depth-first"""
return walk_up_tree(self)
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
NodeControlGroup.add_node
|
python
|
def add_node(self, node):
if self.controllers.get(node.controller_type, None):
raise RuntimeError("Cannot add node {} to the node group. A node for {} group is already assigned".format(
node,
node.controller_type
))
self.nodes.append(node)
if node.controller:
self.controllers[node.controller_type] = node.controller
setattr(self, node.controller_type, node.controller)
|
A a Node object to the group. Only one node per cgroup is supported
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L219-L231
| null |
class NodeControlGroup(object):
"""
A tree node that can group together same multiple nodes based on their position in the cgroup hierarchy
For example - we have mounted all the cgroups in /sys/fs/cgroup/ and we have a scope in each of them under
/{cpuset,cpu,memory,cpuacct}/isolated.scope/. Then NodeControlGroup, can provide access to all cgroup properties
like
isolated_scope.cpu
isolated_scope.memory
isolated_scope.cpuset
Requires a basic Node tree to be generated.
"""
def __init__(self, name, parent=None):
self.name = name
self.parent = parent
self.children_map = {}
self.controllers = {}
self.nodes = []
@property
def path(self):
if self.parent:
base_name, ext = os.path.splitext(self.name)
if ext not in [b'.slice', b'.scope', b'.partition']:
base_name = self.name
return os.path.join(self.parent.path, base_name)
return b"/"
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.name.decode())
@property
def children(self):
return self.children_map.values()
@property
def group_tasks(self):
"""All tasks in the hierarchy, affected by this group."""
tasks = set()
for node in walk_tree(self):
for ctrl in node.controllers.values():
tasks.update(ctrl.tasks)
return tasks
@property
def tasks(self):
"""Tasks in this exact group"""
tasks = set()
for ctrl in self.controllers.values():
tasks.update(ctrl.tasks)
return tasks
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
NodeControlGroup.group_tasks
|
python
|
def group_tasks(self):
tasks = set()
for node in walk_tree(self):
for ctrl in node.controllers.values():
tasks.update(ctrl.tasks)
return tasks
|
All tasks in the hierarchy, affected by this group.
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L241-L247
|
[
"def walk_tree(root):\n \"\"\"Pre-order depth-first\"\"\"\n yield root\n\n for child in root.children:\n for el in walk_tree(child):\n yield el\n"
] |
class NodeControlGroup(object):
"""
A tree node that can group together same multiple nodes based on their position in the cgroup hierarchy
For example - we have mounted all the cgroups in /sys/fs/cgroup/ and we have a scope in each of them under
/{cpuset,cpu,memory,cpuacct}/isolated.scope/. Then NodeControlGroup, can provide access to all cgroup properties
like
isolated_scope.cpu
isolated_scope.memory
isolated_scope.cpuset
Requires a basic Node tree to be generated.
"""
def __init__(self, name, parent=None):
self.name = name
self.parent = parent
self.children_map = {}
self.controllers = {}
self.nodes = []
@property
def path(self):
if self.parent:
base_name, ext = os.path.splitext(self.name)
if ext not in [b'.slice', b'.scope', b'.partition']:
base_name = self.name
return os.path.join(self.parent.path, base_name)
return b"/"
def add_node(self, node):
"""
A a Node object to the group. Only one node per cgroup is supported
"""
if self.controllers.get(node.controller_type, None):
raise RuntimeError("Cannot add node {} to the node group. A node for {} group is already assigned".format(
node,
node.controller_type
))
self.nodes.append(node)
if node.controller:
self.controllers[node.controller_type] = node.controller
setattr(self, node.controller_type, node.controller)
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.name.decode())
@property
def children(self):
return self.children_map.values()
@property
@property
def tasks(self):
"""Tasks in this exact group"""
tasks = set()
for ctrl in self.controllers.values():
tasks.update(ctrl.tasks)
return tasks
|
cloudsigma/cgroupspy
|
cgroupspy/nodes.py
|
NodeControlGroup.tasks
|
python
|
def tasks(self):
tasks = set()
for ctrl in self.controllers.values():
tasks.update(ctrl.tasks)
return tasks
|
Tasks in this exact group
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/nodes.py#L250-L255
| null |
class NodeControlGroup(object):
"""
A tree node that can group together same multiple nodes based on their position in the cgroup hierarchy
For example - we have mounted all the cgroups in /sys/fs/cgroup/ and we have a scope in each of them under
/{cpuset,cpu,memory,cpuacct}/isolated.scope/. Then NodeControlGroup, can provide access to all cgroup properties
like
isolated_scope.cpu
isolated_scope.memory
isolated_scope.cpuset
Requires a basic Node tree to be generated.
"""
def __init__(self, name, parent=None):
self.name = name
self.parent = parent
self.children_map = {}
self.controllers = {}
self.nodes = []
@property
def path(self):
if self.parent:
base_name, ext = os.path.splitext(self.name)
if ext not in [b'.slice', b'.scope', b'.partition']:
base_name = self.name
return os.path.join(self.parent.path, base_name)
return b"/"
def add_node(self, node):
"""
A a Node object to the group. Only one node per cgroup is supported
"""
if self.controllers.get(node.controller_type, None):
raise RuntimeError("Cannot add node {} to the node group. A node for {} group is already assigned".format(
node,
node.controller_type
))
self.nodes.append(node)
if node.controller:
self.controllers[node.controller_type] = node.controller
setattr(self, node.controller_type, node.controller)
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.name.decode())
@property
def children(self):
return self.children_map.values()
@property
def group_tasks(self):
"""All tasks in the hierarchy, affected by this group."""
tasks = set()
for node in walk_tree(self):
for ctrl in node.controllers.values():
tasks.update(ctrl.tasks)
return tasks
@property
|
cloudsigma/cgroupspy
|
cgroupspy/controllers.py
|
Controller.filepath
|
python
|
def filepath(self, filename):
return os.path.join(self.node.full_path, filename)
|
The full path to a file
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/controllers.py#L48-L51
| null |
class Controller(object):
"""
Base controller. Provides access to general files, existing in all cgroups and means to get/set properties
"""
tasks = MultiLineIntegerFile("tasks")
procs = MultiLineIntegerFile("cgroup.procs")
notify_on_release = FlagFile("notify_on_release")
clone_children = FlagFile("cgroup.clone_children")
def __init__(self, node):
self.node = node
def get_property(self, filename):
"""Opens the file and reads the value"""
with open(self.filepath(filename)) as f:
return f.read().strip()
def set_property(self, filename, value):
"""Opens the file and writes the value"""
with open(self.filepath(filename), "w") as f:
return f.write(str(value))
|
cloudsigma/cgroupspy
|
cgroupspy/controllers.py
|
Controller.get_property
|
python
|
def get_property(self, filename):
with open(self.filepath(filename)) as f:
return f.read().strip()
|
Opens the file and reads the value
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/controllers.py#L53-L57
|
[
"def filepath(self, filename):\n \"\"\"The full path to a file\"\"\"\n\n return os.path.join(self.node.full_path, filename)\n"
] |
class Controller(object):
"""
Base controller. Provides access to general files, existing in all cgroups and means to get/set properties
"""
tasks = MultiLineIntegerFile("tasks")
procs = MultiLineIntegerFile("cgroup.procs")
notify_on_release = FlagFile("notify_on_release")
clone_children = FlagFile("cgroup.clone_children")
def __init__(self, node):
self.node = node
def filepath(self, filename):
"""The full path to a file"""
return os.path.join(self.node.full_path, filename)
def set_property(self, filename, value):
"""Opens the file and writes the value"""
with open(self.filepath(filename), "w") as f:
return f.write(str(value))
|
cloudsigma/cgroupspy
|
cgroupspy/controllers.py
|
Controller.set_property
|
python
|
def set_property(self, filename, value):
with open(self.filepath(filename), "w") as f:
return f.write(str(value))
|
Opens the file and writes the value
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/controllers.py#L59-L63
|
[
"def filepath(self, filename):\n \"\"\"The full path to a file\"\"\"\n\n return os.path.join(self.node.full_path, filename)\n"
] |
class Controller(object):
"""
Base controller. Provides access to general files, existing in all cgroups and means to get/set properties
"""
tasks = MultiLineIntegerFile("tasks")
procs = MultiLineIntegerFile("cgroup.procs")
notify_on_release = FlagFile("notify_on_release")
clone_children = FlagFile("cgroup.clone_children")
def __init__(self, node):
self.node = node
def filepath(self, filename):
"""The full path to a file"""
return os.path.join(self.node.full_path, filename)
def get_property(self, filename):
"""Opens the file and reads the value"""
with open(self.filepath(filename)) as f:
return f.read().strip()
|
cloudsigma/cgroupspy
|
cgroupspy/utils.py
|
walk_tree
|
python
|
def walk_tree(root):
yield root
for child in root.children:
for el in walk_tree(child):
yield el
|
Pre-order depth-first
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/utils.py#L32-L38
|
[
"def walk_tree(root):\n \"\"\"Pre-order depth-first\"\"\"\n yield root\n\n for child in root.children:\n for el in walk_tree(child):\n yield el\n"
] |
"""
Copyright (c) 2014, CloudSigma AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CloudSigma AG nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CLOUDSIGMA AG BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import ctypes
def walk_up_tree(root):
"""Post-order depth-first"""
for child in root.children:
for el in walk_up_tree(child):
yield el
yield root
def get_device_major_minor(dev_path):
"""
Returns the device (major, minor) tuple for simplicity
:param dev_path: Path to the device
:return: (device major, device minor)
:rtype: (int, int)
"""
stat = os.lstat(dev_path)
return os.major(stat.st_rdev), os.minor(stat.st_rdev)
def split_path_components(path):
components=[]
while True:
path, component = os.path.split(path)
if component != "":
components.append(component)
else:
if path != "":
components.append(path)
break
components.reverse()
return components
def mount(source, target, fs, options=""):
ret = ctypes.CDLL("libc.so.6", use_errno=True).mount(source, target, fs, 0, options)
if ret < 0:
errno = ctypes.get_errno()
raise RuntimeError("Error mounting {} ({}) on {} with options '{}': {}".
format(source, fs, target, options, os.strerror(errno)))
|
cloudsigma/cgroupspy
|
cgroupspy/utils.py
|
walk_up_tree
|
python
|
def walk_up_tree(root):
for child in root.children:
for el in walk_up_tree(child):
yield el
yield root
|
Post-order depth-first
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/utils.py#L41-L47
|
[
"def walk_up_tree(root):\n \"\"\"Post-order depth-first\"\"\"\n for child in root.children:\n for el in walk_up_tree(child):\n yield el\n\n yield root\n"
] |
"""
Copyright (c) 2014, CloudSigma AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CloudSigma AG nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CLOUDSIGMA AG BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import ctypes
def walk_tree(root):
"""Pre-order depth-first"""
yield root
for child in root.children:
for el in walk_tree(child):
yield el
def get_device_major_minor(dev_path):
"""
Returns the device (major, minor) tuple for simplicity
:param dev_path: Path to the device
:return: (device major, device minor)
:rtype: (int, int)
"""
stat = os.lstat(dev_path)
return os.major(stat.st_rdev), os.minor(stat.st_rdev)
def split_path_components(path):
components=[]
while True:
path, component = os.path.split(path)
if component != "":
components.append(component)
else:
if path != "":
components.append(path)
break
components.reverse()
return components
def mount(source, target, fs, options=""):
ret = ctypes.CDLL("libc.so.6", use_errno=True).mount(source, target, fs, 0, options)
if ret < 0:
errno = ctypes.get_errno()
raise RuntimeError("Error mounting {} ({}) on {} with options '{}': {}".
format(source, fs, target, options, os.strerror(errno)))
|
cloudsigma/cgroupspy
|
cgroupspy/utils.py
|
get_device_major_minor
|
python
|
def get_device_major_minor(dev_path):
stat = os.lstat(dev_path)
return os.major(stat.st_rdev), os.minor(stat.st_rdev)
|
Returns the device (major, minor) tuple for simplicity
:param dev_path: Path to the device
:return: (device major, device minor)
:rtype: (int, int)
|
train
|
https://github.com/cloudsigma/cgroupspy/blob/e705ac4ccdfe33d8ecc700e9a35a9556084449ca/cgroupspy/utils.py#L50-L58
| null |
"""
Copyright (c) 2014, CloudSigma AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CloudSigma AG nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CLOUDSIGMA AG BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import ctypes
def walk_tree(root):
"""Pre-order depth-first"""
yield root
for child in root.children:
for el in walk_tree(child):
yield el
def walk_up_tree(root):
"""Post-order depth-first"""
for child in root.children:
for el in walk_up_tree(child):
yield el
yield root
def split_path_components(path):
components=[]
while True:
path, component = os.path.split(path)
if component != "":
components.append(component)
else:
if path != "":
components.append(path)
break
components.reverse()
return components
def mount(source, target, fs, options=""):
ret = ctypes.CDLL("libc.so.6", use_errno=True).mount(source, target, fs, 0, options)
if ret < 0:
errno = ctypes.get_errno()
raise RuntimeError("Error mounting {} ({}) on {} with options '{}': {}".
format(source, fs, target, options, os.strerror(errno)))
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/items.py
|
CmsModelList.init_with_context
|
python
|
def init_with_context(self, context):
# Apply the include/exclude patterns:
listitems = self._visible_models(context['request'])
# Convert to a similar data structure like the dashboard icons have.
# This allows sorting the items identically.
models = [
{'name': model._meta.model_name,
'app_name': model._meta.app_label,
'title': capfirst(model._meta.verbose_name_plural),
'url': self._get_admin_change_url(model, context)
}
for model, perms in listitems if self.is_item_visible(model, perms)
]
# Sort models.
sort_cms_models(models)
# Convert to items
for model in models:
self.children.append(items.MenuItem(title=model['title'], url=model['url']))
|
Initialize the menu.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L27-L50
|
[
"def sort_cms_models(cms_models):\n \"\"\"\n Sort a set of CMS-related models in a custom (predefined) order.\n \"\"\"\n cms_models.sort(key=lambda model: (\n get_cms_model_order(model['name']) if is_cms_app(model['app_name']) else 999,\n model['app_name'],\n model['title']\n ))\n"
] |
class CmsModelList(items.ModelList):
"""
A custom :class:`~admin_tools.menu.items.ModelList` that displays menu items for each model.
It has a strong bias towards sorting CMS apps on top.
"""
def is_item_visible(self, model, perms):
"""
Return whether the model should be displayed in the menu.
By default it checks for the ``perms['change']`` value; only items with change permission will be displayed.
This function can be extended to support "view permissions" for example.
:param model: The model class
:param perms: The permissions from :func:`ModelAdmin.get_model_perms()<django.contrib.admin.ModelAdmin.get_model_perms>`.
"""
return perms['change']
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/items.py
|
ReturnToSiteItem.init_with_context
|
python
|
def init_with_context(self, context):
super(ReturnToSiteItem, self).init_with_context(context)
# See if the current page is being edited, update URL accordingly.
edited_model = self.get_edited_object(context['request'])
if edited_model:
try:
url = edited_model.get_absolute_url()
except (AttributeError, urls.NoReverseMatch) as e:
pass
else:
if url:
self.url = url
|
Find the current URL based on the context.
It uses :func:`get_edited_object` to find the model,
and calls ``get_absolute_url()`` to get the frontend URL.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L82-L99
|
[
"def get_edited_object(self, request):\n \"\"\"\n Return the object which is currently being edited.\n Returns ``None`` if the match could not be made.\n \"\"\"\n resolvermatch = urls.resolve(request.path_info)\n if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'):\n # In \"appname_modelname_change\" view of the admin.\n # Extract the appname and model from the url name.\n # For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view)\n match = RE_CHANGE_URL.match(resolvermatch.url_name)\n if not match:\n return None\n\n # object_id can be string (e.g. a country code as PK).\n try:\n object_id = resolvermatch.kwargs['object_id'] # Django 2.0+\n except KeyError:\n object_id = resolvermatch.args[0]\n\n return self.get_object_by_natural_key(match.group(1), match.group(2), object_id)\n return None\n"
] |
class ReturnToSiteItem(items.MenuItem):
"""
A "Return to site" button for the menu.
It redirects the user back to the frontend pages.
By default, it attempts to find the current frontend URL that corresponds
with the model that's being edited in the admin 'change' page.
If this is not possible, the default URL (``/``) will be used instead.
The menu item has a custom ``returntosite`` CSS class to be distinguishable between the other menu items.
"""
#: Set the default title
title = _('Return to site')
#: Set the default URL
url = '/'
# Make the item distinguishable between the other menu items
css_classes = ['returntosite']
def get_edited_object(self, request):
"""
Return the object which is currently being edited.
Returns ``None`` if the match could not be made.
"""
resolvermatch = urls.resolve(request.path_info)
if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'):
# In "appname_modelname_change" view of the admin.
# Extract the appname and model from the url name.
# For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view)
match = RE_CHANGE_URL.match(resolvermatch.url_name)
if not match:
return None
# object_id can be string (e.g. a country code as PK).
try:
object_id = resolvermatch.kwargs['object_id'] # Django 2.0+
except KeyError:
object_id = resolvermatch.args[0]
return self.get_object_by_natural_key(match.group(1), match.group(2), object_id)
return None
def get_object_by_natural_key(self, app_label, model_name, object_id):
"""
Return a model based on a natural key.
This is a utility function for :func:`get_edited_object`.
"""
try:
model_type = ContentType.objects.get_by_natural_key(app_label, model_name)
except ContentType.DoesNotExist:
return None
# Pointless to fetch the object, if there is no URL to generate
# Avoid another database query.
ModelClass = model_type.model_class()
if not hasattr(ModelClass, 'get_absolute_url'):
return None
try:
return model_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
return None
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/items.py
|
ReturnToSiteItem.get_edited_object
|
python
|
def get_edited_object(self, request):
resolvermatch = urls.resolve(request.path_info)
if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'):
# In "appname_modelname_change" view of the admin.
# Extract the appname and model from the url name.
# For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view)
match = RE_CHANGE_URL.match(resolvermatch.url_name)
if not match:
return None
# object_id can be string (e.g. a country code as PK).
try:
object_id = resolvermatch.kwargs['object_id'] # Django 2.0+
except KeyError:
object_id = resolvermatch.args[0]
return self.get_object_by_natural_key(match.group(1), match.group(2), object_id)
return None
|
Return the object which is currently being edited.
Returns ``None`` if the match could not be made.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L101-L122
|
[
"def get_object_by_natural_key(self, app_label, model_name, object_id):\n \"\"\"\n Return a model based on a natural key.\n This is a utility function for :func:`get_edited_object`.\n \"\"\"\n try:\n model_type = ContentType.objects.get_by_natural_key(app_label, model_name)\n except ContentType.DoesNotExist:\n return None\n\n # Pointless to fetch the object, if there is no URL to generate\n # Avoid another database query.\n ModelClass = model_type.model_class()\n if not hasattr(ModelClass, 'get_absolute_url'):\n return None\n\n try:\n return model_type.get_object_for_this_type(pk=object_id)\n except ObjectDoesNotExist:\n return None\n"
] |
class ReturnToSiteItem(items.MenuItem):
"""
A "Return to site" button for the menu.
It redirects the user back to the frontend pages.
By default, it attempts to find the current frontend URL that corresponds
with the model that's being edited in the admin 'change' page.
If this is not possible, the default URL (``/``) will be used instead.
The menu item has a custom ``returntosite`` CSS class to be distinguishable between the other menu items.
"""
#: Set the default title
title = _('Return to site')
#: Set the default URL
url = '/'
# Make the item distinguishable between the other menu items
css_classes = ['returntosite']
def init_with_context(self, context):
"""
Find the current URL based on the context.
It uses :func:`get_edited_object` to find the model,
and calls ``get_absolute_url()`` to get the frontend URL.
"""
super(ReturnToSiteItem, self).init_with_context(context)
# See if the current page is being edited, update URL accordingly.
edited_model = self.get_edited_object(context['request'])
if edited_model:
try:
url = edited_model.get_absolute_url()
except (AttributeError, urls.NoReverseMatch) as e:
pass
else:
if url:
self.url = url
def get_object_by_natural_key(self, app_label, model_name, object_id):
"""
Return a model based on a natural key.
This is a utility function for :func:`get_edited_object`.
"""
try:
model_type = ContentType.objects.get_by_natural_key(app_label, model_name)
except ContentType.DoesNotExist:
return None
# Pointless to fetch the object, if there is no URL to generate
# Avoid another database query.
ModelClass = model_type.model_class()
if not hasattr(ModelClass, 'get_absolute_url'):
return None
try:
return model_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
return None
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/items.py
|
ReturnToSiteItem.get_object_by_natural_key
|
python
|
def get_object_by_natural_key(self, app_label, model_name, object_id):
try:
model_type = ContentType.objects.get_by_natural_key(app_label, model_name)
except ContentType.DoesNotExist:
return None
# Pointless to fetch the object, if there is no URL to generate
# Avoid another database query.
ModelClass = model_type.model_class()
if not hasattr(ModelClass, 'get_absolute_url'):
return None
try:
return model_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
return None
|
Return a model based on a natural key.
This is a utility function for :func:`get_edited_object`.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L124-L143
| null |
class ReturnToSiteItem(items.MenuItem):
"""
A "Return to site" button for the menu.
It redirects the user back to the frontend pages.
By default, it attempts to find the current frontend URL that corresponds
with the model that's being edited in the admin 'change' page.
If this is not possible, the default URL (``/``) will be used instead.
The menu item has a custom ``returntosite`` CSS class to be distinguishable between the other menu items.
"""
#: Set the default title
title = _('Return to site')
#: Set the default URL
url = '/'
# Make the item distinguishable between the other menu items
css_classes = ['returntosite']
def init_with_context(self, context):
"""
Find the current URL based on the context.
It uses :func:`get_edited_object` to find the model,
and calls ``get_absolute_url()`` to get the frontend URL.
"""
super(ReturnToSiteItem, self).init_with_context(context)
# See if the current page is being edited, update URL accordingly.
edited_model = self.get_edited_object(context['request'])
if edited_model:
try:
url = edited_model.get_absolute_url()
except (AttributeError, urls.NoReverseMatch) as e:
pass
else:
if url:
self.url = url
def get_edited_object(self, request):
"""
Return the object which is currently being edited.
Returns ``None`` if the match could not be made.
"""
resolvermatch = urls.resolve(request.path_info)
if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'):
# In "appname_modelname_change" view of the admin.
# Extract the appname and model from the url name.
# For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view)
match = RE_CHANGE_URL.match(resolvermatch.url_name)
if not match:
return None
# object_id can be string (e.g. a country code as PK).
try:
object_id = resolvermatch.kwargs['object_id'] # Django 2.0+
except KeyError:
object_id = resolvermatch.args[0]
return self.get_object_by_natural_key(match.group(1), match.group(2), object_id)
return None
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/dashboard.py
|
FluentIndexDashboard.get_personal_module
|
python
|
def get_personal_module(self):
return PersonalModule(
layout='inline',
draggable=False,
deletable=False,
collapsible=False,
)
|
Instantiate the :class:`~fluent_dashboard.modules.PersonalModule` for use in the dashboard.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/dashboard.py#L69-L78
| null |
class FluentIndexDashboard(Dashboard):
"""
A custom home screen for the Django admin interface.
It displays the application groups based on with :ref:`FLUENT_DASHBOARD_APP_GROUPS` setting.
To activate the dashboard add the following to your settings.py::
ADMIN_TOOLS_INDEX_DASHBOARD = 'fluent_dashboard.dashboard.FluentIndexDashboard'
The dashboard modules are instantiated by the following functions, which can be overwritten:
* :func:`get_personal_module`
* :func:`get_application_modules`
* :func:`get_recent_actions_module`
* :func:`get_rss_modules`
* :func:`get_cache_status_modules`
To have a menu which is consistent with the application groups displayed by this module,
use the :class:`~fluent_dashboard.menu.FluentMenu` class to render the `admin_tools` menu.
When overwriting this class, the elements can either be added in
the :func:`__init__` method, or the :func:`init_with_context` method.
For more information, see the `django-admin-tools` documentation.
"""
class Media:
if LooseVersion(admin_tools.VERSION) < LooseVersion('0.6'):
# Older versions of django-admin-tools used an incorrect format for media.
css = ("fluent_dashboard/dashboard.css",)
else:
css = {
'all': ("fluent_dashboard/dashboard.css",)
}
def __init__(self, **kwargs):
super(FluentIndexDashboard, self).__init__(**kwargs)
self.children.append(self.get_personal_module())
self.children.extend(self.get_application_modules())
self.children.append(self.get_recent_actions_module())
def init_with_context(self, context):
request = context['request']
if 'dashboardmods' in settings.INSTALLED_APPS:
self.children.extend(self.get_rss_modules())
self.children.extend(self.get_cache_status_modules(request))
def get_application_modules(self):
"""
Instantiate all application modules (i.e.
:class:`~admin_tools.dashboard.modules.AppList`,
:class:`~fluent_dashboard.modules.AppIconList` and
:class:`~fluent_dashboard.modules.CmsAppIconList`)
for use in the dashboard.
"""
modules = []
appgroups = get_application_groups()
for title, kwargs in appgroups:
AppListClass = get_class(kwargs.pop('module')) # e.g. CmsAppIconlist, AppIconlist, Applist
modules.append(AppListClass(title, **kwargs))
return modules
def get_recent_actions_module(self):
"""
Instantiate the :class:`~admin_tools.dashboard.modules.RecentActions` module for use in the dashboard.
"""
return modules.RecentActions(_('Recent Actions'), 5, enabled=False, collapsible=False)
def get_cache_status_modules(self, request):
"""
Instantiate the :class:`~fluent_dashboard.modules.CacheStatusGroup` module for use in the dashboard.
This module displays the status of Varnish and Memcache,
if the :ref:`dashboardmods` package is installed and the caches are configured.
By default, these modules are only shown for the superuser.
"""
if not request.user.is_superuser:
return []
return [CacheStatusGroup()]
def get_rss_modules(self):
"""
Instantiate the RSS modules for use in the dashboard.
This module displays the RSS feeds of the :ref:`dashboardmods` package, if it is installed, and configured.
"""
if not 'dashboardmods' in settings.INSTALLED_APPS:
return []
import dashboardmods
return dashboardmods.get_rss_dash_modules()
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/dashboard.py
|
FluentIndexDashboard.get_application_modules
|
python
|
def get_application_modules(self):
modules = []
appgroups = get_application_groups()
for title, kwargs in appgroups:
AppListClass = get_class(kwargs.pop('module')) # e.g. CmsAppIconlist, AppIconlist, Applist
modules.append(AppListClass(title, **kwargs))
return modules
|
Instantiate all application modules (i.e.
:class:`~admin_tools.dashboard.modules.AppList`,
:class:`~fluent_dashboard.modules.AppIconList` and
:class:`~fluent_dashboard.modules.CmsAppIconList`)
for use in the dashboard.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/dashboard.py#L80-L93
|
[
"def get_application_groups():\n \"\"\"\n Return the applications of the system, organized in various groups.\n\n These groups are not connected with the application names,\n but rather with a pattern of applications.\n \"\"\"\n\n groups = []\n for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:\n # Allow to pass all possible arguments to the DashboardModule class.\n module_kwargs = groupdict.copy()\n\n # However, the 'models' is treated special, to have catch-all support.\n if '*' in groupdict['models']:\n default_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE\n module_kwargs['exclude'] = ALL_KNOWN_APPS + list(module_kwargs.get('exclude', []))\n del module_kwargs['models']\n else:\n default_module = 'CmsAppIconList'\n\n # Get module to display, can be a alias for known variations.\n module = groupdict.get('module', default_module)\n if module in MODULE_ALIASES:\n module = MODULE_ALIASES[module]\n module_kwargs['module'] = module\n groups.append((title, module_kwargs),)\n\n return groups\n",
"def get_class(import_path):\n \"\"\"\n Import a class by name.\n \"\"\"\n # Used from django-form-designer\n # Copyright (c) 2009, Samuel Luescher, BSD licensed\n\n try:\n dot = import_path.rindex('.')\n except ValueError:\n raise ImproperlyConfigured(\"{0} isn't a Python path.\".format(import_path))\n\n module, classname = import_path[:dot], import_path[dot + 1:]\n try:\n mod = import_module(module)\n except ImportError as e:\n raise ImproperlyConfigured('Error importing module {0}: \"{1}\"'.format(module, e))\n\n try:\n return getattr(mod, classname)\n except AttributeError:\n raise ImproperlyConfigured('Module \"{0}\" does not define a \"{1}\" class.'.format(module, classname))\n"
] |
class FluentIndexDashboard(Dashboard):
"""
A custom home screen for the Django admin interface.
It displays the application groups based on with :ref:`FLUENT_DASHBOARD_APP_GROUPS` setting.
To activate the dashboard add the following to your settings.py::
ADMIN_TOOLS_INDEX_DASHBOARD = 'fluent_dashboard.dashboard.FluentIndexDashboard'
The dashboard modules are instantiated by the following functions, which can be overwritten:
* :func:`get_personal_module`
* :func:`get_application_modules`
* :func:`get_recent_actions_module`
* :func:`get_rss_modules`
* :func:`get_cache_status_modules`
To have a menu which is consistent with the application groups displayed by this module,
use the :class:`~fluent_dashboard.menu.FluentMenu` class to render the `admin_tools` menu.
When overwriting this class, the elements can either be added in
the :func:`__init__` method, or the :func:`init_with_context` method.
For more information, see the `django-admin-tools` documentation.
"""
class Media:
if LooseVersion(admin_tools.VERSION) < LooseVersion('0.6'):
# Older versions of django-admin-tools used an incorrect format for media.
css = ("fluent_dashboard/dashboard.css",)
else:
css = {
'all': ("fluent_dashboard/dashboard.css",)
}
def __init__(self, **kwargs):
super(FluentIndexDashboard, self).__init__(**kwargs)
self.children.append(self.get_personal_module())
self.children.extend(self.get_application_modules())
self.children.append(self.get_recent_actions_module())
def init_with_context(self, context):
request = context['request']
if 'dashboardmods' in settings.INSTALLED_APPS:
self.children.extend(self.get_rss_modules())
self.children.extend(self.get_cache_status_modules(request))
def get_personal_module(self):
"""
Instantiate the :class:`~fluent_dashboard.modules.PersonalModule` for use in the dashboard.
"""
return PersonalModule(
layout='inline',
draggable=False,
deletable=False,
collapsible=False,
)
def get_recent_actions_module(self):
"""
Instantiate the :class:`~admin_tools.dashboard.modules.RecentActions` module for use in the dashboard.
"""
return modules.RecentActions(_('Recent Actions'), 5, enabled=False, collapsible=False)
def get_cache_status_modules(self, request):
"""
Instantiate the :class:`~fluent_dashboard.modules.CacheStatusGroup` module for use in the dashboard.
This module displays the status of Varnish and Memcache,
if the :ref:`dashboardmods` package is installed and the caches are configured.
By default, these modules are only shown for the superuser.
"""
if not request.user.is_superuser:
return []
return [CacheStatusGroup()]
def get_rss_modules(self):
"""
Instantiate the RSS modules for use in the dashboard.
This module displays the RSS feeds of the :ref:`dashboardmods` package, if it is installed, and configured.
"""
if not 'dashboardmods' in settings.INSTALLED_APPS:
return []
import dashboardmods
return dashboardmods.get_rss_dash_modules()
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/dashboard.py
|
FluentAppIndexDashboard.get_recent_actions_module
|
python
|
def get_recent_actions_module(self):
return modules.RecentActions(
_('Recent Actions'),
include_list=self.get_app_content_types(),
limit=5,
enabled=False,
collapsible=False
)
|
Instantiate the :class:`~admin_tools.dashboard.modules.RecentActions` module
for use in the appliation index page.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/dashboard.py#L153-L164
| null |
class FluentAppIndexDashboard(AppIndexDashboard):
"""
A custom application index page for the Django admin interface.
This dashboard is displayed when one specific application is opened via the breadcrumb.
It displays the models and recent actions of the specific application.
To activate the dashboards add the following to your settings.py::
ADMIN_TOOLS_APP_INDEX_DASHBOARD = 'fluent_dashboard.dashboard.FluentAppIndexDashboard'
"""
# disable title because its redundant with the model list module
title = ''
def __init__(self, app_title, models, **kwargs):
super(FluentAppIndexDashboard, self).__init__(app_title, models, **kwargs)
self.children += (
self.get_model_list_module(),
self.get_recent_actions_module(),
)
def get_model_list_module(self):
"""
Instantiate a standard :class:`~admin_tools.dashboard.modules.ModelList` class
to display the models of this application.
"""
return modules.ModelList(self.app_title, self.models)
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/appgroups.py
|
get_application_groups
|
python
|
def get_application_groups():
groups = []
for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:
# Allow to pass all possible arguments to the DashboardModule class.
module_kwargs = groupdict.copy()
# However, the 'models' is treated special, to have catch-all support.
if '*' in groupdict['models']:
default_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE
module_kwargs['exclude'] = ALL_KNOWN_APPS + list(module_kwargs.get('exclude', []))
del module_kwargs['models']
else:
default_module = 'CmsAppIconList'
# Get module to display, can be a alias for known variations.
module = groupdict.get('module', default_module)
if module in MODULE_ALIASES:
module = MODULE_ALIASES[module]
module_kwargs['module'] = module
groups.append((title, module_kwargs),)
return groups
|
Return the applications of the system, organized in various groups.
These groups are not connected with the application names,
but rather with a pattern of applications.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/appgroups.py#L27-L55
| null |
"""
Splitting and organizing applications and models into groups.
This module is mostly meant for internal use.
"""
from fnmatch import fnmatch
from future.utils import iteritems
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from fluent_dashboard import appsettings
import itertools
_groups = [groupdict['models'] for _, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS]
ALL_KNOWN_APPS = list(itertools.chain(*_groups))
if '*' in ALL_KNOWN_APPS:
ALL_KNOWN_APPS.remove('*') # Default for CMS group, but not useful here.
MODULE_ALIASES = {
'AppList': 'admin_tools.dashboard.modules.AppList',
'ModelList': 'admin_tools.dashboard.modules.ModelList',
'AppIconList': 'fluent_dashboard.modules.AppIconList',
'CmsAppIconList': 'fluent_dashboard.modules.CmsAppIconList',
}
def sort_cms_models(cms_models):
"""
Sort a set of CMS-related models in a custom (predefined) order.
"""
cms_models.sort(key=lambda model: (
get_cms_model_order(model['name']) if is_cms_app(model['app_name']) else 999,
model['app_name'],
model['title']
))
def is_cms_app(app_name):
"""
Return whether the given application is a CMS app
"""
for pat in appsettings.FLUENT_DASHBOARD_CMS_APP_NAMES:
if fnmatch(app_name, pat):
return True
return False
def get_cms_model_order(model_name):
"""
Return a numeric ordering for a model name.
"""
for (name, order) in iteritems(appsettings.FLUENT_DASHBOARD_CMS_MODEL_ORDER):
if name in model_name:
return order
return 999
def get_class(import_path):
"""
Import a class by name.
"""
# Used from django-form-designer
# Copyright (c) 2009, Samuel Luescher, BSD licensed
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("{0} isn't a Python path.".format(import_path))
module, classname = import_path[:dot], import_path[dot + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module {0}: "{1}"'.format(module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "{0}" does not define a "{1}" class.'.format(module, classname))
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/appgroups.py
|
sort_cms_models
|
python
|
def sort_cms_models(cms_models):
cms_models.sort(key=lambda model: (
get_cms_model_order(model['name']) if is_cms_app(model['app_name']) else 999,
model['app_name'],
model['title']
))
|
Sort a set of CMS-related models in a custom (predefined) order.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/appgroups.py#L58-L66
| null |
"""
Splitting and organizing applications and models into groups.
This module is mostly meant for internal use.
"""
from fnmatch import fnmatch
from future.utils import iteritems
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from fluent_dashboard import appsettings
import itertools
_groups = [groupdict['models'] for _, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS]
ALL_KNOWN_APPS = list(itertools.chain(*_groups))
if '*' in ALL_KNOWN_APPS:
ALL_KNOWN_APPS.remove('*') # Default for CMS group, but not useful here.
MODULE_ALIASES = {
'AppList': 'admin_tools.dashboard.modules.AppList',
'ModelList': 'admin_tools.dashboard.modules.ModelList',
'AppIconList': 'fluent_dashboard.modules.AppIconList',
'CmsAppIconList': 'fluent_dashboard.modules.CmsAppIconList',
}
def get_application_groups():
"""
Return the applications of the system, organized in various groups.
These groups are not connected with the application names,
but rather with a pattern of applications.
"""
groups = []
for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:
# Allow to pass all possible arguments to the DashboardModule class.
module_kwargs = groupdict.copy()
# However, the 'models' is treated special, to have catch-all support.
if '*' in groupdict['models']:
default_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE
module_kwargs['exclude'] = ALL_KNOWN_APPS + list(module_kwargs.get('exclude', []))
del module_kwargs['models']
else:
default_module = 'CmsAppIconList'
# Get module to display, can be a alias for known variations.
module = groupdict.get('module', default_module)
if module in MODULE_ALIASES:
module = MODULE_ALIASES[module]
module_kwargs['module'] = module
groups.append((title, module_kwargs),)
return groups
def is_cms_app(app_name):
"""
Return whether the given application is a CMS app
"""
for pat in appsettings.FLUENT_DASHBOARD_CMS_APP_NAMES:
if fnmatch(app_name, pat):
return True
return False
def get_cms_model_order(model_name):
"""
Return a numeric ordering for a model name.
"""
for (name, order) in iteritems(appsettings.FLUENT_DASHBOARD_CMS_MODEL_ORDER):
if name in model_name:
return order
return 999
def get_class(import_path):
"""
Import a class by name.
"""
# Used from django-form-designer
# Copyright (c) 2009, Samuel Luescher, BSD licensed
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("{0} isn't a Python path.".format(import_path))
module, classname = import_path[:dot], import_path[dot + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module {0}: "{1}"'.format(module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "{0}" does not define a "{1}" class.'.format(module, classname))
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/appgroups.py
|
is_cms_app
|
python
|
def is_cms_app(app_name):
for pat in appsettings.FLUENT_DASHBOARD_CMS_APP_NAMES:
if fnmatch(app_name, pat):
return True
return False
|
Return whether the given application is a CMS app
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/appgroups.py#L69-L77
| null |
"""
Splitting and organizing applications and models into groups.
This module is mostly meant for internal use.
"""
from fnmatch import fnmatch
from future.utils import iteritems
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from fluent_dashboard import appsettings
import itertools
_groups = [groupdict['models'] for _, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS]
ALL_KNOWN_APPS = list(itertools.chain(*_groups))
if '*' in ALL_KNOWN_APPS:
ALL_KNOWN_APPS.remove('*') # Default for CMS group, but not useful here.
MODULE_ALIASES = {
'AppList': 'admin_tools.dashboard.modules.AppList',
'ModelList': 'admin_tools.dashboard.modules.ModelList',
'AppIconList': 'fluent_dashboard.modules.AppIconList',
'CmsAppIconList': 'fluent_dashboard.modules.CmsAppIconList',
}
def get_application_groups():
"""
Return the applications of the system, organized in various groups.
These groups are not connected with the application names,
but rather with a pattern of applications.
"""
groups = []
for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:
# Allow to pass all possible arguments to the DashboardModule class.
module_kwargs = groupdict.copy()
# However, the 'models' is treated special, to have catch-all support.
if '*' in groupdict['models']:
default_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE
module_kwargs['exclude'] = ALL_KNOWN_APPS + list(module_kwargs.get('exclude', []))
del module_kwargs['models']
else:
default_module = 'CmsAppIconList'
# Get module to display, can be a alias for known variations.
module = groupdict.get('module', default_module)
if module in MODULE_ALIASES:
module = MODULE_ALIASES[module]
module_kwargs['module'] = module
groups.append((title, module_kwargs),)
return groups
def sort_cms_models(cms_models):
"""
Sort a set of CMS-related models in a custom (predefined) order.
"""
cms_models.sort(key=lambda model: (
get_cms_model_order(model['name']) if is_cms_app(model['app_name']) else 999,
model['app_name'],
model['title']
))
def get_cms_model_order(model_name):
"""
Return a numeric ordering for a model name.
"""
for (name, order) in iteritems(appsettings.FLUENT_DASHBOARD_CMS_MODEL_ORDER):
if name in model_name:
return order
return 999
def get_class(import_path):
"""
Import a class by name.
"""
# Used from django-form-designer
# Copyright (c) 2009, Samuel Luescher, BSD licensed
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("{0} isn't a Python path.".format(import_path))
module, classname = import_path[:dot], import_path[dot + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module {0}: "{1}"'.format(module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "{0}" does not define a "{1}" class.'.format(module, classname))
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/appgroups.py
|
get_cms_model_order
|
python
|
def get_cms_model_order(model_name):
for (name, order) in iteritems(appsettings.FLUENT_DASHBOARD_CMS_MODEL_ORDER):
if name in model_name:
return order
return 999
|
Return a numeric ordering for a model name.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/appgroups.py#L80-L87
| null |
"""
Splitting and organizing applications and models into groups.
This module is mostly meant for internal use.
"""
from fnmatch import fnmatch
from future.utils import iteritems
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from fluent_dashboard import appsettings
import itertools
_groups = [groupdict['models'] for _, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS]
ALL_KNOWN_APPS = list(itertools.chain(*_groups))
if '*' in ALL_KNOWN_APPS:
ALL_KNOWN_APPS.remove('*') # Default for CMS group, but not useful here.
MODULE_ALIASES = {
'AppList': 'admin_tools.dashboard.modules.AppList',
'ModelList': 'admin_tools.dashboard.modules.ModelList',
'AppIconList': 'fluent_dashboard.modules.AppIconList',
'CmsAppIconList': 'fluent_dashboard.modules.CmsAppIconList',
}
def get_application_groups():
"""
Return the applications of the system, organized in various groups.
These groups are not connected with the application names,
but rather with a pattern of applications.
"""
groups = []
for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:
# Allow to pass all possible arguments to the DashboardModule class.
module_kwargs = groupdict.copy()
# However, the 'models' is treated special, to have catch-all support.
if '*' in groupdict['models']:
default_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE
module_kwargs['exclude'] = ALL_KNOWN_APPS + list(module_kwargs.get('exclude', []))
del module_kwargs['models']
else:
default_module = 'CmsAppIconList'
# Get module to display, can be a alias for known variations.
module = groupdict.get('module', default_module)
if module in MODULE_ALIASES:
module = MODULE_ALIASES[module]
module_kwargs['module'] = module
groups.append((title, module_kwargs),)
return groups
def sort_cms_models(cms_models):
"""
Sort a set of CMS-related models in a custom (predefined) order.
"""
cms_models.sort(key=lambda model: (
get_cms_model_order(model['name']) if is_cms_app(model['app_name']) else 999,
model['app_name'],
model['title']
))
def is_cms_app(app_name):
"""
Return whether the given application is a CMS app
"""
for pat in appsettings.FLUENT_DASHBOARD_CMS_APP_NAMES:
if fnmatch(app_name, pat):
return True
return False
def get_class(import_path):
"""
Import a class by name.
"""
# Used from django-form-designer
# Copyright (c) 2009, Samuel Luescher, BSD licensed
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("{0} isn't a Python path.".format(import_path))
module, classname = import_path[:dot], import_path[dot + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module {0}: "{1}"'.format(module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "{0}" does not define a "{1}" class.'.format(module, classname))
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/menu.py
|
FluentMenu.init_with_context
|
python
|
def init_with_context(self, context):
site_name = get_admin_site_name(context)
self.children += [
items.MenuItem(_('Dashboard'), reverse('{0}:index'.format(site_name))),
items.Bookmarks(),
]
for title, kwargs in get_application_groups():
if kwargs.get('enabled', True):
self.children.append(CmsModelList(title, **kwargs))
self.children += [
ReturnToSiteItem()
]
|
Initialize the menu items.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/menu.py#L31-L48
|
[
"def get_application_groups():\n \"\"\"\n Return the applications of the system, organized in various groups.\n\n These groups are not connected with the application names,\n but rather with a pattern of applications.\n \"\"\"\n\n groups = []\n for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:\n # Allow to pass all possible arguments to the DashboardModule class.\n module_kwargs = groupdict.copy()\n\n # However, the 'models' is treated special, to have catch-all support.\n if '*' in groupdict['models']:\n default_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE\n module_kwargs['exclude'] = ALL_KNOWN_APPS + list(module_kwargs.get('exclude', []))\n del module_kwargs['models']\n else:\n default_module = 'CmsAppIconList'\n\n # Get module to display, can be a alias for known variations.\n module = groupdict.get('module', default_module)\n if module in MODULE_ALIASES:\n module = MODULE_ALIASES[module]\n module_kwargs['module'] = module\n groups.append((title, module_kwargs),)\n\n return groups\n"
] |
class FluentMenu(Menu):
"""
Custom Menu for admin site.
The top level menu items created by this menu reflect the application groups
defined in :ref:`FLUENT_DASHBOARD_APP_GROUPS`. By using both
the :class:`~fluent_dashboard.dashboard.FluentIndexDashboard` and this class,
the menu and dashboard modules at the admin index page will consistent.
The :class:`~fluent_dashboard.items.ReturnToSiteItem` is also added at the end of the menu.
To activate the menu add the following to your settings.py::
ADMIN_TOOLS_MENU = 'fluent_dashboard.menu.FluentMenu'
"""
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/modules.py
|
PersonalModule.init_with_context
|
python
|
def init_with_context(self, context):
super(PersonalModule, self).init_with_context(context)
current_user = context['request'].user
if django.VERSION < (1, 5):
current_username = current_user.first_name or current_user.username
else:
current_username = current_user.get_short_name() or current_user.get_username()
site_name = get_admin_site_name(context)
# Personalize
self.title = _('Welcome,') + ' ' + (current_username)
# Expose links
self.pages_link = None
self.pages_title = None
self.password_link = reverse('{0}:password_change'.format(site_name))
self.logout_link = reverse('{0}:logout'.format(site_name))
if self.cms_page_model:
try:
app_label, model_name = self.cms_page_model
model = apps.get_model(app_label, model_name)
pages_title = model._meta.verbose_name_plural.lower()
pages_link = reverse('{site}:{app}_{model}_changelist'.format(site=site_name, app=app_label.lower(), model=model_name.lower()))
except AttributeError:
raise ImproperlyConfigured("The value {0} of FLUENT_DASHBOARD_CMS_PAGE_MODEL setting (or cms_page_model value) does not reffer to an existing model.".format(self.cms_page_model))
except NoReverseMatch:
pass
else:
# Also check if the user has permission to view the module.
# TODO: When there are modules that use Django 1.8's has_module_permission, add the support here.
permission_name = 'change_{0}'.format(model._meta.model_name.lower())
if current_user.has_perm('{0}.{1}'.format(model._meta.app_label, permission_name)):
self.pages_title = pages_title
self.pages_link = pages_link
|
Initializes the link list.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/modules.py#L61-L99
| null |
class PersonalModule(modules.LinkList):
"""
A simple module to display a welcome message.
.. image:: /images/personalmodule.png
:width: 471px
:height: 77px
:alt: PersonalModule for django-fluent-dashboard
It renders the template ``fluent_dashboard/modules/personal.html``,
unless the ``template`` variable is overwritten.
The module overrides :class:`~admin_tools.dashboard.modules.LinkList`,
allowing links to be added to the element.
The :ref:`FLUENT_DASHBOARD_CMS_PAGE_MODEL` setting is used to display a link to the pages module.
If this setting is not defined, a general text will be displayed instead.
"""
# Set admin_tools defaults
draggable = False
deletable = False
collapsible = False
#: Define the title to display
title = _('Welcome,')
#: The model to use for the CMS pages link.
cms_page_model = appsettings.FLUENT_DASHBOARD_CMS_PAGE_MODEL
#: Define the template to render
template = 'fluent_dashboard/modules/personal.html'
def is_empty(self):
# Make sure the element is rendered.
return False
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/modules.py
|
AppIconList.init_with_context
|
python
|
def init_with_context(self, context):
super(AppIconList, self).init_with_context(context)
apps = self.children
# Standard model only has a title, change_url and add_url.
# Restore the app_name and name, so icons can be matched.
for app in apps:
app_name = self._get_app_name(app)
app['name'] = app_name
for model in app['models']:
try:
model_name = self._get_model_name(model)
model['name'] = model_name
model['icon'] = self.get_icon_for_model(app_name, model_name) or appsettings.FLUENT_DASHBOARD_DEFAULT_ICON
except ValueError:
model['icon'] = appsettings.FLUENT_DASHBOARD_DEFAULT_ICON
# Automatically add STATIC_URL before relative icon paths.
model['icon'] = self.get_icon_url(model['icon'])
model['app_name'] = app_name
|
Initializes the icon list.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/modules.py#L126-L149
|
[
"def _get_app_name(self, appdata):\n \"\"\"\n Extract the app name from the ``appdata`` that *django-admin-tools* provides.\n \"\"\"\n return appdata['url'].strip('/').split('/')[-1] # /foo/admin/appname/\n",
"def _get_model_name(self, modeldata):\n \"\"\"\n Extract the model name from the ``modeldata`` that *django-admin-tools* provides.\n \"\"\"\n if 'change_url' in modeldata:\n return modeldata['change_url'].strip('/').split('/')[-1] # /foo/admin/appname/modelname\n elif 'add_url' in modeldata:\n return modeldata['add_url'].strip('/').split('/')[-2] # /foo/admin/appname/modelname/add\n else:\n raise ValueError(\"Missing attributes in modeldata to find the model name!\")\n",
"def get_icon_for_model(self, app_name, model_name, default=None):\n \"\"\"\n Return the icon for the given model.\n It reads the :ref:`FLUENT_DASHBOARD_APP_ICONS` setting.\n \"\"\"\n key = \"{0}/{1}\".format(app_name, model_name)\n return appsettings.FLUENT_DASHBOARD_APP_ICONS.get(key, default)\n",
"def get_icon_url(self, icon):\n \"\"\"\n Replaces the \"icon name\" with a full usable URL.\n\n * When the icon is an absolute URL, it is used as-is.\n * When the icon contains a slash, it is relative from the ``STATIC_URL``.\n * Otherwise, it's relative to the theme url folder.\n \"\"\"\n if not icon.startswith('/') \\\n and not icon.startswith('http://') \\\n and not icon.startswith('https://'):\n if '/' in icon:\n return self.icon_static_root + icon\n else:\n return self.icon_theme_root + icon\n else:\n return icon\n"
] |
class AppIconList(modules.AppList):
"""
The list of applications, icon style.
.. image:: /images/appiconlist.png
:width: 471px
:height: 124px
:alt: AppIconList module for django-fluent-dashboard
It uses the ``FLUENT_DASHBOARD_APP_ICONS`` setting to find application icons.
"""
#: Specify the template to render
template = 'fluent_dashboard/modules/app_icon_list.html'
#: The current static root (considered read only)
icon_static_root = settings.STATIC_URL
#: The current theme folder (considerd read only)
icon_theme_root = "{0}fluent_dashboard/{1}/".format(icon_static_root, appsettings.FLUENT_DASHBOARD_ICON_THEME)
def _get_app_name(self, appdata):
"""
Extract the app name from the ``appdata`` that *django-admin-tools* provides.
"""
return appdata['url'].strip('/').split('/')[-1] # /foo/admin/appname/
def _get_model_name(self, modeldata):
"""
Extract the model name from the ``modeldata`` that *django-admin-tools* provides.
"""
if 'change_url' in modeldata:
return modeldata['change_url'].strip('/').split('/')[-1] # /foo/admin/appname/modelname
elif 'add_url' in modeldata:
return modeldata['add_url'].strip('/').split('/')[-2] # /foo/admin/appname/modelname/add
else:
raise ValueError("Missing attributes in modeldata to find the model name!")
def get_icon_for_model(self, app_name, model_name, default=None):
"""
Return the icon for the given model.
It reads the :ref:`FLUENT_DASHBOARD_APP_ICONS` setting.
"""
key = "{0}/{1}".format(app_name, model_name)
return appsettings.FLUENT_DASHBOARD_APP_ICONS.get(key, default)
def get_icon_url(self, icon):
"""
Replaces the "icon name" with a full usable URL.
* When the icon is an absolute URL, it is used as-is.
* When the icon contains a slash, it is relative from the ``STATIC_URL``.
* Otherwise, it's relative to the theme url folder.
"""
if not icon.startswith('/') \
and not icon.startswith('http://') \
and not icon.startswith('https://'):
if '/' in icon:
return self.icon_static_root + icon
else:
return self.icon_theme_root + icon
else:
return icon
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/modules.py
|
AppIconList._get_model_name
|
python
|
def _get_model_name(self, modeldata):
if 'change_url' in modeldata:
return modeldata['change_url'].strip('/').split('/')[-1] # /foo/admin/appname/modelname
elif 'add_url' in modeldata:
return modeldata['add_url'].strip('/').split('/')[-2] # /foo/admin/appname/modelname/add
else:
raise ValueError("Missing attributes in modeldata to find the model name!")
|
Extract the model name from the ``modeldata`` that *django-admin-tools* provides.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/modules.py#L157-L166
| null |
class AppIconList(modules.AppList):
"""
The list of applications, icon style.
.. image:: /images/appiconlist.png
:width: 471px
:height: 124px
:alt: AppIconList module for django-fluent-dashboard
It uses the ``FLUENT_DASHBOARD_APP_ICONS`` setting to find application icons.
"""
#: Specify the template to render
template = 'fluent_dashboard/modules/app_icon_list.html'
#: The current static root (considered read only)
icon_static_root = settings.STATIC_URL
#: The current theme folder (considerd read only)
icon_theme_root = "{0}fluent_dashboard/{1}/".format(icon_static_root, appsettings.FLUENT_DASHBOARD_ICON_THEME)
def init_with_context(self, context):
"""
Initializes the icon list.
"""
super(AppIconList, self).init_with_context(context)
apps = self.children
# Standard model only has a title, change_url and add_url.
# Restore the app_name and name, so icons can be matched.
for app in apps:
app_name = self._get_app_name(app)
app['name'] = app_name
for model in app['models']:
try:
model_name = self._get_model_name(model)
model['name'] = model_name
model['icon'] = self.get_icon_for_model(app_name, model_name) or appsettings.FLUENT_DASHBOARD_DEFAULT_ICON
except ValueError:
model['icon'] = appsettings.FLUENT_DASHBOARD_DEFAULT_ICON
# Automatically add STATIC_URL before relative icon paths.
model['icon'] = self.get_icon_url(model['icon'])
model['app_name'] = app_name
def _get_app_name(self, appdata):
"""
Extract the app name from the ``appdata`` that *django-admin-tools* provides.
"""
return appdata['url'].strip('/').split('/')[-1] # /foo/admin/appname/
def get_icon_for_model(self, app_name, model_name, default=None):
"""
Return the icon for the given model.
It reads the :ref:`FLUENT_DASHBOARD_APP_ICONS` setting.
"""
key = "{0}/{1}".format(app_name, model_name)
return appsettings.FLUENT_DASHBOARD_APP_ICONS.get(key, default)
def get_icon_url(self, icon):
"""
Replaces the "icon name" with a full usable URL.
* When the icon is an absolute URL, it is used as-is.
* When the icon contains a slash, it is relative from the ``STATIC_URL``.
* Otherwise, it's relative to the theme url folder.
"""
if not icon.startswith('/') \
and not icon.startswith('http://') \
and not icon.startswith('https://'):
if '/' in icon:
return self.icon_static_root + icon
else:
return self.icon_theme_root + icon
else:
return icon
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/modules.py
|
AppIconList.get_icon_for_model
|
python
|
def get_icon_for_model(self, app_name, model_name, default=None):
key = "{0}/{1}".format(app_name, model_name)
return appsettings.FLUENT_DASHBOARD_APP_ICONS.get(key, default)
|
Return the icon for the given model.
It reads the :ref:`FLUENT_DASHBOARD_APP_ICONS` setting.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/modules.py#L168-L174
| null |
class AppIconList(modules.AppList):
"""
The list of applications, icon style.
.. image:: /images/appiconlist.png
:width: 471px
:height: 124px
:alt: AppIconList module for django-fluent-dashboard
It uses the ``FLUENT_DASHBOARD_APP_ICONS`` setting to find application icons.
"""
#: Specify the template to render
template = 'fluent_dashboard/modules/app_icon_list.html'
#: The current static root (considered read only)
icon_static_root = settings.STATIC_URL
#: The current theme folder (considerd read only)
icon_theme_root = "{0}fluent_dashboard/{1}/".format(icon_static_root, appsettings.FLUENT_DASHBOARD_ICON_THEME)
def init_with_context(self, context):
"""
Initializes the icon list.
"""
super(AppIconList, self).init_with_context(context)
apps = self.children
# Standard model only has a title, change_url and add_url.
# Restore the app_name and name, so icons can be matched.
for app in apps:
app_name = self._get_app_name(app)
app['name'] = app_name
for model in app['models']:
try:
model_name = self._get_model_name(model)
model['name'] = model_name
model['icon'] = self.get_icon_for_model(app_name, model_name) or appsettings.FLUENT_DASHBOARD_DEFAULT_ICON
except ValueError:
model['icon'] = appsettings.FLUENT_DASHBOARD_DEFAULT_ICON
# Automatically add STATIC_URL before relative icon paths.
model['icon'] = self.get_icon_url(model['icon'])
model['app_name'] = app_name
def _get_app_name(self, appdata):
"""
Extract the app name from the ``appdata`` that *django-admin-tools* provides.
"""
return appdata['url'].strip('/').split('/')[-1] # /foo/admin/appname/
def _get_model_name(self, modeldata):
"""
Extract the model name from the ``modeldata`` that *django-admin-tools* provides.
"""
if 'change_url' in modeldata:
return modeldata['change_url'].strip('/').split('/')[-1] # /foo/admin/appname/modelname
elif 'add_url' in modeldata:
return modeldata['add_url'].strip('/').split('/')[-2] # /foo/admin/appname/modelname/add
else:
raise ValueError("Missing attributes in modeldata to find the model name!")
def get_icon_url(self, icon):
"""
Replaces the "icon name" with a full usable URL.
* When the icon is an absolute URL, it is used as-is.
* When the icon contains a slash, it is relative from the ``STATIC_URL``.
* Otherwise, it's relative to the theme url folder.
"""
if not icon.startswith('/') \
and not icon.startswith('http://') \
and not icon.startswith('https://'):
if '/' in icon:
return self.icon_static_root + icon
else:
return self.icon_theme_root + icon
else:
return icon
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/modules.py
|
AppIconList.get_icon_url
|
python
|
def get_icon_url(self, icon):
if not icon.startswith('/') \
and not icon.startswith('http://') \
and not icon.startswith('https://'):
if '/' in icon:
return self.icon_static_root + icon
else:
return self.icon_theme_root + icon
else:
return icon
|
Replaces the "icon name" with a full usable URL.
* When the icon is an absolute URL, it is used as-is.
* When the icon contains a slash, it is relative from the ``STATIC_URL``.
* Otherwise, it's relative to the theme url folder.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/modules.py#L176-L192
| null |
class AppIconList(modules.AppList):
"""
The list of applications, icon style.
.. image:: /images/appiconlist.png
:width: 471px
:height: 124px
:alt: AppIconList module for django-fluent-dashboard
It uses the ``FLUENT_DASHBOARD_APP_ICONS`` setting to find application icons.
"""
#: Specify the template to render
template = 'fluent_dashboard/modules/app_icon_list.html'
#: The current static root (considered read only)
icon_static_root = settings.STATIC_URL
#: The current theme folder (considerd read only)
icon_theme_root = "{0}fluent_dashboard/{1}/".format(icon_static_root, appsettings.FLUENT_DASHBOARD_ICON_THEME)
def init_with_context(self, context):
"""
Initializes the icon list.
"""
super(AppIconList, self).init_with_context(context)
apps = self.children
# Standard model only has a title, change_url and add_url.
# Restore the app_name and name, so icons can be matched.
for app in apps:
app_name = self._get_app_name(app)
app['name'] = app_name
for model in app['models']:
try:
model_name = self._get_model_name(model)
model['name'] = model_name
model['icon'] = self.get_icon_for_model(app_name, model_name) or appsettings.FLUENT_DASHBOARD_DEFAULT_ICON
except ValueError:
model['icon'] = appsettings.FLUENT_DASHBOARD_DEFAULT_ICON
# Automatically add STATIC_URL before relative icon paths.
model['icon'] = self.get_icon_url(model['icon'])
model['app_name'] = app_name
def _get_app_name(self, appdata):
"""
Extract the app name from the ``appdata`` that *django-admin-tools* provides.
"""
return appdata['url'].strip('/').split('/')[-1] # /foo/admin/appname/
def _get_model_name(self, modeldata):
"""
Extract the model name from the ``modeldata`` that *django-admin-tools* provides.
"""
if 'change_url' in modeldata:
return modeldata['change_url'].strip('/').split('/')[-1] # /foo/admin/appname/modelname
elif 'add_url' in modeldata:
return modeldata['add_url'].strip('/').split('/')[-2] # /foo/admin/appname/modelname/add
else:
raise ValueError("Missing attributes in modeldata to find the model name!")
def get_icon_for_model(self, app_name, model_name, default=None):
"""
Return the icon for the given model.
It reads the :ref:`FLUENT_DASHBOARD_APP_ICONS` setting.
"""
key = "{0}/{1}".format(app_name, model_name)
return appsettings.FLUENT_DASHBOARD_APP_ICONS.get(key, default)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.