hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c47ea87396ab3c1281919e883bf6c4c9a9cdcd1 | 757 | py | Python | mysite/polls/admin.py | thomasbtf/hello-django | 37c5188667d6eeb2043e3bdd45294dd16e754507 | [
"MIT"
] | null | null | null | mysite/polls/admin.py | thomasbtf/hello-django | 37c5188667d6eeb2043e3bdd45294dd16e754507 | [
"MIT"
] | null | null | null | mysite/polls/admin.py | thomasbtf/hello-django | 37c5188667d6eeb2043e3bdd45294dd16e754507 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Choice, Question
class ChoiceInline(admin.TabularInline):
    # Render Choice editing rows as a compact table inside the parent
    # Question's admin page.
    model = Choice
    # Number of empty Choice forms to show by default.
    extra = 3
class QuestionAdmin(admin.ModelAdmin):
    # Admin form layout: question text first, then the publication date in a
    # collapsible "Date information" section.
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
    ]
    # Edit related Choice objects inline on the Question change page.
    inlines = [ChoiceInline]
    # Sidebar filter and search box in the changelist view.
    list_filter = ['pub_date']
    search_fields = ['question_text']

    @admin.display(
        boolean=True,
        ordering='pub_date',
        description='Published recently?',
    )
    def was_published_recently(self):
        # NOTE(review): neither 'timezone' nor 'datetime' is imported in this
        # file, so calling this raises NameError. This method presumably
        # belongs on the Question model (where those imports usually live) —
        # confirm and move or add the imports.
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
admin.site.register(Question, QuestionAdmin) | 26.103448 | 80 | 0.630119 | from django.contrib import admin
from .models import Choice, Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_filter = ['pub_date']
search_fields = ['question_text']
@admin.display(
boolean=True,
ordering='pub_date',
description='Published recently?',
)
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
admin.site.register(Question, QuestionAdmin) | true | true |
1c47eaf899d1f4ffce8bd8e362130e85c306d2bf | 1,239 | py | Python | jip/cli/jip_cancel.py | VDBWRAIR/pyjip | dc147afebbabd550828fa51cc052db4aa07c5d3b | [
"BSD-3-Clause"
] | 18 | 2015-05-08T06:39:09.000Z | 2020-11-30T10:51:36.000Z | jip/cli/jip_cancel.py | VDBWRAIR/pyjip | dc147afebbabd550828fa51cc052db4aa07c5d3b | [
"BSD-3-Clause"
] | 9 | 2015-01-02T09:55:53.000Z | 2016-02-03T18:31:10.000Z | jip/cli/jip_cancel.py | VDBWRAIR/pyjip | dc147afebbabd550828fa51cc052db4aa07c5d3b | [
"BSD-3-Clause"
] | 5 | 2016-02-01T16:52:36.000Z | 2021-03-10T12:08:39.000Z | #!/usr/bin/env python
"""
Cancel jip jobs
Usage:
jip-cancel [-j <id>...] [-J <cid>...] [--clean]
jip-cancel [--help|-h]
Options:
--clean Remove the logfiles
-j, --job <id>... List jobs with specified id
-J, --cluster-job <cid>... List jobs with specified cluster id
-h --help Show this help message
"""
import jip.db
import jip.jobs
from . import parse_args, parse_job_ids, confirm
import sys
def main():
    # Parse CLI options according to the docopt usage string in the module
    # docstring (__doc__).
    args = parse_args(__doc__, options_first=False)
    job_ids, cluster_ids = parse_job_ids(args)
    # NOTE(review): archived=None presumably disables filtering on the
    # archived flag so both archived and active jobs match — confirm in
    # jip.db.query.
    jobs = jip.db.query(job_ids=job_ids, cluster_ids=cluster_ids,
                        archived=None)
    jobs = list(jobs)
    # Nothing matched: exit quietly without prompting.
    if len(jobs) == 0:
        return
    # get full pipelines
    jobs = jip.jobs.resolve_jobs(jobs)
    # Prompt before cancelling; the final False is the default answer.
    if confirm("Are you sure you want "
               "to cancel %d jobs" % len(jobs),
               False):
        print >>sys.stderr, "Cancelling %s jobs" % len(jobs)
        for job in jobs:
            # cancel() reports per-job success; only report jobs that were
            # actually cancelled.
            if jip.jobs.cancel(job, clean_logs=args['--clean'],
                               save=True, cancel_children=False):
                print >>sys.stderr, "Canceled %s" % job.id


if __name__ == "__main__":
    main()
| 26.934783 | 67 | 0.560936 |
import jip.db
import jip.jobs
from . import parse_args, parse_job_ids, confirm
import sys
def main():
args = parse_args(__doc__, options_first=False)
job_ids, cluster_ids = parse_job_ids(args)
jobs = jip.db.query(job_ids=job_ids, cluster_ids=cluster_ids,
archived=None)
jobs = list(jobs)
if len(jobs) == 0:
return
jobs = jip.jobs.resolve_jobs(jobs)
if confirm("Are you sure you want "
"to cancel %d jobs" % len(jobs),
False):
print >>sys.stderr, "Cancelling %s jobs" % len(jobs)
for job in jobs:
if jip.jobs.cancel(job, clean_logs=args['--clean'],
save=True, cancel_children=False):
print >>sys.stderr, "Canceled %s" % job.id
if __name__ == "__main__":
main()
| true | true |
1c47eaffe4e7767186947540b4dc6ce1552877e7 | 311 | py | Python | tests/conftest.py | valr/flask-webhook | 864a86ad645a958fa7eee9cc1622ca84a79e5801 | [
"MIT"
] | null | null | null | tests/conftest.py | valr/flask-webhook | 864a86ad645a958fa7eee9cc1622ca84a79e5801 | [
"MIT"
] | null | null | null | tests/conftest.py | valr/flask-webhook | 864a86ad645a958fa7eee9cc1622ca84a79e5801 | [
"MIT"
] | null | null | null | import os
import pytest
from application import create_application
@pytest.fixture
def application():
    """Yield the application under test with testing mode switched on."""
    app = create_application(os.environ.get("INSTANCE_PATH"))
    app.testing = True
    return app
@pytest.fixture
def client(application):
    """Provide a test client bound to the application fixture."""
    test_client = application.test_client()
    return test_client
| 16.368421 | 69 | 0.771704 | import os
import pytest
from application import create_application
@pytest.fixture
def application():
application = create_application(os.environ.get("INSTANCE_PATH"))
application.testing = True
return application
@pytest.fixture
def client(application):
return application.test_client()
| true | true |
1c47ec05d04593b3dbbb896032df03930856074c | 40,188 | py | Python | core/domain/topic_services.py | sagangwee/oppia | c4bf0673b4d3ec30cff609109241656f71a63a82 | [
"Apache-2.0"
] | null | null | null | core/domain/topic_services.py | sagangwee/oppia | c4bf0673b4d3ec30cff609109241656f71a63a82 | [
"Apache-2.0"
] | 7 | 2019-08-20T08:30:43.000Z | 2022-02-12T18:47:57.000Z | core/domain/topic_services.py | ledriod/oppia | 4f8f95c6689cd36f0b65672b80d98a3463b001f8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.]
"""Commands for operations on topics, and related models."""
import collections
import copy
import logging
from core.domain import role_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import user_services
from core.platform import models
import feconf
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
datastore_services = models.Registry.import_datastore_services()
memcache_services = models.Registry.import_memcache_services()
def _migrate_subtopics_to_latest_schema(versioned_subtopics):
    """Upgrades a subtopics structure, in place, to the latest schema.

    The conversion runs one schema version at a time until the structure
    reaches feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION. Whenever the subtopic
    schema changes, a new conversion step must be registered on
    topic_domain.Topic and this chain picks it up automatically.

    Args:
        versioned_subtopics: dict. A dict with two keys:
            - schema_version: int. The schema version for the subtopics dict.
            - subtopics: list(dict). The list of dicts comprising the topic's
                subtopics.

    Raises:
        Exception: The schema version of subtopics is outside of what
            is supported at present.
    """
    current_version = versioned_subtopics['schema_version']
    latest_version = feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION
    if current_version < 1 or current_version > latest_version:
        raise Exception(
            'Sorry, we can only process v1-v%d subtopic schemas at '
            'present.' % latest_version)

    # Apply each single-step converter in sequence until up to date.
    while current_version < latest_version:
        topic_domain.Topic.update_subtopics_from_model(
            versioned_subtopics, current_version)
        current_version += 1
# Repository GET methods.
def _get_topic_memcache_key(topic_id, version=None):
    """Builds the memcache key under which a topic is cached.

    Args:
        topic_id: str. ID of the topic.
        version: int or None. The topic version, or None to address the
            latest version of the topic.

    Returns:
        str. The memcache key of the topic.
    """
    return (
        'topic-version:%s:%s' % (topic_id, version)
        if version else 'topic:%s' % topic_id)
def get_topic_from_model(topic_model):
    """Returns a topic domain object given a topic model loaded
    from the datastore.

    Args:
        topic_model: TopicModel. The topic model loaded from the
            datastore.

    Returns:
        topic. A Topic domain object corresponding to the given
        topic model.
    """
    # Bundle the stored subtopics with their schema version so the migration
    # helper can upgrade them in place if they predate the current schema.
    # deepcopy keeps the model's stored subtopics untouched.
    versioned_subtopics = {
        'schema_version': topic_model.subtopic_schema_version,
        'subtopics': copy.deepcopy(topic_model.subtopics)
    }
    if (topic_model.subtopic_schema_version !=
            feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
        _migrate_subtopics_to_latest_schema(versioned_subtopics)
    return topic_domain.Topic(
        topic_model.id, topic_model.name,
        topic_model.description, topic_model.canonical_story_ids,
        topic_model.additional_story_ids, topic_model.uncategorized_skill_ids,
        [
            topic_domain.Subtopic.from_dict(subtopic)
            for subtopic in versioned_subtopics['subtopics']
        ],
        versioned_subtopics['schema_version'],
        topic_model.next_subtopic_id,
        topic_model.language_code,
        topic_model.version, topic_model.created_on,
        topic_model.last_updated)
def get_all_topic_summaries():
    """Returns the summaries of all topics present in the datastore.

    Returns:
        list(TopicSummary). The list of summaries of all topics present in
        the datastore.
    """
    summaries = []
    for summary_model in topic_models.TopicSummaryModel.get_all():
        summaries.append(get_topic_summary_from_model(summary_model))
    return summaries
def get_all_skill_ids_assigned_to_some_topic():
    """Returns the ids of all the skills that are linked to some topics.

    Returns:
        set([str]). The ids of all the skills linked to some topic.
    """
    skill_ids = set([])
    for topic_model in topic_models.TopicModel.get_all():
        topic = get_topic_from_model(topic_model)
        skill_ids.update(topic.get_all_skill_ids())
    return skill_ids
def get_topic_summary_from_model(topic_summary_model):
    """Returns a domain object for an Oppia topic summary given a
    topic summary model.

    Args:
        topic_summary_model: TopicSummaryModel. The model to convert.

    Returns:
        TopicSummary. The corresponding domain object, carrying over the
        model's identifying fields, counts and timestamps.
    """
    # NOTE: the arguments are positional, so their order must match the
    # TopicSummary constructor exactly.
    return topic_domain.TopicSummary(
        topic_summary_model.id, topic_summary_model.name,
        topic_summary_model.canonical_name,
        topic_summary_model.language_code,
        topic_summary_model.version,
        topic_summary_model.canonical_story_count,
        topic_summary_model.additional_story_count,
        topic_summary_model.uncategorized_skill_count,
        topic_summary_model.subtopic_count,
        topic_summary_model.total_skill_count,
        topic_summary_model.topic_model_created_on,
        topic_summary_model.topic_model_last_updated
    )
def get_topic_by_id(topic_id, strict=True, version=None):
    """Fetches a topic, preferring the memcache copy over the datastore.

    Args:
        topic_id: str. ID of the topic.
        strict: bool. Whether to fail noisily if no topic with the given
            id exists in the datastore.
        version: int or None. The version number of the topic to be
            retrieved. If it is None, the latest version will be retrieved.

    Returns:
        Topic or None. The domain object representing a topic with the
        given id, or None if it does not exist.
    """
    memcache_key = _get_topic_memcache_key(topic_id, version=version)
    cached_topic = memcache_services.get_multi(
        [memcache_key]).get(memcache_key)
    if cached_topic is not None:
        return cached_topic

    topic_model = topic_models.TopicModel.get(
        topic_id, strict=strict, version=version)
    if not topic_model:
        return None
    # Populate the cache so subsequent reads skip the datastore.
    topic = get_topic_from_model(topic_model)
    memcache_services.set_multi({memcache_key: topic})
    return topic
def get_topics_by_ids(topic_ids):
    """Returns a list of topics matching the IDs provided.

    Args:
        topic_ids: list(str). List of IDs to get topics for.

    Returns:
        list(Topic|None). The list of topics corresponding to given ids
        (with None in place of topic ids corresponding to deleted topics).
    """
    topics = []
    for topic_model in topic_models.TopicModel.get_multi(topic_ids):
        if topic_model is None:
            topics.append(None)
        else:
            topics.append(get_topic_from_model(topic_model))
    return topics
def get_topic_by_name(topic_name):
    """Looks up a topic by its (non-canonicalized) name.

    Args:
        topic_name: str. The name of the topic.

    Returns:
        Topic or None. The domain object representing the topic with the
        given name, or None if no such topic exists.
    """
    topic_model = topic_models.TopicModel.get_by_name(topic_name)
    return (
        get_topic_from_model(topic_model)
        if topic_model is not None else None)
def get_topic_summary_by_id(topic_id, strict=True):
    """Returns a domain object representing a topic summary.

    Args:
        topic_id: str. ID of the topic summary.
        strict: bool. Whether to fail noisily if no topic summary with the
            given id exists in the datastore.

    Returns:
        TopicSummary or None. The topic summary domain object corresponding
        to a topic with the given topic_id, if it exists, or else None.
    """
    summary_model = topic_models.TopicSummaryModel.get(
        topic_id, strict=strict)
    if not summary_model:
        return None
    return get_topic_summary_from_model(summary_model)
def get_new_topic_id():
    """Generates a fresh, unused topic id.

    Returns:
        str. A new topic id.
    """
    new_id = topic_models.TopicModel.get_new_id('')
    return new_id
def _create_topic(committer_id, topic, commit_message, commit_cmds):
    """Creates a new topic, and ensures that rights for a new topic
    are saved first.

    Args:
        committer_id: str. ID of the committer.
        topic: Topic. topic domain object.
        commit_message: str. A description of changes made to the topic.
        commit_cmds: list(TopicChange). A list of TopicChange objects that
            represent change commands made to the given topic.
    """
    topic.validate()
    # Rights are created before the topic model is committed, so a topic is
    # never persisted without an associated rights entry.
    create_new_topic_rights(topic.id, committer_id)
    model = topic_models.TopicModel(
        id=topic.id,
        name=topic.name,
        canonical_name=topic.canonical_name,
        description=topic.description,
        language_code=topic.language_code,
        canonical_story_ids=topic.canonical_story_ids,
        additional_story_ids=topic.additional_story_ids,
        uncategorized_skill_ids=topic.uncategorized_skill_ids,
        subtopic_schema_version=topic.subtopic_schema_version,
        next_subtopic_id=topic.next_subtopic_id,
        subtopics=[subtopic.to_dict() for subtopic in topic.subtopics]
    )
    commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
    model.commit(committer_id, commit_message, commit_cmd_dicts)
    # Keep the in-memory domain object in sync with the version the commit
    # above just wrote.
    topic.version += 1
    create_topic_summary(topic.id)
def save_new_topic(committer_id, topic):
    """Persists a brand-new topic, rejecting duplicate names.

    Args:
        committer_id: str. ID of the committer.
        topic: Topic. Topic to be saved.

    Raises:
        Exception. Topic with same name already exists.
    """
    if get_topic_by_name(topic.name) is not None:
        raise Exception('Topic with name \'%s\' already exists' % topic.name)

    create_cmd = topic_domain.TopicChange({
        'cmd': topic_domain.CMD_CREATE_NEW,
        'name': topic.name
    })
    _create_topic(
        committer_id, topic,
        'New topic created with name \'%s\'.' % topic.name,
        [create_cmd])
def apply_change_list(topic_id, change_list):
    """Applies a changelist to a topic and returns the result. The incoming
    changelist should not have simultaneuous creations and deletion of
    subtopics.

    Args:
        topic_id: str. ID of the given topic.
        change_list: list(TopicChange). A change list to be applied to the given
            topic.

    Raises:
        Exception. The incoming changelist had simultaneuous creation and
            deletion of subtopics.

    Returns:
        Topic, dict, list(int), list(int), list(SubtopicPageChange).
        The modified topic object, the modified subtopic pages dict keyed
        by subtopic page id containing the updated domain objects of
        each subtopic page, a list of ids of the deleted subtopics,
        a list of ids of the newly created subtopics and a list of changes
        applied to modified subtopic pages.
    """
    topic = get_topic_by_id(topic_id)
    newly_created_subtopic_ids = []
    existing_subtopic_page_ids_to_be_modified = []
    deleted_subtopic_ids = []
    modified_subtopic_pages_list = []
    modified_subtopic_pages = {}
    modified_subtopic_change_cmds = collections.defaultdict(list)

    # First pass: collect the already-existing subtopic pages touched by
    # this changelist (ids below next_subtopic_id), and group their change
    # commands by subtopic page id.
    for change in change_list:
        if (change.cmd ==
                subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY):
            if change.subtopic_id < topic.next_subtopic_id:
                existing_subtopic_page_ids_to_be_modified.append(
                    change.subtopic_id)
                subtopic_page_id = (
                    subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
                        topic_id, change.subtopic_id))
                modified_subtopic_change_cmds[subtopic_page_id].append(
                    change)
    modified_subtopic_pages_list = (
        subtopic_page_services.get_subtopic_pages_with_ids(
            topic_id, existing_subtopic_page_ids_to_be_modified))
    for subtopic_page in modified_subtopic_pages_list:
        modified_subtopic_pages[subtopic_page.id] = subtopic_page

    # Second pass: apply every change, in order, to the topic and subtopic
    # page domain objects. Any failure is logged with full context and
    # re-raised.
    try:
        for change in change_list:
            if change.cmd == topic_domain.CMD_ADD_SUBTOPIC:
                topic.add_subtopic(change.subtopic_id, change.title)
                subtopic_page_id = (
                    subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
                        topic_id, change.subtopic_id))
                modified_subtopic_pages[subtopic_page_id] = (
                    subtopic_page_domain.SubtopicPage.create_default_subtopic_page( #pylint: disable=line-too-long
                        change.subtopic_id, topic_id)
                )
                modified_subtopic_change_cmds[subtopic_page_id].append(
                    subtopic_page_domain.SubtopicPageChange({
                        'cmd': 'create_new',
                        'topic_id': topic_id,
                        'subtopic_id': change.subtopic_id
                    }))
                newly_created_subtopic_ids.append(change.subtopic_id)
            elif change.cmd == topic_domain.CMD_DELETE_SUBTOPIC:
                topic.delete_subtopic(change.subtopic_id)
                # A subtopic created earlier in this same changelist cannot
                # also be deleted by it.
                if change.subtopic_id in newly_created_subtopic_ids:
                    raise Exception(
                        'The incoming changelist had simultaneous'
                        ' creation and deletion of subtopics.')
                deleted_subtopic_ids.append(change.subtopic_id)
            elif change.cmd == topic_domain.CMD_ADD_UNCATEGORIZED_SKILL_ID:
                topic.add_uncategorized_skill_id(
                    change.new_uncategorized_skill_id)
            elif change.cmd == topic_domain.CMD_REMOVE_UNCATEGORIZED_SKILL_ID:
                topic.remove_uncategorized_skill_id(
                    change.uncategorized_skill_id)
            elif change.cmd == topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC:
                topic.move_skill_id_to_subtopic(
                    change.old_subtopic_id, change.new_subtopic_id,
                    change.skill_id)
            elif change.cmd == topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC:
                topic.remove_skill_id_from_subtopic(
                    change.subtopic_id, change.skill_id)
            elif change.cmd == topic_domain.CMD_UPDATE_TOPIC_PROPERTY:
                if (change.property_name ==
                        topic_domain.TOPIC_PROPERTY_NAME):
                    topic.update_name(change.new_value)
                elif (change.property_name ==
                      topic_domain.TOPIC_PROPERTY_DESCRIPTION):
                    topic.update_description(change.new_value)
                elif (change.property_name ==
                      topic_domain.TOPIC_PROPERTY_CANONICAL_STORY_IDS):
                    topic.update_canonical_story_ids(change.new_value)
                elif (change.property_name ==
                      topic_domain.TOPIC_PROPERTY_ADDITIONAL_STORY_IDS):
                    topic.update_additional_story_ids(change.new_value)
                elif (change.property_name ==
                      topic_domain.TOPIC_PROPERTY_LANGUAGE_CODE):
                    topic.update_language_code(change.new_value)
            elif (change.cmd ==
                  subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY):
                subtopic_page_id = (
                    subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
                        topic_id, change.subtopic_id))
                if ((modified_subtopic_pages[subtopic_page_id] is None) or
                        (change.subtopic_id in deleted_subtopic_ids)):
                    raise Exception(
                        'The subtopic with id %s doesn\'t exist' % (
                            change.subtopic_id))

                if (change.property_name ==
                        subtopic_page_domain.
                        SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML):
                    modified_subtopic_pages[
                        subtopic_page_id].update_page_contents_html(
                            change.new_value)
                elif (change.property_name ==
                      subtopic_page_domain.
                      SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO):
                    modified_subtopic_pages[
                        subtopic_page_id].update_page_contents_audio(
                            change.new_value)
            elif change.cmd == topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY:
                if (change.property_name ==
                        topic_domain.SUBTOPIC_PROPERTY_TITLE):
                    topic.update_subtopic_title(
                        change.subtopic_id, change.new_value)
            elif (
                    change.cmd ==
                    topic_domain.CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION):
                # Loading the topic model from the datastore into a
                # Topic domain object automatically converts it to use the
                # latest schema version. As a result, simply resaving the
                # topic is sufficient to apply the schema migration.
                continue

        return (
            topic, modified_subtopic_pages, deleted_subtopic_ids,
            newly_created_subtopic_ids, modified_subtopic_change_cmds)
    except Exception as e:
        # Log full context (change list and topic id) before re-raising so
        # failures are diagnosable from the server logs.
        logging.error(
            '%s %s %s %s' % (
                e.__class__.__name__, e, topic_id, change_list)
        )
        raise
def _save_topic(committer_id, topic, commit_message, change_list):
    """Validates a topic and commits it to persistent storage. If
    successful, increments the version number of the incoming topic domain
    object by 1.

    Args:
        committer_id: str. ID of the given committer.
        topic: Topic. The topic domain object to be saved.
        commit_message: str. The commit message.
        change_list: list(TopicChange). List of changes applied to a topic.

    Raises:
        Exception: Received invalid change list.
        Exception: The topic model and the incoming topic domain
            object have different version numbers.
    """
    if not change_list:
        raise Exception(
            'Unexpected error: received an invalid change list when trying to '
            'save topic %s: %s' % (topic.id, change_list))
    topic.validate()

    topic_model = topic_models.TopicModel.get(topic.id, strict=False)

    # Topic model cannot be None as topic is passed as parameter here and that
    # is only possible if a topic model with that topic id exists. Also this is
    # a private function and so it cannot be called independently with any
    # topic object.
    # Version check: both mismatched directions are rejected, so a stale
    # in-memory topic never overwrites a newer commit.
    if topic.version > topic_model.version:
        raise Exception(
            'Unexpected error: trying to update version %s of topic '
            'from version %s. Please reload the page and try again.'
            % (topic_model.version, topic.version))
    elif topic.version < topic_model.version:
        raise Exception(
            'Trying to update version %s of topic from version %s, '
            'which is too old. Please reload the page and try again.'
            % (topic_model.version, topic.version))

    # Copy every mutable field from the domain object onto the model before
    # committing.
    topic_model.description = topic.description
    topic_model.name = topic.name
    topic_model.canonical_story_ids = topic.canonical_story_ids
    topic_model.additional_story_ids = topic.additional_story_ids
    topic_model.uncategorized_skill_ids = topic.uncategorized_skill_ids
    topic_model.subtopics = [subtopic.to_dict() for subtopic in topic.subtopics]
    topic_model.subtopic_schema_version = topic.subtopic_schema_version
    topic_model.next_subtopic_id = topic.next_subtopic_id
    topic_model.language_code = topic.language_code
    change_dicts = [change.to_dict() for change in change_list]
    topic_model.commit(committer_id, commit_message, change_dicts)
    # Invalidate the cached copy so the next read sees the new version.
    memcache_services.delete(_get_topic_memcache_key(topic.id))
    topic.version += 1
def update_topic_and_subtopic_pages(
        committer_id, topic_id, change_list, commit_message):
    """Updates a topic and its subtopic pages. Commits changes.

    Args:
        committer_id: str. The id of the user who is performing the update
            action.
        topic_id: str. The topic id.
        change_list: list(TopicChange and SubtopicPageChange). These changes are
            applied in sequence to produce the resulting topic.
        commit_message: str or None. A description of changes made to the
            topic.

    Raises:
        ValueError: Current user does not have enough rights to edit a topic.
    """
    if not commit_message:
        raise ValueError(
            'Expected a commit message, received none.')

    (
        updated_topic, updated_subtopic_pages_dict,
        deleted_subtopic_ids, newly_created_subtopic_ids,
        updated_subtopic_pages_change_cmds_dict
    ) = apply_change_list(topic_id, change_list)
    # The topic itself is committed first; subtopic pages are committed
    # afterwards.
    _save_topic(
        committer_id, updated_topic, commit_message, change_list
    )
    # The following loop deletes those subtopic pages that are already in the
    # datastore, which are supposed to be deleted in the current changelist.
    for subtopic_id in deleted_subtopic_ids:
        if subtopic_id not in newly_created_subtopic_ids:
            subtopic_page_services.delete_subtopic_page(
                committer_id, topic_id, subtopic_id)

    for subtopic_page_id in updated_subtopic_pages_dict:
        subtopic_page = updated_subtopic_pages_dict[subtopic_page_id]
        subtopic_page_change_list = updated_subtopic_pages_change_cmds_dict[
            subtopic_page_id]
        subtopic_id = subtopic_page.get_subtopic_id_from_subtopic_page_id()
        # The following condition prevents the creation of subtopic pages that
        # were deleted above.
        if subtopic_id not in deleted_subtopic_ids:
            subtopic_page_services.save_subtopic_page(
                committer_id, subtopic_page, commit_message,
                subtopic_page_change_list)
    # Recompute the stored summary so it reflects the committed changes.
    create_topic_summary(topic_id)
def delete_uncategorized_skill(user_id, topic_id, uncategorized_skill_id):
    """Removes skill with given id from the topic.

    Args:
        user_id: str. The id of the user who is performing the action.
        topic_id: str. The id of the topic from which to remove the skill.
        uncategorized_skill_id: str. The uncategorized skill to remove from
            the topic.
    """
    commit_message = (
        'Removed %s from uncategorized skill ids' % uncategorized_skill_id)
    update_topic_and_subtopic_pages(
        user_id, topic_id,
        [topic_domain.TopicChange({
            'cmd': 'remove_uncategorized_skill_id',
            'uncategorized_skill_id': uncategorized_skill_id
        })],
        commit_message)
def add_uncategorized_skill(user_id, topic_id, uncategorized_skill_id):
    """Adds a skill with given id to the topic.

    Args:
        user_id: str. The id of the user who is performing the action.
        topic_id: str. The id of the topic to which the skill is to be added.
        uncategorized_skill_id: str. The id of the uncategorized skill to add
            to the topic.
    """
    commit_message = (
        'Added %s to uncategorized skill ids' % uncategorized_skill_id)
    update_topic_and_subtopic_pages(
        user_id, topic_id,
        [topic_domain.TopicChange({
            'cmd': 'add_uncategorized_skill_id',
            'new_uncategorized_skill_id': uncategorized_skill_id
        })],
        commit_message)
def delete_story(user_id, topic_id, story_id):
    """Removes story with given id from the topic.

    NOTE TO DEVELOPERS: Presently, this function only removes story_id from
    canonical_story_ids list.

    Args:
        user_id: str. The id of the user who is performing the action.
        topic_id: str. The id of the topic from which to remove the story.
        story_id: str. The story to remove from the topic.
    """
    topic = get_topic_by_id(topic_id)
    # Snapshot the list before mutation so the change records old and new.
    previous_story_ids = copy.deepcopy(topic.canonical_story_ids)
    topic.delete_story(story_id)
    update_topic_and_subtopic_pages(
        user_id, topic_id,
        [topic_domain.TopicChange({
            'cmd': 'update_topic_property',
            'property_name': 'canonical_story_ids',
            'old_value': previous_story_ids,
            'new_value': topic.canonical_story_ids
        })],
        'Removed %s from canonical story ids' % story_id)
def add_canonical_story(user_id, topic_id, story_id):
    """Adds a story to the canonical story id list of a topic.

    Args:
        user_id: str. The id of the user who is performing the action.
        topic_id: str. The id of the topic to which the story is to be added.
        story_id: str. The story to add to the topic.
    """
    topic = get_topic_by_id(topic_id)
    # Snapshot the list before mutation so the change records old and new.
    previous_story_ids = copy.deepcopy(topic.canonical_story_ids)
    topic.add_canonical_story(story_id)
    update_topic_and_subtopic_pages(
        user_id, topic_id,
        [topic_domain.TopicChange({
            'cmd': 'update_topic_property',
            'property_name': 'canonical_story_ids',
            'old_value': previous_story_ids,
            'new_value': topic.canonical_story_ids
        })],
        'Added %s to canonical story ids' % story_id)
def delete_topic(committer_id, topic_id, force_deletion=False):
    """Deletes the topic with the given topic_id.

    Args:
        committer_id: str. ID of the committer.
        topic_id: str. ID of the topic to be deleted.
        force_deletion: bool. If true, the topic and its history are fully
            deleted and are unrecoverable. Otherwise, the topic and all
            its history are marked as deleted, but the corresponding models are
            still retained in the datastore. This last option is the preferred
            one.

    Raises:
        ValueError: User does not have enough rights to delete a topic.
    """
    # Remove the rights model first, then the summary, the subtopic pages,
    # and finally the topic model itself.
    topic_rights_model = topic_models.TopicRightsModel.get(topic_id)
    topic_rights_model.delete(
        committer_id, feconf.COMMIT_MESSAGE_TOPIC_DELETED,
        force_deletion=force_deletion)

    # Delete the summary of the topic (regardless of whether
    # force_deletion is True or not).
    delete_topic_summary(topic_id)
    topic_model = topic_models.TopicModel.get(topic_id)
    # Each subtopic owns a subtopic page; delete them along with the topic.
    for subtopic in topic_model.subtopics:
        subtopic_page_services.delete_subtopic_page(
            committer_id, topic_id, subtopic['id'])
    topic_model.delete(
        committer_id, feconf.COMMIT_MESSAGE_TOPIC_DELETED,
        force_deletion=force_deletion)

    # This must come after the topic is retrieved. Otherwise the memcache
    # key will be reinstated.
    topic_memcache_key = _get_topic_memcache_key(topic_id)
    memcache_services.delete(topic_memcache_key)
def delete_topic_summary(topic_id):
    """Deletes the TopicSummaryModel for the given topic.

    Args:
        topic_id: str. ID of the topic whose topic summary is to
            be deleted.
    """
    summary_model = topic_models.TopicSummaryModel.get(topic_id)
    summary_model.delete()
def create_topic_summary(topic_id):
    """Creates and stores a summary of the given topic.

    Args:
        topic_id: str. ID of the topic.
    """
    topic = get_topic_by_id(topic_id)
    save_topic_summary(compute_summary_of_topic(topic))
def compute_summary_of_topic(topic):
    """Create a TopicSummary domain object for a given Topic domain
    object and return it.

    Args:
        topic: Topic. The topic object for which the summary is to be
            computed.

    Returns:
        TopicSummary. The computed summary for the given topic.
    """
    canonical_story_count = len(topic.canonical_story_ids)
    additional_story_count = len(topic.additional_story_ids)
    uncategorized_skill_count = len(topic.uncategorized_skill_ids)
    subtopic_count = len(topic.subtopics)
    # Total skills = uncategorized skills plus every skill assigned to a
    # subtopic.
    total_skill_count = uncategorized_skill_count + sum(
        len(subtopic.skill_ids) for subtopic in topic.subtopics)

    return topic_domain.TopicSummary(
        topic.id, topic.name, topic.canonical_name, topic.language_code,
        topic.version, canonical_story_count,
        additional_story_count,
        uncategorized_skill_count, subtopic_count,
        total_skill_count, topic.created_on, topic.last_updated
    )
def save_topic_summary(topic_summary):
    """Persists a topic summary domain object as a TopicSummaryModel
    entity in the datastore.

    Args:
        topic_summary: TopicSummary. The topic summary object to be saved
            in the datastore.
    """
    summary_model = topic_models.TopicSummaryModel(
        id=topic_summary.id,
        name=topic_summary.name,
        canonical_name=topic_summary.canonical_name,
        language_code=topic_summary.language_code,
        version=topic_summary.version,
        additional_story_count=topic_summary.additional_story_count,
        canonical_story_count=topic_summary.canonical_story_count,
        uncategorized_skill_count=topic_summary.uncategorized_skill_count,
        subtopic_count=topic_summary.subtopic_count,
        total_skill_count=topic_summary.total_skill_count,
        topic_model_last_updated=topic_summary.topic_model_last_updated,
        topic_model_created_on=topic_summary.topic_model_created_on
    )
    summary_model.put()
def get_topic_rights_from_model(topic_rights_model):
    """Constructs a TopicRights object from the given topic rights model.

    Args:
        topic_rights_model: TopicRightsModel. Topic rights from the
            datastore.

    Returns:
        TopicRights. The rights object created from the model.
    """
    model = topic_rights_model
    return topic_domain.TopicRights(
        model.id, model.manager_ids, model.topic_is_published)
def publish_topic(topic_id, committer_id):
    """Marks the given topic as published.

    Args:
        topic_id: str. The id of the given topic.
        committer_id: str. ID of the committer.

    Raises:
        Exception. The given topic does not exist.
        Exception. The topic is already published.
        Exception. The user does not have enough rights to publish the topic.
    """
    topic_rights = get_topic_rights(topic_id, strict=False)
    if topic_rights is None:
        raise Exception('The given topic does not exist')
    committer = user_services.UserActionsInfo(committer_id)
    # Permission is checked before the published-state check, matching the
    # order of guards in unpublish_topic.
    if role_services.ACTION_CHANGE_TOPIC_STATUS not in committer.actions:
        raise Exception(
            'The user does not have enough rights to publish the topic.')
    if topic_rights.topic_is_published:
        raise Exception('The topic is already published.')
    # Flip the publication flag and record the change as a commit.
    topic_rights.topic_is_published = True
    change = topic_domain.TopicRightsChange(
        {'cmd': topic_domain.CMD_PUBLISH_TOPIC})
    save_topic_rights(
        topic_rights, committer_id, 'Published the topic', [change])
def unpublish_topic(topic_id, committer_id):
    """Marks the given topic as unpublished.

    Args:
        topic_id: str. The id of the given topic.
        committer_id: str. ID of the committer.

    Raises:
        Exception. The given topic does not exist.
        Exception. The topic is already unpublished.
        Exception. The user does not have enough rights to unpublish the topic.
    """
    topic_rights = get_topic_rights(topic_id, strict=False)
    if topic_rights is None:
        raise Exception('The given topic does not exist')
    committer = user_services.UserActionsInfo(committer_id)
    if role_services.ACTION_CHANGE_TOPIC_STATUS not in committer.actions:
        raise Exception(
            'The user does not have enough rights to unpublish the topic.')
    if not topic_rights.topic_is_published:
        raise Exception('The topic is already unpublished.')
    # Flip the publication flag and record the change as a commit.
    topic_rights.topic_is_published = False
    change = topic_domain.TopicRightsChange(
        {'cmd': topic_domain.CMD_UNPUBLISH_TOPIC})
    save_topic_rights(
        topic_rights, committer_id, 'Unpublished the topic', [change])
def save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds):
    """Saves a TopicRights domain object to the datastore.

    Args:
        topic_rights: TopicRights. The rights object for the given
            topic.
        committer_id: str. ID of the committer.
        commit_message: str. Descriptive message for the commit.
        commit_cmds: list(TopicRightsChange). A list of commands describing
            what kind of commit was done.
    """
    # NOTE(review): assumes a rights model already exists for this id; with
    # strict=False a missing model would surface as an AttributeError below.
    rights_model = topic_models.TopicRightsModel.get(
        topic_rights.id, strict=False)
    rights_model.manager_ids = topic_rights.manager_ids
    rights_model.topic_is_published = topic_rights.topic_is_published
    rights_model.commit(
        committer_id, commit_message,
        [cmd.to_dict() for cmd in commit_cmds])
def create_new_topic_rights(topic_id, committer_id):
    """Creates a new topic rights object and saves it to the datastore.

    Args:
        topic_id: str. ID of the topic.
        committer_id: str. ID of the committer.
    """
    # New topics start unpublished and with no managers assigned.
    rights = topic_domain.TopicRights(topic_id, [], False)
    rights_model = topic_models.TopicRightsModel(
        id=rights.id,
        manager_ids=rights.manager_ids,
        topic_is_published=rights.topic_is_published)
    rights_model.commit(
        committer_id, 'Created new topic rights',
        [{'cmd': topic_domain.CMD_CREATE_NEW}])
def get_topic_rights(topic_id, strict=True):
    """Retrieves the rights object for the given topic.

    Args:
        topic_id: str. ID of the topic.
        strict: bool. Whether to fail noisily if no topic with a given id
            exists in the datastore.

    Returns:
        TopicRights. The rights object associated with the given topic, or
            None if no such model exists (only possible when strict=False).

    Raises:
        EntityNotFoundError. The topic with ID topic_id was not
            found in the datastore.
    """
    rights_model = topic_models.TopicRightsModel.get(topic_id, strict=strict)
    return (
        get_topic_rights_from_model(rights_model)
        if rights_model is not None else None)
def get_topic_rights_with_user(user_id):
    """Retrieves the rights object for all topics assigned to given user.

    Args:
        user_id: str. ID of the user.

    Returns:
        list(TopicRights). The rights objects associated with the topics
            assigned to given user.
    """
    rights_list = []
    for rights_model in topic_models.TopicRightsModel.get_by_user(user_id):
        # get_by_user may yield None entries; skip them.
        if rights_model is not None:
            rights_list.append(get_topic_rights_from_model(rights_model))
    return rights_list
def get_all_topic_rights():
    """Returns the rights object of all topics present in the datastore.

    Returns:
        dict. The dict of rights objects of all topics present in
            the datastore keyed by topic id.
    """
    # Convert each datastore model to a domain object, keyed by topic id.
    return {
        rights.id: rights
        for rights in (
            get_topic_rights_from_model(model)
            for model in topic_models.TopicRightsModel.get_all())
    }
def check_can_edit_topic(user, topic_rights):
    """Checks whether the user can edit the given topic.

    Args:
        user: UserActionsInfo. Object having user_id, role and actions for
            given user.
        topic_rights: TopicRights or None. Rights object for the given topic.

    Returns:
        bool. Whether the given user can edit the given topic.
    """
    if topic_rights is None:
        return False
    # Users holding the edit-any-topic action are always allowed.
    if role_services.ACTION_EDIT_ANY_TOPIC in user.actions:
        return True
    # Otherwise the user must both hold the edit-owned-topic action and
    # actually manage this particular topic.
    return bool(
        role_services.ACTION_EDIT_OWNED_TOPIC in user.actions
        and topic_rights.is_manager(user.user_id))
def deassign_user_from_all_topics(committer, user_id):
    """Deassigns given user from all topics assigned to them.

    Args:
        committer: UserActionsInfo. UserActionsInfo object for the user
            who is performing the action.
        user_id: str. The ID of the user.
    """
    # For every topic the user manages, drop them from the manager list and
    # commit the change under the committer's id.
    for topic_rights in get_topic_rights_with_user(user_id):
        topic_rights.manager_ids.remove(user_id)
        change = topic_domain.TopicRightsChange({
            'cmd': topic_domain.CMD_REMOVE_MANAGER_ROLE,
            'removed_user_id': user_id
        })
        save_topic_rights(
            topic_rights, committer.user_id,
            'Removed all assigned topics from %s' % (user_id), [change])
def assign_role(committer, assignee, new_role, topic_id):
    """Assigns a new role to the user.

    Args:
        committer: UserActionsInfo. UserActionsInfo object for the user
            who is performing the action.
        assignee: UserActionsInfo. UserActionsInfo object for the user
            whose role is being changed.
        new_role: str. The name of the new role. Possible values are:
            ROLE_MANAGER
        topic_id: str. ID of the topic.

    Raises:
        Exception. The committer does not have rights to modify a role.
        Exception. The assignee is already a manager for the topic.
        Exception. The assignee doesn't have enough rights to become a manager.
        Exception. The role is invalid.
    """
    committer_id = committer.user_id
    topic_rights = get_topic_rights(topic_id)
    # Only users who may modify roles for any activity can (de)assign
    # topic managers; the refusal is logged for auditability.
    if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY not in
            committer.actions):
        logging.error(
            'User %s tried to allow user %s to be a %s of topic %s '
            'but was refused permission.' % (
                committer_id, assignee.user_id, new_role, topic_id))
        raise Exception(
            'UnauthorizedUserException: Could not assign new role.')
    assignee_username = user_services.get_username(assignee.user_id)
    # The assignee must at least be able to edit topics they own before
    # they can be made a manager.
    if role_services.ACTION_EDIT_OWNED_TOPIC not in assignee.actions:
        raise Exception(
            'The assignee doesn\'t have enough rights to become a manager.')
    # Derive the assignee's current role for the commit record.
    old_role = topic_domain.ROLE_NONE
    if topic_rights.is_manager(assignee.user_id):
        old_role = topic_domain.ROLE_MANAGER
    if new_role == topic_domain.ROLE_MANAGER:
        if topic_rights.is_manager(assignee.user_id):
            raise Exception('This user already is a manager for this topic')
        topic_rights.manager_ids.append(assignee.user_id)
    elif new_role == topic_domain.ROLE_NONE:
        if topic_rights.is_manager(assignee.user_id):
            topic_rights.manager_ids.remove(assignee.user_id)
        else:
            # Nothing to revoke; the commit records a NONE -> NONE change.
            old_role = topic_domain.ROLE_NONE
    else:
        raise Exception('Invalid role: %s' % new_role)
    commit_message = 'Changed role of %s from %s to %s' % (
        assignee_username, old_role, new_role)
    commit_cmds = [topic_domain.TopicRightsChange({
        'cmd': topic_domain.CMD_CHANGE_ROLE,
        'assignee_id': assignee.user_id,
        'old_role': old_role,
        'new_role': new_role
    })]
    save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds)
| 38.347328 | 114 | 0.685105 |
import collections
import copy
import logging
from core.domain import role_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import user_services
from core.platform import models
import feconf
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
datastore_services = models.Registry.import_datastore_services()
memcache_services = models.Registry.import_memcache_services()
def _migrate_subtopics_to_latest_schema(versioned_subtopics):
subtopic_schema_version = versioned_subtopics['schema_version']
if not (1 <= subtopic_schema_version
<= feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d subtopic schemas at '
'present.' % feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION)
while (subtopic_schema_version <
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
topic_domain.Topic.update_subtopics_from_model(
versioned_subtopics, subtopic_schema_version)
subtopic_schema_version += 1
def _get_topic_memcache_key(topic_id, version=None):
if version:
return 'topic-version:%s:%s' % (topic_id, version)
else:
return 'topic:%s' % topic_id
def get_topic_from_model(topic_model):
versioned_subtopics = {
'schema_version': topic_model.subtopic_schema_version,
'subtopics': copy.deepcopy(topic_model.subtopics)
}
if (topic_model.subtopic_schema_version !=
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
_migrate_subtopics_to_latest_schema(versioned_subtopics)
return topic_domain.Topic(
topic_model.id, topic_model.name,
topic_model.description, topic_model.canonical_story_ids,
topic_model.additional_story_ids, topic_model.uncategorized_skill_ids,
[
topic_domain.Subtopic.from_dict(subtopic)
for subtopic in versioned_subtopics['subtopics']
],
versioned_subtopics['schema_version'],
topic_model.next_subtopic_id,
topic_model.language_code,
topic_model.version, topic_model.created_on,
topic_model.last_updated)
def get_all_topic_summaries():
topic_summaries_models = topic_models.TopicSummaryModel.get_all()
topic_summaries = [
get_topic_summary_from_model(summary)
for summary in topic_summaries_models]
return topic_summaries
def get_all_skill_ids_assigned_to_some_topic():
skill_ids = set([])
all_topic_models = topic_models.TopicModel.get_all()
all_topics = [get_topic_from_model(topic) for topic in all_topic_models]
for topic in all_topics:
skill_ids.update(topic.get_all_skill_ids())
return skill_ids
def get_topic_summary_from_model(topic_summary_model):
return topic_domain.TopicSummary(
topic_summary_model.id, topic_summary_model.name,
topic_summary_model.canonical_name,
topic_summary_model.language_code,
topic_summary_model.version,
topic_summary_model.canonical_story_count,
topic_summary_model.additional_story_count,
topic_summary_model.uncategorized_skill_count,
topic_summary_model.subtopic_count,
topic_summary_model.total_skill_count,
topic_summary_model.topic_model_created_on,
topic_summary_model.topic_model_last_updated
)
def get_topic_by_id(topic_id, strict=True, version=None):
topic_memcache_key = _get_topic_memcache_key(topic_id, version=version)
memcached_topic = memcache_services.get_multi(
[topic_memcache_key]).get(topic_memcache_key)
if memcached_topic is not None:
return memcached_topic
else:
topic_model = topic_models.TopicModel.get(
topic_id, strict=strict, version=version)
if topic_model:
topic = get_topic_from_model(topic_model)
memcache_services.set_multi({topic_memcache_key: topic})
return topic
else:
return None
def get_topics_by_ids(topic_ids):
all_topic_models = topic_models.TopicModel.get_multi(topic_ids)
topics = [
get_topic_from_model(topic_model) if topic_model is not None else None
for topic_model in all_topic_models]
return topics
def get_topic_by_name(topic_name):
topic_model = topic_models.TopicModel.get_by_name(topic_name)
if topic_model is None:
return None
topic = get_topic_from_model(topic_model)
return topic
def get_topic_summary_by_id(topic_id, strict=True):
topic_summary_model = topic_models.TopicSummaryModel.get(
topic_id, strict=strict)
if topic_summary_model:
topic_summary = get_topic_summary_from_model(topic_summary_model)
return topic_summary
else:
return None
def get_new_topic_id():
return topic_models.TopicModel.get_new_id('')
def _create_topic(committer_id, topic, commit_message, commit_cmds):
topic.validate()
create_new_topic_rights(topic.id, committer_id)
model = topic_models.TopicModel(
id=topic.id,
name=topic.name,
canonical_name=topic.canonical_name,
description=topic.description,
language_code=topic.language_code,
canonical_story_ids=topic.canonical_story_ids,
additional_story_ids=topic.additional_story_ids,
uncategorized_skill_ids=topic.uncategorized_skill_ids,
subtopic_schema_version=topic.subtopic_schema_version,
next_subtopic_id=topic.next_subtopic_id,
subtopics=[subtopic.to_dict() for subtopic in topic.subtopics]
)
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
topic.version += 1
create_topic_summary(topic.id)
def save_new_topic(committer_id, topic):
existing_topic = get_topic_by_name(topic.name)
if existing_topic is not None:
raise Exception('Topic with name \'%s\' already exists' % topic.name)
commit_message = (
'New topic created with name \'%s\'.' % topic.name)
_create_topic(
committer_id, topic, commit_message, [topic_domain.TopicChange({
'cmd': topic_domain.CMD_CREATE_NEW,
'name': topic.name
})])
def apply_change_list(topic_id, change_list):
topic = get_topic_by_id(topic_id)
newly_created_subtopic_ids = []
existing_subtopic_page_ids_to_be_modified = []
deleted_subtopic_ids = []
modified_subtopic_pages_list = []
modified_subtopic_pages = {}
modified_subtopic_change_cmds = collections.defaultdict(list)
for change in change_list:
if (change.cmd ==
subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY):
if change.subtopic_id < topic.next_subtopic_id:
existing_subtopic_page_ids_to_be_modified.append(
change.subtopic_id)
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
modified_subtopic_change_cmds[subtopic_page_id].append(
change)
modified_subtopic_pages_list = (
subtopic_page_services.get_subtopic_pages_with_ids(
topic_id, existing_subtopic_page_ids_to_be_modified))
for subtopic_page in modified_subtopic_pages_list:
modified_subtopic_pages[subtopic_page.id] = subtopic_page
try:
for change in change_list:
if change.cmd == topic_domain.CMD_ADD_SUBTOPIC:
topic.add_subtopic(change.subtopic_id, change.title)
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
modified_subtopic_pages[subtopic_page_id] = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
change.subtopic_id, topic_id)
)
modified_subtopic_change_cmds[subtopic_page_id].append(
subtopic_page_domain.SubtopicPageChange({
'cmd': 'create_new',
'topic_id': topic_id,
'subtopic_id': change.subtopic_id
}))
newly_created_subtopic_ids.append(change.subtopic_id)
elif change.cmd == topic_domain.CMD_DELETE_SUBTOPIC:
topic.delete_subtopic(change.subtopic_id)
if change.subtopic_id in newly_created_subtopic_ids:
raise Exception(
'The incoming changelist had simultaneous'
' creation and deletion of subtopics.')
deleted_subtopic_ids.append(change.subtopic_id)
elif change.cmd == topic_domain.CMD_ADD_UNCATEGORIZED_SKILL_ID:
topic.add_uncategorized_skill_id(
change.new_uncategorized_skill_id)
elif change.cmd == topic_domain.CMD_REMOVE_UNCATEGORIZED_SKILL_ID:
topic.remove_uncategorized_skill_id(
change.uncategorized_skill_id)
elif change.cmd == topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC:
topic.move_skill_id_to_subtopic(
change.old_subtopic_id, change.new_subtopic_id,
change.skill_id)
elif change.cmd == topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC:
topic.remove_skill_id_from_subtopic(
change.subtopic_id, change.skill_id)
elif change.cmd == topic_domain.CMD_UPDATE_TOPIC_PROPERTY:
if (change.property_name ==
topic_domain.TOPIC_PROPERTY_NAME):
topic.update_name(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_DESCRIPTION):
topic.update_description(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_CANONICAL_STORY_IDS):
topic.update_canonical_story_ids(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_ADDITIONAL_STORY_IDS):
topic.update_additional_story_ids(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_LANGUAGE_CODE):
topic.update_language_code(change.new_value)
elif (change.cmd ==
subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY):
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
if ((modified_subtopic_pages[subtopic_page_id] is None) or
(change.subtopic_id in deleted_subtopic_ids)):
raise Exception(
'The subtopic with id %s doesn\'t exist' % (
change.subtopic_id))
if (change.property_name ==
subtopic_page_domain.
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML):
modified_subtopic_pages[
subtopic_page_id].update_page_contents_html(
change.new_value)
elif (change.property_name ==
subtopic_page_domain.
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO):
modified_subtopic_pages[
subtopic_page_id].update_page_contents_audio(
change.new_value)
elif change.cmd == topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY:
if (change.property_name ==
topic_domain.SUBTOPIC_PROPERTY_TITLE):
topic.update_subtopic_title(
change.subtopic_id, change.new_value)
elif (
change.cmd ==
topic_domain.CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION):
# Loading the topic model from the datastore into a
# Topic domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# topic is sufficient to apply the schema migration.
continue
return (
topic, modified_subtopic_pages, deleted_subtopic_ids,
newly_created_subtopic_ids, modified_subtopic_change_cmds)
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, topic_id, change_list)
)
raise
def _save_topic(committer_id, topic, commit_message, change_list):
if not change_list:
raise Exception(
'Unexpected error: received an invalid change list when trying to '
'save topic %s: %s' % (topic.id, change_list))
topic.validate()
topic_model = topic_models.TopicModel.get(topic.id, strict=False)
# Topic model cannot be None as topic is passed as parameter here and that
# is only possible if a topic model with that topic id exists. Also this is
# a private function and so it cannot be called independently with any
# topic object.
if topic.version > topic_model.version:
raise Exception(
'Unexpected error: trying to update version %s of topic '
'from version %s. Please reload the page and try again.'
% (topic_model.version, topic.version))
elif topic.version < topic_model.version:
raise Exception(
'Trying to update version %s of topic from version %s, '
'which is too old. Please reload the page and try again.'
% (topic_model.version, topic.version))
topic_model.description = topic.description
topic_model.name = topic.name
topic_model.canonical_story_ids = topic.canonical_story_ids
topic_model.additional_story_ids = topic.additional_story_ids
topic_model.uncategorized_skill_ids = topic.uncategorized_skill_ids
topic_model.subtopics = [subtopic.to_dict() for subtopic in topic.subtopics]
topic_model.subtopic_schema_version = topic.subtopic_schema_version
topic_model.next_subtopic_id = topic.next_subtopic_id
topic_model.language_code = topic.language_code
change_dicts = [change.to_dict() for change in change_list]
topic_model.commit(committer_id, commit_message, change_dicts)
memcache_services.delete(_get_topic_memcache_key(topic.id))
topic.version += 1
def update_topic_and_subtopic_pages(
committer_id, topic_id, change_list, commit_message):
if not commit_message:
raise ValueError(
'Expected a commit message, received none.')
(
updated_topic, updated_subtopic_pages_dict,
deleted_subtopic_ids, newly_created_subtopic_ids,
updated_subtopic_pages_change_cmds_dict
) = apply_change_list(topic_id, change_list)
_save_topic(
committer_id, updated_topic, commit_message, change_list
)
# The following loop deletes those subtopic pages that are already in the
# datastore, which are supposed to be deleted in the current changelist.
for subtopic_id in deleted_subtopic_ids:
if subtopic_id not in newly_created_subtopic_ids:
subtopic_page_services.delete_subtopic_page(
committer_id, topic_id, subtopic_id)
for subtopic_page_id in updated_subtopic_pages_dict:
subtopic_page = updated_subtopic_pages_dict[subtopic_page_id]
subtopic_page_change_list = updated_subtopic_pages_change_cmds_dict[
subtopic_page_id]
subtopic_id = subtopic_page.get_subtopic_id_from_subtopic_page_id()
# The following condition prevents the creation of subtopic pages that
# were deleted above.
if subtopic_id not in deleted_subtopic_ids:
subtopic_page_services.save_subtopic_page(
committer_id, subtopic_page, commit_message,
subtopic_page_change_list)
create_topic_summary(topic_id)
def delete_uncategorized_skill(user_id, topic_id, uncategorized_skill_id):
change_list = [topic_domain.TopicChange({
'cmd': 'remove_uncategorized_skill_id',
'uncategorized_skill_id': uncategorized_skill_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Removed %s from uncategorized skill ids' % uncategorized_skill_id)
def add_uncategorized_skill(user_id, topic_id, uncategorized_skill_id):
change_list = [topic_domain.TopicChange({
'cmd': 'add_uncategorized_skill_id',
'new_uncategorized_skill_id': uncategorized_skill_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Added %s to uncategorized skill ids' % uncategorized_skill_id)
def delete_story(user_id, topic_id, story_id):
topic = get_topic_by_id(topic_id)
old_canonical_story_ids = copy.deepcopy(topic.canonical_story_ids)
topic.delete_story(story_id)
change_list = [topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'canonical_story_ids',
'old_value': old_canonical_story_ids,
'new_value': topic.canonical_story_ids
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Removed %s from canonical story ids' % story_id)
def add_canonical_story(user_id, topic_id, story_id):
topic = get_topic_by_id(topic_id)
old_canonical_story_ids = copy.deepcopy(topic.canonical_story_ids)
topic.add_canonical_story(story_id)
change_list = [topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'canonical_story_ids',
'old_value': old_canonical_story_ids,
'new_value': topic.canonical_story_ids
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Added %s to canonical story ids' % story_id)
def delete_topic(committer_id, topic_id, force_deletion=False):
topic_rights_model = topic_models.TopicRightsModel.get(topic_id)
topic_rights_model.delete(
committer_id, feconf.COMMIT_MESSAGE_TOPIC_DELETED,
force_deletion=force_deletion)
# Delete the summary of the topic (regardless of whether
# force_deletion is True or not).
delete_topic_summary(topic_id)
topic_model = topic_models.TopicModel.get(topic_id)
for subtopic in topic_model.subtopics:
subtopic_page_services.delete_subtopic_page(
committer_id, topic_id, subtopic['id'])
topic_model.delete(
committer_id, feconf.COMMIT_MESSAGE_TOPIC_DELETED,
force_deletion=force_deletion)
# This must come after the topic is retrieved. Otherwise the memcache
# key will be reinstated.
topic_memcache_key = _get_topic_memcache_key(topic_id)
memcache_services.delete(topic_memcache_key)
def delete_topic_summary(topic_id):
topic_models.TopicSummaryModel.get(topic_id).delete()
def create_topic_summary(topic_id):
topic = get_topic_by_id(topic_id)
topic_summary = compute_summary_of_topic(topic)
save_topic_summary(topic_summary)
def compute_summary_of_topic(topic):
topic_model_canonical_story_count = len(topic.canonical_story_ids)
topic_model_additional_story_count = len(topic.additional_story_ids)
topic_model_uncategorized_skill_count = len(topic.uncategorized_skill_ids)
topic_model_subtopic_count = len(topic.subtopics)
total_skill_count = topic_model_uncategorized_skill_count
for subtopic in topic.subtopics:
total_skill_count = total_skill_count + len(subtopic.skill_ids)
topic_summary = topic_domain.TopicSummary(
topic.id, topic.name, topic.canonical_name, topic.language_code,
topic.version, topic_model_canonical_story_count,
topic_model_additional_story_count,
topic_model_uncategorized_skill_count, topic_model_subtopic_count,
total_skill_count, topic.created_on, topic.last_updated
)
return topic_summary
def save_topic_summary(topic_summary):
topic_summary_model = topic_models.TopicSummaryModel(
id=topic_summary.id,
name=topic_summary.name,
canonical_name=topic_summary.canonical_name,
language_code=topic_summary.language_code,
version=topic_summary.version,
additional_story_count=topic_summary.additional_story_count,
canonical_story_count=topic_summary.canonical_story_count,
uncategorized_skill_count=topic_summary.uncategorized_skill_count,
subtopic_count=topic_summary.subtopic_count,
total_skill_count=topic_summary.total_skill_count,
topic_model_last_updated=topic_summary.topic_model_last_updated,
topic_model_created_on=topic_summary.topic_model_created_on
)
topic_summary_model.put()
def get_topic_rights_from_model(topic_rights_model):
return topic_domain.TopicRights(
topic_rights_model.id,
topic_rights_model.manager_ids,
topic_rights_model.topic_is_published
)
def publish_topic(topic_id, committer_id):
topic_rights = get_topic_rights(topic_id, strict=False)
if topic_rights is None:
raise Exception('The given topic does not exist')
user = user_services.UserActionsInfo(committer_id)
if role_services.ACTION_CHANGE_TOPIC_STATUS not in user.actions:
raise Exception(
'The user does not have enough rights to publish the topic.')
if topic_rights.topic_is_published:
raise Exception('The topic is already published.')
topic_rights.topic_is_published = True
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_PUBLISH_TOPIC
})]
save_topic_rights(
topic_rights, committer_id, 'Published the topic', commit_cmds)
def unpublish_topic(topic_id, committer_id):
topic_rights = get_topic_rights(topic_id, strict=False)
if topic_rights is None:
raise Exception('The given topic does not exist')
user = user_services.UserActionsInfo(committer_id)
if role_services.ACTION_CHANGE_TOPIC_STATUS not in user.actions:
raise Exception(
'The user does not have enough rights to unpublish the topic.')
if not topic_rights.topic_is_published:
raise Exception('The topic is already unpublished.')
topic_rights.topic_is_published = False
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_UNPUBLISH_TOPIC
})]
save_topic_rights(
topic_rights, committer_id, 'Unpublished the topic', commit_cmds)
def save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds):
model = topic_models.TopicRightsModel.get(topic_rights.id, strict=False)
model.manager_ids = topic_rights.manager_ids
model.topic_is_published = topic_rights.topic_is_published
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
def create_new_topic_rights(topic_id, committer_id):
topic_rights = topic_domain.TopicRights(topic_id, [], False)
commit_cmds = [{'cmd': topic_domain.CMD_CREATE_NEW}]
topic_models.TopicRightsModel(
id=topic_rights.id,
manager_ids=topic_rights.manager_ids,
topic_is_published=topic_rights.topic_is_published
).commit(committer_id, 'Created new topic rights', commit_cmds)
def get_topic_rights(topic_id, strict=True):
model = topic_models.TopicRightsModel.get(topic_id, strict=strict)
if model is None:
return None
return get_topic_rights_from_model(model)
def get_topic_rights_with_user(user_id):
topic_rights_models = topic_models.TopicRightsModel.get_by_user(user_id)
return [
get_topic_rights_from_model(model) for model in topic_rights_models
if model is not None]
def get_all_topic_rights():
topic_rights_models = topic_models.TopicRightsModel.get_all()
topic_rights = {}
for model in topic_rights_models:
rights = get_topic_rights_from_model(model)
topic_rights[rights.id] = rights
return topic_rights
def check_can_edit_topic(user, topic_rights):
if topic_rights is None:
return False
if role_services.ACTION_EDIT_ANY_TOPIC in user.actions:
return True
if role_services.ACTION_EDIT_OWNED_TOPIC not in user.actions:
return False
if topic_rights.is_manager(user.user_id):
return True
return False
def deassign_user_from_all_topics(committer, user_id):
topic_rights_list = get_topic_rights_with_user(user_id)
for topic_rights in topic_rights_list:
topic_rights.manager_ids.remove(user_id)
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_REMOVE_MANAGER_ROLE,
'removed_user_id': user_id
})]
save_topic_rights(
topic_rights, committer.user_id,
'Removed all assigned topics from %s' % (user_id), commit_cmds)
def assign_role(committer, assignee, new_role, topic_id):
committer_id = committer.user_id
topic_rights = get_topic_rights(topic_id)
if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY not in
committer.actions):
logging.error(
'User %s tried to allow user %s to be a %s of topic %s '
'but was refused permission.' % (
committer_id, assignee.user_id, new_role, topic_id))
raise Exception(
'UnauthorizedUserException: Could not assign new role.')
assignee_username = user_services.get_username(assignee.user_id)
if role_services.ACTION_EDIT_OWNED_TOPIC not in assignee.actions:
raise Exception(
'The assignee doesn\'t have enough rights to become a manager.')
old_role = topic_domain.ROLE_NONE
if topic_rights.is_manager(assignee.user_id):
old_role = topic_domain.ROLE_MANAGER
if new_role == topic_domain.ROLE_MANAGER:
if topic_rights.is_manager(assignee.user_id):
raise Exception('This user already is a manager for this topic')
topic_rights.manager_ids.append(assignee.user_id)
elif new_role == topic_domain.ROLE_NONE:
if topic_rights.is_manager(assignee.user_id):
topic_rights.manager_ids.remove(assignee.user_id)
else:
old_role = topic_domain.ROLE_NONE
else:
raise Exception('Invalid role: %s' % new_role)
commit_message = 'Changed role of %s from %s to %s' % (
assignee_username, old_role, new_role)
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_CHANGE_ROLE,
'assignee_id': assignee.user_id,
'old_role': old_role,
'new_role': new_role
})]
save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds)
| true | true |
1c47ecd35495ebe0a6ee65d47dce77083635a833 | 36,791 | py | Python | stacker/providers/aws/default.py | CityGrid/stacker | 87688453c64ef333c48786000f2f890cbe664633 | [
"BSD-2-Clause"
] | 1 | 2018-07-17T11:23:47.000Z | 2018-07-17T11:23:47.000Z | stacker/providers/aws/default.py | krm731/stacker | 87688453c64ef333c48786000f2f890cbe664633 | [
"BSD-2-Clause"
] | null | null | null | stacker/providers/aws/default.py | krm731/stacker | 87688453c64ef333c48786000f2f890cbe664633 | [
"BSD-2-Clause"
] | 1 | 2020-02-29T04:49:04.000Z | 2020-02-29T04:49:04.000Z | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import json
import yaml
import logging
import time
import urllib.parse
import sys
import botocore.exceptions
from botocore.config import Config
from ..base import BaseProvider
from ... import exceptions
from ...ui import ui
from stacker.session_cache import get_session
from ...actions.diff import (
DictValue,
diff_parameters,
format_params_diff as format_diff
)
# Module-level logger for this provider.
logger = logging.getLogger(__name__)
# This value controls the maximum number of times a CloudFormation API call
# will be attempted, after being throttled. This value is used in an
# exponential backoff algorithm to determine how long the client should wait
# until attempting a retry:
#
#   base * growth_factor ^ (attempts - 1)
#
# A value of 10 here would cause the worst case wait time for the last retry to
# be ~8 mins:
#
#   1 * 2 ^ (10 - 1) = 512 seconds
#
# References:
# https://github.com/boto/botocore/blob/1.6.1/botocore/retryhandler.py#L39-L58
# https://github.com/boto/botocore/blob/1.6.1/botocore/data/_retry.json#L97-L121
MAX_ATTEMPTS = 10
# Number of times tail_stack() re-tries when the stack does not exist yet
# (the stack may still be launching when tailing starts).
MAX_TAIL_RETRIES = 5
# Capabilities sent with every create/update so templates may declare
# named IAM resources.
DEFAULT_CAPABILITIES = ["CAPABILITY_NAMED_IAM", ]
def get_cloudformation_client(session):
    """Return a CloudFormation client with throttling retries configured.

    Args:
        session: boto3/botocore session used to construct the client.

    Returns:
        A CloudFormation client that retries throttled calls up to
        MAX_ATTEMPTS times with exponential backoff.
    """
    retry_config = Config(retries={"max_attempts": MAX_ATTEMPTS})
    return session.client('cloudformation', config=retry_config)
def get_output_dict(stack):
    """Return a dict of key/values for the outputs of a given CF stack.

    Args:
        stack (dict): The stack description (as returned by
            describe_stacks) to read outputs from.

    Returns:
        dict: OutputKey -> OutputValue for every output on the stack;
            empty when the stack exposes no outputs.
    """
    result = {}
    if 'Outputs' in stack:
        for entry in stack['Outputs']:
            logger.debug("  %s %s: %s", stack['StackName'],
                         entry['OutputKey'], entry['OutputValue'])
            result[entry['OutputKey']] = entry['OutputValue']
    return result
def s3_fallback(fqn, template, parameters, tags, method,
                change_set_name=None, service_role=None):
    """Retry a CloudFormation call through the legacy us-east-1 S3 endpoint.

    Invoked when a create/update/changeset call failed because the template
    bucket is not reachable through the region-local endpoint. Rewrites the
    template URL to the global ``s3.amazonaws.com`` endpoint and calls
    *method* again.

    Args:
        fqn (str): Fully qualified stack name.
        template (:class:`stacker.providers.base.Template`): Template whose
            URL is rewritten for the retry.
        parameters (list): Stack parameter dictionaries.
        tags (list): Stack tag dictionaries.
        method (callable): CloudFormation client method to retry
            (create_stack, update_stack or create_change_set).
        change_set_name (str, optional): Changeset name; only pass one when
            *method* is create_change_set.
        service_role (str, optional): IAM role ARN to pass through.

    Returns:
        dict: The response of *method*.
    """
    logger.warn("DEPRECATION WARNING: Falling back to legacy "
                "stacker S3 bucket region for templates. See "
                "http://stacker.readthedocs.io/en/latest/config.html#s3-bucket"
                " for more information.")
    # extra line break on purpose to avoid status updates removing URL
    # from view
    logger.warn("\n")
    logger.debug("Modifying the S3 TemplateURL to point to "
                 "us-east-1 endpoint")
    template_url_parsed = urllib.parse.urlparse(template.url)
    template_url_parsed = template_url_parsed._replace(
        netloc="s3.amazonaws.com")
    template_url = urllib.parse.urlunparse(template_url_parsed)
    logger.debug("Using template_url: %s", template_url)
    args = generate_cloudformation_args(
        fqn, parameters, tags, template,
        service_role=service_role,
        # BUG FIX: a fresh changeset name used to be generated here
        # unconditionally, which made plain create_stack/update_stack
        # retries fail with an unexpected ChangeSetName argument. Honor
        # the caller's (optional) changeset name instead.
        change_set_name=change_set_name
    )
    # BUG FIX: the rewritten URL was previously computed and logged but
    # never used, so the retry went back out with the original (failing)
    # TemplateURL. Override it so the fallback actually targets the
    # us-east-1 endpoint.
    if template.url:
        args["TemplateURL"] = template_url
    response = method(**args)
    return response
def get_change_set_name():
    """Return a unique, valid change set name.

    CloudFormation requires the name to match [a-zA-Z][-a-zA-Z0-9]* and to
    be unique across change sets; a second-resolution epoch timestamp
    suffix supplies the uniqueness.
    """
    return "change-set-%d" % int(time.time())
def requires_replacement(changeset):
    """Return only the changes in *changeset* that replace a resource.

    Args:
        changeset (list): Change dicts as returned by describe_change_set.

    Returns:
        list: The subset whose ResourceChange reports Replacement "True".
    """
    replacements = []
    for change in changeset:
        if change["ResourceChange"].get("Replacement", False) == "True":
            replacements.append(change)
    return replacements
def ask_for_approval(full_changeset=None, params_diff=None,
                     include_verbose=False):
    """Prompt the user to approve executing a change set.

    Args:
        full_changeset (list, optional): Full changeset, dumped when the
            user answers "v" (verbose).
        params_diff (list, optional): DictValue parameter differences from
            :func:`stacker.actions.diff.diff_dictionaries`, shown in
            verbose mode.
        include_verbose (bool, optional): Offer the "v" (verbose) choice.

    Raises:
        exceptions.CancelExecution: The user did not answer "y".
    """
    choices = ['y', 'n'] + (['v'] if include_verbose else [])
    answer = ui.ask("Execute the above changes? [{}] ".format(
        '/'.join(choices)))
    if include_verbose and answer == "v":
        if params_diff:
            logger.info(
                "Full changeset:\n\n%s\n%s",
                format_params_diff(params_diff),
                yaml.safe_dump(full_changeset),
            )
        else:
            logger.info(
                "Full changeset:\n%s",
                yaml.safe_dump(full_changeset),
            )
        # After dumping the details, re-prompt with a plain y/n.
        return ask_for_approval()
    if answer != "y":
        raise exceptions.CancelExecution
def output_summary(fqn, action, changeset, params_diff,
                   replacements_only=False):
    """Log a human-readable summary of a changeset.

    Args:
        fqn (string): Fully qualified name of the stack.
        action (string): Action word included in the log message.
        changeset (list): AWS changeset entries.
        params_diff (list): DictValue parameter differences from
            :func:`stacker.actions.diff.diff_dictionaries`.
        replacements_only (bool, optional): Only list replacement changes.
    """
    replacements = []
    other_changes = []
    for entry in changeset:
        rc = entry['ResourceChange']
        line = '- %s %s (%s)' % (
            rc['Action'],
            rc['LogicalResourceId'],
            rc['ResourceType'],
        )
        if rc.get('Replacement') == 'True':
            replacements.append(line)
        else:
            other_changes.append(line)

    body = ''
    if params_diff:
        body += summarize_params_diff(params_diff)
    if replacements:
        if not replacements_only:
            body += 'Replacements:\n'
        body += '\n'.join(replacements)
    if other_changes:
        if body:
            body += '\n'
        body += 'Changes:\n%s' % ('\n'.join(other_changes))
    logger.info('%s %s:\n%s', fqn, action, body)
def format_params_diff(params_diff):
    """ Just a wrapper for stacker.actions.diff.format_params_diff
    for testing purposes.
    """
    # Indirection lets tests monkeypatch this module's symbol without
    # touching stacker.actions.diff.
    return format_diff(params_diff)
def summarize_params_diff(params_diff):
    """Return a short text summary of parameter changes.

    Args:
        params_diff (list): DictValue entries from
            :func:`stacker.actions.diff.diff_dictionaries`.

    Returns:
        str: One "Parameters <Category>: ..." line per non-empty category
            (Added, Removed, Modified); '' when nothing changed.
    """
    summary = ''
    categories = (
        ('Added', DictValue.ADDED),
        ('Removed', DictValue.REMOVED),
        ('Modified', DictValue.MODIFIED),
    )
    for label, status in categories:
        keys = [v.key for v in params_diff if v.status() is status]
        if keys:
            summary += 'Parameters %s: %s\n' % (label, ', '.join(keys))
    return summary
def wait_till_change_set_complete(cfn_client, change_set_id, try_count=25,
                                  sleep_time=.5, max_sleep=3):
    """Poll a changeset until it reaches a terminal state.

    Changesets can take a little while to settle, so this polls up to
    *try_count* times, doubling the wait between attempts (capped at
    *max_sleep* seconds). With the defaults that is a little over one
    minute of waiting.

    Args:
        cfn_client (:class:`botocore.client.CloudFormation`): Used to query
            cloudformation.
        change_set_id (str): The unique changeset id to wait for.
        try_count (int): Number of times to try the call.
        sleep_time (int): Initial time to sleep between attempts.
        max_sleep (int): Max time to sleep during backoff.

    Returns:
        dict: The describe_change_set response.

    Raises:
        exceptions.ChangesetDidNotStabilize: Still not terminal after all
            attempts.
    """
    response = None
    for _ in range(try_count):
        response = cfn_client.describe_change_set(
            ChangeSetName=change_set_id,
        )
        if response["Status"] in ("FAILED", "CREATE_COMPLETE"):
            return response
        if sleep_time == max_sleep:
            # Only logged once the backoff has hit its ceiling.
            logger.debug(
                "Still waiting on changeset for another %s seconds",
                sleep_time
            )
        time.sleep(sleep_time)
        # exponential backoff with max
        sleep_time = min(sleep_time * 2, max_sleep)
    raise exceptions.ChangesetDidNotStabilize(change_set_id)
def create_change_set(cfn_client, fqn, template, parameters, tags,
                      change_set_type='UPDATE', replacements_only=False,
                      service_role=None):
    """Create a changeset and wait until it is ready to execute.

    Args:
        cfn_client (:class:`botocore.client.CloudFormation`): Client used
            for all CloudFormation calls.
        fqn (str): Fully qualified stack name.
        template (:class:`stacker.providers.base.Template`): Template to
            apply.
        parameters (list): Stack parameter dictionaries.
        tags (list): Stack tag dictionaries.
        change_set_type (str, optional): "CREATE" or "UPDATE".
        replacements_only (bool, optional): Accepted so provider kwargs can
            be passed straight through; not used in this function.
        service_role (str, optional): IAM role ARN CloudFormation assumes.

    Returns:
        tuple: (changes, change_set_id).

    Raises:
        exceptions.StackDidNotChange: The changeset contained no changes.
        exceptions.UnhandledChangeSetStatus: The changeset failed for any
            other reason (it is left in place for inspection).
        exceptions.UnableToExecuteChangeSet: The changeset completed but is
            not in an executable state.
    """
    logger.debug("Attempting to create change set of type %s for stack: %s.",
                 change_set_type,
                 fqn)
    args = generate_cloudformation_args(
        fqn, parameters, tags, template,
        change_set_type=change_set_type,
        service_role=service_role,
        change_set_name=get_change_set_name()
    )
    try:
        response = cfn_client.create_change_set(**args)
    except botocore.exceptions.ClientError as e:
        # Exact message CloudFormation returns when the template bucket is
        # not reachable through the region-local endpoint; retry via the
        # legacy us-east-1 S3 endpoint.
        if e.response['Error']['Message'] == ('TemplateURL must reference '
                                              'a valid S3 object to which '
                                              'you have access.'):
            response = s3_fallback(fqn, template, parameters,
                                   tags, cfn_client.create_change_set,
                                   get_change_set_name(),
                                   service_role)
        else:
            raise
    change_set_id = response["Id"]
    response = wait_till_change_set_complete(
        cfn_client, change_set_id
    )
    status = response["Status"]
    if status == "FAILED":
        status_reason = response["StatusReason"]
        # A "no changes" changeset is reported as FAILED by the API; map it
        # to the distinct StackDidNotChange condition and clean up.
        if ("didn't contain changes" in response["StatusReason"] or
                "No updates are to be performed" in response["StatusReason"]):
            logger.debug(
                "Stack %s did not change, not updating and removing "
                "changeset.",
                fqn,
            )
            cfn_client.delete_change_set(ChangeSetName=change_set_id)
            raise exceptions.StackDidNotChange()
        # Any other failure: deliberately keep the changeset around so the
        # operator can investigate it.
        logger.warn(
            "Got strange status, '%s' for changeset '%s'. Not deleting for "
            "further investigation - you will need to delete the changeset "
            "manually.",
            status, change_set_id
        )
        raise exceptions.UnhandledChangeSetStatus(
            fqn, change_set_id, status, status_reason
        )
    execution_status = response["ExecutionStatus"]
    if execution_status != "AVAILABLE":
        raise exceptions.UnableToExecuteChangeSet(fqn,
                                                  change_set_id,
                                                  execution_status)
    changes = response["Changes"]
    return changes, change_set_id
def check_tags_contain(actual, expected):
    """Check whether one set of AWS resource tags contains another.

    Every tag Key in *expected* must be present in *actual* with the same
    Value; extra keys in *actual* are ignored.

    Args:
        actual (list): Tags to verify, usually from a resource description;
            each item is a dict with "Key" and "Value" entries.
        expected (list): Tags that must all be present in *actual*, in the
            same format.

    Returns:
        bool: True when *expected* is a subset of *actual*.
    """
    have = set((tag["Key"], tag["Value"]) for tag in actual)
    want = set((tag["Key"], tag["Value"]) for tag in expected)
    return have.issuperset(want)
def generate_cloudformation_args(stack_name, parameters, tags, template,
                                 capabilities=DEFAULT_CAPABILITIES,
                                 change_set_type=None,
                                 service_role=None,
                                 stack_policy=None,
                                 change_set_name=None):
    """Build the common kwargs for CloudFormation stack API interactions.

    Used for create_stack / update_stack / create_change_set calls.

    Args:
        stack_name (str): The fully qualified stack name in Cloudformation.
        parameters (list): Parameter dictionaries to apply to the stack.
        tags (list): Tag dictionaries to apply to the stack.
        template (:class:`stacker.provider.base.Template`): The template
            object; its url (preferred) or body is forwarded.
        capabilities (list, optional): Capabilities to use when updating
            Cloudformation.
        change_set_type (str, optional): Change set type, for
            create_change_set only.
        service_role (str, optional): Service role to use when interacting
            with Cloudformation.
        stack_policy (:class:`stacker.provider.base.Template`, optional):
            Stack policy; only applied outside changeset calls.
        change_set_name (str, optional): Change set name, for
            create_change_set only.

    Returns:
        dict: Keyword arguments for the Cloudformation API call.
    """
    args = {
        "StackName": stack_name,
        "Parameters": parameters,
        "Tags": tags,
        "Capabilities": capabilities,
    }

    optional_args = (
        ("RoleARN", service_role),
        ("ChangeSetName", change_set_name),
        ("ChangeSetType", change_set_type),
    )
    for key, value in optional_args:
        if value:
            args[key] = value

    if template.url:
        args["TemplateURL"] = template.url
    else:
        args["TemplateBody"] = template.body

    # When creating args for CreateChangeSet, don't include the stack
    # policy, since ChangeSets don't support it.
    if not change_set_name:
        args.update(generate_stack_policy_args(stack_policy))

    return args
def generate_stack_policy_args(stack_policy=None):
    """Convert a stack policy object into create/update_stack kwargs.

    Args:
        stack_policy (:class:`stacker.providers.base.Template`, optional):
            Stack policy to apply, if any.

    Returns:
        dict: Empty when no policy was given, otherwise a single
            StackPolicyBody entry.

    Raises:
        NotImplementedError: The policy is URL-backed; stacker does not
            upload stack policies to S3.
    """
    if not stack_policy:
        return {}
    logger.debug("Stack has a stack policy")
    if stack_policy.url:
        # stacker currently does not support uploading stack policies to
        # S3, so this will never get hit (unless you're implementing S3
        # uploads, and then you're probably reading this comment about why
        # the exception below was raised :))
        #
        # args["StackPolicyURL"] = stack_policy.url
        raise NotImplementedError
    return {"StackPolicyBody": stack_policy.body}
class ProviderBuilder(object):
    """Implements a ProviderBuilder for the AWS provider."""

    def __init__(self, region=None, **kwargs):
        # Default region used by build() when none is given; extra kwargs
        # are forwarded to every Provider constructed.
        self.region = region
        self.kwargs = kwargs

    def build(self, region=None, profile=None):
        """Return a Provider for *region* (or the builder's default)."""
        effective_region = region if region else self.region
        session = get_session(region=effective_region, profile=profile)
        return Provider(session, region=effective_region, **self.kwargs)
class Provider(BaseProvider):
"""AWS CloudFormation Provider"""
DELETED_STATUS = "DELETE_COMPLETE"
IN_PROGRESS_STATUSES = (
"CREATE_IN_PROGRESS",
"UPDATE_IN_PROGRESS",
"DELETE_IN_PROGRESS",
"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
)
ROLLING_BACK_STATUSES = (
"ROLLBACK_IN_PROGRESS",
"UPDATE_ROLLBACK_IN_PROGRESS"
)
FAILED_STATUSES = (
"CREATE_FAILED",
"ROLLBACK_FAILED",
"ROLLBACK_COMPLETE",
"DELETE_FAILED",
"UPDATE_ROLLBACK_FAILED",
# Note: UPDATE_ROLLBACK_COMPLETE is in both the FAILED and COMPLETE
# sets, because we need to wait for it when a rollback is triggered,
# but still mark the stack as failed.
"UPDATE_ROLLBACK_COMPLETE",
)
COMPLETE_STATUSES = (
"CREATE_COMPLETE",
"DELETE_COMPLETE",
"UPDATE_COMPLETE",
"UPDATE_ROLLBACK_COMPLETE",
)
RECREATION_STATUSES = (
"CREATE_FAILED",
"ROLLBACK_FAILED",
"ROLLBACK_COMPLETE",
)
    def __init__(self, session, region=None, interactive=False,
                 replacements_only=False, recreate_failed=False,
                 service_role=None, **kwargs):
        """Initialize the provider.

        Args:
            session: boto3/botocore session used to build the
                CloudFormation client.
            region (str, optional): AWS region this provider targets.
            interactive (bool, optional): Prompt the user before applying
                changes.
            replacements_only (bool, optional): In interactive mode, only
                show changes that replace resources.
            recreate_failed (bool, optional): Allow deleting and
                re-creating stacks stuck in a failed create state.
            service_role (str, optional): IAM role ARN passed to
                CloudFormation calls.
        """
        # Per-stack output cache, populated lazily by get_outputs().
        self._outputs = {}
        self.region = region
        self.cloudformation = get_cloudformation_client(session)
        self.interactive = interactive
        # replacements only is only used in interactive mode
        self.replacements_only = interactive and replacements_only
        # Interactive mode implies permission to recreate failed stacks,
        # since the user is prompted before anything is destroyed.
        self.recreate_failed = interactive or recreate_failed
        self.service_role = service_role
def get_stack(self, stack_name, **kwargs):
try:
return self.cloudformation.describe_stacks(
StackName=stack_name)['Stacks'][0]
except botocore.exceptions.ClientError as e:
if "does not exist" not in str(e):
raise
raise exceptions.StackDoesNotExist(stack_name)
    def get_stack_status(self, stack, **kwargs):
        """Return the StackStatus string of a stack description."""
        return stack['StackStatus']

    def is_stack_completed(self, stack, **kwargs):
        """True when the stack's status is in COMPLETE_STATUSES."""
        return self.get_stack_status(stack) in self.COMPLETE_STATUSES

    def is_stack_in_progress(self, stack, **kwargs):
        """True when an operation on the stack is still running."""
        return self.get_stack_status(stack) in self.IN_PROGRESS_STATUSES

    def is_stack_destroyed(self, stack, **kwargs):
        """True when the stack has been deleted."""
        return self.get_stack_status(stack) == self.DELETED_STATUS

    def is_stack_recreatable(self, stack, **kwargs):
        """True when the stack may be safely deleted and re-created."""
        return self.get_stack_status(stack) in self.RECREATION_STATUSES

    def is_stack_rolling_back(self, stack, **kwargs):
        """True when a rollback is currently running on the stack."""
        return self.get_stack_status(stack) in self.ROLLING_BACK_STATUSES

    def is_stack_failed(self, stack, **kwargs):
        """True when the stack's status is in FAILED_STATUSES."""
        return self.get_stack_status(stack) in self.FAILED_STATUSES
    def tail_stack(self, stack, cancel, retries=0, **kwargs):
        """Tail the CloudFormation events of *stack* until *cancel* is set.

        Retries up to MAX_TAIL_RETRIES times when the stack does not exist
        yet (tailing may start while the stack is still launching).
        """
        def log_func(e):
            # Build "[fqn] status type [reason]" dynamically, dropping any
            # empty fields.
            event_args = [e['ResourceStatus'], e['ResourceType'],
                          e.get('ResourceStatusReason', None)]
            # filter out any values that are empty
            event_args = [arg for arg in event_args if arg]
            template = " ".join(["[%s]"] + ["%s" for _ in event_args])
            logger.info(template, *([stack.fqn] + event_args))

        if not retries:
            logger.info("Tailing stack: %s", stack.fqn)

        try:
            self.tail(stack.fqn,
                      cancel=cancel,
                      log_func=log_func,
                      include_initial=False)
        except botocore.exceptions.ClientError as e:
            if "does not exist" in str(e) and retries < MAX_TAIL_RETRIES:
                # stack might be in the process of launching, wait for a second
                # and try again
                time.sleep(1)
                self.tail_stack(stack, cancel, retries=retries + 1, **kwargs)
            else:
                raise
@staticmethod
def _tail_print(e):
print("%s %s %s" % (e['ResourceStatus'],
e['ResourceType'],
e['EventId']))
def get_events(self, stackname):
"""Get the events in batches and return in chronological order"""
next_token = None
event_list = []
while 1:
if next_token is not None:
events = self.cloudformation.describe_stack_events(
StackName=stackname, NextToken=next_token
)
else:
events = self.cloudformation.describe_stack_events(
StackName=stackname
)
event_list.append(events['StackEvents'])
next_token = events.get('NextToken', None)
if next_token is None:
break
time.sleep(1)
return reversed(sum(event_list, []))
    def tail(self, stack_name, cancel, log_func=_tail_print, sleep_time=5,
             include_initial=True):
        """Show and then tail the event log.

        Polls get_events() every *sleep_time* seconds, passing each unseen
        event to *log_func*, until the *cancel* event is set.
        """
        # First dump the full list of events in chronological order and keep
        # track of the events we've seen already
        seen = set()
        initial_events = self.get_events(stack_name)
        for e in initial_events:
            if include_initial:
                log_func(e)
            seen.add(e['EventId'])

        # Now keep looping through and dump the new events
        while 1:
            events = self.get_events(stack_name)
            for e in events:
                if e['EventId'] not in seen:
                    log_func(e)
                    seen.add(e['EventId'])
            # wait(sleep_time) doubles as the poll interval and the
            # cancellation check; returns (stops tailing) once set.
            if cancel.wait(sleep_time):
                return
def destroy_stack(self, stack, **kwargs):
logger.debug("Destroying stack: %s" % (self.get_stack_name(stack)))
args = {"StackName": self.get_stack_name(stack)}
if self.service_role:
args["RoleARN"] = self.service_role
self.cloudformation.delete_stack(**args)
return True
def create_stack(self, fqn, template, parameters, tags,
force_change_set=False, stack_policy=None,
**kwargs):
"""Create a new Cloudformation stack.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when creating the stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
force_change_set (bool): Whether or not to force change set use.
"""
logger.debug("Attempting to create stack %s:.", fqn)
logger.debug(" parameters: %s", parameters)
logger.debug(" tags: %s", tags)
if template.url:
logger.debug(" template_url: %s", template.url)
else:
logger.debug(" no template url, uploading template "
"directly.")
if force_change_set:
logger.debug("force_change_set set to True, creating stack with "
"changeset.")
_changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'CREATE', service_role=self.service_role, **kwargs
)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
)
else:
args = generate_cloudformation_args(
fqn, parameters, tags, template,
service_role=self.service_role,
stack_policy=stack_policy,
)
try:
self.cloudformation.create_stack(**args)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Message'] == ('TemplateURL must '
'reference a valid S3 '
'object to which you '
'have access.'):
s3_fallback(fqn, template, parameters, tags,
self.cloudformation.create_stack,
self.service_role)
else:
raise
def select_update_method(self, force_interactive, force_change_set):
"""Select the correct update method when updating a stack.
Args:
force_interactive (str): Whether or not to force interactive mode
no matter what mode the provider is in.
force_change_set (bool): Whether or not to force change set use.
Returns:
function: The correct object method to use when updating.
"""
if self.interactive or force_interactive:
return self.interactive_update_stack
elif force_change_set:
return self.noninteractive_changeset_update
else:
return self.default_update_stack
    def prepare_stack_for_update(self, stack, tags):
        """Prepare a stack for updating.

        It may involve deleting the stack if it has failed its initial
        creation. The deletion is only allowed if:

        - The stack contains all the tags configured in the current context;
        - The stack is in one of the statuses considered safe to re-create;
        - ``recreate_failed`` is enabled, due to either being explicitly
          enabled by the user, or because interactive mode is on.

        Args:
            stack (dict): a stack object returned from get_stack
            tags (list): list of expected tags that must be present in the
                stack if it must be re-created

        Returns:
            bool: True if the stack can be updated, False if it must be
                re-created

        Raises:
            exceptions.StackUpdateBadStatus: The stack can neither be
                updated nor safely re-created in its current state.
        """
        if self.is_stack_destroyed(stack):
            # Already gone; caller should create from scratch.
            return False
        elif self.is_stack_completed(stack):
            return True

        stack_name = self.get_stack_name(stack)
        stack_status = self.get_stack_status(stack)

        if self.is_stack_in_progress(stack):
            raise exceptions.StackUpdateBadStatus(
                stack_name, stack_status,
                'Update already in-progress')

        if not self.is_stack_recreatable(stack):
            raise exceptions.StackUpdateBadStatus(
                stack_name, stack_status,
                'Unsupported state for re-creation')

        if not self.recreate_failed:
            raise exceptions.StackUpdateBadStatus(
                stack_name, stack_status,
                'Stack re-creation is disabled. Run stacker again with the '
                '--recreate-failed option to force it to be deleted and '
                'created from scratch.')

        # Refuse to delete a stack that doesn't carry the expected tags:
        # it was probably not created by this stacker configuration.
        stack_tags = self.get_stack_tags(stack)
        if not check_tags_contain(stack_tags, tags):
            raise exceptions.StackUpdateBadStatus(
                stack_name, stack_status,
                'Tags differ from current configuration, possibly not created '
                'with stacker')

        if self.interactive:
            # NOTE(review): message reads "will IRREVERSIBLY DESTROYED" --
            # likely missing "BE"; left as-is since it is runtime output.
            sys.stdout.write(
                'The \"%s\" stack is in a failed state (%s).\n'
                'It cannot be updated, but it can be deleted and re-created.\n'
                'All its current resources will IRREVERSIBLY DESTROYED.\n'
                'Proceed carefully!\n\n' % (stack_name, stack_status))
            sys.stdout.flush()

            ask_for_approval(include_verbose=False)

        logger.warn('Destroying stack \"%s\" for re-creation', stack_name)
        self.destroy_stack(stack)

        return False
    def update_stack(self, fqn, template, old_parameters, parameters, tags,
                     force_interactive=False, force_change_set=False,
                     stack_policy=None, **kwargs):
        """Update a Cloudformation stack.

        Args:
            fqn (str): The fully qualified name of the Cloudformation stack.
            template (:class:`stacker.providers.base.Template`): A Template
                object to use when updating the stack.
            old_parameters (list): A list of dictionaries that defines the
                parameter list on the existing Cloudformation stack.
            parameters (list): A list of dictionaries that defines the
                parameter list to be applied to the Cloudformation stack.
            tags (list): A list of dictionaries that defines the tags
                that should be applied to the Cloudformation stack.
            force_interactive (bool): A flag that indicates whether the
                update should be interactive. If set to True, interactive
                mode will be used no matter if the provider is in
                interactive mode or not. False will follow the behavior of
                the provider.
            force_change_set (bool): A flag that indicates whether the
                update must be executed with a change set.
            stack_policy (:class:`stacker.providers.base.Template`,
                optional): A stack policy to apply on update.
        """
        logger.debug("Attempting to update stack %s:", fqn)
        logger.debug("  parameters: %s", parameters)
        logger.debug("  tags: %s", tags)
        if template.url:
            logger.debug("  template_url: %s", template.url)
        else:
            logger.debug("  no template url, uploading template directly.")
        # Dispatch to the interactive / changeset / default implementation.
        update_method = self.select_update_method(force_interactive,
                                                  force_change_set)
        return update_method(fqn, template, old_parameters, parameters, tags,
                             stack_policy=stack_policy, **kwargs)
    def interactive_update_stack(self, fqn, template, old_parameters,
                                 parameters, tags, stack_policy=None,
                                 **kwargs):
        """Update a Cloudformation stack in interactive mode.

        Creates a changeset, shows the user a summary of the pending
        changes (replacements only, when the provider was configured that
        way) and asks for approval before executing it.

        Args:
            fqn (str): The fully qualified name of the Cloudformation stack.
            template (:class:`stacker.providers.base.Template`): A Template
                object to use when updating the stack.
            old_parameters (list): A list of dictionaries that defines the
                parameter list on the existing Cloudformation stack.
            parameters (list): A list of dictionaries that defines the
                parameter list to be applied to the Cloudformation stack.
            tags (list): A list of dictionaries that defines the tags
                that should be applied to the Cloudformation stack.
            stack_policy (:class:`stacker.providers.base.Template`,
                optional): A stack policy applied before execution.
        """
        logger.debug("Using interactive provider mode for %s.", fqn)
        changes, change_set_id = create_change_set(
            self.cloudformation, fqn, template, parameters, tags,
            'UPDATE', service_role=self.service_role, **kwargs
        )
        params_diff = diff_parameters(
            self.params_as_dict(old_parameters),
            self.params_as_dict(parameters))

        action = "replacements" if self.replacements_only else "changes"
        full_changeset = changes
        if self.replacements_only:
            changes = requires_replacement(changes)

        if changes or params_diff:
            # Hold the UI lock so concurrent tail output can't interleave
            # with the summary and prompt.
            ui.lock()
            try:
                output_summary(fqn, action, changes, params_diff,
                               replacements_only=self.replacements_only)
                ask_for_approval(
                    full_changeset=full_changeset,
                    params_diff=params_diff,
                    include_verbose=True,
                )
            finally:
                ui.unlock()

        # ChangeSets don't support specifying a stack policy inline, like
        # CreateStack/UpdateStack, so we just SetStackPolicy if there is one.
        if stack_policy:
            kwargs = generate_stack_policy_args(stack_policy)
            kwargs["StackName"] = fqn
            self.cloudformation.set_stack_policy(**kwargs)

        self.cloudformation.execute_change_set(
            ChangeSetName=change_set_id,
        )
def noninteractive_changeset_update(self, fqn, template, old_parameters,
parameters, tags, **kwargs):
"""Update a Cloudformation stack using a change set.
This is required for stacks with a defined Transform (i.e. SAM), as the
default update_stack API cannot be used with them.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
"""
logger.debug("Using noninterative changeset provider mode "
"for %s.", fqn)
_changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'UPDATE', service_role=self.service_role, **kwargs
)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
)
def default_update_stack(self, fqn, template, old_parameters, parameters,
tags, stack_policy=None, **kwargs):
"""Update a Cloudformation stack in default mode.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
"""
logger.debug("Using default provider mode for %s.", fqn)
args = generate_cloudformation_args(
fqn, parameters, tags, template,
service_role=self.service_role,
stack_policy=stack_policy,
)
try:
self.cloudformation.update_stack(**args)
except botocore.exceptions.ClientError as e:
if "No updates are to be performed." in str(e):
logger.debug(
"Stack %s did not change, not updating.",
fqn,
)
raise exceptions.StackDidNotChange
elif e.response['Error']['Message'] == ('TemplateURL must '
'reference a valid '
'S3 object to which '
'you have access.'):
s3_fallback(fqn, template, parameters, tags,
self.cloudformation.update_stack,
self.service_role)
else:
raise
    def get_stack_name(self, stack, **kwargs):
        """Return the StackName of a stack description."""
        return stack['StackName']

    def get_stack_tags(self, stack, **kwargs):
        """Return the Tags list of a stack description."""
        return stack['Tags']
    def get_outputs(self, stack_name, *args, **kwargs):
        """Return the outputs of *stack_name*, cached per provider."""
        if stack_name not in self._outputs:
            stack = self.get_stack(stack_name)
            self._outputs[stack_name] = get_output_dict(stack)
        return self._outputs[stack_name]

    def get_output_dict(self, stack):
        # Thin method wrapper over the module-level helper of the same
        # name (no caching).
        return get_output_dict(stack)
def get_stack_info(self, stack):
""" Get the template and parameters of the stack currently in AWS
Returns [ template, parameters ]
"""
stack_name = stack['StackId']
try:
template = self.cloudformation.get_template(
StackName=stack_name)['TemplateBody']
except botocore.exceptions.ClientError as e:
if "does not exist" not in str(e):
raise
raise exceptions.StackDoesNotExist(stack_name)
parameters = self.params_as_dict(stack.get('Parameters', []))
return [json.dumps(template), parameters]
@staticmethod
def params_as_dict(parameters_list):
parameters = dict()
for p in parameters_list:
parameters[p['ParameterKey']] = p['ParameterValue']
return parameters
| 37.850823 | 80 | 0.60561 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import json
import yaml
import logging
import time
import urllib.parse
import sys
import botocore.exceptions
from botocore.config import Config
from ..base import BaseProvider
from ... import exceptions
from ...ui import ui
from stacker.session_cache import get_session
from ...actions.diff import (
DictValue,
diff_parameters,
format_params_diff as format_diff
)
logger = logging.getLogger(__name__)
10
MAX_TAIL_RETRIES = 5
DEFAULT_CAPABILITIES = ["CAPABILITY_NAMED_IAM", ]
def get_cloudformation_client(session):
config = Config(
retries=dict(
max_attempts=MAX_ATTEMPTS
)
)
return session.client('cloudformation', config=config)
def get_output_dict(stack):
outputs = {}
if 'Outputs' not in stack:
return outputs
for output in stack['Outputs']:
logger.debug(" %s %s: %s", stack['StackName'], output['OutputKey'],
output['OutputValue'])
outputs[output['OutputKey']] = output['OutputValue']
return outputs
def s3_fallback(fqn, template, parameters, tags, method,
change_set_name=None, service_role=None):
logger.warn("DEPRECATION WARNING: Falling back to legacy "
"stacker S3 bucket region for templates. See "
"http://stacker.readthedocs.io/en/latest/config.html#s3-bucket"
" for more information.")
logger.warn("\n")
logger.debug("Modifying the S3 TemplateURL to point to "
"us-east-1 endpoint")
template_url = template.url
template_url_parsed = urllib.parse.urlparse(template_url)
template_url_parsed = template_url_parsed._replace(
netloc="s3.amazonaws.com")
template_url = urllib.parse.urlunparse(template_url_parsed)
logger.debug("Using template_url: %s", template_url)
args = generate_cloudformation_args(
fqn, parameters, tags, template,
service_role=service_role,
change_set_name=get_change_set_name()
)
response = method(**args)
return response
def get_change_set_name():
return 'change-set-{}'.format(int(time.time()))
def requires_replacement(changeset):
return [r for r in changeset if r["ResourceChange"].get(
"Replacement", False) == "True"]
def ask_for_approval(full_changeset=None, params_diff=None,
include_verbose=False):
approval_options = ['y', 'n']
if include_verbose:
approval_options.append('v')
approve = ui.ask("Execute the above changes? [{}] ".format(
'/'.join(approval_options)))
if include_verbose and approve == "v":
if params_diff:
logger.info(
"Full changeset:\n\n%s\n%s",
format_params_diff(params_diff),
yaml.safe_dump(full_changeset),
)
else:
logger.info(
"Full changeset:\n%s",
yaml.safe_dump(full_changeset),
)
return ask_for_approval()
elif approve != "y":
raise exceptions.CancelExecution
def output_summary(fqn, action, changeset, params_diff,
replacements_only=False):
replacements = []
changes = []
for change in changeset:
resource = change['ResourceChange']
replacement = resource.get('Replacement') == 'True'
summary = '- %s %s (%s)' % (
resource['Action'],
resource['LogicalResourceId'],
resource['ResourceType'],
)
if replacement:
replacements.append(summary)
else:
changes.append(summary)
summary = ''
if params_diff:
summary += summarize_params_diff(params_diff)
if replacements:
if not replacements_only:
summary += 'Replacements:\n'
summary += '\n'.join(replacements)
if changes:
if summary:
summary += '\n'
summary += 'Changes:\n%s' % ('\n'.join(changes))
logger.info('%s %s:\n%s', fqn, action, summary)
def format_params_diff(params_diff):
return format_diff(params_diff)
def summarize_params_diff(params_diff):
summary = ''
added_summary = [v.key for v in params_diff
if v.status() is DictValue.ADDED]
if added_summary:
summary += 'Parameters Added: %s\n' % ', '.join(added_summary)
removed_summary = [v.key for v in params_diff
if v.status() is DictValue.REMOVED]
if removed_summary:
summary += 'Parameters Removed: %s\n' % ', '.join(removed_summary)
modified_summary = [v.key for v in params_diff
if v.status() is DictValue.MODIFIED]
if modified_summary:
summary += 'Parameters Modified: %s\n' % ', '.join(modified_summary)
return summary
def wait_till_change_set_complete(cfn_client, change_set_id, try_count=25,
                                  sleep_time=.5, max_sleep=3):
    """Poll DescribeChangeSet until the changeset settles.

    A changeset is considered settled when its ``Status`` is either
    ``FAILED`` or ``CREATE_COMPLETE``.  The delay between polls doubles
    each attempt, capped at *max_sleep* seconds.

    Raises:
        exceptions.ChangesetDidNotStabilize: if the changeset is still
            unsettled after *try_count* polls.
    """
    for _attempt in range(try_count):
        response = cfn_client.describe_change_set(
            ChangeSetName=change_set_id,
        )
        if response["Status"] in ("FAILED", "CREATE_COMPLETE"):
            return response
        if sleep_time == max_sleep:
            # Only log once the back-off has reached its ceiling, to avoid
            # spamming the debug log during the first quick polls.
            logger.debug(
                "Still waiting on changeset for another %s seconds",
                sleep_time
            )
        time.sleep(sleep_time)
        sleep_time = min(sleep_time * 2, max_sleep)
    raise exceptions.ChangesetDidNotStabilize(change_set_id)
def create_change_set(cfn_client, fqn, template, parameters, tags,
change_set_type='UPDATE', replacements_only=False,
service_role=None):
logger.debug("Attempting to create change set of type %s for stack: %s.",
change_set_type,
fqn)
args = generate_cloudformation_args(
fqn, parameters, tags, template,
change_set_type=change_set_type,
service_role=service_role,
change_set_name=get_change_set_name()
)
try:
response = cfn_client.create_change_set(**args)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Message'] == ('TemplateURL must reference '
'a valid S3 object to which '
'you have access.'):
response = s3_fallback(fqn, template, parameters,
tags, cfn_client.create_change_set,
get_change_set_name(),
service_role)
else:
raise
change_set_id = response["Id"]
response = wait_till_change_set_complete(
cfn_client, change_set_id
)
status = response["Status"]
if status == "FAILED":
status_reason = response["StatusReason"]
if ("didn't contain changes" in response["StatusReason"] or
"No updates are to be performed" in response["StatusReason"]):
logger.debug(
"Stack %s did not change, not updating and removing "
"changeset.",
fqn,
)
cfn_client.delete_change_set(ChangeSetName=change_set_id)
raise exceptions.StackDidNotChange()
logger.warn(
"Got strange status, '%s' for changeset '%s'. Not deleting for "
"further investigation - you will need to delete the changeset "
"manually.",
status, change_set_id
)
raise exceptions.UnhandledChangeSetStatus(
fqn, change_set_id, status, status_reason
)
execution_status = response["ExecutionStatus"]
if execution_status != "AVAILABLE":
raise exceptions.UnableToExecuteChangeSet(fqn,
change_set_id,
execution_status)
changes = response["Changes"]
return changes, change_set_id
def check_tags_contain(actual, expected):
    """Return True when every (Key, Value) pair in *expected* is also
    present in *actual*; extra tags in *actual* are allowed."""
    wanted = {(tag["Key"], tag["Value"]) for tag in expected}
    present = {(tag["Key"], tag["Value"]) for tag in actual}
    return wanted.issubset(present)
def generate_cloudformation_args(stack_name, parameters, tags, template,
                                 capabilities=DEFAULT_CAPABILITIES,
                                 change_set_type=None,
                                 service_role=None,
                                 stack_policy=None,
                                 change_set_name=None):
    """Build the keyword arguments shared by the CloudFormation
    create_stack / update_stack / create_change_set calls.

    The template is passed by URL when available, otherwise inline as
    ``TemplateBody``.  Optional arguments (service role, changeset
    name/type) are only included when set.
    """
    args = {
        "StackName": stack_name,
        "Parameters": parameters,
        "Tags": tags,
        "Capabilities": capabilities,
    }
    if service_role:
        args["RoleARN"] = service_role
    if change_set_name:
        args["ChangeSetName"] = change_set_name
    if change_set_type:
        args["ChangeSetType"] = change_set_type
    if template.url:
        args["TemplateURL"] = template.url
    else:
        args["TemplateBody"] = template.body
    # When creating args for CreateChangeSet, don't include the stack
    # policy: changesets don't accept one inline, so callers set it
    # separately before executing the changeset.
    if not change_set_name:
        args.update(generate_stack_policy_args(stack_policy))
    return args
def generate_stack_policy_args(stack_policy=None):
    """Build the stack-policy kwargs for a CloudFormation call.

    Returns an empty dict when no policy is given, otherwise a dict with
    ``StackPolicyBody``.  URL-backed policies are unsupported.
    """
    if not stack_policy:
        return {}
    logger.debug("Stack has a stack policy")
    if stack_policy.url:
        # stacker currently does not support uploading stack policies to
        # S3, so this branch cannot be reached in practice (unless S3
        # uploads get implemented later).
        raise NotImplementedError
    return {"StackPolicyBody": stack_policy.body}
class ProviderBuilder(object):
    """Factory producing Provider instances bound to a region/profile."""
    def __init__(self, region=None, **kwargs):
        # Extra keyword arguments are forwarded verbatim to every
        # Provider this builder creates.
        self.region = region
        self.kwargs = kwargs
    def build(self, region=None, profile=None):
        """Create a Provider, defaulting to the builder's region."""
        if not region:
            region = self.region
        session = get_session(region=region, profile=profile)
        return Provider(session, region=region, **self.kwargs)
class Provider(BaseProvider):
DELETED_STATUS = "DELETE_COMPLETE"
IN_PROGRESS_STATUSES = (
"CREATE_IN_PROGRESS",
"UPDATE_IN_PROGRESS",
"DELETE_IN_PROGRESS",
"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
)
ROLLING_BACK_STATUSES = (
"ROLLBACK_IN_PROGRESS",
"UPDATE_ROLLBACK_IN_PROGRESS"
)
FAILED_STATUSES = (
"CREATE_FAILED",
"ROLLBACK_FAILED",
"ROLLBACK_COMPLETE",
"DELETE_FAILED",
"UPDATE_ROLLBACK_FAILED",
"UPDATE_ROLLBACK_COMPLETE",
)
COMPLETE_STATUSES = (
"CREATE_COMPLETE",
"DELETE_COMPLETE",
"UPDATE_COMPLETE",
"UPDATE_ROLLBACK_COMPLETE",
)
RECREATION_STATUSES = (
"CREATE_FAILED",
"ROLLBACK_FAILED",
"ROLLBACK_COMPLETE",
)
def __init__(self, session, region=None, interactive=False,
replacements_only=False, recreate_failed=False,
service_role=None, **kwargs):
self._outputs = {}
self.region = region
self.cloudformation = get_cloudformation_client(session)
self.interactive = interactive
self.replacements_only = interactive and replacements_only
self.recreate_failed = interactive or recreate_failed
self.service_role = service_role
def get_stack(self, stack_name, **kwargs):
try:
return self.cloudformation.describe_stacks(
StackName=stack_name)['Stacks'][0]
except botocore.exceptions.ClientError as e:
if "does not exist" not in str(e):
raise
raise exceptions.StackDoesNotExist(stack_name)
def get_stack_status(self, stack, **kwargs):
return stack['StackStatus']
def is_stack_completed(self, stack, **kwargs):
return self.get_stack_status(stack) in self.COMPLETE_STATUSES
def is_stack_in_progress(self, stack, **kwargs):
return self.get_stack_status(stack) in self.IN_PROGRESS_STATUSES
def is_stack_destroyed(self, stack, **kwargs):
return self.get_stack_status(stack) == self.DELETED_STATUS
def is_stack_recreatable(self, stack, **kwargs):
return self.get_stack_status(stack) in self.RECREATION_STATUSES
def is_stack_rolling_back(self, stack, **kwargs):
return self.get_stack_status(stack) in self.ROLLING_BACK_STATUSES
def is_stack_failed(self, stack, **kwargs):
return self.get_stack_status(stack) in self.FAILED_STATUSES
def tail_stack(self, stack, cancel, retries=0, **kwargs):
def log_func(e):
event_args = [e['ResourceStatus'], e['ResourceType'],
e.get('ResourceStatusReason', None)]
event_args = [arg for arg in event_args if arg]
template = " ".join(["[%s]"] + ["%s" for _ in event_args])
logger.info(template, *([stack.fqn] + event_args))
if not retries:
logger.info("Tailing stack: %s", stack.fqn)
try:
self.tail(stack.fqn,
cancel=cancel,
log_func=log_func,
include_initial=False)
except botocore.exceptions.ClientError as e:
if "does not exist" in str(e) and retries < MAX_TAIL_RETRIES:
time.sleep(1)
self.tail_stack(stack, cancel, retries=retries + 1, **kwargs)
else:
raise
@staticmethod
def _tail_print(e):
print("%s %s %s" % (e['ResourceStatus'],
e['ResourceType'],
e['EventId']))
def get_events(self, stackname):
next_token = None
event_list = []
while 1:
if next_token is not None:
events = self.cloudformation.describe_stack_events(
StackName=stackname, NextToken=next_token
)
else:
events = self.cloudformation.describe_stack_events(
StackName=stackname
)
event_list.append(events['StackEvents'])
next_token = events.get('NextToken', None)
if next_token is None:
break
time.sleep(1)
return reversed(sum(event_list, []))
def tail(self, stack_name, cancel, log_func=_tail_print, sleep_time=5,
include_initial=True):
seen = set()
initial_events = self.get_events(stack_name)
for e in initial_events:
if include_initial:
log_func(e)
seen.add(e['EventId'])
# Now keep looping through and dump the new events
while 1:
events = self.get_events(stack_name)
for e in events:
if e['EventId'] not in seen:
log_func(e)
seen.add(e['EventId'])
if cancel.wait(sleep_time):
return
def destroy_stack(self, stack, **kwargs):
logger.debug("Destroying stack: %s" % (self.get_stack_name(stack)))
args = {"StackName": self.get_stack_name(stack)}
if self.service_role:
args["RoleARN"] = self.service_role
self.cloudformation.delete_stack(**args)
return True
def create_stack(self, fqn, template, parameters, tags,
force_change_set=False, stack_policy=None,
**kwargs):
logger.debug("Attempting to create stack %s:.", fqn)
logger.debug(" parameters: %s", parameters)
logger.debug(" tags: %s", tags)
if template.url:
logger.debug(" template_url: %s", template.url)
else:
logger.debug(" no template url, uploading template "
"directly.")
if force_change_set:
logger.debug("force_change_set set to True, creating stack with "
"changeset.")
_changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'CREATE', service_role=self.service_role, **kwargs
)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
)
else:
args = generate_cloudformation_args(
fqn, parameters, tags, template,
service_role=self.service_role,
stack_policy=stack_policy,
)
try:
self.cloudformation.create_stack(**args)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Message'] == ('TemplateURL must '
'reference a valid S3 '
'object to which you '
'have access.'):
s3_fallback(fqn, template, parameters, tags,
self.cloudformation.create_stack,
self.service_role)
else:
raise
def select_update_method(self, force_interactive, force_change_set):
if self.interactive or force_interactive:
return self.interactive_update_stack
elif force_change_set:
return self.noninteractive_changeset_update
else:
return self.default_update_stack
def prepare_stack_for_update(self, stack, tags):
if self.is_stack_destroyed(stack):
return False
elif self.is_stack_completed(stack):
return True
stack_name = self.get_stack_name(stack)
stack_status = self.get_stack_status(stack)
if self.is_stack_in_progress(stack):
raise exceptions.StackUpdateBadStatus(
stack_name, stack_status,
'Update already in-progress')
if not self.is_stack_recreatable(stack):
raise exceptions.StackUpdateBadStatus(
stack_name, stack_status,
'Unsupported state for re-creation')
if not self.recreate_failed:
raise exceptions.StackUpdateBadStatus(
stack_name, stack_status,
'Stack re-creation is disabled. Run stacker again with the '
'--recreate-failed option to force it to be deleted and '
'created from scratch.')
stack_tags = self.get_stack_tags(stack)
if not check_tags_contain(stack_tags, tags):
raise exceptions.StackUpdateBadStatus(
stack_name, stack_status,
'Tags differ from current configuration, possibly not created '
'with stacker')
if self.interactive:
sys.stdout.write(
'The \"%s\" stack is in a failed state (%s).\n'
'It cannot be updated, but it can be deleted and re-created.\n'
'All its current resources will IRREVERSIBLY DESTROYED.\n'
'Proceed carefully!\n\n' % (stack_name, stack_status))
sys.stdout.flush()
ask_for_approval(include_verbose=False)
logger.warn('Destroying stack \"%s\" for re-creation', stack_name)
self.destroy_stack(stack)
return False
def update_stack(self, fqn, template, old_parameters, parameters, tags,
force_interactive=False, force_change_set=False,
stack_policy=None, **kwargs):
logger.debug("Attempting to update stack %s:", fqn)
logger.debug(" parameters: %s", parameters)
logger.debug(" tags: %s", tags)
if template.url:
logger.debug(" template_url: %s", template.url)
else:
logger.debug(" no template url, uploading template directly.")
update_method = self.select_update_method(force_interactive,
force_change_set)
return update_method(fqn, template, old_parameters, parameters, tags,
stack_policy=stack_policy, **kwargs)
def interactive_update_stack(self, fqn, template, old_parameters,
parameters, tags, stack_policy=None,
**kwargs):
logger.debug("Using interactive provider mode for %s.", fqn)
changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'UPDATE', service_role=self.service_role, **kwargs
)
params_diff = diff_parameters(
self.params_as_dict(old_parameters),
self.params_as_dict(parameters))
action = "replacements" if self.replacements_only else "changes"
full_changeset = changes
if self.replacements_only:
changes = requires_replacement(changes)
if changes or params_diff:
ui.lock()
try:
output_summary(fqn, action, changes, params_diff,
replacements_only=self.replacements_only)
ask_for_approval(
full_changeset=full_changeset,
params_diff=params_diff,
include_verbose=True,
)
finally:
ui.unlock()
# ChangeSets don't support specifying a stack policy inline, like
if stack_policy:
kwargs = generate_stack_policy_args(stack_policy)
kwargs["StackName"] = fqn
self.cloudformation.set_stack_policy(**kwargs)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
)
def noninteractive_changeset_update(self, fqn, template, old_parameters,
parameters, tags, **kwargs):
logger.debug("Using noninterative changeset provider mode "
"for %s.", fqn)
_changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'UPDATE', service_role=self.service_role, **kwargs
)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
)
def default_update_stack(self, fqn, template, old_parameters, parameters,
tags, stack_policy=None, **kwargs):
logger.debug("Using default provider mode for %s.", fqn)
args = generate_cloudformation_args(
fqn, parameters, tags, template,
service_role=self.service_role,
stack_policy=stack_policy,
)
try:
self.cloudformation.update_stack(**args)
except botocore.exceptions.ClientError as e:
if "No updates are to be performed." in str(e):
logger.debug(
"Stack %s did not change, not updating.",
fqn,
)
raise exceptions.StackDidNotChange
elif e.response['Error']['Message'] == ('TemplateURL must '
'reference a valid '
'S3 object to which '
'you have access.'):
s3_fallback(fqn, template, parameters, tags,
self.cloudformation.update_stack,
self.service_role)
else:
raise
def get_stack_name(self, stack, **kwargs):
return stack['StackName']
def get_stack_tags(self, stack, **kwargs):
return stack['Tags']
def get_outputs(self, stack_name, *args, **kwargs):
if stack_name not in self._outputs:
stack = self.get_stack(stack_name)
self._outputs[stack_name] = get_output_dict(stack)
return self._outputs[stack_name]
def get_output_dict(self, stack):
return get_output_dict(stack)
def get_stack_info(self, stack):
stack_name = stack['StackId']
try:
template = self.cloudformation.get_template(
StackName=stack_name)['TemplateBody']
except botocore.exceptions.ClientError as e:
if "does not exist" not in str(e):
raise
raise exceptions.StackDoesNotExist(stack_name)
parameters = self.params_as_dict(stack.get('Parameters', []))
return [json.dumps(template), parameters]
@staticmethod
def params_as_dict(parameters_list):
parameters = dict()
for p in parameters_list:
parameters[p['ParameterKey']] = p['ParameterValue']
return parameters
| true | true |
1c47eda88b7276191251c02fb6fc7111b8dec436 | 1,147 | py | Python | wxpython/02-temperature-converter.py | kurtmckee/learning | fa8dc08ee0d81b71d3ad569a02b390e7dee35d5a | [
"MIT"
] | null | null | null | wxpython/02-temperature-converter.py | kurtmckee/learning | fa8dc08ee0d81b71d3ad569a02b390e7dee35d5a | [
"MIT"
] | null | null | null | wxpython/02-temperature-converter.py | kurtmckee/learning | fa8dc08ee0d81b71d3ad569a02b390e7dee35d5a | [
"MIT"
] | null | null | null | import wx
class Converter(wx.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
panel = wx.Panel(self)
sizer = wx.BoxSizer()
panel.SetSizer(sizer)
self.input_box = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER)
self.input_box.Bind(wx.EVT_TEXT_ENTER, self.calculate)
sizer.Add(self.input_box)
button = wx.Button(panel, label='F --> C')
button.Bind(wx.EVT_BUTTON, self.calculate)
sizer.Add(button)
self.output_label = wx.StaticText(panel, -1, label='--- C')
sizer.Add(self.output_label)
def calculate(self, event):
try:
f = int(self.input_box.GetValue())
except ValueError:
self.output_label.SetLabel('Invalid')
return
c = (f - 32) * 5 / 9
if c < -273.15:
self.output_label.SetLabel('Impossibly cold!')
else:
self.output_label.SetLabel(f'{c:.1f} C')
app = wx.App()
frame = Converter(None, title='Temperature Converter')
frame.Show()
app.MainLoop()
| 26.674419 | 71 | 0.558849 | import wx
class Converter(wx.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
panel = wx.Panel(self)
sizer = wx.BoxSizer()
panel.SetSizer(sizer)
self.input_box = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER)
self.input_box.Bind(wx.EVT_TEXT_ENTER, self.calculate)
sizer.Add(self.input_box)
button = wx.Button(panel, label='F --> C')
button.Bind(wx.EVT_BUTTON, self.calculate)
sizer.Add(button)
self.output_label = wx.StaticText(panel, -1, label='--- C')
sizer.Add(self.output_label)
def calculate(self, event):
try:
f = int(self.input_box.GetValue())
except ValueError:
self.output_label.SetLabel('Invalid')
return
c = (f - 32) * 5 / 9
if c < -273.15:
self.output_label.SetLabel('Impossibly cold!')
else:
self.output_label.SetLabel(f'{c:.1f} C')
app = wx.App()
frame = Converter(None, title='Temperature Converter')
frame.Show()
app.MainLoop()
| true | true |
1c47ee58a2b4e8bd47920723aea259e394c0a8c6 | 3,278 | py | Python | tests/test_edgeql_datatypes.py | mcaramma/edgedb | 53b18dbaf7407617ca135d1f8a5047bda6414654 | [
"Apache-2.0"
] | 2 | 2019-12-09T12:52:58.000Z | 2020-02-20T15:20:22.000Z | tests/test_edgeql_datatypes.py | 1st1/edgedb | 3e234aede215d4fc517be9397a25bb16e5f1ace3 | [
"Apache-2.0"
] | null | null | null | tests/test_edgeql_datatypes.py | 1st1/edgedb | 3e234aede215d4fc517be9397a25bb16e5f1ace3 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2012-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest # NOQA
from edb.server import _testbase as tb
from edb.client import exceptions as exc
class TestEdgeQLDT(tb.QueryTestCase):
SETUP = '''
CREATE MIGRATION default::m TO eschema $$
scalar type seq_t extending sequence
scalar type seq2_t extending sequence
type Obj:
property seq_prop -> seq_t
type Obj2:
property seq_prop -> seq2_t
$$;
COMMIT MIGRATION default::m;
'''
async def test_edgeql_dt_datetime_01(self):
await self.assert_query_result('''
SELECT <datetime>'2017-10-10' + <timedelta>'1 day';
SELECT <timedelta>'1 day' + <datetime>'2017-10-10';
SELECT <datetime>'2017-10-10' - <timedelta>'1 day';
SELECT <timedelta>'1 day' + <timedelta>'1 day';
SELECT <timedelta>'4 days' - <timedelta>'1 day';
''', [
['2017-10-11T00:00:00+00:00'],
['2017-10-11T00:00:00+00:00'],
['2017-10-09T00:00:00+00:00'],
['2 days'],
['3 days'],
])
with self.assertRaisesRegex(
exc.EdgeQLError,
'operator `-` is not defined for types.*timedelta.*datetime'):
await self.con.execute("""
SELECT <timedelta>'1 day' - <datetime>'2017-10-10';
""")
async def test_edgeql_dt_datetime_02(self):
await self.assert_query_result('''
SELECT <str><datetime>'2017-10-10';
SELECT <str>(<datetime>'2017-10-10' - <timedelta>'1 day');
''', [
['2017-10-10T00:00:00+00:00'],
['2017-10-09T00:00:00+00:00'],
])
@unittest.expectedFailure
async def test_edgeql_dt_datetime_03(self):
await self.assert_query_result('''
SELECT <tuple<str,datetime>>('foo', '2020-10-10');
SELECT (<tuple<str,datetime>>('foo', '2020-10-10')).1 +
<timedelta>'1 month';
''', [
[{'foo': '2020-10-10T00:00:00+00:00'}],
['2020-11-10T00:00:00+00:00'],
])
async def test_edgeql_dt_sequence_01(self):
await self.assert_query_result('''
INSERT Obj;
INSERT Obj;
INSERT Obj2;
SELECT Obj { seq_prop } ORDER BY Obj.seq_prop;
SELECT Obj2 { seq_prop };
''', [
[1],
[1],
[1],
[
{'seq_prop': 1}, {'seq_prop': 2}
],
[
{'seq_prop': 1}
],
])
| 31.825243 | 78 | 0.552776 |
import unittest
from edb.server import _testbase as tb
from edb.client import exceptions as exc
class TestEdgeQLDT(tb.QueryTestCase):
SETUP = '''
CREATE MIGRATION default::m TO eschema $$
scalar type seq_t extending sequence
scalar type seq2_t extending sequence
type Obj:
property seq_prop -> seq_t
type Obj2:
property seq_prop -> seq2_t
$$;
COMMIT MIGRATION default::m;
'''
async def test_edgeql_dt_datetime_01(self):
await self.assert_query_result('''
SELECT <datetime>'2017-10-10' + <timedelta>'1 day';
SELECT <timedelta>'1 day' + <datetime>'2017-10-10';
SELECT <datetime>'2017-10-10' - <timedelta>'1 day';
SELECT <timedelta>'1 day' + <timedelta>'1 day';
SELECT <timedelta>'4 days' - <timedelta>'1 day';
''', [
['2017-10-11T00:00:00+00:00'],
['2017-10-11T00:00:00+00:00'],
['2017-10-09T00:00:00+00:00'],
['2 days'],
['3 days'],
])
with self.assertRaisesRegex(
exc.EdgeQLError,
'operator `-` is not defined for types.*timedelta.*datetime'):
await self.con.execute("""
SELECT <timedelta>'1 day' - <datetime>'2017-10-10';
""")
async def test_edgeql_dt_datetime_02(self):
await self.assert_query_result('''
SELECT <str><datetime>'2017-10-10';
SELECT <str>(<datetime>'2017-10-10' - <timedelta>'1 day');
''', [
['2017-10-10T00:00:00+00:00'],
['2017-10-09T00:00:00+00:00'],
])
@unittest.expectedFailure
async def test_edgeql_dt_datetime_03(self):
await self.assert_query_result('''
SELECT <tuple<str,datetime>>('foo', '2020-10-10');
SELECT (<tuple<str,datetime>>('foo', '2020-10-10')).1 +
<timedelta>'1 month';
''', [
[{'foo': '2020-10-10T00:00:00+00:00'}],
['2020-11-10T00:00:00+00:00'],
])
async def test_edgeql_dt_sequence_01(self):
await self.assert_query_result('''
INSERT Obj;
INSERT Obj;
INSERT Obj2;
SELECT Obj { seq_prop } ORDER BY Obj.seq_prop;
SELECT Obj2 { seq_prop };
''', [
[1],
[1],
[1],
[
{'seq_prop': 1}, {'seq_prop': 2}
],
[
{'seq_prop': 1}
],
])
| true | true |
1c47ee6d294ce288e1a41b88f9ca63742633f99f | 37,265 | py | Python | tests/testsuite.py | felixxm/unittest-xml-reporting | 0ef90a6f2565430c4e8c19b4b4741a971a8b4041 | [
"BSD-2-Clause-FreeBSD"
] | 212 | 2015-01-08T13:32:40.000Z | 2022-03-31T21:32:23.000Z | tests/testsuite.py | felixxm/unittest-xml-reporting | 0ef90a6f2565430c4e8c19b4b4741a971a8b4041 | [
"BSD-2-Clause-FreeBSD"
] | 169 | 2015-01-22T20:50:17.000Z | 2022-03-23T06:23:08.000Z | tests/testsuite.py | felixxm/unittest-xml-reporting | 0ef90a6f2565430c4e8c19b4b4741a971a8b4041 | [
"BSD-2-Clause-FreeBSD"
] | 104 | 2015-01-16T19:50:36.000Z | 2022-03-18T20:49:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Executable module to test unittest-xml-reporting.
"""
from __future__ import print_function
import contextlib
import io
import sys
from xmlrunner.unittest import unittest
import xmlrunner
from xmlrunner.result import _DuplicateWriter
from xmlrunner.result import _XMLTestResult
from xmlrunner.result import resolve_filename
import doctest
import tests.doctest_example
from io import StringIO, BytesIO
from tempfile import mkdtemp
from tempfile import mkstemp
from shutil import rmtree
from glob import glob
from xml.dom import minidom
from lxml import etree
import os
import os.path
from unittest import mock
def _load_schema(version):
    """Load and compile the vendored Jenkins xunit-plugin XSD for *version*.

    Returns an ``etree.XMLSchema``; I/O and parse errors propagate to the
    caller.  The previous trailing ``raise RuntimeError`` was unreachable
    (the ``with`` block always returned or raised) and has been dropped.
    """
    path = os.path.join(
        os.path.dirname(__file__),
        'vendor/jenkins/xunit-plugin', version, 'junit-10.xsd')
    with open(path, 'r') as schema_file:
        schema_doc = etree.parse(schema_file)
    return etree.XMLSchema(schema_doc)
def validate_junit_report(version, text):
    """Validate report bytes *text* against the vendored JUnit schema.

    *version* selects the xunit-plugin schema directory.  lxml's
    ``assertValid`` raises if the document does not conform.
    """
    document = etree.parse(BytesIO(text))
    schema = _load_schema(version)
    schema.assertValid(document)
class DoctestTest(unittest.TestCase):
    """XML reports generated from a doctest suite carry usable names."""
    def test_doctest_example(self):
        # Run the doctests from tests.doctest_example through the runner
        # and check that both a class-level doctest (Multiplicator) and a
        # module-level doctest (twice) are reported with their qualified
        # classname and doctest name.
        suite = doctest.DocTestSuite(tests.doctest_example)
        outdir = BytesIO()
        stream = StringIO()
        runner = xmlrunner.XMLTestRunner(
            stream=stream, output=outdir, verbosity=0)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        self.assertIn('classname="tests.doctest_example.Multiplicator"'.encode('utf8'), output)
        self.assertIn('name="threetimes"'.encode('utf8'), output)
        self.assertIn('classname="tests.doctest_example"'.encode('utf8'), output)
        self.assertIn('name="twice"'.encode('utf8'), output)
@contextlib.contextmanager
def capture_stdout_stderr():
    """Temporarily replace sys.stdout/sys.stderr with StringIO buffers.

    Yields the ``(stdout, stderr)`` buffer pair; the original streams are
    restored on exit even if the body raises.
    """
    saved_streams = (sys.stdout, sys.stderr)
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield (sys.stdout, sys.stderr)
    finally:
        (sys.stdout, sys.stderr) = saved_streams
def _strip_xml(xml, changes):
    """Drop attributes from *xml* so assertions only see relevant ones.

    *changes* maps an XPath expression to the tuple of attribute names to
    KEEP on every matching node; all other attributes are deleted.
    Returns the serialized document as bytes.
    """
    root = etree.fromstring(xml)
    for xpath, kept_attributes in changes.items():
        for node in root.xpath(xpath):
            for attr_name in list(node.attrib):
                if attr_name not in kept_attributes:
                    del node.attrib[attr_name]
    return etree.tostring(root)
def some_decorator(f):
    # for issue #195
    # Deliberately builds the wrapper via exec so it does NOT carry the
    # wrapped function's metadata (its __name__ stays 'wrapper').
    code = """\
def wrapper(*args, **kwargs):
    return func(*args, **kwargs)
"""
    scope = {'func': f}
    exec(code, scope)
    return scope['wrapper']
class XMLTestRunnerTestCase(unittest.TestCase):
"""
XMLTestRunner test case.
"""
    class DummyTest(unittest.TestCase):
        # Fixture case: one method per runner scenario (skips, failures,
        # errors, unicode, CDATA, stdout/stderr capture).  Many methods
        # fail ON PURPOSE.  No docstrings are added to the test methods
        # here -- unittest would pick them up as test descriptions and
        # could change the report content the outer tests assert on.
        @unittest.skip("demonstrating skipping")
        def test_skip(self):
            pass # pragma: no cover
        @unittest.skip(u"demonstrating non-ascii skipping: éçà")
        def test_non_ascii_skip(self):
            pass # pragma: no cover
        def test_pass(self):
            pass
        # Deliberate failure.
        def test_fail(self):
            self.assertTrue(False)
        @unittest.expectedFailure
        def test_expected_failure(self):
            self.assertTrue(False)
        # Passing despite @expectedFailure -> "unexpected success".
        @unittest.expectedFailure
        def test_unexpected_success(self):
            pass
        # Deliberate error (ZeroDivisionError).
        def test_error(self):
            1 / 0
        # Output containing a literal CDATA terminator.
        def test_cdata_section(self):
            print('<![CDATA[content]]>')
        def test_invalid_xml_chars_in_doc(self):
            """
            Testing comments, -- is not allowed, or invalid xml 1.0 chars such as \x0c
            """
            pass
        def test_non_ascii_error(self):
            self.assertEqual(u"éçà", 42)
        # Control characters that are invalid in XML 1.0.
        def test_unsafe_unicode(self):
            print(u"A\x00B\x08C\x0BD\x0C")
        def test_output_stdout_and_stderr(self):
            print('test on stdout')
            print('test on stderr', file=sys.stderr)
        def test_runner_buffer_output_pass(self):
            print('should not be printed')
        def test_runner_buffer_output_fail(self):
            print('should be printed')
            self.fail('expected to fail')
        def test_output(self):
            print('test message')
        def test_non_ascii_runner_buffer_output_fail(self):
            print(u'Where is the café ?')
            self.fail(u'The café could not be found')
    class DummySubTest(unittest.TestCase):
        # Fixture case for subTest() reporting: all-pass, all-fail,
        # all-error, mixed outcomes, and subtest parameters containing
        # dots (which must not be mistaken for classname separators).
        def test_subTest_pass(self):
            for i in range(2):
                with self.subTest(i=i):
                    pass
        def test_subTest_fail(self):
            for i in range(2):
                with self.subTest(i=i):
                    self.fail('this is a subtest.')
        def test_subTest_error(self):
            for i in range(2):
                with self.subTest(i=i):
                    raise Exception('this is a subtest')
        def test_subTest_mixed(self):
            # Only i == 1 fails, producing one pass and one failure.
            for i in range(2):
                with self.subTest(i=i):
                    self.assertLess(i, 1, msg='this is a subtest.')
        def test_subTest_with_dots(self):
            for i in range(2):
                with self.subTest(module='hello.world.subTest{}'.format(i)):
                    self.fail('this is a subtest.')
    class DecoratedUnitTest(unittest.TestCase):
        # Fixture whose test method is wrapped by some_decorator(), a
        # wrapper that does not copy function metadata -- presumably to
        # exercise reporting of decorated tests (see "issue #195" note on
        # some_decorator); confirm against the test using this fixture.
        @some_decorator
        def test_pass(self):
            pass
    class DummyErrorInCallTest(unittest.TestCase):
        # Fixture overriding __call__ to register an error directly on the
        # result object instead of ever running the test method.
        def __call__(self, result):
            try:
                raise Exception('Massive fail')
            except Exception:
                result.addError(self, sys.exc_info())
                return
        def test_pass(self):
            # it is expected not to be called.
            pass # pragma: no cover
    class DummyRefCountTest(unittest.TestCase):
        # NOTE(review): `inst` is deliberately unused -- the failing test
        # keeps a local instance alive in the traceback's frame locals,
        # presumably to exercise reference handling when the result stores
        # the failure; confirm against the test that uses this fixture.
        class dummy(object):
            pass
        def test_fail(self):
            inst = self.dummy()
            self.assertTrue(False)
    def setUp(self):
        # Shared fixtures read by _test_xmlrunner(): a capture stream for
        # the runner's console output, a temporary report directory
        # (removed again via addCleanup), and default runner options that
        # individual tests may override before running a suite.
        self.stream = StringIO()
        self.outdir = mkdtemp()
        self.verbosity = 0
        self.runner_kwargs = {}
        self.addCleanup(rmtree, self.outdir)
    def _test_xmlrunner(self, suite, runner=None, outdir=None):
        # Helper: run *suite* through an XMLTestRunner and assert exactly
        # one report is produced.  *outdir* may be a directory path
        # (default: self.outdir) or a BytesIO stream; returns the runner.
        if outdir is None:
            outdir = self.outdir
        stream = self.stream
        verbosity = self.verbosity
        runner_kwargs = self.runner_kwargs
        if runner is None:
            runner = xmlrunner.XMLTestRunner(
                stream=stream, output=outdir, verbosity=verbosity,
                **runner_kwargs)
        # No report may exist before the run...
        if isinstance(outdir, BytesIO):
            self.assertFalse(outdir.getvalue())
        else:
            self.assertEqual(0, len(glob(os.path.join(outdir, '*xml'))))
        runner.run(suite)
        # ...and exactly one report must exist afterwards.
        if isinstance(outdir, BytesIO):
            self.assertTrue(outdir.getvalue())
        else:
            self.assertEqual(1, len(glob(os.path.join(outdir, '*xml'))))
        return runner
def test_basic_unittest_constructs(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
suite.addTest(self.DummyTest('test_skip'))
suite.addTest(self.DummyTest('test_fail'))
suite.addTest(self.DummyTest('test_expected_failure'))
suite.addTest(self.DummyTest('test_unexpected_success'))
suite.addTest(self.DummyTest('test_error'))
self._test_xmlrunner(suite)
def test_classnames(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
suite.addTest(self.DummySubTest('test_subTest_pass'))
outdir = BytesIO()
stream = StringIO()
runner = xmlrunner.XMLTestRunner(
stream=stream, output=outdir, verbosity=0)
runner.run(suite)
outdir.seek(0)
output = outdir.read()
output = _strip_xml(output, {
'//testsuite': (),
'//testcase': ('classname', 'name'),
'//failure': ('message',),
})
self.assertRegexpMatches(
output,
r'classname="tests\.testsuite\.(XMLTestRunnerTestCase\.)?'
r'DummyTest" name="test_pass"'.encode('utf8'),
)
self.assertRegexpMatches(
output,
r'classname="tests\.testsuite\.(XMLTestRunnerTestCase\.)?'
r'DummySubTest" name="test_subTest_pass"'.encode('utf8'),
)
def test_expected_failure(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_expected_failure'))
outdir = BytesIO()
self._test_xmlrunner(suite, outdir=outdir)
self.assertNotIn(b'<failure', outdir.getvalue())
self.assertNotIn(b'<error', outdir.getvalue())
self.assertIn(b'<skip', outdir.getvalue())
def test_unexpected_success(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_unexpected_success'))
outdir = BytesIO()
self._test_xmlrunner(suite, outdir=outdir)
self.assertNotIn(b'<failure', outdir.getvalue())
self.assertIn(b'<error', outdir.getvalue())
self.assertNotIn(b'<skip', outdir.getvalue())
    def test_xmlrunner_non_ascii(self):
        # Non-ASCII skip reasons and assertion messages must appear
        # UTF-8 encoded in the generated report.
        suite = unittest.TestSuite()
        suite.addTest(self.DummyTest('test_non_ascii_skip'))
        suite.addTest(self.DummyTest('test_non_ascii_error'))
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            **self.runner_kwargs)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        self.assertIn(
            u'message="demonstrating non-ascii skipping: éçà"'.encode('utf8'),
            output)
    def test_xmlrunner_safe_xml_encoding_name(self):
        # The XML declaration (first line of the report) must advertise
        # the canonical encoding name "UTF-8".
        suite = unittest.TestSuite()
        suite.addTest(self.DummyTest('test_pass'))
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            **self.runner_kwargs)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        firstline = output.splitlines()[0]
        # test for issue #74
        self.assertIn('encoding="UTF-8"'.encode('utf8'), firstline)
    def test_xmlrunner_check_for_valid_xml_streamout(self):
        """
        This test checks if the xml document is valid if there are more than
        one testsuite and the output of the report is a single stream.
        """
        # Two independent TestCase classes yield two <testsuite> elements;
        # written to one stream they must still form one well-formed
        # document (parseable by minidom).
        class DummyTestA(unittest.TestCase):
            def test_pass(self):
                pass
        class DummyTestB(unittest.TestCase):
            def test_pass(self):
                pass
        suite = unittest.TestSuite()
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DummyTestA))
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DummyTestB))
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            **self.runner_kwargs)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        # Finally check if we have a valid XML document or not.
        try:
            minidom.parseString(output)
        except Exception as e: # pragma: no cover
            # note: we could remove the try/except, but it's more crude.
            self.fail(e)
    def test_xmlrunner_unsafe_unicode(self):
        # The fixture prints "A\x00B\x08C\x0BD\x0C"; the control
        # characters are illegal in XML 1.0 and must be gone from the
        # CDATA section, leaving just "ABCD".
        suite = unittest.TestSuite()
        suite.addTest(self.DummyTest('test_unsafe_unicode'))
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            **self.runner_kwargs)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        self.assertIn(u"<![CDATA[ABCD\n]]>".encode('utf8'),
                      output)
def test_xmlrunner_non_ascii_failures(self):
self._xmlrunner_non_ascii_failures()
    def test_xmlrunner_non_ascii_failures_buffered_output(self):
        # Same check with output buffering enabled (--buffer).
        self._xmlrunner_non_ascii_failures(buffer=True)
    def _xmlrunner_non_ascii_failures(self, buffer=False):
        """Non-ascii text printed by a failing test, and its non-ascii failure
        message, must both reach the XML report (with or without --buffer)."""
        suite = unittest.TestSuite()
        suite.addTest(self.DummyTest(
            'test_non_ascii_runner_buffer_output_fail'))
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            buffer=buffer, **self.runner_kwargs)
        # allow output non-ascii letters to stdout
        orig_stdout = sys.stdout
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
        try:
            runner.run(suite)
        finally:
            # Not to be closed when TextIOWrapper is disposed.
            sys.stdout.detach()
            sys.stdout = orig_stdout
        outdir.seek(0)
        output = outdir.read()
        self.assertIn(
            u'Where is the café ?'.encode('utf8'),
            output)
        self.assertIn(
            u'The café could not be found'.encode('utf8'),
            output)
    @unittest.expectedFailure
    def test_xmlrunner_buffer_output_pass(self):
        """Output of a passing test should be swallowed (known failure)."""
        suite = unittest.TestSuite()
        suite.addTest(self.DummyTest('test_runner_buffer_output_pass'))
        self._test_xmlrunner(suite)
        testsuite_output = self.stream.getvalue()
        # Since we are always buffering stdout/stderr
        # it is currently troublesome to print anything at all
        # and be consistent with --buffer option (issue #59)
        self.assertIn('should not be printed', testsuite_output)
        # this will be fixed when using the composite approach
        # that was under development in the rewrite branch.
def test_xmlrunner_buffer_output_fail(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_runner_buffer_output_fail'))
# --buffer option
self.runner_kwargs['buffer'] = True
self._test_xmlrunner(suite)
testsuite_output = self.stream.getvalue()
self.assertIn('should be printed', testsuite_output)
def test_xmlrunner_output_without_buffer(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_output'))
with capture_stdout_stderr() as r:
self._test_xmlrunner(suite)
output_from_test = r[0].getvalue()
self.assertIn('test message', output_from_test)
def test_xmlrunner_output_with_buffer(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_output'))
# --buffer option
self.runner_kwargs['buffer'] = True
with capture_stdout_stderr() as r:
self._test_xmlrunner(suite)
output_from_test = r[0].getvalue()
self.assertNotIn('test message', output_from_test)
def test_xmlrunner_stdout_stderr_recovered_without_buffer(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self._test_xmlrunner(suite)
self.assertIs(orig_stdout, sys.stdout)
self.assertIs(orig_stderr, sys.stderr)
def test_xmlrunner_stdout_stderr_recovered_with_buffer(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
# --buffer option
self.runner_kwargs['buffer'] = True
self._test_xmlrunner(suite)
self.assertIs(orig_stdout, sys.stdout)
self.assertIs(orig_stderr, sys.stderr)
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
'unittest.TestCase.subTest not present.')
def test_unittest_subTest_fail(self):
# test for issue #77
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
**self.runner_kwargs)
suite = unittest.TestSuite()
suite.addTest(self.DummySubTest('test_subTest_fail'))
runner.run(suite)
outdir.seek(0)
output = outdir.read()
output = _strip_xml(output, {
'//testsuite': (),
'//testcase': ('classname', 'name'),
'//failure': ('message',),
})
self.assertRegexpMatches(
output,
br'<testcase classname="tests\.testsuite\.'
br'(XMLTestRunnerTestCase\.)?DummySubTest" '
br'name="test_subTest_fail \(i=0\)"')
self.assertRegexpMatches(
output,
br'<testcase classname="tests\.testsuite\.'
br'(XMLTestRunnerTestCase\.)?DummySubTest" '
br'name="test_subTest_fail \(i=1\)"')
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
'unittest.TestCase.subTest not present.')
def test_unittest_subTest_error(self):
# test for issue #155
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
**self.runner_kwargs)
suite = unittest.TestSuite()
suite.addTest(self.DummySubTest('test_subTest_error'))
runner.run(suite)
outdir.seek(0)
output = outdir.read()
output = _strip_xml(output, {
'//testsuite': (),
'//testcase': ('classname', 'name'),
'//failure': ('message',),
})
self.assertRegexpMatches(
output,
br'<testcase classname="tests\.testsuite\.'
br'(XMLTestRunnerTestCase\.)?DummySubTest" '
br'name="test_subTest_error \(i=0\)"')
self.assertRegexpMatches(
output,
br'<testcase classname="tests\.testsuite\.'
br'(XMLTestRunnerTestCase\.)?DummySubTest" '
br'name="test_subTest_error \(i=1\)"')
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
'unittest.TestCase.subTest not present.')
def test_unittest_subTest_mixed(self):
# test for issue #155
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
**self.runner_kwargs)
suite = unittest.TestSuite()
suite.addTest(self.DummySubTest('test_subTest_mixed'))
runner.run(suite)
outdir.seek(0)
output = outdir.read()
output = _strip_xml(output, {
'//testsuite': (),
'//testcase': ('classname', 'name'),
'//failure': ('message',),
})
self.assertNotIn(
'name="test_subTest_mixed (i=0)"'.encode('utf8'),
output)
self.assertIn(
'name="test_subTest_mixed (i=1)"'.encode('utf8'),
output)
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
'unittest.TestCase.subTest not present.')
def test_unittest_subTest_pass(self):
# Test for issue #85
suite = unittest.TestSuite()
suite.addTest(self.DummySubTest('test_subTest_pass'))
self._test_xmlrunner(suite)
    @unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
                     'unittest.TestCase.subTest not present.')
    def test_unittest_subTest_with_dots(self):
        """Dots inside subTest parameter values must not be mistaken for the
        class/method separator when building classname/name (issue #85)."""
        suite = unittest.TestSuite()
        suite.addTest(self.DummySubTest('test_subTest_with_dots'))
        outdir = BytesIO()
        self._test_xmlrunner(suite, outdir=outdir)
        xmlcontent = outdir.getvalue().decode()
        # Method name: must start at the real method, not inside the params.
        self.assertNotIn('name="subTest', xmlcontent, 'parsing of test method name is not done correctly')
        self.assertIn('name="test_subTest_with_dots (module=\'hello.world.subTest', xmlcontent)
        # Class name: must stop at the class, not swallow method + params.
        matchString = 'classname="tests.testsuite.XMLTestRunnerTestCase.DummySubTest.test_subTest_with_dots (module=\'hello.world"'
        self.assertNotIn(matchString, xmlcontent, 'parsing of class name is not done correctly')
        self.assertIn('classname="tests.testsuite.XMLTestRunnerTestCase.DummySubTest"', xmlcontent)
def test_xmlrunner_pass(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self._test_xmlrunner(suite)
def test_xmlrunner_failfast(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_fail'))
suite.addTest(self.DummyTest('test_pass'))
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir,
verbosity=self.verbosity, failfast=True,
**self.runner_kwargs)
runner.run(suite)
outdir.seek(0)
output = outdir.read()
self.assertIn('test_fail'.encode('utf8'), output)
self.assertNotIn('test_pass'.encode('utf8'), output)
def test_xmlrunner_verbose(self):
self.verbosity = 1
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self._test_xmlrunner(suite)
def test_xmlrunner_showall(self):
self.verbosity = 2
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self._test_xmlrunner(suite)
def test_xmlrunner_cdata_section(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_cdata_section'))
self._test_xmlrunner(suite)
    def test_xmlrunner_invalid_xml_chars_in_doc(self):
        """Characters illegal in XML appearing in a docstring must not
        produce an unparseable report."""
        suite = unittest.TestSuite()
        suite.addTest(self.DummyTest('test_invalid_xml_chars_in_doc'))
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            **self.runner_kwargs)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        # Finally check if we have a valid XML document or not.
        try:
            minidom.parseString(output)
        except Exception as e:  # pragma: no cover
            # note: we could remove the try/except, but it's more crude.
            self.fail(e)
def test_xmlrunner_outsuffix(self):
self.runner_kwargs['outsuffix'] = '.somesuffix'
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self._test_xmlrunner(suite)
xmlfile = glob(os.path.join(self.outdir, '*xml'))[0]
assert xmlfile.endswith('.somesuffix.xml')
def test_xmlrunner_nosuffix(self):
self.runner_kwargs['outsuffix'] = ''
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self._test_xmlrunner(suite)
xmlfile = glob(os.path.join(self.outdir, '*xml'))[0]
xmlfile = os.path.basename(xmlfile)
assert xmlfile.endswith('DummyTest.xml')
def test_junitxml_properties(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
suite.properties = dict(key='value')
self._test_xmlrunner(suite)
    def test_junitxml_xsd_validation_order(self):
        """Child elements must appear in the order the JUnit XSD mandates:
        properties, testcases, system-out, system-err (issue #90)."""
        suite = unittest.TestSuite()
        suite.addTest(self.DummyTest('test_fail'))
        suite.addTest(self.DummyTest('test_pass'))
        suite.addTest(self.DummyTest('test_output_stdout_and_stderr'))
        suite.properties = dict(key='value')
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            **self.runner_kwargs)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        # poor man's schema validation; see issue #90
        i_properties = output.index('<properties>'.encode('utf8'))
        i_system_out = output.index('<system-out>'.encode('utf8'))
        i_system_err = output.index('<system-err>'.encode('utf8'))
        i_testcase = output.index('<testcase'.encode('utf8'))
        self.assertTrue(i_properties < i_testcase <
                        i_system_out < i_system_err)
        # XSD validation - for good measure.
        validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', output)
def test_junitxml_xsd_validation_empty_properties(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_fail'))
suite.addTest(self.DummyTest('test_pass'))
suite.properties = None
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
**self.runner_kwargs)
runner.run(suite)
outdir.seek(0)
output = outdir.read()
self.assertNotIn('<properties>'.encode('utf8'), output)
validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', output)
    @unittest.skipIf(hasattr(sys, 'pypy_version_info'),
                     'skip - PyPy + lxml seems to be hanging')
    def test_xunit_plugin_transform(self):
        """The xunit_plugin transform must convert a report that validates
        only against the older schema into one valid under both vendored
        schema versions (the hex ids are vendored schema directory names)."""
        suite = unittest.TestSuite()
        suite.addTest(self.DummyTest('test_fail'))
        suite.addTest(self.DummyTest('test_pass'))
        suite.properties = None
        outdir = BytesIO()
        runner = xmlrunner.XMLTestRunner(
            stream=self.stream, output=outdir, verbosity=self.verbosity,
            **self.runner_kwargs)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        # Raw output: valid under the first schema, invalid under the second.
        validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', output)
        with self.assertRaises(etree.DocumentInvalid):
            validate_junit_report('ae25da5089d4f94ac6c4669bf736e4d416cc4665', output)
        from xmlrunner.extra.xunit_plugin import transform
        transformed = transform(output)
        # Transformed output: valid under both, test names preserved.
        validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', transformed)
        validate_junit_report('ae25da5089d4f94ac6c4669bf736e4d416cc4665', transformed)
        self.assertIn('test_pass'.encode('utf8'), transformed)
        self.assertIn('test_fail'.encode('utf8'), transformed)
def test_xmlrunner_elapsed_times(self):
self.runner_kwargs['elapsed_times'] = False
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self._test_xmlrunner(suite)
def test_xmlrunner_resultclass(self):
class Result(_XMLTestResult):
pass
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self.runner_kwargs['resultclass'] = Result
self._test_xmlrunner(suite)
def test_xmlrunner_stream(self):
stream = self.stream
output = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=stream, output=output, verbosity=self.verbosity,
**self.runner_kwargs)
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
runner.run(suite)
def test_xmlrunner_stream_empty_testsuite(self):
stream = self.stream
output = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=stream, output=output, verbosity=self.verbosity,
**self.runner_kwargs)
suite = unittest.TestSuite()
runner.run(suite)
def test_xmlrunner_output_subdir(self):
stream = self.stream
output = os.path.join(self.outdir, 'subdir')
runner = xmlrunner.XMLTestRunner(
stream=stream, output=output, verbosity=self.verbosity,
**self.runner_kwargs)
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
runner.run(suite)
    def test_xmlrunner_patched_stdout(self):
        """The runner must work while sys.stdout/sys.stderr are replaced by
        StringIO objects (e.g. under another capturing harness)."""
        old_stdout, old_stderr = sys.stdout, sys.stderr
        try:
            sys.stdout, sys.stderr = StringIO(), StringIO()
            suite = unittest.TestSuite()
            suite.addTest(self.DummyTest('test_pass'))
            suite.properties = dict(key='value')
            self._test_xmlrunner(suite)
        finally:
            # Always restore the real streams, even if the run raises.
            sys.stdout, sys.stderr = old_stdout, old_stderr
def test_opaque_decorator(self):
suite = unittest.TestSuite()
suite.addTest(self.DecoratedUnitTest('test_pass'))
self._test_xmlrunner(suite)
testsuite_output = self.stream.getvalue()
self.assertNotIn('IOError:', testsuite_output)
def test_xmlrunner_error_in_call(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyErrorInCallTest('test_pass'))
self._test_xmlrunner(suite)
testsuite_output = self.stream.getvalue()
self.assertIn('Exception: Massive fail', testsuite_output)
    @unittest.skipIf(not hasattr(sys, 'getrefcount'),
                     'skip - PyPy does not have sys.getrefcount.')
    @unittest.skipIf((3, 0) <= sys.version_info < (3, 4),
                     'skip - test not garbage collected. '
                     'https://bugs.python.org/issue11798.')
    def test_xmlrunner_hold_traceback(self):
        """The runner must not keep failure tracebacks alive after the run."""
        suite = unittest.TestSuite()
        suite.addTest(self.DummyRefCountTest('test_fail'))
        countBeforeTest = sys.getrefcount(self.DummyRefCountTest.dummy)
        # Deliberately keep the runner referenced: if it retained the failure
        # traceback (whose frame references a `dummy` instance), the refcount
        # measured below would still be raised.
        runner = self._test_xmlrunner(suite)
        countAfterTest = sys.getrefcount(self.DummyRefCountTest.dummy)
        self.assertEqual(countBeforeTest, countAfterTest)
    class StderrXMLTestRunner(xmlrunner.XMLTestRunner):
        """
        XMLTestRunner that outputs to sys.stderr as it is at run time.
        Though XMLTestRunner defaults to use sys.stderr as its stream,
        that default is resolved at import time, so it cannot be replaced
        later, e.g. by capture_stdout_stderr().
        This class resolves sys.stderr lazily and therefore writes to the
        (possibly replaced) sys.stderr in effect when it is instantiated.
        """
        def __init__(self, **kwargs):
            super(XMLTestRunnerTestCase.StderrXMLTestRunner, self).__init__(
                stream=sys.stderr,
                **kwargs
            )
def test_test_program_succeed_with_buffer(self):
with capture_stdout_stderr() as r:
unittest.TestProgram(
module=self.__class__.__module__,
testRunner=self.StderrXMLTestRunner,
argv=[
sys.argv[0],
'-b',
'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_pass',
],
exit=False,
)
self.assertNotIn('should not be printed', r[0].getvalue())
self.assertNotIn('should not be printed', r[1].getvalue())
def test_test_program_succeed_wo_buffer(self):
with capture_stdout_stderr() as r:
unittest.TestProgram(
module=self.__class__.__module__,
testRunner=self.StderrXMLTestRunner,
argv=[
sys.argv[0],
'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_pass',
],
exit=False,
)
self.assertIn('should not be printed', r[0].getvalue())
self.assertNotIn('should not be printed', r[1].getvalue())
def test_test_program_fail_with_buffer(self):
with capture_stdout_stderr() as r:
unittest.TestProgram(
module=self.__class__.__module__,
testRunner=self.StderrXMLTestRunner,
argv=[
sys.argv[0],
'-b',
'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_fail',
],
exit=False,
)
self.assertNotIn('should be printed', r[0].getvalue())
self.assertIn('should be printed', r[1].getvalue())
def test_test_program_fail_wo_buffer(self):
with capture_stdout_stderr() as r:
unittest.TestProgram(
module=self.__class__.__module__,
testRunner=self.StderrXMLTestRunner,
argv=[
sys.argv[0],
'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_fail',
],
exit=False,
)
self.assertIn('should be printed', r[0].getvalue())
self.assertNotIn('should be printed', r[1].getvalue())
def test_partialmethod(self):
from functools import partialmethod
def test_partialmethod(test):
pass
class TestWithPartialmethod(unittest.TestCase):
pass
setattr(
TestWithPartialmethod,
'test_partialmethod',
partialmethod(test_partialmethod),
)
suite = unittest.TestSuite()
suite.addTest(TestWithPartialmethod('test_partialmethod'))
self._test_xmlrunner(suite)
    class DuplicateWriterTestCase(unittest.TestCase):
        """_DuplicateWriter must mirror everything written to the first sink
        into the second one (a real file and a StringIO here)."""
        def setUp(self):
            # First sink: a real temporary file; second sink: an in-memory buffer.
            fd, self.file = mkstemp()
            self.fh = os.fdopen(fd, 'w')
            self.buffer = StringIO()
            self.writer = _DuplicateWriter(self.fh, self.buffer)
        def tearDown(self):
            self.buffer.close()
            self.fh.close()
            os.unlink(self.file)
        def getFirstContent(self):
            # Content that reached the temporary file.
            with open(self.file, 'r') as f:
                return f.read()
        def getSecondContent(self):
            # Content that reached the in-memory buffer.
            return self.buffer.getvalue()
        def test_flush(self):
            self.writer.write('foobarbaz')
            self.writer.flush()
            self.assertEqual(self.getFirstContent(), self.getSecondContent())
        def test_writable(self):
            self.assertTrue(self.writer.writable())
        def test_writelines(self):
            self.writer.writelines([
                'foo\n',
                'bar\n',
                'baz\n',
            ])
            self.writer.flush()
            self.assertEqual(self.getFirstContent(), self.getSecondContent())
        def test_write(self):
            # try long buffer (1M)
            buffer = 'x' * (1024 * 1024)
            wrote = self.writer.write(buffer)
            self.writer.flush()
            self.assertEqual(self.getFirstContent(), self.getSecondContent())
            # write() must report the full length as written.
            self.assertEqual(wrote, len(self.getSecondContent()))
    class XMLProgramTestCase(unittest.TestCase):
        """Command-line option handling of xmlrunner.runner.XMLTestProgram."""
        @mock.patch('sys.argv', ['xmlrunner', '-o', 'flaf'])
        @mock.patch('xmlrunner.runner.XMLTestRunner')
        @mock.patch('sys.exit')
        def test_xmlrunner_output(self, exiter, testrunner):
            # -o/--output is forwarded to the runner as output='flaf'.
            xmlrunner.runner.XMLTestProgram()
            kwargs = dict(
                buffer=mock.ANY,
                failfast=mock.ANY,
                verbosity=mock.ANY,
                warnings=mock.ANY,
                output='flaf',
            )
            if sys.version_info[:2] > (3, 4):
                # tb_locals is only passed on unittest versions that have it.
                kwargs.update(tb_locals=mock.ANY)
            testrunner.assert_called_once_with(**kwargs)
            exiter.assert_called_once_with(False)
        @mock.patch('sys.argv', ['xmlrunner', '--output-file', 'test.xml'])
        @mock.patch('xmlrunner.runner.open')
        @mock.patch('xmlrunner.runner.XMLTestRunner')
        @mock.patch('sys.exit')
        def test_xmlrunner_output_file(self, exiter, testrunner, opener):
            # --output-file opens the target in binary mode, passes the file
            # object as output, and closes it afterwards.
            xmlrunner.runner.XMLTestProgram()
            opener.assert_called_once_with('test.xml', 'wb')
            open_file = opener()
            open_file.close.assert_called_with()
            kwargs = dict(
                buffer=mock.ANY,
                failfast=mock.ANY,
                verbosity=mock.ANY,
                warnings=mock.ANY,
                output=open_file,
            )
            if sys.version_info[:2] > (3, 4):
                kwargs.update(tb_locals=mock.ANY)
            testrunner.assert_called_once_with(**kwargs)
            exiter.assert_called_once_with(False)
        @mock.patch('sys.argv', ['xmlrunner', '--outsuffix', ''])
        @mock.patch('xmlrunner.runner.open')
        @mock.patch('xmlrunner.runner.XMLTestRunner')
        @mock.patch('sys.exit')
        def test_xmlrunner_outsuffix(self, exiter, testrunner, opener):
            # --outsuffix (even empty) is forwarded verbatim to the runner.
            xmlrunner.runner.XMLTestProgram()
            kwargs = dict(
                buffer=mock.ANY,
                failfast=mock.ANY,
                verbosity=mock.ANY,
                warnings=mock.ANY,
                outsuffix='',
            )
            if sys.version_info[:2] > (3, 4):
                kwargs.update(tb_locals=mock.ANY)
            testrunner.assert_called_once_with(**kwargs)
            exiter.assert_called_once_with(False)
    class ResolveFilenameTestCase(unittest.TestCase):
        """resolve_filename should prefer a relative path but fall back to the
        original absolute path when relativizing fails or escapes the tree."""
        @mock.patch('os.path.relpath')
        def test_resolve_filename_relative(self, relpath):
            # Inside the working tree: the relative form is returned.
            relpath.return_value = 'somefile.py'
            filename = resolve_filename('/path/to/somefile.py')
            self.assertEqual(filename, 'somefile.py')
        @mock.patch('os.path.relpath')
        def test_resolve_filename_outside(self, relpath):
            # A relative path leading outside ('..') is rejected in favour of
            # the absolute one.
            relpath.return_value = '../../../tmp/somefile.py'
            filename = resolve_filename('/tmp/somefile.py')
            self.assertEqual(filename, '/tmp/somefile.py')
        @mock.patch('os.path.relpath')
        def test_resolve_filename_error(self, relpath):
            # relpath may raise on Windows when drives differ; the original
            # path is returned unchanged.
            relpath.side_effect = ValueError("ValueError: path is on mount 'C:', start on mount 'D:'")
            filename = resolve_filename('C:\\path\\to\\somefile.py')
            self.assertEqual(filename, 'C:\\path\\to\\somefile.py')
| 36.109496 | 131 | 0.620555 |
from __future__ import print_function
import contextlib
import io
import sys
from xmlrunner.unittest import unittest
import xmlrunner
from xmlrunner.result import _DuplicateWriter
from xmlrunner.result import _XMLTestResult
from xmlrunner.result import resolve_filename
import doctest
import tests.doctest_example
from io import StringIO, BytesIO
from tempfile import mkdtemp
from tempfile import mkstemp
from shutil import rmtree
from glob import glob
from xml.dom import minidom
from lxml import etree
import os
import os.path
from unittest import mock
def _load_schema(version):
    """Load the vendored Jenkins xunit-plugin JUnit XSD for *version*.

    *version* is the name of a directory under tests/vendor/jenkins/
    xunit-plugin containing junit-10.xsd. Raises OSError if it is missing.
    """
    path = os.path.join(
        os.path.dirname(__file__),
        'vendor/jenkins/xunit-plugin', version, 'junit-10.xsd')
    with open(path, 'r') as schema_file:
        schema_doc = etree.parse(schema_file)
        # The `with` body always returns; the original trailing
        # `raise RuntimeError(...)` after it was unreachable and is removed
        # (a missing file already raises from open()).
        return etree.XMLSchema(schema_doc)
def validate_junit_report(version, text):
    """Assert that *text* (report bytes) validates against the vendored
    JUnit schema identified by *version*; raises DocumentInvalid if not."""
    schema = _load_schema(version)
    schema.assertValid(etree.parse(BytesIO(text)))
class DoctestTest(unittest.TestCase):
    """XMLTestRunner must report doctest-based suites with usable names."""
    def test_doctest_example(self):
        suite = doctest.DocTestSuite(tests.doctest_example)
        outdir = BytesIO()
        stream = StringIO()
        runner = xmlrunner.XMLTestRunner(
            stream=stream, output=outdir, verbosity=0)
        runner.run(suite)
        outdir.seek(0)
        output = outdir.read()
        # Class-level doctest: classname includes the defining class.
        self.assertIn('classname="tests.doctest_example.Multiplicator"'.encode('utf8'), output)
        self.assertIn('name="threetimes"'.encode('utf8'), output)
        # Module-level doctest: classname is just the module path.
        self.assertIn('classname="tests.doctest_example"'.encode('utf8'), output)
        self.assertIn('name="twice"'.encode('utf8'), output)
@contextlib.contextmanager
def capture_stdout_stderr():
    """Temporarily replace sys.stdout/sys.stderr with StringIO buffers.

    Yields the pair (stdout_buffer, stderr_buffer); the original streams
    are restored on exit, even if the body raises.
    """
    saved = (sys.stdout, sys.stderr)
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    try:
        yield (sys.stdout, sys.stderr)
    finally:
        sys.stdout, sys.stderr = saved
def _strip_xml(xml, changes):
    """Drop all attributes not whitelisted in *changes*.

    *changes* maps an XPath expression to the tuple of attribute names to
    KEEP on every matching node; everything else (timings, counters, ...)
    is removed so tests can match on stable content only.
    """
    doc = etree.fromstring(xml)
    for xpath, kept in changes.items():
        for node in doc.xpath(xpath):
            for name in list(node.attrib):
                if name not in kept:
                    del node.attrib[name]
    return etree.tostring(doc)
def some_decorator(f):
    """Wrap *f* in an exec-generated function, making the wrapper opaque to
    introspection (no functools.wraps, no visible closure of the original)."""
    namespace = dict(func=f)
    exec(
        "def wrapper(*args, **kwargs):\n"
        "    return func(*args, **kwargs)\n",
        namespace)
    return namespace['wrapper']
class XMLTestRunnerTestCase(unittest.TestCase):
    class DummyTest(unittest.TestCase):
        """Fixture case exercising every outcome the runner must report:
        pass, skip (ascii and non-ascii), fail, expected failure, unexpected
        success, error, plus various kinds of stdout/stderr output."""
        @unittest.skip("demonstrating skipping")
        def test_skip(self):
            pass
        @unittest.skip(u"demonstrating non-ascii skipping: éçà")
        def test_non_ascii_skip(self):
            pass
        def test_pass(self):
            pass
        def test_fail(self):
            self.assertTrue(False)
        @unittest.expectedFailure
        def test_expected_failure(self):
            self.assertTrue(False)
        @unittest.expectedFailure
        def test_unexpected_success(self):
            pass
        def test_error(self):
            # Deliberate ZeroDivisionError.
            1 / 0
        def test_cdata_section(self):
            # Prints a literal CDATA terminator to stress XML escaping.
            print('<![CDATA[content]]>')
        def test_invalid_xml_chars_in_doc(self):
            pass
        def test_non_ascii_error(self):
            self.assertEqual(u"éçà", 42)
        def test_unsafe_unicode(self):
            # Control characters that are illegal in XML 1.0.
            print(u"A\x00B\x08C\x0BD\x0C")
        def test_output_stdout_and_stderr(self):
            print('test on stdout')
            print('test on stderr', file=sys.stderr)
        def test_runner_buffer_output_pass(self):
            print('should not be printed')
        def test_runner_buffer_output_fail(self):
            print('should be printed')
            self.fail('expected to fail')
        def test_output(self):
            print('test message')
        def test_non_ascii_runner_buffer_output_fail(self):
            print(u'Where is the café ?')
            self.fail(u'The café could not be found')
    class DummySubTest(unittest.TestCase):
        """Fixture case exercising subTest reporting: all-pass, all-fail,
        all-error, mixed outcomes, and dots inside subTest parameters."""
        def test_subTest_pass(self):
            for i in range(2):
                with self.subTest(i=i):
                    pass
        def test_subTest_fail(self):
            for i in range(2):
                with self.subTest(i=i):
                    self.fail('this is a subtest.')
        def test_subTest_error(self):
            for i in range(2):
                with self.subTest(i=i):
                    raise Exception('this is a subtest')
        def test_subTest_mixed(self):
            # i=0 passes, i=1 fails.
            for i in range(2):
                with self.subTest(i=i):
                    self.assertLess(i, 1, msg='this is a subtest.')
        def test_subTest_with_dots(self):
            # Dots in the parameter value must not confuse name parsing.
            for i in range(2):
                with self.subTest(module='hello.world.subTest{}'.format(i)):
                    self.fail('this is a subtest.')
    class DecoratedUnitTest(unittest.TestCase):
        """Fixture whose test method is wrapped by an opaque decorator."""
        @some_decorator
        def test_pass(self):
            pass
    class DummyErrorInCallTest(unittest.TestCase):
        """Fixture whose __call__ records an error directly on the result
        instead of running the test body."""
        def __call__(self, result):
            try:
                raise Exception('Massive fail')
            except Exception:
                result.addError(self, sys.exc_info())
                return
        def test_pass(self):
            # Never actually executed: __call__ short-circuits.
            pass
    class DummyRefCountTest(unittest.TestCase):
        """Fixture for the traceback-retention test: the failing test holds a
        `dummy` instance in a local, so a retained traceback would keep the
        class's refcount raised."""
        class dummy(object):
            pass
        def test_fail(self):
            inst = self.dummy()
            self.assertTrue(False)
    def setUp(self):
        # Fresh text stream capturing the runner's console output.
        self.stream = StringIO()
        # Per-test scratch directory for XML reports, removed on cleanup.
        self.outdir = mkdtemp()
        self.verbosity = 0
        # Extra keyword arguments individual tests add before running.
        self.runner_kwargs = {}
        self.addCleanup(rmtree, self.outdir)
    def _test_xmlrunner(self, suite, runner=None, outdir=None):
        """Run *suite* through XMLTestRunner and check a report was produced.

        The report target may be a directory (default: self.outdir, must end
        up with exactly one *.xml file) or a BytesIO (must become non-empty).
        Returns the runner that was used.
        """
        if outdir is None:
            outdir = self.outdir
        stream = self.stream
        verbosity = self.verbosity
        runner_kwargs = self.runner_kwargs
        if runner is None:
            runner = xmlrunner.XMLTestRunner(
                stream=stream, output=outdir, verbosity=verbosity,
                **runner_kwargs)
        # Sanity: the target must be empty before the run...
        if isinstance(outdir, BytesIO):
            self.assertFalse(outdir.getvalue())
        else:
            self.assertEqual(0, len(glob(os.path.join(outdir, '*xml'))))
        runner.run(suite)
        # ...and hold a report afterwards.
        if isinstance(outdir, BytesIO):
            self.assertTrue(outdir.getvalue())
        else:
            self.assertEqual(1, len(glob(os.path.join(outdir, '*xml'))))
        return runner
def test_basic_unittest_constructs(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
suite.addTest(self.DummyTest('test_skip'))
suite.addTest(self.DummyTest('test_fail'))
suite.addTest(self.DummyTest('test_expected_failure'))
suite.addTest(self.DummyTest('test_unexpected_success'))
suite.addTest(self.DummyTest('test_error'))
self._test_xmlrunner(suite)
def test_classnames(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
suite.addTest(self.DummySubTest('test_subTest_pass'))
outdir = BytesIO()
stream = StringIO()
runner = xmlrunner.XMLTestRunner(
stream=stream, output=outdir, verbosity=0)
runner.run(suite)
outdir.seek(0)
output = outdir.read()
output = _strip_xml(output, {
'//testsuite': (),
'//testcase': ('classname', 'name'),
'//failure': ('message',),
})
self.assertRegexpMatches(
output,
r'classname="tests\.testsuite\.(XMLTestRunnerTestCase\.)?'
r'DummyTest" name="test_pass"'.encode('utf8'),
)
self.assertRegexpMatches(
output,
r'classname="tests\.testsuite\.(XMLTestRunnerTestCase\.)?'
r'DummySubTest" name="test_subTest_pass"'.encode('utf8'),
)
def test_expected_failure(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_expected_failure'))
outdir = BytesIO()
self._test_xmlrunner(suite, outdir=outdir)
self.assertNotIn(b'<failure', outdir.getvalue())
self.assertNotIn(b'<error', outdir.getvalue())
self.assertIn(b'<skip', outdir.getvalue())
def test_unexpected_success(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_unexpected_success'))
outdir = BytesIO()
self._test_xmlrunner(suite, outdir=outdir)
self.assertNotIn(b'<failure', outdir.getvalue())
self.assertIn(b'<error', outdir.getvalue())
self.assertNotIn(b'<skip', outdir.getvalue())
def test_xmlrunner_non_ascii(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_non_ascii_skip'))
suite.addTest(self.DummyTest('test_non_ascii_error'))
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
**self.runner_kwargs)
runner.run(suite)
outdir.seek(0)
output = outdir.read()
self.assertIn(
u'message="demonstrating non-ascii skipping: éçà"'.encode('utf8'),
output)
def test_xmlrunner_safe_xml_encoding_name(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
**self.runner_kwargs)
runner.run(suite)
outdir.seek(0)
output = outdir.read()
firstline = output.splitlines()[0]
self.assertIn('encoding="UTF-8"'.encode('utf8'), firstline)
def test_xmlrunner_check_for_valid_xml_streamout(self):
class DummyTestA(unittest.TestCase):
def test_pass(self):
pass
class DummyTestB(unittest.TestCase):
def test_pass(self):
pass
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DummyTestA))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DummyTestB))
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
**self.runner_kwargs)
runner.run(suite)
outdir.seek(0)
output = outdir.read()
try:
minidom.parseString(output)
except Exception as e:
self.fail(e)
def test_xmlrunner_unsafe_unicode(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_unsafe_unicode'))
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
**self.runner_kwargs)
runner.run(suite)
outdir.seek(0)
output = outdir.read()
self.assertIn(u"<![CDATA[ABCD\n]]>".encode('utf8'),
output)
def test_xmlrunner_non_ascii_failures(self):
self._xmlrunner_non_ascii_failures()
def test_xmlrunner_non_ascii_failures_buffered_output(self):
self._xmlrunner_non_ascii_failures(buffer=True)
def _xmlrunner_non_ascii_failures(self, buffer=False):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest(
'test_non_ascii_runner_buffer_output_fail'))
outdir = BytesIO()
runner = xmlrunner.XMLTestRunner(
stream=self.stream, output=outdir, verbosity=self.verbosity,
buffer=buffer, **self.runner_kwargs)
# allow output non-ascii letters to stdout
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
try:
runner.run(suite)
finally:
# Not to be closed when TextIOWrapper is disposed.
sys.stdout.detach()
sys.stdout = orig_stdout
outdir.seek(0)
output = outdir.read()
self.assertIn(
u'Where is the café ?'.encode('utf8'),
output)
self.assertIn(
u'The café could not be found'.encode('utf8'),
output)
@unittest.expectedFailure
def test_xmlrunner_buffer_output_pass(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_runner_buffer_output_pass'))
self._test_xmlrunner(suite)
testsuite_output = self.stream.getvalue()
# Since we are always buffering stdout/stderr
# it is currently troublesome to print anything at all
# and be consistent with --buffer option (issue #59)
self.assertIn('should not be printed', testsuite_output)
# this will be fixed when using the composite approach
# that was under development in the rewrite branch.
def test_xmlrunner_buffer_output_fail(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_runner_buffer_output_fail'))
# --buffer option
self.runner_kwargs['buffer'] = True
self._test_xmlrunner(suite)
testsuite_output = self.stream.getvalue()
self.assertIn('should be printed', testsuite_output)
def test_xmlrunner_output_without_buffer(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_output'))
with capture_stdout_stderr() as r:
self._test_xmlrunner(suite)
output_from_test = r[0].getvalue()
self.assertIn('test message', output_from_test)
def test_xmlrunner_output_with_buffer(self):
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_output'))
# --buffer option
self.runner_kwargs['buffer'] = True
with capture_stdout_stderr() as r:
self._test_xmlrunner(suite)
output_from_test = r[0].getvalue()
self.assertNotIn('test message', output_from_test)
def test_xmlrunner_stdout_stderr_recovered_without_buffer(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
self._test_xmlrunner(suite)
self.assertIs(orig_stdout, sys.stdout)
self.assertIs(orig_stderr, sys.stderr)
def test_xmlrunner_stdout_stderr_recovered_with_buffer(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
# --buffer option
self.runner_kwargs['buffer'] = True
self._test_xmlrunner(suite)
self.assertIs(orig_stdout, sys.stdout)
self.assertIs(orig_stderr, sys.stderr)
suite = unittest.TestSuite()
suite.addTest(self.DummyTest('test_pass'))
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
                 'unittest.TestCase.subTest not present.')
def test_unittest_subTest_fail(self):
    """Each failing subTest appears as its own <testcase> (issue #77)."""
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    sub_suite = unittest.TestSuite()
    sub_suite.addTest(self.DummySubTest('test_subTest_fail'))
    runner.run(sub_suite)
    report.seek(0)
    xml_bytes = _strip_xml(report.read(), {
        '//testsuite': (),
        '//testcase': ('classname', 'name'),
        '//failure': ('message',),
    })
    # One <testcase> element per subTest parameter value.
    self.assertRegexpMatches(
        xml_bytes,
        br'<testcase classname="tests\.testsuite\.'
        br'(XMLTestRunnerTestCase\.)?DummySubTest" '
        br'name="test_subTest_fail \(i=0\)"')
    self.assertRegexpMatches(
        xml_bytes,
        br'<testcase classname="tests\.testsuite\.'
        br'(XMLTestRunnerTestCase\.)?DummySubTest" '
        br'name="test_subTest_fail \(i=1\)"')
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
                 'unittest.TestCase.subTest not present.')
def test_unittest_subTest_error(self):
    """Each erroring subTest appears as its own <testcase> (issue #155)."""
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    sub_suite = unittest.TestSuite()
    sub_suite.addTest(self.DummySubTest('test_subTest_error'))
    runner.run(sub_suite)
    report.seek(0)
    xml_bytes = _strip_xml(report.read(), {
        '//testsuite': (),
        '//testcase': ('classname', 'name'),
        '//failure': ('message',),
    })
    # One <testcase> element per subTest parameter value.
    self.assertRegexpMatches(
        xml_bytes,
        br'<testcase classname="tests\.testsuite\.'
        br'(XMLTestRunnerTestCase\.)?DummySubTest" '
        br'name="test_subTest_error \(i=0\)"')
    self.assertRegexpMatches(
        xml_bytes,
        br'<testcase classname="tests\.testsuite\.'
        br'(XMLTestRunnerTestCase\.)?DummySubTest" '
        br'name="test_subTest_error \(i=1\)"')
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
                 'unittest.TestCase.subTest not present.')
def test_unittest_subTest_mixed(self):
    """Only failing subTests show up; passing ones are omitted (issue #155)."""
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    sub_suite = unittest.TestSuite()
    sub_suite.addTest(self.DummySubTest('test_subTest_mixed'))
    runner.run(sub_suite)
    report.seek(0)
    xml_bytes = _strip_xml(report.read(), {
        '//testsuite': (),
        '//testcase': ('classname', 'name'),
        '//failure': ('message',),
    })
    # i=0 passes (absent from the report); i=1 fails (present).
    self.assertNotIn('name="test_subTest_mixed (i=0)"'.encode('utf8'),
                     xml_bytes)
    self.assertIn('name="test_subTest_mixed (i=1)"'.encode('utf8'),
                  xml_bytes)
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
                 'unittest.TestCase.subTest not present.')
def test_unittest_subTest_pass(self):
    """Passing subTests run cleanly through the runner (issue #85)."""
    passing = unittest.TestSuite()
    passing.addTest(self.DummySubTest('test_subTest_pass'))
    self._test_xmlrunner(passing)
@unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),
                 'unittest.TestCase.subTest not present.')
def test_unittest_subTest_with_dots(self):
    """Dots inside subTest parameters must not corrupt XML names (issue #85)."""
    sub_suite = unittest.TestSuite()
    sub_suite.addTest(self.DummySubTest('test_subTest_with_dots'))
    report = BytesIO()
    self._test_xmlrunner(sub_suite, outdir=report)
    xmlcontent = report.getvalue().decode()
    # The method name must stay intact ...
    self.assertNotIn('name="subTest', xmlcontent,
                     'parsing of test method name is not done correctly')
    self.assertIn('name="test_subTest_with_dots (module=\'hello.world.subTest',
                  xmlcontent)
    # ... and the class name must not absorb part of the parameter string.
    bad_classname = ('classname="tests.testsuite.XMLTestRunnerTestCase.'
                     'DummySubTest.test_subTest_with_dots (module=\'hello.world"')
    self.assertNotIn(bad_classname, xmlcontent,
                     'parsing of class name is not done correctly')
    self.assertIn(
        'classname="tests.testsuite.XMLTestRunnerTestCase.DummySubTest"',
        xmlcontent)
def test_xmlrunner_pass(self):
    """Smoke test: a single passing test runs through the runner."""
    passing = unittest.TestSuite()
    passing.addTest(self.DummyTest('test_pass'))
    self._test_xmlrunner(passing)
def test_xmlrunner_failfast(self):
    """With failfast, nothing after the first failure reaches the report."""
    ff_suite = unittest.TestSuite()
    ff_suite.addTest(self.DummyTest('test_fail'))
    ff_suite.addTest(self.DummyTest('test_pass'))
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report,
        verbosity=self.verbosity, failfast=True,
        **self.runner_kwargs)
    runner.run(ff_suite)
    report.seek(0)
    xml_bytes = report.read()
    self.assertIn('test_fail'.encode('utf8'), xml_bytes)
    self.assertNotIn('test_pass'.encode('utf8'), xml_bytes)
def test_xmlrunner_verbose(self):
    """Run with verbosity level 1."""
    self.verbosity = 1
    one = unittest.TestSuite()
    one.addTest(self.DummyTest('test_pass'))
    self._test_xmlrunner(one)
def test_xmlrunner_showall(self):
    """Run with verbosity level 2 ("show all")."""
    self.verbosity = 2
    one = unittest.TestSuite()
    one.addTest(self.DummyTest('test_pass'))
    self._test_xmlrunner(one)
def test_xmlrunner_cdata_section(self):
    """Output requiring a CDATA section is handled by the runner."""
    cdata_suite = unittest.TestSuite()
    cdata_suite.addTest(self.DummyTest('test_cdata_section'))
    self._test_xmlrunner(cdata_suite)
def test_xmlrunner_invalid_xml_chars_in_doc(self):
    """Invalid XML characters in docstrings still yield well-formed XML."""
    bad_suite = unittest.TestSuite()
    bad_suite.addTest(self.DummyTest('test_invalid_xml_chars_in_doc'))
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    runner.run(bad_suite)
    report.seek(0)
    xml_bytes = report.read()
    # Finally check if we have a valid XML document or not; turn a parse
    # error into a clean test failure rather than a crude traceback.
    try:
        minidom.parseString(xml_bytes)
    except Exception as e:  # pragma: no cover
        self.fail(e)
def test_xmlrunner_outsuffix(self):
    """A custom outsuffix is embedded in the report file name."""
    self.runner_kwargs['outsuffix'] = '.somesuffix'
    one = unittest.TestSuite()
    one.addTest(self.DummyTest('test_pass'))
    self._test_xmlrunner(one)
    report_path = glob(os.path.join(self.outdir, '*xml'))[0]
    assert report_path.endswith('.somesuffix.xml')
def test_xmlrunner_nosuffix(self):
    """An empty outsuffix leaves only the test name in the file name."""
    self.runner_kwargs['outsuffix'] = ''
    one = unittest.TestSuite()
    one.addTest(self.DummyTest('test_pass'))
    self._test_xmlrunner(one)
    report_name = os.path.basename(
        glob(os.path.join(self.outdir, '*xml'))[0])
    assert report_name.endswith('DummyTest.xml')
def test_junitxml_properties(self):
    """Suite-level properties are accepted by the runner."""
    prop_suite = unittest.TestSuite()
    prop_suite.addTest(self.DummyTest('test_pass'))
    prop_suite.properties = dict(key='value')
    self._test_xmlrunner(prop_suite)
def test_junitxml_xsd_validation_order(self):
    """Report children must appear in schema order and validate."""
    ordered_suite = unittest.TestSuite()
    ordered_suite.addTest(self.DummyTest('test_fail'))
    ordered_suite.addTest(self.DummyTest('test_pass'))
    ordered_suite.addTest(self.DummyTest('test_output_stdout_and_stderr'))
    ordered_suite.properties = dict(key='value')
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    runner.run(ordered_suite)
    report.seek(0)
    xml_bytes = report.read()
    # Expected order: properties, testcase, system-out, system-err.
    positions = [xml_bytes.index(tag) for tag in (
        '<properties>'.encode('utf8'),
        '<testcase'.encode('utf8'),
        '<system-out>'.encode('utf8'),
        '<system-err>'.encode('utf8'),
    )]
    self.assertTrue(positions[0] < positions[1] < positions[2] < positions[3])
    # XSD validation - for good measure.
    validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', xml_bytes)
def test_junitxml_xsd_validation_empty_properties(self):
    """properties=None must not emit a <properties> element."""
    noprop_suite = unittest.TestSuite()
    noprop_suite.addTest(self.DummyTest('test_fail'))
    noprop_suite.addTest(self.DummyTest('test_pass'))
    noprop_suite.properties = None
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    runner.run(noprop_suite)
    report.seek(0)
    xml_bytes = report.read()
    self.assertNotIn('<properties>'.encode('utf8'), xml_bytes)
    validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', xml_bytes)
@unittest.skipIf(hasattr(sys, 'pypy_version_info'),
                 'skip - PyPy + lxml seems to be hanging')
def test_xunit_plugin_transform(self):
    """The xunit plugin transform keeps reports valid for both schemas."""
    plain_suite = unittest.TestSuite()
    plain_suite.addTest(self.DummyTest('test_fail'))
    plain_suite.addTest(self.DummyTest('test_pass'))
    plain_suite.properties = None
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    runner.run(plain_suite)
    report.seek(0)
    xml_bytes = report.read()
    # The raw report matches only the original schema ...
    validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', xml_bytes)
    with self.assertRaises(etree.DocumentInvalid):
        validate_junit_report(
            'ae25da5089d4f94ac6c4669bf736e4d416cc4665', xml_bytes)
    # ... while the transformed report satisfies both schemas.
    from xmlrunner.extra.xunit_plugin import transform
    transformed = transform(xml_bytes)
    validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', transformed)
    validate_junit_report('ae25da5089d4f94ac6c4669bf736e4d416cc4665', transformed)
    self.assertIn('test_pass'.encode('utf8'), transformed)
    self.assertIn('test_fail'.encode('utf8'), transformed)
def test_xmlrunner_elapsed_times(self):
    """elapsed_times=False is honoured without breaking the run."""
    timed_suite = unittest.TestSuite()
    timed_suite.addTest(self.DummyTest('test_pass'))
    self.runner_kwargs['elapsed_times'] = False
    self._test_xmlrunner(timed_suite)
def test_xmlrunner_resultclass(self):
    """A custom resultclass (subclass of _XMLTestResult) is accepted."""
    class CustomResult(_XMLTestResult):
        pass

    rc_suite = unittest.TestSuite()
    rc_suite.addTest(self.DummyTest('test_pass'))
    self.runner_kwargs['resultclass'] = CustomResult
    self._test_xmlrunner(rc_suite)
def test_xmlrunner_stream(self):
    """Reports can be written to an in-memory binary stream."""
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    one = unittest.TestSuite()
    one.addTest(self.DummyTest('test_pass'))
    runner.run(one)
def test_xmlrunner_stream_empty_testsuite(self):
    """Running an empty suite must not crash the runner."""
    report = BytesIO()
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=report, verbosity=self.verbosity,
        **self.runner_kwargs)
    runner.run(unittest.TestSuite())
def test_xmlrunner_output_subdir(self):
    """Reports can target a sub-directory of the output directory."""
    target_dir = os.path.join(self.outdir, 'subdir')
    runner = xmlrunner.XMLTestRunner(
        stream=self.stream, output=target_dir, verbosity=self.verbosity,
        **self.runner_kwargs)
    one = unittest.TestSuite()
    one.addTest(self.DummyTest('test_pass'))
    runner.run(one)
def test_xmlrunner_patched_stdout(self):
    """The runner copes with stdout/stderr already replaced by the caller."""
    saved_streams = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = StringIO(), StringIO()
        patched_suite = unittest.TestSuite()
        patched_suite.addTest(self.DummyTest('test_pass'))
        patched_suite.properties = dict(key='value')
        self._test_xmlrunner(patched_suite)
    finally:
        # Always restore the real streams, even if the run blows up.
        sys.stdout, sys.stderr = saved_streams
def test_opaque_decorator(self):
    """Decorated tests (see DecoratedUnitTest) must not break reporting."""
    dec_suite = unittest.TestSuite()
    dec_suite.addTest(self.DecoratedUnitTest('test_pass'))
    self._test_xmlrunner(dec_suite)
    self.assertNotIn('IOError:', self.stream.getvalue())
def test_xmlrunner_error_in_call(self):
    """Errors raised while invoking a test (DummyErrorInCallTest) are reported."""
    err_suite = unittest.TestSuite()
    err_suite.addTest(self.DummyErrorInCallTest('test_pass'))
    self._test_xmlrunner(err_suite)
    self.assertIn('Exception: Massive fail', self.stream.getvalue())
@unittest.skipIf(not hasattr(sys, 'getrefcount'),
                 'skip - PyPy does not have sys.getrefcount.')
@unittest.skipIf((3, 0) <= sys.version_info < (3, 4),
                 'skip - test not garbage collected. '
                 'https://bugs.python.org/issue11798.')
def test_xmlrunner_hold_traceback(self):
    """The runner must not keep failing-test objects alive (refcounts)."""
    leak_suite = unittest.TestSuite()
    leak_suite.addTest(self.DummyRefCountTest('test_fail'))
    refs_before = sys.getrefcount(self.DummyRefCountTest.dummy)
    runner = self._test_xmlrunner(leak_suite)
    refs_after = sys.getrefcount(self.DummyRefCountTest.dummy)
    self.assertEqual(refs_before, refs_after)
class StderrXMLTestRunner(xmlrunner.XMLTestRunner):
    # Runner variant pinned to sys.stderr; used as testRunner by the
    # TestProgram integration tests below.
    def __init__(self, **kwargs):
        # Python-2-compatible super() call; the class is referenced through
        # the enclosing test case because it is defined as a nested class.
        super(XMLTestRunnerTestCase.StderrXMLTestRunner, self).__init__(
            stream=sys.stderr,
            **kwargs
        )
def test_test_program_succeed_with_buffer(self):
    """-b keeps passing-test output out of both stdout and stderr."""
    with capture_stdout_stderr() as captured:
        unittest.TestProgram(
            module=self.__class__.__module__,
            testRunner=self.StderrXMLTestRunner,
            argv=[
                sys.argv[0],
                '-b',
                'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_pass',
            ],
            exit=False,
        )
    self.assertNotIn('should not be printed', captured[0].getvalue())
    self.assertNotIn('should not be printed', captured[1].getvalue())
def test_test_program_succeed_wo_buffer(self):
    """Without -b, passing-test output reaches stdout but not stderr."""
    with capture_stdout_stderr() as captured:
        unittest.TestProgram(
            module=self.__class__.__module__,
            testRunner=self.StderrXMLTestRunner,
            argv=[
                sys.argv[0],
                'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_pass',
            ],
            exit=False,
        )
    self.assertIn('should not be printed', captured[0].getvalue())
    self.assertNotIn('should not be printed', captured[1].getvalue())
def test_test_program_fail_with_buffer(self):
    """-b replays failing-test output on stderr, not stdout."""
    with capture_stdout_stderr() as captured:
        unittest.TestProgram(
            module=self.__class__.__module__,
            testRunner=self.StderrXMLTestRunner,
            argv=[
                sys.argv[0],
                '-b',
                'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_fail',
            ],
            exit=False,
        )
    self.assertNotIn('should be printed', captured[0].getvalue())
    self.assertIn('should be printed', captured[1].getvalue())
def test_test_program_fail_wo_buffer(self):
    """Without -b, failing-test output goes to stdout, not stderr."""
    with capture_stdout_stderr() as captured:
        unittest.TestProgram(
            module=self.__class__.__module__,
            testRunner=self.StderrXMLTestRunner,
            argv=[
                sys.argv[0],
                'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_fail',
            ],
            exit=False,
        )
    self.assertIn('should be printed', captured[0].getvalue())
    self.assertNotIn('should be printed', captured[1].getvalue())
def test_partialmethod(self):
    """Tests defined via functools.partialmethod are runnable."""
    from functools import partialmethod

    def target(test):
        pass

    class TestWithPartialmethod(unittest.TestCase):
        pass

    # Attach the partialmethod dynamically, as a user might.
    setattr(
        TestWithPartialmethod,
        'test_partialmethod',
        partialmethod(target),
    )
    pm_suite = unittest.TestSuite()
    pm_suite.addTest(TestWithPartialmethod('test_partialmethod'))
    self._test_xmlrunner(pm_suite)
class DuplicateWriterTestCase(unittest.TestCase):
    """_DuplicateWriter must mirror every write into both destinations:
    the wrapped file handle and the secondary in-memory buffer."""

    def setUp(self):
        fd, self.file = mkstemp()
        self.fh = os.fdopen(fd, 'w')
        self.buffer = StringIO()
        self.writer = _DuplicateWriter(self.fh, self.buffer)

    def tearDown(self):
        self.buffer.close()
        self.fh.close()
        os.unlink(self.file)

    def getFirstContent(self):
        # Content that reached the real file on disk.
        with open(self.file, 'r') as handle:
            return handle.read()

    def getSecondContent(self):
        # Content captured by the in-memory mirror.
        return self.buffer.getvalue()

    def test_flush(self):
        self.writer.write('foobarbaz')
        self.writer.flush()
        self.assertEqual(self.getFirstContent(), self.getSecondContent())

    def test_writable(self):
        self.assertTrue(self.writer.writable())

    def test_writelines(self):
        self.writer.writelines(['foo\n', 'bar\n', 'baz\n'])
        self.writer.flush()
        self.assertEqual(self.getFirstContent(), self.getSecondContent())

    def test_write(self):
        # A long payload (1 MiB) must round-trip through both sinks.
        payload = 'x' * (1024 * 1024)
        written = self.writer.write(payload)
        self.writer.flush()
        self.assertEqual(self.getFirstContent(), self.getSecondContent())
        self.assertEqual(written, len(self.getSecondContent()))
class XMLProgramTestCase(unittest.TestCase):
    """Command-line argument handling of xmlrunner.runner.XMLTestProgram."""

    @staticmethod
    def _expected_runner_kwargs(**overrides):
        # Keyword arguments XMLTestProgram forwards to the runner class;
        # tb_locals exists only on Python > 3.4.
        expected = dict(
            buffer=mock.ANY,
            failfast=mock.ANY,
            verbosity=mock.ANY,
            warnings=mock.ANY,
        )
        expected.update(overrides)
        if sys.version_info[:2] > (3, 4):
            expected['tb_locals'] = mock.ANY
        return expected

    @mock.patch('sys.argv', ['xmlrunner', '-o', 'flaf'])
    @mock.patch('xmlrunner.runner.XMLTestRunner')
    @mock.patch('sys.exit')
    def test_xmlrunner_output(self, exiter, testrunner):
        xmlrunner.runner.XMLTestProgram()
        testrunner.assert_called_once_with(
            **self._expected_runner_kwargs(output='flaf'))
        exiter.assert_called_once_with(False)

    @mock.patch('sys.argv', ['xmlrunner', '--output-file', 'test.xml'])
    @mock.patch('xmlrunner.runner.open')
    @mock.patch('xmlrunner.runner.XMLTestRunner')
    @mock.patch('sys.exit')
    def test_xmlrunner_output_file(self, exiter, testrunner, opener):
        xmlrunner.runner.XMLTestProgram()
        # The report file is opened for binary writing and closed again.
        opener.assert_called_once_with('test.xml', 'wb')
        report_file = opener()
        report_file.close.assert_called_with()
        testrunner.assert_called_once_with(
            **self._expected_runner_kwargs(output=report_file))
        exiter.assert_called_once_with(False)

    @mock.patch('sys.argv', ['xmlrunner', '--outsuffix', ''])
    @mock.patch('xmlrunner.runner.open')
    @mock.patch('xmlrunner.runner.XMLTestRunner')
    @mock.patch('sys.exit')
    def test_xmlrunner_outsuffix(self, exiter, testrunner, opener):
        xmlrunner.runner.XMLTestProgram()
        testrunner.assert_called_once_with(
            **self._expected_runner_kwargs(outsuffix=''))
        exiter.assert_called_once_with(False)
class ResolveFilenameTestCase(unittest.TestCase):
    """resolve_filename(): relative inside the tree, absolute otherwise."""

    @mock.patch('os.path.relpath')
    def test_resolve_filename_relative(self, relpath_mock):
        # A path under the current tree is reported relative.
        relpath_mock.return_value = 'somefile.py'
        self.assertEqual(resolve_filename('/path/to/somefile.py'),
                         'somefile.py')

    @mock.patch('os.path.relpath')
    def test_resolve_filename_outside(self, relpath_mock):
        # A path escaping the tree ('..' components) stays absolute.
        relpath_mock.return_value = '../../../tmp/somefile.py'
        self.assertEqual(resolve_filename('/tmp/somefile.py'),
                         '/tmp/somefile.py')

    @mock.patch('os.path.relpath')
    def test_resolve_filename_error(self, relpath_mock):
        # On Windows, relpath raises across drives; fall back to the input.
        relpath_mock.side_effect = ValueError(
            "ValueError: path is on mount 'C:', start on mount 'D:'")
        self.assertEqual(resolve_filename('C:\\path\\to\\somefile.py'),
                         'C:\\path\\to\\somefile.py')
| true | true |
1c47eee4810990be4161b62e4a305d1fbceda5f0 | 3,159 | py | Python | src/dkn_kg_preprocess.py | andreeaiana/geneg_benchmarking | 0b53989c79b8e3771c144c0332fd36587dfe0f4d | [
"MIT"
] | 1 | 2021-12-08T12:02:56.000Z | 2021-12-08T12:02:56.000Z | src/dkn_kg_preprocess.py | andreeaiana/geneg_benchmarking | 0b53989c79b8e3771c144c0332fd36587dfe0f4d | [
"MIT"
] | null | null | null | src/dkn_kg_preprocess.py | andreeaiana/geneg_benchmarking | 0b53989c79b8e3771c144c0332fd36587dfe0f4d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# DISCLAIMER
# This code file is forked and adapted from https://github.com/hwwang55/DKN/blob/master/data/kg/kg_preprocess.py
# import libraries
import os
import numpy as np
from pathlib import Path
from typing import Dict, List
# import custom code
from src.config import DKN_KGE_METHOD, DKN_KGE_ENTITY_EMBEDDING_DIM
from src.config import FILENAME_ENTITY2INDEX, FILENAME_ENTITY2ID, FILENAME_TRIPLE2ID
from src.config import DATA_DIR
from src.util.logger import setup_logging
def read_map(file: Path) -> Dict[str, int]:
    """Parse a tab-separated ``<entity_id>\\t<index>`` file into a dict.

    Lines that do not split into exactly two fields (e.g. the leading
    count line of entity2id.txt) are skipped.  Note: the original return
    annotation said ``Dict[int, int]``, but the keys are the raw entity-id
    strings — fixed here.
    """
    entity2index_map = {}
    # `with` guarantees the handle is closed even if a line fails to parse
    # (the original left the file open on error).
    with open(file, encoding='utf-8') as reader:
        for line in reader:
            array = line.split('\t')
            if len(array) != 2:  # skip the first line in entity2id.txt
                continue
            entity2index_map[array[0]] = int(array[1])
    return entity2index_map
def get_neighbors_for_entity(file: Path) -> Dict[int, List[int]]:
    """Build an undirected adjacency map from a ``triple2id`` file.

    Each data line is ``head\\ttail\\trelation``; both endpoints record the
    other as a neighbor.  Lines without exactly three fields (the leading
    count line) are skipped.
    """
    entity2neighbor_map = {}
    # `with` guarantees the handle is closed even on a parse error
    # (the original left the file open in that case).
    with open(file, encoding='utf-8') as reader:
        for line in reader:
            array = line.strip().split('\t')
            if len(array) != 3:  # skip the first line in triple2id.txt
                continue
            head = int(array[0])
            tail = int(array[1])
            # setdefault replaces the original if/else append dance.
            entity2neighbor_map.setdefault(head, []).append(tail)
            entity2neighbor_map.setdefault(tail, []).append(head)
    return entity2neighbor_map
if __name__ == '__main__':
    logger = setup_logging(name=__file__, log_level='info')
    # entity2index.txt (from news_preprocess.py) lists entities used in the
    # dataset; entity2id.txt (from prepare_data_for_transx.py) lists all
    # entities in the crawled knowledge graph.
    entity2index = read_map(FILENAME_ENTITY2INDEX)
    full_entity2index = read_map(FILENAME_ENTITY2ID)
    entity2neighbor = get_neighbors_for_entity(FILENAME_TRIPLE2ID)
    vec_name = (DKN_KGE_METHOD + '_entity2vec_'
                + str(DKN_KGE_ENTITY_EMBEDDING_DIM) + '.vec')
    full_embeddings = np.loadtxt(os.path.join(DATA_DIR, vec_name))
    emb_shape = [len(entity2index) + 1, DKN_KGE_ENTITY_EMBEDDING_DIM]
    entity_embeddings = np.zeros(emb_shape)
    context_embeddings = np.zeros(emb_shape)
    logger.info('writing entity embeddings...')
    for entity, index in entity2index.items():
        if entity not in full_entity2index:
            continue  # not in the crawled KG: row stays all-zero
        full_index = full_entity2index[entity]
        entity_embeddings[index] = full_embeddings[full_index]
        if full_index in entity2neighbor:
            neighbor_ids = entity2neighbor[full_index]
            # Context embedding = mean of the neighbors' embeddings.
            context_embeddings[index] = np.average(
                full_embeddings[neighbor_ids], axis=0)
    suffix = DKN_KGE_METHOD + '_' + str(DKN_KGE_ENTITY_EMBEDDING_DIM)
    np.save(os.path.join(DATA_DIR, 'entity_embeddings_' + suffix),
            entity_embeddings)
    np.save(os.path.join(DATA_DIR, 'context_embeddings_' + suffix),
            context_embeddings)
| 40.5 | 137 | 0.703704 |
import os
import numpy as np
from pathlib import Path
from typing import Dict, List
from src.config import DKN_KGE_METHOD, DKN_KGE_ENTITY_EMBEDDING_DIM
from src.config import FILENAME_ENTITY2INDEX, FILENAME_ENTITY2ID, FILENAME_TRIPLE2ID
from src.config import DATA_DIR
from src.util.logger import setup_logging
def read_map(file: Path) -> Dict[int, int]:
entity2index_map = {}
reader = open(file, encoding='utf-8')
for line in reader:
array = line.split('\t')
if len(array) != 2:
continue
entity_id = array[0]
index = int(array[1])
entity2index_map[entity_id] = index
reader.close()
return entity2index_map
def get_neighbors_for_entity(file: Path) -> Dict[int, List[int]]:
reader = open(file, encoding='utf-8')
entity2neighbor_map = {}
for line in reader:
array = line.strip().split('\t')
if len(array) != 3:
continue
head = int(array[0])
tail = int(array[1])
if head in entity2neighbor_map:
entity2neighbor_map[head].append(tail)
else:
entity2neighbor_map[head] = [tail]
if tail in entity2neighbor_map:
entity2neighbor_map[tail].append(head)
else:
entity2neighbor_map[tail] = [head]
reader.close()
return entity2neighbor_map
if __name__ == '__main__':
logger = setup_logging(name=__file__, log_level='info')
entity2index = read_map(FILENAME_ENTITY2INDEX)
full_entity2index = read_map(FILENAME_ENTITY2ID)
entity2neighbor = get_neighbors_for_entity(FILENAME_TRIPLE2ID)
full_embeddings = np.loadtxt(os.path.join(DATA_DIR, DKN_KGE_METHOD + '_entity2vec_' + str(DKN_KGE_ENTITY_EMBEDDING_DIM) + '.vec'))
entity_embeddings = np.zeros([len(entity2index) + 1, DKN_KGE_ENTITY_EMBEDDING_DIM])
context_embeddings = np.zeros([len(entity2index) + 1, DKN_KGE_ENTITY_EMBEDDING_DIM])
logger.info('writing entity embeddings...')
for entity, index in entity2index.items():
if entity in full_entity2index:
full_index = full_entity2index[entity]
entity_embeddings[index] = full_embeddings[full_index]
if full_index in entity2neighbor:
context_full_indices = entity2neighbor[full_index]
context_embeddings[index] = np.average(full_embeddings[context_full_indices], axis=0)
np.save(os.path.join(DATA_DIR, 'entity_embeddings_' + DKN_KGE_METHOD + '_' + str(DKN_KGE_ENTITY_EMBEDDING_DIM)), entity_embeddings)
np.save(os.path.join(DATA_DIR, 'context_embeddings_' + DKN_KGE_METHOD + '_' + str(DKN_KGE_ENTITY_EMBEDDING_DIM)), context_embeddings)
| true | true |
1c47f02bbbb62967084bcd65ed4058613766c005 | 179 | py | Python | frappe/core/doctype/user_type/test_user_type.py | ssuda777/frappe | d3f3df2ce15154aecc1d9d6d07d947e72c2e8c6e | [
"MIT"
] | 1 | 2021-12-18T18:37:29.000Z | 2021-12-18T18:37:29.000Z | frappe/core/doctype/user_type/test_user_type.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 3 | 2021-02-27T11:50:14.000Z | 2021-05-03T06:48:49.000Z | frappe/core/doctype/user_type/test_user_type.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 2 | 2021-09-02T09:51:55.000Z | 2021-09-07T04:55:42.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and Contributors
# See license.txt
# import frappe
import unittest
class TestUserType(unittest.TestCase):
pass
| 19.888889 | 58 | 0.743017 |
import unittest
class TestUserType(unittest.TestCase):
pass
| true | true |
1c47f12210994a95c7538f1240f7559389bdc3de | 1,707 | py | Python | watcher.py | yijianduanlang/n95-py | 6efefe9497ed95a75c7712323a85daec7eb7f3aa | [
"MIT"
] | 357 | 2020-02-08T07:07:18.000Z | 2022-03-26T02:44:53.000Z | watcher.py | tavernier/N95-watcher | 6efefe9497ed95a75c7712323a85daec7eb7f3aa | [
"MIT"
] | 14 | 2020-02-10T08:12:58.000Z | 2022-03-12T00:15:19.000Z | watcher.py | tavernier/N95-watcher | 6efefe9497ed95a75c7712323a85daec7eb7f3aa | [
"MIT"
] | 107 | 2020-02-08T13:26:27.000Z | 2022-03-05T04:38:26.000Z | import sys
import os, time, json
lib_path = os.path.join(os.path.dirname(__file__))[:-3]
sys.path.append(lib_path)
from selenium import webdriver
from log.logger import logger as log
from PIL import Image
browser = None
def check_shop(url, keywords):
    """Load a shop page and record it in ../data.txt when none of the
    sold-out `keywords` appear (i.e. the item looks in stock).

    Uses the module-level selenium `browser`; saves a screenshot of hits.
    """
    global browser
    browser.get(url)
    time.sleep(5)  # give dynamic page content time to render
    find_flag = False
    for keyword in keywords:
        if keyword in browser.page_source:
            find_flag = keyword
            break
    # No sold-out keyword found and not an error page ('出错啦') -> in stock.
    if not find_flag and '出错啦' not in browser.title:
        log.warning("FIND!!!")
        log.warning(url)
        log.warning(keywords)
        str_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # Append mode replaces the original read-all/rewrite-all round trip,
        # closes the handle deterministically, and no longer crashes when
        # ../data.txt does not exist yet.
        with open("../data.txt", "a") as fo:
            fo.write(str_time + " " + browser.title + " url:" + url + "\n")
        print("发现口罩有货!!" + url)
        browser.save_screenshot("imgs/" + str_time + ".png")
        time.sleep(5)
def check_all_shops():
    """Read config/shop.json and run check_shop() for every shop URL."""
    with open(os.path.join(os.path.dirname(__file__), "config", "shop.json"),
              "r", encoding='UTF-8') as f:
        infos = json.loads(f.read())
    for info in infos:
        # Sold-out markers are comma-separated under the "key_word" key;
        # hoisted out of the inner loop (invariant per shop list).
        keywords = info.get("key_word").split(",")
        for shop in info["shop"]:
            # Bug fix: the log line used info.get("keyword") — a key that
            # does not exist in the config, so it always logged None.
            log.info("checking {} / {}".format(shop, info.get("key_word")))
            check_shop(shop, keywords)
# Load the products listed in config/shop.json, poll their availability
# forever, and record in-stock hits in data.txt.
if __name__ == "__main__":
    browser = webdriver.Chrome(
        os.path.join(os.path.dirname(__file__), "src", "chromedriver"))
    while True:
        check_all_shops()
# browser.quit() | 28.932203 | 104 | 0.589338 | import sys
import os, time, json
lib_path = os.path.join(os.path.dirname(__file__))[:-3]
sys.path.append(lib_path)
from selenium import webdriver
from log.logger import logger as log
from PIL import Image
browser = None
def check_shop(url, keywords):
global browser
browser.get(url)
time.sleep(5)
find_flag = False
for keyword in keywords:
if keyword in browser.page_source:
find_flag = keyword
break
if not find_flag and '出错啦' not in browser.title:
log.warning("FIND!!!")
log.warning(url)
log.warning(keywords)
fo = open("../data.txt", "r")
lines = fo.readlines()
fo.close()
fo = open("../data.txt", "w")
str_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
lines.append(str_time+" "+browser.title+" url:"+url+"\n")
fo.writelines(lines)
fo.close()
print("发现口罩有货!!"+url)
browser.save_screenshot("imgs/" + str_time + ".png")
time.sleep(5)
def check_all_shops():
with open(os.path.join(os.path.dirname(__file__),"config","shop.json"), "r", encoding='UTF-8') as f:
infos = json.loads(f.read())
for info in infos:
for shop in info["shop"]:
log.info("checking {} / {}".format(shop, info.get("keyword")))
keywords = info.get("key_word").split(",")
check_shop(shop, keywords)
if __name__ == "__main__":
browser = webdriver.Chrome(os.path.join(os.path.dirname(__file__),"src", "chromedriver"))
while True:
check_all_shops()
| true | true |
1c47f140c05031e8eb49b0a4ff76d0583d02aea8 | 326 | py | Python | trial and error method.py | ankanpramanik/Solve-Equation-wby-Trial-and-Error | f0456354ad447988672bf97422ccf3245b094b6f | [
"MIT"
] | null | null | null | trial and error method.py | ankanpramanik/Solve-Equation-wby-Trial-and-Error | f0456354ad447988672bf97422ccf3245b094b6f | [
"MIT"
] | null | null | null | trial and error method.py | ankanpramanik/Solve-Equation-wby-Trial-and-Error | f0456354ad447988672bf97422ccf3245b094b6f | [
"MIT"
] | 1 | 2021-11-21T10:26:13.000Z | 2021-11-21T10:26:13.000Z | N1=int(input("Enter the value of coefficient of n1 "))
# NOTE(review): N1 is read from input() earlier in the script; this block
# brute-forces non-negative integer pairs (n1, n2) in [0, 100].
N2=int(input("Enter the value of coefficient of n2 "))
N=int(input("Enter the value of constant "))
# Print every pair satisfying N1*n1 + N2*n2 == N; otherwise try
# N1*n1 - N2*n2 == N.  Because of the elif, the minus equation is never
# checked for a pair that already satisfies the plus equation —
# presumably intentional, but worth confirming.
for n1 in range(101):
    for n2 in range(101):
        if N1*n1+ N2*n2 == N:
            print(n1, n2)
        elif N1*n1-N2*n2 ==N:
            print (n1,n2)
| 29.636364 | 55 | 0.564417 | N1=int(input("Enter the value of coefficient of n1 "))
N2=int(input("Enter the value of coefficient of n2 "))
N=int(input("Enter the value of constant "))
for n1 in range(101):
for n2 in range(101):
if N1*n1+ N2*n2 == N:
print(n1, n2)
elif N1*n1-N2*n2 ==N:
print (n1,n2)
| true | true |
1c47f1a5f34f4cbaa528ae8ec5cd644aad3b5b3b | 758 | py | Python | Shnu_course_classroom.py | Doreamonsky/Shnu | b052dd21c2dd6c8f51fa83da0a3504eaa16aedcf | [
"Apache-2.0"
] | 10 | 2018-01-18T11:45:55.000Z | 2021-01-26T08:44:16.000Z | Shnu_course_classroom.py | Doreamonsky/Shnu | b052dd21c2dd6c8f51fa83da0a3504eaa16aedcf | [
"Apache-2.0"
] | 1 | 2018-02-18T13:56:19.000Z | 2018-02-18T13:56:19.000Z | Shnu_course_classroom.py | Doreamonsky/Shnu | b052dd21c2dd6c8f51fa83da0a3504eaa16aedcf | [
"Apache-2.0"
] | 2 | 2018-02-07T11:47:36.000Z | 2018-04-05T11:45:58.000Z | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import Shnu_course
from prettytable import PrettyTable
course_helper = Shnu_course.CourseHelper()
# Map classroom name -> list of courses held there; the '' key seeds an
# entry for courses whose place string is empty.
classroom_list = {'': []}
for course in course_helper.get_all_page_courses():
    for myplace in course.places:
        # dict.has_key() is deprecated (and removed in Python 3); the
        # `in` operator works on both Python 2 and 3.
        if myplace.place in classroom_list:
            classroom_list[myplace.place].append(course)
        else:
            classroom_list[myplace.place] = [course]
my_table = PrettyTable(['Class', 'ClassNumber', 'MaxCapacityPrediction'])
for key, value in classroom_list.items():
    # The largest course capacity seen in a room predicts its capacity.
    number = 0
    for course in value:
        if course.max_number > number:
            number = course.max_number
    my_table.add_row([key, len(value), number])
# Parenthesized print works identically on Python 2 (statement with a
# single parenthesized argument) and Python 3 (function call).
print(my_table.get_string(sortby='Class'))
| 26.137931 | 73 | 0.687335 |
import Shnu_course
from prettytable import PrettyTable
course_helper = Shnu_course.CourseHelper()
classroom_list = {'': []}
for course in course_helper.get_all_page_courses():
for myplace in course.places:
if classroom_list.has_key(myplace.place):
classroom_list[myplace.place].append(course)
else:
classroom_list[myplace.place] = [course]
my_table = PrettyTable(['Class', 'ClassNumber', 'MaxCapacityPrediction'])
for key, value in classroom_list.items():
number = 0
for course in value:
if course.max_number > number:
number = course.max_number
my_table.add_row([key, len(value), number])
print my_table.get_string(sortby='Class')
| false | true |
1c47f24060a389cff1da511978f70c8656a55176 | 1,646 | py | Python | config/urls.py | lalfaro1704/backend_test | b94c8768d50829993f4035d522deb72deedce795 | [
"MIT"
] | null | null | null | config/urls.py | lalfaro1704/backend_test | b94c8768d50829993f4035d522deb72deedce795 | [
"MIT"
] | null | null | null | config/urls.py | lalfaro1704/backend_test | b94c8768d50829993f4035d522deb72deedce795 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path(
"users/",
include("backend_test.users.urls", namespace="users"),
),
# Your stuff: custom urls includes go here
path(
"menu/",
include(("backend_test.menu.urls", "travel"), namespace="menu"),
),
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 32.27451 | 85 | 0.63062 | from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(settings.ADMIN_URL, admin.site.urls),
path(
"users/",
include("backend_test.users.urls", namespace="users"),
),
path(
"menu/",
include(("backend_test.menu.urls", "travel"), namespace="menu"),
),
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
if settings.DEBUG:
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| true | true |
1c47f26627f3856f7841720cb5e66db2363667e0 | 16,289 | py | Python | mongo-python-driver/setup.py | dlminvestments/IBM-Python-VM-1 | 27d06d19d96c170346c03ed4dc66587cbe0bc28f | [
"Apache-2.0"
] | null | null | null | mongo-python-driver/setup.py | dlminvestments/IBM-Python-VM-1 | 27d06d19d96c170346c03ed4dc66587cbe0bc28f | [
"Apache-2.0"
] | 474 | 2020-12-23T22:22:55.000Z | 2022-03-30T18:10:11.000Z | mongo-python-driver/setup.py | dlminvestments/IBM-Python-VM-1 | 27d06d19d96c170346c03ed4dc66587cbe0bc28f | [
"Apache-2.0"
] | 1 | 2020-12-23T23:56:24.000Z | 2020-12-23T23:56:24.000Z | import os
import platform
import re
import sys
import warnings
if sys.version_info[:2] < (2, 7):
raise RuntimeError("Python version >= 2.7 required.")
# Hack to silence atexit traceback in some Python versions
try:
import multiprocessing
except ImportError:
pass
# Don't force people to install setuptools unless
# we have to.
try:
from setuptools import setup, __version__ as _setuptools_version
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, __version__ as _setuptools_version
from distutils.cmd import Command
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsOptionError
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils.core import Extension
_HAVE_SPHINX = True
try:
from sphinx.cmd import build as sphinx
except ImportError:
try:
import sphinx
except ImportError:
_HAVE_SPHINX = False
# Package version string; also used to name the versioned docs build
# directory in the ``doc`` command below.
version = "4.0.dev0"

# Read the long description from README.rst.  ``open`` stays outside the
# try-block so a missing README still fails loudly; only a failed read()
# falls back to an empty description.  ``with`` replaces the manual
# try/finally close (supported on Python 2.7+).
with open("README.rst") as f:
    try:
        readme_content = f.read()
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
    # no longer silently swallowed.
    except Exception:
        readme_content = ""

# PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple
# ships are built with it. This is a problem starting with Xcode 5.1
# since clang 3.4 errors out when it encounters unrecognized compiler
# flags. This hack removes -mno-fused-madd from the CFLAGS automatically
# generated by distutils for Apple provided pythons, allowing C extension
# builds to complete without error. The inspiration comes from older
# versions of distutils.sysconfig.get_config_vars.
if sys.platform == 'darwin' and 'clang' in platform.python_compiler().lower():
    from distutils.sysconfig import get_config_vars
    res = get_config_vars()
    for key in ('CFLAGS', 'PY_CFLAGS'):
        if key in res:
            # Mutate the cached config dict in place so every later
            # distutils consumer sees the sanitized flags.
            res[key] = re.sub('-mno-fused-madd', '', res[key])
class test(Command):
    """distutils/setuptools command that discovers and runs the test suite.

    Invoked as ``python setup.py test``.  It builds the C extensions in
    place first, then runs the suite with PyMongo's custom runner so that
    skipped tests (and optional XUnit XML output) are reported correctly.
    """

    description = "run the tests"

    # (long option, short flag, help text) triples consumed by distutils.
    user_options = [
        ("test-module=", "m", "Discover tests in specified module"),
        ("test-suite=", "s",
         "Test suite to run (e.g. 'some_module.test_suite')"),
        ("failfast", "f", "Stop running tests on first failure or error"),
        ("xunit-output=", "x",
         "Generate a results directory with XUnit XML format")
    ]

    def initialize_options(self):
        """Set defaults for every supported option (distutils protocol)."""
        self.test_module = None
        self.test_suite = None
        self.failfast = False
        self.xunit_output = None

    def finalize_options(self):
        """Validate option combinations; module and suite are exclusive."""
        if self.test_suite is None and self.test_module is None:
            # Neither given: discover everything under the 'test' package.
            self.test_module = 'test'
        elif self.test_module is not None and self.test_suite is not None:
            raise DistutilsOptionError(
                "You may specify a module or suite, but not both"
            )

    def run(self):
        """Fetch test deps, build extensions in place, and run the tests.

        Exits the interpreter with a non-zero status on test failure.
        """
        # Installing required packages, running egg_info and build_ext are
        # part of normal operation for setuptools.command.test.test
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(self.distribution.tests_require)
        if self.xunit_output:
            # XML reporting needs an extra package only in this mode.
            self.distribution.fetch_build_eggs(["unittest-xml-reporting"])
        self.run_command('egg_info')
        build_ext_cmd = self.reinitialize_command('build_ext')
        build_ext_cmd.inplace = 1
        self.run_command('build_ext')
        # Construct a TextTestRunner directly from the unittest imported from
        # test, which creates a TestResult that supports the 'addSkip' method.
        # setuptools will by default create a TextTestRunner that uses the old
        # TestResult class.
        from test import unittest, PymongoTestRunner, test_cases
        if self.test_suite is None:
            all_tests = unittest.defaultTestLoader.discover(self.test_module)
            suite = unittest.TestSuite()
            # Sort by module name for a deterministic execution order.
            suite.addTests(sorted(test_cases(all_tests),
                                  key=lambda x: x.__module__))
        else:
            suite = unittest.defaultTestLoader.loadTestsFromName(
                self.test_suite)
        if self.xunit_output:
            from test import PymongoXMLTestRunner
            runner = PymongoXMLTestRunner(verbosity=2, failfast=self.failfast,
                                          output=self.xunit_output)
        else:
            runner = PymongoTestRunner(verbosity=2, failfast=self.failfast)
        result = runner.run(suite)
        # Shell convention: exit 0 on success, 1 on failure.
        sys.exit(not result.wasSuccessful())
class doc(Command):
    """distutils command that builds (or doctests) the Sphinx documentation.

    Invoked as ``python setup.py doc`` to build HTML docs into
    ``doc/_build/<version>``, or ``python setup.py doc -t`` to run the
    doctests embedded in the documentation instead.
    """

    description = "generate or test documentation"

    user_options = [("test", "t",
                     "run doctests instead of generating documentation")]
    boolean_options = ["test"]

    def initialize_options(self):
        """Set option defaults (distutils command protocol)."""
        self.test = False

    def finalize_options(self):
        """No cross-option validation needed."""
        pass

    def run(self):
        """Run Sphinx in either ``html`` or ``doctest`` mode.

        Raises RuntimeError if Sphinx is missing or the build fails.
        """
        if not _HAVE_SPHINX:
            raise RuntimeError(
                "You must install Sphinx to build or test the documentation.")

        if sys.version_info[0] >= 3:
            import doctest
            from doctest import OutputChecker as _OutputChecker

            # The docstrings were written for Python 2, so expected doctest
            # output may carry u''/b'' prefixes that Python 3 does not
            # print.  Patch doctest's OutputChecker to strip them before
            # comparing.

            # Match u or U (possibly followed by r or R), removing it.
            # r/R can follow u/U but not precede it. Don't match the
            # single character string 'u' or 'U'.
            _u_literal_re = re.compile(
                r"(\W|^)(?<![\'\"])[uU]([rR]?[\'\"])", re.UNICODE)
            # Match b or B (possibly followed by r or R), removing.
            # r/R can follow b/B but not precede it. Don't match the
            # single character string 'b' or 'B'.
            _b_literal_re = re.compile(
                r"(\W|^)(?<![\'\"])[bB]([rR]?[\'\"])", re.UNICODE)

            class _StringPrefixFixer(_OutputChecker):

                def check_output(self, want, got, optionflags):
                    # Strip the 'u' prefix from the expected results and the
                    # 'b' prefix from the actual results (see note above).
                    want = re.sub(_u_literal_re, r'\1\2', want)
                    got = re.sub(_b_literal_re, r'\1\2', got)
                    return super(
                        _StringPrefixFixer, self).check_output(
                            want, got, optionflags)

                def output_difference(self, example, got, optionflags):
                    example.want = re.sub(_u_literal_re, r'\1\2', example.want)
                    got = re.sub(_b_literal_re, r'\1\2', got)
                    return super(
                        _StringPrefixFixer, self).output_difference(
                            example, got, optionflags)

            doctest.OutputChecker = _StringPrefixFixer

        if self.test:
            path = os.path.join(
                os.path.abspath('.'), "doc", "_build", "doctest")
            mode = "doctest"
        else:
            path = os.path.join(
                os.path.abspath('.'), "doc", "_build", version)
            mode = "html"

        try:
            os.makedirs(path)
        # BUGFIX: narrowed from a bare ``except`` -- an already-existing
        # directory is fine, but KeyboardInterrupt/SystemExit must not be
        # swallowed here.  (os.makedirs raises OSError on failure.)
        except OSError:
            pass

        sphinx_args = ["-E", "-b", mode, "doc", path]

        # sphinx.main calls sys.exit when sphinx.build_main exists.
        # Call build_main directly so we can check status and print
        # the full path to the built docs.
        if hasattr(sphinx, 'build_main'):
            status = sphinx.build_main(sphinx_args)
        else:
            status = sphinx.main(sphinx_args)

        if status:
            raise RuntimeError("documentation step '%s' failed" % (mode,))

        sys.stdout.write("\nDocumentation step '%s' performed, results here:\n"
                         "  %s/\n" % (mode, path))
# Exception types that mean "the C extension failed to build"; these are
# downgraded to a warning by custom_build_ext instead of aborting setup.
build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32':
    # distutils.msvc9compiler can raise an IOError when it fails to
    # locate the compiler, so treat that as a build error as well.
    build_errors += (IOError,)
class custom_build_ext(build_ext):
    """Allow C extension building to fail.

    The C extension speeds up BSON encoding, but is not essential: on any
    build error (see ``build_errors`` above) a warning is printed and the
    pure-Python implementations are used instead.
    """

    # %s slots are filled with (what failed, extra hint) by the callers
    # below; the body is a runtime message and must not be reworded lightly.
    warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for PyMongo to run,
although they do result in significant speed improvements.
%s
Please see the installation docs for solutions to build issues:
https://pymongo.readthedocs.io/en/stable/installation.html
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
    $ sudo apt-get install build-essential python-dev
Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux,
Oracle Linux, Fedora, etc.) should issue the following command:
    $ sudo yum install gcc python-devel
If you are seeing this message on Microsoft Windows please install
PyMongo using pip. Modern versions of pip will install PyMongo
from binary wheels available on pypi. If you must install from
source read the documentation here:
https://pymongo.readthedocs.io/en/stable/installation.html#installing-from-source-on-windows
If you are seeing this message on macOS / OSX please install PyMongo
using pip. Modern versions of pip will install PyMongo from binary
wheels available on pypi. If wheels are not available for your version
of macOS / OSX, or you must install from source read the documentation
here:
https://pymongo.readthedocs.io/en/stable/installation.html#osx
********************************************************************
"""

    def run(self):
        """Run the normal build_ext step, downgrading platform errors.

        A DistutilsPlatformError here means the whole extension machinery
        is unusable (e.g. no compiler configured), so warn once for all
        extension modules and continue without them.
        """
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(self.warning_message % ("Extension modules",
                                                  "There was an issue with "
                                                  "your platform configuration"
                                                  " - see above."))

    def build_extension(self, ext):
        """Build a single extension, turning compile failures into warnings."""
        name = ext.name
        try:
            build_ext.build_extension(self, ext)
        except build_errors:
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(self.warning_message % ("The %s extension "
                                                  "module" % (name,),
                                                  "The output above "
                                                  "this warning shows how "
                                                  "the compilation "
                                                  "failed."))
# Optional C accelerators: _cbson speeds up BSON encode/decode, _cmessage
# speeds up wire-protocol message assembly.  Added to setup() only when
# building extensions is possible (see extra_opts below).
ext_modules = [Extension('bson._cbson',
                         include_dirs=['bson'],
                         sources=['bson/_cbsonmodule.c',
                                  'bson/time64.c',
                                  'bson/buffer.c',
                                  'bson/encoding_helpers.c']),
               Extension('pymongo._cmessage',
                         include_dirs=['bson'],
                         sources=['pymongo/_cmessagemodule.c',
                                  'bson/buffer.c'])]

# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced
# a related feature we need. 17.2.0 fixes a bug
# in set_default_verify_paths we should really avoid.
# service_identity 18.1.0 introduced support for IP addr matching.
pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"]

# Optional feature groups installable as pymongo[<name>].
extras_require = {
    'encryption': ['pymongocrypt<2.0.0'],
    'ocsp': pyopenssl_reqs,
    'snappy': ['python-snappy'],
    'tls': [],
    'zstd': ['zstandard'],
    'aws': ['pymongo-auth-aws<2.0.0'],
}

# https://jira.mongodb.org/browse/PYTHON-2117
# Environment marker support didn't settle down until version 20.10
# https://setuptools.readthedocs.io/en/latest/history.html#v20-10-0
_use_env_markers = tuple(map(int, _setuptools_version.split('.')[:2])) > (20, 9)

# TLS and DNS extras
# We install PyOpenSSL and service_identity for Python < 2.7.9 to
# get support for SNI, which is required to connection to Altas
# free and shared tier.
if sys.version_info[0] == 2:
    if _use_env_markers:
        # For building wheels on Python versions >= 2.7.9: let pip decide
        # at install time via environment markers.
        for req in pyopenssl_reqs:
            extras_require['tls'].append(
                "%s ; python_full_version < '2.7.9'" % (req,))
        if sys.platform == 'win32':
            extras_require['tls'].append(
                "wincertstore>=0.2 ; python_full_version < '2.7.9'")
        else:
            extras_require['tls'].append(
                "certifi ; python_full_version < '2.7.9'")
    elif sys.version_info < (2, 7, 9):
        # For installing from source or egg files on Python versions
        # older than 2.7.9, or systems that have setuptools versions
        # older than 20.10.
        extras_require['tls'].extend(pyopenssl_reqs)
        if sys.platform == 'win32':
            extras_require['tls'].append("wincertstore>=0.2")
        else:
            extras_require['tls'].append("certifi")
    extras_require.update({'srv': ["dnspython>=1.16.0,<1.17.0"]})
    # BUGFIX: was extras_require.update({'tls': ["ipaddress"]}), which
    # replaced the whole 'tls' list and silently discarded the
    # PyOpenSSL/certifi/wincertstore requirements appended just above.
    # Append the ipaddress backport instead.
    extras_require['tls'].append("ipaddress")
else:
    extras_require.update({'srv': ["dnspython>=1.16.0,<2.0.0"]})

# GSSAPI extras: Kerberos authentication support per platform.
if sys.platform == 'win32':
    extras_require['gssapi'] = ["winkerberos>=0.5.0"]
else:
    extras_require['gssapi'] = ["pykerberos"]

extra_opts = {
    "packages": ["bson", "pymongo", "gridfs"]
}

# --no_ext skips the C extensions explicitly; Jython/IronPython/PyPy
# cannot build them at all.
if "--no_ext" in sys.argv:
    sys.argv.remove("--no_ext")
elif (sys.platform.startswith("java") or
      sys.platform == "cli" or
      "PyPy" in sys.version):
    sys.stdout.write("""
*****************************************************\n
The optional C extensions are currently not supported\n
by this python implementation.\n
*****************************************************\n
""")
else:
    extra_opts['ext_modules'] = ext_modules
# Assemble the distribution.  ``extra_opts`` injects ``packages`` and,
# unless extensions were disabled above, ``ext_modules``.
setup(
    name="pymongo",
    version=version,
    description="Python driver for MongoDB <http://www.mongodb.org>",
    long_description=readme_content,
    author="Mike Dirolf",
    author_email="mongodb-user@googlegroups.com",
    maintainer="Bernie Hackett",
    maintainer_email="bernie@mongodb.com",
    url="http://github.com/mongodb/mongo-python-driver",
    keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"],
    # Core install has no hard dependencies; optional features live in
    # extras_require assembled above.
    install_requires=[],
    license="Apache License, Version 2.0",
    python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Database"],
    # Custom commands defined above: build with soft-fail extensions,
    # build/test docs, and run the test suite.
    cmdclass={"build_ext": custom_build_ext,
              "doc": doc,
              "test": test},
    extras_require=extras_require,
    **extra_opts
)
| 38.783333 | 92 | 0.599362 | import os
import platform
import re
import sys
import warnings
if sys.version_info[:2] < (2, 7):
raise RuntimeError("Python version >= 2.7 required.")
try:
import multiprocessing
except ImportError:
pass
# we have to.
try:
from setuptools import setup, __version__ as _setuptools_version
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, __version__ as _setuptools_version
from distutils.cmd import Command
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsOptionError
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils.core import Extension
_HAVE_SPHINX = True
try:
from sphinx.cmd import build as sphinx
except ImportError:
try:
import sphinx
except ImportError:
_HAVE_SPHINX = False
version = "4.0.dev0"
f = open("README.rst")
try:
try:
readme_content = f.read()
except:
readme_content = ""
finally:
f.close()
# PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple
if sys.platform == 'darwin' and 'clang' in platform.python_compiler().lower():
from distutils.sysconfig import get_config_vars
res = get_config_vars()
for key in ('CFLAGS', 'PY_CFLAGS'):
if key in res:
flags = res[key]
flags = re.sub('-mno-fused-madd', '', flags)
res[key] = flags
class test(Command):
description = "run the tests"
user_options = [
("test-module=", "m", "Discover tests in specified module"),
("test-suite=", "s",
"Test suite to run (e.g. 'some_module.test_suite')"),
("failfast", "f", "Stop running tests on first failure or error"),
("xunit-output=", "x",
"Generate a results directory with XUnit XML format")
]
def initialize_options(self):
self.test_module = None
self.test_suite = None
self.failfast = False
self.xunit_output = None
def finalize_options(self):
if self.test_suite is None and self.test_module is None:
self.test_module = 'test'
elif self.test_module is not None and self.test_suite is not None:
raise DistutilsOptionError(
"You may specify a module or suite, but not both"
)
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.xunit_output:
self.distribution.fetch_build_eggs(["unittest-xml-reporting"])
self.run_command('egg_info')
build_ext_cmd = self.reinitialize_command('build_ext')
build_ext_cmd.inplace = 1
self.run_command('build_ext')
from test import unittest, PymongoTestRunner, test_cases
if self.test_suite is None:
all_tests = unittest.defaultTestLoader.discover(self.test_module)
suite = unittest.TestSuite()
suite.addTests(sorted(test_cases(all_tests),
key=lambda x: x.__module__))
else:
suite = unittest.defaultTestLoader.loadTestsFromName(
self.test_suite)
if self.xunit_output:
from test import PymongoXMLTestRunner
runner = PymongoXMLTestRunner(verbosity=2, failfast=self.failfast,
output=self.xunit_output)
else:
runner = PymongoTestRunner(verbosity=2, failfast=self.failfast)
result = runner.run(suite)
sys.exit(not result.wasSuccessful())
class doc(Command):
description = "generate or test documentation"
user_options = [("test", "t",
"run doctests instead of generating documentation")]
boolean_options = ["test"]
def initialize_options(self):
self.test = False
def finalize_options(self):
pass
def run(self):
if not _HAVE_SPHINX:
raise RuntimeError(
"You must install Sphinx to build or test the documentation.")
if sys.version_info[0] >= 3:
import doctest
from doctest import OutputChecker as _OutputChecker
# single character string 'u' or 'U'.
_u_literal_re = re.compile(
r"(\W|^)(?<![\'\"])[uU]([rR]?[\'\"])", re.UNICODE)
# Match b or B (possibly followed by r or R), removing.
# r/R can follow b/B but not precede it. Don't match the
_b_literal_re = re.compile(
r"(\W|^)(?<![\'\"])[bB]([rR]?[\'\"])", re.UNICODE)
class _StringPrefixFixer(_OutputChecker):
def check_output(self, want, got, optionflags):
want = re.sub(_u_literal_re, r'\1\2', want)
# We also have to strip the 'b' prefix from the actual
# results since python 2.x expected results won't have
got = re.sub(_b_literal_re, r'\1\2', got)
return super(
_StringPrefixFixer, self).check_output(
want, got, optionflags)
def output_difference(self, example, got, optionflags):
example.want = re.sub(_u_literal_re, r'\1\2', example.want)
got = re.sub(_b_literal_re, r'\1\2', got)
return super(
_StringPrefixFixer, self).output_difference(
example, got, optionflags)
doctest.OutputChecker = _StringPrefixFixer
if self.test:
path = os.path.join(
os.path.abspath('.'), "doc", "_build", "doctest")
mode = "doctest"
else:
path = os.path.join(
os.path.abspath('.'), "doc", "_build", version)
mode = "html"
try:
os.makedirs(path)
except:
pass
sphinx_args = ["-E", "-b", mode, "doc", path]
if hasattr(sphinx, 'build_main'):
status = sphinx.build_main(sphinx_args)
else:
status = sphinx.main(sphinx_args)
if status:
raise RuntimeError("documentation step '%s' failed" % (mode,))
sys.stdout.write("\nDocumentation step '%s' performed, results here:\n"
" %s/\n" % (mode, path))
if sys.platform == 'win32':
build_errors = (CCompilerError, DistutilsExecError,
DistutilsPlatformError, IOError)
else:
build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class custom_build_ext(build_ext):
warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for PyMongo to run,
although they do result in significant speed improvements.
%s
Please see the installation docs for solutions to build issues:
https://pymongo.readthedocs.io/en/stable/installation.html
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
$ sudo apt-get install build-essential python-dev
Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux,
Oracle Linux, Fedora, etc.) should issue the following command:
$ sudo yum install gcc python-devel
If you are seeing this message on Microsoft Windows please install
PyMongo using pip. Modern versions of pip will install PyMongo
from binary wheels available on pypi. If you must install from
source read the documentation here:
https://pymongo.readthedocs.io/en/stable/installation.html#installing-from-source-on-windows
If you are seeing this message on macOS / OSX please install PyMongo
using pip. Modern versions of pip will install PyMongo from binary
wheels available on pypi. If wheels are not available for your version
of macOS / OSX, or you must install from source read the documentation
here:
https://pymongo.readthedocs.io/en/stable/installation.html#osx
********************************************************************
"""
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(self.warning_message % ("Extension modules",
"There was an issue with "
"your platform configuration"
" - see above."))
def build_extension(self, ext):
name = ext.name
try:
build_ext.build_extension(self, ext)
except build_errors:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
warnings.warn(self.warning_message % ("The %s extension "
"module" % (name,),
"The output above "
"this warning shows how "
"the compilation "
"failed."))
ext_modules = [Extension('bson._cbson',
include_dirs=['bson'],
sources=['bson/_cbsonmodule.c',
'bson/time64.c',
'bson/buffer.c',
'bson/encoding_helpers.c']),
Extension('pymongo._cmessage',
include_dirs=['bson'],
sources=['pymongo/_cmessagemodule.c',
'bson/buffer.c'])]
pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"]
extras_require = {
'encryption': ['pymongocrypt<2.0.0'],
'ocsp': pyopenssl_reqs,
'snappy': ['python-snappy'],
'tls': [],
'zstd': ['zstandard'],
'aws': ['pymongo-auth-aws<2.0.0'],
}
# https://setuptools.readthedocs.io/en/latest/history.html#v20-10-0
_use_env_markers = tuple(map(int, _setuptools_version.split('.')[:2])) > (20, 9)
# TLS and DNS extras
# We install PyOpenSSL and service_identity for Python < 2.7.9 to
# get support for SNI, which is required to connection to Altas
# free and shared tier.
if sys.version_info[0] == 2:
if _use_env_markers:
# For building wheels on Python versions >= 2.7.9
for req in pyopenssl_reqs:
extras_require['tls'].append(
"%s ; python_full_version < '2.7.9'" % (req,))
if sys.platform == 'win32':
extras_require['tls'].append(
"wincertstore>=0.2 ; python_full_version < '2.7.9'")
else:
extras_require['tls'].append(
"certifi ; python_full_version < '2.7.9'")
elif sys.version_info < (2, 7, 9):
# For installing from source or egg files on Python versions
# older than 2.7.9, or systems that have setuptools versions
# older than 20.10.
extras_require['tls'].extend(pyopenssl_reqs)
if sys.platform == 'win32':
extras_require['tls'].append("wincertstore>=0.2")
else:
extras_require['tls'].append("certifi")
extras_require.update({'srv': ["dnspython>=1.16.0,<1.17.0"]})
extras_require.update({'tls': ["ipaddress"]})
else:
extras_require.update({'srv': ["dnspython>=1.16.0,<2.0.0"]})
# GSSAPI extras
if sys.platform == 'win32':
extras_require['gssapi'] = ["winkerberos>=0.5.0"]
else:
extras_require['gssapi'] = ["pykerberos"]
extra_opts = {
"packages": ["bson", "pymongo", "gridfs"]
}
if "--no_ext" in sys.argv:
sys.argv.remove("--no_ext")
elif (sys.platform.startswith("java") or
sys.platform == "cli" or
"PyPy" in sys.version):
sys.stdout.write("""
*****************************************************\n
The optional C extensions are currently not supported\n
by this python implementation.\n
*****************************************************\n
""")
else:
extra_opts['ext_modules'] = ext_modules
setup(
name="pymongo",
version=version,
description="Python driver for MongoDB <http://www.mongodb.org>",
long_description=readme_content,
author="Mike Dirolf",
author_email="mongodb-user@googlegroups.com",
maintainer="Bernie Hackett",
maintainer_email="bernie@mongodb.com",
url="http://github.com/mongodb/mongo-python-driver",
keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"],
install_requires=[],
license="Apache License, Version 2.0",
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Database"],
cmdclass={"build_ext": custom_build_ext,
"doc": doc,
"test": test},
extras_require=extras_require,
**extra_opts
)
| true | true |
1c47f334f6f5c914d25a539e1d094e9034d15110 | 7,149 | py | Python | modeling/dynamics/bullet/bdbody.py | takuya-ki/wrs | f6e1009b94332504042fbde9b39323410394ecde | [
"MIT"
] | 23 | 2021-04-02T09:02:04.000Z | 2022-03-22T05:31:03.000Z | modeling/dynamics/bullet/bdbody.py | takuya-ki/wrs | f6e1009b94332504042fbde9b39323410394ecde | [
"MIT"
] | 35 | 2021-04-12T09:41:05.000Z | 2022-03-26T13:32:46.000Z | modeling/dynamics/bullet/bdbody.py | takuya-ki/wrs | f6e1009b94332504042fbde9b39323410394ecde | [
"MIT"
] | 16 | 2021-03-30T11:55:45.000Z | 2022-03-30T07:10:59.000Z | from panda3d.bullet import BulletRigidBodyNode
from panda3d.bullet import BulletTriangleMesh
from panda3d.bullet import BulletTriangleMeshShape
from panda3d.bullet import BulletConvexHullShape, BulletBoxShape
from panda3d.core import TransformState, Vec3, GeomVertexRewriter, CollisionBox, Point3
import copy
import modeling.geometric_model as gm
import basis.data_adapter as dh
import basis.robot_math as rm
import numpy as np
class BDBody(BulletRigidBodyNode):
    """Bullet rigid body built from a WRS geometric model, or cloned from
    another BDBody.

    Lengths handed to Bullet are multiplied by ``base.physics_scale`` and
    divided back out when reported, so callers keep working in model units.
    NOTE(review): ``base`` is assumed to be the ShowBase-like world object
    installed as a global elsewhere in the framework -- confirm.
    """

    def __init__(self,
                 initor,
                 cdtype="triangles",
                 mass=.3,
                 restitution=0,
                 allow_deactivation=False,
                 allow_ccd=True,
                 friction=.2,
                 dynamic=True,
                 name="rbd"):
        """
        TODO: triangles do not seem to work (very slow) in the github version (20210418)
        Use convex if possible
        :param initor: could be itself (copy), or an instance of collision model
        :param cdtype: collision shape type -- "triangles", "convex", or "box"
        :param mass:
        :param restitution: bounce parameter
        :param friction:
        :param dynamic: only applicable to triangle type, if an object does not move with force, it is not dynamic
        :param name:
        author: weiwei
        date: 20190626, 20201119
        """
        super().__init__(name)
        if isinstance(initor, gm.GeometricModel):
            if initor._objtrm is None:
                raise ValueError("Only applicable to models with a trimesh!")
            # Center of mass in scaled physics units; the vertex data below
            # is shifted so the body's local origin sits at the com.
            self.com = initor.objtrm.center_mass * base.physics_scale
            self.setMass(mass)
            self.setRestitution(restitution)
            self.setFriction(friction)
            self.setLinearDamping(.3)
            self.setAngularDamping(.3)
            if allow_deactivation:
                # Let Bullet put near-still bodies to sleep; thresholds are
                # in scaled physics units.
                self.setDeactivationEnabled(True)
                self.setLinearSleepThreshold(.01*base.physics_scale)
                self.setAngularSleepThreshold(.01*base.physics_scale)
            else:
                self.setDeactivationEnabled(False)
            if allow_ccd: # continuous collision detection
                self.setCcdMotionThreshold(1e-7)
                self.setCcdSweptSphereRadius(0.0005*base.physics_scale)
            geom_np = initor.objpdnp.getChild(0).find("+GeomNode")
            # Deep-copy the geom so the render model's vertex data is not
            # mutated by the scaling/recentering below.
            geom = copy.deepcopy(geom_np.node().getGeom(0))
            vdata = geom.modifyVertexData()
            # Interleaved float32 vertex buffer; assumes 6 floats per vertex
            # (xyz + one 3-float attribute) -- TODO confirm against the
            # model's GeomVertexFormat.
            vertices = copy.deepcopy(np.frombuffer(vdata.modifyArrayHandle(0).getData(), dtype=np.float32))
            vertices.shape=(-1,6)
            # Scale positions to physics units and recenter on the com.
            vertices[:, :3]=vertices[:, :3]*base.physics_scale-self.com
            vdata.modifyArrayHandle(0).setData(vertices.astype(np.float32).tobytes())
            geomtf = geom_np.getTransform()
            # TransformState is immutable: setPos returns a new state.
            geomtf = geomtf.setPos(geomtf.getPos()*base.physics_scale)
            if cdtype == "triangles":
                geombmesh = BulletTriangleMesh()
                geombmesh.addGeom(geom)
                bulletshape = BulletTriangleMeshShape(geombmesh, dynamic=dynamic)
                bulletshape.setMargin(1e-6)
                self.addShape(bulletshape, geomtf)
            elif cdtype == "convex":
                bulletshape = BulletConvexHullShape() # TODO: compute a convex hull?
                bulletshape.addGeom(geom, geomtf)
                bulletshape.setMargin(1e-6)
                self.addShape(bulletshape, geomtf)
            elif cdtype == 'box':
                # Axis-aligned bounding box of the (already recentered)
                # vertices, in physics units.
                minx = min(vertices[:,0])
                miny = min(vertices[:,1])
                minz = min(vertices[:,2])
                maxx = max(vertices[:,0])
                maxy = max(vertices[:,1])
                maxz = max(vertices[:,2])
                pcd_box = CollisionBox(Point3(minx, miny, minz),Point3(maxx, maxy, maxz))
                bulletshape = BulletBoxShape.makeFromSolid(pcd_box)
                bulletshape.setMargin(1e-6)
                self.addShape(bulletshape, geomtf)
            else:
                raise NotImplementedError
            pd_homomat = geomtf.getMat()
            pd_com_pos = pd_homomat.xformPoint(Vec3(self.com[0], self.com[1], self.com[2]))
            np_homomat = dh.pdmat4_to_npmat4(pd_homomat)
            np_com_pos = dh.pdv3_to_npv3(pd_com_pos)
            np_homomat[:3, 3] = np_com_pos # update center to com
            self.setTransform(TransformState.makeMat(dh.npmat4_to_pdmat4(np_homomat)))
        elif isinstance(initor, BDBody):
            # Copy constructor: duplicate physical parameters and share the
            # existing collision shape with the source body.
            self.com = initor.com.copy()
            self.setMass(initor.getMass())
            self.setRestitution(initor.restitution)
            self.setFriction(initor.friction)
            self.setLinearDamping(.3)
            self.setAngularDamping(.3)
            if allow_deactivation:
                self.setDeactivationEnabled(True)
                self.setLinearSleepThreshold(.01*base.physics_scale)
                self.setAngularSleepThreshold(.01*base.physics_scale)
            else:
                self.setDeactivationEnabled(False)
            if allow_ccd:
                self.setCcdMotionThreshold(1e-7)
                self.setCcdSweptSphereRadius(0.0005*base.physics_scale)
            np_homomat = copy.deepcopy(initor.get_homomat())
            # get_homomat reports model units; convert back to physics units.
            np_homomat[:3,3] = np_homomat[:3,3]*base.physics_scale
            self.setTransform(TransformState.makeMat(dh.npmat4_to_pdmat4(np_homomat)))
            self.addShape(initor.getShape(0), initor.getShapeTransform(0))

    def get_pos(self):
        """Return the position of the original model frame (not the com
        frame) as a numpy vector in model units."""
        pdmat4 = self.getTransform().getMat()
        # Shift from the com-centered dynamics frame back to the model origin.
        pdv3 = pdmat4.xformPoint(Vec3(-self.com[0], -self.com[1], -self.com[2]))
        pos = dh.pdv3_to_npv3(pdv3)/base.physics_scale
        return pos

    def set_pos(self, npvec3):
        """Set the body position from a numpy vector in model units.

        NOTE(review): ``dh.pdv3_to_npv3`` converts Panda->NumPy, yet
        ``npvec3`` is already a NumPy vector; ``dh.npv3_to_pdv3`` was
        probably intended here -- confirm against basis.data_adapter.
        Also note this sets the com-frame position without the com offset
        that get_pos removes -- verify symmetry with get_pos.
        """
        self.setPos(dh.pdv3_to_npv3(npvec3)*base.physics_scale)

    def get_homomat(self):
        """
        get the homomat considering the original local frame
        the dynamic body moves in a local frame defined at com (see __init__), instead of returning the
        homomat of the dynamic body, this file returns the pose of original local frame
        the returned homomat can be used by collision bodies for rendering.
        :return: 4x4 numpy homogeneous matrix in model units
        author: weiwei
        date: 2019?, 20201119
        """
        pd_homomat = self.getTransform().getMat()
        # Undo the com offset applied in __init__ to recover the model frame.
        pd_com_pos = pd_homomat.xformPoint(Vec3(-self.com[0], -self.com[1], -self.com[2]))
        np_homomat = dh.pdmat4_to_npmat4(pd_homomat)
        np_com_pos = dh.pdv3_to_npv3(pd_com_pos)
        np_homomat[:3, 3] = np_com_pos/base.physics_scale
        return np_homomat

    def set_homomat(self, homomat):
        """
        set the pose of the dynamic body
        :param homomat: the homomat of the original frame (the collision model), in model units
        :return:
        author: weiwei
        date: 2019?, 20201119
        """
        # Work on a copy so the caller's matrix is not mutated.
        tmp_homomat = copy.deepcopy(homomat)
        tmp_homomat[:3, 3] = tmp_homomat[:3,3]*base.physics_scale
        # Translate the model-frame pose to the com-centered dynamics frame.
        pos = rm.homomat_transform_points(tmp_homomat, self.com)
        rotmat = tmp_homomat[:3, :3]
        self.setTransform(TransformState.makeMat(dh.npv3mat3_to_pdmat4(pos, rotmat)))

    def copy(self):
        """Return a new BDBody cloned from this one (the collision shape is
        shared, physical parameters are duplicated)."""
        return BDBody(self)
| 44.962264 | 114 | 0.615051 | from panda3d.bullet import BulletRigidBodyNode
from panda3d.bullet import BulletTriangleMesh
from panda3d.bullet import BulletTriangleMeshShape
from panda3d.bullet import BulletConvexHullShape, BulletBoxShape
from panda3d.core import TransformState, Vec3, GeomVertexRewriter, CollisionBox, Point3
import copy
import modeling.geometric_model as gm
import basis.data_adapter as dh
import basis.robot_math as rm
import numpy as np
class BDBody(BulletRigidBodyNode):
def __init__(self,
initor,
cdtype="triangles",
mass=.3,
restitution=0,
allow_deactivation=False,
allow_ccd=True,
friction=.2,
dynamic=True,
name="rbd"):
super().__init__(name)
if isinstance(initor, gm.GeometricModel):
if initor._objtrm is None:
raise ValueError("Only applicable to models with a trimesh!")
self.com = initor.objtrm.center_mass * base.physics_scale
self.setMass(mass)
self.setRestitution(restitution)
self.setFriction(friction)
self.setLinearDamping(.3)
self.setAngularDamping(.3)
if allow_deactivation:
self.setDeactivationEnabled(True)
self.setLinearSleepThreshold(.01*base.physics_scale)
self.setAngularSleepThreshold(.01*base.physics_scale)
else:
self.setDeactivationEnabled(False)
if allow_ccd:
self.setCcdMotionThreshold(1e-7)
self.setCcdSweptSphereRadius(0.0005*base.physics_scale)
geom_np = initor.objpdnp.getChild(0).find("+GeomNode")
geom = copy.deepcopy(geom_np.node().getGeom(0))
vdata = geom.modifyVertexData()
vertices = copy.deepcopy(np.frombuffer(vdata.modifyArrayHandle(0).getData(), dtype=np.float32))
vertices.shape=(-1,6)
vertices[:, :3]=vertices[:, :3]*base.physics_scale-self.com
vdata.modifyArrayHandle(0).setData(vertices.astype(np.float32).tobytes())
geomtf = geom_np.getTransform()
geomtf = geomtf.setPos(geomtf.getPos()*base.physics_scale)
if cdtype == "triangles":
geombmesh = BulletTriangleMesh()
geombmesh.addGeom(geom)
bulletshape = BulletTriangleMeshShape(geombmesh, dynamic=dynamic)
bulletshape.setMargin(1e-6)
self.addShape(bulletshape, geomtf)
elif cdtype == "convex":
bulletshape = BulletConvexHullShape()
bulletshape.addGeom(geom, geomtf)
bulletshape.setMargin(1e-6)
self.addShape(bulletshape, geomtf)
elif cdtype == 'box':
minx = min(vertices[:,0])
miny = min(vertices[:,1])
minz = min(vertices[:,2])
maxx = max(vertices[:,0])
maxy = max(vertices[:,1])
maxz = max(vertices[:,2])
pcd_box = CollisionBox(Point3(minx, miny, minz),Point3(maxx, maxy, maxz))
bulletshape = BulletBoxShape.makeFromSolid(pcd_box)
bulletshape.setMargin(1e-6)
self.addShape(bulletshape, geomtf)
else:
raise NotImplementedError
pd_homomat = geomtf.getMat()
pd_com_pos = pd_homomat.xformPoint(Vec3(self.com[0], self.com[1], self.com[2]))
np_homomat = dh.pdmat4_to_npmat4(pd_homomat)
np_com_pos = dh.pdv3_to_npv3(pd_com_pos)
np_homomat[:3, 3] = np_com_pos
self.setTransform(TransformState.makeMat(dh.npmat4_to_pdmat4(np_homomat)))
elif isinstance(initor, BDBody):
self.com = initor.com.copy()
self.setMass(initor.getMass())
self.setRestitution(initor.restitution)
self.setFriction(initor.friction)
self.setLinearDamping(.3)
self.setAngularDamping(.3)
if allow_deactivation:
self.setDeactivationEnabled(True)
self.setLinearSleepThreshold(.01*base.physics_scale)
self.setAngularSleepThreshold(.01*base.physics_scale)
else:
self.setDeactivationEnabled(False)
if allow_ccd:
self.setCcdMotionThreshold(1e-7)
self.setCcdSweptSphereRadius(0.0005*base.physics_scale)
np_homomat = copy.deepcopy(initor.get_homomat())
np_homomat[:3,3] = np_homomat[:3,3]*base.physics_scale
self.setTransform(TransformState.makeMat(dh.npmat4_to_pdmat4(np_homomat)))
self.addShape(initor.getShape(0), initor.getShapeTransform(0))
def get_pos(self):
pdmat4 = self.getTransform().getMat()
pdv3 = pdmat4.xformPoint(Vec3(-self.com[0], -self.com[1], -self.com[2]))
pos = dh.pdv3_to_npv3(pdv3)/base.physics_scale
return pos
def set_pos(self, npvec3):
self.setPos(dh.pdv3_to_npv3(npvec3)*base.physics_scale)
def get_homomat(self):
pd_homomat = self.getTransform().getMat()
pd_com_pos = pd_homomat.xformPoint(Vec3(-self.com[0], -self.com[1], -self.com[2]))
np_homomat = dh.pdmat4_to_npmat4(pd_homomat)
np_com_pos = dh.pdv3_to_npv3(pd_com_pos)
np_homomat[:3, 3] = np_com_pos/base.physics_scale
return np_homomat
def set_homomat(self, homomat):
tmp_homomat = copy.deepcopy(homomat)
tmp_homomat[:3, 3] = tmp_homomat[:3,3]*base.physics_scale
pos = rm.homomat_transform_points(tmp_homomat, self.com)
rotmat = tmp_homomat[:3, :3]
self.setTransform(TransformState.makeMat(dh.npv3mat3_to_pdmat4(pos, rotmat)))
def copy(self):
return BDBody(self)
| true | true |
1c47f43155595c95ee4e65fc813a2c0e931c4d26 | 6,564 | py | Python | sympy/vector/tests/test_vector.py | FabianBall/sympy | 9d849ddfc45427fe7f6733ce4d18fa397d0f43a9 | [
"BSD-3-Clause"
] | 3 | 2015-01-17T23:15:04.000Z | 2015-05-26T14:11:44.000Z | sympy/vector/tests/test_vector.py | FabianBall/sympy | 9d849ddfc45427fe7f6733ce4d18fa397d0f43a9 | [
"BSD-3-Clause"
] | 1 | 2017-08-26T01:07:46.000Z | 2017-08-26T16:05:49.000Z | sympy/vector/tests/test_vector.py | FabianBall/sympy | 9d849ddfc45427fe7f6733ce4d18fa397d0f43a9 | [
"BSD-3-Clause"
] | null | null | null | from sympy.core import S
from sympy.simplify import simplify, trigsimp
from sympy import pi, sqrt, symbols, ImmutableMatrix as Matrix, \
sin, cos, Function, Integral, Derivative, diff
from sympy.vector.vector import Vector, BaseVector, VectorAdd, \
VectorMul, VectorZero
from sympy.vector.coordsysrect import CoordSys3D
from sympy.vector.vector import Cross, Dot, dot, cross
# Module-level fixtures shared by every test below: one Cartesian frame,
# its unit base vectors, and three scalar symbols.
C = CoordSys3D('C')
i, j, k = C.base_vectors()
a, b, c = symbols('a b c')
def test_cross():
    """Cross product: unevaluated form, .doit(), the functional helper,
    and antisymmetry."""
    v1 = C.x * i + C.z * C.z * j
    v2 = C.x * i + C.y * j + C.z * k
    assert Cross(v1, v2) == Cross(C.x*C.i + C.z**2*C.j, C.x*C.i + C.y*C.j + C.z*C.k)
    assert Cross(v1, v2).doit() == C.z**3*C.i + (-C.x*C.z)*C.j + (C.x*C.y - C.x*C.z**2)*C.k
    assert cross(v1, v2) == C.z**3*C.i + (-C.x*C.z)*C.j + (C.x*C.y - C.x*C.z**2)*C.k
    assert Cross(v1, v2) == -Cross(v2, v1)
    assert Cross(v1, v2) + Cross(v2, v1) == Vector.zero
def test_dot():
    """Dot product: unevaluated form, .doit(), the functional helper,
    and symmetry."""
    v1 = C.x * i + C.z * C.z * j
    v2 = C.x * i + C.y * j + C.z * k
    assert Dot(v1, v2) == Dot(C.x*C.i + C.z**2*C.j, C.x*C.i + C.y*C.j + C.z*C.k)
    assert Dot(v1, v2).doit() == C.x**2 + C.y*C.z**2
    # Fixed: this line duplicated the previous assertion verbatim.  Mirror
    # test_cross by exercising the imported functional ``dot`` helper
    # (otherwise unused) instead.
    assert dot(v1, v2) == C.x**2 + C.y*C.z**2
    assert Dot(v1, v2) == Dot(v2, v1)
def test_vector_sympy():
    """
    Test whether the Vector framework conforms to the hashing
    and equality testing properties of SymPy.
    """
    v1 = 3*j
    assert v1 == j*3
    assert v1.components == {j: 3}
    v2 = 3*i + 4*j + 5*k
    # v3 is built from split-up terms; addition must normalize it to v2.
    v3 = 2*i + 4*j + i + 4*k + k
    assert v3 == v2
    # Equal vectors must hash equal (required for use in sets/dict keys).
    assert v3.__hash__() == v2.__hash__()
def test_vector():
    """Basic Vector algebra: additive identities, dot against base vectors,
    scalar multiplication, node types, and the components/to_matrix API."""
    assert isinstance(i, BaseVector)
    assert i != j
    assert j != k
    assert k != i
    assert i - i == Vector.zero
    assert i + Vector.zero == i
    assert i - Vector.zero == i
    # Vector.zero is a vector, not the scalar 0.
    assert Vector.zero != 0
    assert -Vector.zero == Vector.zero
    v1 = a*i + b*j + c*k
    v2 = a**2*i + b**2*j + c**2*k
    v3 = v1 + v2
    v4 = 2 * v1
    v5 = a * i
    assert isinstance(v1, VectorAdd)
    assert v1 - v1 == Vector.zero
    assert v1 + Vector.zero == v1
    assert v1.dot(i) == a
    assert v1.dot(j) == b
    assert v1.dot(k) == c
    assert i.dot(v2) == a**2
    assert j.dot(v2) == b**2
    assert k.dot(v2) == c**2
    assert v3.dot(i) == a**2 + a
    assert v3.dot(j) == b**2 + b
    assert v3.dot(k) == c**2 + c
    assert v1 + v2 == v2 + v1
    assert v1 - v2 == -1 * (v2 - v1)
    assert a * v1 == v1 * a
    assert isinstance(v5, VectorMul)
    assert v5.base_vector == i
    assert v5.measure_number == a
    assert isinstance(v4, Vector)
    assert isinstance(v4, VectorAdd)
    # NOTE(review): duplicated assertion (identical to two lines above).
    assert isinstance(v4, Vector)
    assert isinstance(Vector.zero, VectorZero)
    assert isinstance(Vector.zero, Vector)
    # Multiplying by 0 collapses to the VectorZero singleton type.
    assert isinstance(v1 * 0, VectorZero)
    assert v1.to_matrix(C) == Matrix([[a], [b], [c]])
    assert i.components == {i: 1}
    assert v5.components == {i: a}
    assert v1.components == {i: a, j: b, k: c}
    assert VectorAdd(v1, Vector.zero) == v1
    assert VectorMul(a, v1) == v1*a
    assert VectorMul(1, i) == i
    # NOTE(review): duplicated assertion (same as four lines above).
    assert VectorAdd(v1, Vector.zero) == v1
    assert VectorMul(0, Vector.zero) == Vector.zero
def test_vector_magnitude_normalize():
    """magnitude()/normalize() on the zero vector, base vectors, and
    symbolic vectors (note sqrt(a**2) is kept unevaluated for symbols)."""
    assert Vector.zero.magnitude() == 0
    assert Vector.zero.normalize() == Vector.zero
    assert i.magnitude() == 1
    assert j.magnitude() == 1
    assert k.magnitude() == 1
    assert i.normalize() == i
    assert j.normalize() == j
    assert k.normalize() == k
    v1 = a * i
    assert v1.normalize() == (a/sqrt(a**2))*i
    assert v1.magnitude() == sqrt(a**2)
    v2 = a*i + b*j + c*k
    assert v2.magnitude() == sqrt(a**2 + b**2 + c**2)
    assert v2.normalize() == v2 / v2.magnitude()
    v3 = i + j
    assert v3.normalize() == (sqrt(2)/2)*C.i + (sqrt(2)/2)*C.j
def test_vector_simplify():
    """simplify()/trigsimp() apply component-wise to vector measures."""
    # NOTE: the local symbol ``k`` shadows the base vector k in this test.
    A, s, k, m = symbols('A, s, k, m')

    test1 = (1 / a + 1 / b) * i
    # Unsimplified measure differs from the combined fraction ...
    assert (test1 & i) != (a + b) / (a * b)
    test1 = simplify(test1)
    # ... but matches after simplification.
    assert (test1 & i) == (a + b) / (a * b)
    assert test1.simplify() == simplify(test1)

    test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * i
    test2 = simplify(test2)
    assert (test2 & i) == (A**2 * s**4 / (4 * pi * k * m**3))

    test3 = ((4 + 4 * a - 2 * (2 + 2 * a)) / (2 + 2 * a)) * i
    test3 = simplify(test3)
    assert (test3 & i) == 0

    test4 = ((-4 * a * b**2 - 2 * b**3 - 2 * a**2 * b) / (a + b)**2) * i
    test4 = simplify(test4)
    assert (test4 & i) == -2 * b

    v = (sin(a)+cos(a))**2*i - j
    assert trigsimp(v) == (2*sin(a + pi/4)**2)*i + (-1)*j
    assert trigsimp(v) == v.trigsimp()

    assert simplify(Vector.zero) == Vector.zero
def test_vector_dot():
    """Dot products of base vectors are orthonormal, via both .dot()
    and the & operator, plus the zero-vector cases."""
    assert i.dot(Vector.zero) == 0
    assert Vector.zero.dot(i) == 0
    assert i & Vector.zero == 0

    basis = (i, j, k)
    for m, u in enumerate(basis):
        for n, v in enumerate(basis):
            # Orthonormal basis: e_m . e_n is 1 iff m == n, else 0.
            expected = 1 if m == n else 0
            assert u.dot(v) == expected
            assert (u & v) == expected
def test_vector_cross():
    """Cross products of base vectors follow the right-hand rule, via both
    .cross() and the ^ operator, plus the zero-vector cases."""
    assert i.cross(Vector.zero) == Vector.zero
    assert Vector.zero.cross(i) == Vector.zero

    basis = (i, j, k)
    for m, u in enumerate(basis):
        for n, v in enumerate(basis):
            if m == n:
                expected = Vector.zero
            else:
                # e_m x e_n = +/- the remaining base vector; the sign is +
                # for a cyclic pair (i->j, j->k, k->i), - otherwise.
                sign = 1 if (n - m) % 3 == 1 else -1
                expected = sign * basis[3 - m - n]
            assert u.cross(v) == expected
            assert (u ^ v) == expected
def test_projection():
    """Vector and scalar projections, including self-projection and
    projecting a zero vector."""
    v1 = i + j + k
    v2 = 3*i + 4*j
    v3 = 0*i + 0*j
    assert v1.projection(v1) == i + j + k
    assert v1.projection(v2) == S(7)/3*C.i + S(7)/3*C.j + S(7)/3*C.k
    assert v1.projection(v1, scalar=True) == 1
    assert v1.projection(v2, scalar=True) == S(7)/3
    assert v3.projection(v1) == Vector.zero
def test_vector_diff_integrate():
    """Derivative/Integral wrappers and diff() distribute over the
    components of a vector."""
    f = Function('f')
    v = f(a)*C.i + a**2*C.j - C.k
    assert Derivative(v, a) == Derivative((f(a))*C.i +
                                          a**2*C.j + (-1)*C.k, a)
    assert (diff(v, a) == v.diff(a) == Derivative(v, a).doit() ==
            (Derivative(f(a), a))*C.i + 2*a*C.j)
    assert (Integral(v, a) == (Integral(f(a), a))*C.i +
            (Integral(a**2, a))*C.j + (Integral(-1, a))*C.k)
| 28.53913 | 91 | 0.534735 | from sympy.core import S
from sympy.simplify import simplify, trigsimp
from sympy import pi, sqrt, symbols, ImmutableMatrix as Matrix, \
sin, cos, Function, Integral, Derivative, diff
from sympy.vector.vector import Vector, BaseVector, VectorAdd, \
VectorMul, VectorZero
from sympy.vector.coordsysrect import CoordSys3D
from sympy.vector.vector import Cross, Dot, dot, cross
C = CoordSys3D('C')
i, j, k = C.base_vectors()
a, b, c = symbols('a b c')
def test_cross():
v1 = C.x * i + C.z * C.z * j
v2 = C.x * i + C.y * j + C.z * k
assert Cross(v1, v2) == Cross(C.x*C.i + C.z**2*C.j, C.x*C.i + C.y*C.j + C.z*C.k)
assert Cross(v1, v2).doit() == C.z**3*C.i + (-C.x*C.z)*C.j + (C.x*C.y - C.x*C.z**2)*C.k
assert cross(v1, v2) == C.z**3*C.i + (-C.x*C.z)*C.j + (C.x*C.y - C.x*C.z**2)*C.k
assert Cross(v1, v2) == -Cross(v2, v1)
assert Cross(v1, v2) + Cross(v2, v1) == Vector.zero
def test_dot():
v1 = C.x * i + C.z * C.z * j
v2 = C.x * i + C.y * j + C.z * k
assert Dot(v1, v2) == Dot(C.x*C.i + C.z**2*C.j, C.x*C.i + C.y*C.j + C.z*C.k)
assert Dot(v1, v2).doit() == C.x**2 + C.y*C.z**2
assert Dot(v1, v2).doit() == C.x**2 + C.y*C.z**2
assert Dot(v1, v2) == Dot(v2, v1)
def test_vector_sympy():
v1 = 3*j
assert v1 == j*3
assert v1.components == {j: 3}
v2 = 3*i + 4*j + 5*k
v3 = 2*i + 4*j + i + 4*k + k
assert v3 == v2
assert v3.__hash__() == v2.__hash__()
def test_vector():
assert isinstance(i, BaseVector)
assert i != j
assert j != k
assert k != i
assert i - i == Vector.zero
assert i + Vector.zero == i
assert i - Vector.zero == i
assert Vector.zero != 0
assert -Vector.zero == Vector.zero
v1 = a*i + b*j + c*k
v2 = a**2*i + b**2*j + c**2*k
v3 = v1 + v2
v4 = 2 * v1
v5 = a * i
assert isinstance(v1, VectorAdd)
assert v1 - v1 == Vector.zero
assert v1 + Vector.zero == v1
assert v1.dot(i) == a
assert v1.dot(j) == b
assert v1.dot(k) == c
assert i.dot(v2) == a**2
assert j.dot(v2) == b**2
assert k.dot(v2) == c**2
assert v3.dot(i) == a**2 + a
assert v3.dot(j) == b**2 + b
assert v3.dot(k) == c**2 + c
assert v1 + v2 == v2 + v1
assert v1 - v2 == -1 * (v2 - v1)
assert a * v1 == v1 * a
assert isinstance(v5, VectorMul)
assert v5.base_vector == i
assert v5.measure_number == a
assert isinstance(v4, Vector)
assert isinstance(v4, VectorAdd)
assert isinstance(v4, Vector)
assert isinstance(Vector.zero, VectorZero)
assert isinstance(Vector.zero, Vector)
assert isinstance(v1 * 0, VectorZero)
assert v1.to_matrix(C) == Matrix([[a], [b], [c]])
assert i.components == {i: 1}
assert v5.components == {i: a}
assert v1.components == {i: a, j: b, k: c}
assert VectorAdd(v1, Vector.zero) == v1
assert VectorMul(a, v1) == v1*a
assert VectorMul(1, i) == i
assert VectorAdd(v1, Vector.zero) == v1
assert VectorMul(0, Vector.zero) == Vector.zero
def test_vector_magnitude_normalize():
assert Vector.zero.magnitude() == 0
assert Vector.zero.normalize() == Vector.zero
assert i.magnitude() == 1
assert j.magnitude() == 1
assert k.magnitude() == 1
assert i.normalize() == i
assert j.normalize() == j
assert k.normalize() == k
v1 = a * i
assert v1.normalize() == (a/sqrt(a**2))*i
assert v1.magnitude() == sqrt(a**2)
v2 = a*i + b*j + c*k
assert v2.magnitude() == sqrt(a**2 + b**2 + c**2)
assert v2.normalize() == v2 / v2.magnitude()
v3 = i + j
assert v3.normalize() == (sqrt(2)/2)*C.i + (sqrt(2)/2)*C.j
def test_vector_simplify():
A, s, k, m = symbols('A, s, k, m')
test1 = (1 / a + 1 / b) * i
assert (test1 & i) != (a + b) / (a * b)
test1 = simplify(test1)
assert (test1 & i) == (a + b) / (a * b)
assert test1.simplify() == simplify(test1)
test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * i
test2 = simplify(test2)
assert (test2 & i) == (A**2 * s**4 / (4 * pi * k * m**3))
test3 = ((4 + 4 * a - 2 * (2 + 2 * a)) / (2 + 2 * a)) * i
test3 = simplify(test3)
assert (test3 & i) == 0
test4 = ((-4 * a * b**2 - 2 * b**3 - 2 * a**2 * b) / (a + b)**2) * i
test4 = simplify(test4)
assert (test4 & i) == -2 * b
v = (sin(a)+cos(a))**2*i - j
assert trigsimp(v) == (2*sin(a + pi/4)**2)*i + (-1)*j
assert trigsimp(v) == v.trigsimp()
assert simplify(Vector.zero) == Vector.zero
def test_vector_dot():
assert i.dot(Vector.zero) == 0
assert Vector.zero.dot(i) == 0
assert i & Vector.zero == 0
assert i.dot(i) == 1
assert i.dot(j) == 0
assert i.dot(k) == 0
assert i & i == 1
assert i & j == 0
assert i & k == 0
assert j.dot(i) == 0
assert j.dot(j) == 1
assert j.dot(k) == 0
assert j & i == 0
assert j & j == 1
assert j & k == 0
assert k.dot(i) == 0
assert k.dot(j) == 0
assert k.dot(k) == 1
assert k & i == 0
assert k & j == 0
assert k & k == 1
def test_vector_cross():
assert i.cross(Vector.zero) == Vector.zero
assert Vector.zero.cross(i) == Vector.zero
assert i.cross(i) == Vector.zero
assert i.cross(j) == k
assert i.cross(k) == -j
assert i ^ i == Vector.zero
assert i ^ j == k
assert i ^ k == -j
assert j.cross(i) == -k
assert j.cross(j) == Vector.zero
assert j.cross(k) == i
assert j ^ i == -k
assert j ^ j == Vector.zero
assert j ^ k == i
assert k.cross(i) == j
assert k.cross(j) == -i
assert k.cross(k) == Vector.zero
assert k ^ i == j
assert k ^ j == -i
assert k ^ k == Vector.zero
def test_projection():
v1 = i + j + k
v2 = 3*i + 4*j
v3 = 0*i + 0*j
assert v1.projection(v1) == i + j + k
assert v1.projection(v2) == S(7)/3*C.i + S(7)/3*C.j + S(7)/3*C.k
assert v1.projection(v1, scalar=True) == 1
assert v1.projection(v2, scalar=True) == S(7)/3
assert v3.projection(v1) == Vector.zero
def test_vector_diff_integrate():
f = Function('f')
v = f(a)*C.i + a**2*C.j - C.k
assert Derivative(v, a) == Derivative((f(a))*C.i +
a**2*C.j + (-1)*C.k, a)
assert (diff(v, a) == v.diff(a) == Derivative(v, a).doit() ==
(Derivative(f(a), a))*C.i + 2*a*C.j)
assert (Integral(v, a) == (Integral(f(a), a))*C.i +
(Integral(a**2, a))*C.j + (Integral(-1, a))*C.k)
| true | true |
1c47f49ae688340870b158350bc212b335d4eb6d | 5,776 | py | Python | app/views/navigation.py | tch1bo/viaduct | bfd37b0a8408b2dd66fb01138163b80ce97699ff | [
"MIT"
] | 11 | 2015-04-23T21:57:56.000Z | 2019-04-28T12:48:58.000Z | app/views/navigation.py | tch1bo/viaduct | bfd37b0a8408b2dd66fb01138163b80ce97699ff | [
"MIT"
] | 1 | 2016-10-05T14:10:58.000Z | 2016-10-05T14:12:23.000Z | app/views/navigation.py | tch1bo/viaduct | bfd37b0a8408b2dd66fb01138163b80ce97699ff | [
"MIT"
] | 3 | 2016-10-05T14:00:42.000Z | 2019-01-16T14:33:43.000Z | import json
import re
from flask import Blueprint, render_template, abort, request, flash, \
redirect, url_for
from flask_babel import _
from flask_login import current_user
from app import db
from app.decorators import require_role
from app.forms import init_form
from app.forms.navigation import NavigationEntryForm
from app.models.navigation import NavigationEntry
from app.models.page import Page
from app.roles import Roles
from app.service import role_service, page_service
from app.utils.forms import flash_form_errors
from app.utils.navigation import NavigationAPI
from app.utils.resource import get_all_routes
blueprint = Blueprint('navigation', __name__, url_prefix='/navigation')
@blueprint.route('/')
@require_role(Roles.NAVIGATION_WRITE)
def view():
    """Render the navigation tree management page."""
    root_entries = NavigationAPI.get_root_entries()
    writable = role_service.user_has_role(current_user,
                                          Roles.NAVIGATION_WRITE)
    return render_template('navigation/view.htm',
                           nav_entries=root_entries,
                           can_write=writable)
@blueprint.route('/create/', methods=['GET', 'POST'])
@blueprint.route('/create/<int:parent_id>/', methods=['GET', 'POST'])
@blueprint.route('/edit/<int:entry_id>/', methods=['GET', 'POST'])
@require_role(Roles.NAVIGATION_WRITE)
def edit(entry_id=None, parent_id=None):
    """Create or edit a navigation entry.

    Serves three routes: create a top-level entry, create a child of
    ``parent_id``, or edit an existing ``entry_id``.  On a custom URL
    (page choice -1) that points at a nonexistent page, the user is
    redirected to the page editor to create it.
    """
    entry = NavigationEntry.query.get_or_404(entry_id) if entry_id else None
    form = init_form(NavigationEntryForm, obj=entry)
    # First choice is a sentinel (-1) meaning "custom URL instead of a page".
    form.page_id.choices = [(-1, '-- {} --'.format(_('Custom URL')))] + \
        db.session.query(Page.id, Page.path).all()

    parent = NavigationEntry.query.get(parent_id) if parent_id else None
    if parent_id and not parent:
        flash(_('Cannot find parent navigation entry.'), 'danger')
        return redirect(url_for('navigation.view'))

    if form.validate_on_submit():
        url = None
        if form.page_id.data == -1:
            # Normalize custom URLs to start with a slash.
            url = form.url.data
            if not re.compile('^/').match(url):
                url = '/' + url

        page_id = None if form.page_id.data == -1 else form.page_id.data
        if entry:
            entry.nl_title = form.nl_title.data
            entry.en_title = form.en_title.data
            entry.url = url
            entry.page_id = page_id
            entry.external = form.external.data
            entry.activity_list = form.activity_list.data
            entry.order_children_alphabetically = \
                form.order_children_alphabetically.data
        else:
            last_entry = NavigationEntry.query.filter_by(parent_id=None) \
                .order_by(NavigationEntry.position.desc()).first()

            # If there is no parent position the new entry at the end of the
            # top level entry.
            position = (last_entry.position + 1) if last_entry else 0
            entry = NavigationEntry(parent, form.nl_title.data,
                                    form.en_title.data, url, page_id,
                                    form.external.data,
                                    form.activity_list.data, position)
            db.session.add(entry)

        db.session.commit()
        flash(_('The navigation entry has been saved.'), 'success')

        if not page_id and not form.external.data:
            # Check if the page exists, if not redirect to create it
            path = form.url.data.lstrip('/')
            page = page_service.get_page_by_path(path)
            if url.rstrip('/') in get_all_routes():
                return redirect(url_for('navigation.view'))
            if not page and form.url.data != '/':
                # Bug fix: the two adjacent string literals previously
                # concatenated without a separating space, rendering as
                # "... does not exist, pleasecreate the page!".
                flash(_('The link refers to a page that does not exist, '
                        'please create the page!'), 'warning')
                return redirect(url_for('page.edit_page', path=path))
        return redirect(url_for('navigation.view'))
    else:
        flash_form_errors(form)

    parents = NavigationEntry.query.filter_by(parent_id=None)
    if entry:
        parents = parents.filter(NavigationEntry.id != entry.id)
    return render_template('navigation/edit.htm', entry=entry, form=form,
                           parents=parents.all())
@blueprint.route('/delete/<int:entry_id>/', methods=['POST'])
@blueprint.route('/delete/<int:entry_id>/<int:inc_page>', methods=['POST'])
@require_role(Roles.NAVIGATION_WRITE)
def delete(entry_id, inc_page=0):
    """Delete a navigation entry; with ``inc_page`` also delete its page.

    Deleting the linked page requires the additional PAGE_WRITE role, and
    top-level entries that still have children are refused.  The flash
    messages below are Dutch runtime strings and are left as-is.
    """
    # Removing the backing page is a separate, more privileged operation.
    if inc_page and not role_service.user_has_role(current_user,
                                                   Roles.PAGE_WRITE):
        flash(_('You do not have rights to remove pages'))
        return abort(403)
    entry = db.session.query(NavigationEntry).filter_by(id=entry_id).first()
    if not entry:
        abort(404)

    if not entry.parent:
        # Refuse to delete a top-level entry that still has sub-entries.
        if entry.children.count() > 0:
            flash('Deze item heeft nog subitems.', 'danger')
            return redirect(url_for('navigation.edit', entry_id=entry.id))

    if inc_page:
        if entry.external or entry.activity_list:
            # External links / activity lists have no page of their own.
            flash('Deze item verwijst niet naar een pagina op deze website.',
                  'danger')
        else:
            if (entry.url is None or page_service.delete_page_by_path(
                    entry.url.lstrip('/'))):
                flash('De pagina is verwijderd.', 'success')
            else:
                flash('De te verwijderen pagina kon niet worden gevonden.',
                      'danger')

    db.session.delete(entry)
    db.session.commit()
    flash('De navigatie-item is verwijderd.', 'success')

    return redirect(url_for('navigation.view'))
@blueprint.route('/navigation/reorder', methods=['POST'])
@require_role(Roles.NAVIGATION_WRITE)
def reorder():
    """Persist a new ordering of the navigation tree posted as JSON."""
    posted_entries = json.loads(request.form['entries'])
    NavigationAPI.order(posted_entries, None)
    return ""
| 38 | 79 | 0.630886 | import json
import re
from flask import Blueprint, render_template, abort, request, flash, \
redirect, url_for
from flask_babel import _
from flask_login import current_user
from app import db
from app.decorators import require_role
from app.forms import init_form
from app.forms.navigation import NavigationEntryForm
from app.models.navigation import NavigationEntry
from app.models.page import Page
from app.roles import Roles
from app.service import role_service, page_service
from app.utils.forms import flash_form_errors
from app.utils.navigation import NavigationAPI
from app.utils.resource import get_all_routes
blueprint = Blueprint('navigation', __name__, url_prefix='/navigation')
@blueprint.route('/')
@require_role(Roles.NAVIGATION_WRITE)
def view():
entries = NavigationAPI.get_root_entries()
can_write = role_service.user_has_role(current_user,
Roles.NAVIGATION_WRITE)
return render_template('navigation/view.htm', nav_entries=entries,
can_write=can_write)
@blueprint.route('/create/', methods=['GET', 'POST'])
@blueprint.route('/create/<int:parent_id>/', methods=['GET', 'POST'])
@blueprint.route('/edit/<int:entry_id>/', methods=['GET', 'POST'])
@require_role(Roles.NAVIGATION_WRITE)
def edit(entry_id=None, parent_id=None):
entry = NavigationEntry.query.get_or_404(entry_id) if entry_id else None
form = init_form(NavigationEntryForm, obj=entry)
form.page_id.choices = [(-1, '-- {} --'.format(_('Custom URL')))] + \
db.session.query(Page.id, Page.path).all()
parent = NavigationEntry.query.get(parent_id) if parent_id else None
if parent_id and not parent:
flash(_('Cannot find parent navigation entry.'), 'danger')
return redirect(url_for('navigation.view'))
if form.validate_on_submit():
url = None
if form.page_id.data == -1:
url = form.url.data
if not re.compile('^/').match(url):
url = '/' + url
page_id = None if form.page_id.data == -1 else form.page_id.data
if entry:
entry.nl_title = form.nl_title.data
entry.en_title = form.en_title.data
entry.url = url
entry.page_id = page_id
entry.external = form.external.data
entry.activity_list = form.activity_list.data
entry.order_children_alphabetically = \
form.order_children_alphabetically.data
else:
last_entry = NavigationEntry.query.filter_by(parent_id=None) \
.order_by(NavigationEntry.position.desc()).first()
position = (last_entry.position + 1) if last_entry else 0
entry = NavigationEntry(parent, form.nl_title.data,
form.en_title.data, url, page_id,
form.external.data,
form.activity_list.data, position)
db.session.add(entry)
db.session.commit()
flash(_('The navigation entry has been saved.'), 'success')
if not page_id and not form.external.data:
path = form.url.data.lstrip('/')
page = page_service.get_page_by_path(path)
if url.rstrip('/') in get_all_routes():
return redirect(url_for('navigation.view'))
if not page and form.url.data != '/':
flash(_('The link refers to a page that does not exist, please'
'create the page!'), 'warning')
return redirect(url_for('page.edit_page', path=path))
return redirect(url_for('navigation.view'))
else:
flash_form_errors(form)
parents = NavigationEntry.query.filter_by(parent_id=None)
if entry:
parents = parents.filter(NavigationEntry.id != entry.id)
return render_template('navigation/edit.htm', entry=entry, form=form,
parents=parents.all())
@blueprint.route('/delete/<int:entry_id>/', methods=['POST'])
@blueprint.route('/delete/<int:entry_id>/<int:inc_page>', methods=['POST'])
@require_role(Roles.NAVIGATION_WRITE)
def delete(entry_id, inc_page=0):
if inc_page and not role_service.user_has_role(current_user,
Roles.PAGE_WRITE):
flash(_('You do not have rights to remove pages'))
return abort(403)
entry = db.session.query(NavigationEntry).filter_by(id=entry_id).first()
if not entry:
abort(404)
if not entry.parent:
if entry.children.count() > 0:
flash('Deze item heeft nog subitems.', 'danger')
return redirect(url_for('navigation.edit', entry_id=entry.id))
if inc_page:
if entry.external or entry.activity_list:
flash('Deze item verwijst niet naar een pagina op deze website.',
'danger')
else:
if (entry.url is None or page_service.delete_page_by_path(
entry.url.lstrip('/'))):
flash('De pagina is verwijderd.', 'success')
else:
flash('De te verwijderen pagina kon niet worden gevonden.',
'danger')
db.session.delete(entry)
db.session.commit()
flash('De navigatie-item is verwijderd.', 'success')
return redirect(url_for('navigation.view'))
@blueprint.route('/navigation/reorder', methods=['POST'])
@require_role(Roles.NAVIGATION_WRITE)
def reorder():
entries = json.loads(request.form['entries'])
NavigationAPI.order(entries, None)
return ""
| true | true |
1c47f7e4b0e586c5810d5893015727706ff08291 | 1,641 | py | Python | aionewton/wrapper.py | AndrielFR/aionewton | 0f740851154de0e4f64d7c9f676b0b27eaabeccc | [
"MIT"
] | 1 | 2021-01-27T14:35:33.000Z | 2021-01-27T14:35:33.000Z | aionewton/wrapper.py | AndrielFR/aionewton | 0f740851154de0e4f64d7c9f676b0b27eaabeccc | [
"MIT"
] | null | null | null | aionewton/wrapper.py | AndrielFR/aionewton | 0f740851154de0e4f64d7c9f676b0b27eaabeccc | [
"MIT"
] | null | null | null | """An asnycio-based wrapper for `https://newton.now.sh`"""
import sys
from urllib.parse import quote
import aiohttp
# Operation names supported by the newton.now.sh v2 API; one wrapper per
# entry is generated dynamically by expose_endpoints().
ENDPOINTS = ["simplify", "factor", "derive", "integrate", "zeroes", "tangent",
             "area", "cos", "sin", "tan", "arccos", "arcsin", "arctan", "abs",
             "log"]
class Result:
    """Container for a single newton API response.

    Exposes ``operation``, ``expression`` and ``result`` as attributes
    and keeps the untouched payload in ``raw``.
    """

    def __init__(self, **kwargs):
        self.raw = kwargs
        self.operation = kwargs.get("operation")
        self.expression = kwargs.get("expression")
        self.result = kwargs.get("result")

    def __str__(self):
        # Stringifying a Result yields just the computed result.
        return str(self.result)

    __repr__ = __str__
async def _make_request(operation, expression):
    """Internal helper: GET the v2 endpoint and wrap the JSON reply.

    The expression is percent-encoded (including '/') so it survives as a
    single path segment.
    """
    encoded = quote(expression, safe='')
    url = f"https://newton.now.sh/api/v2/{operation}/{encoded}"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as req:
            assert isinstance(req, aiohttp.ClientResponse)
            payload = await req.json()
    return Result(**payload)
def wrap_coro(coro):
    """Wrap an awaitable so callers receive a fresh coroutine object
    that simply awaits it."""
    async def runner():
        return await coro
    return runner()
def expose_endpoints(module, *args):
    """
    Dynamically attach one wrapper per endpoint name in *args* to both
    this module and the given *module* object.

    Each generated wrapper takes an expression string and returns an
    awaitable resolving to a Result (via wrap_coro/_make_request).
    """
    # Credit goes to https://github.com/benpryke/PyNewtonMath
    # for giving me the idea of wrapping them dynamically.
    for op in args:
        # Wrap function: ``wrap`` binds the current ``op`` through its
        # ``operator`` parameter, avoiding the classic late-binding
        # closure bug a bare loop lambda would have.
        def wrap(operator):
            return lambda exp: wrap_coro(_make_request(operator, exp))
        setattr(sys.modules[__name__], op, wrap(op))
        # Mirror the attribute on the caller-supplied module object.
        setattr(module, op, getattr(sys.modules[__name__], op))
| 27.35 | 78 | 0.631322 | import sys
from urllib.parse import quote
import aiohttp
ENDPOINTS = ["simplify", "factor", "derive", "integrate", "zeroes", "tangent",
"area", "cos", "sin", "tan", "arccos", "arcsin", "arctan", "abs",
"log"]
class Result:
def __init__(self, **kwargs):
self.operation = kwargs.get("operation", None)
self.expression = kwargs.get("expression", None)
self.result = kwargs.get("result", None)
self.raw = kwargs
def __str__(self):
return str(self.result)
__repr__ = __str__
async def _make_request(operation, expression):
encoded_expression = quote(expression, safe='')
url = f"https://newton.now.sh/api/v2/{operation}/{encoded_expression}"
async with aiohttp.ClientSession() as session:
async with session.get(url) as req:
assert isinstance(req, aiohttp.ClientResponse)
res = await req.json()
return Result(**res)
def wrap_coro(coro):
async def func():
return await coro
return func()
def expose_endpoints(module, *args):
for op in args:
def wrap(operator):
return lambda exp: wrap_coro(_make_request(operator, exp))
setattr(sys.modules[__name__], op, wrap(op))
setattr(module, op, getattr(sys.modules[__name__], op))
| true | true |
1c47f87cd0eec39b0b393cb0abd277a23b887c5d | 1,866 | py | Python | examples/basic_simple_color_triangle.py | 2dx/moderngl | 5f932560a535469626d79d22e4205f400e18f328 | [
"MIT"
] | 916 | 2019-03-11T19:15:20.000Z | 2022-03-31T19:22:16.000Z | examples/basic_simple_color_triangle.py | 2dx/moderngl | 5f932560a535469626d79d22e4205f400e18f328 | [
"MIT"
] | 218 | 2019-03-11T06:05:52.000Z | 2022-03-30T16:59:22.000Z | examples/basic_simple_color_triangle.py | 2dx/moderngl | 5f932560a535469626d79d22e4205f400e18f328 | [
"MIT"
] | 110 | 2019-04-06T18:32:24.000Z | 2022-03-21T20:30:47.000Z | '''
Renders a traingle that has all RGB combinations
'''
import numpy as np
from ported._example import Example
class SimpleColorTriangle(Example):
gl_version = (3, 3)
aspect_ratio = 16 / 9
title = "Simple Color Triangle"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader='''
#version 330
in vec2 in_vert;
in vec3 in_color;
out vec3 v_color; // Goes to the fragment shader
void main() {
gl_Position = vec4(in_vert, 0.0, 1.0);
v_color = in_color;
}
''',
fragment_shader='''
#version 330
in vec3 v_color;
out vec4 f_color;
void main() {
// We're not interested in changing the alpha value
f_color = vec4(v_color, 1.0);
}
''',
)
# Point coordinates are put followed by the vec3 color values
vertices = np.array([
# x, y, red, green, blue
0.0, 0.8, 1.0, 0.0, 0.0,
-0.6, -0.8, 0.0, 1.0, 0.0,
0.6, -0.8, 0.0, 0.0, 1.0,
], dtype='f4')
self.vbo = self.ctx.buffer(vertices)
# We control the 'in_vert' and `in_color' variables
self.vao = self.ctx.vertex_array(
self.prog,
[
# Map in_vert to the first 2 floats
# Map in_color to the next 3 floats
(self.vbo, '2f 3f', 'in_vert', 'in_color')
],
)
def render(self, time: float, frame_time: float):
self.ctx.clear(1.0, 1.0, 1.0)
self.vao.render()
if __name__ == '__main__':
    # Window/context creation is handled by the shared Example runner.
    SimpleColorTriangle.run()
| 25.916667 | 71 | 0.478028 |
import numpy as np
from ported._example import Example
class SimpleColorTriangle(Example):
gl_version = (3, 3)
aspect_ratio = 16 / 9
title = "Simple Color Triangle"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader='''
#version 330
in vec2 in_vert;
in vec3 in_color;
out vec3 v_color; // Goes to the fragment shader
void main() {
gl_Position = vec4(in_vert, 0.0, 1.0);
v_color = in_color;
}
''',
fragment_shader='''
#version 330
in vec3 v_color;
out vec4 f_color;
void main() {
// We're not interested in changing the alpha value
f_color = vec4(v_color, 1.0);
}
''',
)
# Point coordinates are put followed by the vec3 color values
vertices = np.array([
# x, y, red, green, blue
0.0, 0.8, 1.0, 0.0, 0.0,
-0.6, -0.8, 0.0, 1.0, 0.0,
0.6, -0.8, 0.0, 0.0, 1.0,
], dtype='f4')
self.vbo = self.ctx.buffer(vertices)
# We control the 'in_vert' and `in_color' variables
self.vao = self.ctx.vertex_array(
self.prog,
[
(self.vbo, '2f 3f', 'in_vert', 'in_color')
],
)
def render(self, time: float, frame_time: float):
self.ctx.clear(1.0, 1.0, 1.0)
self.vao.render()
if __name__ == '__main__':
SimpleColorTriangle.run()
| true | true |
1c47f8c6afe39da638d403941783b7f752b3f9d1 | 57,312 | py | Python | nova/tests/unit/api/openstack/compute/test_hypervisors.py | mertakozcan/nova | 6e4ab9714cc0ca147f61997aa7b68f88185ade5c | [
"Apache-2.0"
] | 1 | 2019-04-22T06:25:26.000Z | 2019-04-22T06:25:26.000Z | nova/tests/unit/api/openstack/compute/test_hypervisors.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_hypervisors.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import six
from webob import exc
from nova.api.openstack.compute import hypervisors \
as hypervisors_v21
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CPU_INFO = """
{"arch": "x86_64",
"vendor": "fake",
"topology": {"cores": 1, "threads": 1, "sockets": 1},
"features": [],
"model": ""}"""
TEST_HYPERS = [
dict(id=1,
uuid=uuids.hyper1,
service_id=1,
host="compute1",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info=CPU_INFO,
disk_available_least=100,
host_ip=netaddr.IPAddress('1.1.1.1')),
dict(id=2,
uuid=uuids.hyper2,
service_id=2,
host="compute2",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info=CPU_INFO,
disk_available_least=100,
host_ip=netaddr.IPAddress('2.2.2.2'))]
TEST_SERVICES = [
objects.Service(id=1,
uuid=uuids.service1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
objects.Service(id=2,
uuid=uuids.service2,
host="compute2",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
]
TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
for hyper_dct in TEST_HYPERS]
TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})
TEST_SERVERS = [dict(name="inst1", uuid=uuids.instance_1, host="compute1"),
dict(name="inst2", uuid=uuids.instance_2, host="compute2"),
dict(name="inst3", uuid=uuids.instance_3, host="compute1"),
dict(name="inst4", uuid=uuids.instance_4, host="compute2")]
def fake_compute_node_get_all(context, limit=None, marker=None):
if marker in ['99999', uuids.invalid_marker]:
raise exception.MarkerNotFound(marker)
marker_found = True if marker is None else False
output = []
for hyper in TEST_HYPERS_OBJ:
# Starting with the 2.53 microversion, the marker is a uuid.
if not marker_found and marker in (str(hyper.id), hyper.uuid):
marker_found = True
elif marker_found:
if limit is None or len(output) < int(limit):
output.append(hyper)
return output
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
return TEST_HYPERS_OBJ
def fake_compute_node_get(context, compute_id):
for hyper in TEST_HYPERS_OBJ:
if hyper.uuid == compute_id or hyper.id == int(compute_id):
return hyper
raise exception.ComputeHostNotFound(host=compute_id)
def fake_service_get_by_compute_host(context, host):
for service in TEST_SERVICES:
if service.host == host:
return service
def fake_compute_node_statistics(context):
result = dict(
count=0,
vcpus=0,
memory_mb=0,
local_gb=0,
vcpus_used=0,
memory_mb_used=0,
local_gb_used=0,
free_ram_mb=0,
free_disk_gb=0,
current_workload=0,
running_vms=0,
disk_available_least=0,
)
for hyper in TEST_HYPERS_OBJ:
for key in result:
if key == 'count':
result[key] += 1
else:
result[key] += getattr(hyper, key)
return result
def fake_instance_get_all_by_host(context, host):
results = []
for inst in TEST_SERVERS:
if inst['host'] == host:
inst_obj = fake_instance.fake_instance_obj(context, **inst)
results.append(inst_obj)
return results
class HypervisorsTestV21(test.NoDBTestCase):
api_version = '2.1'
# Allow subclasses to override if the id value in the response is the
# compute node primary key integer id or the uuid.
expect_uuid_for_id = False
# TODO(stephenfin): These should just be defined here
TEST_HYPERS_OBJ = copy.deepcopy(TEST_HYPERS_OBJ)
TEST_SERVICES = copy.deepcopy(TEST_SERVICES)
TEST_SERVERS = copy.deepcopy(TEST_SERVERS)
DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
del DETAIL_HYPERS_DICTS[0]['service_id']
del DETAIL_HYPERS_DICTS[1]['service_id']
del DETAIL_HYPERS_DICTS[0]['host']
del DETAIL_HYPERS_DICTS[1]['host']
del DETAIL_HYPERS_DICTS[0]['uuid']
del DETAIL_HYPERS_DICTS[1]['uuid']
DETAIL_HYPERS_DICTS[0].update({'state': 'up',
'status': 'enabled',
'service': dict(id=1, host='compute1',
disabled_reason=None)})
DETAIL_HYPERS_DICTS[1].update({'state': 'up',
'status': 'enabled',
'service': dict(id=2, host='compute2',
disabled_reason=None)})
INDEX_HYPER_DICTS = [
dict(id=1, hypervisor_hostname="hyper1",
state='up', status='enabled'),
dict(id=2, hypervisor_hostname="hyper2",
state='up', status='enabled')]
DETAIL_NULL_CPUINFO_DICT = {'': '', None: None}
def _get_request(self, use_admin_context, url=''):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.api_version)
def _set_up_controller(self):
self.controller = hypervisors_v21.HypervisorsController()
self.controller.servicegroup_api.service_is_up = mock.MagicMock(
return_value=True)
def _get_hyper_id(self):
"""Helper function to get the proper hypervisor id for a request
:returns: The first hypervisor's uuid for microversions that expect a
uuid for the id, otherwise the hypervisor's id primary key
"""
return (self.TEST_HYPERS_OBJ[0].uuid if self.expect_uuid_for_id
else self.TEST_HYPERS_OBJ[0].id)
def setUp(self):
super(HypervisorsTestV21, self).setUp()
self._set_up_controller()
self.rule_hyp_show = "os_compute_api:os-hypervisors"
host_api = self.controller.host_api
host_api.compute_node_get_all = mock.MagicMock(
side_effect=fake_compute_node_get_all)
host_api.service_get_by_compute_host = mock.MagicMock(
side_effect=fake_service_get_by_compute_host)
host_api.compute_node_search_by_hypervisor = mock.MagicMock(
side_effect=fake_compute_node_search_by_hypervisor)
host_api.compute_node_get = mock.MagicMock(
side_effect=fake_compute_node_get)
self.stub_out('nova.db.api.compute_node_statistics',
fake_compute_node_statistics)
def test_view_hypervisor_nodetail_noservers(self):
req = self._get_request(True)
result = self.controller._view_hypervisor(
self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], False, req)
self.assertEqual(self.INDEX_HYPER_DICTS[0], result)
def test_view_hypervisor_detail_noservers(self):
req = self._get_request(True)
result = self.controller._view_hypervisor(
self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], True, req)
self.assertEqual(self.DETAIL_HYPERS_DICTS[0], result)
def test_view_hypervisor_servers(self):
req = self._get_request(True)
result = self.controller._view_hypervisor(self.TEST_HYPERS_OBJ[0],
self.TEST_SERVICES[0],
False, req,
self.TEST_SERVERS)
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
expected_dict.update({'servers': [
dict(name="inst1", uuid=uuids.instance_1),
dict(name="inst2", uuid=uuids.instance_2),
dict(name="inst3", uuid=uuids.instance_3),
dict(name="inst4", uuid=uuids.instance_4)]})
self.assertEqual(expected_dict, result)
def _test_view_hypervisor_detail_cpuinfo_null(self, cpu_info):
req = self._get_request(True)
test_hypervisor_obj = copy.deepcopy(self.TEST_HYPERS_OBJ[0])
test_hypervisor_obj.cpu_info = cpu_info
result = self.controller._view_hypervisor(test_hypervisor_obj,
self.TEST_SERVICES[0],
True, req)
expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
expected_dict.update({'cpu_info':
self.DETAIL_NULL_CPUINFO_DICT[cpu_info]})
self.assertEqual(result, expected_dict)
def test_view_hypervisor_detail_cpuinfo_empty_string(self):
self._test_view_hypervisor_detail_cpuinfo_null('')
def test_view_hypervisor_detail_cpuinfo_none(self):
self._test_view_hypervisor_detail_cpuinfo_null(None)
def test_index(self):
req = self._get_request(True)
result = self.controller.index(req)
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
def test_index_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_index_compute_host_not_found(self):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
# two computes, a matching service only exists for the first one
compute_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(**TEST_HYPERS[0]),
objects.ComputeNode(**TEST_HYPERS[1])
])
def fake_service_get_by_compute_host(context, host):
if host == TEST_HYPERS[0]['host']:
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
@mock.patch.object(self.controller.host_api, 'compute_node_get_all',
return_value=compute_nodes)
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
fake_service_get_by_compute_host)
def _test(self, compute_node_get_all):
req = self._get_request(True)
result = self.controller.index(req)
self.assertEqual(1, len(result['hypervisors']))
expected = {
'id': compute_nodes[0].uuid if self.expect_uuid_for_id
else compute_nodes[0].id,
'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
'state': 'up',
'status': 'enabled',
}
self.assertDictEqual(expected, result['hypervisors'][0])
_test(self)
def test_index_compute_host_not_mapped(self):
"""Tests that we don't fail index if a host is not mapped."""
# two computes, a matching service only exists for the first one
compute_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(**TEST_HYPERS[0]),
objects.ComputeNode(**TEST_HYPERS[1])
])
def fake_service_get_by_compute_host(context, host):
if host == TEST_HYPERS[0]['host']:
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
@mock.patch.object(self.controller.host_api, 'compute_node_get_all',
return_value=compute_nodes)
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
fake_service_get_by_compute_host)
def _test(self, compute_node_get_all):
req = self._get_request(True)
result = self.controller.index(req)
self.assertEqual(1, len(result['hypervisors']))
expected = {
'id': compute_nodes[0].uuid if self.expect_uuid_for_id
else compute_nodes[0].id,
'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
'state': 'up',
'status': 'enabled',
}
self.assertDictEqual(expected, result['hypervisors'][0])
_test(self)
def test_detail(self):
req = self._get_request(True)
result = self.controller.detail(req)
self.assertEqual(dict(hypervisors=self.DETAIL_HYPERS_DICTS), result)
def test_detail_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.detail, req)
def test_detail_compute_host_not_found(self):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
# two computes, a matching service only exists for the first one
compute_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(**TEST_HYPERS[0]),
objects.ComputeNode(**TEST_HYPERS[1])
])
def fake_service_get_by_compute_host(context, host):
if host == TEST_HYPERS[0]['host']:
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
@mock.patch.object(self.controller.host_api, 'compute_node_get_all',
return_value=compute_nodes)
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
fake_service_get_by_compute_host)
def _test(self, compute_node_get_all):
req = self._get_request(True)
result = self.controller.detail(req)
self.assertEqual(1, len(result['hypervisors']))
expected = {
'id': compute_nodes[0].id,
'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
'state': 'up',
'status': 'enabled',
}
# we don't care about all of the details, just make sure we get
# the subset we care about and there are more keys than what index
# would return
hypervisor = result['hypervisors'][0]
self.assertTrue(
set(expected.keys()).issubset(set(hypervisor.keys())))
self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
self.assertEqual(compute_nodes[0].hypervisor_hostname,
hypervisor['hypervisor_hostname'])
_test(self)
def test_detail_compute_host_not_mapped(self):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
# two computes, a matching service only exists for the first one
compute_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(**TEST_HYPERS[0]),
objects.ComputeNode(**TEST_HYPERS[1])
])
def fake_service_get_by_compute_host(context, host):
if host == TEST_HYPERS[0]['host']:
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
@mock.patch.object(self.controller.host_api, 'compute_node_get_all',
return_value=compute_nodes)
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
fake_service_get_by_compute_host)
def _test(self, compute_node_get_all):
req = self._get_request(True)
result = self.controller.detail(req)
self.assertEqual(1, len(result['hypervisors']))
expected = {
'id': compute_nodes[0].id,
'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
'state': 'up',
'status': 'enabled',
}
# we don't care about all of the details, just make sure we get
# the subset we care about and there are more keys than what index
# would return
hypervisor = result['hypervisors'][0]
self.assertTrue(
set(expected.keys()).issubset(set(hypervisor.keys())))
self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
self.assertEqual(compute_nodes[0].hypervisor_hostname,
hypervisor['hypervisor_hostname'])
_test(self)
def test_show_compute_host_not_mapped(self):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
@mock.patch.object(self.controller.host_api, 'compute_node_get',
return_value=self.TEST_HYPERS_OBJ[0])
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host')
def _test(self, mock_service, mock_compute_node_get):
req = self._get_request(True)
mock_service.side_effect = exception.HostMappingNotFound(
name='foo')
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPNotFound, self.controller.show,
req, hyper_id)
self.assertTrue(mock_service.called)
mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id)
_test(self)
def test_show_noid(self):
req = self._get_request(True)
hyperid = uuids.hyper3 if self.expect_uuid_for_id else '3'
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, hyperid)
def test_show_non_integer_id(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
def test_show_withid(self):
req = self._get_request(True)
hyper_id = self._get_hyper_id()
result = self.controller.show(req, hyper_id)
self.assertEqual(dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]), result)
def test_show_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req,
self._get_hyper_id())
def test_uptime_noid(self):
req = self._get_request(True)
hyper_id = uuids.hyper3 if self.expect_uuid_for_id else '3'
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req,
hyper_id)
def test_uptime_notimplemented(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
side_effect=exc.HTTPNotImplemented()
) as mock_get_uptime:
req = self._get_request(True)
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPNotImplemented,
self.controller.uptime, req, hyper_id)
self.assertEqual(1, mock_get_uptime.call_count)
def test_uptime_implemented(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
return_value="fake uptime"
) as mock_get_uptime:
req = self._get_request(True)
hyper_id = self._get_hyper_id()
result = self.controller.uptime(req, hyper_id)
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
expected_dict.update({'uptime': "fake uptime"})
self.assertEqual(dict(hypervisor=expected_dict), result)
self.assertEqual(1, mock_get_uptime.call_count)
def test_uptime_non_integer_id(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
def test_uptime_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.uptime, req,
self.TEST_HYPERS_OBJ[0].id)
def test_uptime_hypervisor_down(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
side_effect=exception.ComputeServiceUnavailable(host='dummy')
) as mock_get_uptime:
req = self._get_request(True)
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPBadRequest,
self.controller.uptime, req, hyper_id)
mock_get_uptime.assert_called_once_with(
mock.ANY, self.TEST_HYPERS_OBJ[0].host)
def test_uptime_hypervisor_not_mapped_service_get(self):
@mock.patch.object(self.controller.host_api, 'compute_node_get')
@mock.patch.object(self.controller.host_api, 'get_host_uptime')
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host',
side_effect=exception.HostMappingNotFound(
name='dummy'))
def _test(mock_get, _, __):
req = self._get_request(True)
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPNotFound,
self.controller.uptime, req, hyper_id)
self.assertTrue(mock_get.called)
_test()
def test_uptime_hypervisor_not_mapped(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
side_effect=exception.HostMappingNotFound(name='dummy')
) as mock_get_uptime:
req = self._get_request(True)
hyper_id = self._get_hyper_id()
self.assertRaises(exc.HTTPNotFound,
self.controller.uptime, req, hyper_id)
mock_get_uptime.assert_called_once_with(
mock.ANY, self.TEST_HYPERS_OBJ[0].host)
def test_search(self):
req = self._get_request(True)
result = self.controller.search(req, 'hyper')
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
def test_search_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.search, req,
self.TEST_HYPERS_OBJ[0].id)
def test_search_non_exist(self):
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=[]) as mock_node_search:
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.search,
req, 'a')
self.assertEqual(1, mock_node_search.call_count)
def test_search_unmapped(self):
@mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor')
@mock.patch.object(self.controller.host_api,
'service_get_by_compute_host')
def _test(mock_service, mock_search):
mock_search.return_value = [mock.MagicMock()]
mock_service.side_effect = exception.HostMappingNotFound(
name='foo')
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.search,
req, 'a')
self.assertTrue(mock_service.called)
_test()
@mock.patch.object(objects.InstanceList, 'get_by_host',
side_effect=fake_instance_get_all_by_host)
def test_servers(self, mock_get):
req = self._get_request(True)
result = self.controller.servers(req, 'hyper')
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
expected_dict[0].update({'servers': [
dict(uuid=uuids.instance_1),
dict(uuid=uuids.instance_3)]})
expected_dict[1].update({'servers': [
dict(uuid=uuids.instance_2),
dict(uuid=uuids.instance_4)]})
for output in result['hypervisors']:
servers = output['servers']
for server in servers:
del server['name']
self.assertEqual(dict(hypervisors=expected_dict), result)
def test_servers_not_mapped(self):
req = self._get_request(True)
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host') as m:
m.side_effect = exception.HostMappingNotFound(name='something')
self.assertRaises(exc.HTTPNotFound,
self.controller.servers, req, 'hyper')
def test_servers_non_id(self):
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=[]) as mock_node_search:
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers,
req, '115')
self.assertEqual(1, mock_node_search.call_count)
def test_servers_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.servers, req,
self.TEST_HYPERS_OBJ[0].id)
def test_servers_with_non_integer_hypervisor_id(self):
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=[]) as mock_node_search:
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers, req, 'abc')
self.assertEqual(1, mock_node_search.call_count)
def test_servers_with_no_server(self):
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
return_value=[]) as mock_inst_get_all:
req = self._get_request(True)
result = self.controller.servers(req, self.TEST_HYPERS_OBJ[0].id)
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
self.assertTrue(mock_inst_get_all.called)
def test_statistics(self):
req = self._get_request(True)
result = self.controller.statistics(req)
self.assertEqual(dict(hypervisor_statistics=dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200)), result)
def test_statistics_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.statistics, req)
class HypervisorsTestV228(HypervisorsTestV21):
api_version = '2.28'
DETAIL_HYPERS_DICTS = copy.deepcopy(HypervisorsTestV21.DETAIL_HYPERS_DICTS)
DETAIL_HYPERS_DICTS[0]['cpu_info'] = jsonutils.loads(CPU_INFO)
DETAIL_HYPERS_DICTS[1]['cpu_info'] = jsonutils.loads(CPU_INFO)
DETAIL_NULL_CPUINFO_DICT = {'': {}, None: {}}
class HypervisorsTestV233(HypervisorsTestV228):
api_version = '2.33'
def test_index_pagination(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors?limit=1&marker=1')
result = self.controller.index(req)
expected = {
'hypervisors': [
{'hypervisor_hostname': 'hyper2',
'id': 2,
'state': 'up',
'status': 'enabled'}
],
'hypervisors_links': [
{'href': 'http://localhost/v2/hypervisors?limit=1&marker=2',
'rel': 'next'}
]
}
self.assertEqual(expected, result)
def test_index_pagination_with_invalid_marker(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors?marker=99999')
self.assertRaises(exc.HTTPBadRequest,
self.controller.index, req)
def test_index_pagination_with_invalid_non_int_limit(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors?limit=-9')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_pagination_with_invalid_string_limit(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors?limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_duplicate_query_parameters_with_invalid_string_limit(self):
req = self._get_request(
True,
'/v2/1234/os-hypervisors/?limit=1&limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_duplicate_query_parameters_validation(self):
expected = [{
'hypervisor_hostname': 'hyper2',
'id': 2,
'state': 'up',
'status': 'enabled'}
]
params = {
'limit': 1,
'marker': 1,
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?marker=1&%s=%s&%s=%s' %
(param, value, param, value))
result = self.controller.index(req)
self.assertEqual(expected, result['hypervisors'])
def test_index_pagination_with_additional_filter(self):
expected = {
'hypervisors': [
{'hypervisor_hostname': 'hyper2',
'id': 2,
'state': 'up',
'status': 'enabled'}
],
'hypervisors_links': [
{'href': 'http://localhost/v2/hypervisors?limit=1&marker=2',
'rel': 'next'}
]
}
req = self._get_request(
True, '/v2/1234/os-hypervisors?limit=1&marker=1&additional=3')
result = self.controller.index(req)
self.assertEqual(expected, result)
def test_detail_pagination(self):
req = self._get_request(
True, '/v2/1234/os-hypervisors/detail?limit=1&marker=1')
result = self.controller.detail(req)
link = 'http://localhost/v2/hypervisors/detail?limit=1&marker=2'
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
],
'hypervisors_links': [{'href': link, 'rel': 'next'}]
}
self.assertEqual(expected, result)
def test_detail_pagination_with_invalid_marker(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors/detail?marker=99999')
self.assertRaises(exc.HTTPBadRequest,
self.controller.detail, req)
def test_detail_pagination_with_invalid_string_limit(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors/detail?limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_detail_duplicate_query_parameters_with_invalid_string_limit(self):
req = self._get_request(
True,
'/v2/1234/os-hypervisors/detail?limit=1&limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_detail_duplicate_query_parameters_validation(self):
expected = [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
]
params = {
'limit': 1,
'marker': 1,
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?marker=1&%s=%s&%s=%s' %
(param, value, param, value))
result = self.controller.detail(req)
self.assertEqual(expected, result['hypervisors'])
def test_detail_pagination_with_additional_filter(self):
link = 'http://localhost/v2/hypervisors/detail?limit=1&marker=2'
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
],
'hypervisors_links': [{
'href': link,
'rel': 'next'}]
}
req = self._get_request(
True, '/v2/1234/os-hypervisors/detail?limit=1&marker=1&unknown=2')
result = self.controller.detail(req)
self.assertEqual(expected, result)
class HypervisorsTestV252(HypervisorsTestV233):
"""This is a boundary test to make sure 2.52 works like 2.33."""
api_version = '2.52'
class HypervisorsTestV253(HypervisorsTestV252):
api_version = hypervisors_v21.UUID_FOR_ID_MIN_VERSION
expect_uuid_for_id = True
# This is an expected response for index().
INDEX_HYPER_DICTS = [
dict(id=uuids.hyper1, hypervisor_hostname="hyper1",
state='up', status='enabled'),
dict(id=uuids.hyper2, hypervisor_hostname="hyper2",
state='up', status='enabled')]
def setUp(self):
super(HypervisorsTestV253, self).setUp()
# This is an expected response for detail().
for index, detail_hyper_dict in enumerate(self.DETAIL_HYPERS_DICTS):
detail_hyper_dict['id'] = TEST_HYPERS[index]['uuid']
detail_hyper_dict['service']['id'] = TEST_SERVICES[index].uuid
def test_servers(self):
"""Asserts that calling the servers route after 2.52 fails."""
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.servers,
self._get_request(True), 'hyper')
def test_servers_with_no_server(self):
"""Tests GET /os-hypervisors?with_servers=1 when there are no
instances on the given host.
"""
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
return_value=[]) as mock_inst_get_all:
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=1')
result = self.controller.index(req)
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
# instance_get_all_by_host is called for each hypervisor
self.assertEqual(2, mock_inst_get_all.call_count)
mock_inst_get_all.assert_has_calls((
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[0].host),
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[1].host)))
def test_servers_not_mapped(self):
"""Tests that instance_get_all_by_host fails with HostMappingNotFound.
"""
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=1')
with mock.patch.object(
self.controller.host_api, 'instance_get_all_by_host',
side_effect=exception.HostMappingNotFound(name='something')):
result = self.controller.index(req)
self.assertEqual(dict(hypervisors=[]), result)
def test_list_with_servers(self):
"""Tests GET /os-hypervisors?with_servers=True"""
instances = [
objects.InstanceList(objects=[objects.Instance(
id=1, uuid=uuids.hyper1_instance1)]),
objects.InstanceList(objects=[objects.Instance(
id=2, uuid=uuids.hyper2_instance1)])]
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
side_effect=instances) as mock_inst_get_all:
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=True')
result = self.controller.index(req)
index_with_servers = copy.deepcopy(self.INDEX_HYPER_DICTS)
index_with_servers[0]['servers'] = [
{'name': 'instance-00000001', 'uuid': uuids.hyper1_instance1}]
index_with_servers[1]['servers'] = [
{'name': 'instance-00000002', 'uuid': uuids.hyper2_instance1}]
self.assertEqual(dict(hypervisors=index_with_servers), result)
# instance_get_all_by_host is called for each hypervisor
self.assertEqual(2, mock_inst_get_all.call_count)
mock_inst_get_all.assert_has_calls((
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[0].host),
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[1].host)))
def test_list_with_servers_invalid_parameter(self):
"""Tests using an invalid with_servers query parameter."""
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=invalid')
self.assertRaises(
exception.ValidationError, self.controller.index, req)
    def test_list_with_hostname_pattern_and_paging_parameters(self):
        """This is a negative test to validate that trying to list hypervisors
        with a hostname pattern and paging parameters results in a 400 error.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?hypervisor_hostname_pattern=foo&'
                'limit=1&marker=%s' % uuids.marker)
        ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
        self.assertIn('Paging over hypervisors with the '
                      'hypervisor_hostname_pattern query parameter is not '
                      'supported.', six.text_type(ex))
    def test_servers_with_non_integer_hypervisor_id(self):
        """This is a poorly named test, it's really checking the 404 case where
        there is no match for the hostname pattern.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?with_servers=yes&'
                'hypervisor_hostname_pattern=shenzhen')
        # No compute nodes match the pattern, so a 404 is expected.
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=objects.ComputeNodeList()) as s:
            self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
            s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
    def test_servers_non_admin(self):
        """There is no reason to test this for 2.53 since the
        /os-hypervisors/servers route is deprecated.
        """
        pass
    def test_servers_non_id(self):
        """There is no reason to test this for 2.53 since the
        /os-hypervisors/servers route is deprecated.
        """
        pass
    def test_search_old_route(self):
        """Asserts that calling the search route after 2.52 fails."""
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.search,
                          self._get_request(True), 'hyper')
    def test_search(self):
        """Test listing hypervisors with details and using the
        hypervisor_hostname_pattern query string.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen')
        # Only hyper1 is returned by the mocked search, so only its
        # details show up in the response.
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=objects.ComputeNodeList(
                                   objects=[TEST_HYPERS_OBJ[0]])) as s:
            result = self.controller.detail(req)
            s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
        expected = {
            'hypervisors': [
                {'cpu_info': {'arch': 'x86_64',
                              'features': [],
                              'model': '',
                              'topology': {'cores': 1,
                                           'sockets': 1,
                                           'threads': 1},
                              'vendor': 'fake'},
                 'current_workload': 2,
                 'disk_available_least': 100,
                 'free_disk_gb': 125,
                 'free_ram_mb': 5120,
                 'host_ip': netaddr.IPAddress('1.1.1.1'),
                 'hypervisor_hostname': 'hyper1',
                 'hypervisor_type': 'xen',
                 'hypervisor_version': 3,
                 'id': TEST_HYPERS_OBJ[0].uuid,
                 'local_gb': 250,
                 'local_gb_used': 125,
                 'memory_mb': 10240,
                 'memory_mb_used': 5120,
                 'running_vms': 2,
                 'service': {'disabled_reason': None,
                             'host': 'compute1',
                             'id': TEST_SERVICES[0].uuid},
                 'state': 'up',
                 'status': 'enabled',
                 'vcpus': 4,
                 'vcpus_used': 2}
            ]
        }
        # There are no links when using the hypervisor_hostname_pattern
        # query string since we can't page using a pattern matcher.
        self.assertNotIn('hypervisors_links', result)
        self.assertDictEqual(expected, result)
    def test_search_invalid_hostname_pattern_parameter(self):
        """Tests passing an invalid hypervisor_hostname_pattern query
        parameter.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?hypervisor_hostname_pattern=invalid~host')
        self.assertRaises(
            exception.ValidationError, self.controller.detail, req)
    def test_search_non_exist(self):
        """This is a duplicate of test_servers_with_non_integer_hypervisor_id.
        """
        pass
    def test_search_non_admin(self):
        """There is no reason to test this for 2.53 since the
        /os-hypervisors/search route is deprecated.
        """
        pass
    def test_search_unmapped(self):
        """This is already tested with test_index_compute_host_not_mapped."""
        pass
    def test_show_non_integer_id(self):
        """There is no reason to test this for 2.53 since 2.53 requires a
        non-integer id (requires a uuid).
        """
        pass
    def test_show_integer_id(self):
        """Tests that we get a 400 if passed a hypervisor integer id to show().
        """
        req = self._get_request(True)
        ex = self.assertRaises(exc.HTTPBadRequest,
                               self.controller.show, req, '1')
        self.assertIn('Invalid uuid 1', six.text_type(ex))
    def test_show_with_servers_invalid_parameter(self):
        """Tests passing an invalid value for the with_servers query parameter
        to the show() method to make sure the query parameter is validated.
        """
        hyper_id = self._get_hyper_id()
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/%s?with_servers=invalid' % hyper_id)
        ex = self.assertRaises(
            exception.ValidationError, self.controller.show, req, hyper_id)
        self.assertIn('with_servers', six.text_type(ex))
    def test_show_with_servers_host_mapping_not_found(self):
        """Tests that a 404 is returned if instance_get_all_by_host raises
        HostMappingNotFound.
        """
        hyper_id = self._get_hyper_id()
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/%s?with_servers=true' % hyper_id)
        # The unmapped host turns into a 404 on show, unlike index which
        # just skips the hypervisor.
        with mock.patch.object(
                self.controller.host_api, 'instance_get_all_by_host',
                side_effect=exception.HostMappingNotFound(name=hyper_id)):
            self.assertRaises(exc.HTTPNotFound, self.controller.show,
                              req, hyper_id)
    def test_show_with_servers(self):
        """Tests the show() result when servers are included in the output."""
        instances = objects.InstanceList(objects=[objects.Instance(
            id=1, uuid=uuids.hyper1_instance1)])
        hyper_id = self._get_hyper_id()
        # 'on' is one of the truthy spellings accepted by the boolean
        # query parameter schema.
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/%s?with_servers=on' % hyper_id)
        with mock.patch.object(self.controller.host_api,
                               'instance_get_all_by_host',
                               return_value=instances) as mock_inst_get_all:
            result = self.controller.show(req, hyper_id)
        show_with_servers = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
        show_with_servers['servers'] = [
            {'name': 'instance-00000001', 'uuid': uuids.hyper1_instance1}]
        self.assertDictEqual(dict(hypervisor=show_with_servers), result)
        # instance_get_all_by_host is called
        mock_inst_get_all.assert_called_once_with(
            req.environ['nova.context'], TEST_HYPERS_OBJ[0].host)
    def test_uptime_non_integer_id(self):
        """There is no reason to test this for 2.53 since 2.53 requires a
        non-integer id (requires a uuid).
        """
        pass
    def test_uptime_integer_id(self):
        """Tests that we get a 400 if passed a hypervisor integer id to
        uptime().
        """
        req = self._get_request(True)
        ex = self.assertRaises(exc.HTTPBadRequest,
                               self.controller.uptime, req, '1')
        self.assertIn('Invalid uuid 1', six.text_type(ex))
    def test_detail_pagination(self):
        """Tests details paging with uuid markers."""
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/detail?limit=1&marker=%s' %
                TEST_HYPERS_OBJ[0].uuid)
        result = self.controller.detail(req)
        # With limit=1 and a marker of hyper1, only hyper2 is returned plus
        # a "next" link whose marker is hyper2's uuid.
        link = ('http://localhost/v2/hypervisors/detail?limit=1&marker=%s' %
                TEST_HYPERS_OBJ[1].uuid)
        expected = {
            'hypervisors': [
                {'cpu_info': {'arch': 'x86_64',
                              'features': [],
                              'model': '',
                              'topology': {'cores': 1,
                                           'sockets': 1,
                                           'threads': 1},
                              'vendor': 'fake'},
                 'current_workload': 2,
                 'disk_available_least': 100,
                 'free_disk_gb': 125,
                 'free_ram_mb': 5120,
                 'host_ip': netaddr.IPAddress('2.2.2.2'),
                 'hypervisor_hostname': 'hyper2',
                 'hypervisor_type': 'xen',
                 'hypervisor_version': 3,
                 'id': TEST_HYPERS_OBJ[1].uuid,
                 'local_gb': 250,
                 'local_gb_used': 125,
                 'memory_mb': 10240,
                 'memory_mb_used': 5120,
                 'running_vms': 2,
                 'service': {'disabled_reason': None,
                             'host': 'compute2',
                             'id': TEST_SERVICES[1].uuid},
                 'state': 'up',
                 'status': 'enabled',
                 'vcpus': 4,
                 'vcpus_used': 2}
            ],
            'hypervisors_links': [{'href': link, 'rel': 'next'}]
        }
        self.assertEqual(expected, result)
    def test_detail_pagination_with_invalid_marker(self):
        """Tests detail paging with an invalid marker (not found)."""
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/detail?marker=%s' % uuids.invalid_marker)
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.detail, req)
    def test_detail_pagination_with_additional_filter(self):
        """Tests that an unknown query parameter fails schema validation."""
        req = self._get_request(
            True, '/v2/1234/os-hypervisors/detail?limit=1&marker=9&unknown=2')
        self.assertRaises(exception.ValidationError,
                          self.controller.detail, req)
    def test_detail_duplicate_query_parameters_validation(self):
        """Tests that the list Detail query parameter schema enforces only a
        single entry for any query parameter.
        """
        params = {
            'limit': 1,
            'marker': uuids.marker,
            'hypervisor_hostname_pattern': 'foo',
            'with_servers': 'true'
        }
        for param, value in params.items():
            req = self._get_request(
                use_admin_context=True,
                url='/os-hypervisors/detail?%s=%s&%s=%s' %
                (param, value, param, value))
            self.assertRaises(exception.ValidationError,
                              self.controller.detail, req)
    def test_index_pagination(self):
        """Tests index paging with uuid markers."""
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?limit=1&marker=%s' %
                TEST_HYPERS_OBJ[0].uuid)
        result = self.controller.index(req)
        # The "next" link carries hyper2's uuid as the new marker.
        link = ('http://localhost/v2/hypervisors?limit=1&marker=%s' %
                TEST_HYPERS_OBJ[1].uuid)
        expected = {
            'hypervisors': [{
                'hypervisor_hostname': 'hyper2',
                'id': TEST_HYPERS_OBJ[1].uuid,
                'state': 'up',
                'status': 'enabled'
            }],
            'hypervisors_links': [{'href': link, 'rel': 'next'}]
        }
        self.assertEqual(expected, result)
    def test_index_pagination_with_invalid_marker(self):
        """Tests index paging with an invalid marker (not found)."""
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors?marker=%s' % uuids.invalid_marker)
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.index, req)
    def test_index_pagination_with_additional_filter(self):
        """Tests that an unknown query parameter fails schema validation."""
        req = self._get_request(
            True, '/v2/1234/os-hypervisors/?limit=1&marker=9&unknown=2')
        self.assertRaises(exception.ValidationError,
                          self.controller.index, req)
    def test_index_duplicate_query_parameters_validation(self):
        """Tests that the list query parameter schema enforces only a single
        entry for any query parameter.
        """
        params = {
            'limit': 1,
            'marker': uuids.marker,
            'hypervisor_hostname_pattern': 'foo',
            'with_servers': 'true'
        }
        for param, value in params.items():
            req = self._get_request(
                use_admin_context=True,
                url='/os-hypervisors?%s=%s&%s=%s' %
                (param, value, param, value))
            self.assertRaises(exception.ValidationError,
                              self.controller.index, req)
    def test_show_duplicate_query_parameters_validation(self):
        """Tests that the show query parameter schema enforces only a single
        entry for any query parameter.
        """
        req = self._get_request(
            use_admin_context=True,
            url='/os-hypervisors/%s?with_servers=1&with_servers=1' %
                uuids.hyper1)
        self.assertRaises(exception.ValidationError,
                          self.controller.show, req, uuids.hyper1)
import copy
import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import six
from webob import exc
from nova.api.openstack.compute import hypervisors \
as hypervisors_v21
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
# Sample cpu_info blob kept as a JSON string; the 2.28 test variants load
# it with jsonutils to build the expected deserialized form.
CPU_INFO = """
{"arch": "x86_64",
"vendor": "fake",
"topology": {"cores": 1, "threads": 1, "sockets": 1},
"features": [],
"model": ""}"""
# Two fake compute nodes that back all of the os-hypervisors API tests.
TEST_HYPERS = [
    dict(id=1,
         uuid=uuids.hyper1,
         service_id=1,
         host="compute1",
         vcpus=4,
         memory_mb=10 * 1024,
         local_gb=250,
         vcpus_used=2,
         memory_mb_used=5 * 1024,
         local_gb_used=125,
         hypervisor_type="xen",
         hypervisor_version=3,
         hypervisor_hostname="hyper1",
         free_ram_mb=5 * 1024,
         free_disk_gb=125,
         current_workload=2,
         running_vms=2,
         cpu_info=CPU_INFO,
         disk_available_least=100,
         host_ip=netaddr.IPAddress('1.1.1.1')),
    dict(id=2,
         uuid=uuids.hyper2,
         service_id=2,
         host="compute2",
         vcpus=4,
         memory_mb=10 * 1024,
         local_gb=250,
         vcpus_used=2,
         memory_mb_used=5 * 1024,
         local_gb_used=125,
         hypervisor_type="xen",
         hypervisor_version=3,
         hypervisor_hostname="hyper2",
         free_ram_mb=5 * 1024,
         free_disk_gb=125,
         current_workload=2,
         running_vms=2,
         cpu_info=CPU_INFO,
         disk_available_least=100,
         host_ip=netaddr.IPAddress('2.2.2.2'))]
# One nova-compute service per fake compute node.
TEST_SERVICES = [
    objects.Service(id=1,
                    uuid=uuids.service1,
                    host="compute1",
                    binary="nova-compute",
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
    objects.Service(id=2,
                    uuid=uuids.service2,
                    host="compute2",
                    binary="nova-compute",
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
    ]
# ComputeNode objects built from the raw dicts above. Note the 'service'
# key is added to the raw dicts only *after* the objects are built, so the
# ComputeNode objects themselves carry no service field.
TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
                   for hyper_dct in TEST_HYPERS]
TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})
# Fake servers, two per compute host.
TEST_SERVERS = [dict(name="inst1", uuid=uuids.instance_1, host="compute1"),
                dict(name="inst2", uuid=uuids.instance_2, host="compute2"),
                dict(name="inst3", uuid=uuids.instance_3, host="compute1"),
                dict(name="inst4", uuid=uuids.instance_4, host="compute2")]
def fake_compute_node_get_all(context, limit=None, marker=None):
    """Fake DB paging over the static hypervisor fixtures.

    Returns the fixtures after *marker* (matched against either the
    stringified integer id or the uuid), truncated to *limit* entries.
    Raises MarkerNotFound for the sentinel "bad marker" values the tests
    use.
    """
    if marker in ['99999', uuids.invalid_marker]:
        raise exception.MarkerNotFound(marker)
    # When no marker is given the page starts at the first fixture.
    past_marker = marker is None
    page = []
    for node in TEST_HYPERS_OBJ:
        if past_marker:
            if limit is None or len(page) < int(limit):
                page.append(node)
        elif marker in (str(node.id), node.uuid):
            # The marker node itself is excluded from the page.
            past_marker = True
    return page
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
    # Pattern is ignored: every fixture "matches" regardless of the regex.
    return TEST_HYPERS_OBJ
def fake_compute_node_get(context, compute_id):
    # Accepts either a uuid or an integer id (as int or numeric string).
    # NOTE(review): if compute_id is a non-numeric string that matches no
    # fixture uuid, int(compute_id) raises ValueError rather than
    # ComputeHostNotFound — presumably no test hits that path; confirm.
    for hyper in TEST_HYPERS_OBJ:
        if hyper.uuid == compute_id or hyper.id == int(compute_id):
            return hyper
    raise exception.ComputeHostNotFound(host=compute_id)
def fake_service_get_by_compute_host(context, host):
    # Returns the matching fixture service, or implicitly None when the
    # host is unknown (no exception is raised by this fake).
    for service in TEST_SERVICES:
        if service.host == host:
            return service
def fake_compute_node_statistics(context):
    """Aggregate the fixture compute nodes into the statistics dict."""
    # Every field except 'count' is summed across the fixtures; 'count'
    # goes first so the resulting key order matches the original helper.
    summed_fields = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
                     'memory_mb_used', 'local_gb_used', 'free_ram_mb',
                     'free_disk_gb', 'current_workload', 'running_vms',
                     'disk_available_least')
    stats = {'count': len(TEST_HYPERS_OBJ)}
    for field in summed_fields:
        stats[field] = sum(getattr(node, field) for node in TEST_HYPERS_OBJ)
    return stats
def fake_instance_get_all_by_host(context, host):
    """Return fake Instance objects for the fixture servers on *host*."""
    return [fake_instance.fake_instance_obj(context, **server)
            for server in TEST_SERVERS
            if server['host'] == host]
class HypervisorsTestV21(test.NoDBTestCase):
    """Tests for the os-hypervisors API at the base (2.1) microversion.

    Microversion subclasses override the class attributes below to adjust
    the expected response bodies.
    """
    api_version = '2.1'
    # 2.53+ subclasses flip this to True once hypervisor ids become uuids.
    expect_uuid_for_id = False
    # Class-local deep copies so tests cannot corrupt the module fixtures.
    TEST_HYPERS_OBJ = copy.deepcopy(TEST_HYPERS_OBJ)
    TEST_SERVICES = copy.deepcopy(TEST_SERVICES)
    TEST_SERVERS = copy.deepcopy(TEST_SERVERS)
    # Expected API detail responses: start from the raw fixture dicts, drop
    # the DB-only fields and graft on the state/status/service view.
    DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
    del DETAIL_HYPERS_DICTS[0]['service_id']
    del DETAIL_HYPERS_DICTS[1]['service_id']
    del DETAIL_HYPERS_DICTS[0]['host']
    del DETAIL_HYPERS_DICTS[1]['host']
    del DETAIL_HYPERS_DICTS[0]['uuid']
    del DETAIL_HYPERS_DICTS[1]['uuid']
    DETAIL_HYPERS_DICTS[0].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=1, host='compute1',
                                                   disabled_reason=None)})
    DETAIL_HYPERS_DICTS[1].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=2, host='compute2',
                                                   disabled_reason=None)})
    # Expected API index (summary) responses.
    INDEX_HYPER_DICTS = [
        dict(id=1, hypervisor_hostname="hyper1",
             state='up', status='enabled'),
        dict(id=2, hypervisor_hostname="hyper2",
             state='up', status='enabled')]
    # Expected cpu_info rendering for null/empty input; 2.28 overrides this.
    DETAIL_NULL_CPUINFO_DICT = {'': '', None: None}
    def _get_request(self, use_admin_context, url=''):
        # Build a fake request pinned to this test class's microversion.
        return fakes.HTTPRequest.blank(url,
                                       use_admin_context=use_admin_context,
                                       version=self.api_version)
    def _set_up_controller(self):
        self.controller = hypervisors_v21.HypervisorsController()
        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
            return_value=True)
    def _get_hyper_id(self):
        """Return the first fixture's id in the form this microversion uses."""
        return (self.TEST_HYPERS_OBJ[0].uuid if self.expect_uuid_for_id
                else self.TEST_HYPERS_OBJ[0].id)
    def setUp(self):
        super(HypervisorsTestV21, self).setUp()
        self._set_up_controller()
        self.rule_hyp_show = "os_compute_api:os-hypervisors"
        # Route all host_api lookups through the module-level fakes.
        host_api = self.controller.host_api
        host_api.compute_node_get_all = mock.MagicMock(
            side_effect=fake_compute_node_get_all)
        host_api.service_get_by_compute_host = mock.MagicMock(
            side_effect=fake_service_get_by_compute_host)
        host_api.compute_node_search_by_hypervisor = mock.MagicMock(
            side_effect=fake_compute_node_search_by_hypervisor)
        host_api.compute_node_get = mock.MagicMock(
            side_effect=fake_compute_node_get)
        self.stub_out('nova.db.api.compute_node_statistics',
                      fake_compute_node_statistics)
    def test_view_hypervisor_nodetail_noservers(self):
        req = self._get_request(True)
        result = self.controller._view_hypervisor(
            self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], False, req)
        self.assertEqual(self.INDEX_HYPER_DICTS[0], result)
    def test_view_hypervisor_detail_noservers(self):
        req = self._get_request(True)
        result = self.controller._view_hypervisor(
            self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], True, req)
        self.assertEqual(self.DETAIL_HYPERS_DICTS[0], result)
    def test_view_hypervisor_servers(self):
        req = self._get_request(True)
        result = self.controller._view_hypervisor(self.TEST_HYPERS_OBJ[0],
                                                  self.TEST_SERVICES[0],
                                                  False, req,
                                                  self.TEST_SERVERS)
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
        expected_dict.update({'servers': [
            dict(name="inst1", uuid=uuids.instance_1),
            dict(name="inst2", uuid=uuids.instance_2),
            dict(name="inst3", uuid=uuids.instance_3),
            dict(name="inst4", uuid=uuids.instance_4)]})
        self.assertEqual(expected_dict, result)
    def _test_view_hypervisor_detail_cpuinfo_null(self, cpu_info):
        # Shared body for the empty-string/None cpu_info cases below.
        req = self._get_request(True)
        test_hypervisor_obj = copy.deepcopy(self.TEST_HYPERS_OBJ[0])
        test_hypervisor_obj.cpu_info = cpu_info
        result = self.controller._view_hypervisor(test_hypervisor_obj,
                                                  self.TEST_SERVICES[0],
                                                  True, req)
        expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
        expected_dict.update({'cpu_info':
                              self.DETAIL_NULL_CPUINFO_DICT[cpu_info]})
        self.assertEqual(result, expected_dict)
    def test_view_hypervisor_detail_cpuinfo_empty_string(self):
        self._test_view_hypervisor_detail_cpuinfo_null('')
    def test_view_hypervisor_detail_cpuinfo_none(self):
        self._test_view_hypervisor_detail_cpuinfo_null(None)
    def test_index(self):
        req = self._get_request(True)
        result = self.controller.index(req)
        self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
    def test_index_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.index, req)
    def test_index_compute_host_not_found(self):
        # A ComputeHostNotFound for one compute drops that hypervisor from
        # the response instead of failing the whole request.
        compute_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(**TEST_HYPERS[0]),
            objects.ComputeNode(**TEST_HYPERS[1])
        ])
        def fake_service_get_by_compute_host(context, host):
            if host == TEST_HYPERS[0]['host']:
                return TEST_SERVICES[0]
            raise exception.ComputeHostNotFound(host=host)
        @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                           return_value=compute_nodes)
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           fake_service_get_by_compute_host)
        def _test(self, compute_node_get_all):
            req = self._get_request(True)
            result = self.controller.index(req)
            self.assertEqual(1, len(result['hypervisors']))
            expected = {
                'id': compute_nodes[0].uuid if self.expect_uuid_for_id
                      else compute_nodes[0].id,
                'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
                'state': 'up',
                'status': 'enabled',
            }
            self.assertDictEqual(expected, result['hypervisors'][0])
        _test(self)
    def test_index_compute_host_not_mapped(self):
        # Same as above but the service lookup raises HostMappingNotFound.
        compute_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(**TEST_HYPERS[0]),
            objects.ComputeNode(**TEST_HYPERS[1])
        ])
        def fake_service_get_by_compute_host(context, host):
            if host == TEST_HYPERS[0]['host']:
                return TEST_SERVICES[0]
            raise exception.HostMappingNotFound(name=host)
        @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                           return_value=compute_nodes)
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           fake_service_get_by_compute_host)
        def _test(self, compute_node_get_all):
            req = self._get_request(True)
            result = self.controller.index(req)
            self.assertEqual(1, len(result['hypervisors']))
            expected = {
                'id': compute_nodes[0].uuid if self.expect_uuid_for_id
                      else compute_nodes[0].id,
                'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
                'state': 'up',
                'status': 'enabled',
            }
            self.assertDictEqual(expected, result['hypervisors'][0])
        _test(self)
    def test_detail(self):
        req = self._get_request(True)
        result = self.controller.detail(req)
        self.assertEqual(dict(hypervisors=self.DETAIL_HYPERS_DICTS), result)
    def test_detail_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.detail, req)
    def test_detail_compute_host_not_found(self):
        compute_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(**TEST_HYPERS[0]),
            objects.ComputeNode(**TEST_HYPERS[1])
        ])
        def fake_service_get_by_compute_host(context, host):
            if host == TEST_HYPERS[0]['host']:
                return TEST_SERVICES[0]
            raise exception.ComputeHostNotFound(host=host)
        @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                           return_value=compute_nodes)
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           fake_service_get_by_compute_host)
        def _test(self, compute_node_get_all):
            req = self._get_request(True)
            result = self.controller.detail(req)
            self.assertEqual(1, len(result['hypervisors']))
            expected = {
                'id': compute_nodes[0].id,
                'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
                'state': 'up',
                'status': 'enabled',
            }
            # Check the subset we care about; detail returns more keys
            # than index would return.
            hypervisor = result['hypervisors'][0]
            self.assertTrue(
                set(expected.keys()).issubset(set(hypervisor.keys())))
            self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
            self.assertEqual(compute_nodes[0].hypervisor_hostname,
                             hypervisor['hypervisor_hostname'])
        _test(self)
    def test_detail_compute_host_not_mapped(self):
        # two computes, a matching service only exists for the first one
        compute_nodes = objects.ComputeNodeList(objects=[
            objects.ComputeNode(**TEST_HYPERS[0]),
            objects.ComputeNode(**TEST_HYPERS[1])
        ])
        def fake_service_get_by_compute_host(context, host):
            if host == TEST_HYPERS[0]['host']:
                return TEST_SERVICES[0]
            raise exception.HostMappingNotFound(name=host)
        @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
                           return_value=compute_nodes)
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           fake_service_get_by_compute_host)
        def _test(self, compute_node_get_all):
            req = self._get_request(True)
            result = self.controller.detail(req)
            self.assertEqual(1, len(result['hypervisors']))
            expected = {
                'id': compute_nodes[0].id,
                'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
                'state': 'up',
                'status': 'enabled',
            }
            # we don't care about all of the details, just make sure we get
            # the expected subset of fields back
            hypervisor = result['hypervisors'][0]
            self.assertTrue(
                set(expected.keys()).issubset(set(hypervisor.keys())))
            self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
            self.assertEqual(compute_nodes[0].hypervisor_hostname,
                             hypervisor['hypervisor_hostname'])
        _test(self)
    def test_show_compute_host_not_mapped(self):
        @mock.patch.object(self.controller.host_api, 'compute_node_get',
                           return_value=self.TEST_HYPERS_OBJ[0])
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host')
        def _test(self, mock_service, mock_compute_node_get):
            req = self._get_request(True)
            mock_service.side_effect = exception.HostMappingNotFound(
                name='foo')
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPNotFound, self.controller.show,
                              req, hyper_id)
            self.assertTrue(mock_service.called)
            mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id)
        _test(self)
    def test_show_noid(self):
        req = self._get_request(True)
        # An id that matches no fixture results in a 404.
        hyperid = uuids.hyper3 if self.expect_uuid_for_id else '3'
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, hyperid)
    def test_show_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
    def test_show_withid(self):
        req = self._get_request(True)
        hyper_id = self._get_hyper_id()
        result = self.controller.show(req, hyper_id)
        self.assertEqual(dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]), result)
    def test_show_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.show, req,
                          self._get_hyper_id())
    def test_uptime_noid(self):
        req = self._get_request(True)
        hyper_id = uuids.hyper3 if self.expect_uuid_for_id else '3'
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req,
                          hyper_id)
    def test_uptime_notimplemented(self):
        with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                               side_effect=exc.HTTPNotImplemented()
                               ) as mock_get_uptime:
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPNotImplemented,
                              self.controller.uptime, req, hyper_id)
            self.assertEqual(1, mock_get_uptime.call_count)
    def test_uptime_implemented(self):
        with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                               return_value="fake uptime"
                               ) as mock_get_uptime:
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            result = self.controller.uptime(req, hyper_id)
            # The uptime view is the index view plus an 'uptime' key.
            expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
            expected_dict.update({'uptime': "fake uptime"})
            self.assertEqual(dict(hypervisor=expected_dict), result)
            self.assertEqual(1, mock_get_uptime.call_count)
    def test_uptime_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
    def test_uptime_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.uptime, req,
                          self.TEST_HYPERS_OBJ[0].id)
    def test_uptime_hypervisor_down(self):
        with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                side_effect=exception.ComputeServiceUnavailable(host='dummy')
                ) as mock_get_uptime:
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPBadRequest,
                              self.controller.uptime, req, hyper_id)
            mock_get_uptime.assert_called_once_with(
                mock.ANY, self.TEST_HYPERS_OBJ[0].host)
    def test_uptime_hypervisor_not_mapped_service_get(self):
        @mock.patch.object(self.controller.host_api, 'compute_node_get')
        @mock.patch.object(self.controller.host_api, 'get_host_uptime')
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host',
                           side_effect=exception.HostMappingNotFound(
                               name='dummy'))
        def _test(mock_get, _, __):
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.uptime, req, hyper_id)
            self.assertTrue(mock_get.called)
        _test()
    def test_uptime_hypervisor_not_mapped(self):
        with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                side_effect=exception.HostMappingNotFound(name='dummy')
                ) as mock_get_uptime:
            req = self._get_request(True)
            hyper_id = self._get_hyper_id()
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.uptime, req, hyper_id)
            mock_get_uptime.assert_called_once_with(
                mock.ANY, self.TEST_HYPERS_OBJ[0].host)
    def test_search(self):
        req = self._get_request(True)
        result = self.controller.search(req, 'hyper')
        self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
    def test_search_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.search, req,
                          self.TEST_HYPERS_OBJ[0].id)
    def test_search_non_exist(self):
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=[]) as mock_node_search:
            req = self._get_request(True)
            self.assertRaises(exc.HTTPNotFound, self.controller.search,
                              req, 'a')
            self.assertEqual(1, mock_node_search.call_count)
    def test_search_unmapped(self):
        @mock.patch.object(self.controller.host_api,
                           'compute_node_search_by_hypervisor')
        @mock.patch.object(self.controller.host_api,
                           'service_get_by_compute_host')
        def _test(mock_service, mock_search):
            mock_search.return_value = [mock.MagicMock()]
            mock_service.side_effect = exception.HostMappingNotFound(
                name='foo')
            req = self._get_request(True)
            self.assertRaises(exc.HTTPNotFound, self.controller.search,
                              req, 'a')
            self.assertTrue(mock_service.called)
        _test()
    @mock.patch.object(objects.InstanceList, 'get_by_host',
                       side_effect=fake_instance_get_all_by_host)
    def test_servers(self, mock_get):
        req = self._get_request(True)
        result = self.controller.servers(req, 'hyper')
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
        expected_dict[0].update({'servers': [
            dict(uuid=uuids.instance_1),
            dict(uuid=uuids.instance_3)]})
        expected_dict[1].update({'servers': [
            dict(uuid=uuids.instance_2),
            dict(uuid=uuids.instance_4)]})
        # Strip the server names so the comparison is uuid-only.
        for output in result['hypervisors']:
            servers = output['servers']
            for server in servers:
                del server['name']
        self.assertEqual(dict(hypervisors=expected_dict), result)
    def test_servers_not_mapped(self):
        req = self._get_request(True)
        with mock.patch.object(self.controller.host_api,
                               'instance_get_all_by_host') as m:
            m.side_effect = exception.HostMappingNotFound(name='something')
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.servers, req, 'hyper')
    def test_servers_non_id(self):
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=[]) as mock_node_search:
            req = self._get_request(True)
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.servers,
                              req, '115')
            self.assertEqual(1, mock_node_search.call_count)
    def test_servers_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.servers, req,
                          self.TEST_HYPERS_OBJ[0].id)
    def test_servers_with_non_integer_hypervisor_id(self):
        with mock.patch.object(self.controller.host_api,
                               'compute_node_search_by_hypervisor',
                               return_value=[]) as mock_node_search:
            req = self._get_request(True)
            self.assertRaises(exc.HTTPNotFound,
                              self.controller.servers, req, 'abc')
            self.assertEqual(1, mock_node_search.call_count)
    def test_servers_with_no_server(self):
        with mock.patch.object(self.controller.host_api,
                               'instance_get_all_by_host',
                               return_value=[]) as mock_inst_get_all:
            req = self._get_request(True)
            result = self.controller.servers(req, self.TEST_HYPERS_OBJ[0].id)
            self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
            self.assertTrue(mock_inst_get_all.called)
    def test_statistics(self):
        # Expected values are the sums produced by
        # fake_compute_node_statistics over the two fixtures.
        req = self._get_request(True)
        result = self.controller.statistics(req)
        self.assertEqual(dict(hypervisor_statistics=dict(
            count=2,
            vcpus=8,
            memory_mb=20 * 1024,
            local_gb=500,
            vcpus_used=4,
            memory_mb_used=10 * 1024,
            local_gb_used=250,
            free_ram_mb=10 * 1024,
            free_disk_gb=250,
            current_workload=4,
            running_vms=4,
            disk_available_least=200)), result)
    def test_statistics_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.statistics, req)
class HypervisorsTestV228(HypervisorsTestV21):
    """Tests at microversion 2.28, where cpu_info is a deserialized dict."""
    api_version = '2.28'
    DETAIL_HYPERS_DICTS = copy.deepcopy(HypervisorsTestV21.DETAIL_HYPERS_DICTS)
    DETAIL_HYPERS_DICTS[0]['cpu_info'] = jsonutils.loads(CPU_INFO)
    DETAIL_HYPERS_DICTS[1]['cpu_info'] = jsonutils.loads(CPU_INFO)
    # Null/empty cpu_info now renders as {} instead of echoing the input.
    DETAIL_NULL_CPUINFO_DICT = {'': {}, None: {}}
class HypervisorsTestV233(HypervisorsTestV228):
api_version = '2.33'
    def test_index_pagination(self):
        """Tests index paging with integer markers (pre-2.53 behavior)."""
        req = self._get_request(True,
                                '/v2/1234/os-hypervisors?limit=1&marker=1')
        result = self.controller.index(req)
        expected = {
            'hypervisors': [
                {'hypervisor_hostname': 'hyper2',
                 'id': 2,
                 'state': 'up',
                 'status': 'enabled'}
            ],
            'hypervisors_links': [
                {'href': 'http://localhost/v2/hypervisors?limit=1&marker=2',
                 'rel': 'next'}
            ]
        }
        self.assertEqual(expected, result)
    def test_index_pagination_with_invalid_marker(self):
        # 99999 is the sentinel the fake maps to MarkerNotFound -> 400.
        req = self._get_request(True,
                                '/v2/1234/os-hypervisors?marker=99999')
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.index, req)
    def test_index_pagination_with_invalid_non_int_limit(self):
        req = self._get_request(True,
                                '/v2/1234/os-hypervisors?limit=-9')
        self.assertRaises(exception.ValidationError,
                          self.controller.index, req)
    def test_index_pagination_with_invalid_string_limit(self):
        req = self._get_request(True,
                                '/v2/1234/os-hypervisors?limit=abc')
        self.assertRaises(exception.ValidationError,
                          self.controller.index, req)
    def test_index_duplicate_query_parameters_with_invalid_string_limit(self):
        req = self._get_request(
            True,
            '/v2/1234/os-hypervisors/?limit=1&limit=abc')
        self.assertRaises(exception.ValidationError,
                          self.controller.index, req)
def test_index_duplicate_query_parameters_validation(self):
expected = [{
'hypervisor_hostname': 'hyper2',
'id': 2,
'state': 'up',
'status': 'enabled'}
]
params = {
'limit': 1,
'marker': 1,
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?marker=1&%s=%s&%s=%s' %
(param, value, param, value))
result = self.controller.index(req)
self.assertEqual(expected, result['hypervisors'])
def test_index_pagination_with_additional_filter(self):
expected = {
'hypervisors': [
{'hypervisor_hostname': 'hyper2',
'id': 2,
'state': 'up',
'status': 'enabled'}
],
'hypervisors_links': [
{'href': 'http://localhost/v2/hypervisors?limit=1&marker=2',
'rel': 'next'}
]
}
req = self._get_request(
True, '/v2/1234/os-hypervisors?limit=1&marker=1&additional=3')
result = self.controller.index(req)
self.assertEqual(expected, result)
def test_detail_pagination(self):
req = self._get_request(
True, '/v2/1234/os-hypervisors/detail?limit=1&marker=1')
result = self.controller.detail(req)
link = 'http://localhost/v2/hypervisors/detail?limit=1&marker=2'
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
],
'hypervisors_links': [{'href': link, 'rel': 'next'}]
}
self.assertEqual(expected, result)
def test_detail_pagination_with_invalid_marker(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors/detail?marker=99999')
self.assertRaises(exc.HTTPBadRequest,
self.controller.detail, req)
def test_detail_pagination_with_invalid_string_limit(self):
req = self._get_request(True,
'/v2/1234/os-hypervisors/detail?limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_detail_duplicate_query_parameters_with_invalid_string_limit(self):
req = self._get_request(
True,
'/v2/1234/os-hypervisors/detail?limit=1&limit=abc')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_detail_duplicate_query_parameters_validation(self):
expected = [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
]
params = {
'limit': 1,
'marker': 1,
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?marker=1&%s=%s&%s=%s' %
(param, value, param, value))
result = self.controller.detail(req)
self.assertEqual(expected, result['hypervisors'])
def test_detail_pagination_with_additional_filter(self):
link = 'http://localhost/v2/hypervisors/detail?limit=1&marker=2'
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': 2,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': 2},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
],
'hypervisors_links': [{
'href': link,
'rel': 'next'}]
}
req = self._get_request(
True, '/v2/1234/os-hypervisors/detail?limit=1&marker=1&unknown=2')
result = self.controller.detail(req)
self.assertEqual(expected, result)
class HypervisorsTestV252(HypervisorsTestV233):
api_version = '2.52'
class HypervisorsTestV253(HypervisorsTestV252):
api_version = hypervisors_v21.UUID_FOR_ID_MIN_VERSION
expect_uuid_for_id = True
INDEX_HYPER_DICTS = [
dict(id=uuids.hyper1, hypervisor_hostname="hyper1",
state='up', status='enabled'),
dict(id=uuids.hyper2, hypervisor_hostname="hyper2",
state='up', status='enabled')]
def setUp(self):
super(HypervisorsTestV253, self).setUp()
for index, detail_hyper_dict in enumerate(self.DETAIL_HYPERS_DICTS):
detail_hyper_dict['id'] = TEST_HYPERS[index]['uuid']
detail_hyper_dict['service']['id'] = TEST_SERVICES[index].uuid
def test_servers(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.servers,
self._get_request(True), 'hyper')
def test_servers_with_no_server(self):
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
return_value=[]) as mock_inst_get_all:
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=1')
result = self.controller.index(req)
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
self.assertEqual(2, mock_inst_get_all.call_count)
mock_inst_get_all.assert_has_calls((
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[0].host),
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[1].host)))
def test_servers_not_mapped(self):
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=1')
with mock.patch.object(
self.controller.host_api, 'instance_get_all_by_host',
side_effect=exception.HostMappingNotFound(name='something')):
result = self.controller.index(req)
self.assertEqual(dict(hypervisors=[]), result)
def test_list_with_servers(self):
instances = [
objects.InstanceList(objects=[objects.Instance(
id=1, uuid=uuids.hyper1_instance1)]),
objects.InstanceList(objects=[objects.Instance(
id=2, uuid=uuids.hyper2_instance1)])]
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
side_effect=instances) as mock_inst_get_all:
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=True')
result = self.controller.index(req)
index_with_servers = copy.deepcopy(self.INDEX_HYPER_DICTS)
index_with_servers[0]['servers'] = [
{'name': 'instance-00000001', 'uuid': uuids.hyper1_instance1}]
index_with_servers[1]['servers'] = [
{'name': 'instance-00000002', 'uuid': uuids.hyper2_instance1}]
self.assertEqual(dict(hypervisors=index_with_servers), result)
self.assertEqual(2, mock_inst_get_all.call_count)
mock_inst_get_all.assert_has_calls((
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[0].host),
mock.call(req.environ['nova.context'], TEST_HYPERS_OBJ[1].host)))
def test_list_with_servers_invalid_parameter(self):
req = self._get_request(use_admin_context=True,
url='/os-hypervisors?with_servers=invalid')
self.assertRaises(
exception.ValidationError, self.controller.index, req)
def test_list_with_hostname_pattern_and_paging_parameters(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=foo&'
'limit=1&marker=%s' % uuids.marker)
ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
self.assertIn('Paging over hypervisors with the '
'hypervisor_hostname_pattern query parameter is not '
'supported.', six.text_type(ex))
def test_servers_with_non_integer_hypervisor_id(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?with_servers=yes&'
'hypervisor_hostname_pattern=shenzhen')
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=objects.ComputeNodeList()) as s:
self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
def test_servers_non_admin(self):
pass
def test_servers_non_id(self):
pass
def test_search_old_route(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.search,
self._get_request(True), 'hyper')
def test_search(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen')
with mock.patch.object(self.controller.host_api,
'compute_node_search_by_hypervisor',
return_value=objects.ComputeNodeList(
objects=[TEST_HYPERS_OBJ[0]])) as s:
result = self.controller.detail(req)
s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('1.1.1.1'),
'hypervisor_hostname': 'hyper1',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': TEST_HYPERS_OBJ[0].uuid,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute1',
'id': TEST_SERVICES[0].uuid},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
]
}
self.assertNotIn('hypervisors_links', result)
self.assertDictEqual(expected, result)
def test_search_invalid_hostname_pattern_parameter(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=invalid~host')
self.assertRaises(
exception.ValidationError, self.controller.detail, req)
def test_search_non_exist(self):
pass
def test_search_non_admin(self):
pass
def test_search_unmapped(self):
pass
def test_show_non_integer_id(self):
pass
def test_show_integer_id(self):
req = self._get_request(True)
ex = self.assertRaises(exc.HTTPBadRequest,
self.controller.show, req, '1')
self.assertIn('Invalid uuid 1', six.text_type(ex))
def test_show_with_servers_invalid_parameter(self):
hyper_id = self._get_hyper_id()
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/%s?with_servers=invalid' % hyper_id)
ex = self.assertRaises(
exception.ValidationError, self.controller.show, req, hyper_id)
self.assertIn('with_servers', six.text_type(ex))
def test_show_with_servers_host_mapping_not_found(self):
hyper_id = self._get_hyper_id()
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/%s?with_servers=true' % hyper_id)
with mock.patch.object(
self.controller.host_api, 'instance_get_all_by_host',
side_effect=exception.HostMappingNotFound(name=hyper_id)):
self.assertRaises(exc.HTTPNotFound, self.controller.show,
req, hyper_id)
def test_show_with_servers(self):
instances = objects.InstanceList(objects=[objects.Instance(
id=1, uuid=uuids.hyper1_instance1)])
hyper_id = self._get_hyper_id()
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/%s?with_servers=on' % hyper_id)
with mock.patch.object(self.controller.host_api,
'instance_get_all_by_host',
return_value=instances) as mock_inst_get_all:
result = self.controller.show(req, hyper_id)
show_with_servers = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
show_with_servers['servers'] = [
{'name': 'instance-00000001', 'uuid': uuids.hyper1_instance1}]
self.assertDictEqual(dict(hypervisor=show_with_servers), result)
# instance_get_all_by_host is called
mock_inst_get_all.assert_called_once_with(
req.environ['nova.context'], TEST_HYPERS_OBJ[0].host)
def test_uptime_non_integer_id(self):
pass
def test_uptime_integer_id(self):
req = self._get_request(True)
ex = self.assertRaises(exc.HTTPBadRequest,
self.controller.uptime, req, '1')
self.assertIn('Invalid uuid 1', six.text_type(ex))
def test_detail_pagination(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?limit=1&marker=%s' %
TEST_HYPERS_OBJ[0].uuid)
result = self.controller.detail(req)
link = ('http://localhost/v2/hypervisors/detail?limit=1&marker=%s' %
TEST_HYPERS_OBJ[1].uuid)
expected = {
'hypervisors': [
{'cpu_info': {'arch': 'x86_64',
'features': [],
'model': '',
'topology': {'cores': 1,
'sockets': 1,
'threads': 1},
'vendor': 'fake'},
'current_workload': 2,
'disk_available_least': 100,
'free_disk_gb': 125,
'free_ram_mb': 5120,
'host_ip': netaddr.IPAddress('2.2.2.2'),
'hypervisor_hostname': 'hyper2',
'hypervisor_type': 'xen',
'hypervisor_version': 3,
'id': TEST_HYPERS_OBJ[1].uuid,
'local_gb': 250,
'local_gb_used': 125,
'memory_mb': 10240,
'memory_mb_used': 5120,
'running_vms': 2,
'service': {'disabled_reason': None,
'host': 'compute2',
'id': TEST_SERVICES[1].uuid},
'state': 'up',
'status': 'enabled',
'vcpus': 4,
'vcpus_used': 2}
],
'hypervisors_links': [{'href': link, 'rel': 'next'}]
}
self.assertEqual(expected, result)
def test_detail_pagination_with_invalid_marker(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?marker=%s' % uuids.invalid_marker)
self.assertRaises(exc.HTTPBadRequest,
self.controller.detail, req)
def test_detail_pagination_with_additional_filter(self):
req = self._get_request(
True, '/v2/1234/os-hypervisors/detail?limit=1&marker=9&unknown=2')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_detail_duplicate_query_parameters_validation(self):
params = {
'limit': 1,
'marker': uuids.marker,
'hypervisor_hostname_pattern': 'foo',
'with_servers': 'true'
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/detail?%s=%s&%s=%s' %
(param, value, param, value))
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_index_pagination(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?limit=1&marker=%s' %
TEST_HYPERS_OBJ[0].uuid)
result = self.controller.index(req)
link = ('http://localhost/v2/hypervisors?limit=1&marker=%s' %
TEST_HYPERS_OBJ[1].uuid)
expected = {
'hypervisors': [{
'hypervisor_hostname': 'hyper2',
'id': TEST_HYPERS_OBJ[1].uuid,
'state': 'up',
'status': 'enabled'
}],
'hypervisors_links': [{'href': link, 'rel': 'next'}]
}
self.assertEqual(expected, result)
def test_index_pagination_with_invalid_marker(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?marker=%s' % uuids.invalid_marker)
self.assertRaises(exc.HTTPBadRequest,
self.controller.index, req)
def test_index_pagination_with_additional_filter(self):
req = self._get_request(
True, '/v2/1234/os-hypervisors/?limit=1&marker=9&unknown=2')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_duplicate_query_parameters_validation(self):
params = {
'limit': 1,
'marker': uuids.marker,
'hypervisor_hostname_pattern': 'foo',
'with_servers': 'true'
}
for param, value in params.items():
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?%s=%s&%s=%s' %
(param, value, param, value))
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_show_duplicate_query_parameters_validation(self):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors/%s?with_servers=1&with_servers=1' %
uuids.hyper1)
self.assertRaises(exception.ValidationError,
self.controller.show, req, uuids.hyper1)
| true | true |
1c47f98f44be7b87e01c9b1a097b1376b38cc5f4 | 1,906 | py | Python | sdk/python/setup.py | agilecreativity/pulumi-docker | 0a6928ef65763f30820837d63d3ad9e59ad993e1 | [
"Apache-2.0"
] | 45 | 2018-09-22T07:48:05.000Z | 2022-02-15T08:48:26.000Z | sdk/python/setup.py | agilecreativity/pulumi-docker | 0a6928ef65763f30820837d63d3ad9e59ad993e1 | [
"Apache-2.0"
] | 178 | 2018-09-01T23:59:42.000Z | 2022-03-31T22:05:46.000Z | sdk/python/setup.py | agilecreativity/pulumi-docker | 0a6928ef65763f30820837d63d3ad9e59ad993e1 | [
"Apache-2.0"
] | 21 | 2018-10-11T08:00:17.000Z | 2022-01-26T02:28:20.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import errno
from setuptools import setup, find_packages
from setuptools.command.install import install
from subprocess import check_call
class InstallPluginCommand(install):
def run(self):
install.run(self)
try:
check_call(['pulumi', 'plugin', 'install', 'resource', 'docker', '${PLUGIN_VERSION}'])
except OSError as error:
if error.errno == errno.ENOENT:
print("""
There was an error installing the docker resource provider plugin.
It looks like `pulumi` is not installed on your system.
Please visit https://pulumi.com/ to install the Pulumi CLI.
You may try manually installing the plugin by running
`pulumi plugin install resource docker ${PLUGIN_VERSION}`
""")
else:
raise
def readme():
with open('README.md', encoding='utf-8') as f:
return f.read()
setup(name='pulumi_docker',
version='${VERSION}',
description="A Pulumi package for interacting with Docker in Pulumi programs",
long_description=readme(),
long_description_content_type='text/markdown',
cmdclass={
'install': InstallPluginCommand,
},
keywords='pulumi docker',
url='https://pulumi.io',
project_urls={
'Repository': 'https://github.com/pulumi/pulumi-docker'
},
license='Apache-2.0',
packages=find_packages(),
package_data={
'pulumi_docker': [
'py.typed',
]
},
install_requires=[
'parver>=0.2.1',
'pulumi>=3.0.0,<4.0.0',
'semver>=2.8.1'
],
zip_safe=False)
| 31.766667 | 98 | 0.589717 |
import errno
from setuptools import setup, find_packages
from setuptools.command.install import install
from subprocess import check_call
class InstallPluginCommand(install):
def run(self):
install.run(self)
try:
check_call(['pulumi', 'plugin', 'install', 'resource', 'docker', '${PLUGIN_VERSION}'])
except OSError as error:
if error.errno == errno.ENOENT:
print("""
There was an error installing the docker resource provider plugin.
It looks like `pulumi` is not installed on your system.
Please visit https://pulumi.com/ to install the Pulumi CLI.
You may try manually installing the plugin by running
`pulumi plugin install resource docker ${PLUGIN_VERSION}`
""")
else:
raise
def readme():
with open('README.md', encoding='utf-8') as f:
return f.read()
setup(name='pulumi_docker',
version='${VERSION}',
description="A Pulumi package for interacting with Docker in Pulumi programs",
long_description=readme(),
long_description_content_type='text/markdown',
cmdclass={
'install': InstallPluginCommand,
},
keywords='pulumi docker',
url='https://pulumi.io',
project_urls={
'Repository': 'https://github.com/pulumi/pulumi-docker'
},
license='Apache-2.0',
packages=find_packages(),
package_data={
'pulumi_docker': [
'py.typed',
]
},
install_requires=[
'parver>=0.2.1',
'pulumi>=3.0.0,<4.0.0',
'semver>=2.8.1'
],
zip_safe=False)
| true | true |
1c47fa0e5e9054dbfe8764c09a2d5ba88af0b2fc | 5,622 | py | Python | docs/conf.py | tonyseek/python-orphanage | df466c567fda82ef4f6d949b19b5a0b33744513c | [
"MIT"
] | 11 | 2018-05-08T08:05:31.000Z | 2021-03-31T08:57:26.000Z | docs/conf.py | tonyseek/python-orphanage | df466c567fda82ef4f6d949b19b5a0b33744513c | [
"MIT"
] | 2 | 2018-05-09T12:01:23.000Z | 2019-10-21T17:24:40.000Z | docs/conf.py | tonyseek/python-orphanage | df466c567fda82ef4f6d949b19b5a0b33744513c | [
"MIT"
] | 2 | 2018-06-19T06:28:02.000Z | 2021-03-02T01:59:04.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'orphanage'
copyright = u'2018, Jiangge Zhang'
author = u'Jiangge Zhang'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', u'Thumbs.db', u'.DS_Store', u'.gitignore',
u'Makefile']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo_name': True,
'description': 'Let orphan processes suicide',
'github_user': 'tonyseek',
'github_repo': 'python-orphanage',
'github_type': 'star',
'github_count': True,
'github_banner': 'github-ribbons.png',
'github_button': True,
'travis_button': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
],
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'orphanagedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'orphanage.tex', u'orphanage Documentation',
u'Jiangge Zhang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'orphanage', u'orphanage Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'orphanage', u'orphanage Documentation',
author, 'orphanage', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 30.225806 | 79 | 0.639808 |
project = u'orphanage'
copyright = u'2018, Jiangge Zhang'
author = u'Jiangge Zhang'
version = u''
release = u'0.1.0'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = [u'_build', u'Thumbs.db', u'.DS_Store', u'.gitignore',
u'Makefile']
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_theme_options = {
'logo_name': True,
'description': 'Let orphan processes suicide',
'github_user': 'tonyseek',
'github_repo': 'python-orphanage',
'github_type': 'star',
'github_count': True,
'github_banner': 'github-ribbons.png',
'github_button': True,
'travis_button': True,
}
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
],
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'orphanagedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'orphanage.tex', u'orphanage Documentation',
u'Jiangge Zhang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'orphanage', u'orphanage Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'orphanage', u'orphanage Documentation',
author, 'orphanage', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| true | true |
1c47fa3816f95e926ae95ef3681d5bf62a93e19a | 1,212 | py | Python | LeetCode/python-R1/0049-字母异位词分组D/V2-OverTIme.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-08-27T11:28:03.000Z | 2020-12-24T07:10:22.000Z | LeetCode/python-R1/0049-字母异位词分组D/V2-OverTIme.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | null | null | null | LeetCode/python-R1/0049-字母异位词分组D/V2-OverTIme.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-09-20T09:44:01.000Z | 2020-12-24T07:10:23.000Z | """
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
系统: Ubuntu 18.04
IDE: VS Code 1.36
工具: python == 3.7.3
"""
"""
思路:
在V1的基础上加了字典对输入进行长度分类,在每一类里循环判断
速度比V1快一些,但仍然超时
结果:
第100个测试用例超时
"""
from collections import Counter
class Solution:
def groupAnagrams(self, strs):
res = []
if len(strs) == 0:
return res
if len(strs) == 1:
return [strs]
d = {}
for s in strs:
d[len(s)] = d.get(len(s),[]) + [s]
for key, value in d.items():
res_part = [[value[0]]]
if len(value) > 1:
for item in value[1:]:
for i in range(len(res_part)):
add = False
if Counter(res_part[i][0]) == Counter(item):
res_part[i].append(item)
add = True
break
if add == False:
res_part.append([item])
res.extend(res_part)
return res
if __name__ == "__main__":
strs = ["eat", "tea", "tan", "ate", "nat", "bat",'apple']
answer = Solution().groupAnagrams(strs)
print(answer)
| 24.734694 | 68 | 0.459571 |
from collections import Counter
class Solution:
def groupAnagrams(self, strs):
res = []
if len(strs) == 0:
return res
if len(strs) == 1:
return [strs]
d = {}
for s in strs:
d[len(s)] = d.get(len(s),[]) + [s]
for key, value in d.items():
res_part = [[value[0]]]
if len(value) > 1:
for item in value[1:]:
for i in range(len(res_part)):
add = False
if Counter(res_part[i][0]) == Counter(item):
res_part[i].append(item)
add = True
break
if add == False:
res_part.append([item])
res.extend(res_part)
return res
if __name__ == "__main__":
strs = ["eat", "tea", "tan", "ate", "nat", "bat",'apple']
answer = Solution().groupAnagrams(strs)
print(answer)
| true | true |
1c47fa676f9225cd4fb2d3c9c868a8eaf56f7c8f | 825 | py | Python | src/native_bayes/classify.py | zegra1989/ml | ed574ff45d4852d0c93f1ad5d7e0160cd752c9e0 | [
"MIT"
] | null | null | null | src/native_bayes/classify.py | zegra1989/ml | ed574ff45d4852d0c93f1ad5d7e0160cd752c9e0 | [
"MIT"
] | null | null | null | src/native_bayes/classify.py | zegra1989/ml | ed574ff45d4852d0c93f1ad5d7e0160cd752c9e0 | [
"MIT"
] | null | null | null | def NBAccuracy(features_train, labels_train, features_test, labels_test):
""" compute the accuracy of your Naive Bayes classifier """
### import the sklearn module for GaussianNB
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
### create classifier
clf = GaussianNB()
### fit the classifier on the training features and labels
clf.fit(features_train, labels_train)
### use the trained classifier to predict labels for the test features
pred = clf.predict(features_test)
### calculate and return the accuracy on the test data
### this is slightly different than the example,
### where we just print the accuracy
### you might need to import an sklearn module
accuracy = accuracy_score(pred, labels_test)
return accuracy | 39.285714 | 74 | 0.727273 | def NBAccuracy(features_train, labels_train, features_test, labels_test):
acy_score
| true | true |
1c47fbfef732fe271a24cdb151acef2779e5603d | 644 | py | Python | tools/telemetry/telemetry/unittest_util/options_for_unittests.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | telemetry/telemetry/testing/options_for_unittests.py | kind-john/catapult | 29635376119833f172a58a48a3282d353ce55d2b | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | telemetry/telemetry/testing/options_for_unittests.py | kind-john/catapult | 29635376119833f172a58a48a3282d353ce55d2b | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module provides the global variable options_for_unittests.
This is set to a BrowserOptions object by the test harness, or None
if unit tests are not running.
This allows multiple unit tests to use a specific
browser, in face of multiple options."""
_options = []
def Push(options):
_options.append(options)
def Pop():
return _options.pop()
def GetCopy():
if not AreSet():
return None
return _options[-1].Copy()
def AreSet():
return bool(_options)
| 19.515152 | 72 | 0.734472 |
_options = []
def Push(options):
_options.append(options)
def Pop():
return _options.pop()
def GetCopy():
if not AreSet():
return None
return _options[-1].Copy()
def AreSet():
return bool(_options)
| true | true |
1c47fc0ab6403bb40623a1044733f8334c1d1740 | 33,931 | py | Python | theano/tensor/var.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | theano/tensor/var.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | theano/tensor/var.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-09-09T18:31:41.000Z | 2019-09-09T18:31:41.000Z | from __future__ import absolute_import, print_function, division
import collections
import copy
import traceback as tb
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import theano
from theano.compat import PY3
from theano.scalar import ComplexError, IntegerDivisionError
from theano.gof import Constant, Variable
from theano.gof.utils import hashtype
from theano.tensor.utils import hash_from_ndarray
from theano.tensor.type import TensorType
from theano.configparser import config
def equal_slices(s1, s2):
return (s1.start == s2.start and
s1.stop == s2.stop and
s1.step == s2.step)
class AsTensorError(TypeError):
"""
Raised when as_tensor_variable isn't able to create a TensorVariable.
"""
pass
class _tensor_py_operators(object):
# UNARY
def __abs__(self):
return theano.tensor.basic.abs_(self)
def __neg__(self):
return theano.tensor.basic.neg(self)
# CASTS
# REMOVED THESE BECAUSE PYTHON appears to require __int__ to return
# an int. -JB 20081112
# def __int__(self): return convert_to_int32(self)
# def __float__(self): return convert_to_float64(self)
# def __complex__(self): return convert_to_complex128(self)
# COMPARISONS
_is_nonzero = True
def __lt__(self, other):
rval = theano.tensor.basic.lt(self, other)
rval._is_nonzero = False
return rval
def __le__(self, other):
rval = theano.tensor.basic.le(self, other)
rval._is_nonzero = False
return rval
def __gt__(self, other):
rval = theano.tensor.basic.gt(self, other)
rval._is_nonzero = False
return rval
def __ge__(self, other):
rval = theano.tensor.basic.ge(self, other)
rval._is_nonzero = False
return rval
def __nonzero__(self):
# Python 2.x
return self.__bool__()
def __bool__(self):
# This is meant to prohibit stuff like a < b < c, which is internally
# implemented as (a < b) and (b < c). The trouble with this is the
# side-effect that checking for a non-NULL a by typing "if a: ..."
# uses the same __nonzero__ method. We want these both to work, but
# it seems impossible. Currently, all vars evaluate to nonzero except
# the return values of comparison operators, which raise this
# exception. If you can think of a better solution, go for it!
#
# __bool__ is Python 3.x data model. __nonzero__ is Python 2.x.
if self._is_nonzero:
return True
else:
raise TypeError(
"Variables do not support boolean operations."
)
# BITWISE
def __invert__(self):
return theano.tensor.basic.invert(self)
def __and__(self, other):
return theano.tensor.basic.and_(self, other)
def __or__(self, other):
return theano.tensor.basic.or_(self, other)
def __xor__(self, other):
return theano.tensor.basic.xor(self, other)
def __rand__(self, other):
return theano.tensor.basic.and_(other, self)
def __ror__(self, other):
return theano.tensor.basic.or_(other, self)
def __rxor__(self, other):
return theano.tensor.basic.xor(other, self)
# def __iand__(self, other):
# return _and_inplace(self, other)
#
# def __ior__(self, other):
# return _or_inplace(self, other)
#
# def __ixor__(self, other):
# return _xor_inplace(self, other)
# ARITHMETIC - NORMAL
def __add__(self, other):
try:
return theano.tensor.basic.add(self, other)
# We should catch the minimum number of exception here.
# Otherwise this will convert error when Theano flags
# compute_test_value is used
# Evidently, we need to catch NotImplementedError
# TypeError from as_tensor_variable are caught in Elemwise.make_node
# Oterwise TensorVariable * SparseVariable won't work!
except (NotImplementedError, AsTensorError):
# We must return NotImplemented and not an
# NotImplementedError or raise an NotImplementedError.
# That way python will give a good error message like this
# `TypeError: unsupported operand type(s) for +:
# 'TensorVariable' and 'TensorVariable'`
return NotImplemented
def __sub__(self, other):
# See explanation in __add__ for the error catched
# and the return value in that case
try:
return theano.tensor.basic.sub(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __mul__(self, other):
# See explanation in __add__ for the error catched
# and the return value in that case
try:
return theano.tensor.mul(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __div__(self, other):
# See explanation in __add__ for the error catched
# and the return value in that case
try:
return theano.tensor.basic.div_proxy(self, other)
except IntegerDivisionError:
# This is to raise the exception that occurs when trying to divide
# two integer arrays (currently forbidden).
raise
except (NotImplementedError, AsTensorError):
return NotImplemented
if PY3:
__truediv__ = __div__
def __pow__(self, other):
# See explanation in __add__ for the error catched
# adn the return value in that case
try:
return theano.tensor.basic.pow(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __mod__(self, other):
# See explanation in __add__ for the error catched
# adn the return value in that case
try:
return theano.tensor.basic.mod_check(self, other)
except ComplexError:
# This is to raise the exception that occurs when trying to compute
# x % y with either x or y a complex number.
raise
except (NotImplementedError, AsTensorError):
return NotImplemented
def __divmod__(self, other):
return theano.tensor.basic.divmod(self, other)
def __truediv__(self, other):
return theano.tensor.basic.true_div(self, other)
def __floordiv__(self, other):
return theano.tensor.basic.floor_div(self, other)
def __rtruediv__(self, other):
return theano.tensor.basic.true_div(other, self)
def __rfloordiv__(self, other):
return theano.tensor.basic.floor_div(other, self)
# DO NOT USE THESE BECAUSE INPLACE OPS SHOULD BE INSERTED
# BY OPTIMIZATIONS ONLY
# ARITHMETIC - INPLACE
# def __iadd__(self, other):
# return _add_inplace(self, other)
# def __isub__(self, other):
# return _sub_inplace(self, other)
#
# def __imul__(self, other):
# return _mul_inplace(self, other)
#
# def __idiv__(self, other):
# return _div_inplace(self, other)
#
# def __ipow__(self, other):
# return _pow_inplace(self, other)
# ARITHMETIC - RIGHT-OPERAND
def __radd__(self, other):
return theano.tensor.basic.add(other, self)
def __rsub__(self, other):
return theano.tensor.basic.sub(other, self)
def __rmul__(self, other):
return theano.tensor.basic.mul(other, self)
def __rdiv__(self, other):
return theano.tensor.basic.div_proxy(other, self)
def __rmod__(self, other):
return theano.tensor.basic.mod(other, self)
def __rdivmod__(self, other):
return theano.tensor.basic.divmod(other, self)
def __rpow__(self, other):
return theano.tensor.basic.pow(other, self)
# TRANSPOSE
T = property(lambda self: theano.tensor.basic.transpose(self))
def transpose(self, *axes):
"""
Returns
-------
object
`tensor.transpose(self, axes)` or `tensor.transpose(self, axes[0])`.
If only one `axes` argument is provided and it is iterable, then it is
assumed to be the entire axes tuple, and passed intact to
tensor.transpose.
"""
if len(axes) == 0:
return theano.tensor.basic.transpose(self)
try:
iter(axes[0])
iterable = True
except TypeError:
iterable = False
if len(axes) == 1 and iterable:
return theano.tensor.basic.transpose(self, axes[0])
else:
return theano.tensor.basic.transpose(self, axes)
shape = property(lambda self: theano.tensor.basic.shape(self))
size = property(lambda self: self.shape[0] if self.ndim == 1 else
theano.tensor.basic.prod(self.shape))
# We can't implement __len__ to provide a better error message.
def any(self, axis=None, keepdims=False):
return theano.tensor.basic.any(self, axis=axis, keepdims=keepdims)
def all(self, axis=None, keepdims=False):
return theano.tensor.basic.all(self, axis=axis, keepdims=keepdims)
# Otherwise TensorVariable[:-1] does not work as Python 2.5.1 calls
# __len__ before calling __getitem__. It also does not catch the raised
# Exception!
# def __len__(self):
# # We can't implement __len__ as Python requests that this
# # function returns an integer >=0
# raise Exception("Theano Variables can't work with len(Theano "
# "Variable) due to Python restriction. You can use "
# "TheanoVariable.shape[0] instead.")
def reshape(self, shape, ndim=None):
"""Return a reshaped view/copy of this variable.
Parameters
----------
shape
Something that can be converted to a symbolic vector of integers.
ndim
The length of the shape. Passing None here means for
Theano to try and guess the length of `shape`.
.. warning:: This has a different signature than numpy's
ndarray.reshape!
In numpy you do not need to wrap the shape arguments
in a tuple, in theano you do need to.
"""
if ndim is not None:
if not isinstance(ndim, integer_types):
raise ValueError("Expected ndim to be an integer, is " +
str(type(ndim)))
return theano.tensor.basic.reshape(self, shape, ndim=ndim)
def dimshuffle(self, *pattern):
"""
Reorder the dimensions of this variable, optionally inserting
broadcasted dimensions.
Parameters
----------
pattern
List/tuple of int mixed with 'x' for broadcastable dimensions.
Examples
--------
For example, to create a 3D view of a [2D] matrix, call
``dimshuffle([0,'x',1])``. This will create a 3D view such that the
middle dimension is an implicit broadcasted dimension. To do the same
thing on the transpose of that matrix, call ``dimshuffle([1, 'x', 0])``.
Notes
-----
This function supports the pattern passed as a tuple, or as a
variable-length argument (e.g. ``a.dimshuffle(pattern)`` is equivalent
to ``a.dimshuffle(*pattern)`` where ``pattern`` is a list/tuple of ints
mixed with 'x' characters).
See Also
--------
DimShuffle
"""
if (len(pattern) == 1) and (isinstance(pattern[0], (list, tuple))):
pattern = pattern[0]
op = theano.tensor.basic.DimShuffle(list(self.type.broadcastable),
pattern)
return op(self)
def flatten(self, ndim=1):
return theano.tensor.basic.flatten(self, ndim)
def ravel(self):
return theano.tensor.basic.flatten(self)
def diagonal(self, offset=0, axis1=0, axis2=1):
return theano.tensor.basic.diagonal(self, offset, axis1, axis2)
# Transfer the data to another device
def transfer(self, target):
"""
If `target` is `'cpu'` this will transfer to a TensorType (if
not already one). Other types may define additional targets.
Parameters
----------
target : str
The desired location of the output variable
"""
return theano.tensor.transfer(self, target)
# Elemwise
def arccos(self):
return theano.tensor.arccos(self)
def arccosh(self):
return theano.tensor.arccosh(self)
def arcsin(self):
return theano.tensor.arcsin(self)
def arcsinh(self):
return theano.tensor.arcsinh(self)
def arctan(self):
return theano.tensor.arctan(self)
def arctanh(self):
return theano.tensor.arctanh(self)
def ceil(self):
return theano.tensor.ceil(self)
def cos(self):
return theano.tensor.cos(self)
def cosh(self):
return theano.tensor.cosh(self)
def deg2rad(self):
return theano.tensor.deg2rad(self)
def exp(self):
return theano.tensor.exp(self)
def exp2(self):
return theano.tensor.exp2(self)
def expm1(self):
return theano.tensor.expm1(self)
def floor(self):
return theano.tensor.floor(self)
def log(self):
return theano.tensor.log(self)
def log10(self):
return theano.tensor.log10(self)
def log1p(self):
return theano.tensor.log1p(self)
def log2(self):
return theano.tensor.log2(self)
def rad2deg(self):
return theano.tensor.rad2deg(self)
def sin(self):
return theano.tensor.sin(self)
def sinh(self):
return theano.tensor.sinh(self)
def sqrt(self):
return theano.tensor.sqrt(self)
def tan(self):
return theano.tensor.tan(self)
def tanh(self):
return theano.tensor.tanh(self)
def trunc(self):
return theano.tensor.trunc(self)
# CASTING
def astype(self, dtype):
return theano.tensor.cast(self, dtype)
# SLICING/INDEXING
def __getitem__(self, args):
def check_bool(args_el):
try:
if (isinstance(args_el, (numpy.bool_, bool)) or
args_el.dtype == 'bool'):
raise TypeError('TensorType does not support boolean '
'mask for indexing such as tensor[x==0]. '
'Instead you can use non_zeros() such as '
'tensor[(x == 0).nonzeros()]. ')
except AttributeError:
pass
if (not isinstance(args_el, theano.tensor.Variable) and
isinstance(args_el, collections.Iterable)):
for el in args_el:
check_bool(el)
check_bool(args)
if (isinstance(args, list) and
any([isinstance(a, slice) for a in args])):
pass
elif not isinstance(args, tuple):
args = args,
# Convert an Ellipsis if provided into an appropriate number of
# slice(None).
ellipses = [i
for i, index in enumerate(args)
if index is Ellipsis]
if len(ellipses) > 1:
raise IndexError(
"an index can only have a single Ellipsis (`...`)")
elif len(ellipses) == 1:
new_axes = sum(1
for index in args
if index is numpy.newaxis) # numpy.newaxis is None
ellipsis_at = ellipses[0]
args = list(args)
args[ellipsis_at: ellipsis_at + 1] = (
[slice(None)] * (self.ndim - (len(args) - 1 - new_axes)))
# Force input to be int64 datatype if input is an empty list or tuple
# Else leave it as is if it is a real number
args = tuple([numpy.array(inp, dtype=numpy.int64)
if(inp == [] or inp == ()) else inp for inp in args])
# Convert python literals to theano constants
args = theano.tensor.subtensor.make_constant(args)
# Determine if advanced indexing is needed or not
# The logic is already in Subtensor.convert: if it succeeds,
# standard indexing is used; if it fails with
# AdvancedIndexingError, advanced indexing
advanced = False
axis = None
for i, arg in enumerate(args):
try:
if arg is not numpy.newaxis:
theano.tensor.subtensor.Subtensor.convert(arg)
except theano.tensor.subtensor.AdvancedIndexingError:
if advanced:
axis = None
break
else:
advanced = True
axis = i
if advanced:
if (axis is not None and
all(isinstance(a, slice) and
equal_slices(a, slice(None)) for a in args[:axis]) and
all(isinstance(a, slice) and
equal_slices(a, slice(None)) for a in args[axis + 1:]) and
isinstance(args[axis],
(numpy.ndarray, list,
TensorVariable, TensorConstant,
theano.tensor.sharedvar.TensorSharedVariable))):
return self.take(args[axis], axis)
else:
return theano.tensor.subtensor.advanced_subtensor(self, *args)
else:
if numpy.newaxis in args:
# None (aka np.newaxis) in numpy indexing means to add a
# broadcastable dimension, which theano traditionally did with
# the dimshuffle op. The following code converts numpy-style
# indexing on self to traditional [read: implemented] theano
# indexing on a dimshuffled view of self.
counter = 0
pattern = []
new_args = []
for arg in args:
if arg == numpy.newaxis:
pattern.append('x')
new_args.append(slice(None, None, None))
else:
pattern.append(counter)
counter += 1
new_args.append(arg)
view = self.dimshuffle(pattern)
full_slices = True
for arg in new_args:
# We can't do arg == slice(None, None, None) as in
# Python 2.7, this call __lt__ if we have a slice
# with some symbolic variable.
if not (isinstance(arg, slice) and
arg.start is None and
arg.stop is None and
arg.step is None):
full_slices = False
if full_slices:
return view
else:
return view.__getitem__(tuple(new_args))
else:
return theano.tensor.subtensor.Subtensor(args)(
self, *theano.tensor.subtensor.Subtensor.collapse(
args,
lambda entry: isinstance(entry, Variable)))
def take(self, indices, axis=None, mode='raise'):
return theano.tensor.subtensor.take(self, indices, axis, mode)
# COPYING
def copy(self, name=None):
"""Return a symbolic copy and optionally assign a name.
Does not copy the tags.
"""
copied_variable = theano.tensor.basic.tensor_copy(self)
copied_variable.name = name
return copied_variable
def __iter__(self):
try:
for i in xrange(theano.tensor.basic.get_vector_length(self)):
yield self[i]
except TypeError:
# This prevents accidental iteration via builtin.sum(self)
raise TypeError(('TensorType does not support iteration. '
'Maybe you are using builtin.sum instead of '
'theano.tensor.sum? (Maybe .max?)'))
# CONVENIENT ACCESS TO TYPE PROPERTIES
ndim = property(lambda self: self.type.ndim)
"""The rank of this tensor."""
broadcastable = property(lambda self: self.type.broadcastable)
"""
The broadcastable signature of this tensor.
See Also
--------
broadcasting
"""
dtype = property(lambda self: self.type.dtype)
"""The dtype of this tensor."""
# extra pseudo-operator symbols
def __dot__(left, right):
return theano.tensor.basic.dot(left, right)
def __rdot__(right, left):
return theano.tensor.basic.dot(left, right)
dot = __dot__
def sum(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
"""See `theano.tensor.sum`."""
return theano.tensor.basic.sum(self, axis=axis,
dtype=dtype, keepdims=keepdims,
acc_dtype=acc_dtype)
def prod(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
"""See `theano.tensor.prod`."""
return theano.tensor.basic.prod(self, axis=axis,
dtype=dtype, keepdims=keepdims,
acc_dtype=acc_dtype)
def norm(self, L, axis=None, keepdims=False):
if L == 0:
raise NotImplementedError()
if numpy.isinf(L):
raise NotImplementedError()
# optimizations will/should catch cases like L=1, L=2
y = theano.tensor.basic.pow(
theano.tensor.basic.pow(
theano.tensor.basic.abs_(self), L).sum(axis=axis), 1.0 / L)
if keepdims:
return theano.tensor.basic.makeKeepDims(self, y, axis)
else:
return y
def mean(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
"""See `theano.tensor.mean`."""
return theano.tensor.basic.mean(self, axis=axis,
dtype=dtype, keepdims=keepdims,
acc_dtype=acc_dtype)
def var(self, axis=None, ddof=0, keepdims=False, corrected=False):
"""See `theano.tensor.var`."""
return theano.tensor.basic.var(self, axis=axis, ddof=ddof,
keepdims=keepdims, corrected=corrected)
def std(self, axis=None, ddof=0, keepdims=False, corrected=False):
"""See `theano.tensor.std`."""
return theano.tensor.basic.std(self, axis=axis, ddof=ddof,
keepdims=keepdims, corrected=corrected)
def min(self, axis=None, keepdims=False):
"""See `theano.tensor.min`."""
return theano.tensor.basic.min(self, axis, keepdims=keepdims)
def max(self, axis=None, keepdims=False):
"""See `theano.tensor.max`."""
return theano.tensor.basic.max(self, axis, keepdims=keepdims)
def argmin(self, axis=None, keepdims=False):
"""See `theano.tensor.argmin`."""
return theano.tensor.basic.argmin(self, axis, keepdims=keepdims)
def argmax(self, axis=None, keepdims=False):
"""See `theano.tensor.argmax`."""
return theano.tensor.basic.argmax(self, axis, keepdims=keepdims)
def nonzero(self, return_matrix=False):
"""See `theano.tensor.nonzero`."""
return theano.tensor.basic.nonzero(self, return_matrix=return_matrix)
def nonzero_values(self):
"""See `theano.tensor.nonzero_values`."""
return theano.tensor.basic.nonzero_values(self)
def sort(self, axis=-1, kind='quicksort', order=None):
"""See `theano.tensor.sort`."""
return theano.tensor.sort(self, axis, kind, order)
def argsort(self, axis=-1, kind='quicksort', order=None):
"""See `theano.tensor.argsort`."""
return theano.tensor.argsort(self, axis, kind, order)
def clip(self, a_min, a_max):
"Clip (limit) the values in an array."
return theano.tensor.basic.clip(self, a_min, a_max)
def conj(self):
"""See `theano.tensor.conj`."""
return theano.tensor.basic.conj(self)
conjugate = conj
def repeat(self, repeats, axis=None):
"""See `theano.tensor.repeat`."""
return theano.tensor.extra_ops.repeat(self, repeats, axis)
def round(self, mode=None):
"""See `theano.tensor.round`."""
return theano.tensor.basic.round(self, mode)
def trace(self):
return theano.tensor.nlinalg.trace(self)
# TO TRUMP NUMPY OPERATORS
__array_priority__ = 1000
def get_scalar_constant_value(self):
return theano.tensor.basic.get_scalar_constant_value(self)
def zeros_like(model, dtype=None):
return theano.tensor.basic.zeros_like(model, dtype=dtype)
def ones_like(model, dtype=None):
return theano.tensor.basic.ones_like(model, dtype=dtype)
def cumsum(self, axis=None):
return theano.tensor.extra_ops.cumsum(self, axis)
def cumprod(self, axis=None):
return theano.tensor.extra_ops.cumprod(self, axis)
def searchsorted(self, v, side='left', sorter=None):
return theano.tensor.extra_ops.searchsorted(self, v, side, sorter)
def ptp(self, axis=None):
"""See 'theano.tensor.ptp'."""
return theano.tensor.ptp(self, axis)
def swapaxes(self, axis1, axis2):
"""
Return 'tensor.swapaxes(self, axis1, axis2).
If a matrix is provided with the right axes, its transpose
will be returned.
"""
return theano.tensor.basic.swapaxes(self, axis1, axis2)
def fill(self, value):
"""Fill inputted tensor with the assigned value."""
return theano.tensor.basic.fill(self, value)
def choose(self, a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose
from.
"""
return theano.tensor.basic.choose(self, a, choices, out=None,
mode='raise')
def squeeze(self):
"""
Remove broadcastable dimensions from the shape of an array.
It returns the input array, but with the broadcastable dimensions
removed. This is always `x` itself or a view into `x`.
"""
return theano.tensor.extra_ops.squeeze(self)
def compress(self, a, axis=None):
"""Return selected slices only."""
return theano.tensor.extra_ops.compress(self, a, axis=axis)
class TensorVariable(_tensor_py_operators, Variable):
"""
Subclass to add the tensor operators to the basic `Variable` class.
"""
def __init__(self, type, owner=None, index=None, name=None):
super(TensorVariable, self).__init__(type, owner=owner,
index=index, name=name)
if (config.warn_float64 != 'ignore' and type.dtype == 'float64'):
msg = ('You are creating a TensorVariable '
'with float64 dtype. You requested an action via '
'the Theano flag warn_float64={ignore,warn,raise,pdb}.')
if config.warn_float64 == "warn":
# Get the user stack. We don't want function inside the
# tensor and gof directory to be shown to the user.
x = tb.extract_stack()
nb_rm = 0
while x:
file_path = x[-1][0]
rm = False
for p in ["theano/tensor/", "theano\\tensor\\",
"theano/gof/", "theano\\tensor\\"]:
if p in file_path:
x = x[:-1]
nb_rm += 1
rm = True
break
if not rm:
break
warnings.warn(msg, stacklevel=1 + nb_rm)
elif config.warn_float64 == "raise":
raise Exception(msg)
elif config.warn_float64 == 'pdb':
import pdb
pdb.set_trace()
TensorType.Variable = TensorVariable
class TensorConstantSignature(tuple):
"""
A Signature object for comparing TensorConstant instances.
An instance is a pair: (Type instance, ndarray).
"""
def __eq__(self, other):
if type(self) != type(other):
return False
try:
(t0, d0), (t1, d1) = self, other
except Exception:
return False
# N.B. compare shape to ensure no broadcasting in ==
if t0 != t1 or d0.shape != d1.shape:
return False
self.no_nan # Ensure has_nan is computed.
# Note that in the comparisons below, the elementwise comparisons
# come last because they are the most expensive checks.
if self.has_nan:
other.no_nan # Ensure has_nan is computed.
return (other.has_nan and
self.sum == other.sum and
(self.no_nan.mask == other.no_nan.mask).all() and
# Note that the second test below (==) may crash e.g. for
# a single scalar NaN value, so we do not run it when all
# values are missing.
(self.no_nan.mask.all() or
(self.no_nan == other.no_nan).all()))
else:
# Simple case where we do not need to worry about NaN values.
# (note that if there are NaN values in d1, this will return
# False, which is why we do not bother with testing `other.has_nan`
# here).
return (self.sum == other.sum) and numpy.all(d0 == d1)
def __hash__(self):
t, d = self
return hashtype(self) ^ hash(t) ^ hash(d.shape) ^ hash(self.sum)
def theano_hash(self):
_, d = self
return hash_from_ndarray(d)
def _get_sum(self):
"""Compute sum of non NaN / Inf values in the array."""
try:
return self._sum
except AttributeError:
self._sum = self.no_nan.sum()
# The following 2 lines are needede as in Python 3.3 with NumPy
# 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.
if type(self._sum) is numpy.memmap:
self._sum = numpy.asarray(self._sum).item()
if self.has_nan and self.no_nan.mask.all():
# In this case the sum is not properly computed by numpy.
self._sum = 0
if numpy.isinf(self._sum) or numpy.isnan(self._sum):
# NaN may happen when there are both -inf and +inf values.
if self.has_nan:
# Filter both NaN and Inf values.
mask = self.no_nan.mask + numpy.isinf(self[1])
else:
# Filter only Inf values.
mask = numpy.isinf(self[1])
if mask.all():
self._sum = 0
else:
self._sum = numpy.ma.masked_array(self[1], mask).sum()
# At this point there should be no more NaN.
assert not numpy.isnan(self._sum)
return self._sum
sum = property(_get_sum)
def _get_no_nan(self):
try:
return self._no_nan
except AttributeError:
nan_mask = numpy.isnan(self[1])
if nan_mask.any():
self._no_nan = numpy.ma.masked_array(self[1], nan_mask)
self.has_nan = True
else:
self._no_nan = self[1]
self.has_nan = False
return self._no_nan
no_nan = property(_get_no_nan)
class TensorConstant(_tensor_py_operators, Constant):
"""Subclass to add the tensor operators to the basic `Constant` class.
To create a TensorConstant, use the `constant` function in this module.
"""
def __init__(self, type, data, name=None):
Constant.__init__(self, type, data, name)
self.tag.unique_value = None
if isinstance(data, numpy.ndarray) and data.ndim > 0:
flat_data = data.ravel()
if flat_data.shape[0]:
if (flat_data == flat_data[0]).all():
self.tag.unique_value = flat_data[0]
def __str__(self):
if self.tag.unique_value is not None:
name = "%s of %s" % (str(self.data.shape),
str(self.tag.unique_value))
else:
name = "%s" % self.data
if len(name) > 20:
name = name[:10] + ".." + name[-10:]
return "TensorConstant{%s}" % name
def signature(self):
return TensorConstantSignature((self.type, self.data))
def equals(self, other):
# Override Contant.equals to allow to compare with
# numpy.ndarray, and python type.
if isinstance(other, (numpy.ndarray, int, float)):
# Make a TensorConstant to be able to compare
other = theano.tensor.basic.constant(other)
return (isinstance(other, TensorConstant) and
self.signature() == other.signature())
def __copy__(self):
# We need to do this to remove the cached attribute
return type(self)(self.type, self.data, self.name)
def __deepcopy__(self, memo):
# We need to do this to remove the cached attribute
return type(self)(copy.deepcopy(self.type, memo),
copy.deepcopy(self.data, memo),
copy.deepcopy(self.name, memo))
TensorType.Constant = TensorConstant
| 35.016512 | 80 | 0.579529 | from __future__ import absolute_import, print_function, division
import collections
import copy
import traceback as tb
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import theano
from theano.compat import PY3
from theano.scalar import ComplexError, IntegerDivisionError
from theano.gof import Constant, Variable
from theano.gof.utils import hashtype
from theano.tensor.utils import hash_from_ndarray
from theano.tensor.type import TensorType
from theano.configparser import config
def equal_slices(s1, s2):
return (s1.start == s2.start and
s1.stop == s2.stop and
s1.step == s2.step)
class AsTensorError(TypeError):
pass
class _tensor_py_operators(object):
def __abs__(self):
return theano.tensor.basic.abs_(self)
def __neg__(self):
return theano.tensor.basic.neg(self)
_is_nonzero = True
def __lt__(self, other):
rval = theano.tensor.basic.lt(self, other)
rval._is_nonzero = False
return rval
def __le__(self, other):
rval = theano.tensor.basic.le(self, other)
rval._is_nonzero = False
return rval
def __gt__(self, other):
rval = theano.tensor.basic.gt(self, other)
rval._is_nonzero = False
return rval
def __ge__(self, other):
rval = theano.tensor.basic.ge(self, other)
rval._is_nonzero = False
return rval
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
if self._is_nonzero:
return True
else:
raise TypeError(
"Variables do not support boolean operations."
)
def __invert__(self):
return theano.tensor.basic.invert(self)
def __and__(self, other):
return theano.tensor.basic.and_(self, other)
def __or__(self, other):
return theano.tensor.basic.or_(self, other)
def __xor__(self, other):
return theano.tensor.basic.xor(self, other)
def __rand__(self, other):
return theano.tensor.basic.and_(other, self)
def __ror__(self, other):
return theano.tensor.basic.or_(other, self)
def __rxor__(self, other):
return theano.tensor.basic.xor(other, self)
def __add__(self, other):
try:
return theano.tensor.basic.add(self, other)
except (NotImplementedError, AsTensorError):
# We must return NotImplemented and not an
# NotImplementedError or raise an NotImplementedError.
# That way python will give a good error message like this
# `TypeError: unsupported operand type(s) for +:
# 'TensorVariable' and 'TensorVariable'`
return NotImplemented
def __sub__(self, other):
# See explanation in __add__ for the error catched
# and the return value in that case
try:
return theano.tensor.basic.sub(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __mul__(self, other):
# See explanation in __add__ for the error catched
# and the return value in that case
try:
return theano.tensor.mul(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __div__(self, other):
# See explanation in __add__ for the error catched
# and the return value in that case
try:
return theano.tensor.basic.div_proxy(self, other)
except IntegerDivisionError:
# This is to raise the exception that occurs when trying to divide
# two integer arrays (currently forbidden).
raise
except (NotImplementedError, AsTensorError):
return NotImplemented
if PY3:
__truediv__ = __div__
def __pow__(self, other):
# See explanation in __add__ for the error catched
# adn the return value in that case
try:
return theano.tensor.basic.pow(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __mod__(self, other):
# See explanation in __add__ for the error catched
# adn the return value in that case
try:
return theano.tensor.basic.mod_check(self, other)
except ComplexError:
# This is to raise the exception that occurs when trying to compute
# x % y with either x or y a complex number.
raise
except (NotImplementedError, AsTensorError):
return NotImplemented
    # ARITHMETIC (cont.): divmod and explicit true/floor division.  Each
    # method delegates to the graph-building function of the same name in
    # ``theano.tensor.basic``.
    def __divmod__(self, other):
        return theano.tensor.basic.divmod(self, other)
    # NOTE(review): this def supersedes the earlier ``__truediv__ = __div__``
    # alias, since later bindings in a class body win.
    def __truediv__(self, other):
        return theano.tensor.basic.true_div(self, other)
    def __floordiv__(self, other):
        return theano.tensor.basic.floor_div(self, other)
    def __rtruediv__(self, other):
        return theano.tensor.basic.true_div(other, self)
    def __rfloordiv__(self, other):
        return theano.tensor.basic.floor_div(other, self)
    # DO NOT USE THESE BECAUSE INPLACE OPS SHOULD BE INSERTED
    # BY OPTIMIZATIONS ONLY
    # ARITHMETIC - INPLACE
    # def __iadd__(self, other):
    #    return _add_inplace(self, other)
    # def __isub__(self, other):
    #    return _sub_inplace(self, other)
    #
    # def __imul__(self, other):
    #    return _mul_inplace(self, other)
    #
    # def __idiv__(self, other):
    #    return _div_inplace(self, other)
    #
    # def __ipow__(self, other):
    #    return _pow_inplace(self, other)
    # ARITHMETIC - RIGHT-OPERAND
    # Reflected operators, invoked by Python when the left operand does not
    # know how to combine itself with a tensor.
    def __radd__(self, other):
        return theano.tensor.basic.add(other, self)
    def __rsub__(self, other):
        return theano.tensor.basic.sub(other, self)
    def __rmul__(self, other):
        return theano.tensor.basic.mul(other, self)
    def __rdiv__(self, other):
        return theano.tensor.basic.div_proxy(other, self)
    def __rmod__(self, other):
        return theano.tensor.basic.mod(other, self)
    def __rdivmod__(self, other):
        return theano.tensor.basic.divmod(other, self)
    def __rpow__(self, other):
        return theano.tensor.basic.pow(other, self)
    # TRANSPOSE
    # ``x.T`` returns a transposed view, as in numpy.
    T = property(lambda self: theano.tensor.basic.transpose(self))
def transpose(self, *axes):
if len(axes) == 0:
return theano.tensor.basic.transpose(self)
try:
iter(axes[0])
iterable = True
except TypeError:
iterable = False
if len(axes) == 1 and iterable:
return theano.tensor.basic.transpose(self, axes[0])
else:
return theano.tensor.basic.transpose(self, axes)
    # ``x.shape`` is a symbolic expression, not a concrete tuple.
    shape = property(lambda self: theano.tensor.basic.shape(self))
    # Total number of elements, again symbolic.
    size = property(lambda self: self.shape[0] if self.ndim == 1 else
                    theano.tensor.basic.prod(self.shape))
    # We can't implement __len__ to provide a better error message.
    def any(self, axis=None, keepdims=False):
        return theano.tensor.basic.any(self, axis=axis, keepdims=keepdims)
    def all(self, axis=None, keepdims=False):
        return theano.tensor.basic.all(self, axis=axis, keepdims=keepdims)
ise Exception("Theano Variables can't work with len(Theano "
def reshape(self, shape, ndim=None):
if ndim is not None:
if not isinstance(ndim, integer_types):
raise ValueError("Expected ndim to be an integer, is " +
str(type(ndim)))
return theano.tensor.basic.reshape(self, shape, ndim=ndim)
def dimshuffle(self, *pattern):
if (len(pattern) == 1) and (isinstance(pattern[0], (list, tuple))):
pattern = pattern[0]
op = theano.tensor.basic.DimShuffle(list(self.type.broadcastable),
pattern)
return op(self)
    # SHAPE MANIPULATION AND ELEMENTWISE MATH
    # Every method below is a thin delegation to the function of the same
    # (or closely related) name in ``theano.tensor``; see those functions
    # for full documentation.
    def flatten(self, ndim=1):
        return theano.tensor.basic.flatten(self, ndim)
    def ravel(self):
        return theano.tensor.basic.flatten(self)
    def diagonal(self, offset=0, axis1=0, axis2=1):
        return theano.tensor.basic.diagonal(self, offset, axis1, axis2)
    def transfer(self, target):
        return theano.tensor.transfer(self, target)
    def arccos(self):
        return theano.tensor.arccos(self)
    def arccosh(self):
        return theano.tensor.arccosh(self)
    def arcsin(self):
        return theano.tensor.arcsin(self)
    def arcsinh(self):
        return theano.tensor.arcsinh(self)
    def arctan(self):
        return theano.tensor.arctan(self)
    def arctanh(self):
        return theano.tensor.arctanh(self)
    def ceil(self):
        return theano.tensor.ceil(self)
    def cos(self):
        return theano.tensor.cos(self)
    def cosh(self):
        return theano.tensor.cosh(self)
    def deg2rad(self):
        return theano.tensor.deg2rad(self)
    def exp(self):
        return theano.tensor.exp(self)
    def exp2(self):
        return theano.tensor.exp2(self)
    def expm1(self):
        return theano.tensor.expm1(self)
    def floor(self):
        return theano.tensor.floor(self)
    def log(self):
        return theano.tensor.log(self)
    def log10(self):
        return theano.tensor.log10(self)
    def log1p(self):
        return theano.tensor.log1p(self)
    def log2(self):
        return theano.tensor.log2(self)
    def rad2deg(self):
        return theano.tensor.rad2deg(self)
    def sin(self):
        return theano.tensor.sin(self)
    def sinh(self):
        return theano.tensor.sinh(self)
    def sqrt(self):
        return theano.tensor.sqrt(self)
    def tan(self):
        return theano.tensor.tan(self)
    def tanh(self):
        return theano.tensor.tanh(self)
    def trunc(self):
        return theano.tensor.trunc(self)
    def astype(self, dtype):
        # Symbolic cast (builds a graph node), not an eager conversion.
        return theano.tensor.cast(self, dtype)
    def __getitem__(self, args):
        """Return a symbolic subtensor.

        Supports numpy-style indexing: integers, slices, ``Ellipsis``,
        ``numpy.newaxis``, and "advanced" integer-array indices.  Boolean
        masks are explicitly rejected with a ``TypeError``.
        """
        def check_bool(args_el):
            # Recursively reject boolean indices/masks anywhere in ``args``.
            try:
                if (isinstance(args_el, (numpy.bool_, bool)) or
                    args_el.dtype == 'bool'):
                    raise TypeError('TensorType does not support boolean '
                                    'mask for indexing such as tensor[x==0]. '
                                    'Instead you can use non_zeros() such as '
                                    'tensor[(x == 0).nonzeros()]. ')
            except AttributeError:
                pass
            # NOTE(review): ``collections.Iterable`` moved to
            # ``collections.abc`` and was removed in Python 3.10; this code
            # targets older Pythons.
            if (not isinstance(args_el, theano.tensor.Variable) and
                isinstance(args_el, collections.Iterable)):
                for el in args_el:
                    check_bool(el)
        check_bool(args)
        if (isinstance(args, list) and
            any([isinstance(a, slice) for a in args])):
            pass
        elif not isinstance(args, tuple):
            # Normalize a single index into a 1-tuple.
            args = args,
        # Locate any Ellipsis entries; at most one is allowed.
        ellipses = [i
                    for i, index in enumerate(args)
                    if index is Ellipsis]
        if len(ellipses) > 1:
            raise IndexError(
                "an index can only have a single Ellipsis (`...`)")
        elif len(ellipses) == 1:
            # Expand the Ellipsis into the right number of full slices,
            # discounting any ``newaxis`` entries from the arg count.
            new_axes = sum(1
                           for index in args
                           if index is numpy.newaxis)
            ellipsis_at = ellipses[0]
            args = list(args)
            args[ellipsis_at: ellipsis_at + 1] = (
                [slice(None)] * (self.ndim - (len(args) - 1 - new_axes)))
        # Replace empty lists/tuples by empty int64 arrays so they act as
        # (empty) advanced integer indices.
        args = tuple([numpy.array(inp, dtype=numpy.int64)
                      if(inp == [] or inp == ()) else inp for inp in args])
        args = theano.tensor.subtensor.make_constant(args)
        # Detect "advanced" (integer-array) indexing.  ``axis`` remembers
        # the position of the advanced index if there is exactly one.
        advanced = False
        axis = None
        for i, arg in enumerate(args):
            try:
                if arg is not numpy.newaxis:
                    theano.tensor.subtensor.Subtensor.convert(arg)
            except theano.tensor.subtensor.AdvancedIndexingError:
                if advanced:
                    axis = None
                    break
                else:
                    advanced = True
                    axis = i
        if advanced:
            # A single advanced index surrounded only by full slices is a
            # ``take`` along that axis; anything else goes through the
            # general advanced-subtensor op.
            if (axis is not None and
                all(isinstance(a, slice) and
                    equal_slices(a, slice(None)) for a in args[:axis]) and
                all(isinstance(a, slice) and
                    equal_slices(a, slice(None)) for a in args[axis + 1:]) and
                isinstance(args[axis],
                           (numpy.ndarray, list,
                            TensorVariable, TensorConstant,
                            theano.tensor.sharedvar.TensorSharedVariable))):
                return self.take(args[axis], axis)
            else:
                return theano.tensor.subtensor.advanced_subtensor(self, *args)
        else:
            if numpy.newaxis in args:
                # Insert broadcastable dimensions via ``dimshuffle``, then
                # apply the remaining (basic) indices to the result.
                counter = 0
                pattern = []
                new_args = []
                for arg in args:
                    if arg == numpy.newaxis:
                        pattern.append('x')
                        new_args.append(slice(None, None, None))
                    else:
                        pattern.append(counter)
                        counter += 1
                        new_args.append(arg)
                view = self.dimshuffle(pattern)
                full_slices = True
                for arg in new_args:
                    # Python 2.7, this call __lt__ if we have a slice
                    # with some symbolic variable.
                    if not (isinstance(arg, slice) and
                            arg.start is None and
                            arg.stop is None and
                            arg.step is None):
                        full_slices = False
                if full_slices:
                    return view
                else:
                    return view.__getitem__(tuple(new_args))
            else:
                # Purely basic indexing: collapse out the non-Variable
                # entries (they are baked into the op) and pass the
                # symbolic ones as inputs.
                return theano.tensor.subtensor.Subtensor(args)(
                    self, *theano.tensor.subtensor.Subtensor.collapse(
                        args,
                        lambda entry: isinstance(entry, Variable)))
    def take(self, indices, axis=None, mode='raise'):
        """Select elements along ``axis``; see ``numpy.take``."""
        return theano.tensor.subtensor.take(self, indices, axis, mode)
    # COPYING
    def copy(self, name=None):
        """Return a symbolic copy of this tensor, optionally renamed.

        The copy is a new graph node (an identity op applied to ``self``),
        not an eager memory copy.
        """
        copied_variable = theano.tensor.basic.tensor_copy(self)
        copied_variable.name = name
        return copied_variable
    def __iter__(self):
        """Yield entries of a vector whose length is statically known.

        Raises ``TypeError`` when the length cannot be determined.
        """
        try:
            for i in xrange(theano.tensor.basic.get_vector_length(self)):
                yield self[i]
        except TypeError:
            # This prevents accidental iteration via builtin.sum(self)
            raise TypeError(('TensorType does not support iteration. '
                             'Maybe you are using builtin.sum instead of '
                             'theano.tensor.sum? (Maybe .max?)'))
# CONVENIENT ACCESS TO TYPE PROPERTIES
ndim = property(lambda self: self.type.ndim)
broadcastable = property(lambda self: self.type.broadcastable)
dtype = property(lambda self: self.type.dtype)
# extra pseudo-operator symbols
def __dot__(left, right):
return theano.tensor.basic.dot(left, right)
def __rdot__(right, left):
return theano.tensor.basic.dot(left, right)
dot = __dot__
def sum(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
return theano.tensor.basic.sum(self, axis=axis,
dtype=dtype, keepdims=keepdims,
acc_dtype=acc_dtype)
def prod(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
return theano.tensor.basic.prod(self, axis=axis,
dtype=dtype, keepdims=keepdims,
acc_dtype=acc_dtype)
def norm(self, L, axis=None, keepdims=False):
if L == 0:
raise NotImplementedError()
if numpy.isinf(L):
raise NotImplementedError()
# optimizations will/should catch cases like L=1, L=2
y = theano.tensor.basic.pow(
theano.tensor.basic.pow(
theano.tensor.basic.abs_(self), L).sum(axis=axis), 1.0 / L)
if keepdims:
return theano.tensor.basic.makeKeepDims(self, y, axis)
else:
return y
    # REDUCTIONS AND SEARCH OPERATIONS -- thin delegations to
    # ``theano.tensor.basic``; see those functions for documentation.
    def mean(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
        return theano.tensor.basic.mean(self, axis=axis,
                                        dtype=dtype, keepdims=keepdims,
                                        acc_dtype=acc_dtype)
    def var(self, axis=None, ddof=0, keepdims=False, corrected=False):
        return theano.tensor.basic.var(self, axis=axis, ddof=ddof,
                                       keepdims=keepdims, corrected=corrected)
    def std(self, axis=None, ddof=0, keepdims=False, corrected=False):
        return theano.tensor.basic.std(self, axis=axis, ddof=ddof,
                                       keepdims=keepdims, corrected=corrected)
    def min(self, axis=None, keepdims=False):
        return theano.tensor.basic.min(self, axis, keepdims=keepdims)
    def max(self, axis=None, keepdims=False):
        return theano.tensor.basic.max(self, axis, keepdims=keepdims)
    def argmin(self, axis=None, keepdims=False):
        return theano.tensor.basic.argmin(self, axis, keepdims=keepdims)
    def argmax(self, axis=None, keepdims=False):
        return theano.tensor.basic.argmax(self, axis, keepdims=keepdims)
    def nonzero(self, return_matrix=False):
        return theano.tensor.basic.nonzero(self, return_matrix=return_matrix)
    def nonzero_values(self):
        return theano.tensor.basic.nonzero_values(self)
    # MISCELLANEOUS NUMPY-STYLE HELPERS -- thin delegations.
    def sort(self, axis=-1, kind='quicksort', order=None):
        return theano.tensor.sort(self, axis, kind, order)
    def argsort(self, axis=-1, kind='quicksort', order=None):
        return theano.tensor.argsort(self, axis, kind, order)
    def clip(self, a_min, a_max):
        return theano.tensor.basic.clip(self, a_min, a_max)
    def conj(self):
        return theano.tensor.basic.conj(self)
    conjugate = conj
    def repeat(self, repeats, axis=None):
        return theano.tensor.extra_ops.repeat(self, repeats, axis)
    def round(self, mode=None):
        return theano.tensor.basic.round(self, mode)
    def trace(self):
        return theano.tensor.nlinalg.trace(self)
    # TO TRUMP NUMPY OPERATORS
    # A high __array_priority__ makes numpy defer to this class's reflected
    # operators in mixed ndarray/tensor expressions.
    __array_priority__ = 1000
    def get_scalar_constant_value(self):
        return theano.tensor.basic.get_scalar_constant_value(self)
    def zeros_like(model, dtype=None):
        return theano.tensor.basic.zeros_like(model, dtype=dtype)
    def ones_like(model, dtype=None):
        return theano.tensor.basic.ones_like(model, dtype=dtype)
    def cumsum(self, axis=None):
        return theano.tensor.extra_ops.cumsum(self, axis)
    def cumprod(self, axis=None):
        return theano.tensor.extra_ops.cumprod(self, axis)
    def searchsorted(self, v, side='left', sorter=None):
        return theano.tensor.extra_ops.searchsorted(self, v, side, sorter)
    def ptp(self, axis=None):
        return theano.tensor.ptp(self, axis)
    def swapaxes(self, axis1, axis2):
        return theano.tensor.basic.swapaxes(self, axis1, axis2)
    def fill(self, value):
        return theano.tensor.basic.fill(self, value)
def choose(self, a, choices, out=None, mode='raise'):
return theano.tensor.basic.choose(self, a, choices, out=None,
mode='raise')
    def squeeze(self):
        """Drop broadcastable dimensions; see ``theano.tensor.extra_ops.squeeze``."""
        return theano.tensor.extra_ops.squeeze(self)
    def compress(self, a, axis=None):
        """Keep slices where ``a`` is true; see ``numpy.compress``."""
        return theano.tensor.extra_ops.compress(self, a, axis=axis)
class TensorVariable(_tensor_py_operators, Variable):
    """A ``Variable`` whose type is an instance of ``TensorType``.

    On construction, the ``warn_float64`` config flag controls what happens
    when a float64 variable is created: ``'ignore'`` (nothing), ``'warn'``
    (a warning pointing at the user's code), ``'raise'`` (an exception), or
    ``'pdb'`` (drop into the debugger).
    """
    def __init__(self, type, owner=None, index=None, name=None):
        super(TensorVariable, self).__init__(type, owner=owner,
                                             index=index, name=name)
        if (config.warn_float64 != 'ignore' and type.dtype == 'float64'):
            msg = ('You are creating a TensorVariable '
                   'with float64 dtype. You requested an action via '
                   'the Theano flag warn_float64={ignore,warn,raise,pdb}.')
            if config.warn_float64 == "warn":
                # Get the user stack. We don't want frames from inside
                # Theano itself to be blamed by the warning, so strip
                # trailing frames whose file path lies inside the theano
                # tensor/gof packages (both POSIX and Windows separators).
                x = tb.extract_stack()
                nb_rm = 0
                while x:
                    file_path = x[-1][0]
                    rm = False
                    # Bug fix: the last entry was "theano\\tensor\\"
                    # (duplicated) instead of the Windows form of
                    # "theano/gof/", so gof frames were never stripped
                    # on Windows.
                    for p in ["theano/tensor/", "theano\\tensor\\",
                              "theano/gof/", "theano\\gof\\"]:
                        if p in file_path:
                            x = x[:-1]
                            nb_rm += 1
                            rm = True
                            break
                    if not rm:
                        break
                warnings.warn(msg, stacklevel=1 + nb_rm)
            elif config.warn_float64 == "raise":
                raise Exception(msg)
            elif config.warn_float64 == 'pdb':
                import pdb
                pdb.set_trace()
# Register TensorVariable as the Variable class used by TensorType.
TensorType.Variable = TensorVariable
class TensorConstantSignature(tuple):
    """Signature ``(type, data)`` used to compare and hash TensorConstants.

    Equality must be semantic equality of the constant data, which needs
    special care for NaN (NaN != NaN elementwise) and for sums that reach
    inf: the lazily computed ``no_nan`` mask and NaN/inf-filtered ``sum``
    below make two constants with NaNs in the same positions compare (and
    hash) equal.
    """
    def __eq__(self, other):
        if type(self) != type(other):
            return False
        try:
            (t0, d0), (t1, d1) = self, other
        except Exception:
            return False
        if t0 != t1 or d0.shape != d1.shape:
            return False
        # Accessing ``no_nan`` also computes ``has_nan`` as a side effect.
        self.no_nan
        if self.has_nan:
            # Force computation of other.has_nan as well.
            other.no_nan
            # Equal iff the NaNs are in the same positions and the
            # remaining (unmasked) entries agree.
            return (other.has_nan and
                    self.sum == other.sum and
                    (self.no_nan.mask == other.no_nan.mask).all() and
                    (self.no_nan.mask.all() or
                     (self.no_nan == other.no_nan).all()))
        else:
            return (self.sum == other.sum) and numpy.all(d0 == d1)
    def __hash__(self):
        """Hash consistent with ``__eq__`` (NaN-insensitive via ``sum``)."""
        t, d = self
        return hashtype(self) ^ hash(t) ^ hash(d.shape) ^ hash(self.sum)
    def theano_hash(self):
        """Content hash of the underlying ndarray."""
        _, d = self
        return hash_from_ndarray(d)
    def _get_sum(self):
        """Lazily compute and cache the sum of the non-NaN entries."""
        try:
            return self._sum
        except AttributeError:
            self._sum = self.no_nan.sum()
            if type(self._sum) is numpy.memmap:
                # Use a regular scalar, not one backed by the memmap.
                self._sum = numpy.asarray(self._sum).item()
            if self.has_nan and self.no_nan.mask.all():
                # In this case the sum is not properly computed by numpy.
                self._sum = 0
            if numpy.isinf(self._sum) or numpy.isnan(self._sum):
                # NaN may happen when there are both -inf and +inf values.
                if self.has_nan:
                    # Filter both NaN and Inf values.
                    mask = self.no_nan.mask + numpy.isinf(self[1])
                else:
                    # Filter only Inf values.
                    mask = numpy.isinf(self[1])
                if mask.all():
                    self._sum = 0
                else:
                    self._sum = numpy.ma.masked_array(self[1], mask).sum()
                # At this point there should be no more NaN.
                assert not numpy.isnan(self._sum)
            return self._sum
    sum = property(_get_sum)
    def _get_no_nan(self):
        """Lazily compute the data with NaNs masked out; sets ``has_nan``."""
        try:
            return self._no_nan
        except AttributeError:
            nan_mask = numpy.isnan(self[1])
            if nan_mask.any():
                self._no_nan = numpy.ma.masked_array(self[1], nan_mask)
                self.has_nan = True
            else:
                self._no_nan = self[1]
                self.has_nan = False
            return self._no_nan
    no_nan = property(_get_no_nan)
class TensorConstant(_tensor_py_operators, Constant):
    """Subclass adding the tensor operators to the basic ``Constant``.

    ``tag.unique_value`` caches the single value of a non-empty constant
    array whose entries are all equal; it is ``None`` otherwise.
    """
    def __init__(self, type, data, name=None):
        Constant.__init__(self, type, data, name)
        self.tag.unique_value = None
        if isinstance(data, numpy.ndarray) and data.ndim > 0:
            flat_data = data.ravel()
            if flat_data.shape[0]:
                if (flat_data == flat_data[0]).all():
                    self.tag.unique_value = flat_data[0]
    def __str__(self):
        if self.tag.unique_value is not None:
            name = "%s of %s" % (str(self.data.shape),
                                 str(self.tag.unique_value))
        else:
            name = "%s" % self.data
        if len(name) > 20:
            # Keep the repr short for large constants.
            name = name[:10] + ".." + name[-10:]
        return "TensorConstant{%s}" % name
    def signature(self):
        """Return a signature object suitable for equality testing/hashing."""
        return TensorConstantSignature((self.type, self.data))
    def equals(self, other):
        # Override Constant.equals to allow to compare with
        # numpy.ndarray, and python type.
        if isinstance(other, (numpy.ndarray, int, float)):
            # Make a TensorConstant to be able to compare
            other = theano.tensor.basic.constant(other)
        return (isinstance(other, TensorConstant) and
                self.signature() == other.signature())
    def __copy__(self):
        # We need to do this to remove the cached attribute
        return type(self)(self.type, self.data, self.name)
    def __deepcopy__(self, memo):
        # We need to do this to remove the cached attribute
        return type(self)(copy.deepcopy(self.type, memo),
                          copy.deepcopy(self.data, memo),
                          copy.deepcopy(self.name, memo))
# Register TensorConstant as the Constant class used by TensorType.
TensorType.Constant = TensorConstant
| true | true |
1c47fc24b15fec30320a2595ca732b836170dcb9 | 58,366 | py | Python | src/sage/modular/quatalg/brandt.py | bopopescu/classic_diff_geom | 2b1d88becbc8cb30962e0995cc78e429e0f5589f | [
"BSL-1.0"
] | 2 | 2015-08-11T05:05:47.000Z | 2019-05-15T17:27:25.000Z | src/sage/modular/quatalg/brandt.py | bopopescu/classic_diff_geom | 2b1d88becbc8cb30962e0995cc78e429e0f5589f | [
"BSL-1.0"
] | null | null | null | src/sage/modular/quatalg/brandt.py | bopopescu/classic_diff_geom | 2b1d88becbc8cb30962e0995cc78e429e0f5589f | [
"BSL-1.0"
] | 1 | 2020-07-24T12:08:30.000Z | 2020-07-24T12:08:30.000Z | r"""
Brandt Modules
AUTHORS:
- Jon Bober
- Alia Hamieh
- Victoria de Quehen
- William Stein
- Gonzalo Tornaria
Introduction
============
This tutorial outlines the construction of Brandt modules in Sage. The
importance of this construction is that it provides us with a method
to compute modular forms on `\Gamma_0(N)` as outlined in Pizer's paper
[Pi]. In fact there exists a non-canonical Hecke algebra isomorphism
between the Brandt modules and a certain subspace of
`S_{2}(\Gamma_0(pM))` which contains all the newforms.
The Brandt module is the free abelian group on right ideal classes of
a quaternion order together with a natural Hecke action determined by
Brandt matrices.
Quaternion Algebras
-------------------
A quaternion algebra over `\QQ` is a central simple algebra of
dimension 4 over `\QQ`. Such an algebra `A` is said to be
ramified at a place `v` of `\QQ` if and only if `A_v=A\otimes
\QQ_v` is a division algebra. Otherwise `A` is said to be split
at `v`.
``A = QuaternionAlgebra(p)`` returns the quaternion algebra `A` over
`\QQ` ramified precisely at the places `p` and `\infty`.
``A = QuaternionAlgebra(K,a,b)`` returns a quaternion algebra with basis
`\{1,i,j,k\}` over the field `K` such that `i^2=a`, `j^2=b` and `ij=k`.
An order `R` in a quaternion algebra is a 4-dimensional lattice on `A`
which is also a subring containing the identity.
``R = A.maximal_order()`` returns a maximal order `R` in the quaternion
algebra `A.`
An Eichler order `\mathcal{O}` in a quaternion algebra is the
intersection of two maximal orders. The level of `\mathcal{O}` is its
index in any maximal order containing it.
``O = A.order_of_level_N`` returns an Eichler order `\mathcal{O}` in `A`
of level `N` where `p` does not divide `N`.
A right `\mathcal{O}`-ideal `I` is a lattice on `A` such that
`I_p=a_p\mathcal{O}` (for some `a_p\in A_p^*`) for all `p<\infty`. Two
right `\mathcal{O}`-ideals `I` and `J` are said to belong to the same
class if `I=aJ` for some `a \in A^*`. (Left `\mathcal{O}`-ideals are
defined in a similar fashion.)
The right order of `I` is defined to be the set of elements in `A`
which fix `I` under right multiplication.
``right_order(R, basis)`` returns the right order of `I` in `R` given a
basis for the left ideal `I` contained in the maximal order `R`.
ideal_classes(self) returns a tuple of all right ideal classes in self
which, for the purpose of constructing the Brandt module B(p,M), is
taken to be an Eichler order of level M.
The implementation of this method is especially interesting. It
depends on the construction of a Hecke module defined as a free
abelian group on right ideal classes of a quaternion algebra with the
following action
.. math:
T_n[I]=\sum_{\phi} [J]
where `(n,pM)=1` and the sum is over cyclic `\mathcal{O}`-module
homomorphisms `\phi :I\rightarrow J ` of degree `n` up to isomorphism
of `J`. Equivalently one can sum over the inclusions of the submodules
`J \rightarrow n^{-1}I`. The rough idea is to start with the trivial
ideal class containing the order `\mathcal{O}` itself. Using the
method cyclic_submodules(self, I, p) one computes `T_p([\mathcal{O}])`
for some prime integer `p` not dividing the level of the order
`\mathcal{O}`. Apply this method repeatedly and test for equivalence
among resulting ideals. A theorem of Serre asserts that one gets a
complete set of ideal class representatives after a finite number of
repetitions.
One can prove that two ideals `I` and `J` are equivalent if and only
if there exists an element `\alpha \in I \overline{J}` such
`N(\alpha)=N(I)N(J)`.
is_equivalent(I,J) returns true if `I` and `J` are equivalent. This
method first compares the theta series of `I` and `J`. If they are the
same, it computes the theta series of the lattice `I\overline{J}`. It
returns true if the `n^{th}` coefficient of this series is nonzero
where `n=N(J)N(I)`.
The theta series of a lattice `L` over the quaternion algebra `A` is
defined as
.. math::
\theta_L(q)=\sum_{x \in L} q^{\frac{N(x)}{N(L)}}
L.theta_series(T,q) returns a power series representing `\theta_L(q)`
up to a precision of `\mathcal{O}(q^{T+1})`.
Hecke Structure
---------------
The Hecke structure defined on the Brandt module is given by the
Brandt matrices which can be computed using the definition of the
Hecke operators given earlier.
hecke_matrix_from_defn (self,n) returns the matrix of the nth Hecke
operator `B_{0}(n)` acting on self, computed directly from the
definition.
However, one can efficiently compute Brandt matrices using theta
series. In fact, let {`I_{1},.....,I_{h}`} be a set of right
`\mathcal{O}`-ideal class representatives. The (i,j) entry in the
Brandt matrix `B_{0}(n)` is the product of the `n^{th}` coefficient in
the theta series of the lattice `I_{i}\overline{I_{j}}` and the first
coefficient in the theta series of the lattice
`I_{i}\overline{I_{i}}`.
compute_hecke_matrix_brandt(self,n) returns the nth Hecke matrix,
computed using theta series.
Example
-------
::
sage: B = BrandtModule(23)
sage: B.maximal_order()
Order of Quaternion Algebra (-1, -23) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
sage: B.right_ideals()
(Fractional ideal (2 + 2*j, 2*i + 2*k, 4*j, 4*k), Fractional ideal (2 + 2*j, 2*i + 6*k, 8*j, 8*k), Fractional ideal (2 + 10*j + 8*k, 2*i + 8*j + 6*k, 16*j, 16*k))
sage: B.hecke_matrix(2)
[1 2 0]
[1 1 1]
[0 3 0]
sage: B.brandt_series(3)
[1/4 + q + q^2 + O(q^3) 1/4 + q^2 + O(q^3) 1/4 + O(q^3)]
[ 1/2 + 2*q^2 + O(q^3) 1/2 + q + q^2 + O(q^3) 1/2 + 3*q^2 + O(q^3)]
[ 1/6 + O(q^3) 1/6 + q^2 + O(q^3) 1/6 + q + O(q^3)]
References
----------
- [Pi] Arnold Pizer, *An Algorithm for Computing Modular Forms on* `\Gamma_{0}(N)`
- [Ko] David Kohel, *Hecke Module Structure of Quaternions*
Further Examples
----------------
We decompose a Brandt module over both `\ZZ` and `\QQ`.::
sage: B = BrandtModule(43, base_ring=ZZ); B
Brandt module of dimension 4 of level 43 of weight 2 over Integer Ring
sage: D = B.decomposition()
sage: D
[
Subspace of dimension 1 of Brandt module of dimension 4 of level 43 of weight 2 over Integer Ring,
Subspace of dimension 1 of Brandt module of dimension 4 of level 43 of weight 2 over Integer Ring,
Subspace of dimension 2 of Brandt module of dimension 4 of level 43 of weight 2 over Integer Ring
]
sage: D[0].basis()
((0, 0, 1, -1),)
sage: D[1].basis()
((1, 2, 2, 2),)
sage: D[2].basis()
((1, 1, -1, -1), (0, 2, -1, -1))
sage: B = BrandtModule(43, base_ring=QQ); B
Brandt module of dimension 4 of level 43 of weight 2 over Rational Field
sage: B.decomposition()[2].basis()
((1, 0, -1/2, -1/2), (0, 1, -1/2, -1/2))
"""
################################################################################
# Sage: Open Source Mathematical Software
#
# Copyright (C) 2009 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
################################################################################
# imports
from sage.misc.all import prod, verbose
from sage.rings.all import (Integer, ZZ, QQ, prime_divisors,
kronecker, PolynomialRing, GF, next_prime,
lcm, gcd)
from sage.rings.commutative_ring import is_CommutativeRing
from sage.algebras.quatalg.quaternion_algebra import QuaternionAlgebra, basis_for_quaternion_lattice
from sage.algebras.quatalg.quaternion_algebra_cython import rational_matrix_from_rational_quaternions
from sage.rings.arith import gcd, factor, kronecker_symbol
from sage.modular.hecke.all import (AmbientHeckeModule, HeckeSubmodule, HeckeModuleElement)
from sage.matrix.all import MatrixSpace, matrix
from sage.rings.rational_field import is_RationalField
from sage.misc.mrange import cartesian_product_iterator
from sage.misc.cachefunc import cached_method
from copy import copy
# Module-level cache of constructed Brandt modules, keyed by
# (N, M, weight, base_ring); populated by ``BrandtModule``.
cache = {}
def BrandtModule(N, M=1, weight=2, base_ring=QQ, use_cache=True):
    """
    Return the Brandt module of given weight associated to the prime
    power p^r and integer M, where p and M are coprime.

    INPUT:

    - N -- a product of primes with odd exponents
      (currently N must in fact be a single prime; other levels raise
      ``NotImplementedError``)

    - M -- an integer coprime to q (default: 1)

    - weight -- an integer that is at least 2 (default: 2)

    - base_ring -- the base ring (default: QQ)

    - use_cache -- whether to use the cache (default: True)

    OUTPUT:

    - a Brandt module

    EXAMPLES::

        sage: BrandtModule(17)
        Brandt module of dimension 2 of level 17 of weight 2 over Rational Field
        sage: BrandtModule(17,15)
        Brandt module of dimension 32 of level 17*15 of weight 2 over Rational Field
        sage: BrandtModule(3,7)
        Brandt module of dimension 2 of level 3*7 of weight 2 over Rational Field
        sage: BrandtModule(3,weight=2)
        Brandt module of dimension 1 of level 3 of weight 2 over Rational Field
        sage: BrandtModule(11, base_ring=ZZ)
        Brandt module of dimension 2 of level 11 of weight 2 over Integer Ring
        sage: BrandtModule(11, base_ring=QQbar)
        Brandt module of dimension 2 of level 11 of weight 2 over Algebraic Field

    The use_cache option determines whether the Brandt module returned
    by this function is cached.::

        sage: BrandtModule(37) is BrandtModule(37)
        True
        sage: BrandtModule(37,use_cache=False) is BrandtModule(37,use_cache=False)
        False

    TESTS:

    Note that N and M must be coprime::

        sage: BrandtModule(3,15)
        Traceback (most recent call last):
        ...
        ValueError: M must be coprime to N

    Only weight 2 is currently implemented::

        sage: BrandtModule(3,weight=4)
        Traceback (most recent call last):
        ...
        NotImplementedError: weight != 2 not yet implemented

    Brandt modules are cached::

        sage: B = BrandtModule(3,5,2,ZZ)
        sage: B is BrandtModule(3,5,2,ZZ)
        True
    """
    # Normalize the numeric arguments before validating them.
    N, M, weight = Integer(N), Integer(M), Integer(weight)
    if not N.is_prime():
        raise NotImplementedError("Brandt modules currently only implemented when N is a prime")
    if M < 1:
        raise ValueError("M must be positive")
    if gcd(M,N) != 1:
        raise ValueError("M must be coprime to N")
    if weight < 2:
        raise ValueError("weight must be at least 2")
    if not is_CommutativeRing(base_ring):
        raise TypeError("base_ring must be a commutative ring")
    key = (N, M, weight, base_ring)
    if use_cache:
        if key in cache: # return the previously constructed module
            return cache[key]
    if weight != 2:
        raise NotImplementedError("weight != 2 not yet implemented")
    B = BrandtModule_class(*key)
    if use_cache:
        cache[key] = B
    return B
def class_number(p, r, M):
    """
    Return the class number of an order of level N = p^r*M in the
    quaternion algebra over QQ ramified precisely at p and infinity.

    This is an implementation of Theorem 1.12 of [Pizer, 1980].

    INPUT:

    - p -- a prime

    - r -- an odd positive integer (default: 1)

    - M -- an integer coprime to q (default: 1)

    OUTPUT:

    Integer

    EXAMPLES::

        sage: sage.modular.quatalg.brandt.class_number(389,1,1)
        33
        sage: sage.modular.quatalg.brandt.class_number(389,1,2)  # TODO -- right?
        97
        sage: sage.modular.quatalg.brandt.class_number(389,3,1)  # TODO -- right?
        4892713
    """
    level = M * p**r
    divisors = prime_divisors(M)
    # Correction terms s and t of Theorem 1.12 of [Pizer, 1980]; they
    # vanish when 4 (resp. 9) divides the level.
    s = 0
    t = 0
    if level % 4:
        s = (1 - kronecker(-4, p)) / 4 * prod(1 + kronecker(-4, q)
                                              for q in divisors)
    if level % 9:
        t = (1 - kronecker(-3, p)) / 3 * prod(1 + kronecker(-3, q)
                                              for q in divisors)
    # Mass term plus the two correction terms.
    mass = (level / Integer(12)) * (1 - 1/p) * prod(1 + 1/q
                                                    for q in divisors)
    return Integer(mass + s + t)
def maximal_order(A):
    """
    Return a maximal order in the quaternion algebra ramified
    at p and infinity.

    This simply delegates to ``A.maximal_order()``, which implements
    Proposition 5.2 of [Pizer, 1980].

    INPUT:

    - A -- quaternion algebra ramified precisely at p and infinity

    OUTPUT:

    - a maximal order in A

    EXAMPLES::

        sage: A = BrandtModule(17).quaternion_algebra()
        sage: sage.modular.quatalg.brandt.maximal_order(A)
        Order of Quaternion Algebra (-17, -3) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, -1/3*j - 1/3*k, k)

        sage: A = QuaternionAlgebra(17,names='i,j,k')
        sage: A.maximal_order()
        Order of Quaternion Algebra (-3, -17) with base ring Rational Field with basis (1/2 + 1/2*i, 1/2*j - 1/2*k, -1/3*i + 1/3*k, -k)
    """
    order = A.maximal_order()
    return order
def basis_for_left_ideal(R, gens):
    """
    Return a basis for the left ideal of R with given generators.

    The ideal is spanned (as a lattice) by all products b*g with b a basis
    element of R and g a generator; a reduced basis of that lattice is
    returned.

    INPUT:

    - R -- quaternion order

    - gens -- list of elements of R

    OUTPUT:

    - list of four elements of R

    EXAMPLES::

        sage: B = BrandtModule(17); A = B.quaternion_algebra(); i,j,k = A.gens()
        sage: sage.modular.quatalg.brandt.basis_for_left_ideal(B.maximal_order(), [i+j,i-j,2*k,A(3)])
        [1/2 + 1/6*j + 2/3*k, 1/2*i + 1/2*k, 1/3*j + 1/3*k, k]
        sage: sage.modular.quatalg.brandt.basis_for_left_ideal(B.maximal_order(), [3*(i+j),3*(i-j),6*k,A(3)])
        [3/2 + 1/2*j + 2*k, 3/2*i + 3/2*k, j + k, 3*k]
    """
    products = []
    for b in R.basis():
        for g in gens:
            products.append(b * g)
    return basis_for_quaternion_lattice(products)
def right_order(R, basis):
    """
    Given a basis for a left ideal I, return the right order in the
    quaternion order R of elements such that I*x is contained in I.

    INPUT:

    - R -- order in quaternion algebra

    - basis -- basis for an ideal I

    OUTPUT:

    - order in quaternion algebra

    EXAMPLES:

    We do a consistency check with the ideal equal to a maximal order.::

        sage: B = BrandtModule(17); basis = sage.modular.quatalg.brandt.basis_for_left_ideal(B.maximal_order(), B.maximal_order().basis())
        sage: sage.modular.quatalg.brandt.right_order(B.maximal_order(), basis)
        Order of Quaternion Algebra (-17, -3) with base ring Rational Field with basis (1/2 + 1/6*j + 2/3*k, 1/2*i + 1/2*k, 1/3*j + 1/3*k, k)
        sage: basis
        [1/2 + 1/6*j + 2/3*k, 1/2*i + 1/2*k, 1/3*j + 1/3*k, k]

        sage: B = BrandtModule(17); A = B.quaternion_algebra(); i,j,k = A.gens()
        sage: basis = sage.modular.quatalg.brandt.basis_for_left_ideal(B.maximal_order(), [i*j-j])
        sage: sage.modular.quatalg.brandt.right_order(B.maximal_order(), basis)
        Order of Quaternion Algebra (-17, -3) with base ring Rational Field with basis (1/2 + 1/2*i + 1/2*j + 17/2*k, i, j + 8*k, 9*k)
    """
    # Compute matrix of multiplication by each element of the basis.
    B = R.basis()
    Z = R.quaternion_algebra()
    M = MatrixSpace(QQ, 4)
    # I = matrix with rows the given basis for I
    I = M([list(f) for f in basis])
    # psi = matrix of right multiplication on each basis element
    psi = [M([list(f*x) for x in Z.basis()]) for f in basis]
    # invert them
    psi_inv = [x**(-1) for x in psi]
    # apply the four inverses to I; the rows of I*psi_inv[n] are the
    # coordinates of elements x with basis[n]*x in the span of I
    W = [I*x for x in psi_inv]
    # The right order is the intersection of the row span of the W with the row span of B.
    X = M([list(b) for b in B]).row_module(ZZ)
    for A in W:
        X = X.intersection(A.row_module(ZZ))
    C = [Z(list(b)) for b in X.basis()]
    return Z.quaternion_order(C)
class BrandtModule_class(AmbientHeckeModule):
"""
A Brandt module.
EXAMPLES::
sage: BrandtModule(3, 10)
Brandt module of dimension 4 of level 3*10 of weight 2 over Rational Field
"""
    def __init__(self, N, M, weight, base_ring):
        """
        Initialize this Brandt module.

        INPUT:

        - N -- ramification number (coprime to M)

        - M -- auxiliary level

        - weight -- integer 2

        - base_ring -- the base ring

        EXAMPLES::

            sage: BrandtModule(3, 5, weight=2, base_ring=ZZ)
            Brandt module of dimension 2 of level 3*5 of weight 2 over Integer Ring
        """
        # Only weight 2 is implemented; ``BrandtModule`` enforces this too.
        assert weight == 2
        self.__N = N
        self.__M = M
        if not N.is_prime():
            raise NotImplementedError("right now N must be prime")
        # The rank of the module is the class number of an order of level
        # N*M; see ``class_number``.
        rank = class_number(N, 1, M)
        self.__key = (N, M, weight, base_ring)
        AmbientHeckeModule.__init__(self, base_ring, rank, N * M, weight=2)
        # Allow coercion from the underlying free module.
        self._populate_coercion_lists_(coerce_list=[self.free_module()], element_constructor=BrandtModuleElement)
    def _submodule_class(self):
        """
        Return the Python class of submodules of this ambient Brandt module.

        EXAMPLES::

            sage: BrandtModule(37)._submodule_class()
            <class 'sage.modular.quatalg.brandt.BrandtSubmodule'>
        """
        return BrandtSubmodule
def free_module(self):
"""
Return the underlying free module of the Brandt module.
EXAMPLES::
sage: B = BrandtModule(10007,389)
sage: B.free_module()
Vector space of dimension 325196 over Rational Field
"""
try: return self.__free_module
except AttributeError: pass
V = self.base_ring()**self.dimension()
self.__free_module = V
return V
    def N(self):
        """
        Return the ramification level N.

        EXAMPLES::

            sage: BrandtModule(7,5,2,ZZ).N()
            7
        """
        return self.__N
    def M(self):
        """
        Return the auxiliary level (prime to p part) of the quaternion
        order used to compute this Brandt module.

        EXAMPLES::

            sage: BrandtModule(7,5,2,ZZ).M()
            5
        """
        return self.__M
def _repr_(self):
"""
Return string representation of this Brandt module.
EXAMPLES::
sage: BrandtModule(7,5,2,ZZ)._repr_()
'Brandt module of dimension 4 of level 7*5 of weight 2 over Integer Ring'
"""
aux = '' if self.__M == 1 else '*%s'%self.__M
return "Brandt module of dimension %s of level %s%s of weight %s over %s"%(
self.rank(), self.__N, aux, self.weight(), self.base_ring())
    def __cmp__(self, other):
        r"""
        Compare self to other.

        Brandt modules are compared by (M, N, weight, base_ring); anything
        that is not a Brandt module compares by type.
        (Python 2 only: ``__cmp__`` and the ``cmp`` builtin do not exist
        on Python 3.)

        EXAMPLES::

            sage: BrandtModule(37, 5, 2, ZZ) == BrandtModule(37, 5, 2, QQ)
            False
            sage: BrandtModule(37, 5, 2, ZZ) == BrandtModule(37, 5, 2, ZZ)
            True
            sage: BrandtModule(37, 5, 2, ZZ) == loads(dumps(BrandtModule(37, 5, 2, ZZ)))
            True
        """
        if not isinstance(other, BrandtModule_class):
            return cmp(type(self), type(other))
        else:
            return cmp( (self.__M, self.__N, self.weight(), self.base_ring()), (other.__M, other.__N, other.weight(), other.base_ring()))
def quaternion_algebra(self):
    """
    Return the quaternion algebra A over QQ ramified precisely at
    p and infinity used to compute this Brandt module.

    The invariants `(a, b)` depend only on `p` modulo 8; for
    `p \equiv 1 \pmod 8` an auxiliary prime `q` is searched for.
    The result is cached.

    EXAMPLES::

        sage: BrandtModule(997).quaternion_algebra()
        Quaternion Algebra (-2, -997) with base ring Rational Field
        sage: BrandtModule(2).quaternion_algebra()
        Quaternion Algebra (-1, -1) with base ring Rational Field
        sage: BrandtModule(3).quaternion_algebra()
        Quaternion Algebra (-1, -3) with base ring Rational Field
        sage: BrandtModule(5).quaternion_algebra()
        Quaternion Algebra (-2, -5) with base ring Rational Field
        sage: BrandtModule(17).quaternion_algebra()
        Quaternion Algebra (-17, -3) with base ring Rational Field
    """
    try:
        return self.__quaternion_algebra
    except AttributeError:
        pass
    p = self.N()
    assert p.is_prime(), "we have only implemented the prime case"
    # Choose invariants (a, b) case-by-case on p mod 8.
    if p == 2:
        invariants = (-1, -1)
    elif p % 4 == 3:
        invariants = (-1, -p)
    elif p % 8 == 5:
        invariants = (-2, -p)
    elif p % 8 == 1:
        # Find the least prime q = 3 (mod 4) with (p|q) = -1.
        q = 3
        while q % 4 != 3 or kronecker(p, q) != -1:
            q = next_prime(q)
        invariants = (-p, -q)
    A = QuaternionAlgebra(QQ, invariants[0], invariants[1])
    self.__quaternion_algebra = A
    return A
def maximal_order(self):
    """
    Return a maximal order in the quaternion algebra associated to this Brandt module.

    The order is computed once and cached.

    EXAMPLES::

        sage: BrandtModule(17).maximal_order()
        Order of Quaternion Algebra (-17, -3) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, -1/3*j - 1/3*k, k)
        sage: BrandtModule(17).maximal_order() is BrandtModule(17).maximal_order()
        True
    """
    try:
        return self.__maximal_order
    except AttributeError:
        # Delegate to the module-level helper, then cache.
        order = maximal_order(self.quaternion_algebra())
        self.__maximal_order = order
        return order
def order_of_level_N(self):
    """
    Return Eichler order of level N = p^(2*r+1)*M in the quaternion algebra.

    The order is computed once and cached.

    EXAMPLES::

        sage: BrandtModule(7).order_of_level_N()
        Order of Quaternion Algebra (-1, -7) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
        sage: BrandtModule(7,13).order_of_level_N()
        Order of Quaternion Algebra (-1, -7) with base ring Rational Field with basis (1/2 + 1/2*j + 12*k, 1/2*i + 9/2*k, j + 11*k, 13*k)
        sage: BrandtModule(7,3*17).order_of_level_N()
        Order of Quaternion Algebra (-1, -7) with base ring Rational Field with basis (1/2 + 1/2*j + 35*k, 1/2*i + 65/2*k, j + 19*k, 51*k)
    """
    try:
        return self.__order_of_level_N
    except AttributeError:
        # Build the Eichler order of the requested level and cache it.
        order = quaternion_order_with_given_level(self.quaternion_algebra(), self.level())
        self.__order_of_level_N = order
        return order
#@cached_method
def cyclic_submodules(self, I, p):
    """
    Return a list of rescaled versions of the fractional right
    ideals `J` such that `J` contains `I` and the quotient has
    group structure the product of two cyclic groups of order `p`.

    We emphasize again that `J` is rescaled to be integral.

    INPUT:

    - I -- ideal I in R = self.order_of_level_N()
    - p -- prime p coprime to self.level()

    OUTPUT:

    - list of the p+1 fractional right R-ideals that contain I
      such that J/I is GF(p) x GF(p).

    EXAMPLES::

        sage: B = BrandtModule(11)
        sage: I = B.order_of_level_N().unit_ideal()
        sage: B.cyclic_submodules(I, 2)
        [Fractional ideal (1/2 + 3/2*j + k, 1/2*i + j + 1/2*k, 2*j, 2*k),
         Fractional ideal (1/2 + 1/2*i + 1/2*j + 1/2*k, i + k, j + k, 2*k),
         Fractional ideal (1/2 + 1/2*j + k, 1/2*i + j + 3/2*k, 2*j, 2*k)]
        sage: B.cyclic_submodules(I, 3)
        [Fractional ideal (1/2 + 1/2*j, 1/2*i + 5/2*k, 3*j, 3*k),
         Fractional ideal (1/2 + 3/2*j + 2*k, 1/2*i + 2*j + 3/2*k, 3*j, 3*k),
         Fractional ideal (1/2 + 3/2*j + k, 1/2*i + j + 3/2*k, 3*j, 3*k),
         Fractional ideal (1/2 + 5/2*j, 1/2*i + 1/2*k, 3*j, 3*k)]
        sage: B.cyclic_submodules(I, 11)
        Traceback (most recent call last):
        ...
        ValueError: p must be coprime to the level
    """
    # Validate the inputs before doing any work.
    if not Integer(p).is_prime():
        raise ValueError("p must be a prime")
    if self.level() % p == 0:
        raise ValueError("p must be coprime to the level")

    R = self.order_of_level_N()
    A = R.quaternion_algebra()
    B = R.basis()
    V = GF(p)**4

    # step 1: Compute alpha, beta, and the matrix of their action on I/pI.
    # NOTE: Move this code to orders once we have it all working...
    # The pair (alpha, beta) depends only on p, so it is cached in
    # self.__cyclic_submodules; only compute it on a cache miss.
    try:
        alpha, beta = self.__cyclic_submodules[p]
        compute = False
    except AttributeError:
        # first call ever: create the cache dict
        self.__cyclic_submodules = {}
        compute = True
    except KeyError:
        # cache exists but not for this p
        compute = True
    if compute:
        d = R.free_module().basis_matrix().determinant()
        S = None
        # Search for alpha whose characteristic polynomial mod p is
        # irreducible, then for beta making {1, alpha, beta, alpha*beta}
        # a basis of an order of the right p-adic index.
        for v in V:
            if not v: continue
            alpha = sum(Integer(v[i])*B[i] for i in range(4))
            # If the quadratic polynomial over GF(p) given by
            #      X^2 - alpha.reduced_trace() * X + alpha.reduced_norm()
            # is not irreducible, we try again with a new element.
            if p == 2:
                # special case p == 2, since there is a unique quadratic irreducible poly.
                if alpha.reduced_trace()%2 == 0 or alpha.reduced_norm()%2 == 0:
                    continue
            else:
                # check if the discriminant is a square -- if so, poly is reducible
                b = alpha.reduced_trace(); c = alpha.reduced_norm()
                if kronecker(b*b - 4*c, p) != -1:
                    continue
            for w in V:
                if not w: continue
                beta = sum(Integer(w[i])*B[i] for i in range(4))
                v = [A(1), alpha, beta, alpha*beta]
                M = rational_matrix_from_rational_quaternions(v)
                e = M.determinant()
                # e nonzero and index prime to p means v spans a suitable order.
                if e and (d/e).valuation(p) == 0:
                    S = A.quaternion_order(v)
                    break
            if S is not None: break
        self.__cyclic_submodules[p] = (alpha, beta)

    # right multiplication by X changes something to be written
    # in terms of the basis for I.
    Y = I.basis_matrix()
    X = Y**(-1)
    # Compute the matrix of right multiplication by alpha acting on
    # our fixed choice of basis for this ideal.
    # NOTE(review): the second .change_ring(GF(p)) on each line below is
    # redundant (change_ring is idempotent here) -- harmless but could be dropped.
    M_alpha = (matrix([(i*alpha).coefficient_tuple() for i in I.basis()]) * X).change_ring(GF(p)).change_ring(GF(p))
    M_beta = (matrix([(i*beta).coefficient_tuple() for i in I.basis()]) * X).change_ring(GF(p)).change_ring(GF(p))

    # step 2: Find j such that if f=I[j], then mod 2 we have span(I[0],alpha*I[i])
    # has trivial intersection with span(I[j],alpha*I[j]).
    #
    # In terms of our matrices alpha, beta, we can now think of I/p*I
    # as being the GF(p)^4 that M_alpha and M_beta naturally act on,
    # and I[0], I[1], I[2], I[3] correspond to the standard basis.
    #
    # We try each of the standard basis vectors.
    W0 = V.span([V.gen(0), V.gen(0)*M_alpha])
    assert W0.dimension() == 2
    j = None
    for i in range(1,4):
        Wi = V.span([V.gen(i), V.gen(i) * M_alpha])
        if Wi.dimension() == 2 and W0.intersection(Wi).dimension() == 0:
            j = i
            break
    assert j is not None, "bug -- couldn't find basis"

    # step 3: Enumerate the elements of P^1(GF(p^2)), recording each
    # cyclic submodule of degree p.
    answer = []
    f = V.gen(0)
    g = V.gen(j)
    M2_4 = MatrixSpace(GF(p),4)
    M2_2 = MatrixSpace(QQ,2,4)
    Yp = p*Y
    from sage.algebras.quatalg.quaternion_algebra_cython import\
         rational_quaternions_from_integral_matrix_and_denom
    # Each point of P^1(GF(p^2)) yields one candidate submodule; keep
    # those whose orbit under alpha, beta spans a rank-2 subspace.
    for v in [f + g*(a+b*M_alpha) for a in GF(p) for b in GF(p)] + [g]:
        v0 = v
        v1 = v*M_alpha
        v2 = v*M_beta
        v3 = v1*M_beta
        W = M2_4([v0, v1, v2, v3], coerce=False)
        if W.rank() == 2:
            # Lift the mod-p submodule to an integral right ideal via HNF.
            gen_mat = Yp.stack(M2_2([v0.lift()*Y, v1.lift()*Y], coerce=False))
            gen_mat, d = gen_mat._clear_denom()
            H = gen_mat._hnf_pari(0, include_zero_rows=False)
            gens = tuple(rational_quaternions_from_integral_matrix_and_denom(A, H, d))
            answer.append( R.right_ideal(gens, check=False) )
            # Exactly p+1 such ideals exist; stop as soon as all are found.
            if len(answer) == p+1: break
    return answer
def hecke_matrix(self, n, algorithm='default', sparse=False, B=None):
    """
    Return the matrix of the n-th Hecke operator.

    INPUT:

    - `n` -- integer

    - ``algorithm`` -- string (default: 'default')

       - 'default' -- let Sage guess which algorithm is best

       - 'direct' -- use cyclic subideals (generally much
         better when you want few Hecke operators and the
         dimension is very large); uses 'theta' if n divides
         the level.

       - 'brandt' -- use Brandt matrices (generally much
         better when you want many Hecke operators and the
         dimension is very small; bad when the dimension
         is large)

    - ``sparse`` -- bool (default: False)

    - `B` -- integer or None (default: None); in direct algorithm,
      use theta series to this precision as an initial check for
      equality of ideal classes.

    EXAMPLES::

        sage: B = BrandtModule(3,7); B.hecke_matrix(2)
        [0 3]
        [1 2]
        sage: B.hecke_matrix(5, algorithm='brandt')
        [0 6]
        [2 4]
        sage: t = B.hecke_matrix(11, algorithm='brandt', sparse=True); t
        [ 6  6]
        [ 2 10]
        sage: type(t)
        <type 'sage.matrix.matrix_rational_sparse.Matrix_rational_sparse'>
        sage: B.hecke_matrix(19, algorithm='direct', B=2)
        [ 8 12]
        [ 4 16]
    """
    n = ZZ(n)
    if n <= 0:
        raise IndexError("n must be positive.")
    # Matrices are cached in self._hecke_matrices keyed by n.
    if n not in self._hecke_matrices:
        if algorithm == 'default':
            # If Brandt series are already computed far enough, the
            # answer can be read off for free; use 'brandt' then.
            try: pr = len(self.__brandt_series_vectors[0][0])
            except (AttributeError, IndexError): pr = 0
            if n <= pr:
                # already trivially know the hecke operator in this case
                algorithm = 'brandt'
            if algorithm == 'default':  # still don't know
                algorithm = 'direct'
        # The direct (cyclic-subideal) algorithm requires n coprime to
        # the level; otherwise fall back to Brandt matrices.
        if self.level().gcd(n) != 1:
            algorithm = 'brandt'
        if algorithm == 'direct':
            T = self._compute_hecke_matrix(n, sparse=sparse, B=B)
        elif algorithm == 'brandt':
            T = self._compute_hecke_matrix_brandt(n, sparse=sparse)
        else:
            raise ValueError("unknown algorithm '%s'"%algorithm)
        T.set_immutable()
        self._hecke_matrices[n] = T
    return self._hecke_matrices[n]
def _compute_hecke_matrix_prime(self, p, sparse=False, B=None):
    """
    Return matrix of the `p`-th Hecke operator on self.  The matrix
    is always computed using the direct algorithm.

    This is a thin wrapper around ``_compute_hecke_matrix_directly``.

    INPUT:

    - `p` -- prime number

    - `B` -- integer or None (default: None); in direct algorithm,
      use theta series to this precision as an initial check for
      equality of ideal classes.

    - ``sparse`` -- bool (default: False); whether matrix should be sparse

    EXAMPLES::

        sage: B = BrandtModule(37)
        sage: t = B._compute_hecke_matrix_prime(2); t
        [1 1 1]
        [1 0 2]
        [1 2 0]
        sage: type(t)
        <type 'sage.matrix.matrix_rational_dense.Matrix_rational_dense'>
        sage: type(B._compute_hecke_matrix_prime(2,sparse=True))
        <type 'sage.matrix.matrix_rational_sparse.Matrix_rational_sparse'>
    """
    return self._compute_hecke_matrix_directly(n=p,B=B,sparse=sparse)
def _compute_hecke_matrix_directly(self, n, B=None, sparse=False):
    """
    Given an integer `n` coprime to the level, return the matrix of
    the n-th Hecke operator on self, computed on our fixed basis
    by directly using the definition of the Hecke action in terms
    of fractional ideals.

    INPUT:

    - `n` -- integer, coprime to level

    - `B` -- integer or None (default: None); precision of the theta
      series used for the first-pass equality test of ideal classes

    - ``sparse`` -- bool (default: False); whether matrix should be sparse

    EXAMPLES::

        sage: B = BrandtModule(37)
        sage: t = B._compute_hecke_matrix_directly(2); t
        [1 1 1]
        [1 0 2]
        [1 2 0]
        sage: type(t)
        <type 'sage.matrix.matrix_rational_dense.Matrix_rational_dense'>
        sage: type(B._compute_hecke_matrix_directly(2,sparse=True))
        <type 'sage.matrix.matrix_rational_sparse.Matrix_rational_sparse'>

    You can't compute the Hecke operator for n not coprime to the level using this function::

        sage: B._compute_hecke_matrix_directly(37)
        Traceback (most recent call last):
        ...
        ValueError: n must be coprime to the level

    The generic function (which uses theta series) does work, though::

        sage: B.hecke_matrix(37)
        [1 0 0]
        [0 0 1]
        [0 1 0]

    An example where the Hecke operator isn't symmetric.::

        sage: B = BrandtModule(43)
        sage: B._compute_hecke_matrix_directly(2)
        [1 2 0 0]
        [1 0 1 1]
        [0 1 0 2]
        [0 1 2 0]
        sage: B._compute_hecke_matrix_brandt(2)
        [1 2 0 0]
        [1 0 1 1]
        [0 1 0 2]
        [0 1 2 0]
    """
    level = self.level()
    if gcd(n, level) != 1:
        raise ValueError("n must be coprime to the level")
    # For rigor it does not matter at all what bound we chose.
    # This B is used only for the first phase of checking equality
    # of ideals modulo equivalence -- we always provably check
    # equivalence if the theta series are the same up to this
    # bound.
    if B is None:
        B = self.dimension() // 2 + 5
    T = copy(matrix(self.base_ring(), self.dimension(), sparse=sparse))
    C = self.right_ideals()
    theta_dict = self._theta_dict(B)
    # I think the runtime of this algorithm is now dominated by
    # computing theta series of ideals.  The computation of
    # cyclic submodules is a lower order term.
    q = self._smallest_good_prime()
    d = lcm([a.denominator() for a in self.order_of_level_N().basis()])
    # TODO: temporary!! -- it's not sufficiently *optimized* to be
    # sure this is best in these cases.
    #if gcd(2*d*q,n) == 1:
    #    use_fast_alg = True
    #else:
    #    use_fast_alg = False
    use_fast_alg = False

    last_percent = 0
    for r in range(len(C)):
        # Progress reporting every 5%.
        percent_done = 100*r//len(C)
        if percent_done != last_percent:
            if percent_done%5 == 0:
                verbose("percent done: %s"%percent_done)
            last_percent = percent_done
        # All ideals J with J/C[r] cyclic of degree n contribute to row r.
        if use_fast_alg:
            v = C[r].cyclic_right_subideals(n)
        else:
            v = self.cyclic_submodules(C[r], n)
        for J in v:
            J_theta = tuple(J.theta_series_vector(B))
            # NOTE: the name v is deliberately reused here; the outer
            # value is not needed again within this iteration.
            v = theta_dict[J_theta]
            if len(v) == 1:
                # Theta series already pins down the class uniquely.
                T[r,v[0]] += 1
            else:
                # Ambiguous theta series: fall back to a provable
                # equivalence test against each candidate class.
                for i in v:
                    if C[i].is_equivalent(J, 0):
                        T[r,i] += 1
                        break
    return T
@cached_method
def _theta_dict(self, B):
    """
    Return a dictionary from theta series vectors of degree `B` to
    list of integers `i`, where the key is the vector of
    coefficients of the normalized theta series of the `i`th right
    ideal, as indexed by ``self.right_ideals()``.

    INPUT:

    - `B` -- positive integer, precision of theta series vectors

    OUTPUT:

    - dictionary

    EXAMPLES::

    In this example the theta series determine the ideal classes::

        sage: B = BrandtModule(5,11); B
        Brandt module of dimension 4 of level 5*11 of weight 2 over Rational Field
        sage: sorted(list(B._theta_dict(5).iteritems()))
        [((1, 0, 0, 4, 0), [3]),
         ((1, 0, 0, 4, 2), [2]),
         ((1, 0, 2, 0, 6), [1]),
         ((1, 2, 4, 0, 6), [0])]

    In this example, the theta series does not determine the ideal class::

        sage: sorted(list(BrandtModule(37)._theta_dict(6).iteritems()))
        [((1, 0, 2, 2, 6, 4), [1, 2]), ((1, 2, 2, 4, 2, 4), [0])]
    """
    # Group the indices of the right ideals by their truncated theta
    # series; setdefault creates the bucket on first sight of a key.
    table = {}
    for idx, J in enumerate(self.right_ideals()):
        key = tuple(J.theta_series_vector(B))
        table.setdefault(key, []).append(idx)
    return table
def _compute_hecke_matrix_brandt(self, n, sparse=False):
    """
    Return the n-th hecke matrix, computed using Brandt matrices
    (theta series).

    When the n-th Hecke operator is requested, we compute theta
    series to precision `2n+10`, since it only takes slightly
    longer, and this means that any Hecke operator $T_m$ can
    quickly be computed, for `m<2n+10`.

    INPUT:

    - n -- integer, coprime to level

    - sparse -- bool (default: False); whether matrix should be sparse

    EXAMPLES::

        sage: B = BrandtModule(3,17)
        sage: B._compute_hecke_matrix_brandt(3)
        [0 1 0 0]
        [1 0 0 0]
        [0 0 0 1]
        [0 0 1 0]
        sage: B._compute_hecke_matrix_brandt(5)
        [4 1 1 0]
        [1 4 0 1]
        [2 0 2 2]
        [0 2 2 2]
        sage: B._compute_hecke_matrix_brandt(5).fcp()
        (x - 6) * (x - 3) * (x^2 - 3*x - 2)
    """
    # we go out to 2*n+10 for efficiency, since it takes only a
    # little longer, but saves a lot of time if one computes
    # successive Hecke operators, which is a very common thing to
    # do.
    B = self._brandt_series_vectors()
    if len(B[0][0]) <= n:
        # Cached precision insufficient; recompute to precision 2*n+10.
        B = self._brandt_series_vectors(2*n+10)
    m = len(B)
    K = self.base_ring()
    Bmat = copy(matrix(K, m, m, sparse=sparse))
    # Entry (i, j) of T_n is the n-th coefficient of the (j, i) Brandt series.
    for i in range(m):
        for j in range(m):
            Bmat[i,j] = K(B[j][i][n])
    return Bmat
@cached_method
def _smallest_good_prime(self):
    """
    Return the smallest prime number that does not divide the level.

    EXAMPLES::

        sage: BrandtModule(17,6)._smallest_good_prime()
        5
    """
    N = self.level()
    # Walk up the primes starting from 2 until one is coprime to N.
    candidate = ZZ(2)
    while not N % candidate:
        candidate = next_prime(candidate)
    return candidate
def right_ideals(self, B=None):
    """
    Return sorted tuple of representatives for the equivalence
    classes of right ideals in self.

    INPUT:

    - `B` -- integer or None (default: None); precision of the theta
      series used for the first-pass equality test of ideal classes

    OUTPUT:

    - sorted tuple of fractional ideals

    EXAMPLES::

        sage: B = BrandtModule(23)
        sage: B.right_ideals()
        (Fractional ideal (2 + 2*j, 2*i + 2*k, 4*j, 4*k),
         Fractional ideal (2 + 2*j, 2*i + 6*k, 8*j, 8*k),
         Fractional ideal (2 + 10*j + 8*k, 2*i + 8*j + 6*k, 16*j, 16*k))

    TEST::

        sage: B = BrandtModule(1009)
        sage: Is = B.right_ideals()
        sage: n = len(Is)
        sage: prod(not Is[i].is_equivalent(Is[j]) for i in range(n) for j in range(i))
        1
    """
    try: return self.__right_ideals
    except AttributeError: pass
    # Start from (a rescaled copy of) the unit ideal and repeatedly
    # apply cyclic_submodules at a good prime p; by a theorem of Serre
    # this eventually visits every ideal class.
    p = self._smallest_good_prime()
    R = self.order_of_level_N()
    I = R.unit_ideal()
    # Rescale by 4 so the ideal is integral.
    I = R.right_ideal([4*x for x in I.basis()])

    if B is None:
        B = self.dimension() // 2 + 5

    ideals = [I]
    # Map theta-series vector -> list of known ideals with that vector;
    # used as a cheap first-pass equality filter.
    ideals_theta = { tuple(I.theta_series_vector(B)) : [I] }
    new_ideals = [I]
    newly_computed_ideals = []
    got_something_new = True
    while got_something_new:
        got_something_new = False
        newly_computed_ideals = []
        for I in new_ideals:
            L = self.cyclic_submodules(I, p)
            for J in L:
                is_new = True
                J_theta = tuple(J.theta_series_vector(B))
                if J_theta in ideals_theta:
                    # Same theta series as a known ideal: run the
                    # provable equivalence test against each candidate.
                    for K in ideals_theta[J_theta]:
                        if J.is_equivalent(K, 0):
                            is_new = False
                            break
                if is_new:
                    newly_computed_ideals.append(J)
                    ideals.append(J)
                    if J_theta in ideals_theta:
                        ideals_theta[J_theta].append(J)
                    else:
                        ideals_theta[J_theta] = [J]
                    verbose("found %s of %s ideals"%(len(ideals), self.dimension()), level=2)
                    # The class number equals the dimension, so stop
                    # as soon as that many classes are found.
                    if len(ideals) >= self.dimension():
                        ideals = tuple(sorted(ideals))
                        self.__right_ideals = ideals
                        return ideals
                    got_something_new = True
        new_ideals = list(newly_computed_ideals)
    ideals = tuple(sorted(ideals))
    self.__right_ideals = ideals
    return ideals
def _ideal_products(self):
    """
    Return all products of right ideals, which are used in computing
    the Brandt matrices.

    This function is used internally by the Brandt matrices
    algorithms.

    OUTPUT:

    - list of ideals

    EXAMPLES::

        sage: B = BrandtModule(37)
        sage: B._ideal_products()
        [[Fractional ideal (8 + 8*j + 8*k, 4*i + 8*j + 4*k, 16*j, 16*k)],
         [Fractional ideal (8 + 24*j + 8*k, 4*i + 8*j + 4*k, 32*j, 32*k),
          Fractional ideal (16 + 16*j + 48*k, 4*i + 8*j + 36*k, 32*j + 32*k, 64*k)],
         [Fractional ideal (8 + 24*j + 24*k, 4*i + 24*j + 4*k, 32*j, 32*k),
          Fractional ideal (8 + 4*i + 16*j + 28*k, 8*i + 16*j + 8*k, 32*j, 64*k),
          Fractional ideal (16 + 16*j + 16*k, 4*i + 24*j + 4*k, 32*j + 32*k, 64*k)]]
    """
    try:
        return self.__ideal_products
    except AttributeError:
        pass
    L = self.right_ideals()
    n = len(L)
    if n == 0:
        # NOTE(review): this degenerate case returns a 0x0 power series
        # matrix, not a list as documented above -- confirm intent.
        return matrix(self.base_ring()[['q']], 0)
    # 1. Compute the theta series
    # Lower-triangular table P[i][j] = L[i] * conjugate(L[j]), j <= i.
    P = []
    for i in range(n):
        P.append([L[i].multiply_by_conjugate(L[j]) for j in range(i+1)])
    self.__ideal_products = P
    return P
def _brandt_series_vectors(self, prec=None):
    """
    Return Brandt series coefficient vectors out to precision *at least* prec.

    INPUT:

    - ``prec`` -- integer >= 2 or None (default: None); with None,
      return the cached vectors (computing to precision 2 if nothing
      is cached yet)

    EXAMPLES::

        sage: B = BrandtModule(37, use_cache=False)
        sage: B._brandt_series_vectors(5)
        [[(1/2, 1, 1, 2, 1), (1/2, 0, 1, 1, 3), (1/2, 0, 1, 1, 3)],
         [(1/2, 0, 1, 1, 3), (1/2, 1, 0, 0, 3), (1/2, 0, 2, 3, 1)],
         [(1/2, 0, 1, 1, 3), (1/2, 0, 2, 3, 1), (1/2, 1, 0, 0, 3)]]

    If you have computed to higher precision and ask for a lower
    precision, the higher precision is still returned.::

        sage: B._brandt_series_vectors(2)
        [[(1/2, 1, 1, 2, 1), (1/2, 0, 1, 1, 3), (1/2, 0, 1, 1, 3)],
         [(1/2, 0, 1, 1, 3), (1/2, 1, 0, 0, 3), (1/2, 0, 2, 3, 1)],
         [(1/2, 0, 1, 1, 3), (1/2, 0, 2, 3, 1), (1/2, 1, 0, 0, 3)]]
    """
    if prec is None:
        try:
            return self.__brandt_series_vectors
        except AttributeError:
            prec = 2
    elif prec < 2:
        raise ValueError("prec must be at least 2")
    L = self.right_ideals()
    n = len(L)
    K = QQ
    if n == 0:
        return [[]]
    # Reuse the cache when it already reaches the requested precision.
    try:
        if len(self.__brandt_series_vectors[0][0]) >= prec:
            return self.__brandt_series_vectors
    except AttributeError: pass
    # 1. Compute the theta series
    theta = [[I.theta_series_vector(prec) for I in x] for x in self._ideal_products()]
    # 2. Compute the number e_j
    e = [theta[j][j][1] for j in range(n)]
    B = [[0 for _ in range(n)] for _ in range(n)]
    # 3. Make the brandt matrix series
    # theta is lower-triangular (see _ideal_products), so entry (i, j)
    # and its mirror (j, i) are both filled from theta[i][j].
    for i in range(n):
        B[i][i] = theta[i][i]/e[i]
        for j in range(i):
            B[j][i] = theta[i][j]/e[j]
            B[i][j] = theta[i][j]/e[i]
    self.__brandt_series_vectors = B
    return B
def brandt_series(self, prec, var='q'):
    r"""
    Return matrix of power series `\sum T_n q^n` to the given
    precision.  Note that the Hecke operators in this series are
    always over `\QQ`, even if the base ring of this Brandt module
    is not `\QQ`.

    INPUT:

    - prec -- positive integer

    - var -- string (default: `q`)

    OUTPUT:

    matrix of power series with coefficients in `\QQ`

    EXAMPLES::

        sage: B = BrandtModule(11)
        sage: B.brandt_series(2)
        [1/4 + q + O(q^2)     1/4 + O(q^2)]
        [    1/6 + O(q^2) 1/6 + q + O(q^2)]
        sage: B.brandt_series(5)
        [1/4 + q + q^2 + 2*q^3 + 5*q^4 + O(q^5)  1/4 + 3*q^2 + 3*q^3 + 3*q^4 + O(q^5)]
        [ 1/6 + 2*q^2 + 2*q^3 + 2*q^4 + O(q^5)        1/6 + q + q^3 + 4*q^4 + O(q^5)]

    Asking for a smaller precision works.::

        sage: B.brandt_series(3)
        [1/4 + q + q^2 + O(q^3)  1/4 + 3*q^2 + O(q^3)]
        [ 1/6 + 2*q^2 + O(q^3)  1/6 + q + O(q^3)]
        sage: B.brandt_series(3,var='t')
        [1/4 + t + t^2 + O(t^3)  1/4 + 3*t^2 + O(t^3)]
        [ 1/6 + 2*t^2 + O(t^3)  1/6 + t + O(t^3)]
    """
    # Coefficient vectors may carry more precision than requested, so
    # truncate each one when converting to a power series over QQ[[var]].
    vectors = self._brandt_series_vectors(prec)
    S = QQ[[var]]
    size = len(vectors[0])
    entries = [[S(vec.list()[:prec], prec) for vec in row] for row in vectors]
    return matrix(S, size, size, entries)
def eisenstein_subspace(self):
    """
    Return the 1-dimensional subspace of self on which the Hecke
    operators `T_p` act as `p+1` for `p` coprime to the level.

    NOTE: This function assumes that the base field has
    characteristic 0.

    EXAMPLES::

        sage: B = BrandtModule(11); B.eisenstein_subspace()
        Subspace of dimension 1 of Brandt module of dimension 2 of level 11 of weight 2 over Rational Field
        sage: B.eisenstein_subspace() is B.eisenstein_subspace()
        True
        sage: BrandtModule(3,11).eisenstein_subspace().basis()
        ((1, 1),)
        sage: BrandtModule(7,10).eisenstein_subspace().basis()
        ((1, 1, 1, 1/2, 1, 1, 1/2, 1, 1, 1),)
        sage: BrandtModule(7,10,base_ring=ZZ).eisenstein_subspace().basis()
        ((2, 2, 2, 1, 2, 2, 1, 2, 2, 2),)
    """
    try: return self.__eisenstein_subspace
    except AttributeError: pass
    if self.base_ring().characteristic() != 0:
        raise ValueError("characteristic must be 0")
    # Cut down until we get a 1-dimensional space by intersecting the
    # kernels of T_p - (p+1) for successive primes p coprime to the
    # level.  We advance p after each round: reusing the same p would
    # recompute the identical kernel and loop forever whenever a
    # single kernel has dimension >= 2.
    V = self
    p = Integer(2)
    N = self.level()
    while V.dimension() >= 2:
        while N % p == 0:
            p = p.next_prime()
        A = V.T(p) - (p+1)
        V = A.kernel()
        p = p.next_prime()  # ensure progress on the next iteration
    self.__eisenstein_subspace = V
    return V
def is_cuspidal(self):
    r"""
    Returns whether self is cuspidal, i.e. has no Eisenstein part.

    EXAMPLES:

        sage: B = BrandtModule(3, 4)
        sage: B.is_cuspidal()
        False
        sage: B.eisenstein_subspace()
        Brandt module of dimension 1 of level 3*4 of weight 2 over Rational Field
    """
    # Cuspidal exactly when the Eisenstein subspace is trivial.
    eis_dim = self.eisenstein_subspace().dimension()
    return eis_dim == 0
def monodromy_weights(self):
    r"""
    Return the weights for the monodromy pairing on this Brandt
    module.  The weights are associated to each ideal class in our
    fixed choice of basis.  The weight of an ideal class `[I]` is
    half the number of units of the right order `I`.

    NOTE: The base ring must be `\QQ` or `\ZZ`.

    EXAMPLES::

        sage: BrandtModule(11).monodromy_weights()
        (2, 3)
        sage: BrandtModule(37).monodromy_weights()
        (1, 1, 1)
        sage: BrandtModule(43).monodromy_weights()
        (2, 1, 1, 1)
        sage: BrandtModule(7,10).monodromy_weights()
        (1, 1, 1, 2, 1, 1, 2, 1, 1, 1)
        sage: BrandtModule(5,13).monodromy_weights()
        (1, 3, 1, 1, 1, 3)
    """
    try:
        return self.__monodromy_weights
    except AttributeError:
        pass
    # The weights are read off the denominators of a suitably
    # normalized Eisenstein eigenvector over QQ.
    eis = self.eisenstein_subspace().basis()[0].element()
    if eis.base_ring() != QQ:
        eis = eis.change_ring(QQ)
    # Clear denominators so the entries are integral with gcd 1,
    # then divide by the lcm of the entries; the resulting entry
    # denominators are the monodromy weights.
    eis = eis * eis.denominator()
    eis = eis / lcm(list(eis))
    weights = tuple(entry.denominator() for entry in eis)
    self.__monodromy_weights = weights
    return weights
def quaternion_order_with_given_level(A, level):
    """
    Return an order in the quaternion algebra A with given level.
    (Implemented only when the base field is the rational numbers.)

    INPUT:

    - ``A`` -- a quaternion algebra over the rational numbers

    - ``level`` -- The level of the order to be returned. Currently this is only implemented
      when the level is divisible by at most one power of a prime that
      ramifies in this quaternion algebra.

    EXAMPLES::

        sage: from sage.modular.quatalg.brandt import quaternion_order_with_given_level, maximal_order
        sage: A.<i,j,k> = QuaternionAlgebra(5)
        sage: level = 2 * 5 * 17
        sage: O = quaternion_order_with_given_level(A, level)
        sage: M = maximal_order(A)
        sage: L = O.free_module()
        sage: N = M.free_module()
        sage: print L.index_in(N) == level/5  #check that the order has the right index in the maximal order
        True
    """
    if not is_RationalField(A.base_ring()):
        raise NotImplementedError("base field must be rational numbers")
    from sage.modular.quatalg.brandt import maximal_order
    if len(A.ramified_primes()) > 1:
        raise NotImplementedError("Currently this algorithm only works when the quaternion algebra is only ramified at one finite prime.")
    # (The algorithm we use is similar to that in Magma (by David Kohel).)
    # in the following magma code, M denotes is the level
    level = abs(level)
    N = A.discriminant()
    # Split the level into the ramified part N1 and the coprime part M1.
    N1 = gcd(level, N)
    M1 = level/N1

    O = maximal_order(A)
    # NOTE: deliberately disabled branch ("if 0 and ...") kept verbatim;
    # see the original comment below for why.
    if 0 and N1 != 1:  # we don't know why magma does the following, so we don't do it.
        for p in A.ramified_primes():
            if level % p**2 == 0:
                raise NotImplementedError("Currently sage can only compute orders whose level is divisible by at most one power of any prime that ramifies in the quaternion algebra")
        P = basis_for_left_ideal(O, [N1] + [x*y - y*x for x, y in cartesian_product_iterator([A.basis(), A.basis()]) ])
        O = A.quaternion_order(P)

    # For each prime power p^r exactly dividing M1, shrink O to an
    # order of index p^r by intersecting with a suitable left ideal.
    fact = factor(M1)
    B = O.basis()
    for (p, r) in fact:
        a = int((-p/2))
        # Search for an element x whose characteristic polynomial
        # splits mod p (discriminant a nonzero square).
        for v in GF(p)**4:
            x = sum([int(v[i]+a)*B[i] for i in range(4)])
            D = x.reduced_trace()**2 - 4 * x.reduced_norm()
            #x = O.random_element((-p/2).floor(), (p/2).ceil())
            if kronecker_symbol(D, p) == 1: break
        # a = a root mod p of the characteristic polynomial of x.
        X = PolynomialRing(GF(p), 'x').gen()
        a = ZZ((X**2 - ZZ(x.reduced_trace()) * X + ZZ(x.reduced_norm())).roots()[0][0])
        I = basis_for_left_ideal(O, [p**r, (x-a)**r] )
        O = right_order(O, I)  # right_order returns the RightOrder of I inside O, so we don't need to do another intersection
    return O
class BrandtSubmodule(HeckeSubmodule):
    def _repr_(self):
        """
        Return the string representation of this Brandt submodule.

        EXAMPLES::

            sage: BrandtModule(11)[0]._repr_()
            'Subspace of dimension 1 of Brandt module of dimension 2 of level 11 of weight 2 over Rational Field'
        """
        dim = self.dimension()
        ambient = self.ambient_module()
        return "Subspace of dimension %s of %s" % (dim, ambient)
class BrandtModuleElement(HeckeModuleElement):
    def __init__(self, parent, x):
        """
        Create an element of a Brandt module from a vector (or another
        Brandt module element), coercing into the parent's free module.

        EXAMPLES::

            sage: B = BrandtModule(37)
            sage: x = B([1,2,3]); x
            (1, 2, 3)
            sage: parent(x)
            Brandt module of dimension 3 of level 37 of weight 2 over Rational Field
        """
        if isinstance(x, BrandtModuleElement):
            # Unwrap another element to its underlying vector first.
            x = x.element()
        HeckeModuleElement.__init__(self, parent, parent.free_module()(x))

    def __cmp__(self, other):
        """
        Compare two Brandt module elements: first by parent, then by
        underlying vector.  (Python 2 only: uses the built-in ``cmp``.)

        EXAMPLES::

            sage: B = BrandtModule(13,5)
            sage: B.0
            (1, 0, 0, 0, 0, 0)
            sage: B.0 == B.1
            False
            sage: B.0 == 0
            False
            sage: B(0) == 0
            True
            sage: B.0 + 2*B.1 == 2*B.1 + B.0
            True
            sage: loads(dumps(B.0)) == B.0
            True
        """
        if not isinstance(other, BrandtModuleElement):
            # Try to coerce the other operand into our parent.
            other = self.parent()(other)
        else:
            c = cmp(self.parent(), other.parent())
            if c: return c
        return cmp(self.element(), other.element())

    def monodromy_pairing(self, x):
        """
        Return the monodromy pairing of self and x.

        The pairing is the weighted dot product of the coordinate
        vectors, with weights given by ``parent().monodromy_weights()``.

        EXAMPLES::

            sage: B = BrandtModule(5,13)
            sage: B.monodromy_weights()
            (1, 3, 1, 1, 1, 3)
            sage: (B.0 + B.1).monodromy_pairing(B.0 + B.1)
            4
        """
        B = self.parent()
        w = B.monodromy_weights()
        x = B(x).element()
        v = self.element()
        return sum(x[i]*v[i]*w[i] for i in range(len(v)))

    def __mul__(self, right):
        """
        Return the monodromy pairing of self and right.

        EXAMPLES::

            sage: B = BrandtModule(7,10)
            sage: B.monodromy_weights()
            (1, 1, 1, 2, 1, 1, 2, 1, 1, 1)
            sage: B.0 * B.0
            1
            sage: B.3 * B.3
            2
            sage: (B.0+B.3) * (B.0 + B.1 + 2*B.3)
            5
        """
        return self.monodromy_pairing(right)

    def _add_(self, right):
        """
        Return sum of self and right.

        EXAMPLES::

            sage: B = BrandtModule(11)
            sage: B.0 + B.1 # indirect doctest
            (1, 1)
        """
        return BrandtModuleElement(self.parent(), self.element() + right.element())

    def _sub_(self, right):
        """
        Return the difference of self and right.

        EXAMPLES::

            sage: B = BrandtModule(11)
            sage: B.0 - B.1 # indirect doctest
            (1, -1)
        """
        return BrandtModuleElement(self.parent(), self.element() - right.element())

    def _neg_(self):
        """
        Return the negation of self.

        EXAMPLES::

            sage: B = BrandtModule(11)
            sage: -B.0 # indirect doctest
            (-1, 0)
        """
        return BrandtModuleElement(self.parent(), -self.element())
#############################################################################
# Benchmarking
#############################################################################
def benchmark_magma(levels, silent=False):
    """
    Time the computation of the Hecke operator `T_2` on Brandt modules
    in Magma, for each of the given levels.

    INPUT:

    - levels -- list of pairs (p,M) where p is a prime not dividing M
    - silent -- bool, default False; if True suppress printing during computation

    OUTPUT:

    - list of 4-tuples ('magma', p, M, tm), where tm is the
      CPU time in seconds to compute T2 using Magma

    EXAMPLES::

        sage: a = sage.modular.quatalg.brandt.benchmark_magma([(11,1), (37,1), (43,1), (97,1)])  # optional - magma
        ('magma', 11, 1, ...)
        ('magma', 37, 1, ...)
        ('magma', 43, 1, ...)
        ('magma', 97, 1, ...)
        sage: a = sage.modular.quatalg.brandt.benchmark_magma([(11,2), (37,2), (43,2), (97,2)])  # optional - magma
        ('magma', 11, 2, ...)
        ('magma', 37, 2, ...)
        ('magma', 43, 2, ...)
        ('magma', 97, 2, ...)
    """
    ans = []
    from sage.interfaces.all import magma
    for p, M in levels:
        t = magma.cputime()
        # Ask Magma for T_2 on the Brandt module B(p, M); we only time it.
        magma.eval('HeckeOperator(BrandtModule(%s, %s),2)'%(p,M))
        tm = magma.cputime(t)
        v = ('magma', p, M, tm)
        if not silent:
            # print(v) works identically under Python 2 and 3 since v is
            # a single tuple (the old "print v" statement is Py2-only).
            print(v)
        ans.append(v)
    return ans
def benchmark_sage(levels, silent=False):
    """
    Time the computation of the Hecke operator `T_2` on Brandt modules
    in Sage, for each of the given levels.

    INPUT:

    - levels -- list of pairs (p,M) where p is a prime not dividing M
    - silent -- bool, default False; if True suppress printing during computation

    OUTPUT:

    - list of 4-tuples ('sage', p, M, tm), where tm is the
      CPU time in seconds to compute T2 using Sage

    EXAMPLES::

        sage: a = sage.modular.quatalg.brandt.benchmark_sage([(11,1), (37,1), (43,1), (97,1)])
        ('sage', 11, 1, ...)
        ('sage', 37, 1, ...)
        ('sage', 43, 1, ...)
        ('sage', 97, 1, ...)
        sage: a = sage.modular.quatalg.brandt.benchmark_sage([(11,2), (37,2), (43,2), (97,2)])
        ('sage', 11, 2, ...)
        ('sage', 37, 2, ...)
        ('sage', 43, 2, ...)
        ('sage', 97, 2, ...)
    """
    from sage.misc.all import cputime
    ans = []
    for p, M in levels:
        t = cputime()
        # Build the module without caching so the timing is honest.
        B = BrandtModule(p,M,use_cache=False).hecke_matrix(2)
        tm = cputime(t)
        v = ('sage', p, M, tm)
        if not silent:
            # print(v) works identically under Python 2 and 3 since v is
            # a single tuple (the old "print v" statement is Py2-only).
            print(v)
        ans.append(v)
    return ans
| 35.394785 | 182 | 0.555735 | r"""
Brandt Modules
AUTHORS:
- Jon Bober
- Alia Hamieh
- Victoria de Quehen
- William Stein
- Gonzalo Tornaria
Introduction
============
This tutorial outlines the construction of Brandt modules in Sage. The
importance of this construction is that it provides us with a method
to compute modular forms on `\Gamma_0(N)` as outlined in Pizer's paper
[Pi]. In fact there exists a non-canonical Hecke algebra isomorphism
between the Brandt modules and a certain subspace of
`S_{2}(\Gamma_0(pM))` which contains all the newforms.
The Brandt module is the free abelian group on right ideal classes of
a quaternion order together with a natural Hecke action determined by
Brandt matrices.
Quaternion Algebras
-------------------
A quaternion algebra over `\QQ` is a central simple algebra of
dimension 4 over `\QQ`. Such an algebra `A` is said to be
ramified at a place `v` of `\QQ` if and only if `A_v=A\otimes
\QQ_v` is a division algebra. Otherwise `A` is said to be split
at `v`.
``A = QuaternionAlgebra(p)`` returns the quaternion algebra `A` over
`\QQ` ramified precisely at the places `p` and `\infty`.
``A = QuaternionAlgebra(k,a,b)`` returns a quaternion algebra with basis
`\{1,i,j,k\}` over `\mathbb{K}` such that `i^2=a`, `j^2=b` and `ij=k`.
An order `R` in a quaternion algebra is a 4-dimensional lattice on `A`
which is also a subring containing the identity.
``R = A.maximal_order()`` returns a maximal order `R` in the quaternion
algebra `A.`
An Eichler order `\mathcal{O}` in a quaternion algebra is the
intersection of two maximal orders. The level of `\mathcal{O}` is its
index in any maximal order containing it.
``O = A.order_of_level_N`` returns an Eichler order `\mathcal{O}` in `A`
of level `N` where `p` does not divide `N`.
A right `\mathcal{O}`-ideal `I` is a lattice on `A` such that
`I_p=a_p\mathcal{O}` (for some `a_p\in A_p^*`) for all `p<\infty`. Two
right `\mathcal{O}`-ideals `I` and `J` are said to belong to the same
class if `I=aJ` for some `a \in A^*`. (Left `\mathcal{O}`-ideals are
defined in a similar fashion.)
The right order of `I` is defined to be the set of elements in `A`
which fix `I` under right multiplication.
right_order(R, basis) returns the right order of the ideal `I` in `R`, given a
basis for the ideal `I` contained in the maximal order `R`.
ideal_classes(self) returns a tuple of all right ideal classes in self
which, for the purpose of constructing the Brandt module B(p,M), is
taken to be an Eichler order of level M.
The implementation of this method is especially interesting. It
depends on the construction of a Hecke module defined as a free
abelian group on right ideal classes of a quaternion algebra with the
following action
.. math:
T_n[I]=\sum_{\phi} [J]
where `(n,pM)=1` and the sum is over cyclic `\mathcal{O}`-module
homomorphisms `\phi :I\rightarrow J ` of degree `n` up to isomorphism
of `J`. Equivalently one can sum over the inclusions of the submodules
`J \rightarrow n^{-1}I`. The rough idea is to start with the trivial
ideal class containing the order `\mathcal{O}` itself. Using the
method cyclic_submodules(self, I, p) one computes `T_p([\mathcal{O}])`
for some prime integer $p$ not dividing the level of the order
`\mathcal{O}`. Apply this method repeatedly and test for equivalence
among resulting ideals. A theorem of Serre asserts that one gets a
complete set of ideal class representatives after a finite number of
repetitions.
One can prove that two ideals `I` and `J` are equivalent if and only
if there exists an element `\alpha \in I \overline{J}` such
`N(\alpha)=N(I)N(J)`.
is_equivalent(I,J) returns true if `I` and `J` are equivalent. This
method first compares the theta series of `I` and `J`. If they are the
same, it computes the theta series of the lattice `I\overline(J)`. It
returns true if the `n^{th}` coefficient of this series is nonzero
where `n=N(J)N(I)`.
The theta series of a lattice `L` over the quaternion algebra `A` is
defined as
.. math:
\theta_L(q)=\sum_{x \in L} q^{\frac{N(x)}{N(L)}}
L.theta_series(T,q) returns a power series representing `\theta_L(q)`
up to a precision of `\mathcal{O}(q^{T+1})`.
Hecke Structure
---------------
The Hecke structure defined on the Brandt module is given by the
Brandt matrices which can be computed using the definition of the
Hecke operators given earlier.
hecke_matrix_from_defn (self,n) returns the matrix of the nth Hecke
operator `B_{0}(n)` acting on self, computed directly from the
definition.
However, one can efficiently compute Brandt matrices using theta
series. In fact, let {`I_{1},.....,I_{h}`} be a set of right
`\mathcal{O}`-ideal class representatives. The (i,j) entry in the
Brandt matrix `B_{0}(n)` is the product of the `n^{th}` coefficient in
the theta series of the lattice `I_{i}\overline{I_{j}}` and the first
coefficient in the theta series of the lattice
`I_{i}\overline{I_{i}}`.
compute_hecke_matrix_brandt(self,n) returns the nth Hecke matrix,
computed using theta series.
Example
-------
::
sage: B = BrandtModule(23)
sage: B.maximal_order()
Order of Quaternion Algebra (-1, -23) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
sage: B.right_ideals()
(Fractional ideal (2 + 2*j, 2*i + 2*k, 4*j, 4*k), Fractional ideal (2 + 2*j, 2*i + 6*k, 8*j, 8*k), Fractional ideal (2 + 10*j + 8*k, 2*i + 8*j + 6*k, 16*j, 16*k))
sage: B.hecke_matrix(2)
[1 2 0]
[1 1 1]
[0 3 0]
sage: B.brandt_series(3)
[1/4 + q + q^2 + O(q^3) 1/4 + q^2 + O(q^3) 1/4 + O(q^3)]
[ 1/2 + 2*q^2 + O(q^3) 1/2 + q + q^2 + O(q^3) 1/2 + 3*q^2 + O(q^3)]
[ 1/6 + O(q^3) 1/6 + q^2 + O(q^3) 1/6 + q + O(q^3)]
References
----------
- [Pi] Arnold Pizer, *An Algorithm for Computing Modular Forms on* `\Gamma_{0}(N)`
- [Ko] David Kohel, *Hecke Module Structure of Quaternions*
Further Examples
----------------
We decompose a Brandt module over both `\ZZ` and `\QQ`.::
sage: B = BrandtModule(43, base_ring=ZZ); B
Brandt module of dimension 4 of level 43 of weight 2 over Integer Ring
sage: D = B.decomposition()
sage: D
[
Subspace of dimension 1 of Brandt module of dimension 4 of level 43 of weight 2 over Integer Ring,
Subspace of dimension 1 of Brandt module of dimension 4 of level 43 of weight 2 over Integer Ring,
Subspace of dimension 2 of Brandt module of dimension 4 of level 43 of weight 2 over Integer Ring
]
sage: D[0].basis()
((0, 0, 1, -1),)
sage: D[1].basis()
((1, 2, 2, 2),)
sage: D[2].basis()
((1, 1, -1, -1), (0, 2, -1, -1))
sage: B = BrandtModule(43, base_ring=QQ); B
Brandt module of dimension 4 of level 43 of weight 2 over Rational Field
sage: B.decomposition()[2].basis()
((1, 0, -1/2, -1/2), (0, 1, -1/2, -1/2))
"""
################################################################################
# Sage: Open Source Mathematical Software
#
# Copyright (C) 2009 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
################################################################################
# imports
from sage.misc.all import prod, verbose
from sage.rings.all import (Integer, ZZ, QQ, prime_divisors,
kronecker, PolynomialRing, GF, next_prime,
lcm, gcd)
from sage.rings.commutative_ring import is_CommutativeRing
from sage.algebras.quatalg.quaternion_algebra import QuaternionAlgebra, basis_for_quaternion_lattice
from sage.algebras.quatalg.quaternion_algebra_cython import rational_matrix_from_rational_quaternions
from sage.rings.arith import gcd, factor, kronecker_symbol
from sage.modular.hecke.all import (AmbientHeckeModule, HeckeSubmodule, HeckeModuleElement)
from sage.matrix.all import MatrixSpace, matrix
from sage.rings.rational_field import is_RationalField
from sage.misc.mrange import cartesian_product_iterator
from sage.misc.cachefunc import cached_method
from copy import copy
cache = {}
def BrandtModule(N, M=1, weight=2, base_ring=QQ, use_cache=True):
    """
    Construct the Brandt module of the given weight attached to the
    prime N and auxiliary level M, where N and M are coprime.

    INPUT:

    - N -- a prime number

    - M -- an integer coprime to N (default: 1)

    - weight -- an integer that is at least 2 (default: 2)

    - base_ring -- the base ring (default: QQ)

    - use_cache -- whether to use the cache (default: True)

    OUTPUT:

    - a Brandt module

    EXAMPLES::

        sage: BrandtModule(17)
        Brandt module of dimension 2 of level 17 of weight 2 over Rational Field
        sage: BrandtModule(17,15)
        Brandt module of dimension 32 of level 17*15 of weight 2 over Rational Field
        sage: BrandtModule(3,7)
        Brandt module of dimension 2 of level 3*7 of weight 2 over Rational Field
        sage: BrandtModule(3,weight=2)
        Brandt module of dimension 1 of level 3 of weight 2 over Rational Field
        sage: BrandtModule(11, base_ring=ZZ)
        Brandt module of dimension 2 of level 11 of weight 2 over Integer Ring
        sage: BrandtModule(11, base_ring=QQbar)
        Brandt module of dimension 2 of level 11 of weight 2 over Algebraic Field

    The use_cache option determines whether the Brandt module returned
    by this function is cached.::

        sage: BrandtModule(37) is BrandtModule(37)
        True
        sage: BrandtModule(37,use_cache=False) is BrandtModule(37,use_cache=False)
        False

    TESTS:

    Note that N and M must be coprime::

        sage: BrandtModule(3,15)
        Traceback (most recent call last):
        ...
        ValueError: M must be coprime to N

    Only weight 2 is currently implemented::

        sage: BrandtModule(3,weight=4)
        Traceback (most recent call last):
        ...
        NotImplementedError: weight != 2 not yet implemented

    Brandt modules are cached::

        sage: B = BrandtModule(3,5,2,ZZ)
        sage: B is BrandtModule(3,5,2,ZZ)
        True
    """
    N = Integer(N)
    M = Integer(M)
    weight = Integer(weight)
    # Validate the arguments in the documented order, so the exact
    # exception types and messages of the doctests are preserved.
    if not N.is_prime():
        raise NotImplementedError("Brandt modules currently only implemented when N is a prime")
    if M < 1:
        raise ValueError("M must be positive")
    if gcd(M, N) != 1:
        raise ValueError("M must be coprime to N")
    if weight < 2:
        raise ValueError("weight must be at least 2")
    if not is_CommutativeRing(base_ring):
        raise TypeError("base_ring must be a commutative ring")
    key = (N, M, weight, base_ring)
    # Cache lookup happens before the weight != 2 check, exactly as in
    # the original control flow (weight > 2 modules are never cached).
    if use_cache and key in cache:
        return cache[key]
    if weight != 2:
        raise NotImplementedError("weight != 2 not yet implemented")
    B = BrandtModule_class(*key)
    if use_cache:
        cache[key] = B
    return B
def class_number(p, r, M):
    """
    Return the class number of an order of level N = p^r*M in the
    quaternion algebra over QQ ramified precisely at p and infinity.

    This implements Theorem 1.12 of [Pizer, 1980].

    INPUT:

    - p -- a prime

    - r -- an odd positive integer (default: 1)

    - M -- an integer coprime to q (default: 1)

    OUTPUT:

    Integer

    EXAMPLES::

        sage: sage.modular.quatalg.brandt.class_number(389,1,1)
        33
        sage: sage.modular.quatalg.brandt.class_number(389,1,2) # TODO -- right?
        97
        sage: sage.modular.quatalg.brandt.class_number(389,3,1) # TODO -- right?
        4892713
    """
    N = M * p**r
    D = prime_divisors(M)
    # Correction terms coming from elliptic points of orders 2 and 3;
    # they vanish when 4 | N (resp. 9 | N).
    correction4 = 0
    correction9 = 0
    if N % 4:
        correction4 = (1 - kronecker(-4, p)) / 4 * prod(1 + kronecker(-4, q) for q in D)
    if N % 9:
        correction9 = (1 - kronecker(-3, p)) / 3 * prod(1 + kronecker(-3, q) for q in D)
    # Mass formula main term plus the two corrections.
    mass = (N / Integer(12)) * (1 - 1/p) * prod(1 + 1/q for q in D)
    return Integer(mass + correction4 + correction9)
def maximal_order(A):
    """
    Return a maximal order in the quaternion algebra ramified at p and
    infinity, by delegating to the algebra's own maximal_order method.

    This corresponds to Proposition 5.2 of [Pizer, 1980].

    INPUT:

    - A -- quaternion algebra ramified precisely at p and infinity

    OUTPUT:

    - a maximal order in A

    EXAMPLES::

        sage: A = BrandtModule(17).quaternion_algebra()
        sage: sage.modular.quatalg.brandt.maximal_order(A)
        Order of Quaternion Algebra (-17, -3) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, -1/3*j - 1/3*k, k)

        sage: A = QuaternionAlgebra(17,names='i,j,k')
        sage: A.maximal_order()
        Order of Quaternion Algebra (-3, -17) with base ring Rational Field with basis (1/2 + 1/2*i, 1/2*j - 1/2*k, -1/3*i + 1/3*k, -k)
    """
    # The generic quaternion-algebra code already knows how to do this.
    return A.maximal_order()
def basis_for_left_ideal(R, gens):
    """
    Return a basis for the left ideal of R generated by the given
    elements.

    INPUT:

    - R -- quaternion order

    - gens -- list of elements of R

    OUTPUT:

    - list of four elements of R

    EXAMPLES::

        sage: B = BrandtModule(17); A = B.quaternion_algebra(); i,j,k = A.gens()
        sage: sage.modular.quatalg.brandt.basis_for_left_ideal(B.maximal_order(), [i+j,i-j,2*k,A(3)])
        [1/2 + 1/6*j + 2/3*k, 1/2*i + 1/2*k, 1/3*j + 1/3*k, k]
        sage: sage.modular.quatalg.brandt.basis_for_left_ideal(B.maximal_order(), [3*(i+j),3*(i-j),6*k,A(3)])
        [3/2 + 1/2*j + 2*k, 3/2*i + 3/2*k, j + k, 3*k]
    """
    # The left ideal is spanned (as a lattice) by all products b*g of an
    # order basis element with a generator; reduce that spanning set to
    # an actual lattice basis.
    products = []
    for b in R.basis():
        for g in gens:
            products.append(b * g)
    return basis_for_quaternion_lattice(products)
def right_order(R, basis):
    """
    Given a basis for a left ideal `I`, return the right order in the
    quaternion order `R` of elements `x` such that `I*x` is contained
    in `I`.

    INPUT:

    - R -- order in quaternion algebra

    - basis -- basis for an ideal I

    OUTPUT:

    - order in quaternion algebra

    EXAMPLES:

    We do a consistency check with the ideal equal to a maximal order.::

        sage: B = BrandtModule(17); basis = sage.modular.quatalg.brandt.basis_for_left_ideal(B.maximal_order(), B.maximal_order().basis())
        sage: sage.modular.quatalg.brandt.right_order(B.maximal_order(), basis)
        Order of Quaternion Algebra (-17, -3) with base ring Rational Field with basis (1/2 + 1/6*j + 2/3*k, 1/2*i + 1/2*k, 1/3*j + 1/3*k, k)
        sage: basis
        [1/2 + 1/6*j + 2/3*k, 1/2*i + 1/2*k, 1/3*j + 1/3*k, k]

        sage: B = BrandtModule(17); A = B.quaternion_algebra(); i,j,k = A.gens()
        sage: basis = sage.modular.quatalg.brandt.basis_for_left_ideal(B.maximal_order(), [i*j-j])
        sage: sage.modular.quatalg.brandt.right_order(B.maximal_order(), basis)
        Order of Quaternion Algebra (-17, -3) with base ring Rational Field with basis (1/2 + 1/2*i + 1/2*j + 17/2*k, i, j + 8*k, 9*k)
    """
    A = R.quaternion_algebra()
    M4 = MatrixSpace(QQ, 4)
    # Matrix whose rows are the coordinates of the given basis of I.
    ideal_mat = M4([list(f) for f in basis])
    # Start with the lattice spanned by the basis of R; then, for each
    # ideal basis element f, intersect with the image of I's row space
    # under the inverse of the matrix of x |-> f*x (written on the
    # algebra basis).  The intersection is the right order as a lattice.
    lattice = M4([list(b) for b in R.basis()]).row_module(ZZ)
    for f in basis:
        mul_mat = M4([list(f*x) for x in A.basis()])
        lattice = lattice.intersection((ideal_mat * mul_mat**(-1)).row_module(ZZ))
    gens = [A(list(b)) for b in lattice.basis()]
    return A.quaternion_order(gens)
class BrandtModule_class(AmbientHeckeModule):
    """
    A Brandt module: the free module on right ideal classes of an order
    in a quaternion algebra, equipped with a Hecke action.

    Use the ``BrandtModule`` factory function to construct (cached)
    instances of this class.

    EXAMPLES::

        sage: BrandtModule(3, 10)
        Brandt module of dimension 4 of level 3*10 of weight 2 over Rational Field
    """
    def __init__(self, N, M, weight, base_ring):
        """
        Initialize a Brandt module of level N*M and weight 2.

        INPUT:

        - N -- ramification number (a prime, coprime to M)

        - M -- auxiliary level

        - weight -- integer 2 (the only implemented weight)

        - base_ring -- the base ring

        EXAMPLES::

            sage: BrandtModule(3, 5, weight=2, base_ring=ZZ)
            Brandt module of dimension 2 of level 3*5 of weight 2 over Integer Ring
        """
        # Only weight 2 is implemented; the BrandtModule() factory
        # raises NotImplementedError before reaching this assert.
        assert weight == 2
        self.__N = N
        self.__M = M
        if not N.is_prime():
            raise NotImplementedError("right now N must be prime")
        # The rank of the module is the class number of an order of
        # level N*M (Pizer's formula -- see class_number above).
        rank = class_number(N, 1, M)
        self.__key = (N, M, weight, base_ring)
        AmbientHeckeModule.__init__(self, base_ring, rank, N * M, weight=2)
        # Coerce from the underlying free module, wrapping vectors as
        # BrandtModuleElement.
        self._populate_coercion_lists_(coerce_list=[self.free_module()], element_constructor=BrandtModuleElement)
def _submodule_class(self):
"""
Return the Python class of submodules of this ambient Brandt module.
EXAMPLES::
sage: BrandtModule(37)._submodule_class()
<class 'sage.modular.quatalg.brandt.BrandtSubmodule'>
"""
return BrandtSubmodule
def free_module(self):
"""
Return the underlying free module of the Brandt module.
EXAMPLES::
sage: B = BrandtModule(10007,389)
sage: B.free_module()
Vector space of dimension 325196 over Rational Field
"""
try: return self.__free_module
except AttributeError: pass
V = self.base_ring()**self.dimension()
self.__free_module = V
return V
def N(self):
"""
Return ramification level N.
EXAMPLES::
sage: BrandtModule(7,5,2,ZZ).N()
7
"""
return self.__N
def M(self):
"""
Return the auxiliary level (prime to p part) of the quaternion
order used to compute this Brandt module.
EXAMPLES::
sage: BrandtModule(7,5,2,ZZ).M()
5
"""
return self.__M
def _repr_(self):
"""
Return string representation of this Brandt module.
EXAMPLES::
sage: BrandtModule(7,5,2,ZZ)._repr_()
'Brandt module of dimension 4 of level 7*5 of weight 2 over Integer Ring'
"""
aux = '' if self.__M == 1 else '*%s'%self.__M
return "Brandt module of dimension %s of level %s%s of weight %s over %s"%(
self.rank(), self.__N, aux, self.weight(), self.base_ring())
def __cmp__(self, other):
r"""
Compare self to other.
EXAMPLES::
sage: BrandtModule(37, 5, 2, ZZ) == BrandtModule(37, 5, 2, QQ)
False
sage: BrandtModule(37, 5, 2, ZZ) == BrandtModule(37, 5, 2, ZZ)
True
sage: BrandtModule(37, 5, 2, ZZ) == loads(dumps(BrandtModule(37, 5, 2, ZZ)))
True
"""
if not isinstance(other, BrandtModule_class):
return cmp(type(self), type(other))
else:
return cmp( (self.__M, self.__N, self.weight(), self.base_ring()), (other.__M, other.__N, other.weight(), other.base_ring()))
def quaternion_algebra(self):
"""
Return the quaternion algebra A over QQ ramified precisely at
p and infinity used to compute this Brandt module.
EXAMPLES::
sage: BrandtModule(997).quaternion_algebra()
Quaternion Algebra (-2, -997) with base ring Rational Field
sage: BrandtModule(2).quaternion_algebra()
Quaternion Algebra (-1, -1) with base ring Rational Field
sage: BrandtModule(3).quaternion_algebra()
Quaternion Algebra (-1, -3) with base ring Rational Field
sage: BrandtModule(5).quaternion_algebra()
Quaternion Algebra (-2, -5) with base ring Rational Field
sage: BrandtModule(17).quaternion_algebra()
Quaternion Algebra (-17, -3) with base ring Rational Field
"""
try:
return self.__quaternion_algebra
except AttributeError:
pass
p = self.N()
assert p.is_prime(), "we have only implemented the prime case"
if p == 2:
QA = -1; QB = -1
elif p%4 == 3:
QA = -1; QB = -p
elif p%8 == 5:
QA = -2; QB = -p
elif p%8 == 1:
q = 3
while q%4 != 3 or kronecker(p, q) != -1:
q = next_prime(q)
QA = -p; QB = -q
A = QuaternionAlgebra(QQ, QA, QB)
self.__quaternion_algebra = A
return A
def maximal_order(self):
"""
Return a maximal order in the quaternion algebra associated to this Brandt module.
EXAMPLES::
sage: BrandtModule(17).maximal_order()
Order of Quaternion Algebra (-17, -3) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, -1/3*j - 1/3*k, k)
sage: BrandtModule(17).maximal_order() is BrandtModule(17).maximal_order()
True
"""
try: return self.__maximal_order
except AttributeError: pass
R = maximal_order(self.quaternion_algebra())
self.__maximal_order = R
return R
def order_of_level_N(self):
"""
Return Eichler order of level N = p^(2*r+1)*M in the quaternion algebra.
EXAMPLES::
sage: BrandtModule(7).order_of_level_N()
Order of Quaternion Algebra (-1, -7) with base ring Rational Field with basis (1/2 + 1/2*j, 1/2*i + 1/2*k, j, k)
sage: BrandtModule(7,13).order_of_level_N()
Order of Quaternion Algebra (-1, -7) with base ring Rational Field with basis (1/2 + 1/2*j + 12*k, 1/2*i + 9/2*k, j + 11*k, 13*k)
sage: BrandtModule(7,3*17).order_of_level_N()
Order of Quaternion Algebra (-1, -7) with base ring Rational Field with basis (1/2 + 1/2*j + 35*k, 1/2*i + 65/2*k, j + 19*k, 51*k)
"""
try: return self.__order_of_level_N
except AttributeError: pass
R = quaternion_order_with_given_level(self.quaternion_algebra(), self.level())
self.__order_of_level_N = R
return R
#@cached_method
def cyclic_submodules(self, I, p):
"""
Return a list of rescaled versions of the fractional right
ideals `J` such that `J` contains `I` and the quotient has
group structure the product of two cyclic groups of order `p`.
We emphasize again that `J` is rescaled to be integral.
INPUT:
I -- ideal I in R = self.order_of_level_N()
p -- prime p coprime to self.level()
OUTPUT:
list of the p+1 fractional right R-ideals that contain I
such that J/I is GF(p) x GF(p).
EXAMPLES::
sage: B = BrandtModule(11)
sage: I = B.order_of_level_N().unit_ideal()
sage: B.cyclic_submodules(I, 2)
[Fractional ideal (1/2 + 3/2*j + k, 1/2*i + j + 1/2*k, 2*j, 2*k),
Fractional ideal (1/2 + 1/2*i + 1/2*j + 1/2*k, i + k, j + k, 2*k),
Fractional ideal (1/2 + 1/2*j + k, 1/2*i + j + 3/2*k, 2*j, 2*k)]
sage: B.cyclic_submodules(I, 3)
[Fractional ideal (1/2 + 1/2*j, 1/2*i + 5/2*k, 3*j, 3*k),
Fractional ideal (1/2 + 3/2*j + 2*k, 1/2*i + 2*j + 3/2*k, 3*j, 3*k),
Fractional ideal (1/2 + 3/2*j + k, 1/2*i + j + 3/2*k, 3*j, 3*k),
Fractional ideal (1/2 + 5/2*j, 1/2*i + 1/2*k, 3*j, 3*k)]
sage: B.cyclic_submodules(I, 11)
Traceback (most recent call last):
...
ValueError: p must be coprime to the level
"""
if not Integer(p).is_prime():
raise ValueError("p must be a prime")
if self.level() % p == 0:
raise ValueError("p must be coprime to the level")
R = self.order_of_level_N()
A = R.quaternion_algebra()
B = R.basis()
V = GF(p)**4
# step 1: Compute alpha, beta, and the matrix of their action on I/pI.
# NOTE: Move this code to orders once we have it all working...
try:
alpha, beta = self.__cyclic_submodules[p]
compute = False
except AttributeError:
self.__cyclic_submodules = {}
compute = True
except KeyError:
compute = True
if compute:
d = R.free_module().basis_matrix().determinant()
S = None
for v in V:
if not v: continue
alpha = sum(Integer(v[i])*B[i] for i in range(4))
# If the quadratic polynomial over GF(p) given by
# X^2 - alpha.reduced_trace() * X + alpha.reduced_norm()
# is not irreducible, we try again with a new element.
if p == 2:
# special case p == 2, since there is a unique quadratic irreducible poly.
if alpha.reduced_trace()%2 == 0 or alpha.reduced_norm()%2 == 0:
continue
else:
# check if the discriminant is a square -- if so, poly is reducible
b = alpha.reduced_trace(); c = alpha.reduced_norm()
if kronecker(b*b - 4*c, p) != -1:
continue
for w in V:
if not w: continue
beta = sum(Integer(w[i])*B[i] for i in range(4))
v = [A(1), alpha, beta, alpha*beta]
M = rational_matrix_from_rational_quaternions(v)
e = M.determinant()
if e and (d/e).valuation(p) == 0:
S = A.quaternion_order(v)
break
if S is not None: break
self.__cyclic_submodules[p] = (alpha, beta)
# right multiplication by X changes something to be written
# in terms of the basis for I.
Y = I.basis_matrix()
X = Y**(-1)
# Compute the matrix of right multiplication by alpha acting on
# our fixed choice of basis for this ideal.
M_alpha = (matrix([(i*alpha).coefficient_tuple() for i in I.basis()]) * X).change_ring(GF(p)).change_ring(GF(p))
M_beta = (matrix([(i*beta).coefficient_tuple() for i in I.basis()]) * X).change_ring(GF(p)).change_ring(GF(p))
# step 2: Find j such that if f=I[j], then mod 2 we have span(I[0],alpha*I[i])
# has trivial intersection with span(I[j],alpha*I[j]).
#
# In terms of our matrices alpha, beta, we can now think of I/p*I
# as being the GF(p)^4 that M_alpha and M_beta naturally act on,
# and I[0], I[1], I[2], I[3] correspond to the standard basis.
#
# We try each of the standard basis vectors.
W0 = V.span([V.gen(0), V.gen(0)*M_alpha])
assert W0.dimension() == 2
j = None
for i in range(1,4):
Wi = V.span([V.gen(i), V.gen(i) * M_alpha])
if Wi.dimension() == 2 and W0.intersection(Wi).dimension() == 0:
j = i
break
assert j is not None, "bug -- couldn't find basis"
answer = []
f = V.gen(0)
g = V.gen(j)
M2_4 = MatrixSpace(GF(p),4)
M2_2 = MatrixSpace(QQ,2,4)
Yp = p*Y
from sage.algebras.quatalg.quaternion_algebra_cython import\
rational_quaternions_from_integral_matrix_and_denom
for v in [f + g*(a+b*M_alpha) for a in GF(p) for b in GF(p)] + [g]:
v0 = v
v1 = v*M_alpha
v2 = v*M_beta
v3 = v1*M_beta
W = M2_4([v0, v1, v2, v3], coerce=False)
if W.rank() == 2:
gen_mat = Yp.stack(M2_2([v0.lift()*Y, v1.lift()*Y], coerce=False))
gen_mat, d = gen_mat._clear_denom()
H = gen_mat._hnf_pari(0, include_zero_rows=False)
gens = tuple(rational_quaternions_from_integral_matrix_and_denom(A, H, d))
answer.append( R.right_ideal(gens, check=False) )
if len(answer) == p+1: break
return answer
    def hecke_matrix(self, n, algorithm='default', sparse=False, B=None):
        """
        Return the matrix of the n-th Hecke operator.

        INPUT:

        - `n` -- integer

        - ``algorithm`` -- string (default: 'default')

           - 'default' -- let Sage guess which algorithm is best

           - 'direct' -- use cyclic subideals (generally much
             better when you want few Hecke operators and the
             dimension is very large); uses 'theta' if n divides
             the level.

           - 'brandt' -- use Brandt matrices (generally much
             better when you want many Hecke operators and the
             dimension is very small; bad when the dimension
             is large)

        - ``sparse`` -- bool (default: False)

        - `B` -- integer or None (default: None); in direct algorithm,
          use theta series to this precision as an initial check for
          equality of ideal classes.

        EXAMPLES::

            sage: B = BrandtModule(3,7); B.hecke_matrix(2)
            [0 3]
            [1 2]
            sage: B.hecke_matrix(5, algorithm='brandt')
            [0 6]
            [2 4]
            sage: t = B.hecke_matrix(11, algorithm='brandt', sparse=True); t
            [ 6  6]
            [ 2 10]
            sage: type(t)
            <type 'sage.matrix.matrix_rational_sparse.Matrix_rational_sparse'>
            sage: B.hecke_matrix(19, algorithm='direct', B=2)
            [ 8 12]
            [ 4 16]
        """
        n = ZZ(n)
        if n <= 0:
            raise IndexError("n must be positive.")
        # Matrices are cached (immutable) in self._hecke_matrices.
        if n not in self._hecke_matrices:
            if algorithm == 'default':
                # If Brandt series are already computed to precision >= n,
                # the 'brandt' algorithm is essentially a table lookup.
                try: pr = len(self.__brandt_series_vectors[0][0])
                except (AttributeError, IndexError): pr = 0
                if n <= pr:
                    algorithm = 'brandt'
            if algorithm == 'default':
                algorithm = 'direct'
            # The direct (cyclic subideal) algorithm requires n coprime
            # to the level; otherwise fall back to Brandt matrices.
            if self.level().gcd(n) != 1:
                algorithm = 'brandt'
            if algorithm == 'direct':
                T = self._compute_hecke_matrix(n, sparse=sparse, B=B)
            elif algorithm == 'brandt':
                T = self._compute_hecke_matrix_brandt(n, sparse=sparse)
            else:
                raise ValueError("unknown algorithm '%s'"%algorithm)
            T.set_immutable()
            self._hecke_matrices[n] = T
        return self._hecke_matrices[n]
def _compute_hecke_matrix_prime(self, p, sparse=False, B=None):
"""
Return matrix of the `p`-th Hecke operator on self. The matrix
is always computed using the direct algorithm.
INPUT:
- `p` -- prime number
- `B` -- integer or None (default: None); in direct algorithm,
use theta series to this precision as an initial check for
equality of ideal classes.
- ``sparse`` -- bool (default: False); whether matrix should be sparse
EXAMPLES::
sage: B = BrandtModule(37)
sage: t = B._compute_hecke_matrix_prime(2); t
[1 1 1]
[1 0 2]
[1 2 0]
sage: type(t)
<type 'sage.matrix.matrix_rational_dense.Matrix_rational_dense'>
sage: type(B._compute_hecke_matrix_prime(2,sparse=True))
<type 'sage.matrix.matrix_rational_sparse.Matrix_rational_sparse'>
"""
return self._compute_hecke_matrix_directly(n=p,B=B,sparse=sparse)
def _compute_hecke_matrix_directly(self, n, B=None, sparse=False):
"""
Given an integer `n` coprime to the level, return the matrix of
the n-th Hecke operator on self, computed on our fixed basis
by directly using the definition of the Hecke action in terms
of fractional ideals.
INPUT:
- `n` -- integer, coprime to level
- ``sparse`` -- bool (default: False); whether matrix should be sparse
EXAMPLES::
sage: B = BrandtModule(37)
sage: t = B._compute_hecke_matrix_directly(2); t
[1 1 1]
[1 0 2]
[1 2 0]
sage: type(t)
<type 'sage.matrix.matrix_rational_dense.Matrix_rational_dense'>
sage: type(B._compute_hecke_matrix_directly(2,sparse=True))
<type 'sage.matrix.matrix_rational_sparse.Matrix_rational_sparse'>
You can't compute the Hecke operator for n not coprime to the level using this function::
sage: B._compute_hecke_matrix_directly(37)
Traceback (most recent call last):
...
ValueError: n must be coprime to the level
The generic function (which uses theta series) does work, though::
sage: B.hecke_matrix(37)
[1 0 0]
[0 0 1]
[0 1 0]
An example where the Hecke operator isn't symmetric.::
sage: B = BrandtModule(43)
sage: B._compute_hecke_matrix_directly(2)
[1 2 0 0]
[1 0 1 1]
[0 1 0 2]
[0 1 2 0]
sage: B._compute_hecke_matrix_brandt(2)
[1 2 0 0]
[1 0 1 1]
[0 1 0 2]
[0 1 2 0]
"""
level = self.level()
if gcd(n, level) != 1:
raise ValueError("n must be coprime to the level")
# For rigor it does not matter at all what bound we chose.
# This B is used only for the first phase of checking equality
# of ideals modulo equivalence -- we always provably check
# equivalence if the theta series are the same up to this
# bound.
if B is None:
B = self.dimension() // 2 + 5
T = copy(matrix(self.base_ring(), self.dimension(), sparse=sparse))
C = self.right_ideals()
theta_dict = self._theta_dict(B)
# I think the runtime of this algorithm is now dominated by
# computing theta series of ideals. The computation of
# cyclic submodules is a lower order term.
q = self._smallest_good_prime()
d = lcm([a.denominator() for a in self.order_of_level_N().basis()])
# TODO: temporary!! -- it's not sufficiently *optimized* to be
use_fast_alg = False
last_percent = 0
for r in range(len(C)):
percent_done = 100*r//len(C)
if percent_done != last_percent:
if percent_done%5 == 0:
verbose("percent done: %s"%percent_done)
last_percent = percent_done
if use_fast_alg:
v = C[r].cyclic_right_subideals(n)
else:
v = self.cyclic_submodules(C[r], n)
for J in v:
J_theta = tuple(J.theta_series_vector(B))
v = theta_dict[J_theta]
if len(v) == 1:
T[r,v[0]] += 1
else:
for i in v:
if C[i].is_equivalent(J, 0):
T[r,i] += 1
break
return T
@cached_method
def _theta_dict(self, B):
"""
Return a dictionary from theta series vectors of degree `B` to
list of integers `i`, where the key is the vector of
coefficients of the normalized theta series of the `i`th right
ideal, as indexed by ``self.right_ideals()``.
INPUT:
- `B` -- positive integer, precision of theta series vectors
OUTPUT:
- dictionary
EXAMPLES::
In this example the theta series determine the ideal classes::
sage: B = BrandtModule(5,11); B
Brandt module of dimension 4 of level 5*11 of weight 2 over Rational Field
sage: sorted(list(B._theta_dict(5).iteritems()))
[((1, 0, 0, 4, 0), [3]),
((1, 0, 0, 4, 2), [2]),
((1, 0, 2, 0, 6), [1]),
((1, 2, 4, 0, 6), [0])]
In this example, the theta series does not determine the ideal class::
sage: sorted(list(BrandtModule(37)._theta_dict(6).iteritems()))
[((1, 0, 2, 2, 6, 4), [1, 2]), ((1, 2, 2, 4, 2, 4), [0])]
"""
C = self.right_ideals()
theta_dict = {}
for i in range(len(C)):
I_theta = tuple(C[i].theta_series_vector(B))
if I_theta in theta_dict:
theta_dict[I_theta].append(i)
else:
theta_dict[I_theta] = [i]
return theta_dict
def _compute_hecke_matrix_brandt(self, n, sparse=False):
"""
Return the n-th hecke matrix, computed using Brandt matrices
(theta series).
When the n-th Hecke operator is requested, we computed theta
series to precision `2n+20`, since it only takes slightly
longer, and this means that any Hecke operator $T_m$ can
quickly be computed, for `m<2n+20`.
INPUT:
- n -- integer, coprime to level
- sparse -- bool (default: False); whether matrix should be sparse
EXAMPLES::
sage: B = BrandtModule(3,17)
sage: B._compute_hecke_matrix_brandt(3)
[0 1 0 0]
[1 0 0 0]
[0 0 0 1]
[0 0 1 0]
sage: B._compute_hecke_matrix_brandt(5)
[4 1 1 0]
[1 4 0 1]
[2 0 2 2]
[0 2 2 2]
sage: B._compute_hecke_matrix_brandt(5).fcp()
(x - 6) * (x - 3) * (x^2 - 3*x - 2)
"""
B = self._brandt_series_vectors()
if len(B[0][0]) <= n:
B = self._brandt_series_vectors(2*n+10)
m = len(B)
K = self.base_ring()
Bmat = copy(matrix(K, m, m, sparse=sparse))
for i in range(m):
for j in range(m):
Bmat[i,j] = K(B[j][i][n])
return Bmat
@cached_method
def _smallest_good_prime(self):
"""
Return the smallest prime number that does not divide the level.
EXAMPLES::
sage: BrandtModule(17,6)._smallest_good_prime()
5
"""
level = self.level()
p = ZZ(2)
while level % p == 0:
p = next_prime(p)
return p
    def right_ideals(self, B=None):
        """
        Return sorted tuple of representatives for the equivalence
        classes of right ideals in self.

        INPUT:

        - `B` -- integer or None (default: None); precision of the
          theta series used to pre-screen ideal equivalence.

        OUTPUT:

        - sorted tuple of fractional ideals

        EXAMPLES::

            sage: B = BrandtModule(23)
            sage: B.right_ideals()
            (Fractional ideal (2 + 2*j, 2*i + 2*k, 4*j, 4*k),
             Fractional ideal (2 + 2*j, 2*i + 6*k, 8*j, 8*k),
             Fractional ideal (2 + 10*j + 8*k, 2*i + 8*j + 6*k, 16*j, 16*k))

        TEST::

            sage: B = BrandtModule(1009)
            sage: Is = B.right_ideals()
            sage: n = len(Is)
            sage: prod(not Is[i].is_equivalent(Is[j]) for i in range(n) for j in range(i))
            1
        """
        try: return self.__right_ideals
        except AttributeError: pass
        p = self._smallest_good_prime()
        R = self.order_of_level_N()
        I = R.unit_ideal()
        # Rescale the unit ideal by 4 so that all ideals encountered in
        # the search below stay integral.
        I = R.right_ideal([4*x for x in I.basis()])
        if B is None:
            B = self.dimension() // 2 + 5
        ideals = [I]
        # Map theta-series key -> ideals with that theta series, used to
        # cheaply pre-screen equivalence before the provable test.
        ideals_theta = { tuple(I.theta_series_vector(B)) : [I] }
        new_ideals = [I]
        newly_computed_ideals = []
        got_something_new = True
        # Repeatedly apply T_p (via cyclic submodules) starting from the
        # trivial class; by a theorem of Serre this eventually produces
        # representatives of all ideal classes.
        while got_something_new:
            got_something_new = False
            newly_computed_ideals = []
            for I in new_ideals:
                L = self.cyclic_submodules(I, p)
                for J in L:
                    is_new = True
                    J_theta = tuple(J.theta_series_vector(B))
                    if J_theta in ideals_theta:
                        # Same theta series as a known ideal: run the
                        # provable equivalence check against each.
                        for K in ideals_theta[J_theta]:
                            if J.is_equivalent(K, 0):
                                is_new = False
                                break
                    if is_new:
                        newly_computed_ideals.append(J)
                        ideals.append(J)
                        if J_theta in ideals_theta:
                            ideals_theta[J_theta].append(J)
                        else:
                            ideals_theta[J_theta] = [J]
                        verbose("found %s of %s ideals"%(len(ideals), self.dimension()), level=2)
                        # Early exit: the class number (= dimension) is
                        # known in advance, so stop once it is reached.
                        if len(ideals) >= self.dimension():
                            ideals = tuple(sorted(ideals))
                            self.__right_ideals = ideals
                            return ideals
                        got_something_new = True
            new_ideals = list(newly_computed_ideals)
        ideals = tuple(sorted(ideals))
        self.__right_ideals = ideals
        return ideals
def _ideal_products(self):
"""
Return all products of right ideals, which are used in computing
the Brandt matrices.
This function is used internally by the Brandt matrices
algorithms.
OUTPUT:
- list of ideals
EXAMPLES::
sage: B = BrandtModule(37)
sage: B._ideal_products()
[[Fractional ideal (8 + 8*j + 8*k, 4*i + 8*j + 4*k, 16*j, 16*k)],
[Fractional ideal (8 + 24*j + 8*k, 4*i + 8*j + 4*k, 32*j, 32*k),
Fractional ideal (16 + 16*j + 48*k, 4*i + 8*j + 36*k, 32*j + 32*k, 64*k)],
[Fractional ideal (8 + 24*j + 24*k, 4*i + 24*j + 4*k, 32*j, 32*k),
Fractional ideal (8 + 4*i + 16*j + 28*k, 8*i + 16*j + 8*k, 32*j, 64*k),
Fractional ideal (16 + 16*j + 16*k, 4*i + 24*j + 4*k, 32*j + 32*k, 64*k)]]
"""
try:
return self.__ideal_products
except AttributeError:
pass
L = self.right_ideals()
n = len(L)
if n == 0:
return matrix(self.base_ring()[['q']], 0)
P = []
for i in range(n):
P.append([L[i].multiply_by_conjugate(L[j]) for j in range(i+1)])
self.__ideal_products = P
return P
    def _brandt_series_vectors(self, prec=None):
        """
        Return Brandt series coefficient vectors out to precision *at least* prec.

        INPUT:

        - prec -- integer >= 2, or None to return/initialize the cached result

        EXAMPLES::

            sage: B = BrandtModule(37, use_cache=False)
            sage: B._brandt_series_vectors(5)
            [[(1/2, 1, 1, 2, 1), (1/2, 0, 1, 1, 3), (1/2, 0, 1, 1, 3)],
            [(1/2, 0, 1, 1, 3), (1/2, 1, 0, 0, 3), (1/2, 0, 2, 3, 1)],
            [(1/2, 0, 1, 1, 3), (1/2, 0, 2, 3, 1), (1/2, 1, 0, 0, 3)]]

        If you have computed to higher precision and ask for a lower
        precision, the higher precision is still returned.::

            sage: B._brandt_series_vectors(2)
            [[(1/2, 1, 1, 2, 1), (1/2, 0, 1, 1, 3), (1/2, 0, 1, 1, 3)],
            [(1/2, 0, 1, 1, 3), (1/2, 1, 0, 0, 3), (1/2, 0, 2, 3, 1)],
            [(1/2, 0, 1, 1, 3), (1/2, 0, 2, 3, 1), (1/2, 1, 0, 0, 3)]]
        """
        if prec is None:
            try:
                # No precision requested: return whatever was cached before.
                return self.__brandt_series_vectors
            except AttributeError:
                prec = 2
        elif prec < 2:
            raise ValueError("prec must be at least 2")
        L = self.right_ideals()
        n = len(L)
        K = QQ  # NOTE(review): unused local; kept to avoid a behavior change.
        if n == 0:
            return [[]]
        try:
            # Reuse the cache if it already covers the requested precision.
            if len(self.__brandt_series_vectors[0][0]) >= prec:
                return self.__brandt_series_vectors
        except AttributeError: pass
        # theta[i][j] = theta series vector of the lower-triangular product
        # ideal computed by _ideal_products() (defined for j <= i).
        theta = [[I.theta_series_vector(prec) for I in x] for x in self._ideal_products()]
        # e[j] = coefficient of q in the j-th diagonal theta series; used to
        # normalize rows/columns below.
        e = [theta[j][j][1] for j in range(n)]
        B = [[0 for _ in range(n)] for _ in range(n)]
        for i in range(n):
            B[i][i] = theta[i][i]/e[i]
            for j in range(i):
                # The same product series theta[i][j] fills both off-diagonal
                # slots, each normalized by its own row's e value.
                B[j][i] = theta[i][j]/e[j]
                B[i][j] = theta[i][j]/e[i]
        self.__brandt_series_vectors = B
        return B
    def brandt_series(self, prec, var='q'):
        r"""
        Return matrix of power series `\sum T_n q^n` to the given
        precision.  Note that the Hecke operators in this series are
        always over `\QQ`, even if the base ring of this Brandt module
        is not `\QQ`.

        INPUT:

        - prec -- positive integer
        - var -- string (default: `q`)

        OUTPUT:

        matrix of power series with coefficients in `\QQ`

        EXAMPLES::

            sage: B = BrandtModule(11)
            sage: B.brandt_series(2)
            [1/4 + q + O(q^2)     1/4 + O(q^2)]
            [    1/6 + O(q^2) 1/6 + q + O(q^2)]
            sage: B.brandt_series(5)
            [1/4 + q + q^2 + 2*q^3 + 5*q^4 + O(q^5) 1/4 + 3*q^2 + 3*q^3 + 3*q^4 + O(q^5)]
            [ 1/6 + 2*q^2 + 2*q^3 + 2*q^4 + O(q^5)    1/6 + q + q^3 + 4*q^4 + O(q^5)]

        Asking for a smaller precision works.::

            sage: B.brandt_series(3)
            [1/4 + q + q^2 + O(q^3)  1/4 + 3*q^2 + O(q^3)]
            [ 1/6 + 2*q^2 + O(q^3)    1/6 + q + O(q^3)]
            sage: B.brandt_series(3,var='t')
            [1/4 + t + t^2 + O(t^3)  1/4 + 3*t^2 + O(t^3)]
            [ 1/6 + 2*t^2 + O(t^3)    1/6 + t + O(t^3)]
        """
        # The vectors may have been computed to higher precision; truncate
        # each coefficient list to exactly `prec` terms.
        A = self._brandt_series_vectors(prec)
        R = QQ[[var]]
        n = len(A[0])
        return matrix(R, n, n, [[R(x.list()[:prec],prec) for x in Y] for Y in A])
def eisenstein_subspace(self):
"""
Return the 1-dimensional subspace of self on which the Hecke
operators `T_p` act as `p+1` for `p` coprime to the level.
NOTE: This function assumes that the base field has
characteristic 0.
EXAMPLES::
sage: B = BrandtModule(11); B.eisenstein_subspace()
Subspace of dimension 1 of Brandt module of dimension 2 of level 11 of weight 2 over Rational Field
sage: B.eisenstein_subspace() is B.eisenstein_subspace()
True
sage: BrandtModule(3,11).eisenstein_subspace().basis()
((1, 1),)
sage: BrandtModule(7,10).eisenstein_subspace().basis()
((1, 1, 1, 1/2, 1, 1, 1/2, 1, 1, 1),)
sage: BrandtModule(7,10,base_ring=ZZ).eisenstein_subspace().basis()
((2, 2, 2, 1, 2, 2, 1, 2, 2, 2),)
"""
try: return self.__eisenstein_subspace
except AttributeError: pass
if self.base_ring().characteristic() != 0:
raise ValueError("characteristic must be 0")
V = self
p = Integer(2)
N = self.level()
while V.dimension() >= 2:
while N%p == 0:
p = p.next_prime()
A = V.T(p) - (p+1)
V = A.kernel()
self.__eisenstein_subspace = V
return V
def is_cuspidal(self):
r"""
Returns whether self is cuspidal, i.e. has no Eisenstein part.
EXAMPLES:
sage: B = BrandtModule(3, 4)
sage: B.is_cuspidal()
False
sage: B.eisenstein_subspace()
Brandt module of dimension 1 of level 3*4 of weight 2 over Rational Field
"""
return self.eisenstein_subspace().dimension() == 0
def monodromy_weights(self):
r"""
Return the weights for the monodromy pairing on this Brandt
module. The weights are associated to each ideal class in our
fixed choice of basis. The weight of an ideal class `[I]` is
half the number of units of the right order `I`.
NOTE: The base ring must be `\QQ` or `\ZZ`.
EXAMPLES::
sage: BrandtModule(11).monodromy_weights()
(2, 3)
sage: BrandtModule(37).monodromy_weights()
(1, 1, 1)
sage: BrandtModule(43).monodromy_weights()
(2, 1, 1, 1)
sage: BrandtModule(7,10).monodromy_weights()
(1, 1, 1, 2, 1, 1, 2, 1, 1, 1)
sage: BrandtModule(5,13).monodromy_weights()
(1, 3, 1, 1, 1, 3)
"""
try: return self.__monodromy_weights
except AttributeError: pass
e = self.eisenstein_subspace().basis()[0].element()
if e.base_ring() != QQ:
e = e.change_ring(QQ)
e = e * e.denominator()
e = e / lcm(list(e))
w = tuple([z.denominator() for z in e])
self.__monodromy_weights = w
return w
def quaternion_order_with_given_level(A, level):
    """
    Return an order in the quaternion algebra A with given level.
    (Implemented only when the base field is the rational numbers.)

    INPUT:

    level -- The level of the order to be returned. Currently this is only implemented
             when the level is divisible by at most one power of a prime that
             ramifies in this quaternion algebra.

    EXAMPLES::

        sage: from sage.modular.quatalg.brandt import quaternion_order_with_given_level, maximal_order
        sage: A.<i,j,k> = QuaternionAlgebra(5)
        sage: level = 2 * 5 * 17
        sage: O = quaternion_order_with_given_level(A, level)
        sage: M = maximal_order(A)
        sage: L = O.free_module()
        sage: N = M.free_module()
        sage: print L.index_in(N) == level/5 #check that the order has the right index in the maximal order
        True
    """
    if not is_RationalField(A.base_ring()):
        raise NotImplementedError("base field must be rational numbers")
    from sage.modular.quatalg.brandt import maximal_order
    if len(A.ramified_primes()) > 1:
        raise NotImplementedError("Currently this algorithm only works when the quaternion algebra is only ramified at one finite prime.")
    level = abs(level)
    # Split the level into its ramified part N1 (shared with the
    # discriminant) and the remaining unramified part M1.
    N = A.discriminant()
    N1 = gcd(level, N)
    M1 = level/N1  # NOTE(review): Python 2 `/`; under Python 3 this would be a float -- verify.
    O = maximal_order(A)
    # NOTE(review): dead code -- `if 0 and ...` disables this ramified-prime
    # sanity check unconditionally; presumably left here intentionally.
    if 0 and N1 != 1:
        for p in A.ramified_primes():
            if level % p**2 == 0:
                raise NotImplementedError("Currently sage can only compute orders whose level is divisible by at most one power of any prime that ramifies in the quaternion algebra")
    # Shrink O using a left ideal generated by N1 together with all
    # commutators of pairs of basis elements of A.
    P = basis_for_left_ideal(O, [N1] + [x*y - y*x for x, y in cartesian_product_iterator([A.basis(), A.basis()]) ])
    O = A.quaternion_order(P)
    # For each prime power p^r in the unramified part of the level, pass to
    # the right order of a suitable left ideal.
    fact = factor(M1)
    B = O.basis()
    for (p, r) in fact:
        a = int((-p/2))
        # Search for an element x whose characteristic polynomial has a
        # square discriminant mod p (kronecker symbol 1), i.e. splits mod p.
        for v in GF(p)**4:
            x = sum([int(v[i]+a)*B[i] for i in range(4)])
            D = x.reduced_trace()**2 - 4 * x.reduced_norm()
            if kronecker_symbol(D, p) == 1: break
        X = PolynomialRing(GF(p), 'x').gen()
        # a = a root (lifted to ZZ) of the characteristic polynomial of x mod p.
        a = ZZ((X**2 - ZZ(x.reduced_trace()) * X + ZZ(x.reduced_norm())).roots()[0][0])
        I = basis_for_left_ideal(O, [p**r, (x-a)**r] )
        O = right_order(O, I)
    return O
class BrandtSubmodule(HeckeSubmodule):
    def _repr_(self):
        """
        Return a string describing this Brandt submodule.

        EXAMPLES::

            sage: BrandtModule(11)[0]._repr_()
            'Subspace of dimension 1 of Brandt module of dimension 2 of level 11 of weight 2 over Rational Field'
        """
        return "Subspace of dimension %s of %s" % (self.dimension(),
                                                   self.ambient_module())
class BrandtModuleElement(HeckeModuleElement):
    def __init__(self, parent, x):
        """
        An element of a Brandt module, stored as a vector in the parent's
        free module.

        EXAMPLES::

            sage: B = BrandtModule(37)
            sage: x = B([1,2,3]); x
            (1, 2, 3)
            sage: parent(x)
            Brandt module of dimension 3 of level 37 of weight 2 over Rational Field
        """
        # Unwrap another BrandtModuleElement so coercion below receives
        # the underlying vector.
        if isinstance(x, BrandtModuleElement):
            x = x.element()
        HeckeModuleElement.__init__(self, parent, parent.free_module()(x))

    def __cmp__(self, other):
        """
        Three-way comparison.  NOTE: Python 2 only -- relies on the
        builtin ``cmp`` and the ``__cmp__`` protocol, both removed in
        Python 3.

        EXAMPLES::

            sage: B = BrandtModule(13,5)
            sage: B.0
            (1, 0, 0, 0, 0, 0)
            sage: B.0 == B.1
            False
            sage: B.0 == 0
            False
            sage: B(0) == 0
            True
            sage: B.0 + 2*B.1 == 2*B.1 + B.0
            True
            sage: loads(dumps(B.0)) == B.0
            True
        """
        if not isinstance(other, BrandtModuleElement):
            # Coerce non-elements (e.g. 0) into this module first.
            other = self.parent()(other)
        else:
            # Elements of different parents compare by parent first.
            c = cmp(self.parent(), other.parent())
            if c: return c
        return cmp(self.element(), other.element())

    def monodromy_pairing(self, x):
        """
        Return the monodromy pairing of self and x, i.e. the weighted dot
        product of the two coefficient vectors using the module's
        monodromy weights.

        EXAMPLES::

            sage: B = BrandtModule(5,13)
            sage: B.monodromy_weights()
            (1, 3, 1, 1, 1, 3)
            sage: (B.0 + B.1).monodromy_pairing(B.0 + B.1)
            4
        """
        B = self.parent()
        w = B.monodromy_weights()
        x = B(x).element()
        v = self.element()
        return sum(x[i]*v[i]*w[i] for i in range(len(v)))

    def __mul__(self, right):
        """
        Return the monodromy pairing of self and right.

        EXAMPLES::

            sage: B = BrandtModule(7,10)
            sage: B.monodromy_weights()
            (1, 1, 1, 2, 1, 1, 2, 1, 1, 1)
            sage: B.0 * B.0
            1
            sage: B.3 * B.3
            2
            sage: (B.0+B.3) * (B.0 + B.1 + 2*B.3)
            5
        """
        return self.monodromy_pairing(right)

    def _add_(self, right):
        """
        Return sum of self and right.

        EXAMPLES::

            sage: B = BrandtModule(11)
            sage: B.0 + B.1 # indirect doctest
            (1, 1)
        """
        return BrandtModuleElement(self.parent(), self.element() + right.element())

    def _sub_(self, right):
        """
        Return the difference of self and right.

        EXAMPLES::

            sage: B = BrandtModule(11)
            sage: B.0 - B.1 # indirect doctest
            (1, -1)
        """
        return BrandtModuleElement(self.parent(), self.element() - right.element())

    def _neg_(self):
        """
        Return the negation of self.

        EXAMPLES::

            sage: B = BrandtModule(11)
            sage: -B.0 # indirect doctest
            (-1, 0)
        """
        return BrandtModuleElement(self.parent(), -self.element())
#############################################################################
# Benchmarking
#############################################################################
def benchmark_magma(levels, silent=False):
    """
    Benchmark computation of the T2 Hecke operator on Brandt modules in Magma.

    INPUT:

    - levels -- list of pairs (p,M) where p is a prime not dividing M
    - silent -- bool, default False; if True suppress printing during computation

    OUTPUT:

    - list of 4-tuples ('magma', p, M, tm), where tm is the
      CPU time in seconds to compute T2 using Magma

    EXAMPLES::

        sage: a = sage.modular.quatalg.brandt.benchmark_magma([(11,1), (37,1), (43,1), (97,1)]) # optional - magma
        ('magma', 11, 1, ...)
        ('magma', 37, 1, ...)
        ('magma', 43, 1, ...)
        ('magma', 97, 1, ...)
        sage: a = sage.modular.quatalg.brandt.benchmark_magma([(11,2), (37,2), (43,2), (97,2)]) # optional - magma
        ('magma', 11, 2, ...)
        ('magma', 37, 2, ...)
        ('magma', 43, 2, ...)
        ('magma', 97, 2, ...)
    """
    ans = []
    from sage.interfaces.all import magma
    for p, M in levels:
        t = magma.cputime()
        magma.eval('HeckeOperator(BrandtModule(%s, %s),2)' % (p, M))
        tm = magma.cputime(t)
        v = ('magma', p, M, tm)
        if not silent:
            # print() with one argument behaves identically under Python 2's
            # print statement, and is required syntax under Python 3.
            print(v)
        ans.append(v)
    return ans
def benchmark_sage(levels, silent=False):
    """
    Benchmark computation of the T2 Hecke operator on Brandt modules in Sage.

    INPUT:

    - levels -- list of pairs (p,M) where p is a prime not dividing M
    - silent -- bool, default False; if True suppress printing during computation

    OUTPUT:

    - list of 4-tuples ('sage', p, M, tm), where tm is the
      CPU time in seconds to compute T2 using Sage

    EXAMPLES::

        sage: a = sage.modular.quatalg.brandt.benchmark_sage([(11,1), (37,1), (43,1), (97,1)])
        ('sage', 11, 1, ...)
        ('sage', 37, 1, ...)
        ('sage', 43, 1, ...)
        ('sage', 97, 1, ...)
        sage: a = sage.modular.quatalg.brandt.benchmark_sage([(11,2), (37,2), (43,2), (97,2)])
        ('sage', 11, 2, ...)
        ('sage', 37, 2, ...)
        ('sage', 43, 2, ...)
        ('sage', 97, 2, ...)
    """
    from sage.misc.all import cputime
    ans = []
    for p, M in levels:
        t = cputime()
        B = BrandtModule(p, M, use_cache=False).hecke_matrix(2)
        tm = cputime(t)
        v = ('sage', p, M, tm)
        if not silent:
            # print() with one argument behaves identically under Python 2's
            # print statement, and is required syntax under Python 3.
            print(v)
        ans.append(v)
    return ans
| false | true |
1c47fc4c54f24031a9b208b6e98961d9867c8432 | 5,229 | py | Python | desktop/libs/indexer/src/indexer/conf.py | 10088/hue | 802811941dabd015a4fd7a640d349f9d26ac5572 | [
"Apache-2.0"
] | null | null | null | desktop/libs/indexer/src/indexer/conf.py | 10088/hue | 802811941dabd015a4fd7a640d349f9d26ac5572 | [
"Apache-2.0"
] | null | null | null | desktop/libs/indexer/src/indexer/conf.py | 10088/hue | 802811941dabd015a4fd7a640d349f9d26ac5572 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
import logging
import os
import sys
from desktop.lib.conf import Config, coerce_bool
from libsolr import conf as libsolr_conf
from libzookeeper import conf as libzookeeper_conf
if sys.version_info[0] > 2:
from urllib.parse import urlparse
from django.utils.translation import gettext_lazy as _t
else:
from urlparse import urlparse
from django.utils.translation import ugettext_lazy as _t
LOG = logging.getLogger(__name__)
# Deprecated. Should be automatically guessed from Solr admin info API now.
def get_solr_ensemble():
  """Return the ZooKeeper ensemble string followed by the Solr ZK path."""
  ensemble = libzookeeper_conf.ENSEMBLE.get()
  solr_path = libsolr_conf.SOLR_ZK_PATH.get()
  return '%s%s' % (ensemble, solr_path)
def solrctl():
  """
  Return the path of the first ``solrctl`` executable found on PATH,
  or None when none exists.
  """
  search_dirs = os.environ.get('PATH', '').split(os.path.pathsep)
  for directory in search_dirs:
    candidate = os.path.join(directory, 'solrctl')
    if os.path.exists(candidate):
      return candidate
  return None
def zkensemble():
  """
  Return a best-guess ZooKeeper ensemble connection string for Solr.

  Tries the ZooKeeper app's configured default cluster first, then falls
  back to deriving a host from the Search app's Solr URL.  Returns None
  (implicitly) when neither source is usable.
  """
  try:
    from zookeeper.conf import CLUSTERS
    clusters = CLUSTERS.get()
    if clusters['default'].HOST_PORTS.get() != 'localhost:2181':
      return '%s/solr' % clusters['default'].HOST_PORTS.get()
  except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; any import/config failure just falls through to the next
    # strategy.
    LOG.warning('Failed to get Zookeeper ensemble')
  try:
    from search.conf import SOLR_URL
    parsed = urlparse(SOLR_URL.get())
    return "%s:2181/solr" % (parsed.hostname or 'localhost')
  except Exception:
    LOG.warning('Failed to get Solr url')
# ---- Feature flags and paths for the Hue indexer app ----

# Deprecated as always on
ENABLE_NEW_INDEXER = Config(
  key="enable_new_indexer",
  help=_t("Flag to turn on the new Solr indexer."),
  type=coerce_bool,
  default=True
)

ENABLE_SCALABLE_INDEXER = Config(
  key="enable_scalable_indexer",
  help=_t("Flag to turn on the Morphline Solr indexer."),
  type=coerce_bool,
  default=True
)

CONFIG_INDEXER_LIBS_PATH = Config(
  key="config_indexer_libs_path",
  help=_t("Filesystem directory containing Solr Morphline indexing libs."),
  type=str,
  default='/tmp/smart_indexer_lib'
)

CONFIG_JDBC_LIBS_PATH = Config(
  key="config_jdbc_libs_path",
  help=_t("Filesystem directory containing JDBC libs."),
  type=str,
  default='/user/oozie/libext/jdbc_drivers'
)

CONFIG_JARS_LIBS_PATH = Config(
  key="config_jars_libs_path",
  help=_t("Filesystem directory containing jars libs."),
  type=str,
  default='/user/oozie/libext/libs'
)

ENABLE_SQOOP = Config(
  key="enable_sqoop",
  help=_t("Flag to turn on Sqoop imports."),
  type=coerce_bool,
  default=True
)

ENABLE_KAFKA = Config(
  key="enable_kafka",
  help=_t("Flag to turn on Kafka imports."),
  type=coerce_bool,
  default=False
)

ENABLE_FIELD_EDITOR = Config(
  key="enable_field_editor",
  help=_t("Flag to turn on the SQL/Morphline field editor."),
  type=coerce_bool,
  default=False
)

ENABLE_ENVELOPE = Config(
  key="enable_envelope",
  help=_t("Flag to turn on Envelope based jobs."),
  type=coerce_bool,
  default=False
)

ENABLE_ALTUS = Config(
  key="enable_altus",
  help=_t("Flag to turn on Altus imports."),
  type=coerce_bool,
  default=False
)

ENABLE_DIRECT_UPLOAD = Config(
  key="enable_direct_upload",
  help=_t("Flag to turn on the direct upload of a small file."),
  type=coerce_bool,
  default=True
)

# Unused
BATCH_INDEXER_PATH = Config(
  key="batch_indexer_path",
  help=_t("Batch indexer path in HDFS."),
  type=str,
  default="/var/lib/search/search-mr-job.jar")

CORE_INSTANCE_DIR = Config(
  key="core_instance_dir",
  help=_t("Local path to Hue folder where Solr instance directories will be created in non-solrcloud mode."),
  type=str,
  default=os.path.join(os.path.dirname(__file__), '../data/collections'))

CONFIG_TEMPLATE_PATH = Config(
  key="config_template_path",
  help=_t("Default template used at collection creation."),
  type=str,
  default=os.path.join(os.path.dirname(__file__), '..', 'data', 'solrconfigs'))

# NOTE(review): the key name says "oozie workspace" while the constant is
# about indexing templates -- presumably kept for backward compatibility;
# confirm before renaming.
CONFIG_INDEXING_TEMPLATES_PATH = Config(
  key="config_oozie_workspace_path",
  help=_t("oozie workspace template for indexing:"),
  type=str,
  default=os.path.join(os.path.dirname(__file__), '..', 'data', 'oozie_workspace')
)
def config_morphline_path():
  """Return the local directory bundling the Morphline indexing data files."""
  here = os.path.dirname(__file__)
  return os.path.join(here, '..', 'data', 'morphline')
# Unused
SOLRCTL_PATH = Config(
  key="solrctl_path",
  help=_t("Location of the solrctl binary."),
  type=str,
  dynamic_default=solrctl)  # resolved lazily by scanning PATH

# Deprecated and not used anymore
SOLR_ZK_ENSEMBLE = Config(
  key="solr_zk_ensemble",
  help=_t("Zookeeper ensemble."),
  type=str,
  dynamic_default=zkensemble)  # resolved lazily from ZooKeeper/Search config
| 26.409091 | 109 | 0.733601 |
from future import standard_library
standard_library.install_aliases()
import logging
import os
import sys
from desktop.lib.conf import Config, coerce_bool
from libsolr import conf as libsolr_conf
from libzookeeper import conf as libzookeeper_conf
if sys.version_info[0] > 2:
from urllib.parse import urlparse
from django.utils.translation import gettext_lazy as _t
else:
from urlparse import urlparse
from django.utils.translation import ugettext_lazy as _t
LOG = logging.getLogger(__name__)
def get_solr_ensemble():
return '%s%s' % (libzookeeper_conf.ENSEMBLE.get(), libsolr_conf.SOLR_ZK_PATH.get())
def solrctl():
for dirname in os.environ.get('PATH', '').split(os.path.pathsep):
path = os.path.join(dirname, 'solrctl')
if os.path.exists(path):
return path
return None
def zkensemble():
try:
from zookeeper.conf import CLUSTERS
clusters = CLUSTERS.get()
if clusters['default'].HOST_PORTS.get() != 'localhost:2181':
return '%s/solr' % clusters['default'].HOST_PORTS.get()
except:
LOG.warning('Failed to get Zookeeper ensemble')
try:
from search.conf import SOLR_URL
parsed = urlparse(SOLR_URL.get())
return "%s:2181/solr" % (parsed.hostname or 'localhost')
except:
LOG.warning('Failed to get Solr url')
ENABLE_NEW_INDEXER = Config(
key="enable_new_indexer",
help=_t("Flag to turn on the new Solr indexer."),
type=coerce_bool,
default=True
)
ENABLE_SCALABLE_INDEXER = Config(
key="enable_scalable_indexer",
help=_t("Flag to turn on the Morphline Solr indexer."),
type=coerce_bool,
default=True
)
CONFIG_INDEXER_LIBS_PATH = Config(
key="config_indexer_libs_path",
help=_t("Filesystem directory containing Solr Morphline indexing libs."),
type=str,
default='/tmp/smart_indexer_lib'
)
CONFIG_JDBC_LIBS_PATH = Config(
key="config_jdbc_libs_path",
help=_t("Filesystem directory containing JDBC libs."),
type=str,
default='/user/oozie/libext/jdbc_drivers'
)
CONFIG_JARS_LIBS_PATH = Config(
key="config_jars_libs_path",
help=_t("Filesystem directory containing jars libs."),
type=str,
default='/user/oozie/libext/libs'
)
ENABLE_SQOOP = Config(
key="enable_sqoop",
help=_t("Flag to turn on Sqoop imports."),
type=coerce_bool,
default=True
)
ENABLE_KAFKA = Config(
key="enable_kafka",
help=_t("Flag to turn on Kafka imports."),
type=coerce_bool,
default=False
)
ENABLE_FIELD_EDITOR = Config(
key="enable_field_editor",
help=_t("Flag to turn on the SQL/Morphline field editor."),
type=coerce_bool,
default=False
)
ENABLE_ENVELOPE = Config(
key="enable_envelope",
help=_t("Flag to turn on Envelope based jobs."),
type=coerce_bool,
default=False
)
ENABLE_ALTUS = Config(
key="enable_altus",
help=_t("Flag to turn on Altus imports."),
type=coerce_bool,
default=False
)
ENABLE_DIRECT_UPLOAD = Config(
key="enable_direct_upload",
help=_t("Flag to turn on the direct upload of a small file."),
type=coerce_bool,
default=True
)
BATCH_INDEXER_PATH = Config(
key="batch_indexer_path",
help=_t("Batch indexer path in HDFS."),
type=str,
default="/var/lib/search/search-mr-job.jar")
CORE_INSTANCE_DIR = Config(
key="core_instance_dir",
help=_t("Local path to Hue folder where Solr instance directories will be created in non-solrcloud mode."),
type=str,
default=os.path.join(os.path.dirname(__file__), '../data/collections'))
CONFIG_TEMPLATE_PATH = Config(
key="config_template_path",
help=_t("Default template used at collection creation."),
type=str,
default=os.path.join(os.path.dirname(__file__), '..', 'data', 'solrconfigs'))
CONFIG_INDEXING_TEMPLATES_PATH = Config(
key="config_oozie_workspace_path",
help=_t("oozie workspace template for indexing:"),
type=str,
default=os.path.join(os.path.dirname(__file__), '..', 'data', 'oozie_workspace')
)
def config_morphline_path():
return os.path.join(os.path.dirname(__file__), '..', 'data', 'morphline')
SOLRCTL_PATH = Config(
key="solrctl_path",
help=_t("Location of the solrctl binary."),
type=str,
dynamic_default=solrctl)
SOLR_ZK_ENSEMBLE = Config(
key="solr_zk_ensemble",
help=_t("Zookeeper ensemble."),
type=str,
dynamic_default=zkensemble)
| true | true |
1c47fe26c578435ee60bd4af2a50e86627e1777a | 956 | py | Python | books/model/UserList.py | nudglabs/books-python-wrappers | 8844eca8fe681542644a70749b72a6dc4e48c171 | [
"MIT"
] | 9 | 2015-04-01T08:59:49.000Z | 2022-01-27T01:27:45.000Z | books/model/UserList.py | nudglabs/books-python-wrappers | 8844eca8fe681542644a70749b72a6dc4e48c171 | [
"MIT"
] | 3 | 2020-05-14T04:22:22.000Z | 2021-08-06T11:19:03.000Z | books/model/UserList.py | nudglabs/books-python-wrappers | 8844eca8fe681542644a70749b72a6dc4e48c171 | [
"MIT"
] | 11 | 2016-04-14T10:59:36.000Z | 2020-08-19T13:26:05.000Z | #$Id$
from books.model.PageContext import PageContext
class UserList:
    """Container for a page of User objects together with its page context."""

    def __init__(self):
        """Start with no users and a default page context."""
        self.users = []
        self.page_context = PageContext()

    def set_users(self, user):
        """Append a user to the list.

        Args:
            user(instance): User object.
        """
        self.users.append(user)

    def get_users(self):
        """Return the accumulated list of user objects."""
        return self.users

    def set_page_context(self, page_context):
        """Replace the page context.

        Args:
            page_context(instance): Page context object.
        """
        self.page_context = page_context

    def get_page_context(self):
        """Return the current page context object."""
        return self.page_context
| 19.916667 | 61 | 0.560669 |
from books.model.PageContext import PageContext
class UserList:
def __init__(self):
self.users = []
self.page_context = PageContext()
def set_users(self, user):
self.users.append(user)
def get_users(self):
return self.users
def set_page_context(self, page_context):
self.page_context = page_context
def get_page_context(self):
return self.page_context
| true | true |
1c47ff9b3f92607d61112ac9b4852bb244a3b137 | 2,539 | py | Python | env/Lib/site-packages/algorithmia_api_client/models/language.py | Vivek-Kamboj/Sargam | 0f3ca5c70ddb722dd40a45373abd0e9b3939064e | [
"MIT"
] | 2 | 2020-05-20T23:10:31.000Z | 2020-12-09T13:00:06.000Z | env/Lib/site-packages/algorithmia_api_client/models/language.py | Vivek-Kamboj/Sargam | 0f3ca5c70ddb722dd40a45373abd0e9b3939064e | [
"MIT"
] | 5 | 2021-04-25T08:16:09.000Z | 2022-03-12T00:42:14.000Z | env/Lib/site-packages/algorithmia_api_client/models/language.py | Vivek-Kamboj/Sargam | 0f3ca5c70ddb722dd40a45373abd0e9b3939064e | [
"MIT"
] | 1 | 2021-10-01T14:32:25.000Z | 2021-10-01T14:32:25.000Z | # coding: utf-8
"""
Algorithmia Management APIs
APIs for managing actions on the Algorithmia platform # noqa: E501
OpenAPI spec version: 1.0.1
Contact: support@algorithmia.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Language(object):
    """Enumeration-style model of supported algorithm languages.

    NOTE: originally auto generated by OpenAPI Generator
    (https://openapi-generator.tech); edit with care.
    """

    # Allowed enum values.
    JAVA = "java"
    JAVASCRIPT = "javascript"
    PYTHON2_LANGPACK = "python2-langpack"
    PYTHON3_1 = "python3-1"
    R = "r"
    RUBY = "ruby"
    RUST = "rust"
    SCALA = "scala"

    # Maps of attribute name -> type and attribute name -> JSON key.
    # Both empty: this model carries no serializable instance attributes.
    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self):  # noqa: E501
        """Language - a model defined in OpenAPI"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}

        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: val.to_dict() if hasattr(val, "to_dict") else val
                                for key, val in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal Languages."""
        return isinstance(other, Language) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 25.908163 | 74 | 0.550217 |
import pprint
import re
import six
class Language(object):
JAVA = "java"
JAVASCRIPT = "javascript"
PYTHON2_LANGPACK = "python2-langpack"
PYTHON3_1 = "python3-1"
R = "r"
RUBY = "ruby"
RUST = "rust"
SCALA = "scala"
openapi_types = {
}
attribute_map = {
}
def __init__(self):
self.discriminator = None
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Language):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c4800c6ae1522e0be0378191ffb0de791b183ba | 541 | py | Python | api/serializers.py | coder-chris-git/django-react-love_backend | 11b7366f6c3c4d9d3b72b145e100b26305c52128 | [
"MIT"
] | null | null | null | api/serializers.py | coder-chris-git/django-react-love_backend | 11b7366f6c3c4d9d3b72b145e100b26305c52128 | [
"MIT"
] | null | null | null | api/serializers.py | coder-chris-git/django-react-love_backend | 11b7366f6c3c4d9d3b72b145e100b26305c52128 | [
"MIT"
] | null | null | null | from django.db.models.base import Model
from rest_framework.fields import IntegerField
from rest_framework.relations import StringRelatedField
from .models import BoastAndRoastModel
from rest_framework.serializers import ModelSerializer,SlugRelatedField
class BoastAndRoastSerializer(ModelSerializer):
    """DRF serializer exposing BoastAndRoastModel posts over the REST API."""
    # NOTE(review): StringRelatedField serializes via str(); presumably used
    # here to render the timestamp fields as strings -- confirm against the
    # model's field types.
    date_created = StringRelatedField()
    last_updated = StringRelatedField()
    class Meta:
        model = BoastAndRoastModel
        # Fields exposed to API clients (read/write per DRF defaults).
        fields = ['id','post_type','body','upvote','downvote','date_created','last_updated','total_votes']
| 28.473684 | 102 | 0.796673 | from django.db.models.base import Model
from rest_framework.fields import IntegerField
from rest_framework.relations import StringRelatedField
from .models import BoastAndRoastModel
from rest_framework.serializers import ModelSerializer,SlugRelatedField
class BoastAndRoastSerializer(ModelSerializer):
date_created = StringRelatedField()
last_updated = StringRelatedField()
class Meta:
model = BoastAndRoastModel
fields = ['id','post_type','body','upvote','downvote','date_created','last_updated','total_votes']
| true | true |
1c4801010c3d4e1174d971d52ec35e804cc3f383 | 7,567 | py | Python | conf.py | Mozilla-GitHub-Standards/7c3e4fe1e3e9e45496cb7bc94f1ff8b3ec66eca2e358a0c7231317e53ec1f6bc | 057a68a7c76270de37df5e1829af2be047852d01 | [
"CC-BY-4.0"
] | 4 | 2016-01-10T19:09:11.000Z | 2019-10-01T16:24:33.000Z | conf.py | Mozilla-GitHub-Standards/7c3e4fe1e3e9e45496cb7bc94f1ff8b3ec66eca2e358a0c7231317e53ec1f6bc | 057a68a7c76270de37df5e1829af2be047852d01 | [
"CC-BY-4.0"
] | 13 | 2015-01-02T19:26:04.000Z | 2019-03-29T12:35:23.000Z | conf.py | Mozilla-GitHub-Standards/7c3e4fe1e3e9e45496cb7bc94f1ff8b3ec66eca2e358a0c7231317e53ec1f6bc | 057a68a7c76270de37df5e1829af2be047852d01 | [
"CC-BY-4.0"
] | 16 | 2015-01-01T16:32:37.000Z | 2020-08-18T19:21:41.000Z | # -*- coding: utf-8 -*-
#
# A-team Bootcamp documentation build configuration file, created by
# sphinx-quickstart on Thu May 5 14:21:14 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os

# Read the Docs exports READTHEDOCS=True in its build environment and
# injects its own theme; only force sphinx_rtd_theme for local builds.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extension modules (builtin 'sphinx.ext.*' or custom).
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'A-Team Bootcamp'
copyright = u'Mozilla. This work is licensed under a Creative Commons Attribution 4.0 International License'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3beta'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'README.rst']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# Render ".. todo::" directives in the built documentation.
todo_include_todos = True
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AteamBootcampdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'AteamBootcamp.tex', u'A-Team Bootcamp Documentation',
u'Mozilla Automation \& Tools', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ateambootcamp', u'A-team Bootcamp Documentation',
[u'Mozilla Automation \& Tools'], 1)
]
intersphinx_mapping = dict(
playdoh=('https://playdoh.readthedocs.io/en/latest/', None)
)
| 33.188596 | 108 | 0.722743 |
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'A-Team Bootcamp'
copyright = u'Mozilla. This work is licensed under a Creative Commons Attribution 4.0 International License'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Show todo items
todo_include_todos = True
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AteamBootcampdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'AteamBootcamp.tex', u'A-Team Bootcamp Documentation',
u'Mozilla Automation \& Tools', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ateambootcamp', u'A-team Bootcamp Documentation',
[u'Mozilla Automation \& Tools'], 1)
]
intersphinx_mapping = dict(
playdoh=('https://playdoh.readthedocs.io/en/latest/', None)
)
| true | true |
1c48015d7ae84e2a3dd6ece846052a7d5758efc6 | 2,174 | py | Python | chapter03/python/item_cf.py | coco-in-bluemoon/building-recommendation-engines | b337b2ba75b6c9b08612ab1720a2858e64e9de09 | [
"MIT"
] | null | null | null | chapter03/python/item_cf.py | coco-in-bluemoon/building-recommendation-engines | b337b2ba75b6c9b08612ab1720a2858e64e9de09 | [
"MIT"
] | null | null | null | chapter03/python/item_cf.py | coco-in-bluemoon/building-recommendation-engines | b337b2ba75b6c9b08612ab1720a2858e64e9de09 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
# 1. load dataset
ratings = pd.read_csv('chapter02/data/movie_rating.csv')
movie_ratings = pd.pivot_table(
ratings,
values='rating',
index='title',
columns='critic'
)
# 2. calculate similarity
def calcualte_norm(u):
norm_u = 0.0
for ui in u:
if np.isnan(ui):
continue
norm_u += (ui ** 2)
return np.sqrt(norm_u)
def calculate_cosine_similarity(u, v):
norm_u = calcualte_norm(u)
norm_v = calcualte_norm(v)
denominator = norm_u * norm_v
numerator = 0.0
for ui, vi in zip(u, v):
if np.isnan(ui) or np.isnan(vi):
continue
numerator += (ui * vi)
similarity = numerator / denominator
return similarity
titles = movie_ratings.index
sim_items = pd.DataFrame(0, columns=titles, index=titles, dtype=float)
for src in titles:
for dst in titles:
src_vec = movie_ratings.loc[src, :].values
dst_vec = movie_ratings.loc[dst, :].values
similarity = calculate_cosine_similarity(src_vec, dst_vec)
sim_items.loc[src, dst] = similarity
print(sim_items)
# 3. Make Prediction & Recommendation
user_id = 5
ratings_critic = movie_ratings.loc[:, [movie_ratings.columns[user_id]]]
ratings_critic.columns = ['rating']
titles_na_critic = ratings_critic[pd.isna(ratings_critic.rating)].index
ratings_t = ratings.loc[ratings.critic == movie_ratings.columns[user_id]]
ratings_t = ratings_t.reset_index(drop=True)
x = sim_items.loc[:, titles_na_critic]
ratings_t = pd.merge(ratings_t, x, on='title')
print(ratings_t)
result_dict = {'title': list(), 'rating': list(), 'similarity': list()}
for row in ratings_t.iterrows():
for title in titles_na_critic:
result_dict['title'].append(title)
result_dict['rating'].append(row[1]['rating'])
result_dict['similarity'].append(row[1][title])
result = pd.DataFrame(result_dict)
result.loc[:, 'sim_rating'] = result.rating * result.similarity
result = result.groupby('title').sum()
result.loc[:, 'prediction'] = result.sim_rating / result.similarity
result = result.drop(columns=['rating', 'similarity', 'sim_rating'])
print(result)
| 26.839506 | 73 | 0.684913 | import numpy as np
import pandas as pd
ratings = pd.read_csv('chapter02/data/movie_rating.csv')
movie_ratings = pd.pivot_table(
ratings,
values='rating',
index='title',
columns='critic'
)
def calcualte_norm(u):
norm_u = 0.0
for ui in u:
if np.isnan(ui):
continue
norm_u += (ui ** 2)
return np.sqrt(norm_u)
def calculate_cosine_similarity(u, v):
norm_u = calcualte_norm(u)
norm_v = calcualte_norm(v)
denominator = norm_u * norm_v
numerator = 0.0
for ui, vi in zip(u, v):
if np.isnan(ui) or np.isnan(vi):
continue
numerator += (ui * vi)
similarity = numerator / denominator
return similarity
titles = movie_ratings.index
sim_items = pd.DataFrame(0, columns=titles, index=titles, dtype=float)
for src in titles:
for dst in titles:
src_vec = movie_ratings.loc[src, :].values
dst_vec = movie_ratings.loc[dst, :].values
similarity = calculate_cosine_similarity(src_vec, dst_vec)
sim_items.loc[src, dst] = similarity
print(sim_items)
user_id = 5
ratings_critic = movie_ratings.loc[:, [movie_ratings.columns[user_id]]]
ratings_critic.columns = ['rating']
titles_na_critic = ratings_critic[pd.isna(ratings_critic.rating)].index
ratings_t = ratings.loc[ratings.critic == movie_ratings.columns[user_id]]
ratings_t = ratings_t.reset_index(drop=True)
x = sim_items.loc[:, titles_na_critic]
ratings_t = pd.merge(ratings_t, x, on='title')
print(ratings_t)
result_dict = {'title': list(), 'rating': list(), 'similarity': list()}
for row in ratings_t.iterrows():
for title in titles_na_critic:
result_dict['title'].append(title)
result_dict['rating'].append(row[1]['rating'])
result_dict['similarity'].append(row[1][title])
result = pd.DataFrame(result_dict)
result.loc[:, 'sim_rating'] = result.rating * result.similarity
result = result.groupby('title').sum()
result.loc[:, 'prediction'] = result.sim_rating / result.similarity
result = result.drop(columns=['rating', 'similarity', 'sim_rating'])
print(result)
| true | true |
1c48031ea57f3f19e9314a5f6ec8871aefc6ec8a | 3,084 | py | Python | mainscenemaker-2015/sku_enc.py | RN-JK/Ubiart-Tape-Serializer | 879bfe27b11c290e5653dac8735ddba322bb5716 | [
"MIT"
] | null | null | null | mainscenemaker-2015/sku_enc.py | RN-JK/Ubiart-Tape-Serializer | 879bfe27b11c290e5653dac8735ddba322bb5716 | [
"MIT"
] | null | null | null | mainscenemaker-2015/sku_enc.py | RN-JK/Ubiart-Tape-Serializer | 879bfe27b11c290e5653dac8735ddba322bb5716 | [
"MIT"
] | null | null | null | import os, struct, json, zlib, shutil
print("SKUSCENE ENCRYPTOR BY: JACKLSUMMER15")
with open("input.json") as f:
sku=json.load(f)
mapnames=sku[0]["songs"]
map_count=0
try:
os.mkdir('output')
except:
pass
skuenc=open("output/skuscene_maps_pc_all.isc.ckd","wb")
skudb="skuscene_db"
skubasetpl="skuscene_base.tpl"
skubasepath="world/skuscenes/"
skuenc.write(b'\x00\x00\x00\x01\x00\x02\x6C\xD2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
skuenc.write(struct.pack(">I",1+len(mapnames)))
# skuscene base & skuscene db
skuenc.write(b'\x97\xCA\x62\x8B\x00\x00\x00\x00\x3F\x80\x00\x00\x3F\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0B\x73\x6B\x75\x73\x63\x65\x6E\x65\x5F\x64\x62\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00\x00\x00\x00\x11\x73\x6B\x75\x73\x63\x65\x6E\x65\x5F\x62\x61\x73\x65\x2E\x74\x70\x6C\x00\x00\x00\x10\x77\x6F\x72\x6C\x64\x2F\x73\x6B\x75\x73\x63\x65\x6E\x65\x73\x2F\x0C\x1C\x9B\x77\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x40\x55\x79\xFB')
for i, mapname in enumerate(mapnames,1):
mapnamelow=mapname.lower()
print("adding "+mapname+"...")
songdesctpl="songdesc.tpl"
songdescpath="world/jd2015/"+mapnamelow+"/"
skuenc.write(b'\x97\xCA\x62\x8B\x00\x00\x00\x00\x3F\x80\x00\x00\x3F\x80\x00\x00\x00\x00\x00\x00')
skuenc.write(struct.pack(">I",len(mapname))+mapname.encode())
skuenc.write(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00')
skuenc.write(struct.pack(">I",len(songdesctpl))+songdesctpl.encode())
skuenc.write(struct.pack(">I",len(songdescpath))+songdescpath.encode()+struct.pack("<I",zlib.crc32(songdescpath.encode())))
skuenc.write(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xE0\x7F\xCC\x3F')
map_count+=1
skuenc.write(b'\x00\x00\x00\x00\x00\x00\x00\x00')
skuenc.write(b'\x00\x00\x00\x01\xF8\x78\xDC\x2D'+struct.pack(">I",len(sku[0]["sku"]))+sku[0]["sku"].encode()+struct.pack(">I",len(sku[0]["territory"]))+sku[0]["territory"].encode()+b'\x00\x00\x00\x13\x72\x61\x74\x69\x6E\x67\x5F\x65\x73\x72\x62\x5F\x31\x36\x39\x2E\x69\x73\x63\x00\x00\x00\x2D\x77\x6F\x72\x6C\x64\x2F\x6A\x64\x32\x30\x31\x35\x2F\x5F\x75\x69\x2F\x73\x63\x72\x65\x65\x6E\x73\x2F\x62\x6F\x6F\x74\x73\x65\x71\x75\x65\x6E\x63\x65\x2F\x72\x61\x74\x69\x6E\x67\x2F\xF8\x41\x4C\x62\x00\x00\x00\x00')
skuenc.write(struct.pack(">I",len(mapnames)))
for i, mapname in enumerate(mapnames,1):
mapnamelow=mapname.lower()
covergenericact=mapnamelow+"_cover_generic.act"
menuartpath="world/jd2015/"+mapnamelow+"/menuart/actors/"
skuenc.write(struct.pack(">I",len(mapname))+mapname.encode())
skuenc.write(struct.pack(">I",len(covergenericact))+covergenericact.encode())
skuenc.write(struct.pack(">I",len(menuartpath))+menuartpath.encode()+struct.pack("<I",zlib.crc32(menuartpath.encode())))
skuenc.write(b'\x00\x00\x00\x00\x00\x00\x00\x00')
map_count+=1
skuenc.write(b'\x00\x00\x00\x00')
skuenc.close()
| 39.538462 | 514 | 0.703956 | import os, struct, json, zlib, shutil
print("SKUSCENE ENCRYPTOR BY: JACKLSUMMER15")
with open("input.json") as f:
sku=json.load(f)
mapnames=sku[0]["songs"]
map_count=0
try:
os.mkdir('output')
except:
pass
skuenc=open("output/skuscene_maps_pc_all.isc.ckd","wb")
skudb="skuscene_db"
skubasetpl="skuscene_base.tpl"
skubasepath="world/skuscenes/"
skuenc.write(b'\x00\x00\x00\x01\x00\x02\x6C\xD2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
skuenc.write(struct.pack(">I",1+len(mapnames)))
skuenc.write(b'\x97\xCA\x62\x8B\x00\x00\x00\x00\x3F\x80\x00\x00\x3F\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0B\x73\x6B\x75\x73\x63\x65\x6E\x65\x5F\x64\x62\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00\x00\x00\x00\x11\x73\x6B\x75\x73\x63\x65\x6E\x65\x5F\x62\x61\x73\x65\x2E\x74\x70\x6C\x00\x00\x00\x10\x77\x6F\x72\x6C\x64\x2F\x73\x6B\x75\x73\x63\x65\x6E\x65\x73\x2F\x0C\x1C\x9B\x77\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x40\x55\x79\xFB')
for i, mapname in enumerate(mapnames,1):
mapnamelow=mapname.lower()
print("adding "+mapname+"...")
songdesctpl="songdesc.tpl"
songdescpath="world/jd2015/"+mapnamelow+"/"
skuenc.write(b'\x97\xCA\x62\x8B\x00\x00\x00\x00\x3F\x80\x00\x00\x3F\x80\x00\x00\x00\x00\x00\x00')
skuenc.write(struct.pack(">I",len(mapname))+mapname.encode())
skuenc.write(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00')
skuenc.write(struct.pack(">I",len(songdesctpl))+songdesctpl.encode())
skuenc.write(struct.pack(">I",len(songdescpath))+songdescpath.encode()+struct.pack("<I",zlib.crc32(songdescpath.encode())))
skuenc.write(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xE0\x7F\xCC\x3F')
map_count+=1
skuenc.write(b'\x00\x00\x00\x00\x00\x00\x00\x00')
skuenc.write(b'\x00\x00\x00\x01\xF8\x78\xDC\x2D'+struct.pack(">I",len(sku[0]["sku"]))+sku[0]["sku"].encode()+struct.pack(">I",len(sku[0]["territory"]))+sku[0]["territory"].encode()+b'\x00\x00\x00\x13\x72\x61\x74\x69\x6E\x67\x5F\x65\x73\x72\x62\x5F\x31\x36\x39\x2E\x69\x73\x63\x00\x00\x00\x2D\x77\x6F\x72\x6C\x64\x2F\x6A\x64\x32\x30\x31\x35\x2F\x5F\x75\x69\x2F\x73\x63\x72\x65\x65\x6E\x73\x2F\x62\x6F\x6F\x74\x73\x65\x71\x75\x65\x6E\x63\x65\x2F\x72\x61\x74\x69\x6E\x67\x2F\xF8\x41\x4C\x62\x00\x00\x00\x00')
skuenc.write(struct.pack(">I",len(mapnames)))
for i, mapname in enumerate(mapnames,1):
mapnamelow=mapname.lower()
covergenericact=mapnamelow+"_cover_generic.act"
menuartpath="world/jd2015/"+mapnamelow+"/menuart/actors/"
skuenc.write(struct.pack(">I",len(mapname))+mapname.encode())
skuenc.write(struct.pack(">I",len(covergenericact))+covergenericact.encode())
skuenc.write(struct.pack(">I",len(menuartpath))+menuartpath.encode()+struct.pack("<I",zlib.crc32(menuartpath.encode())))
skuenc.write(b'\x00\x00\x00\x00\x00\x00\x00\x00')
map_count+=1
skuenc.write(b'\x00\x00\x00\x00')
skuenc.close()
| true | true |
1c48041a87f8e551a67a3f06553241f52dcb0066 | 32 | py | Python | venv/Lib/site-packages/pdoc/test/example_pkg/_relative_import/__init__.py | StavromularBeta/Rover | 3030f1521e5a6bc2c6722983ca59a008b3a11400 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pdoc/test/example_pkg/_relative_import/__init__.py | StavromularBeta/Rover | 3030f1521e5a6bc2c6722983ca59a008b3a11400 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pdoc/test/example_pkg/_relative_import/__init__.py | StavromularBeta/Rover | 3030f1521e5a6bc2c6722983ca59a008b3a11400 | [
"MIT"
] | 1 | 2021-02-22T13:55:32.000Z | 2021-02-22T13:55:32.000Z | from . import foo # noqa: F401
| 16 | 31 | 0.65625 | from . import foo
| true | true |
1c4804828254777e5f84787506bd650e8ff713f5 | 785 | py | Python | invenio_pure_sync/tests/run_process.py | utnapischtim/pure_sync_rdm | 9d465d4f1a2410ba83d09ca691c1655e7daaf113 | [
"MIT"
] | null | null | null | invenio_pure_sync/tests/run_process.py | utnapischtim/pure_sync_rdm | 9d465d4f1a2410ba83d09ca691c1655e7daaf113 | [
"MIT"
] | null | null | null | invenio_pure_sync/tests/run_process.py | utnapischtim/pure_sync_rdm | 9d465d4f1a2410ba83d09ca691c1655e7daaf113 | [
"MIT"
] | null | null | null | import os
command = '/home/bootcamp/.local/share/virtualenvs/pure_sync_rdm-fOqjLk38/bin/python /home/bootcamp/src/pure_sync_rdm/invenio_pure_sync/cli.py '
# os.system(command + 'pages --pageStart=1 --pageEnd=2 --pageSize=1')
os.system(command + 'pages --pageStart=7 --pageEnd=8 --pageSize=2')
# os.system(command + "group_split --oldGroup=2267 --newGroups='13866 19428'")
# os.system(command + "group_merge --oldGroups='13866 19428' --newGroup=2267")
# os.system(command + 'logs')
# os.system(command + 'owners_list')
# os.system(command + "owner --identifier='externalId'")
# os.system(command + 'changes')
# os.system(command + 'pure_import')
# os.system(command + 'delete')
# os.system(command + 'uuid')
# os.system(command + 'duplicates')
# os.system(command + 'owner_orcid')
| 41.315789 | 144 | 0.713376 | import os
command = '/home/bootcamp/.local/share/virtualenvs/pure_sync_rdm-fOqjLk38/bin/python /home/bootcamp/src/pure_sync_rdm/invenio_pure_sync/cli.py '
os.system(command + 'pages --pageStart=7 --pageEnd=8 --pageSize=2')
| true | true |
1c48057c53b2997b5f312ffc1cc0ad73366dcfdd | 8,528 | py | Python | spira/lpe/structure.py | cloudcalvin/spira | 2dcaef188f2bc8c3839e1b5ff0be027e0cd4908c | [
"MIT"
] | null | null | null | spira/lpe/structure.py | cloudcalvin/spira | 2dcaef188f2bc8c3839e1b5ff0be027e0cd4908c | [
"MIT"
] | 1 | 2021-10-17T10:18:04.000Z | 2021-10-17T10:18:04.000Z | spira/lpe/structure.py | cloudcalvin/spira | 2dcaef188f2bc8c3839e1b5ff0be027e0cd4908c | [
"MIT"
] | null | null | null | import spira
from spira import param
from spira import shapes
from spira.lpe.layers import *
from spira.lrc.rules import *
from spira.lrc.checking import Rules
from spira.lpe.containers import __CellContainer__
from spira.lne.graph import Graph
from spira.lne.mesh import Mesh
from spira.lne.geometry import Geometry
RDD = spira.get_rule_deck()
class ComposeMLayers(__CellContainer__):
"""
Decorates all elementals with purpose metal with
LCells and add them as elementals to the new class.
"""
cell_elems = param.ElementListField()
mlayers = param.DataField(fdef_name='create_mlayers')
def _merge_layers(self, flat_metals):
points = []
elems = spira.ElementList()
for p in flat_metals:
for pp in p.polygons:
points.append(pp)
if points:
from spira.gdsii.utils import scale_polygon_down as spd
points = spd(points)
shape = shapes.Shape(points=points)
shape.apply_merge
for pts in shape.points:
pts = spd([pts])
elems += spira.Polygons(shape=pts)
return elems
def create_mlayers(self):
elems = spira.ElementList()
# players = RDD.PLAYER.get_physical_layers(purpose_symbol=['METAL', 'GROUND', 'MOAT'])
flat_elems = self.cell_elems.flat_copy()
for pl in RDD.PLAYER.get_physical_layers(purposes='METAL'):
metal_elems = flat_elems.get_polygons(layer=pl.layer)
if metal_elems:
c_mlayer = CMLayers(layer=pl.layer)
for i, ply in enumerate(self._merge_layers(metal_elems)):
ml = MLayer(name='MLayer_{}_{}_{}_{}'.format(pl.layer.number,
self.cell.name,
self.cell.id, i),
points=ply.polygons,
number=pl.layer.number)
c_mlayer += spira.SRef(ml)
elems += spira.SRef(c_mlayer)
return elems
def create_elementals(self, elems):
# TODO: Apply DRC checking between metals, before being placed.
for lcell in self.mlayers:
elems += lcell
# FIXME: Allow this operation.
# elems += self.mlayers
return elems
class ComposeNLayer(ComposeMLayers):
"""
Decorates all elementas with purpose via with
LCells and add them as elementals to the new class.
"""
cell_elems = param.ElementListField()
level = param.IntegerField(default=1)
nlayers = param.DataField(fdef_name='create_nlayers')
def create_nlayers(self):
elems = ElementList()
flat_elems = self.cell_elems.flat_copy()
for pl in RDD.PLAYER.get_physical_layers(purposes='VIA'):
via_elems = flat_elems.get_polygons(layer=pl.layer)
if via_elems:
c_nlayer = CNLayers(layer=pl.layer)
for i, ply in enumerate(via_elems):
ml = NLayer(name='Via_NLayer_{}_{}_{}'.format(pl.layer.number, self.cell.name, i),
points=ply.polygons,
midpoint=ply.center,
number=pl.layer.number)
c_nlayer += spira.SRef(ml)
elems += SRef(c_nlayer)
return elems
def create_elementals(self, elems):
super().create_elementals(elems)
# Only add it if its a Device.
if self.level == 1:
for lcell in self.nlayers:
elems += lcell
return elems
class ComposeGLayer(ComposeNLayer):
plane_elems = param.ElementListField() # Elementals like skyplanes and groundplanes.
ground_layer = param.DataField(fdef_name='create_merged_ground_layers')
def create_merged_ground_layers(self):
points = []
for p in self.plane_elems.flat_copy():
for pp in p.polygons:
points.append(pp)
if points:
ll = Layer(number=RDD.GDSII.GPLAYER, datatype=6)
merged_ply = UnionPolygons(polygons=points, gdslayer=ll)
return merged_ply
return None
def create_elementals(self, elems):
super().create_elementals(elems)
if self.level == 1:
if self.ground_layer:
box = self.cell.bbox
# box.move(midpoint=box.center, destination=(0,0))
gnd = self.ground_layer | box
if gnd:
c_glayer = CGLayers(layer=gnd.gdslayer)
name = 'GLayer_{}_{}'.format(self.cell.name, gnd.gdslayer.number)
gnd_layer = GLayer(name=name, layer=gnd.gdslayer, player=gnd)
c_glayer += spira.SRef(gnd_layer)
elems += spira.SRef(c_glayer)
return elems
class ConnectDesignRules(ComposeGLayer):
metal_elems = param.ElementListField()
def create_elementals(self, elems):
super().create_elementals(elems)
incorrect_elems = ElementList()
correct_elems = ElementList()
for rule in RDD.RULES.elementals:
if not rule.apply(elems):
for composed_lcell in elems:
for lcell in composed_lcell.ref.elementals.sref:
if lcell.ref.layer.number == rule.layer1.number:
correct_elems += lcell
return elems
class __StructureCell__(ConnectDesignRules):
"""
Add a GROUND bbox to Device for primitive and
DRC detection, since GROUND is only in Mask Cell.
"""
level = param.IntegerField(default=1)
device_elems = param.ElementListField()
devices = param.DataField(fdef_name='create_device_layers')
terminals = param.DataField(fdef_name='create_terminal_layers')
def create_device_layers(self):
box = self.cell.bbox
box.move(midpoint=box.center, destination=(0,0))
B = DLayer(blayer=box, device_elems=self.cell.elementals)
Bs = SRef(B)
Bs.move(midpoint=(0,0), destination=self.cell.bbox.center)
return Bs
def create_terminal_layers(self):
# flat_elems = self.cell_elems.flat_copy()
# port_elems = flat_elems.get_polygons(layer=RDD.PURPOSE.TERM)
# label_elems = flat_elems.labels
#
# elems = ElementList()
# for port in port_elems:
# for label in label_elems:
#
# lbls = label.text.split(' ')
# s_p1, s_p2 = lbls[1], lbls[2]
# p1, p2 = None, None
#
# if s_p1 in RDD.METALS.keys:
# layer = RDD.METALS[s_p1].LAYER
# p1 = spira.Layer(name=lbls[0], number=layer, datatype=RDD.GDSII.TEXT)
#
# if s_p2 in RDD.METALS.keys:
# layer = RDD.METALS[s_p2].LAYER
# p2 = spira.Layer(name=lbls[0], number=layer, datatype=RDD.GDSII.TEXT)
#
# if p1 and p2:
# if label.point_inside(polygon=port.polygons[0]):
# term = TLayer(points=port.polygons,
# layer1=p1,
# layer2=p2,
# number=RDD.GDSII.TERM,
# midpoint=label.position)
#
# term.ports[0].name = 'P1_{}'.format(label.text)
# term.ports[1].name = 'P2_{}'.format(label.text)
#
# elems += SRef(term)
elems = ElementList()
for p in self.cell.ports:
if isinstance(p, spira.Term):
term = TLayer(points=p.polygon.polygons,
# layer1=p1,
# layer2=p2,
number=RDD.PURPOSE.TERM.datatype,
midpoint=p.label.position)
term.ports[0].name = 'P1_{}'.format(1)
term.ports[1].name = 'P2_{}'.format(2)
elems += SRef(term)
return elems
def create_elementals(self, elems):
super().create_elementals(elems)
# elems += self.devices
# for term in self.terminals:
# elems += term
return elems
def create_ports(self, ports):
# for t in self.cell.terms:
# ports += t
return ports
| 30.787004 | 102 | 0.553471 | import spira
from spira import param
from spira import shapes
from spira.lpe.layers import *
from spira.lrc.rules import *
from spira.lrc.checking import Rules
from spira.lpe.containers import __CellContainer__
from spira.lne.graph import Graph
from spira.lne.mesh import Mesh
from spira.lne.geometry import Geometry
RDD = spira.get_rule_deck()
class ComposeMLayers(__CellContainer__):
cell_elems = param.ElementListField()
mlayers = param.DataField(fdef_name='create_mlayers')
def _merge_layers(self, flat_metals):
points = []
elems = spira.ElementList()
for p in flat_metals:
for pp in p.polygons:
points.append(pp)
if points:
from spira.gdsii.utils import scale_polygon_down as spd
points = spd(points)
shape = shapes.Shape(points=points)
shape.apply_merge
for pts in shape.points:
pts = spd([pts])
elems += spira.Polygons(shape=pts)
return elems
def create_mlayers(self):
elems = spira.ElementList()
flat_elems = self.cell_elems.flat_copy()
for pl in RDD.PLAYER.get_physical_layers(purposes='METAL'):
metal_elems = flat_elems.get_polygons(layer=pl.layer)
if metal_elems:
c_mlayer = CMLayers(layer=pl.layer)
for i, ply in enumerate(self._merge_layers(metal_elems)):
ml = MLayer(name='MLayer_{}_{}_{}_{}'.format(pl.layer.number,
self.cell.name,
self.cell.id, i),
points=ply.polygons,
number=pl.layer.number)
c_mlayer += spira.SRef(ml)
elems += spira.SRef(c_mlayer)
return elems
def create_elementals(self, elems):
for lcell in self.mlayers:
elems += lcell
return elems
class ComposeNLayer(ComposeMLayers):
cell_elems = param.ElementListField()
level = param.IntegerField(default=1)
nlayers = param.DataField(fdef_name='create_nlayers')
def create_nlayers(self):
elems = ElementList()
flat_elems = self.cell_elems.flat_copy()
for pl in RDD.PLAYER.get_physical_layers(purposes='VIA'):
via_elems = flat_elems.get_polygons(layer=pl.layer)
if via_elems:
c_nlayer = CNLayers(layer=pl.layer)
for i, ply in enumerate(via_elems):
ml = NLayer(name='Via_NLayer_{}_{}_{}'.format(pl.layer.number, self.cell.name, i),
points=ply.polygons,
midpoint=ply.center,
number=pl.layer.number)
c_nlayer += spira.SRef(ml)
elems += SRef(c_nlayer)
return elems
def create_elementals(self, elems):
super().create_elementals(elems)
if self.level == 1:
for lcell in self.nlayers:
elems += lcell
return elems
class ComposeGLayer(ComposeNLayer):
    """Adds a merged ground ('G') plane layer on top of the N-layer compose."""
    plane_elems = param.ElementListField()
    ground_layer = param.DataField(fdef_name='create_merged_ground_layers')
    def create_merged_ground_layers(self):
        """Union all ground-plane polygons onto the GDSII ground layer.

        :return: a UnionPolygons merged polygon, or None when the plane
            elements contain no polygons.
        """
        points = []
        for p in self.plane_elems.flat_copy():
            for pp in p.polygons:
                points.append(pp)
        if points:
            # datatype=6 marks the merged ground plane — TODO confirm
            # against the RDD purpose definitions.
            ll = Layer(number=RDD.GDSII.GPLAYER, datatype=6)
            merged_ply = UnionPolygons(polygons=points, gdslayer=ll)
            return merged_ply
        return None
    def create_elementals(self, elems):
        """Extend *elems* (super call) and, at level 1, clip the merged
        ground plane to the cell bounding box and attach it as a GLayer."""
        super().create_elementals(elems)
        if self.level == 1:
            if self.ground_layer:
                box = self.cell.bbox
                # '|' presumably intersects the plane with the bbox —
                # confirm against the UnionPolygons operator overloads.
                gnd = self.ground_layer | box
                if gnd:
                    c_glayer = CGLayers(layer=gnd.gdslayer)
                    name = 'GLayer_{}_{}'.format(self.cell.name, gnd.gdslayer.number)
                    gnd_layer = GLayer(name=name, layer=gnd.gdslayer, player=gnd)
                    c_glayer += spira.SRef(gnd_layer)
                    elems += spira.SRef(c_glayer)
        return elems
class ConnectDesignRules(ComposeGLayer):
    """Applies the RDD design rules to the composed layer elements."""
    metal_elems = param.ElementListField()
    def create_elementals(self, elems):
        """Run every design rule over the composed elements.

        NOTE(review): ``incorrect_elems`` is never populated and
        ``correct_elems`` is collected but never used or returned — this
        looks like work in progress; the method currently returns *elems*
        unchanged apart from the super() composition.
        """
        super().create_elementals(elems)
        incorrect_elems = ElementList()
        correct_elems = ElementList()
        for rule in RDD.RULES.elementals:
            if not rule.apply(elems):
                # Rule failed: gather the sub-cells on the rule's first
                # layer (presumably for later reporting — TODO confirm).
                for composed_lcell in elems:
                    for lcell in composed_lcell.ref.elementals.sref:
                        if lcell.ref.layer.number == rule.layer1.number:
                            correct_elems += lcell
        return elems
class __StructureCell__(ConnectDesignRules):
    """Top of the compose chain: adds device and terminal layer creation."""
    level = param.IntegerField(default=1)
    device_elems = param.ElementListField()
    devices = param.DataField(fdef_name='create_device_layers')
    terminals = param.DataField(fdef_name='create_terminal_layers')
    def create_device_layers(self):
        """Wrap the cell's bounding box and elementals into a DLayer,
        re-centred on the cell's bbox centre.

        :return: SRef reference to the created DLayer.
        """
        box = self.cell.bbox
        box.move(midpoint=box.center, destination=(0,0))
        B = DLayer(blayer=box, device_elems=self.cell.elementals)
        Bs = SRef(B)
        Bs.move(midpoint=(0,0), destination=self.cell.bbox.center)
        return Bs
    def create_terminal_layers(self):
        """Create a TLayer for every spira.Term port of the cell.

        :return: ElementList of SRef references to the terminal layers.
        """
        elems = ElementList()
        for p in self.cell.ports:
            if isinstance(p, spira.Term):
                term = TLayer(points=p.polygon.polygons,
                              number=RDD.PURPOSE.TERM.datatype,
                              midpoint=p.label.position)
                # NOTE(review): the suffixes are hard-coded literals
                # ('P1_1', 'P2_2') rather than derived from the port —
                # confirm this is intentional.
                term.ports[0].name = 'P1_{}'.format(1)
                term.ports[1].name = 'P2_{}'.format(2)
                elems += SRef(term)
        return elems
    def create_elementals(self, elems):
        # Delegates entirely to the compose chain above.
        super().create_elementals(elems)
        return elems
    def create_ports(self, ports):
        # Ports are passed through unchanged.
        return ports
| true | true |
1c48059513bd64cd768042db01ce8f2c7d15dfeb | 1,009 | py | Python | hooks/post_gen_project.py | christophedcpm/cookiecutter-pypackage | eaad44b1ae7c049e9e2a868b3b15164cceaf55e2 | [
"BSD-3-Clause"
] | null | null | null | hooks/post_gen_project.py | christophedcpm/cookiecutter-pypackage | eaad44b1ae7c049e9e2a868b3b15164cceaf55e2 | [
"BSD-3-Clause"
] | null | null | null | hooks/post_gen_project.py | christophedcpm/cookiecutter-pypackage | eaad44b1ae7c049e9e2a868b3b15164cceaf55e2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import shutil

# Root of the freshly generated cookiecutter project (the hook runs with
# the generated project as its working directory).
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)


def remove_file(filepath):
    """Delete a single file, given its path relative to the project root."""
    os.remove(os.path.join(PROJECT_DIRECTORY, filepath))


def remove_dir(filepath):
    """Recursively delete a directory, given its path relative to the
    project root.

    Bug fix: the original used ``os.removedirs()``, which raises on a
    non-empty directory (e.g. removing ``.github`` while it still
    contains ``workflows/``) and, on success, keeps walking *up* the
    tree deleting empty parent directories — potentially removing the
    project root itself. ``shutil.rmtree()`` deletes exactly the
    requested tree and nothing above it.
    """
    shutil.rmtree(os.path.join(PROJECT_DIRECTORY, filepath))
if __name__ == "__main__":
    # Post-generation hook: prune files the user opted out of.  The
    # {{ cookiecutter.* }} placeholders are rendered by cookiecutter
    # *before* this script executes, so each comparison below is
    # against a literal string.
    if "{{ cookiecutter.create_author_file }}" != "y":
        remove_file("AUTHORS.rst")
        remove_file("docs/authors.rst")
    if "{{ cookiecutter.ci_provider }}" == "Github Action":
        # GitHub Actions chosen: the Travis config is redundant.
        remove_file(".travis.yml")
    if '{{cookiecutter.enable_automatic_pypi_deployment }}' == 'n':
        remove_file('.github/workflows/publish.yml')
    if "{{ cookiecutter.ci_provider }}" == "Travis-CI":
        # Travis chosen: drop the whole GitHub Actions tree.
        remove_dir(".github")
    if "no" in "{{ cookiecutter.command_line_interface|lower }}":
        cli_file = os.path.join("{{ cookiecutter.project_slug }}", "cli.py")
        remove_file(cli_file)
    if "Not open source" == "{{ cookiecutter.open_source_license }}":
        remove_file("LICENSE")
| 28.828571 | 76 | 0.654113 |
import os
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove_file(filepath):
os.remove(os.path.join(PROJECT_DIRECTORY, filepath))
def remove_dir(filepath):
os.removedirs(os.path.join(PROJECT_DIRECTORY, filepath))
if __name__ == "__main__":
if "{{ cookiecutter.create_author_file }}" != "y":
remove_file("AUTHORS.rst")
remove_file("docs/authors.rst")
if "{{ cookiecutter.ci_provider }}" == "Github Action":
remove_file(".travis.yml")
if '{{cookiecutter.enable_automatic_pypi_deployment }}' == 'n':
remove_file('.github/workflows/publish.yml')
if "{{ cookiecutter.ci_provider }}" == "Travis-CI":
remove_dir(".github")
if "no" in "{{ cookiecutter.command_line_interface|lower }}":
cli_file = os.path.join("{{ cookiecutter.project_slug }}", "cli.py")
remove_file(cli_file)
if "Not open source" == "{{ cookiecutter.open_source_license }}":
remove_file("LICENSE")
| true | true |
1c4807c8c7b1516bf451ce708684235b30b64ee7 | 2,410 | py | Python | mayan/apps/linking/links.py | garrans/mayan-edms | e95e90cc47447a1ae72629271652824aa9868572 | [
"Apache-2.0"
] | null | null | null | mayan/apps/linking/links.py | garrans/mayan-edms | e95e90cc47447a1ae72629271652824aa9868572 | [
"Apache-2.0"
] | null | null | null | mayan/apps/linking/links.py | garrans/mayan-edms | e95e90cc47447a1ae72629271652824aa9868572 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from documents.permissions import permission_document_view
from navigation import Link
from .permissions import (
permission_smart_link_create, permission_smart_link_delete,
permission_smart_link_edit, permission_smart_link_view
)
# Navigation links for the smart-link app.  Each Link couples a view name
# with the permission(s) required to show it; 'args' names the attribute
# of the template context object used to build the URL.

# --- Smart link condition links -------------------------------------
link_smart_link_condition_create = Link(
    permissions=(permission_smart_link_edit,), text=_('Create condition'),
    view='linking:smart_link_condition_create', args='object.pk'
)
link_smart_link_condition_delete = Link(
    permissions=(permission_smart_link_edit,), tags='dangerous',
    text=_('Delete'), view='linking:smart_link_condition_delete',
    args='resolved_object.pk'
)
link_smart_link_condition_edit = Link(
    permissions=(permission_smart_link_edit,), text=_('Edit'),
    view='linking:smart_link_condition_edit', args='resolved_object.pk'
)
link_smart_link_condition_list = Link(
    permissions=(permission_smart_link_edit,), text=_('Conditions'),
    view='linking:smart_link_condition_list', args='object.pk'
)
# --- Smart link CRUD links ------------------------------------------
link_smart_link_create = Link(
    permissions=(permission_smart_link_create,),
    text=_('Create new smart link'), view='linking:smart_link_create'
)
link_smart_link_delete = Link(
    permissions=(permission_smart_link_delete,), tags='dangerous',
    text=_('Delete'), view='linking:smart_link_delete', args='object.pk'
)
link_smart_link_document_types = Link(
    permissions=(permission_smart_link_edit,), text=_('Document types'),
    view='linking:smart_link_document_types', args='object.pk'
)
link_smart_link_edit = Link(
    permissions=(permission_smart_link_edit,), text=_('Edit'),
    view='linking:smart_link_edit', args='object.pk'
)
# --- Smart link instance (per-document) links -----------------------
link_smart_link_instance_view = Link(
    permissions=(permission_smart_link_view,), text=_('Documents'),
    view='linking:smart_link_instance_view', args=(
        'document.pk', 'object.pk',
    )
)
link_smart_link_instances_for_document = Link(
    permissions=(permission_document_view,), text=_('Smart links'),
    view='linking:smart_link_instances_for_document', args='object.pk'
)
# --- List / setup entry points --------------------------------------
link_smart_link_list = Link(
    permissions=(permission_smart_link_create,), text=_('Smart links'),
    view='linking:smart_link_list'
)
link_smart_link_setup = Link(
    icon='fa fa-link', permissions=(permission_smart_link_create,),
    text=_('Smart links'), view='linking:smart_link_list'
)
| 37.65625 | 74 | 0.772199 | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from documents.permissions import permission_document_view
from navigation import Link
from .permissions import (
permission_smart_link_create, permission_smart_link_delete,
permission_smart_link_edit, permission_smart_link_view
)
link_smart_link_condition_create = Link(
permissions=(permission_smart_link_edit,), text=_('Create condition'),
view='linking:smart_link_condition_create', args='object.pk'
)
link_smart_link_condition_delete = Link(
permissions=(permission_smart_link_edit,), tags='dangerous',
text=_('Delete'), view='linking:smart_link_condition_delete',
args='resolved_object.pk'
)
link_smart_link_condition_edit = Link(
permissions=(permission_smart_link_edit,), text=_('Edit'),
view='linking:smart_link_condition_edit', args='resolved_object.pk'
)
link_smart_link_condition_list = Link(
permissions=(permission_smart_link_edit,), text=_('Conditions'),
view='linking:smart_link_condition_list', args='object.pk'
)
link_smart_link_create = Link(
permissions=(permission_smart_link_create,),
text=_('Create new smart link'), view='linking:smart_link_create'
)
link_smart_link_delete = Link(
permissions=(permission_smart_link_delete,), tags='dangerous',
text=_('Delete'), view='linking:smart_link_delete', args='object.pk'
)
link_smart_link_document_types = Link(
permissions=(permission_smart_link_edit,), text=_('Document types'),
view='linking:smart_link_document_types', args='object.pk'
)
link_smart_link_edit = Link(
permissions=(permission_smart_link_edit,), text=_('Edit'),
view='linking:smart_link_edit', args='object.pk'
)
link_smart_link_instance_view = Link(
permissions=(permission_smart_link_view,), text=_('Documents'),
view='linking:smart_link_instance_view', args=(
'document.pk', 'object.pk',
)
)
link_smart_link_instances_for_document = Link(
permissions=(permission_document_view,), text=_('Smart links'),
view='linking:smart_link_instances_for_document', args='object.pk'
)
link_smart_link_list = Link(
permissions=(permission_smart_link_create,), text=_('Smart links'),
view='linking:smart_link_list'
)
link_smart_link_setup = Link(
icon='fa fa-link', permissions=(permission_smart_link_create,),
text=_('Smart links'), view='linking:smart_link_list'
)
| true | true |
1c4807f83ad4b502ef47f691ad2b1873e34b1f90 | 2,506 | py | Python | bot.py | chenx6/sec_bot | 5ad3427ed62f6dd891bb03afc5b2ebf93ccbb625 | [
"MIT"
] | 2 | 2021-02-03T05:36:05.000Z | 2022-01-20T05:42:46.000Z | bot.py | chenx6/sec_bot | 5ad3427ed62f6dd891bb03afc5b2ebf93ccbb625 | [
"MIT"
] | null | null | null | bot.py | chenx6/sec_bot | 5ad3427ed62f6dd891bb03afc5b2ebf93ccbb625 | [
"MIT"
] | null | null | null | from typing import List
from aiocqhttp import CQHttp, Event
from quart import request
from schedule import every
from config import webhook_token, subscribes
from utils.limit_counter import LimitCounter
from utils.schedule_thread import run_continuously
from plugin import (silent, base_bot, anquanke_vuln, ctfhub, daily_push, help_menu,
whoami, rss, search, admin, unknown_message, lsp, weather, debian_pkg)
# Silent-mode plugin; kept as a named reference because the message
# handlers below query its state directly.
silent_ = silent.Silent()
# Registered message plugins.  Order matters: reply_at dispatches to the
# first plugin whose match() accepts the event, so Unknown (the
# catch-all) must stay last.
plugins: List[base_bot.BaseBot] = [
    silent_,
    anquanke_vuln.AnquankeVuln(),
    ctfhub.CTFHub(),
    daily_push.DailyPush(),
    rss.Rss(),
    help_menu.HelpMenu(),
    whoami.WhoAmI(),
    search.SearchBot(),
    weather.Weather(),
    debian_pkg.DebianPkgBot(),
    lsp.LSP(),
    admin.Admin(),
    unknown_message.Unknown()
]
bot = CQHttp()
logger = bot.logger
# Rate limiter shared by all group replies.
counter = LimitCounter()
@bot.on_message('group')
async def reply_at(event: Event):
    """
    Reply to group @-messages by dispatching to the first matching plugin.
    """
    if silent_.is_silent(event, event.message):
        return
    # Rate limit: refuse politely once the send quota is exhausted.
    if not counter.can_send():
        await bot.send(event, f'发送的太快了吧,{event.sender["nickname"]},让我缓缓(>﹏<)')
        return
    for plugin in plugins:
        if not event.message:
            break
        if plugin.match(event, event.message):
            try:
                reply_text = await plugin.reply(event)
                await bot.send(event, reply_text)
                counter.add_counter()
            except Exception as e:
                # A plugin failure must not kill the handler; log it.
                logger.error('Plugin error')
                logger.error(e)
            # First matching plugin wins — stop dispatching.
            break
@bot.server_app.route('/webhook')
async def webhook():
    """HTTP webhook: push *message* to QQ group *group_id*.

    Query arguments: ``token`` (shared secret), ``group_id`` (numeric
    group identifier) and ``message``.  Returns the CQHTTP response on
    success, a JSON error with HTTP 400 on a bad request, or HTTP 500
    when the upstream send fails.
    """
    token = request.args.get('token')
    group_id = request.args.get('group_id')
    message = request.args.get('message')
    if not token or token != webhook_token:
        return {"message": "token error"}, 400
    if not group_id or not message:
        return {"message": "error while missing argument"}, 400
    # Robustness fix: a non-numeric group_id previously escaped as an
    # unhandled ValueError (HTTP 500); report it as a client error.
    try:
        group_id = int(group_id)
    except ValueError:
        return {"message": "group_id must be an integer"}, 400
    try:
        response = await bot.send_group_msg(group_id=group_id,
                                            message=message)  # type: ignore
        return response
    except Exception as e:
        # Best-effort: surface the upstream error instead of crashing.
        return {"message": "Server error, " + str(e)}, 500
def reset_counter():
    """Scheduled job: clear the rate-limit counter (runs every minute)."""
    counter.reset_counter()
@bot.before_sending
async def can_send_word(event: Event, message, kwargs):
    # Drop the outgoing message entirely while silent mode is active.
    # NOTE(review): is_silent() is called without arguments here but with
    # (event, message) in reply_at — confirm both signatures are valid.
    if silent_.is_silent():
        event.clear()
# Register each configured subscription on its own schedule.
for sub in subscribes:
    sub.job.do(sub.send_message, bot=bot)
every().minutes.do(reset_counter)
# Run the schedule loop in a background thread, polling every 60 s.
run_continuously(60)
| 28.157303 | 90 | 0.640463 | from typing import List
from aiocqhttp import CQHttp, Event
from quart import request
from schedule import every
from config import webhook_token, subscribes
from utils.limit_counter import LimitCounter
from utils.schedule_thread import run_continuously
from plugin import (silent, base_bot, anquanke_vuln, ctfhub, daily_push, help_menu,
whoami, rss, search, admin, unknown_message, lsp, weather, debian_pkg)
silent_ = silent.Silent()
plugins: List[base_bot.BaseBot] = [
silent_,
anquanke_vuln.AnquankeVuln(),
ctfhub.CTFHub(),
daily_push.DailyPush(),
rss.Rss(),
help_menu.HelpMenu(),
whoami.WhoAmI(),
search.SearchBot(),
weather.Weather(),
debian_pkg.DebianPkgBot(),
lsp.LSP(),
admin.Admin(),
unknown_message.Unknown()
]
bot = CQHttp()
logger = bot.logger
counter = LimitCounter()
@bot.on_message('group')
async def reply_at(event: Event):
if silent_.is_silent(event, event.message):
return
if not counter.can_send():
await bot.send(event, f'发送的太快了吧,{event.sender["nickname"]},让我缓缓(>﹏<)')
return
for plugin in plugins:
if not event.message:
break
if plugin.match(event, event.message):
try:
reply_text = await plugin.reply(event)
await bot.send(event, reply_text)
counter.add_counter()
except Exception as e:
logger.error('Plugin error')
logger.error(e)
break
@bot.server_app.route('/webhook')
async def webhook():
token = request.args.get('token')
group_id = request.args.get('group_id')
message = request.args.get('message')
if not token or token != webhook_token:
return {"message": "token error"}, 400
if not group_id or not message:
return {"message": "error while missing argument"}, 400
group_id = int(group_id)
try:
response = await bot.send_group_msg(group_id=group_id,
message=message)
return response
except Exception as e:
return {"message": "Server error, " + str(e)}, 500
def reset_counter():
counter.reset_counter()
@bot.before_sending
async def can_send_word(event: Event, message, kwargs):
if silent_.is_silent():
event.clear()
for sub in subscribes:
sub.job.do(sub.send_message, bot=bot)
every().minutes.do(reset_counter)
run_continuously(60)
| true | true |
1c48084185e3f160708426d640c31a058f31937b | 811 | py | Python | jesse/indicators/rocp.py | The-Makers-of-things/jesse | df061ea21011a3c28f3359f421ec5594216fb708 | [
"MIT"
] | null | null | null | jesse/indicators/rocp.py | The-Makers-of-things/jesse | df061ea21011a3c28f3359f421ec5594216fb708 | [
"MIT"
] | null | null | null | jesse/indicators/rocp.py | The-Makers-of-things/jesse | df061ea21011a3c28f3359f421ec5594216fb708 | [
"MIT"
] | null | null | null | from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source
def rocp(candles: np.ndarray, period: int = 10, source_type: str = "close", sequential: bool = False) -> Union[
    float, np.ndarray]:
    """
    ROCP - Rate of change Percentage: (price-prevPrice)/prevPrice

    :param candles: np.ndarray
    :param period: int - default=10
    :param source_type: str - default: "close"
    :param sequential: bool - default=False
    :return: float | np.ndarray
    """
    # For a single-value result only the most recent 240 candles matter.
    if not sequential and len(candles) > 240:
        candles = candles[-240:]
    values = talib.ROCP(get_candle_source(candles, source_type=source_type), timeperiod=period)
    if sequential:
        return values
    last = values[-1]
    return None if np.isnan(last) else last
| 26.16129 | 111 | 0.668311 | from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source
def rocp(candles: np.ndarray, period: int = 10, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
if not sequential and len(candles) > 240:
candles = candles[-240:]
source = get_candle_source(candles, source_type=source_type)
res = talib.ROCP(source, timeperiod=period)
if sequential:
return res
else:
return None if np.isnan(res[-1]) else res[-1]
| true | true |
1c48092d6bf94bc4d8938233cb16fd7a0011d89c | 4,396 | py | Python | tests/test_cli.py | rddunphy/pwg | 47ed13d3a8120e2c21e4ff28af08deeddbbb9d66 | [
"MIT"
] | null | null | null | tests/test_cli.py | rddunphy/pwg | 47ed13d3a8120e2c21e4ff28af08deeddbbb9d66 | [
"MIT"
] | null | null | null | tests/test_cli.py | rddunphy/pwg | 47ed13d3a8120e2c21e4ff28af08deeddbbb9d66 | [
"MIT"
] | null | null | null | from io import StringIO
from unittest import TestCase
from unittest.mock import patch
from generator.cli import create_parser, gen, confirm, munge, reset, add_chars, remove_chars, save, pronounceable, \
phrase
class CLITest(TestCase):
    """Behavioural tests for the CLI entry points (confirm/gen/pronounceable/phrase)."""
    def setUp(self):
        self.parser = create_parser()
    @patch("builtins.input", side_effect=['n', 'N'])
    def test_confirm_no(self, _):
        # Both 'n' and 'N' answers must be treated as a refusal.
        answer = confirm("yes?")
        self.assertFalse(answer)
        answer = confirm("yes?")
        self.assertFalse(answer)
    @patch("builtins.input", side_effect=['y', ''])
    def test_confirm_yes(self, _):
        # 'y' and the empty (default) answer both count as confirmation.
        answer = confirm("yes?")
        self.assertTrue(answer)
        answer = confirm("yes?")
        self.assertTrue(answer)
    @patch("sys.stdout", new_callable=StringIO)
    def test_gen(self, mock_stdout):
        # Pattern 'xxx' must yield a 3-character password on stdout.
        args = self.parser.parse_args(["-p", "xxx"])
        gen(args)
        result = mock_stdout.getvalue().strip()
        self.assertEqual(len(result), 3)
    @patch("pyperclip.copy")
    @patch("sys.stdout", new_callable=StringIO)
    def test_gen_copy(self, mock_stdout, _):
        # With -c the password goes to the clipboard, not to stdout.
        args = self.parser.parse_args(["-c", "-p", "xxx"])
        gen(args)
        result = mock_stdout.getvalue().strip()
        self.assertEqual(result, "Password copied to clipboard.")
    @patch("sys.stdout", new_callable=StringIO)
    def test_pronounceable(self, mock_stdout):
        # -l 8 must produce an 8-character password.
        args = self.parser.parse_args(["pronounceable", "-l", "8"])
        pronounceable(args)
        result = mock_stdout.getvalue().strip()
        self.assertEqual(len(result), 8)
    @patch("sys.stdout", new_callable=StringIO)
    def test_phrase(self, mock_stdout):
        # Counts uppercase characters; assumes each of the 5 pattern
        # tokens in 'wwavn' contributes exactly one uppercase letter —
        # TODO confirm against the phrase generator.
        args = self.parser.parse_args(["phrase", "-p", "wwavn"])
        phrase(args)
        result = mock_stdout.getvalue().strip()
        upper_count = sum(1 for c in result if c.isupper())
        self.assertEqual(upper_count, 5)
class ParserTest(TestCase):
    """Argument-parser tests: each case checks defaults and flag wiring."""
    def setUp(self):
        self.parser = create_parser()
    def test_no_args(self):
        # Bare invocation falls through to gen with all defaults.
        args = self.parser.parse_args([])
        self.assertEqual(args.func, gen)
        self.assertFalse(args.copy)
        self.assertIsNone(args.pattern)
        self.assertEqual(args.type, 'default')
        self.assertFalse(args.munge)
    def test_pattern_args(self):
        args = self.parser.parse_args(["-c", "-p", "xxx"])
        self.assertEqual(args.func, gen)
        self.assertTrue(args.copy)
        self.assertEqual(args.pattern, "xxx")
        self.assertEqual(args.type, 'default')
        self.assertFalse(args.munge)
    def test_type_args(self):
        args = self.parser.parse_args(["-m", "-t", "pin"])
        self.assertEqual(args.func, gen)
        self.assertFalse(args.copy)
        self.assertIsNone(args.pattern)
        self.assertEqual(args.type, 'pin')
        self.assertTrue(args.munge)
    def test_pronounceable_args(self):
        # Subcommand dispatches to pronounceable with its own -l option.
        args = self.parser.parse_args(["pronounceable", "-m", "-c", "-l", "15"])
        self.assertEqual(args.func, pronounceable)
        self.assertTrue(args.copy)
        self.assertEqual(args.length, 15)
        self.assertTrue(args.munge)
    def test_phrase_args(self):
        args = self.parser.parse_args(["phrase", "-m", "-c", "-p", "nn"])
        self.assertEqual(args.func, phrase)
        self.assertTrue(args.copy)
        self.assertEqual(args.pattern, "nn")
        self.assertTrue(args.munge)
    def test_munge(self):
        args = self.parser.parse_args(["munge", "mypassword"])
        self.assertEqual(args.func, munge)
        self.assertEqual(args.string, "mypassword")
    def test_reset(self):
        args = self.parser.parse_args(["reset"])
        self.assertEqual(args.func, reset)
    def test_add_chars(self):
        # add_chars/remove_chars take a character class and the chars.
        args = self.parser.parse_args(["add_chars", "n", "xyz"])
        self.assertEqual(args.func, add_chars)
        self.assertEqual(args.cls, "n")
        self.assertEqual(args.chars, "xyz")
    def test_remove_chars(self):
        args = self.parser.parse_args(["remove_chars", "n", "xyz"])
        self.assertEqual(args.func, remove_chars)
        self.assertEqual(args.cls, "n")
        self.assertEqual(args.chars, "xyz")
    def test_save(self):
        args = self.parser.parse_args(["save", "mytype", "xxx"])
        self.assertEqual(args.func, save)
        self.assertEqual(args.name, "mytype")
        self.assertEqual(args.pattern, "xxx")
| 34.34375 | 116 | 0.628071 | from io import StringIO
from unittest import TestCase
from unittest.mock import patch
from generator.cli import create_parser, gen, confirm, munge, reset, add_chars, remove_chars, save, pronounceable, \
phrase
class CLITest(TestCase):
def setUp(self):
self.parser = create_parser()
@patch("builtins.input", side_effect=['n', 'N'])
def test_confirm_no(self, _):
answer = confirm("yes?")
self.assertFalse(answer)
answer = confirm("yes?")
self.assertFalse(answer)
@patch("builtins.input", side_effect=['y', ''])
def test_confirm_yes(self, _):
answer = confirm("yes?")
self.assertTrue(answer)
answer = confirm("yes?")
self.assertTrue(answer)
@patch("sys.stdout", new_callable=StringIO)
def test_gen(self, mock_stdout):
args = self.parser.parse_args(["-p", "xxx"])
gen(args)
result = mock_stdout.getvalue().strip()
self.assertEqual(len(result), 3)
@patch("pyperclip.copy")
@patch("sys.stdout", new_callable=StringIO)
def test_gen_copy(self, mock_stdout, _):
args = self.parser.parse_args(["-c", "-p", "xxx"])
gen(args)
result = mock_stdout.getvalue().strip()
self.assertEqual(result, "Password copied to clipboard.")
@patch("sys.stdout", new_callable=StringIO)
def test_pronounceable(self, mock_stdout):
args = self.parser.parse_args(["pronounceable", "-l", "8"])
pronounceable(args)
result = mock_stdout.getvalue().strip()
self.assertEqual(len(result), 8)
@patch("sys.stdout", new_callable=StringIO)
def test_phrase(self, mock_stdout):
args = self.parser.parse_args(["phrase", "-p", "wwavn"])
phrase(args)
result = mock_stdout.getvalue().strip()
upper_count = sum(1 for c in result if c.isupper())
self.assertEqual(upper_count, 5)
class ParserTest(TestCase):
def setUp(self):
self.parser = create_parser()
def test_no_args(self):
args = self.parser.parse_args([])
self.assertEqual(args.func, gen)
self.assertFalse(args.copy)
self.assertIsNone(args.pattern)
self.assertEqual(args.type, 'default')
self.assertFalse(args.munge)
def test_pattern_args(self):
args = self.parser.parse_args(["-c", "-p", "xxx"])
self.assertEqual(args.func, gen)
self.assertTrue(args.copy)
self.assertEqual(args.pattern, "xxx")
self.assertEqual(args.type, 'default')
self.assertFalse(args.munge)
def test_type_args(self):
args = self.parser.parse_args(["-m", "-t", "pin"])
self.assertEqual(args.func, gen)
self.assertFalse(args.copy)
self.assertIsNone(args.pattern)
self.assertEqual(args.type, 'pin')
self.assertTrue(args.munge)
def test_pronounceable_args(self):
args = self.parser.parse_args(["pronounceable", "-m", "-c", "-l", "15"])
self.assertEqual(args.func, pronounceable)
self.assertTrue(args.copy)
self.assertEqual(args.length, 15)
self.assertTrue(args.munge)
def test_phrase_args(self):
args = self.parser.parse_args(["phrase", "-m", "-c", "-p", "nn"])
self.assertEqual(args.func, phrase)
self.assertTrue(args.copy)
self.assertEqual(args.pattern, "nn")
self.assertTrue(args.munge)
def test_munge(self):
args = self.parser.parse_args(["munge", "mypassword"])
self.assertEqual(args.func, munge)
self.assertEqual(args.string, "mypassword")
def test_reset(self):
args = self.parser.parse_args(["reset"])
self.assertEqual(args.func, reset)
def test_add_chars(self):
args = self.parser.parse_args(["add_chars", "n", "xyz"])
self.assertEqual(args.func, add_chars)
self.assertEqual(args.cls, "n")
self.assertEqual(args.chars, "xyz")
def test_remove_chars(self):
args = self.parser.parse_args(["remove_chars", "n", "xyz"])
self.assertEqual(args.func, remove_chars)
self.assertEqual(args.cls, "n")
self.assertEqual(args.chars, "xyz")
def test_save(self):
args = self.parser.parse_args(["save", "mytype", "xxx"])
self.assertEqual(args.func, save)
self.assertEqual(args.name, "mytype")
self.assertEqual(args.pattern, "xxx")
| true | true |
1c480a65642aabc05cce235a50ba92cd155f5aa5 | 3,905 | py | Python | controllers/ayarlar_controller.py | pyproject23/kackisivar | 752438c51d0d6145ce3a385ca18a471d8b7f3013 | [
"MIT"
] | 5 | 2020-12-13T20:09:34.000Z | 2021-01-05T16:17:01.000Z | controllers/ayarlar_controller.py | pyproject23/kackisivar | 752438c51d0d6145ce3a385ca18a471d8b7f3013 | [
"MIT"
] | null | null | null | controllers/ayarlar_controller.py | pyproject23/kackisivar | 752438c51d0d6145ce3a385ca18a471d8b7f3013 | [
"MIT"
] | 8 | 2020-12-13T19:09:26.000Z | 2020-12-13T21:15:45.000Z | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from views.ayarlar_form import Ui_AyarlarForm
from models.ayarlar import Ayarlar
from models.kullanici import Kullanici
# from datetime import time, datetime
class AyarlarForm(QtWidgets.QWidget, Ui_AyarlarForm):
    """Settings ("Ayarlar") form: loads the stored settings row into the
    widgets and writes edited values back via Ayarlar.kaydet()."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.kullanici = Kullanici()
        self.ayarlar = Ayarlar.ayarlari_getir()
        self.setupUi()
    def setupUi(self):
        """Wire up widgets: numeric validators, save button, initial load."""
        super().setupUi(self)
        self.pushButton.clicked.connect(self.ayarlari_guncelle)
        # NOTE(review): the USB-id validator only allows 0..1 — confirm
        # that range is intentional.
        self.lineEditUsbId.setValidator(QtGui.QIntValidator(0, 1, self))
        self.lineEditKisiLimiti.setValidator(QtGui.QIntValidator(0, 1000, self))
        self.lineEditSmtpPortNo.setValidator(QtGui.QIntValidator(0, 1000, self))
        self.verileri_doldur()
    def verileri_doldur(self):
        """Populate every widget from the settings row.

        The settings row is accessed by positional index — presumably the
        column order of the Ayarlar table; verify against the model.
        """
        try:
            self.ayarlar = Ayarlar.ayarlari_getir()
            self.lineEditOkulAdi.setText(self.ayarlar[3])
            self.comboBoxGorevli.clear()
            kullanicilar = self.kullanici.verileri_getir()
            # Current assignee first, then every known user.
            self.comboBoxGorevli.addItem(self.ayarlar[1], self.ayarlar[4])
            for k in kullanicilar:
                self.comboBoxGorevli.addItem(k[3], k[0])
            # Mail time is stored as "HH:MM[:SS]" text; split into ints.
            saat = str.split(self.ayarlar[5], ":")
            saat = [int(i) for i in saat]
            self.timeEditMailSaat.setTime(QtCore.QTime(*saat))
            self.lineEditUsbId.setText(str(self.ayarlar[6]))
            self.lineEditDemoVideo.setText(self.ayarlar[7])
            self.lineEditKisiLimiti.setText(str(self.ayarlar[8]))
            self.lineEditSmtpAdres.setText(self.ayarlar[9])
            self.lineEditSmtpKulAdi.setText(self.ayarlar[10])
            self.lineEditSmtpParola.setText(self.ayarlar[11])
            self.lineEditSmtpPortNo.setText(str(self.ayarlar[12]))
            self.checkBoxTLS.setChecked(self.ayarlar[13])
        except Exception as e:
            self.Mesaj(baslik="Hata", mesaj="Hata:" + str(e), ikon="hata")
    def ayarlari_guncelle(self):
        """Read the widgets back and persist them, then reload the form."""
        try:
            okul = str.strip(self.lineEditOkulAdi.text())
            gorevli_id = self.comboBoxGorevli.itemData(self.comboBoxGorevli.currentIndex())
            mail_saati = self.timeEditMailSaat.text()
            usb_id = str.strip(self.lineEditUsbId.text())
            demo_video = str.strip(self.lineEditDemoVideo.text())
            kisi_siniri = str.strip(self.lineEditKisiLimiti.text())
            smtp_server_adres = str.strip(self.lineEditSmtpAdres.text())
            smtp_kullanici_adi = str.strip(self.lineEditSmtpKulAdi.text())
            smtp_kullanici_parola = str.strip(self.lineEditSmtpParola.text())
            smtp_port_numarasi = str.strip(self.lineEditSmtpPortNo.text())
            if self.checkBoxTLS.isChecked():
                smtp_tls = 1
            else:
                smtp_tls = 0
            # id=1: the application keeps a single settings row.
            Ayarlar.kaydet(id=1, okul_adi=okul, gorevli_id=gorevli_id, mail_gonderme_saati=mail_saati, usb_id=usb_id, demo_video=demo_video, kisi_siniri=kisi_siniri, smtp_server_adres=smtp_server_adres, smtp_kullanici_adi=smtp_kullanici_adi, smtp_kullanici_parola=smtp_kullanici_parola, smtp_port_numarasi=smtp_port_numarasi, smtp_tls=smtp_tls)
            self.verileri_doldur()
        except Exception as e:
            self.Mesaj("Hata", "Kayıt işlemi gerçekleştirilemedi", "hata")
    def Mesaj(self, baslik="", mesaj="", ikon="bilgi"):
        """Show a modal message box.

        :param baslik: window title
        :param mesaj: body text
        :param ikon: "bilgi" (info), "uyari" (warning), anything else = error
        """
        msg1 = QMessageBox()
        if (ikon == "bilgi"):
            msg1.setIcon(QMessageBox.Information)
        elif(ikon == "uyari"):
            msg1.setIcon(QMessageBox.Warning)
        else:
            msg1.setIcon(QMessageBox.Critical)
        msg1.setStyleSheet("background:#28595e;")
        msg1.setWindowTitle(baslik)
        msg1.setText(mesaj)
        msg1.exec_()
| 47.048193 | 344 | 0.656338 | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from views.ayarlar_form import Ui_AyarlarForm
from models.ayarlar import Ayarlar
from models.kullanici import Kullanici
class AyarlarForm(QtWidgets.QWidget, Ui_AyarlarForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.kullanici = Kullanici()
self.ayarlar = Ayarlar.ayarlari_getir()
self.setupUi()
def setupUi(self):
super().setupUi(self)
self.pushButton.clicked.connect(self.ayarlari_guncelle)
self.lineEditUsbId.setValidator(QtGui.QIntValidator(0, 1, self))
self.lineEditKisiLimiti.setValidator(QtGui.QIntValidator(0, 1000, self))
self.lineEditSmtpPortNo.setValidator(QtGui.QIntValidator(0, 1000, self))
self.verileri_doldur()
def verileri_doldur(self):
try:
self.ayarlar = Ayarlar.ayarlari_getir()
self.lineEditOkulAdi.setText(self.ayarlar[3])
self.comboBoxGorevli.clear()
kullanicilar = self.kullanici.verileri_getir()
self.comboBoxGorevli.addItem(self.ayarlar[1], self.ayarlar[4])
for k in kullanicilar:
self.comboBoxGorevli.addItem(k[3], k[0])
saat = str.split(self.ayarlar[5], ":")
saat = [int(i) for i in saat]
self.timeEditMailSaat.setTime(QtCore.QTime(*saat))
self.lineEditUsbId.setText(str(self.ayarlar[6]))
self.lineEditDemoVideo.setText(self.ayarlar[7])
self.lineEditKisiLimiti.setText(str(self.ayarlar[8]))
self.lineEditSmtpAdres.setText(self.ayarlar[9])
self.lineEditSmtpKulAdi.setText(self.ayarlar[10])
self.lineEditSmtpParola.setText(self.ayarlar[11])
self.lineEditSmtpPortNo.setText(str(self.ayarlar[12]))
self.checkBoxTLS.setChecked(self.ayarlar[13])
except Exception as e:
self.Mesaj(baslik="Hata", mesaj="Hata:" + str(e), ikon="hata")
def ayarlari_guncelle(self):
try:
okul = str.strip(self.lineEditOkulAdi.text())
gorevli_id = self.comboBoxGorevli.itemData(self.comboBoxGorevli.currentIndex())
mail_saati = self.timeEditMailSaat.text()
usb_id = str.strip(self.lineEditUsbId.text())
demo_video = str.strip(self.lineEditDemoVideo.text())
kisi_siniri = str.strip(self.lineEditKisiLimiti.text())
smtp_server_adres = str.strip(self.lineEditSmtpAdres.text())
smtp_kullanici_adi = str.strip(self.lineEditSmtpKulAdi.text())
smtp_kullanici_parola = str.strip(self.lineEditSmtpParola.text())
smtp_port_numarasi = str.strip(self.lineEditSmtpPortNo.text())
if self.checkBoxTLS.isChecked():
smtp_tls = 1
else:
smtp_tls = 0
Ayarlar.kaydet(id=1, okul_adi=okul, gorevli_id=gorevli_id, mail_gonderme_saati=mail_saati, usb_id=usb_id, demo_video=demo_video, kisi_siniri=kisi_siniri, smtp_server_adres=smtp_server_adres, smtp_kullanici_adi=smtp_kullanici_adi, smtp_kullanici_parola=smtp_kullanici_parola, smtp_port_numarasi=smtp_port_numarasi, smtp_tls=smtp_tls)
self.verileri_doldur()
except Exception as e:
self.Mesaj("Hata", "Kayıt işlemi gerçekleştirilemedi", "hata")
def Mesaj(self, baslik="", mesaj="", ikon="bilgi"):
msg1 = QMessageBox()
if (ikon == "bilgi"):
msg1.setIcon(QMessageBox.Information)
elif(ikon == "uyari"):
msg1.setIcon(QMessageBox.Warning)
else:
msg1.setIcon(QMessageBox.Critical)
msg1.setStyleSheet("background:#28595e;")
msg1.setWindowTitle(baslik)
msg1.setText(mesaj)
msg1.exec_()
| true | true |
1c480b17ae493ad4fe4f5c5c467de99cb0d9fc59 | 1,192 | py | Python | pyvisdk/do/updated_agent_being_restarted_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/updated_agent_being_restarted_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/updated_agent_being_restarted_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def UpdatedAgentBeingRestartedEvent(vim, *args, **kwargs):
    '''This event records that the agent has been patched and will be restarted.

    Builds a ``{urn:vim25}UpdatedAgentBeingRestartedEvent`` managed object.
    Positional arguments fill the attributes in ``required + optional``
    order; keyword arguments must name one of those attributes.

    :raises IndexError: fewer than the 4 required attributes were supplied.
    :raises InvalidArgumentError: an unknown keyword attribute was supplied.
    '''
    obj = vim.client.factory.create('{urn:vim25}UpdatedAgentBeingRestartedEvent')

    # Validation: all 4 required attributes must be supplied, positionally
    # or by keyword.  Bug fix: the message previously claimed "at least 5"
    # and reported only len(args), even though the check counts kwargs too.
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]

    # Positional values map onto attribute names in declaration order.
    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    # Keyword values must name a known attribute.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| 35.058824 | 124 | 0.615772 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
| true | true |
1c480b184cf95bb9b7a3ca86aa9f6eb52d29969b | 2,678 | py | Python | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway_advertised_routes.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway_advertised_routes.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway_advertised_routes.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayAdvertisedRoutesResult',
'AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult',
'get_virtual_network_gateway_advertised_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayAdvertisedRoutesResult:
"""
List of virtual network gateway routes
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
"""
List of gateway routes
"""
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(GetVirtualNetworkGatewayAdvertisedRoutesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayAdvertisedRoutesResult(
value=self.value)
def get_virtual_network_gateway_advertised_routes(peer: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult:
"""
List of virtual network gateway routes
:param str peer: The IP address of the peer
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['peer'] = peer
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180601:getVirtualNetworkGatewayAdvertisedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayAdvertisedRoutesResult).value
return AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(
value=__ret__.value)
| 38.257143 | 189 | 0.698282 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayAdvertisedRoutesResult',
'AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult',
'get_virtual_network_gateway_advertised_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayAdvertisedRoutesResult:
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(GetVirtualNetworkGatewayAdvertisedRoutesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayAdvertisedRoutesResult(
value=self.value)
def get_virtual_network_gateway_advertised_routes(peer: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult:
__args__ = dict()
__args__['peer'] = peer
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180601:getVirtualNetworkGatewayAdvertisedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayAdvertisedRoutesResult).value
return AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(
value=__ret__.value)
| true | true |
1c480b25134b1e54200e0ddb780bd7bb0f122341 | 7,427 | py | Python | tensorflow/contrib/cluster_resolver/python/training/cluster_resolver.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/contrib/cluster_resolver/python/training/cluster_resolver.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/contrib/cluster_resolver/python/training/cluster_resolver.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cluster Resolvers are used for dynamic cluster IP/hostname resolution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.training.server_lib import ClusterSpec
class ClusterResolver(object):
"""Abstract class for all implementations of ClusterResolvers.
This defines the skeleton for all implementations of ClusterResolvers.
ClusterResolvers are a way for TensorFlow to communicate with various cluster
management systems (e.g. GCE, AWS, etc...).
By letting TensorFlow communicate with these systems, we will be able to
automatically discover and resolve IP addresses for various TensorFlow
workers. This will eventually allow us to automatically recover from
underlying machine failures and scale TensorFlow worker clusters up and down.
"""
@abc.abstractmethod
def cluster_spec(self):
"""Retrieve the current state of the cluster and returns a ClusterSpec.
Returns:
A ClusterSpec representing the state of the cluster at the moment this
function is called.
Implementors of this function must take care in ensuring that the
ClusterSpec returned is up-to-date at the time of calling this function.
This usually means retrieving the information from the underlying cluster
management system every time this function is invoked and reconstructing
a cluster_spec, rather than attempting to cache anything.
"""
raise NotImplementedError(
'cluster_spec is not implemented for {}.'.format(self))
@abc.abstractmethod
def master(self):
"""..."""
raise NotImplementedError('master is not implemented for {}.'.format(self))
class SimpleClusterResolver(ClusterResolver):
"""Simple implementation of ClusterResolver that accepts a ClusterSpec."""
def __init__(self, cluster_spec, master=''):
"""Creates a SimpleClusterResolver from a ClusterSpec."""
super(SimpleClusterResolver, self).__init__()
if not isinstance(cluster_spec, ClusterSpec):
raise TypeError('cluster_spec must be a ClusterSpec.')
self._cluster_spec = cluster_spec
if not isinstance(master, str):
raise TypeError('master must be a string.')
self._master = master
def cluster_spec(self):
"""Returns the ClusterSpec passed into the constructor."""
return self._cluster_spec
def master(self):
"""Returns the master address to use when creating a session."""
return self._master
class UnionClusterResolver(ClusterResolver):
"""Performs a union on underlying ClusterResolvers.
This class performs a union given two or more existing ClusterResolvers. It
merges the underlying ClusterResolvers, and returns one unified ClusterSpec
when cluster_spec is called. The details of the merge function is
documented in the cluster_spec function.
"""
def __init__(self, *args):
"""Initializes a UnionClusterResolver with other ClusterResolvers.
Args:
*args: `ClusterResolver` objects to be unionized.
Raises:
TypeError: If any argument is not a subclass of `ClusterResolvers`.
ValueError: If there are no arguments passed.
"""
super(UnionClusterResolver, self).__init__()
if not args:
raise ValueError('At least one ClusterResolver is required.')
for cluster_resolver in args:
if not isinstance(cluster_resolver, ClusterResolver):
raise TypeError('All arguments must be a sub-class of '
'`ClusterResolver.`')
self._cluster_resolvers = args
def cluster_spec(self):
"""Returns a union of all the ClusterSpecs from the ClusterResolvers.
Returns:
A ClusterSpec containing host information merged from all the underlying
ClusterResolvers.
Raises:
KeyError: If there are conflicting keys detected when merging two or
more dictionaries, this exception is raised.
Note: If there are multiple ClusterResolvers exposing ClusterSpecs with the
same job name, we will merge the list/dict of workers.
If *all* underlying ClusterSpecs expose the set of workers as lists, we will
concatenate the lists of workers, starting with the list of workers from
the first ClusterResolver passed into the constructor.
If *any* of the ClusterSpecs expose the set of workers as a dict, we will
treat all the sets of workers as dicts (even if they are returned as lists)
and will only merge them into a dict if there is no conflicting keys. If
there is a conflicting key, we will raise a `KeyError`.
"""
merged_cluster = {}
# We figure out whether it is all lists for a particular job, or whether
# there are dicts inside.
for cluster_resolver in self._cluster_resolvers:
cluster_spec = cluster_resolver.cluster_spec()
cluster_dict = cluster_spec.as_dict()
for job_name, tasks in cluster_dict.items():
if job_name in merged_cluster:
# If we see a dict, then we write a dict out regardless.
if isinstance(tasks, dict):
merged_cluster[job_name] = {}
else:
# We take whichever type is present.
if isinstance(tasks, list):
merged_cluster[job_name] = []
else:
merged_cluster[job_name] = {}
# We then do the merge as appropriate in merged_cluster[job].
for cluster_resolver in self._cluster_resolvers:
cluster_spec = cluster_resolver.cluster_spec()
cluster_dict = cluster_spec.as_dict()
for job_name, tasks in cluster_dict.items():
if isinstance(merged_cluster[job_name], list):
# We all have lists, we can just concatenate and be done.
merged_cluster[job_name].extend(tasks)
else:
if isinstance(tasks, list):
# We convert to a dictionary if the type is a list.
task_dict = dict(zip(range(0, len(tasks)), tasks))
else:
# We can simply make a copy (for update) and be done.
task_dict = tasks.copy()
# We detect if there are duplicates, and raise an error if so.
task_keys = set(task_dict)
merged_keys = set(merged_cluster[job_name].keys())
intersected_keys = task_keys.intersection(merged_keys)
if intersected_keys:
raise KeyError('Duplicate keys detected when merging two '
'ClusterSpecs: %s' % repr(intersected_keys))
# We do the merge after all the processing.
merged_cluster[job_name].update(task_dict)
return ClusterSpec(merged_cluster)
def master(self):
"""master returns the master address from the first cluster resolver."""
return self._cluster_resolvers[0].master()
| 38.481865 | 80 | 0.706476 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.training.server_lib import ClusterSpec
class ClusterResolver(object):
@abc.abstractmethod
def cluster_spec(self):
raise NotImplementedError(
'cluster_spec is not implemented for {}.'.format(self))
@abc.abstractmethod
def master(self):
raise NotImplementedError('master is not implemented for {}.'.format(self))
class SimpleClusterResolver(ClusterResolver):
def __init__(self, cluster_spec, master=''):
super(SimpleClusterResolver, self).__init__()
if not isinstance(cluster_spec, ClusterSpec):
raise TypeError('cluster_spec must be a ClusterSpec.')
self._cluster_spec = cluster_spec
if not isinstance(master, str):
raise TypeError('master must be a string.')
self._master = master
def cluster_spec(self):
return self._cluster_spec
def master(self):
return self._master
class UnionClusterResolver(ClusterResolver):
def __init__(self, *args):
super(UnionClusterResolver, self).__init__()
if not args:
raise ValueError('At least one ClusterResolver is required.')
for cluster_resolver in args:
if not isinstance(cluster_resolver, ClusterResolver):
raise TypeError('All arguments must be a sub-class of '
'`ClusterResolver.`')
self._cluster_resolvers = args
def cluster_spec(self):
merged_cluster = {}
for cluster_resolver in self._cluster_resolvers:
cluster_spec = cluster_resolver.cluster_spec()
cluster_dict = cluster_spec.as_dict()
for job_name, tasks in cluster_dict.items():
if job_name in merged_cluster:
if isinstance(tasks, dict):
merged_cluster[job_name] = {}
else:
if isinstance(tasks, list):
merged_cluster[job_name] = []
else:
merged_cluster[job_name] = {}
for cluster_resolver in self._cluster_resolvers:
cluster_spec = cluster_resolver.cluster_spec()
cluster_dict = cluster_spec.as_dict()
for job_name, tasks in cluster_dict.items():
if isinstance(merged_cluster[job_name], list):
merged_cluster[job_name].extend(tasks)
else:
if isinstance(tasks, list):
task_dict = dict(zip(range(0, len(tasks)), tasks))
else:
task_dict = tasks.copy()
task_keys = set(task_dict)
merged_keys = set(merged_cluster[job_name].keys())
intersected_keys = task_keys.intersection(merged_keys)
if intersected_keys:
raise KeyError('Duplicate keys detected when merging two '
'ClusterSpecs: %s' % repr(intersected_keys))
merged_cluster[job_name].update(task_dict)
return ClusterSpec(merged_cluster)
def master(self):
return self._cluster_resolvers[0].master()
| true | true |
1c480b50f62091e40b64bb5483b4200ef19d237f | 24 | py | Python | api/__init__.py | hugofer93/aimo-api | fe3cc3f169f7a46d4ba68625a7936f37f55b1aad | [
"MIT"
] | null | null | null | api/__init__.py | hugofer93/aimo-api | fe3cc3f169f7a46d4ba68625a7936f37f55b1aad | [
"MIT"
] | 10 | 2020-09-07T07:23:08.000Z | 2022-03-02T05:32:10.000Z | api/__init__.py | hugofer93/aimo-api | fe3cc3f169f7a46d4ba68625a7936f37f55b1aad | [
"MIT"
] | null | null | null | from api.app import app
| 12 | 23 | 0.791667 | from api.app import app
| true | true |
1c480bf2b8efe1f035b8d1c81bfb21a1df0ca5f0 | 2,546 | py | Python | tests/test_factory.py | unt-libraries/aubrey-transcription | 0383d67a0ed3f3ddaa306edfb09b2da1364e4178 | [
"BSD-3-Clause"
] | null | null | null | tests/test_factory.py | unt-libraries/aubrey-transcription | 0383d67a0ed3f3ddaa306edfb09b2da1364e4178 | [
"BSD-3-Clause"
] | 8 | 2018-08-22T19:14:38.000Z | 2019-11-22T17:12:56.000Z | tests/test_factory.py | unt-libraries/aubrey-transcription | 0383d67a0ed3f3ddaa306edfb09b2da1364e4178 | [
"BSD-3-Clause"
] | null | null | null | import mock
import pytest
from aubrey_transcription import create_app, default_settings
@mock.patch('aubrey_transcription.os.makedirs') # We don't want 'instance' dirs everywhere.
class TestCreateApp:
@pytest.fixture(scope="session")
def settings_file(self, tmpdir_factory):
settings = (
"PAIRTREE_BASE = '/different/path'"
"\nTRANSCRIPTION_URL = 'http://someothersite.com'"
"\nEXTENSIONS_META = {'txt': {'use': 'text', 'mimetype': 'text'}}"
"\nFILENAME_PATTERN = 'some regex'"
)
fn = tmpdir_factory.mktemp('data').join('settings.py')
fn.write(settings)
return fn
def test_makes_instance_dir(self, mock_makedirs):
app = create_app()
mock_makedirs.assert_called_once_with(app.instance_path)
def test_can_override_instance_dir(self, mock_makedirs):
create_app(instance_path='/new/path')
mock_makedirs.assert_called_once_with('/new/path')
def test_default_settings(self, mock_makedirs):
# Move instance path in case a settings file is already in the standard location.
app = create_app(instance_path='/nothing/here')
assert app.config['PAIRTREE_BASE'] == default_settings.PAIRTREE_BASE
assert app.config['TRANSCRIPTION_URL'] == default_settings.TRANSCRIPTION_URL
assert app.config['EXTENSIONS_META'] == default_settings.EXTENSIONS_META
assert app.config['FILENAME_PATTERN'] == default_settings.FILENAME_PATTERN
def test_instance_file_overrides_default_settings(self, mock_makedirs, settings_file):
app = create_app(instance_path=settings_file.dirname)
assert app.config['PAIRTREE_BASE'] == '/different/path'
assert app.config['TRANSCRIPTION_URL'] == 'http://someothersite.com'
assert app.config['EXTENSIONS_META'] == {'txt': {'use': 'text', 'mimetype': 'text'}}
assert app.config['FILENAME_PATTERN'] == 'some regex'
def test_settings_passed_in_overrides_instance_file(self, mock_makedirs, settings_file):
app = create_app(
test_config={'PAIRTREE_BASE': '/right/here', 'TRANSCRIPTION_URL': 'something.com',
'EXTENSIONS_META': {}, 'FILENAME_PATTERN': 'a pattern'},
instance_path=settings_file.dirname
)
assert app.config['PAIRTREE_BASE'] == '/right/here'
assert app.config['TRANSCRIPTION_URL'] == 'something.com'
assert app.config['EXTENSIONS_META'] == {}
assert app.config['FILENAME_PATTERN'] == 'a pattern'
| 47.148148 | 94 | 0.67989 | import mock
import pytest
from aubrey_transcription import create_app, default_settings
@mock.patch('aubrey_transcription.os.makedirs')
class TestCreateApp:
@pytest.fixture(scope="session")
def settings_file(self, tmpdir_factory):
settings = (
"PAIRTREE_BASE = '/different/path'"
"\nTRANSCRIPTION_URL = 'http://someothersite.com'"
"\nEXTENSIONS_META = {'txt': {'use': 'text', 'mimetype': 'text'}}"
"\nFILENAME_PATTERN = 'some regex'"
)
fn = tmpdir_factory.mktemp('data').join('settings.py')
fn.write(settings)
return fn
def test_makes_instance_dir(self, mock_makedirs):
app = create_app()
mock_makedirs.assert_called_once_with(app.instance_path)
def test_can_override_instance_dir(self, mock_makedirs):
create_app(instance_path='/new/path')
mock_makedirs.assert_called_once_with('/new/path')
def test_default_settings(self, mock_makedirs):
# Move instance path in case a settings file is already in the standard location.
app = create_app(instance_path='/nothing/here')
assert app.config['PAIRTREE_BASE'] == default_settings.PAIRTREE_BASE
assert app.config['TRANSCRIPTION_URL'] == default_settings.TRANSCRIPTION_URL
assert app.config['EXTENSIONS_META'] == default_settings.EXTENSIONS_META
assert app.config['FILENAME_PATTERN'] == default_settings.FILENAME_PATTERN
def test_instance_file_overrides_default_settings(self, mock_makedirs, settings_file):
app = create_app(instance_path=settings_file.dirname)
assert app.config['PAIRTREE_BASE'] == '/different/path'
assert app.config['TRANSCRIPTION_URL'] == 'http://someothersite.com'
assert app.config['EXTENSIONS_META'] == {'txt': {'use': 'text', 'mimetype': 'text'}}
assert app.config['FILENAME_PATTERN'] == 'some regex'
def test_settings_passed_in_overrides_instance_file(self, mock_makedirs, settings_file):
app = create_app(
test_config={'PAIRTREE_BASE': '/right/here', 'TRANSCRIPTION_URL': 'something.com',
'EXTENSIONS_META': {}, 'FILENAME_PATTERN': 'a pattern'},
instance_path=settings_file.dirname
)
assert app.config['PAIRTREE_BASE'] == '/right/here'
assert app.config['TRANSCRIPTION_URL'] == 'something.com'
assert app.config['EXTENSIONS_META'] == {}
assert app.config['FILENAME_PATTERN'] == 'a pattern'
| true | true |
1c480c30b16efe1ba1ddd3320ec4bbc6d27e8a0b | 644 | py | Python | problems/cop/academic/CoinsGrid.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 28 | 2019-12-14T09:25:52.000Z | 2022-03-24T08:15:13.000Z | problems/cop/academic/CoinsGrid.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 7 | 2020-04-15T11:02:07.000Z | 2022-01-20T12:48:54.000Z | problems/cop/academic/CoinsGrid.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 3 | 2020-04-15T08:23:45.000Z | 2021-12-07T14:02:28.000Z | """
See 'Constraint Solving and Planning with Picat' (page 43)
From Tony Hurlimann, A coin puzzle, SVOR-contest 2007
Some data: (8,4) (8,5) (9,4) (10,4) (31,14)
Examples of Execution:
python3 CoinsGrid.py
python3 CoinsGrid.py -data=[10,4]
"""
from pycsp3 import *
n, c = data or (8, 4)
# x[i][j] is 1 if a coin is placed at row i and column j
x = VarArray(size=[n, n], dom={0, 1})
satisfy(
[Sum(x[i]) == c for i in range(n)],
[Sum(x[:, j]) == c for j in range(n)]
)
minimize(
Sum(x[i][j] * abs(i - j) ** 2 for i in range(n) for j in range(n))
)
""" Comments
1) there are other variants in Hurlimann's paper (TODO)
"""
| 19.515152 | 70 | 0.610248 |
from pycsp3 import *
n, c = data or (8, 4)
x = VarArray(size=[n, n], dom={0, 1})
satisfy(
[Sum(x[i]) == c for i in range(n)],
[Sum(x[:, j]) == c for j in range(n)]
)
minimize(
Sum(x[i][j] * abs(i - j) ** 2 for i in range(n) for j in range(n))
)
| true | true |
1c480c456a6f3aede1614e5edca57e8015b7bcc5 | 749 | py | Python | test/ppm_to_challenge.py | mrwonko/pwb2015 | 9e2f82086a72c12150dcd2bd44b52dc7ebb429fc | [
"MIT"
] | null | null | null | test/ppm_to_challenge.py | mrwonko/pwb2015 | 9e2f82086a72c12150dcd2bd44b52dc7ebb429fc | [
"MIT"
] | null | null | null | test/ppm_to_challenge.py | mrwonko/pwb2015 | 9e2f82086a72c12150dcd2bd44b52dc7ebb429fc | [
"MIT"
] | null | null | null | import sys
with open( sys.argv[ 1 ] ) as f:
magic = f.readline()
assert( magic == "P3\n" )
comment = f.readline()
assert( comment.startswith( "#" ) )
dimensions = f.readline()
w, h = map( int, dimensions.split() )
_ = f.readline() # max
colors = {}
grid = []
for y in range( h ):
line = []
for x in range( w ):
color = tuple( [ int( f.readline() ) for _ in range( 3 ) ] )
if color not in colors:
colors[ color ] = len( colors ) + 1
line.append( colors[ color ] )
grid = [line] + grid
with open( sys.argv[ 1 ] + ".txt", "w" ) as o:
o.write( str( grid ).replace( " ", "" ) )
o.write( "\n" )
| 31.208333 | 73 | 0.459279 | import sys
with open( sys.argv[ 1 ] ) as f:
magic = f.readline()
assert( magic == "P3\n" )
comment = f.readline()
assert( comment.startswith( "#" ) )
dimensions = f.readline()
w, h = map( int, dimensions.split() )
_ = f.readline()
colors = {}
grid = []
for y in range( h ):
line = []
for x in range( w ):
color = tuple( [ int( f.readline() ) for _ in range( 3 ) ] )
if color not in colors:
colors[ color ] = len( colors ) + 1
line.append( colors[ color ] )
grid = [line] + grid
with open( sys.argv[ 1 ] + ".txt", "w" ) as o:
o.write( str( grid ).replace( " ", "" ) )
o.write( "\n" )
| true | true |
1c480d61ea3390831e6d2b87dc17df6cf61992b3 | 3,911 | py | Python | tapia/exercise5/ex5.py | appfs/appfs | 8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3 | [
"MIT"
] | 11 | 2017-04-21T11:39:55.000Z | 2022-02-11T20:25:18.000Z | tapia/exercise5/ex5.py | appfs/appfs | 8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3 | [
"MIT"
] | 69 | 2017-04-26T09:30:38.000Z | 2017-08-01T11:31:21.000Z | tapia/exercise5/ex5.py | appfs/appfs | 8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3 | [
"MIT"
] | 53 | 2017-04-20T16:16:11.000Z | 2017-07-19T12:53:01.000Z | import sys
import networkx as nx
def is_valid(line):
"""
Checks if the content of an edge has a valid format.
<vertex vertex weight>
:param line: A line of the input text.
:type: String
:return: A list if edge is valid, None otherwise.
"""
edge = line.rsplit()
wrong_args_number = len(edge) != 3
is_comment = line.startswith("#")
if wrong_args_number or not edge or is_comment:
return None
try:
int(edge[0])
int(edge[1])
int(edge[2])
except ValueError:
return None
return edge
def generate_edges(content):
"""
Yields an edge <vertex vertex weight> to the graph if it's valid.
:param content: The raw content of the input file.
"""
for edge in content:
valid_edge = is_valid(edge)
if valid_edge:
yield valid_edge
def load_graph_data(content):
"""
Loads the content of the input file as a graph and returns
a graph structure and its first node.
:param content: The raw content of the input file.
:return: An undirected Graph and a Node.
"""
G = nx.Graph()
edges = generate_edges(content)
for edge in edges:
if edge:
G.add_edge(edge[0], edge[1], weight=edge[2])
return G
def dijkstra(graph):
"""
Shortest path algorithm. Returns a dictionary with the shortest distance from each
node in the graph to the initial node '1'.
:param graph: A Graph structure
:return: Dictionary with <node: distance> where the distance is the shortest path to the initial node.
"""
nodes = graph.nodes()
initial_node_index = nodes.index('1')
initial_node = nodes[initial_node_index]
neighbors_initial_node = graph.neighbors(initial_node)
distances = dict()
for node in nodes:
if node in neighbors_initial_node:
distances[node] = int(graph[initial_node][node]['weight'])
else:
distances[node] = 2000000000
distances[initial_node] = 0
visited = [initial_node]
while sorted(nodes) != sorted(visited):
current_node = min_vertex(distances, visited)
visited.append(current_node)
for neighbor in graph.neighbors(current_node):
distance_to_neighbor = int(graph[current_node][neighbor]['weight'])
temp_dist = distances[current_node] + distance_to_neighbor
if temp_dist < distances[neighbor]:
distances[neighbor] = temp_dist
return distances
def min_vertex(distances, visited):
'''
Returns the vector with the minimum distance which is not yet visited.
:param distances: A dictionary with <node: distance> structure
:param visited: A list with the visited nodes
:return: The non-visited vertex with the minimum distance
'''
min_distance = 2000000000
min_vertex = None
for node, neighbor_distance in distances.items():
if node in visited:
continue
if neighbor_distance < min_distance:
min_distance = neighbor_distance
min_vertex = node
return min_vertex
if __name__ == '__main__':
filename = sys.argv[-1]
content = open(filename)
graph = load_graph_data(content)
distances = dijkstra(graph)
result_dist = 2000000000
result_vertex = None
for k, v in distances.items():
if k == '1':
continue
if v < result_dist:
result_dist = v
result_vertex = k
elif v == result_dist:
key_list = list(distances.keys())
index_result = key_list.index(result_vertex)
index_current = key_list.index(k)
if index_result < index_current:
result_vertex = k
print('RESULT VERTEX {result_vertex}'.format(result_vertex=result_vertex))
print('RESULT DIST {result_dist}'.format(result_dist=result_dist))
| 26.605442 | 106 | 0.639223 | import sys
import networkx as nx
def is_valid(line):
edge = line.rsplit()
wrong_args_number = len(edge) != 3
is_comment = line.startswith("#")
if wrong_args_number or not edge or is_comment:
return None
try:
int(edge[0])
int(edge[1])
int(edge[2])
except ValueError:
return None
return edge
def generate_edges(content):
for edge in content:
valid_edge = is_valid(edge)
if valid_edge:
yield valid_edge
def load_graph_data(content):
G = nx.Graph()
edges = generate_edges(content)
for edge in edges:
if edge:
G.add_edge(edge[0], edge[1], weight=edge[2])
return G
def dijkstra(graph):
nodes = graph.nodes()
initial_node_index = nodes.index('1')
initial_node = nodes[initial_node_index]
neighbors_initial_node = graph.neighbors(initial_node)
distances = dict()
for node in nodes:
if node in neighbors_initial_node:
distances[node] = int(graph[initial_node][node]['weight'])
else:
distances[node] = 2000000000
distances[initial_node] = 0
visited = [initial_node]
while sorted(nodes) != sorted(visited):
current_node = min_vertex(distances, visited)
visited.append(current_node)
for neighbor in graph.neighbors(current_node):
distance_to_neighbor = int(graph[current_node][neighbor]['weight'])
temp_dist = distances[current_node] + distance_to_neighbor
if temp_dist < distances[neighbor]:
distances[neighbor] = temp_dist
return distances
def min_vertex(distances, visited):
min_distance = 2000000000
min_vertex = None
for node, neighbor_distance in distances.items():
if node in visited:
continue
if neighbor_distance < min_distance:
min_distance = neighbor_distance
min_vertex = node
return min_vertex
if __name__ == '__main__':
filename = sys.argv[-1]
content = open(filename)
graph = load_graph_data(content)
distances = dijkstra(graph)
result_dist = 2000000000
result_vertex = None
for k, v in distances.items():
if k == '1':
continue
if v < result_dist:
result_dist = v
result_vertex = k
elif v == result_dist:
key_list = list(distances.keys())
index_result = key_list.index(result_vertex)
index_current = key_list.index(k)
if index_result < index_current:
result_vertex = k
print('RESULT VERTEX {result_vertex}'.format(result_vertex=result_vertex))
print('RESULT DIST {result_dist}'.format(result_dist=result_dist))
| true | true |
1c480e0170646c335358d40a83a3d3909901aa65 | 18,084 | py | Python | sphinx/util/logging.py | jessetan/sphinx | 4cae0ecf8641551028b1a54168e49d52cb6bc7f3 | [
"BSD-2-Clause"
] | null | null | null | sphinx/util/logging.py | jessetan/sphinx | 4cae0ecf8641551028b1a54168e49d52cb6bc7f3 | [
"BSD-2-Clause"
] | null | null | null | sphinx/util/logging.py | jessetan/sphinx | 4cae0ecf8641551028b1a54168e49d52cb6bc7f3 | [
"BSD-2-Clause"
] | null | null | null | """
sphinx.util.logging
~~~~~~~~~~~~~~~~~~~
Logging utility functions for Sphinx.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import logging
import logging.handlers
from collections import defaultdict
from contextlib import contextmanager
from typing import IO, TYPE_CHECKING, Any, Dict, Generator, List, Tuple, Type, Union
from docutils import nodes
from docutils.nodes import Node
from docutils.utils import get_source_line
from sphinx.errors import SphinxWarning
from sphinx.util.console import colorize
if TYPE_CHECKING:
from sphinx.application import Sphinx
#: Prefix under which all Sphinx loggers are registered.
NAMESPACE = 'sphinx'
#: Custom logging level between INFO (20) and DEBUG (10).
VERBOSE = 15

#: Mapping from level name to numeric logging level.  Unknown names fall
#: back to ``logging.WARNING`` via the defaultdict factory.
LEVEL_NAMES = defaultdict(
    lambda: logging.WARNING,
    {
        'CRITICAL': logging.CRITICAL,
        'SEVERE': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'VERBOSE': VERBOSE,
        'DEBUG': logging.DEBUG,
    },
)  # type: Dict[str, int]

#: Mapping from verbosity count to logging level; verbosities above 2 fall
#: back to ``0`` (``logging.NOTSET``) via the defaultdict factory.
VERBOSITY_MAP = defaultdict(
    lambda: 0,
    {
        0: logging.INFO,
        1: VERBOSE,
        2: logging.DEBUG,
    },
)  # type: Dict[int, int]

#: Mapping from logging level to console color name; any other level is
#: rendered in 'blue'.
COLOR_MAP = defaultdict(
    lambda: 'blue',
    {
        logging.ERROR: 'darkred',
        logging.WARNING: 'red',
        logging.DEBUG: 'darkgray',
    },
)  # type: Dict[int, str]
def getLogger(name: str) -> "SphinxLoggerAdapter":
    """Get logger wrapped by :class:`sphinx.util.logging.SphinxLoggerAdapter`.

    Sphinx logger always uses ``sphinx.*`` namespace to be independent from
    settings of root logger. It ensures logging is consistent even if a
    third-party extension or imported application resets logger settings.

    Example usage::

        >>> from sphinx.util import logging
        >>> logger = logging.getLogger(__name__)
        >>> logger.info('Hello, this is an extension!')
        Hello, this is an extension!
    """
    # Force the logger into the ``sphinx.`` namespace so its behavior does
    # not depend on the root logger's configuration.
    underlying = logging.getLogger('%s.%s' % (NAMESPACE, name))
    # Re-enable the logger in case third-party code disabled it.
    underlying.disabled = False
    # Wrap it so callers can use the extra Sphinx keywords (type, subtype, ...).
    return SphinxLoggerAdapter(underlying, {})
def convert_serializable(records: List[logging.LogRecord]) -> None:
    """Convert LogRecord serializable."""
    for record in records:
        # Render the final message now and drop the args so the record can
        # be pickled without carrying arbitrary objects.
        record.msg = record.getMessage()
        record.args = ()

        loc = getattr(record, 'location', None)
        if isinstance(loc, nodes.Node):
            # docutils nodes are not picklable; keep only "source:line".
            record.location = get_node_location(loc)  # type: ignore
class SphinxLogRecord(logging.LogRecord):
    """Log record class supporting location"""
    prefix = ''
    location = None  # type: Any

    def getMessage(self) -> str:
        rendered = super().getMessage()
        where = getattr(self, 'location', None)
        if where:
            # A located record always carries the prefix after the location.
            return '{}: {}{}'.format(where, self.prefix, rendered)
        if self.prefix not in rendered:
            rendered = self.prefix + rendered
        return rendered
class SphinxInfoLogRecord(SphinxLogRecord):
    """Info log record class supporting location"""
    prefix = ''  # do not show any prefix for INFO messages


class SphinxWarningLogRecord(SphinxLogRecord):
    """Warning log record class supporting location"""
    prefix = 'WARNING: '  # prepended by SphinxLogRecord.getMessage()
class SphinxLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter allowing ``type`` and ``subtype`` keywords."""
    # Keywords lifted out of **kwargs into record attributes (via ``extra``).
    KEYWORDS = ['type', 'subtype', 'location', 'nonl', 'color', 'once']

    def log(self, level: Union[int, str], msg: str, *args: Any, **kwargs: Any) -> None:
        # Accept symbolic level names ('WARNING', 'VERBOSE', ...) as well
        # as numeric levels.
        levelno = level if isinstance(level, int) else LEVEL_NAMES[level]
        super().log(levelno, msg, *args, **kwargs)

    def verbose(self, msg: str, *args: Any, **kwargs: Any) -> None:
        self.log(VERBOSE, msg, *args, **kwargs)

    def process(self, msg: str, kwargs: Dict) -> Tuple[str, Dict]:  # type: ignore
        extra = kwargs.setdefault('extra', {})
        for keyword in self.KEYWORDS:
            try:
                extra[keyword] = kwargs.pop(keyword)
            except KeyError:
                pass
        return msg, kwargs

    def handle(self, record: logging.LogRecord) -> None:
        self.logger.handle(record)
class WarningStreamHandler(logging.StreamHandler):
    """StreamHandler for warnings.

    A distinct subclass so helpers such as pending_warnings() and
    prefixed_warnings() can locate the warning stream among
    ``logger.handlers`` via isinstance().
    """
    pass
class NewLineStreamHandler(logging.StreamHandler):
    """StreamHandler which switches line terminator by record.nonl flag."""

    def emit(self, record: logging.LogRecord) -> None:
        try:
            self.acquire()
            suppress_newline = getattr(record, 'nonl', False)
            if suppress_newline:
                # nonl=True drops the trailing newline for this record only.
                self.terminator = ''
            super().emit(record)
        finally:
            # Always restore the default terminator before unlocking.
            self.terminator = '\n'
            self.release()
class MemoryHandler(logging.handlers.BufferingHandler):
    """Handler buffering all logs."""

    def __init__(self) -> None:
        # Capacity is irrelevant: shouldFlush() below never triggers a flush.
        super().__init__(-1)

    def shouldFlush(self, record: logging.LogRecord) -> bool:
        return False  # never flush automatically

    def flushTo(self, logger: logging.Logger) -> None:
        # Replay every buffered record through *logger*, then reset.
        self.acquire()
        try:
            for buffered in self.buffer:
                logger.handle(buffered)
            self.buffer = []  # type: List[logging.LogRecord]
        finally:
            self.release()

    def clear(self) -> List[logging.LogRecord]:
        # Drop and return all buffered records without emitting them.
        drained, self.buffer = self.buffer, []
        return drained
@contextmanager
def pending_warnings() -> Generator[logging.Handler, None, None]:
    """Contextmanager to pend logging warnings temporary.

    While active, WARNING-level records are buffered in a
    :class:`MemoryHandler` instead of reaching the warning stream; they
    are replayed through the restored handlers on exit.

    Similar to :func:`pending_logging`.
    """
    logger = logging.getLogger(NAMESPACE)
    memhandler = MemoryHandler()
    memhandler.setLevel(logging.WARNING)
    try:
        handlers = []
        # Detach only the warning stream handlers; info output continues
        # to flow normally while warnings are pended.
        for handler in logger.handlers[:]:
            if isinstance(handler, WarningStreamHandler):
                logger.removeHandler(handler)
                handlers.append(handler)
        logger.addHandler(memhandler)
        yield memhandler
    finally:
        logger.removeHandler(memhandler)
        # Reinstall the original handlers first, then flush the pended
        # warnings through them.
        for handler in handlers:
            logger.addHandler(handler)
        memhandler.flushTo(logger)
@contextmanager
def suppress_logging() -> Generator[MemoryHandler, None, None]:
    """Contextmanager to suppress logging all logs temporary.

    For example::

       >>> with suppress_logging():
       >>>     logger.warning('Warning message!')  # suppressed
       >>>     some_long_process()
       >>>
    """
    logger = logging.getLogger(NAMESPACE)
    memhandler = MemoryHandler()
    try:
        handlers = []
        # Swap *all* handlers out for the buffering handler.
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
            handlers.append(handler)
        logger.addHandler(memhandler)
        yield memhandler
    finally:
        logger.removeHandler(memhandler)
        # Reinstall the original handlers. Buffered records are NOT
        # replayed here -- pending_logging() adds that on top.
        for handler in handlers:
            logger.addHandler(handler)
@contextmanager
def pending_logging() -> Generator[MemoryHandler, None, None]:
    """Contextmanager to pend logging all logs temporary.

    For example::

       >>> with pending_logging():
       >>>     logger.warning('Warning message!')  # not flushed yet
       >>>     some_long_process()
       >>>
       Warning message!  # the warning is flushed here
    """
    logger = logging.getLogger(NAMESPACE)
    try:
        # Buffer everything via suppress_logging() ...
        with suppress_logging() as memhandler:
            yield memhandler
    finally:
        # ... then replay the buffered records once the original
        # handlers have been restored.
        memhandler.flushTo(logger)
@contextmanager
def skip_warningiserror(skip: bool = True) -> Generator[None, None, None]:
    """contextmanager to skip WarningIsErrorFilter for a while.

    With ``skip=False`` this is a no-op, which lets callers write
    ``with skip_warningiserror(flag):`` unconditionally.
    """
    logger = logging.getLogger(NAMESPACE)
    if skip is False:
        yield
    else:
        try:
            disabler = DisableWarningIsErrorFilter()
            for handler in logger.handlers:
                # use internal method; filters.insert() directly to install disabler
                # before WarningIsErrorFilter
                handler.filters.insert(0, disabler)
            yield
        finally:
            # Remove the disabler from every handler again.
            for handler in logger.handlers:
                handler.removeFilter(disabler)
@contextmanager
def prefixed_warnings(prefix: str) -> Generator[None, None, None]:
    """Prepend prefix to all records for a while.

    For example::

       >>> with prefixed_warnings("prefix:"):
       >>>     logger.warning('Warning message!')  # => prefix: Warning message!

    .. versionadded:: 2.0
    """
    logger = logging.getLogger(NAMESPACE)
    # Find the warning stream handler; prefixes only apply to warnings.
    warning_handler = None
    for handler in logger.handlers:
        if isinstance(handler, WarningStreamHandler):
            warning_handler = handler
            break
    else:
        # warning stream not found
        yield
        return

    # Check whether a prefix filter is already installed (nested usage).
    prefix_filter = None
    for _filter in warning_handler.filters:
        if isinstance(_filter, MessagePrefixFilter):
            prefix_filter = _filter
            break

    if prefix_filter:
        # already prefixed: temporarily override the existing prefix and
        # restore it afterwards.
        try:
            previous = prefix_filter.prefix
            prefix_filter.prefix = prefix
            yield
        finally:
            prefix_filter.prefix = previous
    else:
        # not prefixed yet: install a fresh filter for the duration.
        try:
            prefix_filter = MessagePrefixFilter(prefix)
            warning_handler.addFilter(prefix_filter)
            yield
        finally:
            warning_handler.removeFilter(prefix_filter)
class LogCollector:
    """Collect the log records emitted while :meth:`collect` is active."""

    def __init__(self) -> None:
        self.logs = []  # type: List[logging.LogRecord]

    @contextmanager
    def collect(self) -> Generator[None, None, None]:
        # Buffer everything during the body, then keep the records
        # instead of flushing them.
        with pending_logging() as buffered:
            yield

        self.logs = buffered.clear()
class InfoFilter(logging.Filter):
    """Filter error and warning messages."""

    def filter(self, record: logging.LogRecord) -> bool:
        # Pass only records below WARNING (DEBUG / VERBOSE / INFO).
        return record.levelno < logging.WARNING
def is_suppressed_warning(type: str, subtype: str, suppress_warnings: List[str]) -> bool:
    """Check the warning is suppressed or not.

    Entries in *suppress_warnings* are either ``"type"`` (suppresses the
    whole type) or ``"type.subtype"`` (``"*"`` matches any subtype).
    """
    if type is None:
        return False

    for entry in suppress_warnings:
        if '.' in entry:
            target, subtarget = entry.split('.', 1)
        else:
            target, subtarget = entry, None

        if target != type:
            continue
        # A bare target, a '*' subtarget, an exact subtype match, or a
        # record without a subtype all count as suppressed.
        if subtarget is None or subtarget == '*' or subtarget == subtype or subtype is None:
            return True

    return False
class WarningSuppressor(logging.Filter):
    """Filter logs by `suppress_warnings`.

    Drops records whose ``type``/``subtype`` match the application's
    ``suppress_warnings`` config value; counts the rest in
    ``app._warncount``.
    """

    def __init__(self, app: "Sphinx") -> None:
        self.app = app
        super().__init__()

    def filter(self, record: logging.LogRecord) -> bool:
        type = getattr(record, 'type', None)
        subtype = getattr(record, 'subtype', None)
        try:
            suppress_warnings = self.app.config.suppress_warnings
        except AttributeError:
            # config is not initialized yet (ex. in conf.py)
            suppress_warnings = []
        if is_suppressed_warning(type, subtype, suppress_warnings):
            return False
        else:
            # Count every warning that actually gets emitted.
            self.app._warncount += 1
            return True
class WarningIsErrorFilter(logging.Filter):
    """Raise exception if warning emitted.

    Implements the ``-W`` option: when ``app.warningiserror`` is set,
    each warning record is converted into a :exc:`SphinxWarning`.
    """

    def __init__(self, app: "Sphinx") -> None:
        self.app = app
        super().__init__()

    def filter(self, record: logging.LogRecord) -> bool:
        if getattr(record, 'skip_warningsiserror', False):
            # disabled by DisableWarningIsErrorFilter
            return True
        elif self.app.warningiserror:
            location = getattr(record, 'location', '')
            try:
                message = record.msg % record.args
            except (TypeError, ValueError):
                message = record.msg  # use record.msg itself
            if location:
                exc = SphinxWarning(location + ":" + str(message))
            else:
                exc = SphinxWarning(message)
            if record.exc_info is not None:
                # Preserve the original traceback as the cause.
                raise exc from record.exc_info[1]
            else:
                raise exc
        else:
            return True
class DisableWarningIsErrorFilter(logging.Filter):
    """Disable WarningIsErrorFilter if this filter installed."""

    def filter(self, record: logging.LogRecord) -> bool:
        # Tag the record; WarningIsErrorFilter sees the flag and lets it
        # through instead of raising SphinxWarning.
        setattr(record, 'skip_warningsiserror', True)
        return True
class MessagePrefixFilter(logging.Filter):
    """Prepend prefix to all records."""

    def __init__(self, prefix: str) -> None:
        self.prefix = prefix
        super().__init__()

    def filter(self, record: logging.LogRecord) -> bool:
        # An empty prefix leaves the record untouched.
        if self.prefix:
            record.msg = ' '.join((self.prefix, record.msg))
        return True
class OnceFilter(logging.Filter):
    """Show the message only once."""

    def __init__(self, name: str = '') -> None:
        super().__init__(name)
        # Maps a message template to the argument tuples already seen.
        self.messages = {}  # type: Dict[str, List]

    def filter(self, record: logging.LogRecord) -> bool:
        if not getattr(record, 'once', ''):
            # Records without the ``once`` marker always pass.
            return True
        seen_args = self.messages.setdefault(record.msg, [])
        if record.args in seen_args:
            return False
        seen_args.append(record.args)
        return True
class SphinxLogRecordTranslator(logging.Filter):
    """Converts a log record to one Sphinx expects

    * Make a instance of SphinxLogRecord
    * docname to path if location given
    """
    # Concrete record class; assigned by the subclasses below.
    LogRecordClass = None  # type: Type[logging.LogRecord]

    def __init__(self, app: "Sphinx") -> None:
        self.app = app
        super().__init__()

    def filter(self, record: SphinxWarningLogRecord) -> bool:  # type: ignore
        if isinstance(record, logging.LogRecord):
            # force subclassing to handle location
            record.__class__ = self.LogRecordClass  # type: ignore
        location = getattr(record, 'location', None)
        if isinstance(location, tuple):
            # (docname, lineno) pair from the build environment.
            docname, lineno = location
            if docname and lineno:
                record.location = '%s:%s' % (self.app.env.doc2path(docname), lineno)
            elif docname:
                record.location = '%s' % self.app.env.doc2path(docname)
            else:
                record.location = None
        elif isinstance(location, nodes.Node):
            record.location = get_node_location(location)
        elif location and ':' not in location:
            # Bare docname (no "path:line" form yet): convert to a path.
            record.location = '%s' % self.app.env.doc2path(location)
        return True
class InfoLogRecordTranslator(SphinxLogRecordTranslator):
    """LogRecordTranslator for INFO level log records."""
    LogRecordClass = SphinxInfoLogRecord  # no visible message prefix


class WarningLogRecordTranslator(SphinxLogRecordTranslator):
    """LogRecordTranslator for WARNING level log records."""
    LogRecordClass = SphinxWarningLogRecord  # adds a 'WARNING: ' prefix
def get_node_location(node: Node) -> str:
    """Return a ``source:line`` location string for *node*.

    Falls back to ``source:`` or ``<unknown>:line`` when only one part is
    known, and ``None`` when neither is available.
    """
    source, line = get_source_line(node)
    if source and line:
        return '%s:%s' % (source, line)
    if source:
        return '%s:' % source
    if line:
        return '<unknown>:%s' % line
    return None
class ColorizeFormatter(logging.Formatter):
    """Formatter that colorizes messages by level (or ``record.color``)."""

    def format(self, record: logging.LogRecord) -> str:
        message = super().format(record)
        # An explicit ``color`` keyword on the record overrides the
        # level-based default from COLOR_MAP.
        color = getattr(record, 'color', None)
        if color is None:
            color = COLOR_MAP.get(record.levelno)
        if color:
            return colorize(color, message)
        else:
            return message
class SafeEncodingWriter:
    """Stream writer which ignores UnicodeEncodeError silently"""

    def __init__(self, stream: IO) -> None:
        self.stream = stream
        # A stream may expose encoding=None; fall back to ASCII either way.
        self.encoding = getattr(stream, 'encoding', 'ascii') or 'ascii'

    def write(self, data: str) -> None:
        try:
            self.stream.write(data)
        except UnicodeEncodeError:
            # Replace characters the stream cannot encode and retry.
            safe = data.encode(self.encoding, 'replace').decode(self.encoding)
            self.stream.write(safe)

    def flush(self) -> None:
        flusher = getattr(self.stream, 'flush', None)
        if flusher is not None:
            flusher()
class LastMessagesWriter:
    """Stream writer which memories last 10 messages to save trackback"""

    def __init__(self, app: "Sphinx", stream: IO) -> None:
        # *stream* is accepted for interface compatibility but unused here;
        # messages are kept on the application object instead.
        self.app = app

    def write(self, data: str) -> None:
        # NOTE(review): any bounding to the "last 10" messages presumably
        # happens on app.messagelog itself -- confirm in the Sphinx app.
        self.app.messagelog.append(data)
def setup(app: "Sphinx", status: IO, warning: IO) -> None:
    """Setup root logger for Sphinx"""
    logger = logging.getLogger(NAMESPACE)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    # clear all handlers
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)
    # INFO and below go to *status*; NewLineStreamHandler honors the
    # record.nonl flag for inline progress output.
    info_handler = NewLineStreamHandler(SafeEncodingWriter(status))  # type: ignore
    info_handler.addFilter(InfoFilter())
    info_handler.addFilter(InfoLogRecordTranslator(app))
    info_handler.setLevel(VERBOSITY_MAP[app.verbosity])
    info_handler.setFormatter(ColorizeFormatter())
    # WARNING and above go to *warning*. Filter order matters:
    # suppression runs before -W promotion, and OnceFilter dedupes last.
    warning_handler = WarningStreamHandler(SafeEncodingWriter(warning))  # type: ignore
    warning_handler.addFilter(WarningSuppressor(app))
    warning_handler.addFilter(WarningLogRecordTranslator(app))
    warning_handler.addFilter(WarningIsErrorFilter(app))
    warning_handler.addFilter(OnceFilter())
    warning_handler.setLevel(logging.WARNING)
    warning_handler.setFormatter(ColorizeFormatter())
    # Mirror informational output into app.messagelog for later inspection.
    messagelog_handler = logging.StreamHandler(LastMessagesWriter(app, status))  # type: ignore
    messagelog_handler.addFilter(InfoFilter())
    messagelog_handler.setLevel(VERBOSITY_MAP[app.verbosity])
    messagelog_handler.setFormatter(ColorizeFormatter())
    logger.addHandler(info_handler)
    logger.addHandler(warning_handler)
    logger.addHandler(messagelog_handler)
| 30.444444 | 95 | 0.626742 |
import logging
import logging.handlers
from collections import defaultdict
from contextlib import contextmanager
from typing import IO, TYPE_CHECKING, Any, Dict, Generator, List, Tuple, Type, Union
from docutils import nodes
from docutils.nodes import Node
from docutils.utils import get_source_line
from sphinx.errors import SphinxWarning
from sphinx.util.console import colorize
if TYPE_CHECKING:
from sphinx.application import Sphinx
NAMESPACE = 'sphinx'
VERBOSE = 15
LEVEL_NAMES = defaultdict(lambda: logging.WARNING)
LEVEL_NAMES.update({
'CRITICAL': logging.CRITICAL,
'SEVERE': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'VERBOSE': VERBOSE,
'DEBUG': logging.DEBUG,
})
VERBOSITY_MAP = defaultdict(lambda: 0)
VERBOSITY_MAP.update({
0: logging.INFO,
1: VERBOSE,
2: logging.DEBUG,
})
COLOR_MAP = defaultdict(lambda: 'blue',
{
logging.ERROR: 'darkred',
logging.WARNING: 'red',
logging.DEBUG: 'darkgray'
})
def getLogger(name: str) -> "SphinxLoggerAdapter":
logger = logging.getLogger(NAMESPACE + '.' + name)
logger.disabled = False
return SphinxLoggerAdapter(logger, {})
def convert_serializable(records: List[logging.LogRecord]) -> None:
for r in records:
r.msg = r.getMessage()
r.args = ()
location = getattr(r, 'location', None)
if isinstance(location, nodes.Node):
r.location = get_node_location(location)
class SphinxLogRecord(logging.LogRecord):
prefix = ''
location = None
def getMessage(self) -> str:
message = super().getMessage()
location = getattr(self, 'location', None)
if location:
message = '%s: %s%s' % (location, self.prefix, message)
elif self.prefix not in message:
message = self.prefix + message
return message
class SphinxInfoLogRecord(SphinxLogRecord):
prefix = ''
class SphinxWarningLogRecord(SphinxLogRecord):
prefix = 'WARNING: '
class SphinxLoggerAdapter(logging.LoggerAdapter):
KEYWORDS = ['type', 'subtype', 'location', 'nonl', 'color', 'once']
def log(self, level: Union[int, str], msg: str, *args: Any, **kwargs: Any) -> None:
if isinstance(level, int):
super().log(level, msg, *args, **kwargs)
else:
levelno = LEVEL_NAMES[level]
super().log(levelno, msg, *args, **kwargs)
def verbose(self, msg: str, *args: Any, **kwargs: Any) -> None:
self.log(VERBOSE, msg, *args, **kwargs)
def process(self, msg: str, kwargs: Dict) -> Tuple[str, Dict]:
extra = kwargs.setdefault('extra', {})
for keyword in self.KEYWORDS:
if keyword in kwargs:
extra[keyword] = kwargs.pop(keyword)
return msg, kwargs
def handle(self, record: logging.LogRecord) -> None:
self.logger.handle(record)
class WarningStreamHandler(logging.StreamHandler):
pass
class NewLineStreamHandler(logging.StreamHandler):
def emit(self, record: logging.LogRecord) -> None:
try:
self.acquire()
if getattr(record, 'nonl', False):
self.terminator = ''
super().emit(record)
finally:
self.terminator = '\n'
self.release()
class MemoryHandler(logging.handlers.BufferingHandler):
def __init__(self) -> None:
super().__init__(-1)
def shouldFlush(self, record: logging.LogRecord) -> bool:
return False
def flushTo(self, logger: logging.Logger) -> None:
self.acquire()
try:
for record in self.buffer:
logger.handle(record)
self.buffer = []
finally:
self.release()
def clear(self) -> List[logging.LogRecord]:
buffer, self.buffer = self.buffer, []
return buffer
@contextmanager
def pending_warnings() -> Generator[logging.Handler, None, None]:
logger = logging.getLogger(NAMESPACE)
memhandler = MemoryHandler()
memhandler.setLevel(logging.WARNING)
try:
handlers = []
for handler in logger.handlers[:]:
if isinstance(handler, WarningStreamHandler):
logger.removeHandler(handler)
handlers.append(handler)
logger.addHandler(memhandler)
yield memhandler
finally:
logger.removeHandler(memhandler)
for handler in handlers:
logger.addHandler(handler)
memhandler.flushTo(logger)
@contextmanager
def suppress_logging() -> Generator[MemoryHandler, None, None]:
logger = logging.getLogger(NAMESPACE)
memhandler = MemoryHandler()
try:
handlers = []
for handler in logger.handlers[:]:
logger.removeHandler(handler)
handlers.append(handler)
logger.addHandler(memhandler)
yield memhandler
finally:
logger.removeHandler(memhandler)
for handler in handlers:
logger.addHandler(handler)
@contextmanager
def pending_logging() -> Generator[MemoryHandler, None, None]:
logger = logging.getLogger(NAMESPACE)
try:
with suppress_logging() as memhandler:
yield memhandler
finally:
memhandler.flushTo(logger)
@contextmanager
def skip_warningiserror(skip: bool = True) -> Generator[None, None, None]:
logger = logging.getLogger(NAMESPACE)
if skip is False:
yield
else:
try:
disabler = DisableWarningIsErrorFilter()
for handler in logger.handlers:
handler.filters.insert(0, disabler)
yield
finally:
for handler in logger.handlers:
handler.removeFilter(disabler)
@contextmanager
def prefixed_warnings(prefix: str) -> Generator[None, None, None]:
logger = logging.getLogger(NAMESPACE)
warning_handler = None
for handler in logger.handlers:
if isinstance(handler, WarningStreamHandler):
warning_handler = handler
break
else:
yield
return
prefix_filter = None
for _filter in warning_handler.filters:
if isinstance(_filter, MessagePrefixFilter):
prefix_filter = _filter
break
if prefix_filter:
try:
previous = prefix_filter.prefix
prefix_filter.prefix = prefix
yield
finally:
prefix_filter.prefix = previous
else:
try:
prefix_filter = MessagePrefixFilter(prefix)
warning_handler.addFilter(prefix_filter)
yield
finally:
warning_handler.removeFilter(prefix_filter)
class LogCollector:
def __init__(self) -> None:
self.logs = []
@contextmanager
def collect(self) -> Generator[None, None, None]:
with pending_logging() as memhandler:
yield
self.logs = memhandler.clear()
class InfoFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
if record.levelno < logging.WARNING:
return True
else:
return False
def is_suppressed_warning(type: str, subtype: str, suppress_warnings: List[str]) -> bool:
if type is None:
return False
for warning_type in suppress_warnings:
if '.' in warning_type:
target, subtarget = warning_type.split('.', 1)
else:
target, subtarget = warning_type, None
if target == type:
if (subtype is None or subtarget is None or
subtarget == subtype or subtarget == '*'):
return True
return False
class WarningSuppressor(logging.Filter):
def __init__(self, app: "Sphinx") -> None:
self.app = app
super().__init__()
def filter(self, record: logging.LogRecord) -> bool:
type = getattr(record, 'type', None)
subtype = getattr(record, 'subtype', None)
try:
suppress_warnings = self.app.config.suppress_warnings
except AttributeError:
suppress_warnings = []
if is_suppressed_warning(type, subtype, suppress_warnings):
return False
else:
self.app._warncount += 1
return True
class WarningIsErrorFilter(logging.Filter):
def __init__(self, app: "Sphinx") -> None:
self.app = app
super().__init__()
def filter(self, record: logging.LogRecord) -> bool:
if getattr(record, 'skip_warningsiserror', False):
return True
elif self.app.warningiserror:
location = getattr(record, 'location', '')
try:
message = record.msg % record.args
except (TypeError, ValueError):
message = record.msg
if location:
exc = SphinxWarning(location + ":" + str(message))
else:
exc = SphinxWarning(message)
if record.exc_info is not None:
raise exc from record.exc_info[1]
else:
raise exc
else:
return True
class DisableWarningIsErrorFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
record.skip_warningsiserror = True
return True
class MessagePrefixFilter(logging.Filter):
def __init__(self, prefix: str) -> None:
self.prefix = prefix
super().__init__()
def filter(self, record: logging.LogRecord) -> bool:
if self.prefix:
record.msg = self.prefix + ' ' + record.msg
return True
class OnceFilter(logging.Filter):
def __init__(self, name: str = '') -> None:
super().__init__(name)
self.messages = {}
def filter(self, record: logging.LogRecord) -> bool:
once = getattr(record, 'once', '')
if not once:
return True
else:
params = self.messages.setdefault(record.msg, [])
if record.args in params:
return False
params.append(record.args)
return True
class SphinxLogRecordTranslator(logging.Filter):
LogRecordClass = None
def __init__(self, app: "Sphinx") -> None:
self.app = app
super().__init__()
def filter(self, record: SphinxWarningLogRecord) -> bool:
if isinstance(record, logging.LogRecord):
record.__class__ = self.LogRecordClass
location = getattr(record, 'location', None)
if isinstance(location, tuple):
docname, lineno = location
if docname and lineno:
record.location = '%s:%s' % (self.app.env.doc2path(docname), lineno)
elif docname:
record.location = '%s' % self.app.env.doc2path(docname)
else:
record.location = None
elif isinstance(location, nodes.Node):
record.location = get_node_location(location)
elif location and ':' not in location:
record.location = '%s' % self.app.env.doc2path(location)
return True
class InfoLogRecordTranslator(SphinxLogRecordTranslator):
LogRecordClass = SphinxInfoLogRecord
class WarningLogRecordTranslator(SphinxLogRecordTranslator):
LogRecordClass = SphinxWarningLogRecord
def get_node_location(node: Node) -> str:
(source, line) = get_source_line(node)
if source and line:
return "%s:%s" % (source, line)
elif source:
return "%s:" % source
elif line:
return "<unknown>:%s" % line
else:
return None
class ColorizeFormatter(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
message = super().format(record)
color = getattr(record, 'color', None)
if color is None:
color = COLOR_MAP.get(record.levelno)
if color:
return colorize(color, message)
else:
return message
class SafeEncodingWriter:
def __init__(self, stream: IO) -> None:
self.stream = stream
self.encoding = getattr(stream, 'encoding', 'ascii') or 'ascii'
def write(self, data: str) -> None:
try:
self.stream.write(data)
except UnicodeEncodeError:
self.stream.write(data.encode(self.encoding, 'replace').decode(self.encoding))
def flush(self) -> None:
if hasattr(self.stream, 'flush'):
self.stream.flush()
class LastMessagesWriter:
def __init__(self, app: "Sphinx", stream: IO) -> None:
self.app = app
def write(self, data: str) -> None:
self.app.messagelog.append(data)
def setup(app: "Sphinx", status: IO, warning: IO) -> None:
logger = logging.getLogger(NAMESPACE)
logger.setLevel(logging.DEBUG)
logger.propagate = False
for handler in logger.handlers[:]:
logger.removeHandler(handler)
info_handler = NewLineStreamHandler(SafeEncodingWriter(status))
info_handler.addFilter(InfoFilter())
info_handler.addFilter(InfoLogRecordTranslator(app))
info_handler.setLevel(VERBOSITY_MAP[app.verbosity])
info_handler.setFormatter(ColorizeFormatter())
warning_handler = WarningStreamHandler(SafeEncodingWriter(warning))
warning_handler.addFilter(WarningSuppressor(app))
warning_handler.addFilter(WarningLogRecordTranslator(app))
warning_handler.addFilter(WarningIsErrorFilter(app))
warning_handler.addFilter(OnceFilter())
warning_handler.setLevel(logging.WARNING)
warning_handler.setFormatter(ColorizeFormatter())
messagelog_handler = logging.StreamHandler(LastMessagesWriter(app, status))
messagelog_handler.addFilter(InfoFilter())
messagelog_handler.setLevel(VERBOSITY_MAP[app.verbosity])
messagelog_handler.setFormatter(ColorizeFormatter())
logger.addHandler(info_handler)
logger.addHandler(warning_handler)
logger.addHandler(messagelog_handler)
| true | true |
1c480ede7ec3ddcada2e2f92c4b2eaa90bb15709 | 4,325 | py | Python | instructors/projects/card_games/war/war.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 46 | 2017-09-27T20:19:36.000Z | 2020-12-08T10:07:19.000Z | instructors/projects/card_games/war/war.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 6 | 2018-01-09T08:07:37.000Z | 2020-09-07T12:25:13.000Z | instructors/projects/card_games/war/war.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 18 | 2017-10-10T02:06:51.000Z | 2019-12-01T10:18:13.000Z | from lib.deck import StandardDeck, Deck
class Player(object):
    """A war player; owns a stack (Deck) of cards to play from.
    """
    def __init__(self):
        # Starts empty; WarGame.deal_cards() fills it.
        self.stack = Deck()
class Table(object):
    """The shared play area holding the cards wagered during a round.
    """
    def __init__(self):
        self.played_cards = Deck()
class WarGame(object):
    """A complete game of War between two automated players.

    Instantiating the class shuffles a standard deck, deals it out and
    immediately plays rounds until one player cannot draw; the process
    then exits (see finish_game).
    """
    ranks = ("two", "three", "four", "five", "six", "seven", "eight",
             "nine", "ten", "jack", "queen", "king", "ace")  # defines the order of the cards

    def __init__(self):
        """Set up deck, players and table, then start playing at once."""
        self.deck = StandardDeck()
        self.player1 = Player()
        self.player2 = Player()
        self.table = Table()
        self.losers_list = list()
        self.deck.shuffle()
        self.deal_cards()
        self.play_war()

    def deal_cards(self):
        """Alternately deal one card to each player until the deck is empty."""
        while len(self.deck.deck):
            deal_this_card = self.deck.deal_n_cards()
            if deal_this_card:
                self.player1.stack.accept_n_cards(deal_this_card)
            deal_this_card = self.deck.deal_n_cards()
            if deal_this_card:
                self.player2.stack.accept_n_cards(deal_this_card)

    def play_war(self):
        """ This run each round and checks if the game is over.
        """
        # exit() when someone loses, for now (see finish_game method)
        while True:
            self.run_round()

    def finish_game(self):
        """ Resolve the game
        """
        print self.losers_list
        exit()

    def run_round(self):
        """ Each player plays a card and the cards are compared.
        """
        def compare_cards(card1, card2):
            """
            What do we want to get out of compare_cards?
            1. Player 1 wins (1>2): player 1 is given all the played cards
            2. Player 2 wins (2>1): player 2 is given all the played cards
            3. WAR!! (1=2): We do a war
            """
            # Rank is the card's position in WarGame.ranks.
            card1_rank = self.ranks.index(card1[0].rank)
            card2_rank = self.ranks.index(card2[0].rank)
            player1_decksize = len(self.player1.stack.deck)
            player2_decksize = len(self.player2.stack.deck)
            if card1_rank > card2_rank:
                print card1[0].rank, " > ", card2[0].rank, " Player 1 wins.",
                print "Player 1 deck: ", player1_decksize, " cards. Player 2 deck: ", player2_decksize, "cards."
                return self.player1
            elif card1_rank < card2_rank:
                print card1[0].rank, " < ", card2[0].rank, " Player 2 wins."
                print "Player 1 deck: ", player1_decksize, " cards. Player 2 deck: ", player2_decksize, "cards."
                return self.player2
            # WAR!!
            else:
                print "A war is on!"
                return None

        def draw_cards(num_to_draw, player):
            """Draw cards for *player*; an empty draw marks them as a loser."""
            card = player.stack.deal_n_cards(num_to_draw)
            if len(card) == 0:
                self.losers_list.append(player)
            return card

        player1_card = draw_cards(1, self.player1)
        player2_card = draw_cards(1, self.player2)
        # Check if the game is done after all draws are complete.
        if len(self.losers_list) > 0:
            self.finish_game()
        winner = None
        while not winner:
            winner = compare_cards(player1_card, player2_card)
            # Give all played cards to the table.
            self.table.played_cards.accept_n_cards(player1_card)
            self.table.played_cards.accept_n_cards(player2_card)
            if not winner:
                # initialize a war: each player stakes three face-down
                # cards, then flips a new card for the next comparison.
                self.table.played_cards.accept_n_cards(draw_cards(3, self.player1))
                self.table.played_cards.accept_n_cards(draw_cards(3, self.player2))
                player1_card = draw_cards(1, self.player1)
                player2_card = draw_cards(1, self.player2)
                # Check if the game is done after all draws are complete.
                if len(self.losers_list) > 0:
                    self.finish_game()
        # Give all the cards in the played_cards to the winner
        winner.stack.accept_n_cards(self.table.played_cards.deal_n_cards(len(self.table.played_cards.deck)))
if __name__ == "__main__":
    # Run a full game of War when executed as a script.
    wargame = WarGame()
| 30.244755 | 112 | 0.546821 | from lib.deck import StandardDeck, Deck
class Player(object):
"""
"""
def __init__(self):
self.stack = Deck()
class Table(object):
"""
"""
def __init__(self):
self.played_cards = Deck()
class WarGame(object):
"""
"""
ranks = ("two", "three", "four", "five", "six", "seven", "eight",
"nine", "ten", "jack", "queen", "king", "ace")
def __init__(self):
"""
"""
self.deck = StandardDeck()
self.player1 = Player()
self.player2 = Player()
self.table = Table()
self.losers_list = list()
self.deck.shuffle()
self.deal_cards()
self.play_war()
def deal_cards(self):
"""
"""
while len(self.deck.deck):
deal_this_card = self.deck.deal_n_cards()
if deal_this_card:
self.player1.stack.accept_n_cards(deal_this_card)
deal_this_card = self.deck.deal_n_cards()
if deal_this_card:
self.player2.stack.accept_n_cards(deal_this_card)
def play_war(self):
""" This run each round and checks if the game is over.
"""
while True:
self.run_round()
def finish_game(self):
""" Resolve the game
"""
print self.losers_list
exit()
def run_round(self):
""" Each player plays a card and the cards are compared.
"""
def compare_cards(card1, card2):
"""
What do we want to get out of compare_cards?
1. Player 1 wins (1>2): player 1 is given all the played cards
2. Player 2 wins (2>1): player 2 is given all the played cards
3. WAR!! (1=2): We do a war
"""
card1_rank = self.ranks.index(card1[0].rank)
card2_rank = self.ranks.index(card2[0].rank)
player1_decksize = len(self.player1.stack.deck)
player2_decksize = len(self.player2.stack.deck)
if card1_rank > card2_rank:
print card1[0].rank, " > ", card2[0].rank, " Player 1 wins.",
print "Player 1 deck: ", player1_decksize, " cards. Player 2 deck: ", player2_decksize, "cards."
return self.player1
elif card1_rank < card2_rank:
print card1[0].rank, " < ", card2[0].rank, " Player 2 wins."
print "Player 1 deck: ", player1_decksize, " cards. Player 2 deck: ", player2_decksize, "cards."
return self.player2
else:
print "A war is on!"
return None
def draw_cards(num_to_draw, player):
"""
"""
card = player.stack.deal_n_cards(num_to_draw)
if len(card) == 0:
self.losers_list.append(player)
return card
player1_card = draw_cards(1, self.player1)
player2_card = draw_cards(1, self.player2)
if len(self.losers_list) > 0:
self.finish_game()
winner = None
while not winner:
winner = compare_cards(player1_card, player2_card)
self.table.played_cards.accept_n_cards(player1_card)
self.table.played_cards.accept_n_cards(player2_card)
if not winner:
self.table.played_cards.accept_n_cards(draw_cards(3, self.player1))
self.table.played_cards.accept_n_cards(draw_cards(3, self.player2))
player1_card = draw_cards(1, self.player1)
player2_card = draw_cards(1, self.player2)
if len(self.losers_list) > 0:
self.finish_game()
winner.stack.accept_n_cards(self.table.played_cards.deal_n_cards(len(self.table.played_cards.deck)))
if __name__ == "__main__":
wargame = WarGame()
| false | true |
1c480ee01fd5c048b152028cc1001c86ca680a28 | 994 | py | Python | trac/upgrades/db27.py | tiagoeckhardt/trac | b18c226195bfed8cd19cba97c6f03bd54dbbc044 | [
"BSD-3-Clause"
] | null | null | null | trac/upgrades/db27.py | tiagoeckhardt/trac | b18c226195bfed8cd19cba97c6f03bd54dbbc044 | [
"BSD-3-Clause"
] | null | null | null | trac/upgrades/db27.py | tiagoeckhardt/trac | b18c226195bfed8cd19cba97c6f03bd54dbbc044 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/.
from trac.db import Table, Column, DatabaseManager
def do_upgrade(env, ver, cursor):
    """Recreate the cache table keyed by an integer id.

    The cached content is disposable, so the old table is simply dropped
    and rebuilt with the new schema instead of being migrated.
    """
    cursor.execute("DROP TABLE cache")
    new_table = Table('cache', key='id')[
        Column('id', type='int'),
        Column('generation', type='int'),
        Column('key'),
    ]
    connector, _ = DatabaseManager(env).get_connector()
    for sql in connector.to_sql(new_table):
        cursor.execute(sql)
| 34.275862 | 67 | 0.698189 |
from trac.db import Table, Column, DatabaseManager
def do_upgrade(env, ver, cursor):
cursor.execute("DROP TABLE cache")
table = Table('cache', key='id')[
Column('id', type='int'),
Column('generation', type='int'),
Column('key'),
]
db_connector, _ = DatabaseManager(env).get_connector()
for stmt in db_connector.to_sql(table):
cursor.execute(stmt)
| true | true |
1c48113c5d9055273959e9dd774fe4d60a291d17 | 5,374 | py | Python | documents/wifi_positioning_rssi_normal_dist_based/calculation_numpy.py | fe1t/wifi_positioning | a4059704ed66404b9c2f143b07b5c02fdb69c8a7 | [
"MIT"
] | 2 | 2017-12-20T14:01:01.000Z | 2021-12-29T08:31:44.000Z | documents/wifi_positioning_rssi_normal_dist_based/calculation_numpy.py | fe1t/wifi_positioning | a4059704ed66404b9c2f143b07b5c02fdb69c8a7 | [
"MIT"
] | null | null | null | documents/wifi_positioning_rssi_normal_dist_based/calculation_numpy.py | fe1t/wifi_positioning | a4059704ed66404b9c2f143b07b5c02fdb69c8a7 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# - * - coding: utf-8 - * -
from calibration import Calibration
from calibrate_point_manager import CalibratePointManager
from cell import Cell
from iwscanner import IWScanner
from fractions import Fraction
import decimal
import numpy as np
import sys
# np.mean(array)
# np.std(array)
# Number of best-ranked calibration points kept for the weighted position
# estimate (used as the [:Kd] / [-Kd:] slice bounds in calculate()).
Kd = 4
def euclidean_distance(cj, sk):
    """Plain Euclidean distance between two equal-length tuple lists.

    Element 0 of each pair is converted to Decimal, the squared
    differences are summed, and the square root of the sum is returned.
    """
    squared_diffs = map(
        lambda a, b:
        (decimal.Decimal(a[0]) - decimal.Decimal(b[0])) ** decimal.Decimal(2),
        cj, sk)
    return np.sqrt(reduce(lambda acc, term: acc + term, squared_diffs))
def improved_euclidean_distance(cj, sk):
    """Deviation-inflated Euclidean distance between aligned tuple lists.

    Each tuple is (bssid, mean_rssi, stddev).  The absolute difference of
    the means (index 1) is inflated by both standard deviations (index 2)
    before squaring; the Decimal square root of the sum is returned.
    """
    terms = map(
        lambda a, b:
        ((np.abs(a[1] - b[1]) + a[2] + b[2]) ** decimal.Decimal(2)),
        cj, sk)
    return reduce(lambda acc, term: acc + term, terms).sqrt()
def gaussian_dist(cj, sk):
    # Likelihood of observing sample ``sk`` under the per-AP normal
    # distributions of calibration point ``cj``.  Tuples are
    # (bssid, mean_rssi, stddev): index 1 is the mean, index 2 the stddev.
    x = map(lambda a: a[1], sk)
    sigma = map(lambda a: (a[2]) , cj)
    # A zero deviation would divide by zero below; treat as "no match".
    if 0 in sigma:
        return 0
    mu = map(lambda a: a[1], cj)
    # if not reduce(lambda x, y: x and y,
    #map(lambda x: x != 0, sigma),
    #True):
    # Debug output only.
    print "sigma: ", sigma
    print "x: ", x
    # Normal pdf per access point; the product over all APs below is the
    # joint likelihood (independence between APs assumed).
    P = map(lambda a, b, c: \
            decimal.Decimal(np.e) ** -((a - c) ** 2 \
            / decimal.Decimal(2* (b ** 2))) \
            / decimal.Decimal(b * decimal.Decimal(2.0 * np.pi).sqrt()) \
            , x, sigma, mu)
    return decimal.Decimal(reduce(lambda a, b: a * b, P))
# Very high Decimal precision — presumably so the product of many tiny
# Gaussian probabilities in gaussian_dist() does not collapse to zero.
decimal.getcontext().prec = 128
# gmpy2.get_context().precision= 256
def calculate(s):
    """Estimate the (x, y) position of the sample ``s``.

    ``s`` is the list of averaged Cell observations from the live scan.
    Two estimates are produced from the calibration data — one weighted
    by improved_euclidean_distance, one by the gaussian_dist likelihood —
    and the final position blends them by the variances of their scores.
    """
    cp = CalibratePointManager()
    cpoints = cp.get_cpoints()
    xy = cp.xy
    # One (score, calibration-point-index) tuple per calibration point:
    # d holds distances (smaller is better), p holds likelihoods (larger
    # is better).
    d = []
    p = []
    # sort_func = lambda x: x.bssid
    # sorted(cpoints, key=sort_func)
    # sorted(s, key=sort_func)
    for cpoint in cpoints:
        si = s
        # place Pavg instead of -80
        # cpoint = filter(lambda x: x.rssi > np.mean(global_avg), cpoint)
        # Drop weak readings, keep only APs seen both in this calibration
        # point and in the live sample, then align both lists by BSSID.
        si = filter(lambda x: x.rssi > np.mean(global_avg), si)
        bssid_list = set(map(lambda x: x.bssid, cpoint))
        bssid_list = bssid_list.intersection(set(map(lambda x: x.bssid, si)))
        c = filter(lambda x: x.bssid in bssid_list, cpoint)
        si = filter(lambda x: x.bssid in bssid_list, si)
        sort_func = lambda x: x.bssid
        c = sorted(c, key=sort_func)
        si = sorted(si, key=sort_func)
        c = map(lambda x: (x.bssid, decimal.Decimal(x.rssi), decimal.Decimal(x.std)), c)
        si = map(lambda x: (x.bssid, decimal.Decimal(x.rssi), decimal.Decimal(x.std)), si)
        assert(len(si) == len(c))
        # print bssid_list
        if len(bssid_list) != 0:
            d.append((improved_euclidean_distance(c, si), len(d)))
            p.append((gaussian_dist(c, si), len(p)))
        else:
            # No access point in common: mark this calibration point unusable.
            d.append((-1, len(d)))
            p.append((-1, len(p)))
    # Keep the Kd best candidates: smallest distances, largest likelihoods.
    d = filter(lambda x: x[0] != -1, sorted(d))[:Kd]
    p = filter(lambda x: x[0] != 0 and x[0] != -1, sorted(p))[-Kd:]
    # Take w = d -> feed straight into the weighting formula -> (x1, y1)
    # Take w = p -> same formula as for d -> (x2, y2)
    # def get_xy(d, p):
    def get_xy(d, p):
        # Weighted centroids: (ex, ey) is inverse-distance weighted,
        # (gx, gy) is log-likelihood weighted.
        ex = gx = decimal.Decimal(0.0)
        ey = gy = decimal.Decimal(0.0)
        sum_d = decimal.Decimal(0.0)
        sum_p = decimal.Decimal(0.0)
        for i in range(len(d)):
            if d[i][0] == -1: continue
            sum_d += decimal.Decimal(1.0)/decimal.Decimal(d[i][0])
            ex += ((xy[d[i][1]][0]) / decimal.Decimal(d[i][0]))
            ey += ((xy[d[i][1]][1]) / decimal.Decimal(d[i][0]))
        for i in range(len(p)):
            if p[i][0] == -1: continue
            w = decimal.Decimal(np.log2(float(p[i][0])))
            sum_p += (w)
            # NOTE(review): the d-loop above indexes xy with the stored
            # calibration-point id (d[i][1]) but this loop uses the loop
            # position i — after sorting/slicing p these differ; verify
            # whether xy[p[i][1]] was intended here.
            gx += (xy[i][0]) * w
            gy += (xy[i][1]) * w
        # print sum_d
        # NOTE(review): this assigns unused ox/oy and neither returns nor
        # guards, so the divisions below still raise when sum_d == 0.
        if sum_d == 0: ox, oy = 0, 0
        ex = ex / sum_d
        ey = ey / sum_d
        gx = gx / sum_p # <<<< here
        gy = gy / sum_p
        return ex, ey, gx, gy
    x1, y1, x2, y2 = get_xy(d, p)
    print "(x1,y1): (%d, %d)" % (x1,y1)
    print "(x2,y2): (%d, %d)" % (x2,y2)
    # Blend the two estimates by the variances of their score lists.
    D1 = np.var(map(lambda x: (x[0]), d))
    print "p: ", p
    D2 = np.var(map(lambda x: (x[0]), p))
    sum_D = D1 + D2
    print "D1: {} D2: {}".format(D1, D2)
    x = x1 * decimal.Decimal(D1 / sum_D) + x2 * decimal.Decimal(D2 / sum_D)
    y = y1 * decimal.Decimal(D1 / sum_D) + y2 * decimal.Decimal(D2 / sum_D)
    return x , y
# x, y = raw_input("Current position (x, y): ").replace(" ", "").split(",")
# obs[i].bssid, obs[i].rssi (avg), obs[i].std
# Collect 10 scan rounds, grouping raw results per BSSID.
obs = list()
cells_hash = { }
global_avg = list()
for i in range(10):
    for result in IWScanner().do_scan():
        if result.bssid not in cells_hash:
            cells_hash[result.bssid] = [result]
        else:
            cells_hash[result.bssid].append(result)
# Reduce each BSSID's readings to a single Cell carrying the mean and
# standard deviation of its RSSI samples.
for bssid, arr in cells_hash.iteritems():
    rssi_arr = list()
    for item in arr:
        rssi_arr.append(item.rssi)
    cell = Cell(bssid)
    cell.rssi = np.mean(rssi_arr)
    cell.std = np.std(rssi_arr)
    obs.append(cell)
    global_avg.append(cell.rssi)
print "Your position is about: (%d, %d)" % (calculate(obs))
| 30.885057 | 90 | 0.537961 |
from calibration import Calibration
from calibrate_point_manager import CalibratePointManager
from cell import Cell
from iwscanner import IWScanner
from fractions import Fraction
import decimal
import numpy as np
import sys
Kd = 4
def euclidean_distance(cj, sk):
return np.sqrt(reduce(lambda x, y: x + y, \
map(lambda x, y:
(decimal.Decimal(x[0]) - decimal.Decimal(y[0])) ** decimal.Decimal(2)
, cj, sk)))
def improved_euclidean_distance(cj, sk):
return (reduce(lambda a, b: a + b, \
map(lambda a, b: ((np.abs(a[1] - b[1]) \
+ a[2] + b[2]) ** decimal.Decimal(2)), cj, sk))).sqrt()
def gaussian_dist(cj, sk):
x = map(lambda a: a[1], sk)
sigma = map(lambda a: (a[2]) , cj)
if 0 in sigma:
return 0
mu = map(lambda a: a[1], cj)
print "sigma: ", sigma
print "x: ", x
P = map(lambda a, b, c: \
decimal.Decimal(np.e) ** -((a - c) ** 2 \
/ decimal.Decimal(2* (b ** 2))) \
/ decimal.Decimal(b * decimal.Decimal(2.0 * np.pi).sqrt()) \
, x, sigma, mu)
return decimal.Decimal(reduce(lambda a, b: a * b, P))
decimal.getcontext().prec = 128
def calculate(s):
cp = CalibratePointManager()
cpoints = cp.get_cpoints()
xy = cp.xy
d = []
p = []
for cpoint in cpoints:
si = s
si = filter(lambda x: x.rssi > np.mean(global_avg), si)
bssid_list = set(map(lambda x: x.bssid, cpoint))
bssid_list = bssid_list.intersection(set(map(lambda x: x.bssid, si)))
c = filter(lambda x: x.bssid in bssid_list, cpoint)
si = filter(lambda x: x.bssid in bssid_list, si)
sort_func = lambda x: x.bssid
c = sorted(c, key=sort_func)
si = sorted(si, key=sort_func)
c = map(lambda x: (x.bssid, decimal.Decimal(x.rssi), decimal.Decimal(x.std)), c)
si = map(lambda x: (x.bssid, decimal.Decimal(x.rssi), decimal.Decimal(x.std)), si)
assert(len(si) == len(c))
if len(bssid_list) != 0:
d.append((improved_euclidean_distance(c, si), len(d)))
p.append((gaussian_dist(c, si), len(p)))
else:
d.append((-1, len(d)))
p.append((-1, len(p)))
d = filter(lambda x: x[0] != -1, sorted(d))[:Kd]
p = filter(lambda x: x[0] != 0 and x[0] != -1, sorted(p))[-Kd:]
def get_xy(d, p):
ex = gx = decimal.Decimal(0.0)
ey = gy = decimal.Decimal(0.0)
sum_d = decimal.Decimal(0.0)
sum_p = decimal.Decimal(0.0)
for i in range(len(d)):
if d[i][0] == -1: continue
sum_d += decimal.Decimal(1.0)/decimal.Decimal(d[i][0])
ex += ((xy[d[i][1]][0]) / decimal.Decimal(d[i][0]))
ey += ((xy[d[i][1]][1]) / decimal.Decimal(d[i][0]))
for i in range(len(p)):
if p[i][0] == -1: continue
w = decimal.Decimal(np.log2(float(p[i][0])))
sum_p += (w)
gx += (xy[i][0]) * w
gy += (xy[i][1]) * w
if sum_d == 0: ox, oy = 0, 0
ex = ex / sum_d
ey = ey / sum_d
gx = gx / sum_p
gy = gy / sum_p
return ex, ey, gx, gy
x1, y1, x2, y2 = get_xy(d, p)
print "(x1,y1): (%d, %d)" % (x1,y1)
print "(x2,y2): (%d, %d)" % (x2,y2)
D1 = np.var(map(lambda x: (x[0]), d))
print "p: ", p
D2 = np.var(map(lambda x: (x[0]), p))
sum_D = D1 + D2
print "D1: {} D2: {}".format(D1, D2)
x = x1 * decimal.Decimal(D1 / sum_D) + x2 * decimal.Decimal(D2 / sum_D)
y = y1 * decimal.Decimal(D1 / sum_D) + y2 * decimal.Decimal(D2 / sum_D)
return x , y
obs = list()
cells_hash = { }
global_avg = list()
for i in range(10):
for result in IWScanner().do_scan():
if result.bssid not in cells_hash:
cells_hash[result.bssid] = [result]
else:
cells_hash[result.bssid].append(result)
for bssid, arr in cells_hash.iteritems():
rssi_arr = list()
for item in arr:
rssi_arr.append(item.rssi)
cell = Cell(bssid)
cell.rssi = np.mean(rssi_arr)
cell.std = np.std(rssi_arr)
obs.append(cell)
global_avg.append(cell.rssi)
print "Your position is about: (%d, %d)" % (calculate(obs))
| false | true |
1c48116e567540872eb35d8210fe6c3f7660b7f8 | 3,913 | py | Python | cryptography/hazmat/bindings/utils.py | derwolfe/cryptography | a6112133d6797313ea8fe741daf25178b2abe25c | [
"Apache-2.0"
] | null | null | null | cryptography/hazmat/bindings/utils.py | derwolfe/cryptography | a6112133d6797313ea8fe741daf25178b2abe25c | [
"Apache-2.0"
] | null | null | null | cryptography/hazmat/bindings/utils.py | derwolfe/cryptography | a6112133d6797313ea8fe741daf25178b2abe25c | [
"Apache-2.0"
] | 3 | 2017-04-07T12:02:22.000Z | 2020-03-23T12:11:55.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import sys
import cffi
def build_ffi(module_prefix, modules, pre_include="", post_include="",
              libraries=None, extra_compile_args=None, extra_link_args=None):
    """Build a verified cffi ``(ffi, lib)`` pair from binding modules.

    Modules listed in ``modules`` should have the following attributes:

    * ``INCLUDES``: A string containing C includes.
    * ``TYPES``: A string containing C declarations for types.
    * ``FUNCTIONS``: A string containing C declarations for functions.
    * ``MACROS``: A string containing C declarations for any macros.
    * ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this
        can be used to do things like test for a define and provide an
        alternate implementation based on that.
    * ``CONDITIONAL_NAMES``: A dict mapping strings of condition names from the
        library to a list of names which will not be present without the
        condition.

    :param module_prefix: Package prefix prepended to each entry of
        ``modules`` before importing it.
    :param pre_include: C source placed before the collected includes.
    :param post_include: C source placed after the collected includes.
    :param libraries: Optional list of libraries to link against.
    :param extra_compile_args: Optional extra compiler flags.
    :param extra_link_args: Optional extra linker flags.
    :returns: ``(ffi, lib)`` tuple.
    """
    # None defaults instead of mutable list defaults, so a single shared
    # list object can never be reused (and mutated) across calls.
    if libraries is None:
        libraries = []
    if extra_compile_args is None:
        extra_compile_args = []
    if extra_link_args is None:
        extra_link_args = []

    ffi = cffi.FFI()
    types = []
    includes = []
    functions = []
    macros = []
    customizations = []
    for name in modules:
        module_name = module_prefix + name
        __import__(module_name)
        module = sys.modules[module_name]

        types.append(module.TYPES)
        macros.append(module.MACROS)
        functions.append(module.FUNCTIONS)
        includes.append(module.INCLUDES)
        customizations.append(module.CUSTOMIZATIONS)

    cdef_sources = types + functions + macros
    ffi.cdef("\n".join(cdef_sources))

    # We include functions here so that if we got any of their definitions
    # wrong, the underlying C compiler will explode. In C you are allowed
    # to re-declare a function if it has the same signature. That is:
    #   int foo(int);
    #   int foo(int);
    # is legal, but the following will fail to compile:
    #   int foo(int);
    #   int foo(short);
    source = "\n".join(
        [pre_include] +
        includes +
        [post_include] +
        functions +
        customizations
    )
    lib = ffi.verify(
        source=source,
        modulename=_create_modulename(cdef_sources, source, sys.version),
        libraries=libraries,
        ext_package="cryptography",
        extra_compile_args=extra_compile_args,
        extra_link_args=extra_link_args,
    )

    for name in modules:
        module_name = module_prefix + name
        module = sys.modules[module_name]
        # Strip out names whose guarding condition the built library does
        # not satisfy, so callers cannot reference unavailable symbols.
        # (A distinct loop variable is used here; the previous code
        # shadowed the outer ``name`` module variable.)
        for condition, conditional_names in module.CONDITIONAL_NAMES.items():
            if not getattr(lib, condition):
                for conditional_name in conditional_names:
                    delattr(lib, conditional_name)

    return ffi, lib
def _create_modulename(cdef_sources, source, sys_version):
"""
cffi creates a modulename internally that incorporates the cffi version.
This will cause cryptography's wheels to break when the version of cffi
the user has does not match what was used when building the wheel. To
resolve this we build our own modulename that uses most of the same code
from cffi but elides the version key.
"""
key = '\x00'.join([sys_version[:3], source] + cdef_sources)
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
return '_Cryptography_cffi_{0}{1}'.format(k1, k2)
| 35.899083 | 79 | 0.669052 |
from __future__ import absolute_import, division, print_function
import binascii
import sys
import cffi
def build_ffi(module_prefix, modules, pre_include="", post_include="",
libraries=[], extra_compile_args=[], extra_link_args=[]):
ffi = cffi.FFI()
types = []
includes = []
functions = []
macros = []
customizations = []
for name in modules:
module_name = module_prefix + name
__import__(module_name)
module = sys.modules[module_name]
types.append(module.TYPES)
macros.append(module.MACROS)
functions.append(module.FUNCTIONS)
includes.append(module.INCLUDES)
customizations.append(module.CUSTOMIZATIONS)
cdef_sources = types + functions + macros
ffi.cdef("\n".join(cdef_sources))
source = "\n".join(
[pre_include] +
includes +
[post_include] +
functions +
customizations
)
lib = ffi.verify(
source=source,
modulename=_create_modulename(cdef_sources, source, sys.version),
libraries=libraries,
ext_package="cryptography",
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
for name in modules:
module_name = module_prefix + name
module = sys.modules[module_name]
for condition, names in module.CONDITIONAL_NAMES.items():
if not getattr(lib, condition):
for name in names:
delattr(lib, name)
return ffi, lib
def _create_modulename(cdef_sources, source, sys_version):
key = '\x00'.join([sys_version[:3], source] + cdef_sources)
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
return '_Cryptography_cffi_{0}{1}'.format(k1, k2)
| true | true |
1c4812afed716716d770b120fccdcf8bd533a504 | 584 | py | Python | aisapi/const.py | sviete/AIS-api | 80694a235b34b99e0ee608e3e3c175732e5be258 | [
"MIT"
] | 1 | 2018-01-03T11:26:01.000Z | 2018-01-03T11:26:01.000Z | aisapi/const.py | sviete/AIS-api | 80694a235b34b99e0ee608e3e3c175732e5be258 | [
"MIT"
] | null | null | null | aisapi/const.py | sviete/AIS-api | 80694a235b34b99e0ee608e3e3c175732e5be258 | [
"MIT"
] | 2 | 2018-08-14T04:30:20.000Z | 2018-08-14T04:31:16.000Z | """Constants."""
# TuneIn radio directory (OPML) service root.
AIS_WS_TUNE_IN_URL = "http://opml.radiotime.com/"
# Remote catalogue lookups; the {audio_nature}/{audio_type} placeholders
# are filled in with str.format() by the caller.
AIS_WS_AUDIO_TYPE_URL = (
    "https://powiedz.co/ords/dom/dom/audio_type?nature={audio_nature}"
)
AIS_WS_AUDIO_NAME_URL = (
    "https://powiedz.co/ords/dom/dom/audio_name?nature={audio_nature}&type={audio_type}"
)
# Wolne Lektury audiobooks catalogue (JSON format).
AIS_WS_AUDIOBOOKS_URL = "https://wolnelektury.pl/api/audiobooks/?format=json"
AIS_WS_AUDIO_INFO = "https://powiedz.co/ords/dom/dom/get_audio_full_info"
# Endpoints on the AIS gateway itself; {ais_url} (and {text}) are
# substituted by the caller.
AIS_WS_COMMAND_URL = "{ais_url}/command"
AIS_WS_AUDIO_STATUS_URL = "{ais_url}/audio_status"
AIS_WS_TTS_URL = "{ais_url}/text_to_speech?text={text}"
| 38.933333 | 88 | 0.765411 |
AIS_WS_TUNE_IN_URL = "http://opml.radiotime.com/"
AIS_WS_AUDIO_TYPE_URL = (
"https://powiedz.co/ords/dom/dom/audio_type?nature={audio_nature}"
)
AIS_WS_AUDIO_NAME_URL = (
"https://powiedz.co/ords/dom/dom/audio_name?nature={audio_nature}&type={audio_type}"
)
AIS_WS_AUDIOBOOKS_URL = "https://wolnelektury.pl/api/audiobooks/?format=json"
AIS_WS_AUDIO_INFO = "https://powiedz.co/ords/dom/dom/get_audio_full_info"
AIS_WS_COMMAND_URL = "{ais_url}/command"
AIS_WS_AUDIO_STATUS_URL = "{ais_url}/audio_status"
AIS_WS_TTS_URL = "{ais_url}/text_to_speech?text={text}"
| true | true |
1c4812d20adc26bbc3dee861735190b37f283c00 | 3,500 | py | Python | fake_gen/factories/datetimes.py | psafont/fake-gen | a3d74fdb54c3d4171ce2ba6ab0ad15791cf5b7e5 | [
"MIT"
] | 1 | 2020-04-14T09:34:58.000Z | 2020-04-14T09:34:58.000Z | fake_gen/factories/datetimes.py | psafont/fake-gen | a3d74fdb54c3d4171ce2ba6ab0ad15791cf5b7e5 | [
"MIT"
] | 1 | 2018-12-04T10:02:57.000Z | 2018-12-04T10:02:57.000Z | fake_gen/factories/datetimes.py | psafont/fake-gen | a3d74fdb54c3d4171ce2ba6ab0ad15791cf5b7e5 | [
"MIT"
] | null | null | null | import random
import datetime
from fake_gen.errors import InvalidFieldType
from fake_gen.base import Factory, DependentField
class RandomDateFactory(Factory):
"""
Generates a random dates between 2 dates.
:type minimum: datetime.datetime
:type maximum: datetime.datetime
Example:
>>> start = datetime.datetime(2013, 10, 1, 1, 1, 0, 0)
>>> end = datetime.datetime(2013, 10, 1, 1, 1, 0, 1)
>>> dates = list(RandomDateFactory(start, end).generate(100))
>>> len(dates)
100
>>> datetime.datetime(2013, 10, 1, 1, 1, 0, 0) in dates
True
>>> datetime.datetime(2013, 10, 1, 1, 1, 0, 1) in dates
True
>>> datetime.datetime(2013, 10, 1, 1, 1, 0, 2) in dates
False
>>> datetime.datetime(2013, 10, 1, 2, 1, 0, 2) in dates
False
"""
def __init__(self, minimum, maximum):
super(RandomDateFactory, self).__init__()
self._maximum = maximum
self._minimum = minimum
delta = maximum - minimum
self._delta_seconds = delta.total_seconds()
self._sign = -1 if self._delta_seconds < 0 else 1
self._delta_seconds *= self._sign
def __call__(self):
delta = datetime.timedelta(seconds=(random.random() * self._delta_seconds))
return self._minimum + (self._sign * delta)
class DateIntervalFactory(Factory):
"""
Generates datetime objects starting from `base` which each iteration adding `delta` to it.
:type base: datetime.datetime
:type delta: datetime.timedelta
Example:
>>> start = datetime.datetime(2013, 10, 1)
>>> interval = datetime.timedelta(days=1)
>>> datetimes = list(DateIntervalFactory(start, interval).generate(3))
>>> len(datetimes)
3
>>> datetimes
[datetime.datetime(2013, 10, 1, 0, 0), ..., datetime.datetime(2013, 10, 3, 0, 0)]
"""
def __init__(self, base, delta):
super(DateIntervalFactory, self).__init__()
self._base = base
self._delta = delta
def __call__(self):
return self._base + self.current_index * self._delta
class RelativeToDatetimeField(DependentField):
"""
Adds a datetime.timedelta to a datetime value from an dependent field.
"""
def __init__(self, datetime_field_name, delta):
super(RelativeToDatetimeField, self).__init__([datetime_field_name])
self._datetime_field_name = datetime_field_name
self._delta = delta
def __call__(self):
other_field = self.depending_fields[self._datetime_field_name]
if not isinstance(other_field, datetime.datetime):
raise InvalidFieldType("field {} isn't of type datetime.datetime")
return other_field + self._delta
class AlignedRelativeDatetimeField(DependentField):
"""
Returns another datetime field, only aligned to specific time quantums.
"""
def __init__(self, other_dateime_field, minute_alignment):
if minute_alignment <= 0 or minute_alignment > 60:
raise ValueError("minute_alignment needs to be a positive integer between 1 - 60")
super(AlignedRelativeDatetimeField, self).__init__([other_dateime_field])
self._other_datetime_field = other_dateime_field
self._minute_alignment = minute_alignment
def __call__(self):
super(AlignedRelativeDatetimeField, self).__call__()
other_value = self.depending_fields[self._other_datetime_field]
return other_value - datetime.timedelta(minutes=other_value.minute % self._minute_alignment)
| 37.634409 | 100 | 0.676857 | import random
import datetime
from fake_gen.errors import InvalidFieldType
from fake_gen.base import Factory, DependentField
class RandomDateFactory(Factory):
def __init__(self, minimum, maximum):
super(RandomDateFactory, self).__init__()
self._maximum = maximum
self._minimum = minimum
delta = maximum - minimum
self._delta_seconds = delta.total_seconds()
self._sign = -1 if self._delta_seconds < 0 else 1
self._delta_seconds *= self._sign
def __call__(self):
delta = datetime.timedelta(seconds=(random.random() * self._delta_seconds))
return self._minimum + (self._sign * delta)
class DateIntervalFactory(Factory):
def __init__(self, base, delta):
super(DateIntervalFactory, self).__init__()
self._base = base
self._delta = delta
def __call__(self):
return self._base + self.current_index * self._delta
class RelativeToDatetimeField(DependentField):
def __init__(self, datetime_field_name, delta):
super(RelativeToDatetimeField, self).__init__([datetime_field_name])
self._datetime_field_name = datetime_field_name
self._delta = delta
def __call__(self):
other_field = self.depending_fields[self._datetime_field_name]
if not isinstance(other_field, datetime.datetime):
raise InvalidFieldType("field {} isn't of type datetime.datetime")
return other_field + self._delta
class AlignedRelativeDatetimeField(DependentField):
def __init__(self, other_dateime_field, minute_alignment):
if minute_alignment <= 0 or minute_alignment > 60:
raise ValueError("minute_alignment needs to be a positive integer between 1 - 60")
super(AlignedRelativeDatetimeField, self).__init__([other_dateime_field])
self._other_datetime_field = other_dateime_field
self._minute_alignment = minute_alignment
def __call__(self):
super(AlignedRelativeDatetimeField, self).__call__()
other_value = self.depending_fields[self._other_datetime_field]
return other_value - datetime.timedelta(minutes=other_value.minute % self._minute_alignment)
| true | true |
1c481379434ada17f2a9088c56489beafbf8d172 | 4,503 | py | Python | reconcile/ecr_mirror.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | reconcile/ecr_mirror.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | reconcile/ecr_mirror.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | import base64
import logging
from sretoolbox.container import Image
from sretoolbox.container import Skopeo
from sretoolbox.container.skopeo import SkopeoCmdError
from sretoolbox.utils import threaded
from reconcile import queries
from reconcile.utils.aws_api import AWSApi
from reconcile.utils.secret_reader import SecretReader
QONTRACT_INTEGRATION = "ecr-mirror"
LOG = logging.getLogger(__name__)
class EcrMirror:
def __init__(self, instance, dry_run):
self.dry_run = dry_run
self.instance = instance
self.settings = queries.get_app_interface_settings()
self.secret_reader = SecretReader(settings=self.settings)
self.skopeo_cli = Skopeo(dry_run)
self.error = False
identifier = instance["identifier"]
account = instance["account"]
region = instance.get("region")
self.aws_cli = AWSApi(
thread_pool_size=1,
accounts=[self._get_aws_account_info(account)],
settings=self.settings,
init_ecr_auth_tokens=True,
)
self.aws_cli.map_ecr_resources()
self.ecr_uri = self._get_image_uri(
account=account,
repository=identifier,
)
if self.ecr_uri is None:
self.error = True
LOG.error(f"Could not find the ECR repository {identifier}")
self.ecr_username, self.ecr_password = self._get_ecr_creds(
account=account,
region=region,
)
self.ecr_auth = f"{self.ecr_username}:{self.ecr_password}"
self.image_username = None
self.image_password = None
self.image_auth = None
pull_secret = self.instance["mirror"]["pullCredentials"]
if pull_secret is not None:
raw_data = self.secret_reader.read_all(pull_secret)
self.image_username = raw_data["user"]
self.image_password = raw_data["token"]
self.image_auth = f"{self.image_username}:{self.image_password}"
def run(self):
if self.error:
return
ecr_mirror = Image(
self.ecr_uri, username=self.ecr_username, password=self.ecr_password
)
image = Image(
self.instance["mirror"]["url"],
username=self.image_username,
password=self.image_password,
)
LOG.debug("[checking %s -> %s]", image, ecr_mirror)
for tag in image:
if tag not in ecr_mirror:
try:
self.skopeo_cli.copy(
src_image=image[tag],
src_creds=self.image_auth,
dst_image=ecr_mirror[tag],
dest_creds=self.ecr_auth,
)
except SkopeoCmdError as details:
LOG.error("[%s]", details)
def _get_ecr_creds(self, account, region):
if region is None:
region = self.aws_cli.accounts[account]["resourcesDefaultRegion"]
auth_token = f"{account}/{region}"
data = self.aws_cli.auth_tokens[auth_token]
auth_data = data["authorizationData"][0]
token = auth_data["authorizationToken"]
password = base64.b64decode(token).decode("utf-8").split(":")[1]
return "AWS", password
def _get_image_uri(self, account, repository):
for repo in self.aws_cli.resources[account]["ecr"]:
if repo["repositoryName"] == repository:
return repo["repositoryUri"]
@staticmethod
def _get_aws_account_info(account):
for account_info in queries.get_aws_accounts():
if "name" not in account_info:
continue
if account_info["name"] != account:
continue
return account_info
def worker(ecr_mirror_instance):
    """Thread-pool entry point: execute a single mirror job."""
    result = ecr_mirror_instance.run()
    return result
def run(dry_run, thread_pool_size=10):
    """Mirror every ECR terraform resource that declares a mirror section."""
    namespaces = queries.get_namespaces()
    tfrs_to_mirror = [
        tfr
        for namespace in namespaces
        if namespace["terraformResources"] is not None
        for tfr in namespace["terraformResources"]
        if tfr["provider"] == "ecr" and tfr["mirror"] is not None
    ]
    work_list = threaded.run(
        EcrMirror, tfrs_to_mirror, thread_pool_size=thread_pool_size, dry_run=dry_run
    )
    threaded.run(worker, work_list, thread_pool_size=thread_pool_size)
| 31.711268 | 85 | 0.613147 | import base64
import logging
from sretoolbox.container import Image
from sretoolbox.container import Skopeo
from sretoolbox.container.skopeo import SkopeoCmdError
from sretoolbox.utils import threaded
from reconcile import queries
from reconcile.utils.aws_api import AWSApi
from reconcile.utils.secret_reader import SecretReader
QONTRACT_INTEGRATION = "ecr-mirror"
LOG = logging.getLogger(__name__)
class EcrMirror:
def __init__(self, instance, dry_run):
self.dry_run = dry_run
self.instance = instance
self.settings = queries.get_app_interface_settings()
self.secret_reader = SecretReader(settings=self.settings)
self.skopeo_cli = Skopeo(dry_run)
self.error = False
identifier = instance["identifier"]
account = instance["account"]
region = instance.get("region")
self.aws_cli = AWSApi(
thread_pool_size=1,
accounts=[self._get_aws_account_info(account)],
settings=self.settings,
init_ecr_auth_tokens=True,
)
self.aws_cli.map_ecr_resources()
self.ecr_uri = self._get_image_uri(
account=account,
repository=identifier,
)
if self.ecr_uri is None:
self.error = True
LOG.error(f"Could not find the ECR repository {identifier}")
self.ecr_username, self.ecr_password = self._get_ecr_creds(
account=account,
region=region,
)
self.ecr_auth = f"{self.ecr_username}:{self.ecr_password}"
self.image_username = None
self.image_password = None
self.image_auth = None
pull_secret = self.instance["mirror"]["pullCredentials"]
if pull_secret is not None:
raw_data = self.secret_reader.read_all(pull_secret)
self.image_username = raw_data["user"]
self.image_password = raw_data["token"]
self.image_auth = f"{self.image_username}:{self.image_password}"
def run(self):
if self.error:
return
ecr_mirror = Image(
self.ecr_uri, username=self.ecr_username, password=self.ecr_password
)
image = Image(
self.instance["mirror"]["url"],
username=self.image_username,
password=self.image_password,
)
LOG.debug("[checking %s -> %s]", image, ecr_mirror)
for tag in image:
if tag not in ecr_mirror:
try:
self.skopeo_cli.copy(
src_image=image[tag],
src_creds=self.image_auth,
dst_image=ecr_mirror[tag],
dest_creds=self.ecr_auth,
)
except SkopeoCmdError as details:
LOG.error("[%s]", details)
def _get_ecr_creds(self, account, region):
if region is None:
region = self.aws_cli.accounts[account]["resourcesDefaultRegion"]
auth_token = f"{account}/{region}"
data = self.aws_cli.auth_tokens[auth_token]
auth_data = data["authorizationData"][0]
token = auth_data["authorizationToken"]
password = base64.b64decode(token).decode("utf-8").split(":")[1]
return "AWS", password
def _get_image_uri(self, account, repository):
for repo in self.aws_cli.resources[account]["ecr"]:
if repo["repositoryName"] == repository:
return repo["repositoryUri"]
@staticmethod
def _get_aws_account_info(account):
for account_info in queries.get_aws_accounts():
if "name" not in account_info:
continue
if account_info["name"] != account:
continue
return account_info
def worker(ecr_mirror_instance):
return ecr_mirror_instance.run()
def run(dry_run, thread_pool_size=10):
namespaces = queries.get_namespaces()
tfrs_to_mirror = []
for namespace in namespaces:
if namespace["terraformResources"] is None:
continue
for tfr in namespace["terraformResources"]:
if tfr["provider"] != "ecr":
continue
if tfr["mirror"] is None:
continue
tfrs_to_mirror.append(tfr)
work_list = threaded.run(
EcrMirror, tfrs_to_mirror, thread_pool_size=thread_pool_size, dry_run=dry_run
)
threaded.run(worker, work_list, thread_pool_size=thread_pool_size)
| true | true |
1c4814da9270cbf4960b91df34b13e65e8cb7550 | 2,195 | py | Python | nfv/nfv-vim/nfv_vim/instance_fsm/_instance_state_guest_services_delete.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2020-02-07T19:01:36.000Z | 2022-02-23T01:41:46.000Z | nfv/nfv-vim/nfv_vim/instance_fsm/_instance_state_guest_services_delete.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 1 | 2021-01-14T12:02:25.000Z | 2021-01-14T12:02:25.000Z | nfv/nfv-vim/nfv_vim/instance_fsm/_instance_state_guest_services_delete.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2021-01-13T08:39:21.000Z | 2022-02-09T00:21:55.000Z | #
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_common import debug
from nfv_common import state_machine
from nfv_vim.instance_fsm._instance_defs import INSTANCE_EVENT
from nfv_vim.instance_fsm._instance_defs import INSTANCE_STATE
from nfv_vim.instance_fsm._instance_tasks import GuestServicesDeleteTask
DLOG = debug.debug_get_logger('nfv_vim.state_machine.instance')
class GuestServicesDeleteState(state_machine.State):
    """
    Instance - GuestServicesDelete State
    """
    def __init__(self, name):
        super(GuestServicesDeleteState, self).__init__(name)

    def enter(self, instance):
        """
        Actions performed on entry to the GuestServicesDelete state
        """
        DLOG.info("Entering state (%s) for %s." % (self.name, instance.name))
        task = GuestServicesDeleteTask(instance)
        instance.task = task
        task.start()

    def exit(self, instance):
        """
        Actions performed on exit from the GuestServicesDelete state
        """
        DLOG.info("Exiting state (%s) for %s." % (self.name, instance.name))
        current_task = instance.task
        if isinstance(current_task, GuestServicesDeleteTask):
            current_task.abort()

    def transition(self, instance, event, event_data, to_state):
        """
        Transition from the GuestServicesDelete state
        """
        pass

    def handle_event(self, instance, event, event_data=None):
        """
        Handle event while in the GuestServicesDelete state
        """
        if event == INSTANCE_EVENT.TASK_STOP:
            return INSTANCE_STATE.INITIAL

        if event == INSTANCE_EVENT.TASK_COMPLETED:
            DLOG.debug("GuestServicesDelete completed for %s." % instance.name)
            return INSTANCE_STATE.INITIAL

        if event == INSTANCE_EVENT.TASK_FAILED:
            DLOG.info("GuestServicesDelete failed for %s." % instance.name)
            return INSTANCE_STATE.INITIAL

        if event == INSTANCE_EVENT.TASK_TIMEOUT:
            DLOG.info("GuestServicesDelete timed out for %s." % instance.name)
            return INSTANCE_STATE.INITIAL

        DLOG.verbose("Ignoring %s event for %s." % (event, instance.name))
        return self.name
| 32.279412 | 79 | 0.666515 |
from nfv_common import debug
from nfv_common import state_machine
from nfv_vim.instance_fsm._instance_defs import INSTANCE_EVENT
from nfv_vim.instance_fsm._instance_defs import INSTANCE_STATE
from nfv_vim.instance_fsm._instance_tasks import GuestServicesDeleteTask
DLOG = debug.debug_get_logger('nfv_vim.state_machine.instance')
class GuestServicesDeleteState(state_machine.State):
def __init__(self, name):
super(GuestServicesDeleteState, self).__init__(name)
def enter(self, instance):
DLOG.info("Entering state (%s) for %s." % (self.name, instance.name))
instance.task = GuestServicesDeleteTask(instance)
instance.task.start()
def exit(self, instance):
DLOG.info("Exiting state (%s) for %s." % (self.name, instance.name))
if isinstance(instance.task, GuestServicesDeleteTask):
instance.task.abort()
def transition(self, instance, event, event_data, to_state):
pass
def handle_event(self, instance, event, event_data=None):
if INSTANCE_EVENT.TASK_STOP == event:
return INSTANCE_STATE.INITIAL
elif INSTANCE_EVENT.TASK_COMPLETED == event:
DLOG.debug("GuestServicesDelete completed for %s." % instance.name)
return INSTANCE_STATE.INITIAL
elif INSTANCE_EVENT.TASK_FAILED == event:
DLOG.info("GuestServicesDelete failed for %s." % instance.name)
return INSTANCE_STATE.INITIAL
elif INSTANCE_EVENT.TASK_TIMEOUT == event:
DLOG.info("GuestServicesDelete timed out for %s." % instance.name)
return INSTANCE_STATE.INITIAL
else:
DLOG.verbose("Ignoring %s event for %s." % (event, instance.name))
return self.name
| true | true |
1c481554ae98a5e2b009109d0fcec1bafdec2aec | 4,338 | py | Python | pc/userInput.py | martinloland/rov | 542ca17daeb17109ac686f979ed3bb1dfb64b846 | [
"MIT"
] | null | null | null | pc/userInput.py | martinloland/rov | 542ca17daeb17109ac686f979ed3bb1dfb64b846 | [
"MIT"
] | null | null | null | pc/userInput.py | martinloland/rov | 542ca17daeb17109ac686f979ed3bb1dfb64b846 | [
"MIT"
] | null | null | null | '''
userInput.py
- Read input from keyboard and USB-controller
- Initiate functions to make the change
'''
def getUserInput():
buttons = []
if joystickConnected:
buttons = getJoystick(buttons)
# Toggle keys
for event in GAME_EVENTS.get():
if event.type == GAME_GLOBALS.QUIT:
buttons.append('quit')
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
buttons.append('return')
# close
if event.key == pygame.K_ESCAPE:
buttons.append('quit')
# LED
if event.key == pygame.K_l:
buttons.append('led')
# info
if event.key == pygame.K_i:
buttons.append('info')
# info
if event.key == pygame.K_p:
buttons.append('motorin')
# overlay
if event.key == pygame.K_o:
buttons.append('overlay')
# reset gimbal
if event.key == pygame.K_q:
buttons.append('resetGimbal')
# snapshot
if event.key == pygame.K_SPACE:
buttons.append('snapshot')
# snapshot
if event.key == pygame.K_j:
buttons.append('joystick')
# Gimbal
keys = pygame.key.get_pressed()
if keys[pygame.K_a]:
buttons.append('gLeft')
if keys[pygame.K_d]:
buttons.append('gRight')
if keys[pygame.K_s]:
buttons.append('gUp')
if keys[pygame.K_w]:
buttons.append('gDown')
if keys[pygame.K_q]:
buttons.append('gReset')
# Motors
if keys[pygame.K_UP]:
buttons.append('mForward')
if keys[pygame.K_DOWN]:
buttons.append('mBack')
if keys[pygame.K_LEFT]:
buttons.append('mLeft')
if keys[pygame.K_RIGHT]:
buttons.append('mRight')
if keys[pygame.K_z]:
buttons.append('mDecrease')
if keys[pygame.K_c]:
buttons.append('mIncrease')
if keys[pygame.K_r]:
buttons.append('mUp')
if keys[pygame.K_f]:
buttons.append('mDown')
actOnInput(buttons)
def getJoystick(buttons):
global lastout
'''
0:L left/right
1:up/down
2: R left/right
3: up/down
4: 1 increase
5: 2 motors
6: 3 decrease
7: 4 overlay
8: L1 snapshot
9: R1 up
11: R2 down
12: select info
13: start exit
14: L3resetGimbal
15: R3 led
'''
out = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
it = 0 #iterator
# Read axis
for i in range(0, j.get_numaxes()):
out[it] = j.get_axis(i)
it+=1
#Read input from buttons
for i in range(0, j.get_numbuttons()):
out[it] = j.get_button(i)
it+=1
# HOLD KEYS
# gimbal
if out[0]>0.8:
buttons.append('gRight')
if out[0]<-0.8:
buttons.append('gLeft')
if out[1]>0.8:
buttons.append('gUp')
if out[1]<-0.8:
buttons.append('gDown')
#motor
if out[2]>0.8:
buttons.append('mRight')
if out[2]<-0.8:
buttons.append('mLeft')
if out[3]>0.8:
buttons.append('mBack')
if out[3]<-0.8:
buttons.append('mForward')
if out[4]:
buttons.append('mIncrease')
if out[6]:
buttons.append('mDecrease')
if out[9]:
buttons.append('mUp')
if out[11]:
buttons.append('mDown')
# TOGGLE KEYS
if out[12] and out[12] != lastout[12]:
buttons.append('info')
if out[13] and out[13] != lastout[13]:
buttons.append('quit')
if out[14] and out[14] != lastout[14]:
buttons.append('resetGimbal')
if out[15] and out[15] != lastout[15]:
buttons.append('led')
if out[5] and out[5] != lastout[5]:
buttons.append('motorin')
if out[7] and out[7] != lastout[7]:
buttons.append('overlay')
if out[8] and out[8] != lastout[8]:
buttons.append('snapshot')
lastout = out
return buttons
def actOnInput(buttons):
if any("return" in s for s in buttons):
toggle_fullscreen()
if any("quit" in s for s in buttons):
closeProgram()
if any("snapshot" in s for s in buttons):
snapshot()
if any("overlay" in s for s in buttons):
if ui.overlay:
ui.overlay = False
elif not ui.overlay:
ui.overlay = True
if any("motorin" in s for s in buttons):
if ui.motorInfo:
ui.motorInfo = False
elif not ui.motorInfo:
ui.motorInfo = True
if any("info" in s for s in buttons):
if ui.info:
ui.info = False
elif not ui.info:
ui.info = True
if any("led" in s for s in buttons):
if act.led:
act.led = 0
elif not act.led:
act.led = 1
if any("joystick" in s for s in buttons):
if ui.joystick:
ui.joystick = False
elif not ui.joystick:
ui.joystick = True
gimbal(buttons)
motor(buttons) | 22.59375 | 46 | 0.620793 |
def getUserInput():
buttons = []
if joystickConnected:
buttons = getJoystick(buttons)
for event in GAME_EVENTS.get():
if event.type == GAME_GLOBALS.QUIT:
buttons.append('quit')
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
buttons.append('return')
if event.key == pygame.K_ESCAPE:
buttons.append('quit')
if event.key == pygame.K_l:
buttons.append('led')
if event.key == pygame.K_i:
buttons.append('info')
if event.key == pygame.K_p:
buttons.append('motorin')
if event.key == pygame.K_o:
buttons.append('overlay')
if event.key == pygame.K_q:
buttons.append('resetGimbal')
if event.key == pygame.K_SPACE:
buttons.append('snapshot')
if event.key == pygame.K_j:
buttons.append('joystick')
keys = pygame.key.get_pressed()
if keys[pygame.K_a]:
buttons.append('gLeft')
if keys[pygame.K_d]:
buttons.append('gRight')
if keys[pygame.K_s]:
buttons.append('gUp')
if keys[pygame.K_w]:
buttons.append('gDown')
if keys[pygame.K_q]:
buttons.append('gReset')
if keys[pygame.K_UP]:
buttons.append('mForward')
if keys[pygame.K_DOWN]:
buttons.append('mBack')
if keys[pygame.K_LEFT]:
buttons.append('mLeft')
if keys[pygame.K_RIGHT]:
buttons.append('mRight')
if keys[pygame.K_z]:
buttons.append('mDecrease')
if keys[pygame.K_c]:
buttons.append('mIncrease')
if keys[pygame.K_r]:
buttons.append('mUp')
if keys[pygame.K_f]:
buttons.append('mDown')
actOnInput(buttons)
def getJoystick(buttons):
global lastout
out = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
it = 0
for i in range(0, j.get_numaxes()):
out[it] = j.get_axis(i)
it+=1
for i in range(0, j.get_numbuttons()):
out[it] = j.get_button(i)
it+=1
if out[0]>0.8:
buttons.append('gRight')
if out[0]<-0.8:
buttons.append('gLeft')
if out[1]>0.8:
buttons.append('gUp')
if out[1]<-0.8:
buttons.append('gDown')
if out[2]>0.8:
buttons.append('mRight')
if out[2]<-0.8:
buttons.append('mLeft')
if out[3]>0.8:
buttons.append('mBack')
if out[3]<-0.8:
buttons.append('mForward')
if out[4]:
buttons.append('mIncrease')
if out[6]:
buttons.append('mDecrease')
if out[9]:
buttons.append('mUp')
if out[11]:
buttons.append('mDown')
if out[12] and out[12] != lastout[12]:
buttons.append('info')
if out[13] and out[13] != lastout[13]:
buttons.append('quit')
if out[14] and out[14] != lastout[14]:
buttons.append('resetGimbal')
if out[15] and out[15] != lastout[15]:
buttons.append('led')
if out[5] and out[5] != lastout[5]:
buttons.append('motorin')
if out[7] and out[7] != lastout[7]:
buttons.append('overlay')
if out[8] and out[8] != lastout[8]:
buttons.append('snapshot')
lastout = out
return buttons
def actOnInput(buttons):
if any("return" in s for s in buttons):
toggle_fullscreen()
if any("quit" in s for s in buttons):
closeProgram()
if any("snapshot" in s for s in buttons):
snapshot()
if any("overlay" in s for s in buttons):
if ui.overlay:
ui.overlay = False
elif not ui.overlay:
ui.overlay = True
if any("motorin" in s for s in buttons):
if ui.motorInfo:
ui.motorInfo = False
elif not ui.motorInfo:
ui.motorInfo = True
if any("info" in s for s in buttons):
if ui.info:
ui.info = False
elif not ui.info:
ui.info = True
if any("led" in s for s in buttons):
if act.led:
act.led = 0
elif not act.led:
act.led = 1
if any("joystick" in s for s in buttons):
if ui.joystick:
ui.joystick = False
elif not ui.joystick:
ui.joystick = True
gimbal(buttons)
motor(buttons) | true | true |
1c4815d09dda9708289263b84fc70dfe99fb9044 | 4,006 | py | Python | src/py/flwr/server/grpc_server/flower_service_servicer.py | Chris-george-anil/flower | 98fb2fcde273c1226cc1f2e1638c1e4d8f35815c | [
"Apache-2.0"
] | 895 | 2020-03-22T20:34:16.000Z | 2022-03-31T15:20:42.000Z | src/py/flwr/server/grpc_server/flower_service_servicer.py | Chris-george-anil/flower | 98fb2fcde273c1226cc1f2e1638c1e4d8f35815c | [
"Apache-2.0"
] | 322 | 2020-02-19T10:16:33.000Z | 2022-03-31T09:49:08.000Z | src/py/flwr/server/grpc_server/flower_service_servicer.py | Chris-george-anil/flower | 98fb2fcde273c1226cc1f2e1638c1e4d8f35815c | [
"Apache-2.0"
] | 234 | 2020-03-31T10:52:16.000Z | 2022-03-31T14:04:42.000Z | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Servicer for FlowerService.
Relevant knowledge for reading this modules code:
- https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
"""
from typing import Callable, Iterator
import grpc
from flwr.proto import transport_pb2_grpc
from flwr.proto.transport_pb2 import ClientMessage, ServerMessage
from flwr.server.client_manager import ClientManager
from flwr.server.grpc_server.grpc_bridge import GRPCBridge
from flwr.server.grpc_server.grpc_client_proxy import GrpcClientProxy
def default_bridge_factory() -> GRPCBridge:
"""Return GRPCBridge instance."""
return GRPCBridge()
def default_grpc_client_factory(cid: str, bridge: GRPCBridge) -> GrpcClientProxy:
"""Return GrpcClientProxy instance."""
return GrpcClientProxy(cid=cid, bridge=bridge)
def register_client(
client_manager: ClientManager,
client: GrpcClientProxy,
context: grpc.ServicerContext,
) -> bool:
"""Try registering GrpcClientProxy with ClientManager."""
is_success = client_manager.register(client)
if is_success:
def rpc_termination_callback() -> None:
client.bridge.close()
client_manager.unregister(client)
context.add_callback(rpc_termination_callback)
return is_success
class FlowerServiceServicer(transport_pb2_grpc.FlowerServiceServicer):
"""FlowerServiceServicer for bi-directional gRPC message stream."""
def __init__(
self,
client_manager: ClientManager,
grpc_bridge_factory: Callable[[], GRPCBridge] = default_bridge_factory,
grpc_client_factory: Callable[
[str, GRPCBridge], GrpcClientProxy
] = default_grpc_client_factory,
) -> None:
self.client_manager: ClientManager = client_manager
self.grpc_bridge_factory = grpc_bridge_factory
self.client_factory = grpc_client_factory
def Join( # pylint: disable=invalid-name
self,
request_iterator: Iterator[ClientMessage],
context: grpc.ServicerContext,
) -> Iterator[ServerMessage]:
"""Method will be invoked by each GrpcClientProxy which participates in
the network.
Protocol:
- The first message is sent from the server to the client
- Both ServerMessage and ClientMessage are message "wrappers"
wrapping the actual message
- The Join method is (pretty much) protocol unaware
"""
peer = context.peer()
bridge = self.grpc_bridge_factory()
client = self.client_factory(peer, bridge)
is_success = register_client(self.client_manager, client, context)
if is_success:
# Get iterators
client_message_iterator = request_iterator
server_message_iterator = bridge.server_message_iterator()
# All messages will be pushed to client bridge directly
while True:
try:
# Get server message from bridge and yield it
server_message = next(server_message_iterator)
yield server_message
# Wait for client message
client_message = next(client_message_iterator)
bridge.set_client_message(client_message)
except StopIteration:
break
| 36.418182 | 81 | 0.678233 |
from typing import Callable, Iterator
import grpc
from flwr.proto import transport_pb2_grpc
from flwr.proto.transport_pb2 import ClientMessage, ServerMessage
from flwr.server.client_manager import ClientManager
from flwr.server.grpc_server.grpc_bridge import GRPCBridge
from flwr.server.grpc_server.grpc_client_proxy import GrpcClientProxy
def default_bridge_factory() -> GRPCBridge:
return GRPCBridge()
def default_grpc_client_factory(cid: str, bridge: GRPCBridge) -> GrpcClientProxy:
return GrpcClientProxy(cid=cid, bridge=bridge)
def register_client(
client_manager: ClientManager,
client: GrpcClientProxy,
context: grpc.ServicerContext,
) -> bool:
is_success = client_manager.register(client)
if is_success:
def rpc_termination_callback() -> None:
client.bridge.close()
client_manager.unregister(client)
context.add_callback(rpc_termination_callback)
return is_success
class FlowerServiceServicer(transport_pb2_grpc.FlowerServiceServicer):
def __init__(
self,
client_manager: ClientManager,
grpc_bridge_factory: Callable[[], GRPCBridge] = default_bridge_factory,
grpc_client_factory: Callable[
[str, GRPCBridge], GrpcClientProxy
] = default_grpc_client_factory,
) -> None:
self.client_manager: ClientManager = client_manager
self.grpc_bridge_factory = grpc_bridge_factory
self.client_factory = grpc_client_factory
def Join(
self,
request_iterator: Iterator[ClientMessage],
context: grpc.ServicerContext,
) -> Iterator[ServerMessage]:
peer = context.peer()
bridge = self.grpc_bridge_factory()
client = self.client_factory(peer, bridge)
is_success = register_client(self.client_manager, client, context)
if is_success:
client_message_iterator = request_iterator
server_message_iterator = bridge.server_message_iterator()
while True:
try:
server_message = next(server_message_iterator)
yield server_message
client_message = next(client_message_iterator)
bridge.set_client_message(client_message)
except StopIteration:
break
| true | true |
1c48168926e014644a7673b353146114eacaca51 | 808 | py | Python | multiprocessing/test_pool_async.py | Carglglz/micropython-lib | 07102c56aa1087b97ee313cedc1d89fd20452e11 | [
"PSF-2.0"
] | 126 | 2019-07-19T14:42:41.000Z | 2022-03-21T22:22:19.000Z | multiprocessing/test_pool_async.py | Carglglz/micropython-lib | 07102c56aa1087b97ee313cedc1d89fd20452e11 | [
"PSF-2.0"
] | 38 | 2019-08-28T01:46:31.000Z | 2022-03-17T05:46:51.000Z | multiprocessing/test_pool_async.py | Carglglz/micropython-lib | 07102c56aa1087b97ee313cedc1d89fd20452e11 | [
"PSF-2.0"
] | 55 | 2019-08-02T09:32:33.000Z | 2021-12-22T11:25:51.000Z | import time
from multiprocessing import Pool
def f(x):
return x*x
pool = Pool(4)
future = pool.apply_async(f, (10,))
assert future.get() == 100
def f2(x):
time.sleep(0.5)
return x + 1
future = pool.apply_async(f2, (10,))
iter = 0
while not future.ready():
#print("not ready")
time.sleep(0.1)
iter += 1
assert future.get() == 11
assert iter >= 5 and iter <= 8
t = time.time()
futs = [
pool.apply_async(f2, (10,)),
pool.apply_async(f2, (11,)),
pool.apply_async(f2, (12,)),
]
iter = 0
while True:
#not all(futs):
c = 0
for f in futs:
if not f.ready():
c += 1
if not c:
break
#print("not ready2")
time.sleep(0.1)
iter += 1
assert iter >= 5 and iter <= 8
print("Run 3 parallel sleep(1)'s in: ", time.time() - t)
| 16.833333 | 56 | 0.564356 | import time
from multiprocessing import Pool
def f(x):
return x*x
pool = Pool(4)
future = pool.apply_async(f, (10,))
assert future.get() == 100
def f2(x):
time.sleep(0.5)
return x + 1
future = pool.apply_async(f2, (10,))
iter = 0
while not future.ready():
time.sleep(0.1)
iter += 1
assert future.get() == 11
assert iter >= 5 and iter <= 8
t = time.time()
futs = [
pool.apply_async(f2, (10,)),
pool.apply_async(f2, (11,)),
pool.apply_async(f2, (12,)),
]
iter = 0
while True:
c = 0
for f in futs:
if not f.ready():
c += 1
if not c:
break
time.sleep(0.1)
iter += 1
assert iter >= 5 and iter <= 8
print("Run 3 parallel sleep(1)'s in: ", time.time() - t)
| true | true |
1c481af4ae671d17e7ab5377a49f168e82fa6385 | 5,618 | py | Python | charts.py | suryatmodulus/excalidraw-analytics | 6cc9ec3800d1ef51e312740a981b656940fb0660 | [
"MIT"
] | 9 | 2021-02-07T13:15:06.000Z | 2021-11-07T22:09:59.000Z | charts.py | suryatmodulus/excalidraw-analytics | 6cc9ec3800d1ef51e312740a981b656940fb0660 | [
"MIT"
] | 16 | 2021-02-08T16:10:44.000Z | 2022-03-27T01:16:21.000Z | charts.py | suryatmodulus/excalidraw-analytics | 6cc9ec3800d1ef51e312740a981b656940fb0660 | [
"MIT"
] | 3 | 2021-02-08T15:09:33.000Z | 2021-08-06T17:34:49.000Z | from datetime import datetime
from datetime import timedelta
from opencolor import oc
import json
import os
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
VERSION_DIR = os.path.join(ROOT_DIR, "version")
TEMPLATE_FILE = os.path.join(ROOT_DIR, "template.html")
INDEX_FILE = os.path.join(ROOT_DIR, "index.html")
MAX_DAYS = 7
def chart_colors(index):
return [
oc["grape"][index],
oc["red"][index],
oc["orange"][index],
oc["yellow"][index],
oc["lime"][index],
oc["green"][index],
oc["teal"][index],
oc["cyan"][index],
oc["blue"][index],
oc["indigo"][index],
oc["violet"][index],
]
chart_colors_bg = chart_colors(1)
chart_colors_text = chart_colors(9)
empty_color = oc["gray"][1]
usage_colors = [
oc["lime"][0],
oc["lime"][1],
oc["lime"][2],
oc["lime"][3],
oc["lime"][5],
oc["lime"][6],
]
def parse_day(filename):
filename = filename.replace(".json", "")
file_date = datetime.strptime(filename, "%Y-%m-%d")
today = datetime.today()
return file_date > today + timedelta(days=-MAX_DAYS)
def string2date(string):
return datetime.strptime(string, "%Y-%m-%d").strftime("%d %b")
def string2day(string):
return datetime.strptime(string, "%Y-%m-%d").strftime("%a")
def string2weekday(string):
return int(datetime.strptime(string, "%Y-%m-%d").strftime("%w"))
def render_cell(value, max):
color_id = round((value / max) * (len(usage_colors) - 1))
if value:
return "<td style='background-color: %s'>%2.1f%%</td>" % (
usage_colors[color_id],
value * 100,
)
return "<td style='background-color: %s'>-</td>" % (empty_color)
def main():
with open(TEMPLATE_FILE, "r") as template:
data = template.read()
_, _, filenames = next(os.walk(VERSION_DIR))
days = {}
versions = set()
for filename in filenames:
if not parse_day(filename):
continue
with open(os.path.join(VERSION_DIR, filename), "r") as day_json:
day = json.load(day_json)
days[filename.replace(".json", "")] = day
for key in day.keys():
versions.add(key)
sorted_days = sorted(days.items())
sorted_versions = sorted(versions)
# find max_value
max_value = 0
for day in sorted_days:
for version in day[1]:
max_value = max(max_value, day[1][version])
chart_rows = [["Day"]]
for version in sorted_versions:
chart_rows[len(chart_rows) - 1].append(
version[-7:] if len(version) == 28 else "Older"
)
for day in sorted_days:
chart_rows.append([string2date(day[0])])
for version in sorted_versions:
if version in day[1]:
chart_rows[len(chart_rows) - 1].append((day[1][version]))
else:
chart_rows[len(chart_rows) - 1].append(0)
report = {}
for version in sorted(sorted_versions, reverse=True):
report[version] = {}
for day in sorted_days:
report[version][day[0]] = 0
if version in day[1]:
report[version][day[0]] = day[1][version]
version_head = "<tr><th>Version</th><th>Commit</th><th style='background-color: {}'></th>".format(
empty_color
)
for day in sorted_days:
version_head += "<th style='background-color: %s'>%s<br>%s</th>" % (
oc["red"][0] if string2weekday(day[0]) in [6, 0] else oc["white"],
string2day(day[0]),
string2date(day[0]),
)
version_head += "</tr>"
version_body = ""
current_version_date = ""
for index, row in enumerate(report):
version_date = row[:10]
version_datetime = row[:16].replace("T", " ")
version_hash = row[-7:]
color_bg = chart_colors_bg[
(index - len(sorted_versions)) % len(chart_colors_bg)
]
color_text = chart_colors_text[
(index - len(sorted_versions)) % len(chart_colors_text)
]
if version_date != current_version_date:
version_body += (
"<tr><td style='background-color: {}' colspan='{}'></td></tr>".format(
empty_color, 3 + len(report[row])
)
)
version_body += "<tr><td style='background-color: {}; color: {};'><code>{}</code></td>".format(
color_bg,
color_text,
version_datetime,
)
# older version
if len(row) == 20:
version_body += "<td style='background-color: {};'></td><td style='background-color: {}'></td>".format(
color_bg,
empty_color,
)
else:
version_body += "<td style='background-color: {};'><code><a style='color: {};' href='https://github.com/excalidraw/excalidraw/commit/{}'>{}</a></code></td><td style='background-color: {}'></td>".format(
color_bg,
color_text,
version_hash,
version_hash,
empty_color,
)
for day in report[row]:
version_body += render_cell(report[row][day], max_value)
version_body += "</tr>\n"
current_version_date = version_date
data = data.replace("{ data }", "%r" % chart_rows)
data = data.replace("{ version_head }", version_head)
data = data.replace("{ version_body }", version_body)
with open(INDEX_FILE, "w") as index:
index.write(data)
print("Charts updated")
if __name__ == "__main__":
main()
| 29.413613 | 214 | 0.559808 | from datetime import datetime
from datetime import timedelta
from opencolor import oc
import json
import os
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
VERSION_DIR = os.path.join(ROOT_DIR, "version")
TEMPLATE_FILE = os.path.join(ROOT_DIR, "template.html")
INDEX_FILE = os.path.join(ROOT_DIR, "index.html")
MAX_DAYS = 7
def chart_colors(index):
return [
oc["grape"][index],
oc["red"][index],
oc["orange"][index],
oc["yellow"][index],
oc["lime"][index],
oc["green"][index],
oc["teal"][index],
oc["cyan"][index],
oc["blue"][index],
oc["indigo"][index],
oc["violet"][index],
]
chart_colors_bg = chart_colors(1)
chart_colors_text = chart_colors(9)
empty_color = oc["gray"][1]
usage_colors = [
oc["lime"][0],
oc["lime"][1],
oc["lime"][2],
oc["lime"][3],
oc["lime"][5],
oc["lime"][6],
]
def parse_day(filename):
filename = filename.replace(".json", "")
file_date = datetime.strptime(filename, "%Y-%m-%d")
today = datetime.today()
return file_date > today + timedelta(days=-MAX_DAYS)
def string2date(string):
return datetime.strptime(string, "%Y-%m-%d").strftime("%d %b")
def string2day(string):
return datetime.strptime(string, "%Y-%m-%d").strftime("%a")
def string2weekday(string):
return int(datetime.strptime(string, "%Y-%m-%d").strftime("%w"))
def render_cell(value, max):
color_id = round((value / max) * (len(usage_colors) - 1))
if value:
return "<td style='background-color: %s'>%2.1f%%</td>" % (
usage_colors[color_id],
value * 100,
)
return "<td style='background-color: %s'>-</td>" % (empty_color)
def main():
with open(TEMPLATE_FILE, "r") as template:
data = template.read()
_, _, filenames = next(os.walk(VERSION_DIR))
days = {}
versions = set()
for filename in filenames:
if not parse_day(filename):
continue
with open(os.path.join(VERSION_DIR, filename), "r") as day_json:
day = json.load(day_json)
days[filename.replace(".json", "")] = day
for key in day.keys():
versions.add(key)
sorted_days = sorted(days.items())
sorted_versions = sorted(versions)
max_value = 0
for day in sorted_days:
for version in day[1]:
max_value = max(max_value, day[1][version])
chart_rows = [["Day"]]
for version in sorted_versions:
chart_rows[len(chart_rows) - 1].append(
version[-7:] if len(version) == 28 else "Older"
)
for day in sorted_days:
chart_rows.append([string2date(day[0])])
for version in sorted_versions:
if version in day[1]:
chart_rows[len(chart_rows) - 1].append((day[1][version]))
else:
chart_rows[len(chart_rows) - 1].append(0)
report = {}
for version in sorted(sorted_versions, reverse=True):
report[version] = {}
for day in sorted_days:
report[version][day[0]] = 0
if version in day[1]:
report[version][day[0]] = day[1][version]
version_head = "<tr><th>Version</th><th>Commit</th><th style='background-color: {}'></th>".format(
empty_color
)
for day in sorted_days:
version_head += "<th style='background-color: %s'>%s<br>%s</th>" % (
oc["red"][0] if string2weekday(day[0]) in [6, 0] else oc["white"],
string2day(day[0]),
string2date(day[0]),
)
version_head += "</tr>"
version_body = ""
current_version_date = ""
for index, row in enumerate(report):
version_date = row[:10]
version_datetime = row[:16].replace("T", " ")
version_hash = row[-7:]
color_bg = chart_colors_bg[
(index - len(sorted_versions)) % len(chart_colors_bg)
]
color_text = chart_colors_text[
(index - len(sorted_versions)) % len(chart_colors_text)
]
if version_date != current_version_date:
version_body += (
"<tr><td style='background-color: {}' colspan='{}'></td></tr>".format(
empty_color, 3 + len(report[row])
)
)
version_body += "<tr><td style='background-color: {}; color: {};'><code>{}</code></td>".format(
color_bg,
color_text,
version_datetime,
)
if len(row) == 20:
version_body += "<td style='background-color: {};'></td><td style='background-color: {}'></td>".format(
color_bg,
empty_color,
)
else:
version_body += "<td style='background-color: {};'><code><a style='color: {};' href='https://github.com/excalidraw/excalidraw/commit/{}'>{}</a></code></td><td style='background-color: {}'></td>".format(
color_bg,
color_text,
version_hash,
version_hash,
empty_color,
)
for day in report[row]:
version_body += render_cell(report[row][day], max_value)
version_body += "</tr>\n"
current_version_date = version_date
data = data.replace("{ data }", "%r" % chart_rows)
data = data.replace("{ version_head }", version_head)
data = data.replace("{ version_body }", version_body)
with open(INDEX_FILE, "w") as index:
index.write(data)
print("Charts updated")
if __name__ == "__main__":
main()
| true | true |
1c481be9d20dfe69d10a24305d2be9deb742efd6 | 152 | py | Python | meiduo_mall/apps/contents/urls.py | yeluoguigen/meiduo_project | f7d416cf9ac433c27e58783f38687a1fbe3df6fe | [
"MIT"
] | null | null | null | meiduo_mall/apps/contents/urls.py | yeluoguigen/meiduo_project | f7d416cf9ac433c27e58783f38687a1fbe3df6fe | [
"MIT"
] | null | null | null | meiduo_mall/apps/contents/urls.py | yeluoguigen/meiduo_project | f7d416cf9ac433c27e58783f38687a1fbe3df6fe | [
"MIT"
] | null | null | null | from django.conf.urls import url
from apps.contents import views
urlpatterns = [
# 注册
url(r'^$', views.IndexView.as_view(), name="index"),
] | 15.2 | 56 | 0.664474 | from django.conf.urls import url
from apps.contents import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name="index"),
] | true | true |
1c481c9bdad123fecf4b357b43ceb462bf661ab7 | 2,900 | py | Python | external/evolver_trf2gff.py | dentearl/evolverSimControl | b3236debbc8d945a99aecb0988bd1f48f25913c3 | [
"MIT"
] | 4 | 2018-12-01T13:49:12.000Z | 2021-02-18T17:55:46.000Z | external/evolver_trf2gff.py | dentearl/evolverSimControl | b3236debbc8d945a99aecb0988bd1f48f25913c3 | [
"MIT"
] | null | null | null | external/evolver_trf2gff.py | dentearl/evolverSimControl | b3236debbc8d945a99aecb0988bd1f48f25913c3 | [
"MIT"
] | 1 | 2021-04-10T15:05:11.000Z | 2021-04-10T15:05:11.000Z | #!/usr/bin/env python
# Copyright (C) 2008-2011 by
# George Asimenos, Robert C. Edgar, Serafim Batzoglou and Arend Sidow.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##############################
import sys
MAX_MOTIF_ATTR = 32
FEATURE = "tandem"
FileName = sys.argv[1]
def Quit(s):
print >> sys.stderr, "*** ERROR ***", sys.argv[0],s
sys.exit(0)
File = open(FileName)
# Skip 6-line header
for i in range(0, 6):
Line = File.readline()
while 1:
Line = File.readline()
if len(Line) == 0:
break
Line = Line.strip()
if len(Line) == 0:
continue
elif Line.startswith("Sequence: "):
Label = Line[10:]
continue
elif Line.startswith("Parameters: "):
continue
# 1 59 7 8.0 7 83 9 64 37 38 1 22 1.64 CCCTAAA CCCTAAACCCTAAACCCTAAACCCTAAACCTCTGAATCCTTAATCCCTAAATCCCTAAA
# 0 1 2 3 4 5 6 7 8 9 19 11 12 13 14
# 0, 1 Indices of the repeat relative to the start of the sequence.
# 2 Period size of the repeat.
# 3 Number of copies aligned with the consensus pattern.
# 4 Size of consensus pattern (may differ slightly from the period size).
# 5 Percent of matches between adjacent copies overall.
# 6 Percent of indels between adjacent copies overall.
# 7 Alignment score.
# 8-11 Percent composition for each of the four nucleotides.
# 12 Entropy measure based on percent composition.
# 13 Consensus motif
# 14 Sequence
Fields = Line.split()
if len(Fields) != 15:
Quit("Expected 15 fields, got: " + Line)
Start = int(Fields[0])
End = int(Fields[1])
Copies = float(Fields[3])
Score = int(Fields[7])
Motif = Fields[13]
Length = len(Motif)
Attrs = "replen %u;" % Length
Attrs += " copies %.1f;" % Copies
if len(Motif) > MAX_MOTIF_ATTR:
Attrs += " cons \"%s...\";" % Motif[0:MAX_MOTIF_ATTR]
else:
Attrs += " cons \"%s\";" % Motif
s = "%s\ttrf\t%s\t%d\t%d\t%d\t+\t.\t%s" % (Label, FEATURE, Start, End, Score, Attrs)
print s
| 32.954545 | 106 | 0.70069 |
ameters: "):
continue
Fields = Line.split()
if len(Fields) != 15:
Quit("Expected 15 fields, got: " + Line)
Start = int(Fields[0])
End = int(Fields[1])
Copies = float(Fields[3])
Score = int(Fields[7])
Motif = Fields[13]
Length = len(Motif)
Attrs = "replen %u;" % Length
Attrs += " copies %.1f;" % Copies
if len(Motif) > MAX_MOTIF_ATTR:
Attrs += " cons \"%s...\";" % Motif[0:MAX_MOTIF_ATTR]
else:
Attrs += " cons \"%s\";" % Motif
s = "%s\ttrf\t%s\t%d\t%d\t%d\t+\t.\t%s" % (Label, FEATURE, Start, End, Score, Attrs)
print s
| false | true |
1c481cbfa9a98ca0259c24e6baae94b4e74f1854 | 211 | py | Python | erpnext_oralcare/erpnext_oralcare/doctype/labial_mucosa/test_labial_mucosa.py | techlift-tech/erpnext-oralcare | e10a787118c64dcedb7f9d0aedb1fe6c5a00e19e | [
"MIT"
] | null | null | null | erpnext_oralcare/erpnext_oralcare/doctype/labial_mucosa/test_labial_mucosa.py | techlift-tech/erpnext-oralcare | e10a787118c64dcedb7f9d0aedb1fe6c5a00e19e | [
"MIT"
] | 165 | 2019-04-25T12:08:17.000Z | 2019-08-09T13:26:03.000Z | erpnext_oralcare/erpnext_oralcare/doctype/labial_mucosa/test_labial_mucosa.py | techlift-tech/erpnext-oralcare | e10a787118c64dcedb7f9d0aedb1fe6c5a00e19e | [
"MIT"
] | 2 | 2019-09-10T16:49:11.000Z | 2021-12-03T22:54:21.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Techlift and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestLabialMucosa(unittest.TestCase):
pass
| 19.181818 | 47 | 0.772512 |
from __future__ import unicode_literals
import frappe
import unittest
class TestLabialMucosa(unittest.TestCase):
pass
| true | true |
1c481ccf48bce4f8db10a6511af9ca1646796d16 | 2,134 | py | Python | infinisdk/infinibox/fc_soft_target.py | kobutton/infinisdk | 4ae5c66a2d7926636a52cb0a1452f9a2809b2ec2 | [
"BSD-3-Clause"
] | 5 | 2019-02-26T20:11:43.000Z | 2021-03-10T08:45:38.000Z | infinisdk/infinibox/fc_soft_target.py | kobutton/infinisdk | 4ae5c66a2d7926636a52cb0a1452f9a2809b2ec2 | [
"BSD-3-Clause"
] | 11 | 2017-11-15T19:20:23.000Z | 2021-09-14T18:17:47.000Z | infinisdk/infinibox/fc_soft_target.py | kobutton/infinisdk | 4ae5c66a2d7926636a52cb0a1452f9a2809b2ec2 | [
"BSD-3-Clause"
] | 2 | 2017-11-16T11:59:05.000Z | 2019-02-25T20:44:23.000Z | from urlobject import URLObject as URL
from ..core import Field
from ..core.bindings import RelatedObjectBinding, RelatedComponentBinding
from ..core.system_object import SystemObject
from ..core.system_object_utils import get_data_for_object_creation
from ..core.translators_and_types import WWNType
from ..core.type_binder import TypeBinder
class FcSoftTargetsBinder(TypeBinder):
def create_many(self, **fields):
'''
Creates multiple soft targets in single API command.
:param fields: All :class:`.FcSoftTarget` creation parameters & quantity
:returns: a list of :class:`.FcSoftTarget` objects
'''
url = self.get_url_path().add_path('create_multiple')
data = get_data_for_object_creation(self.object_type, self.system, fields)
res = self.system.api.post(url, data=data)
return [self.object_type.construct(self.system, obj_info)
for obj_info in res.get_result()]
def redistribute(self):
url = self.get_url_path().add_path('redistribute')
return self.system.api.post(url, data={})
def wipe(self):
url = self.get_url_path().add_path('wipe')
return self.system.api.post(url, data={})
class FcSoftTarget(SystemObject):
BINDER_CLASS = FcSoftTargetsBinder
URL_PATH = URL('fc/soft_targets')
FIELDS = [
Field("id", type=int, is_identity=True, cached=True),
Field("wwpn", cached=True, type=WWNType),
Field("port_number", type=int),
Field("switch", api_name="switch_id", type="infinisdk.infinibox.fc_switch:FcSwitch",
creation_parameter=True, binding=RelatedObjectBinding('fc_switches')),
Field("node", api_name="node_id",
binding=RelatedComponentBinding(api_index_name='node_id', value_for_none=None)),
Field("is_home", type=bool, add_getter=False),
]
@classmethod
def is_supported(cls, system):
return system.compat.has_npiv()
@classmethod
def get_type_name(cls):
return "fc_soft_target"
def is_home(self, **kwargs):
return self.get_field('is_home', **kwargs)
| 35.566667 | 94 | 0.685098 | from urlobject import URLObject as URL
from ..core import Field
from ..core.bindings import RelatedObjectBinding, RelatedComponentBinding
from ..core.system_object import SystemObject
from ..core.system_object_utils import get_data_for_object_creation
from ..core.translators_and_types import WWNType
from ..core.type_binder import TypeBinder
class FcSoftTargetsBinder(TypeBinder):
def create_many(self, **fields):
url = self.get_url_path().add_path('create_multiple')
data = get_data_for_object_creation(self.object_type, self.system, fields)
res = self.system.api.post(url, data=data)
return [self.object_type.construct(self.system, obj_info)
for obj_info in res.get_result()]
def redistribute(self):
url = self.get_url_path().add_path('redistribute')
return self.system.api.post(url, data={})
def wipe(self):
url = self.get_url_path().add_path('wipe')
return self.system.api.post(url, data={})
class FcSoftTarget(SystemObject):
BINDER_CLASS = FcSoftTargetsBinder
URL_PATH = URL('fc/soft_targets')
FIELDS = [
Field("id", type=int, is_identity=True, cached=True),
Field("wwpn", cached=True, type=WWNType),
Field("port_number", type=int),
Field("switch", api_name="switch_id", type="infinisdk.infinibox.fc_switch:FcSwitch",
creation_parameter=True, binding=RelatedObjectBinding('fc_switches')),
Field("node", api_name="node_id",
binding=RelatedComponentBinding(api_index_name='node_id', value_for_none=None)),
Field("is_home", type=bool, add_getter=False),
]
@classmethod
def is_supported(cls, system):
return system.compat.has_npiv()
@classmethod
def get_type_name(cls):
return "fc_soft_target"
def is_home(self, **kwargs):
return self.get_field('is_home', **kwargs)
| true | true |
1c481cf6fa6269a851564b750d3143531073ef45 | 13,845 | py | Python | mne/channels/tests/test_layout.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | null | null | null | mne/channels/tests/test_layout.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 23 | 2017-09-12T11:08:26.000Z | 2019-10-04T11:11:29.000Z | mne/channels/tests/test_layout.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T13:48:00.000Z | 2019-07-10T16:02:11.000Z | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import copy
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
find_layout)
from mne.channels.layout import (_box_size, _auto_topomap_coords,
generate_2d_layout)
from mne.utils import run_tests_if_main
from mne import pick_types, pick_info
from mne.io import read_raw_kit, _empty_info, read_info
from mne.io.constants import FIFF
from mne.bem import fit_sphere_to_headshape
from mne.utils import _TempDir
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')
lout_path = op.join(io_dir, 'tests', 'data')
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
fname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')
def _get_test_info():
"""Make test info."""
test_info = _empty_info(1000)
loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
dtype=np.float32)
test_info['chs'] = [
{'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_Frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1,
'unit': -1, 'unit_mul': 0},
{'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_Frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2,
'unit': -1, 'unit_mul': 0},
{'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1,
'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61,
'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}]
test_info._update_redundant()
test_info._check_consistency()
return test_info
def test_io_layout_lout():
"""Test IO with .lout files."""
tempdir = _TempDir()
layout = read_layout('Vectorview-all', scale=False)
layout.save(op.join(tempdir, 'foobar.lout'))
layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
print(layout) # test repr
def test_io_layout_lay():
"""Test IO with .lay files."""
tempdir = _TempDir()
layout = read_layout('CTF151', scale=False)
layout.save(op.join(tempdir, 'foobar.lay'))
layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
def test_auto_topomap_coords():
"""Test mapping of coordinates in 3D space to 2D."""
info = read_info(fif_fname)
picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
# Remove extra digitization point, so EEG digitization points match up
# with the EEG channels
del info['dig'][85]
# Remove head origin from channel locations, so mapping with digitization
# points yields the same result
dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
FIFF.FIFFV_POINT_EEG,
FIFF.FIFFV_POINT_EXTRA)
_, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds, units='m')
for ch in info['chs']:
ch['loc'][:3] -= origin_head
# Use channel locations
l0 = _auto_topomap_coords(info, picks)
# Remove electrode position information, use digitization points from now
# on.
for ch in info['chs']:
ch['loc'].fill(np.nan)
l1 = _auto_topomap_coords(info, picks)
assert_allclose(l1, l0, atol=1e-3)
# Test plotting mag topomap without channel locations: it should fail
mag_picks = pick_types(info, meg='mag')
pytest.raises(ValueError, _auto_topomap_coords, info, mag_picks)
# Test function with too many EEG digitization points: it should fail
info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
pytest.raises(ValueError, _auto_topomap_coords, info, picks)
# Test function with too little EEG digitization points: it should fail
info['dig'] = info['dig'][:-2]
pytest.raises(ValueError, _auto_topomap_coords, info, picks)
# Electrode positions must be unique
info['dig'].append(info['dig'][-1])
pytest.raises(ValueError, _auto_topomap_coords, info, picks)
# Test function without EEG digitization points: it should fail
info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)
# Test function without any digitization points, it should fail
info['dig'] = None
pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)
info['dig'] = []
pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)
def test_make_eeg_layout():
"""Test creation of EEG layout."""
tempdir = _TempDir()
tmp_name = 'foo'
lout_name = 'test_raw'
lout_orig = read_layout(kind=lout_name, path=lout_path)
info = read_info(fif_fname)
info['bads'].append(info['ch_names'][360])
layout = make_eeg_layout(info, exclude=[])
assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']
if ch.startswith('EE')]))
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
assert_array_equal(lout_new.kind, tmp_name)
assert_allclose(layout.pos, lout_new.pos, atol=0.1)
assert_array_equal(lout_orig.names, lout_new.names)
# Test input validation
pytest.raises(ValueError, make_eeg_layout, info, radius=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, radius=0.6)
pytest.raises(ValueError, make_eeg_layout, info, width=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, width=1.1)
pytest.raises(ValueError, make_eeg_layout, info, height=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, height=1.1)
def test_make_grid_layout():
"""Test creation of grid layout."""
tempdir = _TempDir()
tmp_name = 'bar'
lout_name = 'test_ica'
lout_orig = read_layout(kind=lout_name, path=lout_path)
layout = make_grid_layout(_get_test_info())
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir)
assert_array_equal(lout_new.kind, tmp_name)
assert_array_equal(lout_orig.pos, lout_new.pos)
assert_array_equal(lout_orig.names, lout_new.names)
# Test creating grid layout with specified number of columns
layout = make_grid_layout(_get_test_info(), n_col=2)
# Vertical positions should be equal
assert layout.pos[0, 1] == layout.pos[1, 1]
# Horizontal positions should be unequal
assert layout.pos[0, 0] != layout.pos[1, 0]
# Box sizes should be equal
assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
def test_find_layout():
"""Test finding layout."""
pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep')
sample_info = read_info(fif_fname)
grads = pick_types(sample_info, meg='grad')
sample_info2 = pick_info(sample_info, grads)
mags = pick_types(sample_info, meg='mag')
sample_info3 = pick_info(sample_info, mags)
# mock new convention
sample_info4 = copy.deepcopy(sample_info)
for ii, name in enumerate(sample_info4['ch_names']):
new = name.replace(' ', '')
sample_info4['chs'][ii]['ch_name'] = new
eegs = pick_types(sample_info, meg=False, eeg=True)
sample_info5 = pick_info(sample_info, eegs)
lout = find_layout(sample_info, ch_type=None)
assert lout.kind == 'Vectorview-all'
assert all(' ' in k for k in lout.names)
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
# test new vector-view
lout = find_layout(sample_info4, ch_type=None)
assert_equal(lout.kind, 'Vectorview-all')
assert all(' ' not in k for k in lout.names)
lout = find_layout(sample_info, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2)
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3)
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5)
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
# no common layout, 'meg' option not supported
lout = find_layout(read_info(fname_ctf_raw))
assert_equal(lout.kind, 'CTF-275')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
lout = find_layout(read_info(fname_bti_raw))
assert_equal(lout.kind, 'magnesWH3600')
raw_kit = read_raw_kit(fname_kit_157)
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
raw_kit.info['bads'] = ['MEG 13', 'MEG 14', 'MEG 15', 'MEG 16']
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
raw_umd = read_raw_kit(fname_kit_umd)
lout = find_layout(raw_umd.info)
assert_equal(lout.kind, 'KIT-UMD-3')
# Test plotting
lout.plot()
lout.plot(picks=np.arange(10))
plt.close('all')
def test_box_size():
"""Test calculation of box sizes."""
# No points. Box size should be 1,1.
assert_allclose(_box_size([]), (1.0, 1.0))
# Create one point. Box size should be 1,1.
point = [(0, 0)]
assert_allclose(_box_size(point), (1.0, 1.0))
# Create two points. Box size should be 0.5,1.
points = [(0.25, 0.5), (0.75, 0.5)]
assert_allclose(_box_size(points), (0.5, 1.0))
# Create three points. Box size should be (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points), (0.5, 0.5))
# Create a grid of points. Box size should be (0.1, 0.1).
x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
x, y = x.ravel(), y.ravel()
assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
# Create a random set of points. This should never break the function.
rng = np.random.RandomState(42)
points = rng.rand(100, 2)
width, height = _box_size(points)
assert width is not None
assert height is not None
# Test specifying an existing width.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
# Test specifying an existing width that has influence on the calculated
# height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
# Test specifying an existing height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
# Test specifying an existing height that has influence on the calculated
# width.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
# Test specifying both width and height. The function should simply return
# these.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
# Test specifying a width that will cause unfixable horizontal overlap and
# essentially breaks the function (height will be 0).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=1), (1, 0))
# Test adding some padding.
# Create three points. Box size should be a little less than (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
def test_generate_2d_layout():
"""Test creation of a layout from 2d points."""
snobg = 10
sbg = 15
side = range(snobg)
bg_image = np.random.RandomState(42).randn(sbg, sbg)
w, h = [.2, .5]
# Generate fake data
xy = np.array([(i, j) for i in side for j in side])
lt = generate_2d_layout(xy, w=w, h=h)
# Correct points ordering / minmaxing
comp_1, comp_2 = [(5, 0), (7, 0)]
assert lt.pos[:, :2].max() == 1
assert lt.pos[:, :2].min() == 0
with np.errstate(invalid='ignore'): # divide by zero
assert_allclose(xy[comp_2] / float(xy[comp_1]),
lt.pos[comp_2] / float(lt.pos[comp_1]))
assert_allclose(lt.pos[0, [2, 3]], [w, h])
# Correct number elements
assert lt.pos.shape[1] == 4
assert len(lt.box) == 4
# Make sure background image normalizing is correct
lt_bg = generate_2d_layout(xy, bg_image=bg_image)
assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))
run_tests_if_main()
| 38.245856 | 79 | 0.656555 |
import copy
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
find_layout)
from mne.channels.layout import (_box_size, _auto_topomap_coords,
generate_2d_layout)
from mne.utils import run_tests_if_main
from mne import pick_types, pick_info
from mne.io import read_raw_kit, _empty_info, read_info
from mne.io.constants import FIFF
from mne.bem import fit_sphere_to_headshape
from mne.utils import _TempDir
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')
lout_path = op.join(io_dir, 'tests', 'data')
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
fname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')
def _get_test_info():
test_info = _empty_info(1000)
loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
dtype=np.float32)
test_info['chs'] = [
{'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_Frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1,
'unit': -1, 'unit_mul': 0},
{'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_Frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2,
'unit': -1, 'unit_mul': 0},
{'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1,
'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61,
'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}]
test_info._update_redundant()
test_info._check_consistency()
return test_info
def test_io_layout_lout():
tempdir = _TempDir()
layout = read_layout('Vectorview-all', scale=False)
layout.save(op.join(tempdir, 'foobar.lout'))
layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
print(layout)
def test_io_layout_lay():
tempdir = _TempDir()
layout = read_layout('CTF151', scale=False)
layout.save(op.join(tempdir, 'foobar.lay'))
layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
def test_auto_topomap_coords():
info = read_info(fif_fname)
picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
del info['dig'][85]
dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
FIFF.FIFFV_POINT_EEG,
FIFF.FIFFV_POINT_EXTRA)
_, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds, units='m')
for ch in info['chs']:
ch['loc'][:3] -= origin_head
l0 = _auto_topomap_coords(info, picks)
for ch in info['chs']:
ch['loc'].fill(np.nan)
l1 = _auto_topomap_coords(info, picks)
assert_allclose(l1, l0, atol=1e-3)
mag_picks = pick_types(info, meg='mag')
pytest.raises(ValueError, _auto_topomap_coords, info, mag_picks)
info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
pytest.raises(ValueError, _auto_topomap_coords, info, picks)
info['dig'] = info['dig'][:-2]
pytest.raises(ValueError, _auto_topomap_coords, info, picks)
info['dig'].append(info['dig'][-1])
pytest.raises(ValueError, _auto_topomap_coords, info, picks)
info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)
info['dig'] = None
pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)
info['dig'] = []
pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)
def test_make_eeg_layout():
tempdir = _TempDir()
tmp_name = 'foo'
lout_name = 'test_raw'
lout_orig = read_layout(kind=lout_name, path=lout_path)
info = read_info(fif_fname)
info['bads'].append(info['ch_names'][360])
layout = make_eeg_layout(info, exclude=[])
assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']
if ch.startswith('EE')]))
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
assert_array_equal(lout_new.kind, tmp_name)
assert_allclose(layout.pos, lout_new.pos, atol=0.1)
assert_array_equal(lout_orig.names, lout_new.names)
pytest.raises(ValueError, make_eeg_layout, info, radius=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, radius=0.6)
pytest.raises(ValueError, make_eeg_layout, info, width=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, width=1.1)
pytest.raises(ValueError, make_eeg_layout, info, height=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, height=1.1)
def test_make_grid_layout():
tempdir = _TempDir()
tmp_name = 'bar'
lout_name = 'test_ica'
lout_orig = read_layout(kind=lout_name, path=lout_path)
layout = make_grid_layout(_get_test_info())
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir)
assert_array_equal(lout_new.kind, tmp_name)
assert_array_equal(lout_orig.pos, lout_new.pos)
assert_array_equal(lout_orig.names, lout_new.names)
layout = make_grid_layout(_get_test_info(), n_col=2)
assert layout.pos[0, 1] == layout.pos[1, 1]
assert layout.pos[0, 0] != layout.pos[1, 0]
assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
def test_find_layout():
pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep')
sample_info = read_info(fif_fname)
grads = pick_types(sample_info, meg='grad')
sample_info2 = pick_info(sample_info, grads)
mags = pick_types(sample_info, meg='mag')
sample_info3 = pick_info(sample_info, mags)
sample_info4 = copy.deepcopy(sample_info)
for ii, name in enumerate(sample_info4['ch_names']):
new = name.replace(' ', '')
sample_info4['chs'][ii]['ch_name'] = new
eegs = pick_types(sample_info, meg=False, eeg=True)
sample_info5 = pick_info(sample_info, eegs)
lout = find_layout(sample_info, ch_type=None)
assert lout.kind == 'Vectorview-all'
assert all(' ' in k for k in lout.names)
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info4, ch_type=None)
assert_equal(lout.kind, 'Vectorview-all')
assert all(' ' not in k for k in lout.names)
lout = find_layout(sample_info, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2)
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3)
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5)
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
lout = find_layout(read_info(fname_ctf_raw))
assert_equal(lout.kind, 'CTF-275')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
lout = find_layout(read_info(fname_bti_raw))
assert_equal(lout.kind, 'magnesWH3600')
raw_kit = read_raw_kit(fname_kit_157)
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
raw_kit.info['bads'] = ['MEG 13', 'MEG 14', 'MEG 15', 'MEG 16']
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
raw_umd = read_raw_kit(fname_kit_umd)
lout = find_layout(raw_umd.info)
assert_equal(lout.kind, 'KIT-UMD-3')
lout.plot()
lout.plot(picks=np.arange(10))
plt.close('all')
def test_box_size():
assert_allclose(_box_size([]), (1.0, 1.0))
point = [(0, 0)]
assert_allclose(_box_size(point), (1.0, 1.0))
points = [(0.25, 0.5), (0.75, 0.5)]
assert_allclose(_box_size(points), (0.5, 1.0))
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points), (0.5, 0.5))
x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
x, y = x.ravel(), y.ravel()
assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
rng = np.random.RandomState(42)
points = rng.rand(100, 2)
width, height = _box_size(points)
assert width is not None
assert height is not None
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=1), (1, 0))
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
def test_generate_2d_layout():
snobg = 10
sbg = 15
side = range(snobg)
bg_image = np.random.RandomState(42).randn(sbg, sbg)
w, h = [.2, .5]
xy = np.array([(i, j) for i in side for j in side])
lt = generate_2d_layout(xy, w=w, h=h)
comp_1, comp_2 = [(5, 0), (7, 0)]
assert lt.pos[:, :2].max() == 1
assert lt.pos[:, :2].min() == 0
with np.errstate(invalid='ignore'):
assert_allclose(xy[comp_2] / float(xy[comp_1]),
lt.pos[comp_2] / float(lt.pos[comp_1]))
assert_allclose(lt.pos[0, [2, 3]], [w, h])
assert lt.pos.shape[1] == 4
assert len(lt.box) == 4
lt_bg = generate_2d_layout(xy, bg_image=bg_image)
assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))
run_tests_if_main()
| true | true |
1c481d4fefb3cdfdf7173e935ffef9744efd41a5 | 1,625 | py | Python | eppy/tests/EPlusInterfaceFunctions_tests/integration.py | hnagda/eppy | 422399ada78eb9f39ae61f96b385fe41a0a19100 | [
"MIT"
] | 116 | 2015-04-07T13:58:34.000Z | 2022-02-23T15:52:35.000Z | eppy/tests/EPlusInterfaceFunctions_tests/integration.py | hnagda/eppy | 422399ada78eb9f39ae61f96b385fe41a0a19100 | [
"MIT"
] | 324 | 2015-01-01T04:10:46.000Z | 2022-03-16T16:28:13.000Z | eppy/tests/EPlusInterfaceFunctions_tests/integration.py | hnagda/eppy | 422399ada78eb9f39ae61f96b385fe41a0a19100 | [
"MIT"
] | 65 | 2015-07-14T21:57:02.000Z | 2022-02-14T08:39:02.000Z | # Copyright (c) 2016 Santosh Philip
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
"""integration tests for EPlusInterfaceFunctions"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import eppy.EPlusInterfaceFunctions.iddgroups as iddgroups
def test_idd2group():
"""py.test for idd2group"""
data = (
(
"./eppy/tests/EPlusInterfaceFunctions_tests/integration/iddgroups.idd",
{
"G2": ["VersionG", "VersionG1", "VersionG2"],
"G1": ["Version", "Version1", "Version2"],
None: ["Lead Input", "Simulation Data"],
},
), # gdict
)
for fname, gdict in data:
result = iddgroups.idd2group(fname)
assert result == gdict
def test_idd2grouplist():
"""py.test idd2grouplist"""
data = (
(
"./eppy/tests/EPlusInterfaceFunctions_tests/integration/iddgroups.idd",
[
(None, "Lead Input"),
(None, "Simulation Data"),
("G1", "Version"),
("G1", "Version1"),
("G1", "Version2"),
("G2", "VersionG"),
("G2", "VersionG1"),
("G2", "VersionG2"),
],
), # glist
)
for fname, glist in data:
result = iddgroups.idd2grouplist(fname)
assert result == glist
| 30.092593 | 83 | 0.533538 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import eppy.EPlusInterfaceFunctions.iddgroups as iddgroups
def test_idd2group():
data = (
(
"./eppy/tests/EPlusInterfaceFunctions_tests/integration/iddgroups.idd",
{
"G2": ["VersionG", "VersionG1", "VersionG2"],
"G1": ["Version", "Version1", "Version2"],
None: ["Lead Input", "Simulation Data"],
},
),
)
for fname, gdict in data:
result = iddgroups.idd2group(fname)
assert result == gdict
def test_idd2grouplist():
data = (
(
"./eppy/tests/EPlusInterfaceFunctions_tests/integration/iddgroups.idd",
[
(None, "Lead Input"),
(None, "Simulation Data"),
("G1", "Version"),
("G1", "Version1"),
("G1", "Version2"),
("G2", "VersionG"),
("G2", "VersionG1"),
("G2", "VersionG2"),
],
),
)
for fname, glist in data:
result = iddgroups.idd2grouplist(fname)
assert result == glist
| true | true |
1c481da32dcbefeebfd9b534a8ab1eb08416bc0c | 1,059 | py | Python | datasets/charades.py | rohitgirdhar/ActionVLAD | 0d87b69d3069db3fe521923675e353f755c5d765 | [
"Apache-2.0"
] | 228 | 2017-05-08T04:50:49.000Z | 2021-12-23T10:57:52.000Z | datasets/charades.py | icyzhang0923/ActionVLAD | 08d3d65301940991e0a0cdca2c0534edf6749f41 | [
"Apache-2.0"
] | 39 | 2017-05-12T05:23:46.000Z | 2021-03-21T03:40:02.000Z | datasets/charades.py | icyzhang0923/ActionVLAD | 08d3d65301940991e0a0cdca2c0534edf6749f41 | [
"Apache-2.0"
] | 84 | 2017-05-08T01:11:25.000Z | 2021-06-03T09:37:36.000Z | # ------------------------------------------------------------------------------
# ActionVLAD: Learning spatio-temporal aggregation for action classification
# Copyright (c) 2017 Carnegie Mellon University and Adobe Systems Incorporated
# Please see LICENSE on https://github.com/rohitgirdhar/ActionVLAD/ for details
# ------------------------------------------------------------------------------
"""Provides data for the Charades dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets.video_data_utils import gen_dataset
def get_split(split_name, dataset_dir, dataset_list_dir='', file_pattern=None,
reader=None, modality='rgb', num_samples=1,
split_id=1):
_NUM_CLASSES = 157
_LIST_FN = lambda split, id: \
'%s/%s_split%d.txt' % (dataset_list_dir, split, id)
return gen_dataset(split_name, dataset_dir, file_pattern,
reader, modality, num_samples, split_id,
_NUM_CLASSES, _LIST_FN)
| 39.222222 | 80 | 0.614731 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets.video_data_utils import gen_dataset
def get_split(split_name, dataset_dir, dataset_list_dir='', file_pattern=None,
reader=None, modality='rgb', num_samples=1,
split_id=1):
_NUM_CLASSES = 157
_LIST_FN = lambda split, id: \
'%s/%s_split%d.txt' % (dataset_list_dir, split, id)
return gen_dataset(split_name, dataset_dir, file_pattern,
reader, modality, num_samples, split_id,
_NUM_CLASSES, _LIST_FN)
| true | true |
1c481dc560ad3b371654f3a94b66f2aba9d8f20a | 7,385 | py | Python | python/ccxt/yobit.py | yevsev/ccxt | 7200521a005a2ddc23efe7bd003628b8e8b955dd | [
"MIT"
] | null | null | null | python/ccxt/yobit.py | yevsev/ccxt | 7200521a005a2ddc23efe7bd003628b8e8b955dd | [
"MIT"
] | null | null | null | python/ccxt/yobit.py | yevsev/ccxt | 7200521a005a2ddc23efe7bd003628b8e8b955dd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.liqui import liqui
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import DDoSProtection
class yobit (liqui):
def describe(self):
return self.deep_extend(super(yobit, self).describe(), {
'id': 'yobit',
'name': 'YoBit',
'countries': 'RU',
'rateLimit': 3000, # responses are cached every 2 seconds
'version': '3',
'has': {
'createDepositAddress': True,
'fetchDepositAddress': True,
'CORS': False,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766910-cdcbfdae-5eea-11e7-9859-03fea873272d.jpg',
'api': {
'public': 'https://yobit.net/api',
'private': 'https://yobit.net/tapi',
},
'www': 'https://www.yobit.net',
'doc': 'https://www.yobit.net/en/api/',
'fees': 'https://www.yobit.net/en/fees/',
},
'api': {
'public': {
'get': [
'depth/{pair}',
'info',
'ticker/{pair}',
'trades/{pair}',
],
},
'private': {
'post': [
'ActiveOrders',
'CancelOrder',
'GetDepositAddress',
'getInfo',
'OrderInfo',
'Trade',
'TradeHistory',
'WithdrawCoinsToAddress',
],
},
},
'fees': {
'trading': {
'maker': 0.002,
'taker': 0.002,
},
'funding': {
'withdraw': {},
},
},
'commonCurrencies': {
'AIR': 'AirCoin',
'ANI': 'ANICoin',
'ANT': 'AntsCoin',
'AST': 'Astral',
'ATM': 'Autumncoin',
'BCC': 'BCH',
'BCS': 'BitcoinStake',
'BLN': 'Bulleon',
'BTS': 'Bitshares2',
'CAT': 'BitClave',
'COV': 'Coven Coin',
'CPC': 'Capricoin',
'CS': 'CryptoSpots',
'DCT': 'Discount',
'DGD': 'DarkGoldCoin',
'DROP': 'FaucetCoin',
'ERT': 'Eristica Token',
'ICN': 'iCoin',
'KNC': 'KingN Coin',
'LIZI': 'LiZi',
'LOC': 'LocoCoin',
'LOCX': 'LOC',
'LUN': 'LunarCoin',
'MDT': 'Midnight',
'NAV': 'NavajoCoin',
'OMG': 'OMGame',
'STK': 'StakeCoin',
'PAY': 'EPAY',
'PLC': 'Platin Coin',
'REP': 'Republicoin',
'RUR': 'RUB',
'XIN': 'XINCoin',
},
'options': {
'fetchOrdersRequiresSymbol': True,
},
})
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'closed',
'2': 'canceled',
'3': 'open', # or partially-filled and closed? https://github.com/ccxt/ccxt/issues/1594
}
if status in statuses:
return statuses[status]
return status
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGetInfo()
balances = response['return']
result = {'info': balances}
sides = {'free': 'funds', 'total': 'funds_incl_orders'}
keys = list(sides.keys())
for i in range(0, len(keys)):
key = keys[i]
side = sides[key]
if side in balances:
currencies = list(balances[side].keys())
for j in range(0, len(currencies)):
lowercase = currencies[j]
uppercase = lowercase.upper()
currency = self.common_currency_code(uppercase)
account = None
if currency in result:
account = result[currency]
else:
account = self.account()
account[key] = balances[side][lowercase]
if account['total'] and account['free']:
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
def create_deposit_address(self, code, params={}):
response = self.fetch_deposit_address(code, self.extend({
'need_new': 1,
}, params))
address = self.safe_string(response, 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'status': 'ok',
'info': response['info'],
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'coinName': currency['id'],
'need_new': 0,
}
response = self.privatePostGetDepositAddress(self.extend(request, params))
address = self.safe_string(response['return'], 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'status': 'ok',
'info': response,
}
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
currency = self.currency(code)
self.load_markets()
response = self.privatePostWithdrawCoinsToAddress(self.extend({
'coinName': currency['id'],
'amount': amount,
'address': address,
}, params))
return {
'info': response,
'id': None,
}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'success' in response:
if not response['success']:
if response['error'].find('Insufficient funds') >= 0: # not enougTh is a typo inside Liqui's own API...
raise InsufficientFunds(self.id + ' ' + self.json(response))
elif response['error'] == 'Requests too often':
raise DDoSProtection(self.id + ' ' + self.json(response))
elif (response['error'] == 'not available') or (response['error'] == 'external service unavailable'):
raise DDoSProtection(self.id + ' ' + self.json(response))
else:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 36.559406 | 126 | 0.448206 |
t liqui
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import DDoSProtection
class yobit (liqui):
def describe(self):
return self.deep_extend(super(yobit, self).describe(), {
'id': 'yobit',
'name': 'YoBit',
'countries': 'RU',
'rateLimit': 3000,
'version': '3',
'has': {
'createDepositAddress': True,
'fetchDepositAddress': True,
'CORS': False,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766910-cdcbfdae-5eea-11e7-9859-03fea873272d.jpg',
'api': {
'public': 'https://yobit.net/api',
'private': 'https://yobit.net/tapi',
},
'www': 'https://www.yobit.net',
'doc': 'https://www.yobit.net/en/api/',
'fees': 'https://www.yobit.net/en/fees/',
},
'api': {
'public': {
'get': [
'depth/{pair}',
'info',
'ticker/{pair}',
'trades/{pair}',
],
},
'private': {
'post': [
'ActiveOrders',
'CancelOrder',
'GetDepositAddress',
'getInfo',
'OrderInfo',
'Trade',
'TradeHistory',
'WithdrawCoinsToAddress',
],
},
},
'fees': {
'trading': {
'maker': 0.002,
'taker': 0.002,
},
'funding': {
'withdraw': {},
},
},
'commonCurrencies': {
'AIR': 'AirCoin',
'ANI': 'ANICoin',
'ANT': 'AntsCoin',
'AST': 'Astral',
'ATM': 'Autumncoin',
'BCC': 'BCH',
'BCS': 'BitcoinStake',
'BLN': 'Bulleon',
'BTS': 'Bitshares2',
'CAT': 'BitClave',
'COV': 'Coven Coin',
'CPC': 'Capricoin',
'CS': 'CryptoSpots',
'DCT': 'Discount',
'DGD': 'DarkGoldCoin',
'DROP': 'FaucetCoin',
'ERT': 'Eristica Token',
'ICN': 'iCoin',
'KNC': 'KingN Coin',
'LIZI': 'LiZi',
'LOC': 'LocoCoin',
'LOCX': 'LOC',
'LUN': 'LunarCoin',
'MDT': 'Midnight',
'NAV': 'NavajoCoin',
'OMG': 'OMGame',
'STK': 'StakeCoin',
'PAY': 'EPAY',
'PLC': 'Platin Coin',
'REP': 'Republicoin',
'RUR': 'RUB',
'XIN': 'XINCoin',
},
'options': {
'fetchOrdersRequiresSymbol': True,
},
})
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'closed',
'2': 'canceled',
'3': 'open',
}
if status in statuses:
return statuses[status]
return status
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGetInfo()
balances = response['return']
result = {'info': balances}
sides = {'free': 'funds', 'total': 'funds_incl_orders'}
keys = list(sides.keys())
for i in range(0, len(keys)):
key = keys[i]
side = sides[key]
if side in balances:
currencies = list(balances[side].keys())
for j in range(0, len(currencies)):
lowercase = currencies[j]
uppercase = lowercase.upper()
currency = self.common_currency_code(uppercase)
account = None
if currency in result:
account = result[currency]
else:
account = self.account()
account[key] = balances[side][lowercase]
if account['total'] and account['free']:
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
def create_deposit_address(self, code, params={}):
response = self.fetch_deposit_address(code, self.extend({
'need_new': 1,
}, params))
address = self.safe_string(response, 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'status': 'ok',
'info': response['info'],
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'coinName': currency['id'],
'need_new': 0,
}
response = self.privatePostGetDepositAddress(self.extend(request, params))
address = self.safe_string(response['return'], 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'status': 'ok',
'info': response,
}
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
currency = self.currency(code)
self.load_markets()
response = self.privatePostWithdrawCoinsToAddress(self.extend({
'coinName': currency['id'],
'amount': amount,
'address': address,
}, params))
return {
'info': response,
'id': None,
}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'success' in response:
if not response['success']:
if response['error'].find('Insufficient funds') >= 0:
raise InsufficientFunds(self.id + ' ' + self.json(response))
elif response['error'] == 'Requests too often':
raise DDoSProtection(self.id + ' ' + self.json(response))
elif (response['error'] == 'not available') or (response['error'] == 'external service unavailable'):
raise DDoSProtection(self.id + ' ' + self.json(response))
else:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| true | true |
1c481e042f3cc47722722f39903e65876d4a4f00 | 979 | py | Python | pyleecan/Methods/Mesh/ScalarProductL2/scalar_product.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Methods/Mesh/ScalarProductL2/scalar_product.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Methods/Mesh/ScalarProductL2/scalar_product.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | # -*- coding: utf-8 -*-
import numpy as np
def scalar_product(self, funca, funcb, detJ, weights, nb_gauss_points):
"""Scalar product of shape functions with L2 gauss integration
Parameters
----------
self : ScalarProductL2
a ScalarProductL2 object
funca : ndarray
vertice of the cell
nba : ndarray
coordinates of a point
funcb : ndarray
vertice of the cell
nbb : ndarray
coordinates of a point
detJ : ndarray
jacobian determinant evaluated for each gauss point
weights : ndarray
gauss weights
nb_gauss_points : int
number of gauss points
Returns
-------
l2_scal : ndarray
a L2 scalar product
"""
func_a_w_dJ = np.zeros(funca.shape)
for i in range(nb_gauss_points):
func_a_w_dJ[i, :] = funca[i, :] * weights[i] * detJ[i]
l2_scal_mat = np.squeeze(np.tensordot(func_a_w_dJ, funcb, axes=([0], [0])))
return l2_scal_mat
| 23.878049 | 79 | 0.622063 |
import numpy as np
def scalar_product(self, funca, funcb, detJ, weights, nb_gauss_points):
func_a_w_dJ = np.zeros(funca.shape)
for i in range(nb_gauss_points):
func_a_w_dJ[i, :] = funca[i, :] * weights[i] * detJ[i]
l2_scal_mat = np.squeeze(np.tensordot(func_a_w_dJ, funcb, axes=([0], [0])))
return l2_scal_mat
| true | true |
1c481e4bd46ecaaf5421792b882173bf343bf1cb | 12,093 | py | Python | tests/python/pants_test/backend/project_info/tasks/test_export_integration.py | sammy-1234/pants | 889016952a248cf229c78c014d9f6c95422d98b8 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/project_info/tasks/test_export_integration.py | sammy-1234/pants | 889016952a248cf229c78c014d9f6c95422d98b8 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/project_info/tasks/test_export_integration.py | sammy-1234/pants | 889016952a248cf229c78c014d9f6c95422d98b8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import os
import re
import subprocess
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.build_graph.intermediate_target_factory import hash_target
from pants_test.backend.project_info.tasks.resolve_jars_test_mixin import ResolveJarsTestMixin
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_resolver
class ExportIntegrationTest(ResolveJarsTestMixin, PantsRunIntegrationTest):
_confs_args = [
'--export-libraries-sources',
'--export-libraries-javadocs',
]
def run_export(self, test_target, workdir, load_libs=False, only_default=False, extra_args=None):
"""Runs ./pants export ... and returns its json output.
:param string|list test_target: spec of the targets to run on.
:param string workdir: working directory to run pants with.
:param bool load_libs: whether to load external libraries (of any conf).
:param bool only_default: if loading libraries, whether to only resolve the default conf, or to
additionally resolve sources and javadocs.
:param list extra_args: list of extra arguments for the pants invocation.
:return: the json output of the console task.
:rtype: dict
"""
export_out_file = os.path.join(workdir, 'export_out.txt')
args = ['export',
'--output-file={out_file}'.format(out_file=export_out_file)] + maybe_list(test_target)
libs_args = ['--no-export-libraries'] if not load_libs else self._confs_args
if load_libs and only_default:
libs_args = []
pants_run = self.run_pants_with_workdir(args + libs_args + (extra_args or []), workdir)
self.assert_success(pants_run)
self.assertTrue(os.path.exists(export_out_file),
msg='Could not find export output file in {out_file}'
.format(out_file=export_out_file))
with open(export_out_file, 'r') as json_file:
json_data = json.load(json_file)
if not load_libs:
self.assertIsNone(json_data.get('libraries'))
return json_data
def evaluate_subtask(self, targets, workdir, load_extra_confs, extra_args, expected_jars):
json_data = self.run_export(targets, workdir, load_libs=True, only_default=not load_extra_confs,
extra_args=extra_args)
for jar in expected_jars:
self.assertIn(jar, json_data['libraries'])
for path in json_data['libraries'][jar].values():
self.assertTrue(os.path.exists(path), 'Expected jar at {} to actually exist.'.format(path))
@ensure_resolver
def test_export_code_gen(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
thrift_target_name = ('examples.src.thrift.org.pantsbuild.example.precipitation'
'.precipitation-java')
codegen_target_regex = os.path.join(os.path.relpath(workdir, get_buildroot()),
'gen/thrift-java/[^/]*/[^/:]*/[^/:]*:{0}'.format(thrift_target_name))
p = re.compile(codegen_target_regex)
self.assertTrue(any(p.match(target) for target in json_data.get('targets').keys()))
@ensure_resolver
def test_export_json_transitive_jar(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
targets = json_data.get('targets')
self.assertIn('org.hamcrest:hamcrest-core:1.3', targets[test_target]['libraries'])
@ensure_resolver
def test_export_jar_path_with_excludes(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:foo'
json_data = self.run_export(test_target, workdir, load_libs=True)
self.assertIsNone(json_data
.get('libraries')
.get('com.typesafe.sbt:incremental-compiler:0.13.7'))
foo_target = (json_data
.get('targets')
.get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
@ensure_resolver
def test_export_jar_path_with_excludes_soft(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:'
json_data = self.run_export(test_target,
workdir,
load_libs=True,
extra_args=['--export-soft-excludes'])
self.assertIsNotNone(json_data
.get('libraries')
.get('com.martiansoftware:nailgun-server:0.9.1'))
self.assertIsNotNone(json_data.get('libraries').get('org.pantsbuild:jmake:1.3.8-10'))
foo_target = (json_data
.get('targets')
.get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
self.assertTrue('org.pantsbuild' in foo_target.get('excludes'))
@ensure_resolver
def test_export_jar_path(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
common_lang_lib_info = json_data.get('libraries').get('junit:junit:4.12')
self.assertIsNotNone(common_lang_lib_info)
self.assertIn(
'junit-4.12.jar',
common_lang_lib_info.get('default')
)
self.assertIn(
'junit-4.12-javadoc.jar',
common_lang_lib_info.get('javadoc')
)
self.assertIn(
'junit-4.12-sources.jar',
common_lang_lib_info.get('sources')
)
@ensure_resolver
def test_dep_map_for_java_sources(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
json_data = self.run_export(test_target, workdir)
targets = json_data.get('targets')
self.assertIn('examples/src/java/org/pantsbuild/example/java_sources:java_sources', targets)
@ensure_resolver
def test_sources_and_javadocs(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/scala/org/pantsbuild/testproject/unicode/shapeless'
json_data = self.run_export(test_target, workdir, load_libs=True)
shapeless_lib = json_data.get('libraries').get('com.chuusai:shapeless_2.12:2.3.2')
self.assertIsNotNone(shapeless_lib)
self.assertIsNotNone(shapeless_lib['default'])
self.assertIsNotNone(shapeless_lib['sources'])
self.assertIsNotNone(shapeless_lib['javadoc'])
@ensure_resolver
def test_classifiers(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/tests/java/org/pantsbuild/testproject/ivyclassifier:ivyclassifier'
json_data = self.run_export(test_target, workdir, load_libs=True)
avro_lib_info = json_data.get('libraries').get('org.apache.avro:avro:1.7.7')
self.assertIsNotNone(avro_lib_info)
self.assertIn(
'avro-1.7.7.jar',
avro_lib_info.get('default'),
)
self.assertIn(
'avro-1.7.7-tests.jar',
avro_lib_info.get('tests'),
)
self.assertIn(
'avro-1.7.7-javadoc.jar',
avro_lib_info.get('javadoc'),
)
self.assertIn(
'avro-1.7.7-sources.jar',
avro_lib_info.get('sources'),
)
@ensure_resolver
def test_distributions_and_platforms(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/src/java/org/pantsbuild/example/hello/simple'
json_data = self.run_export(test_target, workdir, load_libs=False, extra_args=[
'--jvm-platform-default-platform=java7',
'--jvm-platform-platforms={'
' "java7": {"source": "1.7", "target": "1.7", "args": [ "-X123" ]},'
' "java8": {"source": "1.8", "target": "1.8", "args": [ "-X456" ]}'
'}',
'--jvm-distributions-paths={'
' "macos": [ "/Library/JDK" ],'
' "linux": [ "/usr/lib/jdk7", "/usr/lib/jdk8"]'
'}'
])
self.assertFalse('python_setup' in json_data)
target_name = 'examples/src/java/org/pantsbuild/example/hello/simple:simple'
targets = json_data.get('targets')
self.assertEqual('java7', targets[target_name]['platform'])
self.assertEqual(
{
'default_platform' : 'java7',
'platforms': {
'java7': {
'source_level': '1.7',
'args': ['-X123'],
'target_level': '1.7'},
'java8': {
'source_level': '1.8',
'args': ['-X456'],
'target_level': '1.8'},
}
},
json_data['jvm_platforms'])
@ensure_resolver
def test_test_platform(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight-test-platform'
json_data = self.run_export(test_target, workdir)
self.assertEqual('java7', json_data['targets'][test_target]['platform'])
self.assertEqual('java8', json_data['targets'][test_target]['test_platform'])
@ensure_resolver
def test_intellij_integration(self):
with self.temporary_workdir() as workdir:
exported_file = os.path.join(workdir, "export_file.json")
p = subprocess.Popen(['build-support/pants-intellij.sh', '--export-output-file=' + exported_file],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
self.assertEqual(p.returncode, 0)
with open(exported_file, 'r') as data_file:
json_data = json.load(data_file)
python_setup = json_data['python_setup']
self.assertIsNotNone(python_setup)
self.assertIsNotNone(python_setup['interpreters'])
default_interpreter = python_setup['default_interpreter']
self.assertIsNotNone(default_interpreter)
self.assertIsNotNone(python_setup['interpreters'][default_interpreter])
self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['binary']))
self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['chroot']))
python_target = json_data['targets']['src/python/pants/backend/python/targets:targets']
self.assertIsNotNone(python_target)
self.assertEqual(default_interpreter, python_target['python_interpreter'])
@ensure_resolver
def test_intransitive_and_scope(self):
with self.temporary_workdir() as workdir:
test_path = 'testprojects/maven_layout/provided_patching/one/src/main/java'
test_target = '{}:common'.format(test_path)
json_data = self.run_export(test_target, workdir)
h = hash_target('{}:shadow'.format(test_path), 'provided')
synthetic_target = '{}:shadow-unstable-provided-{}'.format(test_path, h)
self.assertEqual(False, json_data['targets'][synthetic_target]['transitive'])
self.assertEqual('compile test', json_data['targets'][synthetic_target]['scope'])
@ensure_resolver
def test_export_is_target_roots(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/::'
json_data = self.run_export(test_target, workdir, load_libs=False)
for target_address, attributes in json_data['targets'].items():
# Make sure all targets under `test_target`'s directory are target roots.
self.assertEqual(
attributes['is_target_root'],
target_address.startswith("examples/tests/java/org/pantsbuild/example")
)
| 45.633962 | 111 | 0.678326 |
import json
import os
import re
import subprocess
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.build_graph.intermediate_target_factory import hash_target
from pants_test.backend.project_info.tasks.resolve_jars_test_mixin import ResolveJarsTestMixin
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_resolver
class ExportIntegrationTest(ResolveJarsTestMixin, PantsRunIntegrationTest):
_confs_args = [
'--export-libraries-sources',
'--export-libraries-javadocs',
]
def run_export(self, test_target, workdir, load_libs=False, only_default=False, extra_args=None):
export_out_file = os.path.join(workdir, 'export_out.txt')
args = ['export',
'--output-file={out_file}'.format(out_file=export_out_file)] + maybe_list(test_target)
libs_args = ['--no-export-libraries'] if not load_libs else self._confs_args
if load_libs and only_default:
libs_args = []
pants_run = self.run_pants_with_workdir(args + libs_args + (extra_args or []), workdir)
self.assert_success(pants_run)
self.assertTrue(os.path.exists(export_out_file),
msg='Could not find export output file in {out_file}'
.format(out_file=export_out_file))
with open(export_out_file, 'r') as json_file:
json_data = json.load(json_file)
if not load_libs:
self.assertIsNone(json_data.get('libraries'))
return json_data
def evaluate_subtask(self, targets, workdir, load_extra_confs, extra_args, expected_jars):
json_data = self.run_export(targets, workdir, load_libs=True, only_default=not load_extra_confs,
extra_args=extra_args)
for jar in expected_jars:
self.assertIn(jar, json_data['libraries'])
for path in json_data['libraries'][jar].values():
self.assertTrue(os.path.exists(path), 'Expected jar at {} to actually exist.'.format(path))
@ensure_resolver
def test_export_code_gen(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
thrift_target_name = ('examples.src.thrift.org.pantsbuild.example.precipitation'
'.precipitation-java')
codegen_target_regex = os.path.join(os.path.relpath(workdir, get_buildroot()),
'gen/thrift-java/[^/]*/[^/:]*/[^/:]*:{0}'.format(thrift_target_name))
p = re.compile(codegen_target_regex)
self.assertTrue(any(p.match(target) for target in json_data.get('targets').keys()))
@ensure_resolver
def test_export_json_transitive_jar(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
targets = json_data.get('targets')
self.assertIn('org.hamcrest:hamcrest-core:1.3', targets[test_target]['libraries'])
@ensure_resolver
def test_export_jar_path_with_excludes(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:foo'
json_data = self.run_export(test_target, workdir, load_libs=True)
self.assertIsNone(json_data
.get('libraries')
.get('com.typesafe.sbt:incremental-compiler:0.13.7'))
foo_target = (json_data
.get('targets')
.get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
@ensure_resolver
def test_export_jar_path_with_excludes_soft(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:'
json_data = self.run_export(test_target,
workdir,
load_libs=True,
extra_args=['--export-soft-excludes'])
self.assertIsNotNone(json_data
.get('libraries')
.get('com.martiansoftware:nailgun-server:0.9.1'))
self.assertIsNotNone(json_data.get('libraries').get('org.pantsbuild:jmake:1.3.8-10'))
foo_target = (json_data
.get('targets')
.get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
self.assertTrue('org.pantsbuild' in foo_target.get('excludes'))
@ensure_resolver
def test_export_jar_path(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
json_data = self.run_export(test_target, workdir, load_libs=True)
common_lang_lib_info = json_data.get('libraries').get('junit:junit:4.12')
self.assertIsNotNone(common_lang_lib_info)
self.assertIn(
'junit-4.12.jar',
common_lang_lib_info.get('default')
)
self.assertIn(
'junit-4.12-javadoc.jar',
common_lang_lib_info.get('javadoc')
)
self.assertIn(
'junit-4.12-sources.jar',
common_lang_lib_info.get('sources')
)
@ensure_resolver
def test_dep_map_for_java_sources(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
json_data = self.run_export(test_target, workdir)
targets = json_data.get('targets')
self.assertIn('examples/src/java/org/pantsbuild/example/java_sources:java_sources', targets)
@ensure_resolver
def test_sources_and_javadocs(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/src/scala/org/pantsbuild/testproject/unicode/shapeless'
json_data = self.run_export(test_target, workdir, load_libs=True)
shapeless_lib = json_data.get('libraries').get('com.chuusai:shapeless_2.12:2.3.2')
self.assertIsNotNone(shapeless_lib)
self.assertIsNotNone(shapeless_lib['default'])
self.assertIsNotNone(shapeless_lib['sources'])
self.assertIsNotNone(shapeless_lib['javadoc'])
@ensure_resolver
def test_classifiers(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/tests/java/org/pantsbuild/testproject/ivyclassifier:ivyclassifier'
json_data = self.run_export(test_target, workdir, load_libs=True)
avro_lib_info = json_data.get('libraries').get('org.apache.avro:avro:1.7.7')
self.assertIsNotNone(avro_lib_info)
self.assertIn(
'avro-1.7.7.jar',
avro_lib_info.get('default'),
)
self.assertIn(
'avro-1.7.7-tests.jar',
avro_lib_info.get('tests'),
)
self.assertIn(
'avro-1.7.7-javadoc.jar',
avro_lib_info.get('javadoc'),
)
self.assertIn(
'avro-1.7.7-sources.jar',
avro_lib_info.get('sources'),
)
@ensure_resolver
def test_distributions_and_platforms(self):
with self.temporary_workdir() as workdir:
test_target = 'examples/src/java/org/pantsbuild/example/hello/simple'
json_data = self.run_export(test_target, workdir, load_libs=False, extra_args=[
'--jvm-platform-default-platform=java7',
'--jvm-platform-platforms={'
' "java7": {"source": "1.7", "target": "1.7", "args": [ "-X123" ]},'
' "java8": {"source": "1.8", "target": "1.8", "args": [ "-X456" ]}'
'}',
'--jvm-distributions-paths={'
' "macos": [ "/Library/JDK" ],'
' "linux": [ "/usr/lib/jdk7", "/usr/lib/jdk8"]'
'}'
])
self.assertFalse('python_setup' in json_data)
target_name = 'examples/src/java/org/pantsbuild/example/hello/simple:simple'
targets = json_data.get('targets')
self.assertEqual('java7', targets[target_name]['platform'])
self.assertEqual(
{
'default_platform' : 'java7',
'platforms': {
'java7': {
'source_level': '1.7',
'args': ['-X123'],
'target_level': '1.7'},
'java8': {
'source_level': '1.8',
'args': ['-X456'],
'target_level': '1.8'},
}
},
json_data['jvm_platforms'])
@ensure_resolver
def test_test_platform(self):
with self.temporary_workdir() as workdir:
test_target = 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight-test-platform'
json_data = self.run_export(test_target, workdir)
self.assertEqual('java7', json_data['targets'][test_target]['platform'])
self.assertEqual('java8', json_data['targets'][test_target]['test_platform'])
@ensure_resolver
def test_intellij_integration(self):
with self.temporary_workdir() as workdir:
exported_file = os.path.join(workdir, "export_file.json")
p = subprocess.Popen(['build-support/pants-intellij.sh', '--export-output-file=' + exported_file],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
self.assertEqual(p.returncode, 0)
with open(exported_file, 'r') as data_file:
json_data = json.load(data_file)
python_setup = json_data['python_setup']
self.assertIsNotNone(python_setup)
self.assertIsNotNone(python_setup['interpreters'])
default_interpreter = python_setup['default_interpreter']
self.assertIsNotNone(default_interpreter)
self.assertIsNotNone(python_setup['interpreters'][default_interpreter])
self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['binary']))
self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['chroot']))
python_target = json_data['targets']['src/python/pants/backend/python/targets:targets']
self.assertIsNotNone(python_target)
self.assertEqual(default_interpreter, python_target['python_interpreter'])
@ensure_resolver
def test_intransitive_and_scope(self):
with self.temporary_workdir() as workdir:
test_path = 'testprojects/maven_layout/provided_patching/one/src/main/java'
test_target = '{}:common'.format(test_path)
json_data = self.run_export(test_target, workdir)
h = hash_target('{}:shadow'.format(test_path), 'provided')
synthetic_target = '{}:shadow-unstable-provided-{}'.format(test_path, h)
self.assertEqual(False, json_data['targets'][synthetic_target]['transitive'])
self.assertEqual('compile test', json_data['targets'][synthetic_target]['scope'])
@ensure_resolver
def test_export_is_target_roots(self):
    """Only addresses under the requested spec are flagged as target roots."""
    with self.temporary_workdir() as workdir:
        spec = 'examples/tests/java/org/pantsbuild/example/::'
        export_json = self.run_export(spec, workdir, load_libs=False)
        root_prefix = "examples/tests/java/org/pantsbuild/example"
        for address, attributes in export_json['targets'].items():
            self.assertEqual(attributes['is_target_root'], address.startswith(root_prefix))
| true | true |
1c481e629927a83e32294493a0c2118b75b7d1f9 | 12,288 | py | Python | openpirates-api.py | dhagell/openpirates-api | 68e4d626c2fae15d5e95427302e2a97db306591c | [
"MIT"
] | null | null | null | openpirates-api.py | dhagell/openpirates-api | 68e4d626c2fae15d5e95427302e2a97db306591c | [
"MIT"
] | null | null | null | openpirates-api.py | dhagell/openpirates-api | 68e4d626c2fae15d5e95427302e2a97db306591c | [
"MIT"
] | null | null | null | from flask import Flask, abort
from flask import jsonify
from google.cloud import storage
from google.oauth2 import service_account
from PIL import Image
import os
import mimetypes
# GCS project/bucket that generated images are uploaded to; both environment
# variables are required (a missing one raises KeyError at import time).
GOOGLE_STORAGE_PROJECT = os.environ['GOOGLE_STORAGE_PROJECT']
GOOGLE_STORAGE_BUCKET = os.environ['GOOGLE_STORAGE_BUCKET']
# WSGI application object; all routes below attach to it.
app = Flask(__name__)
########################################################################
# Data
########################################################################
# opensea-pirates
# Trait pools: every pirate attribute is chosen deterministically as
# pool[token_id % len(pool)], so a given token id always yields the same
# metadata.
FIRST_NAMES = ['Herbie', 'Sprinkles', 'Boris', 'Dave', 'Randy', 'Captain']
LAST_NAMES = ['Starbelly', 'Fisherton', 'McCoy']
BASES = ['jellyfish', 'starfish', 'crab', 'narwhal', 'tealfish', 'goldfish']
EYES = ['big', 'joy', 'wink', 'sleepy', 'content']
MOUTH = ['happy', 'surprised', 'pleased', 'cute']
# NOTE: HAT/SWORD/GLYPHS/GREEKSYMBOL are not referenced by any route in this
# module (kept for future trait layers, presumably).
HAT = ['tricorn', 'bicorn', 'small', 'scarf','cap']
SWORD = ['cutlass', 'saber', 'scimitar', 'dagger']
GLYPHS = ['parrot', 'frog', 'snake']
GREEKSYMBOL = ['omega', 'alpha', 'beta']
# Numeric/string trait pools; display_type tags below control how OpenSea
# renders them (boost, percentage, plain number).
INT_ATTRIBUTES = [5, 2, 3, 4, 8]
FLOAT_ATTRIBUTES = [1.4, 2.3, 11.7, 90.2, 1.2]
STR_ATTRIBUTES = [
    'Happy',
    'Sad',
    'Sleepy',
    'Boring'
]
BOOST_ATTRIBUTES = [10, 40, 30]
PERCENT_BOOST_ATTRIBUTES = [5, 10, 15]
NUMBER_ATTRIBUTES = [1, 2, 1, 1]
# opensea-pirates-treasures
# Parallel lists: index i across all ACCESSORIES_* lists describes treasure i.
ACCESSORIES_IMAGES = [
    'Bamboo-flute.png',
    'Life-ring.png',
    'Message-in-a-bottle.png',
    'Pearl.png',
    'Scuba-mask.png',
    'Trident.png'
]
# Display names derived from the file names ("Bamboo-flute.png" -> "Bamboo flute").
ACCESSORIES_NAMES = [a.replace('-', ' ').replace('.png', '')
                     for a in ACCESSORIES_IMAGES]
ACCESSORIES_ATTS_INT = [200, 11, 3, 41, 9, 172]
ACCESSORIES_ATTS_PERCENT = [5, 10, 1, 20, 15, 25]
ACCESSORIES_ATTS_LOCATION = ['Head', 'Body', 'Held', 'Held', 'Head', 'Held']
ACCESSORIES_ATTS_RARITY = [
    'Common',
    'Rare',
    'Legendary',
    'Epic',
    'Divine',
    'Hidden'
]
ACCESSORIES_ATTS_DEPTH = [
    'beach',
    'shore',
    'shallows',
    'deeps',
    'shore',
    'deeps'
]
ACCESSORIES_ATTS_GENERATION = [1, 1, 2, 1, 1, 3]
# contractURI() support
# Storefront-level metadata served by /contract/<name>, keyed by contract name.
CONTRACT_URI_METADATA = {
    'opensea-pirates': {
        'name': 'OpenSea Pirates',
        'description': 'Friendly pirates of the sea.',
        'image': 'https://example.com/image.png',
        'external_link': 'https://github.com/dhagell/opensea-pirates/'
    },
    'opensea-erc1155': {
        'name': 'OpenSea Pirate Treasures',
        'description': "Fun and useful treasures for your OpenSea pirates.",
        'image': 'https://example.com/image.png',
        'external_link': 'https://github.com/dhagell/opensea-erc1155/'
    }
}
CONTRACT_URI_METADATA_AVAILABLE = CONTRACT_URI_METADATA.keys()
########################################################################
# Routes
########################################################################
# opensea-pirates
@app.route('/api/pirate/<token_id>')
def pirate(token_id):
    """Serve ERC-721 style metadata JSON for the pirate with the given token id."""
    token_id = int(token_id)
    # Name and visual layers are derived deterministically from the token id.
    pirate_name = '%s %s' % (
        FIRST_NAMES[token_id % len(FIRST_NAMES)],
        LAST_NAMES[token_id % len(LAST_NAMES)],
    )
    base = BASES[token_id % len(BASES)]
    eyes = EYES[token_id % len(EYES)]
    mouth = MOUTH[token_id % len(MOUTH)]
    layers = [
        'images/bases/base-%s.png' % base,
        'images/eyes/eyes-%s.png' % eyes,
        'images/mouths/mouth-%s.png' % mouth,
    ]
    image_url = _compose_image(layers, token_id)
    attributes = []
    _add_attribute(attributes, 'Base', BASES, token_id)
    _add_attribute(attributes, 'Eyes', EYES, token_id)
    _add_attribute(attributes, 'Mouth', MOUTH, token_id)
    _add_attribute(attributes, 'Level', INT_ATTRIBUTES, token_id)
    _add_attribute(attributes, 'Stamina', FLOAT_ATTRIBUTES, token_id)
    _add_attribute(attributes, 'Personality', STR_ATTRIBUTES, token_id)
    _add_attribute(attributes, 'Aqua Power', BOOST_ATTRIBUTES, token_id, display_type='boost_number')
    _add_attribute(attributes, 'Stamina Increase', PERCENT_BOOST_ATTRIBUTES, token_id, display_type='boost_percentage')
    _add_attribute(attributes, 'Generation', NUMBER_ATTRIBUTES, token_id, display_type='number')
    payload = {
        'name': pirate_name,
        'description': 'Friendly OpenSea Pirate that enjoys a long sail in the ocean.',
        'image': image_url,
        'external_url': 'https://openseapirates.io/%s' % token_id,
        'attributes': attributes,
    }
    return jsonify(payload)
@app.route('/api/box/pirate/<token_id>')
def pirate_box(token_id):
    """Serve metadata for a pirate "bootybox" (lootbox) token."""
    token_id = int(token_id)
    box_image_url = _compose_image(['images/box/bootybox.png'], token_id, 'box')
    attributes = []
    # Lootboxes advertise how many tokens they contain.
    _add_attribute(attributes, 'number_inside', [3], token_id)
    payload = {
        'name': 'Pirate Booty Box',
        'description': 'This bootybox contains some OpenSea Pirates! It can also be traded!',
        'image': box_image_url,
        'external_url': 'https://openseapirates.io/%s' % token_id,
        'attributes': attributes,
    }
    return jsonify(payload)
@app.route('/api/factory/pirate/<token_id>')
def pirate_factory(token_id):
    """Serve metadata for one of the pirate factory's sell options.

    Option ids: 0 = one pirate, 1 = four pirates, 2 = one bootybox
    (which opens into three pirates).

    Returns: JSON metadata (name/description/image/external_url/attributes).
    Raises: HTTP 404 for any other option id.
    """
    token_id = int(token_id)
    if token_id == 0:
        name = 'One OpenSea Pirate'
        description = 'When you purchase this option, you will receive a single OpenSea pirate of a random variety. ' \
                      'Enjoy and take good care of your aquatic being!'
        image_url = _compose_image(['images/factory/egg.png'], token_id, 'factory')
        num_inside = 1
    elif token_id == 1:
        name = 'Four OpenSea Pirates'
        description = 'When you purchase this option, you will receive four OpenSea pirates of random variety. ' \
                      'Enjoy and take good care of your aquatic beings!'
        image_url = _compose_image(['images/factory/four-eggs.png'], token_id, 'factory')
        num_inside = 4
    elif token_id == 2:
        name = 'One OpenSea Pirate bootybox'
        description = 'When you purchase this option, you will receive one bootybox, which can be opened to reveal three ' \
                      'OpenSea pirates of random variety. Enjoy and take good care of these cute aquatic beings!'
        image_url = _compose_image(['images/box/bootybox.png'], token_id, 'factory')
        num_inside = 3
    else:
        # BUG FIX: ids other than 0-2 previously fell through with name/
        # description/image_url/num_inside unbound, crashing with NameError
        # (an HTTP 500). Surface a clean 404 instead.
        abort(404, description='No such factory option')
    attributes = []
    _add_attribute(attributes, 'number_inside', [num_inside], token_id)
    return jsonify({
        'name': name,
        'description': description,
        'image': image_url,
        'external_url': 'https://openseapirates.io/%s' % token_id,
        'attributes': attributes
    })
# opensea-pirates-treasures
@app.route('/api/treasure/<token_id>')
def treasure(token_id):
    """Serve ERC-1155 style metadata for a treasure token; 404 for unknown ids."""
    token_id = int(token_id)
    if token_id >= len(ACCESSORIES_NAMES):
        abort(404, description='No such token')
    treasure_name = ACCESSORIES_NAMES[token_id]
    source_image = 'images/treasure/%s' % ACCESSORIES_IMAGES[token_id]
    image_url = _bucket_image(source_image, token_id, 'treasure')
    attributes = []
    _add_attribute(attributes, 'Aqua Boost', ACCESSORIES_ATTS_INT, token_id, display_type='boost_number')
    _add_attribute(attributes, 'Stamina Increase', ACCESSORIES_ATTS_PERCENT, token_id, display_type='boost_percentage')
    _add_attribute(attributes, 'Location', ACCESSORIES_ATTS_LOCATION, token_id)
    _add_attribute(attributes, 'Depth', ACCESSORIES_ATTS_DEPTH, token_id)
    _add_attribute(attributes, 'Rarity', ACCESSORIES_ATTS_RARITY, token_id)
    _add_attribute(attributes, 'Generation', ACCESSORIES_ATTS_GENERATION, token_id, display_type='number')
    return jsonify({
        'name': treasure_name,
        'description': 'A fun and useful treasure for your friendly OpenSea pirates.',
        'image': image_url,
        'external_url': 'https://openseapirates.io/treasure/%s' % token_id,
        'attributes': attributes,
    })
@app.route('/api/box/treasure/<token_id>')
def treasure_box(token_id):
    """Serve metadata for a treasure "bootybox" (lootbox) token."""
    token_id = int(token_id)
    box_image_url = _compose_image(['images/box/bootybox.png'], token_id, 'box')
    attributes = []
    # Lootboxes advertise how many tokens they contain.
    _add_attribute(attributes, 'number_inside', [3], token_id)
    payload = {
        'name': 'Treasure Booty Box',
        'description': 'This bootybox contains some OpenSea Pirate treasures! It can also be traded!',
        'image': box_image_url,
        'external_url': 'https://openseapirates.io/box/treasure/%s' % token_id,
        'attributes': attributes,
    }
    return jsonify(payload)
@app.route('/api/factory/treasure/<token_id>')
def treasure_factory(token_id):
    """Serve metadata for one of the treasure factory's sell options.

    Option ids: 0 = one treasure, 1 = four treasures, 2 = one bootybox
    (which opens into three treasures).

    Returns: JSON metadata (name/description/image/external_url/attributes).
    Raises: HTTP 404 for any other option id.
    """
    token_id = int(token_id)
    if token_id == 0:
        name = 'One OpenSea Pirate Treasure'
        description = 'When you purchase this option, you will receive a single OpenSea pirate treasure of a random variety. ' \
                      'Enjoy and take good care of your aquatic being!'
        image_url = _compose_image(['images/factory/egg.png'], token_id, 'factory')
        num_inside = 1
    elif token_id == 1:
        name = 'Four OpenSea Pirate Treasures'
        description = 'When you purchase this option, you will receive four OpenSea pirate treasures of random variety. ' \
                      'Enjoy and take good care of your aquatic beings!'
        image_url = _compose_image(['images/factory/four-eggs.png'], token_id, 'factory')
        num_inside = 4
    elif token_id == 2:
        name = 'One OpenSea Pirate Treasure BootyBox'
        description = 'When you purchase this option, you will receive one bootybox, which can be opened to reveal three ' \
                      'OpenSea pirate treasures of random variety. Enjoy and take good care of these cute aquatic beings!'
        image_url = _compose_image(['images/box/bootybox.png'], token_id, 'factory')
        num_inside = 3
    else:
        # BUG FIX: ids other than 0-2 previously fell through with name/
        # description/image_url/num_inside unbound, crashing with NameError
        # (an HTTP 500). Surface a clean 404 instead.
        abort(404, description='No such factory option')
    attributes = []
    _add_attribute(attributes, 'number_inside', [num_inside], token_id)
    return jsonify({
        'name': name,
        'description': description,
        'image': image_url,
        'external_url': 'https://openseapirates.io/%s' % token_id,
        'attributes': attributes
    })
# contractURI()
@app.route('/contract/<contract_name>')
def contract_uri(contract_name):
    """contractURI() endpoint: storefront-level metadata for a known contract."""
    if contract_name not in CONTRACT_URI_METADATA_AVAILABLE:
        abort(404, description='Resource not found')
    return jsonify(CONTRACT_URI_METADATA[contract_name])
# Error handling
@app.errorhandler(404)
def resource_not_found(e):
    # Render 404s as JSON so API clients never receive Flask's HTML error page.
    return jsonify(error=str(e)), 404
########################################################################
# Utility code
########################################################################
def _add_attribute(existing, attribute_name, options, token_id, display_type=None):
trait = {
'trait_type': attribute_name,
'value': options[token_id % len(options)]
}
if display_type:
trait['display_type'] = display_type
existing.append(trait)
def _compose_image(image_files, token_id, path='pirate'):
    """Alpha-composite the given PNG layers (in order), save the result
    locally, upload it to GCS and return the blob's public URL."""
    composite = None
    for layer_file in image_files:
        layer = Image.open(layer_file).convert('RGBA')
        # First layer becomes the base; later layers are stacked on top.
        composite = layer if composite is None else Image.alpha_composite(composite, layer)
    output_path = 'images/output/%s.png' % token_id
    composite.save(output_path)
    blob = _get_bucket().blob(f'{path}/{token_id}.png')
    blob.upload_from_filename(filename=output_path)
    return blob.public_url
def _bucket_image(image_path, token_id, path='treasure'):
    """Upload an existing local image to GCS and return its public URL."""
    destination = f'{path}/{token_id}.png'
    blob = _get_bucket().blob(destination)
    blob.upload_from_filename(filename=image_path)
    return blob.public_url
def _get_bucket():
    """Build an authenticated GCS client and return the configured bucket.

    Credentials are read from a service-account JSON file and scoped to
    read/write storage access when the credential type requires scopes.
    """
    credentials = service_account.Credentials.from_service_account_file(
        'credentials/google-storage-credentials.json')
    if credentials.requires_scopes:
        credentials = credentials.with_scopes(
            ['https://www.googleapis.com/auth/devstorage.read_write'])
    gcs_client = storage.Client(project=GOOGLE_STORAGE_PROJECT, credentials=credentials)
    return gcs_client.get_bucket(GOOGLE_STORAGE_BUCKET)
@app.route("/")
def home():
return render_template('index.html')
########################################################################
# Main flow of execution
########################################################################
# Run the Flask development server (debug + auto-reload); not for production.
if __name__ == '__main__':
    app.run(debug=True, use_reloader=True)
| 36.035191 | 128 | 0.6368 | from flask import Flask, abort
from flask import jsonify
from google.cloud import storage
from google.oauth2 import service_account
from PIL import Image
import os
import mimetypes
GOOGLE_STORAGE_PROJECT = os.environ['GOOGLE_STORAGE_PROJECT']
GOOGLE_STORAGE_BUCKET = os.environ['GOOGLE_STORAGE_BUCKET']
app = Flask(__name__)
| true | true |
1c481ebd72b3d31925f9e66052d9b12423ea6d1f | 5,720 | py | Python | src/putty/settings/ssh_host_keys/__init__.py | KalleDK/putty.settings | d8a7803bd6df343f53f6a8d404c8a1ceb7bc2436 | [
"MIT"
] | 1 | 2021-05-25T10:42:15.000Z | 2021-05-25T10:42:15.000Z | src/putty/settings/ssh_host_keys/__init__.py | KalleDK/putty.settings | d8a7803bd6df343f53f6a8d404c8a1ceb7bc2436 | [
"MIT"
] | null | null | null | src/putty/settings/ssh_host_keys/__init__.py | KalleDK/putty.settings | d8a7803bd6df343f53f6a8d404c8a1ceb7bc2436 | [
"MIT"
] | null | null | null | import re
import paramiko
import winreg
import pathlib
import typing
import logging
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
# Module logger.
# BUG FIX: this was logging.getLevelName(__name__), which returns a *string*
# ("Level <name>"), so every later logger.info(...) call would raise
# AttributeError. Use a real logger.
logger = logging.getLogger(__name__)

# PuTTY keeps its per-user settings under HKCU\Software\SimonTatham\PuTTY.
STORE = winreg.HKEY_CURRENT_USER
PUTTY_PATH = pathlib.PureWindowsPath('Software', 'SimonTatham', 'PuTTY')

# Key-type name translation between paramiko ("ssh-rsa") and PuTTY ("rsa2").
paramiko_to_putty_key = {
    "ssh-rsa": "rsa2"
}
putty_to_paramiko_key = {val: key for key, val in paramiko_to_putty_key.items()}
class HostKeyEntry:
    """One known-host SSH key, convertible between paramiko and PuTTY forms.

    PuTTY registry values look like "rsa2@22:example.com" mapped to
    "0x10001,0x<modulus>", while paramiko host entries look like
    "example.com" (default port) or "[example.com]:2222".
    """

    # PuTTY value name: "<key_type>@<port>:<hostname>", e.g. "rsa2@22:host".
    putty_host_entry_pattern = re.compile(r'(?P<key_type>.+)@(?P<port>.+):(?P<hostname>.+)')
    # paramiko bracketed form "[host]:port"; used only for non-default ports.
    paramiko_host_entry_pattern = re.compile(r'\[(?P<hostname>.+)\]:(?P<port>.+)')

    def __init__(self, hostname: str = None, port: str = None, key_type: str = None, key: paramiko.PKey = None):
        # port is kept as a string ('22'); key_type uses paramiko naming
        # ('ssh-rsa'); key is a paramiko public key object.
        self.hostname = hostname
        self.port = port
        self.key_type = key_type
        self.key = key

    @property
    def paramiko_host_entry(self):
        """Host entry in paramiko's known_hosts form; brackets only for non-22 ports."""
        if self.port == '22':
            return self.hostname
        else:
            return "[{hostname}]:{port}".format(hostname=self.hostname, port=self.port)

    @paramiko_host_entry.setter
    def paramiko_host_entry(self, value):
        # Unbracketed entries carry no port and default to 22.
        m = self.paramiko_host_entry_pattern.match(value)
        if m:
            self.hostname = m.group('hostname')
            self.port = m.group('port')
        else:
            self.hostname = value
            self.port = '22'

    @property
    def paramiko_key_type(self):
        # Stored key_type is already in paramiko naming.
        return self.key_type

    @paramiko_key_type.setter
    def paramiko_key_type(self, value):
        self.key_type = value

    @property
    def paramiko_key(self):
        return self.key

    @paramiko_key.setter
    def paramiko_key(self, value):
        self.key = value

    @property
    def putty_key_type(self):
        # Translate to PuTTY naming; raises KeyError for unsupported types
        # (only ssh-rsa/rsa2 is mapped at module scope).
        return paramiko_to_putty_key[self.key_type]

    @putty_key_type.setter
    def putty_key_type(self, value):
        self.key_type = putty_to_paramiko_key[value]

    @property
    def putty_host_entry(self):
        """Registry value name in PuTTY's "<type>@<port>:<host>" form."""
        return "{key_type}@{port}:{hostname}".format(key_type=self.putty_key_type, port=self.port, hostname=self.hostname)

    @putty_host_entry.setter
    def putty_host_entry(self, value):
        m = self.putty_host_entry_pattern.match(value)
        if m:
            self.hostname = m.group('hostname')
            self.port = m.group('port')
            self.putty_key_type = m.group('key_type')
        else:
            raise Exception("Not valid host_entry")

    @property
    def putty_key(self):
        # PuTTY stores RSA keys as "0x<e>,0x<n>" hex pairs; other key types
        # fall through and return None.
        if self.key_type == 'ssh-rsa' and isinstance(self.key, paramiko.RSAKey):
            return '{e},{n}'.format(e=hex(self.key.public_numbers.e), n=hex(self.key.public_numbers.n))

    @putty_key.setter
    def putty_key(self, value):
        # Rebuild a paramiko RSAKey from the "0x<e>,0x<n>" registry string.
        if self.key_type == 'ssh-rsa':
            e, n = (int(x, 0) for x in value.split(','))
            self.key = paramiko.RSAKey(key=rsa.RSAPublicNumbers(e=e, n=n).public_key(default_backend()))

    @classmethod
    def from_registry_entry(cls, entry: typing.Tuple[str, str, int]):
        """Build an entry from a winreg.EnumValue tuple (name, data, type).

        Order matters: the host entry must be set first so the key type is
        known before the key string is parsed.
        """
        o = cls()
        o.putty_host_entry = entry[0]
        o.putty_key = entry[1]
        return o

    @classmethod
    def from_paramiko_entry(cls, host_entry, key_type, key):
        """Build an entry from one (host_entry, key_type, key) paramiko triple."""
        o = cls()
        o.paramiko_host_entry = host_entry
        o.paramiko_key_type = key_type
        o.paramiko_key = key
        return o
class SshHostKeys:
    """PuTTY's SshHostKeys registry store, synced with paramiko HostKeys.

    Entries are held in-memory as {putty_host_entry: HostKeyEntry} and
    read/written under HKCU\\Software\\SimonTatham\\PuTTY\\SshHostKeys.
    """

    path = str(PUTTY_PATH.joinpath('SshHostKeys'))

    def __init__(self):
        # Keyed by the PuTTY value name, e.g. "rsa2@22:host".
        self.host_keys: typing.Dict[str, HostKeyEntry] = {}

    def load(self):
        """Populate self.host_keys from the registry; malformed values are
        logged and skipped (best effort)."""
        for registry_entry in self.get_from_registry():
            try:
                self.add(HostKeyEntry.from_registry_entry(registry_entry))
            except Exception:
                logger.info("Invalid keyformat {}".format(registry_entry))

    def save(self):
        """Write self.host_keys back: delete registry values that are no
        longer present in memory, then (re)write every in-memory entry."""
        entries_to_remove = []
        for registry_entry in self.get_from_registry():
            if self.host_keys.get(registry_entry[0]) is None:
                entries_to_remove.append(registry_entry[0])
        self.delete_from_registry(entries_to_remove)
        self.set_registry_to(self.host_keys)

    def add(self, host_key_entry: HostKeyEntry):
        # Last write wins for duplicate host entries.
        self.host_keys[host_key_entry.putty_host_entry] = host_key_entry

    def add_from_paramiko_host_keys(self, host_keys: paramiko.HostKeys):
        """Import every (host, key_type, key) triple from a paramiko store."""
        for host_entry in host_keys.keys():
            for key_type, key in host_keys.lookup(host_entry).items():
                self.add(HostKeyEntry.from_paramiko_entry(host_entry=host_entry, key_type=key_type, key=key))

    def add_to_paramiko_host_keys(self, host_keys: paramiko.HostKeys):
        """Export every in-memory entry into a paramiko store.

        NOTE: 'key_type' here is actually the dict key (the PuTTY host-entry
        string); only host_key is used.
        """
        for key_type, host_key in self.host_keys.items():
            host_keys.add(hostname=host_key.paramiko_host_entry, keytype=host_key.paramiko_key_type, key=host_key.paramiko_key)

    @classmethod
    def delete_from_registry(cls, entries):
        """Delete the named registry values under the SshHostKeys key."""
        with winreg.OpenKey(STORE, cls.path, 0, winreg.KEY_ALL_ACCESS) as key:
            for entry in entries:
                winreg.DeleteValue(key, entry)

    @classmethod
    def get_from_registry(cls):
        """Return all (name, data, type) value tuples under the SshHostKeys key."""
        with winreg.OpenKey(STORE, cls.path, 0, winreg.KEY_ALL_ACCESS) as key:
            # QueryInfoKey()[1] is the number of values on the key.
            size = winreg.QueryInfoKey(key)[1]
            return [winreg.EnumValue(key, i) for i in range(size)]

    @classmethod
    def set_registry_to(cls, host_keys):
        """Write every entry as a REG_SZ value (type 1); entries whose key
        cannot be serialized are logged and skipped.

        NOTE: 'key_type' here is actually the dict key (the PuTTY host-entry
        string); only host_key is used.
        """
        with winreg.OpenKey(STORE, cls.path, 0, winreg.KEY_ALL_ACCESS) as key:
            for key_type, host_key in host_keys.items():
                try:
                    winreg.SetValueEx(key, host_key.putty_host_entry, 0, 1, host_key.putty_key)
                except Exception:
                    logger.info("Invalid keyformat {}".format(host_key))
| 33.450292 | 127 | 0.651224 | import re
import paramiko
import winreg
import pathlib
import typing
import logging
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
logger = logging.getLevelName(__name__)
STORE = winreg.HKEY_CURRENT_USER
PUTTY_PATH = pathlib.PureWindowsPath('Software', 'SimonTatham', 'PuTTY')
paramiko_to_putty_key = {
"ssh-rsa": "rsa2"
}
putty_to_paramiko_key = {val: key for key, val in paramiko_to_putty_key.items()}
class HostKeyEntry:
putty_host_entry_pattern = re.compile(r'(?P<key_type>.+)@(?P<port>.+):(?P<hostname>.+)')
paramiko_host_entry_pattern = re.compile(r'\[(?P<hostname>.+)\]:(?P<port>.+)')
def __init__(self, hostname: str = None, port: str = None, key_type: str = None, key: paramiko.PKey = None):
self.hostname = hostname
self.port = port
self.key_type = key_type
self.key = key
@property
def paramiko_host_entry(self):
if self.port == '22':
return self.hostname
else:
return "[{hostname}]:{port}".format(hostname=self.hostname, port=self.port)
@paramiko_host_entry.setter
def paramiko_host_entry(self, value):
m = self.paramiko_host_entry_pattern.match(value)
if m:
self.hostname = m.group('hostname')
self.port = m.group('port')
else:
self.hostname = value
self.port = '22'
@property
def paramiko_key_type(self):
return self.key_type
@paramiko_key_type.setter
def paramiko_key_type(self, value):
self.key_type = value
@property
def paramiko_key(self):
return self.key
@paramiko_key.setter
def paramiko_key(self, value):
self.key = value
@property
def putty_key_type(self):
return paramiko_to_putty_key[self.key_type]
@putty_key_type.setter
def putty_key_type(self, value):
self.key_type = putty_to_paramiko_key[value]
@property
def putty_host_entry(self):
return "{key_type}@{port}:{hostname}".format(key_type=self.putty_key_type, port=self.port, hostname=self.hostname)
@putty_host_entry.setter
def putty_host_entry(self, value):
m = self.putty_host_entry_pattern.match(value)
if m:
self.hostname = m.group('hostname')
self.port = m.group('port')
self.putty_key_type = m.group('key_type')
else:
raise Exception("Not valid host_entry")
@property
def putty_key(self):
if self.key_type == 'ssh-rsa' and isinstance(self.key, paramiko.RSAKey):
return '{e},{n}'.format(e=hex(self.key.public_numbers.e), n=hex(self.key.public_numbers.n))
@putty_key.setter
def putty_key(self, value):
if self.key_type == 'ssh-rsa':
e, n = (int(x, 0) for x in value.split(','))
self.key = paramiko.RSAKey(key=rsa.RSAPublicNumbers(e=e, n=n).public_key(default_backend()))
@classmethod
def from_registry_entry(cls, entry: typing.Tuple[str, str, int]):
o = cls()
o.putty_host_entry = entry[0]
o.putty_key = entry[1]
return o
@classmethod
def from_paramiko_entry(cls, host_entry, key_type, key):
o = cls()
o.paramiko_host_entry = host_entry
o.paramiko_key_type = key_type
o.paramiko_key = key
return o
class SshHostKeys:
path = str(PUTTY_PATH.joinpath('SshHostKeys'))
def __init__(self):
self.host_keys: typing.Dict[str, HostKeyEntry] = {}
def load(self):
for registry_entry in self.get_from_registry():
try:
self.add(HostKeyEntry.from_registry_entry(registry_entry))
except Exception:
logger.info("Invalid keyformat {}".format(registry_entry))
def save(self):
entries_to_remove = []
for registry_entry in self.get_from_registry():
if self.host_keys.get(registry_entry[0]) is None:
entries_to_remove.append(registry_entry[0])
self.delete_from_registry(entries_to_remove)
self.set_registry_to(self.host_keys)
def add(self, host_key_entry: HostKeyEntry):
self.host_keys[host_key_entry.putty_host_entry] = host_key_entry
def add_from_paramiko_host_keys(self, host_keys: paramiko.HostKeys):
for host_entry in host_keys.keys():
for key_type, key in host_keys.lookup(host_entry).items():
self.add(HostKeyEntry.from_paramiko_entry(host_entry=host_entry, key_type=key_type, key=key))
def add_to_paramiko_host_keys(self, host_keys: paramiko.HostKeys):
for key_type, host_key in self.host_keys.items():
host_keys.add(hostname=host_key.paramiko_host_entry, keytype=host_key.paramiko_key_type, key=host_key.paramiko_key)
@classmethod
def delete_from_registry(cls, entries):
with winreg.OpenKey(STORE, cls.path, 0, winreg.KEY_ALL_ACCESS) as key:
for entry in entries:
winreg.DeleteValue(key, entry)
@classmethod
def get_from_registry(cls):
with winreg.OpenKey(STORE, cls.path, 0, winreg.KEY_ALL_ACCESS) as key:
size = winreg.QueryInfoKey(key)[1]
return [winreg.EnumValue(key, i) for i in range(size)]
@classmethod
def set_registry_to(cls, host_keys):
with winreg.OpenKey(STORE, cls.path, 0, winreg.KEY_ALL_ACCESS) as key:
for key_type, host_key in host_keys.items():
try:
winreg.SetValueEx(key, host_key.putty_host_entry, 0, 1, host_key.putty_key)
except Exception:
logger.info("Invalid keyformat {}".format(host_key))
| true | true |
1c481f629295c2319555aad0fd9a68a7fdad9f62 | 1,209 | py | Python | torch_collections/models/_siamese_configs.py | mingruimingrui/torch-collections | f7c20b28b63de76c763983338aa4c825904ef4cd | [
"MIT"
] | 3 | 2018-08-14T19:40:58.000Z | 2018-10-22T15:41:39.000Z | torch_collections/models/_siamese_configs.py | mingruimingrui/torch-collections | f7c20b28b63de76c763983338aa4c825904ef4cd | [
"MIT"
] | 2 | 2018-08-14T19:40:41.000Z | 2018-10-29T14:46:40.000Z | torch_collections/models/_siamese_configs.py | mingruimingrui/torch-collections | f7c20b28b63de76c763983338aa4c825904ef4cd | [
"MIT"
] | null | null | null | from __future__ import division
from copy import deepcopy
from ..utils.collections import AttrDict
# Define default parameters
_c = AttrDict()
################################################################################
#### Start of configurable parameters
#### Model configs
_c.name = 'encoder'
_c.input_size = [160, 160]
_c.embedding_size = 128
_c.backbone = 'resnet18'
_c.freeze_backbone = False
_c.l2_norm_alpha = 10 # based off https://arxiv.org/pdf/1703.09507.pdf
#### Loss configs
_c.margin = 0.5
_c.dist_type = 'euclidean' # option of ['euclidean', 'cosine']
_c.p_norm = 2.0 # normalization degree in euclidean distance
### Sample selection strategy
_c.negative_mining_type = 'hard' # option of ['random', 'semihard', 'hard']
################################################################################
#### End of configurable parameters
# Set default configs to be immutable
_c.immutable(True)
def make_configs(**kwargs):
configs = deepcopy(_c)
configs.immutable(False)
# Update default configs with user provided ones
for arg, value in kwargs.items():
configs[arg] = value
configs.immutable(True)
return configs
| 25.723404 | 80 | 0.604632 | from __future__ import division
from copy import deepcopy
from ..utils.collections import AttrDict
_c = AttrDict()
| true | true |
1c482118904fb66183993bc5d64794010b600d9e | 2,562 | py | Python | examples/ad_manager/v201802/creative_service/update_creatives.py | khanhnhk/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | 1 | 2021-12-30T15:21:42.000Z | 2021-12-30T15:21:42.000Z | examples/ad_manager/v201802/creative_service/update_creatives.py | benlistyg/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201802/creative_service/update_creatives.py | benlistyg/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the destination URL of a single image creative.
To determine which image creatives exist, run get_all_creatives.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
IMAGE_CREATIVE_ID = 'INSERT_IMAGE_CREATIVE_ID_HERE'
def main(client, image_creative_id):
  """Updates the destination URL of the given image creative.

  Args:
    client: an initialized googleads.ad_manager.AdManagerClient.
    image_creative_id: the id of the ImageCreative to update.
  """
  # Initialize appropriate service.
  creative_service = client.GetService('CreativeService', version='v201802')
  # Create statement object to get the creative by ID.
  # NOTE: long(...) and the print statements below mean this example targets
  # Python 2.
  statement = (ad_manager.StatementBuilder()
               .Where('creativeType = :type AND id = :id')
               .WithBindVariable('type', 'ImageCreative')
               .WithBindVariable('id', long(image_creative_id))
               .Limit(1))
  # Get creatives by statement.
  response = creative_service.getCreativesByStatement(
      statement.ToStatement())
  if 'results' in response and len(response['results']):
    # Update each local creative object by changing its destination URL.
    updated_creatives = []
    for creative in response['results']:
      creative['destinationUrl'] = 'http://news.google.com'
      updated_creatives.append(creative)
    # Update creatives remotely.
    creatives = creative_service.updateCreatives(updated_creatives)
    # Display results.
    for creative in creatives:
      print ('Image creative with id "%s" and destination URL "%s" was '
             'updated.' % (creative['id'], creative['destinationUrl']))
  else:
    print 'No creatives found to update.'
if __name__ == '__main__':
  # Initialize client object.  LoadFromStorage reads ~/googleads.yaml by default.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, IMAGE_CREATIVE_ID)
| 35.583333 | 77 | 0.732631 |
"""This code example updates the destination URL of a single image creative.
To determine which image creatives exist, run get_all_creatives.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import ad_manager
IMAGE_CREATIVE_ID = 'INSERT_IMAGE_CREATIVE_ID_HERE'
def main(client, image_creative_id):
creative_service = client.GetService('CreativeService', version='v201802')
statement = (ad_manager.StatementBuilder()
.Where('creativeType = :type AND id = :id')
.WithBindVariable('type', 'ImageCreative')
.WithBindVariable('id', long(image_creative_id))
.Limit(1))
response = creative_service.getCreativesByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
updated_creatives = []
for creative in response['results']:
creative['destinationUrl'] = 'http://news.google.com'
updated_creatives.append(creative)
creatives = creative_service.updateCreatives(updated_creatives)
for creative in creatives:
print ('Image creative with id "%s" and destination URL "%s" was '
'updated.' % (creative['id'], creative['destinationUrl']))
else:
print 'No creatives found to update.'
if __name__ == '__main__':
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, IMAGE_CREATIVE_ID)
| false | true |
1c4821226a68cf04839da2954c8a4716bab862ee | 1,285 | py | Python | atividade4.py | cauanicastro/Prog1Ifes | dc0ad7d42d45b837c76e178d43bf608afaab3f02 | [
"Apache-2.0"
] | null | null | null | atividade4.py | cauanicastro/Prog1Ifes | dc0ad7d42d45b837c76e178d43bf608afaab3f02 | [
"Apache-2.0"
] | null | null | null | atividade4.py | cauanicastro/Prog1Ifes | dc0ad7d42d45b837c76e178d43bf608afaab3f02 | [
"Apache-2.0"
] | null | null | null | __author__ = 'cauanicastro'
__copyright__ = "Copyright 2015, Cauani Castro"
__credits__ = ["Cauani Castro"]
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Cauani Castro"
__email__ = "cauani.castro@hotmail.com"
__status__ = "Examination program"
def calculaRaiz(numero, aproximacoes):
    """Approximate sqrt(numero) using *aproximacoes* Newton iterations.

    Returns a formatted report line (with *aproximacoes* == 0 no iteration
    runs and the reported root stays 0).
    """
    raiz = 0
    for passo in range(aproximacoes):
        if passo == 0:
            raiz = numero / 2  # Newton seed: x0 = n / 2
        else:
            raiz = (raiz**2 + numero) / (2 * raiz)  # x_{k+1} = (x_k^2 + n) / (2*x_k)
    return "Num = %.5f Aprox = %d Raiz Quadrada = %.10f\n" % (numero, aproximacoes, raiz)
def main():
    """Interactive driver: read numbers until a non-positive value is entered."""
    print("Este programa ira calcular a raiz quadrada de uma sequencia de numeros positivos, baseado no metodo de aproximacoes sucessivas de newton.")
    print("Para sair do programa digite um numero menor ou igual a zero.")
    while True:
        numero = float(input("Digite um numero (real, positivo) para calcular a sua raiz quadrada:\n"))
        if numero <= 0:
            # Sentinel: non-positive input ends the session.
            break
        aproximacoes = int(input("Digite o numero (inteiro) de aproximacoes desejada:\n"))
        print(calculaRaiz(numero, aproximacoes))
    print("\n#####################################")
    print(" FIM DO PROGRAMA")
    print("#####################################")
if __name__ == '__main__':
main() | 38.939394 | 150 | 0.610117 | __author__ = 'cauanicastro'
__copyright__ = "Copyright 2015, Cauani Castro"
__credits__ = ["Cauani Castro"]
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Cauani Castro"
__email__ = "cauani.castro@hotmail.com"
__status__ = "Examination program"
def calculaRaiz(numero, aproximacoes):
raiz = 0
for i in range(aproximacoes):
if (i == 0):
raiz = numero / 2
else:
raiz = (raiz**2+numero) / (2 * raiz)
return "Num = %.5f Aprox = %d Raiz Quadrada = %.10f\n" % (numero, aproximacoes, raiz)
def main():
print("Este programa ira calcular a raiz quadrada de uma sequencia de numeros positivos, baseado no metodo de aproximacoes sucessivas de newton.")
print("Para sair do programa digite um numero menor ou igual a zero.")
while True:
numero = float(input("Digite um numero (real, positivo) para calcular a sua raiz quadrada:\n"))
if numero <= 0:
break
aproximacoes = int(input("Digite o numero (inteiro) de aproximacoes desejada:\n"))
print(calculaRaiz(numero, aproximacoes))
print("\n#####################################")
print(" FIM DO PROGRAMA")
print("#####################################")
if __name__ == '__main__':
main() | true | true |
1c4821aedc79f26cc40a3c7988e922eba605d9a0 | 926 | py | Python | bomber_monkey/features/bomb/explosion.py | MonkeyPatchIo/bomber-monkey | 8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b | [
"MIT"
] | null | null | null | bomber_monkey/features/bomb/explosion.py | MonkeyPatchIo/bomber-monkey | 8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b | [
"MIT"
] | null | null | null | bomber_monkey/features/bomb/explosion.py | MonkeyPatchIo/bomber-monkey | 8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b | [
"MIT"
] | null | null | null | from enum import IntEnum
from python_ecs.ecs import Component
class ExplosionDirection(IntEnum):
    """Bit flags naming the direction(s) an explosion cell extends toward."""
    UP = 1
    LEFT = 2
    RIGHT = 4
    DOWN = 8
    ALL = UP | LEFT | RIGHT | DOWN
    @staticmethod
    def opposed(direction: 'ExplosionDirection'):
        """Return the mirror of a single direction (None for combinations)."""
        mirror = {
            ExplosionDirection.UP: ExplosionDirection.DOWN,
            ExplosionDirection.DOWN: ExplosionDirection.UP,
            ExplosionDirection.LEFT: ExplosionDirection.RIGHT,
            ExplosionDirection.RIGHT: ExplosionDirection.LEFT,
        }
        # Combined masks (e.g. ALL) have no single opposite; like the
        # original if-chain, they yield None.
        return mirror.get(direction)
class Explosion(Component):
    """ECS component describing one explosion cell.

    :param direction: bitmask of ExplosionDirection flags this cell
        extends toward.
    :param power: numeric strength of the explosion — presumably the
        remaining propagation range in tiles; TODO confirm against the
        system that consumes this component.
    """
    def __init__(self, direction: ExplosionDirection, power: int) -> None:
        super().__init__()
        self.direction = direction  # ExplosionDirection bitmask
        self.power = power
        # NOTE(review): looks like this is flipped once neighbours have
        # been spawned, so each cell propagates at most once — confirm.
        self.propagated = False
    def __repr__(self):
        return 'Explosion({})'.format(self.direction)
| 27.235294 | 74 | 0.653348 | from enum import IntEnum
from python_ecs.ecs import Component
class ExplosionDirection(IntEnum):
UP = 1
LEFT = 2
RIGHT = 4
DOWN = 8
ALL = UP | LEFT | RIGHT | DOWN
@staticmethod
def opposed(direction: 'ExplosionDirection'):
if direction == ExplosionDirection.UP:
return ExplosionDirection.DOWN
if direction == ExplosionDirection.DOWN:
return ExplosionDirection.UP
if direction == ExplosionDirection.LEFT:
return ExplosionDirection.RIGHT
if direction == ExplosionDirection.RIGHT:
return ExplosionDirection.LEFT
class Explosion(Component):
def __init__(self, direction: ExplosionDirection, power: int) -> None:
super().__init__()
self.direction = direction
self.power = power
self.propagated = False
def __repr__(self):
return 'Explosion({})'.format(self.direction)
| true | true |
1c48221e622c65c1bec23b30d4231c02a9a6600d | 751 | py | Python | setup.py | Tynukua/getManga | 8cc5b090ec3dfcc6cfa7db3ce9e5220e7ef54d2b | [
"MIT"
] | 3 | 2021-05-24T07:38:20.000Z | 2022-03-30T14:47:23.000Z | setup.py | Tynukua/getManga | 8cc5b090ec3dfcc6cfa7db3ce9e5220e7ef54d2b | [
"MIT"
] | 1 | 2021-03-17T08:59:44.000Z | 2021-03-17T08:59:44.000Z | setup.py | Tynukua/getManga | 8cc5b090ec3dfcc6cfa7db3ce9e5220e7ef54d2b | [
"MIT"
] | 1 | 2019-07-02T20:00:54.000Z | 2019-07-02T20:00:54.000Z | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name = 'getmanga',
version="0.1.6",
author="Tynukua",
author_email = 'tynuk.ua@gmail.com',
description = 'package for load manga! :)',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Tynukua/getManga',
packages=setuptools.find_packages(),
requires_python='>=3.7',
classifiers=[
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
"Operating System :: OS Independent",] ,
install_requires=[
'aiohttp',
'requests',
'beautifulsoup4',
'aiofiles']
) | 27.814815 | 50 | 0.617843 | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name = 'getmanga',
version="0.1.6",
author="Tynukua",
author_email = 'tynuk.ua@gmail.com',
description = 'package for load manga! :)',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Tynukua/getManga',
packages=setuptools.find_packages(),
requires_python='>=3.7',
classifiers=[
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
"Operating System :: OS Independent",] ,
install_requires=[
'aiohttp',
'requests',
'beautifulsoup4',
'aiofiles']
) | true | true |
1c4822a66e7bd46682a2b852d9f477c93c5099e2 | 736 | py | Python | config/goalpost_default.py | ieu-acm/varipy | 55e12c9f854ab0b568495d9bf682476ad182a88b | [
"MIT"
] | null | null | null | config/goalpost_default.py | ieu-acm/varipy | 55e12c9f854ab0b568495d9bf682476ad182a88b | [
"MIT"
] | null | null | null | config/goalpost_default.py | ieu-acm/varipy | 55e12c9f854ab0b568495d9bf682476ad182a88b | [
"MIT"
] | null | null | null | """ Project configuration parameters """
from imgaug import augmenters as iaa
class config:
path = "data" # Relative to home directory of repository,
# includes "masked" and "original" sub-directories
input_shape = (256,256,3)
num_workers = 2
val_ratio = 0.2
weights_path = "weights"
epochs = 50
batch_size = 16
train_transforms = iaa.Sequential([
iaa.Crop(px=(1,16),keep_size=False),
iaa.Fliplr(0.5),
iaa.MotionBlur(),
iaa.FastSnowyLandscape(
lightness_threshold=[128, 200],
lightness_multiplier=(1.5, 3.5)),
iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05)),
iaa.Fog(),
])
valid_transforms = None
| 26.285714 | 68 | 0.607337 | from imgaug import augmenters as iaa
class config:
path = "data"
input_shape = (256,256,3)
num_workers = 2
val_ratio = 0.2
weights_path = "weights"
epochs = 50
batch_size = 16
train_transforms = iaa.Sequential([
iaa.Crop(px=(1,16),keep_size=False),
iaa.Fliplr(0.5),
iaa.MotionBlur(),
iaa.FastSnowyLandscape(
lightness_threshold=[128, 200],
lightness_multiplier=(1.5, 3.5)),
iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05)),
iaa.Fog(),
])
valid_transforms = None
| true | true |
1c482301e0d3e00345447ebf90bd35859d42c3d2 | 2,271 | py | Python | canella/config.py | mush42/Canella-CMS | b5132c271a3b8840f0b165c62d14de6853a3e5ac | [
"MIT"
] | 8 | 2017-01-30T22:46:40.000Z | 2018-03-30T21:35:28.000Z | canella/config.py | mush42/Canella-CMS | b5132c271a3b8840f0b165c62d14de6853a3e5ac | [
"MIT"
] | null | null | null | canella/config.py | mush42/Canella-CMS | b5132c271a3b8840f0b165c62d14de6853a3e5ac | [
"MIT"
] | 2 | 2018-01-16T10:31:27.000Z | 2020-10-01T19:49:10.000Z | import os
from collections import OrderedDict
from canella import app
from canella.babel import lazy_gettext
# --- Paths and core Flask settings ---
_BASEDIR = app.root_path
HOME_SLUG = 'index'
DB_DIR = os.path.join(_BASEDIR, '..', '.ignore.local', 'data.db')
DEBUG = True
# NOTE(review): SECRET_KEY and SECURITY_PASSWORD_SALT are hard-coded in
# source; for production these should come from the environment — confirm
# deployment story before reuse.
SECRET_KEY = '9bW7b2046be56b4c00b6f10dc2f3c4Ae56SL5PC9'
SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(DB_DIR)
SQLALCHEMY_TRACK_MODIFICATIONS = False
ERROR_404_HELP = False
# --- Upload locations ---
CONTENT_PATH = os.path.join(_BASEDIR, 'uploads', 'content')
MEDIA_PATH = os.path.join(_BASEDIR, 'uploads', 'media')
FORM_UPLOADS_PATH = os.path.join(_BASEDIR, 'uploads', 'forms')
# --- Flask-Security settings ---
SECURITY_PASSWORD_HASH = 'pbkdf2_sha512'
SECURITY_PASSWORD_SALT = '540SDW4426HCAER56546aDrw213d2a6b9a94e15b5d'
SECURITY_USER_IDENTITY_ATTRIBUTES = ['email', 'user_name']
SECURITY_POST_LOGIN_VIEW = '/admin'
SECURITY_CONFIRMABLE = False
SECURITY_RECOVERABLE = True
SECURITY_RESET_URL = '/reset-password/'
ALLOWED_EXTENSIONS = ['doc', 'docx', 'ppt', 'pptx', 'pdf', 'zip']
ENABLE_INLINE_EDITING = True
# --- Localization ---
SUPPORTED_LOCALES = OrderedDict((
    ('en', 'English'),
    ('ar', 'Arabic')
))
DEFAULT_LOCALE = 'en'
# Babel extraction/compilation settings for the 'canella' catalog.
BABEL = dict(
    domain='canella',
    translations_directory=os.path.join(_BASEDIR, 'translations'),
    babel_config=os.path.abspath(os.path.join(_BASEDIR, '..', 'babel', 'babel.cfg')),
    catalog_output_path=os.path.abspath(os.path.join(_BASEDIR, '..', 'babel')),
    catalog_filename=os.path.abspath(os.path.join(_BASEDIR, '..', 'babel', 'canella.pot')),
    project_name='Canella-CMS',
)
# Add extra fields you want to add to the profile here
PROFILE_EXTRA_FIELDS = (
    dict(name='language',
        label=lazy_gettext('Default Site Language'),
        description=lazy_gettext('All the site elements will be displayed in this language'),
        type='select',
        choices=lambda: app.config['SUPPORTED_LOCALES'].items(),
        default=lambda: app.config['DEFAULT_LOCALE']),
    dict(name='facebook_profile',
        label=lazy_gettext('Facebook Profile'),
        description=lazy_gettext('Will be displayed beneeth your bio in some places'), type='url', default=''),
    dict(name='twitter_account',
        label=lazy_gettext('Twitter Page URL'),
        description=lazy_gettext('Will be displayed beneeth your bio in some places'), type='url', default=''),
)
| 39.842105 | 111 | 0.72303 | import os
from collections import OrderedDict
from canella import app
from canella.babel import lazy_gettext
_BASEDIR = app.root_path
HOME_SLUG = 'index'
DB_DIR = os.path.join(_BASEDIR, '..', '.ignore.local', 'data.db')
DEBUG = True
SECRET_KEY = '9bW7b2046be56b4c00b6f10dc2f3c4Ae56SL5PC9'
SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(DB_DIR)
SQLALCHEMY_TRACK_MODIFICATIONS = False
ERROR_404_HELP = False
CONTENT_PATH = os.path.join(_BASEDIR, 'uploads', 'content')
MEDIA_PATH = os.path.join(_BASEDIR, 'uploads', 'media')
FORM_UPLOADS_PATH = os.path.join(_BASEDIR, 'uploads', 'forms')
SECURITY_PASSWORD_HASH = 'pbkdf2_sha512'
SECURITY_PASSWORD_SALT = '540SDW4426HCAER56546aDrw213d2a6b9a94e15b5d'
SECURITY_USER_IDENTITY_ATTRIBUTES = ['email', 'user_name']
SECURITY_POST_LOGIN_VIEW = '/admin'
SECURITY_CONFIRMABLE = False
SECURITY_RECOVERABLE = True
SECURITY_RESET_URL = '/reset-password/'
ALLOWED_EXTENSIONS = ['doc', 'docx', 'ppt', 'pptx', 'pdf', 'zip']
ENABLE_INLINE_EDITING = True
SUPPORTED_LOCALES = OrderedDict((
('en', 'English'),
('ar', 'Arabic')
))
DEFAULT_LOCALE = 'en'
BABEL = dict(
domain='canella',
translations_directory=os.path.join(_BASEDIR, 'translations'),
babel_config=os.path.abspath(os.path.join(_BASEDIR, '..', 'babel', 'babel.cfg')),
catalog_output_path=os.path.abspath(os.path.join(_BASEDIR, '..', 'babel')),
catalog_filename=os.path.abspath(os.path.join(_BASEDIR, '..', 'babel', 'canella.pot')),
project_name='Canella-CMS',
)
PROFILE_EXTRA_FIELDS = (
dict(name='language',
label=lazy_gettext('Default Site Language'),
description=lazy_gettext('All the site elements will be displayed in this language'),
type='select',
choices=lambda: app.config['SUPPORTED_LOCALES'].items(),
default=lambda: app.config['DEFAULT_LOCALE']),
dict(name='facebook_profile',
label=lazy_gettext('Facebook Profile'),
description=lazy_gettext('Will be displayed beneeth your bio in some places'), type='url', default=''),
dict(name='twitter_account',
label=lazy_gettext('Twitter Page URL'),
description=lazy_gettext('Will be displayed beneeth your bio in some places'), type='url', default=''),
)
| true | true |
1c4823a2bf4002b2e4471898f081ed69d54fd15f | 29,184 | py | Python | magnum/db/sqlalchemy/api.py | jflower154/magnum | 2b0b3f3e4c9888ff323d4be5cb9b3e97fde4a67a | [
"Apache-2.0"
] | null | null | null | magnum/db/sqlalchemy/api.py | jflower154/magnum | 2b0b3f3e4c9888ff323d4be5cb9b3e97fde4a67a | [
"Apache-2.0"
] | null | null | null | magnum/db/sqlalchemy/api.py | jflower154/magnum | 2b0b3f3e4c9888ff323d4be5cb9b3e97fde4a67a | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import func
from magnum.common import clients
from magnum.common import context as request_context
from magnum.common import exception
import magnum.conf
from magnum.db import api
from magnum.db.sqlalchemy import models
from magnum.i18n import _
# osprofiler is optional: None when the package is not installed.
profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
CONF = magnum.conf.CONF
LOG = log.getLogger(__name__)
# Lazily-created EngineFacade singleton (see _create_facade_lazily).
_FACADE = None
def _create_facade_lazily():
    """Return the module-global EngineFacade, creating it on first use.

    Also hooks osprofiler SQL tracing onto the engine when the library is
    available and enabled in configuration.
    """
    global _FACADE
    if _FACADE is None:
        _FACADE = db_session.EngineFacade.from_config(CONF)
        if profiler_sqlalchemy:
            if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy:
                profiler_sqlalchemy.add_tracing(sa, _FACADE.get_engine(), "db")
    return _FACADE
def get_engine():
    """Return the SQLAlchemy engine backing the shared facade."""
    return _create_facade_lazily().get_engine()
def get_session(**kwargs):
    """Return a new session from the shared facade; kwargs pass through."""
    return _create_facade_lazily().get_session(**kwargs)
def get_backend():
    """The backend is this module itself."""
    # Returned object implements magnum.db.api.Connection.
    return Connection()
def model_query(model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param session: if present (and truthy), the session to use; otherwise
        a fresh session is obtained from the facade.
    """
    active_session = kwargs.get('session') or get_session()
    return active_session.query(model, *args)
def add_identity_filter(query, value):
    """Adds an identity filter to a query.

    Filters results by ID when the supplied value is integer-like,
    otherwise by UUID when it is uuid-like.

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :return: Modified query.
    :raises: exception.InvalidIdentity when value is neither.
    """
    if strutils.is_int_like(value):
        return query.filter_by(id=value)
    if uuidutils.is_uuid_like(value):
        return query.filter_by(uuid=value)
    raise exception.InvalidIdentity(identity=value)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
                    sort_dir=None, query=None):
    """Apply marker-based pagination/sorting to *query* and return a list.

    'id' is always appended as a tie-breaker sort key. Raises
    InvalidParameterValue when sort_key is not a sortable model field.
    """
    if not query:
        query = model_query(model)
    sort_keys = ['id']
    if sort_key and sort_key not in sort_keys:
        sort_keys.insert(0, sort_key)
    try:
        query = db_utils.paginate_query(query, model, limit, sort_keys,
                                        marker=marker, sort_dir=sort_dir)
    except db_exc.InvalidSortKey:
        raise exception.InvalidParameterValue(
            _('The sort_key value "%(key)s" is an invalid field for sorting')
            % {'key': sort_key})
    return query.all()
class Connection(api.Connection):
"""SqlAlchemy connection."""
def __init__(self):
pass
def _add_tenant_filters(self, context, query):
if context.is_admin and context.all_tenants:
return query
admin_context = request_context.make_admin_context(all_tenants=True)
osc = clients.OpenStackClients(admin_context)
kst = osc.keystone()
# User in a regular project (not in the trustee domain)
if context.project_id and context.domain_id != kst.trustee_domain_id:
query = query.filter_by(project_id=context.project_id)
# Match project ID component in trustee user's user name against
# cluster's project_id to associate per-cluster trustee users who have
# no project information with the project their clusters/cluster models
# reside in. This is equivalent to the project filtering above.
elif context.domain_id == kst.trustee_domain_id:
user_name = kst.client.users.get(context.user_id).name
user_project = user_name.split('_', 2)[1]
query = query.filter_by(project_id=user_project)
else:
query = query.filter_by(user_id=context.user_id)
return query
def _add_clusters_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["cluster_template_id", "name", "node_count",
"master_count", "stack_id", "api_address",
"node_addresses", "project_id", "user_id"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
query = query.filter_by(**filter_dict)
if 'status' in filters:
query = query.filter(models.Cluster.status.in_(filters['status']))
return query
def get_cluster_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = self._add_clusters_filters(query, filters)
return _paginate_query(models.Cluster, limit, marker,
sort_key, sort_dir, query)
def create_cluster(self, values):
# ensure defaults are present for new clusters
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
cluster = models.Cluster()
cluster.update(values)
try:
cluster.save()
except db_exc.DBDuplicateEntry:
raise exception.ClusterAlreadyExists(uuid=values['uuid'])
return cluster
def get_cluster_by_id(self, context, cluster_id):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=cluster_id)
try:
return query.one()
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_id)
def get_cluster_by_name(self, context, cluster_name):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=cluster_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple clusters exist with same name.'
' Please use the cluster uuid instead.')
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_name)
def get_cluster_by_uuid(self, context, cluster_uuid):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=cluster_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_uuid)
def get_cluster_stats(self, context, project_id=None):
query = model_query(models.Cluster)
node_count_col = models.Cluster.node_count
master_count_col = models.Cluster.master_count
ncfunc = func.sum(node_count_col + master_count_col)
if project_id:
query = query.filter_by(project_id=project_id)
nquery = query.session.query(ncfunc.label("nodes")).filter_by(
project_id=project_id)
else:
nquery = query.session.query(ncfunc.label("nodes"))
clusters = query.count()
nodes = int(nquery.one()[0]) if nquery.one()[0] else 0
return clusters, nodes
def get_cluster_count_all(self, context, filters=None):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = self._add_clusters_filters(query, filters)
return query.count()
def destroy_cluster(self, cluster_id):
session = get_session()
with session.begin():
query = model_query(models.Cluster, session=session)
query = add_identity_filter(query, cluster_id)
try:
query.one()
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_id)
query.delete()
def update_cluster(self, cluster_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Cluster.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_cluster(cluster_id, values)
def _do_update_cluster(self, cluster_id, values):
session = get_session()
with session.begin():
query = model_query(models.Cluster, session=session)
query = add_identity_filter(query, cluster_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_id)
ref.update(values)
return ref
def _add_cluster_template_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["name", "image_id", "flavor_id",
"master_flavor_id", "keypair_id",
"external_network_id", "dns_nameserver",
"project_id", "user_id", "labels"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
return query.filter_by(**filter_dict)
def get_cluster_template_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.ClusterTemplate)
query = self._add_tenant_filters(context, query)
query = self._add_cluster_template_filters(query, filters)
# include public ClusterTemplates
public_q = model_query(models.ClusterTemplate).filter_by(public=True)
query = query.union(public_q)
return _paginate_query(models.ClusterTemplate, limit, marker,
sort_key, sort_dir, query)
def create_cluster_template(self, values):
# ensure defaults are present for new ClusterTemplates
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
cluster_template = models.ClusterTemplate()
cluster_template.update(values)
try:
cluster_template.save()
except db_exc.DBDuplicateEntry:
raise exception.ClusterTemplateAlreadyExists(uuid=values['uuid'])
return cluster_template
def get_cluster_template_by_id(self, context, cluster_template_id):
query = model_query(models.ClusterTemplate)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.ClusterTemplate).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(id=cluster_template_id)
try:
return query.one()
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_id)
def get_cluster_template_by_uuid(self, context, cluster_template_uuid):
query = model_query(models.ClusterTemplate)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.ClusterTemplate).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(uuid=cluster_template_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_uuid)
def get_cluster_template_by_name(self, context, cluster_template_name):
query = model_query(models.ClusterTemplate)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.ClusterTemplate).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(name=cluster_template_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple ClusterTemplates exist with'
' same name. Please use the '
'ClusterTemplate uuid instead.')
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_name)
def _is_cluster_template_referenced(self, session, cluster_template_uuid):
"""Checks whether the ClusterTemplate is referenced by cluster(s)."""
query = model_query(models.Cluster, session=session)
query = self._add_clusters_filters(query, {'cluster_template_id':
cluster_template_uuid})
return query.count() != 0
def _is_publishing_cluster_template(self, values):
if (len(values) == 1 and
'public' in values and values['public'] is True):
return True
return False
def destroy_cluster_template(self, cluster_template_id):
session = get_session()
with session.begin():
query = model_query(models.ClusterTemplate, session=session)
query = add_identity_filter(query, cluster_template_id)
try:
cluster_template_ref = query.one()
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_id)
if self._is_cluster_template_referenced(
session, cluster_template_ref['uuid']):
raise exception.ClusterTemplateReferenced(
clustertemplate=cluster_template_id)
query.delete()
def update_cluster_template(self, cluster_template_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing ClusterTemplate.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_cluster_template(cluster_template_id, values)
def _do_update_cluster_template(self, cluster_template_id, values):
session = get_session()
with session.begin():
query = model_query(models.ClusterTemplate, session=session)
query = add_identity_filter(query, cluster_template_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_id)
if self._is_cluster_template_referenced(session, ref['uuid']):
# we only allow to update ClusterTemplate to be public
if not self._is_publishing_cluster_template(values):
raise exception.ClusterTemplateReferenced(
clustertemplate=cluster_template_id)
ref.update(values)
return ref
def create_x509keypair(self, values):
# ensure defaults are present for new x509keypairs
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
x509keypair = models.X509KeyPair()
x509keypair.update(values)
try:
x509keypair.save()
except db_exc.DBDuplicateEntry:
raise exception.X509KeyPairAlreadyExists(uuid=values['uuid'])
return x509keypair
def get_x509keypair_by_id(self, context, x509keypair_id):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=x509keypair_id)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
def get_x509keypair_by_uuid(self, context, x509keypair_uuid):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=x509keypair_uuid)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid)
def destroy_x509keypair(self, x509keypair_id):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
count = query.delete()
if count != 1:
raise exception.X509KeyPairNotFound(x509keypair_id)
def update_x509keypair(self, x509keypair_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing X509KeyPair.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_x509keypair(x509keypair_id, values)
def _do_update_x509keypair(self, x509keypair_id, values):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
ref.update(values)
return ref
def _add_x509keypairs_filters(self, query, filters):
if filters is None:
filters = {}
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_x509keypair_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = self._add_x509keypairs_filters(query, filters)
return _paginate_query(models.X509KeyPair, limit, marker,
sort_key, sort_dir, query)
def destroy_magnum_service(self, magnum_service_id):
session = get_session()
with session.begin():
query = model_query(models.MagnumService, session=session)
query = add_identity_filter(query, magnum_service_id)
count = query.delete()
if count != 1:
raise exception.MagnumServiceNotFound(
magnum_service_id=magnum_service_id)
def update_magnum_service(self, magnum_service_id, values):
session = get_session()
with session.begin():
query = model_query(models.MagnumService, session=session)
query = add_identity_filter(query, magnum_service_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.MagnumServiceNotFound(
magnum_service_id=magnum_service_id)
if 'report_count' in values:
if values['report_count'] > ref.report_count:
ref.last_seen_up = timeutils.utcnow()
ref.update(values)
return ref
def get_magnum_service_by_host_and_binary(self, host, binary):
query = model_query(models.MagnumService)
query = query.filter_by(host=host, binary=binary)
try:
return query.one()
except NoResultFound:
return None
def create_magnum_service(self, values):
magnum_service = models.MagnumService()
magnum_service.update(values)
try:
magnum_service.save()
except db_exc.DBDuplicateEntry:
host = values["host"]
binary = values["binary"]
LOG.warning("Magnum service with same host:%(host)s and"
" binary:%(binary)s had been saved into DB",
{'host': host, 'binary': binary})
query = model_query(models.MagnumService)
query = query.filter_by(host=host, binary=binary)
return query.one()
return magnum_service
def get_magnum_service_list(self, disabled=None, limit=None,
marker=None, sort_key=None, sort_dir=None
):
query = model_query(models.MagnumService)
if disabled:
query = query.filter_by(disabled=disabled)
return _paginate_query(models.MagnumService, limit, marker,
sort_key, sort_dir, query)
def create_quota(self, values):
quotas = models.Quota()
quotas.update(values)
try:
quotas.save()
except db_exc.DBDuplicateEntry:
raise exception.QuotaAlreadyExists(project_id=values['project_id'],
resource=values['resource'])
return quotas
def _add_quota_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["resource", "project_id"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
query = query.filter_by(**filter_dict)
return query
def get_quota_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Quota)
query = self._add_quota_filters(query, filters)
return _paginate_query(models.Quota, limit, marker,
sort_key, sort_dir, query)
def update_quota(self, project_id, values):
session = get_session()
with session.begin():
query = model_query(models.Quota, session=session)
resource = values['resource']
try:
query = query.filter_by(project_id=project_id).filter_by(
resource=resource)
ref = query.with_lockmode('update').one()
except NoResultFound:
msg = (_('project_id %(project_id)s resource %(resource)s.') %
{'project_id': project_id, 'resource': resource})
raise exception.QuotaNotFound(msg=msg)
ref.update(values)
return ref
def delete_quota(self, project_id, resource):
session = get_session()
with session.begin():
query = model_query(models.Quota, session=session)
try:
query.filter_by(project_id=project_id).filter_by(
resource=resource).one()
except NoResultFound:
msg = (_('project_id %(project_id)s resource %(resource)s.') %
{'project_id': project_id, 'resource': resource})
raise exception.QuotaNotFound(msg=msg)
query.delete()
def get_quota_by_id(self, context, quota_id):
query = model_query(models.Quota)
query = query.filter_by(id=quota_id)
try:
return query.one()
except NoResultFound:
msg = _('quota id %s .') % quota_id
raise exception.QuotaNotFound(msg=msg)
def quota_get_all_by_project_id(self, project_id):
query = model_query(models.Quota)
result = query.filter_by(project_id=project_id).all()
return result
def get_quota_by_project_id_resource(self, project_id, resource):
query = model_query(models.Quota)
query = query.filter_by(project_id=project_id).filter_by(
resource=resource)
try:
return query.one()
except NoResultFound:
msg = (_('project_id %(project_id)s resource %(resource)s.') %
{'project_id': project_id, 'resource': resource})
raise exception.QuotaNotFound(msg=msg)
def _add_federation_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["name", "project_id", "hostcluster_id",
"member_ids", "properties"]
# TODO(clenimar): implement 'member_ids' filter as a contains query,
# so we return all the federations that have the given clusters,
# instead of all the federations that *only* have the exact given
# clusters.
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
query = query.filter_by(**filter_dict)
if 'status' in filters:
query = query.filter(
models.Federation.status.in_(filters['status']))
return query
def get_federation_by_id(self, context, federation_id):
query = model_query(models.Federation)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=federation_id)
try:
return query.one()
except NoResultFound:
raise exception.FederationNotFound(federation=federation_id)
def get_federation_by_uuid(self, context, federation_uuid):
query = model_query(models.Federation)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=federation_uuid)
try:
return query.one()
except NoResultFound:
raise exception.FederationNotFound(federation=federation_uuid)
def get_federation_by_name(self, context, federation_name):
query = model_query(models.Federation)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=federation_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple federations exist with same '
'name. Please use the federation uuid '
'instead.')
except NoResultFound:
raise exception.FederationNotFound(federation=federation_name)
def get_federation_list(self, context, limit=None, marker=None,
sort_key=None, sort_dir=None, filters=None):
query = model_query(models.Federation)
query = self._add_tenant_filters(context, query)
query = self._add_federation_filters(query, filters)
return _paginate_query(models.Federation, limit, marker,
sort_key, sort_dir, query)
def create_federation(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
federation = models.Federation()
federation.update(values)
try:
federation.save()
except db_exc.DBDuplicateEntry:
raise exception.FederationAlreadyExists(uuid=values['uuid'])
return federation
def destroy_federation(self, federation_id):
session = get_session()
with session.begin():
query = model_query(models.Federation, session=session)
query = add_identity_filter(query, federation_id)
try:
query.one()
except NoResultFound:
raise exception.FederationNotFound(federation=federation_id)
query.delete()
def update_federation(self, federation_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Federation.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_federation(federation_id, values)
    def _do_update_federation(self, federation_id, values):
        """Apply ``values`` to a federation row under a row-level lock.

        :param federation_id: integer id or UUID string of the federation.
        :param values: mapping of column names to new values.
        :returns: the updated Federation model row.
        :raises: FederationNotFound when no matching row exists.
        """
        session = get_session()
        with session.begin():
            query = model_query(models.Federation, session=session)
            query = add_identity_filter(query, federation_id)
            try:
                # SELECT ... FOR UPDATE so concurrent updaters serialize on
                # the row.  NOTE(review): with_lockmode() is deprecated in
                # modern SQLAlchemy in favour of with_for_update() --
                # confirm the pinned SQLAlchemy version before migrating.
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.FederationNotFound(federation=federation_id)

            ref.update(values)
        return ref
| 38.654305 | 79 | 0.636273 |
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import func
from magnum.common import clients
from magnum.common import context as request_context
from magnum.common import exception
import magnum.conf
from magnum.db import api
from magnum.db.sqlalchemy import models
from magnum.i18n import _
profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
CONF = magnum.conf.CONF
LOG = log.getLogger(__name__)
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(CONF)
if profiler_sqlalchemy:
if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy:
profiler_sqlalchemy.add_tracing(sa, _FACADE.get_engine(), "db")
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_backend():
return Connection()
def model_query(model, *args, **kwargs):
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
def add_identity_filter(query, value):
if strutils.is_int_like(value):
return query.filter_by(id=value)
elif uuidutils.is_uuid_like(value):
return query.filter_by(uuid=value)
else:
raise exception.InvalidIdentity(identity=value)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
sort_dir=None, query=None):
if not query:
query = model_query(model)
sort_keys = ['id']
if sort_key and sort_key not in sort_keys:
sort_keys.insert(0, sort_key)
try:
query = db_utils.paginate_query(query, model, limit, sort_keys,
marker=marker, sort_dir=sort_dir)
except db_exc.InvalidSortKey:
raise exception.InvalidParameterValue(
_('The sort_key value "%(key)s" is an invalid field for sorting')
% {'key': sort_key})
return query.all()
class Connection(api.Connection):
def __init__(self):
pass
def _add_tenant_filters(self, context, query):
if context.is_admin and context.all_tenants:
return query
admin_context = request_context.make_admin_context(all_tenants=True)
osc = clients.OpenStackClients(admin_context)
kst = osc.keystone()
if context.project_id and context.domain_id != kst.trustee_domain_id:
query = query.filter_by(project_id=context.project_id)
# cluster's project_id to associate per-cluster trustee users who have
elif context.domain_id == kst.trustee_domain_id:
user_name = kst.client.users.get(context.user_id).name
user_project = user_name.split('_', 2)[1]
query = query.filter_by(project_id=user_project)
else:
query = query.filter_by(user_id=context.user_id)
return query
def _add_clusters_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["cluster_template_id", "name", "node_count",
"master_count", "stack_id", "api_address",
"node_addresses", "project_id", "user_id"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
query = query.filter_by(**filter_dict)
if 'status' in filters:
query = query.filter(models.Cluster.status.in_(filters['status']))
return query
def get_cluster_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = self._add_clusters_filters(query, filters)
return _paginate_query(models.Cluster, limit, marker,
sort_key, sort_dir, query)
def create_cluster(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
cluster = models.Cluster()
cluster.update(values)
try:
cluster.save()
except db_exc.DBDuplicateEntry:
raise exception.ClusterAlreadyExists(uuid=values['uuid'])
return cluster
def get_cluster_by_id(self, context, cluster_id):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=cluster_id)
try:
return query.one()
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_id)
def get_cluster_by_name(self, context, cluster_name):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=cluster_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple clusters exist with same name.'
' Please use the cluster uuid instead.')
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_name)
def get_cluster_by_uuid(self, context, cluster_uuid):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=cluster_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_uuid)
def get_cluster_stats(self, context, project_id=None):
query = model_query(models.Cluster)
node_count_col = models.Cluster.node_count
master_count_col = models.Cluster.master_count
ncfunc = func.sum(node_count_col + master_count_col)
if project_id:
query = query.filter_by(project_id=project_id)
nquery = query.session.query(ncfunc.label("nodes")).filter_by(
project_id=project_id)
else:
nquery = query.session.query(ncfunc.label("nodes"))
clusters = query.count()
nodes = int(nquery.one()[0]) if nquery.one()[0] else 0
return clusters, nodes
def get_cluster_count_all(self, context, filters=None):
query = model_query(models.Cluster)
query = self._add_tenant_filters(context, query)
query = self._add_clusters_filters(query, filters)
return query.count()
def destroy_cluster(self, cluster_id):
session = get_session()
with session.begin():
query = model_query(models.Cluster, session=session)
query = add_identity_filter(query, cluster_id)
try:
query.one()
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_id)
query.delete()
def update_cluster(self, cluster_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Cluster.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_cluster(cluster_id, values)
def _do_update_cluster(self, cluster_id, values):
session = get_session()
with session.begin():
query = model_query(models.Cluster, session=session)
query = add_identity_filter(query, cluster_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ClusterNotFound(cluster=cluster_id)
ref.update(values)
return ref
def _add_cluster_template_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["name", "image_id", "flavor_id",
"master_flavor_id", "keypair_id",
"external_network_id", "dns_nameserver",
"project_id", "user_id", "labels"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
return query.filter_by(**filter_dict)
def get_cluster_template_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.ClusterTemplate)
query = self._add_tenant_filters(context, query)
query = self._add_cluster_template_filters(query, filters)
public_q = model_query(models.ClusterTemplate).filter_by(public=True)
query = query.union(public_q)
return _paginate_query(models.ClusterTemplate, limit, marker,
sort_key, sort_dir, query)
def create_cluster_template(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
cluster_template = models.ClusterTemplate()
cluster_template.update(values)
try:
cluster_template.save()
except db_exc.DBDuplicateEntry:
raise exception.ClusterTemplateAlreadyExists(uuid=values['uuid'])
return cluster_template
def get_cluster_template_by_id(self, context, cluster_template_id):
query = model_query(models.ClusterTemplate)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.ClusterTemplate).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(id=cluster_template_id)
try:
return query.one()
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_id)
def get_cluster_template_by_uuid(self, context, cluster_template_uuid):
query = model_query(models.ClusterTemplate)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.ClusterTemplate).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(uuid=cluster_template_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_uuid)
def get_cluster_template_by_name(self, context, cluster_template_name):
query = model_query(models.ClusterTemplate)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.ClusterTemplate).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(name=cluster_template_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple ClusterTemplates exist with'
' same name. Please use the '
'ClusterTemplate uuid instead.')
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_name)
def _is_cluster_template_referenced(self, session, cluster_template_uuid):
query = model_query(models.Cluster, session=session)
query = self._add_clusters_filters(query, {'cluster_template_id':
cluster_template_uuid})
return query.count() != 0
def _is_publishing_cluster_template(self, values):
if (len(values) == 1 and
'public' in values and values['public'] is True):
return True
return False
def destroy_cluster_template(self, cluster_template_id):
session = get_session()
with session.begin():
query = model_query(models.ClusterTemplate, session=session)
query = add_identity_filter(query, cluster_template_id)
try:
cluster_template_ref = query.one()
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_id)
if self._is_cluster_template_referenced(
session, cluster_template_ref['uuid']):
raise exception.ClusterTemplateReferenced(
clustertemplate=cluster_template_id)
query.delete()
def update_cluster_template(self, cluster_template_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing ClusterTemplate.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_cluster_template(cluster_template_id, values)
def _do_update_cluster_template(self, cluster_template_id, values):
session = get_session()
with session.begin():
query = model_query(models.ClusterTemplate, session=session)
query = add_identity_filter(query, cluster_template_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ClusterTemplateNotFound(
clustertemplate=cluster_template_id)
if self._is_cluster_template_referenced(session, ref['uuid']):
if not self._is_publishing_cluster_template(values):
raise exception.ClusterTemplateReferenced(
clustertemplate=cluster_template_id)
ref.update(values)
return ref
def create_x509keypair(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
x509keypair = models.X509KeyPair()
x509keypair.update(values)
try:
x509keypair.save()
except db_exc.DBDuplicateEntry:
raise exception.X509KeyPairAlreadyExists(uuid=values['uuid'])
return x509keypair
def get_x509keypair_by_id(self, context, x509keypair_id):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=x509keypair_id)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
def get_x509keypair_by_uuid(self, context, x509keypair_uuid):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=x509keypair_uuid)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid)
def destroy_x509keypair(self, x509keypair_id):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
count = query.delete()
if count != 1:
raise exception.X509KeyPairNotFound(x509keypair_id)
def update_x509keypair(self, x509keypair_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing X509KeyPair.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_x509keypair(x509keypair_id, values)
def _do_update_x509keypair(self, x509keypair_id, values):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
ref.update(values)
return ref
def _add_x509keypairs_filters(self, query, filters):
if filters is None:
filters = {}
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_x509keypair_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = self._add_x509keypairs_filters(query, filters)
return _paginate_query(models.X509KeyPair, limit, marker,
sort_key, sort_dir, query)
def destroy_magnum_service(self, magnum_service_id):
session = get_session()
with session.begin():
query = model_query(models.MagnumService, session=session)
query = add_identity_filter(query, magnum_service_id)
count = query.delete()
if count != 1:
raise exception.MagnumServiceNotFound(
magnum_service_id=magnum_service_id)
def update_magnum_service(self, magnum_service_id, values):
session = get_session()
with session.begin():
query = model_query(models.MagnumService, session=session)
query = add_identity_filter(query, magnum_service_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.MagnumServiceNotFound(
magnum_service_id=magnum_service_id)
if 'report_count' in values:
if values['report_count'] > ref.report_count:
ref.last_seen_up = timeutils.utcnow()
ref.update(values)
return ref
def get_magnum_service_by_host_and_binary(self, host, binary):
query = model_query(models.MagnumService)
query = query.filter_by(host=host, binary=binary)
try:
return query.one()
except NoResultFound:
return None
def create_magnum_service(self, values):
magnum_service = models.MagnumService()
magnum_service.update(values)
try:
magnum_service.save()
except db_exc.DBDuplicateEntry:
host = values["host"]
binary = values["binary"]
LOG.warning("Magnum service with same host:%(host)s and"
" binary:%(binary)s had been saved into DB",
{'host': host, 'binary': binary})
query = model_query(models.MagnumService)
query = query.filter_by(host=host, binary=binary)
return query.one()
return magnum_service
def get_magnum_service_list(self, disabled=None, limit=None,
marker=None, sort_key=None, sort_dir=None
):
query = model_query(models.MagnumService)
if disabled:
query = query.filter_by(disabled=disabled)
return _paginate_query(models.MagnumService, limit, marker,
sort_key, sort_dir, query)
def create_quota(self, values):
quotas = models.Quota()
quotas.update(values)
try:
quotas.save()
except db_exc.DBDuplicateEntry:
raise exception.QuotaAlreadyExists(project_id=values['project_id'],
resource=values['resource'])
return quotas
def _add_quota_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["resource", "project_id"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
query = query.filter_by(**filter_dict)
return query
def get_quota_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Quota)
query = self._add_quota_filters(query, filters)
return _paginate_query(models.Quota, limit, marker,
sort_key, sort_dir, query)
def update_quota(self, project_id, values):
session = get_session()
with session.begin():
query = model_query(models.Quota, session=session)
resource = values['resource']
try:
query = query.filter_by(project_id=project_id).filter_by(
resource=resource)
ref = query.with_lockmode('update').one()
except NoResultFound:
msg = (_('project_id %(project_id)s resource %(resource)s.') %
{'project_id': project_id, 'resource': resource})
raise exception.QuotaNotFound(msg=msg)
ref.update(values)
return ref
def delete_quota(self, project_id, resource):
session = get_session()
with session.begin():
query = model_query(models.Quota, session=session)
try:
query.filter_by(project_id=project_id).filter_by(
resource=resource).one()
except NoResultFound:
msg = (_('project_id %(project_id)s resource %(resource)s.') %
{'project_id': project_id, 'resource': resource})
raise exception.QuotaNotFound(msg=msg)
query.delete()
def get_quota_by_id(self, context, quota_id):
query = model_query(models.Quota)
query = query.filter_by(id=quota_id)
try:
return query.one()
except NoResultFound:
msg = _('quota id %s .') % quota_id
raise exception.QuotaNotFound(msg=msg)
def quota_get_all_by_project_id(self, project_id):
query = model_query(models.Quota)
result = query.filter_by(project_id=project_id).all()
return result
def get_quota_by_project_id_resource(self, project_id, resource):
query = model_query(models.Quota)
query = query.filter_by(project_id=project_id).filter_by(
resource=resource)
try:
return query.one()
except NoResultFound:
msg = (_('project_id %(project_id)s resource %(resource)s.') %
{'project_id': project_id, 'resource': resource})
raise exception.QuotaNotFound(msg=msg)
def _add_federation_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["name", "project_id", "hostcluster_id",
"member_ids", "properties"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
query = query.filter_by(**filter_dict)
if 'status' in filters:
query = query.filter(
models.Federation.status.in_(filters['status']))
return query
def get_federation_by_id(self, context, federation_id):
query = model_query(models.Federation)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=federation_id)
try:
return query.one()
except NoResultFound:
raise exception.FederationNotFound(federation=federation_id)
def get_federation_by_uuid(self, context, federation_uuid):
query = model_query(models.Federation)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=federation_uuid)
try:
return query.one()
except NoResultFound:
raise exception.FederationNotFound(federation=federation_uuid)
def get_federation_by_name(self, context, federation_name):
query = model_query(models.Federation)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=federation_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple federations exist with same '
'name. Please use the federation uuid '
'instead.')
except NoResultFound:
raise exception.FederationNotFound(federation=federation_name)
def get_federation_list(self, context, limit=None, marker=None,
sort_key=None, sort_dir=None, filters=None):
query = model_query(models.Federation)
query = self._add_tenant_filters(context, query)
query = self._add_federation_filters(query, filters)
return _paginate_query(models.Federation, limit, marker,
sort_key, sort_dir, query)
def create_federation(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
federation = models.Federation()
federation.update(values)
try:
federation.save()
except db_exc.DBDuplicateEntry:
raise exception.FederationAlreadyExists(uuid=values['uuid'])
return federation
def destroy_federation(self, federation_id):
session = get_session()
with session.begin():
query = model_query(models.Federation, session=session)
query = add_identity_filter(query, federation_id)
try:
query.one()
except NoResultFound:
raise exception.FederationNotFound(federation=federation_id)
query.delete()
def update_federation(self, federation_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Federation.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_federation(federation_id, values)
def _do_update_federation(self, federation_id, values):
session = get_session()
with session.begin():
query = model_query(models.Federation, session=session)
query = add_identity_filter(query, federation_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.FederationNotFound(federation=federation_id)
ref.update(values)
return ref
| true | true |
1c4823f0ebe1ed95a32f25d097316665a3c42efe | 11,938 | py | Python | official/nlp/modeling/networks/bert_encoder.py | 62theories/tf-flask | c6954f0f3c4082165c92c77bb06d2fec6e75a8c4 | [
"Apache-2.0"
] | 82,518 | 2016-02-05T12:07:23.000Z | 2022-03-31T23:09:47.000Z | official/nlp/modeling/networks/bert_encoder.py | 62theories/tf-flask | c6954f0f3c4082165c92c77bb06d2fec6e75a8c4 | [
"Apache-2.0"
] | 9,021 | 2016-03-08T01:02:05.000Z | 2022-03-31T08:06:35.000Z | official/nlp/modeling/networks/bert_encoder.py | 62theories/tf-flask | c6954f0f3c4082165c92c77bb06d2fec6e75a8c4 | [
"Apache-2.0"
] | 54,341 | 2016-02-06T17:19:55.000Z | 2022-03-31T10:27:44.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based BERT encoder network."""
# pylint: disable=g-classes-have-attributes
from absl import logging
import tensorflow as tf
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class BertEncoder(tf.keras.Model):
  """Bi-directional Transformer-based encoder network.

  This network implements a bi-directional Transformer-based encoder as
  described in "BERT: Pre-training of Deep Bidirectional Transformers for
  Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the
  embedding lookups and transformer layers, but not the masked language model
  or classification task networks.

  The default values for this object are taken from the BERT-Base implementation
  in "BERT: Pre-training of Deep Bidirectional Transformers for Language
  Understanding".

  *Note* that the network is constructed by
  [Keras Functional API](https://keras.io/guides/functional_api/).

  Args:
    vocab_size: The size of the token vocabulary.
    hidden_size: The size of the transformer hidden layers.
    num_layers: The number of transformer layers.
    num_attention_heads: The number of attention heads for each transformer. The
      hidden size must be divisible by the number of attention heads.
    max_sequence_length: The maximum sequence length that this encoder can
      consume. If None, max_sequence_length uses the value from sequence length.
      This determines the variable shape for positional embeddings.
    type_vocab_size: The number of types that the 'type_ids' input can take.
    inner_dim: The output dimension of the first Dense layer in a two-layer
      feedforward network for each transformer.
    inner_activation: The activation for the first Dense layer in a two-layer
      feedforward network for each transformer.
    output_dropout: Dropout probability for the post-attention and output
      dropout.
    attention_dropout: The dropout rate to use for the attention layers
      within the transformer layers.
    initializer: The initialzer to use for all weights in this encoder.
    output_range: The sequence output range, [0, output_range), by slicing the
      target sequence of the last transformer layer. `None` means the entire
      target sequence will attend to the source sequence, which yields the full
      output.
    embedding_width: The width of the word embeddings. If the embedding width is
      not equal to hidden size, embedding parameters will be factorized into two
      matrices in the shape of ['vocab_size', 'embedding_width'] and
      ['embedding_width', 'hidden_size'] ('embedding_width' is usually much
      smaller than 'hidden_size').
    embedding_layer: An optional Layer instance which will be called to
      generate embeddings for the input word IDs.
    norm_first: Whether to normalize inputs to attention and intermediate
      dense layers. If set False, output of attention and intermediate dense
      layers is normalized.
    dict_outputs: Whether to use a dictionary as the model outputs.
    return_all_encoder_outputs: Whether to output sequence embedding outputs of
      all encoder transformer layers. Note: when the following `dict_outputs`
      argument is True, all encoder outputs are always returned in the dict,
      keyed by `encoder_outputs`.
  """

  def __init__(
      self,
      vocab_size,
      hidden_size=768,
      num_layers=12,
      num_attention_heads=12,
      max_sequence_length=512,
      type_vocab_size=16,
      inner_dim=3072,
      inner_activation=lambda x: tf.keras.activations.gelu(x, approximate=True),
      output_dropout=0.1,
      attention_dropout=0.1,
      initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
      output_range=None,
      embedding_width=None,
      embedding_layer=None,
      norm_first=False,
      dict_outputs=False,
      return_all_encoder_outputs=False,
      **kwargs):
    if 'sequence_length' in kwargs:
      kwargs.pop('sequence_length')
      logging.warning('`sequence_length` is a deprecated argument to '
                      '`BertEncoder`, which has no effect for a while. Please '
                      'remove `sequence_length` argument.')

    # Handles backward compatible kwargs.
    if 'intermediate_size' in kwargs:
      inner_dim = kwargs.pop('intermediate_size')

    if 'activation' in kwargs:
      inner_activation = kwargs.pop('activation')

    if 'dropout_rate' in kwargs:
      output_dropout = kwargs.pop('dropout_rate')

    if 'attention_dropout_rate' in kwargs:
      attention_dropout = kwargs.pop('attention_dropout_rate')

    activation = tf.keras.activations.get(inner_activation)
    initializer = tf.keras.initializers.get(initializer)

    word_ids = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_word_ids')
    mask = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_mask')
    type_ids = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_type_ids')

    if embedding_width is None:
      embedding_width = hidden_size

    if embedding_layer is None:
      embedding_layer_inst = layers.OnDeviceEmbedding(
          vocab_size=vocab_size,
          embedding_width=embedding_width,
          initializer=initializer,
          name='word_embeddings')
    else:
      embedding_layer_inst = embedding_layer
    word_embeddings = embedding_layer_inst(word_ids)

    # Always uses dynamic slicing for simplicity.
    position_embedding_layer = layers.PositionEmbedding(
        initializer=initializer,
        max_length=max_sequence_length,
        name='position_embedding')
    position_embeddings = position_embedding_layer(word_embeddings)
    type_embedding_layer = layers.OnDeviceEmbedding(
        vocab_size=type_vocab_size,
        embedding_width=embedding_width,
        initializer=initializer,
        use_one_hot=True,
        name='type_embeddings')
    type_embeddings = type_embedding_layer(type_ids)

    embeddings = tf.keras.layers.Add()(
        [word_embeddings, position_embeddings, type_embeddings])

    embedding_norm_layer = tf.keras.layers.LayerNormalization(
        name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)

    embeddings = embedding_norm_layer(embeddings)
    embeddings = (tf.keras.layers.Dropout(rate=output_dropout)(embeddings))

    # We project the 'embedding' output to 'hidden_size' if it is not already
    # 'hidden_size'.
    if embedding_width != hidden_size:
      embedding_projection = tf.keras.layers.experimental.EinsumDense(
          '...x,xy->...y',
          output_shape=hidden_size,
          bias_axes='y',
          kernel_initializer=initializer,
          name='embedding_projection')
      embeddings = embedding_projection(embeddings)
    else:
      embedding_projection = None

    transformer_layers = []
    data = embeddings
    attention_mask = layers.SelfAttentionMask()(data, mask)
    encoder_outputs = []
    for i in range(num_layers):
      # Only the final layer is allowed to truncate its target sequence.
      if i == num_layers - 1 and output_range is not None:
        transformer_output_range = output_range
      else:
        transformer_output_range = None
      layer = layers.TransformerEncoderBlock(
          num_attention_heads=num_attention_heads,
          inner_dim=inner_dim,
          inner_activation=inner_activation,
          output_dropout=output_dropout,
          attention_dropout=attention_dropout,
          norm_first=norm_first,
          output_range=transformer_output_range,
          kernel_initializer=initializer,
          name='transformer/layer_%d' % i)
      transformer_layers.append(layer)
      data = layer([data, attention_mask])
      encoder_outputs.append(data)

    last_encoder_output = encoder_outputs[-1]
    # Applying a tf.slice op (through subscript notation) to a Keras tensor
    # like this will create a SliceOpLambda layer. This is better than a Lambda
    # layer with Python code, because that is fundamentally less portable.
    first_token_tensor = last_encoder_output[:, 0, :]
    pooler_layer = tf.keras.layers.Dense(
        units=hidden_size,
        activation='tanh',
        kernel_initializer=initializer,
        name='pooler_transform')
    cls_output = pooler_layer(first_token_tensor)

    outputs = dict(
        sequence_output=encoder_outputs[-1],
        pooled_output=cls_output,
        encoder_outputs=encoder_outputs,
    )

    if dict_outputs:
      super().__init__(
          inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
    else:
      cls_output = outputs['pooled_output']
      if return_all_encoder_outputs:
        encoder_outputs = outputs['encoder_outputs']
        outputs = [encoder_outputs, cls_output]
      else:
        sequence_output = outputs['sequence_output']
        outputs = [sequence_output, cls_output]
      super().__init__(  # pylint: disable=bad-super-call
          inputs=[word_ids, mask, type_ids],
          outputs=outputs,
          **kwargs)

    self._pooler_layer = pooler_layer
    self._transformer_layers = transformer_layers
    self._embedding_norm_layer = embedding_norm_layer
    self._embedding_layer = embedding_layer_inst
    self._position_embedding_layer = position_embedding_layer
    self._type_embedding_layer = type_embedding_layer
    if embedding_projection is not None:
      self._embedding_projection = embedding_projection

    config_dict = {
        'vocab_size': vocab_size,
        'hidden_size': hidden_size,
        'num_layers': num_layers,
        'num_attention_heads': num_attention_heads,
        'max_sequence_length': max_sequence_length,
        'type_vocab_size': type_vocab_size,
        'inner_dim': inner_dim,
        'inner_activation': tf.keras.activations.serialize(activation),
        'output_dropout': output_dropout,
        'attention_dropout': attention_dropout,
        'initializer': tf.keras.initializers.serialize(initializer),
        'output_range': output_range,
        'embedding_width': embedding_width,
        'embedding_layer': embedding_layer,
        'norm_first': norm_first,
        'dict_outputs': dict_outputs,
        # Serialize `return_all_encoder_outputs` too; omitting it meant a
        # model built with it set to True silently reloaded with the
        # default (False) after a get_config/from_config round trip,
        # changing the output structure.
        'return_all_encoder_outputs': return_all_encoder_outputs,
    }

    # pylint: disable=protected-access
    self._setattr_tracking = False
    self._config = config_dict
    self._setattr_tracking = True
    # pylint: enable=protected-access

  def get_embedding_table(self):
    """Returns the embedding matrix of the word-embedding layer."""
    return self._embedding_layer.embeddings

  def get_embedding_layer(self):
    """Returns the word-embedding layer instance."""
    return self._embedding_layer

  def get_config(self):
    return self._config

  @property
  def transformer_layers(self):
    """List of Transformer layers in the encoder."""
    return self._transformer_layers

  @property
  def pooler_layer(self):
    """The pooler dense layer after the transformer layers."""
    return self._pooler_layer

  @classmethod
  def from_config(cls, config, custom_objects=None):
    if 'embedding_layer' in config and config['embedding_layer'] is not None:
      warn_string = (
          'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you contine to '
          'train this model, the embedding layer will no longer be shared. '
          'To work around this, load the model outside of the Keras API.')
      print('WARNING: ' + warn_string)
      # logging.warn is a deprecated alias (removed in Python 3.13); use
      # logging.warning.
      logging.warning(warn_string)
    return cls(**config)
| 39.793333 | 80 | 0.714609 |
from absl import logging
import tensorflow as tf
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class BertEncoder(tf.keras.Model):
def __init__(
self,
vocab_size,
hidden_size=768,
num_layers=12,
num_attention_heads=12,
max_sequence_length=512,
type_vocab_size=16,
inner_dim=3072,
inner_activation=lambda x: tf.keras.activations.gelu(x, approximate=True),
output_dropout=0.1,
attention_dropout=0.1,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
output_range=None,
embedding_width=None,
embedding_layer=None,
norm_first=False,
dict_outputs=False,
return_all_encoder_outputs=False,
**kwargs):
if 'sequence_length' in kwargs:
kwargs.pop('sequence_length')
logging.warning('`sequence_length` is a deprecated argument to '
'`BertEncoder`, which has no effect for a while. Please '
'remove `sequence_length` argument.')
if 'intermediate_size' in kwargs:
inner_dim = kwargs.pop('intermediate_size')
if 'activation' in kwargs:
inner_activation = kwargs.pop('activation')
if 'dropout_rate' in kwargs:
output_dropout = kwargs.pop('dropout_rate')
if 'attention_dropout_rate' in kwargs:
attention_dropout = kwargs.pop('attention_dropout_rate')
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
embedding_layer_inst = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings')
else:
embedding_layer_inst = embedding_layer
word_embeddings = embedding_layer_inst(word_ids)
position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding')
position_embeddings = position_embedding_layer(word_embeddings)
type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings')
type_embeddings = type_embedding_layer(type_ids)
embeddings = tf.keras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
embeddings = embedding_norm_layer(embeddings)
embeddings = (tf.keras.layers.Dropout(rate=output_dropout)(embeddings))
if embedding_width != hidden_size:
embedding_projection = tf.keras.layers.experimental.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection')
embeddings = embedding_projection(embeddings)
else:
embedding_projection = None
transformer_layers = []
data = embeddings
attention_mask = layers.SelfAttentionMask()(data, mask)
encoder_outputs = []
for i in range(num_layers):
if i == num_layers - 1 and output_range is not None:
transformer_output_range = output_range
else:
transformer_output_range = None
layer = layers.TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
output_range=transformer_output_range,
kernel_initializer=initializer,
name='transformer/layer_%d' % i)
transformer_layers.append(layer)
data = layer([data, attention_mask])
encoder_outputs.append(data)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
name='pooler_transform')
cls_output = pooler_layer(first_token_tensor)
outputs = dict(
sequence_output=encoder_outputs[-1],
pooled_output=cls_output,
encoder_outputs=encoder_outputs,
)
if dict_outputs:
super().__init__(
inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
else:
cls_output = outputs['pooled_output']
if return_all_encoder_outputs:
encoder_outputs = outputs['encoder_outputs']
outputs = [encoder_outputs, cls_output]
else:
sequence_output = outputs['sequence_output']
outputs = [sequence_output, cls_output]
super().__init__(
inputs=[word_ids, mask, type_ids],
outputs=outputs,
**kwargs)
self._pooler_layer = pooler_layer
self._transformer_layers = transformer_layers
self._embedding_norm_layer = embedding_norm_layer
self._embedding_layer = embedding_layer_inst
self._position_embedding_layer = position_embedding_layer
self._type_embedding_layer = type_embedding_layer
if embedding_projection is not None:
self._embedding_projection = embedding_projection
config_dict = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'dict_outputs': dict_outputs,
}
self._setattr_tracking = False
self._config = config_dict
self._setattr_tracking = True
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return self._config
@property
def transformer_layers(self):
return self._transformer_layers
@property
def pooler_layer(self):
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
'potentially-shared embedding layer object. If you contine to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
| true | true |
1c4824ef58155a24d3da7e3337113da8189f354b | 1,975 | py | Python | day03/main.py | thetwoj/advent-of-code-2021 | 87a918e1f8973e3a9e5238248043ec27338939de | [
"MIT"
] | null | null | null | day03/main.py | thetwoj/advent-of-code-2021 | 87a918e1f8973e3a9e5238248043ec27338939de | [
"MIT"
] | null | null | null | day03/main.py | thetwoj/advent-of-code-2021 | 87a918e1f8973e3a9e5238248043ec27338939de | [
"MIT"
] | null | null | null | def get_input(filename):
data = []
with open(filename, 'r') as i:
for x in i.readlines():
data.append(x.strip())
return data
def find_gamma_epsilon(report):
digit_counts = {}
binary_gamma = ""
for line in report:
for index, letter in enumerate(line):
if index in digit_counts:
digit_counts[index][letter] += 1
else:
digit_counts[index] = {"0": 0, "1": 0}
digit_counts[index][letter] += 1
for key in digit_counts:
if digit_counts[key]["0"] > digit_counts[key]["1"]:
binary_gamma += "0"
else:
binary_gamma += "1"
binary_epsilon = ""
for digit in binary_gamma:
if digit == "0":
binary_epsilon += "1"
else:
binary_epsilon += "0"
gamma = int(binary_gamma, 2)
epsilon = int(binary_epsilon, 2)
return gamma, epsilon
def find_oxygen(report):
for x in range(len(report[0])):
tracker = {"0": [], "1": []}
for line in report:
tracker[line[x]].append(line)
if len(tracker["0"]) > len(tracker["1"]):
report = tracker["0"]
else:
report = tracker["1"]
if len(report) == 1:
return int(report[0], 2)
def find_co2(report):
for x in range(len(report[0])):
tracker = {"0": [], "1": []}
for line in report:
tracker[line[x]].append(line)
if len(tracker["0"]) <= len(tracker["1"]):
report = tracker["0"]
else:
report = tracker["1"]
if len(report) == 1:
return int(report[0], 2)
def main():
report = get_input("input")
print("Part 1:")
gamma, epsilon = find_gamma_epsilon(report)
print(gamma * epsilon)
print()
print("Part 2:")
oxygen = find_oxygen(report)
co2 = find_co2(report)
print(oxygen * co2)
if __name__ == "__main__":
main()
| 25.320513 | 59 | 0.517975 | def get_input(filename):
data = []
with open(filename, 'r') as i:
for x in i.readlines():
data.append(x.strip())
return data
def find_gamma_epsilon(report):
digit_counts = {}
binary_gamma = ""
for line in report:
for index, letter in enumerate(line):
if index in digit_counts:
digit_counts[index][letter] += 1
else:
digit_counts[index] = {"0": 0, "1": 0}
digit_counts[index][letter] += 1
for key in digit_counts:
if digit_counts[key]["0"] > digit_counts[key]["1"]:
binary_gamma += "0"
else:
binary_gamma += "1"
binary_epsilon = ""
for digit in binary_gamma:
if digit == "0":
binary_epsilon += "1"
else:
binary_epsilon += "0"
gamma = int(binary_gamma, 2)
epsilon = int(binary_epsilon, 2)
return gamma, epsilon
def find_oxygen(report):
for x in range(len(report[0])):
tracker = {"0": [], "1": []}
for line in report:
tracker[line[x]].append(line)
if len(tracker["0"]) > len(tracker["1"]):
report = tracker["0"]
else:
report = tracker["1"]
if len(report) == 1:
return int(report[0], 2)
def find_co2(report):
for x in range(len(report[0])):
tracker = {"0": [], "1": []}
for line in report:
tracker[line[x]].append(line)
if len(tracker["0"]) <= len(tracker["1"]):
report = tracker["0"]
else:
report = tracker["1"]
if len(report) == 1:
return int(report[0], 2)
def main():
report = get_input("input")
print("Part 1:")
gamma, epsilon = find_gamma_epsilon(report)
print(gamma * epsilon)
print()
print("Part 2:")
oxygen = find_oxygen(report)
co2 = find_co2(report)
print(oxygen * co2)
if __name__ == "__main__":
main()
| true | true |
1c4825ac328df0f6c85d36f015aea32324491bf9 | 2,660 | py | Python | pysnmp/MWORKS-E-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/MWORKS-E-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/MWORKS-E-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module MWORKS-E-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MWORKS-E-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:06:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Integer32, enterprises, Bits, Unsigned32, iso, Counter64, ModuleIdentity, IpAddress, Counter32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "enterprises", "Bits", "Unsigned32", "iso", "Counter64", "ModuleIdentity", "IpAddress", "Counter32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Gauge32", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tecElite = MibIdentifier((1, 3, 6, 1, 4, 1, 217))
mworkse = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 17))
am501 = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 17, 1))
amMem = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 17, 1, 1))
amHeap = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 17, 1, 2))
amMemCeiling = MibScalar((1, 3, 6, 1, 4, 1, 217, 17, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amMemCeiling.setStatus('mandatory')
amMemUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 17, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amMemUsed.setStatus('mandatory')
amHeapTotal = MibScalar((1, 3, 6, 1, 4, 1, 217, 17, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amHeapTotal.setStatus('mandatory')
amHeapUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 17, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amHeapUsed.setStatus('mandatory')
mibBuilder.exportSymbols("MWORKS-E-MIB", amHeapTotal=amHeapTotal, mworkse=mworkse, amHeapUsed=amHeapUsed, am501=am501, amMem=amMem, amMemCeiling=amMemCeiling, amMemUsed=amMemUsed, tecElite=tecElite, amHeap=amHeap)
| 95 | 505 | 0.749624 |
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Integer32, enterprises, Bits, Unsigned32, iso, Counter64, ModuleIdentity, IpAddress, Counter32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "enterprises", "Bits", "Unsigned32", "iso", "Counter64", "ModuleIdentity", "IpAddress", "Counter32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Gauge32", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tecElite = MibIdentifier((1, 3, 6, 1, 4, 1, 217))
mworkse = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 17))
am501 = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 17, 1))
amMem = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 17, 1, 1))
amHeap = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 17, 1, 2))
amMemCeiling = MibScalar((1, 3, 6, 1, 4, 1, 217, 17, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amMemCeiling.setStatus('mandatory')
amMemUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 17, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amMemUsed.setStatus('mandatory')
amHeapTotal = MibScalar((1, 3, 6, 1, 4, 1, 217, 17, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amHeapTotal.setStatus('mandatory')
amHeapUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 17, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amHeapUsed.setStatus('mandatory')
mibBuilder.exportSymbols("MWORKS-E-MIB", amHeapTotal=amHeapTotal, mworkse=mworkse, amHeapUsed=amHeapUsed, am501=am501, amMem=amMem, amMemCeiling=amMemCeiling, amMemUsed=amMemUsed, tecElite=tecElite, amHeap=amHeap)
| true | true |
1c48263cc96b9da7e221dfd293fac9cea7534f3f | 351 | py | Python | apps/authentication/migrations/0018_merge.py | kharann/onlineweb4 | 1130128c6233b623780779a25934ea73ef62c264 | [
"MIT"
] | null | null | null | apps/authentication/migrations/0018_merge.py | kharann/onlineweb4 | 1130128c6233b623780779a25934ea73ef62c264 | [
"MIT"
] | null | null | null | apps/authentication/migrations/0018_merge.py | kharann/onlineweb4 | 1130128c6233b623780779a25934ea73ef62c264 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-30 19:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0014_auto_20151214_0117'),
('authentication', '0017_auto_20160128_1719'),
]
operations = [
]
| 20.647059 | 54 | 0.675214 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0014_auto_20151214_0117'),
('authentication', '0017_auto_20160128_1719'),
]
operations = [
]
| true | true |
1c4826c33c1d16a74af01fb85c32290195d70209 | 2,439 | py | Python | examples/dummynode.py | sq8kfh/pyh9 | 7b1f05709849c30cd6c9086c6539e33106aa5fa2 | [
"MIT"
] | null | null | null | examples/dummynode.py | sq8kfh/pyh9 | 7b1f05709849c30cd6c9086c6539e33106aa5fa2 | [
"MIT"
] | null | null | null | examples/dummynode.py | sq8kfh/pyh9 | 7b1f05709849c30cd6c9086c6539e33106aa5fa2 | [
"MIT"
] | null | null | null | import asyncio
import h9.asyncmsgstream
from h9.msg import H9ExecuteMethod, H9SendFrame, H9Frame
node_id = 32
dev_des=[0, 5, 0, 1] #type_h, type_l, version_major, version_minor
seqnum = -1
reg_10 = 0
def get_next_seqnum():
global seqnum
seqnum = seqnum + 1
seqnum = seqnum % 32
return seqnum
def procces_frame(conn, frame):
global reg_10
print(frame.frametype)
if frame.frametype == H9Frame.FrameType.GET_REG:
if frame.data[0] == 10:
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.REG_VALUE, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=[frame.data[0], reg_10])
conn.writemsg(res)
elif frame.frametype == H9Frame.FrameType.SET_REG:
if frame.data[0] == 10:
reg_10 = frame.data[1]
reg_10 = reg_10 % 9
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.REG_EXTERNALLY_CHANGED, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=[frame.data[0], reg_10])
conn.writemsg(res)
elif frame.frametype == H9Frame.FrameType.DISCOVER:
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.NODE_INFO, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=dev_des)
conn.writemsg(res)
async def run():
conn = h9.asyncmsgstream.H9msgStream("127.0.0.1", 7878)
await conn.connect()
exec_method = H9ExecuteMethod("subscribe")
exec_method.value = {'event': 'frame'}
conn.writemsg(exec_method)
frame = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.NODE_TURNED_ON, seqnum=get_next_seqnum(),
source=node_id,
destination=511, data=dev_des)
conn.writemsg(frame)
while True:
recv_msg = await conn.readmsg()
if isinstance(recv_msg, H9Frame) and (recv_msg.destination == node_id or recv_msg.destination == 511):
procces_frame(conn, recv_msg)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(run())
finally:
loop.close()
| 35.347826 | 110 | 0.609266 | import asyncio
import h9.asyncmsgstream
from h9.msg import H9ExecuteMethod, H9SendFrame, H9Frame
node_id = 32
dev_des=[0, 5, 0, 1]
seqnum = -1
reg_10 = 0
def get_next_seqnum():
global seqnum
seqnum = seqnum + 1
seqnum = seqnum % 32
return seqnum
def procces_frame(conn, frame):
global reg_10
print(frame.frametype)
if frame.frametype == H9Frame.FrameType.GET_REG:
if frame.data[0] == 10:
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.REG_VALUE, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=[frame.data[0], reg_10])
conn.writemsg(res)
elif frame.frametype == H9Frame.FrameType.SET_REG:
if frame.data[0] == 10:
reg_10 = frame.data[1]
reg_10 = reg_10 % 9
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.REG_EXTERNALLY_CHANGED, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=[frame.data[0], reg_10])
conn.writemsg(res)
elif frame.frametype == H9Frame.FrameType.DISCOVER:
res = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.NODE_INFO, seqnum=frame.seqnum,
source=node_id,
destination=frame.source, data=dev_des)
conn.writemsg(res)
async def run():
conn = h9.asyncmsgstream.H9msgStream("127.0.0.1", 7878)
await conn.connect()
exec_method = H9ExecuteMethod("subscribe")
exec_method.value = {'event': 'frame'}
conn.writemsg(exec_method)
frame = H9SendFrame(priority=H9SendFrame.Priority.L,
frametype=H9SendFrame.FrameType.NODE_TURNED_ON, seqnum=get_next_seqnum(),
source=node_id,
destination=511, data=dev_des)
conn.writemsg(frame)
while True:
recv_msg = await conn.readmsg()
if isinstance(recv_msg, H9Frame) and (recv_msg.destination == node_id or recv_msg.destination == 511):
procces_frame(conn, recv_msg)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(run())
finally:
loop.close()
| true | true |
1c482777e51dc00263580068f3d916b2c4437bbe | 1,574 | py | Python | resources/property.py | codeforpdx/dwellinglybackend | 92fee6d19a68ae00750927b8700eaa7195b57668 | [
"MIT"
] | 15 | 2020-07-09T20:51:09.000Z | 2021-11-28T21:59:02.000Z | resources/property.py | codeforpdx/dwellinglybackend | 92fee6d19a68ae00750927b8700eaa7195b57668 | [
"MIT"
] | 148 | 2020-03-28T22:10:30.000Z | 2021-12-19T09:22:59.000Z | resources/property.py | codeforpdx/dwellinglybackend | 92fee6d19a68ae00750927b8700eaa7195b57668 | [
"MIT"
] | 30 | 2020-03-12T02:31:27.000Z | 2021-07-29T02:40:36.000Z | from flask_restful import Resource
from flask import request
from utils.authorizations import admin_required
from db import db
from models.property import PropertyModel
from schemas.property import PropertySchema
class Property(Resource):
@admin_required
def get(self, id):
return PropertyModel.find(id).json(include_tenants=True)
@admin_required
def delete(self, id):
PropertyModel.delete(id)
return {"message": "Property deleted"}
@admin_required
def put(self, id):
property = PropertyModel.find(id)
return property.update(
schema=PropertySchema,
context={"name": property.name},
payload=request.json,
).json()
class Properties(Resource):
@admin_required
def get(self):
return {"properties": PropertyModel.query.json()}
@admin_required
def post(self):
return (
PropertyModel.create(schema=PropertySchema, payload=request.json).json(),
201,
)
class ArchiveProperties(Resource):
@admin_required
def patch(self):
if not ("ids" in request.json and type(request.json["ids"]) is list):
return {"message": "Property IDs missing in request"}, 400
properties = []
for id in request.json["ids"]:
property = PropertyModel.find(id)
property.archived = True
properties.append(property)
db.session.bulk_save_objects(properties)
db.session.commit()
return {"properties": PropertyModel.query.json()}
| 27.137931 | 85 | 0.644854 | from flask_restful import Resource
from flask import request
from utils.authorizations import admin_required
from db import db
from models.property import PropertyModel
from schemas.property import PropertySchema
class Property(Resource):
@admin_required
def get(self, id):
return PropertyModel.find(id).json(include_tenants=True)
@admin_required
def delete(self, id):
PropertyModel.delete(id)
return {"message": "Property deleted"}
@admin_required
def put(self, id):
property = PropertyModel.find(id)
return property.update(
schema=PropertySchema,
context={"name": property.name},
payload=request.json,
).json()
class Properties(Resource):
@admin_required
def get(self):
return {"properties": PropertyModel.query.json()}
@admin_required
def post(self):
return (
PropertyModel.create(schema=PropertySchema, payload=request.json).json(),
201,
)
class ArchiveProperties(Resource):
@admin_required
def patch(self):
if not ("ids" in request.json and type(request.json["ids"]) is list):
return {"message": "Property IDs missing in request"}, 400
properties = []
for id in request.json["ids"]:
property = PropertyModel.find(id)
property.archived = True
properties.append(property)
db.session.bulk_save_objects(properties)
db.session.commit()
return {"properties": PropertyModel.query.json()}
| true | true |
1c4827c357cf7a405de0181536ad034ca79debe7 | 124 | py | Python | scripts/secrets.py | Aviah/one-click-django-dev-ubuntu-14-04-trusty | b6f5da980185eedde8a7a99f7efe76304c6f5c40 | [
"MIT"
] | 10 | 2016-03-22T22:14:40.000Z | 2021-07-23T22:00:02.000Z | scripts/secrets.py | Aviah/one-click-django-dev-ubuntu-14-04-trusty | b6f5da980185eedde8a7a99f7efe76304c6f5c40 | [
"MIT"
] | 1 | 2017-06-03T12:11:47.000Z | 2017-06-03T12:11:47.000Z | scripts/secrets.py | Aviah/one-click-django-dev-osx-el-capitan | ea6832f57e126d30499c9bc66c5b4c77d0ef4020 | [
"MIT"
] | 4 | 2016-04-05T05:41:15.000Z | 2017-01-08T10:03:25.000Z | # Add here secrets, password etc you don't want to keep in the repository
# e.g. django SECRET_KEY, database credentials etc | 62 | 73 | 0.782258 |
# e.g. django SECRET_KEY, database credentials etc | true | true |
1c482859e6dd971e0ebdc01fe98a1798be6c2f40 | 1,548 | py | Python | setup.py | pashtetbezd/miracles-server | 131071d1a4add240151ef55fe9c4f9ff9f5261cc | [
"Apache-2.0"
] | null | null | null | setup.py | pashtetbezd/miracles-server | 131071d1a4add240151ef55fe9c4f9ff9f5261cc | [
"Apache-2.0"
] | null | null | null | setup.py | pashtetbezd/miracles-server | 131071d1a4add240151ef55fe9c4f9ff9f5261cc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
# TODO: put package requirements here
'connexion[swagger-ui]',
'connexion==2.6.0',
'sqlalchemy>=1.3.13',
'SQLAlchemy-serializer',
'psycopg2>=2.8.4',
'alembic==1.4.2',
'rauth',
'pyjwt',
'flask-socketio',
'redis',
'eventlet',
'six'
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='connexion_sql_utils',
version='0.1.4',
description="Sqlalchemy, Postgres, Connexion utility",
long_description=readme + '\n\n' + history,
author="Michael Housh",
author_email='mhoush@houshhomeenergy.com',
url='https://github.com/m-housh/connexion_sql_utils',
packages=[
'connexion_sql_utils',
],
package_dir={'connexion_sql_utils':
'connexion_sql_utils'},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='connexion_sql_utils',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
| 25.377049 | 58 | 0.633075 |
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'connexion[swagger-ui]',
'connexion==2.6.0',
'sqlalchemy>=1.3.13',
'SQLAlchemy-serializer',
'psycopg2>=2.8.4',
'alembic==1.4.2',
'rauth',
'pyjwt',
'flask-socketio',
'redis',
'eventlet',
'six'
]
test_requirements = [
]
setup(
name='connexion_sql_utils',
version='0.1.4',
description="Sqlalchemy, Postgres, Connexion utility",
long_description=readme + '\n\n' + history,
author="Michael Housh",
author_email='mhoush@houshhomeenergy.com',
url='https://github.com/m-housh/connexion_sql_utils',
packages=[
'connexion_sql_utils',
],
package_dir={'connexion_sql_utils':
'connexion_sql_utils'},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='connexion_sql_utils',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
| true | true |
1c482950e64a9537a2996df66ed9403e53cf8a71 | 44,005 | py | Python | tensorflow/contrib/tpu/python/tpu/tpu.py | jiefangxuanyan/tensorflow | f78fd433118830482dddbf6055751898a19265de | [
"Apache-2.0"
] | 1 | 2021-05-03T12:10:38.000Z | 2021-05-03T12:10:38.000Z | tensorflow/contrib/tpu/python/tpu/tpu.py | jiefangxuanyan/tensorflow | f78fd433118830482dddbf6055751898a19265de | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/tpu/python/tpu/tpu.py | jiefangxuanyan/tensorflow | f78fd433118830482dddbf6055751898a19265de | [
"Apache-2.0"
] | 1 | 2018-06-12T01:58:06.000Z | 2018-06-12T01:58:06.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Operations that indicate some error in the users graph, e.g. a placeholder
# that's introduced outside of the infeed.
_BLACKLISTED_OPS = set([
    "Placeholder",
])
# These operations will currently fail to compile, but we should be able to
# support them eventually via CPU offload or extending our operation set.
_NOT_IMPLEMENTED_OPS = set([
    "AudioSummary",
    "AudioSummaryV2",
    "HistogramSummary",
    "ImageSummary",
    "MergeSummary",
    "Print",
    "ScalarSummary",
    "TensorSummary",
    "TensorSummaryV2",
])
# Cap on output size when warning about problematic ops; presumably limits
# how many op names are listed per warning message (usage is outside this
# chunk -- confirm against the warning helpers later in the file).
_MAX_WARNING_LINES = 5
# Graph-node attribute used to mark operators that belong to a
# tpu.replicate() computation; its value is a name unique to that
# computation (see the TPUReplicateContext docstring below).
_TPU_REPLICATE_ATTR = "_tpu_replicate"
# NOTE(review): attribute presumably carrying per-op TPU compilation status;
# usage is not visible in this chunk.
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"
# NOTE(review): attribute presumably marking ops to be run outside the XLA
# computation ("outside compilation"); usage is not visible in this chunk.
_OUTSIDE_COMPILATION_ATTR = "_xla_outside_compilation"
def _tpu_system_device_name(job):
"""Returns the device name for the TPU_SYSTEM device of `job`."""
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
def initialize_system(embedding_config=None, job=None):
  """Initializes a distributed TPU system for use with TensorFlow.

  Args:
    embedding_config: If not None, an `EmbeddingLayerConfiguration` proto
      describing the desired configuration of the hardware embedding lookup
      tables. If embedding_config is None, no hardware embeddings can be used.
    job: The job (the XXX in TensorFlow device specification /job:XXX)
      that contains the TPU devices that will be initialized. If job=None
      it is assumed there is only one job in the TensorFlow flock, and an
      error will be returned if this assumption does not hold.

  Returns:
    A serialized `TopologyProto` that describes the TPU system. Note:
    the topology must be evaluated using `Session.run` before it can be used.
  """
  # An empty string tells the runtime that no embedding tables are configured.
  if embedding_config is None:
    config_string = ""
  else:
    config_string = embedding_config.SerializeToString()
  device_name = _tpu_system_device_name(job)
  with ops.device(device_name):
    return tpu_ops.configure_distributed_tpu(embedding_config=config_string)
def shutdown_system(job=None):
  """Shuts down a running distributed TPU system.

  Args:
    job: the TensorFlow job containing the TPU devices, or None for the
      single-job case.

  Returns:
    The op that shuts down the distributed TPU system when run.
  """
  with ops.device(_tpu_system_device_name(job)):
    return tpu_ops.shutdown_distributed_tpu()
def core(num):
  """Returns the device name for a core in a replicated TPU computation.

  Args:
    num: the virtual core number within each replica to which operators should
      be assigned.

  Returns:
    A device name, suitable for passing to `tf.device()`.
  """
  return "device:TPU_REPLICATED_CORE:%d" % num
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
  """A `ControlFlowContext` for nodes inside a TPU computation.

  The primary role of `TPUReplicateContext` is to mark operators inside a
  tpu.replicate() computation with the attribute "_tpu_replicate=XYZ", where XYZ
  is a unique name.

  We use a `ControlFlowContext` to perform the annotation since it
  integrates with Tensorflow constructs like ResourceVariables. For example,
  if a `ResourceVariable` is constructed inside a tpu.replicate() block, the
  `ResourceVariable` implementation can use
  `with ops.control_dependencies(None)` to build the variable's definition
  outside the replicated computation.
  """

  def __init__(self, name, num_replicas, pivot):
    """Builds a new TPUReplicateContext.

    Args:
      name: a unique name for the context, used to populate the
        `_tpu_replicate` attribute.
      num_replicas: an integer that gives the number of replicas for the
        computation.
      pivot: a pivot node. Nodes in the TPUReplicateContext that do not have
        any inputs will have a control dependency on the pivot node. This
        ensures that nodes are correctly included in any enclosing control
        flow contexts.
    """
    super(TPUReplicateContext, self).__init__()
    self._num_replicas = num_replicas
    # Device function stack captured on the first Enter(); it is swapped in
    # while inside an outside_compilation scope so host ops get host devices.
    self._outer_device_function_stack = None
    self._oc_dev_fn_stack = None
    self._outside_compilation_cluster = None
    self._outside_compilation_counter = 0
    self._in_gradient_colocation = None
    self._gradient_colocation_stack = []
    self._host_compute_core = []
    self._name = name
    self._unsupported_ops = []
    self._pivot = pivot

  def report_unsupported_operations(self):
    """Logs a warning listing ops recorded from `_NOT_IMPLEMENTED_OPS`."""
    if self._unsupported_ops:
      op_str = "\n".join(["  %s (%s)" % (op.type, op.name)
                          for op in self._unsupported_ops[:_MAX_WARNING_LINES]])
      logging.warning("%d unsupported operations found: \n%s",
                      len(self._unsupported_ops), op_str)
      if len(self._unsupported_ops) > _MAX_WARNING_LINES:
        # Lazy %-style logging args: only formatted if the record is emitted.
        logging.warning("... and %d more",
                        len(self._unsupported_ops) - _MAX_WARNING_LINES)

  def EnterGradientColocation(self, op, gradient_uid):
    """Enters an outside_compilation scope for the gradient of `op`.

    If `op` carries the outside-compilation attribute, the gradient ops being
    built are placed in a derived cluster named `root_cluster.gradient_uid`.

    Args:
      op: the forward-pass op whose gradient is being built, or None.
      gradient_uid: unique id for this invocation of gradients().
    """
    if op is not None:
      self._gradient_colocation_stack.append(op)
      if not self._outside_compilation_cluster:
        try:
          outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR)
          if self._in_gradient_colocation:
            raise NotImplementedError(
                "Cannot nest gradient colocation operations outside compilation"
            )
          if gradient_uid == "__unsupported__":
            raise NotImplementedError(
                "No gradient_uid calling gradient within outside_compilation")
          # When we take the gradient of an op X in an
          # outside_compilation cluster C in a forward computation we
          # would like to put the ops corresponding to the gradient of
          # X into a new outside_compilation cluster C'. However, if
          # we take the gradient of X twice, the second one should get
          # yet another new outside_compilation cluster C''.
          #
          # The mechanism we adopt is to use a 'root_cluster' which is
          # the cluster that X was in before we took gradients, and a
          # 'gradient_uid' which is different for every invocation of
          # gradients, and put the gradient of X in cluster
          # 'root_cluster.gradient_uid'.
          #
          # When taking a gradient of a gradient, some ops will be
          # colocated with Op in the forward pass (e.g., cluster
          # root_cluster) and some in the backward pass (e.g., cluster
          # root_cluster.initial_gradient_uid). We need all of the
          # grad-of-grad ops to be in the same cluster to avoid cyclic
          # dependencies between clusters. We adopt a heuristic that
          # puts any op clustered with root_cluster.<xxx> in
          # root_cluster.gradient_uid, even if xxx was
          # initial_gradient_uid.
          self._in_gradient_colocation = op
          parts = outside_attr.split(".")
          cluster = parts[0] + "." + gradient_uid
          self._EnterOutsideCompilationScope(cluster=cluster)
        except ValueError:
          # The attr was not present: do nothing.
          pass

  def ExitGradientColocation(self, op, gradient_uid):
    """Pops `op` from the gradient colocation stack, exiting its scope.

    Args:
      op: the op passed to the matching EnterGradientColocation, or None.
      gradient_uid: unique id for this invocation of gradients() (unused
        here; kept for interface symmetry with EnterGradientColocation).

    Raises:
      errors.InternalError: if the colocation stack is empty or `op` is not
        the most recently entered op.
    """
    if op is not None:
      if not self._gradient_colocation_stack:
        raise errors.InternalError(
            op.node_def, op,
            "Badly nested gradient colocation: empty stack when popping Op " +
            op.name)
      last_op = self._gradient_colocation_stack.pop()
      if op is last_op:
        if op is self._in_gradient_colocation:
          self._in_gradient_colocation = None
          self._ExitOutsideCompilationScope()
      else:
        # BUG FIX: `last_op` is an Operation; concatenating it directly into
        # the message raised TypeError instead of the intended InternalError.
        raise errors.InternalError(
            op.node_def, op, "Badly nested gradient colocation, expected " +
            last_op.name + ", got " + op.name)

  def _EnterOutsideCompilationScope(self, cluster=None):
    """Marks the start of an outside_compilation (host-side) cluster.

    Args:
      cluster: explicit cluster name to use; if None, a fresh name is
        generated from an internal counter.

    Raises:
      NotImplementedError: if already inside an outside_compilation cluster.
    """

    class FakeOp(object):
      """A helper class to determine the current device.

      Supports only the device set/get methods needed to run the
      graph's _apply_device_function method.
      """

      def __init__(self):
        self._device = ""

      @property
      def device(self):
        return self._device

      def _set_device(self, device):
        self._device = device.to_string()

    if self._outside_compilation_cluster:
      raise NotImplementedError("Cannot nest outside_compilation clusters")
    if cluster:
      self._outside_compilation_cluster = cluster
    else:
      self._outside_compilation_cluster = str(self._outside_compilation_counter)
      self._outside_compilation_counter += 1
    graph = ops.get_default_graph()
    fake_op = FakeOp()
    graph._apply_device_functions(fake_op)  # pylint: disable=protected-access
    device = pydev.DeviceSpec.from_string(fake_op.device)
    if (device.device_type == "TPU_REPLICATED_CORE" and
        device.device_index is not None):
      # Record which TPU core this host cluster communicates with, as
      # "cluster:core_index"; later attached to the metadata op.
      self._host_compute_core.append(self._outside_compilation_cluster + ":" +
                                     str(device.device_index))
    # Swap in the device function stack that was active outside the replicated
    # computation so host-side ops are not placed on TPU devices.
    self._oc_dev_fn_stack = graph._device_function_stack  # pylint: disable=protected-access
    graph._device_function_stack = self._outer_device_function_stack  # pylint: disable=protected-access

  def _ExitOutsideCompilationScope(self):
    """Marks the end of the current outside_compilation cluster.

    Raises:
      NotImplementedError: if not currently inside such a cluster.
    """
    if not self._outside_compilation_cluster:
      raise NotImplementedError(
          "Attempted to exit outside_compilation scope when not in scope")
    self._outside_compilation_cluster = None
    graph = ops.get_default_graph()
    # Restore the device function stack saved on scope entry.
    graph._device_function_stack = self._oc_dev_fn_stack  # pylint: disable=protected-access

  def Enter(self):
    if not self._outer_device_function_stack:
      # Capture the device function stack at the time of first entry
      # since that is the stack that will be used outside_compilation.
      graph = ops.get_default_graph()
      self._outer_device_function_stack = list(graph._device_function_stack)  # pylint: disable=protected-access
    super(TPUReplicateContext, self).Enter()

  def HostComputeCore(self):
    """Returns "cluster:core" strings for host clusters that talk to a core."""
    return self._host_compute_core

  def AddOp(self, op):
    self._AddOpInternal(op)

  def _AddOpInternal(self, op):
    """Annotates `op` for TPU replication and wires up control edges."""
    # pylint: disable=protected-access
    if op.type in _BLACKLISTED_OPS:
      # Lazy %-style logging args instead of eager string interpolation.
      logging.error("Operation of type %s (%s) is not supported on the TPU. "
                    "Execution will fail if this op is used in the graph. ",
                    op.type, op.name)
    if op.type in _NOT_IMPLEMENTED_OPS:
      self._unsupported_ops.append(op)
    if any(x.dtype._is_ref_dtype for x in op.inputs):
      raise NotImplementedError(
          "Non-resource Variables are not supported inside TPU computations "
          "(operator name: %s)" % op.name)
    if _TPU_REPLICATE_ATTR in op.node_def.attr:
      raise ValueError("TPU computations cannot be nested")
    op._set_attr(_TPU_REPLICATE_ATTR,
                 attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))
    if self._outside_compilation_cluster:
      op._set_attr(
          _OUTSIDE_COMPILATION_ATTR,
          attr_value_pb2.AttrValue(
              s=compat.as_bytes(self._outside_compilation_cluster)))
    if self._num_replicas > 1 or not self._outside_compilation_cluster:
      # Prevent feeding or fetching anything that is being compiled,
      # and any replicated outside_compilation Op.
      op.graph.prevent_feeding(op)
      op.graph.prevent_fetching(op)
    # Remove any control edges from outer control flow contexts. These may cause
    # mismatched frame errors.
    control_inputs, external_inputs = self._RemoveExternalControlEdges(op)
    if not op.inputs:
      # Add a control edge from the control pivot to this op.
      if not control_inputs:
        # pylint: disable=protected-access
        op._add_control_input(self.GetControlPivot())
        # pylint: enable=protected-access
    else:
      for index in xrange(len(op.inputs)):
        x = op.inputs[index]
        real_x = self.AddValue(x)
        if real_x != x:
          op._update_input(index, real_x)  # pylint: disable=protected-access
    if external_inputs:
      # Use an identity to pull control inputs as data inputs. Note that we
      # ignore ops which don't have outputs. TODO(phawkins): fix that.
      with ops.control_dependencies(None):
        self.Enter()
        external_inputs = [
            array_ops.identity(x.outputs[0]).op
            for x in external_inputs
            if x.outputs
        ]
        self.Exit()
      # pylint: disable=protected-access
      op._add_control_inputs(external_inputs)
      # pylint: enable=protected-access
    # Mark op's outputs as seen by this context and any outer contexts.
    output_names = [x.name for x in op.outputs]
    context = self
    while context is not None:
      # pylint: disable=protected-access
      context._values.update(output_names)
      context = context._outer_context
      # pylint: enable=protected-access
    if self._outer_context:
      self._outer_context.AddInnerOp(op)

  def AddValue(self, val):
    """Records `val` as used in this context, resolving outer-context values."""
    if val.name in self._values:
      # Use the real value if it comes from outer context.
      result = self._external_values.get(val.name)
      return val if result is None else result
    result = val
    self._values.add(val.name)
    if self._outer_context:
      result = self._outer_context.AddValue(val)
      self._values.add(result.name)
    self._external_values[val.name] = result
    return result

  def AddInnerOp(self, op):
    self._AddOpInternal(op)
    if self._outer_context:
      self._outer_context.AddInnerOp(op)

  @property
  def grad_state(self):
    # Define the gradient loop state associated with the TPUReplicateContext to
    # be None as the TPUReplicateContext does not get nested nor does the
    # grad_state outside the TPUReplicateContext affect the graph inside so the
    # grad_state should be as if this is the top-level gradient state.
    return None

  @property
  def back_prop(self):
    """Forwards to the enclosing while context, if any."""
    if self.GetWhileContext():
      return self.GetWhileContext().back_prop
    return False

  def GetControlPivot(self):
    return self._pivot
def outside_compilation(computation, *args, **kwargs):
  """Builds part of a computation outside any current TPU replicate scope.

  Args:
    computation: A Python function that builds the computation to
      place on the host.
    *args: the positional arguments for the computation.
    **kwargs: the keyword arguments for the computation.

  Returns:
    The Tensors returned by computation.
  """
  if args is None:
    args = []
  graph = ops.get_default_graph()

  # Walk every enclosing control flow context and signal to each
  # TPUReplicateContext that we are now building outside_compilation ops.
  initial_context = graph._get_control_flow_context()  # pylint: disable=protected-access
  ctx = initial_context
  while ctx is not None:
    if isinstance(ctx, TPUReplicateContext):
      ctx._EnterOutsideCompilationScope()  # pylint: disable=protected-access
    ctx = ctx.outer_context

  retval = computation(*args, **kwargs)

  # The computation must not have changed the control flow context; otherwise
  # the Exit calls below would not match the Enter calls above.
  final_context = graph._get_control_flow_context()  # pylint: disable=protected-access
  if final_context is not initial_context:
    raise NotImplementedError(
        "Control-flow context cannot be different at start and end of an "
        "outside_compilation scope")

  # Signal to each enclosing TPUReplicateContext that outside_compilation
  # construction is finished.
  ctx = initial_context
  while ctx is not None:
    if isinstance(ctx, TPUReplicateContext):
      ctx._ExitOutsideCompilationScope()  # pylint: disable=protected-access
    ctx = ctx.outer_context

  return retval
def replicate(computation,
              inputs=None,
              infeed_queue=None,
              device_assignment=None,
              name=None):
  """Builds a graph operator that runs a replicated TPU computation.

  Args:
    computation: A Python function that builds the computation to replicate.
    inputs: A list of lists of input tensors or `None` (equivalent to
      `[[]]`), indexed by `[replica_num][input_num]`. All replicas must
      have the same number of inputs.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to computation.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each replica of the computation uses
      only one core, and there is either only one replica, or the number of
      replicas is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.

  Returns:
    A list of lists of output tensors, indexed by `[replica_num][output_num]`.

  Raises:
    ValueError: If all replicas do not have equal numbers of input tensors.
    ValueError: If the number of inputs per replica does not match
      the number of formal parameters to `computation`.
  """
  # Delegate to the lower-level API and keep only the replicated outputs,
  # discarding the compilation-status tensor at index 0.
  compile_and_outputs = split_compile_and_replicate(
      computation, inputs, infeed_queue, device_assignment, name)
  return compile_and_outputs[1]
def split_compile_and_replicate(computation,
                                inputs=None,
                                infeed_queue=None,
                                device_assignment=None,
                                name=None,
                                use_tpu=True):
  """Builds graph operators that runs compilation and replicated computation.

  This is a lower level interface than replicate that returns a separate compile
  and execute output tensor. In the generated graph the compile op feeds into
  the execute op and no additional compilation is incurred when running the
  compile op before the execute op. The compile op returns additional
  information about the compilation but does not return the compiled program.

  Args:
    computation: A Python function that builds the computation to replicate.
    inputs: A list of lists of input tensors or `None` (equivalent to
      `[[]]`), indexed by `[replica_num][input_num]`. All replicas must
      have the same number of inputs.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to computation.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each replica of the computation uses
      only one core, and there is either only one replica, or the number of
      replicas is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.
    use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
      backends. Currently, only supports a default placement (computation is
      placed on GPU if one is available, and on CPU if not).

  Returns:
    A list of lists with the first list corresponding to the compile op and the
    second a list of output tensors, indexed by `[replica_num][output_num]`.

  Raises:
    ValueError: If all replicas do not have equal numbers of input tensors.
    ValueError: If the number of inputs per replica does not match
      the number of formal parameters to `computation`.
  """
  del name
  inputs = [[]] if inputs is None else inputs
  metadata_kwargs = {}
  if device_assignment is not None:
    # Turn the Numpy array into a flattened list so we can pass it as an
    # operator attribute.
    metadata_kwargs = {
        "topology":
            device_assignment.topology.serialized(),
        "device_assignment":
            device_assignment.core_assignment.flatten().tolist(),
        "computation_shape":
            device_assignment.computation_shape.tolist()
    }
  # Validate the [replica][input] nesting before touching the graph.
  if ((not isinstance(inputs, list)) or
      any(not isinstance(inp, (list, tuple)) for inp in inputs)):
    raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
  num_replicas = len(inputs)
  # No replicas? Nothing to do.
  if num_replicas == 0:
    return []
  # Converts inputs to Tensors.
  inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]
  # Verifies that all replicas have matching numbers and types of inputs
  input_types = [x.dtype for x in inputs[0]]
  input_arity = len(input_types)
  for i in range(num_replicas):
    if len(inputs[i]) != input_arity:
      raise ValueError("Replicas must have the same number of inputs. "
                       "Replica 0 had {} inputs, replica {} had {} "
                       "inputs.".format(input_arity, i, len(inputs[i])))
    types = [x.dtype for x in inputs[i]]
    if types != input_types:
      raise ValueError(
          "Replicas must have matching input types. Replica 0 had "
          "input types {}, replica {} had input types {}".format(
              input_types, i, types))
  # Checks the computation's signature against the per-replica arity plus any
  # infeed tuple elements, before building any graph nodes.
  arg_error = tpu_function.check_function_argument_count(
      computation, input_arity, infeed_queue)
  if arg_error is not None:
    if infeed_queue is None:
      raise TypeError(
          "Supplied computation cannot be called with the specified inputs. "
          "You specified %d inputs: %s, but the computation needs %s" % (
              input_arity, str([i.name for i in inputs[0]]), arg_error))
    else:
      raise TypeError(
          "Supplied computation cannot be called with the specified inputs. "
          "You specified %d inputs: %s and %d additional inputs from infeed,"
          " but the computation needs %s" % (input_arity, str(
              [i.name
               for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
                                             arg_error))
  graph = ops.get_default_graph()
  # Fan-in: Builds a TPUReplicatedInput node for each input.
  computation_inputs = []
  for i in range(0, input_arity):
    replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
    computation_inputs.append(
        tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
  cluster_name = graph.unique_name("cluster")
  pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
  context = TPUReplicateContext(
      name=cluster_name, num_replicas=num_replicas, pivot=pivot)
  try:
    context.Enter()
    metadata = tpu_ops.tpu_replicate_metadata(
        num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
    with tpu_function.tpu_shard_context(
        num_replicas), ops.control_dependencies([metadata]):
      # The EncapsulateTPUComputations rewrite needs to identify the
      # replicated arguments inside each computation. Adds identity operators
      # tagged with an attribute _tpu_replicated_input to identify the
      # replicated inputs.
      # pylint: disable=protected-access
      with graph._attr_scope({"_tpu_replicated_input":
                              attr_value_pb2.AttrValue(b=True)}):
        computation_inputs = [
            array_ops.identity(x, name="replicated_input_{}".format(i))
            for i, x in enumerate(computation_inputs)]
      # pylint: enable=protected-access
      # If there is an infeed queue, adds the dequeued values to the
      # computation's inputs.
      if infeed_queue is not None:
        infeed_queue.set_number_of_shards(num_replicas)
        for t in infeed_queue.generate_dequeue_op():
          computation_inputs.append(t)
      # Only resource variables work inside a TPU computation, so turn on
      # resource variables for the computation.
      # TODO(phawkins): consider removing this code. It will
      # be less confusing to clients if they knowingly choose to use resource
      # variables.
      vscope = variable_scope.get_variable_scope()
      saved_use_resource = vscope.use_resource
      vscope.set_use_resource(True)
      outputs = computation(*computation_inputs)
      # Restore the caller's variable scope setting after tracing.
      vscope.set_use_resource(saved_use_resource)
    # If the computation returns `None`, add `no_op` here so that when user
    # fetches `no_op` returned by this function, the TPUExecute node will be
    # triggered.
    if outputs is None:
      outputs = (control_flow_ops.no_op(),)
    # If the computation only returned one value, makes it a tuple.
    if not isinstance(outputs, (list, tuple)):
      outputs = (outputs,)
    try:
      with ops.device(core(0)):
        outputs = [
            o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
            for o in outputs
        ]
    except Exception as e:
      raise ValueError(
          "TPU function return values must all either be Operations or "
          "convertible to Tensors. Got '%s'" % str(e))
    # Separates the returned Operations and Tensors.
    output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
    output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
    # Requires the Tensors-then-Operations ordering so that output indices
    # are stable for the fan-out below.
    if outputs != output_tensors + output_operations:
      raise ValueError(
          "TPU functions must return zero-or more Tensor values followed by "
          "zero or more Operations.")
    output_arity = len(output_tensors)
    # Wraps outputs in Identity ops. Otherwise a replicated input copied
    # straight to an output would bypass the replicate(). This would be bad
    # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
    # be rewritten away, leading to a runtime error.
    # TODO(phawkins): extend the rewrite to elide these nodes instead.
    new_output_tensors = []
    for t in output_tensors:
      with ops.device(t.device if t.device else core(0)):
        new_output_tensors.append(array_ops.identity(t))
    output_tensors = new_output_tensors
    context.ExitResult(output_tensors)
  finally:
    context.report_unsupported_operations()
    context.Exit()
    host_compute_core = context.HostComputeCore()
  if host_compute_core:
    attr_value = attr_value_pb2.AttrValue()
    attr_value.list.s.extend([compat.as_bytes(x) for x in host_compute_core])
    metadata._set_attr("host_compute_core", attr_value)  # pylint: disable=protected-access
  # Fan-out: Builds a TPUReplicatedOutput node for each output.
  outputs = [tpu_ops.tpu_replicated_output(output_tensors[i], num_replicas,
                                           name="output{}".format(i))
             for i in xrange(output_arity)]
  with ops.control_dependencies([metadata]):
    if use_tpu:
      compile_status = tpu_ops.tpu_compilation_result()
      op = compile_status.op
      attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
      op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value)  # pylint: disable=protected-access
    else:
      compile_status = control_flow_ops.no_op(name="compilation_status")
  with ops.control_dependencies(output_operations):
    if output_arity == 0:
      # Returns a list of NoOps dependent on the replication Op, indexed by
      # [replica_num].
      return [
          compile_status, [
              control_flow_ops.no_op(name="shard_%d" % i)
              for i in range(num_replicas)
          ]
      ]
    else:
      # Wraps the outputs in identity operators so the names of any possible
      # `fetch` nodes are preserved by the replication rewrite.
      return [
          compile_status, [[
              array_ops.identity(
                  outputs[out][replica],
                  name="output_%d_shard_%d" % (out, replica))
              for out in xrange(output_arity)
          ]
                           for replica in xrange(num_replicas)]
      ]
def shard(computation,
          inputs=None,
          num_shards=1,
          input_shard_axes=None,
          outputs_from_all_shards=True,
          output_shard_axes=None,
          infeed_queue=None,
          device_assignment=None,
          name=None):
  """Shards `computation` for parallel execution.

  `inputs` must be a list of Tensors or None (equivalent to an empty
  list), each of which has a corresponding split axis (from
  `input_shard_axes`). Each input is split into `num_shards` pieces
  along the corresponding axis, and computation is applied to each
  shard in parallel.

  Tensors are broadcast to all shards if they are lexically captured by
  `computation`. e.g.,

  x = tf.constant(7)
  def computation():
    return x + 3
  ... = shard(computation, ...)

  TODO(phawkins): consider adding support for broadcasting Tensors passed
  as inputs.

  If `outputs_from_all_shards` is true, the outputs from all shards of
  `computation` are concatenated back together along their `output_shards_axes`.
  Otherwise, each output is taken from an arbitrary shard.

  Inputs and outputs of the computation must be at least rank-1 Tensors.

  Args:
    computation: A Python function that builds a computation to apply to each
      shard of the input.
    inputs: A list of input tensors or None (equivalent to an empty
      list). Each input tensor has a corresponding shard axes, given
      by `input_shard_axes`, which must have size divisible by
      `num_shards`.
    num_shards: The number of shards.
    input_shard_axes: A list of dimensions along which to shard `inputs`, or
      `None`. `None` means "shard all inputs along dimension 0". If not `None`,
      there must be one dimension per input.
    outputs_from_all_shards: Boolean or list of boolean. For each output, if
      `True`, outputs from all shards are concatenated along the corresponding
      `output_shard_axes` entry. Otherwise, each output is taken
      from an arbitrary shard. If the argument is a boolean, the argument's
      value is used for each output.
    output_shard_axes: A list of dimensions along which to concatenate the
      outputs of `computation`, or `None`. `None` means "concatenate all outputs
      along dimension 0". If not `None`, there must be one dimension per output.
      Ignored if `outputs_from_all_shards` is False.
    infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
      of `computation`.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each shard of the computation uses
      only one core, and there is either only one shard, or the number of shards
      is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.

  Returns:
    A list of output tensors.

  Raises:
    ValueError: If num_shards <= 0
    ValueError: If len(input_shard_axes) != len(inputs)
    ValueError: If len(output_shard_axes) != len(outputs from `computation`)
  """
  if num_shards <= 0:
    raise ValueError("num_shards must be a positive integer.")
  # Converts inputs to Tensors.
  inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs]
  if input_shard_axes is None:
    input_shard_axes = [0] * len(inputs)
  if len(inputs) != len(input_shard_axes):
    raise ValueError("Length of input_shard_axes must be equal to the number "
                     "of inputs.")
  if inputs:
    # Splits the `inputs` along the corresponding `input_shard_axes`, giving
    # lists with layout [input][shard]
    split_inputs = [
        array_ops.split(x, num_shards, axis=axis)
        for (axis, x) in zip(input_shard_axes, inputs)]
    # Transposes the input lists to have layout [shard][input]
    transposed_inputs = [list(i) for i in zip(*split_inputs)]
  else:
    # BUG FIX: `[[]] * num_shards` aliased one shared list into every shard
    # slot; use distinct lists so a future mutation of one shard's input list
    # cannot silently affect the others.
    transposed_inputs = [[] for _ in range(num_shards)]
  outputs = replicate(
      computation,
      transposed_inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)
  # There must be at least one shard since num_shards > 0.
  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  if isinstance(outputs[0], ops.Operation):
    # pylint: enable=indexing-exception
    # There were no outputs from the computation and replicate returned a list
    # of NoOps with control dependencies on the computation. Return the first
    # one so it can be used as a control dependency or fetch node.
    # TODO(b/36647078) remove disable when pylint bug is fixed.
    # pylint: disable=indexing-exception
    return [outputs[0]]
    # pylint: enable=indexing-exception
  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  num_outputs = len(outputs[0])
  # pylint: enable=indexing-exception
  if output_shard_axes is None:
    output_shard_axes = [0] * num_outputs
  if num_outputs != len(output_shard_axes):
    raise ValueError("Length of output_shard_axes must be equal to the number "
                     "of outputs.")
  if isinstance(outputs_from_all_shards, bool):
    outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
  if num_outputs != len(outputs_from_all_shards):
    raise ValueError("Length of outputs_from_all_shards must be equal to the "
                     "number of outputs.")
  results = []
  for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
                                   zip(*outputs)):
    if all_shards:
      # Concatenate all of the outputs together (use stack for scalars).
      shape = x[0].shape
      is_scalar = shape is not None and (shape.ndims == 0)
      results.append((array_ops.stack(list(x)) if is_scalar
                      else array_ops.concat(list(x), axis=axis)))
    else:
      # TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
      results.append(x[0])
  return results
def batch_parallel(computation,
                   inputs=None,
                   num_shards=1,
                   infeed_queue=None,
                   device_assignment=None,
                   name=None):
  """Shards `computation` along the batch dimension for parallel execution.

  Convenience wrapper around shard().

  `inputs` must be a list of Tensors or None (equivalent to an empty
  list). Each input is split into `num_shards` pieces along the 0-th
  dimension, and computation is applied to each shard in parallel.

  Tensors are broadcast to all shards if they are lexically captured by
  `computation`. e.g.,

  x = tf.constant(7)
  def computation():
    return x + 3
  ... = shard(computation, ...)

  The outputs from all shards are concatenated back together along their 0-th
  dimension.

  Inputs and outputs of the computation must be at least rank-1 Tensors.

  Args:
    computation: A Python function that builds a computation to apply to each
      shard of the input.
    inputs: A list of input tensors or None (equivalent to an empty
      list). The 0-th dimension of each Tensor must have size
      divisible by `num_shards`.
    num_shards: The number of shards.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to `computation`.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each shard of the computation uses
      only one core, and there is either only one shard, or the number of shards
      is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.

  Returns:
    A list of output tensors.

  Raises:
    ValueError: If `num_shards <= 0`
  """
  # shard() defaults both input_shard_axes and output_shard_axes to the 0-th
  # (batch) dimension, which is exactly the behavior this wrapper promises.
  return shard(
      computation,
      inputs,
      num_shards=num_shards,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)
def rewrite(computation,
            inputs=None,
            infeed_queue=None,
            device_assignment=None,
            name=None):
  """Rewrites `computation` for execution on a TPU system.

  Args:
    computation: A Python function that builds a computation to apply
      to the input. If the function takes n inputs, 'inputs' should be
      a list of n tensors. If the function returns m outputs, rewrite
      will return a list of m tensors.
    inputs: A list of input tensors or `None` (equivalent to an empty list).
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to `computation`.
    device_assignment: if not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. May be omitted for a single-core computation, in which
      case the core attached to task 0, TPU device 0 is used.
    name: (Deprecated) Does nothing.

  Returns:
    A list of output tensors.
  """
  if inputs is not None and not isinstance(inputs, (list, tuple)):
    raise TypeError("tpu.rewrite() inputs must be a list or tuple")
  # rewrite() is replicate() with a single replica: wrap the flat input list
  # into the [replica][input] layout and unwrap replica 0's outputs.
  wrapped_inputs = None if inputs is None else [inputs]
  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  return replicate(
      computation,
      wrapped_inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)[0]
  # pylint: enable=indexing-exception
# Operations that indicate some error in the user's inference graph.
# Variable reads/writes are disallowed because inference graphs are expected
# to access variables via GuaranteeConst-wrapped reads (see
# rewrite_for_inference below).
_BLACKLISTED_INFERENCE_OPS = set([
    "ReadVariableOp",
    "AssignVariableOp",
    "AssignAddVariableOp",
    "AssignSubVariableOp",
    "VarHandleOp",
    "Variable",
    "VariableV2",
])
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
  """A `ControlFlowContext` for nodes inside a TPU inference computation.
  The primary role of `_TPUInferenceContext` is to sanity check operators inside
  a tpu.rewrite_for_inference() computation.
  """
  def __init__(self, name):
    super(_TPUInferenceContext, self).__init__()
    self._name = name
  def AddOp(self, op):
    self._AddOpInternal(op)
  def _AddOpInternal(self, op):
    """Raises if `op` is a variable op disallowed in inference graphs."""
    # pylint: disable=protected-access
    if op.type in _BLACKLISTED_INFERENCE_OPS:
      raise NotImplementedError(
          "Operation of type %s (%s) is not supported on the TPU for inference."
          " Execution will fail if this op is used in the graph. Make sure your"
          " variables are using variable_scope." % (op.type, op.name))
    if self._outer_context:
      self._outer_context.AddInnerOp(op)
  def AddValue(self, val):
    # No value rewriting is needed here; just forward to the outer context.
    result = val
    if self._outer_context:
      result = self._outer_context.AddValue(val)
    return result
  def AddInnerOp(self, op):
    self._AddOpInternal(op)
  @property
  def grad_state(self):
    # Inference graphs have no gradient loop state.
    return None
@experimental
def validate_inference_rewrite_for_variables(graph):
"""Validates whether rewrite_for_inference() 'worked' for variables.
The rewrite_for_inference() method is supposed to append
GuaranteeConstOps after ReadVariableOps, but this mechanism works only
if you are using tf.get_variable() to create and access variables in your
tpu computation. This validation method can be called immediately after
calling tpu.rewrite_for_inference() to check whether GuaranteeConstOps
where added to the graph.
Typical usages:
tpu.validate_inference_rewrite_for_variables(tf.get_default_graph())
tpu.validate_inference_rewrite_for_variables(sess.graph)
Args:
graph: The graph which needs to be validated.
Raises:
RuntimeError: if validation failed.
"""
if not any([x.type == "GuaranteeConst" for x in graph.get_operations()]):
raise RuntimeError(
"No GuaranteeConst ops found in the graph after "
"running tpu.rewrite_for_inference(...). Please "
"check that you are using tf.get_variable() to "
"create and access variables in your tpu "
"computation.")
@experimental
def rewrite_for_inference(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
"""Rewrites `computation` for inference on a TPU system.
Other than 'rewriting' the computation to run on a TPU, if using variables
in your computation, it moves the ReadVariableOps outside the TPU
computation, and adds GuaranteeConst ops just after the ReadVariableOps.
This mechanism works only if you are using tf.get_variable() to create and
access variables in your tpu computation. You can validate whether
this worked, by calling validate_inference_rewrite_for_variables() method
immediately after this method to check whether GuaranteeConstOps where
added to the graph.
Args:
computation: A Python function that builds a computation to apply
to the input. If the function takes n inputs, 'inputs' should be
a list of n tensors. If the function returns m outputs, rewrite
will return a list of m tensors.
inputs: A list of input tensors or `None` (equivalent to an empty list).
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: The name of the operator.
Returns:
A list of output tensors.
"""
def guarantee_const_getter(getter, name, *args, **kwargs):
with ops.control_dependencies(None):
return array_ops.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
def wrapped_computation(*args, **kwargs):
"""Execute computation under `_TPUInferenceContext`."""
context = _TPUInferenceContext(
name=ops.get_default_graph().unique_name("rewrite_for_inference"))
try:
context.Enter()
vscope = variable_scope.get_variable_scope()
prev_custom_getter = vscope.custom_getter
prev_caching_device = vscope.caching_device
vscope.set_custom_getter(guarantee_const_getter)
vscope.set_caching_device(lambda op: op.device)
result = computation(*args, **kwargs)
vscope.set_custom_getter(prev_custom_getter)
vscope.set_caching_device(prev_caching_device)
finally:
context.Exit()
return result
# pylint: disable=undefined-variable
return rewrite(
wrapped_computation,
inputs=inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# pylint: enable=undefined-variable
| 39.823529 | 112 | 0.695785 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
_BLACKLISTED_OPS = set([
"Placeholder",
])
# These operations will currently fail to compile, but we should be able to
# support them eventually via CPU offload or extending our operation set.
_NOT_IMPLEMENTED_OPS = set([
"AudioSummary",
"AudioSummaryV2",
"HistogramSummary",
"ImageSummary",
"MergeSummary",
"Print",
"ScalarSummary",
"TensorSummary",
"TensorSummaryV2",
])
_MAX_WARNING_LINES = 5
_TPU_REPLICATE_ATTR = "_tpu_replicate"
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"
_OUTSIDE_COMPILATION_ATTR = "_xla_outside_compilation"
def _tpu_system_device_name(job):
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
def initialize_system(embedding_config=None, job=None):
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(_tpu_system_device_name(job)):
return tpu_ops.configure_distributed_tpu(embedding_config=config_string)
def shutdown_system(job=None):
with ops.device(_tpu_system_device_name(job)):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
def core(num):
return "device:TPU_REPLICATED_CORE:{}".format(num)
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
def __init__(self, name, num_replicas, pivot):
super(TPUReplicateContext, self).__init__()
self._num_replicas = num_replicas
self._outer_device_function_stack = None
self._oc_dev_fn_stack = None
self._outside_compilation_cluster = None
self._outside_compilation_counter = 0
self._in_gradient_colocation = None
self._gradient_colocation_stack = []
self._host_compute_core = []
self._name = name
self._unsupported_ops = []
self._pivot = pivot
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = "\n".join([" %s (%s)" % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]])
logging.warning("%d unsupported operations found: \n%s",
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning("... and %d more" %
(len(self._unsupported_ops) - _MAX_WARNING_LINES))
def EnterGradientColocation(self, op, gradient_uid):
if op is not None:
self._gradient_colocation_stack.append(op)
if not self._outside_compilation_cluster:
try:
outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR)
if self._in_gradient_colocation:
raise NotImplementedError(
"Cannot nest gradient colocation operations outside compilation"
)
if gradient_uid == "__unsupported__":
raise NotImplementedError(
"No gradient_uid calling gradient within outside_compilation")
# When we take the gradient of an op X in an
# outside_compilation cluster C in a forward computation we
# would like to put the ops corresponding to the gradient of
# X into a new outside_compilation cluster C'. However, if
self._in_gradient_colocation = op
parts = outside_attr.split(".")
cluster = parts[0] + "." + gradient_uid
self._EnterOutsideCompilationScope(cluster=cluster)
except ValueError:
pass
def ExitGradientColocation(self, op, gradient_uid):
if op is not None:
if not self._gradient_colocation_stack:
raise errors.InternalError(
op.node_def, op,
"Badly nested gradient colocation: empty stack when popping Op " +
op.name)
last_op = self._gradient_colocation_stack.pop()
if op is last_op:
if op is self._in_gradient_colocation:
self._in_gradient_colocation = None
self._ExitOutsideCompilationScope()
else:
raise errors.InternalError(
op.node_def, op, "Badly nested gradient colocation, expected " +
last_op + ", got " + op.name)
def _EnterOutsideCompilationScope(self, cluster=None):
class FakeOp(object):
def __init__(self):
self._device = ""
@property
def device(self):
return self._device
def _set_device(self, device):
self._device = device.to_string()
if self._outside_compilation_cluster:
raise NotImplementedError("Cannot nest outside_compilation clusters")
if cluster:
self._outside_compilation_cluster = cluster
else:
self._outside_compilation_cluster = str(self._outside_compilation_counter)
self._outside_compilation_counter += 1
graph = ops.get_default_graph()
fake_op = FakeOp()
graph._apply_device_functions(fake_op)
device = pydev.DeviceSpec.from_string(fake_op.device)
if (device.device_type == "TPU_REPLICATED_CORE" and
device.device_index is not None):
self._host_compute_core.append(self._outside_compilation_cluster + ":" +
str(device.device_index))
self._oc_dev_fn_stack = graph._device_function_stack
graph._device_function_stack = self._outer_device_function_stack
def _ExitOutsideCompilationScope(self):
if not self._outside_compilation_cluster:
raise NotImplementedError(
"Attempted to exit outside_compilation scope when not in scope")
self._outside_compilation_cluster = None
graph = ops.get_default_graph()
graph._device_function_stack = self._oc_dev_fn_stack
def Enter(self):
if not self._outer_device_function_stack:
graph = ops.get_default_graph()
self._outer_device_function_stack = list(graph._device_function_stack)
super(TPUReplicateContext, self).Enter()
def HostComputeCore(self):
return self._host_compute_core
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
if op.type in _BLACKLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. " %
(op.type, op.name))
if op.type in _NOT_IMPLEMENTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
if _TPU_REPLICATE_ATTR in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op._set_attr(_TPU_REPLICATE_ATTR,
attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))
if self._outside_compilation_cluster:
op._set_attr(
_OUTSIDE_COMPILATION_ATTR,
attr_value_pb2.AttrValue(
s=compat.as_bytes(self._outside_compilation_cluster)))
if self._num_replicas > 1 or not self._outside_compilation_cluster:
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
control_inputs, external_inputs = self._RemoveExternalControlEdges(op)
if not op.inputs:
if not control_inputs:
op._add_control_input(self.GetControlPivot())
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x)
if external_inputs:
with ops.control_dependencies(None):
self.Enter()
external_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
context._values.update(output_names)
context = context._outer_context
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
if val.name in self._values:
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
return None
@property
def back_prop(self):
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
def outside_compilation(computation, *args, **kwargs):
args = [] if args is None else args
graph = ops.get_default_graph()
initial_context = graph._get_control_flow_context()
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._EnterOutsideCompilationScope()
context = context.outer_context
retval = computation(*args, **kwargs)
final_context = graph._get_control_flow_context()
if initial_context is not final_context:
raise NotImplementedError(
"Control-flow context cannot be different at start and end of an "
"outside_compilation scope")
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._ExitOutsideCompilationScope()
context = context.outer_context
return retval
def replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
return split_compile_and_replicate(computation, inputs, infeed_queue,
device_assignment, name)[1]
def split_compile_and_replicate(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None,
use_tpu=True):
del name
inputs = [[]] if inputs is None else inputs
metadata_kwargs = {}
if device_assignment is not None:
metadata_kwargs = {
"topology":
device_assignment.topology.serialized(),
"device_assignment":
device_assignment.core_assignment.flatten().tolist(),
"computation_shape":
device_assignment.computation_shape.tolist()
}
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
if num_replicas == 0:
return []
inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]
input_types = [x.dtype for x in inputs[0]]
input_arity = len(input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in inputs[i]]
if types != input_types:
raise ValueError(
"Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
input_types, i, types))
arg_error = tpu_function.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
graph = ops.get_default_graph()
computation_inputs = []
for i in range(0, input_arity):
replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
computation_inputs.append(
tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
cluster_name = graph.unique_name("cluster")
pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
context = TPUReplicateContext(
name=cluster_name, num_replicas=num_replicas, pivot=pivot)
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
with graph._attr_scope({"_tpu_replicated_input":
attr_value_pb2.AttrValue(b=True)}):
computation_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(computation_inputs)]
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
# If the computation returns `None`, add `no_op` here so that when user
# fetches `no_op` returned by this function, the TPUExecute node will be
# triggered.
if outputs is None:
outputs = (control_flow_ops.no_op(),)
# If the computation only returned one value, makes it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
output_arity = len(output_tensors)
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else core(0)):
new_output_tensors.append(array_ops.identity(t))
output_tensors = new_output_tensors
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
host_compute_core = context.HostComputeCore()
if host_compute_core:
attr_value = attr_value_pb2.AttrValue()
attr_value.list.s.extend([compat.as_bytes(x) for x in host_compute_core])
metadata._set_attr("host_compute_core", attr_value) # pylint: disable=protected-access
# Fan-out: Builds a TPUReplicatedOutput node for each output.
outputs = [tpu_ops.tpu_replicated_output(output_tensors[i], num_replicas,
name="output{}".format(i))
for i in xrange(output_arity)]
with ops.control_dependencies([metadata]):
if use_tpu:
compile_status = tpu_ops.tpu_compilation_result()
op = compile_status.op
attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value) # pylint: disable=protected-access
else:
compile_status = control_flow_ops.no_op(name="compilation_status")
with ops.control_dependencies(output_operations):
if output_arity == 0:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
compile_status, [
control_flow_ops.no_op(name="shard_%d" % i)
for i in range(num_replicas)
]
]
else:
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
return [
compile_status, [[
array_ops.identity(
outputs[out][replica],
name="output_%d_shard_%d" % (out, replica))
for out in xrange(output_arity)
]
for replica in xrange(num_replicas)]
]
def shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
device_assignment=None,
name=None):
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
outputs = replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return results
def batch_parallel(computation,
inputs=None,
num_shards=1,
infeed_queue=None,
device_assignment=None,
name=None):
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
def rewrite(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
if inputs is not None and not isinstance(inputs, (list, tuple)):
raise TypeError("tpu.rewrite() inputs must be a list or tuple")
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)[0]
# pylint: enable=indexing-exception
# Operations that indicate some error in the user's inference graph.
_BLACKLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
"AssignSubVariableOp",
"VarHandleOp",
"Variable",
"VariableV2",
])
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
def __init__(self, name):
super(_TPUInferenceContext, self).__init__()
self._name = name
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
if op.type in _BLACKLISTED_INFERENCE_OPS:
raise NotImplementedError(
"Operation of type %s (%s) is not supported on the TPU for inference."
" Execution will fail if this op is used in the graph. Make sure your"
" variables are using variable_scope." % (op.type, op.name))
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
@property
def grad_state(self):
return None
@experimental
def validate_inference_rewrite_for_variables(graph):
if not any([x.type == "GuaranteeConst" for x in graph.get_operations()]):
raise RuntimeError(
"No GuaranteeConst ops found in the graph after "
"running tpu.rewrite_for_inference(...). Please "
"check that you are using tf.get_variable() to "
"create and access variables in your tpu "
"computation.")
@experimental
def rewrite_for_inference(computation,
inputs=None,
infeed_queue=None,
device_assignment=None,
name=None):
def guarantee_const_getter(getter, name, *args, **kwargs):
with ops.control_dependencies(None):
return array_ops.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
def wrapped_computation(*args, **kwargs):
context = _TPUInferenceContext(
name=ops.get_default_graph().unique_name("rewrite_for_inference"))
try:
context.Enter()
vscope = variable_scope.get_variable_scope()
prev_custom_getter = vscope.custom_getter
prev_caching_device = vscope.caching_device
vscope.set_custom_getter(guarantee_const_getter)
vscope.set_caching_device(lambda op: op.device)
result = computation(*args, **kwargs)
vscope.set_custom_getter(prev_custom_getter)
vscope.set_caching_device(prev_caching_device)
finally:
context.Exit()
return result
return rewrite(
wrapped_computation,
inputs=inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
| true | true |
1c48297f4c379fead9dd0d6d37e22bd65db66e33 | 1,778 | py | Python | bot_plugins/weather.py | UtopiaXC/Utopia-Bot-For-QQ | 87281f509e20c2d5d25367614d5202f6e53cea50 | [
"MIT"
] | 5 | 2021-03-25T15:18:18.000Z | 2021-03-31T02:29:28.000Z | bot_plugins/weather.py | UtopiaXC/Utopia-Bot-For-QQ | 87281f509e20c2d5d25367614d5202f6e53cea50 | [
"MIT"
] | null | null | null | bot_plugins/weather.py | UtopiaXC/Utopia-Bot-For-QQ | 87281f509e20c2d5d25367614d5202f6e53cea50 | [
"MIT"
] | null | null | null | from nonebot.command import CommandSession
from services.common import ServiceException
from services.weather import get_current_weather_short, get_current_weather_desc
from nonebot.natural_language import NLPSession, IntentCommand
from nonebot.experimental.plugin import on_command, on_natural_language
from jieba import posseg
__plugin_name__ = '天气'
__plugin_usage__ = (
'用法:\n'
'对我说 “天气 香港” 获取天气简要\n'
'“天气 香港 详细” 获取当前天气的详细报告'
)
weather_permission = lambda sender: (not sender.is_privatechat) or sender.is_superuser
@on_command('weather', aliases=('气温', '天气'), permission=weather_permission)
async def _(session: CommandSession):
# 若用户对机器人说“天气”,则此变量为 `['']`
# 若用户对机器人说“天气 香港”,则此变量为 `['香港']`
# 若用户对机器人说“天气 香港 详细”,则此变量为 `['香港', '详细']`
args = session.current_arg_text.strip().split(' ', 1)
if not args[0]:
city = await session.aget(key='city', prompt='请问是什么城市呢?', at_sender=True)
else:
city = args[0]
is_detailed = (len(args) == 2 and args[1].__contains__('详')) or session.state.get('is_detailed')
try:
func = get_current_weather_desc if is_detailed else get_current_weather_short
result = await func(city)
except ServiceException as e:
result = e.message
await session.send(result)
# 只要消息包含“天气”,就执行此处理器
@on_natural_language(keywords={'天气'}, permission=weather_permission)
async def _(session: NLPSession):
# 使用 jieba 将消息句子分词
words = posseg.lcut(session.msg_text.strip())
args = {}
for word in words:
if word.flag == 'ns': # ns 表示该词为地名
args['city'] = word.word
elif word.word in ('详细', '报告', '详情'):
args['is_detailed'] = True
# 置信度为 90,意为将此会话当作 'weather' 命令处理
return IntentCommand(90, 'weather', args=args)
| 29.633333 | 100 | 0.68279 | from nonebot.command import CommandSession
from services.common import ServiceException
from services.weather import get_current_weather_short, get_current_weather_desc
from nonebot.natural_language import NLPSession, IntentCommand
from nonebot.experimental.plugin import on_command, on_natural_language
from jieba import posseg
__plugin_name__ = '天气'
__plugin_usage__ = (
'用法:\n'
'对我说 “天气 香港” 获取天气简要\n'
'“天气 香港 详细” 获取当前天气的详细报告'
)
weather_permission = lambda sender: (not sender.is_privatechat) or sender.is_superuser
@on_command('weather', aliases=('气温', '天气'), permission=weather_permission)
async def _(session: CommandSession):
args = session.current_arg_text.strip().split(' ', 1)
if not args[0]:
city = await session.aget(key='city', prompt='请问是什么城市呢?', at_sender=True)
else:
city = args[0]
is_detailed = (len(args) == 2 and args[1].__contains__('详')) or session.state.get('is_detailed')
try:
func = get_current_weather_desc if is_detailed else get_current_weather_short
result = await func(city)
except ServiceException as e:
result = e.message
await session.send(result)
@on_natural_language(keywords={'天气'}, permission=weather_permission)
async def _(session: NLPSession):
words = posseg.lcut(session.msg_text.strip())
args = {}
for word in words:
if word.flag == 'ns':
args['city'] = word.word
elif word.word in ('详细', '报告', '详情'):
args['is_detailed'] = True
return IntentCommand(90, 'weather', args=args)
| true | true |
1c4829860384987e89e27fe3bc17e0a11f6813fc | 12,746 | py | Python | grid_search_loop/tr5000_N200/ESNtrainCV.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | 1 | 2020-11-29T12:42:30.000Z | 2020-11-29T12:42:30.000Z | grid_search_loop/tr5000_N200/ESNtrainCV.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | null | null | null | grid_search_loop/tr5000_N200/ESNtrainCV.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Config
# biased_regress = True
# normal_equations = True
dataset = "training_1"
path = "../" + dataset +"/"
kfold_split = 10
nan_to_zero = True
mm = False
std = False
numpy_load = True
nanfill = True
## ESN parameters
N_def = [200] # Neurons
scale_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # scaling
mem_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # memory
exponent_def = 1.0 # sigmoid exponent
# Script name struct for report
#script_name = 'ESNtrainCV'
#name_struct_meta = "_N_scale_mem"
#name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N_def, scale_def, mem_def)
## Imports
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
#import matplotlib.pyplot as plt
import ESNtools
import GSK
#Needed for reporting
import platform
import time
# Fix boundary nans (replicate head/tail vals)
def nan_bounds(feats):
nanidx = np.where(np.isnan(feats))[0]
pointer_left = 0
pointer_right = len(feats)-1
fix_left = pointer_left in nanidx
fix_right = pointer_right in nanidx
while fix_left:
if pointer_left in nanidx:
pointer_left += 1
# print("pointer_left:", pointer_left)
else:
val_left = feats[pointer_left]
feats[:pointer_left] = val_left*np.ones((1,pointer_left),dtype=np.float)
fix_left = False
while fix_right:
if pointer_right in nanidx:
pointer_right -= 1
# print("pointer_right:", pointer_right)
else:
val_right = feats[pointer_right]
feats[pointer_right+1:] = val_right*np.ones((1,len(feats)-pointer_right-1),dtype=np.float)
fix_right = False
# nan interpolation
def nan_interpolate(feats):
    """Fill interior NaN runs of the 1-D array `feats` in place.

    Each contiguous run of NaNs is replaced by a constant: the finite value
    just before the run, averaged with the value just after it when that one
    is finite. Assumes nan_bounds() has already been applied, so no run
    touches either end of the array (otherwise feats[nanpos-1] /
    feats[nanpos+1] can index a NaN or fall outside the array).
    """
    nanidx = np.where(np.isnan(feats))[0]
    nan_remain = len(nanidx)
    nanid = 0
    while nan_remain > 0:
        nanpos = nanidx[nanid]
        # Left neighbour of the run (finite if nan_bounds ran first).
        nanval = feats[nanpos-1]
        nan_remain -= 1
        nandim = 1
        initpos = nanpos
        # Check whether it extends
        while nanpos+1 in nanidx:
            nanpos += 1
            nanid += 1
            nan_remain -= 1
            nandim += 1
        # Average sides
        if np.isfinite(feats[nanpos+1]):
            nanval = 0.5 * (nanval + feats[nanpos+1])
        # Single value average
        # NOTE(review): for a single-NaN run this averages with the right
        # neighbour a second time, weighting it 0.75/0.25 instead of
        # 0.5/0.5 -- confirm whether that is intentional.
        if nandim == 1:
            nanval = 0.5 * (nanval + feats[nanpos+1])
        # Broadcast the fill value over the whole run.
        feats[initpos:initpos+nandim] = nanval*np.ones((1,nandim),dtype=np.double)
        nanpos += 1
        nanid += 1
## Get sepsis patients
def get_sepsis_patients(sepsis_label, patient):
    """Label every row with its patient's overall sepsis status.

    A patient is "septic" when any of its rows has a positive sepsis label;
    all of that patient's rows are then marked 1.

    Fixes vs. the original: ``np.int`` was removed in NumPy 1.24 (builtin
    ``int`` is equivalent), and the patient ids are now derived from
    `patient` itself instead of relying on the module-level global ``n``
    (equivalent when ids are 0..n-1, and robust otherwise).

    Parameters
    ----------
    sepsis_label : 1-D array of per-row sepsis labels (0/1).
    patient : 1-D array with the patient id of every row.

    Returns
    -------
    (patient_sep, patient_sep_idx, patient_healthy_idx)
        Per-row septic-patient flags, indices of rows belonging to septic
        patients, and indices of rows belonging to healthy patients.
    """
    patient_sep = np.zeros(len(sepsis_label), dtype=int)
    for pid in np.unique(patient):
        i_pat = np.where(patient == pid)[0]
        # Broadcast the patient-level flag (0 or 1) over all of its rows.
        patient_sep[i_pat] = int(np.sum(sepsis_label[i_pat]) > 0)
    patient_sep_idx = np.where(patient_sep != 0)[0]
    patient_healthy_idx = np.where(patient_sep == 0)[0]
    return patient_sep, patient_sep_idx, patient_healthy_idx
## Create the feature matrix
features = []
patient = []
sepsis_label = []
dataloaded = False

## Read data: either parse the raw challenge files or load cached .npy arrays
if not numpy_load:
    ## Folder and files
    fnames = os.listdir(path)
    fnames.sort()
    if 'README.md' in fnames:
        fnames.remove('README.md')
    print('last file: ', fnames[-1])
    n = len(fnames)
    print(n, ' files present')
    ## read data (one file per patient)
    for i in range(n):
        input_file = os.path.join(path, fnames[i])
        if i == 0:
            # The first file also provides the column header.
            data, sep_lab, columns = ESNtools.read_challenge_data_label(input_file, return_header=True)
        else:
            data, sep_lab = ESNtools.read_challenge_data_label(input_file)
        features.append(data)
        sepsis_label.append(sep_lab)
        # Tag every row of this file with the patient index i.
        # BUGFIX: dtype was np.int, an alias removed in NumPy 1.24.
        pat = i * np.ones((sep_lab.shape), dtype=int)
        patient.append(pat)
    feature_matrix = np.concatenate(features)
    del(features)
    sepsis_label = np.concatenate(sepsis_label)
    patient = np.concatenate(patient)
    dataloaded = True
else:
    npyfilename = "../npy/" + dataset + "_patient.npy"
    patient = np.load(npyfilename)
    print(npyfilename, " loaded")
    npyfilename = "../npy/" + dataset + "_Y.npy"
    sepsis_label = np.load(npyfilename)
    print(npyfilename, " loaded")
    #ADD nanfill tag
    if nanfill:
        dataset = dataset + "_nanfill"
    if mm:
        npyfilename = "../npy/" + dataset + "_mm.npy"
        # NOTE(review): mm is cleared here, which skips the MinMaxScaler pass
        # below -- presumably the "_mm" cache is already scaled; confirm.
        mm = False
        print(npyfilename, '(mm) to be loaded')
    else:
        npyfilename = "../npy/" + dataset + ".npy"
        print(npyfilename, '(not mm) to be loaded')
    n = len(np.unique(patient))
    print(n, ' files present')
    dataloaded = True
    feature_matrix = np.load(npyfilename)

## Flatten patient to a 1-D vector of per-row patient ids
patient = patient.flatten()
## Separate pointers: basic slices are numpy views, so scaling feature_phys
## in place below also updates feature_matrix.
feature_phys = feature_matrix[:,:-6] ## Physiology
feature_demog = feature_matrix[:,-6:] ## Demographics
## Normalize mm(all) or std (sepsis, phys) vals, feature-based
if mm:
    # Min-max scale each patient's block of rows independently.
    scaler = MinMaxScaler()
    for i in range(n):
        i_pat = np.where(patient==i)[0]
        scaler.fit(feature_matrix[i_pat,:])
        feature_matrix[i_pat,:] = scaler.transform(feature_matrix[i_pat,:])
elif std:
    ## (Get sepsis patients)
    patient_sep, patient_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
    # Standardize the physiology columns using statistics computed only on
    # rows of patients that never develop sepsis.
    scaler = StandardScaler()
    scaler.fit(feature_phys[patient_healthy_idx,:])
    feature_phys[:,:] = scaler.transform(feature_phys[:,:])
## nan to zero (after optional scaling)
if nan_to_zero:
    feature_matrix[np.isnan(feature_matrix)]=0
    print("Changed nan to 0")
## Septic groups stratify
patient_sep, patient_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
#healthy_patient_list = np.unique(patient[patient_healthy_idx])
#sep_patient_list = np.unique(patient[patient_sep_idx])
## Nonlinear mapping function
sigmoid_exponent = exponent_def
func = ESNtools.sigmoid
#SFK
#skf = StratifiedKFold(n_splits=kfold_split)
#skf.get_n_splits(X)
#GSKF: group-stratified folds over (sepsis-status, patient-id) pairs.
# NOTE(review): the fold count is hard-coded to 10 here; presumably it
# should use kfold_split like the commented StratifiedKFold above -- confirm.
groups = patient
train_index, test_index = GSK.GroupStratifiedKFold(np.hstack([patient_sep.reshape(-1,1), groups.reshape(-1,1)]), 10)
def get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index):
    """Evaluate one (N, scale, mem) point of the ESN hyper-parameter grid.

    Feeds the features through an echo state network patient by patient,
    trains linear readout weights on each precomputed group-stratified
    fold, pools the out-of-fold predictions, and reports the metrics both
    to stdout and to a per-point file 'report_ESNtrainCV<name_struct>.txt'.

    Parameters
    ----------
    feature_matrix : 2-D array of per-hour features, rows grouped by patient.
    patient : 1-D array with the patient id of every row.
    sepsis_label : 1-D array of per-row sepsis labels.
    M, Mb : ESN input mask and bias mask.
    N : number of ESN neurons.
    scale, mem : ESN input-scaling and memory parameters.
    sigmoid_exponent : exponent passed to the sigmoid nonlinearity `func`.
    train_index, test_index : per-fold lists of row indices (GSKF).
    """
    script_name = 'ESNtrainCV'
    name_struct_meta = "_N_scale_mem"
    name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N, scale, mem)

    ## Perform ESN feed, one patient at a time.
    # pat_shift[i] is the first row AFTER patient i's block.
    pat_shift = np.append(np.where(np.diff(patient)!=0)[0] + 1, [len(patient)])
    pat_ipos = 0
    print("pat_shift: ", len(pat_shift))
    allocateESN = True
    print('ESN: ')
    if allocateESN:
        # Preallocate the full state matrix. BUGFIX: dtype was np.float,
        # an alias removed in NumPy 1.24; builtin float is equivalent.
        ESN = np.ones((len(feature_matrix), N+1), dtype=float)
        for i in range(len(pat_shift)):
            print("Feeding ESN patient:", i)
            ESN[pat_ipos:pat_shift[i],:] = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)
            pat_ipos = pat_shift[i]
    else:
        # Incremental (slower, reallocating) variant kept for reference.
        for i in range(len(pat_shift)):
            if i == 0:
                ESN = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)
            else:
                ESN = np.vstack((ESN, ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)))
            pat_ipos = pat_shift[i]
    del feature_matrix

    ## Divide in sets
    X = ESN
    y = sepsis_label

    ## Cross-validation over the precomputed GSKF folds
    results = []
    target = []
    kk = 0
    for j in range(len(train_index)):
        X_train, X_test = X[train_index[j]], X[test_index[j]]
        y_train, y_test = y[train_index[j]], y[test_index[j]]
        patients_id_train, patients_id_test = patient[train_index[j]], patient[test_index[j]]
        w = ESNtools.get_weights_lu_biasedNE(X_train, y_train)
        print("Start testing...", flush=True)
        Y_pred = (np.matmul(X_test, w))
        print(kk, ' realisation ')
        print("auc: ", roc_auc_score(y_test, Y_pred))
        kk += 1
        target.append(y_test)
        results.append(Y_pred)

    ## Evaluate pooled out-of-fold predictions
    results = np.concatenate(results)
    target = np.concatenate(target)
    auc = roc_auc_score(target, results)
    print('auc: ', auc)

    ## Threshold study range
    th_i = np.min(results)
    th_f = np.max(results)

    ## AUC-based CV: skip the threshold sweep and report zeros for the
    ## threshold-dependent metrics.
    AUC_CV = True
    if AUC_CV:
        th_max = 0
        f1 = 0
        ACC = 0
        Pr = 0
        Re = 0
    else:
        th_steps = 1000
        th_step = (th_f - th_i) / th_steps
        thsum = 0
        th = np.zeros((1000, 1), dtype=np.double)
        f1 = np.zeros((1000, 1), dtype=np.double)
        print("Threshold: Loop between ", th_i, th_i + th_step * th_steps)
        for i, j in enumerate(np.arange(th_i, th_f, th_step)):
            # NOTE(review): this compares the threshold VALUE j against the
            # step COUNT (1000); presumably `i < th_steps` was intended to
            # guard the buffer -- confirm.
            if j < th_steps:
                th[i] = j
                f1[i] = f1_score(target, results > th[i])
                thsum = thsum + th[i]
                if i % 100 == 0:
                    print(i, th[i], f1[i])
                if f1[i] < 0.001 and np.abs(thsum) > 0:
                    th = th[:i]
                    f1 = f1[:i]
                    break
        ## Best threshold and threshold-based metrics.
        # BUGFIX: these lines previously ran unconditionally at function
        # level; with AUC_CV True, `th` is undefined and every grid point
        # raised NameError (silently swallowed by the caller's bare
        # except), so no report file was ever written. They now run only
        # when the threshold sweep actually produced `th` and `f1`.
        th_max = th[np.argmax(f1)]
        Pr = precision_score(target, results > th_max)
        Re = recall_score(target, results > th_max)
        ACC = accuracy_score(target, results > th_max)
        auc = roc_auc_score(target, results)
        f1 = f1_score(target, results > th_max)

    ## Report: machine, location, parameters and metrics.
    user = platform.uname()[1] + '@' + platform.platform()
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # write to report file
    output_file = 'report_' + script_name + name_struct + '.txt'
    with open(output_file, 'w') as f:
        f.write(user + '\n')
        f.write(dir_path + '\n')
        f.write(__file__ + '\n')
        f.write(time.strftime("%Y-%m-%d %H:%M") + '\n')
        # f.write('Dataset: ' + path + '\n')
        f.write('{:03d} \t N \n'.format(N))
        f.write('{:1.3f} \t scale \n'.format(scale))
        f.write('{:1.3f} \t mem \n'.format(mem))
        f.write('%1.3f \t exp\n' % sigmoid_exponent)
        f.write('(%2.4f, %2.4f, %2.4f) \t th_i, th_f, *th_sc\n' % (th_i, th_f, th_f - th_i))
        f.write('%2.4f \t th\n' % th_max)
        f.write('%2.4f \t Pr\n' % Pr)
        f.write('%2.4f \t Re\n' % Re)
        f.write('%2.4f \t F1\n' % f1)
        f.write('%2.4f \t ACC\n' % ACC)
        f.write('%2.4f \t AUC\n' % auc)
    print(user)
    print(dir_path)
    print(__file__)
    print(time.strftime("%Y-%m-%d %H:%M"))
    print('Dataset: ' + path)
    print('N: {:03d}'.format(N))
    print('scale: {:1.3f}'.format(scale))
    print('mem: {:1.3f}'.format(mem))
    print('exp: %1.3f' % sigmoid_exponent)
    print('th_i, th_f, *th_sc: (%2.4f, %2.4f, %2.4f)' % (th_i, th_f, th_f - th_i))
    print('th: %2.4f' % th_max)
    print('Pr: %2.4f' % Pr)
    print('Re: %2.4f' % Re)
    print('F1: %2.4f' % f1)
    print('ACC: %2.4f' % ACC)
    print('AUC: %2.4f' % auc)
## Grid_search for loop
for N in N_def:  # Neurons
    ## Random seed: reset so the masks are reproducible for every N.
    np.random.seed(seed=0)
    ## Mask parameters: input mask M and bias mask Mb, uniform in [-1, 1).
    M = 2*np.random.rand(np.shape(feature_matrix)[1], N) - 1
    Mb = 2*np.random.rand(1, N) - 1
    for scale in scale_def:  # scaling factor
        for mem in mem_def:  # memory
            try:
                get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index)
            except Exception as exc:
                # Report the failure and continue with the next grid point.
                # BUGFIX: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit and hid the error message.
                print("Error at ", N, scale, mem, ":", exc)
| 31.944862 | 154 | 0.595716 |
et = "training_1"
path = "../" + dataset +"/"
kfold_split = 10
nan_to_zero = True
mm = False
std = False
numpy_load = True
nanfill = True
scale_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
mem_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
exponent_def = 1.0
umpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
import ESNtools
import GSK
import platform
import time
def nan_bounds(feats):
nanidx = np.where(np.isnan(feats))[0]
pointer_left = 0
pointer_right = len(feats)-1
fix_left = pointer_left in nanidx
fix_right = pointer_right in nanidx
while fix_left:
if pointer_left in nanidx:
pointer_left += 1
else:
val_left = feats[pointer_left]
feats[:pointer_left] = val_left*np.ones((1,pointer_left),dtype=np.float)
fix_left = False
while fix_right:
if pointer_right in nanidx:
pointer_right -= 1
else:
val_right = feats[pointer_right]
feats[pointer_right+1:] = val_right*np.ones((1,len(feats)-pointer_right-1),dtype=np.float)
fix_right = False
def nan_interpolate(feats):
nanidx = np.where(np.isnan(feats))[0]
nan_remain = len(nanidx)
nanid = 0
while nan_remain > 0:
nanpos = nanidx[nanid]
nanval = feats[nanpos-1]
nan_remain -= 1
nandim = 1
initpos = nanpos
while nanpos+1 in nanidx:
nanpos += 1
nanid += 1
nan_remain -= 1
nandim += 1
if np.isfinite(feats[nanpos+1]):
nanval = 0.5 * (nanval + feats[nanpos+1])
if nandim == 1:
nanval = 0.5 * (nanval + feats[nanpos+1])
feats[initpos:initpos+nandim] = nanval*np.ones((1,nandim),dtype=np.double)
nanpos += 1
nanid += 1
nts(sepsis_label, patient):
patient_sep = np.zeros(len(sepsis_label),dtype=np.int)
for i in range(n):
i_pat = np.where(patient==i)[0]
patient_sep[i_pat] = int(np.sum(sepsis_label[i_pat])>0)*np.ones(len(i_pat), dtype=np.int)
patient_sep_idx = np.where(patient_sep!=0)[0]
patient_healthy_idx = np.where(patient_sep==0)[0]
return patient_sep, patient_sep_idx, patient_healthy_idx
sepsis_label = []
dataloaded = False
y_load:
istdir(path)
fnames.sort()
if 'README.md' in fnames:
fnames.remove('README.md')
print('last file: ', fnames[-1])
n = len(fnames)
print(n, ' files present')
in range(n):
input_file = os.path.join(path, fnames[i])
if i ==0:
data, sep_lab, columns = ESNtools.read_challenge_data_label(input_file, return_header=True)
else:
data, sep_lab = ESNtools.read_challenge_data_label(input_file)
features.append(data)
sepsis_label.append(sep_lab)
pat = i * np.ones((sep_lab.shape), dtype=np.int)
patient.append(pat)
feature_matrix = np.concatenate(features)
del(features)
sepsis_label = np.concatenate(sepsis_label)
patient = np.concatenate(patient)
dataloaded = True
else:
npyfilename = "../npy/" + dataset + "_patient.npy"
patient = np.load(npyfilename)
print(npyfilename, " loaded")
npyfilename = "../npy/" + dataset + "_Y.npy"
sepsis_label = np.load(npyfilename)
print(npyfilename, " loaded")
if nanfill:
dataset = dataset + "_nanfill"
if mm:
npyfilename = "../npy/" + dataset + "_mm.npy"
mm = False
print(npyfilename, '(mm) to be loaded')
else:
npyfilename = "../npy/" + dataset + ".npy"
print(npyfilename, '(not mm) to be loaded')
n = len(np.unique(patient))
print(n, ' files present')
dataloaded = True
feature_matrix = np.load(npyfilename)
nt.flatten()
ture_matrix[:,:-6] og = feature_matrix[:,-6:] = np.where(patient==i)[0]
scaler.fit(feature_matrix[i_pat,:])
feature_matrix[i_pat,:] = scaler.transform(feature_matrix[i_pat,:])
elif std:
nt_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
scaler = StandardScaler()
scaler.fit(feature_phys[patient_healthy_idx,:])
feature_phys[:,:] = scaler.transform(feature_phys[:,:])
ro:
feature_matrix[np.isnan(feature_matrix)]=0
print("Changed nan to 0")
p_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
_def
func = ESNtools.sigmoid
groups = patient
train_index, test_index = GSK.GroupStratifiedKFold(np.hstack([patient_sep.reshape(-1,1), groups.reshape(-1,1)]), 10)
def get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index):
script_name = 'ESNtrainCV'
name_struct_meta = "_N_scale_mem"
name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N, scale, mem)
(np.diff(patient)!=0)[0] + 1, [len(patient)])
pat_ipos = 0
print("pat_shift: ",len(pat_shift))
allocateESN = True
print('ESN: ')
if allocateESN:
ESN = np.ones((len(feature_matrix),N+1), dtype = np.float)
for i in range(len(pat_shift)):
print("Feeding ESN patient:", i)
ESN[pat_ipos:pat_shift[i],:] = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)
pat_ipos = pat_shift[i]
else:
for i in range(len(pat_shift)):
if i == 0:
ESN = ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)
else:
ESN = np.vstack((ESN, ESNtools.feedESN(feature_matrix[pat_ipos:pat_shift[i]], N, M, Mb, scale, mem, func, sigmoid_exponent)))
pat_ipos = pat_shift[i]
del feature_matrix
y = sepsis_label
sults = []
target = []
kk = 0
ge(len(train_index)):
X_train, X_test = X[train_index[j]], X[test_index[j]]
y_train, y_test = y[train_index[j]], y[test_index[j]]
patients_id_train, patients_id_test = patient[train_index[j]], patient[test_index[j]]
w = ESNtools.get_weights_lu_biasedNE(X_train, y_train)
print("Start testing...", flush=True)
Y_pred = (np.matmul(X_test,w))
print(kk, ' realisation ')
print("auc: ", roc_auc_score(y_test, Y_pred))
kk +=1
target.append(y_test)
results.append(Y_pred)
concatenate(results)
target = np.concatenate(target)
auc = roc_auc_score(target,results)
print('auc: ', auc)
n(results)
th_f = np.max(results)
True
if AUC_CV:
th_max = 0
f1 = 0
ACC = 0
Pr = 0
Re = 0
else:
th_steps = 1000
th_step = (th_f-th_i)/th_steps
thsum = 0
th = np.zeros((1000, 1), dtype = np.double)
f1 =np.zeros((1000, 1), dtype = np.double)
print("Threshold: Loop between ", th_i, th_i+th_step*th_steps)
for i, j in enumerate(np.arange(th_i, th_f, th_step)):
if j < th_steps:
th[i] = j
f1[i] = f1_score(target, results > th[i])
thsum = thsum + th[i]
if i%100 == 0:
print(i, th[i], f1[i])
if f1[i] < 0.001 and np.abs(thsum) > 0:
th = th[:i]
f1 = f1[:i]
break
= th[np.argmax(f1)]
Pr = precision_score(target, results > th_max)
Re = recall_score(target, results > th_max)
ACC = accuracy_score(target, results > th_max)
auc = roc_auc_score(target, results)
f1 = f1_score(target, results > th_max)
user = platform.uname()[1] + '@' + platform.platform()
dir_path = os.path.dirname(os.path.realpath(__file__))
output_file = 'report_' + script_name + name_struct + '.txt'
with open(output_file, 'w') as f:
f.write(user + '\n')
f.write(dir_path + '\n')
f.write(__file__ + '\n')
f.write(time.strftime("%Y-%m-%d %H:%M") + '\n')
f.write('{:03d} \t N \n'.format(N))
f.write('{:1.3f} \t scale \n'.format(scale))
f.write('{:1.3f} \t mem \n'.format(mem))
f.write('%1.3f \t exp\n' % sigmoid_exponent)
f.write('(%2.4f, %2.4f, %2.4f) \t th_i, th_f, *th_sc\n' % (th_i, th_f, th_f-th_i))
f.write('%2.4f \t th\n' % th_max)
f.write('%2.4f \t Pr\n' % Pr)
f.write('%2.4f \t Re\n' % Re)
f.write('%2.4f \t F1\n' % f1)
f.write('%2.4f \t ACC\n' % ACC)
f.write('%2.4f \t AUC\n' % auc)
print(user)
print(dir_path)
print(__file__)
print(time.strftime("%Y-%m-%d %H:%M"))
print('Dataset: ' + path)
print('N: {:03d}'.format(N))
print('scale: {:1.3f}'.format(scale))
print('mem: {:1.3f}'.format(mem))
print('exp: %1.3f' % sigmoid_exponent)
print('th_i, th_f, *th_sc: (%2.4f, %2.4f, %2.4f)' % (th_i, th_f, th_f-th_i))
print('th: %2.4f' % th_max)
print('Pr: %2.4f' % Pr)
print('Re: %2.4f' % Re)
print('F1: %2.4f' % f1)
print('ACC: %2.4f' % ACC)
print('AUC: %2.4f' % auc)
N_def)):
N = N_def[i_N]
m.seed(seed=0)
dom.rand(np.shape(feature_matrix)[1],N)-1
Mb = 2*np.random.rand(1,N)-1
for i_scale in range(len(scale_def)):
scale = scale_def[i_scale]
for i_mem in range(len(mem_def)):
mem = mem_def[i_mem]
try:
get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index)
except:
print("Error at ", N, scale, mem)
pass
| true | true |
1c482d59925e0619904da8fa23e69b884ba76a39 | 825 | py | Python | shooter/config.py | codershkoder/zombie_shooter_025 | 12582915af81d641f6a654418c02792ee96ea2a8 | [
"MIT"
] | null | null | null | shooter/config.py | codershkoder/zombie_shooter_025 | 12582915af81d641f6a654418c02792ee96ea2a8 | [
"MIT"
] | null | null | null | shooter/config.py | codershkoder/zombie_shooter_025 | 12582915af81d641f6a654418c02792ee96ea2a8 | [
"MIT"
] | null | null | null | from pathlib import Path
# Path settings
# NOTE(review): resolved from the process working directory, not the module
# location -- resources will not be found if the game is launched from
# another directory; confirm this is intended.
_BASE_DIR = Path.cwd()
_RESOURCES_DIR = _BASE_DIR / 'resources'
_IMAGES_DIR = _RESOURCES_DIR / 'images'
_LEVELS_DIR = _RESOURCES_DIR / 'levels'
# General settings
WINDOW_CAPTION = 'Зомби шутер'
FRAME_RATE = 60
BACKGROUND_COLOR = (0, 0, 0)
# Player settings
PLAYER_IMAGE = _IMAGES_DIR / 'player_min.png'
PLAYER_SPEED = 5
PLAYER_HEALTH = 100
PLAYER_IMMORTALITY_TIME = 1
# Bullet settings
BULLET_IMAGE = _IMAGES_DIR / 'bullet.png'
BULLET_SPEED = 15
BULLET_DAMAGE = 10
# Zombie settings
ZOMBIE_IMAGE = _IMAGES_DIR / 'zombie_min.png'
ZOMBIE_SPEED = 2
ZOMBIE_RADIUS_AGR = 70
ZOMBIE_HEALTH = 2000
ZOMBIE_DAMAGE = 40
# Level list
LEVEL_1 = _LEVELS_DIR / 'level.txt'
# Environment objects
LANDSCAPE_GROUND = _IMAGES_DIR / 'ground.png'
LANDSCAPE_STONE = _IMAGES_DIR / 'stone.png'
| 21.710526 | 45 | 0.768485 | from pathlib import Path
_BASE_DIR = Path.cwd()
_RESOURCES_DIR = _BASE_DIR / 'resources'
_IMAGES_DIR = _RESOURCES_DIR / 'images'
_LEVELS_DIR = _RESOURCES_DIR / 'levels'
WINDOW_CAPTION = 'Зомби шутер'
FRAME_RATE = 60
BACKGROUND_COLOR = (0, 0, 0)
PLAYER_IMAGE = _IMAGES_DIR / 'player_min.png'
PLAYER_SPEED = 5
PLAYER_HEALTH = 100
PLAYER_IMMORTALITY_TIME = 1
BULLET_IMAGE = _IMAGES_DIR / 'bullet.png'
BULLET_SPEED = 15
BULLET_DAMAGE = 10
ZOMBIE_IMAGE = _IMAGES_DIR / 'zombie_min.png'
ZOMBIE_SPEED = 2
ZOMBIE_RADIUS_AGR = 70
ZOMBIE_HEALTH = 2000
ZOMBIE_DAMAGE = 40
LEVEL_1 = _LEVELS_DIR / 'level.txt'
LANDSCAPE_GROUND = _IMAGES_DIR / 'ground.png'
LANDSCAPE_STONE = _IMAGES_DIR / 'stone.png'
| true | true |
1c482e3d03274f06a56af75d2a96e0b689dfe117 | 887 | py | Python | roshant/everest/everest/urls.py | sushant60/Python-web | 426a89200e52e902b3db519998485a5de202fa91 | [
"Apache-2.0"
] | null | null | null | roshant/everest/everest/urls.py | sushant60/Python-web | 426a89200e52e902b3db519998485a5de202fa91 | [
"Apache-2.0"
] | null | null | null | roshant/everest/everest/urls.py | sushant60/Python-web | 426a89200e52e902b3db519998485a5de202fa91 | [
"Apache-2.0"
] | null | null | null | """everest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
# URL routes: the Django admin site plus the two app views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('eve', views.first, name='first'),
    path('science', views.second, name='second'),
]
| 35.48 | 78 | 0.680947 | from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('eve', views.first, name='first'),
path('science', views.second, name='second'),
]
| true | true |
1c482ebcbaa1bfb3289d76e89372b8cceb55517f | 3,201 | py | Python | B4860-V7/xxx/scons-3.1.1/engine/SCons/Tool/zip.py | miaopei/B4860 | 6f084bd485b787bb36de26d40f83ff4833098c3d | [
"MIT"
] | null | null | null | B4860-V7/xxx/scons-3.1.1/engine/SCons/Tool/zip.py | miaopei/B4860 | 6f084bd485b787bb36de26d40f83ff4833098c3d | [
"MIT"
] | null | null | null | B4860-V7/xxx/scons-3.1.1/engine/SCons/Tool/zip.py | miaopei/B4860 | 6f084bd485b787bb36de26d40f83ff4833098c3d | [
"MIT"
] | null | null | null | """SCons.Tool.zip
Tool-specific initialization for zip.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/zip.py 72ae09dc35ac2626f8ff711d8c4b30b6138e08e3 2019-08-08 14:50:06 bdeegan"
import os.path
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
import zipfile
zipcompression = zipfile.ZIP_DEFLATED
def zip(target, source, env):
    """SCons action: archive the `source` nodes into the zip file `target[0]`.

    Directory sources are walked recursively and every regular file is
    added; archive member names are taken relative to $ZIPROOT.
    Compression is $ZIPCOMPRESSION (defaults to 0, i.e. stored).

    Fix vs. the original: the ZipFile is now managed with a context
    manager, so the archive handle is closed even when a write raises
    (the original leaked the handle and could leave a truncated file).
    """
    compression = env.get('ZIPCOMPRESSION', 0)
    # Hoisted: the relpath base is the same for every member.
    ziproot = str(env.get('ZIPROOT', ''))
    with zipfile.ZipFile(str(target[0]), 'w', compression) as zf:
        for s in source:
            if s.isdir():
                for dirpath, dirnames, filenames in os.walk(str(s)):
                    for fname in filenames:
                        path = os.path.join(dirpath, fname)
                        if os.path.isfile(path):
                            zf.write(path, os.path.relpath(path, ziproot))
            else:
                zf.write(str(s), os.path.relpath(str(s), ziproot))
# Wrap the zip() function above as an SCons Action; targets are rebuilt
# when $ZIPCOMPRESSION changes.
# NOTE(review): SCons.Action is not imported explicitly in this module;
# presumably it is pulled in via SCons.Builder -- confirm.
zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION'])
# Builder for .zip targets: sources may be files or directories
# (source_factory=Entry, scanned with DirScanner); multi=1 lets several
# calls accumulate sources onto the same target archive.
ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
                                   source_factory = SCons.Node.FS.Entry,
                                   source_scanner = SCons.Defaults.DirScanner,
                                   suffix = '$ZIPSUFFIX',
                                   multi = 1)
def generate(env):
    """Attach the Zip builder and its construction variables to *env*."""
    # Register the builder only when the environment lacks one already.
    try:
        env['BUILDERS']['Zip']
    except KeyError:
        env['BUILDERS']['Zip'] = ZipBuilder

    # Construction variables consumed by the builder's '$ZIPCOM' action.
    settings = {
        'ZIP': 'zip',
        'ZIPFLAGS': SCons.Util.CLVar(''),
        'ZIPCOM': zipAction,
        'ZIPCOMPRESSION': zipcompression,
        'ZIPSUFFIX': '.zip',
        'ZIPROOT': SCons.Util.CLVar(''),
    }
    for key, value in settings.items():
        env[key] = value
def exists(env):
    """Tool-detection hook: report the zip tool as always available."""
    return True
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 34.419355 | 114 | 0.663855 |
__revision__ = "src/engine/SCons/Tool/zip.py 72ae09dc35ac2626f8ff711d8c4b30b6138e08e3 2019-08-08 14:50:06 bdeegan"
import os.path
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
import zipfile
zipcompression = zipfile.ZIP_DEFLATED
def zip(target, source, env):
compression = env.get('ZIPCOMPRESSION', 0)
zf = zipfile.ZipFile(str(target[0]), 'w', compression)
for s in source:
if s.isdir():
for dirpath, dirnames, filenames in os.walk(str(s)):
for fname in filenames:
path = os.path.join(dirpath, fname)
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))
else:
zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))
zf.close()
zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION'])
ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$ZIPSUFFIX',
multi = 1)
def generate(env):
try:
bld = env['BUILDERS']['Zip']
except KeyError:
bld = ZipBuilder
env['BUILDERS']['Zip'] = bld
env['ZIP'] = 'zip'
env['ZIPFLAGS'] = SCons.Util.CLVar('')
env['ZIPCOM'] = zipAction
env['ZIPCOMPRESSION'] = zipcompression
env['ZIPSUFFIX'] = '.zip'
env['ZIPROOT'] = SCons.Util.CLVar('')
def exists(env):
return True
| true | true |
1c482f45ae4ff817a2e5c8c5c289bc77f9c36105 | 1,792 | py | Python | esolang_IDE/visualisers/io_widget.py | Avanta8/Esolang-Interpreter-IDE | 9a958eb26314c6c138d1921e76c52b1bb53c85ed | [
"MIT"
] | 3 | 2020-01-16T23:04:24.000Z | 2020-07-21T23:55:59.000Z | esolang_IDE/visualisers/io_widget.py | Avanta8/Esolang-Interpreter-IDE | 9a958eb26314c6c138d1921e76c52b1bb53c85ed | [
"MIT"
] | null | null | null | esolang_IDE/visualisers/io_widget.py | Avanta8/Esolang-Interpreter-IDE | 9a958eb26314c6c138d1921e76c52b1bb53c85ed | [
"MIT"
] | null | null | null | from PyQt5 import QtCore, QtWidgets
from esolang_IDE.input_text import HighlightInputText
from esolang_IDE.output_text import OutputText
class IOWidget(QtWidgets.QWidget):
    """Composite I/O panel: an input editor, an output view, and a
    transient single-line error display that can auto-hide on a timer."""
    def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):
        super().__init__(parent=parent, flags=flags)
        self.init_widgets()
        # Start "active" so the initial clear_error_text() call is not
        # short-circuited by its guard and actually hides the error line.
        self.error_text_active = True
        self.clear_error_text()
    def init_widgets(self):
        """Create the child widgets and lay them out vertically."""
        # Single-shot timer that clears the error line after a delay
        # (armed by timed_error_text()).
        self._error_text_timer = QtCore.QTimer(self)
        self._error_text_timer.setSingleShot(True)
        self._error_text_timer.timeout.connect(self.clear_error_text)
        self._input_text = HighlightInputText(self)
        self._output_text = OutputText(self)
        # Read-only line used purely for displaying error messages.
        self._error_text = QtWidgets.QLineEdit(self)
        self._error_text.setReadOnly(True)
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(QtWidgets.QLabel('Input:'))
        layout.addWidget(self._input_text)
        layout.addWidget(QtWidgets.QLabel('Output:'))
        layout.addWidget(self._output_text)
        layout.addWidget(self._error_text)
        self.setLayout(layout)
    def set_error_text(self, message):
        """Show `message` in the error line until explicitly cleared."""
        self._error_text_timer.stop()
        self._error_text.setText(message)
        self._error_text.show()
        self.error_text_active = True
    def timed_error_text(self, message, time=1000):
        """Show `message`, then auto-clear it after `time` milliseconds."""
        self.set_error_text(message)
        self._error_text_timer.start(time)
    def clear_error_text(self):
        """Hide and empty the error line (no-op when already cleared)."""
        if not self.error_text_active:
            return
        self._error_text_timer.stop()
        self._error_text.clear()
        self._error_text.hide()
        self.error_text_active = False
    def get_input_text(self):
        """Return the HighlightInputText child widget."""
        return self._input_text
    def get_output_text(self):
        """Return the OutputText child widget."""
        return self._output_text
| 30.896552 | 69 | 0.689174 | from PyQt5 import QtCore, QtWidgets
from esolang_IDE.input_text import HighlightInputText
from esolang_IDE.output_text import OutputText
class IOWidget(QtWidgets.QWidget):
def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):
super().__init__(parent=parent, flags=flags)
self.init_widgets()
self.error_text_active = True
self.clear_error_text()
def init_widgets(self):
self._error_text_timer = QtCore.QTimer(self)
self._error_text_timer.setSingleShot(True)
self._error_text_timer.timeout.connect(self.clear_error_text)
self._input_text = HighlightInputText(self)
self._output_text = OutputText(self)
self._error_text = QtWidgets.QLineEdit(self)
self._error_text.setReadOnly(True)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(QtWidgets.QLabel('Input:'))
layout.addWidget(self._input_text)
layout.addWidget(QtWidgets.QLabel('Output:'))
layout.addWidget(self._output_text)
layout.addWidget(self._error_text)
self.setLayout(layout)
def set_error_text(self, message):
self._error_text_timer.stop()
self._error_text.setText(message)
self._error_text.show()
self.error_text_active = True
def timed_error_text(self, message, time=1000):
self.set_error_text(message)
self._error_text_timer.start(time)
def clear_error_text(self):
if not self.error_text_active:
return
self._error_text_timer.stop()
self._error_text.clear()
self._error_text.hide()
self.error_text_active = False
def get_input_text(self):
return self._input_text
def get_output_text(self):
return self._output_text
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.