repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
amadeusproject/amadeuslms | dashboards/utils.py | 1 | 62643 | import calendar
import os
from datetime import date, datetime, timedelta
from django.utils import formats, timezone
from django.conf import settings
from django.utils.dateparse import parse_datetime
from django.core.urlresolvers import reverse
from django.utils.formats import get_format
from django.utils.translation import ugettext_lazy as _
from subjects.models import Tag, Subject
from topics.models import Topic, Resource
from log.models import Log
from bulletin.models import Bulletin
from file_link.models import FileLink
from goals.models import Goals
from links.models import Link
from mural.models import SubjectPost
from pdf_file.models import PDFFile
from questionary.models import Questionary
from goals.models import Goals
from webpage.models import Webpage
from webconference.models import Webconference
from youtube_video.models import YTVideo
from notifications.models import Notification
from pendencies.models import Pendencies, PendencyDone
from notifications.utils import get_resource_users
from django.db.models import Q as Cond, Max, Count
from django.db.models.functions import TruncDate
from django.http import HttpResponse, Http404
from collections import OrderedDict
from gtts import gTTS
from mutagen.mp3 import MP3
import operator, math
from log.search import *
from categories.models import Category
from amadeus.permissions import has_category_permissions
from django.shortcuts import get_object_or_404
from users.models import User
import xlwt
from itertools import islice
def done_percent(pendency):
    """Return the percentage (0-100) of the resource's users that completed *pendency*."""
    eligible = get_resource_users(pendency.resource)
    total = eligible.count()

    completed = PendencyDone.objects.filter(
        pendency=pendency, student__id__in=eligible.values_list("id", flat=True)
    ).count()

    # Guard against division by zero when nobody is enrolled or nobody finished.
    if completed == 0 or total == 0:
        return 0

    return (completed * 100) / total
def get_pend_graph(user, subject):
    """Build the pendencies timeline data for *subject*.

    Returns a list of dicts (one per visible pendency that begins after the
    subject opened), each carrying formatted begin/end/limit dates, the
    class-wide completion ratio and whether *user* finished the task
    (on time or late).
    """
    pendencies = Pendencies.objects.filter(
        resource__topic__subject=subject,
        begin_date__gte=subject.init_date,
        resource__visible=True,
    )

    graph = []

    for pendency in pendencies:
        item = {}
        item["date"] = {}

        # Formatted strings for display plus the raw datetimes for the chart.
        item["date"]["start"] = formats.date_format(pendency.begin_date, "m/d/Y H:i")
        item["date"]["startDate"] = pendency.begin_date
        item["date"]["end"] = formats.date_format(pendency.end_date, "m/d/Y H:i")
        item["date"]["endDate"] = pendency.end_date
        # "infinity" marks a task with no late-submission limit.
        item["date"]["delay"] = (
            formats.date_format(pendency.limit_date, "m/d/Y H:i")
            if pendency.limit_date
            else "infinity"
        )
        item["date"]["delayDate"] = pendency.limit_date

        item["action"] = pendency.get_action_display()
        item["name"] = pendency.resource.name

        # The completion ratio (0..1) only makes sense once the task started.
        if pendency.begin_date <= timezone.now():
            item["percent"] = done_percent(pendency) / 100
        else:
            item["percent"] = 0

        item["access_link"] = str(pendency.resource.access_link())

        users = get_resource_users(pendency.resource)

        # NOTE(review): the five locals below are computed but never used.
        subject_begin_date = pendency.resource.topic.subject.init_date
        pend_action = pendency.action
        resource_type = pendency.resource._my_subclass
        resource_key = resource_type + "_id"
        resource_id = pendency.resource.id

        item["done"] = False
        item["doneLate"] = False

        # Only users of the resource can have a completion record.
        if user in users:
            has_action = PendencyDone.objects.filter(pendency=pendency, student=user)
            item["done"] = has_action.exists()
            item["doneLate"] = False

            if item["done"]:
                pDone = has_action.first()
                item["doneLate"] = pDone.late

        graph.append(item)

    return graph
def getAccessedTags(subject, user):
    """Per-tag access counts (class-wide and for *user*) inside *subject*.

    Queues two count queries per non-empty tag that has resources and
    resolves them all in one multi-search round trip.
    """
    tags = Tag.objects.filter(resource_tags__topic__subject=subject).distinct().all()

    data = []
    searchs = []

    for tag in tags:
        if not tag.name == "":
            resources = Resource.objects.filter(tags__id=tag.id, topic__subject=subject)

            if resources.count() > 0:
                # Pairs of queries: class totals first, then the user's own hits.
                searchs.append(count_logs(resources))
                searchs.append(count_logs(resources, user.id))

                # Marker consumed by the loop below: 1 = has queued queries.
                tag.access = 1
            else:
                tag.access = 0

    if len(searchs) > 0:
        res = multi_search(searchs)

        counter = 0

        for tag in tags:
            if not tag.name == "":
                item = {}
                item["tag_name"] = tag.name
                item["details_url"] = reverse(
                    "dashboards:tag_accessess",
                    args=(tag.id, subject.slug, user.email),
                    kwargs={},
                )

                if tag.access == 1:
                    # Responses arrive in submission order, two per tag.
                    item["qtd_access"] = res[counter].to_dict()["hits"]["total"]["value"]
                    item["qtd_my_access"] = res[counter + 1].to_dict()["hits"]["total"][
                        "value"
                    ]

                    counter = counter + 2
                else:
                    item["qtd_access"] = 0
                    item["qtd_my_access"] = 0

                data.append(item)

    return data
def getAccessedTagsPeriod(subject, user, data_ini="", data_end=""):
    """Per-tag access counts inside *subject*, restricted to a time window.

    Empty bounds default to the last 30 days ("now-30d" .. "now" are
    Elasticsearch date-math expressions).
    """
    tags = Tag.objects.filter(resource_tags__topic__subject=subject).distinct().all()

    if data_ini == "":
        data_ini = "now-30d"

    if data_end == "":
        data_end = "now"

    data = []
    searchs = []

    for tag in tags:
        if not tag.name == "":
            resources = Resource.objects.filter(tags__id=tag.id, topic__subject=subject)

            if resources.count() > 0:
                # Pairs of queries: class totals first, then the user's own hits.
                searchs.append(count_logs_period(resources, data_ini, data_end))
                searchs.append(
                    count_logs_period(resources, data_ini, data_end, user.id)
                )

                # Marker consumed by the loop below: 1 = has queued queries.
                tag.access = 1
            else:
                tag.access = 0

    if len(searchs) > 0:
        res = multi_search(searchs)

        counter = 0

        for tag in tags:
            if not tag.name == "":
                item = {}
                item["tag_name"] = tag.name
                item["details_url"] = reverse(
                    "dashboards:tag_accessess_period",
                    args=(tag.id, subject.slug, user.email, data_ini, data_end),
                    kwargs={},
                )

                if tag.access == 1:
                    # Responses arrive in submission order, two per tag.
                    item["qtd_access"] = res[counter].to_dict()["hits"]["total"]["value"]
                    item["qtd_my_access"] = res[counter + 1].to_dict()["hits"]["total"][
                        "value"
                    ]

                    counter = counter + 2
                else:
                    item["qtd_access"] = 0
                    item["qtd_my_access"] = 0

                data.append(item)

    return data
def getTagAccessess(subject, tag, user):
    """Access counts (class-wide and for *user*) of every resource tagged *tag* in *subject*."""
    data = []
    tagged = Resource.objects.filter(tags=tag, topic__subject=subject)

    # Two queries per resource: total accesses, then the user's own accesses.
    queries = []
    for res_obj in tagged:
        queries.append(resource_accessess(res_obj))
        queries.append(resource_accessess(res_obj, user.id))

    if queries:
        responses = multi_search(queries)

        # Responses come back in submission order, two per resource.
        for idx, res_obj in enumerate(tagged):
            hits_all = responses[idx * 2].to_dict()["hits"]["total"]["value"]
            hits_mine = responses[idx * 2 + 1].to_dict()["hits"]["total"]["value"]

            data.append(
                {
                    "resource_name": res_obj.name,
                    "qtd_access": hits_all,
                    "qtd_my_access": hits_mine,
                    "access_url": res_obj.access_link(),
                }
            )

    return data
def getTagAccessessPeriod(subject, tag, user, data_ini, data_end):
    """Access counts per resource tagged *tag* in *subject*, within a period.

    Empty bounds default to the last 30 days ("now-30d" .. "now" are
    Elasticsearch date-math expressions).
    """
    resources = Resource.objects.filter(tags=tag, topic__subject=subject)

    if data_ini == "":
        data_ini = "now-30d"

    if data_end == "":
        data_end = "now"

    data = []
    searchs = []

    for resource in resources:
        # Two queries per resource: all users' hits, then only *user*'s.
        searchs.append(resource_accessess_period(resource, data_ini, data_end))
        searchs.append(resource_accessess_period(resource, data_ini, data_end, user.id))

    if searchs:
        res = multi_search(searchs)

        counter = 0

        for resource in resources:
            item = {}
            item["resource_name"] = resource.name
            # Responses arrive in submission order, two per resource.
            item["qtd_access"] = res[counter].to_dict()["hits"]["total"]["value"]
            item["qtd_my_access"] = res[counter + 1].to_dict()["hits"]["total"]["value"]
            item["access_url"] = resource.access_link()

            counter = counter + 2

            data.append(item)

    return data
def _empty_summary():
    """Indicator dict used when there is no data to aggregate."""
    return {
        "percentil_1": 0,
        "percentil_2": 0,
        "percentil_3": 0,
        "percentil_4": 0,
        "max_access": 0,
        "my_access": 0,
    }


def _percentile_summary(sorted_values):
    """Percentile fields computed from an ascending-sorted, non-empty list.

    With more than five samples, true 25/50/75/90 percentiles are taken by
    index; otherwise fixed tail positions are used, padding with zeros when
    the list is shorter (this mirrors the original behaviour exactly).
    """
    item = {}
    qtd_results = len(sorted_values)

    if qtd_results > 5:
        item["percentil_1"] = sorted_values[math.floor(qtd_results * 0.25)]
        item["percentil_2"] = sorted_values[math.floor(qtd_results * 0.5)]
        item["percentil_3"] = sorted_values[math.floor(qtd_results * 0.75)]
        item["percentil_4"] = sorted_values[math.floor(qtd_results * 0.9)]
    else:
        item["percentil_1"] = sorted_values[-5] if qtd_results == 5 else 0
        item["percentil_2"] = sorted_values[-4] if qtd_results > 3 else 0
        item["percentil_3"] = sorted_values[-3] if qtd_results > 2 else 0
        item["percentil_4"] = sorted_values[-2] if qtd_results > 1 else 0

    item["max_access"] = sorted_values[-1]

    return item


def _summarize_counts(counts):
    """Indicator dict from raw per-user counts (last entry = requesting user).

    Counts are de-duplicated and sorted before the percentile computation.
    """
    my_access = counts[-1]
    uniques = sorted(dict.fromkeys(counts))

    item = _percentile_summary(uniques)
    item["my_access"] = my_access

    return item


def getOtherIndicators(subject, user):
    """Five engagement indicators comparing *user* against *subject*'s class.

    Each entry of the returned list carries class percentiles, the class
    maximum and the requesting user's own value for one metric:

    1. subject accesses; 2. distinct days with access; 3. resource accesses;
    4. distinct resources viewed (last 7 days, not de-duplicated across
    users); 5. pendencies done on time (last 7 days).
    """
    logs = Log.objects.filter(
        datetime__date__gte=timezone.now() - timedelta(days=6),
        datetime__date__lte=timezone.now(),
    )

    data = []
    students = subject.students.all()

    # First indicator: number of subject accesses per student.
    searchs = []
    for student in students:
        if student.id != user.id:
            searchs.append(count_access_subject(subject.id, student.id))
    searchs.append(count_access_subject(subject.id, user.id))

    if searchs:
        res = multi_search(searchs)
        counts = [x.to_dict()["hits"]["total"]["value"] for x in res]
        data.append(_summarize_counts(counts))
    else:
        data.append(_empty_summary())

    # Second indicator: number of distinct days with access per student.
    searchs = []
    for student in students:
        if student.id != user.id:
            searchs.append(count_diff_days(subject.id, student.id))
    searchs.append(count_diff_days(subject.id, user.id))

    if searchs:
        res = multi_search(searchs)
        counts = [
            len(x.to_dict()["aggregations"]["dt"]["buckets"])
            if "aggregations" in x.to_dict()
            else 0
            for x in res
        ]
        data.append(_summarize_counts(counts))
    else:
        data.append(_empty_summary())

    # Third indicator: number of resource accesses per student.
    searchs = []
    for student in students:
        if student.id != user.id:
            searchs.append(count_access_resources(subject.id, student.id))
    searchs.append(count_access_resources(subject.id, user.id))

    if searchs:
        res = multi_search(searchs)
        counts = [x.to_dict()["hits"]["total"]["value"] for x in res]
        data.append(_summarize_counts(counts))
    else:
        data.append(_empty_summary())

    # Fourth indicator: distinct resources viewed in the last 7 days.
    resources_access = logs.filter(
        component="resources",
        action="view",
        context__contains={"subject_id": subject.id},
    )

    student_ids = [student.id for student in students]
    students_sets = {key: set() for key in student_ids}

    for entry in resources_access.filter(user_id__in=student_ids).all():
        # "my_goals" log entries store their id under the "goals" key.
        resource_name = "goals" if entry.resource == "my_goals" else entry.resource
        students_sets[entry.user_id].add(entry.context["%s_id" % (resource_name)])

    students_accessess = sorted(len(students_sets[x]) for x in students_sets)

    if students_accessess:
        my_access = set()

        for entry in resources_access.filter(user_id=user.id).all():
            resource_name = "goals" if entry.resource == "my_goals" else entry.resource
            my_access.add(entry.context["%s_id" % (resource_name)])

        # NOTE: unlike indicators 1-3, these counts are not de-duplicated.
        item = _percentile_summary(students_accessess)
        item["my_access"] = len(my_access)
        data.append(item)
    else:
        data.append(_empty_summary())

    # Fifth indicator: pendencies completed on time in the last 7 days.
    pend = Pendencies.objects.filter(
        resource__topic__subject=subject.id,
        resource__visible=True,
        begin_date__date__lt=timezone.now(),
        end_date__date__gte=timezone.now() - timedelta(days=6),
    )

    counts = []

    if pend.count() > 0:
        pend_ids = pend.values_list("id", flat=True)

        for student in students:
            if student.id != user.id:
                counts.append(
                    PendencyDone.objects.filter(
                        pendency__id__in=pend_ids,
                        late=False,
                        student=student,
                    ).count()
                )

        counts.append(
            PendencyDone.objects.filter(
                pendency__id__in=pend_ids,
                late=False,
                student=user,
            ).count()
        )

    if counts:
        data.append(_summarize_counts(counts))
    else:
        data.append(_empty_summary())

    return data
def accessResourceCount(subject, dataIni, dataEnd):
    """Access counts of every resource in *subject* within a period.

    Empty bounds default to the last 30 days ("now-30d" .. "now" are
    Elasticsearch date-math expressions). Results are sorted by access
    count, most accessed first.
    """
    resources = Resource.objects.filter(topic__subject=subject)

    if dataIni == "":
        dataIni = "now-30d"

    if dataEnd == "":
        dataEnd = "now"

    data = []
    searchs = []  # (the original initialised this list twice; once suffices)

    for resource in resources:
        searchs.append(resource_accessess_period(resource, dataIni, dataEnd))

    if searchs:
        res = multi_search(searchs)

        # Responses arrive in submission order, one per resource.
        for counter, resource in enumerate(resources):
            data.append(
                {
                    "resource_name": resource.name,
                    "qtd_access": res[counter].to_dict()["hits"]["total"]["value"],
                    "access_url": resource.access_link(),
                }
            )

    data.sort(key=lambda x: x["qtd_access"], reverse=True)

    return data
def studentsAccess(subject, dataIni, dataEnd):
    """Per-student access counts for *subject* in a period, most active first."""
    if dataIni == "":
        dataIni = "now-30d"

    if dataEnd == "":
        dataEnd = "now"

    enrolled = subject.students.all()

    # One count query per enrolled student, resolved in a single round trip.
    queries = [
        count_access_subject_period(subject.id, student.id, dataIni, dataEnd)
        for student in enrolled
    ]

    data = []

    if queries:
        responses = multi_search(queries)

        for student, response in zip(enrolled, responses):
            data.append(
                {
                    "count": response.to_dict()["hits"]["total"]["value"],
                    "image": student.image_url,
                    "user": str(student),
                    "user_id": student.id,
                    "link": reverse(
                        "dashboards:view_subject_student",
                        args=(),
                        kwargs={"slug": subject.slug, "email": student.email},
                    ),
                }
            )

    data.sort(key=lambda x: x["count"], reverse=True)

    return data
def parse_date(date_str):
    """Parse date from string by DATE_INPUT_FORMATS of current language."""
    for fmt in get_format("DATE_INPUT_FORMATS"):
        try:
            parsed = datetime.strptime(date_str, fmt)
        except (ValueError, TypeError):
            # Try the next accepted input format.
            continue
        return parsed.date()

    # No format matched (or date_str was not a string).
    return None
def get_days_in_period(data_ini, data_end):
    """Return the set of ``date`` objects from *data_ini* through *data_end*, inclusive.

    The previous implementation walked whole calendar months and mis-handled
    periods that cross a year boundary or span more than a year (the month
    counter was reset to January and the year advanced once per remaining
    iteration). A plain day-by-day walk is both simpler and correct for any
    range. An empty set is returned when *data_end* precedes *data_ini*.
    """
    days_set = set()

    day = data_ini
    while day <= data_end:
        days_set.add(day)
        day += timedelta(days=1)

    return days_set
def monthly_users_activity(subject, data_ini, data_end):
    """Daily activity points for *subject*'s students between two dates.

    Returns a list of dicts suitable for a calendar heatmap: one entry per
    (day, user) with activity, plus a zero-valued entry for each day of the
    period that produced no hits at all.
    """
    period = get_days_in_period(data_ini, data_end)
    students = subject.students.all().values_list("id", flat=True)

    data = list()
    searchs = []
    days = []

    # One daily-access query per day of the period.
    for day in period:
        searchs.append(count_daily_access(subject.id, list(students), day))
        days.append(day)

    if searchs:
        res = multi_search(searchs)

        accessess = [x.to_dict()["hits"] for x in res]

        users = set()      # (day-string, user_id) pairs already counted
        dates_set = set()  # days that produced at least one hit

        for access in accessess:
            for hits in access["hits"]:
                log = hits["_source"]

                accessDate = parse_datetime(log["datetime"])

                dates_set.add(accessDate.date())

                utuple = (
                    str(accessDate.day)
                    + "-"
                    + str(accessDate.month)
                    + "-"
                    + str(accessDate.year),
                    log["user_id"],
                )

                # Count each user at most once per calendar day.
                if not utuple in users:
                    users.add(utuple)

                    data.append(
                        {
                            "year": accessDate.year,
                            # Zero-based month for the JavaScript Date API.
                            "month": accessDate.month - 1,
                            "day": accessDate.day,
                            "hour": accessDate.hour,
                            "user_id": log["user_id"],
                            "value": 1,
                            "count": 1,
                        }
                    )

        # Pad days with no activity so the chart shows a continuous period.
        for day in period:
            if not day in dates_set:
                dates_set.add(day)

                data.append(
                    {
                        "year": day.year,
                        "month": day.month - 1,
                        "day": day.day,
                        "hour": 0,
                        "user_id": 0,
                        "value": 0,
                        "count": 0,
                    }
                )

    data = sorted(data, key=lambda x: (x["month"], x["day"]))

    return data
def general_monthly_users_activity(data_ini, data_end, category=0):
    """Daily activity points for every platform user between two dates.

    Like ``monthly_users_activity`` but platform-wide: each entry also
    carries a "teacher" role flag (0 = student, 1 = professor,
    2 = coordinator). When *category* > 0 only users tied to that category
    (as student, professor or coordinator) are considered.
    """
    period = get_days_in_period(data_ini, data_end)

    if category > 0:
        usersList = (
            User.objects.filter(
                (Cond(subject_student__isnull=False) & Cond(subject_student__category__id=category))
                | (Cond(professors__isnull=False) & Cond(professors__category__id=category))
                | (Cond(coordinators__isnull=False) & Cond(coordinators__id=category))
            )
            .distinct()
        )
    else:
        usersList = (
            User.objects.filter(
                Cond(subject_student__isnull=False)
                | Cond(professors__isnull=False)
                | Cond(coordinators__isnull=False)
            )
            .distinct()
        )

    data = list()
    searchs = []
    days = []

    period = sorted(period)

    # One platform-wide daily-access query per day of the period.
    for day in period:
        searchs.append(
            count_general_daily_access(
                list(usersList.values_list("id", flat=True)), day
            )
        )
        days.append(day)

    if searchs:
        res = multi_search(searchs)

        # Role membership lists used to classify each log entry below.
        studentsList = list(
            usersList.filter(subject_student__isnull=False)
            .distinct()
            .values_list("id", flat=True)
        )
        teachersList = list(
            usersList.filter(professors__isnull=False)
            .distinct()
            .values_list("id", flat=True)
        )
        coordinatorsList = list(
            usersList.filter(coordinators__isnull=False)
            .distinct()
            .values_list("id", flat=True)
        )

        accessess = [x.to_dict()["hits"] for x in res]

        users = set()      # (day-string, user_id) pairs already counted
        dates_set = set()  # days that produced at least one hit

        for i, access in enumerate(accessess):
            for hits in access["hits"]:
                log = hits["_source"]

                # Convert the stored timestamp to the active local timezone.
                accessDate = timezone.localtime(parse_datetime(log["datetime"]))

                dates_set.add(accessDate.date())

                utuple = (
                    str(accessDate.day)
                    + "-"
                    + str(accessDate.month)
                    + "-"
                    + str(accessDate.year),
                    log["user_id"],
                )

                # Count each user at most once per calendar day. A user who
                # is both student and professor is reported as a professor.
                if not utuple in users:
                    users.add(utuple)

                    if log["user_id"] in studentsList:
                        if log["user_id"] not in teachersList:
                            data.append(
                                {
                                    "year": accessDate.year,
                                    # Zero-based month for the JS Date API.
                                    "month": accessDate.month - 1,
                                    "day": accessDate.day,
                                    "hour": accessDate.hour,
                                    "user_id": log["user_id"],
                                    "value": 1,
                                    "count": 1,
                                    "teacher": 0,
                                }
                            )

                    if log["user_id"] in teachersList:
                        data.append(
                            {
                                "year": accessDate.year,
                                "month": accessDate.month - 1,
                                "day": accessDate.day,
                                "hour": accessDate.hour,
                                "user_id": log["user_id"],
                                "value": 1,
                                "count": 1,
                                "teacher": 1,
                            }
                        )
                    elif log["user_id"] in coordinatorsList:
                        data.append(
                            {
                                "year": accessDate.year,
                                "month": accessDate.month - 1,
                                "day": accessDate.day,
                                "hour": accessDate.hour,
                                "user_id": log["user_id"],
                                "value": 1,
                                "count": 1,
                                "teacher": 2,
                            }
                        )

        # Pad days with no activity (one zero row per charted role series).
        for day in period:
            if not day in dates_set:
                dates_set.add(day)

                data.append(
                    {
                        "year": day.year,
                        "month": day.month - 1,
                        "day": day.day,
                        "hour": 0,
                        "user_id": 0,
                        "value": 0,
                        "count": 0,
                        "teacher": 0,
                    }
                )

                data.append(
                    {
                        "year": day.year,
                        "month": day.month - 1,
                        "day": day.day,
                        "hour": 0,
                        "user_id": 0,
                        "value": 0,
                        "count": 0,
                        "teacher": 1,
                    }
                )

    data = sorted(data, key=lambda x: (x["month"], x["day"]))

    return data
def my_categories(user):
    """Return the categories over which *user* has management permission."""
    return [
        category
        for category in Category.objects.filter()
        if has_category_permissions(user, category)
    ]
def generalUsersAccess(dataIni, dataEnd, category=0):
    """Interaction counts plus online status for every platform user.

    When *category* > 0 only users tied to that category (as student,
    professor or coordinator) are considered. Results are ordered by
    interaction count, descending.
    """
    data = []

    if category > 0:
        usersList = (
            User.objects.filter(
                (Cond(subject_student__isnull=False) & Cond(subject_student__category__id=category))
                | (Cond(professors__isnull=False) & Cond(professors__category__id=category))
                | (Cond(coordinators__isnull=False) & Cond(coordinators__id=category))
            )
            .distinct()
        )
    else:
        usersList = (
            User.objects.filter(
                Cond(subject_student__isnull=False)
                | Cond(professors__isnull=False)
                | Cond(coordinators__isnull=False)
            )
            .distinct()
        )

    searchs = []
    userAccess = []

    for user in usersList:
        # Interactions in the period + the user's most recent log entry.
        searchs.append(count_user_interactions(user.id, dataIni, dataEnd))
        userAccess.append(user_last_interaction(user.id))

    if searchs:
        res = multi_search(searchs)

        accessess = [x.to_dict()["hits"] for x in res]

        userAccessRes = None

        if userAccess:
            userAccessRes = multi_search(userAccess)

        for i, user in enumerate(usersList):
            interactions = accessess[i]["total"]["value"]

            item = {}

            # "teacher" flag: 1 = coordinator, 0 = professor, 2 = student.
            if user.coordinators.count() > 0:
                item["teacher"] = 1
            elif user.professors.count() > 0:
                item["teacher"] = 0
            else:
                item["teacher"] = 2

            item["count"] = interactions
            item["image"] = user.image_url
            item["user"] = user.fullname()
            item["user_id"] = user.id
            item["link_profile"] = reverse(
                "chat:profile", args=(), kwargs={"email": user.email},
            )
            item["link_chat"] = reverse(
                "chat:talk", args=(), kwargs={"email": user.email},
            )
            item["status"], item["status_text"] = userStatus(user, userAccessRes)

            data.append(item)

    data.sort(key=lambda x: x["count"], reverse=True)

    return data
def userStatus(user, lastInteractions):
    """Resolve ("active"/"inactive", label) for *user* from last-interaction search results."""
    session_limit = settings.SESSION_SECURITY_EXPIRE_AFTER

    if lastInteractions is not None:
        # Find the first result set whose top hit belongs to this user.
        for entry in lastInteractions:
            if len(entry.hits) > 0 and entry.hits[0].user_id == user.id:
                last_hit = entry.hits[0]

                # Timestamps carry a "+HH:MM" offset suffix; strip it before parsing.
                elapsed = datetime.now() - datetime.strptime(
                    last_hit.datetime[:-6], "%Y-%m-%dT%H:%M:%S.%f"
                )

                if (
                    last_hit.action != "logout"
                    and elapsed.total_seconds() < session_limit
                ):
                    return "active", _("Online")

                break

    return "inactive", _("Offline")
def general_logs(user, data_ini, data_end, category=0):
    """Daily log totals for all platform users between two dates.

    When *category* > 0 only users tied to that category are considered.
    Returns ``(data, minimun, maximun, total)`` where *data* is a list of
    ``[formatted-day, count]`` pairs; when the period is empty *minimun*
    stays ``math.inf`` and the others 0 (unchanged original contract).
    """
    period = get_days_in_period(data_ini, data_end)

    if category > 0:
        usersList = (
            User.objects.filter(
                (Cond(subject_student__isnull=False) & Cond(subject_student__category__id=category))
                | (Cond(professors__isnull=False) & Cond(professors__category__id=category))
                | (Cond(coordinators__isnull=False) & Cond(coordinators__id=category))
            )
            .distinct()
            .values_list("id", flat=True)
        )
    else:
        usersList = (
            User.objects.filter(
                Cond(subject_student__isnull=False)
                | Cond(professors__isnull=False)
                | Cond(coordinators__isnull=False)
            )
            .distinct()
            .values_list("id", flat=True)
        )

    period = sorted(period)
    usersList = list(usersList)

    data = list()
    searchs = []

    # One count query per day. (The local previously named ``datetime``
    # shadowed the imported class; renamed to avoid the trap.)
    for day in period:
        day_start = date_to_datetime(day)
        searchs.append(count_logs_in_day(usersList, str(day_start).split()[0]))

    minimun = math.inf
    maximun = 0
    total = 0

    if searchs:
        res = multi_search(searchs)

        accessess = [x.to_dict()["hits"] for x in res]

        # Responses arrive in submission order, one per day of the period.
        for i, access in enumerate(accessess):
            value = access["total"]["value"]
            time = period[i].strftime("%d/%m/%Y")

            data.append([time, value])

            minimun = min(minimun, value)
            maximun = max(maximun, value)
            total += value

    return data, minimun, maximun, total
def active_users_qty(request_user, data_ini, data_end, category=0):
    """Totals of students/teachers and how many of each interacted in the period.

    Returns a dict with ``total_students``, ``active_students``,
    ``total_teachers`` and ``active_teachers``. When *category* > 0 only
    users tied to that category are considered.
    """
    if category > 0:
        studentsList = User.objects.filter(
            subject_student__isnull=False, subject_student__category__id=category
        ).distinct()
        teachersList = User.objects.filter(
            professors__isnull=False, professors__category__id=category
        ).distinct()
    else:
        studentsList = User.objects.filter(subject_student__isnull=False).distinct()
        teachersList = User.objects.filter(professors__isnull=False).distinct()

    totalStudents = studentsList.count()
    totalTeachers = teachersList.count()

    activeStudents = 0
    activeTeachers = 0

    searchs = []

    for student in studentsList:
        searchs.append(user_last_interaction_in_period(student.id, data_ini, data_end))

    for teacher in teachersList:
        searchs.append(user_last_interaction_in_period(teacher.id, data_ini, data_end))

    if searchs:
        res = multi_search(searchs)
        accessess = [x.to_dict()["hits"] for x in res]

        # Student queries occupy the first ``totalStudents`` response slots,
        # teacher queries the rest. (The previous bookkeeping advanced only
        # to the last *active* student's index, so teacher results were read
        # from the wrong slots whenever trailing students were inactive.)
        for i in range(0, totalStudents):
            entry = accessess[i]

            if entry and entry["total"]["value"] > 0:
                activeStudents += 1

        for i in range(0, totalTeachers):
            entry = accessess[totalStudents + i]

            if entry and entry["total"]["value"] > 0:
                activeTeachers += 1

    return {
        "total_students": totalStudents,
        "active_students": activeStudents,
        "total_teachers": totalTeachers,
        "active_teachers": activeTeachers,
    }
def functiontable(dataIni, dataEnd, categoryId=0):
    """Access totals per category, per subject and per resource type in a period.

    Returns ``{"categories": [...], "subjects": [...], "resources": [...]}``.
    When *categoryId* > 0 the report is restricted to that category (and no
    per-category rows are produced, matching the original behaviour).
    """
    categories_data = []
    subjects_data = []
    resources_data = []
    searchs = []
    categories = []

    if categoryId > 0:
        usersList = (
            User.objects.filter(
                (Cond(subject_student__isnull=False) & Cond(subject_student__category__id=categoryId))
                | (Cond(professors__isnull=False) & Cond(professors__category__id=categoryId))
                | (Cond(coordinators__isnull=False) & Cond(coordinators__id=categoryId))
            )
            .distinct()
            .values_list("id", flat=True)
        )

        subjects = (
            Subject.objects.filter(
                visible=True, category__id=categoryId
            )
            .order_by("slug")
            .distinct()
        )
    else:
        usersList = (
            User.objects.filter(
                Cond(subject_student__isnull=False)
                | Cond(professors__isnull=False)
                | Cond(coordinators__isnull=False)
            )
            .distinct()
            .values_list("id", flat=True)
        )

        categories = Category.objects.filter(visible=True).order_by("slug").distinct()

        subjects = (
            Subject.objects.filter(
                visible=True, category__id__in=categories.values_list("id", flat=True)
            )
            .order_by("slug")
            .distinct()
        )

    usersList = list(usersList)

    resources = (
        Resource.objects.filter(
            visible=True, topic__subject__id__in=subjects.values_list("id", flat=True)
        )
        .order_by("slug")
        .distinct()
    )

    # Queue all count queries: categories first, then subjects, then resources.
    for category in categories:
        searchs.append(
            count_categories_logs_period(category, usersList, dataIni, dataEnd)
        )

    for subject in subjects:
        searchs.append(count_subject_logs_period(subject, usersList, dataIni, dataEnd))

    for resource in resources:
        searchs.append(
            count_resources_logs_period(resource, usersList, dataIni, dataEnd)
        )

    if searchs:
        res = multi_search(searchs)
        accessess = [x.to_dict()["hits"] for x in res]

        # Responses come back in submission order, so plain length offsets
        # locate each section. (The previous "last seen index" bookkeeping
        # read the wrong slots whenever exactly one category — or one
        # subject — was queried, since the +1 step was skipped for index 0.)
        subjects_offset = len(categories)
        resources_offset = subjects_offset + len(subjects)

        for i, category in enumerate(categories):
            categories_data.append(
                {
                    "cat_name": category.name,
                    "access": accessess[i]["total"]["value"],
                    "link": reverse(
                        "subjects:cat_view", args=(), kwargs={"slug": category.slug}
                    ),
                }
            )

        for i, subject in enumerate(subjects):
            subjects_data.append(
                {
                    "name": subject.name,
                    "access": accessess[subjects_offset + i]["total"]["value"],
                    "category": subject.category.name,
                    "link": reverse(
                        "subjects:view", args=(), kwargs={"slug": subject.slug}
                    ),
                }
            )

        # Aggregate resource accesses by resource type.
        dataResources = {}

        # Display names (Portuguese UI labels) per resource subclass.
        names_resources = {
            "pdffile": "Arquivo PDF",
            "bulletin": "Boletim do Tópico",
            "ytvideo": "Vídeo do YouTube",
            "filelink": "Link para arquivo",
            "link": "Link para Site",
            "goals": "Metas do Tópico",
            "webpage": "Página Web",
            "questionary": "Questionário",
            "webconference": "Web conferência",
            "my_goals": "Metas do Tópico",
            "h5p": "H5P",
        }

        for i, resource in enumerate(resources):
            total = accessess[resources_offset + i]["total"]["value"]
            dataResources[resource._my_subclass] = (
                dataResources.get(resource._my_subclass, 0) + total
            )

        for dataRes in dataResources:
            resources_data.append(
                {
                    # Fall back to the raw subclass name so an unmapped
                    # resource type cannot crash the dashboard.
                    "name": names_resources.get(dataRes, dataRes),
                    "access": dataResources[dataRes],
                    "link": "#",
                }
            )

    data = {
        "categories": categories_data,
        "subjects": subjects_data,
        "resources": resources_data,
    }

    return data
def date_to_datetime(dt: date, hour=0, minute=0, second=0) -> datetime:
    """Lift a ``date`` into a naive ``datetime`` at the given time of day (midnight by default)."""
    return datetime(
        year=dt.year,
        month=dt.month,
        day=dt.day,
        hour=hour,
        minute=minute,
        second=second,
    )
def xml_users(request_user, data_ini, data_end, category = 0):
    """Build an XLS activity report for teachers and students and return it
    as a downloadable HTTP response.

    Four worksheets are produced: active/inactive teachers and active/inactive
    students, where "active" means the user's interaction search returned at
    least one log hit between data_ini and data_end.  When category > 0 only
    users tied to that category's subjects are included.

    NOTE(review): despite the name, the generated file is XLS (via xlwt),
    not XML; the sheet headers are intentionally in Portuguese.
    """
    # Select the report population, optionally restricted to one category.
    if category > 0:
        studentsList = User.objects.filter(subject_student__isnull=False, subject_student__category__id=category).order_by("username", "last_name").distinct()
        teachersList = User.objects.filter(professors__isnull=False, professors__category__id=category).order_by("username", "last_name").distinct()
    else:
        studentsList = User.objects.filter(subject_student__isnull=False).order_by("username", "last_name").distinct()
        teachersList = User.objects.filter(professors__isnull=False).order_by("username", "last_name").distinct()

    # One search per user, teachers first: the multi_search result below is
    # consumed with the same ordering (teachers occupy res[0:len(teachersList)]).
    interactionQuery = []

    for teacher in teachersList:
        interactionQuery.append(teachers_xls(teacher.id, data_ini, data_end))

    for student in studentsList:
        interactionQuery.append(students_xls(student.id, data_ini, data_end))

    workbook = xlwt.Workbook()

    # Worksheet headers (row 0 of each sheet).
    activeTeachersWorksheet = workbook.add_sheet(u"Professores Ativos")
    activeTeachersWorksheet.write(0, 0, u"Professor")
    activeTeachersWorksheet.write(0, 1, u"E-mail do Professor")
    activeTeachersWorksheet.write(0, 2, u"Número de assuntos em que é professor")
    activeTeachersWorksheet.write(0, 3, u"Número de assuntos em que é professor e que teve registros no período")
    activeTeachersWorksheet.write(0, 4, u"Número de assuntos em que é professor e que não teve registros no período")
    activeTeachersWorksheet.write(0, 5, u"Nome dos assuntos em que é professor e tem registros no período")
    activeTeachersWorksheet.write(0, 6, u"Nome dos assuntos em que é professor e que não tem registros no período")
    activeTeachersWorksheet.write(0, 7, u"Número de mensagens em mural")
    activeTeachersWorksheet.write(0, 8, u"Número de mensagens diretas enviadas no período")
    activeTeachersWorksheet.write(0, 9, u"Número de recursos criados no período")
    activeTeachersWorksheet.write(0, 10, u"Número de registros no período")
    activeTeachersWorksheet.write(0, 11, u"Categoria")

    inactiveTeachersWorksheet = workbook.add_sheet(u"Professores Inativos")
    inactiveTeachersWorksheet.write(0, 0, u"Professor")
    inactiveTeachersWorksheet.write(0, 1, u"E-mail do Professor")
    inactiveTeachersWorksheet.write(0, 2, u"Nome dos assuntos em que é professor")
    inactiveTeachersWorksheet.write(0, 3, u"Categoria")

    activeStudentsWorksheet = workbook.add_sheet(u"Estudantes Ativos")
    activeStudentsWorksheet.write(0, 0, u"Estudante")
    activeStudentsWorksheet.write(0, 1, u"E-mail do Estudante")
    activeStudentsWorksheet.write(0, 2, u"Número de assuntos em que é estudante")
    activeStudentsWorksheet.write(0, 3, u"Número de assuntos em que é estudante e que teve registros no período")
    activeStudentsWorksheet.write(0, 4, u"Número de assuntos em que é estudante e que não teve registros no período")
    activeStudentsWorksheet.write(0, 5, u"Nome dos assuntos em que é estudante e tem registros no período")
    activeStudentsWorksheet.write(0, 6, u"Nome dos assuntos em que é estudante e que não tem registros no período")
    activeStudentsWorksheet.write(0, 7, u"Número de tarefas para o período")
    activeStudentsWorksheet.write(0, 8, u"Número de tarefas para o período que foi realizada pelo estudante")
    activeStudentsWorksheet.write(0, 9, u"Número de tarefas para o período que não foram realizadas (pendentes) pelo estudante")
    activeStudentsWorksheet.write(0, 10, u"Categoria")

    inactiveStudentsWorksheet = workbook.add_sheet(u"Estudantes Inativos")
    inactiveStudentsWorksheet.write(0, 0, u"Estudante")
    inactiveStudentsWorksheet.write(0, 1, u"E-mail do Estudante")
    inactiveStudentsWorksheet.write(0, 2, u"Nome dos assuntos em que é estudante")
    inactiveStudentsWorksheet.write(0, 3, u"Categoria")

    if interactionQuery:
        res = multi_search(interactionQuery)

        activesLine = 1
        inactivesLine = 1

        # Teachers: res[i] corresponds to teachersList[i].
        for i, teacher in enumerate(teachersList):
            if i < len(res):
                entry = res[i].to_dict()
                access = entry["hits"]["total"]["value"]
                teacherSubjects = teacher.professors.all()

                categoriesNames = []

                for s in teacherSubjects:
                    if not s.category.name in categoriesNames:
                        categoriesNames.append(s.category.name)

                teacherSubjectsIds = teacherSubjects.values_list("id", flat=True)

                if access > 0:
                    # Subjects appearing in the log aggregation are "active".
                    activeSubjects = []

                    for x in entry["aggregations"]["subjects"]["buckets"]:
                        if x["key"] in teacherSubjectsIds:
                            activeSubjects.append(x["key"])

                    # Aggregation buckets may be empty; default every count to 0.
                    muralMessages = sum([x["doc_count"] for x in entry["aggregations"]["mural"]["buckets"][0]["action"]["buckets"]]) if len(entry["aggregations"]["mural"]["buckets"]) > 0 else 0
                    messagesSent = entry["aggregations"]["messages"]["buckets"][0]["action"]["buckets"][0]["doc_count"] if len(entry["aggregations"]["messages"]["buckets"]) > 0 and len(entry["aggregations"]["messages"]["buckets"][0]["action"]["buckets"]) > 0 else 0
                    resourcesCreated = entry["aggregations"]["resources"]["buckets"][0]["action"]["buckets"][0]["doc_count"] if len(entry["aggregations"]["resources"]["buckets"]) > 0 and len(entry["aggregations"]["resources"]["buckets"][0]["action"]["buckets"]) > 0 else 0

                    activeSubjectsName = []
                    inactiveSubjectsName = []

                    for s in teacherSubjects:
                        if s.id in activeSubjects:
                            activeSubjectsName.append(s.name)
                        else:
                            inactiveSubjectsName.append(s.name)

                    activeTeachersWorksheet.write(activesLine, 0, teacher.fullname())
                    activeTeachersWorksheet.write(activesLine, 1, teacher.email)
                    activeTeachersWorksheet.write(
                        activesLine,
                        2,
                        len(teacherSubjects))
                    activeTeachersWorksheet.write(activesLine, 3, len(activeSubjects))
                    activeTeachersWorksheet.write(activesLine, 4, len(teacherSubjects) - len(activeSubjects))
                    activeTeachersWorksheet.write(activesLine, 5, ", ".join(activeSubjectsName))
                    activeTeachersWorksheet.write(activesLine, 6, ", ".join(inactiveSubjectsName))
                    activeTeachersWorksheet.write(activesLine, 7, muralMessages)
                    activeTeachersWorksheet.write(activesLine, 8, messagesSent)
                    activeTeachersWorksheet.write(activesLine, 9, resourcesCreated)
                    activeTeachersWorksheet.write(activesLine, 10, access)
                    activeTeachersWorksheet.write(activesLine, 11, ", ".join(categoriesNames))

                    activesLine += 1
                else:
                    inactiveTeachersWorksheet.write(
                        inactivesLine, 0, teacher.fullname()
                    )
                    inactiveTeachersWorksheet.write(
                        inactivesLine, 1, teacher.email
                    )
                    inactiveTeachersWorksheet.write(
                        inactivesLine,
                        2,
                        ", ".join(str(sub.name) for sub in teacherSubjects),
                    )
                    inactiveTeachersWorksheet.write(
                        inactivesLine,
                        3,
                        ", ".join(categoriesNames),
                    )

                    inactivesLine += 1

        # Students: their results start right after the teachers' in res.
        index = len(teachersList)

        activesLine = 1
        inactivesLine = 1

        for i, student in enumerate(studentsList):
            if index + i < len(res):
                entry = res[index + i].to_dict()
                access = entry["hits"]["total"]["value"]
                studentSubjects = student.subject_student.distinct().all()

                categoriesNames = []

                for s in studentSubjects:
                    if not s.category.name in categoriesNames:
                        categoriesNames.append(s.category.name)

                studentSubjectsIds = studentSubjects.values_list("id", flat=True)

                if access > 0:
                    activeSubjects = []

                    for x in entry["aggregations"]["subjects"]["buckets"]:
                        if x["key"] in studentSubjectsIds:
                            activeSubjects.append(x["key"])

                    activeSubjectsName = []
                    inactiveSubjectsName = []

                    for s in studentSubjects:
                        if s.id in activeSubjects:
                            activeSubjectsName.append(s.name)
                        else:
                            inactiveSubjectsName.append(s.name)

                    # Pendencies due inside the report window vs. how many of
                    # them this student actually completed.
                    tasksInPeriod = Pendencies.objects.filter(resource__topic__subject__in=studentSubjectsIds, end_date__range=(data_ini, data_end)).distinct()
                    tasksInPeriodIds = list(tasksInPeriod.values_list("id", flat=True))
                    tasksInPeriodCount = tasksInPeriod.count()
                    tasksDoneCount = PendencyDone.objects.filter(pendency__id__in=tasksInPeriodIds, student__id=student.id).count()

                    activeStudentsWorksheet.write(activesLine, 0, student.fullname())
                    activeStudentsWorksheet.write(activesLine, 1, student.email)
                    activeStudentsWorksheet.write(
                        activesLine,
                        2,
                        len(studentSubjects))
                    activeStudentsWorksheet.write(activesLine, 3, len(activeSubjects))
                    activeStudentsWorksheet.write(activesLine, 4, len(studentSubjects) - len(activeSubjects))
                    activeStudentsWorksheet.write(activesLine, 5, ", ".join(activeSubjectsName))
                    activeStudentsWorksheet.write(activesLine, 6, ", ".join(inactiveSubjectsName))
                    activeStudentsWorksheet.write(activesLine, 7, tasksInPeriodCount)
                    activeStudentsWorksheet.write(activesLine, 8, tasksDoneCount)
                    activeStudentsWorksheet.write(activesLine, 9, tasksInPeriodCount - tasksDoneCount)
                    activeStudentsWorksheet.write(activesLine, 10, ", ".join(categoriesNames))

                    activesLine += 1
                else:
                    inactiveStudentsWorksheet.write(
                        inactivesLine, 0, student.fullname()
                    )
                    inactiveStudentsWorksheet.write(
                        inactivesLine, 1, student.email
                    )
                    inactiveStudentsWorksheet.write(
                        inactivesLine,
                        2,
                        ", ".join(str(sub.name) for sub in studentSubjects),
                    )
                    inactiveStudentsWorksheet.write(
                        inactivesLine,
                        3,
                        ", ".join(categoriesNames),
                    )

                    inactivesLine += 1

    path1 = os.path.join(settings.BASE_DIR, "dashboards")
    path2 = os.path.join(path1, "sheets")
    path3 = os.path.join(path2, "xls")

    filename = "dashboard_reports.xls"
    folder_path = os.path.join(path3, filename)

    if not os.path.isdir(path3):
        os.makedirs(path3)

    workbook.save(folder_path)

    # Fix: the previous implementation re-built a *relative* path for the
    # existence check, read and size calls below, which only worked when the
    # process cwd happened to be the project root, and it never closed the
    # file handle it opened.  Use the absolute path we just saved to and a
    # context manager for the read.
    if not os.path.exists(folder_path):
        raise Http404()

    with open(folder_path, "rb") as xls_file:
        response = HttpResponse(xls_file.read())

    response["Content-Type"] = "application/force-download"
    response["Pragma"] = "public"
    response["Expires"] = "0"
    response["Cache-Control"] = "must-revalidate, post-check=0, pre-check=0"
    response["Content-Disposition"] = "attachment; filename=%s" % (filename)
    response["Content-Transfer-Encoding"] = "binary"
    response["Content-Length"] = str(os.path.getsize(folder_path))

    return response
"""
def load_logs():
all_logs = Log.objects.all()
categories = Category.objects.all()
subjects = Subject.objects.all()
teachersList = []
studentsList = []
coordinator_list = []
admins = User.objects.filter(is_staff=True)
admin_list = []
teacher_subjects = {}
student_subjects = {}
for subject in subjects:
for (
professor
) in subject.professor.all(): ## List all subjects for teachers as a dict
if professor.id not in teachersList:
teachersList.append(professor.id)
if professor.id not in teacher_subjects.keys():
teacher_subjects[professor.id] = [subject]
else:
if subject not in teacher_subjects[professor.id]:
teacher_subjects[professor.id].append(subject)
for (
student
) in subject.students.all(): ## List all subjects for students as a dict
if student.id not in studentsList:
studentsList.append(student.id)
if student.id not in student_subjects.keys():
student_subjects[student.id] = [subject]
else:
if subject not in student_subjects[student.id]:
student_subjects[student.id].append(subject)
for category in categories: ## List all coordinators
for coordenador in category.coordinators.all():
if coordenador.id not in coordinator_list:
coordinator_list.append(coordenador.id)
for admin in admins: ## List all admins
if admin.id not in admin_list:
admin_list.append(admin.id)
objs = list()
for logs in all_logs:
is_admin = bool()
is_teacher = bool()
is_student = bool()
is_coordinator = bool()
if logs.user_id in admin_list:
is_admin = True
else:
is_admin = False
if logs.user_id in teachersList:
is_teacher = True
else:
is_teacher = False
if logs.user_id in studentsList:
is_student = True
else:
is_student = False
if logs.user_id in coordinator_list:
is_coordinator = True
else:
is_coordinator = False
log = Log_Consultas()
log.user = logs.user
log.user_id = logs.user_id
log.user_email = logs.user_email
log.component = logs.component
log.context = logs.context
log.action = logs.action
log.resource = logs.resource
log.datetime = logs.datetime
log.is_admin = is_admin
log.is_teacher = is_teacher
log.is_student = is_student
log.is_coordinator = is_coordinator
if logs.context:
if logs.context != {}:
if "subject_id" in logs.context.keys():
id = logs.context["subject_id"]
try:
sub = Subject.objects.get(id=id)
except:
sub = None
if sub is not None:
log.subject = sub
objs.append(log)
print("Saving...")
log_bulk = Log_Consultas.objects.bulk_create(objs=objs)
print("Saved.")
users = User.objects.all().distinct()
objs = list()
for user in users: ## Add all users as teacher or student or coordinator or admin
is_admin = bool()
is_teacher = bool()
is_student = bool()
is_coordinator = bool()
if user.id in admin_list:
is_admin = True
else:
is_admin = False
if user.id in teachersList:
is_teacher = True
else:
is_teacher = False
if user.id in studentsList:
is_student = True
else:
is_student = False
if user.id in coordinator_list:
is_coordinator = True
else:
is_coordinator = False
log = Log_Consultas()
log.user = user
log.user_id = user.id
log.user_email = user.email
log.is_admin = is_admin
log.is_teacher = is_teacher
log.is_student = is_student
log.is_coordinator = is_coordinator
objs.append(log)
print("Saving2...")
log_bulk = Log_Consultas.objects.bulk_create(objs=objs)
print("Saved.")
logs = Log_Consultas.objects.filter(datetime__isnull=True)
if logs:
for log in logs: ## Add all users and list yours subjects as teacher or student
if log.user_id in teacher_subjects.keys():
for a in teacher_subjects[log.user_id]:
log.teacher_subjects.add(a.id)
if log.user_id in student_subjects.keys():
for a in student_subjects[log.user_id]:
log.student_subjects.add(a.id)
def add_daily_logs():
all_logs = Log.objects.all(
datetime__date__gte=timezone.now() - timedelta(days=1),
datetime__date__lte=timezone.now(),
)
categories = Category.objects.all()
subjects = Subject.objects.all()
teachersList = []
studentsList = []
coordinator_list = []
admins = User.objects.filter(is_staff=True)
admin_list = []
teacher_subjects = {}
student_subjects = {}
for subject in subjects:
for professor in subject.professor.all():
if professor.id not in teachersList:
teachersList.append(professor.id)
if professor.id not in teacher_subjects.keys():
teacher_subjects[professor.id] = [subject]
else:
if subject not in teacher_subjects[professor.id]:
teacher_subjects[professor.id].append(subject)
for student in subject.students.all():
if student.id not in studentsList:
studentsList.append(student)
if student.id not in student_subjects.keys():
student_subjects[student.id] = [subject]
else:
if subject not in student_subjects[student.id]:
student_subjects[student.id].append(subject)
for category in categories:
for coordenador in category.coordinators.all():
if coordenador.id not in coordinator_list:
coordinator_list.append(coordenador.id)
for admin in admins:
if admin.id not in admin_list:
admin_list.append(admin.id)
objs = list()
for logs in all_logs:
is_admin = bool()
is_teacher = bool()
is_student = bool()
is_coordinator = bool()
if logs.user_id in admin_list:
is_admin = True
else:
is_admin = False
if logs.user_id in teachersList:
is_teacher = True
else:
is_teacher = False
if logs.user_id in studentsList:
is_student = True
else:
is_student = False
if logs.user_id in coordinator_list:
is_coordinator = True
else:
is_coordinator = False
log = Log_Consultas()
log.user = logs.user
log.user_id = logs.user_id
log.user_email = logs.user_email
log.component = logs.component
log.context = logs.context
log.action = logs.action
log.resource = logs.resource
log.datetime = logs.datetime
log.is_admin = is_admin
log.is_teacher = is_teacher
log.is_student = is_student
log.is_coordinator = is_coordinator
if logs.context:
if logs.context != {}:
if "subject_id" in logs.context.keys():
id = logs.context["subject_id"]
try:
sub = Subject.objects.get(id=id)
except:
sub = None
if sub is not None:
log.subject = sub
objs.append(log)
batch_size = 10000
print("Saving...")
while True:
batch = list(islice(objs, batch_size))
if not batch:
break
log_bulk = Log_Consultas.objects.bulk_create(batch, batch_size)
print("Saved.")
users = User.objects.all().distinct()
objs = list()
new_users = list()
for user in users:
logs = (
Log_Consultas.objects.filter(datetime__isnull=True)
.filter(user_id=user.id)
.count()
)
if logs < 1:
new_users.append(user.id)
is_admin = bool()
is_teacher = bool()
is_student = bool()
is_coordinator = bool()
if user.id in admin_list:
is_admin = True
else:
is_admin = False
if user.id in teachersList:
is_teacher = True
else:
is_teacher = False
if user.id in studentsList:
is_student = True
else:
is_student = False
if user.id in coordinator_list:
is_coordinator = True
else:
is_coordinator = False
log = Log_Consultas()
log.user = user
log.user_id = user.id
log.user_email = user.email
log.is_admin = is_admin
log.is_teacher = is_teacher
log.is_student = is_student
log.is_coordinator = is_coordinator
objs.append(log)
print("Saving2...")
while True:
batch = list(islice(objs, batch_size))
if not batch:
break
log_bulk = Log_Consultas.objects.bulk_create(batch, batch_size)
print("Saved.")
logs = Log_Consultas.objects.filter(datetime__isnull=True).filter(
user_id__in=new_users
)
if logs:
for log in logs:
if log.user_id in teacher_subjects.keys():
for a in teacher_subjects[log.user_id]:
log.teacher_subjects.add(a.id)
if log.user_id in student_subjects.keys():
for a in student_subjects[log.user_id]:
log.student_subjects.add(a.id)
""" | gpl-2.0 |
dionescu/gmock | gtest/test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Sets or unsets an environment variable.

  The variable name is upper-cased first.  A value of None removes the
  variable from the environment (a no-op when it is already absent); any
  other value is stored as-is.
  """

  name = env_var.upper()
  if value is None:
    os.environ.pop(name, None)
  else:
    os.environ[name] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""

  # Python 2 print statement: this script predates Python 3 support.
  print 'Running "%s". . .' % ' '.join(command)
  p = gtest_test_utils.Subprocess(command)
  # p.exited is False when the child was terminated by a signal, in which
  # case there is no meaningful exit code and the run counts as a failure.
  return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value:    value of the GTEST_THROW_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_throw_on_failure flag;
                        None if the flag should not be present.
      should_fail:      True iff the program is expected to fail.
    """

    SetEnvVar(THROW_ON_FAILURE, env_var_value)

    # Human-readable description of the env var state, used in the
    # failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Build the flag: absent, an explicit =0, or the bare flag (implies =1).
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    failed = not Run(command)

    # Leave the environment clean for the next RunAndVerify call.
    SetEnvVar(THROW_ON_FAILURE, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""

    # All four env-var/flag combinations: the flag always wins.
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
# Script entry point: delegate to gtest's shared test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| bsd-3-clause |
titusfortner/selenium | py/test/selenium/webdriver/common/form_handling_tests.py | 1 | 8158 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def testShouldClickOnSubmitInputElements(driver, pages):
    # Clicking <input type="submit"> submits the enclosing form.
    pages.load("formPage.html")
    driver.find_element_by_id("submitButton").click()
    WebDriverWait(driver, 3).until(EC.title_is("We Arrive Here"))


def testClickingOnUnclickableElementsDoesNothing(driver, pages):
    # Clicking a non-interactive element must not raise.
    pages.load("formPage.html")
    driver.find_element_by_xpath("//body").click()


def testShouldBeAbleToClickImageButtons(driver, pages):
    # <input type="image"> also submits its form when clicked.
    pages.load("formPage.html")
    driver.find_element_by_id("imageButton").click()
    WebDriverWait(driver, 3).until(EC.title_is("We Arrive Here"))


def testShouldBeAbleToSubmitForms(driver, pages):
    # submit() invoked directly on the <form> element.
    pages.load("formPage.html")
    driver.find_element_by_name("login").submit()
    WebDriverWait(driver, 3).until(EC.title_is("We Arrive Here"))


def testShouldSubmitAFormWhenAnyInputElementWithinThatFormIsSubmitted(driver, pages):
    # submit() on any <input> inside a form submits that form.
    pages.load("formPage.html")
    driver.find_element_by_id("checky").submit()
    WebDriverWait(driver, 3).until(EC.title_is("We Arrive Here"))


def testShouldSubmitAFormWhenAnyElementWithinThatFormIsSubmitted(driver, pages):
    # submit() works even on non-input descendants such as <p>.
    pages.load("formPage.html")
    driver.find_element_by_xpath("//form/p").submit()
    WebDriverWait(driver, 5).until(EC.title_is("We Arrive Here"))


def testShouldNotBeAbleToSubmitAFormThatDoesNotExist(driver, pages):
    # Looking up a non-existent form raises before submit() can run.
    pages.load("formPage.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element_by_name("there is no spoon").submit()
def testShouldBeAbleToEnterTextIntoATextAreaBySettingItsValue(driver, pages):
    # send_keys() into a textarea is reflected in its "value" attribute.
    pages.load("javascriptPage.html")
    textarea = driver.find_element_by_id("keyUpArea")
    cheesey = "Brie and cheddar"
    textarea.send_keys(cheesey)
    assert textarea.get_attribute("value") == cheesey


def testShouldEnterDataIntoFormFields(driver, pages):
    # clear() followed by send_keys() replaces an input's existing value.
    pages.load("xhtmlTest.html")
    element = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
    originalValue = element.get_attribute("value")
    assert originalValue == "change"
    element.clear()
    element.send_keys("some text")
    # Re-find the element to read the value the DOM actually holds now.
    element = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
    newFormValue = element.get_attribute("value")
    assert newFormValue == "some text"
def testShouldBeAbleToSelectACheckBox(driver, pages):
    # Clicking a checkbox toggles is_selected() on and off again.
    pages.load("formPage.html")
    checkbox = driver.find_element_by_id("checky")
    assert checkbox.is_selected() is False
    checkbox.click()
    assert checkbox.is_selected() is True
    checkbox.click()
    assert checkbox.is_selected() is False


# NOTE(review): the next two tests are byte-for-byte duplicates of the one
# above; they mirror upstream Selenium's Java suite, so the names are kept.
def testShouldToggleTheCheckedStateOfACheckbox(driver, pages):
    pages.load("formPage.html")
    checkbox = driver.find_element_by_id("checky")
    assert checkbox.is_selected() is False
    checkbox.click()
    assert checkbox.is_selected() is True
    checkbox.click()
    assert checkbox.is_selected() is False


def testTogglingACheckboxShouldReturnItsCurrentState(driver, pages):
    pages.load("formPage.html")
    checkbox = driver.find_element_by_id("checky")
    assert checkbox.is_selected() is False
    checkbox.click()
    assert checkbox.is_selected() is True
    checkbox.click()
    assert checkbox.is_selected() is False


def testShouldBeAbleToSelectARadioButton(driver, pages):
    # Clicking an unselected radio button selects it.
    pages.load("formPage.html")
    radioButton = driver.find_element_by_id("peas")
    assert radioButton.is_selected() is False
    radioButton.click()
    assert radioButton.is_selected() is True


# NOTE(review): duplicate of testShouldBeAbleToSelectARadioButton; kept to
# match the upstream suite's test names.
def testShouldBeAbleToSelectARadioButtonByClickingOnIt(driver, pages):
    pages.load("formPage.html")
    radioButton = driver.find_element_by_id("peas")
    assert radioButton.is_selected() is False
    radioButton.click()
    assert radioButton.is_selected() is True


# NOTE(review): "Interation" is a typo for "Interaction" in the test name;
# renaming would change the collected test id, so it is only flagged here.
def testShouldReturnStateOfRadioButtonsBeforeInteration(driver, pages):
    # is_selected() reports the page's initial state without any clicks.
    pages.load("formPage.html")
    radioButton = driver.find_element_by_id("cheese_and_peas")
    assert radioButton.is_selected() is True
    radioButton = driver.find_element_by_id("cheese")
    assert radioButton.is_selected() is False
# [ExpectedException(typeof(NotImplementedException))]
# def testShouldThrowAnExceptionWhenTogglingTheStateOfARadioButton(driver, pages):
# pages.load("formPage.html")
# radioButton = driver.find_element_by_id("cheese"))
# radioButton.click()
# [IgnoreBrowser(Browser.IE, "IE allows toggling of an option not in a multiselect")]
# [ExpectedException(typeof(NotImplementedException))]
# def testTogglingAnOptionShouldThrowAnExceptionIfTheOptionIsNotInAMultiSelect(driver, pages):
# pages.load("formPage.html")
# select = driver.find_element_by_name("selectomatic"))
# option = select.find_elements_by_tag_name("option"))[0]
# option.click()
def testTogglingAnOptionShouldToggleOptionsInAMultiSelect(driver, pages):
    # In a multi-select, clicking an option flips its selected state both ways.
    pages.load("formPage.html")
    select = driver.find_element_by_name("multi")
    option = select.find_elements_by_tag_name("option")[0]
    selected = option.is_selected()
    option.click()
    assert not selected == option.is_selected()
    option.click()
    assert selected == option.is_selected()


def testShouldThrowAnExceptionWhenSelectingAnUnselectableElement(driver, pages):
    # Clicking an element outside the rendered page (e.g. <title>) must raise.
    pages.load("formPage.html")
    element = driver.find_element_by_xpath("//title")
    with pytest.raises(WebDriverException):
        element.click()


def testSendingKeyboardEventsShouldAppendTextInInputs(driver, pages):
    # Consecutive send_keys() calls append rather than replace.
    pages.load("formPage.html")
    element = driver.find_element_by_id("working")
    element.send_keys("Some")
    value = element.get_attribute("value")
    assert value == "Some"
    element.send_keys(" text")
    value = element.get_attribute("value")
    assert value == "Some text"


def testShouldBeAbleToClearTextFromInputElements(driver, pages):
    # clear() empties an <input> that has text.
    pages.load("formPage.html")
    element = driver.find_element_by_id("working")
    element.send_keys("Some text")
    value = element.get_attribute("value")
    assert len(value) > 0
    element.clear()
    value = element.get_attribute("value")
    assert len(value) == 0


def testEmptyTextBoxesShouldReturnAnEmptyStringNotNull(driver, pages):
    # Empty fields report "" (never None) for the value attribute.
    pages.load("formPage.html")
    emptyTextBox = driver.find_element_by_id("working")
    assert emptyTextBox.get_attribute("value") == ""
    emptyTextArea = driver.find_element_by_id("emptyTextArea")
    assert emptyTextArea.get_attribute("value") == ""


def testShouldBeAbleToClearTextFromTextAreas(driver, pages):
    # clear() also empties a <textarea>.
    pages.load("formPage.html")
    element = driver.find_element_by_id("withText")
    element.send_keys("Some text")
    value = element.get_attribute("value")
    assert len(value) > 0
    element.clear()
    value = element.get_attribute("value")
    assert len(value) == 0


def testRadioShouldNotBeSelectedAfterSelectingSibling(driver, pages):
    # Radio buttons in the same group are mutually exclusive.
    pages.load("formPage.html")
    cheese = driver.find_element_by_id("cheese")
    peas = driver.find_element_by_id("peas")
    cheese.click()
    assert cheese.is_selected() is True
    assert peas.is_selected() is False
    peas.click()
    assert cheese.is_selected() is False
    assert peas.is_selected() is True
| apache-2.0 |
brototyp/CouchPotato | library/sqlalchemy/orm/exc.py | 11 | 3814 | # exc.py - ORM exceptions
# Copyright (C) the SQLAlchemy authors and contributors
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQLAlchemy ORM exceptions."""
import sqlalchemy as sa
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa.exc.SQLAlchemyError):
    """An operation encountered database state that is unaccounted for.

    Two conditions cause this to happen:

    * A flush may have attempted to update or delete rows
      and an unexpected number of rows were matched during
      the UPDATE or DELETE statement.  Note that when
      version_id_col is used, rows in UPDATE or DELETE statements
      are also matched against the current known version
      identifier.

    * A mapped object with version_id_col was refreshed,
      and the version number coming back from the database does
      not match that of the object itself.

    """

# Legacy alias kept for backwards compatibility; new code should catch
# StaleDataError directly.
ConcurrentModificationError = StaleDataError
class FlushError(sa.exc.SQLAlchemyError):
    """An invalid condition was detected during flush()."""


class UnmappedError(sa.exc.InvalidRequestError):
    """Base for exceptions that involve expected mappings not present."""


class DetachedInstanceError(sa.exc.SQLAlchemyError):
    """An attempt to access unloaded attributes on a
    mapped instance that is detached."""
class UnmappedInstanceError(UnmappedError):
    """A mapping operation was requested for an unknown instance."""

    def __init__(self, obj, msg=None):
        if not msg:
            try:
                # class_mapper() is called for its side effect only: it raises
                # UnmappedClassError when the class itself is unmapped.  If it
                # succeeds, the class is mapped and the instance merely lacks
                # instrumentation (created before mapper() ran).
                mapper = sa.orm.class_mapper(type(obj))
                name = _safe_cls_name(type(obj))
                msg = ("Class %r is mapped, but this instance lacks "
                       "instrumentation.  This occurs when the instance is created "
                       "before sqlalchemy.orm.mapper(%s) was called." % (name, name))
            except UnmappedClassError:
                # The class is unmapped: fall back to the generic message.
                msg = _default_unmapped(type(obj))

                # A common mistake is passing the class itself where an
                # instance was expected; hint at that explicitly.
                if isinstance(obj, type):
                    msg += (
                        '; was a class (%s) supplied where an instance was '
                        'required?' % _safe_cls_name(obj))
        UnmappedError.__init__(self, msg)
class UnmappedClassError(UnmappedError):
    """A mapping operation was requested against a class that is not mapped."""

    def __init__(self, cls, msg=None):
        # Fall back to the standard "Class X is not mapped" text whenever the
        # caller supplied no (truthy) message.
        UnmappedError.__init__(self, msg or _default_unmapped(cls))
class ObjectDeletedError(sa.exc.InvalidRequestError):
    """A refresh() operation failed to re-retrieve an object's row."""
class UnmappedColumnError(sa.exc.InvalidRequestError):
    """A mapping operation was requested on an unknown column."""
# Counterpart of MultipleResultsFound below.
class NoResultFound(sa.exc.InvalidRequestError):
    """A database result was required but none was found."""
# Counterpart of NoResultFound above.
class MultipleResultsFound(sa.exc.InvalidRequestError):
    """A single database result was required but more than one were found."""
# Legacy compat until 0.6: re-export these ORM exception types from the
# core ``sqlalchemy.exc`` namespace.
sa.exc.ConcurrentModificationError = ConcurrentModificationError
sa.exc.FlushError = FlushError
# Bug fix: this line was a bare attribute access (``sa.exc.UnmappedColumnError``)
# which is a no-op at best and raises AttributeError on import at worst; it
# was clearly intended to be an assignment like the two lines above.
sa.exc.UnmappedColumnError = UnmappedColumnError
def _safe_cls_name(cls):
try:
cls_name = '.'.join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, '__name__', None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
def _default_unmapped(cls):
    """Return the standard "Class X is not mapped" message for *cls*.

    Returns None when the class actually has mappers associated with it.
    """
    try:
        mappers = sa.orm.attributes.manager_of_class(cls).mappers
    except NO_STATE + (TypeError,):
        # No instrumentation manager for this class, or *cls* is not a
        # class at all -- treat it as having no mappers.
        mappers = {}
    name = _safe_cls_name(cls)
    if not mappers:
        return "Class '%s' is not mapped" % name
| gpl-3.0 |
denisenkom/django | tests/generic_inline_admin/tests.py | 2 | 17626 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.contenttypes.generic import (
generic_inlineformset_factory, GenericTabularInline)
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import TestCase
from django.test.utils import override_settings
# local test models
from .admin import MediaInline, MediaPermanentInline
from .models import (Episode, EpisodeExtra, EpisodeMaxNum, Media,
EpisodePermanent, Category)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericAdminViewTest(TestCase):
    """Smoke tests for the admin add/change views of a model (Episode) with
    a generic-FK inline (Media), plus direct tests of
    generic_inlineformset_factory rendering and querysets."""
    urls = "generic_inline_admin.urls"
    fixtures = ['users.xml']
    def setUp(self):
        # set TEMPLATE_DEBUG to True to ensure {% include %} will raise
        # exceptions since that is how inlines are rendered and #9498 will
        # bubble up if it is an issue.
        self.original_template_debug = settings.TEMPLATE_DEBUG
        settings.TEMPLATE_DEBUG = True
        self.client.login(username='super', password='secret')
        # Can't load content via a fixture (since the GenericForeignKey
        # relies on content type IDs, which will vary depending on what
        # other tests have been run), thus we do it here.
        e = Episode.objects.create(name='This Week in Django')
        self.episode_pk = e.pk
        m = Media(content_object=e, url='http://example.com/podcast.mp3')
        m.save()
        self.mp3_media_pk = m.pk
        m = Media(content_object=e, url='http://example.com/logo.png')
        m.save()
        self.png_media_pk = m.pk
    def tearDown(self):
        self.client.logout()
        # Restore the TEMPLATE_DEBUG value saved in setUp.
        settings.TEMPLATE_DEBUG = self.original_template_debug
    def testBasicAddGet(self):
        """
        A smoke test to ensure GET on the add_view works.
        """
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/add/')
        self.assertEqual(response.status_code, 200)
    def testBasicEditGet(self):
        """
        A smoke test to ensure GET on the change_view works.
        """
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk)
        self.assertEqual(response.status_code, 200)
    def testBasicAddPost(self):
        """
        A smoke test to ensure POST on add_view works.
        """
        post_data = {
            "name": "This Week in Django",
            # inline data
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
        }
        response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/episode/add/', post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
    def testBasicEditPost(self):
        """
        A smoke test to ensure POST on edit_view works.
        """
        post_data = {
            "name": "This Week in Django",
            # inline data
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
            "generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
            "generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
            "generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
            "generic_inline_admin-media-content_type-object_id-2-id": "",
            "generic_inline_admin-media-content_type-object_id-2-url": "",
        }
        url = '/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
    def testGenericInlineFormset(self):
        # 2 initial Media objects + extra=3 empty forms = 5 forms expected.
        EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
        e = Episode.objects.get(name='This Week in Django')
        # Works with no queryset
        formset = EpisodeMediaFormSet(instance=e)
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
        # A queryset can be used to alter display ordering
        formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
        self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
        # Works with a queryset that omits items
        formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
        self.assertEqual(len(formset.forms), 4)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
    def testGenericInlineFormsetFactory(self):
        # Regression test for #10522.
        inline_formset = generic_inlineformset_factory(Media,
            exclude=('url',))
        # Regression test for #12340.
        e = Episode.objects.get(name='This Week in Django')
        formset = inline_formset(instance=e)
        self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminParametersTest(TestCase):
    """
    Checks that the ``extra``/``max_num`` parameters declared on a generic
    inline control how many inline forms the admin change view renders.
    """
    urls = "generic_inline_admin.urls"
    fixtures = ['users.xml']
    def setUp(self):
        self.client.login(username='super', password='secret')
    def tearDown(self):
        self.client.logout()
    def _create_object(self, model):
        """
        Create a model with an attached Media object via GFK. We can't
        load content via a fixture (since the GenericForeignKey relies on
        content type IDs, which will vary depending on what other tests
        have been run), thus we do it here.
        """
        e = model.objects.create(name='This Week in Django')
        Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
        return e
    def testNoParam(self):
        """
        With one initial form, extra (default) at 3, there should be 4 forms.
        """
        e = self._create_object(Episode)
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
        formset = response.context['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 4)
        self.assertEqual(formset.initial_form_count(), 1)
    def testExtraParam(self):
        """
        With extra=0, there should be one form.
        """
        e = self._create_object(EpisodeExtra)
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodeextra/%s/' % e.pk)
        formset = response.context['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 1)
        self.assertEqual(formset.initial_form_count(), 1)
    def testMaxNumParam(self):
        """
        With extra=5 and max_num=2, there should be only 2 forms.
        """
        e = self._create_object(EpisodeMaxNum)
        # NOTE: an unused `inline_form_data` HTML string used to be assigned
        # here; it was dead code and has been removed.
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodemaxnum/%s/' % e.pk)
        formset = response.context['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 2)
        self.assertEqual(formset.initial_form_count(), 1)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminWithUniqueTogetherTest(TestCase):
    """Full POST round-trip through the admin add view for a Contact with a
    generic phone-number inline. (Per the class name, the inline's model
    presumably declares unique_together -- its definition is in .models.)"""
    urls = "generic_inline_admin.urls"
    fixtures = ['users.xml']
    def setUp(self):
        self.client.login(username='super', password='secret')
    def tearDown(self):
        self.client.logout()
    def testAdd(self):
        category_id = Category.objects.create(name='male').pk
        post_data = {
            "name": "John Doe",
            # inline data
            "generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
            "generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
            "generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
            "generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
            "generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
        }
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/contact/add/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/contact/add/', post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
class NoInlineDeletionTest(TestCase):
    """Verifies that MediaPermanentInline produces formsets with deletion
    disabled (can_delete is False)."""
    urls = "generic_inline_admin.urls"
    def test_no_deletion(self):
        # The inline never touches the site/request, so plain objects do.
        fake_site = object()
        inline = MediaPermanentInline(EpisodePermanent, fake_site)
        fake_request = object()
        formset = inline.get_formset(fake_request)
        self.assertFalse(formset.can_delete)
class MockRequest(object):
    # Bare stand-in for an HttpRequest; the tests below only need an object
    # with a `user` attribute attached.
    pass
class MockSuperUser(object):
    # Minimal user stub whose permission checks always succeed.
    def has_perm(self, perm):
        return True
# Module-level request fixture (superuser) shared by the tests below.
request = MockRequest()
request.user = MockSuperUser()
class GenericInlineModelAdminTest(TestCase):
    """Direct unit tests of GenericInlineModelAdmin.get_formset: keyword
    argument passthrough, Meta.exclude/readonly_fields interaction, and
    get_fieldsets being consulted for form fields."""
    urls = "generic_inline_admin.urls"
    def setUp(self):
        self.site = AdminSite()
    def test_get_formset_kwargs(self):
        media_inline = MediaInline(Media, AdminSite())
        # Create a formset with default arguments
        formset = media_inline.get_formset(request)
        self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
        self.assertEqual(formset.can_order, False)
        # Create a formset with custom keyword arguments
        formset = media_inline.get_formset(request, max_num=100, can_order=True)
        self.assertEqual(formset.max_num, 100)
        self.assertEqual(formset.can_order, True)
    def test_custom_form_meta_exclude_with_readonly(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is respected when
        used in conjunction with `GenericInlineModelAdmin.readonly_fields`
        and when no `ModelAdmin.exclude` is defined.
        """
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']
        class MediaInline(GenericTabularInline):
            readonly_fields = ['description']
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        # 'url' excluded by the form, 'description' consumed by readonly.
        self.assertEqual(
            list(list(ma.get_formsets(request))[0]().forms[0].fields),
            ['keywords', 'id', 'DELETE'])
    def test_custom_form_meta_exclude(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is respected by
        `GenericInlineModelAdmin.get_formset`, and overridden if
        `ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
        Refs #15907.
        """
        # First with `GenericInlineModelAdmin` -----------------
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']
        class MediaInline(GenericTabularInline):
            exclude = ['description']
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        # Inline's own exclude wins over the form's Meta.exclude.
        self.assertEqual(
            list(list(ma.get_formsets(request))[0]().forms[0].fields),
            ['url', 'keywords', 'id', 'DELETE'])
        # Then, only with `ModelForm` -----------------
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]
        ma = EpisodeAdmin(Episode, self.site)
        # With no inline exclude, the form's Meta.exclude ('url') applies.
        self.assertEqual(
            list(list(ma.get_formsets(request))[0]().forms[0].fields),
            ['description', 'keywords', 'id', 'DELETE'])
    def test_get_fieldsets(self):
        # Test that get_fieldsets is called when figuring out form fields.
        # Refs #18681.
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                fields = '__all__'
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
            can_delete = False
            def get_fieldsets(self, request, obj=None):
                return [(None, {'fields': ['url', 'description']})]
        ma = MediaInline(Media, self.site)
        form = ma.get_formset(None).form
        self.assertEqual(form._meta.fields, ['url', 'description'])
| bsd-3-clause |
jslhs/clFFT | src/scripts/perf/fftPerformanceTesting.py | 2 | 11673 | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
import itertools
import re#gex
import subprocess
import os
import sys
from datetime import datetime
# Common data and functions for the performance suite
tableHeader = 'lengthx,lengthy,lengthz,batch,device,inlay,outlay,place,precision,label,GFLOPS'
class TestCombination:
    """One FFT test configuration: lengths, batch size, device, data
    layouts, placeness and precision, plus a free-form label.
    All values are stored as given (callers pass strings)."""
    def __init__(self,
                 lengthx, lengthy, lengthz, batchsize,
                 device, inlayout, outlayout, placeness, precision,
                 label):
        self.x = lengthx
        self.y = lengthy
        self.z = lengthz
        self.batchsize = batchsize
        self.device = device
        self.inlayout = inlayout
        self.outlayout = outlayout
        self.placeness = placeness
        self.precision = precision
        self.label = label
    def __str__(self):
        # Human-readable one-liner; relies on all members being strings.
        return self.x + 'x' + self.y + 'x' + self.z + ':' + self.batchsize + ', ' + self.device + ', ' + self.inlayout + '/' + self.outlayout + ', ' + self.placeness + ', ' + self.precision + ' -- ' + self.label
class GraphPoint:
    """A single measured data point for graphing: problem geometry,
    precision, device and label, plus the achieved GFLOPS."""
    def __init__(self,
                 lengthx, lengthy, lengthz, batchsize,
                 precision, device, label,
                 gflops):
        self.x = lengthx
        self.y = lengthy
        self.z = lengthz
        self.batchsize = batchsize
        self.device = device
        self.label = label
        self.precision = precision
        self.gflops = gflops
        # Derived: total element count (x*y*z*batch), kept as a string.
        self.problemsize = str(int(self.x) * int(self.y) * int(self.z) * int(self.batchsize))
    def __str__(self):
        # ALL members must be represented here (x, y, z, batch, device, label, etc)
        return self.x + 'x' + self.y + 'x' + self.z + ':' + self.batchsize + ', ' + self.precision + ' precision, ' + self.device + ', -- ' + self.label + '; ' + self.gflops
class TableRow:
    """Pairs a TestCombination with its measured GFLOPS for table output."""
    # parameters = class TestCombination instantiation
    def __init__(self, parameters, gflops):
        self.parameters = parameters
        self.gflops = gflops
    def __str__(self):
        return self.parameters.__str__() + '; ' + self.gflops
def transformDimension(x, y, z):
    """Return the dimensionality (1, 2 or 3) of an FFT of size x*y*z.

    Lengths may arrive as ints or numeric strings. A length > 1 in a
    higher axis promotes the dimension. Bug fix: the original fell
    through and returned None for a degenerate 1x1x1 problem; that case
    is now treated as 1D.
    """
    if int(z) != 1:
        return 3
    elif int(y) != 1:
        return 2
    else:
        # covers both x > 1 and the degenerate 1x1x1 case
        return 1
def executable(library):
    """Return the benchmark client executable name for *library*.

    Validates the platform (win32/linux2 only) and that the executable
    exists in the current directory; quits the whole process otherwise
    (command-line-script behaviour). Python 2 only (print statements).
    """
    if type(library) != str:
        print 'ERROR: expected library name to be a string'
        quit()
    if sys.platform != 'win32' and sys.platform != 'linux2':
        print 'ERROR: unknown operating system'
        quit()
    if library == 'clFFT' or library == 'null':
        if sys.platform == 'win32':
            exe = 'Client.exe'
        elif sys.platform == 'linux2':
            exe = 'Client'
    else:
        print 'ERROR: unknown library -- cannot determine executable name'
        quit()
    if not os.path.isfile(exe):
        error_message = 'ERROR: could not find client named ' + exe
        print error_message
        quit()
    return exe
def max_mem_available_in_bytes(exe, device):
    """Query the client for the device's MAX_MEM_ALLOC_SIZE in bytes.

    Runs ``exe -i device``, filters for the MAX_MEM_ALLOC_SIZE line and
    parses the first integer found on it. Python 2 only
    (itertools.ifilter).
    """
    arguments = [exe, '-i', device]
    deviceInfo = subprocess.check_output(arguments, stderr=subprocess.STDOUT).split(os.linesep)
    deviceInfo = itertools.ifilter( lambda x: x.count('MAX_MEM_ALLOC_SIZE'), deviceInfo)
    deviceInfo = list(itertools.islice(deviceInfo, None))
    maxMemoryAvailable = re.search('\d+', deviceInfo[0])
    return int(maxMemoryAvailable.group(0))
def max_problem_size(exe, layout, precision, device):
    """Largest transform size (in data points) that fits on *device*.

    Layout 'ci'/'cp' means 2 numbers per data point; single precision is
    4 bytes per number, double is 8. Quits the process on unknown inputs
    (command-line-script behaviour). Python 2 only (print statements and
    integer division semantics).
    """
    if layout == 'ci' or layout == 'cp':
        numbers_in_one_datapoint = 2
    else:
        print 'max_problem_size(): unknown layout'
        quit()
    if precision == 'single':
        bytes_in_one_number = 4
    elif precision == 'double':
        bytes_in_one_number = 8
    else:
        print 'max_problem_size(): unknown precision'
        quit()
    max_problem_size = max_mem_available_in_bytes(exe, device) / (numbers_in_one_datapoint * bytes_in_one_number)
    # Only claim 1/16th of the theoretical maximum -- presumably headroom
    # for intermediate buffers; TODO confirm the rationale.
    max_problem_size = max_problem_size / 16
    return max_problem_size
def maxBatchSize(lengthx, lengthy, lengthz, layout, precision, exe, device):
    """Largest batch count whose total data still fits on *device*.

    Returns the count as a string. Python 2 only (`/` is integer
    division here).
    """
    problemSize = int(lengthx) * int(lengthy) * int(lengthz)
    maxBatchSize = max_problem_size(exe, layout, precision, device) / problemSize
    if int(lengthx) == pow(2,16) or int(lengthx) == pow(2,17):
        # special cases in the kernel. extra padding is added in, so we need to shrink the batch size to accommodate
        return str(maxBatchSize/2)
    else:
        return str(maxBatchSize)
def create_ini_file_if_requested(args):
    """If --createIniFilename was given, dump all other options to it.

    Each option is written as a '--name' line followed by its value line,
    a format load_ini_file_if_requested() can feed back to argparse.
    Skips file-valued attributes and any attribute whose name contains
    'File' (Python 2 only: `file` is the builtin file type). Exits the
    process after writing.
    """
    if args.createIniFilename:
        for x in vars(args):
            if (type(getattr(args,x)) != file) and x.count('File') == 0:
                args.createIniFilename.write('--' + x + os.linesep)
                args.createIniFilename.write(str(getattr(args,x)) + os.linesep)
        quit()
def load_ini_file_if_requested(args, parser):
    """Re-parse options from an INI-style file if --useIniFilename was given.

    The file contains one token per line ('--name' lines alternating with
    value lines, as written by create_ini_file_if_requested). Returns the
    freshly parsed namespace, or *args* unchanged when no file was given.
    """
    ini_file = args.useIniFilename
    if not ini_file:
        return args
    tokens = [line.strip() for line in ini_file.readlines()]
    return parser.parse_args(tokens)
def is_numeric_type(x):
    """True for plain int/long/float values (Python 2 only: uses `long`)."""
    return type(x) == int or type(x) == long or type(x) == float
def split_up_comma_delimited_lists(args):
    """Normalize every attribute of *args* into a list, in place.

    None becomes [None], numbers become single-element lists, and
    comma-delimited strings are split into their parts. Returns *args*.
    """
    for name in vars(args):
        value = getattr(args, name)
        if value == None:
            setattr(args, name, [None])
        elif is_numeric_type(value):
            setattr(args, name, [value])
        elif type(value) == str:
            setattr(args, name, value.split(','))
    return args
class Range:
    """Expands textual range specs into a uniquified, sorted value list.

    Accepted spec forms (per list element): a plain number; 'begin-end'
    with an optional ':step' suffix where a 'x' prefix on the step means
    multiplicative stepping and '+' means additive (e.g. '1-64:x2');
    the literal 'max' (resolved later by callers); or a bare step like
    '+16' which is passed through untouched. Non-list input is stored
    as-is in ``expanded``.
    """
    def __init__(self, ranges, defaultStep='+1'):
        # we might be passed in a single value or a list of strings
        # if we receive a single value, we want to feed it right back
        if type(ranges) != list:
            self.expanded = ranges
        elif ranges[0] == None:
            self.expanded = [None]
        else:
            self.expanded = []
            for thisRange in ranges:
                thisRange = str(thisRange)
                if re.search('^\+\d+$', thisRange):
                    # bare step token ('+16'): pass through untouched
                    self.expanded = self.expanded + [thisRange]
                elif thisRange == 'max':
                    self.expanded = self.expanded + ['max']
                else:
                    #elif thisRange != 'max':
                    if thisRange.count(':'):
                        # explicit step suffix, e.g. '1-64:x2'
                        self._stepAmount = thisRange.split(':')[1]
                    else:
                        self._stepAmount = defaultStep
                    thisRange = thisRange.split(':')[0]
                    # 'x' in the step selects multiplicative stepping
                    if self._stepAmount.count('x'):
                        self._stepper = '_mult'
                    else:
                        self._stepper = '_add'
                    self._stepAmount = self._stepAmount.lstrip('+x')
                    self._stepAmount = int(self._stepAmount)
                    if thisRange.count('-'):
                        self.begin = int(thisRange.split('-')[0])
                        self.end = int(thisRange.split('-')[1])
                    else:
                        # single value: begin == end
                        self.begin = int(thisRange.split('-')[0])
                        self.end = int(thisRange.split('-')[0])
                    self.current = self.begin
                    if self.begin == 0 and self._stepper == '_mult':
                        # multiplying from 0 would never advance; emit 0 alone
                        self.expanded = self.expanded + [0]
                    else:
                        while self.current <= self.end:
                            self.expanded = self.expanded + [self.current]
                            self._step()
            # now we want to uniquify and sort the expanded range
            # NOTE(review): expanded can mix ints and strings ('max', '+16');
            # sorting such a mix only works on Python 2.
            self.expanded = list(set(self.expanded))
            self.expanded.sort()
    # advance current value to next
    def _step(self):
        getattr(self, self._stepper)()
    def _mult(self):
        self.current = self.current * self._stepAmount
    def _add(self):
        self.current = self.current + self._stepAmount
def expand_range(a_range):
    """Convenience wrapper: expand *a_range* (see Range) to a value list."""
    return Range(a_range).expanded
def decode_parameter_problemsize(problemsize):
    """Decode problem-size specs like '16x16x1:32' into nested lists.

    Each element is split on ':' (length groups) and each group on 'x'
    (individual dimensions), so '16x16x1:32' becomes
    [['16', '16', '1'], ['32']]. The input list is rewritten in place
    and also returned; a list containing None is returned untouched.
    """
    if problemsize.count(None):
        return problemsize
    for index, spec in enumerate(problemsize):
        problemsize[index] = [group.split('x') for group in spec.split(':')]
    return problemsize
def gemm_table_header():
    """CSV header row for GEMM performance result tables."""
    return 'm,n,k,lda,ldb,ldc,alpha,beta,order,transa,transb,function,device,library,label,GFLOPS'
class GemmTestCombination:
    """One GEMM test configuration: matrix sizes and leading dimensions,
    scalars, order/transpose flags, plus function/device/library/label.
    Numeric fields are normalized to strings on construction."""
    def __init__(self,
                 sizem, sizen, sizek, lda, ldb, ldc,
                 alpha, beta, order, transa, transb,
                 function, device, library, label):
        self.sizem = str(sizem)
        self.sizen = str(sizen)
        self.sizek = str(sizek)
        self.lda = str(lda)
        self.ldb = str(ldb)
        self.ldc = str(ldc)
        self.alpha = str(alpha)
        self.beta = str(beta)
        self.order = order
        self.transa = transa
        self.transb = transb
        self.function = function
        self.device = device
        self.library = library
        self.label = label
    def __str__(self):
        return self.sizem + 'x' + self.sizen + 'x' + self.sizek + ':' + self.lda + 'x' + self.ldb + 'x' + self.ldc + ', ' + self.device + ', ' + self.function + ', ' + self.library + ', alpha(' + self.alpha + '), beta(' + self.beta + '), order(' + self.order + '), transa(' + self.transa + '), transb(' + self.transb + ') -- ' + self.label
class GemmGraphPoint:
    """A single measured GEMM data point for graphing: sizes, flags and
    identifying labels plus the achieved GFLOPS (stored as given, not
    stringified like GemmTestCombination)."""
    def __init__(self,
                 sizem, sizen, sizek,
                 lda, ldb, ldc,
                 device, order, transa, transb,
                 function, library, label,
                 gflops):
        self.sizem = sizem
        self.sizen = sizen
        self.sizek = sizek
        self.lda = lda
        self.ldb = ldb
        self.ldc = ldc
        self.device = device
        self.order = order
        self.transa = transa
        self.transb = transb
        self.function = function
        self.library = library
        self.label = label
        self.gflops = gflops
    def __str__(self):
        # ALL members must be represented here (x, y, z, batch, device, label, etc)
        return self.sizem + 'x' + self.sizen + 'x' + self.sizek + ':' + self.device + ', ' + self.function + ', ' + self.library + ', order(' + self.order + '), transa(' + self.transa + '), transb(' + self.transb + ') -- ' + self.label + '; ' + self.gflops + ' gflops'
def open_file( filename ):
    """Open a results file for writing without clobbering an existing one.

    Accepts a string, a single-element list, or None. With None a
    timestamped 'results<ISO>.txt' name is generated; when the requested
    name already exists, a timestamp is appended instead of overwriting.
    Returns the open writable file object (caller closes it).
    Python 2 only (print statement).
    """
    if type(filename) == list:
        filename = filename[0]
    if filename == None:
        # ':' is not valid in filenames on Windows, so replace it.
        filename = 'results' + datetime.now().isoformat().replace(':','.') + '.txt'
    else:
        if os.path.isfile(filename):
            oldname = filename
            filename = filename + datetime.now().isoformat().replace(':','.')
            message = 'A file with the name ' + oldname + ' already exists. Changing filename to ' + filename
            print message
    return open(filename, 'w')
| apache-2.0 |
manishpatell/erpcustomizationssaiimpex123qwe | addons/l10n_in_hr_payroll/__init__.py | 430 | 1117 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_in_hr_payroll
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
fvcproductions/dotfiles | bin/sketch/Plugins/WakaTime.sketchplugin/Contents/Resources/wakatime/packages/pygments/styles/lovelace.py | 31 | 3173 | # -*- coding: utf-8 -*-
"""
pygments.styles.lovelace
~~~~~~~~~~~~~~~~~~~~~~~~
Lovelace by Miikka Salminen
Pygments style by Miikka Salminen (https://github.com/miikkas)
A desaturated, somewhat subdued style created for the Lovelace interactive
learning environment.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Punctuation, Generic, Whitespace
class LovelaceStyle(Style):
    """
    The style used in Lovelace interactive learning environment. Tries to avoid
    the "angry fruit salad" effect with desaturated and dim colours.
    """
    # Named colour constants of the desaturated palette, shared across the
    # token rules below.
    _KW_BLUE = '#2838b0'
    _NAME_GREEN = '#388038'
    _DOC_ORANGE = '#b85820'
    _OW_PURPLE = '#a848a8'
    _FUN_BROWN = '#785840'
    _STR_RED = '#b83838'
    _CLS_CYAN = '#287088'
    _ESCAPE_LIME = '#709030'
    _LABEL_CYAN = '#289870'
    _EXCEPT_YELLOW = '#908828'
    # Colour applied to tokens with no explicit rule below.
    default_style = '#222222'
    # Token -> style mapping consumed by Pygments formatters.
    styles = {
        Whitespace: '#a89028',
        Comment: 'italic #888888',
        Comment.Hashbang: _CLS_CYAN,
        Comment.Multiline: '#888888',
        Comment.Preproc: 'noitalic '+_LABEL_CYAN,
        Keyword: _KW_BLUE,
        Keyword.Constant: 'italic #444444',
        Keyword.Declaration: 'italic',
        Keyword.Type: 'italic',
        Operator: '#666666',
        Operator.Word: _OW_PURPLE,
        Punctuation: '#888888',
        Name.Attribute: _NAME_GREEN,
        Name.Builtin: _NAME_GREEN,
        Name.Builtin.Pseudo: 'italic',
        Name.Class: _CLS_CYAN,
        Name.Constant: _DOC_ORANGE,
        Name.Decorator: _CLS_CYAN,
        Name.Entity: _ESCAPE_LIME,
        Name.Exception: _EXCEPT_YELLOW,
        Name.Function: _FUN_BROWN,
        Name.Function.Magic: _DOC_ORANGE,
        Name.Label: _LABEL_CYAN,
        Name.Namespace: _LABEL_CYAN,
        Name.Tag: _KW_BLUE,
        Name.Variable: '#b04040',
        Name.Variable.Global:_EXCEPT_YELLOW,
        Name.Variable.Magic: _DOC_ORANGE,
        String: _STR_RED,
        String.Affix: '#444444',
        String.Char: _OW_PURPLE,
        String.Delimiter: _DOC_ORANGE,
        String.Doc: 'italic '+_DOC_ORANGE,
        String.Escape: _ESCAPE_LIME,
        String.Interpol: 'underline',
        String.Other: _OW_PURPLE,
        String.Regex: _OW_PURPLE,
        Number: '#444444',
        Generic.Deleted: '#c02828',
        Generic.Emph: 'italic',
        Generic.Error: '#c02828',
        Generic.Heading: '#666666',
        Generic.Subheading: '#444444',
        Generic.Inserted: _NAME_GREEN,
        Generic.Output: '#666666',
        Generic.Prompt: '#444444',
        Generic.Strong: 'bold',
        Generic.Traceback: _KW_BLUE,
        Error: 'bg:'+_OW_PURPLE,
    }
| mit |
liqi328/rjrepaircompany | django/contrib/gis/gdal/geometries.py | 388 | 26357 | """
The OGRGeometry is a wrapper for using the OGR Geometry class
(see http://www.gdal.org/ogr/classOGRGeometry.html). OGRGeometry
may be instantiated when reading geometries from OGR Data Sources
(e.g. SHP files), or when given OGC WKT (a string).
While the 'full' API is not present yet, the API is "pythonic" unlike
the traditional and "next-generation" OGR Python bindings. One major
advantage OGR Geometries have over their GEOS counterparts is support
for spatial reference systems and their transformation.
Example:
>>> from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, SpatialReference
>>> wkt1, wkt2 = 'POINT(-90 30)', 'POLYGON((0 0, 5 0, 5 5, 0 5))'
>>> pnt = OGRGeometry(wkt1)
>>> print pnt
POINT (-90 30)
>>> mpnt = OGRGeometry(OGRGeomType('MultiPoint'), SpatialReference('WGS84'))
>>> mpnt.add(wkt1)
>>> mpnt.add(wkt1)
>>> print mpnt
MULTIPOINT (-90 30,-90 30)
>>> print mpnt.srs.name
WGS 84
>>> print mpnt.srs.proj
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> mpnt.transform_to(SpatialReference('NAD27'))
>>> print mpnt.proj
+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs
>>> print mpnt
MULTIPOINT (-89.999930378602485 29.999797886557641,-89.999930378602485 29.999797886557641)
The OGRGeomType class is to make it easy to specify an OGR geometry type:
>>> from django.contrib.gis.gdal import OGRGeomType
>>> gt1 = OGRGeomType(3) # Using an integer for the type
>>> gt2 = OGRGeomType('Polygon') # Using a string
>>> gt3 = OGRGeomType('POLYGON') # It's case-insensitive
>>> print gt1 == 3, gt1 == 'Polygon' # Equivalence works w/non-OGRGeomType objects
True
"""
# Python library requisites.
import sys
from binascii import a2b_hex
from ctypes import byref, string_at, c_char_p, c_double, c_ubyte, c_void_p
# Getting GDAL prerequisites
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.libgdal import GEOJSON, GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
# Getting the ctypes prototype functions that interface w/the GDAL C library.
from django.contrib.gis.gdal.prototypes import geom as capi, srs as srs_api
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_G_* routines are relevant here.
#### OGRGeometry Class ####
class OGRGeometry(GDALBase):
    """Generally encapsulates an OGR geometry.

    Wraps a GDAL/OGR geometry handle (a ctypes pointer held in ``self.ptr``)
    and exposes Pythonic accessors, topology predicates, and output formats
    (WKT/EWKT, WKB, HEX, GML, KML, GeoJSON).  On construction the instance's
    class is swapped to the concrete subclass matching the OGR geometry type
    (see GEO_CLASSES).  NOTE(review): this is legacy Python 2 code
    (``basestring``, ``unicode``, ``buffer``, ``xrange``, ``long``).
    """
    def __init__(self, geom_input, srs=None):
        "Initializes Geometry on either WKT or an OGR pointer as input."
        str_instance = isinstance(geom_input, basestring)
        # If HEX, unpack input to a binary buffer.
        if str_instance and hex_regex.match(geom_input):
            geom_input = buffer(a2b_hex(geom_input.upper()))
            str_instance = False
        # Constructing the geometry,
        if str_instance:
            # Checking if unicode
            if isinstance(geom_input, unicode):
                # Encoding to ASCII, WKT or HEX doesn't need any more.
                geom_input = geom_input.encode('ascii')
            wkt_m = wkt_regex.match(geom_input)
            json_m = json_regex.match(geom_input)
            if wkt_m:
                if wkt_m.group('srid'):
                    # If there's EWKT, set the SRS w/value of the SRID.
                    srs = int(wkt_m.group('srid'))
                if wkt_m.group('type').upper() == 'LINEARRING':
                    # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.
                    # See http://trac.osgeo.org/gdal/ticket/1992.
                    g = capi.create_geom(OGRGeomType(wkt_m.group('type')).num)
                    capi.import_wkt(g, byref(c_char_p(wkt_m.group('wkt'))))
                else:
                    g = capi.from_wkt(byref(c_char_p(wkt_m.group('wkt'))), None, byref(c_void_p()))
            elif json_m:
                if GEOJSON:
                    g = capi.from_json(geom_input)
                else:
                    raise NotImplementedError('GeoJSON input only supported on GDAL 1.5+.')
            else:
                # Seeing if the input is a valid short-hand string
                # (e.g., 'Point', 'POLYGON').
                # NOTE(review): `ogr_t` is unused and OGRGeomType is built
                # twice; the first construction appears to only serve as an
                # early validity check of the string -- confirm before
                # simplifying.
                ogr_t = OGRGeomType(geom_input)
                g = capi.create_geom(OGRGeomType(geom_input).num)
        elif isinstance(geom_input, buffer):
            # WKB was passed in
            g = capi.from_wkb(str(geom_input), None, byref(c_void_p()), len(geom_input))
        elif isinstance(geom_input, OGRGeomType):
            # OGRGeomType was passed in, an empty geometry will be created.
            g = capi.create_geom(geom_input.num)
        elif isinstance(geom_input, self.ptr_type):
            # OGR pointer (c_void_p) was the input.
            g = geom_input
        else:
            raise OGRException('Invalid input type for OGR Geometry construction: %s' % type(geom_input))
        # Now checking the Geometry pointer before finishing initialization
        # by setting the pointer for the object.
        if not g:
            raise OGRException('Cannot create OGR Geometry from input: %s' % str(geom_input))
        self.ptr = g
        # Assigning the SpatialReference object to the geometry, if valid.
        if bool(srs): self.srs = srs
        # Setting the class depending upon the OGR Geometry Type
        self.__class__ = GEO_CLASSES[self.geom_type.num]
    def __del__(self):
        "Deletes this Geometry."
        # Release the underlying OGR geometry handle; `_ptr` may be unset if
        # __init__ raised before assigning it.
        if self._ptr: capi.destroy_geom(self._ptr)
    # Pickle routines
    def __getstate__(self):
        # Serialize as (WKB bytes, SRS WKT or None) -- the raw C pointer
        # cannot be pickled.
        srs = self.srs
        if srs:
            srs = srs.wkt
        else:
            srs = None
        return str(self.wkb), srs
    def __setstate__(self, state):
        # Rebuild the OGR geometry from the pickled WKB and re-attach the SRS.
        wkb, srs = state
        ptr = capi.from_wkb(wkb, None, byref(c_void_p()), len(wkb))
        if not ptr: raise OGRException('Invalid OGRGeometry loaded from pickled state.')
        self.ptr = ptr
        self.srs = srs
    @classmethod
    def from_bbox(cls, bbox):
        "Constructs a Polygon from a bounding box (4-tuple)."
        x0, y0, x1, y1 = bbox
        return OGRGeometry( 'POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
                x0, y0, x0, y1, x1, y1, x1, y0, x0, y0) )
    ### Geometry set-like operations ###
    # g = g1 | g2
    def __or__(self, other):
        "Returns the union of the two geometries."
        return self.union(other)
    # g = g1 & g2
    def __and__(self, other):
        "Returns the intersection of this Geometry and the other."
        return self.intersection(other)
    # g = g1 - g2
    def __sub__(self, other):
        "Return the difference of this Geometry and the other."
        return self.difference(other)
    # g = g1 ^ g2
    def __xor__(self, other):
        "Return the symmetric difference of this Geometry and the other."
        return self.sym_difference(other)
    def __eq__(self, other):
        "Is this Geometry equal to the other?"
        if isinstance(other, OGRGeometry):
            return self.equals(other)
        else:
            return False
    def __ne__(self, other):
        "Tests for inequality."
        return not (self == other)
    def __str__(self):
        "WKT is used for the string representation."
        return self.wkt
    #### Geometry Properties ####
    @property
    def dimension(self):
        "Returns 0 for points, 1 for lines, and 2 for surfaces."
        return capi.get_dims(self.ptr)
    def _get_coord_dim(self):
        "Returns the coordinate dimension of the Geometry."
        if isinstance(self, GeometryCollection) and GDAL_VERSION < (1, 5, 2):
            # On GDAL versions prior to 1.5.2, there exists a bug in which
            # the coordinate dimension of geometry collections is always 2:
            # http://trac.osgeo.org/gdal/ticket/2334
            # Here we workaround by returning the coordinate dimension of the
            # first geometry in the collection instead.
            if len(self):
                return capi.get_coord_dim(capi.get_geom_ref(self.ptr, 0))
        return capi.get_coord_dim(self.ptr)
    def _set_coord_dim(self, dim):
        "Sets the coordinate dimension of this Geometry."
        if not dim in (2, 3):
            raise ValueError('Geometry dimension must be either 2 or 3')
        capi.set_coord_dim(self.ptr, dim)
    coord_dim = property(_get_coord_dim, _set_coord_dim)
    @property
    def geom_count(self):
        "The number of elements in this Geometry."
        return capi.get_geom_count(self.ptr)
    @property
    def point_count(self):
        "Returns the number of Points in this Geometry."
        return capi.get_point_count(self.ptr)
    @property
    def num_points(self):
        "Alias for `point_count` (same name method in GEOS API.)"
        return self.point_count
    @property
    def num_coords(self):
        "Alias for `point_count`."
        return self.point_count
    @property
    def geom_type(self):
        "Returns the Type for this Geometry."
        return OGRGeomType(capi.get_geom_type(self.ptr))
    @property
    def geom_name(self):
        "Returns the Name of this Geometry."
        return capi.get_geom_name(self.ptr)
    @property
    def area(self):
        "Returns the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise."
        return capi.get_area(self.ptr)
    @property
    def envelope(self):
        "Returns the envelope for this Geometry."
        # TODO: Fix Envelope() for Point geometries.
        return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))
    @property
    def extent(self):
        "Returns the envelope as a 4-tuple, instead of as an Envelope object."
        return self.envelope.tuple
    #### SpatialReference-related Properties ####
    # The SRS property
    def _get_srs(self):
        "Returns the Spatial Reference for this Geometry, or None if unset."
        try:
            srs_ptr = capi.get_geom_srs(self.ptr)
            return SpatialReference(srs_api.clone_srs(srs_ptr))
        except SRSException:
            return None
    def _set_srs(self, srs):
        "Sets the SpatialReference for this geometry."
        # Do not have to clone the `SpatialReference` object pointer because
        # when it is assigned to this `OGRGeometry` it's internal OGR
        # reference count is incremented, and will likewise be released
        # (decremented) when this geometry's destructor is called.
        if isinstance(srs, SpatialReference):
            srs_ptr = srs.ptr
        elif isinstance(srs, (int, long, basestring)):
            sr = SpatialReference(srs)
            srs_ptr = sr.ptr
        else:
            raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))
        capi.assign_srs(self.ptr, srs_ptr)
    srs = property(_get_srs, _set_srs)
    # The SRID property
    def _get_srid(self):
        "Returns the SRID of this geometry's SRS, or None if there is no SRS."
        srs = self.srs
        if srs: return srs.srid
        return None
    def _set_srid(self, srid):
        "Sets the SRS of this geometry from an integer SRID."
        if isinstance(srid, (int, long)):
            self.srs = srid
        else:
            raise TypeError('SRID must be set with an integer.')
    srid = property(_get_srid, _set_srid)
    #### Output Methods ####
    @property
    def geos(self):
        "Returns a GEOSGeometry object from this OGRGeometry."
        # Imported here to avoid a circular import at module load time.
        from django.contrib.gis.geos import GEOSGeometry
        return GEOSGeometry(self.wkb, self.srid)
    @property
    def gml(self):
        "Returns the GML representation of the Geometry."
        return capi.to_gml(self.ptr)
    @property
    def hex(self):
        "Returns the hexadecimal representation of the WKB (a string)."
        return str(self.wkb).encode('hex').upper()
        #return b2a_hex(self.wkb).upper()
    @property
    def json(self):
        """
        Returns the GeoJSON representation of this Geometry (requires
        GDAL 1.5+).
        """
        if GEOJSON:
            return capi.to_json(self.ptr)
        else:
            raise NotImplementedError('GeoJSON output only supported on GDAL 1.5+.')
    geojson = json
    @property
    def kml(self):
        "Returns the KML representation of the Geometry (requires GDAL 1.5+)."
        if GEOJSON:
            return capi.to_kml(self.ptr, None)
        else:
            raise NotImplementedError('KML output only supported on GDAL 1.5+.')
    @property
    def wkb_size(self):
        "Returns the size of the WKB buffer."
        return capi.get_wkbsize(self.ptr)
    @property
    def wkb(self):
        "Returns the WKB representation of the Geometry."
        # Emit WKB in the native byte order of this machine.
        if sys.byteorder == 'little':
            byteorder = 1 # wkbNDR (from ogr_core.h)
        else:
            byteorder = 0 # wkbXDR
        sz = self.wkb_size
        # Creating the unsigned character buffer, and passing it in by reference.
        buf = (c_ubyte * sz)()
        wkb = capi.to_wkb(self.ptr, byteorder, byref(buf))
        # Returning a buffer of the string at the pointer.
        return buffer(string_at(buf, sz))
    @property
    def wkt(self):
        "Returns the WKT representation of the Geometry."
        return capi.to_wkt(self.ptr, byref(c_char_p()))
    @property
    def ewkt(self):
        "Returns the EWKT ('SRID=n;<wkt>') representation of the Geometry."
        srs = self.srs
        if srs and srs.srid:
            return 'SRID=%s;%s' % (srs.srid, self.wkt)
        else:
            return self.wkt
    #### Geometry Methods ####
    def clone(self):
        "Clones this OGR Geometry."
        return OGRGeometry(capi.clone_geom(self.ptr), self.srs)
    def close_rings(self):
        """
        If there are any rings within this geometry that have not been
        closed, this routine will do so by adding the starting point at the
        end.
        """
        # Closing the open rings.
        capi.geom_close_rings(self.ptr)
    def transform(self, coord_trans, clone=False):
        """
        Transforms this geometry to a different spatial reference system.
        May take a CoordTransform object, a SpatialReference object, string
        WKT or PROJ.4, and/or an integer SRID. By default nothing is returned
        and the geometry is transformed in-place. However, if the `clone`
        keyword is set, then a transformed clone of this geometry will be
        returned.
        """
        if clone:
            klone = self.clone()
            klone.transform(coord_trans)
            return klone
        # Have to get the coordinate dimension of the original geometry
        # so it can be used to reset the transformed geometry's dimension
        # afterwards. This is done because of GDAL bug (in versions prior
        # to 1.7) that turns geometries 3D after transformation, see:
        # http://trac.osgeo.org/gdal/changeset/17792
        if GDAL_VERSION < (1, 7):
            orig_dim = self.coord_dim
        # Depending on the input type, use the appropriate OGR routine
        # to perform the transformation.
        if isinstance(coord_trans, CoordTransform):
            capi.geom_transform(self.ptr, coord_trans.ptr)
        elif isinstance(coord_trans, SpatialReference):
            capi.geom_transform_to(self.ptr, coord_trans.ptr)
        elif isinstance(coord_trans, (int, long, basestring)):
            sr = SpatialReference(coord_trans)
            capi.geom_transform_to(self.ptr, sr.ptr)
        else:
            raise TypeError('Transform only accepts CoordTransform, '
                            'SpatialReference, string, and integer objects.')
        # Setting with original dimension, see comment above.
        if GDAL_VERSION < (1, 7):
            if isinstance(self, GeometryCollection):
                # With geometry collections have to set dimension on
                # each internal geometry reference, as the collection
                # dimension isn't affected.
                for i in xrange(len(self)):
                    internal_ptr = capi.get_geom_ref(self.ptr, i)
                    if orig_dim != capi.get_coord_dim(internal_ptr):
                        capi.set_coord_dim(internal_ptr, orig_dim)
            else:
                if self.coord_dim != orig_dim:
                    self.coord_dim = orig_dim
    def transform_to(self, srs):
        "For backwards-compatibility."
        self.transform(srs)
    #### Topology Methods ####
    def _topology(self, func, other):
        """A generalized function for topology operations, takes a GDAL function and
        the other geometry to perform the operation on."""
        if not isinstance(other, OGRGeometry):
            raise TypeError('Must use another OGRGeometry object for topology operations!')
        # Returning the output of the given function with the other geometry's
        # pointer.
        return func(self.ptr, other.ptr)
    def intersects(self, other):
        "Returns True if this geometry intersects with the other."
        return self._topology(capi.ogr_intersects, other)
    def equals(self, other):
        "Returns True if this geometry is equivalent to the other."
        return self._topology(capi.ogr_equals, other)
    def disjoint(self, other):
        "Returns True if this geometry and the other are spatially disjoint."
        return self._topology(capi.ogr_disjoint, other)
    def touches(self, other):
        "Returns True if this geometry touches the other."
        return self._topology(capi.ogr_touches, other)
    def crosses(self, other):
        "Returns True if this geometry crosses the other."
        return self._topology(capi.ogr_crosses, other)
    def within(self, other):
        "Returns True if this geometry is within the other."
        return self._topology(capi.ogr_within, other)
    def contains(self, other):
        "Returns True if this geometry contains the other."
        return self._topology(capi.ogr_contains, other)
    def overlaps(self, other):
        "Returns True if this geometry overlaps the other."
        return self._topology(capi.ogr_overlaps, other)
    #### Geometry-generation Methods ####
    def _geomgen(self, gen_func, other=None):
        "A helper routine for the OGR routines that generate geometries."
        if isinstance(other, OGRGeometry):
            return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)
        else:
            return OGRGeometry(gen_func(self.ptr), self.srs)
    @property
    def boundary(self):
        "Returns the boundary of this geometry."
        return self._geomgen(capi.get_boundary)
    @property
    def convex_hull(self):
        """
        Returns the smallest convex Polygon that contains all the points in
        this Geometry.
        """
        return self._geomgen(capi.geom_convex_hull)
    def difference(self, other):
        """
        Returns a new geometry consisting of the region which is the difference
        of this geometry and the other.
        """
        return self._geomgen(capi.geom_diff, other)
    def intersection(self, other):
        """
        Returns a new geometry consisting of the region of intersection of this
        geometry and the other.
        """
        return self._geomgen(capi.geom_intersection, other)
    def sym_difference(self, other):
        """
        Returns a new geometry which is the symmetric difference of this
        geometry and the other.
        """
        return self._geomgen(capi.geom_sym_diff, other)
    def union(self, other):
        """
        Returns a new geometry consisting of the region which is the union of
        this geometry and the other.
        """
        return self._geomgen(capi.geom_union, other)
# The subclasses for OGR Geometry.
class Point(OGRGeometry):
    "A single OGR point geometry."
    @property
    def x(self):
        "The X coordinate of this Point."
        return capi.getx(self.ptr, 0)
    @property
    def y(self):
        "The Y coordinate of this Point."
        return capi.gety(self.ptr, 0)
    @property
    def z(self):
        "The Z coordinate of this Point (None for 2D points)."
        if self.coord_dim != 3:
            return None
        return capi.getz(self.ptr, 0)
    @property
    def tuple(self):
        "The coordinates of this Point as a 2- or 3-tuple."
        if self.coord_dim == 3:
            return (self.x, self.y, self.z)
        if self.coord_dim == 2:
            return (self.x, self.y)
    coords = tuple
class LineString(OGRGeometry):
    "An OGR line-string geometry (a sequence of points)."
    def __getitem__(self, index):
        "Returns the point at the given index as a coordinate tuple."
        if not (0 <= index < self.point_count):
            raise OGRIndexError('index out of range: %s' % str(index))
        x, y, z = c_double(), c_double(), c_double()
        capi.get_point(self.ptr, index, byref(x), byref(y), byref(z))
        dim = self.coord_dim
        if dim == 1:
            return (x.value,)
        if dim == 2:
            return (x.value, y.value)
        if dim == 3:
            return (x.value, y.value, z.value)
    def __iter__(self):
        "Yields each point of the LineString in order."
        for pt_idx in xrange(self.point_count):
            yield self[pt_idx]
    def __len__(self):
        "Number of points in the LineString."
        return self.point_count
    @property
    def tuple(self):
        "All points of the LineString as a tuple of coordinate tuples."
        return tuple(self[pt_idx] for pt_idx in xrange(len(self)))
    coords = tuple
    def _listarr(self, func):
        """
        Internal helper: applies `func(ptr, i)` for every point index and
        collects the results in a list.
        """
        return [func(self.ptr, pt_idx) for pt_idx in xrange(len(self))]
    @property
    def x(self):
        "The X coordinates as a list."
        return self._listarr(capi.getx)
    @property
    def y(self):
        "The Y coordinates as a list."
        return self._listarr(capi.gety)
    @property
    def z(self):
        "The Z coordinates as a list (None for 2D lines)."
        if self.coord_dim != 3:
            return None
        return self._listarr(capi.getz)
# LinearRings are used in Polygons.
class LinearRing(LineString):
    "A closed LineString used for the rings of a Polygon (OGR type code 101)."
    pass
class Polygon(OGRGeometry):
    "An OGR polygon geometry: an exterior ring plus interior rings."
    def __len__(self):
        "Number of rings (exterior + interior) in this Polygon."
        return self.geom_count
    def __iter__(self):
        "Yields each ring of the Polygon."
        for ring_idx in xrange(self.geom_count):
            yield self[ring_idx]
    def __getitem__(self, index):
        "Returns a clone of the ring at the given index."
        if 0 <= index < self.geom_count:
            ring_ptr = capi.get_geom_ref(self.ptr, index)
            return OGRGeometry(capi.clone_geom(ring_ptr), self.srs)
        raise OGRIndexError('index out of range: %s' % index)
    # Polygon Properties
    @property
    def shell(self):
        "The exterior ring of this Polygon."
        return self[0] # First ring is the shell
    exterior_ring = shell
    @property
    def tuple(self):
        "Coordinate tuples of every ring, as a tuple."
        return tuple(self[ring_idx].tuple for ring_idx in xrange(self.geom_count))
    coords = tuple
    @property
    def point_count(self):
        "Total number of points across all rings of this Polygon."
        return sum(self[ring_idx].point_count for ring_idx in xrange(self.geom_count))
    @property
    def centroid(self):
        "The centroid of this Polygon as a Point geometry."
        centroid_geom = OGRGeometry(OGRGeomType('Point'))
        capi.get_centroid(self.ptr, centroid_geom.ptr)
        return centroid_geom
# Geometry Collection base class.
class GeometryCollection(OGRGeometry):
    "Base class for OGR geometry collections (MultiPoint, MultiLineString, ...)."
    def __getitem__(self, index):
        "Returns a clone of the member geometry at the given index."
        if 0 <= index < self.geom_count:
            member_ptr = capi.get_geom_ref(self.ptr, index)
            return OGRGeometry(capi.clone_geom(member_ptr), self.srs)
        raise OGRIndexError('index out of range: %s' % index)
    def __iter__(self):
        "Yields each member geometry of the collection."
        for member_idx in xrange(self.geom_count):
            yield self[member_idx]
    def __len__(self):
        "Number of member geometries in the collection."
        return self.geom_count
    def add(self, geom):
        "Adds a geometry (OGRGeometry or geometry string) to this collection."
        if isinstance(geom, OGRGeometry):
            if isinstance(geom, self.__class__):
                # Same collection type: splice in its members individually.
                for member in geom:
                    capi.add_geom(self.ptr, member.ptr)
            else:
                capi.add_geom(self.ptr, geom.ptr)
        elif isinstance(geom, basestring):
            # Parse the string input first, then add the result whole.
            parsed = OGRGeometry(geom)
            capi.add_geom(self.ptr, parsed.ptr)
        else:
            raise OGRException('Must add an OGRGeometry.')
    @property
    def point_count(self):
        "Total number of points across every member geometry."
        return sum(self[member_idx].point_count
                   for member_idx in xrange(self.geom_count))
    @property
    def tuple(self):
        "Coordinate tuples of every member geometry, as a tuple."
        return tuple(self[member_idx].tuple
                     for member_idx in xrange(self.geom_count))
    coords = tuple
# Multiple Geometry types.
class MultiPoint(GeometryCollection):
    "A collection of Point geometries."
    pass
class MultiLineString(GeometryCollection):
    "A collection of LineString geometries."
    pass
class MultiPolygon(GeometryCollection):
    "A collection of Polygon geometries."
    pass
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
GEO_CLASSES = {1 : Point,
2 : LineString,
3 : Polygon,
4 : MultiPoint,
5 : MultiLineString,
6 : MultiPolygon,
7 : GeometryCollection,
101: LinearRing,
1 + OGRGeomType.wkb25bit : Point,
2 + OGRGeomType.wkb25bit : LineString,
3 + OGRGeomType.wkb25bit : Polygon,
4 + OGRGeomType.wkb25bit : MultiPoint,
5 + OGRGeomType.wkb25bit : MultiLineString,
6 + OGRGeomType.wkb25bit : MultiPolygon,
7 + OGRGeomType.wkb25bit : GeometryCollection,
}
| bsd-3-clause |
Salat-Cx65/python-for-android | python-build/python-libs/gdata/src/gdata/youtube/service.py | 141 | 57914 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YouTubeService extends GDataService to streamline YouTube operations.
YouTubeService: Provides methods to perform CRUD operations on YouTube feeds.
Extends GDataService.
"""
__author__ = ('api.stephaniel@gmail.com (Stephanie Liu), '
'api.jhartmann@gmail.com (Jochen Hartmann)')
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import os
import atom
import gdata
import gdata.service
import gdata.youtube
# Service endpoints and authentication.
YOUTUBE_SERVER = 'gdata.youtube.com'
YOUTUBE_SERVICE = 'youtube'
YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin'
# Accepted upload formats and valid query-parameter values.
YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime',
                                  'flv')
YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month',
                                       'all_time')
YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating',
                                          'relevance')
YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude')
YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6')
YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured',
                         'top_rated', 'most_viewed', 'watch_on_mobile')
# Feed and upload URIs.
YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users'
YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken'
YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos'
YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users'
YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists'
# Standard (site-wide) feed URIs, derived from YOUTUBE_STANDARD_FEEDS.
YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds'
YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated')
YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
                                              'most_viewed')
YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
                                                    'recently_featured')
YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
                                                  'watch_on_mobile')
YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
                                                'top_favorites')
YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
                                              'most_recent')
YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
                                                 'most_discussed')
YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
                                              'most_linked')
YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
                                                 'most_responded')
# Schema URIs for categories and link relations.
YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas'
YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA
YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA,
                                               'complaint-reasons.cat')
YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA,
                                                  'subscriptiontypes.cat')
YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS',
                                    'RIGHTS', 'SPAM')
YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected')
YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family')
# Error codes used by the service wrappers below.
UNKOWN_ERROR = 1000  # (sic) misspelled name kept for backward compatibility.
UNKNOWN_ERROR = UNKOWN_ERROR  # Correctly-spelled alias; prefer this name.
YOUTUBE_BAD_REQUEST = 400
YOUTUBE_CONFLICT = 409
YOUTUBE_INTERNAL_SERVER_ERROR = 500
YOUTUBE_INVALID_ARGUMENT = 601
YOUTUBE_INVALID_CONTENT_TYPE = 602
YOUTUBE_NOT_A_VIDEO = 603
YOUTUBE_INVALID_KIND = 604
class Error(Exception):
  """Root of the exception hierarchy for the YouTube service."""
class RequestError(Error):
  """Raised in response to an invalid HTTP request."""
class YouTubeError(Error):
  """Raised for YouTube-service-specific failures."""
class YouTubeService(gdata.service.GDataService):
"""Client for the YouTube service.
Performs all documented Google Data YouTube API functions, such as inserting,
updating and deleting videos, comments, playlist, subscriptions etc.
YouTube Service requires authentication for any write, update or delete
actions.
Attributes:
email: An optional string identifying the user. Required only for
authenticated actions.
password: An optional string identifying the user's password.
source: An optional string identifying the name of your application.
server: An optional address of the YouTube API server. gdata.youtube.com
is provided as the default value.
additional_headers: An optional dictionary containing additional headers
to be passed along with each request. Use to store developer key.
client_id: An optional string identifying your application, required for
authenticated requests, along with a developer key.
developer_key: An optional string value. Register your application at
http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
"""
def __init__(self, email=None, password=None, source=None,
server=YOUTUBE_SERVER, additional_headers=None, client_id=None,
developer_key=None, **kwargs):
"""Creates a client for the YouTube service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'gdata.youtube.com'.
client_id: string (optional) Identifies your application, required for
authenticated requests, along with a developer key.
developer_key: string (optional) Register your application at
http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
if developer_key and not client_id:
raise YouTubeError('You must also specify the clientId')
gdata.service.GDataService.__init__(
self, email=email, password=password, service=YOUTUBE_SERVICE,
source=source, server=server, additional_headers=additional_headers,
**kwargs)
if client_id is not None and developer_key is not None:
self.additional_headers['X-Gdata-Client'] = client_id
self.additional_headers['X-GData-Key'] = 'key=%s' % developer_key
self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL
def GetYouTubeVideoFeed(self, uri):
"""Retrieve a YouTubeVideoFeed.
Args:
uri: A string representing the URI of the feed that is to be retrieved.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString)
def GetYouTubeVideoEntry(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoEntry.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the entry that is to
be retrieved.
video_id: An optional string representing the ID of the video.
Returns:
A YouTubeVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoEntry() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoEntry() method')
elif video_id and not uri:
uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id)
return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString)
def GetYouTubeContactFeed(self, uri=None, username='default'):
"""Retrieve a YouTubeContactFeed.
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the contact feed that
is to be retrieved.
username: An optional string representing the username. Defaults to the
currently authenticated user.
Returns:
A YouTubeContactFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeContactFeed() method.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts')
return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString)
def GetYouTubeContactEntry(self, uri):
"""Retrieve a YouTubeContactEntry.
Args:
uri: A string representing the URI of the contact entry that is to
be retrieved.
Returns:
A YouTubeContactEntry if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString)
def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoCommentFeed.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the comment feed that
is to be retrieved.
video_id: An optional string representing the ID of the video for which
to retrieve the comment feed.
Returns:
A YouTubeVideoCommentFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoCommentFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoCommentFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString)
def GetYouTubeVideoCommentEntry(self, uri):
"""Retrieve a YouTubeVideoCommentEntry.
Args:
uri: A string representing the URI of the comment entry that is to
be retrieved.
Returns:
A YouTubeCommentEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString)
def GetYouTubeUserFeed(self, uri=None, username=None):
"""Retrieve a YouTubeVideoFeed of user uploaded videos
Either a uri or a username must be provided. This will retrieve list
of videos uploaded by specified user. The uri will be of format
"http://gdata.youtube.com/feeds/api/users/{username}/uploads".
Args:
uri: An optional string representing the URI of the user feed that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserFeed() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserFeed() method')
elif username and not uri:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads')
return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString)
def GetYouTubeUserEntry(self, uri=None, username=None):
"""Retrieve a YouTubeUserEntry.
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the user entry that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserEntry if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserEntry() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserEntry() method')
elif username and not uri:
uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username)
return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString)
def GetYouTubePlaylistFeed(self, uri=None, username='default'):
"""Retrieve a YouTubePlaylistFeed (a feed of playlists for a user).
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the playlist feed that
is to be retrieved.
username: An optional string representing the username. Defaults to the
currently authenticated user.
Returns:
A YouTubePlaylistFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubePlaylistFeed() method.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists')
return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString)
def GetYouTubePlaylistEntry(self, uri):
"""Retrieve a YouTubePlaylistEntry.
Args:
uri: A string representing the URI of the playlist feed that is to
be retrieved.
Returns:
A YouTubePlaylistEntry if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString)
def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None):
  """Fetch the feed of videos contained in a playlist.

  One of uri or playlist_id is required.

  Args:
    uri: Optional string URI of the playlist video feed to fetch.
    playlist_id: Optional string ID of the playlist whose video feed is
      to be fetched.

  Returns:
    The retrieved YouTubePlaylistVideoFeed.

  Raises:
    YouTubeError: Neither a uri nor a playlist_id was supplied.
  """
  if uri is None and playlist_id is None:
    raise YouTubeError('You must provide at least a uri or a playlist_id '
                       'to the GetYouTubePlaylistVideoFeed() method')
  if not uri and playlist_id:
    uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id)
  return self.Get(
      uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString)
def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None):
  """Fetch the feed of video responses posted to a video.

  One of uri or video_id is required.

  Args:
    uri: Optional string URI of the response feed to fetch.
    video_id: Optional string ID of the video whose response feed is to
      be fetched.

  Returns:
    The retrieved YouTubeVideoResponseFeed.

  Raises:
    YouTubeError: Neither a uri nor a video_id was supplied.
  """
  if uri is None and video_id is None:
    raise YouTubeError('You must provide at least a uri or a video_id '
                       'to the GetYouTubeVideoResponseFeed() method')
  if not uri and video_id:
    uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses')
  return self.Get(
      uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString)
def GetYouTubeVideoResponseEntry(self, uri):
  """Fetch a single YouTubeVideoResponseEntry.

  Args:
    uri: A string URI of the video response entry to fetch.

  Returns:
    The retrieved YouTubeVideoResponseEntry.
  """
  parser = gdata.youtube.YouTubeVideoResponseEntryFromString
  return self.Get(uri, converter=parser)
def GetYouTubeSubscriptionFeed(self, uri=None, username='default'):
  """Fetch the subscription feed of a user.

  Args:
    uri: Optional string URI of the subscription feed to fetch. When
      omitted, the feed is derived from username.
    username: Optional string username; defaults to the currently
      authenticated user.

  Returns:
    The retrieved YouTubeSubscriptionFeed.
  """
  target_uri = uri
  if target_uri is None:
    target_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username,
                               'subscriptions')
  return self.Get(
      target_uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString)
def GetYouTubeSubscriptionEntry(self, uri):
  """Fetch a single YouTubeSubscriptionEntry.

  Args:
    uri: A string URI of the subscription entry to fetch.

  Returns:
    The retrieved YouTubeSubscriptionEntry.
  """
  parser = gdata.youtube.YouTubeSubscriptionEntryFromString
  return self.Get(uri, converter=parser)
def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None):
  """Fetch the feed of videos related to a given video.

  One of uri or video_id is required.

  Args:
    uri: Optional string URI of the related-video feed to fetch.
    video_id: Optional string ID of the video whose related feed is to
      be fetched.

  Returns:
    The retrieved related videos as a YouTubeVideoFeed.

  Raises:
    YouTubeError: Neither a uri nor a video_id was supplied.
  """
  if uri is None and video_id is None:
    raise YouTubeError('You must provide at least a uri or a video_id '
                       'to the GetYouTubeRelatedVideoFeed() method')
  if not uri and video_id:
    uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related')
  return self.Get(
      uri, converter=gdata.youtube.YouTubeVideoFeedFromString)
def GetTopRatedVideoFeed(self):
  """Return the 'top_rated' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_TOP_RATED_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetMostViewedVideoFeed(self):
  """Return the 'most_viewed' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_MOST_VIEWED_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetRecentlyFeaturedVideoFeed(self):
  """Return the 'recently_featured' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_RECENTLY_FEATURED_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetWatchOnMobileVideoFeed(self):
  """Return the 'watch_on_mobile' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetTopFavoritesVideoFeed(self):
  """Return the 'top_favorites' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_TOP_FAVORITES_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetMostRecentVideoFeed(self):
  """Return the 'most_recent' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_MOST_RECENT_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetMostDiscussedVideoFeed(self):
  """Return the 'most_discussed' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_MOST_DISCUSSED_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetMostLinkedVideoFeed(self):
  """Return the 'most_linked' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_MOST_LINKED_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetMostRespondedVideoFeed(self):
  """Return the 'most_responded' standard feed as a YouTubeVideoFeed."""
  feed_uri = YOUTUBE_STANDARD_MOST_RESPONDED_URI
  return self.GetYouTubeVideoFeed(feed_uri)
def GetUserFavoritesFeed(self, username='default'):
  """Fetch the favorites feed of a user.

  Args:
    username: Optional string username whose favorites feed is to be
      fetched. Defaults to the currently authenticated user.

  Returns:
    The retrieved favorites as a YouTubeVideoFeed.
  """
  uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username,
                      'favorites')
  return self.GetYouTubeVideoFeed(uri)
def InsertVideoEntry(self, video_entry, filename_or_handle,
                     youtube_username='default',
                     content_type='video/quicktime'):
  """Upload a new video to YouTube using the direct upload mechanism.

  Needs authentication.

  Args:
    video_entry: The YouTubeVideoEntry to upload.
    filename_or_handle: A file-like object or file name where the video
        will be read from.
    youtube_username: An optional string representing the username into whose
        account this video is to be uploaded to. Defaults to the currently
        authenticated user.
    content_type: An optional string representing internet media type
        (a.k.a. mime type) of the media object. Currently the YouTube API
        supports these types:
          o video/mpeg
          o video/quicktime
          o video/x-msvideo
          o video/mp4
          o video/x-flv

  Returns:
    The newly created YouTubeVideoEntry if successful.

  Raises:
    AssertionError: video_entry must be a gdata.youtube.VideoEntry instance.
    YouTubeError: An error occurred trying to read the video file provided.
    gdata.service.RequestError: An error occurred trying to upload the video
        to the API server.
  """
  # We need to perform a series of checks on the video_entry and on the
  # file that we plan to upload, such as checking whether we have a valid
  # video_entry and that the file is the correct type and readable, prior
  # to performing the actual POST request.
  try:
    assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry))
  except AssertionError:
    # Re-raise as a structured YouTubeError so callers get a consistent
    # error payload (status/body/reason dict) instead of a bare assert.
    raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT,
        'body':'`video_entry` must be a gdata.youtube.VideoEntry instance',
        'reason':'Found %s, not VideoEntry' % type(video_entry)
        })
  # Only the minor type (e.g. 'mp4' from 'video/mp4') is validated against
  # the supported-upload whitelist; the major type is not checked.
  majtype, mintype = content_type.split('/')
  try:
    assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES)
  except (ValueError, AssertionError):
    raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE,
        'body':'This is not a valid content type: %s' % content_type,
        'reason':'Accepted content types: %s' %
            ['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]})
  # Accept either an existing file path (str/unicode) or any object with a
  # .read() method; anything else is rejected below.
  if (isinstance(filename_or_handle, (str, unicode))
      and os.path.exists(filename_or_handle)):
    mediasource = gdata.MediaSource()
    mediasource.setFile(filename_or_handle, content_type)
  elif hasattr(filename_or_handle, 'read'):
    import StringIO
    # Rewind if possible, then buffer the whole stream in memory so the
    # content length is known for the upload request.
    if hasattr(filename_or_handle, 'seek'):
      filename_or_handle.seek(0)
    file_handle = StringIO.StringIO(filename_or_handle.read())
    name = 'video'
    if hasattr(filename_or_handle, 'name'):
      name = filename_or_handle.name
    mediasource = gdata.MediaSource(file_handle, content_type,
        content_length=file_handle.len, file_name=name)
  else:
    raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body':
        '`filename_or_handle` must be a path name or a file-like object',
        'reason': ('Found %s, not path name or object '
                   'with a .read() method' % type(filename_or_handle))})
  upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username,
                             'uploads')
  # The Slug header carries the file name for the upload; it is removed
  # again in the finally block so it does not leak into later requests.
  self.additional_headers['Slug'] = mediasource.file_name

  # Using a nested try statement to retain Python 2.4 compatibility
  try:
    try:
      return self.Post(video_entry, uri=upload_uri, media_source=mediasource,
                       converter=gdata.youtube.YouTubeVideoEntryFromString)
    except gdata.service.RequestError, e:
      raise YouTubeError(e.args[0])
  finally:
    del(self.additional_headers['Slug'])
def CheckUploadStatus(self, video_entry=None, video_id=None):
  """Check upload status on a recently uploaded video entry.

  Needs authentication. Either video_entry or video_id must be provided.

  Args:
    video_entry: An optional YouTubeVideoEntry whose upload status to check
    video_id: An optional string representing the ID of the uploaded video
        whose status is to be checked.

  Returns:
    A tuple containing (video_upload_state, detailed_message) or None if
    no status information is found.

  Raises:
    YouTubeError: You must provide at least a video_entry or a video_id to the
        CheckUploadStatus() method.
  """
  if video_entry is None and video_id is None:
    raise YouTubeError('You must provide at least a uri or a video_id '
                       'to the CheckUploadStatus() method')
  elif video_id and not video_entry:
    # Fetch the entry when only an ID was supplied.
    video_entry = self.GetYouTubeVideoEntry(video_id=video_id)

  # Status information only exists on draft entries: the entry must carry
  # an app:control element, with a draft flag set to 'yes', and a yt:state
  # extension element. If any link in that chain is missing the method
  # falls through every guard and implicitly returns None.
  control = video_entry.control
  if control is not None:
    draft = control.draft
    if draft is not None:
      if draft.text == 'yes':
        # The first extension element is expected to be yt:state; its
        # 'name' attribute holds the upload state value.
        yt_state = control.extension_elements[0]
        if yt_state is not None:
          state_value = yt_state.attributes['name']
          message = ''
          if yt_state.text is not None:
            message = yt_state.text
          return (state_value, message)
def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI):
  """Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry.

  Needs authentication.

  Args:
    video_entry: The YouTubeVideoEntry to upload (meta-data only).
    uri: An optional string representing the URI from where to fetch the
        token information. Defaults to the YOUTUBE_UPLOADTOKEN_URI.

  Returns:
    A tuple containing the URL to which to post your video file, along
    with the youtube token that must be included with your upload in the
    form of: (post_url, youtube_token).
  """
  try:
    response = self.Post(video_entry, uri)
  except gdata.service.RequestError, e:
    # Normalize transport errors into the module's YouTubeError type.
    raise YouTubeError(e.args[0])

  # The response body is a small XML document containing <url> and <token>
  # children; extract both.
  # NOTE(review): if the server response lacks a <url> or <token> element,
  # the corresponding local is never bound and the return raises
  # NameError — presumably the API always includes both; verify.
  tree = ElementTree.fromstring(response)

  for child in tree:
    if child.tag == 'url':
      post_url = child.text
    elif child.tag == 'token':
      youtube_token = child.text
  return (post_url, youtube_token)
def UpdateVideoEntry(self, video_entry):
  """Updates a video entry's meta-data.

  Needs authentication.

  Args:
    video_entry: The YouTubeVideoEntry to update, containing updated
        meta-data.

  Returns:
    An updated YouTubeVideoEntry on success or None.
  """
  # Locate the entry's 'edit' link; its href is the PUT target.
  # NOTE(review): if the entry carries no 'edit' link, edit_uri is never
  # bound and the return below raises NameError — presumably entries
  # fetched from the API always include one; verify.
  for link in video_entry.link:
    if link.rel == 'edit':
      edit_uri = link.href
  return self.Put(video_entry, uri=edit_uri,
                  converter=gdata.youtube.YouTubeVideoEntryFromString)
def DeleteVideoEntry(self, video_entry):
  """Deletes a video entry.

  Needs authentication.

  Args:
    video_entry: The YouTubeVideoEntry to be deleted.

  Returns:
    True if entry was deleted successfully.
  """
  # Locate the entry's 'edit' link; its href is the DELETE target.
  # NOTE(review): if the entry carries no 'edit' link, edit_uri is never
  # bound and the return below raises NameError — presumably entries
  # fetched from the API always include one; verify.
  for link in video_entry.link:
    if link.rel == 'edit':
      edit_uri = link.href
  return self.Delete(edit_uri)
def AddRating(self, rating_value, video_entry):
  """Post a rating for a video entry.

  Needs authentication.

  Args:
    rating_value: The integer value for the rating (between 1 and 5).
    video_entry: The YouTubeVideoEntry to be rated.

  Returns:
    True if the rating was added successfully.

  Raises:
    YouTubeError: rating_value must be between 1 and 5 in AddRating().
  """
  if not 1 <= rating_value <= 5:
    raise YouTubeError('rating_value must be between 1 and 5 in AddRating()')

  rating = gdata.youtube.Rating(min='1', max='5')
  rating.extension_attributes['name'] = 'value'
  rating.extension_attributes['value'] = str(rating_value)
  entry = gdata.GDataEntry()
  entry.extension_elements.append(rating)

  # The rating link on the entry is the POST target.
  for video_link in video_entry.link:
    if video_link.rel == YOUTUBE_RATING_LINK_REL:
      rating_uri = video_link.href

  return self.Post(entry, uri=rating_uri)
def AddComment(self, comment_text, video_entry):
  """Post a comment on a video entry.

  Needs authentication. Note that each comment that is posted must contain
  the video entry that it is to be posted to.

  Args:
    comment_text: A string representing the text of the comment.
    video_entry: The YouTubeVideoEntry to be commented on.

  Returns:
    True if the comment was added successfully.
  """
  entry = gdata.youtube.YouTubeVideoCommentEntry(
      content=atom.Content(text=comment_text))
  target_uri = video_entry.comments.feed_link[0].href
  return self.Post(entry, uri=target_uri)
def AddVideoResponse(self, video_id_to_respond_to, video_response):
  """Post a video as a response to another video.

  Needs authentication.

  Args:
    video_id_to_respond_to: A string representing the ID of the video to be
        responded to.
    video_response: YouTubeVideoEntry to be posted as a response.

  Returns:
    True if video response was posted successfully.
  """
  response_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to,
                               'responses')
  return self.Post(video_response, uri=response_uri)
def DeleteVideoResponse(self, video_id, response_video_id):
  """Remove a video response from a video.

  Needs authentication.

  Args:
    video_id: A string representing the ID of video that contains the
        response.
    response_video_id: A string representing the ID of the video that was
        posted as a response.

  Returns:
    True if video response was deleted succcessfully.
  """
  target_uri = '%s/%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses',
                                response_video_id)
  return self.Delete(target_uri)
def AddComplaint(self, complaint_text, complaint_term, video_id):
  """File a complaint against a particular video entry.

  Needs authentication.

  Args:
    complaint_text: A string representing the complaint text.
    complaint_term: A string representing the complaint category term.
    video_id: A string representing the ID of YouTubeVideoEntry to
        complain about.

  Returns:
    True if posted successfully.

  Raises:
    YouTubeError: Your complaint_term is not valid.
  """
  if complaint_term not in YOUTUBE_COMPLAINT_CATEGORY_TERMS:
    raise YouTubeError('Your complaint_term is not valid')

  entry = gdata.GDataEntry(
      content=atom.Content(text=complaint_text),
      category=[atom.Category(term=complaint_term,
                              scheme=YOUTUBE_COMPLAINT_CATEGORY_SCHEME)])
  target_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'complaints')
  return self.Post(entry, target_uri)
def AddVideoEntryToFavorites(self, video_entry, username='default'):
  """Post a video entry to a user's favorites feed.

  Needs authentication.

  Args:
    video_entry: The YouTubeVideoEntry to add.
    username: An optional string representing the username to whose favorite
        feed you wish to add the entry. Defaults to the currently
        authenticated user.

  Returns:
    The posted YouTubeVideoEntry if successfully posted.
  """
  favorites_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites')
  return self.Post(video_entry, favorites_uri,
                   converter=gdata.youtube.YouTubeVideoEntryFromString)
def DeleteVideoEntryFromFavorites(self, video_id, username='default'):
  """Remove a video entry from a user's favorites feed.

  Needs authentication.

  Args:
    video_id: A string representing the ID of the video that is to be removed
    username: An optional string representing the username of the user's
        favorite feed. Defaults to the currently authenticated user.

  Returns:
    True if entry was successfully deleted.
  """
  target_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites',
                                video_id)
  return self.Delete(target_uri)
def AddPlaylist(self, playlist_title, playlist_description,
                playlist_private=None):
  """Create a new playlist in the currently authenticated user's account.

  Needs authentication.

  Args:
    playlist_title: A string representing the title for the new playlist.
    playlist_description: A string representing the description of the
        playlist.
    playlist_private: An optional boolean, set to True if the playlist is
        to be private.

  Returns:
    The YouTubePlaylistEntry if successfully posted.
  """
  entry = gdata.youtube.YouTubePlaylistEntry(
      title=atom.Title(text=playlist_title),
      description=gdata.youtube.Description(text=playlist_description))
  # Mark the playlist as private only when explicitly requested.
  if playlist_private:
    entry.private = gdata.youtube.Private()

  target_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, 'default',
                             'playlists')
  return self.Post(entry, target_uri,
                   converter=gdata.youtube.YouTubePlaylistEntryFromString)
def UpdatePlaylist(self, playlist_id, new_playlist_title,
                   new_playlist_description, playlist_private=None,
                   username='default'):
  """Replace a playlist's meta-data.

  Needs authentication.

  Args:
    playlist_id: A string representing the ID of the playlist to be updated.
    new_playlist_title: A string representing a new title for the playlist.
    new_playlist_description: A string representing a new description for the
        playlist.
    playlist_private: An optional boolean, set to True if the playlist is
        to be private.
    username: An optional string representing the username whose playlist is
        to be updated. Defaults to the currently authenticated user.

  Returns:
    A YouTubePlaylistEntry if the update was successful.
  """
  entry = gdata.youtube.YouTubePlaylistEntry(
      title=atom.Title(text=new_playlist_title),
      description=gdata.youtube.Description(text=new_playlist_description))
  # Mark the playlist as private only when explicitly requested.
  if playlist_private:
    entry.private = gdata.youtube.Private()

  target_uri = '%s/%s/playlists/%s' % (YOUTUBE_USER_FEED_URI, username,
                                       playlist_id)
  return self.Put(entry, target_uri,
                  converter=gdata.youtube.YouTubePlaylistEntryFromString)
def DeletePlaylist(self, playlist_uri):
  """Remove a playlist from the currently authenticated user's playlists.

  Needs authentication.

  Args:
    playlist_uri: A string representing the URI of the playlist that is
        to be deleted.

  Returns:
    True if successfully deleted.
  """
  target_uri = playlist_uri
  return self.Delete(target_uri)
def AddPlaylistVideoEntryToPlaylist(
    self, playlist_uri, video_id, custom_video_title=None,
    custom_video_description=None):
  """Append a video to a playlist, optionally with a custom title and
  description that are shown only on the playlist.

  Needs authentication.

  Args:
    playlist_uri: A string representing the URI of the playlist to which this
        video entry is to be added.
    video_id: A string representing the ID of the video entry to add.
    custom_video_title: An optional string representing a custom title for
        the video (only shown on the playlist).
    custom_video_description: An optional string representing a custom
        description for the video (only shown on the playlist).

  Returns:
    A YouTubePlaylistVideoEntry if successfully posted.
  """
  entry = gdata.youtube.YouTubePlaylistVideoEntry(
      atom_id=atom.Id(text=video_id))
  # Only override title/description when custom values were supplied.
  if custom_video_title:
    entry.title = atom.Title(text=custom_video_title)
  if custom_video_description:
    entry.description = gdata.youtube.Description(
        text=custom_video_description)

  return self.Post(entry, playlist_uri,
                   converter=gdata.youtube.YouTubePlaylistVideoEntryFromString)
def UpdatePlaylistVideoEntryMetaData(
    self, playlist_uri, playlist_entry_id, new_video_title,
    new_video_description, new_video_position):
  """Replace the meta data of a YouTubePlaylistVideoEntry.

  Needs authentication.

  Args:
    playlist_uri: A string representing the URI of the playlist that contains
        the entry to be updated.
    playlist_entry_id: A string representing the ID of the entry to be
        updated.
    new_video_title: A string representing the new title for the video entry.
    new_video_description: A string representing the new description for
        the video entry.
    new_video_position: An integer representing the new position on the
        playlist for the video.

  Returns:
    A YouTubePlaylistVideoEntry if the update was successful.
  """
  entry = gdata.youtube.YouTubePlaylistVideoEntry(
      title=atom.Title(text=new_video_title),
      description=gdata.youtube.Description(text=new_video_description),
      position=gdata.youtube.Position(text=str(new_video_position)))

  target_uri = playlist_uri + '/' + playlist_entry_id
  return self.Put(entry, target_uri,
                  converter=gdata.youtube.YouTubePlaylistVideoEntryFromString)
def DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id):
  """Remove a video entry from a playlist.

  Needs authentication.

  Args:
    playlist_uri: A URI representing the playlist from which the playlist
        video entry is to be removed from.
    playlist_video_entry_id: A string representing id of the playlist video
        entry that is to be removed.

  Returns:
    True if entry was successfully deleted.
  """
  target_uri = '%s/%s' % (playlist_uri, playlist_video_entry_id)
  return self.Delete(target_uri)
def AddSubscriptionToChannel(self, username_to_subscribe_to,
                             my_username = 'default'):
  """Subscribe the authenticated user to another user's channel.

  Needs authentication.

  Args:
    username_to_subscribe_to: A string representing the username of the
        channel to which we want to subscribe to.
    my_username: An optional string representing the name of the user which
        we want to subscribe. Defaults to currently authenticated user.

  Returns:
    A new YouTubeSubscriptionEntry if successfully posted.
  """
  entry = gdata.youtube.YouTubeSubscriptionEntry(
      category=atom.Category(scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
                             term='channel'),
      username=gdata.youtube.Username(text=username_to_subscribe_to))

  target_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
                             'subscriptions')
  return self.Post(entry, target_uri,
                   converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def AddSubscriptionToFavorites(self, username, my_username = 'default'):
  """Subscribe the authenticated user to another user's favorites feed.

  Needs authentication.

  Args:
    username: A string representing the username of the user's favorite feed
        to subscribe to.
    my_username: An optional string representing the username of the user
        that is to be subscribed. Defaults to currently authenticated user.

  Returns:
    A new YouTubeSubscriptionEntry if successful.
  """
  entry = gdata.youtube.YouTubeSubscriptionEntry(
      category=atom.Category(scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
                             term='favorites'),
      username=gdata.youtube.Username(text=username))

  target_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
                             'subscriptions')
  return self.Post(entry, target_uri,
                   converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def AddSubscriptionToQuery(self, query, my_username = 'default'):
  """Subscribe the authenticated user to a keyword query.

  Needs authentication.

  Args:
    query: A string representing the keyword query to subscribe to.
    my_username: An optional string representing the username of the user
        that is to be subscribed. Defaults to currently authenticated user.

  Returns:
    A new YouTubeSubscriptionEntry if successful.
  """
  entry = gdata.youtube.YouTubeSubscriptionEntry(
      category=atom.Category(scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
                             term='query'),
      query_string=gdata.youtube.QueryString(text=query))

  target_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
                             'subscriptions')
  return self.Post(entry, target_uri,
                   converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def DeleteSubscription(self, subscription_uri):
  """Remove a subscription from the authenticated user's account.

  Needs authentication.

  Args:
    subscription_uri: A string representing the URI of the subscription that
        is to be deleted.

  Returns:
    True if deleted successfully.
  """
  target_uri = subscription_uri
  return self.Delete(target_uri)
def AddContact(self, contact_username, my_username='default'):
  """Post a new contact to the authenticated user's contact feed.

  Needs authentication.

  Args:
    contact_username: A string representing the username of the contact
        that you wish to add.
    my_username: An optional string representing the username to whose
        contact the new contact is to be added.

  Returns:
    A YouTubeContactEntry if added successfully.
  """
  # New contacts are always filed under the 'Friends' category.
  entry = gdata.youtube.YouTubeContactEntry(
      category=atom.Category(
          scheme = 'http://gdata.youtube.com/schemas/2007/contact.cat',
          term = 'Friends'),
      username=gdata.youtube.Username(text=contact_username))

  target_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
                             'contacts')
  return self.Post(entry, target_uri,
                   converter=gdata.youtube.YouTubeContactEntryFromString)
def UpdateContact(self, contact_username, new_contact_status,
                  new_contact_category, my_username='default'):
  """Replace a contact's status and category.

  Needs authentication.

  Args:
    contact_username: A string representing the username of the contact
        that is to be updated.
    new_contact_status: A string representing the new status of the contact.
        This can either be set to 'accepted' or 'rejected'.
    new_contact_category: A string representing the new category for the
        contact, either 'Friends' or 'Family'.
    my_username: An optional string representing the username of the user
        whose contact feed we are modifying. Defaults to the currently
        authenticated user.

  Returns:
    A YouTubeContactEntry if updated succesfully.

  Raises:
    YouTubeError: New contact status must be within the accepted values. Or
        new contact category must be within the accepted categories.
  """
  # Validate both inputs up-front before building the entry.
  if new_contact_status not in YOUTUBE_CONTACT_STATUS:
    raise YouTubeError('New contact status must be one of %s' %
                       (' '.join(YOUTUBE_CONTACT_STATUS)))
  if new_contact_category not in YOUTUBE_CONTACT_CATEGORY:
    raise YouTubeError('New contact category must be one of %s' %
                       (' '.join(YOUTUBE_CONTACT_CATEGORY)))

  entry = gdata.youtube.YouTubeContactEntry(
      category=atom.Category(
          scheme='http://gdata.youtube.com/schemas/2007/contact.cat',
          term=new_contact_category),
      status=gdata.youtube.Status(text=new_contact_status))

  target_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
                                'contacts', contact_username)
  return self.Put(entry, target_uri,
                  converter=gdata.youtube.YouTubeContactEntryFromString)
def DeleteContact(self, contact_username, my_username='default'):
  """Remove a contact from a user's contact feed.

  Needs authentication.

  Args:
    contact_username: A string representing the username of the contact
        that is to be deleted.
    my_username: An optional string representing the username of the user's
        contact feed from which to delete the contact. Defaults to the
        currently authenticated user.

  Returns:
    True if the contact was deleted successfully
  """
  target_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
                                'contacts', contact_username)
  return self.Delete(target_uri)
def _GetDeveloperKey(self):
  """Getter for the Developer Key property.

  Returns:
    The developer key (with the leading 'key=' prefix stripped) if one has
    been set via the 'X-GData-Key' header, otherwise None.
  """
  if 'X-GData-Key' not in self.additional_headers:
    return None
  # Drop the 'key=' prefix that _SetDeveloperKey prepends.
  return self.additional_headers['X-GData-Key'][4:]
def _SetDeveloperKey(self, developer_key):
  """Setter for the Developer Key property.

  Stores the key in the 'X-GData-Key' request header, prefixed with
  'key=' as the API expects.
  """
  self.additional_headers['X-GData-Key'] = 'key=' + developer_key

developer_key = property(
    _GetDeveloperKey, _SetDeveloperKey,
    doc="""The Developer Key property""")
def _GetClientId(self):
  """Getter for the Client Id property.

  Returns:
    The client id from the 'X-Gdata-Client' header if one has been set,
    otherwise None.
  """
  if 'X-Gdata-Client' not in self.additional_headers:
    return None
  return self.additional_headers['X-Gdata-Client']

def _SetClientId(self, client_id):
  """Setter for the Client Id property.

  Stores the id in the 'X-Gdata-Client' request header.
  """
  self.additional_headers['X-Gdata-Client'] = client_id

client_id = property(
    _GetClientId, _SetClientId,
    doc="""The ClientId property""")
def Query(self, uri):
  """Perform a query and return the resulting feed or entry.

  Args:
    uri: A string representing the URI of the feed that is to be queried.

  Returns:
    On success, a tuple in the form:
    (boolean succeeded=True, ElementTree._Element result)
    On failure, a tuple in the form:
    (boolean succeeded=False, {'status': HTTP status code from server,
                               'reason': HTTP reason from the server,
                               'body': HTTP body of the server's response})
  """
  return self.Get(uri)
def YouTubeQuery(self, query):
  """Perform a YouTube-specific query and return the resulting feed or entry.

  Args:
    query: A Query object or one if its sub-classes (YouTubeVideoQuery,
        YouTubeUserQuery or YouTubePlaylistQuery).

  Returns:
    Depending on the type of Query object submitted returns either a
    YouTubeVideoFeed, a YouTubeUserFeed, a YouTubePlaylistFeed. If the
    Query object provided was not YouTube-related, a tuple is returned.
    On success the tuple will be in this form:
    (boolean succeeded=True, ElementTree._Element result)
    On failure, the tuple will be in this form:
    (boolean succeeded=False, {'status': HTTP status code from server,
                               'reason': HTTP reason from the server,
                               'body': HTTP body of the server response})
  """
  result = self.Query(query.ToUri())
  # Dispatch on the query type; checks run in the same order as the
  # original isinstance chain, so subclass relationships resolve the same.
  converters = (
      (YouTubeVideoQuery, gdata.youtube.YouTubeVideoFeedFromString),
      (YouTubeUserQuery, gdata.youtube.YouTubeUserFeedFromString),
      (YouTubePlaylistQuery, gdata.youtube.YouTubePlaylistFeedFromString))
  for query_class, parser in converters:
    if isinstance(query, query_class):
      return parser(result.ToString())
  return result
class YouTubeVideoQuery(gdata.service.Query):
    """Subclasses gdata.service.Query to represent a YouTube Data API query.

    Attributes are set dynamically via properties. Properties correspond to
    the standard Google Data API query parameters with YouTube Data API
    extensions. Please refer to the API documentation for details.

    Attributes:
      vq: The vq parameter, which is only supported for video feeds, specifies a
        search query term. Refer to API documentation for further details.
      orderby: The orderby parameter, which is only supported for video feeds,
        specifies the value that will be used to sort videos in the search
        result set. Valid values for this parameter are relevance, published,
        viewCount and rating.
      time: The time parameter, which is only available for the top_rated,
        top_favorites, most_viewed, most_discussed, most_linked and
        most_responded standard feeds, restricts the search to videos uploaded
        within the specified time. Valid values for this parameter are today
        (1 day), this_week (7 days), this_month (1 month) and all_time.
        The default value for this parameter is all_time.
      format: The format parameter specifies that videos must be available in a
        particular video format. Refer to the API documentation for details.
      racy: The racy parameter allows a search result set to include restricted
        content as well as standard content. Valid values for this parameter
        are include and exclude. By default, restricted content is excluded.
      lr: The lr parameter restricts the search to videos that have a title,
        description or keywords in a specific language. Valid values for the lr
        parameter are ISO 639-1 two-letter language codes.
      restriction: The restriction parameter identifies the IP address that
        should be used to filter videos that can only be played in specific
        countries.
      location: A string of geo coordinates. Note that this is not used when the
        search is performed but rather to filter the returned videos for ones
        that match to the location entered.
    """

    def __init__(self, video_id=None, feed_type=None, text_query=None,
                 params=None, categories=None):
        if feed_type in YOUTUBE_STANDARDFEEDS:
            feed = 'http://%s/feeds/standardfeeds/%s' % (YOUTUBE_SERVER,
                                                         feed_type)
        elif feed_type in ('responses', 'comments') and video_id:
            # BUGFIX: the original condition was
            #   feed_type is 'responses' or feed_type is 'comments' and video_id
            # which (a) compared strings by identity (`is`), an implementation
            # detail of interning, and (b) parsed as `a or (b and c)`, so a
            # 'responses' query without a video_id wrongly took this branch
            # and formatted None into the URL.
            feed = 'http://%s/feeds/videos/%s/%s' % (YOUTUBE_SERVER, video_id,
                                                     feed_type)
        else:
            feed = 'http://%s/feeds/videos' % (YOUTUBE_SERVER,)
        gdata.service.Query.__init__(self, feed, text_query=text_query,
                                     params=params, categories=categories)

    def _GetVideoQuery(self):
        if 'vq' in self:
            return self['vq']
        else:
            return None

    def _SetVideoQuery(self, val):
        self['vq'] = val

    vq = property(_GetVideoQuery, _SetVideoQuery,
                  doc="""The video query (vq) query parameter""")

    def _GetOrderBy(self):
        if 'orderby' in self:
            return self['orderby']
        else:
            return None

    def _SetOrderBy(self, val):
        # Custom 'relevance_lang_XX' values are allowed in addition to the
        # fixed set of orderings.
        if val not in YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS:
            if not val.startswith('relevance_lang_'):
                raise YouTubeError('OrderBy must be one of: %s ' %
                                   ' '.join(YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS))
        self['orderby'] = val

    orderby = property(_GetOrderBy, _SetOrderBy,
                       doc="""The orderby query parameter""")

    def _GetTime(self):
        if 'time' in self:
            return self['time']
        else:
            return None

    def _SetTime(self, val):
        if val not in YOUTUBE_QUERY_VALID_TIME_PARAMETERS:
            raise YouTubeError('Time must be one of: %s ' %
                               ' '.join(YOUTUBE_QUERY_VALID_TIME_PARAMETERS))
        self['time'] = val

    time = property(_GetTime, _SetTime,
                    doc="""The time query parameter""")

    def _GetFormat(self):
        if 'format' in self:
            return self['format']
        else:
            return None

    def _SetFormat(self, val):
        if val not in YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS:
            raise YouTubeError('Format must be one of: %s ' %
                               ' '.join(YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS))
        self['format'] = val

    format = property(_GetFormat, _SetFormat,
                      doc="""The format query parameter""")

    def _GetRacy(self):
        if 'racy' in self:
            return self['racy']
        else:
            return None

    def _SetRacy(self, val):
        if val not in YOUTUBE_QUERY_VALID_RACY_PARAMETERS:
            raise YouTubeError('Racy must be one of: %s ' %
                               ' '.join(YOUTUBE_QUERY_VALID_RACY_PARAMETERS))
        self['racy'] = val

    racy = property(_GetRacy, _SetRacy,
                    doc="""The racy query parameter""")

    def _GetLanguageRestriction(self):
        if 'lr' in self:
            return self['lr']
        else:
            return None

    def _SetLanguageRestriction(self, val):
        self['lr'] = val

    lr = property(_GetLanguageRestriction, _SetLanguageRestriction,
                  doc="""The lr (language restriction) query parameter""")

    def _GetIPRestriction(self):
        if 'restriction' in self:
            return self['restriction']
        else:
            return None

    def _SetIPRestriction(self, val):
        self['restriction'] = val

    restriction = property(_GetIPRestriction, _SetIPRestriction,
                           doc="""The restriction query parameter""")

    def _GetLocation(self):
        if 'location' in self:
            return self['location']
        else:
            return None

    def _SetLocation(self, val):
        self['location'] = val

    location = property(_GetLocation, _SetLocation,
                        doc="""The location query parameter""")
class YouTubeUserQuery(YouTubeVideoQuery):
    """Subclasses YouTubeVideoQuery to perform user-specific queries.

    Attributes are set dynamically via properties. Properties correspond to
    the standard Google Data API query parameters with YouTube Data API
    extensions.
    """

    def __init__(self, username=None, feed_type=None, subscription_id=None,
                 text_query=None, params=None, categories=None):
        uploads_favorites_playlists = ('uploads', 'favorites', 'playlists')

        # BUGFIX: string comparisons below used `is`, which tests object
        # identity and only happens to work for interned literals; use `==`.
        if feed_type == 'subscriptions' and subscription_id and username:
            feed = "http://%s/feeds/users/%s/%s/%s" % (YOUTUBE_SERVER, username,
                                                       feed_type,
                                                       subscription_id)
        elif feed_type == 'subscriptions' and not subscription_id and username:
            feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username,
                                                    feed_type)
        elif feed_type in uploads_favorites_playlists:
            feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username,
                                                    feed_type)
        else:
            feed = "http://%s/feeds/users" % (YOUTUBE_SERVER,)
        # BUGFIX: this used to call YouTubeVideoQuery.__init__(self, feed, ...),
        # but that constructor's first positional parameter is `video_id`, so
        # the user feed URL computed above was silently discarded and the query
        # fell through to the generic /feeds/videos feed. Initialize the base
        # Query directly with the feed we just built.
        gdata.service.Query.__init__(self, feed, text_query=text_query,
                                     params=params, categories=categories)
class YouTubePlaylistQuery(YouTubeVideoQuery):
    """Subclasses YouTubeVideoQuery to perform playlist-specific queries.

    Attributes are set dynamically via properties. Properties correspond to
    the standard Google Data API query parameters with YouTube Data API
    extensions.
    """

    def __init__(self, playlist_id, text_query=None, params=None,
                 categories=None):
        if playlist_id:
            feed = "http://%s/feeds/playlists/%s" % (YOUTUBE_SERVER,
                                                     playlist_id)
        else:
            feed = "http://%s/feeds/playlists" % (YOUTUBE_SERVER,)
        # BUGFIX: this used to call YouTubeVideoQuery.__init__(self, feed, ...),
        # which binds `feed` to that constructor's `video_id` parameter and
        # throws the playlist feed URL away. Initialize the base Query
        # directly with the computed feed instead.
        gdata.service.Query.__init__(self, feed, text_query=text_query,
                                     params=params, categories=categories)
| apache-2.0 |
peterbe/moztrap | tests/model/tags/api/test_tags_resource.py | 2 | 12279 | """
Tests for TagResource api.
"""
from tests.case.api.crud import ApiCrudCases
import logging
mozlogger = logging.getLogger('moztrap.test')
class TagResourceTest(ApiCrudCases):
    """CRUD API tests for the ``tag`` resource.

    Inherits the generic create/read/update/delete cases from ApiCrudCases
    and adds tag-specific cases for the product-field change rules and for
    list filtering.
    """

    @property
    def factory(self):
        """The model factory for this object."""
        return self.F.TagFactory()

    @property
    def resource_name(self):
        # API endpoint name used to build list/detail URLs.
        return "tag"

    @property
    def permission(self):
        """String defining the permission required for
        Create, Update, and Delete."""
        return "tags.manage_tags"

    @property
    def new_object_data(self):
        """Generates a dictionary containing the field names and auto-generated
        values needed to create a unique object.

        The output of this method can be sent in the payload parameter of a
        POST message.
        """
        # Side effect: stores a product fixture for tests that want to make
        # the tag product-specific.
        self.product_fixture = self.F.ProductFactory.create()
        fields = {
            u"name": unicode(
                "test_%s_%s" % (self.datetime, self.resource_name)),
            u"description": unicode(
                "test %s %s" % (self.datetime, self.resource_name)),
            u"product": None,
        }
        return fields

    def backend_object(self, id):
        """Returns the object from the backend, so you can query it's values in
        the database for validation.
        """
        return self.model.Tag.everything.get(id=id)

    def backend_data(self, backend_obj):
        """Query's the database for the object's current values. Output is a
        dictionary that should match the result of getting the object's detail
        via the API, and can be used to verify API output.

        Note: both keys and data should be in unicode
        """
        actual = {}
        actual[u"id"] = unicode(str(backend_obj.id))
        actual[u"name"] = unicode(backend_obj.name)
        actual[u"description"] = unicode(backend_obj.description)
        actual[u"resource_uri"] = unicode(
            self.get_detail_url(self.resource_name, str(backend_obj.id)))
        if backend_obj.product:
            actual[u"product"] = unicode(
                self.get_detail_url("product", str(backend_obj.product.id)))
        else:
            actual[u"product"] = None
        return actual

    # additional test cases, if any

    # create cases
    # test_create handles creating a global tag

    def test_create_tag_with_product(self):
        """Create a product-specific tag."""
        mozlogger.info('test_create_tag_with_product')

        # get data for creation
        fields = self.new_object_data
        fields['product'] = unicode(
            self.get_detail_url("product", str(self.product_fixture.id)))

        # do the create
        res = self.post(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
        )

        # make sure response included detail uri
        object_id = self._id_from_uri(res.headers["location"])
        self.assertIsNotNone(object_id)

        # get data from backend
        backend_obj = self.backend_object(object_id)
        created_object_data = self.clean_backend_data(backend_obj)

        # compare backend data to desired data
        self.maxDiff = None
        self.assertEqual(created_object_data, fields)

    # edit cases

    @property
    def _invalid_product_msg(self):
        # Error text returned by the API when a product change is rejected;
        # the error-path tests compare against this verbatim.
        return str("Tag's Product may not be changed unless the tag is not " +
                   "in use, the product is being set to None, or the product " +
                   "matches the existing cases.")

    def test_edit_no_product(self):
        """Test that edit works even without the product."""
        mozlogger.info('test_edit_no_product')

        # create fixture
        fixture1 = self.factory
        backend_obj = self.backend_object(fixture1.id)
        obj_id = str(fixture1.id)
        fields = self.new_object_data
        product = fields.pop(u'product')

        # do put
        res = self.put(
            self.get_detail_url(self.resource_name, obj_id),
            params=self.credentials,
            data=fields
        )

        # make sure object has been updated in the database
        fields[u'product'] = product
        fixture1 = self.refresh(fixture1)
        backend_data = self.clean_backend_data(fixture1)

        self.maxDiff = None
        self.assertEqual(fields, backend_data)

    def test_edit_global_tag_in_use_change_description(self):
        """Editing the description on a global tag should not un-set it's cases."""
        mozlogger.info('test_edit_global_tag_in_use_change_description')

        # create fixtures: tag1 tagged onto two of three caseversions
        tag1 = self.factory
        tc1 = self.F.CaseVersionFactory()
        tc1.tags = [tag1]
        tc2 = self.F.CaseVersionFactory()
        tc2.tags = [tag1]
        tc3 = self.F.CaseVersionFactory()
        tag1 = self.refresh(tag1)
        self.assertEqual(len(tag1.caseversions.all()), 2)

        # generate new values
        fields = self.backend_data(tag1)
        fields[u'description'] = 'an updated description'

        # do put
        res = self.put(
            self.get_detail_url(self.resource_name, str(tag1.id)),
            params=self.credentials,
            data=fields,
        )

        # make sure object has been updated in the database
        tag1 = self.refresh(tag1)
        self.maxDiff = None
        backend_data = self.backend_data(tag1)
        self.assertEqual(fields, backend_data)

        # make sure test cases still have their tags
        self.assertEqual(len(tag1.caseversions.all()), 2)
        self.assertTrue(tc1 in tag1.caseversions.all())
        self.assertTrue(tc2 in tag1.caseversions.all())

    def test_edit_global_tag_in_use_change_product_error(self):
        """If a global tag is in-use by cases of multiple products, it's product field should be read-only."""
        mozlogger.info('test_edit_global_tag_in_use_change_product_error')

        # create fixtures: the two tagged caseversions have different products
        tag1 = self.factory
        tc1 = self.F.CaseVersionFactory()
        tc1.tags = [tag1]
        tc2 = self.F.CaseVersionFactory()
        tc2.tags = [tag1]
        tc3 = self.F.CaseVersionFactory()
        tag1 = self.refresh(tag1)
        self.assertEqual(len(tag1.caseversions.all()), 2)

        # generate new values
        fields = self.backend_data(tag1)
        product1 = self.F.ProductFactory()
        fields[u'product'] = unicode(
            self.get_detail_url("product", str(product1.id)))

        # do put; expect a 400 with the canonical error message
        res = self.put(
            self.get_detail_url(self.resource_name, str(tag1.id)),
            params=self.credentials,
            data=fields,
            status=400,
        )

        self.assertEqual(res.text, self._invalid_product_msg)

    def test_edit_global_tag_in_use_change_product_matches_caseversion(self):
        """If a global tag is in use by cases all having the same product, the product field may be changed to match."""
        mozlogger.info(
            'test_edit_global_tag_in_use_change_product_matches_caseversion')

        # create fixtures: both caseversions share one product
        tag1 = self.factory
        tc1 = self.F.CaseVersionFactory()
        tc1.tags = [tag1]
        tc2 = self.F.CaseVersionFactory()
        tc2.productversion = tc1.productversion  # make it be same product
        tc2.save()
        tc2.tags = [tag1]
        tag1 = self.refresh(tag1)
        self.assertEqual(len(tag1.caseversions.all()), 2)

        # generate new values: product set to the shared caseversion product
        fields = self.backend_data(tag1)
        fields[u'product'] = unicode(
            self.get_detail_url("product", str(tc1.productversion.product.id)))

        # do put
        res = self.put(
            self.get_detail_url(self.resource_name, str(tag1.id)),
            params=self.credentials,
            data=fields,
        )

        # make sure object has been updated in the database
        tag1 = self.refresh(tag1)
        self.maxDiff = None
        backend_data = self.backend_data(tag1)
        self.assertEqual(fields, backend_data)

        # make sure test cases still have their tags
        self.assertEqual(len(tag1.caseversions.all()), 2)
        self.assertTrue(tc1 in tag1.caseversions.all())
        self.assertTrue(tc2 in tag1.caseversions.all())

    def test_edit_global_tag_not_in_use_change_product(self):
        """If a global tag is not in-use by any caseversions, the product field should be editable."""
        mozlogger.info('test_edit_global_tag_not_in_use_change_product')

        # create fixtures
        tag1 = self.factory
        self.assertEqual(len(tag1.caseversions.all()), 0)

        # generate new values
        fields = self.backend_data(tag1)
        product1 = self.F.ProductFactory()
        fields[u'product'] = unicode(
            self.get_detail_url("product", str(product1.id)))

        # do put
        res = self.put(
            self.get_detail_url(self.resource_name, str(tag1.id)),
            params=self.credentials,
            data=fields,
        )

        # make sure object has been updated in the database
        tag1 = self.refresh(tag1)
        self.maxDiff = None
        backend_data = self.backend_data(tag1)
        self.assertEqual(fields, backend_data)

    def test_edit_product_tag_in_use_change_product_error(self):
        """If a product-specific tag is in use, trying to change it's product should error."""
        mozlogger.info('test_edit_product_tag_in_use_change_product_error')

        # create fixtures
        tag1 = self.factory
        tc1 = self.F.CaseVersionFactory()
        tag1.product = tc1.productversion.product  # make tag product-specific
        tc1.tags = [tag1]  # use the tag
        self.assertEqual(len(tag1.caseversions.all()), 1)

        # generate new values: a different product
        fields = self.backend_data(tag1)
        product1 = self.F.ProductFactory()
        fields[u'product'] = unicode(
            self.get_detail_url("product", str(product1.id)))

        # do put; expect a 400 with the canonical error message
        res = self.put(
            self.get_detail_url(self.resource_name, str(tag1.id)),
            params=self.credentials,
            data=fields,
            status=400,
        )

        self.assertEqual(res.text, self._invalid_product_msg)

    def test_edit_product_tag_in_use_remove_product(self):
        """If a product-specific tag is in use and you reset the product to None. caseversions should stay tagged."""
        mozlogger.info("test_edit_product_tag_in_use_remove_product")

        # create fixtures
        tag1 = self.factory
        tc1 = self.F.CaseVersionFactory()
        tag1.product = tc1.productversion.product  # make tag product-specific
        tag1.save()
        tc1.tags = [tag1]  # use the tag
        self.assertEqual(len(tag1.caseversions.all()), 1)

        # generate new values: product reset to None (always allowed)
        fields = self.backend_data(tag1)
        fields[u'product'] = None

        # do put
        res = self.put(
            self.get_detail_url(self.resource_name, str(tag1.id)),
            params=self.credentials,
            data=fields,
        )

        # make sure object has been updated in the database
        tag1 = self.refresh(tag1)
        self.maxDiff = None
        backend_data = self.backend_data(tag1)
        self.assertEqual(fields, backend_data)

        # make sure caseversions are still tagged
        self.assertEqual(len(tag1.caseversions.all()), 1)

    # filtering

    def test_filter_by_name(self):
        """Filter tags by name."""
        mozlogger.info("test_filter_by_name")

        # create fixtures
        tag1 = self.factory
        tag2 = self.factory
        tag2.name = u'unique name'
        tag2.save()

        # do test
        self._test_filter_list_by(u'name', u'unique name', 1)

    def test_filter_by_product(self):
        """Filter tags by product."""
        mozlogger.info("test_filter_by_product")

        # create fixtures: two tags on product1, one global tag
        product1 = self.F.ProductFactory()
        tag1 = self.factory
        tag1.product = product1
        tag1.save()
        tag2 = self.factory
        tag2.product = product1
        tag2.save()
        tag3 = self.factory

        # do test
        self._test_filter_list_by(u'product', str(product1.id), 2)
        self._test_filter_list_by(u'product', None, 1)
| bsd-2-clause |
s2oBCN/selenium | py/test/selenium/webdriver/common/driver_element_finding_tests.py | 60 | 7171 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import InvalidSelectorException
class DriverElementFindingTests(unittest.TestCase):
    """Element-finding tests for the WebDriver locator strategies.

    NOTE(review): self.driver and self.webserver are not created here; they
    appear to be injected by the surrounding test harness — confirm against
    the runner that instantiates this TestCase.
    """

    # --- single-element finders -------------------------------------------

    def testShouldFindElementById(self):
        self._loadSimplePage()
        e = self.driver.find_element_by_id("oneline")
        self.assertEqual("A single line of text", e.text)

    def testShouldFindElementByLinkText(self):
        self._loadSimplePage()
        e = self.driver.find_element_by_link_text("link with leading space")
        self.assertEqual("link with leading space", e.text)

    def testShouldFindElementByName(self):
        self._loadPage("nestedElements")
        e = self.driver.find_element_by_name("div1")
        self.assertEqual("hello world hello world", e.text)

    def testShouldFindElementByXPath(self):
        self._loadSimplePage()
        e = self.driver.find_element_by_xpath("/html/body/p[1]")
        self.assertEqual("A single line of text", e.text)

    def testShouldFindElementByClassName(self):
        self._loadPage("nestedElements")
        e = self.driver.find_element_by_class_name("one")
        self.assertEqual("Span with class of one", e.text)

    def testShouldFindElementByPartialLinkText(self):
        self._loadSimplePage()
        e = self.driver.find_element_by_partial_link_text("leading space")
        self.assertEqual("link with leading space", e.text)

    def testShouldFindElementByTagName(self):
        self._loadSimplePage()
        e = self.driver.find_element_by_tag_name("H1")
        self.assertEqual("Heading", e.text)

    # --- multi-element finders --------------------------------------------

    def testShouldFindElementsById(self):
        self._loadPage("nestedElements")
        elements = self.driver.find_elements_by_id("test_id")
        self.assertEqual(2, len(elements))

    def testShouldFindElementsByLinkText(self):
        self._loadPage("nestedElements")
        elements = self.driver.find_elements_by_link_text("hello world")
        self.assertEqual(12, len(elements))

    def testShouldFindElementsByName(self):
        self._loadPage("nestedElements")
        elements = self.driver.find_elements_by_name("form1")
        self.assertEqual(4, len(elements))

    def testShouldFindElementsByXPath(self):
        self._loadPage("nestedElements")
        elements = self.driver.find_elements_by_xpath("//a")
        self.assertEqual(12, len(elements))

    def testShouldFindElementsByClassName(self):
        self._loadPage("nestedElements")
        elements = self.driver.find_elements_by_class_name("one")
        self.assertEqual(3, len(elements))

    def testShouldFindElementsByPartialLinkText(self):
        self._loadPage("nestedElements")
        elements = self.driver.find_elements_by_partial_link_text("world")
        self.assertEqual(12, len(elements))

    def testShouldFindElementsByTagName(self):
        self._loadPage("nestedElements")
        elements = self.driver.find_elements_by_tag_name("a")
        self.assertEqual(12, len(elements))

    def testShouldBeAbleToFindAnElementByCssSelector(self):
        self._loadPage("xhtmlTest")
        element = self.driver.find_element_by_css_selector("div.content")
        self.assertEqual("div", element.tag_name.lower())
        self.assertEqual("content", element.get_attribute("class"))

    def testShouldBeAbleToFindMultipleElementsByCssSelector(self):
        self._loadPage("frameset")
        elements = self.driver.find_elements_by_css_selector("frame")
        self.assertEqual(7, len(elements))
        elements = self.driver.find_elements_by_css_selector("frame#sixth")
        self.assertEqual(1, len(elements))
        self.assertEqual("frame", elements[0].tag_name.lower())
        self.assertEqual("sixth", elements[0].get_attribute("id"))

    # --- invalid locator handling: each bad input must raise --------------

    def testShouldThrowAnErrorIfUserPassesInInteger(self):
        self._loadSimplePage()
        try:
            self.driver.find_element(By.ID, 333333)
            self.fail("Should have thrown WebDriver Exception")
        except InvalidSelectorException:
            pass  # This is expected

    def testShouldThrowAnErrorIfUserPassesInTuple(self):
        self._loadSimplePage()
        try:
            self.driver.find_element((By.ID, 333333))
            self.fail("Should have thrown WebDriver Exception")
        except InvalidSelectorException:
            pass  # This is expected

    def testShouldThrowAnErrorIfUserPassesInNone(self):
        self._loadSimplePage()
        try:
            self.driver.find_element(By.ID, None)
            self.fail("Should have thrown WebDriver Exception")
        except InvalidSelectorException:
            pass  # This is expected

    def testShouldThrowAnErrorIfUserPassesInInvalidBy(self):
        self._loadSimplePage()
        try:
            self.driver.find_element("css", "body")
            self.fail("Should have thrown WebDriver Exception")
        except InvalidSelectorException:
            pass  # This is expected

    def testShouldThrowAnErrorIfUserPassesInIntegerWhenFindElements(self):
        self._loadSimplePage()
        try:
            self.driver.find_elements(By.ID, 333333)
            self.fail("Should have thrown WebDriver Exception")
        except InvalidSelectorException:
            pass  # This is expected

    def testShouldThrowAnErrorIfUserPassesInTupleWhenFindElements(self):
        self._loadSimplePage()
        try:
            self.driver.find_elements((By.ID, 333333))
            self.fail("Should have thrown WebDriver Exception")
        except InvalidSelectorException:
            pass  # This is expected

    def testShouldThrowAnErrorIfUserPassesInNoneWhenFindElements(self):
        self._loadSimplePage()
        try:
            self.driver.find_elements(By.ID, None)
            self.fail("Should have thrown WebDriver Exception")
        except InvalidSelectorException:
            pass  # This is expected

    def testShouldThrowAnErrorIfUserPassesInInvalidByWhenFindElements(self):
        self._loadSimplePage()
        try:
            self.driver.find_elements("css", "body")
            self.fail("Should have thrown WebDriver Exception")
        except InvalidSelectorException:
            pass  # This is expected

    # --- helpers -----------------------------------------------------------

    def _pageURL(self, name):
        # Resolve a fixture page name to its URL on the test web server.
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| apache-2.0 |
mbauskar/omnitech-frappe | frappe/website/context.py | 27 | 3030 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.doctype.website_settings.website_settings import get_website_settings
from frappe.website.template import build_template
from frappe.website.router import get_route_info
from frappe.website.utils import can_cache
def get_context(path):
    """Return the rendered website context for *path*.

    Uses a per-path, per-language cache ("page_context") when caching is
    allowed both globally and for the resolved page (context.no_cache).
    """
    context = None
    context_cache = {}

    def add_data_path(context):
        # Record the requested path in context.data so it survives caching;
        # done on every request because `path` is request-specific.
        if not context.data:
            context.data = {}
        context.data["path"] = path

    # try from cache
    if can_cache():
        context_cache = frappe.cache().hget("page_context", path) or {}
        context = context_cache.get(frappe.local.lang, None)

    if not context:
        # Cache miss: resolve the route, build the full context, then store
        # it back keyed by the current language (unless the page opts out).
        context = get_route_info(path)
        context = build_context(context)
        add_data_path(context)

        if can_cache(context.no_cache):
            context_cache[frappe.local.lang] = context
            frappe.cache().hset("page_context", path, context_cache)
    else:
        add_data_path(context)

    # Flatten the per-request data dict into the top-level context.
    context.update(context.data or {})

    return context
def build_context(context):
    """get_context method of doc or module is supposed to render content templates and push it into context"""
    context = frappe._dict(context)
    # Layer website settings and site-config overrides on top of route info.
    context.update(get_website_settings())
    context.update(frappe.local.conf.get("website_context") or {})

    # provide doc
    if context.doc:
        context.update(context.doc.as_dict())
        context.update(context.doc.website)
        if hasattr(context.doc, "get_context"):
            ret = context.doc.get_context(context)

            if ret:
                context.update(ret)

        # Pull page-level flags from the doc when the route didn't set them.
        for prop in ("no_cache", "no_sitemap"):
            if not prop in context:
                context[prop] = getattr(context.doc, prop, False)

    elif context.controller:
        # No doc: the page is backed by a controller module instead.
        module = frappe.get_module(context.controller)

        if module:
            # get config fields
            for prop in ("base_template_path", "template", "no_cache", "no_sitemap",
                         "condition_field"):
                if hasattr(module, prop):
                    context[prop] = getattr(module, prop)

            if hasattr(module, "get_context"):
                ret = module.get_context(context)
                if ret:
                    context.update(ret)

            if hasattr(module, "get_children"):
                context.children = module.get_children(context)

    add_metatags(context)

    # determine templates to be used
    if not context.base_template_path:
        app_base = frappe.get_hooks("base_template")
        context.base_template_path = app_base[0] if app_base else "templates/base.html"

    # Render the content template now unless the page template IS the base
    # template or the content was already rendered upstream.
    if context.get("base_template_path") != context.get("template") and not context.get("rendered"):
        context.data = build_template(context)

    return context
def add_metatags(context):
    """Fill Open Graph / Twitter Card metatags in-place from context["metatags"].

    Defaults twitter:card and og:type when absent, and mirrors name,
    description and image into their og:/twitter: counterparts when set.
    No-op when the context has no metatags.
    """
    tags = context.get("metatags")
    if not tags:
        return

    # Only fill defaults for keys that are missing entirely.
    tags.setdefault("twitter:card", "summary")
    tags.setdefault("og:type", "article")

    # Mirror each source field (when truthy) into both social variants.
    mirror_map = (
        ("name", ("og:title", "twitter:title")),
        ("description", ("og:description", "twitter:description")),
        ("image", ("og:image", "twitter:image:src")),
    )
    for source_key, target_keys in mirror_map:
        value = tags.get(source_key)
        if value:
            for target_key in target_keys:
                tags[target_key] = value
| mit |
mrozekma/Sprint | Retrospective.py | 1 | 1171 | from collections import OrderedDict
from Project import Project
from Sprint import Sprint
from utils import *
from stasis.ActiveRecord import ActiveRecord, link
DEFAULT_CATEGORIES = ['Product Management', 'Infrastructure', 'Teamwork', 'Release Planning', 'Scrum Process', 'Engineering Practices']
class Category(ActiveRecord):
    """A retrospective category (e.g. "Teamwork") belonging to one sprint."""
    # Foreign-key style link resolving self.sprintid to a Sprint instance.
    sprint = link(Sprint, 'sprintid')

    def __init__(self, sprintid, name, id = None):
        # id is None for unsaved records; the ActiveRecord layer assigns it
        # on save.
        ActiveRecord.__init__(self)
        self.id = id
        self.sprintid = sprintid
        self.name = name

    @staticmethod
    def table():
        # Backing table name used by ActiveRecord for persistence.
        return 'retrospective_categories'
class Entry(ActiveRecord):
    """One retrospective item; `good` marks it as positive vs. negative."""
    # Foreign-key style link resolving self.catid to a Category instance.
    category = link(Category, 'catid')

    def __init__(self, catid, body, good, id = None):
        # id is None for unsaved records; the ActiveRecord layer assigns it
        # on save.
        ActiveRecord.__init__(self)
        self.id = id
        self.catid = catid
        self.body = body
        self.good = good

    @staticmethod
    def table():
        # Backing table name used by ActiveRecord for persistence.
        return 'retrospective_entries'
class Retrospective:
    """Namespace for loading and seeding a sprint's retrospective data."""

    @staticmethod
    def load(sprint):
        """Return an OrderedDict mapping each of the sprint's categories to
        its list of entries, or None when the sprint has no categories."""
        entriesByCategory = OrderedDict(
            (category, Entry.loadAll(catid = category.id))
            for category in Category.loadAll(sprintid = sprint.id))
        return entriesByCategory or None

    @staticmethod
    def init(sprint):
        """Create the default retrospective categories for the sprint."""
        for categoryName in DEFAULT_CATEGORIES:
            Category(sprint.id, categoryName).save()
| mit |
luisibanez/vista-debian-med-package | Packages/Scheduling/Testing/RAS/SCMain01_test.py | 6 | 1609 | '''
Created on Jun 14, 2012
@author: bcaine
This is the main Scheduling script that calls the underlying
scheduling functional tests located in SCMain01_suite
'''
import sys
sys.path = ['./RAS/lib'] + ['./dataFiles'] + ['../Python/vista'] + sys.path
import SCMain01_suite
import TestHelper
def main():
    """Run the Scheduling (SC) functional test suite.

    Builds suite bookkeeping via TestHelper, starts the monitor, runs the
    ten SCMain01 tests in order, stops the monitor, and funnels any failure
    through the driver's standard exception/else/finally handlers.
    """
    test_suite_driver = TestHelper.TestSuiteDriver(__file__)
    test_suite_details = test_suite_driver.generate_test_suite_details()
    try:
        test_suite_driver.pre_test_suite_run(test_suite_details)
        # Begin Tests
        SCMain01_suite.startmon(test_suite_details)
        SCMain01_suite.sc_test001(test_suite_details)
        SCMain01_suite.sc_test002(test_suite_details)
        SCMain01_suite.sc_test003(test_suite_details)
        SCMain01_suite.sc_test004(test_suite_details)
        SCMain01_suite.sc_test005(test_suite_details)
        SCMain01_suite.sc_test006(test_suite_details)
        SCMain01_suite.sc_test007(test_suite_details)
        SCMain01_suite.sc_test008(test_suite_details)
        SCMain01_suite.sc_test009(test_suite_details)
        SCMain01_suite.sc_test010(test_suite_details)
        SCMain01_suite.stopmon(test_suite_details)
        # End Tests
        test_suite_driver.post_test_suite_run(test_suite_details)
    except Exception, e:
        # Python 2 syntax; record the failure in the suite report.
        test_suite_driver.exception_handling(test_suite_details, e)
    else:
        test_suite_driver.try_else_handling(test_suite_details)
    finally:
        test_suite_driver.finally_handling(test_suite_details)
        test_suite_driver.end_method_handling(test_suite_details)

if __name__ == '__main__':
    main()
| apache-2.0 |
iABC2XYZ/abc | DM_Twiss/TwissTrain3.py | 2 | 4285 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 13:37:16 2017
Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function:
Check that the Distribution generation method is right.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from Orth import LambdaR,OrthTrans
from TFOrth import TFLambdaR,TFOrthTrans
plt.close('all')

# Target Twiss parameters for the x-plane; gammaX follows from the
# Courant-Snyder relation (1 + alpha^2) / beta.
emitX=4.8
alphaX=-2.3
betaX=15.3
gammaX=(1.+alphaX**2)/betaX

# Decomposition of the Twiss ellipse: diagonal scaling and orthogonal
# rotation built by the project's Orth helpers.
diagRX=LambdaR(emitX,alphaX,betaX,gammaX)
PX=OrthTrans(emitX,alphaX,betaX,gammaX)

# Sample a matched phase-space distribution: map unit Gaussian samples Z
# through PX * inv(diagRX).
numPart=np.int32(1e5)
Z=np.random.randn(2,numPart)
X=np.matmul(np.matmul(PX,np.linalg.inv(diagRX)),Z)

plt.figure(1)
plt.plot(X[0,:],X[1,:],'r.')
plt.axis('equal')

##
def WeightP(shape):
    """Return a trainable TF variable of the given shape, initialized from a
    truncated normal with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def WeightLambda2D():
    # Build a 2x2 diagonal matrix [[lambda1, 0], [0, lambda2]] whose two
    # diagonal entries are independent trainable scalars; the off-diagonal
    # zeros are constants, so gradients only flow to the diagonal.
    lambda1=tf.Variable(tf.random_uniform([1,1]),dtype=tf.float32)
    lambda2=tf.Variable(tf.random_uniform([1,1]),dtype=tf.float32)
    O=tf.reshape(tf.constant(0,tf.float32),[1,1])
    # Stack columns then concatenate side by side to form the 2x2 matrix.
    LambdaR1=tf.concat([lambda1,O],0)
    LambdaR2=tf.concat([O,lambda2],0)
    LambdaR=tf.concat([LambdaR1,LambdaR2],1)
    return LambdaR
# Build the TF1 graph: learn a transform P_1 followed by a diagonal scaling
# LambdaR that maps the sampled distribution back onto the unit circle
# (x0^2 + x1^2 == 2 on average for a matched 2D Gaussian).
P_1=WeightP([2,2])
LambdaR=WeightLambda2D()

xI=tf.placeholder(tf.float32,[2,None])
xL1=tf.matmul(P_1,xI)
xO=tf.matmul(LambdaR,xL1)

# Penalize deviation of the squared radius from 2.
xR=xO[0]**2+xO[1]**2
lossXR=(xR-2.)**2

rateLearn=5e-4
optXR=tf.train.AdamOptimizer(rateLearn)
trainXR=optXR.minimize(lossXR)

meanLossXR=tf.reduce_mean(lossXR)

sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
sess.run(tf.global_variables_initializer())

# Mini-batch training over random contiguous slices of the particle set.
# NOTE(review): the source had its indentation stripped; the progress
# prints are placed inside the loop here (matching the per-iteration
# monitoring pattern used elsewhere in this script) — confirm intent.
sizeBatch=64
for _ in xrange(30000):
    startBatch=np.random.randint(0,high=numPart-sizeBatch-1)
    xFeed=X[:,startBatch:startBatch+sizeBatch:]
    sess.run(trainXR,feed_dict={xI:xFeed})
    #print(sess.run(LambdaR))
    #print('---------------------------')
    print(sess.run(meanLossXR,feed_dict={xI:X}))
    print('_______________________________________________')

'''
zReal=sess.run(xO,feed_dict={xI:X})
plt.figure(2)
plt.clf()
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
plt.pause(0.001)
'''

# Inspect the learned diagonal; the product of its entries approximates the
# inverse of the distribution's emittance.
LambdaRGet=sess.run(LambdaR)
print(LambdaRGet)
print('---------------------------')
print(1./(LambdaRGet[0,0]*LambdaRGet[1,1]))

zReal=sess.run(xO,feed_dict={xI:X})
plt.figure(2)
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')

'''
print(sess.run(P_1))
print(sess.run(LambdaR))
print(sess.run(xR,feed_dict={xI:X}))
'''
'''
wEmit=tf.Variable([emitX])
wAlpha=tf.Variable([alphaX])
wBeta=tf.Variable([betaX])
wGamma=tf.Variable([gammaX])
'''
'''
wEmit=tf.Variable([13.])
wAlpha=tf.Variable([1.3])
wBeta=tf.Variable([0.5])
#wGamma=tf.Variable([0.5])
wGamma=(1.+wAlpha**2)/wBeta
xH=tf.placeholder(tf.float32,[2,None])
diagR,diagRT=TFLambdaR(wEmit,wAlpha,wBeta,wGamma)
P,PI=TFOrthTrans(wEmit,wAlpha,wBeta,wGamma)
zH=tf.matmul(tf.matmul(diagR,PI),xH)
R=zH[0]**2+zH[1]**2
#lossR=tf.abs(R-2.e-6)
lossR=R
optR=tf.train.GradientDescentOptimizer(0.01)
trainR=optR.minimize(lossR)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
#sess.run(diagR)
print(sess.run(R,feed_dict={xH:X}))
numIter=10
recEmit=np.zeros(numIter)
recAlpha=np.zeros(numIter)
recBeta=np.zeros(numIter)
recGamma=np.zeros(numIter)
recLoss=np.zeros(numIter)
for _ in xrange(numIter):
sess.run(trainR,feed_dict={xH:X})
recEmit[_]=sess.run(wEmit)
recAlpha[_]=sess.run(wAlpha)
recBeta[_]=sess.run(wBeta)
recGamma[_]=sess.run(wGamma)
recLoss[_]=sess.run(tf.reduce_mean(lossR))
print(recEmit)
print(recAlpha)
#print(sess.run(R,feed_dict={xH:X}))
plt.figure('emit')
plt.plot(recEmit)
plt.figure('alpha')
plt.plot(recAlpha)
plt.figure('beta')
plt.plot(recBeta)
plt.figure('gamma')
plt.plot(recGamma)
plt.figure('Loss')
plt.plot(recLoss)
'''
'''
zGet=sess.run(zH,feed_dict={xH:X})
print(sess.run(lossR,feed_dict={xH:X}))
'''
'''
plt.figure('Check')
plt.hold('on')
plt.plot(Z[0,:],Z[1,:],'bo')
plt.plot(zGet[0,:],zGet[1,:],'r.')
plt.axis('equal')
'''
'''
print(sess.run(wEmit))
print(sess.run(wAlpha))
print(sess.run(wBeta))
print(sess.run(wGamma))
print(sess.run(diagR))
print(sess.run(diagRT))
'''
#print(PX)
#print(sess.run(P))
#print(sess.run(zH,feed_dict={xH:X}))
| gpl-3.0 |
myarjunar/inasafe | safe_extras/raven/events.py | 26 | 4122 | """
raven.events
~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
import sys
from raven.utils import varmap
from raven.utils.encoding import shorten, to_unicode
from raven.utils.stacks import get_stack_info, iter_traceback_frames, \
get_culprit
__all__ = ('BaseEvent', 'Exception', 'Message', 'Query')
class BaseEvent(object):
    """Common base for Sentry event handlers.

    Holds the owning client and a module logger; concrete event types
    override to_string() and capture().
    """

    def __init__(self, client):
        self.client = client
        self.logger = logging.getLogger(__name__)

    def to_string(self, data):
        """Render *data* as a human-readable message (subclass hook)."""
        raise NotImplementedError

    def capture(self, **kwargs):
        """Build the event payload; the base implementation contributes
        nothing."""
        return {}
class Exception(BaseEvent):
    """
    Exceptions store the following metadata:

    - value: 'My exception value'
    - type: 'ClassName'
    - module '__builtin__' (i.e. __builtin__.TypeError)
    - frames: a list of serialized frames (see _get_traceback_frames)
    """
    def to_string(self, data):
        # "Type: value" summary; falls back to the bare type when the
        # exception has no value text.
        exc = data['sentry.interfaces.Exception']
        if exc['value']:
            return '%s: %s' % (exc['type'], exc['value'])
        return exc['type']

    def get_hash(self, data):
        # Grouping key: exception type plus the module/function of every
        # stack frame.
        exc = data['sentry.interfaces.Exception']
        output = [exc['type']]
        for frame in data['sentry.interfaces.Stacktrace']['frames']:
            output.append(frame['module'])
            output.append(frame['function'])
        return output

    def capture(self, exc_info=None, **kwargs):
        # exc_info may be a ready (type, value, traceback) triple, or
        # None/True to grab the currently handled exception.
        new_exc_info = False
        if not exc_info or exc_info is True:
            new_exc_info = True
            exc_info = sys.exc_info()

        if not exc_info:
            raise ValueError('No exception found')

        try:
            exc_type, exc_value, exc_traceback = exc_info

            # Serialize the traceback, truncating long values to the
            # client's configured string/list limits.
            frames = varmap(lambda k, v: shorten(v,
                string_length=self.client.string_max_length, list_length=self.client.list_max_length),
                get_stack_info(iter_traceback_frames(exc_traceback)))

            culprit = get_culprit(frames, self.client.include_paths, self.client.exclude_paths)

            exc_module = getattr(exc_type, '__module__', None)
            exc_type = getattr(exc_type, '__name__', '<unknown>')
        finally:
            if new_exc_info:
                # Drop the references we created ourselves to break the
                # frame/traceback reference cycle (Python 2 semantics).
                try:
                    del exc_info
                    del exc_traceback
                except Exception, e:
                    self.logger.exception(e)

        return {
            'level': logging.ERROR,
            'culprit': culprit,
            'sentry.interfaces.Exception': {
                'value': to_unicode(exc_value),
                'type': str(exc_type),
                'module': str(exc_module),
            },
            'sentry.interfaces.Stacktrace': {
                'frames': frames
            },
        }
class Message(BaseEvent):
    """
    Messages store the following metadata:
    - message: 'My message from %s about %s'
    - params: ('foo', 'bar')
    """
    def to_string(self, data):
        # Interpolate the parameters only when some were supplied.
        msg = data['sentry.interfaces.Message']
        params = msg.get('params')
        if params:
            return msg['message'] % params
        return msg['message']
    def get_hash(self, data):
        # Group solely by the raw (un-interpolated) message template.
        return [data['sentry.interfaces.Message']['message']]
    def capture(self, message, params=(), **kwargs):
        return {
            'sentry.interfaces.Message': {
                'message': message,
                'params': params,
            }
        }
class Query(BaseEvent):
    """
    Messages store the following metadata:
    - query: 'SELECT * FROM table'
    - engine: 'postgesql_psycopg2'
    """
    def to_string(self, data):
        return data['sentry.interfaces.Query']['query']
    def get_hash(self, data):
        # Identical queries on different engines must hash differently.
        interface = data['sentry.interfaces.Query']
        return [interface['query'], interface['engine']]
    def capture(self, query, engine, **kwargs):
        payload = {
            'query': query,
            'engine': engine,
        }
        return {'sentry.interfaces.Query': payload}
| gpl-3.0 |
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/web/domhelpers.py | 60 | 8562 | # -*- test-case-name: twisted.web.test.test_domhelpers -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A library for performing interesting tasks with DOM objects.
"""
import StringIO
from twisted.web import microdom
from twisted.web.microdom import getElementsByTagName, escape, unescape
class NodeLookupError(Exception):
    # Raised by get() when no node matches the requested id/class/pattern.
    pass
def substitute(request, node, subs):
    """
    Recursively walk the given node's children and apply classic
    ``%``-style string substitution with C{subs} to every non-empty
    text value found.
    """
    for kid in node.childNodes:
        text = getattr(kid, 'nodeValue', None)
        if text:
            kid.replaceData(0, len(text), text % subs)
        substitute(request, kid, subs)
def _get(node, nodeId, nodeAttrs=('id','class','model','pattern')):
"""
(internal) Get a node with the specified C{nodeId} as any of the C{class},
C{id} or C{pattern} attributes.
"""
if hasattr(node, 'hasAttributes') and node.hasAttributes():
for nodeAttr in nodeAttrs:
if (str (node.getAttribute(nodeAttr)) == nodeId):
return node
if node.hasChildNodes():
if hasattr(node.childNodes, 'length'):
length = node.childNodes.length
else:
length = len(node.childNodes)
for childNum in range(length):
result = _get(node.childNodes[childNum], nodeId)
if result: return result
def get(node, nodeId):
    """
    Get a node with the specified C{nodeId} as any of the C{class},
    C{id} or C{pattern} attributes. If there is no such node, raise
    L{NodeLookupError}.
    """
    result = _get(node, nodeId)
    if result: return result
    # Python 2 raise syntax: equivalent to raise NodeLookupError(nodeId).
    raise NodeLookupError, nodeId
def getIfExists(node, nodeId):
    """
    Get a node with the specified C{nodeId} as any of the C{class},
    C{id} or C{pattern} attributes. If there is no such node, return
    C{None}.
    """
    # Non-raising variant of get().
    return _get(node, nodeId)
def getAndClear(node, nodeId):
    """Get a node with the specified C{nodeId} as any of the C{class},
    C{id} or C{pattern} attributes. If there is no such node, raise
    L{NodeLookupError}. Remove all child nodes before returning.
    """
    result = get(node, nodeId)
    # get() either returns a node or raises, so this check is defensive.
    if result:
        clearNode(result)
    return result
def clearNode(node):
    """
    Remove all children from the given node, mutating its child list
    in place.
    """
    del node.childNodes[:]
def locateNodes(nodeList, key, value, noNesting=1):
    """
    Find subnodes in the given node where the given attribute
    has the given value.  With C{noNesting} true, matching nodes are
    not searched for further (nested) matches.
    """
    if not isinstance(nodeList, list):
        # A single node was passed in: recurse over its children.
        return locateNodes(nodeList.childNodes, key, value, noNesting)
    matches = []
    for node in nodeList:
        if not hasattr(node, 'getAttribute'):
            continue
        if str(node.getAttribute(key)) == value:
            matches.append(node)
            if noNesting:
                continue
        matches.extend(locateNodes(node, key, value, noNesting))
    return matches
def superSetAttribute(node, key, value):
    """Set attribute C{key} to C{value} on C{node} and every descendant.

    Nodes without a ``setAttribute`` method are silently skipped.
    """
    if not hasattr(node, 'setAttribute'):
        return
    node.setAttribute(key, value)
    if not node.hasChildNodes():
        return
    for kid in node.childNodes:
        superSetAttribute(kid, key, value)
def superPrependAttribute(node, key, value):
    """Prepend C{value} (slash-separated) to attribute C{key} on C{node}
    and every descendant; nodes without ``setAttribute`` are skipped."""
    if not hasattr(node, 'setAttribute'):
        return
    existing = node.getAttribute(key)
    combined = value + '/' + existing if existing else value
    node.setAttribute(key, combined)
    if node.hasChildNodes():
        for kid in node.childNodes:
            superPrependAttribute(kid, key, value)
def superAppendAttribute(node, key, value):
    """Append C{value} (slash-separated) to attribute C{key} on C{node}
    and every descendant; nodes without ``setAttribute`` are skipped."""
    if not hasattr(node, 'setAttribute'):
        return
    existing = node.getAttribute(key)
    combined = existing + '/' + value if existing else value
    node.setAttribute(key, combined)
    if node.hasChildNodes():
        for kid in node.childNodes:
            superAppendAttribute(kid, key, value)
def gatherTextNodes(iNode, dounescape=0, joinWith=""):
    """Visit each child node and collect its text data, if any, into a string.
    For example::
        >>> doc=microdom.parseString('<a>1<b>2<c>3</c>4</b></a>')
        >>> gatherTextNodes(doc.documentElement)
        '1234'
    With dounescape=1, also convert entities back into normal characters.
    @return: the gathered nodes as a single string
    @rtype: str
    """
    pieces = []
    pending = [iNode]
    while pending:
        current = pending.pop(0)
        text = getattr(current, 'nodeValue', None)
        if text is not None:
            if dounescape:
                text = unescape(text)
            pieces.append(text)
        # Prepend the children so they are visited before the remaining
        # siblings (document order).
        pending[:0] = current.childNodes
    return joinWith.join(pieces)
class RawText(microdom.Text):
    """This is an evil and horrible speed hack. Basically, if you have a big
    chunk of XML that you want to insert into the DOM, but you don't want to
    incur the cost of parsing it, you can construct one of these and insert it
    into the DOM. This will most certainly only work with microdom as the API
    for converting nodes to xml is different in every DOM implementation.
    This could be improved by making this class a Lazy parser, so if you
    inserted this into the DOM and then later actually tried to mutate this
    node, it would be parsed then.
    """
    def writexml(self, writer, indent="", addindent="", newl="", strip=0, nsprefixes=None, namespace=None):
        # Emit the stored markup verbatim, bypassing any XML escaping.
        writer.write("%s%s%s" % (indent, self.data, newl))
def findNodes(parent, matcher, accum=None):
    """Depth-first collect every descendant of C{parent} for which the
    predicate C{matcher} returns true."""
    collected = [] if accum is None else accum
    if not parent.hasChildNodes():
        return collected
    for kid in parent.childNodes:
        if matcher(kid):
            collected.append(kid)
        findNodes(kid, matcher, collected)
    return collected
def findNodesShallowOnMatch(parent, matcher, recurseMatcher, accum=None):
    """Like L{findNodes}, but only descend into children for which
    C{recurseMatcher} returns true."""
    collected = [] if accum is None else accum
    if parent.hasChildNodes():
        for kid in parent.childNodes:
            if matcher(kid):
                collected.append(kid)
            if recurseMatcher(kid):
                findNodesShallowOnMatch(kid, matcher, recurseMatcher, collected)
    return collected
def findNodesShallow(parent, matcher, accum=None):
    """Collect matching children of C{parent}.  Matching children are not
    searched further; non-matching subtrees are handed to L{findNodes}
    (which searches them deeply)."""
    results = [] if accum is None else accum
    if not parent.hasChildNodes():
        return results
    for kid in parent.childNodes:
        if matcher(kid):
            results.append(kid)
        else:
            findNodes(kid, matcher, results)
    return results
def findElementsWithAttributeShallow(parent, attribute):
    """
    Return an iterable of the elements which are direct children of C{parent}
    and which have the C{attribute} attribute.
    """
    # The tagName check filters out non-element nodes (text, comments).
    return findNodesShallow(parent,
                            lambda n: getattr(n, 'tagName', None) is not None and
                            n.hasAttribute(attribute))
def findElements(parent, matcher):
    """
    Return an iterable of the elements which are children of C{parent} for
    which the predicate C{matcher} returns true.
    """
    # Wrap matcher so only element nodes (those with a tagName) are tested.
    return findNodes(
        parent,
        lambda n, matcher=matcher: getattr(n, 'tagName', None) is not None and
        matcher(n))
def findElementsWithAttribute(parent, attribute, value=None):
    """
    Return the descendant elements of C{parent} that have C{attribute},
    optionally restricted to those where the attribute equals C{value}.
    """
    if value:
        return findElements(
            parent,
            lambda n, attribute=attribute, value=value:
            n.hasAttribute(attribute) and n.getAttribute(attribute) == value)
    else:
        return findElements(
            parent,
            lambda n, attribute=attribute: n.hasAttribute(attribute))
def findNodesNamed(parent, name):
    """Return all descendant nodes of C{parent} whose nodeName is C{name}."""
    return findNodes(parent, lambda n, name=name: n.nodeName == name)
def writeNodeData(node, oldio):
    """Recursively write the text data of C{node}'s descendants to the
    file-like object C{oldio}."""
    for kid in node.childNodes:
        if hasattr(kid, 'data'):
            # Text-like node: emit its data directly.
            oldio.write(kid.data)
        else:
            writeNodeData(kid, oldio)
def getNodeText(node):
    """Return the concatenated text data of all of C{node}'s descendants."""
    oldio = StringIO.StringIO()
    writeNodeData(node, oldio)
    return oldio.getvalue()
def getParents(node):
    """Return a list containing C{node} followed by its ancestors,
    innermost first (the node itself is included)."""
    chain = []
    current = node
    while current:
        chain.append(current)
        current = current.parentNode
    return chain
def namedChildren(parent, nodeName):
    """namedChildren(parent, nodeName) -> children (not descendants) of parent
    that have tagName == nodeName
    """
    matches = []
    for child in parent.childNodes:
        if getattr(child, 'tagName', '') == nodeName:
            matches.append(child)
    return matches
| gpl-2.0 |
thodoris/djangoPharma | djangoPharma/env/Lib/site-packages/pip/utils/packaging.py | 343 | 2080 | from __future__ import absolute_import
from email.parser import FeedParser
import logging
import sys
from pip._vendor.packaging import specifiers
from pip._vendor.packaging import version
from pip._vendor import pkg_resources
from pip import exceptions
logger = logging.getLogger(__name__)
def check_requires_python(requires_python):
    """
    Check if the python version in use match the `requires_python` specifier.
    Returns `True` if the version of python in use matches the requirement.
    Returns `False` if the version of python in use does not matches the
    requirement.
    Raises an InvalidSpecifier if `requires_python` have an invalid format.
    """
    if requires_python is None:
        # The package provides no information
        return True
    spec_set = specifiers.SpecifierSet(requires_python)
    # Compare only major.minor.micro of the running interpreter.
    running = version.parse('.'.join(str(part) for part in sys.version_info[:3]))
    return running in spec_set
def get_metadata(dist):
    # Wheel-based distributions expose METADATA; legacy sdists/eggs use
    # PKG-INFO.  Returns None when neither metadata file is present.
    if (isinstance(dist, pkg_resources.DistInfoDistribution) and
            dist.has_metadata('METADATA')):
        return dist.get_metadata('METADATA')
    elif dist.has_metadata('PKG-INFO'):
        return dist.get_metadata('PKG-INFO')
def check_dist_requires_python(dist):
    # Parse the distribution metadata (email/RFC 822 format) to extract
    # the Requires-Python field, then validate it against the running
    # interpreter.  Raises UnsupportedPythonVersion on mismatch.
    metadata = get_metadata(dist)
    feed_parser = FeedParser()
    feed_parser.feed(metadata)
    pkg_info_dict = feed_parser.close()
    requires_python = pkg_info_dict.get('Requires-Python')
    try:
        if not check_requires_python(requires_python):
            raise exceptions.UnsupportedPythonVersion(
                "%s requires Python '%s' but the running Python is %s" % (
                    dist.project_name,
                    requires_python,
                    '.'.join(map(str, sys.version_info[:3])),)
            )
    except specifiers.InvalidSpecifier as e:
        # A malformed specifier is not fatal: warn and keep going.
        logger.warning(
            "Package %s has an invalid Requires-Python entry %s - %s" % (
                dist.project_name, requires_python, e))
        return
| apache-2.0 |
4022321818/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/xml/dom/domreg.py | 841 | 3402 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
# This is a list of well-known implementations. Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
well_known_implementations = {
'minidom':'xml.dom.minidom',
'4DOM': 'xml.dom.DOMImplementation',
}
# DOM implementations not officially registered should register
# themselves with their
registered = {}
def registerDOMImplementation(name, factory):
    """registerDOMImplementation(name, factory)
    Register the factory function with the name. The factory function
    should return an object which implements the DOMImplementation
    interface. The factory function can either return the same object,
    or a new one (e.g. if that implementation supports some
    customization)."""
    # Later calls to getDOMImplementation(name) will invoke factory().
    registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name=None, features=()):
    """getDOMImplementation(name = None, features = ()) -> DOM implementation.
    Return a suitable DOM implementation. The name is either
    well-known, the module name of a DOM implementation, or None. If
    it is not None, imports the corresponding module and returns
    DOMImplementation object if the import succeeds.
    If name is not given, consider the available implementations to
    find one with the required feature set. If no implementation can
    be found, raise an ImportError. The features list must be a sequence
    of (feature, version) pairs which are passed to hasFeature."""
    import os
    creator = None
    # Lookup order: well-known name, explicitly registered name,
    # PYTHON_DOM environment variable, then feature-based search.
    mod = well_known_implementations.get(name)
    if mod:
        mod = __import__(mod, {}, {}, ['getDOMImplementation'])
        return mod.getDOMImplementation()
    elif name:
        return registered[name]()
    elif "PYTHON_DOM" in os.environ:
        return getDOMImplementation(name = os.environ["PYTHON_DOM"])
    # User did not specify a name, try implementations in arbitrary
    # order, returning the one that has the required features
    if isinstance(features, str):
        # Allow a compact "feature1 1.0 feature2" string form.
        features = _parse_feature_string(features)
    for creator in registered.values():
        dom = creator()
        if _good_enough(dom, features):
            return dom
    for creator in well_known_implementations.keys():
        try:
            dom = getDOMImplementation(name = creator)
        except Exception: # typically ImportError, or AttributeError
            continue
        if _good_enough(dom, features):
            return dom
    raise ImportError("no suitable DOM implementation found")
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError("bad feature name: %r" % (feature,))
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
| agpl-3.0 |
asi-uniovi/malloovia | malloovia/lpsolver.py | 1 | 33503 | # coding: utf-8
# import pandas as pd
"""Malloovia interface to LP solver"""
from typing import Sequence, List, Any
from itertools import product as cartesian_product
from inspect import ismethod
from collections import namedtuple
from uuid import uuid4
import os
import pulp # type: ignore
from pulp import (
LpContinuous,
LpInteger,
LpVariable,
lpSum,
LpProblem,
LpMinimize,
LpMaximize,
PulpSolverError,
COIN_CMD,
log,
subprocess,
)
from .solution_model import (
MallooviaHistogram,
ReservedAllocation,
AllocationInfo,
Status,
pulp_to_malloovia_status,
)
from .model import System, Workload, App, TimeUnit
LpProblem.bestBound = None # Add new attribute to pulp problems
class MallooviaLp:
"""Solves the allocation problem, using Linear Programming.
This class contains methods to create a linear programming problem
(using PuLP), to add restrictions and extra variables to it,
to solve it (using PuLP supported solvers), and to retrieve
the solution in a format amenable to further analysis and display.
The LP problem instantiates these variables:
- For reserved instances: ``Y_(_a,_ic)``, where ``Y`` is a fixed prefix,
``a`` is a string representation of each application and ``ic`` is the string
representation of each reserved instance class considered.
After solving the LP problem, the value of the variable is the number of
reserved machines of instance class `ic` for application `a`, for the whole
reservation period.
- For on-demand instances: ``X_(_a,_ic,_l)``, where ``X`` is a fixed prefix,
``a`` is a string representation of each application, ``ic`` is the string
representation of each on-demand instance class considered and ``l`` is a
string representation of a "workload tuple", which is a tuple of numbers,
e.g: ``(1230, 442, 123)``, each one representing the workload of one of the apps.
After solving the LP problem, the value of the variable is the number of
on-demand machines of instance class `ic` deployed for application `a` at a
timeslot which has a workload prediction equal to the tuple ``l``.
Intended usage:
1. Instantiate the class (see constructor parameters below).
2. Call object's ``.create_problem()``.
3. Call object's ``.solve()``.
4. Retrieve solution by calling object's ``.get_allocation()`` to get the solution
for all variables, or ``.get_reserved_allocation()`` to get ony the number of
reserved instances of each type.
5. Retrieve the cost of the solution via object's ``.get_solution()``.
You can use object's property ``pulp_problem`` to access the PuLP problem object
which represents the linear programming problem, to inspect or save it if required.
"""
def __init__(
self,
system: System,
workloads: Sequence[Workload],
preallocation: ReservedAllocation = None,
relaxed: bool = False,
) -> None:
"""Constructor:
Args:
system: namedtuple containing "name", "apps", "instance_classes"
and "performances" for the problem to solve.
workloads: list of workloads, one per app. Each workload
is a namedtuple which contains a reference to the app, and a sequence
of N numbers which is the prediction for the next N timeslots. This
sequence must have the same length for all workloads in the list.
preallocation: number of reserved instances which are
preallocated. In phase I this parameter can be omitted (defaults to ``None``),
and in phase II it should contain the object returned by
``get_reserved_allocation()`` after solving phase I.
relaxed: if ``True``, the problem uses continuous variables
instead of integer ones.
"""
self.system = system
# Ensure that the workloads received are ordered by the field app in the same
# ordering than the list system.apps
self.workloads = reorder_workloads(workloads, system.apps)
if preallocation is None:
self.fixed_vms = None
else:
assert len(preallocation.instance_classes) == len(
preallocation.vms_number
), (
"preallocation is wrong, the number of elements in instance_classes and in "
"vms_number must be the same"
)
self.fixed_vms = dict(
zip(preallocation.instance_classes, preallocation.vms_number)
)
self.relaxed = relaxed
self.pulp_problem: Any = None
self.load_hist = get_load_hist_from_load(self.workloads)
self.solver_called = False
# CookedData stores some info required when building the problem, so that
# this data is gathered only once, during __init__, and used when required
CookedData = namedtuple( # pylint: disable=invalid-name
"CookedData",
[
"map_dem",
"map_res",
"instances_res",
"instances_dem",
"limiting_sets",
"instance_prices",
"instance_perfs",
],
)
# Separate the instances in two types: reserved and on-demand
# Also create dictionaries for fast lookup of price and performance, converted
# to the timeslot units
instances_res = []
instances_dem = []
instance_prices = {}
instance_perfs = {}
timeslot_length = self.workloads[0].time_unit
for iclass in system.instance_classes:
instance_prices[iclass] = iclass.price / TimeUnit(iclass.time_unit).to(
timeslot_length
)
for app in self.system.apps:
instance_perfs[iclass, app] = self.system.performances.values[
iclass, app
] / TimeUnit(self.system.performances.time_unit).to(timeslot_length)
if iclass.is_reserved:
instances_res.append(iclass)
else:
instances_dem.append(iclass)
# Compute the set of LimitingSets (clouds), extracted
# from the instances
limiting_sets = set()
for iclass in system.instance_classes:
limiting_sets.update(iclass.limiting_sets)
# Store cooked data
self.cooked = CookedData(
map_dem=None, # To be updated later by create_variables
map_res=None,
instances_res=instances_res,
instances_dem=instances_dem,
instance_prices=instance_prices,
instance_perfs=instance_perfs,
limiting_sets=limiting_sets,
)
def _create_variables(self) -> None:
"""Creates the set of variables Y* and X* of the PuLP problem.
Override it if you need to create extra variables (first use
``super().create_variables()`` to call the base class method)."""
if self.relaxed:
kind = LpContinuous
else:
kind = LpInteger
# List all combinations of apps and instances and workloads
comb_res = cartesian_product(self.system.apps, self.cooked.instances_res)
comb_dem = cartesian_product(
self.system.apps, self.cooked.instances_dem, self.load_hist.keys()
)
map_res = LpVariable.dicts("Y", comb_res, 0, None, kind)
map_dem = LpVariable.dicts("X", comb_dem, 0, None, kind)
self.cooked = self.cooked._replace(map_res=map_res, map_dem=map_dem)
def _cost_function(self) -> None:
"""Adds to the LP problem the function to optimize.
The function to optimize is the cost of the deployment. It is computed as
the sum of all Y_a_ic multiplied by the length of the period and by the price/timeslot
of each reserved instance class plus all X_a_ic_l multiplied by the price/timeslot
of each on-demand instance class and by the number of times that workload ``l``
appears in the period."""
period_length = sum(self.load_hist.values())
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[_a, _ic]
* self.cooked.instance_prices[_ic]
* period_length
for _a in self.system.apps
for _ic in self.cooked.instances_res
]
+ [
self.cooked.map_dem[_a, _ic, _l]
* self.cooked.instance_prices[_ic]
* self.load_hist[_l]
for _a in self.system.apps
for _ic in self.cooked.instances_dem
for _l in self.load_hist.keys()
]
),
"Objective: minimize cost",
)
def create_problem(self) -> "MallooviaLp":
"""Creates the PuLP problem with all variables and restrictions.
Returns:
pulp.LpProblem: instance of the PuLP problem.
"""
# Create the linear programming problem
self.pulp_problem = LpProblem(self.system.name, LpMinimize)
# Once we have the variables represented as tuples, we use
# the tuples to create the linear programming variables for pulp
self._create_variables()
# Create the goal function
self._cost_function()
# Add all restrictions indicated with functions *_restriction
# in this class
self._add_all_restrictions()
return self
def _add_all_restrictions(self) -> None:
"""This functions uses introspection to discover all implemented
methods whose name ends with ``_restriction``, and runs them all."""
for name in dir(self):
attribute = getattr(self, name)
if ismethod(attribute) and name.endswith("_restriction"):
attribute()
def performance_restriction(self) -> None:
"""Adds performance restriction to the problem.
This restriction forces, for each workload tuple, the performance of the
solution to be greater than or equal to that workload level for
all applications.
"""
for i, app in enumerate(self.system.apps):
perf_reserved = []
for ins in self.cooked.instances_res:
perf_reserved.append(
self.cooked.map_res[app, ins] * self.cooked.instance_perfs[ins, app]
)
for load in self.load_hist.keys():
perf_ondemand = []
for ins in self.cooked.instances_dem:
perf_ondemand.append(
self.cooked.map_dem[app, ins, load]
* self.cooked.instance_perfs[ins, app]
)
self.pulp_problem += (
lpSum(perf_reserved + perf_ondemand) >= load[i],
"Minimum performance for application {} "
"when workload is {}".format(app, load),
)
return
def limit_instances_per_class_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_vms`` per instance class restriction.
If the ``ic`` instance has a ``max_vms`` attribute, this is a limit for all
``Y_*_ic`` and ``X_*_ic_*`` variables."""
for ins in self.system.instance_classes:
if ins.max_vms == 0:
continue # No limit for this instance class
if ins.is_reserved:
self.pulp_problem += (
lpSum(self.cooked.map_res[app, ins] for app in self.system.apps)
<= ins.max_vms,
"Max instances reserved " "instance class {}".format(ins),
)
else:
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
self.cooked.map_dem[app, ins, load]
for app in self.system.apps
)
<= ins.max_vms,
"Max instances for on-demand instance "
"class {} when workload is {}".format(ins, load),
)
def set_fixed_instances_restriction(self) -> None:
"""Adds restrictions for variables with pre-fixed values.
For every ``ic`` in ``self.fixed_vms`` a restriction is
added which forces the total number of those instance classes in
the solution to be at equal to a given value for reserved instances,
and at least equal to a given value for on-demand instances.
This is used mainly in phase II to ensure that reserved instances
are fixed, or to allow to keep at least some number of on-demand
instances running from previous timeslots, when using "guided"
strategies"."""
if self.fixed_vms is None: # No fixed instances, we are in PhaseI
return
for ins, value in self.fixed_vms.items():
if ins.is_reserved:
self.pulp_problem += (
lpSum(self.cooked.map_res[app, ins] for app in self.system.apps)
== value,
"Reserved instance class {} " "is fixed to {}".format(ins, value),
)
else:
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
self.cooked.map_dem[app, ins, load]
for app in self.system.apps
)
>= value,
"On-demand instance class {} is at least {} "
"when workload is {}".format(ins, value, load),
)
def limit_instances_per_limiting_set_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_vms`` per limiting set restriction.
If the limiting set provides a max_vms > 0, then the sum of all
instances which are member of that limiting set should be limited
to that maximum."""
for cloud in self.cooked.limiting_sets:
if cloud.max_vms == 0:
continue # No restriction for this limiting set
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[app, ic]
for app in self.system.apps
for ic in self.cooked.instances_res
if cloud in ic.limiting_sets
]
+ [
self.cooked.map_dem[app, ic, load]
for app in self.system.apps
for ic in self.cooked.instances_dem
if cloud in ic.limiting_sets
]
)
<= cloud.max_vms,
"Max instances for limiting set {} "
"when workload is {}".format(cloud, load),
)
def limit_cores_per_limiting_set_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_cores`` per limiting set restriction.
If the limiting set provides a max_cores > 0, then the sum of all
instance cores among all instance classes which are member of that
limiting set should be limited to that maximum."""
for cloud in self.cooked.limiting_sets:
if cloud.max_cores == 0:
continue # No restriction for this limiting set
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[app, ic] * ic.cores
for app in self.system.apps
for ic in self.cooked.instances_res
if cloud in ic.limiting_sets
]
+ [
self.cooked.map_dem[app, ic, load] * ic.cores
for app in self.system.apps
for ic in self.cooked.instances_dem
if cloud in ic.limiting_sets
]
)
<= cloud.max_cores,
"Max cores for limiting set {} "
"when workload is {}".format(cloud, load),
)
def solve(self, *args, **kwargs):
"""Calls PuLP solver.
Args:
*args: positional args passed to ``LpProblem.solve()``
\\**kwargs: keyword args passed to ``LpProblem.solve()``.
Returns:
the value returned by ``LpProblem.solve()``.
"""
self.solver_called = True
return self.pulp_problem.solve(*args, **kwargs)
def get_status(self) -> Status:
"""Returns the status of the problem"""
if not self.solver_called:
return Status.unsolved
return pulp_to_malloovia_status(self.pulp_problem.status)
    def get_cost(self) -> float:
        """Gets the cost of the problem, obtained after solving it.
        Returns:
            The cost of the optimal solution found by PuLP.
        Raises:
            ValueError: when the problem is yet unsolved.
        """
        # Only an optimal solution has a meaningful objective value.
        if self.pulp_problem.status != pulp.LpStatusOptimal:
            raise ValueError("Cannot get the cost when the status is not optimal")
        return pulp.value(self.pulp_problem.objective)
def get_allocation(self) -> AllocationInfo:
"""Retrieves the allocation given by the solution of the LP problem.
Returns:
The allocation given by the solution.
Raises:
ValueError: if no solution is available (unsolved or infeasible problem)
"""
if self.pulp_problem.status != pulp.LpStatusOptimal:
raise ValueError("Cannot get the cost when the status is not optimal")
workload_tuples = []
repeats = []
allocation = []
for load, repeat in self.load_hist.items():
workload_tuples.append(load)
repeats.append(repeat)
workload_allocation = []
for app in self.system.apps:
row = list(
self.cooked.map_res[app, i].varValue
for i in self.cooked.instances_res
)
row.extend(
self.cooked.map_dem[app, i, load].varValue
for i in self.cooked.instances_dem
)
workload_allocation.append(tuple(row))
allocation.append(tuple(workload_allocation))
return AllocationInfo(
apps=tuple(self.system.apps),
instance_classes=tuple(
self.cooked.instances_res + self.cooked.instances_dem
),
workload_tuples=workload_tuples,
repeats=repeats,
values=tuple(allocation),
units="vms",
)
def get_reserved_allocation(self) -> ReservedAllocation:
"""Retrieves the allocation of reserved instances from the solution of the LP problem.
Returns:
The total number of reserved instance classes of each
type to be purchased for the whole reservation period.
Raises:
ValueError: if no solution is available (unsolved or infeasible problem)
"""
# Returns the solution as a list of numbers, each one
# representing the required number of vms of each reserved type, stored
# in the field "vms_number" of the object.
# This number is valid for any workload tuple, and for every timeslot
# in the reservation period. Also, it does not depend on the applications
# because it is the total number of reserved instances for all apps.
# The returned class also stores the list "instance_classes" which provides
# the instance class associated with each index in the above table.
# So, if r is the value returned, the value of r.vms_number[i]
# (being i an integer) is the number of VMs to be allocated
# from reserved instance class r.instance_classes[i], for every
# timeslot and for the set of all apps.
# This is all the information required for PhaseII.
if self.pulp_problem.status != pulp.LpStatusOptimal:
raise ValueError("Cannot get the cost when the status is not optimal")
allocation: List[float] = []
for _ in self.load_hist: # Loop over all possible workloads
workload_allocation: List[float] = []
for iclass in self.cooked.instances_res:
i_allocation = sum(
self.cooked.map_res[app, iclass].varValue
for app in self.system.apps
)
workload_allocation.append(i_allocation)
# The obtained allocation MUST be the same for any workload
assert allocation == [] or allocation == workload_allocation
allocation = workload_allocation
return ReservedAllocation(
instance_classes=tuple(self.cooked.instances_res),
vms_number=tuple(allocation),
)
class ShortReprTuple(tuple):
    """A tuple whose repr is its hash instead of its contents.

    Histogram keys built from workload tuples end up embedded in LP
    variable names; with many apps a literal tuple repr would make those
    identifiers too long for the solver, so a constant-length hash-based
    repr is used instead.
    """

    def __repr__(self):
        return "{}".format(hash(self))
def get_load_hist_from_load(workloads: Sequence[Workload]) -> MallooviaHistogram:
    """Computes the histogram of the workloads.
    Args:
        workloads: a sequence of :class:`Workload` objects, each one
            containing the fields ``app`` (which identifies the app producing this
            workload) and ``values`` (which stores a sequence of numbers representing
            the workload for each timeslot for that app).
    Returns:
        A dictionary where the key is the workload for one timeslot,
        expressed as a tuple with one element for each application, and the value
        is the number of timeslots in which that workload was found.
    """
    histogram = MallooviaHistogram()
    histogram.apps = tuple(wload.app for wload in workloads)
    n_timeslots = len(workloads[0].values)
    # All predictions must cover the same number of timeslots.
    assert all(
        len(wload.values) == n_timeslots for wload in workloads
    ), "All workloads should have the same length"
    # Each timeslot yields one tuple with the load of every app.
    for workload_tuple in zip(*(wload.values for wload in workloads)):
        histogram[ShortReprTuple(workload_tuple)] += 1
    return histogram
def reorder_workloads(
    workloads: Sequence[Workload], apps: Sequence[App]
) -> Sequence[Workload]:
    """Return the workloads rearranged to follow the order of ``apps``.

    Args:
        workloads: sequence of workloads to reorder.
        apps: sequence of apps whose order dictates the result.

    Returns:
        A tuple with the same workloads, arranged so that the workload of
        ``apps[i]`` occupies position ``i``.
    """
    by_app = {wload.app: wload for wload in workloads}
    return tuple(by_app[app] for app in apps)
class MallooviaLpMaximizeTimeslotPerformance(MallooviaLp):
    """Find the allocation which maximizes performance for a single timeslot.

    This problem is the dual of MallooviaLp. Instead of minimizing the cost
    while providing the minimum performances, the problem to solve now is
    to maximize the performance without breaking the limits.

    The class inherits from Malloovia the initialization methods as well as
    the ones to get the cost and allocation of the solution, but overrides
    the function to be optimized and some of the constraints.
    """

    def _cost_function(self) -> None:
        """Adds to the LP problem the function to optimize (maximize in this case).

        The function to optimize is the performance of the deployment. However, since
        the system is composed to several applications, no single "performance" exists.
        The solution is to maximize the "fraction of performance fulfilled", i.e., the
        sum of `X(_a,_ic,_l)*_ic.performance/_l[a]` among all `_a` and `_ic`.
        """
        # Single-timeslot problem: only the first value of each workload is used.
        workloads = {wl.app: wl.values[0] for wl in self.workloads}
        # Each term is (number of VMs) * (perf per VM) / (required load), i.e. the
        # fraction of the app's demand covered, summed over both reserved and
        # on-demand instance classes.
        self.pulp_problem += (
            lpSum(
                [
                    self.cooked.map_res[_a, _ic]
                    * self.cooked.instance_perfs[_ic, _a]
                    / workloads[_a]
                    for _a in self.system.apps
                    for _ic in self.cooked.instances_res
                ]
                + [
                    self.cooked.map_dem[_a, _ic, _l]
                    * self.cooked.instance_perfs[_ic, _a]
                    / workloads[_a]
                    for _a in self.system.apps
                    for _ic in self.cooked.instances_dem
                    for _l in self.load_hist.keys()
                ]
            ),
            "Objective: maximize fulfilled workload fraction",
        )

    def create_problem(self) -> "MallooviaLpMaximizeTimeslotPerformance":
        """This method creates the PuLP problem, and calls other
        methods to add variables and restrictions to it.

        It initializes the attribute 'self.prob' with the
        instance of the PuLP problem created.
        """
        # Create the linear programming problem (maximization, unlike the base class)
        self.pulp_problem = LpProblem(self.system.name, LpMaximize)
        # Create the linear programming variables for pulp
        self._create_variables()
        # Create the goal function
        self._cost_function()
        # Add all restrictions indicated with functions *_restriction
        # in this class
        self._add_all_restrictions()
        return self

    def performance_restriction(self) -> None:
        """Adds performance restriction to the problem.

        This restriction forces, for each workload tuple, the performance of the
        solution to be less than or equal to that workload level, for
        all applications.
        """
        # Note the "<=" direction: performance is capped by the workload,
        # the opposite of the minimum-performance restriction in MallooviaLp.
        for i, app in enumerate(self.system.apps):
            perf_reserved = []
            for ins in self.cooked.instances_res:
                perf_reserved.append(
                    self.cooked.map_res[app, ins] * self.cooked.instance_perfs[ins, app]
                )
            for load in self.load_hist.keys():
                perf_ondemand = []
                for ins in self.cooked.instances_dem:
                    perf_ondemand.append(
                        self.cooked.map_dem[app, ins, load]
                        * self.cooked.instance_perfs[ins, app]
                    )
                self.pulp_problem += (
                    lpSum(perf_reserved + perf_ondemand) <= load[i],
                    "Maximum performance for application {} "
                    "when workload is {}".format(app, load),
                )

    def get_cost(self) -> float:
        """Gets the cost of the problem, obtained after solving it.

        Returns:
            The cost of the optimal solution found by PuLP.

        Raises:
            ValueError: when the problem is yet unsolved.
        """
        if self.pulp_problem.status == pulp.LpStatusNotSolved:  # Not solved
            raise ValueError("Cannot get the cost of an unsolved problem")
        # Reserved cost plus on-demand cost; on-demand allocations are
        # weighted by the number of timeslots each workload level occurs.
        return sum(
            self.cooked.instance_prices[ic] * self.cooked.map_res[app, ic].varValue
            for ic in self.cooked.instances_res
            for app in self.system.apps
        ) + sum(
            self.cooked.instance_prices[ic]
            * self.cooked.map_dem[app, ic, wl].varValue
            * self.load_hist[wl]
            for ic in self.cooked.instances_dem
            for app in self.system.apps
            for wl in self.load_hist.keys()
        )
# The following function is used to monkey patch part of PuLP code.
# This modification is aimed to get the value of the optimal best bound
# which is provided by CBC solver as part of the solution, even if
# the solution could not be found due to a time limit
#
# PuLP does not recover this value, but for our analysis is useful
# to estimate the worst-case error of our approximation when the
# exact solution cannot be found in a reasonable time.
#
# The code patches the part in which PuLP calls CBC, so that the standard
# output of CBC is redirected to a logfile. When CBC exits, the code
# inspects the logfile and locates the bestBound value, storing it
# as part of the problem to make it accessible to the python code.
#
# This patch only works when the solver is COIN.
# pylint: disable=invalid-name,too-many-locals,missing-docstring,bare-except,too-many-branches,too-many-statements
def _solve_CBC_patched(self, lp, use_mps=True): # pragma: no cover
    """Solve a MIP problem using CBC, patched from original PuLP function
    to save a log with cbc's output and take from it the best bound."""

    def takeBestBoundFromLog(filename):
        # Scan CBC's captured stdout for the "Lower bound:" line and return
        # its value; returns None when the file or the line is missing.
        # The bare except is deliberate best-effort: any failure here must
        # not abort the solve.
        try:
            with open(filename, "r") as f:
                for l in f:
                    if l.startswith("Lower bound:"):
                        return float(l.split(":")[-1])
        except:
            pass
        return None

    if not self.executable(self.path):
        raise PulpSolverError("Pulp: cannot execute %s cwd: %s" %
                              (self.path, os.getcwd()))
    # Build temporary file names; random names unless the caller asked for
    # the intermediate files to be kept.
    if not self.keepFiles:
        uuid = uuid4().hex
        tmpLp = os.path.join(self.tmpDir, "%s-pulp.lp" % uuid)
        tmpMps = os.path.join(self.tmpDir, "%s-pulp.mps" % uuid)
        tmpSol = os.path.join(self.tmpDir, "%s-pulp.sol" % uuid)
        tmpSol_init = os.path.join(self.tmpDir, "%s-pulp_init.sol" % uuid)
    else:
        tmpLp = lp.name+"-pulp.lp"
        tmpMps = lp.name+"-pulp.mps"
        tmpSol = lp.name+"-pulp.sol"
        tmpSol_init = lp.name + "-pulp_init.sol"
    # Write the model in MPS or LP format and start building the CBC
    # command line.
    if use_mps:
        vs, variablesNames, constraintsNames, objectiveName = lp.writeMPS(tmpMps, rename = 1)
        cmds = ' '+tmpMps+" "
        if lp.sense == LpMaximize:
            cmds += 'max '
    else:
        vs = lp.writeLP(tmpLp)
        # In the Lp we do not create new variable or constraint names:
        variablesNames = dict((v.name, v.name) for v in vs)
        constraintsNames = dict((c, c) for c in lp.constraints)
        objectiveName = None
        cmds = ' '+tmpLp+" "
    if self.mip_start:
        # Warm start: feed CBC an initial feasible solution.
        self.writesol(tmpSol_init, lp, vs, variablesNames, constraintsNames)
        cmds += 'mips {} '.format(tmpSol_init)
    # Translate the solver options (threads, gap, time limit, presolve,
    # cuts...) into CBC command-line switches.
    if self.threads:
        cmds += "threads %s "%self.threads
    if self.fracGap is not None:
        cmds += "ratio %s "%self.fracGap
    if self.maxSeconds is not None:
        cmds += "sec %s "%self.maxSeconds
    if self.presolve:
        cmds += "presolve on "
    if self.strong:
        cmds += "strong %d " % self.strong
    if self.cuts:
        cmds += "gomory on "
        # cbc.write("oddhole on "
        cmds += "knapsack on "
        cmds += "probing on "
    for option in self.options:
        cmds += option+" "
    if self.mip:
        cmds += "branch "
    else:
        cmds += "initialSolve "
    cmds += "printingOptions all "
    cmds += "solution "+tmpSol+" "
    # if self.msg:
    # pipe = None
    # else:
    # pipe = open(os.devnull, 'w')
    log.debug(self.path + cmds)
    # This is the actual patch: CBC's stdout/stderr are redirected to a log
    # file so that the best bound can be recovered from it afterwards.
    with open(tmpLp + ".log", 'w') as pipe:
        cbc = subprocess.Popen((self.path + cmds).split(), stdout=pipe,
                               stderr=pipe)
        if cbc.wait() != 0:
            raise PulpSolverError("Pulp: Error while trying to execute " +
                                  self.path)
    if not os.path.exists(tmpSol):
        raise PulpSolverError("Pulp: Error while executing "+self.path)
    # Read the solution back and push the values into the pulp model.
    if use_mps:
        status, values, reducedCosts, shadowPrices, slacks, sol_status = \
            self.readsol_MPS(tmpSol, lp, lp.variables(), variablesNames, constraintsNames)
    else:
        status, values, reducedCosts, shadowPrices, slacks, sol_status = self.readsol_LP(
            tmpSol, lp, lp.variables()
        )
    lp.assignVarsVals(values)
    lp.assignVarsDj(reducedCosts)
    lp.assignConsPi(shadowPrices)
    lp.assignConsSlack(slacks, activity=True)
    lp.assignStatus(status, sol_status)
    # Patched behaviour: expose CBC's best bound on the problem object.
    lp.bestBound = takeBestBoundFromLog(tmpLp + ".log")
    if not self.keepFiles:
        # Best-effort cleanup of the temporary files.
        for f in [tmpMps, tmpLp, tmpSol, tmpSol_init]:
            try:
                os.remove(f)
            except:
                pass
    return status

# Monkey patching
COIN_CMD.solve_CBC = _solve_CBC_patched
# Public API of this module: the two LP model classes and the histogram helper.
__all__ = [
    "MallooviaLp",
    "get_load_hist_from_load",
    "MallooviaLpMaximizeTimeslotPerformance",
]
| mit |
ruibarreira/linuxtrail | usr/lib/python2.7/_pyio.py | 28 | 69294 | """
Python implementation of the io module.
"""
from __future__ import (print_function, unicode_literals)
import os
import abc
import codecs
import warnings
import errno
# Import thread instead of threading to reduce startup cost
try:
from thread import allocate_lock as Lock
except ImportError:
from dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
from errno import EINTR
__metaclass__ = type
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
class BlockingIOError(IOError):
    """Exception raised when I/O would block on a non-blocking I/O stream."""

    def __init__(self, errno, strerror, characters_written=0):
        # Deliberately skip IOError.__init__ and call its base directly.
        super(IOError, self).__init__(errno, strerror)
        if isinstance(characters_written, (int, long)):
            # Number of characters written before the stream blocked.
            self.characters_written = characters_written
        else:
            raise TypeError("characters_written must be a integer")
def open(file, mode="r", buffering=-1,
         encoding=None, errors=None,
         newline=None, closefd=True):
    r"""Open file and return a stream. Raise IOError upon failure.
    file is either a text or byte string giving the name (and the path
    if the file isn't in the current working directory) of the file to
    be opened or an integer file descriptor of the file to be
    wrapped. (If a file descriptor is given, it is closed when the
    returned I/O object is closed, unless closefd is set to False.)
    mode is an optional string that specifies the mode in which the file
    is opened. It defaults to 'r' which means open for reading in text
    mode. Other common values are 'w' for writing (truncating the file if
    it already exists), and 'a' for appending (which on some Unix systems,
    means that all writes append to the end of the file regardless of the
    current seek position). In text mode, if encoding is not specified the
    encoding used is platform dependent. (For reading and writing raw
    bytes use binary mode and leave encoding unspecified.) The available
    modes are:
    ========= ===============================================================
    Character Meaning
    --------- ---------------------------------------------------------------
    'r'       open for reading (default)
    'w'       open for writing, truncating the file first
    'a'       open for writing, appending to the end of the file if it exists
    'b'       binary mode
    't'       text mode (default)
    '+'       open a disk file for updating (reading and writing)
    'U'       universal newline mode (for backwards compatibility; unneeded
              for new code)
    ========= ===============================================================
    The default mode is 'rt' (open for reading text). For binary random
    access, the mode 'w+b' opens and truncates the file to 0 bytes, while
    'r+b' opens the file without truncation.
    Python distinguishes between files opened in binary and text modes,
    even when the underlying operating system doesn't. Files opened in
    binary mode (appending 'b' to the mode argument) return contents as
    bytes objects without any decoding. In text mode (the default, or when
    't' is appended to the mode argument), the contents of the file are
    returned as strings, the bytes having been first decoded using a
    platform-dependent encoding or using the specified encoding if given.
    buffering is an optional integer used to set the buffering policy.
    Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
    line buffering (only usable in text mode), and an integer > 1 to indicate
    the size of a fixed-size chunk buffer. When no buffering argument is
    given, the default buffering policy works as follows:
    * Binary files are buffered in fixed-size chunks; the size of the buffer
      is chosen using a heuristic trying to determine the underlying device's
      "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
      On many systems, the buffer will typically be 4096 or 8192 bytes long.
    * "Interactive" text files (files for which isatty() returns True)
      use line buffering. Other text files use the policy described above
      for binary files.
    encoding is the name of the encoding used to decode or encode the
    file. This should only be used in text mode. The default encoding is
    platform dependent, but any encoding supported by Python can be
    passed. See the codecs module for the list of supported encodings.
    errors is an optional string that specifies how encoding errors are to
    be handled---this argument should not be used in binary mode. Pass
    'strict' to raise a ValueError exception if there is an encoding error
    (the default of None has the same effect), or pass 'ignore' to ignore
    errors. (Note that ignoring encoding errors can lead to data loss.)
    See the documentation for codecs.register for a list of the permitted
    encoding error strings.
    newline controls how universal newlines works (it only applies to text
    mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
    follows:
    * On input, if newline is None, universal newlines mode is
      enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
      these are translated into '\n' before being returned to the
      caller. If it is '', universal newline mode is enabled, but line
      endings are returned to the caller untranslated. If it has any of
      the other legal values, input lines are only terminated by the given
      string, and the line ending is returned to the caller untranslated.
    * On output, if newline is None, any '\n' characters written are
      translated to the system default line separator, os.linesep. If
      newline is '', no translation takes place. If newline is any of the
      other legal values, any '\n' characters written are translated to
      the given string.
    If closefd is False, the underlying file descriptor will be kept open
    when the file is closed. This does not work when a file name is given
    and must be True in that case.
    open() returns a file object whose type depends on the mode, and
    through which the standard file operations such as reading and writing
    are performed. When open() is used to open a file in a text mode ('w',
    'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
    a file in a binary mode, the returned class varies: in read binary
    mode, it returns a BufferedReader; in write binary and append binary
    modes, it returns a BufferedWriter, and in read/write mode, it returns
    a BufferedRandom.
    It is also possible to use a string or bytearray as a file for both
    reading and writing. For strings StringIO can be used like a file
    opened in a text mode, and for bytes a BytesIO can be used like a file
    opened in a binary mode.
    """
    # --- Validate argument types (Python 2: basestring/long) -------------
    if not isinstance(file, (basestring, int, long)):
        raise TypeError("invalid file: %r" % file)
    if not isinstance(mode, basestring):
        raise TypeError("invalid mode: %r" % mode)
    if not isinstance(buffering, (int, long)):
        raise TypeError("invalid buffering: %r" % buffering)
    if encoding is not None and not isinstance(encoding, basestring):
        raise TypeError("invalid encoding: %r" % encoding)
    if errors is not None and not isinstance(errors, basestring):
        raise TypeError("invalid errors: %r" % errors)
    # --- Parse the mode string; unknown or repeated letters are invalid --
    modes = set(mode)
    if modes - set("arwb+tU") or len(mode) > len(modes):
        raise ValueError("invalid mode: %r" % mode)
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        if writing or appending:
            raise ValueError("can't use U and writing mode at once")
        # Universal-newline mode implies reading.
        reading = True
    if text and binary:
        raise ValueError("can't have text and binary mode at once")
    if reading + writing + appending > 1:
        raise ValueError("can't have read/write/append mode at once")
    if not (reading or writing or appending):
        raise ValueError("must have exactly one of read/write/append mode")
    if binary and encoding is not None:
        raise ValueError("binary mode doesn't take an encoding argument")
    if binary and errors is not None:
        raise ValueError("binary mode doesn't take an errors argument")
    if binary and newline is not None:
        raise ValueError("binary mode doesn't take a newline argument")
    # --- Build the stream stack: raw -> buffered -> (text wrapper) -------
    raw = FileIO(file,
                 (reading and "r" or "") +
                 (writing and "w" or "") +
                 (appending and "a" or "") +
                 (updating and "+" or ""),
                 closefd)
    result = raw
    try:
        line_buffering = False
        if buffering == 1 or buffering < 0 and raw.isatty():
            # Line buffering is implemented by the text layer on top of a
            # default-sized binary buffer.
            buffering = -1
            line_buffering = True
        if buffering < 0:
            # Default buffer size: the device's block size when available.
            buffering = DEFAULT_BUFFER_SIZE
            try:
                bs = os.fstat(raw.fileno()).st_blksize
            except (os.error, AttributeError):
                pass
            else:
                if bs > 1:
                    buffering = bs
        if buffering < 0:
            raise ValueError("invalid buffering size")
        if buffering == 0:
            if binary:
                # Unbuffered binary I/O: return the raw FileIO directly.
                return result
            raise ValueError("can't have unbuffered text I/O")
        if updating:
            buffer = BufferedRandom(raw, buffering)
        elif writing or appending:
            buffer = BufferedWriter(raw, buffering)
        elif reading:
            buffer = BufferedReader(raw, buffering)
        else:
            raise ValueError("unknown mode: %r" % mode)
        result = buffer
        if binary:
            return result
        text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
        result = text
        text.mode = mode
        return result
    except:
        # Close whatever layer was built so far before re-raising, so the
        # underlying file descriptor is not leaked.
        result.close()
        raise
class DocDescriptor:
    """Descriptor that builds the docstring for builtins.open.
    """
    def __get__(self, obj, typ):
        signature = ("open(file, mode='r', buffering=-1, encoding=None, "
                     "errors=None, newline=None, closefd=True)\n\n")
        return signature + open.__doc__
class OpenWrapper:
    """Wrapper for builtins.open

    Trick so that open won't become a bound method when stored
    as a class variable (as dbm.dumb does).

    See initstdio() in Python/pythonrun.c.
    """
    # Replace the class docstring with the dynamically built one.
    __doc__ = DocDescriptor()

    def __new__(cls, *args, **kwargs):
        # Returning open()'s result from __new__ means "instantiating" this
        # class yields a file object, never an OpenWrapper instance.
        return open(*args, **kwargs)
class UnsupportedOperation(ValueError, IOError):
    """Raised when an operation is not supported by the stream."""
class IOBase:

    __metaclass__ = abc.ABCMeta

    # NOTE: because the assignment above is the first statement of the class
    # body, the string literal below is NOT bound to __doc__; it is kept for
    # human readers only.
    """The abstract base class for all I/O classes, acting on streams of
    bytes. There is no public constructor.
    This class provides dummy implementations for many methods that
    derived classes can override selectively; the default implementations
    represent a file that cannot be read, written or seeked.
    Even though IOBase does not declare read, readinto, or write because
    their signatures will vary, implementations and clients should
    consider those methods part of the interface. Also, implementations
    may raise a IOError when operations they do not support are called.
    The basic type used for binary data read from or written to a file is
    bytes. bytearrays are accepted too, and in some cases (such as
    readinto) needed. Text I/O classes work with str data.
    Note that calling any method (even inquiries) on a closed stream is
    undefined. Implementations may raise IOError in this case.
    IOBase (and its subclasses) support the iterator protocol, meaning
    that an IOBase object can be iterated over yielding the lines in a
    stream.
    IOBase also supports the :keyword:`with` statement. In this example,
    fp is closed after the suite of the with statement is complete:
    with open('spam.txt', 'r') as fp:
        fp.write('Spam and eggs!')
    """

    ### Internal ###

    def _unsupported(self, name):
        """Internal: raise an exception for unsupported operations."""
        raise UnsupportedOperation("%s.%s() not supported" %
                                   (self.__class__.__name__, name))

    ### Positioning ###

    def seek(self, pos, whence=0):
        """Change stream position.
        Change the stream position to byte offset pos. Argument pos is
        interpreted relative to the position indicated by whence. Values
        for whence are:
        * 0 -- start of stream (the default); offset should be zero or positive
        * 1 -- current stream position; offset may be negative
        * 2 -- end of stream; offset is usually negative
        Return the new absolute position.
        """
        self._unsupported("seek")

    def tell(self):
        """Return current stream position."""
        return self.seek(0, 1)

    def truncate(self, pos=None):
        """Truncate file to size bytes.
        Size defaults to the current IO position as reported by tell(). Return
        the new size.
        """
        self._unsupported("truncate")

    ### Flush and close ###

    def flush(self):
        """Flush write buffers, if applicable.
        This is not implemented for read-only and non-blocking streams.
        """
        self._checkClosed()
        # XXX Should this return the number of bytes written???

    # Name-mangled to _IOBase__closed so subclasses cannot clobber it by
    # accident.
    __closed = False

    def close(self):
        """Flush and close the IO object.
        This method has no effect if the file is already closed.
        """
        if not self.__closed:
            try:
                self.flush()
            finally:
                # Mark closed even if flush() raised.
                self.__closed = True

    def __del__(self):
        """Destructor. Calls close()."""
        # The try/except block is in case this is called at program
        # exit time, when it's possible that globals have already been
        # deleted, and then the close() call might fail. Since
        # there's nothing we can do about such failures and they annoy
        # the end users, we suppress the traceback.
        try:
            self.close()
        except:
            pass

    ### Inquiries ###

    def seekable(self):
        """Return whether object supports random access.
        If False, seek(), tell() and truncate() will raise IOError.
        This method may need to do a test seek().
        """
        return False

    def _checkSeekable(self, msg=None):
        """Internal: raise an IOError if file is not seekable
        """
        if not self.seekable():
            raise IOError("File or stream is not seekable."
                          if msg is None else msg)

    def readable(self):
        """Return whether object was opened for reading.
        If False, read() will raise IOError.
        """
        return False

    def _checkReadable(self, msg=None):
        """Internal: raise an IOError if file is not readable
        """
        if not self.readable():
            raise IOError("File or stream is not readable."
                          if msg is None else msg)

    def writable(self):
        """Return whether object was opened for writing.
        If False, write() and truncate() will raise IOError.
        """
        return False

    def _checkWritable(self, msg=None):
        """Internal: raise an IOError if file is not writable
        """
        if not self.writable():
            raise IOError("File or stream is not writable."
                          if msg is None else msg)

    @property
    def closed(self):
        """closed: bool. True iff the file has been closed.
        For backwards compatibility, this is a property, not a predicate.
        """
        return self.__closed

    def _checkClosed(self, msg=None):
        """Internal: raise an ValueError if file is closed
        """
        if self.closed:
            raise ValueError("I/O operation on closed file."
                             if msg is None else msg)

    ### Context manager ###

    def __enter__(self):
        """Context management protocol. Returns self."""
        self._checkClosed()
        return self

    def __exit__(self, *args):
        """Context management protocol. Calls close()"""
        self.close()

    ### Lower-level APIs ###

    # XXX Should these be present even if unimplemented?

    def fileno(self):
        """Returns underlying file descriptor if one exists.
        An IOError is raised if the IO object does not use a file descriptor.
        """
        self._unsupported("fileno")

    def isatty(self):
        """Return whether this is an 'interactive' stream.
        Return False if it can't be determined.
        """
        self._checkClosed()
        return False

    ### Readline[s] and writelines ###

    def readline(self, limit=-1):
        r"""Read and return a line from the stream.
        If limit is specified, at most limit bytes will be read.
        The line terminator is always b'\n' for binary files; for text
        files, the newlines argument to open can be used to select the line
        terminator(s) recognized.
        """
        # For backwards compatibility, a (slowish) readline().
        if hasattr(self, "peek"):
            def nreadahead():
                # Peek at buffered data to read up to (and including) the
                # next newline in one call, without passing it.
                readahead = self.peek(1)
                if not readahead:
                    return 1
                n = (readahead.find(b"\n") + 1) or len(readahead)
                if limit >= 0:
                    n = min(n, limit)
                return n
        else:
            def nreadahead():
                # No peek(): fall back to reading one byte at a time.
                return 1
        if limit is None:
            limit = -1
        elif not isinstance(limit, (int, long)):
            raise TypeError("limit must be an integer")
        res = bytearray()
        while limit < 0 or len(res) < limit:
            b = self.read(nreadahead())
            if not b:
                break
            res += b
            if res.endswith(b"\n"):
                break
        return bytes(res)

    def __iter__(self):
        self._checkClosed()
        return self

    # Python 2 iterator protocol: next(), not __next__().
    def next(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    def readlines(self, hint=None):
        """Return a list of lines from the stream.
        hint can be specified to control the number of lines read: no more
        lines will be read if the total size (in bytes/characters) of all
        lines so far exceeds hint.
        """
        if hint is not None and not isinstance(hint, (int, long)):
            raise TypeError("integer or None expected")
        if hint is None or hint <= 0:
            return list(self)
        n = 0
        lines = []
        for line in self:
            lines.append(line)
            n += len(line)
            if n >= hint:
                break
        return lines

    def writelines(self, lines):
        self._checkClosed()
        for line in lines:
            self.write(line)

# Register as a virtual subclass of the C-implemented ABC in the io module.
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
    """Base class for raw binary I/O."""

    # read() is expressed in terms of readinto(): subclasses that want a
    # working read() only have to supply readinto() as the primitive, and
    # readinto() can generally be made more efficient than read().
    # (Defining readinto() in terms of read() as well would be tempting,
    # but would lead to nasty recursion if a subclass implemented neither.)

    def read(self, n=-1):
        """Read and return up to n bytes.
        Returns an empty bytes object on EOF, or None if the object is
        set not to block and has no data to read.
        """
        if n is None:
            n = -1
        if n < 0:
            return self.readall()
        buf = bytearray(n.__index__())
        count = self.readinto(buf)
        if count is None:
            return None
        return bytes(buf[:count])

    def readall(self):
        """Read until EOF, using multiple read() call."""
        pieces = bytearray()
        while True:
            piece = self.read(DEFAULT_BUFFER_SIZE)
            if not piece:
                break
            pieces += piece
        if pieces:
            return bytes(pieces)
        # piece is b'' (EOF) or None (would block); propagate it as-is.
        return piece

    def readinto(self, b):
        """Read up to len(b) bytes into b.
        Returns number of bytes read (0 for EOF), or None if the object
        is set not to block and has no data to read.
        """
        self._unsupported("readinto")

    def write(self, b):
        """Write the given buffer to the IO stream.
        Returns the number of bytes written, which may be less than len(b).
        """
        self._unsupported("write")

io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
    """Base class for buffered IO objects.
    The main difference with RawIOBase is that the read() method
    supports omitting the size argument, and does not have a default
    implementation that defers to readinto().
    In addition, read(), readinto() and write() may raise
    BlockingIOError if the underlying raw stream is in non-blocking
    mode and not ready; unlike their raw counterparts, they will never
    return None.
    A typical implementation should not inherit from a RawIOBase
    implementation, but wrap one.
    """

    def read(self, n=None):
        """Read and return up to n bytes.
        With no argument, None, or a negative argument, reads and returns
        all data until EOF.
        With a positive argument, multiple raw reads may be issued to
        satisfy the byte count unless the underlying raw stream is
        'interactive' (or EOF is reached first). For interactive raw
        streams at most one raw read is issued, and a short result does
        not imply that EOF is imminent.
        Returns an empty bytes array on EOF.
        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        self._unsupported("read")

    def read1(self, n=None):
        """Read up to n bytes with at most one read() system call."""
        self._unsupported("read1")

    def readinto(self, b):
        """Read up to len(b) bytes into b.
        Like read(), this may issue multiple reads to the underlying raw
        stream, unless the latter is 'interactive'.
        Returns the number of bytes read (0 for EOF).
        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        # This ought to work with anything supporting the buffer API; the
        # fallback below additionally covers array.array targets.
        data = self.read(len(b))
        count = len(data)
        try:
            b[:count] = data
        except TypeError as err:
            import array
            if isinstance(b, array.array):
                b[:count] = array.array(b'b', data)
            else:
                raise err
        return count

    def write(self, b):
        """Write the given buffer to the IO stream.
        Return the number of bytes written, which is never less than
        len(b).
        Raises BlockingIOError if the buffer is full and the
        underlying raw stream cannot accept more data at the moment.
        """
        self._unsupported("write")

    def detach(self):
        """Separate the underlying raw stream from the buffer and return it.
        After the raw stream has been detached, the buffer is in an
        unusable state.
        """
        self._unsupported("detach")

io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
    """A mixin implementation of BufferedIOBase with an underlying raw stream.
    Most requests are simply forwarded to the wrapped raw stream; the
    mixin deliberately provides no read(), readinto() or write().
    """

    def __init__(self, raw):
        self._raw = raw

    ### Positioning ###

    def seek(self, pos, whence=0):
        result = self.raw.seek(pos, whence)
        if result < 0:
            raise IOError("seek() returned an invalid position")
        return result

    def tell(self):
        position = self.raw.tell()
        if position < 0:
            raise IOError("tell() returned an invalid position")
        return position

    def truncate(self, pos=None):
        # We're mixing buffered I/O with lower-level I/O: flush first so
        # that both views of the current file state agree.
        self.flush()
        if pos is None:
            pos = self.tell()
        # XXX: Should seek() be used, instead of passing the position
        # XXX  directly to truncate?
        return self.raw.truncate(pos)

    ### Flush and close ###

    def flush(self):
        if self.closed:
            raise ValueError("flush of closed file")
        self.raw.flush()

    def close(self):
        if self.raw is not None and not self.closed:
            try:
                # may raise BlockingIOError or BrokenPipeError etc
                self.flush()
            finally:
                self.raw.close()

    def detach(self):
        if self.raw is None:
            raise ValueError("raw stream already detached")
        self.flush()
        detached = self._raw
        self._raw = None
        return detached

    ### Inquiries ###

    def seekable(self):
        return self.raw.seekable()

    def readable(self):
        return self.raw.readable()

    def writable(self):
        return self.raw.writable()

    @property
    def raw(self):
        return self._raw

    @property
    def closed(self):
        return self.raw.closed

    @property
    def name(self):
        return self.raw.name

    @property
    def mode(self):
        return self.raw.mode

    def __repr__(self):
        clsname = self.__class__.__name__
        try:
            name = self.name
        except AttributeError:
            return "<_pyio.{0}>".format(clsname)
        return "<_pyio.{0} name={1!r}>".format(clsname, name)

    ### Lower-level APIs ###

    def fileno(self):
        return self.raw.fileno()

    def isatty(self):
        return self.raw.isatty()
class BytesIO(BufferedIOBase):
    """Buffered I/O implementation using an in-memory bytes buffer."""

    def __init__(self, initial_bytes=None):
        storage = bytearray()
        if initial_bytes is not None:
            storage.extend(initial_bytes)
        self._buffer = storage
        self._pos = 0

    def _ensure_open(self, message):
        # Internal helper: fail with the operation-specific message when
        # the stream has been closed.
        if self.closed:
            raise ValueError(message)

    def __getstate__(self):
        self._ensure_open("__getstate__ on closed file")
        return self.__dict__.copy()

    def getvalue(self):
        """Return the bytes value (contents) of the buffer
        """
        self._ensure_open("getvalue on closed file")
        return bytes(self._buffer)

    def read(self, n=None):
        self._ensure_open("read from closed file")
        if n is None:
            n = -1
        if not isinstance(n, (int, long)):
            raise TypeError("integer argument expected, got {0!r}".format(
                type(n)))
        if n < 0:
            n = len(self._buffer)
        if self._pos >= len(self._buffer):
            return b""
        end = min(len(self._buffer), self._pos + n)
        chunk = self._buffer[self._pos:end]
        self._pos = end
        return bytes(chunk)

    def read1(self, n):
        """This is the same as read.
        """
        return self.read(n)

    def write(self, b):
        self._ensure_open("write to closed file")
        if isinstance(b, unicode):
            raise TypeError("can't write unicode to binary stream")
        n = len(b)
        if n == 0:
            return 0
        pos = self._pos
        if pos > len(self._buffer):
            # Pad the gap between the current end of the buffer and the
            # write position with null bytes.
            self._buffer += b'\x00' * (pos - len(self._buffer))
        self._buffer[pos:pos + n] = b
        self._pos = pos + n
        return n

    def seek(self, pos, whence=0):
        self._ensure_open("seek on closed file")
        try:
            pos.__index__
        except AttributeError:
            raise TypeError("an integer is required")
        if whence == 0:
            if pos < 0:
                raise ValueError("negative seek position %r" % (pos,))
            new_pos = pos
        elif whence == 1:
            new_pos = max(0, self._pos + pos)
        elif whence == 2:
            new_pos = max(0, len(self._buffer) + pos)
        else:
            raise ValueError("invalid whence value")
        self._pos = new_pos
        return new_pos

    def tell(self):
        self._ensure_open("tell on closed file")
        return self._pos

    def truncate(self, pos=None):
        self._ensure_open("truncate on closed file")
        if pos is None:
            pos = self._pos
        else:
            try:
                pos.__index__
            except AttributeError:
                raise TypeError("an integer is required")
            if pos < 0:
                raise ValueError("negative truncate position %r" % (pos,))
        del self._buffer[pos:]
        return pos

    def readable(self):
        self._ensure_open("I/O operation on closed file.")
        return True

    def writable(self):
        self._ensure_open("I/O operation on closed file.")
        return True

    def seekable(self):
        self._ensure_open("I/O operation on closed file.")
        return True
class BufferedReader(_BufferedIOMixin):
    """BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential BaseRawIO object.
    The constructor creates a BufferedReader for the given readable raw
    stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
    is used.
    """
    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        """Create a new buffered reader using the given readable raw IO object.
        """
        if not raw.readable():
            raise IOError('"raw" argument must be readable.')
        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        self._reset_read_buf()
        # Serializes all access to the read buffer state below.
        self._read_lock = Lock()
    def _reset_read_buf(self):
        # _read_buf holds buffered-but-unconsumed bytes; _read_pos is the
        # offset of the first unconsumed byte within it.
        self._read_buf = b""
        self._read_pos = 0
    def read(self, n=None):
        """Read n bytes.
        Returns exactly n bytes of data unless the underlying raw IO
        stream reaches EOF or if the call would block in non-blocking
        mode. If n is negative, read until EOF or until read() would
        block.
        """
        if n is not None and n < -1:
            raise ValueError("invalid number of bytes to read")
        with self._read_lock:
            return self._read_unlocked(n)
    def _read_unlocked(self, n=None):
        # Core read implementation; caller must hold self._read_lock.
        nodata_val = b""
        empty_values = (b"", None)
        buf = self._read_buf
        pos = self._read_pos
        # Special case for when the number of bytes to read is unspecified.
        if n is None or n == -1:
            self._reset_read_buf()
            chunks = [buf[pos:]] # Strip the consumed bytes.
            current_size = 0
            while True:
                # Read until EOF or until read() would block.
                try:
                    chunk = self.raw.read()
                except IOError as e:
                    # Retry reads interrupted by a signal.
                    if e.errno != EINTR:
                        raise
                    continue
                if chunk in empty_values:
                    nodata_val = chunk
                    break
                current_size += len(chunk)
                chunks.append(chunk)
            return b"".join(chunks) or nodata_val
        # The number of bytes to read is specified, return at most n bytes.
        avail = len(buf) - pos # Length of the available buffered data.
        if n <= avail:
            # Fast path: the data to read is fully buffered.
            self._read_pos += n
            return buf[pos:pos+n]
        # Slow path: read from the stream until enough bytes are read,
        # or until an EOF occurs or until read() would block.
        chunks = [buf[pos:]]
        wanted = max(self.buffer_size, n)
        while avail < n:
            try:
                chunk = self.raw.read(wanted)
            except IOError as e:
                if e.errno != EINTR:
                    raise
                continue
            if chunk in empty_values:
                nodata_val = chunk
                break
            avail += len(chunk)
            chunks.append(chunk)
        # n is more then avail only when an EOF occurred or when
        # read() would have blocked.
        n = min(n, avail)
        out = b"".join(chunks)
        self._read_buf = out[n:] # Save the extra data in the buffer.
        self._read_pos = 0
        return out[:n] if out else nodata_val
    def peek(self, n=0):
        """Returns buffered bytes without advancing the position.
        The argument indicates a desired minimal number of bytes; we
        do at most one raw read to satisfy it. We never return more
        than self.buffer_size.
        """
        with self._read_lock:
            return self._peek_unlocked(n)
    def _peek_unlocked(self, n=0):
        # Caller must hold self._read_lock.
        want = min(n, self.buffer_size)
        have = len(self._read_buf) - self._read_pos
        if have < want or have <= 0:
            # Top up the buffer with a single raw read (retried on EINTR).
            to_read = self.buffer_size - have
            while True:
                try:
                    current = self.raw.read(to_read)
                except IOError as e:
                    if e.errno != EINTR:
                        raise
                    continue
                break
            if current:
                self._read_buf = self._read_buf[self._read_pos:] + current
                self._read_pos = 0
        return self._read_buf[self._read_pos:]
    def read1(self, n):
        """Reads up to n bytes, with at most one read() system call."""
        # Returns up to n bytes. If at least one byte is buffered, we
        # only return buffered bytes. Otherwise, we do one raw read.
        if n < 0:
            raise ValueError("number of bytes to read must be positive")
        if n == 0:
            return b""
        with self._read_lock:
            self._peek_unlocked(1)
            return self._read_unlocked(
                min(n, len(self._read_buf) - self._read_pos))
    def tell(self):
        # Logical position = raw position minus bytes still buffered ahead.
        return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence value")
        with self._read_lock:
            if whence == 1:
                # Adjust a relative seek for data buffered but not consumed.
                pos -= len(self._read_buf) - self._read_pos
            pos = _BufferedIOMixin.seek(self, pos, whence)
            self._reset_read_buf()
        return pos
class BufferedWriter(_BufferedIOMixin):
    """A buffer for a writeable sequential RawIO object.
    The constructor creates a BufferedWriter for the given writeable raw
    stream. If the buffer_size is not given, it defaults to
    DEFAULT_BUFFER_SIZE.
    """
    # Stack depth for the max_buffer_size deprecation warning; subclasses
    # (BufferedRandom) override it so the warning points at their caller.
    _warning_stack_offset = 2
    def __init__(self, raw,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        if not raw.writable():
            raise IOError('"raw" argument must be writable.')
        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        if max_buffer_size is not None:
            warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
                          self._warning_stack_offset)
        self.buffer_size = buffer_size
        # Pending, not-yet-flushed output bytes.
        self._write_buf = bytearray()
        self._write_lock = Lock()
    def write(self, b):
        # Append b to the write buffer, flushing when it overflows; returns
        # the number of bytes accepted (may be partial on BlockingIOError).
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, unicode):
            raise TypeError("can't write unicode to binary stream")
        with self._write_lock:
            # XXX we can implement some more tricks to try and avoid
            # partial writes
            if len(self._write_buf) > self.buffer_size:
                # We're full, so let's pre-flush the buffer. (This may
                # raise BlockingIOError with characters_written == 0.)
                self._flush_unlocked()
            before = len(self._write_buf)
            self._write_buf.extend(b)
            written = len(self._write_buf) - before
            if len(self._write_buf) > self.buffer_size:
                try:
                    self._flush_unlocked()
                except BlockingIOError as e:
                    if len(self._write_buf) > self.buffer_size:
                        # We've hit the buffer_size. We have to accept a partial
                        # write and cut back our buffer.
                        overage = len(self._write_buf) - self.buffer_size
                        written -= overage
                        self._write_buf = self._write_buf[:self.buffer_size]
                    raise BlockingIOError(e.errno, e.strerror, written)
        return written
    def truncate(self, pos=None):
        # Flush pending output first so the raw position is accurate.
        with self._write_lock:
            self._flush_unlocked()
            if pos is None:
                pos = self.raw.tell()
            return self.raw.truncate(pos)
    def flush(self):
        with self._write_lock:
            self._flush_unlocked()
    def _flush_unlocked(self):
        # Drain the write buffer into the raw stream; caller holds the lock.
        if self.closed:
            raise ValueError("flush of closed file")
        while self._write_buf:
            try:
                n = self.raw.write(self._write_buf)
            except BlockingIOError:
                raise RuntimeError("self.raw should implement RawIOBase: it "
                                   "should not raise BlockingIOError")
            except IOError as e:
                # Retry writes interrupted by a signal.
                if e.errno != EINTR:
                    raise
                continue
            if n is None:
                raise BlockingIOError(
                    errno.EAGAIN,
                    "write could not complete without blocking", 0)
            if n > len(self._write_buf) or n < 0:
                raise IOError("write() returned incorrect number of bytes")
            del self._write_buf[:n]
    def tell(self):
        # Logical position = raw position plus bytes still waiting to flush.
        return _BufferedIOMixin.tell(self) + len(self._write_buf)
    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence")
        with self._write_lock:
            # Flush first so the raw seek lands at the logical position.
            self._flush_unlocked()
            return _BufferedIOMixin.seek(self, pos, whence)
class BufferedRWPair(BufferedIOBase):
    """A buffered reader and writer object together.
    A buffered reader object and buffered writer object put together to
    form a sequential IO object that can read and write. This is typically
    used with a socket or two-way pipe.
    reader and writer are RawIOBase objects that are readable and
    writeable respectively. If the buffer_size is omitted it defaults to
    DEFAULT_BUFFER_SIZE.
    """
    # XXX The usefulness of this (compared to having two separate IO
    # objects) is questionable.
    def __init__(self, reader, writer,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        """Constructor.
        The arguments are two RawIO instances.
        """
        if max_buffer_size is not None:
            warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
        if not reader.readable():
            raise IOError('"reader" argument must be readable.')
        if not writer.writable():
            raise IOError('"writer" argument must be writable.')
        # Wrap each side in its own buffered object; all methods below
        # simply delegate to the appropriate side.
        self.reader = BufferedReader(reader, buffer_size)
        self.writer = BufferedWriter(writer, buffer_size)
    def read(self, n=None):
        if n is None:
            n = -1
        return self.reader.read(n)
    def readinto(self, b):
        return self.reader.readinto(b)
    def write(self, b):
        return self.writer.write(b)
    def peek(self, n=0):
        return self.reader.peek(n)
    def read1(self, n):
        return self.reader.read1(n)
    def readable(self):
        return self.reader.readable()
    def writable(self):
        return self.writer.writable()
    def flush(self):
        return self.writer.flush()
    def close(self):
        # Close the writer first so buffered output is flushed before the
        # reader side goes away.
        self.writer.close()
        self.reader.close()
    def isatty(self):
        return self.reader.isatty() or self.writer.isatty()
    @property
    def closed(self):
        # NOTE(review): only reflects the writer's state; if close() fails
        # after closing the writer, the reader may still be open. Verify
        # whether callers rely on this before changing it.
        return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
    """A buffered interface to random access streams.
    The constructor creates a reader and writer for a seekable stream,
    raw, given in the first argument. If the buffer_size is omitted it
    defaults to DEFAULT_BUFFER_SIZE.
    """
    # One level deeper than BufferedWriter so the max_buffer_size
    # deprecation warning is attributed to this constructor's caller.
    _warning_stack_offset = 3
    def __init__(self, raw,
                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
        raw._checkSeekable()
        # Initialize both parents on the same raw stream; read and write
        # buffers are kept consistent by the overrides below.
        BufferedReader.__init__(self, raw, buffer_size)
        BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
    def seek(self, pos, whence=0):
        if not (0 <= whence <= 2):
            raise ValueError("invalid whence")
        self.flush()
        if self._read_buf:
            # Undo read ahead.
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
        # First do the raw seek, then empty the read buffer, so that
        # if the raw seek fails, we don't lose buffered data forever.
        pos = self.raw.seek(pos, whence)
        with self._read_lock:
            self._reset_read_buf()
        if pos < 0:
            raise IOError("seek() returned invalid position")
        return pos
    def tell(self):
        # Pending writes take precedence: the write-side position already
        # accounts for unflushed data.
        if self._write_buf:
            return BufferedWriter.tell(self)
        else:
            return BufferedReader.tell(self)
    def truncate(self, pos=None):
        if pos is None:
            pos = self.tell()
        # Use seek to flush the read buffer.
        return BufferedWriter.truncate(self, pos)
    def read(self, n=None):
        if n is None:
            n = -1
        # Flush pending writes before reading from the same stream.
        self.flush()
        return BufferedReader.read(self, n)
    def readinto(self, b):
        self.flush()
        return BufferedReader.readinto(self, b)
    def peek(self, n=0):
        self.flush()
        return BufferedReader.peek(self, n)
    def read1(self, n):
        self.flush()
        return BufferedReader.read1(self, n)
    def write(self, b):
        if self._read_buf:
            # Undo readahead
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
                self._reset_read_buf()
        return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
    """Base class for text I/O.
    This class provides a character and line based interface to stream
    I/O. There is no readinto method because Python's character strings
    are immutable. There is no public constructor.
    """
    # Every method here is abstract-by-convention: it reports itself as
    # unsupported via self._unsupported (inherited from IOBase, defined
    # elsewhere in this module) until a subclass overrides it.
    def read(self, n=-1):
        """Read at most n characters from stream.
        Read from underlying buffer until we have n characters or we hit EOF.
        If n is negative or omitted, read until EOF.
        """
        self._unsupported("read")
    def write(self, s):
        """Write string s to stream."""
        self._unsupported("write")
    def truncate(self, pos=None):
        """Truncate size to pos."""
        self._unsupported("truncate")
    def readline(self):
        """Read until newline or EOF.
        Returns an empty string if EOF is hit immediately.
        """
        self._unsupported("readline")
    def detach(self):
        """
        Separate the underlying buffer from the TextIOBase and return it.
        After the underlying buffer has been detached, the TextIO is in an
        unusable state.
        """
        self._unsupported("detach")
    @property
    def encoding(self):
        """Subclasses should override."""
        return None
    @property
    def newlines(self):
        """Line endings translated so far.
        Only line endings translated during reading are considered.
        Subclasses should override.
        """
        return None
    @property
    def errors(self):
        """Error setting of the decoder or encoder.
        Subclasses should override."""
        return None
# Register this pure-Python ABC with the C-accelerated io module's
# hierarchy so isinstance checks against io.TextIOBase succeed.
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
    r"""Codec used when reading a file in universal newlines mode.  It wraps
    another incremental decoder, translating \r\n and \r into \n.  It also
    records the types of newlines encountered.  When used with
    translate=False, it ensures that the newline sequence is returned in
    one piece.
    """
    # Bit flags accumulated in self.seennl; their combination indexes the
    # tuple returned by the `newlines` property below.
    _LF = 1
    _CR = 2
    _CRLF = 4
    def __init__(self, decoder, translate, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors=errors)
        self.translate = translate
        self.decoder = decoder
        self.seennl = 0
        # True when the previous chunk ended in \r and we held it back.
        self.pendingcr = False
    def decode(self, input, final=False):
        """Decode *input*, recording and (optionally) translating newlines."""
        # Run the wrapped decoder first (if any); otherwise pass through.
        if self.decoder is None:
            decoded = input
        else:
            decoded = self.decoder.decode(input, final=final)
        # Re-attach a \r that was held back by the previous call.
        if self.pendingcr and (decoded or final):
            decoded = "\r" + decoded
            self.pendingcr = False
        # Hold back a trailing \r (unless at EOF) so that a \n arriving in
        # the next chunk is never split from it; readline() then always
        # sees \r\n in one piece.
        if not final and decoded.endswith("\r"):
            decoded = decoded[:-1]
            self.pendingcr = True
        # Record which newline flavours appeared in this chunk.
        crlf_seen = decoded.count('\r\n')
        cr_seen = decoded.count('\r') - crlf_seen
        lf_seen = decoded.count('\n') - crlf_seen
        if lf_seen:
            self.seennl |= self._LF
        if cr_seen:
            self.seennl |= self._CR
        if crlf_seen:
            self.seennl |= self._CRLF
        # Optionally normalize everything to \n.
        if self.translate:
            if crlf_seen:
                decoded = decoded.replace("\r\n", "\n")
            if cr_seen:
                decoded = decoded.replace("\r", "\n")
        return decoded
    def getstate(self):
        """Return (buffered_input, flags); our pendingcr rides in bit 0."""
        if self.decoder is None:
            inner_buf, inner_flag = b"", 0
        else:
            inner_buf, inner_flag = self.decoder.getstate()
        combined = (inner_flag << 1) | (1 if self.pendingcr else 0)
        return inner_buf, combined
    def setstate(self, state):
        """Restore state produced by getstate()."""
        inner_buf, combined = state
        self.pendingcr = bool(combined & 1)
        if self.decoder is not None:
            self.decoder.setstate((inner_buf, combined >> 1))
    def reset(self):
        """Forget all newline history and any held-back \\r."""
        self.seennl = 0
        self.pendingcr = False
        if self.decoder is not None:
            self.decoder.reset()
    @property
    def newlines(self):
        # seennl is a 3-bit mask; index the precomputed answer table.
        return (None,
                "\n",
                "\r",
                ("\r", "\n"),
                "\r\n",
                ("\n", "\r\n"),
                ("\r", "\r\n"),
                ("\r", "\n", "\r\n"),
                )[self.seennl]
class TextIOWrapper(TextIOBase):
    r"""Character and line based layer over a BufferedIOBase object, buffer.
    encoding gives the name of the encoding that the stream will be
    decoded or encoded with. It defaults to locale.getpreferredencoding.
    errors determines the strictness of encoding and decoding (see the
    codecs.register) and defaults to "strict".
    newline can be None, '', '\n', '\r', or '\r\n'. It controls the
    handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the lines endings '\n', '\r',
    or '\r\n' are translated to '\n' before being returned to the
    caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
    legal values, that newline becomes the newline when the file is read
    and it is returned untranslated. On output, '\n' is converted to the
    newline.
    If line_buffering is True, a call to flush is implied when a call to
    write contains a newline character.
    """
    # How many bytes to request from the buffer per decoding pass.
    _CHUNK_SIZE = 2048
    def __init__(self, buffer, encoding=None, errors=None, newline=None,
                 line_buffering=False):
        if newline is not None and not isinstance(newline, basestring):
            raise TypeError("illegal newline type: %r" % (type(newline),))
        if newline not in (None, "", "\n", "\r", "\r\n"):
            raise ValueError("illegal newline value: %r" % (newline,))
        if encoding is None:
            try:
                import locale
            except ImportError:
                # Importing locale may fail if Python is being built
                encoding = "ascii"
            else:
                encoding = locale.getpreferredencoding()
        if not isinstance(encoding, basestring):
            raise ValueError("invalid encoding: %r" % encoding)
        if errors is None:
            errors = "strict"
        else:
            if not isinstance(errors, basestring):
                raise ValueError("invalid errors: %r" % errors)
        self._buffer = buffer
        self._line_buffering = line_buffering
        self._encoding = encoding
        self._errors = errors
        # newline=None or "" enables universal-newline *recognition*;
        # only newline=None additionally enables *translation* to \n.
        self._readuniversal = not newline
        self._readtranslate = newline is None
        self._readnl = newline
        self._writetranslate = newline != ''
        self._writenl = newline or os.linesep
        self._encoder = None
        self._decoder = None
        self._decoded_chars = ''  # buffer for text returned from decoder
        self._decoded_chars_used = 0  # offset into _decoded_chars for read()
        self._snapshot = None  # info for reconstructing decoder state
        self._seekable = self._telling = self.buffer.seekable()
        if self._seekable and self.writable():
            position = self.buffer.tell()
            if position != 0:
                # Appending to a non-empty file: tell the encoder not to
                # emit a BOM again.
                try:
                    self._get_encoder().setstate(0)
                except LookupError:
                    # Sometimes the encoder doesn't exist
                    pass
    # self._snapshot is either None, or a tuple (dec_flags, next_input)
    # where dec_flags is the second (integer) item of the decoder state
    # and next_input is the chunk of input bytes that comes next after the
    # snapshot point. We use this to reconstruct decoder states in tell().
    # Naming convention:
    # - "bytes_..." for integer variables that count input bytes
    # - "chars_..." for integer variables that count decoded characters
    def __repr__(self):
        try:
            name = self.name
        except AttributeError:
            return "<_pyio.TextIOWrapper encoding='{0}'>".format(self.encoding)
        else:
            return "<_pyio.TextIOWrapper name={0!r} encoding='{1}'>".format(
                name, self.encoding)
    @property
    def encoding(self):
        return self._encoding
    @property
    def errors(self):
        return self._errors
    @property
    def line_buffering(self):
        return self._line_buffering
    @property
    def buffer(self):
        # The underlying binary buffer this wrapper decodes/encodes through.
        return self._buffer
    def seekable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return self._seekable
    def readable(self):
        return self.buffer.readable()
    def writable(self):
        return self.buffer.writable()
    def flush(self):
        self.buffer.flush()
        # Flushing restores the ability to tell() (disabled by next()).
        self._telling = self._seekable
    def close(self):
        if self.buffer is not None and not self.closed:
            try:
                self.flush()
            finally:
                self.buffer.close()
    @property
    def closed(self):
        return self.buffer.closed
    @property
    def name(self):
        return self.buffer.name
    def fileno(self):
        return self.buffer.fileno()
    def isatty(self):
        return self.buffer.isatty()
    def write(self, s):
        # Encode s and hand it to the binary buffer; returns the number of
        # *characters* written (len of the original string).
        if self.closed:
            raise ValueError("write to closed file")
        if not isinstance(s, unicode):
            raise TypeError("can't write %s to text stream" %
                            s.__class__.__name__)
        length = len(s)
        haslf = (self._writetranslate or self._line_buffering) and "\n" in s
        if haslf and self._writetranslate and self._writenl != "\n":
            s = s.replace("\n", self._writenl)
        encoder = self._encoder or self._get_encoder()
        # XXX What if we were just reading?
        b = encoder.encode(s)
        self.buffer.write(b)
        if self._line_buffering and (haslf or "\r" in s):
            self.flush()
        # Any read-side snapshot/decoder state is now stale.
        self._snapshot = None
        if self._decoder:
            self._decoder.reset()
        return length
    def _get_encoder(self):
        # Lazily create the incremental encoder for self._encoding.
        make_encoder = codecs.getincrementalencoder(self._encoding)
        self._encoder = make_encoder(self._errors)
        return self._encoder
    def _get_decoder(self):
        # Lazily create the incremental decoder, wrapped for universal
        # newline handling when requested.
        make_decoder = codecs.getincrementaldecoder(self._encoding)
        decoder = make_decoder(self._errors)
        if self._readuniversal:
            decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
        self._decoder = decoder
        return decoder
    # The following three methods implement an ADT for _decoded_chars.
    # Text returned from the decoder is buffered here until the client
    # requests it by calling our read() or readline() method.
    def _set_decoded_chars(self, chars):
        """Set the _decoded_chars buffer."""
        self._decoded_chars = chars
        self._decoded_chars_used = 0
    def _get_decoded_chars(self, n=None):
        """Advance into the _decoded_chars buffer."""
        offset = self._decoded_chars_used
        if n is None:
            chars = self._decoded_chars[offset:]
        else:
            chars = self._decoded_chars[offset:offset + n]
        self._decoded_chars_used += len(chars)
        return chars
    def _rewind_decoded_chars(self, n):
        """Rewind the _decoded_chars buffer."""
        if self._decoded_chars_used < n:
            raise AssertionError("rewind decoded_chars out of bounds")
        self._decoded_chars_used -= n
    def _read_chunk(self):
        """
        Read and decode the next chunk of data from the BufferedReader.
        """
        # The return value is True unless EOF was reached. The decoded
        # string is placed in self._decoded_chars (replacing its previous
        # value). The entire input chunk is sent to the decoder, though
        # some of it may remain buffered in the decoder, yet to be
        # converted.
        if self._decoder is None:
            raise ValueError("no decoder")
        if self._telling:
            # To prepare for tell(), we need to snapshot a point in the
            # file where the decoder's input buffer is empty.
            dec_buffer, dec_flags = self._decoder.getstate()
            # Given this, we know there was a valid snapshot point
            # len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
        # Read a chunk, decode it, and put the result in self._decoded_chars.
        input_chunk = self.buffer.read1(self._CHUNK_SIZE)
        eof = not input_chunk
        self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
        if self._telling:
            # At the snapshot point, len(dec_buffer) bytes before the read,
            # the next input to be decoded is dec_buffer + input_chunk.
            self._snapshot = (dec_flags, dec_buffer + input_chunk)
        return not eof
    def _pack_cookie(self, position, dec_flags=0,
                     bytes_to_feed=0, need_eof=0, chars_to_skip=0):
        # The meaning of a tell() cookie is: seek to position, set the
        # decoder flags to dec_flags, read bytes_to_feed bytes, feed them
        # into the decoder with need_eof as the EOF flag, then skip
        # chars_to_skip characters of the decoded result. For most simple
        # decoders, tell() will often just give a byte offset in the file.
        return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
               (chars_to_skip<<192) | bool(need_eof)<<256)
    def _unpack_cookie(self, bigint):
        # Inverse of _pack_cookie: split the big integer into its five
        # 64-bit fields.
        rest, position = divmod(bigint, 1<<64)
        rest, dec_flags = divmod(rest, 1<<64)
        rest, bytes_to_feed = divmod(rest, 1<<64)
        need_eof, chars_to_skip = divmod(rest, 1<<64)
        return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
    def tell(self):
        # Return an opaque cookie encoding both the byte position and the
        # decoder state needed to resume decoding there (see _pack_cookie).
        if not self._seekable:
            raise IOError("underlying stream is not seekable")
        if not self._telling:
            raise IOError("telling position disabled by next() call")
        self.flush()
        position = self.buffer.tell()
        decoder = self._decoder
        if decoder is None or self._snapshot is None:
            if self._decoded_chars:
                # This should never happen.
                raise AssertionError("pending decoded text")
            return position
        # Skip backward to the snapshot point (see _read_chunk).
        dec_flags, next_input = self._snapshot
        position -= len(next_input)
        # How many decoded characters have been used up since the snapshot?
        chars_to_skip = self._decoded_chars_used
        if chars_to_skip == 0:
            # We haven't moved from the snapshot point.
            return self._pack_cookie(position, dec_flags)
        # Starting from the snapshot position, we will walk the decoder
        # forward until it gives us enough decoded characters.
        saved_state = decoder.getstate()
        try:
            # Note our initial start point.
            decoder.setstate((b'', dec_flags))
            start_pos = position
            start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
            need_eof = 0
            # Feed the decoder one byte at a time. As we go, note the
            # nearest "safe start point" before the current location
            # (a point where the decoder has nothing buffered, so seek()
            # can safely start from there and advance to this location).
            for next_byte in next_input:
                bytes_fed += 1
                chars_decoded += len(decoder.decode(next_byte))
                dec_buffer, dec_flags = decoder.getstate()
                if not dec_buffer and chars_decoded <= chars_to_skip:
                    # Decoder buffer is empty, so this is a safe start point.
                    start_pos += bytes_fed
                    chars_to_skip -= chars_decoded
                    start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
                if chars_decoded >= chars_to_skip:
                    break
            else:
                # We didn't get enough decoded data; signal EOF to get more.
                chars_decoded += len(decoder.decode(b'', final=True))
                need_eof = 1
                if chars_decoded < chars_to_skip:
                    raise IOError("can't reconstruct logical file position")
            # The returned cookie corresponds to the last safe start point.
            return self._pack_cookie(
                start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
        finally:
            decoder.setstate(saved_state)
    def truncate(self, pos=None):
        self.flush()
        if pos is None:
            pos = self.tell()
        return self.buffer.truncate(pos)
    def detach(self):
        # Give up ownership of the underlying binary buffer; this wrapper
        # becomes unusable afterwards.
        if self.buffer is None:
            raise ValueError("buffer is already detached")
        self.flush()
        buffer = self._buffer
        self._buffer = None
        return buffer
    def seek(self, cookie, whence=0):
        # cookie must be 0 or a value previously returned by tell().
        if self.closed:
            # NOTE(review): message says "tell" but this is seek();
            # consider fixing the wording in a behavior-change pass.
            raise ValueError("tell on closed file")
        if not self._seekable:
            raise IOError("underlying stream is not seekable")
        if whence == 1: # seek relative to current position
            if cookie != 0:
                raise IOError("can't do nonzero cur-relative seeks")
            # Seeking to the current position should attempt to
            # sync the underlying buffer with the current position.
            whence = 0
            cookie = self.tell()
        if whence == 2: # seek relative to end of file
            if cookie != 0:
                raise IOError("can't do nonzero end-relative seeks")
            self.flush()
            position = self.buffer.seek(0, 2)
            self._set_decoded_chars('')
            self._snapshot = None
            if self._decoder:
                self._decoder.reset()
            return position
        if whence != 0:
            raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
                             (whence,))
        if cookie < 0:
            raise ValueError("negative seek position %r" % (cookie,))
        self.flush()
        # The strategy of seek() is to go back to the safe start point
        # and replay the effect of read(chars_to_skip) from there.
        start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
            self._unpack_cookie(cookie)
        # Seek back to the safe start point.
        self.buffer.seek(start_pos)
        self._set_decoded_chars('')
        self._snapshot = None
        # Restore the decoder to its state from the safe start point.
        if cookie == 0 and self._decoder:
            self._decoder.reset()
        elif self._decoder or dec_flags or chars_to_skip:
            self._decoder = self._decoder or self._get_decoder()
            self._decoder.setstate((b'', dec_flags))
            self._snapshot = (dec_flags, b'')
        if chars_to_skip:
            # Just like _read_chunk, feed the decoder and save a snapshot.
            input_chunk = self.buffer.read(bytes_to_feed)
            self._set_decoded_chars(
                self._decoder.decode(input_chunk, need_eof))
            self._snapshot = (dec_flags, input_chunk)
            # Skip chars_to_skip of the decoded characters.
            if len(self._decoded_chars) < chars_to_skip:
                raise IOError("can't restore logical file position")
            self._decoded_chars_used = chars_to_skip
        # Finally, reset the encoder (merely useful for proper BOM handling)
        try:
            encoder = self._encoder or self._get_encoder()
        except LookupError:
            # Sometimes the encoder doesn't exist
            pass
        else:
            if cookie != 0:
                encoder.setstate(0)
            else:
                encoder.reset()
        return cookie
    def read(self, n=None):
        # Read and return up to n characters (all remaining if n < 0).
        self._checkReadable()
        if n is None:
            n = -1
        decoder = self._decoder or self._get_decoder()
        try:
            n.__index__
        except AttributeError:
            raise TypeError("an integer is required")
        if n < 0:
            # Read everything.
            result = (self._get_decoded_chars() +
                      decoder.decode(self.buffer.read(), final=True))
            self._set_decoded_chars('')
            self._snapshot = None
            return result
        else:
            # Keep reading chunks until we have n characters to return.
            eof = False
            result = self._get_decoded_chars(n)
            while len(result) < n and not eof:
                eof = not self._read_chunk()
                result += self._get_decoded_chars(n - len(result))
            return result
    def next(self):
        # Iterator protocol (Python 2). Iteration disables tell() because
        # no snapshots are taken; flush() re-enables it.
        self._telling = False
        line = self.readline()
        if not line:
            self._snapshot = None
            self._telling = self._seekable
            raise StopIteration
        return line
    def readline(self, limit=None):
        # Read one line, honoring the configured newline mode; at most
        # `limit` characters are returned when limit >= 0.
        if self.closed:
            raise ValueError("read from closed file")
        if limit is None:
            limit = -1
        elif not isinstance(limit, (int, long)):
            raise TypeError("limit must be an integer")
        # Grab all the decoded text (we will rewind any extra bits later).
        line = self._get_decoded_chars()
        start = 0
        # Make the decoder if it doesn't already exist.
        if not self._decoder:
            self._get_decoder()
        pos = endpos = None
        while True:
            if self._readtranslate:
                # Newlines are already translated, only search for \n
                pos = line.find('\n', start)
                if pos >= 0:
                    endpos = pos + 1
                    break
                else:
                    start = len(line)
            elif self._readuniversal:
                # Universal newline search. Find any of \r, \r\n, \n
                # The decoder ensures that \r\n are not split in two pieces
                # In C we'd look for these in parallel of course.
                nlpos = line.find("\n", start)
                crpos = line.find("\r", start)
                if crpos == -1:
                    if nlpos == -1:
                        # Nothing found
                        start = len(line)
                    else:
                        # Found \n
                        endpos = nlpos + 1
                        break
                elif nlpos == -1:
                    # Found lone \r
                    endpos = crpos + 1
                    break
                elif nlpos < crpos:
                    # Found \n
                    endpos = nlpos + 1
                    break
                elif nlpos == crpos + 1:
                    # Found \r\n
                    endpos = crpos + 2
                    break
                else:
                    # Found \r
                    endpos = crpos + 1
                    break
            else:
                # non-universal
                pos = line.find(self._readnl)
                if pos >= 0:
                    endpos = pos + len(self._readnl)
                    break
            if limit >= 0 and len(line) >= limit:
                endpos = limit  # reached length limit
                break
            # No line ending seen yet - get more data'
            while self._read_chunk():
                if self._decoded_chars:
                    break
            if self._decoded_chars:
                line += self._get_decoded_chars()
            else:
                # end of file
                self._set_decoded_chars('')
                self._snapshot = None
                return line
        if limit >= 0 and endpos > limit:
            endpos = limit  # don't exceed limit
        # Rewind _decoded_chars to just after the line ending we found.
        self._rewind_decoded_chars(len(line) - endpos)
        return line[:endpos]
    @property
    def newlines(self):
        # Newline kinds seen so far (delegated to the newline decoder).
        return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
    """Text I/O implementation using an in-memory buffer.
    The initial_value argument sets the value of object. The newline
    argument is like the one of TextIOWrapper's constructor.
    """
    def __init__(self, initial_value="", newline="\n"):
        # Back the text wrapper with an in-memory BytesIO, always UTF-8.
        super(StringIO, self).__init__(BytesIO(),
                                       encoding="utf-8",
                                       errors="strict",
                                       newline=newline)
        # Issue #5645: make universal newlines semantics the same as in the
        # C version, even under Windows.
        if newline is None:
            self._writetranslate = False
        if initial_value:
            if not isinstance(initial_value, unicode):
                initial_value = unicode(initial_value)
            self.write(initial_value)
            self.seek(0)
    def getvalue(self):
        # Decode the whole underlying byte buffer with a pristine decoder,
        # restoring the live decoder's state afterwards so reads continue
        # unaffected.
        self.flush()
        decoder = self._decoder or self._get_decoder()
        old_state = decoder.getstate()
        decoder.reset()
        try:
            return decoder.decode(self.buffer.getvalue(), final=True)
        finally:
            decoder.setstate(old_state)
    def __repr__(self):
        # TextIOWrapper tells the encoding in its repr. In StringIO,
        # that's a implementation detail.
        return object.__repr__(self)
    @property
    def errors(self):
        return None
    @property
    def encoding(self):
        return None
    def detach(self):
        # This doesn't make sense on StringIO.
        self._unsupported("detach")
| gpl-3.0 |
from MuVisitor import MuVisitor
from MuParser import MuParser
from enum import Enum
class MuTypeError(Exception):
    """Raised by the Mu type checker when a construct is ill-typed."""
    pass
class BaseType(Enum):
    """The basic types of the Mu language."""
    Float = 0
    Integer = 1
    Boolean = 2
    String = 3
    Nil = 4
    def printBaseType(self):
        # Dump the member (e.g. "BaseType.Integer") to stdout.
        print(self)
# Basic Type Checking for Mu programs.
class MyMuTypingVisitor(MuVisitor):
    """ANTLR visitor that type-checks a Mu parse tree.

    Keeps a map from declared identifiers to their BaseType and raises
    MuTypeError on ill-typed constructs.
    """
    def __init__(self):
        self._memorytypes = dict()  # id -> types
    def _raise(self, ctx, for_what, *types):
        # Raise a MuTypeError located at the start token of ctx, naming
        # the offending types in lowercase.
        raise MuTypeError(
            'Line {} col {}: invalid type for {}: {}'.format(
                ctx.start.line, ctx.start.column, for_what,
                ' and '.join(t.name.lower() for t in types)))
    # type declaration
    def visitVarDecl(self, ctx):
        # Record the declared type for every identifier in the list.
        vars_l = self.visit(ctx.id_l())
        tt = self.visit(ctx.typee())
        for name in vars_l:
            self._memorytypes[name] = tt
        return
    def visitBasicType(self, ctx):
        # Map the parser's type-token kind to our BaseType enum.
        if ctx.mytype.type == MuParser.INTTYPE:
            return BaseType.Integer
        elif ctx.mytype.type == MuParser.FLOATTYPE:
            return BaseType.Float
        elif ctx.mytype.type == MuParser.BOOLTYPE:
            return BaseType.Boolean
        elif ctx.mytype.type == MuParser.STRINGTYPE:
            return BaseType.String
        else:
            return BaseType.Nil
    def visitIdList(self, ctx):
        # Recursive case of the identifier list: earlier ids, then this one.
        t = self.visit(ctx.id_l())
        t.append(ctx.ID().getText())
        return t
    def visitIdListBase(self, ctx):
        # Base case: a single identifier.
        return [ctx.ID().getText()]
    # typing visitors for expressions, statements !
| gpl-3.0 |
"""
Test utilities for mobile API tests:
MobileAPITestCase - Common base class with helper methods and common functionality.
No tests are implemented in this base class.
Test Mixins to be included by concrete test classes and provide implementation of common test methods:
MobileAuthTestMixin - tests for APIs with mobile_view and is_user=False.
MobileAuthUserTestMixin - tests for APIs with mobile_view and is_user=True.
MobileCourseAccessTestMixin - tests for APIs with mobile_course_access.
"""
# pylint: disable=no-member
import ddt
from mock import patch
from unittest import skip
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from opaque_keys.edx.keys import CourseKey
from courseware.access_response import (
MobileAvailabilityError,
StartDateError,
VisibilityError
)
from courseware.tests.factories import UserFactory
from student import auth
from student.models import CourseEnrollment
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from mobile_api.test_milestones import MobileAPIMilestonesMixin
class MobileAPITestCase(ModuleStoreTestCase, APITestCase):
    """
    Base class for testing Mobile APIs.
    Subclasses are expected to define REVERSE_INFO to be used for django reverse URL, of the form:
    REVERSE_INFO = {'name': <django reverse name>, 'params': [<list of params in the URL>]}
    They may also override any of the methods defined in this class to control the behavior of the TestMixins.
    """
    def setUp(self):
        super(MobileAPITestCase, self).setUp()
        # mobile_available=True so mobile_course_access checks pass by default;
        # static_asset_path is needed for split-modulestore test courses.
        self.course = CourseFactory.create(mobile_available=True, static_asset_path="needed_for_split")
        self.user = UserFactory.create()
        # UserFactory creates users with password 'test'; cache credentials for login().
        self.password = 'test'
        self.username = self.user.username
    def tearDown(self):
        super(MobileAPITestCase, self).tearDown()
        self.logout()
    def login(self):
        """Login test user."""
        self.client.login(username=self.username, password=self.password)
    def logout(self):
        """Logout test user."""
        self.client.logout()
    def enroll(self, course_id=None):
        """Enroll test user in test course."""
        CourseEnrollment.enroll(self.user, course_id or self.course.id)
    def unenroll(self, course_id=None):
        """Unenroll test user in test course."""
        CourseEnrollment.unenroll(self.user, course_id or self.course.id)
    def login_and_enroll(self, course_id=None):
        """Shortcut for both login and enrollment of the user."""
        self.login()
        self.enroll(course_id)
    def api_response(self, reverse_args=None, expected_response_code=200, **kwargs):
        """
        Helper method for calling endpoint, verifying and returning response.
        If expected_response_code is None, doesn't verify the response' status_code.
        """
        url = self.reverse_url(reverse_args, **kwargs)
        response = self.url_method(url, **kwargs)
        if expected_response_code is not None:
            self.assertEqual(response.status_code, expected_response_code)
        return response
    def reverse_url(self, reverse_args=None, **kwargs):  # pylint: disable=unused-argument
        """Base implementation that returns URL for endpoint that's being tested."""
        reverse_args = reverse_args or {}
        # Fill in only the URL params declared by the subclass's REVERSE_INFO;
        # explicit kwargs override the defaults taken from self.course/self.user.
        if 'course_id' in self.REVERSE_INFO['params']:
            reverse_args.update({'course_id': unicode(kwargs.get('course_id', self.course.id))})
        if 'username' in self.REVERSE_INFO['params']:
            reverse_args.update({'username': kwargs.get('username', self.user.username)})
        return reverse(self.REVERSE_INFO['name'], kwargs=reverse_args)
    def url_method(self, url, **kwargs):  # pylint: disable=unused-argument
        """Base implementation that returns response from the GET method of the URL."""
        return self.client.get(url)
class MobileAuthTestMixin(object):
    """
    Test Mixin for testing APIs decorated with mobile_view.
    """
    def test_no_auth(self):
        # An unauthenticated request must be rejected with HTTP 401.
        self.logout()
        self.api_response(expected_response_code=401)
class MobileAuthUserTestMixin(MobileAuthTestMixin):
    """
    Test Mixin for testing APIs related to users: mobile_view with is_user=True.
    """
    def test_invalid_user(self):
        # A nonexistent username in the URL must yield 404.
        self.login_and_enroll()
        self.api_response(expected_response_code=404, username='no_user')
    def test_other_user(self):
        # Accessing another (real, enrolled) user's data must also yield 404.
        # login and enroll as the test user
        self.login_and_enroll()
        self.logout()
        # login and enroll as another user
        other = UserFactory.create()
        self.client.login(username=other.username, password='test')
        self.enroll()
        self.logout()
        # now login and call the API as the test user
        self.login()
        self.api_response(expected_response_code=404, username=other.username)
@ddt.ddt
class MobileCourseAccessTestMixin(MobileAPIMilestonesMixin):
    """
    Test Mixin for testing APIs marked with mobile_course_access.
    Subclasses are expected to inherit from MobileAPITestCase.
    Subclasses can override verify_success, verify_failure, and init_course_access methods.
    """
    ALLOW_ACCESS_TO_UNRELEASED_COURSE = False  # pylint: disable=invalid-name
    def verify_success(self, response):
        """Base implementation of verifying a successful response."""
        self.assertEqual(response.status_code, 200)
    def verify_failure(self, response):
        """Base implementation of verifying a failed response."""
        self.assertEqual(response.status_code, 404)
    def init_course_access(self, course_id=None):
        """Base implementation of initializing the user for each test."""
        self.login_and_enroll(course_id)
    def test_success(self):
        self.init_course_access()
        response = self.api_response(expected_response_code=None)
        self.verify_success(response)  # allow subclasses to override verification
    def test_course_not_found(self):
        # a course id that parses but matches no course
        non_existent_course_id = CourseKey.from_string('a/b/c')
        self.init_course_access(course_id=non_existent_course_id)
        response = self.api_response(expected_response_code=None, course_id=non_existent_course_id)
        self.verify_failure(response)  # allow subclasses to override verification
    @skip  # TODO fix this, see MA-1038
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_unreleased_course(self):
        self.init_course_access()
        self._verify_response(self.ALLOW_ACCESS_TO_UNRELEASED_COURSE, StartDateError(self.course.start))
    # A tuple of Role Types and Boolean values that indicate whether access should be given to that role.
    @ddt.data(
        (auth.CourseBetaTesterRole, True),
        (auth.CourseStaffRole, True),
        (auth.CourseInstructorRole, True),
        (None, False)
    )
    @ddt.unpack
    def test_non_mobile_available(self, role, should_succeed):
        self.init_course_access()
        # set mobile_available to False for the test course
        self.course.mobile_available = False
        self.store.update_item(self.course, self.user.id)
        self._verify_response(should_succeed, MobileAvailabilityError(), role)
    def test_unenrolled_user(self):
        # authenticated but unenrolled users must be denied
        self.login()
        self.unenroll()
        response = self.api_response(expected_response_code=None)
        self.verify_failure(response)
    @ddt.data(
        (auth.CourseStaffRole, True),
        (None, False)
    )
    @ddt.unpack
    def test_visible_to_staff_only_course(self, role, should_succeed):
        self.init_course_access()
        self.course.visible_to_staff_only = True
        self.store.update_item(self.course, self.user.id)
        self._verify_response(should_succeed, VisibilityError(), role)
    def _verify_response(self, should_succeed, error_type, role=None):
        """
        Calls API and verifies the response
        """
        # set user's role in the course
        if role:
            role(self.course.id).add_users(self.user)
        response = self.api_response(expected_response_code=None)
        if should_succeed:
            self.verify_success(response)
        else:
            # failed responses must also carry the expected error payload
            self.verify_failure(response)
            self.assertEqual(response.data, error_type.to_json())
| agpl-3.0 |
tumbl3w33d/ansible | lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py | 20 | 6599 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_devtestlabschedule_info
version_added: "2.9"
short_description: Get Azure Schedule facts
description:
- Get facts of Azure Schedule.
options:
resource_group:
description:
- The name of the resource group.
required: True
type: str
lab_name:
description:
- The name of the lab.
required: True
type: str
name:
description:
- The name of the schedule.
type: str
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
type: list
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Get instance of Schedule
azure_rm_devtestlabschedule_info:
resource_group: myResourceGroup
lab_name: myLab
name: mySchedule
'''
RETURN = '''
schedules:
description:
- A list of dictionaries containing facts for Schedule.
returned: always
type: complex
contains:
id:
description:
- The identifier of the artifact source.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/sc
hedules/labvmsshutdown"
resource_group:
description:
- Name of the resource group.
returned: always
type: str
sample: myResourceGroup
lab_name:
description:
- Name of the lab.
returned: always
type: str
sample: myLab
name:
description:
- The name of the environment.
returned: always
type: str
sample: lab_vms_shutdown
time:
description:
- Time of the schedule.
returned: always
type: str
sample: lab_vms_shutdown
time_zone_id:
description:
- Time zone id.
returned: always
type: str
sample: UTC+12
tags:
description:
- The tags of the resource.
returned: always
type: complex
sample: "{ 'MyTag': 'MyValue' }"
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.devtestlabs import DevTestLabsClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDtlScheduleInfo(AzureRMModuleBase):
    """Ansible info module returning facts about Azure DevTest Labs schedules."""
    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            lab_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            ),
            tags=dict(
                type='list'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        # Populated from kwargs in exec_module().
        self.mgmt_client = None
        self.resource_group = None
        self.lab_name = None
        self.name = None
        self.tags = None
        # Facts modules never change state, so tag support is disabled.
        super(AzureRMDtlScheduleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
    def exec_module(self, **kwargs):
        """Entry point invoked by AzureRMModuleBase: fills and returns self.results."""
        # Warn users still invoking the module under its pre-2.9 '_facts' name.
        is_old_facts = self.module._name == 'azure_rm_devtestlabschedule_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_devtestlabschedule_facts' module has been renamed to 'azure_rm_devtestlabschedule_info'", version='2.13')
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
        # With a name: fetch a single schedule; otherwise list all in the lab.
        if self.name:
            self.results['schedules'] = self.get()
        else:
            self.results['schedules'] = self.list()
        return self.results
    def get(self):
        """Fetch one schedule by name; returns a (0- or 1-element) list of fact dicts."""
        response = None
        results = []
        try:
            # Ansible-style snake_case names must be converted back to Azure's camelCase.
            response = self.mgmt_client.schedules.get(resource_group_name=self.resource_group,
                                                      lab_name=self.lab_name,
                                                      name=_snake_to_camel(self.name))
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # Missing schedule is not an error for a facts module; return empty.
            self.log('Could not get facts for Schedule.')
        if response and self.has_tags(response.tags, self.tags):
            results.append(self.format_response(response))
        return results
    def list(self):
        """List all schedules in the lab, filtered by the requested tags."""
        response = None
        results = []
        try:
            response = self.mgmt_client.schedules.list(resource_group_name=self.resource_group,
                                                       lab_name=self.lab_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for Schedule.')
        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_response(item))
        return results
    def format_response(self, item):
        """Flatten an SDK schedule object into the documented RETURN structure."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'lab_name': self.lab_name,
            'name': _camel_to_snake(d.get('name')),
            'id': d.get('id', None),
            'tags': d.get('tags', None),
            # daily_recurrence may be absent; guard with a default empty dict
            'time': d.get('daily_recurrence', {}).get('time'),
            'time_zone_id': d.get('time_zone_id')
        }
        return d
def main():
    """Module entry point; the AnsibleModule base class handles exit/fail."""
    AzureRMDtlScheduleInfo()


if __name__ == '__main__':
    main()
| gpl-3.0 |
Erotemic/plottool | plottool_ibeis/viz_keypoints.py | 1 | 4272 | from __future__ import absolute_import, division, print_function
import utool
import plottool_ibeis.draw_func2 as df2
import numpy as np
from plottool_ibeis import plot_helpers as ph
#(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[viz_keypoints]', DEBUG=False)
utool.noinject(__name__, '[viz_keypoints]')
def testdata_kpts():
    """Build demo data: hesaff keypoints/descriptors detected on a sample image.

    Returns:
        tuple: (kpts, vecs, imgBGR) - keypoints, descriptor vectors, BGR image.
    """
    import utool as ut
    import vtool_ibeis as vt
    import pyhesaff
    # Image selectable via the --fname command-line flag (defaults to star.png).
    img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='star.png'))
    # Hesaff detector parameters may be overridden from argv as well.
    kwargs = ut.parse_dict_from_argv(pyhesaff.get_hesaff_default_params())
    (kpts, vecs) = pyhesaff.detect_feats(img_fpath, **kwargs)
    imgBGR = vt.imread(img_fpath)
    return kpts, vecs, imgBGR
def show_keypoints(chip, kpts, fnum=0, pnum=None, **kwargs):
    r"""
    Args:
        chip (ndarray[uint8_t, ndim=2]): annotation image data
        kpts (ndarray[float32_t, ndim=2]): keypoints
        fnum (int): figure number(default = 0)
        pnum (tuple): plot number(default = None)
    Kwargs:
        ddd, title, figtitle, interpolation, cmap, heatmap, data_colorbar,
        darken, update, redraw_image, docla, doclf, projection, sel_fx
    CommandLine:
        python -m plottool_ibeis.viz_keypoints --exec-show_keypoints
    Example:
        >>> # DISABLE_DOCTEST
        >>> from plottool_ibeis.viz_keypoints import *  # NOQA
        >>> import vtool_ibeis as vt
        >>> kpts, vecs, chip = testdata_kpts()
        >>> fnum = 0
        >>> pnum = None
        >>> result = show_keypoints(chip, kpts, fnum, pnum)
        >>> print(result)
    """
    #printDBG('[df2.show_kpts] %r' % (kwargs.keys(),))
    # Draw the image, then overlay keypoint ellipses on the same axes.
    fig, ax = df2.imshow(chip, fnum=fnum, pnum=pnum, **kwargs)
    _annotate_kpts(kpts, **kwargs)
    # Tag the axes so interactive tools can recover what was plotted.
    ph.set_plotdat(ax, 'viztype', 'keypoints')
    ph.set_plotdat(ax, 'kpts', kpts)
    if kwargs.get('ddd', False):
        # ddd flag forces an immediate draw (debug display)
        ph.draw()
#@utool.indent_func
def _annotate_kpts(kpts_, sel_fx=None, **kwargs):
    r"""
    Overlay keypoint ellipses on the current matplotlib axes.

    Args:
        kpts_ (ndarray): keypoints (one row per keypoint)
        sel_fx (int or None): index of a keypoint to highlight; if None all
            keypoints are drawn uniformly

    Keywords:
        color: 3/4-tuple, ndarray, or str ('distinct' or 'scale' select
            per-keypoint coloring schemes)

    Returns:
        None

    Example:
        >>> from plottool_ibeis.viz_keypoints import *  # NOQA
        >>> sel_fx = None
        >>> kpts = np.array([[ 92.9246, 17.5453, 7.8103, -3.4594, 10.8566, 0. ],
        ...                  [ 76.8585, 24.7918, 11.4412, -3.2634, 9.6287, 0. ],
        ...                  [ 140.6303, 24.9027, 10.4051, -10.9452, 10.5991, 0. ],])
    """
    if len(kpts_) == 0:
        print('len(kpts_) == 0...')
        return
    # Default: color by keypoint scale, unless a keypoint is selected.
    #color = kwargs.get('color', 'distinct' if sel_fx is None else df2.ORANGE)
    color = kwargs.get('color', 'scale' if sel_fx is None else df2.ORANGE)
    if color == 'distinct':
        # hack for distinct colors
        color = df2.distinct_colors(len(kpts_))  # , randomize=True)
    elif color == 'scale':
        # map each keypoint's scale onto a colormap value
        import vtool_ibeis as vt
        #color = df2.scores_to_color(vt.get_scales(kpts_), cmap_='inferno', score_range=(0, 50))
        color = df2.scores_to_color(vt.get_scales(kpts_), cmap_='viridis',
                                    score_range=(5, 30), cmap_range=None)
    # Keypoint drawing kwargs
    drawkpts_kw = {
        'ell': True,
        'pts': False,
        'ell_alpha': .4,
        'ell_linewidth': 2,
        'ell_color': color,
    }
    drawkpts_kw.update(kwargs)
    if sel_fx is None:
        # draw all keypoints
        df2.draw_kpts2(kpts_, **drawkpts_kw)
    else:
        # dont draw the selected keypoint in this batch
        nonsel_kpts_ = np.vstack((kpts_[0:sel_fx], kpts_[sel_fx + 1:]))
        # Draw selected keypoint
        sel_kpts = kpts_[sel_fx:sel_fx + 1]
        import utool as ut
        if ut.isiterable(color) and ut.isiterable(color[0]):
            # per-keypoint colors: drop the selected keypoint's color too
            drawkpts_kw['ell_color'] = color[0:sel_fx] + color[sel_fx + 1:]
        # (removed a stray no-op expression statement `drawkpts_kw` here)
        drawkpts_kw2 = drawkpts_kw.copy()
        drawkpts_kw2.update({
            'ell_color': df2.BLUE,
            'eig': True,
            'rect': True,
            'ori': True,
        })
        df2.draw_kpts2(nonsel_kpts_, **drawkpts_kw)
        df2.draw_kpts2(sel_kpts, **drawkpts_kw2)
| apache-2.0 |
maikelwever/glances | glances/plugins/glances_help.py | 11 | 8932 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Help plugin.
Just a stupid plugin to display the help screen.
"""
# Import Glances libs
from glances.core.glances_globals import appname, psutil_version, version
from glances.plugins.glances_plugin import GlancesPlugin
class Plugin(GlancesPlugin):
    """Glances help plugin.

    Has no stats of its own: it pre-renders the key-binding help strings once
    and displays them in the curses interface.
    """
    def __init__(self, args=None, config=None):
        """Init the plugin."""
        GlancesPlugin.__init__(self, args=args)
        # Set the config instance
        self.config = config
        # We want to display the stat in the curse interface
        self.display_curse = True
        # init data dictionary
        self.view_data = {}
        self.generate_view_data()
    def update(self):
        """No stats. It is just a plugin to display the help."""
        pass
    def generate_view_data(self):
        """Pre-render every help line (version banner + key bindings) once."""
        self.view_data['version'] = '{0} {1}'.format(appname.title(), version)
        self.view_data['psutil_version'] = ' with PSutil {0}'.format(psutil_version)
        # loaded_config_file only exists when a config was actually loaded
        try:
            self.view_data['configuration_file'] = 'Configuration file: {0}'.format(self.config.loaded_config_file)
        except AttributeError:
            pass
        # Two-column layout: key letter + description
        msg_col = ' {0:1} {1:35}'
        msg_col2 = ' {0:1} {1:35}'
        self.view_data['sort_auto'] = msg_col.format('a', 'Sort processes automatically')
        self.view_data['sort_network'] = msg_col2.format('b', 'Bytes or bits for network I/O')
        self.view_data['sort_cpu'] = msg_col.format('c', 'Sort processes by CPU%')
        self.view_data['show_hide_alert'] = msg_col2.format('l', 'Show/hide alert logs')
        self.view_data['sort_mem'] = msg_col.format('m', 'Sort processes by MEM%')
        self.view_data['sort_user'] = msg_col.format('u', 'Sort processes by USER')
        self.view_data['delete_warning_alerts'] = msg_col2.format('w', 'Delete warning alerts')
        self.view_data['sort_proc'] = msg_col.format('p', 'Sort processes by name')
        self.view_data['delete_warning_critical_alerts'] = msg_col2.format('x', 'Delete warning and critical alerts')
        self.view_data['sort_io'] = msg_col.format('i', 'Sort processes by I/O rate')
        self.view_data['percpu'] = msg_col2.format('1', 'Global CPU or per-CPU stats')
        self.view_data['sort_cpu_times'] = msg_col.format('t', 'Sort processes by TIME')
        self.view_data['show_hide_help'] = msg_col2.format('h', 'Show/hide this help screen')
        self.view_data['show_hide_diskio'] = msg_col.format('d', 'Show/hide disk I/O stats')
        self.view_data['view_network_io_combination'] = msg_col2.format('T', 'View network I/O as combination')
        self.view_data['show_hide_filesystem'] = msg_col.format('f', 'Show/hide filesystem stats')
        self.view_data['view_cumulative_network'] = msg_col2.format('U', 'View cumulative network I/O')
        self.view_data['show_hide_network'] = msg_col.format('n', 'Show/hide network stats')
        self.view_data['show_hide_filesytem_freespace'] = msg_col2.format('F', 'Show filesystem free space')
        self.view_data['show_hide_sensors'] = msg_col.format('s', 'Show/hide sensors stats')
        self.view_data['generate_graphs'] = msg_col2.format('g', 'Generate graphs for current history')
        self.view_data['show_hide_left_sidebar'] = msg_col.format('2', 'Show/hide left sidebar')
        self.view_data['reset_history'] = msg_col2.format('r', 'Reset history')
        self.view_data['enable_disable_process_stats'] = msg_col.format('z', 'Enable/disable processes stats')
        self.view_data['quit'] = msg_col2.format('q', 'Quit (Esc and Ctrl-C also work)')
        self.view_data['enable_disable_top_extends_stats'] = msg_col.format('e', 'Enable/disable top extended stats')
        self.view_data['enable_disable_short_processname'] = msg_col.format('/', 'Enable/disable short processes name')
        self.view_data['enable_disable_docker'] = msg_col2.format('D', 'Enable/disable Docker stats')
        self.view_data['enable_disable_quick_look'] = msg_col.format('3', 'Enable/disable quick look plugin')
        self.view_data['show_hide_ip'] = msg_col2.format('I', 'Show/hide IP module')
        self.view_data['edit_pattern_filter'] = 'ENTER: Edit the process filter pattern'
    def get_view_data(self, args=None):
        """Return the pre-rendered help strings (key -> formatted line)."""
        return self.view_data
    def msg_curse(self, args=None):
        """Return the list to display in the curse interface."""
        # Init the return message
        ret = []
        # Build the string message
        # Header
        ret.append(self.curse_add_line(self.view_data['version'], 'TITLE'))
        ret.append(self.curse_add_line(self.view_data['psutil_version']))
        ret.append(self.curse_new_line())
        # Configuration file path
        if 'configuration_file' in self.view_data:
            ret.append(self.curse_new_line())
            ret.append(self.curse_add_line(self.view_data['configuration_file']))
            ret.append(self.curse_new_line())
        # Keys, laid out two entries per row
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['sort_auto']))
        ret.append(self.curse_add_line(self.view_data['sort_network']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['sort_cpu']))
        ret.append(self.curse_add_line(self.view_data['show_hide_alert']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['sort_mem']))
        ret.append(self.curse_add_line(self.view_data['delete_warning_alerts']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['sort_user']))
        ret.append(self.curse_add_line(self.view_data['delete_warning_critical_alerts']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['sort_proc']))
        ret.append(self.curse_add_line(self.view_data['percpu']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['sort_io']))
        ret.append(self.curse_add_line(self.view_data['show_hide_ip']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['sort_cpu_times']))
        ret.append(self.curse_add_line(self.view_data['enable_disable_docker']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['show_hide_diskio']))
        ret.append(self.curse_add_line(self.view_data['view_network_io_combination']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['show_hide_filesystem']))
        ret.append(self.curse_add_line(self.view_data['view_cumulative_network']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['show_hide_network']))
        ret.append(self.curse_add_line(self.view_data['show_hide_filesytem_freespace']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['show_hide_sensors']))
        ret.append(self.curse_add_line(self.view_data['generate_graphs']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['show_hide_left_sidebar']))
        ret.append(self.curse_add_line(self.view_data['reset_history']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['enable_disable_process_stats']))
        ret.append(self.curse_add_line(self.view_data['show_hide_help']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['enable_disable_quick_look']))
        ret.append(self.curse_add_line(self.view_data['quit']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['enable_disable_top_extends_stats']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['enable_disable_short_processname']))
        ret.append(self.curse_new_line())
        ret.append(self.curse_new_line())
        ret.append(self.curse_add_line(self.view_data['edit_pattern_filter']))
        # Return the message with decoration
        return ret
| lgpl-3.0 |
miguelpalacio/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/mathtls.py | 273 | 11647 | """Miscellaneous helper functions."""
from utils.compat import *
from utils.cryptomath import *
import hmac
import md5
import sha
#1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups]
goodGroupParameters = [(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\
(2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\
(2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\
(2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765
694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)]
def P_hash(hashModule, secret, seed, length):
    """TLS P_hash data-expansion function (RFC 2246, section 5).

    Repeatedly computes A(i) = HMAC(secret, A(i-1)) and emits
    HMAC(secret, A(i) + seed) until `length` bytes have been produced.

    hashModule: PEP 247 hash module (md5 or sha) used inside the HMAC.
    secret/seed: byte arrays (converted to strings for the hmac module).
    Returns a byte array of exactly `length` bytes.
    """
    # Renamed from `bytes` to avoid shadowing the builtin.
    out = createByteArrayZeros(length)
    secret = bytesToString(secret)
    seed = bytesToString(seed)
    A = seed  # A(0) = seed
    index = 0
    while 1:
        # A(i) = HMAC_hash(secret, A(i-1))
        A = hmac.HMAC(secret, A, hashModule).digest()
        output = hmac.HMAC(secret, A+seed, hashModule).digest()
        for c in output:
            if index >= length:
                return out
            out[index] = ord(c)
            index += 1
    # NOTE: the loop always returns from inside; a previously present
    # trailing `return bytes` was unreachable and has been removed.
def PRF(secret, label, seed, length):
    """TLS 1.0/1.1 pseudo-random function (RFC 2246, section 5).

    Expands (secret, label, seed) into `length` bytes by XORing
    P_MD5 over the first half of the secret with P_SHA1 over the
    second half. For odd-length secrets the halves overlap by one
    byte, as the RFC specifies (ceil vs. floor split).
    """
    #Split the secret into left and right halves
    S1 = secret[ : int(math.ceil(len(secret)/2.0))]
    S2 = secret[ int(math.floor(len(secret)/2.0)) : ]
    #Run the left half through P_MD5 and the right half through P_SHA1
    p_md5 = P_hash(md5, S1, concatArrays(stringToBytes(label), seed), length)
    p_sha1 = P_hash(sha, S2, concatArrays(stringToBytes(label), seed), length)
    #XOR the output values and return the result
    for x in range(length):
        p_md5[x] ^= p_sha1[x]
    return p_md5
def PRF_SSL(secret, seed, length):
    """SSLv3 key-derivation function.

    Round x mixes the salt 'A', 'BB', 'CCC', ... into a nested
    MD5(secret + SHA1(salt + secret + seed)) digest and concatenates
    the outputs until `length` bytes are produced (at most 26 rounds,
    i.e. 416 bytes).
    """
    secretStr = bytesToString(secret)
    seedStr = bytesToString(seed)
    bytes = createByteArrayZeros(length)
    index = 0
    for x in range(26):
        A = chr(ord('A')+x) * (x+1) # 'A', 'BB', 'CCC', etc..
        input = secretStr + sha.sha(A + secretStr + seedStr).digest()
        output = md5.md5(input).digest()
        for c in output:
            if index >= length:
                return bytes
            bytes[index] = ord(c)
            index += 1
    return bytes
def makeX(salt, username, password):
    """Compute the SRP private-key value x = SHA1(salt | SHA1(user ":" pass)).

    Raises ValueError when username or salt would not fit in a single
    length byte of the SRP protocol encoding.
    """
    if len(username) >= 256:
        raise ValueError("username too long")
    if len(salt) >= 256:
        raise ValueError("salt too long")
    inner_digest = sha.sha(username + ":" + password).digest()
    return stringToNumber(sha.sha(salt + inner_digest).digest())
#This function is used by VerifierDB.makeVerifier
def makeVerifier(username, password, bits):
    """Create an SRP verifier entry (N, g, salt, verifier) for a new account.

    `bits` selects one of the standard SRP groups (1024..8192); raises
    KeyError for an unsupported size. verifier = g^x mod N with
    x = makeX(salt, username, password).
    """
    bitsIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits]
    g,N = goodGroupParameters[bitsIndex]
    salt = bytesToString(getRandomBytes(16))
    x = makeX(salt, username, password)
    verifier = powMod(g, x, N)
    return N, g, salt, verifier
def PAD(n, x):
    """Return *x* as a byte string left-padded with NULs to the length of *n*."""
    target = len(numberToString(n))
    s = numberToString(x)
    shortfall = target - len(s)
    if shortfall > 0:
        s = "\0" * shortfall + s
    return s
def makeU(N, A, B):
    """SRP-6 scrambling parameter u = SHA1(PAD(A) | PAD(B)), padded to len(N)."""
    return stringToNumber(sha.sha(PAD(N, A) + PAD(N, B)).digest())
def makeK(N, g):
    """SRP-6a multiplier k = SHA1(N | PAD(g)), with g padded to len(N)."""
    return stringToNumber(sha.sha(numberToString(N) + PAD(N, g)).digest())
"""
MAC_SSL
Modified from Python HMAC by Trevor
"""
class MAC_SSL:
    """MAC_SSL class.

    Keyed MAC as used by SSLv3 (modified from Python's HMAC module).
    Unlike HMAC, the key and the fixed 40-byte pads are *concatenated*
    into the hash rather than XOR-combined.
    This supports the API for Cryptographic Hash Functions (PEP 247).
    """
    def __init__(self, key, msg = None, digestmod = None):
        """Create a new MAC_SSL object.

        key: key for the keyed hash object.
        msg: Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. Defaults to the md5 module.
        """
        if digestmod is None:
            # NOTE(review): ``import md5`` is Python 2 only; this module
            # predates hashlib.
            import md5
            digestmod = md5
        # NOTE(review): ``== None`` kept as-is; ``is None`` would be the
        # idiomatic spelling.  A None key builds an empty shell that
        # copy() fills in.
        if key == None: #TREVNEW - for faster copying
            return #TREVNEW
        self.digestmod = digestmod
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        self.digest_size = digestmod.digest_size
        # SSLv3 uses fixed 40-byte inner/outer pads appended to the key.
        ipad = "\x36" * 40
        opad = "\x5C" * 40
        self.inner.update(key)
        self.inner.update(ipad)
        self.outer.update(key)
        self.outer.update(opad)
        if msg is not None:
            self.update(msg)
    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)
    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        other = MAC_SSL(None) #TREVNEW - for faster copying
        other.digest_size = self.digest_size #TREVNEW
        other.digestmod = self.digestmod
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other
    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        # Finalize on copies so the running state stays usable.
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h.digest()
    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        return "".join([hex(ord(x))[2:].zfill(2)
            for x in tuple(self.digest())])
| apache-2.0 |
timkahlke/BASTA | basta/test_tax_creator.py | 1 | 4320 | import unittest
import os
import sys
from basta import NCBITaxonomyCreator
class TestDB(unittest.TestCase):
    """Tests for basta's NCBITaxonomyCreator.

    setUp writes miniature NCBI ``names.tab``/``nodes.tab`` fixture files
    next to this module; tearDown deletes them again.
    """
    def setUp(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.pwd = dir_path
        # Minimal NCBI names dump: taxid | name | unique name | name class.
        with open(os.path.join(self.pwd,"names.tab"), "w") as f:
            f.write("2|\tBacteria|\tBacteria <prokaryotes>|\tscientific name|\n")
            f.write("1|\troot\t|\t|\tscientific name|\n")
            f.write("7|\tAzorhizobium caulinodans|\t|\tscientific name|\n")
            f.write("6|\tAzorhizobium|\t|\tscientific name|\n")
            f.write("335928|\tXanthobacteraceae|\t|\tscientific name |\n")
            f.write("356|\tRhizobiales|\t|\tscientific name |\n")
            f.write("28211|\tAlphaproteobacteria|\t| scientific name |\n")
            f.write("1224|\tProteobacteria|\t| scientific name |\n")
            f.write("1236|\tGammaproteobacteria|\t|\tscientific name |\n")
            f.write("28216|\tBetaproteobacteria|\t|\tscientific name |\n")
            f.write("131567|\tcellular organisms|\t|\tscientific name |\n")
        # Minimal NCBI nodes dump: taxid | parent taxid | rank | ...
        with open(os.path.join(self.pwd,"nodes.tab"), "w") as g:
            g.write("1|\t 1 |\tno rank |\t| 8 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | |\n")
            g.write("2|\t131567 |\tsuperkingdom |\t| 0 | 0 | 11 | 0 | 0 | 0 | 0 | 0 | |\n")
            g.write("6|\t335928 |\tgenus |\t| 0 | 1 | 11 | 1 | 0 | 1 | 0 | 0 | |\n")
            g.write("7|\t6 |\tspecies |\t| 0 | 1 | 11 | 1 | 0 | 1 | 1 | 0 | |\n")
            g.write("335928|\t356 |\tfamily |\t| 0 | 1 | 11 | 1 | 0 | 1 | 0 | 0 | |\n")
            g.write("356|\t28211 |\torder |\t| 0 | 1 | 11 | 1 | 0 | 1 | 0 | 0 | |\n")
            g.write("28211|\t1224 |\t class |\t| 0 | 1 | 11 | 1 | 0 | 1 | 0 | 0 | |\n")
            g.write("1224|\t2|\t phylum |\t| 0 | 1 | 11 | 1 | 0 | 1 | 0 | 0 | |\n")
            g.write("1236|\t2 |\tphylum |\t| 0 | 1 | 11 | 1 | 0 | 1 | 0 | 0 | |\n")
            g.write("28216|\t2|\tphylum |\t| 0 | 1 | 11 | 1 | 0 | 1 | 0 | 0 | |\n")
            g.write("131567|\t1|\tno rank |\t | 8 | 1 | 1 | 1 | 0 | 1 | 1 | 0 | |\n")
        self.nodes = os.path.join(self.pwd,"nodes.tab")
        self.names = os.path.join(self.pwd,"names.tab")
    def tearDown(self):
        # Remove the fixture files written by setUp.
        os.remove(os.path.join(self.pwd,"names.tab"))
        os.remove(os.path.join(self.pwd,"nodes.tab"))
    def test_creator(self):
        # Construction alone should succeed on the fixture dumps.
        creator = NCBITaxonomyCreator.Creator(self.names,self.nodes)
        self.assertIsInstance(creator,NCBITaxonomyCreator.Creator)
    def test_tree(self):
        # The nested tree should mirror the parent links from nodes.tab.
        # (Expected leaf name has the space replaced by an underscore —
        # presumably done by Creator; confirm against its implementation.)
        creator = NCBITaxonomyCreator.Creator(self.names,self.nodes)
        self.assertEqual(len(creator.tree["1"]["131567"]),3)
        self.assertEqual(creator.tree["1"]["131567"]["2"]["1236"]["name"],"Gammaproteobacteria")
        self.assertEqual(creator.tree["1"]["131567"]["2"]["1224"]["28211"]["356"]["rank"],"order")
        self.assertEqual(creator.tree["1"]["131567"]["2"]["1224"]["28211"]["356"]["335928"]["6"]["7"]["name"],"Azorhizobium_caulinodans")
    def test_fill_taxon_pre(self):
        # Padding a partial lineage with "unknown;" up to (excluding) the rank.
        creator = NCBITaxonomyCreator.Creator(self.names,self.nodes)
        self.assertEqual(creator._fill_taxon_pre_rank("species",""),"unknown;unknown;unknown;unknown;unknown;unknown;")
        self.assertEqual(creator._fill_taxon_pre_rank("species","1;2;3;"),"1;2;3;unknown;unknown;unknown;")
        self.assertEqual(creator._fill_taxon_pre_rank("family","1;2;3;"),"1;2;3;unknown;")
    def test_fill_taxon_post(self):
        # NOTE(review): despite the name, this also exercises
        # _fill_taxon_pre_rank (with "genus"/"order"); confirm whether a
        # _fill_taxon_post_rank test was intended here.
        creator = NCBITaxonomyCreator.Creator(self.names,self.nodes)
        self.assertEqual(creator._fill_taxon_pre_rank("genus",""),"unknown;unknown;unknown;unknown;unknown;")
        self.assertEqual(creator._fill_taxon_pre_rank("genus","1;2;3;"),"1;2;3;unknown;unknown;")
        self.assertEqual(creator._fill_taxon_pre_rank("order","1;2;3;"),"1;2;3;")
# Allow running this test module directly.
if __name__ =='__main__':
    unittest.main()
| gpl-3.0 |
huihoo/reader | vendor/paypal/standard/ipn/migrations/0001_first_migration.py | 2 | 21676 | # -*- coding: utf-8 -*-
from django.db import models
from south.db import db
from vendor.paypal.standard.ipn.models import *
class Migration:
    """South (pre-Django-1.7) schema migration 0001 for the ``ipn`` app.

    Creates the ``paypal_ipn`` table holding every field PayPal may post in
    an Instant Payment Notification.  The ``models`` dict at the bottom is
    South's frozen ORM snapshot and is auto-generated — do not hand-edit.
    """
    def forwards(self, orm):
        """Apply the migration: create ``paypal_ipn``."""
        # Adding model 'PayPalIPN'
        db.create_table('paypal_ipn', (
            ('id', models.AutoField(primary_key=True)),
            ('business', models.CharField(max_length=127, blank=True)),
            ('charset', models.CharField(max_length=32, blank=True)),
            ('custom', models.CharField(max_length=255, blank=True)),
            ('notify_version', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('parent_txn_id', models.CharField("Parent Transaction ID", max_length=19, blank=True)),
            ('receiver_email', models.EmailField(max_length=127, blank=True)),
            ('receiver_id', models.CharField(max_length=127, blank=True)),
            ('residence_country', models.CharField(max_length=2, blank=True)),
            ('test_ipn', models.BooleanField(default=False, blank=True)),
            ('txn_id', models.CharField("Transaction ID", max_length=19, blank=True)),
            ('txn_type', models.CharField("Transaction Type", max_length=128, blank=True)),
            ('verify_sign', models.CharField(max_length=255, blank=True)),
            ('address_country', models.CharField(max_length=64, blank=True)),
            ('address_city', models.CharField(max_length=40, blank=True)),
            ('address_country_code', models.CharField(max_length=64, blank=True)),
            ('address_name', models.CharField(max_length=128, blank=True)),
            ('address_state', models.CharField(max_length=40, blank=True)),
            ('address_status', models.CharField(max_length=11, blank=True)),
            ('address_street', models.CharField(max_length=200, blank=True)),
            ('address_zip', models.CharField(max_length=20, blank=True)),
            ('contact_phone', models.CharField(max_length=20, blank=True)),
            ('first_name', models.CharField(max_length=64, blank=True)),
            ('last_name', models.CharField(max_length=64, blank=True)),
            ('payer_business_name', models.CharField(max_length=127, blank=True)),
            ('payer_email', models.CharField(max_length=127, blank=True)),
            ('payer_id', models.CharField(max_length=13, blank=True)),
            ('auth_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('auth_exp', models.CharField(max_length=28, blank=True)),
            ('auth_id', models.CharField(max_length=19, blank=True)),
            ('auth_status', models.CharField(max_length=9, blank=True)),
            ('exchange_rate', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=16, blank=True)),
            ('invoice', models.CharField(max_length=127, blank=True)),
            ('item_name', models.CharField(max_length=127, blank=True)),
            ('item_number', models.CharField(max_length=127, blank=True)),
            ('mc_currency', models.CharField(default='USD', max_length=32, blank=True)),
            ('mc_fee', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('mc_gross', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('mc_handling', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('mc_shipping', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('memo', models.CharField(max_length=255, blank=True)),
            ('num_cart_items', models.IntegerField(default=0, null=True, blank=True)),
            ('option_name1', models.CharField(max_length=64, blank=True)),
            ('option_name2', models.CharField(max_length=64, blank=True)),
            ('payer_status', models.CharField(max_length=10, blank=True)),
            ('payment_date', models.DateTimeField(null=True, blank=True)),
            ('payment_gross', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('payment_status', models.CharField(max_length=9, blank=True)),
            ('payment_type', models.CharField(max_length=7, blank=True)),
            ('pending_reason', models.CharField(max_length=14, blank=True)),
            ('protection_eligibility', models.CharField(max_length=32, blank=True)),
            ('quantity', models.IntegerField(default=1, null=True, blank=True)),
            ('reason_code', models.CharField(max_length=15, blank=True)),
            ('remaining_settle', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('settle_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('settle_currency', models.CharField(max_length=32, blank=True)),
            ('shipping', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('shipping_method', models.CharField(max_length=255, blank=True)),
            ('tax', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('transaction_entity', models.CharField(max_length=7, blank=True)),
            ('auction_buyer_id', models.CharField(max_length=64, blank=True)),
            ('auction_closing_date', models.DateTimeField(null=True, blank=True)),
            ('auction_multi_item', models.IntegerField(default=0, null=True, blank=True)),
            ('for_auction', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('amount_per_cycle', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('initial_payment_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('next_payment_date', models.DateTimeField(null=True, blank=True)),
            ('outstanding_balance', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('payment_cycle', models.CharField(max_length=32, blank=True)),
            ('period_type', models.CharField(max_length=32, blank=True)),
            ('product_name', models.CharField(max_length=128, blank=True)),
            ('product_type', models.CharField(max_length=128, blank=True)),
            ('profile_status', models.CharField(max_length=32, blank=True)),
            ('recurring_payment_id', models.CharField(max_length=128, blank=True)),
            ('rp_invoice_id', models.CharField(max_length=127, blank=True)),
            ('time_created', models.DateTimeField(null=True, blank=True)),
            ('amount1', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('amount2', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('amount3', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('mc_amount1', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('mc_amount2', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('mc_amount3', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('password', models.CharField(max_length=24, blank=True)),
            ('period1', models.CharField(max_length=32, blank=True)),
            ('period2', models.CharField(max_length=32, blank=True)),
            ('period3', models.CharField(max_length=32, blank=True)),
            ('reattempt', models.CharField(max_length=1, blank=True)),
            ('recur_times', models.IntegerField(default=0, null=True, blank=True)),
            ('recurring', models.CharField(max_length=1, blank=True)),
            ('retry_at', models.DateTimeField(null=True, blank=True)),
            ('subscr_date', models.DateTimeField(null=True, blank=True)),
            ('subscr_effective', models.DateTimeField(null=True, blank=True)),
            ('subscr_id', models.CharField(max_length=19, blank=True)),
            ('username', models.CharField(max_length=64, blank=True)),
            ('case_creation_date', models.DateTimeField(null=True, blank=True)),
            ('case_id', models.CharField(max_length=14, blank=True)),
            ('case_type', models.CharField(max_length=24, blank=True)),
            ('receipt_id', models.CharField(max_length=64, blank=True)),
            ('currency_code', models.CharField(default='USD', max_length=32, blank=True)),
            ('handling_amount', models.DecimalField(default=0, null=True, max_digits=64, decimal_places=2, blank=True)),
            ('transaction_subject', models.CharField(max_length=255, blank=True)),
            ('ipaddress', models.IPAddressField(blank=True)),
            ('flag', models.BooleanField(default=False, blank=True)),
            ('flag_code', models.CharField(max_length=16, blank=True)),
            ('flag_info', models.TextField(blank=True)),
            ('query', models.TextField(blank=True)),
            ('response', models.TextField(blank=True)),
            ('created_at', models.DateTimeField(auto_now_add=True)),
            ('updated_at', models.DateTimeField(auto_now=True)),
            ('from_view', models.CharField(max_length=6, null=True, blank=True)),
        ))
        db.send_create_signal('ipn', ['PayPalIPN'])
    def backwards(self, orm):
        """Reverse the migration: drop ``paypal_ipn``."""
        # Deleting model 'PayPalIPN'
        db.delete_table('paypal_ipn')
    # Frozen ORM state (auto-generated by South; keep in sync with forwards()).
    models = {
        'ipn.paypalipn': {
            'Meta': {'db_table': '"paypal_ipn"'},
            'address_city': ('models.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_country': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'address_country_code': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'address_name': ('models.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'address_state': ('models.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_status': ('models.CharField', [], {'max_length': '11', 'blank': 'True'}),
            'address_street': ('models.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'address_zip': ('models.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'amount': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount1': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount2': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount3': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount_per_cycle': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'auction_buyer_id': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'auction_closing_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'auction_multi_item': ('models.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'auth_amount': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'auth_exp': ('models.CharField', [], {'max_length': '28', 'blank': 'True'}),
            'auth_id': ('models.CharField', [], {'max_length': '19', 'blank': 'True'}),
            'auth_status': ('models.CharField', [], {'max_length': '9', 'blank': 'True'}),
            'business': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'case_creation_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'case_id': ('models.CharField', [], {'max_length': '14', 'blank': 'True'}),
            'case_type': ('models.CharField', [], {'max_length': '24', 'blank': 'True'}),
            'charset': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'contact_phone': ('models.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'created_at': ('models.DateTimeField', [], {'auto_now_add': 'True'}),
            'currency_code': ('models.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
            'custom': ('models.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'exchange_rate': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}),
            'first_name': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'flag': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'flag_code': ('models.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'flag_info': ('models.TextField', [], {'blank': 'True'}),
            'for_auction': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'from_view': ('models.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'handling_amount': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'initial_payment_amount': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'invoice': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'ipaddress': ('models.IPAddressField', [], {'blank': 'True'}),
            'item_name': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'item_number': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'last_name': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'mc_amount1': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_amount2': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_amount3': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_currency': ('models.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
            'mc_fee': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_gross': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_handling': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_shipping': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'memo': ('models.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'next_payment_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'notify_version': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'num_cart_items': ('models.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'option_name1': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'option_name2': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'outstanding_balance': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'parent_txn_id': ('models.CharField', ['"Parent Transaction ID"'], {'max_length': '19', 'blank': 'True'}),
            'password': ('models.CharField', [], {'max_length': '24', 'blank': 'True'}),
            'payer_business_name': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'payer_email': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'payer_id': ('models.CharField', [], {'max_length': '13', 'blank': 'True'}),
            'payer_status': ('models.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'payment_cycle': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'payment_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'payment_gross': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'payment_status': ('models.CharField', [], {'max_length': '9', 'blank': 'True'}),
            'payment_type': ('models.CharField', [], {'max_length': '7', 'blank': 'True'}),
            'pending_reason': ('models.CharField', [], {'max_length': '14', 'blank': 'True'}),
            'period1': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'period2': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'period3': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'period_type': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'product_name': ('models.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'product_type': ('models.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'profile_status': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'protection_eligibility': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'quantity': ('models.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
            'query': ('models.TextField', [], {'blank': 'True'}),
            'reason_code': ('models.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'reattempt': ('models.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'receipt_id': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'receiver_email': ('models.EmailField', [], {'max_length': '127', 'blank': 'True'}),
            'receiver_id': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'recur_times': ('models.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'recurring': ('models.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'recurring_payment_id': ('models.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'remaining_settle': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'residence_country': ('models.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'response': ('models.TextField', [], {'blank': 'True'}),
            'retry_at': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'rp_invoice_id': ('models.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'settle_amount': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'settle_currency': ('models.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'shipping': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'shipping_method': ('models.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'subscr_date': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'subscr_effective': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'subscr_id': ('models.CharField', [], {'max_length': '19', 'blank': 'True'}),
            'tax': ('models.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'test_ipn': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'time_created': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'transaction_entity': ('models.CharField', [], {'max_length': '7', 'blank': 'True'}),
            'transaction_subject': ('models.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'txn_id': ('models.CharField', ['"Transaction ID"'], {'max_length': '19', 'blank': 'True'}),
            'txn_type': ('models.CharField', ['"Transaction Type"'], {'max_length': '128', 'blank': 'True'}),
            'updated_at': ('models.DateTimeField', [], {'auto_now': 'True'}),
            'username': ('models.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'verify_sign': ('models.CharField', [], {'max_length': '255', 'blank': 'True'})
        }
    }
complete_apps = ['ipn'] | mit |
chris-chris/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py | 79 | 3122 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.contrib.distributions.python.ops.bijectors.inline import Inline
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class InlineBijectorTest(test.TestCase):
  """Tests correctness of the inline constructed bijector."""

  def testBijector(self):
    """An Inline built from exp/log callables should match the Exp bijector."""
    with self.test_session():
      exp = Exp(event_ndims=1)
      inline = Inline(
          forward_fn=math_ops.exp,
          inverse_fn=math_ops.log,
          inverse_log_det_jacobian_fn=(
              lambda y: -math_ops.reduce_sum(  # pylint: disable=g-long-lambda
                  math_ops.log(y), reduction_indices=-1)),
          forward_log_det_jacobian_fn=(
              lambda x: math_ops.reduce_sum(x, reduction_indices=-1)),
          name="exp")
      self.assertEqual(exp.name, inline.name)
      x = [[[1., 2.], [3., 4.], [5., 6.]]]
      y = np.exp(x)
      # Forward/inverse round-trip against numpy ground truth.
      self.assertAllClose(y, inline.forward(x).eval())
      self.assertAllClose(x, inline.inverse(y).eval())
      self.assertAllClose(
          -np.sum(np.log(y), axis=-1),
          inline.inverse_log_det_jacobian(y).eval())
      # Forward and inverse log-det-Jacobians must be negatives of each other.
      self.assertAllClose(-inline.inverse_log_det_jacobian(y).eval(),
                          inline.forward_log_det_jacobian(x).eval())

  def testShapeGetters(self):
    """Shape-only Inline: event-shape fns append/strip a trailing 1 dim."""
    with self.test_session():
      bijector = Inline(
          forward_event_shape_tensor_fn=lambda x: array_ops.concat((x, [1]), 0),
          forward_event_shape_fn=lambda x: x.as_list() + [1],
          inverse_event_shape_tensor_fn=lambda x: x[:-1],
          inverse_event_shape_fn=lambda x: x[:-1],
          name="shape_only")
      x = tensor_shape.TensorShape([1, 2, 3])
      y = tensor_shape.TensorShape([1, 2, 3, 1])
      self.assertAllEqual(y, bijector.forward_event_shape(x))
      self.assertAllEqual(
          y.as_list(),
          bijector.forward_event_shape_tensor(x.as_list()).eval())
      self.assertAllEqual(x, bijector.inverse_event_shape(y))
      self.assertAllEqual(
          x.as_list(),
          bijector.inverse_event_shape_tensor(y.as_list()).eval())
# Allow running this test module directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
dgillis/scrapy | tests/test_downloadermiddleware_redirect.py | 47 | 11713 | # -*- coding: utf-8 -*-
import unittest
from scrapy.downloadermiddlewares.redirect import RedirectMiddleware, MetaRefreshMiddleware
from scrapy.spiders import Spider
from scrapy.exceptions import IgnoreRequest
from scrapy.http import Request, Response, HtmlResponse
from scrapy.utils.test import get_crawler
class RedirectMiddlewareTest(unittest.TestCase):
def setUp(self):
self.crawler = get_crawler(Spider)
self.spider = self.crawler._create_spider('foo')
self.mw = RedirectMiddleware.from_crawler(self.crawler)
def test_priority_adjust(self):
req = Request('http://a.com')
rsp = Response('http://a.com', headers={'Location': 'http://a.com/redirected'}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert req2.priority > req.priority
def test_redirect_301(self):
def _test(method):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, method=method)
rsp = Response(url, headers={'Location': url2}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, method)
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
_test('GET')
_test('POST')
_test('HEAD')
def test_dont_redirect(self):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, meta={'dont_redirect': True})
rsp = Response(url, headers={'Location': url2}, status=301)
r = self.mw.process_response(req, rsp, self.spider)
assert isinstance(r, Response)
assert r is rsp
# Test that it redirects when dont_redirect is False
req = Request(url, meta={'dont_redirect': False})
rsp = Response(url2, status=200)
r = self.mw.process_response(req, rsp, self.spider)
assert isinstance(r, Response)
assert r is rsp
def test_redirect_302(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_redirect_302_head(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='HEAD')
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'HEAD')
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request('http://scrapytest.org/302')
rsp = Response('http://scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
assert 'redirect_times' in req.meta
self.assertEqual(req.meta['redirect_times'], 1)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
rsp = Response('http://www.scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_redirect_urls(self):
req1 = Request('http://scrapytest.org/first')
rsp1 = Response('http://scrapytest.org/first', headers={'Location': '/redirected'}, status=302)
req2 = self.mw.process_response(req1, rsp1, self.spider)
rsp2 = Response('http://scrapytest.org/redirected', headers={'Location': '/redirected2'}, status=302)
req3 = self.mw.process_response(req2, rsp2, self.spider)
self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
    def test_spider_handling(self):
        """Spiders listing 3xx in ``handle_httpstatus_list`` get the raw
        response instead of a follow-up redirect request."""
        smartspider = self.crawler._create_spider('smarty')
        smartspider.handle_httpstatus_list = [404, 301, 302]
        url = 'http://www.example.com/301'
        url2 = 'http://www.example.com/redirected'
        req = Request(url)
        rsp = Response(url, headers={'Location': url2}, status=301)
        r = self.mw.process_response(req, rsp, smartspider)
        # The middleware must pass the 301 response through untouched.
        self.assertIs(r, rsp)
    def test_request_meta_handling(self):
        """Per-request meta keys ``handle_httpstatus_list`` and
        ``handle_httpstatus_all`` also disable redirect handling."""
        url = 'http://www.example.com/301'
        url2 = 'http://www.example.com/redirected'
        def _test_passthrough(req):
            # Helper: the 301 response must come back unmodified.
            rsp = Response(url, headers={'Location': url2}, status=301, request=req)
            r = self.mw.process_response(req, rsp, self.spider)
            self.assertIs(r, rsp)
        _test_passthrough(Request(url, meta={'handle_httpstatus_list':
                          [404, 301, 302]}))
        _test_passthrough(Request(url, meta={'handle_httpstatus_all': True}))
def test_latin1_location(self):
req = Request('http://scrapytest.org/first')
latin1_location = u'/ação'.encode('latin1') # HTTP historically supports latin1
resp = Response('http://scrapytest.org/first', headers={'Location': latin1_location}, status=302)
req_result = self.mw.process_response(req, resp, self.spider)
perc_encoded_utf8_url = 'http://scrapytest.org/a%C3%A7%C3%A3o'
self.assertEquals(perc_encoded_utf8_url, req_result.url)
def test_location_with_wrong_encoding(self):
req = Request('http://scrapytest.org/first')
utf8_location = u'/ação' # header with wrong encoding (utf-8)
resp = Response('http://scrapytest.org/first', headers={'Location': utf8_location}, status=302)
req_result = self.mw.process_response(req, resp, self.spider)
perc_encoded_utf8_url = 'http://scrapytest.org/a%C3%83%C2%A7%C3%83%C2%A3o'
self.assertEquals(perc_encoded_utf8_url, req_result.url)
class MetaRefreshMiddlewareTest(unittest.TestCase):
    """Tests for MetaRefreshMiddleware: redirects driven by
    ``<meta http-equiv="refresh">`` tags found in HTML bodies."""
    def setUp(self):
        crawler = get_crawler(Spider)
        self.spider = crawler._create_spider('foo')
        self.mw = MetaRefreshMiddleware.from_crawler(crawler)
    def _body(self, interval=5, url='http://example.org/newpage'):
        # Minimal HTML page whose meta tag requests a refresh/redirect.
        html = u"""<html><head><meta http-equiv="refresh" content="{0};url={1}"/></head></html>"""
        return html.format(interval, url).encode('utf-8')
    def test_priority_adjust(self):
        # Redirected requests are scheduled with a higher priority.
        req = Request('http://a.com')
        rsp = HtmlResponse(req.url, body=self._body())
        req2 = self.mw.process_response(req, rsp, self.spider)
        assert req2.priority > req.priority
    def test_meta_refresh(self):
        req = Request(url='http://example.org')
        rsp = HtmlResponse(req.url, body=self._body())
        req2 = self.mw.process_response(req, rsp, self.spider)
        assert isinstance(req2, Request)
        self.assertEqual(req2.url, 'http://example.org/newpage')
    def test_meta_refresh_with_high_interval(self):
        # meta-refresh with high intervals don't trigger redirects
        req = Request(url='http://example.org')
        rsp = HtmlResponse(url='http://example.org',
                           body=self._body(interval=1000),
                           encoding='utf-8')
        rsp2 = self.mw.process_response(req, rsp, self.spider)
        assert rsp is rsp2
    def test_meta_refresh_trough_posted_request(self):
        # A meta-refresh redirect of a POST is downgraded to a GET with the
        # body and entity headers stripped.
        req = Request(url='http://example.org', method='POST', body='test',
                      headers={'Content-Type': 'text/plain', 'Content-length': '4'})
        rsp = HtmlResponse(req.url, body=self._body())
        req2 = self.mw.process_response(req, rsp, self.spider)
        assert isinstance(req2, Request)
        self.assertEqual(req2.url, 'http://example.org/newpage')
        self.assertEqual(req2.method, 'GET')
        assert 'Content-Type' not in req2.headers, \
            "Content-Type header must not be present in redirected request"
        assert 'Content-Length' not in req2.headers, \
            "Content-Length header must not be present in redirected request"
        assert not req2.body, \
            "Redirected body must be empty, not '%s'" % req2.body
    def test_max_redirect_times(self):
        # Same contract as the HTTP-redirect variant: one hop allowed, then
        # IgnoreRequest.
        self.mw.max_redirect_times = 1
        req = Request('http://scrapytest.org/max')
        rsp = HtmlResponse(req.url, body=self._body())
        req = self.mw.process_response(req, rsp, self.spider)
        assert isinstance(req, Request)
        assert 'redirect_times' in req.meta
        self.assertEqual(req.meta['redirect_times'], 1)
        self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
    def test_ttl(self):
        # Per-request redirect_ttl overrides the (large) global limit.
        self.mw.max_redirect_times = 100
        req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
        rsp = HtmlResponse(req.url, body=self._body())
        req = self.mw.process_response(req, rsp, self.spider)
        assert isinstance(req, Request)
        self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
    def test_redirect_urls(self):
        # redirect_urls meta accumulates every URL along the chain.
        req1 = Request('http://scrapytest.org/first')
        rsp1 = HtmlResponse(req1.url, body=self._body(url='/redirected'))
        req2 = self.mw.process_response(req1, rsp1, self.spider)
        assert isinstance(req2, Request), req2
        rsp2 = HtmlResponse(req2.url, body=self._body(url='/redirected2'))
        req3 = self.mw.process_response(req2, rsp2, self.spider)
        assert isinstance(req3, Request), req3
        self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
        self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
        self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
        self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
gnychis/grforwarder | gnuradio-core/src/python/gnuradio/gr/gr_threading_24.py | 94 | 25507 | """Thread module emulating a subset of Java's threading model."""
# This started life as the threading.py module of Python 2.4
# It's been patched to fix a problem with join, where a KeyboardInterrupt
# caused a lock to be left in the acquired state.
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
from collections import deque
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'Condition', 'currentThread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
    class _Verbose(object):
        # Debug mixin: prints a note tagged with the current thread's name
        # when verbose mode is enabled (per-instance or via _VERBOSE).
        def __init__(self, verbose=None):
            if verbose is None:
                verbose = _VERBOSE
            self.__verbose = verbose
        def _note(self, format, *args):
            # Formats lazily so disabled verbosity costs almost nothing.
            if self.__verbose:
                format = format % args
                format = "%s: %s\n" % (
                    currentThread().getName(), format)
                _sys.stderr.write(format)
else:
    # Disable this when using "python -O"
    class _Verbose(object):
        def __init__(self, verbose=None):
            pass
        def _note(self, *args):
            pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
    # Install a profile hook applied to every thread started afterwards.
    global _profile_hook
    _profile_hook = func
def settrace(func):
    # Install a trace hook applied to every thread started afterwards.
    global _trace_hook
    _trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
    # Factory kept for API compatibility; returns a reentrant lock.
    return _RLock(*args, **kwargs)
class _RLock(_Verbose):
    """Reentrant lock: the owning thread may acquire() it repeatedly and
    must call release() once per acquire()."""
    def __init__(self, verbose=None):
        _Verbose.__init__(self, verbose)
        self.__block = _allocate_lock()
        self.__owner = None
        self.__count = 0
    def __repr__(self):
        return "<%s(%s, %d)>" % (
            self.__class__.__name__,
            self.__owner and self.__owner.getName(),
            self.__count)
    def acquire(self, blocking=1):
        me = currentThread()
        if self.__owner is me:
            # Re-entrant acquisition: just bump the recursion count.
            self.__count = self.__count + 1
            if __debug__:
                self._note("%s.acquire(%s): recursive success", self, blocking)
            return 1
        rc = self.__block.acquire(blocking)
        if rc:
            self.__owner = me
            self.__count = 1
            if __debug__:
                self._note("%s.acquire(%s): initial succes", self, blocking)
        else:
            if __debug__:
                self._note("%s.acquire(%s): failure", self, blocking)
        return rc
    def release(self):
        me = currentThread()
        assert self.__owner is me, "release() of un-acquire()d lock"
        self.__count = count = self.__count - 1
        if not count:
            # Final release: give up ownership and wake a blocked acquirer.
            self.__owner = None
            self.__block.release()
            if __debug__:
                self._note("%s.release(): final release", self)
        else:
            if __debug__:
                self._note("%s.release(): non-final release", self)
    # Internal methods used by condition variables
    def _acquire_restore(self, (count, owner)):
        # Python 2 tuple-parameter syntax.  Restores the (count, owner)
        # state saved by _release_save() after a Condition.wait().
        self.__block.acquire()
        self.__count = count
        self.__owner = owner
        if __debug__:
            self._note("%s._acquire_restore()", self)
    def _release_save(self):
        # Fully releases the lock regardless of recursion depth and returns
        # the state needed to restore it later.
        if __debug__:
            self._note("%s._release_save()", self)
        count = self.__count
        self.__count = 0
        owner = self.__owner
        self.__owner = None
        self.__block.release()
        return (count, owner)
    def _is_owned(self):
        return self.__owner is currentThread()
def Condition(*args, **kwargs):
    # Factory kept for API compatibility; returns a condition variable.
    return _Condition(*args, **kwargs)
class _Condition(_Verbose):
    """Condition variable: threads wait() for a notify()/notifyAll() while
    cooperating with an associated lock (a fresh RLock by default)."""
    def __init__(self, lock=None, verbose=None):
        _Verbose.__init__(self, verbose)
        if lock is None:
            lock = RLock()
        self.__lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock). Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        self.__waiters = []
    def __repr__(self):
        return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
    def _release_save(self):
        self.__lock.release() # No state to save
    def _acquire_restore(self, x):
        self.__lock.acquire() # Ignore saved state
    def _is_owned(self):
        # Return True if lock is owned by currentThread.
        # This method is called only if __lock doesn't have _is_owned().
        if self.__lock.acquire(0):
            self.__lock.release()
            return False
        else:
            return True
    def wait(self, timeout=None):
        assert self._is_owned(), "wait() of un-acquire()d lock"
        # Each waiter parks on its own plain lock, acquired twice: the
        # second acquire blocks until notify() releases it.
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try: # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                waiter.acquire()
                if __debug__:
                    self._note("%s.wait(): got it", self)
            else:
                # Balancing act: We can't afford a pure busy loop, so we
                # have to sleep; but if we sleep the whole timeout time,
                # we'll be unresponsive. The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 20 times per second (or the timeout time remaining).
                endtime = _time() + timeout
                delay = 0.0005 # 500 us -> initial delay of 1 ms
                while True:
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, .05)
                    _sleep(delay)
                if not gotit:
                    # Timed out: withdraw our waiter (a concurrent notify()
                    # may have removed it already, hence the ValueError guard).
                    if __debug__:
                        self._note("%s.wait(%s): timed out", self, timeout)
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
                else:
                    if __debug__:
                        self._note("%s.wait(%s): got it", self, timeout)
        finally:
            self._acquire_restore(saved_state)
    def notify(self, n=1):
        assert self._is_owned(), "notify() of un-acquire()d lock"
        __waiters = self.__waiters
        waiters = __waiters[:n]
        if not waiters:
            if __debug__:
                self._note("%s.notify(): no waiters", self)
            return
        self._note("%s.notify(): notifying %d waiter%s", self, n,
                   n!=1 and "s" or "")
        for waiter in waiters:
            waiter.release()
            try:
                __waiters.remove(waiter)
            except ValueError:
                pass
    def notifyAll(self):
        # Wake every waiting thread.
        self.notify(len(self.__waiters))
def Semaphore(*args, **kwargs):
    # Factory kept for API compatibility; returns a counting semaphore.
    return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
    # After Tim Peters' semaphore class, but not quite the same (no maximum)
    def __init__(self, value=1, verbose=None):
        assert value >= 0, "Semaphore initial value must be >= 0"
        _Verbose.__init__(self, verbose)
        self.__cond = Condition(Lock())
        self.__value = value
    def acquire(self, blocking=1):
        rc = False
        self.__cond.acquire()
        # Note the while/else: the else arm runs only when the loop exits
        # without break, i.e. when __value > 0 and we may decrement it.
        while self.__value == 0:
            if not blocking:
                break
            if __debug__:
                self._note("%s.acquire(%s): blocked waiting, value=%s",
                           self, blocking, self.__value)
            self.__cond.wait()
        else:
            self.__value = self.__value - 1
            if __debug__:
                self._note("%s.acquire: success, value=%s",
                           self, self.__value)
            rc = True
        self.__cond.release()
        return rc
    def release(self):
        # Increment the counter and wake one blocked acquirer, if any.
        self.__cond.acquire()
        self.__value = self.__value + 1
        if __debug__:
            self._note("%s.release: success, value=%s",
                       self, self.__value)
        self.__cond.notify()
        self.__cond.release()
def BoundedSemaphore(*args, **kwargs):
    # Factory kept for API compatibility.
    return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
    """Semaphore that checks that # releases is <= # acquires"""
    def __init__(self, value=1, verbose=None):
        _Semaphore.__init__(self, value, verbose)
        self._initial_value = value
    def release(self):
        # Name-mangled access to the private counter of the base class.
        if self._Semaphore__value >= self._initial_value:
            raise ValueError, "Semaphore released too many times"
        return _Semaphore.release(self)
def Event(*args, **kwargs):
    # Factory kept for API compatibility.
    return _Event(*args, **kwargs)
class _Event(_Verbose):
    # After Tim Peters' event class (without is_posted())
    def __init__(self, verbose=None):
        _Verbose.__init__(self, verbose)
        self.__cond = Condition(Lock())
        self.__flag = False
    def isSet(self):
        return self.__flag
    def set(self):
        # Set the flag and wake every thread blocked in wait().
        self.__cond.acquire()
        try:
            self.__flag = True
            self.__cond.notifyAll()
        finally:
            self.__cond.release()
    def clear(self):
        self.__cond.acquire()
        try:
            self.__flag = False
        finally:
            self.__cond.release()
    def wait(self, timeout=None):
        # Block until the flag is set, or until the optional timeout elapses.
        self.__cond.acquire()
        try:
            if not self.__flag:
                self.__cond.wait(timeout)
        finally:
            self.__cond.release()
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {}    # running threads, keyed by thread identifier
_limbo = {}     # threads that were start()ed but have not begun running yet
# Main class for threads
class Thread(_Verbose):
    """A runnable thread of control.

    Patched relative to stock Python 2.4 threading: join() takes the block
    lock outside the try and releases it in a finally clause, so a
    KeyboardInterrupt cannot leave the lock in the acquired state.
    """
    __initialized = False
    # Need to store a reference to sys.exc_info for printing
    # out exceptions when a thread tries to use a global var. during interp.
    # shutdown and thus raises an exception about trying to perform some
    # operation on/with a NoneType
    __exc_info = _sys.exc_info
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, verbose=None):
        assert group is None, "group argument must be None for now"
        _Verbose.__init__(self, verbose)
        self.__target = target
        self.__name = str(name or _newname())
        self.__args = args
        self.__kwargs = kwargs
        self.__daemonic = self._set_daemon()
        self.__started = False
        self.__stopped = False
        self.__block = Condition(Lock())
        self.__initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self.__stderr = _sys.stderr
    def _set_daemon(self):
        # Overridden in _MainThread and _DummyThread
        return currentThread().isDaemon()
    def __repr__(self):
        assert self.__initialized, "Thread.__init__() was not called"
        status = "initial"
        if self.__started:
            status = "started"
        if self.__stopped:
            status = "stopped"
        if self.__daemonic:
            status = status + " daemon"
        return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
    def start(self):
        assert self.__initialized, "Thread.__init__() not called"
        assert not self.__started, "thread already started"
        if __debug__:
            self._note("%s.start(): starting thread", self)
        # Register in _limbo until __bootstrap moves us into _active.
        _active_limbo_lock.acquire()
        _limbo[self] = self
        _active_limbo_lock.release()
        _start_new_thread(self.__bootstrap, ())
        self.__started = True
        _sleep(0.000001)    # 1 usec, to let the thread run (Solaris hack)
    def run(self):
        if self.__target:
            self.__target(*self.__args, **self.__kwargs)
    def __bootstrap(self):
        # Runs in the new OS thread: registers bookkeeping/hooks, calls
        # run(), and reports any unhandled exception.
        try:
            self.__started = True
            _active_limbo_lock.acquire()
            _active[_get_ident()] = self
            del _limbo[self]
            _active_limbo_lock.release()
            if __debug__:
                self._note("%s.__bootstrap(): thread started", self)
            if _trace_hook:
                self._note("%s.__bootstrap(): registering trace hook", self)
                _sys.settrace(_trace_hook)
            if _profile_hook:
                self._note("%s.__bootstrap(): registering profile hook", self)
                _sys.setprofile(_profile_hook)
            try:
                self.run()
            except SystemExit:
                if __debug__:
                    self._note("%s.__bootstrap(): raised SystemExit", self)
            except:
                if __debug__:
                    self._note("%s.__bootstrap(): unhandled exception", self)
                # If sys.stderr is no more (most likely from interpreter
                # shutdown) use self.__stderr. Otherwise still use sys (as in
                # _sys) in case sys.stderr was redefined since the creation of
                # self.
                if _sys:
                    _sys.stderr.write("Exception in thread %s:\n%s\n" %
                                      (self.getName(), _format_exc()))
                else:
                    # Do the best job possible w/o a huge amt. of code to
                    # approximate a traceback (code ideas from
                    # Lib/traceback.py)
                    exc_type, exc_value, exc_tb = self.__exc_info()
                    try:
                        print>>self.__stderr, (
                            "Exception in thread " + self.getName() +
                            " (most likely raised during interpreter shutdown):")
                        print>>self.__stderr, (
                            "Traceback (most recent call last):")
                        while exc_tb:
                            print>>self.__stderr, (
                                '  File "%s", line %s, in %s' %
                                (exc_tb.tb_frame.f_code.co_filename,
                                    exc_tb.tb_lineno,
                                    exc_tb.tb_frame.f_code.co_name))
                            exc_tb = exc_tb.tb_next
                        print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
                    # Make sure that exc_tb gets deleted since it is a memory
                    # hog; deleting everything else is just for thoroughness
                    finally:
                        del exc_type, exc_value, exc_tb
            else:
                if __debug__:
                    self._note("%s.__bootstrap(): normal return", self)
        finally:
            self.__stop()
            try:
                self.__delete()
            except:
                pass
    def __stop(self):
        # Mark the thread stopped and wake everybody blocked in join().
        self.__block.acquire()
        self.__stopped = True
        self.__block.notifyAll()
        self.__block.release()
    def __delete(self):
        "Remove current thread from the dict of currently running threads."
        # Notes about running with dummy_thread:
        #
        # Must take care to not raise an exception if dummy_thread is being
        # used (and thus this module is being used as an instance of
        # dummy_threading). dummy_thread.get_ident() always returns -1 since
        # there is only one thread if dummy_thread is being used. Thus
        # len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the (if any) thread currently registered in _active.
        #
        # An instance of _MainThread is always created by 'threading'. This
        # gets overwritten the instant an instance of Thread is created; both
        # threads return -1 from dummy_thread.get_ident() and thus have the
        # same key in the dict. So when the _MainThread instance created by
        # 'threading' tries to clean itself up when atexit calls this method
        # it gets a KeyError if another Thread instance was created.
        #
        # This all means that KeyError from trying to delete something from
        # _active if dummy_threading is being used is a red herring. But
        # since it isn't if dummy_threading is *not* being used then don't
        # hide the exception.
        _active_limbo_lock.acquire()
        try:
            try:
                del _active[_get_ident()]
            except KeyError:
                if 'dummy_threading' not in _sys.modules:
                    raise
        finally:
            _active_limbo_lock.release()
    def join(self, timeout=None):
        assert self.__initialized, "Thread.__init__() not called"
        assert self.__started, "cannot join thread before it is started"
        assert self is not currentThread(), "cannot join current thread"
        if __debug__:
            if not self.__stopped:
                self._note("%s.join(): waiting until thread stops", self)
        # acquire() outside the try + release() in finally is the
        # KeyboardInterrupt-safety patch mentioned in the module header.
        self.__block.acquire()
        try:
            if timeout is None:
                while not self.__stopped:
                    self.__block.wait()
                if __debug__:
                    self._note("%s.join(): thread stopped", self)
            else:
                deadline = _time() + timeout
                while not self.__stopped:
                    delay = deadline - _time()
                    if delay <= 0:
                        if __debug__:
                            self._note("%s.join(): timed out", self)
                        break
                    self.__block.wait(delay)
                else:
                    if __debug__:
                        self._note("%s.join(): thread stopped", self)
        finally:
            self.__block.release()
    def getName(self):
        assert self.__initialized, "Thread.__init__() not called"
        return self.__name
    def setName(self, name):
        assert self.__initialized, "Thread.__init__() not called"
        self.__name = str(name)
    def isAlive(self):
        assert self.__initialized, "Thread.__init__() not called"
        return self.__started and not self.__stopped
    def isDaemon(self):
        assert self.__initialized, "Thread.__init__() not called"
        return self.__daemonic
    def setDaemon(self, daemonic):
        assert self.__initialized, "Thread.__init__() not called"
        assert not self.__started, "cannot set daemon status of active thread"
        self.__daemonic = daemonic
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
    # Factory kept for API compatibility; returns a _Timer.
    return _Timer(*args, **kwargs)
class _Timer(Thread):
    """Call a function after a specified number of seconds:
    t = Timer(30.0, f, args=[], kwargs={})
    t.start()
    t.cancel() # stop the timer's action if it's still waiting
    """
    def __init__(self, interval, function, args=None, kwargs=None):
        Thread.__init__(self)
        self.interval = interval
        self.function = function
        # Use None sentinels instead of mutable defaults ([] / {}): a
        # literal default object would be shared by every _Timer instance.
        # Callers that passed explicit lists/dicts are unaffected.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        self.args = args
        self.kwargs = kwargs
        self.finished = Event()
    def cancel(self):
        """Stop the timer if it hasn't finished yet"""
        self.finished.set()
    def run(self):
        # Wait out the interval; only fire if cancel() did not set the flag.
        self.finished.wait(self.interval)
        if not self.finished.isSet():
            self.function(*self.args, **self.kwargs)
        self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
    def __init__(self):
        Thread.__init__(self, name="MainThread")
        self._Thread__started = True
        _active_limbo_lock.acquire()
        _active[_get_ident()] = self
        _active_limbo_lock.release()
        import atexit
        atexit.register(self.__exitfunc)
    def _set_daemon(self):
        # The main thread is never daemonic.
        return False
    def __exitfunc(self):
        # atexit handler: wait for every remaining non-daemon thread before
        # letting the interpreter shut down.
        self._Thread__stop()
        t = _pickSomeNonDaemonThread()
        if t:
            if __debug__:
                self._note("%s: waiting for other threads", self)
        while t:
            t.join()
            t = _pickSomeNonDaemonThread()
        if __debug__:
            self._note("%s: exiting", self)
        self._Thread__delete()
def _pickSomeNonDaemonThread():
    # Return any live non-daemon thread, or None when none remain.
    # Note: enumerate here is this module's function, not the builtin.
    for t in enumerate():
        if not t.isDaemon() and t.isAlive():
            return t
    return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die,
# nor can they be waited for.
# Their purpose is to return *something* from currentThread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
    def __init__(self):
        Thread.__init__(self, name=_newname("Dummy-%d"))
        self._Thread__started = True
        _active_limbo_lock.acquire()
        _active[_get_ident()] = self
        _active_limbo_lock.release()
    def _set_daemon(self):
        return True
    def join(self, timeout=None):
        assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
    # Return the Thread object for the calling thread; foreign threads get
    # a _DummyThread placeholder.
    try:
        return _active[_get_ident()]
    except KeyError:
        ##print "currentThread(): no current thread for", _get_ident()
        return _DummyThread()
def activeCount():
    # Number of Thread objects currently alive (started + in limbo).
    _active_limbo_lock.acquire()
    count = len(_active) + len(_limbo)
    _active_limbo_lock.release()
    return count
def enumerate():
    # Shadows the builtin enumerate (kept for threading-API compatibility).
    # dict.values() returns lists in Python 2, so '+' concatenates copies.
    _active_limbo_lock.acquire()
    active = _active.values() + _limbo.values()
    _active_limbo_lock.release()
    return active
# Create the main thread object
_MainThread()
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from thread import _local as local
except ImportError:
from _threading_local import local
# Self-test code
def _test():
    # Smoke test: a bounded producer/consumer queue exercising RLock,
    # Condition, Thread and the join machinery.
    class BoundedQueue(_Verbose):
        def __init__(self, limit):
            _Verbose.__init__(self)
            self.mon = RLock()
            # Two conditions on the same monitor: readers wait on rc,
            # writers wait on wc.
            self.rc = Condition(self.mon)
            self.wc = Condition(self.mon)
            self.limit = limit
            self.queue = deque()
        def put(self, item):
            self.mon.acquire()
            while len(self.queue) >= self.limit:
                self._note("put(%s): queue full", item)
                self.wc.wait()
            self.queue.append(item)
            self._note("put(%s): appended, length now %d",
                       item, len(self.queue))
            self.rc.notify()
            self.mon.release()
        def get(self):
            self.mon.acquire()
            while not self.queue:
                self._note("get(): queue empty")
                self.rc.wait()
            item = self.queue.popleft()
            self._note("get(): got %s, %d left", item, len(self.queue))
            self.wc.notify()
            self.mon.release()
            return item
    class ProducerThread(Thread):
        def __init__(self, queue, quota):
            Thread.__init__(self, name="Producer")
            self.queue = queue
            self.quota = quota
        def run(self):
            from random import random
            counter = 0
            while counter < self.quota:
                counter = counter + 1
                self.queue.put("%s.%d" % (self.getName(), counter))
                _sleep(random() * 0.00001)
    class ConsumerThread(Thread):
        def __init__(self, queue, count):
            Thread.__init__(self, name="Consumer")
            self.queue = queue
            self.count = count
        def run(self):
            while self.count > 0:
                item = self.queue.get()
                print item
                self.count = self.count - 1
    NP = 3      # number of producers
    QL = 4      # queue limit
    NI = 5      # items per producer
    Q = BoundedQueue(QL)
    P = []
    for i in range(NP):
        t = ProducerThread(Q, NI)
        t.setName("Producer-%d" % (i+1))
        P.append(t)
    C = ConsumerThread(Q, NI*NP)
    for t in P:
        t.start()
        _sleep(0.000001)
    C.start()
    for t in P:
        t.join()
    C.join()
| gpl-3.0 |
Kingclove/ChannelAPI-Demo | server/lib/flask/templating.py | 783 | 4707 | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
    """Inject ``g``, ``request`` and ``session`` into the template
    context by default.
    """
    rv = {}
    appctx = _app_ctx_stack.top
    if appctx is not None:
        rv['g'] = appctx.g
    reqctx = _request_ctx_stack.top
    if reqctx is not None:
        rv['request'] = reqctx.request
        rv['session'] = reqctx.session
    return rv
class Environment(BaseEnvironment):
    """Works like a regular Jinja2 environment but has some additional
    knowledge of how Flask's blueprint works so that it can prepend the
    name of the blueprint to referenced templates if necessary.
    """
    def __init__(self, app, **options):
        # Fall back to the app's global loader unless the caller provided
        # an explicit one.
        if 'loader' not in options:
            options['loader'] = app.create_global_jinja_loader()
        BaseEnvironment.__init__(self, **options)
        self.app = app
class DispatchingJinjaLoader(BaseLoader):
    """A loader that looks for templates in the application and all
    the blueprint folders.
    """
    def __init__(self, app):
        self.app = app
    def get_source(self, environment, template):
        # First loader able to resolve the template wins.
        for loader, local_name in self._iter_loaders(template):
            try:
                return loader.get_source(environment, local_name)
            except TemplateNotFound:
                pass
        raise TemplateNotFound(template)
    def _iter_loaders(self, template):
        # Yields (loader, template_name) pairs: application loader first,
        # then module-style blueprints, then regular blueprints.
        loader = self.app.jinja_loader
        if loader is not None:
            yield loader, template
        # old style module based loaders in case we are dealing with a
        # blueprint that is an old style module
        try:
            module, local_name = posixpath.normpath(template).split('/', 1)
            blueprint = self.app.blueprints[module]
            if blueprint_is_module(blueprint):
                loader = blueprint.jinja_loader
                if loader is not None:
                    yield loader, local_name
        except (ValueError, KeyError):
            # ValueError: no '/' in the template path; KeyError: unknown
            # blueprint name.  Either way, fall through to the generic scan.
            pass
        for blueprint in itervalues(self.app.blueprints):
            if blueprint_is_module(blueprint):
                continue
            loader = blueprint.jinja_loader
            if loader is not None:
                yield loader, template
    def list_templates(self):
        # Union of the app's templates and each blueprint's templates,
        # module-style blueprints being prefixed with their name.
        result = set()
        loader = self.app.jinja_loader
        if loader is not None:
            result.update(loader.list_templates())
        for name, blueprint in iteritems(self.app.blueprints):
            loader = blueprint.jinja_loader
            if loader is not None:
                for template in loader.list_templates():
                    prefix = ''
                    if blueprint_is_module(blueprint):
                        prefix = name + '/'
                    result.add(prefix + template)
        return list(result)
def _render(template, context, app):
    """Renders the template and fires the signal"""
    rv = template.render(context)
    # Let subscribers (debug toolbars, test helpers) observe the render.
    template_rendered.send(app, template=template, context=context)
    return rv
def render_template(template_name_or_list, **context):
    """Renders a template from the template folder with the given
    context.
    :param template_name_or_list: the name of the template to be
                                  rendered, or an iterable with template names
                                  the first one existing will be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    # Merge in the app's context processors (g, request, session, ...).
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
                   context, ctx.app)
def render_template_string(source, **context):
    """Renders a template from the given template source string
    with the given context.
    :param source: the sourcecode of the template to be
                   rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    # Merge in the app's context processors (g, request, session, ...).
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.from_string(source),
                   context, ctx.app)
| apache-2.0 |
bjornlevi/5thpower | nefndaralit/env/lib/python3.6/site-packages/chardet/sbcharsetprober.py | 269 | 5657 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import CharacterCategory, ProbingState, SequenceLikelihood
class SingleByteCharSetProber(CharSetProber):
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
def __init__(self, model, reversed=False, name_prober=None):
super(SingleByteCharSetProber, self).__init__()
self._model = model
# TRUE if we need to reverse every pair in the model lookup
self._reversed = reversed
# Optional auxiliary prober for name decision
self._name_prober = name_prober
self._last_order = None
self._seq_counters = None
self._total_seqs = None
self._total_char = None
self._freq_char = None
self.reset()
def reset(self):
super(SingleByteCharSetProber, self).reset()
# char order of last character
self._last_order = 255
self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
self._total_seqs = 0
self._total_char = 0
# characters that fall in our sampling range
self._freq_char = 0
@property
def charset_name(self):
if self._name_prober:
return self._name_prober.charset_name
else:
return self._model['charset_name']
@property
def language(self):
if self._name_prober:
return self._name_prober.language
else:
return self._model.get('language')
def feed(self, byte_str):
if not self._model['keep_english_letter']:
byte_str = self.filter_international_words(byte_str)
if not byte_str:
return self.state
char_to_order_map = self._model['char_to_order_map']
for i, c in enumerate(byte_str):
# XXX: Order is in range 1-64, so one would think we want 0-63 here,
# but that leads to 27 more test failures than before.
order = char_to_order_map[c]
# XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
# CharacterCategory.SYMBOL is actually 253, so we use CONTROL
# to make it closer to the original intent. The only difference
# is whether or not we count digits and control characters for
# _total_char purposes.
if order < CharacterCategory.CONTROL:
self._total_char += 1
if order < self.SAMPLE_SIZE:
self._freq_char += 1
if self._last_order < self.SAMPLE_SIZE:
self._total_seqs += 1
if not self._reversed:
i = (self._last_order * self.SAMPLE_SIZE) + order
model = self._model['precedence_matrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * self.SAMPLE_SIZE) + self._last_order
model = self._model['precedence_matrix'][i]
self._seq_counters[model] += 1
self._last_order = order
charset_name = self._model['charset_name']
if self.state == ProbingState.DETECTING:
if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
confidence = self.get_confidence()
if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
self.logger.debug('%s confidence = %s, we have a winner',
charset_name, confidence)
self._state = ProbingState.FOUND_IT
elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
self.logger.debug('%s confidence = %s, below negative '
'shortcut threshhold %s', charset_name,
confidence,
self.NEGATIVE_SHORTCUT_THRESHOLD)
self._state = ProbingState.NOT_ME
return self.state
def get_confidence(self):
    """Return the detection confidence, clamped to [0.01, 0.99]."""
    confidence = 0.01
    if self._total_seqs > 0:
        # Fraction of "positive" (likely) pairs, normalised by the model's
        # typical positive ratio, then scaled by frequent-character density.
        positive = self._seq_counters[SequenceLikelihood.POSITIVE]
        confidence = ((1.0 * positive) / self._total_seqs /
                      self._model['typical_positive_ratio'])
        confidence = confidence * self._freq_char / self._total_char
    if confidence >= 1.0:
        confidence = 0.99
    return confidence
| mit |
alanjw/GreenOpenERP-Win-X86 | openerp/addons/decimal_precision/decimal_precision.py | 35 | 3065 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp import pooler, tools
from openerp.osv import osv, fields
from openerp.modules.registry import RegistryManager
class decimal_precision(osv.osv):
    """Per-usage decimal precision settings (e.g. 'Account', 'Sale Price').

    Other models look up the digit count for a named usage through
    :meth:`precision_get`; results are cached by ``tools.ormcache``, so any
    create/write/unlink must flush the cache and notify other workers.
    """
    _name = 'decimal.precision'
    _columns = {
        'name': fields.char('Usage', size=50, select=True, required=True),
        'digits': fields.integer('Digits', required=True),
    }
    _defaults = {
        'digits': 2,
    }
    _sql_constraints = [
        ('name_uniq', 'unique (name)', """Only one value can be defined for each given usage!"""),
    ]

    @tools.ormcache(skiparg=3)
    def precision_get(self, cr, uid, application):
        # Cached raw SQL lookup; defaults to 2 digits when the usage is
        # not configured.
        cr.execute('select digits from decimal_precision where name=%s', (application,))
        res = cr.fetchone()
        return res[0] if res else 2

    def clear_cache(self, cr):
        """clear cache and update models. Notify other workers to restart their registry."""
        self.precision_get.clear_cache(self)
        # Let every field that depends on a precision recompute its digits.
        for obj in self.pool.obj_list():
            for colname, col in self.pool.get(obj)._columns.items():
                if hasattr(col, 'digits_change'):
                    col.digits_change(cr)
        RegistryManager.signal_caches_change(cr.dbname)

    def create(self, cr, uid, data, context=None):
        res = super(decimal_precision, self).create(cr, uid, data, context=context)
        self.clear_cache(cr)
        return res

    def unlink(self, cr, uid, ids, context=None):
        res = super(decimal_precision, self).unlink(cr, uid, ids, context=context)
        self.clear_cache(cr)
        return res

    def write(self, cr, uid, ids, data, *args, **argv):
        res = super(decimal_precision, self).write(cr, uid, ids, data, *args, **argv)
        self.clear_cache(cr)
        return res

# Old-style OpenERP model registration.
decimal_precision()
def get_precision(application):
    """Return a digits-change callable for float fields.

    The returned function looks up the digit count configured for
    *application* in decimal.precision and yields the (16, digits) pair
    that OpenERP float columns expect.
    """
    def change_digit(cr):
        pool = pooler.get_pool(cr.dbname)
        digits = pool.get('decimal.precision').precision_get(
            cr, SUPERUSER_ID, application)
        return (16, digits)
    return change_digit
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nagaozen/my-os-customizations | home/nagaozen/gedit-plugins-2.30.1/usr/local/lib/gedit-2/plugins/codecomment.py | 1 | 9563 | # -*- coding: utf-8 -*-
# Code comment plugin
# This file is part of gedit
#
# Copyright (C) 2005-2006 Igalia
# Copyright (C) 2006 Matthew Dugan
# Copyrignt (C) 2007 Steve Frécinaux
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
import gedit
import gtk
import copy
import gettext
from gpdefs import *
try:
gettext.bindtextdomain(GETTEXT_PACKAGE, GP_LOCALEDIR)
_ = lambda s: gettext.dgettext(GETTEXT_PACKAGE, s);
except:
_ = lambda s: s
# If the language is listed here we prefer block comments over line comments.
# Maybe this list should be user configurable, but just C comes to my mind...
block_comment_languages = [
'c',
]
def get_block_comment_tags(lang):
    """Return *lang*'s (block-comment-start, block-comment-end) metadata.

    Both delimiters must be defined; otherwise (None, None) is returned.
    """
    start = lang.get_metadata('block-comment-start')
    end = lang.get_metadata('block-comment-end')
    return (start, end) if start and end else (None, None)
def get_line_comment_tags(lang):
    """Return (line-comment-start, None), or (None, None) when undefined."""
    start = lang.get_metadata('line-comment-start')
    return (start, None) if start else (None, None)
def get_comment_tags(lang):
    """Pick the preferred (start, end) comment tags for *lang*.

    Languages listed in block_comment_languages prefer block comments;
    everything else prefers line comments. Either way, fall back to the
    other kind when the preferred one is undefined.
    """
    if lang.get_id() in block_comment_languages:
        tags = get_block_comment_tags(lang)
        if tags == (None, None):
            tags = get_line_comment_tags(lang)
    else:
        tags = get_line_comment_tags(lang)
        if tags == (None, None):
            tags = get_block_comment_tags(lang)
    return tags
def forward_tag(iter, tag):
    # Advance the text iter over *tag*, i.e. by len(tag) characters.
    iter.forward_chars(len(tag))
def backward_tag(iter, tag):
    # Move the text iter back over *tag*, i.e. by len(tag) characters.
    iter.backward_chars(len(tag))
def get_tag_position_in_line(tag, head_iter, iter):
    """Scan the current line for *tag*, moving both iters in lockstep.

    The two iters are expected to start len(tag) characters apart; they are
    advanced together until the slice between them equals *tag* or the line
    ends. Returns True when found, leaving the iters bracketing the tag.
    """
    found = False
    while (not found) and (not iter.ends_line()):
        s = iter.get_slice(head_iter)
        if s == tag:
            found = True
        else:
            head_iter.forward_char()
            iter.forward_char()
    return found
def add_comment_characters(document, start_tag, end_tag, start, end):
    """Insert comment delimiters on every line of [start, end] in *document*.

    Marks are used instead of raw iters because each insertion invalidates
    existing iters; the selection is restored around the commented text at
    the end. Wrapped in one user action so a single undo reverts it.
    """
    smark = document.create_mark("start", start, False)
    imark = document.create_mark("iter", start, False)
    emark = document.create_mark("end", end, False)
    number_lines = end.get_line() - start.get_line() + 1

    document.begin_user_action()
    for i in range(0, number_lines):
        iter = document.get_iter_at_mark(imark)
        # Skip empty lines: no opening tag needed there.
        if not iter.ends_line():
            document.insert(iter, start_tag, -1)
        if end_tag is not None:
            # Block comments: close each line; the last line closes at the
            # original selection end rather than the line end.
            if i != number_lines - 1:
                iter = document.get_iter_at_mark(imark)
                iter.forward_to_line_end()
                document.insert(iter, end_tag, -1)
            else:
                iter = document.get_iter_at_mark(emark)
                document.insert(iter, end_tag, -1)
        iter = document.get_iter_at_mark(imark)
        iter.forward_line()
        document.delete_mark(imark)
        imark = document.create_mark("iter", iter, True)
    document.end_user_action()

    document.delete_mark(imark)
    new_start = document.get_iter_at_mark(smark)
    new_end = document.get_iter_at_mark(emark)
    if not new_start.ends_line():
        # Extend the selection back over the first inserted start tag.
        backward_tag(new_start, start_tag)
    document.select_range(new_start, new_end)
    document.delete_mark(smark)
    document.delete_mark(emark)
def remove_comment_characters(document, start_tag, end_tag, start, end):
    """Strip comment delimiters from every line of [start, end].

    For each line, search for start_tag (and end_tag when the language uses
    block comments) and delete them; marks keep positions stable across the
    deletions. Wrapped in one user action so a single undo reverts it.
    """
    smark = document.create_mark("start", start, False)
    emark = document.create_mark("end", end, False)
    number_lines = end.get_line() - start.get_line() + 1
    iter = start.copy()
    head_iter = iter.copy()
    forward_tag(head_iter, start_tag)

    document.begin_user_action()
    for i in range(0, number_lines):
        if get_tag_position_in_line(start_tag, head_iter, iter):
            dmark = document.create_mark("delete", iter, False)
            document.delete(iter, head_iter)
            if end_tag is not None:
                # Also drop the matching closing tag on this line, if any.
                iter = document.get_iter_at_mark(dmark)
                head_iter = iter.copy()
                forward_tag(head_iter, end_tag)
                if get_tag_position_in_line(end_tag, head_iter, iter):
                    document.delete(iter, head_iter)
            document.delete_mark(dmark)
        # Re-anchor at the mark and move to the next line.
        iter = document.get_iter_at_mark(smark)
        iter.forward_line()
        document.delete_mark(smark)
        head_iter = iter.copy()
        forward_tag(head_iter, start_tag)
        smark = document.create_mark("iter", iter, True)
    document.end_user_action()

    document.delete_mark(smark)
    document.delete_mark(emark)
def do_comment(document, unindent=False):
    """Comment (or, with unindent=True, uncomment) the current selection.

    Without a selection, the line containing the cursor is used and the
    cursor position is restored afterwards. Tags are taken from the
    document's language; does nothing when no language or no tags exist.
    """
    selection = document.get_selection_bounds()
    currentPosMark = document.get_insert()
    deselect = False
    if selection != ():
        (start, end) = selection
        # Normalize the selection to whole lines.
        if start.ends_line():
            start.forward_line()
        elif not start.starts_line():
            start.set_line_offset(0)
        if end.starts_line():
            end.backward_char()
        elif not end.ends_line():
            end.forward_to_line_end()
    else:
        # No selection: operate on the line holding the cursor.
        deselect = True
        start = document.get_iter_at_mark(currentPosMark)
        start.set_line_offset(0)
        end = start.copy()
        end.forward_to_line_end()
    lang = document.get_language()
    if lang is None:
        return
    (start_tag, end_tag) = get_comment_tags(lang)
    if not start_tag and not end_tag:
        return
    if unindent:  # Select the comment or the uncomment method
        new_code = remove_comment_characters(document,
                                             start_tag,
                                             end_tag,
                                             start,
                                             end)
    else:
        new_code = add_comment_characters(document,
                                          start_tag,
                                          end_tag,
                                          start,
                                          end)
    if deselect:
        # Restore a collapsed cursor at the original position.
        oldPosIter = document.get_iter_at_mark(currentPosMark)
        document.select_range(oldPosIter, oldPosIter)
        document.place_cursor(oldPosIter)
ui_str = """
<ui>
<menubar name="MenuBar">
<menu name="EditMenu" action="Edit">
<placeholder name="EditOps_4">
<menuitem name="Comment" action="CodeComment"/>
<menuitem name="Uncomment" action="CodeUncomment"/>
</placeholder>
</menu>
</menubar>
</ui>
"""
class CodeCommentWindowHelper(object):
    """Per-window plugin state.

    Installs the Comment/Uncomment menu items into the window's UI manager
    and keeps their sensitivity in sync with the active document's language.
    """

    def __init__(self, plugin, window):
        self._window = window
        self._plugin = plugin
        self._insert_menu()

    def deactivate(self):
        # Tear down the menu and drop references so the window can be GC'd.
        self._remove_menu()
        self._action_group = None
        self._window = None
        self._plugin = None

    def _insert_menu(self):
        # Register the two actions and merge the menu XML (ui_str) into the
        # window's UI manager.
        manager = self._window.get_ui_manager()
        self._action_group = gtk.ActionGroup("CodeCommentActions")
        self._action_group.add_actions([("CodeComment",
                                         None,
                                         _("Co_mment Code"),
                                         "<control>M",
                                         _("Comment the selected code"),
                                         lambda a, w: do_comment (w.get_active_document())),
                                        ('CodeUncomment',
                                         None,
                                         _('U_ncomment Code'),
                                         "<control><shift>M",
                                         _("Uncomment the selected code"),
                                         lambda a, w: do_comment (w.get_active_document(), True))],
                                       self._window)
        manager.insert_action_group(self._action_group, -1)
        self._ui_id = manager.add_ui_from_string(ui_str)

    def _remove_menu(self):
        manager = self._window.get_ui_manager()
        manager.remove_ui(self._ui_id)
        manager.remove_action_group(self._action_group)
        manager.ensure_update()

    def update_ui(self):
        # Enable the actions only when the current language defines comment
        # tags; disable otherwise (including no document / no language).
        doc = self._window.get_active_document()
        if doc:
            lang = doc.get_language()
            if lang is not None:
                sensitive = get_comment_tags(lang) != (None, None)
                self._action_group.set_sensitive(sensitive)
                return
        self._action_group.set_sensitive(False)
class CodeCommentPlugin(gedit.Plugin):
    """gedit plugin entry point.

    Delegates all per-window work to a CodeCommentWindowHelper instance
    stored on the window under DATA_TAG.
    """
    DATA_TAG = "CodeCommentPluginWindowHelper"

    def __init__(self):
        gedit.Plugin.__init__(self)

    def activate(self, window):
        window.set_data(self.DATA_TAG, CodeCommentWindowHelper(self, window))

    def deactivate(self, window):
        window.get_data(self.DATA_TAG).deactivate()
        window.set_data(self.DATA_TAG, None)

    def update_ui(self, window):
        window.get_data(self.DATA_TAG).update_ui()
# ex:ts=4:et:
| gpl-3.0 |
LibriCerule/Cerulean_Tracking | env/lib/python3.5/site-packages/pip/utils/__init__.py | 22 | 26034 | from __future__ import absolute_import
from collections import deque
import contextlib
import errno
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, expanduser, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path', 'canonicalize_name',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = std_logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
    """Import and return *pkg_or_module_string*.

    On ImportError, raise ExceptionType(*args, **kwargs) instead.
    """
    try:
        module = __import__(pkg_or_module_string)
    except ImportError:
        raise ExceptionType(*args, **kwargs)
    return module
def ensure_dir(path):
    """os.path.makedirs without EEXIST: create *path* (and parents),
    silently accepting an already-existing directory."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # EEXIST is the expected benign race; anything else is a real error.
        if exc.errno != errno.EEXIST:
            raise
def get_prog():
    """Best-effort program name of the running pip, for usage messages."""
    try:
        argv0 = os.path.basename(sys.argv[0])
    except (AttributeError, TypeError, IndexError):
        return 'pip'
    if argv0 in ('__main__.py', '-c'):
        # Invoked as `python -m pip` (or `python -c ...`): argv[0] is not a
        # useful program name, so reconstruct the interpreter form.
        return "%s -m pip" % sys.executable
    return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
    """Remove a directory tree, retrying briefly (useful on Windows where
    transient file locks abort deletion); read-only entries are fixed up
    by rmtree_errorhandler."""
    shutil.rmtree(dir, ignore_errors=ignore_errors,
                  onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
    """On Windows, the files in .svn are read-only, so when rmtree() tries to
    remove them, an exception is thrown. We catch that here, remove the
    read-only attribute, and hopefully continue without problems.

    Matches shutil.rmtree's onerror signature (func, path, exc_info).
    """
    # if file type currently read only
    if os.stat(path).st_mode & stat.S_IREAD:
        # convert to read/write
        os.chmod(path, stat.S_IWRITE)
        # use the original function to repeat the operation
        func(path)
        return
    else:
        # Not a permissions problem: re-raise the exception currently being
        # handled (we are invoked from inside rmtree's except block).
        raise
def display_path(path):
    """Gives the display value for a given path, making it relative to cwd
    if possible."""
    path = os.path.normcase(os.path.abspath(path))
    if sys.version_info[0] == 2:
        # Python 2: round-trip through the filesystem encoding so the path
        # can be printed with the default encoding without raising.
        path = path.decode(sys.getfilesystemencoding(), 'replace')
        path = path.encode(sys.getdefaultencoding(), 'replace')
    if path.startswith(os.getcwd() + os.path.sep):
        path = '.' + path[len(os.getcwd()):]
    return path
def backup_dir(dir, ext='.bak'):
    """Figure out the name of a directory to back up the given dir to
    (adding .bak, .bak2, etc)"""
    n = 1
    candidate = dir + ext
    # Keep appending a counter until we hit a name that does not exist yet.
    while os.path.exists(candidate):
        n += 1
        candidate = dir + ext + str(n)
    return candidate
def ask_path_exists(message, options):
    """Like ask(), but honor a scripted answer from $PIP_EXISTS_ACTION
    (first whitespace-separated token that matches one of *options*)."""
    for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
        if action in options:
            return action
    return ask(message, options)
def ask(message, options):
    """Ask the message interactively, with the given possible responses"""
    while 1:
        # Refuse to prompt in automated environments ($PIP_NO_INPUT set).
        if os.environ.get('PIP_NO_INPUT'):
            raise Exception(
                'No input was expected ($PIP_NO_INPUT set); question: %s' %
                message
            )
        response = input(message)
        # Answers are matched case-insensitively, ignoring whitespace.
        response = response.strip().lower()
        if response not in options:
            print(
                'Your response (%r) was not one of the expected responses: '
                '%s' % (response, ', '.join(options))
            )
        else:
            return response
def format_size(bytes):
    """Render a byte count as a human-readable decimal size string."""
    if bytes > 1000 * 1000:
        return '%.1fMB' % (bytes / 1000.0 / 1000)
    if bytes > 10 * 1000:
        return '%ikB' % (bytes / 1000)
    if bytes > 1000:
        return '%.1fkB' % (bytes / 1000.0)
    return '%ibytes' % bytes
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    return (os.path.isdir(path) and
            os.path.isfile(os.path.join(path, 'setup.py')))
def is_svn_page(html):
    """
    Returns true if the page appears to be the index page of an svn repository
    """
    # Heuristic: the default svn index page titles itself "... Revision N:"
    # and carries a "Powered by Subversion" footer.
    has_revision_title = re.search(r'<title>[^<]*Revision \d+:', html)
    powered_by_svn = re.search(
        r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
    return has_revision_title and powered_by_svn
def file_contents(filename):
    """Read *filename* in binary mode and decode it as UTF-8."""
    with open(filename, 'rb') as fp:
        raw = fp.read()
    return raw.decode('utf-8')
def read_chunks(file, size=4096):
    """Yield pieces of data from a file-like object until EOF."""
    while True:
        piece = file.read(size)
        if not piece:
            return
        yield piece
def split_leading_dir(path):
    """Split *path* into (first component, rest), honoring both separator
    styles; returns (path, '') when there is no separator."""
    path = path.lstrip('/').lstrip('\\')
    slash = path.find('/')
    backslash = path.find('\\')
    # Split on whichever separator appears first.
    if slash != -1 and (backslash == -1 or slash < backslash):
        return path.split('/', 1)
    if backslash != -1:
        return path.split('\\', 1)
    return path, ''
def has_leading_dir(paths):
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    common = None
    for path in paths:
        prefix, _rest = split_leading_dir(path)
        if not prefix:
            # A top-level entry with no directory component: no common root.
            return False
        if common is None:
            common = prefix
        elif prefix != common:
            return False
    return True
def normalize_path(path, resolve_symlinks=True):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    path = expanduser(path)
    if resolve_symlinks:
        # realpath also resolves symlinks; abspath only normalizes lexically.
        path = os.path.realpath(path)
    else:
        path = os.path.abspath(path)
    return os.path.normcase(path)
def splitext(path):
    """Like os.path.splitext, but take off .tar too"""
    base, ext = posixpath.splitext(path)
    if base.lower().endswith('.tar'):
        # Fold '.tar' into the extension: foo.tar.gz -> ('foo', '.tar.gz').
        return base[:-4], base[-4:] + ext
    return base, ext
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Implementation borrowed from os.renames().
    head, tail = os.path.split(new)
    if head and tail and not os.path.exists(head):
        os.makedirs(head)
    # shutil.move falls back to copy+delete when a plain rename would fail
    # across filesystems (unlike the os.rename used by os.renames).
    shutil.move(old, new)
    head, tail = os.path.split(old)
    if head and tail:
        try:
            # Prune now-empty parent directories of the old location.
            os.removedirs(head)
        except OSError:
            pass
def is_local(path):
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.

    If we're not in a virtualenv, all paths are considered "local."
    """
    if not running_under_virtualenv():
        return True
    return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).

    Always True if we're not in a virtualenv.
    """
    return is_local(dist_location(dist))
def dist_in_usersite(dist):
    """
    Return True if given Distribution is installed in user site.
    """
    # Compare normalized paths so case/symlink differences don't matter.
    norm_path = normalize_path(dist_location(dist))
    return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
    """
    Return True if given Distribution is installed in
    distutils.sysconfig.get_python_lib().
    """
    return normalize_path(
        dist_location(dist)
    ).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
    """Is distribution an editable install?

    True when an ``<project_name>.egg-link`` file exists on any sys.path
    entry (the marker left by ``pip install -e``).
    """
    egg_link_name = dist.project_name + '.egg-link'
    for path_item in sys.path:
        if os.path.isfile(os.path.join(path_item, egg_link_name)):
            return True
    return False
def get_installed_distributions(local_only=True,
                                skip=stdlib_pkgs,
                                include_editables=True,
                                editables_only=False,
                                user_only=False):
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.

    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to stdlib_pkgs

    If ``editables`` is False, don't report editables.

    If ``editables_only`` is True , only report editables.

    If ``user_only`` is True , only report installations in the user
    site directory.
    """
    # Each flag becomes a predicate; a distribution must pass all of them.
    if local_only:
        local_test = dist_is_local
    else:
        def local_test(d):
            return True
    if include_editables:
        def editable_test(d):
            return True
    else:
        def editable_test(d):
            return not dist_is_editable(d)
    if editables_only:
        def editables_only_test(d):
            return dist_is_editable(d)
    else:
        def editables_only_test(d):
            return True
    if user_only:
        user_test = dist_in_usersite
    else:
        def user_test(d):
            return True
    return [d for d in pkg_resources.working_set
            if local_test(d) and
            d.key not in skip and
            editable_test(d) and
            editables_only_test(d) and
            user_test(d)
            ]
def egg_link_path(dist):
    """
    Return the path for the .egg-link file if it exists, otherwise, None.

    There's 3 scenarios:
    1) not in a virtualenv
       try to find in site.USER_SITE, then site_packages
    2) in a no-global virtualenv
       try to find in site_packages
    3) in a yes-global virtualenv
       try to find in site_packages, then site.USER_SITE
       (don't look in global location)

    For #1 and #3, there could be odd cases, where there's an egg-link in 2
    locations.

    This method will just return the first one found.
    """
    # Build the candidate directories in priority order per the scenarios
    # above; the first matching egg-link wins.
    sites = []
    if running_under_virtualenv():
        if virtualenv_no_global():
            sites.append(site_packages)
        else:
            sites.append(site_packages)
            if user_site:
                sites.append(user_site)
    else:
        if user_site:
            sites.append(user_site)
        sites.append(site_packages)

    for site in sites:
        egglink = os.path.join(site, dist.project_name) + '.egg-link'
        if os.path.isfile(egglink):
            return egglink
    # Implicit None when no egg-link exists anywhere.
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    egg_link = egg_link_path(dist)
    if egg_link:
        return egg_link
    return dist.location
def get_terminal_size():
    """Returns a tuple (x, y) representing the width(x) and the height(x)
    in characters of the terminal window.

    Probes stdin/stdout/stderr with TIOCGWINSZ, then the controlling tty,
    then falls back to $LINES/$COLUMNS or the conventional 80x25.
    """
    def ioctl_GWINSZ(fd):
        # Returns (rows, cols) or None when fd is not a usable tty.
        try:
            import fcntl
            import termios
            import struct
            cr = struct.unpack(
                'hh',
                fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
            )
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; Exception still covers ImportError (non-Unix)
            # and OSError/IOError (fd is not a tty).
            return None
        if cr == (0, 0):
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            # Same narrowing as above; no controlling terminal is fine.
            pass
    if not cr:
        # Fall back to the environment or the conventional 80x25 default.
        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    return int(cr[1]), int(cr[0])
def current_umask():
    """Get the current umask which involves having to set it temporarily."""
    # os.umask both sets and returns the previous mask, so set a dummy
    # value and immediately restore what was there before.
    previous = os.umask(0)
    os.umask(previous)
    return previous
def unzip_file(filename, location, flatten=True):
    """
    Unzip the file (with path `filename`) to the destination `location`. All
    files are written based on system defaults and umask (i.e. permissions are
    not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    ensure_dir(location)
    zipfp = open(filename, 'rb')
    try:
        zip = zipfile.ZipFile(zipfp, allowZip64=True)
        # With flatten, strip a single common top-level directory (typical
        # for sdists) so contents land directly in `location`.
        leading = has_leading_dir(zip.namelist()) and flatten
        for info in zip.infolist():
            name = info.filename
            data = zip.read(name)
            fn = name
            if leading:
                fn = split_leading_dir(name)[1]
            fn = os.path.join(location, fn)
            dir = os.path.dirname(fn)
            if fn.endswith('/') or fn.endswith('\\'):
                # A directory
                ensure_dir(fn)
            else:
                ensure_dir(dir)
                fp = open(fn, 'wb')
                try:
                    fp.write(data)
                finally:
                    fp.close()
                # Unix permission bits live in the high 16 bits of
                # external_attr (zero for archives built on Windows).
                mode = info.external_attr >> 16
                # if mode and regular file and any execute permissions for
                # user/group/world?
                if mode and stat.S_ISREG(mode) and mode & 0o111:
                    # make dest file have execute for user/group/world
                    # (chmod +x) no-op on windows per python docs
                    os.chmod(fn, (0o777 - current_umask() | 0o111))
    finally:
        zipfp.close()
def untar_file(filename, location):
    """
    Untar the file (with path `filename`) to the destination `location`.
    All files are written based on system defaults and umask (i.e. permissions
    are not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written.  Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    ensure_dir(location)
    # Pick the tarfile mode from the extension; fall back to transparent
    # compression detection ('r:*') for unknown extensions.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith(BZ2_EXTENSIONS):
        mode = 'r:bz2'
    elif filename.lower().endswith(XZ_EXTENSIONS):
        mode = 'r:xz'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warning(
            'Cannot determine compression type for file %s', filename,
        )
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            if leading:
                # Strip the single common top-level directory.
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                ensure_dir(path)
            elif member.issym():
                try:
                    tar._extract_member(member, path)
                except Exception as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
                ensure_dir(os.path.dirname(path))
                with open(path, 'wb') as destfp:
                    shutil.copyfileobj(fp, destfp)
                fp.close()
                # Update the timestamp (useful for cython compiled files)
                tar.utime(member, path)
                # member have any execute permissions for user/group/world?
                if member.mode & 0o111:
                    # make dest file have execute for user/group/world
                    # no-op on windows per python docs
                    os.chmod(path, (0o777 - current_umask() | 0o111))
    finally:
        tar.close()
def unpack_file(filename, location, content_type, link):
    """Unpack *filename* into *location*, dispatching on content type,
    file extension and magic-number sniffing (zip, tar, svn checkout)."""
    filename = os.path.realpath(filename)
    if (content_type == 'application/zip' or
            filename.lower().endswith(ZIP_EXTENSIONS) or
            zipfile.is_zipfile(filename)):
        # Wheels must keep their internal layout, so don't flatten them.
        unzip_file(
            filename,
            location,
            flatten=not filename.endswith('.whl')
        )
    elif (content_type == 'application/x-gzip' or
            tarfile.is_tarfile(filename) or
            filename.lower().endswith(
                TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html') and
            is_svn_page(file_contents(filename))):
        # We don't really care about this
        from pip.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        # FIXME: handle?
        # FIXME: magic signatures?
        logger.critical(
            'Cannot unpack file %s (downloaded from %s, content-type: %s); '
            'cannot detect archive format',
            filename, location, content_type,
        )
        raise InstallationError(
            'Cannot determine archive format of %s' % location
        )
def remove_tracebacks(output):
    """Strip SyntaxError/SyntaxWarning tracebacks (and, on Python 3,
    compileall error lines) from captured *output* so expected bytecode
    compilation failures do not alarm users."""
    pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
               r'Syntax(?:Error|Warning): (?:.*)')
    output = re.sub(pattern, '', output)
    if PY2:
        return output
    # compileall.compile_dir() prints different messages to stdout
    # in Python 3
    return re.sub(r"\*\*\* Error compiling (?:.*)", '', output)
def call_subprocess(cmd, show_stdout=True, cwd=None,
                    on_returncode='raise',
                    command_level=std_logging.DEBUG, command_desc=None,
                    extra_environ=None, spinner=None):
    """Run *cmd*, streaming its combined stdout/stderr.

    ``on_returncode`` selects what a non-zero exit does: 'raise'
    (InstallationError), 'warn' (log a warning) or 'ignore'. When
    ``show_stdout`` is false, the captured output (minus syntax-error
    tracebacks) is returned instead of being logged line by line.
    """
    if command_desc is None:
        # Build a shell-like rendition of the command for log messages.
        cmd_parts = []
        for part in cmd:
            if ' ' in part or '\n' in part or '"' in part or "'" in part:
                part = '"%s"' % part.replace('"', '\\"')
            cmd_parts.append(part)
        command_desc = ' '.join(cmd_parts)
    logger.log(command_level, "Running command %s", command_desc)
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=subprocess.PIPE,
            cwd=cwd, env=env)
    except Exception as exc:
        logger.critical(
            "Error %s while executing command %s", exc, command_desc,
        )
        raise
    all_output = []
    while True:
        # Read line-by-line so output can be relayed (and the spinner
        # advanced) while the child process is still running.
        line = console_to_str(proc.stdout.readline())
        if not line:
            break
        line = line.rstrip()
        all_output.append(line + '\n')
        if show_stdout:
            logger.debug(line)
        if spinner is not None:
            spinner.spin()
    proc.wait()
    if spinner is not None:
        if proc.returncode:
            spinner.finish("error")
        else:
            spinner.finish("done")
    if proc.returncode:
        if on_returncode == 'raise':
            if all_output:
                logger.info(
                    'Complete output from command %s:', command_desc,
                )
                logger.info(
                    ''.join(all_output) +
                    '\n----------------------------------------'
                )
            raise InstallationError(
                'Command "%s" failed with error code %s in %s'
                % (command_desc, proc.returncode, cwd))
        elif on_returncode == 'warn':
            logger.warning(
                'Command "%s" had error code %s in %s',
                command_desc, proc.returncode, cwd,
            )
        elif on_returncode == 'ignore':
            pass
        else:
            raise ValueError('Invalid value: on_returncode=%s' %
                             repr(on_returncode))
    if not show_stdout:
        return remove_tracebacks(''.join(all_output))
def read_text_file(filename):
    """Return the contents of *filename*.

    Try to decode the file contents with utf-8, the preferred system encoding
    (e.g., cp1252 on some Windows machines), and latin1, in that order.
    Decoding a byte string with latin1 will never raise an error. In the worst
    case, the returned string will contain some garbage characters.
    """
    with open(filename, 'rb') as fp:
        raw = fp.read()
    for encoding in ('utf-8', locale.getpreferredencoding(False), 'latin1'):
        try:
            return raw.decode(encoding)
        except UnicodeDecodeError:
            continue
    # Unreachable: latin1 maps every byte, so the loop always returns.
    raise AssertionError('latin1 decoding cannot fail')
def _make_build_dir(build_dir):
    # Create the build directory and drop pip's delete marker file so stale
    # build directories can be recognised and cleaned up later.
    os.makedirs(build_dir)
    write_delete_marker_file(build_dir)
class FakeFile(object):
    """Wrap a list of lines in an object with readline() to make
    ConfigParser happy."""

    def __init__(self, lines):
        # Hand the lines out lazily, one at a time.
        self._gen = (line for line in lines)

    def readline(self):
        try:
            try:
                return next(self._gen)
            except NameError:
                # Python < 2.6 has no next() builtin.
                return self._gen.next()
        except StopIteration:
            # File-like contract: reads at EOF return the empty string.
            return ''

    def __iter__(self):
        return self._gen
class StreamWrapper(StringIO):
    """A StringIO stand-in for a real stream that remembers the stream it
    replaced (used by captured_output below)."""

    @classmethod
    def from_stream(cls, orig_stream):
        # NOTE: stored on the class, not the instance, matching original
        # behavior (only one stream is wrapped at a time in practice).
        cls.orig_stream = orig_stream
        return cls()

    # compileall.compile_dir() needs stdout.encoding to print to stdout
    @property
    def encoding(self):
        return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr
    that temporarily replaces the sys stream *stream_name* with a StringIO.

    Taken from Lib/support/__init__.py in the CPython repo.
    """
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
    try:
        yield getattr(sys, stream_name)
    finally:
        # Always restore the real stream, even if the body raised.
        setattr(sys, stream_name, orig_stdout)
def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as stdout:
           print('hello')
       self.assertEqual(stdout.getvalue(), 'hello\n')

    Taken from Lib/support/__init__.py in the CPython repo.
    """
    # Thin convenience wrapper over captured_output.
    return captured_output('stdout')
class cached_property(object):
    """A property that is only computed once per instance and then replaces
    itself with an ordinary attribute. Deleting the attribute resets the
    property.

    Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
    """

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access: hand back the descriptor itself.
            return self
        # Compute once, then shadow the descriptor with the plain value in
        # the instance dict; later reads never reach __get__ again.
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
def get_installed_version(dist_name):
    """Get the installed version of dist_name avoiding pkg_resources cache"""
    # Create a requirement that we'll look for inside of setuptools.
    req = pkg_resources.Requirement.parse(dist_name)

    # We want to avoid having this cached, so we need to construct a new
    # working set each time (pkg_resources.working_set is a process-wide
    # snapshot that can go stale after installs/uninstalls).
    working_set = pkg_resources.WorkingSet()

    # Get the installed distribution from our working set
    dist = working_set.find(req)

    # Check to see if we got an installed distribution or not, if we did
    # we want to return it's version.
    return dist.version if dist else None
def canonicalize_name(name):
    """Convert an arbitrary string to a canonical name used for comparison"""
    # safe_name replaces runs of illegal characters with '-'; lower-casing
    # then makes the comparison case-insensitive.
    return pkg_resources.safe_name(name).lower()
def consume(iterator):
    """Consume an iterable at C speed."""
    # maxlen=0 makes the deque discard every item as it is received.
    deque(iterator, maxlen=0)
| mit |
hasecbinusr/pysal | pysal/cg/sphere.py | 7 | 12483 | """
sphere: Tools for working with spherical geometry.
Author(s):
Charles R Schmidt schmidtc@gmail.com
Luc Anselin luc.anselin@asu.edu
Xun Li xun.li@asu.edu
"""
__author__ = "Charles R Schmidt <schmidtc@gmail.com>, Luc Anselin <luc.anselin@asu.edu, Xun Li <xun.li@asu.edu"
import math
import random
import numpy
import scipy.spatial
import scipy.constants
from scipy.spatial.distance import euclidean
from math import pi, cos, sin, asin
__all__ = ['RADIUS_EARTH_KM', 'RADIUS_EARTH_MILES', 'arcdist', 'arcdist2linear', 'brute_knn', 'fast_knn', 'fast_threshold', 'linear2arcdist', 'toLngLat', 'toXYZ', 'lonlat','harcdist','geointerpolate','geogrid']
RADIUS_EARTH_KM = 6371.0
RADIUS_EARTH_MILES = (
RADIUS_EARTH_KM * scipy.constants.kilo) / scipy.constants.mile
def arcdist(pt0, pt1, radius=RADIUS_EARTH_KM):
    """
    Great-circle (arc) distance between two points on a sphere.

    Parameters
    ----------
    pt0    : point
             assumed to be in form (lng,lat)
    pt1    : point
             assumed to be in form (lng,lat)
    radius : radius of the sphere
             defaults to Earth's radius
             Source: http://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html

    Returns
    -------
    The arc distance between pt0 and pt1 using supplied radius

    Examples
    --------
    >>> pt0 = (0,0)
    >>> pt1 = (180,0)
    >>> d = arcdist(pt0,pt1,RADIUS_EARTH_MILES)
    >>> d == math.pi*RADIUS_EARTH_MILES
    True
    """
    # Chord length between the projections on the unit sphere, converted
    # back to an arc length for the requested radius.
    chord = euclidean(toXYZ(pt0), toXYZ(pt1))
    return linear2arcdist(chord, radius)
def arcdist2linear(arc_dist, radius=RADIUS_EARTH_KM):
    """
    Convert an arc distance (spherical earth) to a linear distance (R3) in the unit sphere.

    Examples
    --------
    >>> pt0 = (0,0)
    >>> pt1 = (180,0)
    >>> d = arcdist(pt0,pt1,RADIUS_EARTH_MILES)
    >>> d == math.pi*RADIUS_EARTH_MILES
    True
    >>> arcdist2linear(d,RADIUS_EARTH_MILES)
    2.0
    """
    # Fraction of the circumference covered, expressed as an angle.
    circumference = 2 * math.pi * radius
    angle = math.radians((arc_dist * 360.0) / circumference)
    # Chord length on the unit sphere via the law of cosines.
    return (2 - (2 * math.cos(angle))) ** (0.5)
def linear2arcdist(linear_dist, radius=RADIUS_EARTH_KM):
    """
    Convert a linear distance in the unit sphere (R3) to an arc distance based on supplied radius

    Examples
    --------
    >>> pt0 = (0,0)
    >>> pt1 = (180,0)
    >>> d = arcdist(pt0,pt1,RADIUS_EARTH_MILES)
    >>> d == linear2arcdist(2.0, radius = RADIUS_EARTH_MILES)
    True
    """
    # Infinity maps to infinity; anything beyond the sphere's diameter
    # cannot be a chord of the unit sphere.
    if linear_dist == float('inf'):
        return float('inf')
    if linear_dist > 2.0:
        raise ValueError("linear_dist, must not exceed the diameter of the unit sphere, 2.0")
    circumference = 2 * math.pi * radius
    a2 = linear_dist ** 2
    # Central angle subtended by the chord, in degrees.
    theta = math.degrees(math.acos((2 - a2) / (2.)))
    return (theta * circumference) / 360.0
def toXYZ(pt):
    """
    Project a point on the sphere onto unit-sphere cartesian coordinates.

    Parameters
    ----------
    pt : point
         assumed to be in form (lng,lat), in decimal degrees

    Returns
    -------
    x, y, z
    """
    lng_rad, lat_rad = map(math.radians, pt)
    # Shift into the [0, 2*pi) / [0, pi) ranges used by the spherical
    # parameterisation below.
    phi = lng_rad + pi
    theta = lat_rad + (pi / 2)
    sin_theta = sin(theta)
    return sin_theta * cos(phi), sin_theta * sin(phi), cos(theta)
def toLngLat(xyz):
    """Convert unit-sphere cartesian coordinates back to (phi, theta) radians.

    Inverse companion of toXYZ; returns angles in radians, not degrees.
    """
    x, y, z = xyz
    if z == -1 or z == 1:
        # At the poles longitude is undefined; pin it to zero.
        phi = 0
    else:
        phi = math.atan2(y, x)
        # Undo the pi shift applied by toXYZ, keeping phi in (-pi, pi].
        if phi > 0:
            phi -= math.pi
        elif phi < 0:
            phi += math.pi
    theta = math.acos(z) - (math.pi / 2)
    return phi, theta
def brute_knn(pts, k, mode='arc'):
    """Brute-force k nearest neighbors for points on a sphere.

    Parameters
    ----------
    pts  : list of (lng, lat) points ('arc' mode) or cartesian points
           ('xyz' mode)
    k    : int, number of neighbors per point
    mode : 'arc' for great-circle distance, 'xyz' for euclidean distance
           (the original docstring's 'xrz' was a typo)

    Returns
    -------
    w : dict mapping each point index to the list of its k nearest
        neighbor indices
    """
    n = len(pts)
    full = numpy.zeros((n, n))
    # Fill the symmetric pairwise distance matrix.
    for i in range(n):
        for j in range(i + 1, n):
            if mode == 'arc':
                dist = arcdist(pts[i], pts[j], radius=RADIUS_EARTH_KM)
            elif mode == 'xyz':
                dist = euclidean(pts[i], pts[j])
            else:
                # Previously an invalid mode crashed with NameError on `dist`.
                raise ValueError("mode must be 'arc' or 'xyz'")
            full[i, j] = dist
            full[j, i] = dist
    w = {}
    for i in range(n):
        # Skip index 0 of the argsort: the point itself (distance zero).
        w[i] = full[i].argsort()[1:k + 1].tolist()
    return w
def fast_knn(pts, k, return_dist=False):
    """
    Computes k nearest neighbors on a sphere.

    Parameters
    ----------
    pts : list of x,y pairs
    k   : int
          Number of points to query
    return_dist : bool
          Return distances in the 'wd' container object

    Returns
    -------
    wn : dict
         list of neighbors per point index
    wd : dict
         list of neighbor distances per point index (optional)
    """
    pts = numpy.array(pts)
    kd = scipy.spatial.KDTree(pts)
    # Query k+1 neighbors because each point is its own nearest neighbor.
    d, w = kd.query(pts, k + 1)
    w = w[:, 1:]
    wn = {}
    # range() replaces Python-2-only xrange(); identical iteration behavior.
    for i in range(len(pts)):
        wn[i] = w[i].tolist()
    if return_dist:
        d = d[:, 1:]
        wd = {}
        for i in range(len(pts)):
            # KDTree distances are chord lengths; convert to arc distances.
            wd[i] = [linear2arcdist(x,
                                    radius=RADIUS_EARTH_MILES) for x in d[i].tolist()]
        return wn, wd
    return wn
def fast_threshold(pts, dist, radius=RADIUS_EARTH_KM):
    """Find, for each point, all other points within an arc distance.

    Parameters
    ----------
    pts    : list of point tuples
    dist   : threshold arc distance
    radius : sphere radius used to interpret `dist`

    Returns
    -------
    wd : dict mapping each point index to the indices of the points
         within `dist` of it (the point itself is excluded)
    """
    # Convert the arc threshold to the equivalent chord length so a
    # euclidean KD-tree ball query can be used.
    d = arcdist2linear(dist, radius)
    kd = scipy.spatial.KDTree(pts)
    r = kd.query_ball_tree(kd, d)
    wd = {}
    # range() replaces Python-2-only xrange(); identical iteration behavior.
    for i in range(len(pts)):
        neighbors = r[i]
        # query_ball_tree includes the query point itself; drop it.
        neighbors.remove(i)
        wd[i] = neighbors
    return wd
########### new functions
def lonlat(pointslist):
    """
    Converts point order from lat-lon tuples to lon-lat (x,y) tuples

    Parameters
    ----------
    pointslist : list of lat-lon tuples (Note, has to be a list, even for one point)

    Returns
    -------
    newpts : list with tuples of points in lon-lat order

    Example
    -------
    >>> points = [(41.981417, -87.893517), (41.980396, -87.776787), (41.980906, -87.696450)]
    >>> newpoints = lonlat(points)
    >>> newpoints
    [(-87.893517, 41.981417), (-87.776787, 41.980396), (-87.69645, 41.980906)]
    """
    # Swap the two coordinates of every tuple.
    return [(pt[1], pt[0]) for pt in pointslist]
def haversine(x):
    """
    Computes the haversine formula

    Parameters
    ----------
    x : angle in radians

    Returns
    -------
      : square of sine of half the radian (the haversine formula)

    Example
    -------
    >>> haversine(math.pi) # is 180 in radians, hence sin of 90 = 1
    1.0
    """
    half_sin = math.sin(x / 2)
    return half_sin * half_sin
def d2r(x):
    """Convert an angle in degrees to radians."""
    return x * math.pi / 180.0
def r2d(x):
    """Convert an angle in radians to degrees."""
    # PEP 8: named lambdas (`d2r = lambda x: ...`) should be plain defs;
    # the arithmetic is unchanged.
    return x * 180.0 / math.pi
def radangle(p0, p1):
    """
    Radian angle between two points on a sphere in lon-lat (x,y)

    Parameters
    ----------
    p0 : first point as a lon,lat tuple
    p1 : second point as a lon,lat tuple

    Returns
    -------
    d : radian angle in radians

    Example
    -------
    >>> p0 = (-87.893517, 41.981417)
    >>> p1 = (-87.519295, 41.657498)
    >>> radangle(p0,p1)
    0.007460167953189258

    Note
    ----
    Uses haversine formula, function haversine and degree to radian
    conversion lambda function d2r
    """
    lng0, lat0 = d2r(p0[0]), d2r(p0[1])
    lng1, lat1 = d2r(p1[0]), d2r(p1[1])
    # Haversine formula: hav(d) = hav(dlat) + cos(lat0)*cos(lat1)*hav(dlng)
    h = haversine(lat1 - lat0) + math.cos(lat0) * math.cos(lat1) * haversine(lng1 - lng0)
    return 2.0 * math.asin(math.sqrt(h))
def harcdist(p0, p1, lonx=True, radius=RADIUS_EARTH_KM):
    """
    Alternative arc distance function, uses haversine formula

    Parameters
    ----------
    p0     : first point as a tuple in decimal degrees
    p1     : second point as a tuple in decimal degrees
    lonx   : boolean to assess the order of the coordinates,
             for lon,lat (default) = True, for lat,lon = False
    radius : radius of the earth at the equator as a sphere
             default: RADIUS_EARTH_KM (6371.0 km)
             options: RADIUS_EARTH_MILES (3959.0 miles)
                      None (for result in radians)

    Returns
    -------
    d : distance in units specified, km, miles or radians (for None)

    Example
    -------
    >>> p0 = (-87.893517, 41.981417)
    >>> p1 = (-87.519295, 41.657498)
    >>> harcdist(p0,p1)
    47.52873002976876
    >>> harcdist(p0,p1,radius=None)
    0.007460167953189258

    Note
    ----
    Uses radangle function to compute radian angle
    """
    if not lonx:
        # Caller supplied lat-lon order; normalise to lon-lat.
        p0, p1 = lonlat([p0, p1])
    angle = radangle(p0, p1)
    # With radius=None the raw radian angle is returned.
    return angle if radius is None else angle * radius
def geointerpolate(p0,p1,t,lonx=True):
    """
    Finds a point on a sphere along the great circle distance between two points
    on a sphere
    also known as a way point in great circle navigation

    Parameters
    ----------
    p0   : first point as a tuple in decimal degrees
    p1   : second point as a tuple in decimal degrees
    t    : proportion along great circle distance between p0 and p1
           e.g., t=0.5 would find the mid-point
    lonx : boolean to assess the order of the coordinates,
           for lon,lat (default) = True, for lat,lon = False

    Returns
    -------
    x,y : tuple in decimal degrees of lon-lat (default) or lat-lon,
          depending on setting of lonx; in other words, the same
          order is used as for the input

    Example
    -------
    >>> p0 = (-87.893517, 41.981417)
    >>> p1 = (-87.519295, 41.657498)
    >>> geointerpolate(p0,p1,0.1)     # using lon-lat
    (-87.85592403438788, 41.949079912574796)
    >>> p3 = (41.981417, -87.893517)
    >>> p4 = (41.657498, -87.519295)
    >>> geointerpolate(p3,p4,0.1,lonx=False)  # using lat-lon
    (41.949079912574796, -87.85592403438788)
    """
    if not(lonx):
        # Inputs arrived as lat-lon; flip to lon-lat for the math below.
        p = lonlat([p0,p1])
        p0 = p[0]
        p1 = p[1]
    # Central angle between the endpoints, in radians.
    d = radangle(p0,p1)
    k = 1.0 / math.sin(d)
    # Rescale t from a proportion (0..1) to an angle along the arc.
    t = t*d
    # Interpolation weights for the two endpoints (spherical interpolation).
    A = math.sin(d-t) * k
    B = math.sin(t) * k
    x0, y0 = d2r(p0[0]),d2r(p0[1])
    x1, y1 = d2r(p1[0]),d2r(p1[1])
    # Blend the endpoints in cartesian coordinates on the unit sphere.
    x = A * math.cos(y0) * math.cos(x0) + B * math.cos(y1) * math.cos(x1)
    y = A * math.cos(y0) * math.sin(x0) + B * math.cos(y1) * math.sin(x1)
    z = A * math.sin(y0) + B * math.sin(y1)
    # Convert back to lon-lat in decimal degrees.
    newpx = r2d(math.atan2(y, x))
    newpy = r2d(math.atan2(z, math.sqrt(x*x + y*y)))
    if not(lonx):
        # Mirror the caller's lat-lon ordering on the way out.
        return newpy,newpx
    return newpx,newpy
def geogrid(pup,pdown,k,lonx=True):
    """
    Computes a k+1 by k+1 set of grid points for a bounding box in lat-lon
    uses geointerpolate

    Parameters
    ----------
    pup   : tuple with lat-lon or lon-lat for upper left corner of bounding box
    pdown : tuple with lat-lon or lon-lat for lower right corner of bounding box
    k     : number of grid cells (grid points will be one more)
    lonx  : boolean to assess the order of the coordinates,
            for lon,lat (default) = True, for lat,lon = False

    Returns
    -------
    grid : list of tuples with lat-lon or lon-lat for grid points, row by row,
           starting with the top row and moving to the bottom; coordinate tuples
           are returned in same order as input

    Example
    -------
    >>> pup = (42.023768,-87.946389)    # Arlington Heights IL
    >>> pdown = (41.644415,-87.524102)  # Hammond, IN
    >>> geogrid(pup,pdown,3,lonx=False)
    [(42.023768, -87.946389), (42.02393997819538, -87.80562679358316), (42.02393997819538, -87.66486420641684), (42.023768, -87.524102), (41.897317, -87.94638900000001), (41.8974888973743, -87.80562679296166), (41.8974888973743, -87.66486420703835), (41.897317, -87.524102), (41.770866000000005, -87.94638900000001), (41.77103781320412, -87.80562679234043), (41.77103781320412, -87.66486420765956), (41.770866000000005, -87.524102), (41.644415, -87.946389), (41.64458672568646, -87.80562679171955), (41.64458672568646, -87.66486420828045), (41.644415, -87.524102)]
    """
    # Normalise the two corners to lon-lat order for the interpolation math.
    if lonx:
        corners = [pup,pdown]
    else:
        corners = lonlat([pup,pdown])
    # Interior interpolation proportions: 1/k, 2/k, ..., (k-1)/k.
    tpoints = [float(i)/k for i in range(k)[1:]]
    # Left edge runs from the upper-left corner down; right edge likewise.
    leftcorners = [corners[0],(corners[0][0],corners[1][1])]
    rightcorners = [(corners[1][0],corners[0][1]),corners[1]]
    leftside = [leftcorners[0]]
    rightside = [rightcorners[0]]
    # Interpolate the interior points of both vertical edges.
    for t in tpoints:
        newpl = geointerpolate(leftcorners[0],leftcorners[1],t)
        leftside.append(newpl)
        newpr = geointerpolate(rightcorners[0],rightcorners[1],t)
        rightside.append(newpr)
    leftside.append(leftcorners[1])
    rightside.append(rightcorners[1])
    grid = []
    # Walk the rows top to bottom, interpolating across each row.
    for i in range(len(leftside)):
        grid.append(leftside[i])
        for t in tpoints:
            newp = geointerpolate(leftside[i],rightside[i],t)
            grid.append(newp)
        grid.append(rightside[i])
    if not(lonx):
        # Return the grid in the caller's lat-lon ordering.
        grid = lonlat(grid)
    return grid
| bsd-3-clause |
kamyu104/django | django/core/management/commands/flush.py | 200 | 3905 | from __future__ import unicode_literals
import sys
from importlib import import_module
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal, sql_flush
from django.db import DEFAULT_DB_ALIAS, connections, transaction
from django.utils import six
from django.utils.six.moves import input
class Command(BaseCommand):
    help = ('Removes ALL DATA from the database, including data added during '
            'migrations. Does not achieve a "fresh install" state.')
    def add_arguments(self, parser):
        # --noinput suppresses the interactive "are you sure?" confirmation.
        parser.add_argument('--noinput', '--no-input',
            action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.')
        parser.add_argument('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a database to flush. Defaults to the "default" database.')
    def handle(self, **options):
        """Truncate all Django-managed tables on the selected database."""
        database = options.get('database')
        connection = connections[database]
        verbosity = options.get('verbosity')
        interactive = options.get('interactive')
        # The following are stealth options used by Django's internals.
        reset_sequences = options.get('reset_sequences', True)
        allow_cascade = options.get('allow_cascade', False)
        inhibit_post_migrate = options.get('inhibit_post_migrate', False)
        self.style = no_style()
        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_config in apps.get_app_configs():
            try:
                import_module('.management', app_config.name)
            except ImportError:
                pass
        # Build the list of SQL statements that will empty the tables.
        sql_list = sql_flush(self.style, connection, only_django=True,
                             reset_sequences=reset_sequences,
                             allow_cascade=allow_cascade)
        if interactive:
            confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to an empty state.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'
        if confirm == 'yes':
            try:
                # Run all statements in one transaction when the backend
                # supports rolling back DDL.
                with transaction.atomic(using=database,
                                        savepoint=connection.features.can_rollback_ddl):
                    with connection.cursor() as cursor:
                        for sql in sql_list:
                            cursor.execute(sql)
            except Exception as e:
                new_msg = (
                    "Database %s couldn't be flushed. Possible reasons:\n"
                    "  * The database isn't running or isn't configured correctly.\n"
                    "  * At least one of the expected database tables doesn't exist.\n"
                    "  * The SQL was invalid.\n"
                    "Hint: Look at the output of 'django-admin sqlflush'. "
                    "That's the SQL this command wasn't able to run.\n"
                    "The full error: %s") % (connection.settings_dict['NAME'], e)
                # Re-raise as CommandError while preserving the traceback (Py2/Py3).
                six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])
            # Empty sql_list may signify an empty database and post_migrate would then crash
            if sql_list and not inhibit_post_migrate:
                # Emit the post migrate signal. This allows individual applications to
                # respond as if the database had been migrated from scratch.
                emit_post_migrate_signal(verbosity, interactive, database)
        else:
            self.stdout.write("Flush cancelled.\n")
| bsd-3-clause |
scorphus/thumbor | thumbor/filters/convolution.py | 4 | 1197 | # -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.ext.filters import _convolution
from thumbor.filters import BaseFilter, filter_method
# pylint: disable=line-too-long
class Filter(BaseFilter):
    """
    Usage: /filters:convolution(<semicolon separated matrix items>, <number of columns in matrix>, <should normalize boolean>)
    Example of blur filter: /filters:convolution(1;2;1;2;4;2;1;2;1,3,true)/
    """
    @filter_method(
        # Kernel values: semicolon-separated signed decimal numbers.
        r"(?:[-]?[\d]+\.?[\d]*[;])*(?:[-]?[\d]+\.?[\d]*)",
        BaseFilter.PositiveNumber,
        BaseFilter.Boolean,
    )
    def convolution(self, matrix, columns, should_normalize=True):
        # The kernel arrives as one semicolon-separated string; split it into
        # a tuple of numeric strings for the C extension.
        matrix = tuple(matrix.split(";"))
        mode, data = self.engine.image_data_as_rgb()
        # Run the convolution over the raw RGB buffer via the C extension.
        imgdata = _convolution.apply(
            mode,
            data,
            self.engine.size[0],
            self.engine.size[1],
            matrix,
            columns,
            should_normalize,
        )
        self.engine.set_image_data(imgdata)
| mit |
openstack/tempest | tempest/api/network/admin/test_external_networks_negative.py | 1 | 2655 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.network import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class ExternalNetworksAdminNegativeTestJSON(base.BaseAdminNetworkTest):
    """Negative tests of external network"""
    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('d402ae6c-0be0-4d8e-833b-a738895d98d0')
    @testtools.skipUnless(CONF.network.public_network_id,
                          'The public_network_id option must be specified.')
    def test_create_port_with_precreated_floatingip_as_fixed_ip(self):
        """Test creating port with precreated floating ip as fixed ip

        NOTE: External networks can be used to create both floating-ip as
        well as instance-ip. So, creating an instance-ip with a value of a
        pre-created floating-ip should be denied.
        """
        # create a floating ip
        body = self.admin_floating_ips_client.create_floatingip(
            floating_network_id=CONF.network.public_network_id)
        created_floating_ip = body['floatingip']
        # Ensure the floating IP is deleted even if the test fails early.
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.admin_floating_ips_client.delete_floatingip,
                        created_floating_ip['id'])
        floating_ip_address = created_floating_ip['floating_ip_address']
        self.assertIsNotNone(floating_ip_address)
        # use the same value of floatingip as fixed-ip to create_port()
        fixed_ips = [{'ip_address': floating_ip_address}]
        # create a port which will internally create an instance-ip;
        # the API is expected to reject it with 409 Conflict.
        self.assertRaises(lib_exc.Conflict,
                          self.admin_ports_client.create_port,
                          name=data_utils.rand_name(self.__class__.__name__),
                          network_id=CONF.network.public_network_id,
                          fixed_ips=fixed_ips)
| apache-2.0 |
jpakkane/meson | mesonbuild/modules/rpm.py | 2 | 8284 | # Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides helper functions for RPM related
functionality such as generating template RPM spec file.'''
from .. import build
from .. import compilers
import datetime
from .. import mlog
from . import GirTarget, TypelibTarget
from . import ModuleReturnValue
from . import ExtensionModule
from ..interpreterbase import noKwargs
import os
class RPMModule(ExtensionModule):
    """Generate a template RPM .spec file from the configured project."""

    @noKwargs
    def generate_spec_template(self, coredata, args, kwargs):
        """Write '<project>.spec' into the build directory.

        The %files sections, BuildRequires lines and the optional -devel
        subpackage are inferred from the project's targets, headers and
        man pages.  Returns an (empty) ModuleReturnValue.
        """
        self.coredata = coredata
        required_compilers = self.__get_required_compilers()
        # RPM package names may not contain whitespace.
        proj = coredata.project_name.replace(' ', '_').replace('\t', '_')
        so_installed = False
        devel_subpkg = False
        files = set()
        files_devel = set()
        to_delete = set()
        for target in coredata.targets.values():
            if isinstance(target, build.Executable) and target.need_install:
                files.add('%%{_bindir}/%s' % target.get_filename())
            elif isinstance(target, build.SharedLibrary) and target.need_install:
                files.add('%%{_libdir}/%s' % target.get_filename())
                # Unversioned .so symlinks belong in the -devel subpackage.
                for alias in target.get_aliases():
                    if alias.endswith('.so'):
                        files_devel.add('%%{_libdir}/%s' % alias)
                    else:
                        files.add('%%{_libdir}/%s' % alias)
                so_installed = True
            elif isinstance(target, build.StaticLibrary) and target.need_install:
                to_delete.add('%%{buildroot}%%{_libdir}/%s' % target.get_filename())
                mlog.warning('removing', mlog.bold(target.get_filename()),
                             'from package because packaging static libs not recommended')
            elif isinstance(target, GirTarget) and target.should_install():
                files_devel.add('%%{_datadir}/gir-1.0/%s' % target.get_filename()[0])
            elif isinstance(target, TypelibTarget) and target.should_install():
                files.add('%%{_libdir}/girepository-1.0/%s' % target.get_filename()[0])
        for header in coredata.headers:
            if header.get_install_subdir():
                files_devel.add('%%{_includedir}/%s/' % header.get_install_subdir())
            else:
                for hdr_src in header.get_sources():
                    files_devel.add('%%{_includedir}/%s' % hdr_src)
        for man in coredata.man:
            for man_file in man.get_sources():
                # The man section is encoded in the file extension (e.g. foo.1).
                files.add('%%{_mandir}/man%u/%s.*' % (int(man_file.split('.')[-1]), man_file))
        if files_devel:
            devel_subpkg = True
        filename = os.path.join(coredata.environment.get_build_dir(),
                                '%s.spec' % proj)
        with open(filename, 'w+') as fn:
            fn.write('Name: %s\n' % proj)
            fn.write('Version: # FIXME\n')
            fn.write('Release: 1%{?dist}\n')
            fn.write('Summary: # FIXME\n')
            fn.write('License: # FIXME\n')
            fn.write('\n')
            fn.write('Source0: %{name}-%{version}.tar.xz # FIXME\n')
            fn.write('\n')
            fn.write('BuildRequires: meson\n')
            for compiler in required_compilers:
                fn.write('BuildRequires: %s\n' % compiler)
            for dep in coredata.environment.coredata.deps.host:
                fn.write('BuildRequires: pkgconfig(%s)\n' % dep[0])
            # ext_libs and ext_progs have been removed from coredata so the following code
            # no longer works. It is kept as a reminder of the idea should anyone wish
            # to re-implement it.
            #
            # for lib in state.environment.coredata.ext_libs.values():
            #     name = lib.get_name()
            #     fn.write('BuildRequires: {} # FIXME\n'.format(name))
            #     mlog.warning('replace', mlog.bold(name), 'with the real package.',
            #                  'You can use following command to find package which '
            #                  'contains this lib:',
            #                  mlog.bold("dnf provides '*/lib{}.so'".format(name)))
            # for prog in state.environment.coredata.ext_progs.values():
            #     if not prog.found():
            #         fn.write('BuildRequires: %%{_bindir}/%s # FIXME\n' %
            #                  prog.get_name())
            #     else:
            #         fn.write('BuildRequires: {}\n'.format(prog.get_path()))
            fn.write('\n')
            fn.write('%description\n')
            fn.write('\n')
            if devel_subpkg:
                fn.write('%package devel\n')
                fn.write('Summary: Development files for %{name}\n')
                # BUGFIX: '{version}' was missing its '%' macro marker, which
                # made rpmbuild emit a literal '{version}' in the dependency.
                fn.write('Requires: %{name}%{?_isa} = %{?epoch:%{epoch}:}%{version}-%{release}\n')
                fn.write('\n')
                fn.write('%description devel\n')
                fn.write('Development files for %{name}.\n')
                fn.write('\n')
            fn.write('%prep\n')
            fn.write('%autosetup\n')
            fn.write('\n')
            fn.write('%build\n')
            fn.write('%meson\n')
            fn.write('%meson_build\n')
            fn.write('\n')
            fn.write('%install\n')
            fn.write('%meson_install\n')
            if to_delete:
                fn.write('rm -vf %s\n' % ' '.join(to_delete))
            fn.write('\n')
            fn.write('%check\n')
            fn.write('%meson_test\n')
            fn.write('\n')
            fn.write('%files\n')
            for f in files:
                fn.write('%s\n' % f)
            fn.write('\n')
            if devel_subpkg:
                fn.write('%files devel\n')
                for f in files_devel:
                    fn.write('%s\n' % f)
                fn.write('\n')
            if so_installed:
                # Shared libraries require an ldconfig run on (un)install.
                fn.write('%post -p /sbin/ldconfig\n')
                fn.write('%postun -p /sbin/ldconfig\n')
                fn.write('\n')
            fn.write('%changelog\n')
            fn.write('* %s meson <meson@example.com> - \n' %
                     datetime.date.today().strftime('%a %b %d %Y'))
            fn.write('- \n')
            fn.write('\n')
        mlog.log('RPM spec template written to %s.spec.\n' % proj)
        return ModuleReturnValue(None, [])

    def __get_required_compilers(self):
        """Map the project's configured compilers to Fedora package names."""
        required_compilers = set()
        for compiler in self.coredata.compilers.values():
            # Elbrus has one 'lcc' package for every compiler
            if isinstance(compiler, compilers.GnuCCompiler):
                required_compilers.add('gcc')
            elif isinstance(compiler, compilers.GnuCPPCompiler):
                required_compilers.add('gcc-c++')
            elif isinstance(compiler, compilers.ElbrusCCompiler):
                required_compilers.add('lcc')
            elif isinstance(compiler, compilers.ElbrusCPPCompiler):
                required_compilers.add('lcc')
            elif isinstance(compiler, compilers.ElbrusFortranCompiler):
                required_compilers.add('lcc')
            elif isinstance(compiler, compilers.ValaCompiler):
                required_compilers.add('vala')
            elif isinstance(compiler, compilers.GnuFortranCompiler):
                required_compilers.add('gcc-gfortran')
            elif isinstance(compiler, compilers.GnuObjCCompiler):
                required_compilers.add('gcc-objc')
            elif isinstance(compiler, compilers.GnuObjCPPCompiler):
                # BUGFIX: was `compiler == compilers.GnuObjCPPCompiler`, which
                # compared an instance to a class and could never match.
                required_compilers.add('gcc-objc++')
            else:
                mlog.log('RPM spec file not created, generation not allowed for:',
                         mlog.bold(compiler.get_id()))
        return required_compilers
def initialize(*args, **kwargs):
    # Entry point used by Meson's module loader to instantiate this module.
    return RPMModule(*args, **kwargs)
| apache-2.0 |
manipopopo/tensorflow | tensorflow/python/ops/image_grad.py | 31 | 4235 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains Gradient functions for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
@ops.RegisterGradient("ResizeNearestNeighbor")
def _ResizeNearestNeighborGrad(op, grad):
  """The derivatives for nearest neighbor resizing.

  Args:
    op: The ResizeNearestNeighbor op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input and the output.
  """
  image = op.inputs[0]
  # Use the statically known spatial dims when available, otherwise fall
  # back to the dynamic shape computed at run time.
  if image.get_shape()[1:3].is_fully_defined():
    image_shape = image.get_shape()[1:3]
  else:
    image_shape = array_ops.shape(image)[1:3]
  grads = gen_image_ops.resize_nearest_neighbor_grad(
      grad,
      image_shape,
      align_corners=op.get_attr("align_corners"))
  # No gradient flows to the integer `size` input.
  return [grads, None]
@ops.RegisterGradient("ResizeBilinear")
def _ResizeBilinearGrad(op, grad):
  """The derivatives for bilinear resizing.

  Args:
    op: The ResizeBilinear op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  # The original input tensor is passed so the grad kernel can recover the
  # input size; no gradient flows to the integer `size` input.
  grad0 = gen_image_ops.resize_bilinear_grad(
      grad, op.inputs[0], align_corners=op.get_attr("align_corners"))
  return [grad0, None]
@ops.RegisterGradient("ResizeBicubic")
def _ResizeBicubicGrad(op, grad):
  """The derivatives for bicubic resizing.

  Args:
    op: The ResizeBicubic op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  # A gradient is only produced for float32/float64 inputs; for any other
  # dtype the input gradient is None.
  allowed_types = [dtypes.float32, dtypes.float64]
  grad0 = None
  if op.inputs[0].dtype in allowed_types:
    grad0 = gen_image_ops.resize_bicubic_grad(
        grad, op.inputs[0], align_corners=op.get_attr("align_corners"))
  # No gradient flows to the integer `size` input.
  return [grad0, None]
@ops.RegisterGradient("CropAndResize")
def _CropAndResizeGrad(op, grad):
  """The derivatives for crop_and_resize.

  We back-propagate to the image only when the input image tensor has floating
  point dtype but we always back-propagate to the input boxes tensor.

  Args:
    op: The CropAndResize op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input image, boxes, as well as the always-None
    gradients w.r.t. box_ind and crop_size.
  """
  image = op.inputs[0]
  # Use the static shape when fully known, the dynamic shape otherwise.
  if image.get_shape().is_fully_defined():
    image_shape = image.get_shape().as_list()
  else:
    image_shape = array_ops.shape(image)
  # Image gradient only exists for floating-point input dtypes.
  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]
  if op.inputs[0].dtype in allowed_types:
    # pylint: disable=protected-access
    grad0 = gen_image_ops.crop_and_resize_grad_image(
        grad, op.inputs[1], op.inputs[2], image_shape, T=op.get_attr("T"),
        method=op.get_attr("method"))
    # pylint: enable=protected-access
  else:
    grad0 = None
  # `grad0` is the gradient to the input image pixels and it
  # has been implemented for nearest neighbor and bilinear sampling
  # respectively. `grad1` is the gradient to the input crop boxes' coordinates.
  # When using nearest neighbor sampling, the gradient to crop boxes'
  # coordinates are not well defined. In practice, we still approximate
  # grad1 using the gradient derived from bilinear sampling.
  grad1 = gen_image_ops.crop_and_resize_grad_boxes(
      grad, op.inputs[0], op.inputs[1], op.inputs[2])
  return [grad0, grad1, None, None]
| apache-2.0 |
eliben/deep-learning-samples | cs231n/linear_classifier.py | 1 | 2603 | # This is a generic linear classifier that implements SGD - Stochastic Gradient
# Descent (actually its mini-batch generalization).
#
# It has to be derived from by classes that provide a 'loss' member function,
# to implement different classifiers.
# See http://cs231n.github.io/classification/ for background.
import numpy as np
import random
class LinearClassifier:
    """Generic linear classifier trained with mini-batch SGD.

    Subclasses must implement ``loss(X_batch, y_batch, reg)`` returning a
    tuple ``(loss, dW)`` where ``dW`` has the same shape as ``self.W``.
    """
    def __init__(self):
        # Weight matrix (K classes x D dims); lazily created in train().
        self.W = None
    def train(self,
              X,
              y,
              learning_rate=1e-3,
              reg=1e-5,
              num_iters=100,
              batch_size=200,
              verbose=False):
        """Train this linear classifier using stochastic gradient descent.

        Inputs:
        - X: D x N array of training data. Each training point is a
          D-dimensional column.
        - y: 1-dimensional array of length N with labels 0...K-1, for K classes.
        - learning_rate: (float) learning rate for optimization.
        - reg: (float) regularization strength.
        - num_iters: (integer) number of steps to take when optimizing
        - batch_size: (integer) number of training examples to use at each step.
        - verbose: (boolean) If true, print progress during optimization.

        Outputs:
        A list containing the value of the loss function at each training
        iteration.
        """
        D, N = X.shape
        K = np.max(y) + 1
        if self.W is None:
            # Lazily initialize W to a small random matrix
            self.W = np.random.randn(K, D) * 0.001
        # Run stochastic gradient descent to optimize W
        loss_history = []
        # range() replaces Python-2-only xrange(); identical iteration.
        for it in range(num_iters):
            # Sample a mini-batch of columns (with replacement).
            batch_samples = np.random.choice(N, batch_size)
            X_batch = X[:, batch_samples]
            y_batch = y[batch_samples]
            # Evaluate loss and gradient
            loss, dW = self.loss(X_batch, y_batch, reg)
            loss_history.append(loss)
            # Gradient descent update step.
            self.W += -learning_rate * dW
            if verbose and it % 100 == 0:
                # print() works as a function on both Python 2 and 3.
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))
        return loss_history
    def predict(self, X):
        """Use the trained weights of this linear classifier to predict labels.

        Inputs:
        - X: D x N array of training data. Each column is a D-dimensional point.

        Returns:
        - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
          array of length N, and each element is an integer giving the predicted
          class.
        """
        scores = self.W.dot(X)
        # The predicted class is the row (class) with the highest score.
        return scores.argmax(axis=0)
| unlicense |
Knownly/Cactus | cactus/utils/filesystem.py | 5 | 1445 | import os
import shutil
import tempfile
from contextlib import contextmanager
from cactus.utils.helpers import map_apply
def mkdtemp():
    """Create a temporary directory, honouring the TEMPDIR env override."""
    base = os.environ.get("TEMPDIR")
    return tempfile.mkdtemp(dir=base)
def fileList(paths, relative=False, folders=False):
    """
    Generate a recursive list of files from a given path.

    - paths: a single path or a list of paths to walk.
    - relative: strip each path's own prefix from the entries found under it.
    - folders: also include directory names, not just files.
    Hidden entries (names starting with '.') are always skipped.
    """
    if not isinstance(paths, list):
        paths = [paths]
    files = []
    for path in paths:
        # Collect per path so that `relative` strips the correct prefix.
        # (Previously the stripping ran with the loop variable after the
        # fact, using the wrong prefix length when multiple paths of
        # different lengths were given.)
        path_files = []
        def append(directory, name):
            if not name.startswith('.'):
                path_files.append(os.path.join(directory, name))
        for directory, dirnames, filenames in os.walk(path, followlinks=True):
            if folders:
                for dirname in dirnames:
                    append(directory, dirname)
            for filename in filenames:
                append(directory, filename)
        if relative:
            # Drop "<path>/" from the front of every entry found under it.
            path_files = [f[len(path) + 1:] for f in path_files]
        files.extend(path_files)
    return files
@contextmanager
def alt_file(current_file):
    """
    Yield the path of an alternate file next to an existing file; if the
    caller created it, move it over the original on exit.
    """
    temp_path = current_file + '-alt'
    yield temp_path
    try:
        shutil.move(temp_path, current_file)
    except IOError:
        # The caller never wrote the alt file; nothing to promote.
        pass
@contextmanager
def chdir(new_dir):
    """
    Chdir to another directory for an operation.

    The previous working directory is restored on exit, even when the
    body raises (the original version skipped the restore on exceptions).
    """
    current_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(current_dir)
| bsd-3-clause |
pombreda/labyrinth | src/Links.py | 3 | 10425 | # Link.py
# This file is part of Labyrinth
#
# Copyright (C) 2006 - Don Scorgie <Don@Scorgie.org>
#
# Labyrinth is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Labyrinth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labyrinth; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import gobject
import gtk
import BaseThought
import utils
import math
import gettext
_ = gettext.gettext
def norm(x, y):
    # NOTE(review): `mod` is sqrt(|x0^2 - y0^2 + x1^2 - y1^2|), i.e. a
    # difference of squares, NOT the euclidean distance
    # sqrt((x0-y0)^2 + (x1-y1)^2). If a unit direction vector between the
    # two points was intended, this looks like a bug -- confirm with callers.
    # Also note: divides by zero when the two points are "equidistant" in
    # this metric.
    mod = math.sqrt(abs((x[0]**2 - y[0]**2) + (x[1]**2 - y[1]**2)))
    return [abs(x[0]-y[0]) / (mod), abs(x[1] - y[1]) / (mod)]
class Link (gobject.GObject):
    """A visual connection (edge) between two thoughts on the mind map.

    A link knows its parent and child thoughts (or raw start/end coordinates
    while being dragged), a line `strength` (thickness), a color, and its own
    XML element used for persistence.  It emits GObject signals so the map
    area can react to selection and popup requests.
    """

    __gsignals__ = dict (select_link = (gobject.SIGNAL_RUN_FIRST,
                                        gobject.TYPE_NONE,
                                        (gobject.TYPE_PYOBJECT,)),
                         update_view = (gobject.SIGNAL_RUN_LAST,
                                        gobject.TYPE_NONE,
                                        ()),
                         popup_requested = (gobject.SIGNAL_RUN_FIRST,
                                            gobject.TYPE_NONE,
                                            (gobject.TYPE_PYOBJECT, gobject.TYPE_INT)))

    def __init__ (self, save, parent = None, child = None, start_coords = None, end_coords = None, strength = 2):
        """Create a link.

        save: XML document used to create this link's persistence element.
        parent/child: the thoughts being connected (may be None while dragging).
        start_coords/end_coords: explicit endpoints, used when thoughts are absent.
        strength: line thickness; also acts as a reference count of sorts.
        """
        super (Link, self).__init__()
        self.parent = parent
        self.child = child
        self.end = end_coords
        self.start = start_coords
        self.strength = strength
        self.element = save.createElement ("link")
        self.selected = False
        self.color = utils.gtk_to_cairo_color(gtk.gdk.color_parse("black"))
        if not self.start and parent and parent.lr:
            # Default the start point to the geometric centre of the parent.
            self.start = (parent.ul[0]-((parent.ul[0]-parent.lr[0]) / 2.), \
                          parent.ul[1]-((parent.ul[1]-parent.lr[1]) / 2.))
        if parent and child:
            self.find_ends ()

    def get_save_element (self):
        """Return the XML element this link serializes itself into."""
        return self.element

    def includes (self, coords, mode):
        """Return True if *coords* lies on (close to) this link's line segment."""
        # TODO: Change this to make link selection work. Also needs
        # some fairly large changes in MMapArea
        if not self.start or not self.end or not coords:
            return False
        # Project coords onto the line through start/end, then measure the
        # perpendicular distance to decide whether the point "hits" the line.
        mag = (math.sqrt(((self.end[0] - self.start[0]) ** 2) + \
                         ((self.end[1] - self.start[1]) ** 2)))
        U = (((coords[0] - self.start[0]) * (self.end[0] - self.start[0])) + \
             ((coords[1] - self.start[1]) * (self.end[1] - self.start[1]))) / \
            (mag**2)
        inter = [self.start[0] + U*(self.end[0] - self.start[0]),
                 self.start[1] + U*(self.end[1] - self.start[1])]
        dist = math.sqrt(((coords[0] - inter[0]) ** 2) + \
                         ((coords[1] - inter[1]) ** 2))
        if dist < (3+self.strength) and dist > -(3+self.strength):
            # Also require the point to lie between the two endpoints (x-wise).
            if self.start[0] < self.end[0] and coords[0] > self.start[0] and coords[0] < self.end[0]:
                return True
            elif coords[0] < self.start[0] and coords[0] > self.end[0]:
                return True
        return False

    def connects (self, thought, thought2):
        """Return True if this link joins the two given thoughts (either direction)."""
        return (self.parent == thought and self.child == thought2) or \
               (self.child == thought and self.parent == thought2)

    def set_end (self, coords):
        """Set the free end coordinate (used while dragging a new link)."""
        self.end = coords

    def set_strength (self, strength):
        """Set the line thickness."""
        self.strength = strength

    def change_strength (self, thought, thought2):
        """Adjust strength: +1 when re-linked from the parent side, -1 otherwise.

        Returns False when the link does not connect the two thoughts, and
        returns whether the link should survive (strength != 0) otherwise.
        """
        if not self.connects (thought, thought2):
            return False
        if self.parent == thought:
            self.strength += 1
        else:
            self.strength -= 1
        return self.strength != 0

    def set_child (self, child):
        """Attach the child thought and recompute both endpoints."""
        self.child = child
        self.find_ends ()

    def uses (self, thought):
        """Return True if *thought* is either endpoint of this link."""
        return self.parent == thought or self.child == thought

    def find_ends (self):
        """Ask the parent thought for the connection points to the child."""
        (self.start, self.end) = self.parent.find_connection (self.child)

    def draw (self, context):
        """Draw the link on the given cairo context (bezier or straight line)."""
        if not self.start or not self.end:
            return
        cwidth = context.get_line_width ()
        context.set_line_width (self.strength)
        context.move_to (self.start[0], self.start[1])
        if utils.use_bezier_curves:
            dx = self.end[0] - self.start[0]
            x2 = self.start[0] + dx / 2.0
            x3 = self.end[0] - dx / 2.0
            context.curve_to(x2, self.start[1], x3, self.end[1], self.end[0], self.end[1])
        else:
            context.line_to (self.end[0], self.end[1])
        if self.selected:
            color = utils.selected_colors["bg"]
            context.set_source_rgb (color[0], color[1], color[2])
        else:
            context.set_source_rgb (self.color[0], self.color[1], self.color[2])
        context.stroke ()
        # Restore the context state we changed.
        context.set_line_width (cwidth)
        context.set_source_rgb (0.0, 0.0, 0.0)

    def export (self, context, move_x, move_y):
        """Draw the link for export, translated by (move_x, move_y).

        Endpoints are temporarily synthesized from the thought centres when
        missing, and cleared again afterwards.
        """
        rem = False
        if not self.start or not self.end:
            # Probably shouldn't do this, but its safe now
            self.start = (self.parent.ul[0]-((self.parent.ul[0]-self.parent.lr[0]) / 2.), \
                          self.parent.ul[1]-((self.parent.ul[1]-self.parent.lr[1]) / 2.))
            self.end = (self.child.ul[0]-((self.child.ul[0]-self.child.lr[0]) / 2.), \
                        self.child.ul[1]-((self.child.ul[1]-self.child.lr[1]) / 2.))
            rem = True
        cwidth = context.get_line_width ()
        context.set_line_width (self.strength)
        context.move_to (self.start[0]+move_x, self.start[1]+move_y)
        context.line_to (self.end[0]+move_x, self.end[1]+move_y)
        context.stroke ()
        context.set_line_width (cwidth)
        if rem:
            self.start = self.end = None

    def set_parent_child (self, parent, child):
        """Attach both thoughts at once and recompute endpoints when possible."""
        self.parent = parent
        self.child = child
        if self.parent and self.child:
            self.find_ends ()

    def update_save (self):
        """Write this link's state into its XML persistence element."""
        self.element.setAttribute ("start", str(self.start))
        self.element.setAttribute ("end", str(self.end))
        self.element.setAttribute ("strength", str(self.strength))
        self.element.setAttribute ("color", str(self.color))
        if self.child:
            self.element.setAttribute ("child", str(self.child.identity))
        else:
            self.element.setAttribute ("child", "None")
        if self.parent:
            self.element.setAttribute ("parent", str(self.parent.identity))
        else:
            self.element.setAttribute ("parent", "None")

    def load (self, node):
        """Restore this link's state from an XML node.

        Thought references are loaded as identity numbers
        (parent_number/child_number); the map area resolves them later.
        """
        self.parent_number = self.child_number = -1
        tmp = node.getAttribute ("end")
        if not tmp:
            print "No tmp found"
            return
        self.end = utils.parse_coords (tmp)
        tmp = node.getAttribute ("start")
        if not tmp:
            print "No start found"
            return
        self.start = utils.parse_coords (tmp)
        self.strength = int(node.getAttribute ("strength"))
        try:
            # Color is stored as "(r, g, b)"; strip the punctuation per part.
            colors = node.getAttribute ("color").split()
            self.color = (float(colors[0].strip('(,)')), float(colors[1].strip('(,)')), float(colors[2].strip('(,)')))
        except:
            pass
        if node.hasAttribute ("parent"):
            tmp = node.getAttribute ("parent")
            if tmp == "None":
                self.parent_number = -1
            else:
                self.parent_number = int (tmp)
        if node.hasAttribute ("child"):
            tmp = node.getAttribute ("child")
            if tmp == "None":
                self.child_number = -1
            else:
                self.child_number = int (tmp)

    def process_button_down (self, event, mode, transformed):
        """Handle a mouse press: select on left click, popup on right click."""
        modifiers = gtk.accelerator_get_default_mod_mask ()
        self.button_down = True
        if event.button == 1:
            if event.type == gtk.gdk.BUTTON_PRESS:
                self.emit ("select_link", event.state & modifiers)
                self.emit ("update_view")
        elif event.button == 3:
            self.emit ("popup_requested", event, 2)
        self.emit ("update_view")
        return False

    def process_button_release (self, event, unending_link, mode, transformed):
        """Mouse release: links take no action."""
        return False

    def process_key_press (self, event, mode):
        """Handle +/- to change strength and Escape to deselect (editing mode only)."""
        if mode != BaseThought.MODE_EDITING or event.keyval == gtk.keysyms.Delete:
            return False
        if event.keyval == gtk.keysyms.plus or \
           event.keyval == gtk.keysyms.KP_Add:
            self.strength += 1
        elif (event.keyval == gtk.keysyms.minus or \
              event.keyval == gtk.keysyms.KP_Subtract) and \
             self.strength > 1:
            self.strength -= 1
        elif event.keyval == gtk.keysyms.Escape:
            self.unselect()
        self.emit("update_view")
        return True

    def handle_motion (self, event, mode, transformed):
        """Links ignore pointer motion."""
        pass

    def want_motion (self):
        """Links never request motion events."""
        return False

    def select(self):
        """Mark this link as selected (drawn in the selection color)."""
        self.selected = True

    def unselect(self):
        """Clear the selection state."""
        self.selected = False

    def move_by (self, x,y):
        """Links do not move independently; endpoints follow their thoughts."""
        pass

    def can_be_parent (self):
        """A link can never be the parent of a thought."""
        return False

    def set_color_cb(self, widget):
        """Popup-menu callback: open a color chooser for this link."""
        dialog = gtk.ColorSelectionDialog(_('Choose Color'))
        dialog.connect('response', self.color_selection_ok_cb)
        self.color_sel = dialog.colorsel
        dialog.run()

    def color_selection_ok_cb(self, dialog, response_id):
        """Color-dialog response handler: apply the chosen color on OK."""
        if response_id == gtk.RESPONSE_OK:
            self.color = utils.gtk_to_cairo_color(self.color_sel.get_current_color())
        dialog.destroy()

    def get_popup_menu_items(self):
        """Return the context-menu items offered by a link (just 'Set Color')."""
        image = gtk.Image()
        image.set_from_stock(gtk.STOCK_COLOR_PICKER, gtk.ICON_SIZE_MENU)
        item = gtk.ImageMenuItem(_('Set Color'))
        item.set_image(image)
        item.connect('activate', self.set_color_cb)
        return [item]
| gpl-2.0 |
AlanZatarain/pybootchartgui | pybootchartgui/draw.py | 3 | 12094 | # This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import cairo
import math
import re
# Process tree background color.
BACK_COLOR = (1.0, 1.0, 1.0, 1.0)
WHITE = (1.0, 1.0, 1.0, 1.0)
# Process tree border color.
BORDER_COLOR = (0.63, 0.63, 0.63, 1.0)
# Second tick line color.
TICK_COLOR = (0.92, 0.92, 0.92, 1.0)
# 5-second tick line color.
TICK_COLOR_BOLD = (0.86, 0.86, 0.86, 1.0)
# Text color.
TEXT_COLOR = (0.0, 0.0, 0.0, 1.0)
# Font family
FONT_NAME = "Bitstream Vera Sans"
# Title text font.
TITLE_FONT_SIZE = 18
# Default text font.
TEXT_FONT_SIZE = 12
# Axis label font.
AXIS_FONT_SIZE = 11
# Legend font.
LEGEND_FONT_SIZE = 12
# CPU load chart color.
CPU_COLOR = (0.40, 0.55, 0.70, 1.0)
# IO wait chart color.
IO_COLOR = (0.76, 0.48, 0.48, 0.5)
# Disk throughput color.
DISK_TPUT_COLOR = (0.20, 0.71, 0.20, 1.0)
# CPU load chart color.
FILE_OPEN_COLOR = (0.20, 0.71, 0.71, 1.0)
# Process border color.
PROC_BORDER_COLOR = (0.71, 0.71, 0.71, 1.0)
# Waiting process color.
PROC_COLOR_D = (0.76, 0.48, 0.48, 0.125)
# Running process color.
PROC_COLOR_R = CPU_COLOR
# Sleeping process color.
PROC_COLOR_S = (0.94, 0.94, 0.94, 1.0)
# Stopped process color.
PROC_COLOR_T = (0.94, 0.50, 0.50, 1.0)
# Zombie process color.
PROC_COLOR_Z = (0.71, 0.71, 0.71, 1.0)
# Dead process color.
PROC_COLOR_X = (0.71, 0.71, 0.71, 0.125)
# Paging process color.
PROC_COLOR_W = (0.71, 0.71, 0.71, 0.125)
# Process label color.
PROC_TEXT_COLOR = (0.19, 0.19, 0.19, 1.0)
# Process label font.
PROC_TEXT_FONT_SIZE = 12
# Signature color.
SIG_COLOR = (0.0, 0.0, 0.0, 0.3125)
# Signature font.
SIG_FONT_SIZE = 14
# Signature text.
SIGNATURE = "http://code.google.com/p/pybootchartgui"
# Process dependency line color.
DEP_COLOR = (0.75, 0.75, 0.75, 1.0)
# Process dependency line stroke.
DEP_STROKE = 1.0
# Process description date format.
DESC_TIME_FORMAT = "mm:ss.SSS"
# Process states
STATE_UNDEFINED = 0
STATE_RUNNING = 1
STATE_SLEEPING = 2
STATE_WAITING = 3
STATE_STOPPED = 4
STATE_ZOMBIE = 5
STATE_COLORS = [(0,0,0,0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
# Convert ps process state to an int
def get_proc_state(flag):
    """Translate a ps state character ('R', 'S', ...) into a STATE_* int.

    Unknown flags map to 0 (STATE_UNDEFINED).
    """
    ordered_flags = "RSDTZXW"
    return ordered_flags.find(flag) + 1
def draw_text(ctx, text, color, x, y):
    """Draw *text* at (x, y) on the cairo context in the given RGBA *color*."""
    ctx.set_source_rgba(*color)
    ctx.move_to(x, y)
    ctx.show_text(text)
def draw_fill_rect(ctx, color, rect):
    """Fill the rectangle *rect* = (x, y, w, h) with RGBA *color*."""
    ctx.set_source_rgba(*color)
    ctx.rectangle(*rect)
    ctx.fill()
def draw_rect(ctx, color, rect):
    """Stroke the outline of rectangle *rect* = (x, y, w, h) with RGBA *color*."""
    ctx.set_source_rgba(*color)
    ctx.rectangle(*rect)
    ctx.stroke()
def draw_legend_box(ctx, label, fill_color, x, y, s):
    """Draw a filled, outlined legend swatch of size *s* with *label* to its right."""
    draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
    draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
    draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_legend_line(ctx, label, fill_color, x, y, s):
    """Draw a line-style legend entry (short bar with a centre dot) plus *label*."""
    draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
    ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
    ctx.fill()
    draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_label_in_box(ctx, color, label, x, y, w, maxx):
    """Draw *label* centred in a box of width *w* starting at *x*.

    When the label is wider than the box it is placed to the right of the
    box instead, and to the left when that would run past *maxx*.
    """
    label_w = ctx.text_extents(label)[2]
    label_x = x + w / 2 - label_w / 2
    if label_w + 10 > w:
        # Too wide to fit inside: spill to the right of the box.
        label_x = x + w + 5
    if label_x + label_w > maxx:
        # Would overflow the chart: spill to the left instead.
        label_x = x - label_w - 5
    draw_text(ctx, label, color, label_x, y)
def draw_5sec_labels(ctx, rect, sec_w):
    """Draw a centred time label ("0s", "5s", ...) every five seconds above *rect*."""
    ctx.set_font_size(AXIS_FONT_SIZE)
    for i in range(0, rect[2] + 1, sec_w):
        if ((i / sec_w) % 5 == 0) :
            label = "%ds" % (i / sec_w)
            label_w = ctx.text_extents(label)[2]
            draw_text(ctx, label, TEXT_COLOR, rect[0] + i - label_w/2, rect[1] - 2)
def draw_box_ticks(ctx, rect, sec_w):
    """Draw the chart border and vertical per-second tick lines (bold every 5 s)."""
    draw_rect(ctx, BORDER_COLOR, tuple(rect))
    ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
    for i in range(sec_w, rect[2] + 1, sec_w):
        if ((i / sec_w) % 5 == 0) :
            ctx.set_source_rgba(*TICK_COLOR_BOLD)
        else :
            ctx.set_source_rgba(*TICK_COLOR)
        ctx.move_to(rect[0] + i, rect[1] + 1)
        ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
        ctx.stroke()
    ctx.set_line_cap(cairo.LINE_CAP_BUTT)
def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree):
    """Plot *data* (a list of (x, y) samples) inside *chart_bounds*.

    The polyline is stroked in *color*; when *fill* is true the area under
    the curve (down to the module-level bar_h baseline) is filled as well.
    Fix: removed the dead local `x_scale`, which was assigned and never used.
    """
    ctx.set_line_width(0.5)
    x_shift = proc_tree.start_time

    def transform_point_coords(point, x_base, y_base, xscale, yscale, x_trans, y_trans):
        # Maps a data point into chart pixel space; the y axis is flipped and
        # offset by the module-level bar_h so larger values draw higher.
        x = (point[0] - x_base) * xscale + x_trans
        y = (point[1] - y_base) * -yscale + y_trans + bar_h
        return x, y

    xscale = float(chart_bounds[2]) / max(x for (x, y) in data)
    yscale = float(chart_bounds[3]) / max(y for (x, y) in data)
    first = transform_point_coords(data[0], x_shift, 0, xscale, yscale, chart_bounds[0], chart_bounds[1])
    last = transform_point_coords(data[-1], x_shift, 0, xscale, yscale, chart_bounds[0], chart_bounds[1])
    ctx.set_source_rgba(*color)
    ctx.move_to(*first)
    for point in data:
        x, y = transform_point_coords(point, x_shift, 0, xscale, yscale, chart_bounds[0], chart_bounds[1])
        ctx.line_to(x, y)
    if fill:
        # Stroke the outline, then close the path down to the baseline and fill.
        ctx.stroke_preserve()
        ctx.line_to(last[0], chart_bounds[1] + bar_h)
        ctx.line_to(first[0], chart_bounds[1] + bar_h)
        ctx.line_to(first[0], first[1])
        ctx.fill()
    else:
        ctx.stroke()
    ctx.set_line_width(1.0)
header_h = 280
bar_h = 55
# offsets
off_x, off_y = 10, 10
sec_w = 25 # the width of a second
proc_h = 16 # the height of a process
leg_s = 10
MIN_IMG_W = 800
OPTIONS = None
def extents(headers, cpu_stats, disk_stats, proc_tree):
    """Return the (width, height) of the full rendered chart in pixels.

    Only proc_tree is actually used; headers, cpu_stats and disk_stats are
    accepted but unused. Sizes derive from the module-level layout constants.
    """
    w = (proc_tree.duration * sec_w / 100) + 2*off_x
    h = proc_h * proc_tree.num_proc + header_h + 2*off_y
    return (w,h)
#
# Render the chart.
#
def render(ctx, options, headers, cpu_stats, disk_stats, proc_tree):
    """Render the complete boot chart onto the cairo context *ctx*.

    Draws, top to bottom: the header block, the CPU/IO chart, the disk
    throughput/utilization chart, the process bar chart, and a signature.
    *options* is stashed in the module-level OPTIONS for the process bars.
    Fix: removed a stray Python-2 debug statement (`print options.show_pid`).
    """
    (w, h) = extents(headers, cpu_stats, disk_stats, proc_tree)
    global OPTIONS
    OPTIONS = options
    ctx.set_line_width(1.0)
    ctx.select_font_face(FONT_NAME)
    draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
    w -= 2*off_x
    # draw the title and headers
    curr_y = draw_header(ctx, headers, off_x, proc_tree.duration)
    # render bar legend
    ctx.set_font_size(LEGEND_FONT_SIZE)
    draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
    draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
    # render I/O wait
    chart_rect = (off_x, curr_y+30, w, bar_h)
    draw_box_ticks(ctx, chart_rect, sec_w)
    draw_chart(ctx, IO_COLOR, True, chart_rect, [(sample.time, sample.user + sample.sys + sample.io) for sample in cpu_stats], proc_tree)
    # render CPU load
    draw_chart(ctx, CPU_COLOR, True, chart_rect, [(sample.time, sample.user + sample.sys) for sample in cpu_stats], proc_tree)
    curr_y = curr_y + 30 + bar_h
    # render second chart
    draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
    draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
    # render I/O utilization
    chart_rect = (off_x, curr_y+30, w, bar_h)
    draw_box_ticks(ctx, chart_rect, sec_w)
    draw_chart(ctx, IO_COLOR, True, chart_rect, [(sample.time, sample.util) for sample in disk_stats], proc_tree)
    # render disk throughput
    max_sample = max(disk_stats, key=lambda s: s.tput)
    draw_chart(ctx, DISK_TPUT_COLOR, False, chart_rect, [(sample.time, sample.tput) for sample in disk_stats], proc_tree)
    # Label the throughput peak; nudge the label when it is near the left edge.
    pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
    shift_x, shift_y = -20, 20
    if (pos_x < off_x + 245):
        shift_x, shift_y = 5, 40
    label = "%dMB/s" % round((max_sample.tput) / 1024.0)
    draw_text(ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
    # draw process boxes
    draw_process_bar_chart(ctx, proc_tree, curr_y + bar_h, w, h)
    ctx.set_font_size(SIG_FONT_SIZE)
    draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, h - off_y - 5)
def draw_process_bar_chart(ctx, proc_tree, curr_y, w, h):
    """Draw the process-state legend, the chart frame, and all process bars."""
    draw_legend_box(ctx, "Running (%cpu)", PROC_COLOR_R, off_x , curr_y + 45, leg_s)
    draw_legend_box(ctx, "Unint.sleep (I/O)", PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
    draw_legend_box(ctx, "Sleeping", PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
    draw_legend_box(ctx, "Zombie", PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
    chart_rect = [off_x, curr_y+60, w, h - 2 * off_y - (curr_y+60) + proc_h]
    ctx.set_font_size(PROC_TEXT_FONT_SIZE)
    draw_box_ticks(ctx, chart_rect, sec_w)
    draw_5sec_labels(ctx, chart_rect, sec_w)
    # Each tree root gets a vertical slice sized by its subtree node count.
    y = curr_y+60
    for root in proc_tree.process_tree:
        draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect)
        y = y + proc_h * proc_tree.num_nodes([root])
def draw_header(ctx, headers, off_x, duration):
    """Draw the chart title and the header key/value lines.

    Returns the y coordinate just below the last header line.  The off_x
    parameter shadows the module-level constant of the same name.
    """
    dur = duration / 100.0
    # (header key, displayed title, value formatter) triples, in draw order.
    toshow = [
      ('system.uname', 'uname', lambda s: s),
      ('system.release', 'release', lambda s: s),
      ('system.cpu', 'CPU', lambda s: re.sub('model name\s*:\s*', '', s, 1)),
      ('system.kernel.options', 'kernel options', lambda s: s),
      ('pseudo.header', 'time', lambda s: '%02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60)))
    ]
    header_y = ctx.font_extents()[2] + 10
    ctx.set_font_size(TITLE_FONT_SIZE)
    draw_text(ctx, headers['title'], TEXT_COLOR, off_x, header_y)
    ctx.set_font_size(TEXT_FONT_SIZE)
    for (headerkey, headertitle, mangle) in toshow:
        header_y += ctx.font_extents()[2]
        txt = headertitle + ': ' + mangle(headers.get(headerkey))
        draw_text(ctx, txt, TEXT_COLOR, off_x, header_y)
    return header_y
def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect) :
    """Draw *proc*'s bar, then all of its children below it.

    Returns the (x, y) of this process's bar so the caller can draw the
    parent-child connector lines.
    """
    x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
    w = ((proc.duration) * rect[2] / proc_tree.duration)
    draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect)
    draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
    cmdString = (proc.cmd + " [" + str(proc.pid) + "]") if OPTIONS.show_pid else proc.cmd
    draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
    next_y = y + proc_h
    for child in proc.child_list:
        child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect)
        draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
        # Advance past the whole subtree just drawn.
        next_y = next_y + proc_h * proc_tree.num_nodes([child])
    return x, y
def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect):
    """Fill the process bar with per-sample colors reflecting scheduler state."""
    # Sleeping is the background; only non-sleeping samples get painted over.
    draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
    last_tx = -1
    for sample in proc.samples :
        tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
        tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
        # Merge spans that would overlap the previously drawn pixel range.
        if last_tx != -1 and abs(last_tx - tx) <= tw:
            tw -= last_tx - tx
            tx = last_tx
        last_tx = tx + tw
        state = get_proc_state( sample.state )
        color = STATE_COLORS[state]
        if state == STATE_RUNNING:
            # Opacity tracks the CPU share consumed during this sample.
            alpha = sample.cpu_sample.user + sample.cpu_sample.sys
            color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
        elif state == STATE_SLEEPING:
            continue
        draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
    """Draw a dashed connector from a parent bar at (px, py) to a child at (x, y).

    When parent and child start at nearly the same x, draw a small elbow to
    the left so the connector stays visible.
    """
    ctx.set_source_rgba(*DEP_COLOR)
    ctx.set_dash([2,2])
    if abs(px - x) < 3:
        dep_off_x = 3
        dep_off_y = proc_h / 4
        ctx.move_to(x, y + proc_h / 2)
        ctx.line_to(px - dep_off_x, y + proc_h / 2)
        ctx.line_to(px - dep_off_x, py - dep_off_y)
        ctx.line_to(px, py - dep_off_y)
    else:
        ctx.move_to(x, y + proc_h / 2)
        ctx.line_to(px, y + proc_h / 2)
        ctx.line_to(px, py)
    ctx.stroke()
    ctx.set_dash([])
| gpl-3.0 |
bergholz/trello2misc | trello2misc.py | 1 | 5884 | #!/usr/bin/python3
"""trello2misc.py
Pulls your cards from Trello to your console or your local todo.txt-file.
Broadly speaking, one card corresponds to one entry. Boards correspond to
contexts, lists to priorities, and labels to projects. See the README.md and
trello2misc.ini files for more information.
Author: André Bergholz
Version: 1.0
"""
import sys, datetime
import trello, todotxt, utils
__version__ = "1.0"
__date__ = "2013/02/28"
__updated__ = "2014/08/15"
__author__ = "André Bergholz (bergholz@gmail.com)"
__license__ = "GPL3"
# Returns the list of todo.txt tasks generated from
# the information contained in the Trello cards.
def generate_todotxttasks(cards, lists, boards, allCardsBoardNames):
    """Build todo.txt task objects from the open Trello cards.

    Closed cards and cards on unknown boards are skipped.  The first label
    (if any) becomes the project, the card's list determines the priority,
    and the board determines the context.
    """
    tasks = []
    for card in cards.values():
        if not card.closed and card.board in boards:
            priority = generate_priority(card, lists)
            label = ""
            if len(card.labels) > 0:
                label = card.labels[0]
            # Boards pulled wholesale map to the generic "trello" context;
            # personal boards keep their own name as the context.
            if boards[card.board].name in allCardsBoardNames:
                context = "trello"
            else:
                context = boards[card.board].name
            if card.due is None:
                due = ""
            else:
                due = card.due
            task = todotxt.TodotxtTask(card.name, priority, label, context, due)
            tasks.append(task)
    return tasks
# Returns a priority for a given Trello card
def generate_priority(card, lists):
    """Return the todo.txt priority ("A"/"B"/"C" or "") for a Trello card.

    The priority is determined by which configured list group (aLists,
    bLists, cLists in trello2misc.ini) the card's list name belongs to.
    Fix: the comma-splitting/quote-stripping parsing was duplicated three
    times; it is now a single local helper.
    """
    config = utils.readconfig("trello2misc.ini")

    def names_for(option):
        # Parse a comma-separated, possibly quoted option into clean names.
        return [name.replace("\"", "").strip()
                for name in config.get("trello", option).split(",")]

    listName = lists[card.list]
    if listName in names_for("aLists"):
        return "A"
    if listName in names_for("bLists"):
        return "B"
    if listName in names_for("cLists"):
        return "C"
    return ""
# Returns a list of merged todo.txt tasks
# from the base tasks (which existed previously)
# and the newly generated tasks from the Trello cards.
# For base tasks, priority and due date are updated.
def merge_tasks(newTasks, baseTasks):
    """Merge freshly generated tasks into the pre-existing base tasks.

    baseTasks is mutated in place and returned: a new task equal to an
    existing one only refreshes that task's priority and due date, while a
    genuinely new task is appended.
    """
    merged = baseTasks
    for candidate in newTasks:
        try:
            existing = merged[merged.index(candidate)]
        except ValueError:
            merged.append(candidate)
        else:
            existing.priority = candidate.priority
            existing.due = candidate.due
    return merged
# Prints the current card dictionary to screen.
def print_oneliner(cards, lists):
    """Print each card as a single console line.

    Format: "<list name>[ <due date>]: <card name>[ (<first two labels>)]".
    cards is an iterable of card objects; lists maps list ids to list names.
    Fix: removed the dead local `lines = []`, which was never used.
    """
    for card in cards:
        line = "%s" % (lists[card.list])
        if card.due is not None:
            # Trello due dates are ISO timestamps; show only the date part.
            stripped = datetime.datetime.strptime(card.due, "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d")
            line += " %s" % (stripped)
        line += ": %s" % (card.name)
        if len(card.labels) > 0:
            # Show at most the first two labels in parentheses.
            line += " ("
            for label in card.labels[0:2]:
                line += "%s " % (label)
            line = line.strip()
            line += ")"
        print(line)
# The main method processes the given command.
def _board_name_lists(config):
    """Parse the allCardsBoards / myCardsBoards config options.

    Returns (allCardsBoardNames, myCardsBoardNames), each a list of clean
    (unquoted, stripped) board names.
    """
    def parse(option):
        return [name.replace("\"", "").strip()
                for name in config.get("trello", option).split(",")]
    return parse("allCardsBoards"), parse("myCardsBoards")


def _load_trello_data(allCardsBoardNames, myCardsBoardNames):
    """Fetch and filter boards, lists and cards from Trello.

    Returns (boards, lists, cards) restricted to the configured boards.
    """
    boardNames = allCardsBoardNames + myCardsBoardNames
    boards = trello.read_my_trello_boards()
    boards = trello.filter_trello_boards(boardNames, boards)
    lists = trello.read_trello_lists(boards)
    cards = trello.read_all_trello_cards(allCardsBoardNames, boards)
    cards.update(trello.read_my_trello_cards(myCardsBoardNames, boards))
    cards = trello.filter_cards(cards, lists)
    return boards, lists, cards


def main(command):
    """Dispatch on *command*: "todotxt" updates the todo.txt file, "stdout"
    prints the cards to the console, anything else prints usage.

    Fix: the board/list/card loading code was duplicated verbatim in the
    "todotxt" and "stdout" branches; it now lives in two shared helpers.
    """
    config = utils.readconfig("trello2misc.ini")
    if command == "todotxt":
        todotxtTasks = todotxt.read_todotxtfile()
        allNames, myNames = _board_name_lists(config)
        boards, lists, cards = _load_trello_data(allNames, myNames)
        trelloTasks = generate_todotxttasks(cards, lists, boards, allNames)
        tasks = merge_tasks(trelloTasks, todotxtTasks)
        todotxt.write_tasks(tasks)
    elif command == "stdout":
        allNames, myNames = _board_name_lists(config)
        boards, lists, cards = _load_trello_data(allNames, myNames)
        sortedCards = trello.sort_cards(cards, lists)
        print_oneliner(sortedCards, lists)
    elif command == "help" or command == "usage":
        print("Usage: ./trello2misc.py [stdout|todotxt|help]?")
    else:
        print("Unsupported command: " + command)
        print("Usage: ./trello2misc.py [stdout|todotxt|help]?")
# The main program: default to "help" when no command is given,
# otherwise pass the normalized first argument to main().
if __name__ == '__main__':
    if (len(sys.argv) < 2):
        command = "help"
    else:
        command = sys.argv[1].lower().strip()
    main(command)
| mit |
SpeedBienal/app-bienal | Bienal/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | 47697 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
# Matches a double quote together with the run of backslashes (if any)
# immediately preceding it; both need rewriting for CommandLineToArgvW.
windows_quoter_regex = re.compile(r'(\\*)"')


def QuoteForRspFile(arg):
    """Quote a command line argument so that it appears as one argument when
    processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
    Windows programs).

    CommandLineToArgvW requires 2n+1 backslashes before a literal quote
    (yielding n backslashes plus the quote), so each matched backslash run is
    doubled and a backslash-quote appended.  Percent signs are doubled so
    they are not treated as batch positional arguments, and the whole value
    is wrapped in quotes so embedded whitespace does not split the word.
    These strings go into rsp files, so no shell (^) escaping is needed.
    """
    arg = windows_quoter_regex.sub(lambda m: m.group(1) * 2 + '\\"', arg)
    arg = arg.replace('%', '%%')
    return '"%s"' % arg
def EncodeRspFileList(args):
    """Encode *args* as a single rsp-file command line.

    The first element is treated as the command: it is path-normalized but
    not quoted, so built-ins like 'echo' keep working.  Every remaining
    argument is quoted via QuoteForRspFile.
    """
    if not args:
        return ''
    head = args[0]
    if head.startswith('call '):
        # 'call ../x.bat': normalize only the script path, keep the keyword,
        # otherwise the whole string would be misread as one path.
        keyword, script = head.split(' ', 1)
        program = keyword + ' ' + os.path.normpath(script)
    else:
        program = os.path.normpath(head)
    return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
    """Try to find an installation location for the DirectX SDK. Check for the
    standard environment variable, and if that doesn't exist, try to find
    via the registry. May return None if not found in either location.

    The result is memoized on the function object itself, so the registry
    is queried at most once per process.
    """
    # Return previously calculated value, if there is one
    if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
        return _FindDirectXInstallation.dxsdk_dir

    dxsdk_dir = os.environ.get('DXSDK_DIR')
    if not dxsdk_dir:
        # Setup params to pass to and attempt to launch reg.exe.
        cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # NOTE(review): communicate() yields bytes on Python 3; this substring
        # test assumes Python 2 str output — confirm before porting.
        for line in p.communicate()[0].splitlines():
            if 'InstallPath' in line:
                dxsdk_dir = line.split(' ')[3] + "\\"

    # Cache return value
    _FindDirectXInstallation.dxsdk_dir = dxsdk_dir
    return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
    """Get a dict of variables mapping internal VS macro names to their gyp
    equivalents. Returns all variables that are independent of the target.

    vs_version is a MSVSVersion object; missing installations yield empty
    string values so unexpanded macros never leak into paths.
    """
    env = {}
    # '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
    # Visual Studio is actually installed.
    if vs_version.Path():
        env['$(VSInstallDir)'] = vs_version.Path()
        env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
    # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
    # set. This happens when the SDK is sync'd via src-internal, rather than
    # by typical end-user installation of the SDK. If it's not set, we don't
    # want to leave the unexpanded variable in the path, so simply strip it.
    dxsdk_dir = _FindDirectXInstallation()
    env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
    # Try to find an installation location for the Windows DDK by checking
    # the WDK_DIR environment variable, may be None.
    env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
    return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
    """Finds msvs_system_include_dirs that are common to all targets, removes
    them from all targets, and returns an OrderedSet containing them.

    Returns None when the configs share no system include dirs, or when any
    shared dir still depends on target-specific macros after expansion (in
    which case the configs are left untouched).
    """
    all_system_includes = OrderedSet(
        configs[0].get('msvs_system_include_dirs', []))
    for config in configs[1:]:
        system_includes = config.get('msvs_system_include_dirs', [])
        # Intersect: keep only dirs present in every config seen so far.
        all_system_includes = all_system_includes & OrderedSet(system_includes)
    if not all_system_includes:
        return None
    # Expand macros in all_system_includes.
    env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
    expanded_system_includes = OrderedSet([ExpandMacros(include, env)
                                           for include in all_system_includes])
    if any(['$' in include for include in expanded_system_includes]):
        # Some path relies on target-specific variables, bail.
        return None
    # Remove system includes shared by all targets from the targets.
    for config in configs:
        includes = config.get('msvs_system_include_dirs', [])
        if includes: # Don't insert a msvs_system_include_dirs key if not needed.
            # This must check the unexpanded includes list:
            new_includes = [i for i in includes if i not in all_system_includes]
            config['msvs_system_include_dirs'] = new_includes
    return expanded_system_includes
class MsvsSettings(object):
  """A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
  class helps map those settings to command line options."""

  def __init__(self, spec, generator_flags):
    self.spec = spec
    self.vs_version = GetVSVersion(generator_flags)

    # Each supported field becomes a per-configuration dict on self, keyed by
    # configuration name; 'default' is the factory used when the field is
    # absent from a given configuration.
    supported_fields = [
        ('msvs_configuration_attributes', dict),
        ('msvs_settings', dict),
        ('msvs_system_include_dirs', list),
        ('msvs_disabled_warnings', list),
        ('msvs_precompiled_header', str),
        ('msvs_precompiled_source', str),
        ('msvs_configuration_platform', str),
        ('msvs_target_platform', str),
        ]
    configs = spec['configurations']
    for field, default in supported_fields:
      setattr(self, field, {})
      for configname, config in configs.iteritems():
        getattr(self, field)[configname] = config.get(field, default())

    self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])

    # Fail fast on fields this ninja emulation layer does not implement.
    unsupported_fields = [
        'msvs_prebuild',
        'msvs_postbuild',
    ]
    unsupported = []
    for field in unsupported_fields:
      for config in configs.values():
        if field in config:
          unsupported += ["%s not supported (target %s)." %
                          (field, spec['target_name'])]
    if unsupported:
      raise Exception('\n'.join(unsupported))

  def GetExtension(self):
    """Returns the extension for the target, with no leading dot.

    Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
    the target type.
    """
    ext = self.spec.get('product_extension', None)
    if ext:
      return ext
    return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')

  def GetVSMacroEnv(self, base_to_build=None, config=None):
    """Get a dict of variables mapping internal VS macro names to their gyp
    equivalents."""
    target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
    target_name = self.spec.get('product_prefix', '') + \
        self.spec.get('product_name', self.spec['target_name'])
    target_dir = base_to_build + '\\' if base_to_build else ''
    target_ext = '.' + self.GetExtension()
    target_file_name = target_name + target_ext

    replacements = {
        '$(InputName)': '${root}',
        '$(InputPath)': '${source}',
        '$(IntDir)': '$!INTERMEDIATE_DIR',
        '$(OutDir)\\': target_dir,
        '$(PlatformName)': target_platform,
        '$(ProjectDir)\\': '',
        '$(ProjectName)': self.spec['target_name'],
        '$(TargetDir)\\': target_dir,
        '$(TargetExt)': target_ext,
        '$(TargetFileName)': target_file_name,
        '$(TargetName)': target_name,
        '$(TargetPath)': os.path.join(target_dir, target_file_name),
    }
    replacements.update(GetGlobalVSMacroEnv(self.vs_version))
    return replacements

  def ConvertVSMacros(self, s, base_to_build=None, config=None):
    """Convert from VS macro names to something equivalent."""
    env = self.GetVSMacroEnv(base_to_build, config=config)
    return ExpandMacros(s, env)

  def AdjustLibraries(self, libraries):
    """Strip -l from library if it's specified with that."""
    libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
    return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]

  def _GetAndMunge(self, field, path, default, prefix, append, map):
    """Retrieve a value from |field| at |path| or return |default|. If
    |append| is specified, and the item is found, it will be appended to that
    object instead of returned. If |map| is specified, results will be
    remapped through |map| before being returned or appended."""
    result = _GenericRetrieve(field, default, path)
    result = _DoRemapping(result, map)
    result = _AddPrefix(result, prefix)
    return _AppendOrReturn(append, result)

  class _GetWrapper(object):
    # Small helper that binds a (field, base_path, append target) so callers
    # can look up many sibling settings with minimal repetition.
    def __init__(self, parent, field, base_path, append=None):
      self.parent = parent
      self.field = field
      self.base_path = [base_path]
      self.append = append
    def __call__(self, name, map=None, prefix='', default=None):
      return self.parent._GetAndMunge(self.field, self.base_path + [name],
          default=default, prefix=prefix, append=self.append, map=map)

  def GetArch(self, config):
    """Get architecture based on msvs_configuration_platform and
    msvs_target_platform. Returns either 'x86' or 'x64'."""
    configuration_platform = self.msvs_configuration_platform.get(config, '')
    platform = self.msvs_target_platform.get(config, '')
    if not platform: # If no specific override, use the configuration's.
      platform = configuration_platform
    # Map from platform to architecture.
    return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')

  def _TargetConfig(self, config):
    """Returns the target-specific configuration."""
    # There's two levels of architecture/platform specification in VS. The
    # first level is globally for the configuration (this is what we consider
    # "the" config at the gyp level, which will be something like 'Debug' or
    # 'Release_x64'), and a second target-specific configuration, which is an
    # override for the global one. |config| is remapped here to take into
    # account the local target-specific overrides to the global configuration.
    arch = self.GetArch(config)
    if arch == 'x64' and not config.endswith('_x64'):
      config += '_x64'
    if arch == 'x86' and config.endswith('_x64'):
      config = config.rsplit('_', 1)[0]
    return config

  def _Setting(self, path, config,
              default=None, prefix='', append=None, map=None):
    """_GetAndMunge for msvs_settings."""
    return self._GetAndMunge(
        self.msvs_settings[config], path, default, prefix, append, map)

  def _ConfigAttrib(self, path, config,
                   default=None, prefix='', append=None, map=None):
    """_GetAndMunge for msvs_configuration_attributes."""
    return self._GetAndMunge(
        self.msvs_configuration_attributes[config],
        path, default, prefix, append, map)

  def AdjustIncludeDirs(self, include_dirs, config):
    """Updates include_dirs to expand VS specific paths, and adds the system
    include dirs used for platform SDK and similar."""
    config = self._TargetConfig(config)
    includes = include_dirs + self.msvs_system_include_dirs[config]
    includes.extend(self._Setting(
      ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
    return [self.ConvertVSMacros(p, config=config) for p in includes]

  def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
    """Updates midl_include_dirs to expand VS specific paths, and adds the
    system include dirs used for platform SDK and similar."""
    config = self._TargetConfig(config)
    includes = midl_include_dirs + self.msvs_system_include_dirs[config]
    includes.extend(self._Setting(
      ('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
    return [self.ConvertVSMacros(p, config=config) for p in includes]

  def GetComputedDefines(self, config):
    """Returns the set of defines that are injected to the defines list based
    on other VS settings."""
    config = self._TargetConfig(config)
    defines = []
    # CharacterSet: '1' is Unicode, '2' is MBCS in the VS2008 DOM.
    if self._ConfigAttrib(['CharacterSet'], config) == '1':
      defines.extend(('_UNICODE', 'UNICODE'))
    if self._ConfigAttrib(['CharacterSet'], config) == '2':
      defines.append('_MBCS')
    defines.extend(self._Setting(
        ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
    return defines

  def GetCompilerPdbName(self, config, expand_special):
    """Get the pdb file name that should be used for compiler invocations, or
    None if there's no explicit name specified."""
    config = self._TargetConfig(config)
    pdbname = self._Setting(
        ('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
    if pdbname:
      pdbname = expand_special(self.ConvertVSMacros(pdbname))
    return pdbname

  def GetMapFileName(self, config, expand_special):
    """Gets the explicitly overriden map file name for a target or returns None
    if it's not set."""
    config = self._TargetConfig(config)
    map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
    if map_file:
      map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
    return map_file

  def GetOutputName(self, config, expand_special):
    """Gets the explicitly overridden output name for a target or returns None
    if it's not overridden."""
    config = self._TargetConfig(config)
    type = self.spec['type']
    root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
    # TODO(scottmg): Handle OutputDirectory without OutputFile.
    output_file = self._Setting((root, 'OutputFile'), config)
    if output_file:
      output_file = expand_special(self.ConvertVSMacros(
          output_file, config=config))
    return output_file

  def GetPDBName(self, config, expand_special, default):
    """Gets the explicitly overridden pdb name for a target or returns
    default if it's not overridden, or if no pdb will be generated."""
    config = self._TargetConfig(config)
    output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
    generate_debug_info = self._Setting(
        ('VCLinkerTool', 'GenerateDebugInformation'), config)
    if generate_debug_info == 'true':
      if output_file:
        return expand_special(self.ConvertVSMacros(output_file, config=config))
      else:
        return default
    else:
      return None

  def GetNoImportLibrary(self, config):
    """If NoImportLibrary: true, ninja will not expect the output to include
    an import library."""
    config = self._TargetConfig(config)
    noimplib = self._Setting(('NoImportLibrary',), config)
    return noimplib == 'true'

  def GetAsmflags(self, config):
    """Returns the flags that need to be added to ml invocations."""
    config = self._TargetConfig(config)
    asmflags = []
    safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
    if safeseh == 'true':
      asmflags.append('/safeseh')
    return asmflags

  def GetCflags(self, config):
    """Returns the flags that need to be added to .c and .cc compilations."""
    config = self._TargetConfig(config)
    cflags = []
    cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
    cl = self._GetWrapper(self, self.msvs_settings[config],
                          'VCCLCompilerTool', append=cflags)
    # Each cl(...) call below translates one VS2008 DOM enum/boolean into the
    # equivalent cl.exe switch; map keys are the DOM's stringified values.
    cl('Optimization',
       map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
    cl('InlineFunctionExpansion', prefix='/Ob')
    cl('DisableSpecificWarnings', prefix='/wd')
    cl('StringPooling', map={'true': '/GF'})
    cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
    cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
    cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
    cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
    cl('FloatingPointModel',
       map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
       default='0')
    cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
    cl('WholeProgramOptimization', map={'true': '/GL'})
    cl('WarningLevel', prefix='/W')
    cl('WarnAsError', map={'true': '/WX'})
    cl('CallingConvention',
       map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
    cl('DebugInformationFormat',
        map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
    cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
    cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
    cl('MinimalRebuild', map={'true': '/Gm'})
    cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
    cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
    cl('RuntimeLibrary',
        map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
    cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
    cl('DefaultCharIsUnsigned', map={'true': '/J'})
    cl('TreatWChar_tAsBuiltInType',
        map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
    cl('EnablePREfast', map={'true': '/analyze'})
    cl('AdditionalOptions', prefix='')
    cl('EnableEnhancedInstructionSet',
        map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
        prefix='/arch:')
    cflags.extend(['/FI' + f for f in self._Setting(
        ('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
    if self.vs_version.short_name in ('2013', '2013e', '2015'):
      # New flag required in 2013 to maintain previous PDB behavior.
      cflags.append('/FS')
    # ninja handles parallelism by itself, don't have the compiler do it too.
    cflags = filter(lambda x: not x.startswith('/MP'), cflags)
    return cflags

  def _GetPchFlags(self, config, extension):
    """Get the flags to be added to the cflags for precompiled header support.
    """
    config = self._TargetConfig(config)
    # The PCH is only built once by a particular source file. Usage of PCH must
    # only be for the same language (i.e. C vs. C++), so only include the pch
    # flags when the language matches.
    if self.msvs_precompiled_header[config]:
      source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
      if _LanguageMatchesForPch(source_ext, extension):
        pch = os.path.split(self.msvs_precompiled_header[config])[1]
        return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
    return  []

  def GetCflagsC(self, config):
    """Returns the flags that need to be added to .c compilations."""
    config = self._TargetConfig(config)
    return self._GetPchFlags(config, '.c')

  def GetCflagsCC(self, config):
    """Returns the flags that need to be added to .cc compilations."""
    config = self._TargetConfig(config)
    return ['/TP'] + self._GetPchFlags(config, '.cc')

  def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
    """Get and normalize the list of paths in AdditionalLibraryDirectories
    setting."""
    config = self._TargetConfig(config)
    libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
                             config, default=[])
    libpaths = [os.path.normpath(
                    gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
                for p in libpaths]
    return ['/LIBPATH:"' + p + '"' for p in libpaths]

  def GetLibFlags(self, config, gyp_to_build_path):
    """Returns the flags that need to be added to lib commands."""
    config = self._TargetConfig(config)
    libflags = []
    lib = self._GetWrapper(self, self.msvs_settings[config],
                          'VCLibrarianTool', append=libflags)
    libflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLibrarianTool', config, gyp_to_build_path))
    lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
    lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
        prefix='/MACHINE:')
    lib('AdditionalOptions')
    return libflags

  def GetDefFile(self, gyp_to_build_path):
    """Returns the .def file from sources, if any.  Otherwise returns None."""
    spec = self.spec
    if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
      def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
      if len(def_files) == 1:
        return gyp_to_build_path(def_files[0])
      elif len(def_files) > 1:
        raise Exception("Multiple .def files")
    return None

  def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
    """.def files get implicitly converted to a ModuleDefinitionFile for the
    linker in the VS generator. Emulate that behaviour here."""
    def_file = self.GetDefFile(gyp_to_build_path)
    if def_file:
      ldflags.append('/DEF:"%s"' % def_file)

  def GetPGDName(self, config, expand_special):
    """Gets the explicitly overridden pgd name for a target or returns None
    if it's not overridden."""
    config = self._TargetConfig(config)
    output_file = self._Setting(
        ('VCLinkerTool', 'ProfileGuidedDatabase'), config)
    if output_file:
      output_file = expand_special(self.ConvertVSMacros(
          output_file, config=config))
    return output_file

  def GetLdflags(self, config, gyp_to_build_path, expand_special,
                 manifest_base_name, output_name, is_executable, build_dir):
    """Returns the flags that need to be added to link commands, and the
    manifest files."""
    config = self._TargetConfig(config)
    ldflags = []
    ld = self._GetWrapper(self, self.msvs_settings[config],
                          'VCLinkerTool', append=ldflags)
    self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
    ld('GenerateDebugInformation', map={'true': '/DEBUG'})
    ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
       prefix='/MACHINE:')
    ldflags.extend(self._GetAdditionalLibraryDirectories(
        'VCLinkerTool', config, gyp_to_build_path))
    ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
    ld('TreatLinkerWarningAsErrors', prefix='/WX',
       map={'true': '', 'false': ':NO'})
    out = self.GetOutputName(config, expand_special)
    if out:
      ldflags.append('/OUT:' + out)
    pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
    if pdb:
      ldflags.append('/PDB:' + pdb)
    pgd = self.GetPGDName(config, expand_special)
    if pgd:
      ldflags.append('/PGD:' + pgd)
    map_file = self.GetMapFileName(config, expand_special)
    ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
        else '/MAP'})
    ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
    ld('AdditionalOptions', prefix='')

    minimum_required_version = self._Setting(
        ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
    if minimum_required_version:
      minimum_required_version = ',' + minimum_required_version
    ld('SubSystem',
       map={'1': 'CONSOLE%s' % minimum_required_version,
            '2': 'WINDOWS%s' % minimum_required_version},
       prefix='/SUBSYSTEM:')

    stack_reserve_size = self._Setting(
        ('VCLinkerTool', 'StackReserveSize'), config, default='')
    if stack_reserve_size:
      stack_commit_size = self._Setting(
          ('VCLinkerTool', 'StackCommitSize'), config, default='')
      if stack_commit_size:
        stack_commit_size = ',' + stack_commit_size
      ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))

    ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
    ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
    ld('BaseAddress', prefix='/BASE:')
    ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
    ld('RandomizedBaseAddress',
       map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
    ld('DataExecutionPrevention',
       map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
    ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
    ld('ForceSymbolReferences', prefix='/INCLUDE:')
    ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
    ld('LinkTimeCodeGeneration',
       map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
            '4': ':PGUPDATE'},
       prefix='/LTCG')
    ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
    ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
    ld('EntryPointSymbol', prefix='/ENTRY:')
    ld('Profile', map={'true': '/PROFILE'})
    ld('LargeAddressAware',
       map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
    # TODO(scottmg): This should sort of be somewhere else (not really a flag).
    ld('AdditionalDependencies', prefix='')

    # SAFESEH is only meaningful for 32-bit images; x64 has no equivalent.
    if self.GetArch(config) == 'x86':
      safeseh_default = 'true'
    else:
      safeseh_default = None
    ld('ImageHasSafeExceptionHandlers',
       map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
       default=safeseh_default)

    # If the base address is not specifically controlled, DYNAMICBASE should
    # be on by default.
    base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
                        ldflags)
    if not base_flags:
      ldflags.append('/DYNAMICBASE')

    # If the NXCOMPAT flag has not been specified, default to on. Despite the
    # documentation that says this only defaults to on when the subsystem is
    # Vista or greater (which applies to the linker), the IDE defaults it on
    # unless it's explicitly off.
    if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
      ldflags.append('/NXCOMPAT')

    have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
    manifest_flags, intermediate_manifest, manifest_files = \
        self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
                                 is_executable and not have_def_file, build_dir)
    ldflags.extend(manifest_flags)
    return ldflags, intermediate_manifest, manifest_files

  def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
                          allow_isolation, build_dir):
    """Returns a 3-tuple:
    - the set of flags that need to be added to the link to generate
      a default manifest
    - the intermediate manifest that the linker will generate that should be
      used to assert it doesn't add anything to the merged one.
    - the list of all the manifest files to be merged by the manifest tool and
      included into the link."""
    generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
                                      config,
                                      default='true')
    if generate_manifest != 'true':
      # This means not only that the linker should not generate the intermediate
      # manifest but also that the manifest tool should do nothing even when
      # additional manifests are specified.
      return ['/MANIFEST:NO'], [], []

    output_name = name + '.intermediate.manifest'
    flags = [
      '/MANIFEST',
      '/ManifestFile:' + output_name,
    ]

    # Instead of using the MANIFESTUAC flags, we generate a .manifest to
    # include into the list of manifests. This allows us to avoid the need to
    # do two passes during linking. The /MANIFEST flag and /ManifestFile are
    # still used, and the intermediate manifest is used to assert that the
    # final manifest we get from merging all the additional manifest files
    # (plus the one we generate here) isn't modified by merging the
    # intermediate into it.

    # Always NO, because we generate a manifest file that has what we want.
    flags.append('/MANIFESTUAC:NO')

    config = self._TargetConfig(config)
    enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
                               default='true')
    manifest_files = []
    generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
    if enable_uac == 'true':
      execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
                                      config, default='0')
      execution_level_map = {
        '0': 'asInvoker',
        '1': 'highestAvailable',
        '2': 'requireAdministrator'
      }

      ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
                                default='false')

      inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
  <security>
    <requestedPrivileges>
      <requestedExecutionLevel level='%s' uiAccess='%s' />
    </requestedPrivileges>
  </security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
    else:
      inner = ''

    generated_manifest_contents = generated_manifest_outer % inner
    generated_name = name + '.generated.manifest'
    # Need to join with the build_dir here as we're writing it during
    # generation time, but we return the un-joined version because the build
    # will occur in that directory. We only write the file if the contents
    # have changed so that simply regenerating the project files doesn't
    # cause a relink.
    build_dir_generated_name = os.path.join(build_dir, generated_name)
    gyp.common.EnsureDirExists(build_dir_generated_name)
    f = gyp.common.WriteOnDiff(build_dir_generated_name)
    f.write(generated_manifest_contents)
    f.close()
    manifest_files = [generated_name]

    if allow_isolation:
      flags.append('/ALLOWISOLATION')

    manifest_files += self._GetAdditionalManifestFiles(config,
                                                      gyp_to_build_path)
    return flags, output_name, manifest_files

  def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
    """Gets additional manifest files that are added to the default one
    generated by the linker."""
    files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
                          default=[])
    if isinstance(files, str):
      files = files.split(';')
    return [os.path.normpath(
                gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
            for f in files]

  def IsUseLibraryDependencyInputs(self, config):
    """Returns whether the target should be linked via Use Library Dependency
    Inputs (using component .objs of a given .lib)."""
    config = self._TargetConfig(config)
    uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
    return uldi == 'true'

  def IsEmbedManifest(self, config):
    """Returns whether manifest should be linked into binary."""
    config = self._TargetConfig(config)
    embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
                          default='true')
    return embed == 'true'

  def IsLinkIncremental(self, config):
    """Returns whether the target should be linked incrementally."""
    config = self._TargetConfig(config)
    link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
    return link_inc != '1'

  def GetRcflags(self, config, gyp_to_ninja_path):
    """Returns the flags that need to be added to invocations of the resource
    compiler."""
    config = self._TargetConfig(config)
    rcflags = []
    rc = self._GetWrapper(self, self.msvs_settings[config],
        'VCResourceCompilerTool', append=rcflags)
    rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
    rcflags.append('/I' + gyp_to_ninja_path('.'))
    rc('PreprocessorDefinitions', prefix='/d')
    # /l arg must be in hex without leading '0x'
    rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
    return rcflags

  def BuildCygwinBashCommandLine(self, args, path_to_base):
    """Build a command line that runs args via cygwin bash. We assume that all
    incoming paths are in Windows normpath'd form, so they need to be
    converted to posix style for the part of the command line that's passed to
    bash. We also have to do some Visual Studio macro emulation here because
    various rules use magic VS names for things. Also note that rules that
    contain ninja variables cannot be fixed here (for example ${source}), so
    the outer generator needs to make sure that the paths that are written out
    are in posix style, if the command line will be used here."""
    cygwin_dir = os.path.normpath(
        os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
    cd = ('cd %s' % path_to_base).replace('\\', '/')
    args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
    args = ["'%s'" % a.replace("'", "'\\''") for a in args]
    bash_cmd = ' '.join(args)
    cmd = (
        'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
        'bash -c "%s ; %s"' % (cd, bash_cmd))
    return cmd

  def IsRuleRunUnderCygwin(self, rule):
    """Determine if an action should be run under cygwin. If the variable is
    unset, or set to 1 we use cygwin."""
    return int(rule.get('msvs_cygwin_shell',
                        self.spec.get('msvs_cygwin_shell', 1))) != 0

  def _HasExplicitRuleForExtension(self, spec, extension):
    """Determine if there's an explicit rule for a particular extension."""
    for rule in spec.get('rules', []):
      if rule['extension'] == extension:
        return True
    return False

  def _HasExplicitIdlActions(self, spec):
    """Determine if an action should not run midl for .idl files."""
    return any([action.get('explicit_idl_action', 0)
                for action in spec.get('actions', [])])

  def HasExplicitIdlRulesOrActions(self, spec):
    """Determine if there's an explicit rule or action for idl files. When
    there isn't we need to generate implicit rules to build MIDL .idl files."""
    return (self._HasExplicitRuleForExtension(spec, 'idl') or
            self._HasExplicitIdlActions(spec))

  def HasExplicitAsmRules(self, spec):
    """Determine if there's an explicit rule for asm files. When there isn't we
    need to generate implicit rules to assemble .asm files."""
    return self._HasExplicitRuleForExtension(spec, 'asm')

  def GetIdlBuildData(self, source, config):
    """Determine the implicit outputs for an idl file. Returns output
    directory, outputs, and variables and flags that are required."""
    config = self._TargetConfig(config)
    midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
    def midl(name, default=None):
      return self.ConvertVSMacros(midl_get(name, default=default),
                                  config=config)
    tlb = midl('TypeLibraryName', default='${root}.tlb')
    header = midl('HeaderFileName', default='${root}.h')
    dlldata = midl('DLLDataFileName', default='dlldata.c')
    iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
    proxy = midl('ProxyFileName', default='${root}_p.c')
    # Note that .tlb is not included in the outputs as it is not always
    # generated depending on the content of the input idl file.
    outdir = midl('OutputDirectory', default='')
    output = [header, dlldata, iid, proxy]
    variables = [('tlb', tlb),
                 ('h', header),
                 ('dlldata', dlldata),
                 ('iid', iid),
                 ('proxy', proxy)]
    # TODO(scottmg): Are there configuration settings to set these flags?
    target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
    flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
    return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
  """Helper to generate dependencies and build rules to handle generation of
  precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
  """

  def __init__(
      self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
    self.settings = settings
    self.config = config
    # Resolve the pch source both as a build path (for comparisons against
    # incoming inputs) and as the unique object it will compile to.
    source = settings.msvs_precompiled_source[config]
    self.pch_source = gyp_to_build_path(source)
    base, _ = os.path.splitext(source)
    self.output_obj = gyp_to_unique_output(base + obj_ext).lower()

  def _PchHeader(self):
    """Get the header that will appear in an #include line for all source
    files."""
    header_path = self.settings.msvs_precompiled_header[self.config]
    return os.path.split(header_path)[1]

  def GetObjDependencies(self, sources, objs, arch):
    """Given a list of sources files and the corresponding object files,
    returns a list of the pch files that should be depended upon. The
    additional wrapping in the return value is for interface compatibility
    with make.py on Mac, and xcode_emulation.py."""
    assert arch is None
    if not self._PchHeader():
      return []
    pch_ext = os.path.splitext(self.pch_source)[1]
    # Any source of the same language as the pch source pulls in the pch obj.
    uses_pch = any(
        _LanguageMatchesForPch(os.path.splitext(src)[1], pch_ext)
        for src in sources)
    return [(None, None, self.output_obj)] if uses_pch else []

  def GetPchBuildCommands(self, arch):
    """Not used on Windows as there are no additional build steps required
    (instead, existing steps are modified in GetFlagsModifications below)."""
    return []

  def GetFlagsModifications(self, input, output, implicit, command,
                            cflags_c, cflags_cc, expand_special):
    """Get the modified cflags and implicit dependencies that should be used
    for the pch compilation step."""
    if input != self.pch_source:
      return [], output, implicit
    pch_output = ['/Yc' + self._PchHeader()]
    if command == 'cxx':
      return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
              self.output_obj, [])
    if command == 'cc':
      return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
              self.output_obj, [])
    return [], output, implicit
# Module-level cache: the Visual Studio version is resolved once per process.
vs_version = None
def GetVSVersion(generator_flags):
  """Return the Visual Studio version selected by generator_flags, caching
  the lookup in the module-level |vs_version| on first use."""
  global vs_version
  if not vs_version:
    requested = generator_flags.get('msvs_version', 'auto')
    vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
        requested, allow_fallback=False)
  return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
  """Return the setup-script command for the selected VS version.

  Note: |arch| is accepted for interface compatibility but not forwarded;
  SetupScript() is invoked with its default architecture.
  """
  return GetVSVersion(generator_flags).SetupScript()
def ExpandMacros(string, expansions):
  """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
  for the canonical way to retrieve a suitable dict.

  Args:
    string: text to expand; returned unchanged when it contains no '$'.
    expansions: dict mapping '$(Name)' macro keys to replacement text.
  Returns:
    The string with every known macro replaced.
  """
  if '$' in string:
    # .items() instead of the Python-2-only .iteritems(); identical
    # semantics on Python 2 and keeps the function importable on Python 3.
    for old, new in expansions.items():
      # A '$(' in a replacement value would leak an unexpanded macro into
      # the output, so it always indicates a broken expansion table.
      assert '$(' not in new, new
      string = string.replace(old, new)
  return string
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
                             system_includes, open_out):
  """It's not sufficient to have the absolute path to the compiler, linker,
  etc. on Windows, as those tools rely on .dlls being in the PATH. We also
  need to support both x86 and x64 compilers within the same build (to support
  msvs_target_platform hackery). Different architectures require a different
  compiler binary, and different supporting environment variables (INCLUDE,
  LIB, LIBPATH). So, we extract the environment here, wrap all invocations
  of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
  sets up the environment, and then we do not prefix the compiler with
  an absolute path, instead preferring something like "cl.exe" in the rule
  which will then run whichever the environment setup has put in the path.
  When the following procedure to generate environment files does not
  meet your requirement (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to the gyp to suppress file
  generation and use custom environment files prepared by yourself.

  Returns a dict mapping arch ('x86'/'x64') to the cl.exe path (or the bare
  'cl.exe' string when custom environment files are in use).
  """
  archs = ('x86', 'x64')
  if generator_flags.get('ninja_use_custom_environment_files', 0):
    # Caller provides environment files; just report generic compiler names.
    cl_paths = {}
    for arch in archs:
      cl_paths[arch] = 'cl.exe'
    return cl_paths
  vs = GetVSVersion(generator_flags)
  cl_paths = {}
  for arch in archs:
    # Extract environment variables for subprocesses.
    # Runs the VS setup script then 'set' to dump the resulting environment.
    args = vs.SetupScript(arch)
    args.extend(('&&', 'set'))
    popen = subprocess.Popen(
        args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    variables, _ = popen.communicate()
    env = _ExtractImportantEnvironment(variables)

    # Inject system includes from gyp files into INCLUDE.
    if system_includes:
      system_includes = system_includes | OrderedSet(
                                              env.get('INCLUDE', '').split(';'))
      env['INCLUDE'] = ';'.join(system_includes)

    # Write the environment block to 'environment.<arch>' for win_tool.py.
    env_block = _FormatAsEnvironmentBlock(env)
    f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
    f.write(env_block)
    f.close()

    # Find cl.exe location for this architecture.
    args = vs.SetupScript(arch)
    args.extend(('&&',
      'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
    popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
    output, _ = popen.communicate()
    cl_paths[arch] = _ExtractCLPath(output)
  return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
    """Emulate behavior of msvs_error_on_missing_sources present in the msvs
    generator: Check that all regular source files, i.e. not created at run time,
    exist on disk.  Missing files cause needless recompilation when building via
    VS, and we want this check to match for people/bots that build using ninja,
    so they're not surprised when the VS build fails.

    Raises Exception listing the missing files when the
    msvs_error_on_missing_sources generator flag is set and any regular
    source is absent; otherwise does nothing.
    """
    if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
        # Entries containing '$' are special (generated at build time); they
        # are not expected to exist on disk and are skipped.
        no_specials = [x for x in sources if '$' not in x]
        relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
        # Use list comprehensions rather than filter(): on Python 3 a filter
        # object is always truthy, so the previous "if missing:" check would
        # raise even when no file was actually missing.
        missing = [x for x in relative if not os.path.exists(x)]
        if missing:
            # They'll look like out\Release\..\..\stuff\things.cc, so normalize
            # the path for a slightly less crazy looking output.
            cleaned_up = [os.path.normpath(x) for x in missing]
            raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Fills in the default_variables entries that many generators expect when
# gyp runs on Windows.
def CalculateCommonVariables(default_variables, params):
    flags = params.get('generator_flags', {})
    # Expose the Visual Studio version so gyp conditions can branch on it.
    default_variables['MSVS_VERSION'] = (
        gyp.msvs_emulation.GetVSVersion(flags).ShortName())
    # PROCESSOR_ARCHITECTURE reflects the word size of the *current* process;
    # PROCESSOR_ARCHITEW6432 carries the real system word size when running
    # through WOW64, so both must be consulted to detect a 64-bit OS.
    arch_values = (os.environ.get('PROCESSOR_ARCHITECTURE', ''),
                   os.environ.get('PROCESSOR_ARCHITEW6432', ''))
    is_64bit = any('64' in value for value in arch_values)
    default_variables['MSVS_OS_BITS'] = 64 if is_64bit else 32
| gpl-3.0 |
MSeifert04/numpy | numpy/f2py/tests/test_return_real.py | 13 | 5619 | from __future__ import division, absolute_import, print_function
import platform
import pytest
from numpy import array
from numpy.compat import long
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnReal(util.F2PyTest):
    """Common checks for f2py-wrapped routines returning/outputting reals."""

    def check_function(self, t):
        # Single-precision wrappers (t0/t4/s0/s4) only carry ~7 significant
        # digits, so allow a small absolute error; the double-precision
        # variants must reproduce these values exactly.
        if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
            err = 1e-5
        else:
            err = 0.0
        # Scalars and anything coercible to a scalar must round-trip.
        assert_(abs(t(234) - 234.0) <= err)
        assert_(abs(t(234.6) - 234.6) <= err)
        assert_(abs(t(long(234)) - 234.0) <= err)
        assert_(abs(t('234') - 234) <= err)
        assert_(abs(t('234.6') - 234.6) <= err)
        assert_(abs(t(-234) + 234) <= err)
        assert_(abs(t([234]) - 234) <= err)
        assert_(abs(t((234,)) - 234.) <= err)
        assert_(abs(t(array(234)) - 234.) <= err)
        assert_(abs(t(array([234])) - 234.) <= err)
        assert_(abs(t(array([[234]])) - 234.) <= err)
        # 234 does not fit in an int8 ('b'): it wraps to -22, hence "+ 22".
        assert_(abs(t(array([234], 'b')) + 22) <= err)
        assert_(abs(t(array([234], 'h')) - 234.) <= err)
        assert_(abs(t(array([234], 'i')) - 234.) <= err)
        assert_(abs(t(array([234], 'l')) - 234.) <= err)
        assert_(abs(t(array([234], 'B')) - 234.) <= err)
        assert_(abs(t(array([234], 'f')) - 234.) <= err)
        assert_(abs(t(array([234], 'd')) - 234.) <= err)
        if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
            # Both values overflow single precision and compare equal as inf.
            assert_(t(1e200) == t(1e300))  # inf

        #assert_raises(ValueError, t, array([234], 'S1'))
        assert_raises(ValueError, t, 'abc')

        assert_raises(IndexError, t, [])
        assert_raises(IndexError, t, ())

        assert_raises(Exception, t, t)
        assert_raises(Exception, t, {})

        try:
            # Too large even for double precision; either inf or OverflowError
            # is acceptable depending on the conversion path.
            r = t(10 ** 400)
            assert_(repr(r) in ['inf', 'Infinity'], repr(r))
        except OverflowError:
            pass
@pytest.mark.skipif(
    platform.system() == 'Darwin',
    reason="Prone to error when run with numpy/f2py/tests on mac os, "
           "but not when run in isolation")
class TestCReturnReal(TestReturnReal):
    """Same checks against pure C implementations wrapped via a .pyf file."""
    suffix = ".pyf"
    module_name = "c_ext_return_real"
    # Signature file with inline C usercode; must stay exactly as f2py
    # expects it (it is compiled at test time).
    code = """
python module c_ext_return_real
usercode \'\'\'
float t4(float value) { return value; }
void s4(float *t4, float value) { *t4 = value; }
double t8(double value) { return value; }
void s8(double *t8, double value) { *t8 = value; }
\'\'\'
interface
  function t4(value)
    real*4 intent(c) :: t4,value
  end
  function t8(value)
    real*8 intent(c) :: t8,value
  end
  subroutine s4(t4,value)
    intent(c) s4
    real*4 intent(out) :: t4
    real*4 intent(c) :: value
  end
  subroutine s8(t8,value)
    intent(c) s8
    real*8 intent(out) :: t8
    real*8 intent(c) :: value
  end
end interface
end python module c_ext_return_real
"""

    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(','))
    def test_all(self, name):
        self.check_function(getattr(self.module, name))
class TestF77ReturnReal(TestReturnReal):
    """Same checks against Fortran 77 implementations."""
    # Fixed-form F77 source: statements begin past column 6 and the
    # "cf2py" directives start in column 1.
    code = """
       function t0(value)
         real value
         real t0
         t0 = value
       end
       function t4(value)
         real*4 value
         real*4 t4
         t4 = value
       end
       function t8(value)
         real*8 value
         real*8 t8
         t8 = value
       end
       function td(value)
         double precision value
         double precision td
         td = value
       end

       subroutine s0(t0,value)
         real value
         real t0
cf2py    intent(out) t0
         t0 = value
       end
       subroutine s4(t4,value)
         real*4 value
         real*4 t4
cf2py    intent(out) t4
         t4 = value
       end
       subroutine s8(t8,value)
         real*8 value
         real*8 t8
cf2py    intent(out) t8
         t8 = value
       end
       subroutine sd(td,value)
         double precision value
         double precision td
cf2py    intent(out) td
         td = value
       end
    """

    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
    def test_all(self, name):
        self.check_function(getattr(self.module, name))
class TestF90ReturnReal(TestReturnReal):
    """Same checks against free-form Fortran 90 module procedures."""
    suffix = ".f90"
    code = """
module f90_return_real
  contains
       function t0(value)
         real :: value
         real :: t0
         t0 = value
       end function t0
       function t4(value)
         real(kind=4) :: value
         real(kind=4) :: t4
         t4 = value
       end function t4
       function t8(value)
         real(kind=8) :: value
         real(kind=8) :: t8
         t8 = value
       end function t8
       function td(value)
         double precision :: value
         double precision :: td
         td = value
       end function td

       subroutine s0(t0,value)
         real :: value
         real :: t0
         !f2py intent(out) t0
         t0 = value
       end subroutine s0
       subroutine s4(t4,value)
         real(kind=4) :: value
         real(kind=4) :: t4
         !f2py intent(out) t4
         t4 = value
       end subroutine s4
       subroutine s8(t8,value)
         real(kind=8) :: value
         real(kind=8) :: t8
         !f2py intent(out) t8
         t8 = value
       end subroutine s8
       subroutine sd(td,value)
         double precision :: value
         double precision :: td
         !f2py intent(out) td
         td = value
       end subroutine sd
end module f90_return_real
    """

    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
    def test_all(self, name):
        # F90 module procedures live on the wrapped module's submodule.
        self.check_function(getattr(self.module.f90_return_real, name))
| bsd-3-clause |
40223123/finaltest2 | static/Brython3.1.0-20150301-090019/Lib/socket.py | 730 | 14913 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
try:
import errno
except ImportError:
errno = None
# Fallback numeric values are used when the errno module is unavailable
# (see the guarded import above).
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)

__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))

# Keep a reference to the low-level socket type before it is shadowed by the
# subclass defined below.
_realsocket = socket

# WSA error codes
if sys.platform.lower().startswith("win"):
    # Human-readable messages for common Winsock error numbers.
    errorTab = {}
    errorTab[10004] = "The operation was interrupted."
    errorTab[10009] = "A bad file handle was passed."
    errorTab[10013] = "Permission denied."
    errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
    errorTab[10022] = "An invalid operation was attempted."
    errorTab[10035] = "The socket operation would block"
    errorTab[10036] = "A blocking operation is already in progress."
    errorTab[10048] = "The network address is in use."
    errorTab[10054] = "The connection has been reset."
    errorTab[10058] = "The network has been shut down."
    errorTab[10060] = "The operation timed out."
    errorTab[10061] = "Connection refused."
    errorTab[10063] = "The name is too long."
    errorTab[10064] = "The host is down."
    errorTab[10065] = "The host is unreachable."
    __all__.append("errorTab")
class socket(_socket.socket):

    """A subclass of _socket.socket adding the makefile() method."""

    __slots__ = ["__weakref__", "_io_refs", "_closed"]

    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
        _socket.socket.__init__(self, family, type, proto, fileno)
        # Number of makefile() streams still referencing this socket; the
        # real close is deferred until this drops to zero (see close()).
        self._io_refs = 0
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, *args):
        if not self._closed:
            self.close()

    def __repr__(self):
        """Wrap __repr__() to reveal the real class name."""
        s = _socket.socket.__repr__(self)
        if s.startswith("<socket object"):
            s = "<%s.%s%s%s" % (self.__class__.__module__,
                                self.__class__.__name__,
                                getattr(self, '_closed', False) and " [closed] " or "",
                                s[7:])
        return s

    def __getstate__(self):
        raise TypeError("Cannot serialize socket object")

    def dup(self):
        """dup() -> socket object

        Return a new socket object connected to the same system resource.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        sock.settimeout(self.gettimeout())
        return sock

    def accept(self):
        """accept() -> (socket object, address info)

        Wait for an incoming connection.  Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        sock = socket(self.family, self.type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr

    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket

        The arguments are as for io.open() after the filename,
        except the only mode characters supported are 'r', 'w' and 'b'.
        The semantics are similar too.  (XXX refactor to share code?)
        """
        for c in mode:
            if c not in {"r", "w", "b"}:
                # Fixed: the original never applied "% mode", so the error
                # message showed a literal "%r" instead of the bad mode.
                raise ValueError("invalid mode %r (only r, w, b allowed)" % mode)
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        # The SocketIO keeps us alive: count it so close() can be deferred.
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text

    def _decref_socketios(self):
        # Called by SocketIO.close(): drop one stream reference and perform
        # the deferred close once close() has already been requested.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()

    def _real_close(self, _ss=_socket.socket):
        # This function should not reference any globals. See issue #808164.
        _ss.close(self)

    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        if self._io_refs <= 0:
            self._real_close()

    def detach(self):
        """detach() -> file descriptor

        Close the socket object without closing the underlying file descriptor.
        The object cannot be used after this call, but the file descriptor
        can be reused for other purposes.  The file descriptor is returned.
        """
        self._closed = True
        return super().detach()
def fromfd(fd, family, type, proto=0):
    """Build a socket object on top of a duplicate of file descriptor *fd*.

    The duplicated descriptor is owned by the returned socket, so closing
    the result never affects the caller's descriptor.  The remaining
    arguments mirror those of socket().
    """
    duplicated = dup(fd)
    return socket(family, type, proto, duplicated)
# share() is not available on every platform; only define fromshare() where
# the low-level support exists.
if hasattr(_socket.socket, "share"):
    def fromshare(info):
        """ fromshare(info) -> socket object

        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        # family/type/proto are encoded inside the shared info blob.
        return socket(0, 0, 0, info)
# socketpair() is platform-dependent; only wrap it when _socket provides it.
if hasattr(_socket, "socketpair"):

    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)

        Create a pair of socket objects from the sockets returned by the platform
        socketpair() function.
        The arguments are the same as for socket() except the default family is
        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
        """
        if family is None:
            try:
                family = AF_UNIX
            except NameError:
                # AF_UNIX is not defined on this platform.
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        # Re-wrap the low-level sockets in this module's socket class;
        # detach() hands over ownership of each file descriptor.
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b
# Errnos that mean "would block" on a non-blocking socket; mapped to a
# None return in SocketIO.readinto()/write().
_blocking_errnos = { EAGAIN, EWOULDBLOCK }

class SocketIO(io.RawIOBase):

    """Raw I/O implementation for stream sockets.

    This class supports the makefile() method on sockets.  It provides
    the raw I/O interface on top of a socket object.
    """

    # One might wonder why not let FileIO do the job instead.  There are two
    # main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't used read() and
    #   write() on a socket handle)
    # - it wouldn't work with socket timeouts (FileIO would ignore the
    #   timeout and consider the socket non-blocking)

    # XXX More docs

    def __init__(self, sock, mode):
        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
            raise ValueError("invalid mode: %r" % mode)
        io.RawIOBase.__init__(self)
        self._sock = sock
        # Raw socket I/O is always binary; normalize the stored mode.
        if "b" not in mode:
            mode += "b"
        self._mode = mode
        self._reading = "r" in mode
        self._writing = "w" in mode
        self._timeout_occurred = False

    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read.  If the socket is non-blocking and no bytes
        are available, None is returned.

        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """
        self._checkClosed()
        self._checkReadable()
        if self._timeout_occurred:
            raise IOError("cannot read from timed out object")
        while True:
            try:
                return self._sock.recv_into(b)
            except timeout:
                # Remember the timeout so later reads fail fast (see above).
                self._timeout_occurred = True
                raise
            except InterruptedError:
                # EINTR: retry the recv instead of surfacing the interruption.
                continue
            except error as e:
                if e.args[0] in _blocking_errnos:
                    # Non-blocking socket with no data available.
                    return None
                raise

    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written.  This can be less than
        len(b) if not all data could be written.  If the socket is
        non-blocking and no bytes could be written None is returned.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return self._sock.send(b)
        except error as e:
            # XXX what about EINTR?
            if e.args[0] in _blocking_errnos:
                return None
            raise

    def readable(self):
        """True if the SocketIO is open for reading.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._reading

    def writable(self):
        """True if the SocketIO is open for writing.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._writing

    def seekable(self):
        """True if the SocketIO is open for seeking.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return super().seekable()

    def fileno(self):
        """Return the file descriptor of the underlying socket.
        """
        self._checkClosed()
        return self._sock.fileno()

    @property
    def name(self):
        # -1 mirrors an invalid file descriptor once the object is closed.
        if not self.closed:
            return self.fileno()
        else:
            return -1

    @property
    def mode(self):
        return self._mode

    def close(self):
        """Close the SocketIO object.  This doesn't close the underlying
        socket, except if all references to it have disappeared.
        """
        if self.closed:
            return
        io.RawIOBase.close(self)
        # Let the owning socket drop its stream refcount; it may then
        # perform its own deferred close.
        self._sock._decref_socketios()
        self._sock = None
def getfqdn(name=''):
    """Return a fully qualified domain name for *name*.

    An empty argument (or '0.0.0.0') is interpreted as meaning the local
    host.  The hostname reported by gethostbyaddr() is preferred, then any
    of its aliases containing a dot; when lookup fails entirely, the input
    (or gethostname()) is returned unchanged.
    """
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        # No DNS info available: fall back to the bare name.
        return name
    # Prefer the first qualified (dotted) name, the canonical hostname first.
    candidates = [hostname] + aliases
    return next((alias for alias in candidates if '.' in alias), hostname)
# Sentinel meaning "use the global default timeout" (None is a valid timeout).
_GLOBAL_DEFAULT_TIMEOUT = object()

def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """

    host, port = address
    err = None
    # Try each resolved address family/socktype in order until one connects,
    # keeping the most recent failure to re-raise if all attempts fail.
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except error as _:
            err = _
            # Close the failed socket before trying the next candidate.
            if sock is not None:
                sock.close()
    if err is not None:
        raise err
    else:
        raise error("getaddrinfo returns an empty list")
| gpl-3.0 |
AladdinSonni/youtube-dl | youtube_dl/extractor/mdr.py | 73 | 2124 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class MDRIE(InfoExtractor):
    """Extractor for audio/video pages on mdr.de.

    The page embeds a ``dataURL`` pointing to an ``*-avCustom.xml``
    document that lists one downloadable asset per bitrate/kind; formats
    are built from those assets.
    """
    _VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'

    # No tests, MDR regularily deletes its videos
    _TEST = {
        'url': 'http://www.mdr.de/fakt/video189002.html',
        'only_matching': True,
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('video_id')
        domain = m.group('domain')

        # determine title and media streams from webpage
        html = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<h[12]>(.*?)</h[12]>', html, 'title')
        xmlurl = self._search_regex(
            r'dataURL:\'(/(?:.+)/(?:video|audio)[0-9]+-avCustom.xml)', html, 'XML URL')

        doc = self._download_xml(domain + xmlurl, video_id)
        formats = []
        for a in doc.findall('./assets/asset'):
            url_el = a.find('./progressiveDownloadUrl')
            if url_el is None:
                # Asset without a direct download URL; nothing to extract.
                continue
            # Bitrates in the XML are in bit/s; youtube-dl expects kbit/s.
            abr = int(a.find('bitrateAudio').text) // 1000
            media_type = a.find('mediaType').text
            # Renamed from "format" to avoid shadowing the builtin.
            fmt = {
                'abr': abr,
                'filesize': int(a.find('fileSize').text),
                'url': url_el.text,
            }
            vbr_el = a.find('bitrateVideo')
            if vbr_el is None:
                # No video bitrate element: this is an audio-only asset.
                fmt.update({
                    'vcodec': 'none',
                    'format_id': '%s-%d' % (media_type, abr),
                })
            else:
                vbr = int(vbr_el.text) // 1000
                fmt.update({
                    'vbr': vbr,
                    'width': int(a.find('frameWidth').text),
                    'height': int(a.find('frameHeight').text),
                    'format_id': '%s-%d' % (media_type, vbr),
                })
            formats.append(fmt)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
        }
| unlicense |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/test/test_xml_etree_c.py | 6 | 2602 | # xml.etree test for cElementTree
from test import test_support
from test.test_support import precisionbigmemtest, _2G
import unittest
cET = test_support.import_module('xml.etree.cElementTree')
# cElementTree specific tests
def sanity():
    """
    Import sanity.

    >>> from xml.etree import cElementTree
    """
    # The doctest above is the whole test: it is collected by test_main()
    # through run_doctest() and just verifies the C module is importable.
class MiscTests(unittest.TestCase):
    # Issue #8651.
    @precisionbigmemtest(size=_2G + 100, memuse=1)
    def test_length_overflow(self, size):
        # Feeding more than 2 GiB must raise OverflowError instead of
        # overflowing a C length counter (issue #8651).
        if size < _2G + 100:
            self.skipTest("not enough free memory, need at least 2 GB")
        data = b'x' * size
        parser = cET.XMLParser()
        try:
            self.assertRaises(OverflowError, parser.feed, data)
        finally:
            # Drop the huge buffer promptly so the memory can be reclaimed.
            data = None

    def test_del_attribute(self):
        # tag/text/tail/attrib must reject deletion (del) while still
        # allowing reassignment; deletion must leave the value intact.
        element = cET.Element('tag')

        element.tag = 'TAG'
        with self.assertRaises(AttributeError):
            del element.tag
        self.assertEqual(element.tag, 'TAG')

        with self.assertRaises(AttributeError):
            del element.text
        self.assertIsNone(element.text)
        element.text = 'TEXT'
        with self.assertRaises(AttributeError):
            del element.text
        self.assertEqual(element.text, 'TEXT')

        with self.assertRaises(AttributeError):
            del element.tail
        self.assertIsNone(element.tail)
        element.tail = 'TAIL'
        with self.assertRaises(AttributeError):
            del element.tail
        self.assertEqual(element.tail, 'TAIL')

        with self.assertRaises(AttributeError):
            del element.attrib
        self.assertEqual(element.attrib, {})
        element.attrib = {'A': 'B', 'C': 'D'}
        with self.assertRaises(AttributeError):
            del element.attrib
        self.assertEqual(element.attrib, {'A': 'B', 'C': 'D'})
def test_main():
    """Run the cElementTree-specific tests, then replay the whole
    xml.etree.ElementTree suite against the C implementation."""
    from test import test_xml_etree, test_xml_etree_c

    # Run the tests specific to the C implementation
    test_support.run_doctest(test_xml_etree_c, verbosity=True)

    # Assign the C implementation before running the doctests
    # Patch the __name__, to prevent confusion with the pure Python test
    pyET = test_xml_etree.ET
    py__name__ = test_xml_etree.__name__
    test_xml_etree.ET = cET
    if __name__ != '__main__':
        test_xml_etree.__name__ = __name__
    try:
        # Run the same test suite as xml.etree.ElementTree
        test_xml_etree.test_main(module_name='xml.etree.cElementTree')
    finally:
        # Restore the patched globals even if the suite fails.
        test_xml_etree.ET = pyET
        test_xml_etree.__name__ = py__name__

if __name__ == '__main__':
    test_main()
| gpl-3.0 |
MauHernandez/cyclope | cyclope/apps/staticpages/admin.py | 1 | 4324 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2013 Código Sur Sociedad Civil.
# All rights reserved.
#
# This file is part of Cyclope.
#
# Cyclope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cyclope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.widgets import AdminTextareaWidget
from cyclope.core.collections.admin import CollectibleAdmin
from cyclope.admin import BaseContentAdmin
from cyclope.widgets import CKEditor
from cyclope.models import MenuItem
from cyclope import settings as cyc_settings
from cyclope.core import frontend
from models import StaticPage, HTMLBlock
class StaticPageAdminForm(forms.ModelForm):
    """Admin form for StaticPage that also edits the reverse relation from
    MenuItem: the user can attach/detach the page to menu items directly.

    When an existing page is loaded, the menu items currently pointing at
    it are pre-selected.
    """
    menu_items = forms.ModelMultipleChoiceField(label=_('Menu items'),
        queryset = MenuItem.tree.all(), required=False,
    )

    def __init__(self, *args, **kwargs):
        # this was initially written to be used for any BaseContent, that's
        # why we don't assume the content_type to be pre-determined
        # TODO(nicoechaniz): update code
        # Fixed: call super() with this class (not forms.ModelForm) so the
        # full MRO, including this class's own ancestors, is initialized.
        super(StaticPageAdminForm, self).__init__(*args, **kwargs)
        if self.instance.id is not None:
            instance_type = ContentType.objects.get_for_model(self.instance)
            # flat=True yields plain ids instead of 1-tuples, removing the
            # manual values[0] unpacking.
            selected_items = list(
                MenuItem.objects.filter(
                    content_type=instance_type,
                    object_id=self.instance.id).values_list('id', flat=True))
            self.fields['menu_items'].initial = selected_items

    class Meta:
        model = StaticPage
class StaticPageAdmin(CollectibleAdmin, BaseContentAdmin):
    # Updates related menu_items information when a StaticPage is saved.
    form = StaticPageAdminForm
    list_display = ('__unicode__', 'published') + CollectibleAdmin.list_display
    search_fields = ('name', 'text', )
    fieldsets = ((None,
                  {'fields': ('name', 'text',)}),
                 (_('Publication data'),
                  {
                      'classes': ('collapse',),
                      'fields': ('published', 'summary', 'menu_items')}),
                 )
    inlines = CollectibleAdmin.inlines + BaseContentAdmin.inlines

    def save_model(self, request, obj, form, change):
        """Persist the page, then synchronize the MenuItem objects pointing
        at it with the 'menu_items' selection submitted in the form."""
        super(CollectibleAdmin, self).save_model(request, obj, form, change)
        object_type = ContentType.objects.get_for_model(obj)
        selected_items_ids = form.data.getlist('menu_items')
        selected_items = set(MenuItem.objects.filter(pk__in=selected_items_ids))
        old_items = set(MenuItem.objects.filter(content_type=object_type,
                                                object_id=obj.id))
        # Menu items unchecked in the form lose their content binding...
        discarded_items = old_items.difference(selected_items)
        new_items = selected_items.difference(old_items)
        for menu_item in discarded_items:
            menu_item.content_type = None
            menu_item.object_id = None
            menu_item.content_view = None
            menu_item.save()
        # ...and newly checked items are pointed at this page, using the
        # default frontend view registered for StaticPage.
        for menu_item in new_items:
            menu_item.content_type = object_type
            menu_item.content_view = frontend.site.get_default_view_name(StaticPage)
            menu_item.object_id = obj.id
            menu_item.save()

admin.site.register(StaticPage, StaticPageAdmin)
class HTMLBlockAdminForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(HTMLBlockAdminForm, self).__init__(*args, **kwargs)
        # Raw HTML is edited as plain text, not through the rich-text widget.
        self.fields['text'].widget = AdminTextareaWidget()

class HTMLBlockAdmin(admin.ModelAdmin):
    form = HTMLBlockAdminForm
    search_fields = ('name', 'text', )

admin.site.register(HTMLBlock, HTMLBlockAdmin)
| gpl-3.0 |
redbaron/ansible | lib/ansible/runner/action_plugins/assemble.py | 14 | 6198 | # (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
import os
import os.path
import pipes
import shutil
import tempfile
import base64
import re
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
    """Action plugin for the ``assemble`` module: concatenates a directory of
    fragment files into a single file and pushes it to the remote host."""

    TRANSFERS_FILES = True

    def __init__(self, runner):
        self.runner = runner

    def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None):
        ''' assemble a file from a directory of fragments '''
        # Un-escape sequences such as "\n" in the delimiter exactly once,
        # instead of re-decoding the shared variable on every fragment as the
        # previous version did.
        if delimiter is not None:
            if isinstance(delimiter, bytes):
                delimiter = delimiter.decode('unicode-escape')
            else:
                import codecs
                delimiter = codecs.decode(delimiter, 'unicode_escape')
        tmpfd, temp_path = tempfile.mkstemp()
        tmp = os.fdopen(tmpfd, 'w')
        delimit_me = False
        add_newline = False

        for f in sorted(os.listdir(src_path)):
            if compiled_regexp and not compiled_regexp.search(f):
                continue
            fragment = "%s/%s" % (src_path, f)
            if not os.path.isfile(fragment):
                continue
            # Context manager closes the fragment's handle; the old
            # file(fragment).read() leaked a descriptor per fragment.
            with open(fragment) as fragment_fh:
                fragment_content = fragment_fh.read()

            # always put a newline between fragments if the previous fragment
            # didn't end with a newline.
            if add_newline:
                tmp.write('\n')

            # delimiters should only appear between fragments
            if delimit_me:
                if delimiter:
                    tmp.write(delimiter)
                    # always make sure there's a newline after the
                    # delimiter, so lines don't run together
                    if delimiter[-1] != '\n':
                        tmp.write('\n')

            tmp.write(fragment_content)
            delimit_me = True
            add_newline = not fragment_content.endswith('\n')

        tmp.close()
        return temp_path

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        """Entry point called by the runner.

        Delegates to the remote 'assemble' module for remote_src, otherwise
        assembles locally and pushes via the 'copy' module (or 'file' when
        the destination is already up to date)."""
        # load up options
        options = {}
        if complex_args:
            options.update(complex_args)
        options.update(utils.parse_kv(module_args))

        src = options.get('src', None)
        dest = options.get('dest', None)
        delimiter = options.get('delimiter', None)
        remote_src = utils.boolean(options.get('remote_src', 'yes'))
        regexp = options.get('regexp', None)

        if src is None or dest is None:
            result = dict(failed=True, msg="src and dest are required")
            return ReturnData(conn=conn, comm_ok=False, result=result)

        if remote_src:
            # Fragments already live on the remote host; run the module there.
            return self.runner._execute_module(conn, tmp, 'assemble', module_args, inject=inject, complex_args=complex_args)
        elif '_original_file' in inject:
            src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
        else:
            # the source is local, so expand it here
            src = os.path.expanduser(src)

        _re = None
        if regexp is not None:
            _re = re.compile(regexp)

        # Does all work assembling the file
        path = self._assemble_from_fragments(src, delimiter, _re)
        path_checksum = utils.checksum_s(path)
        dest = self.runner._remote_expand_user(conn, dest, tmp)
        remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)

        if path_checksum != remote_checksum:
            # Close the assembled temp file promptly instead of leaking the
            # handle via file(path).read().
            with open(path) as assembled_fh:
                resultant = assembled_fh.read()
            if self.runner.diff:
                dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
                if 'content' in dest_result.result:
                    dest_contents = dest_result.result['content']
                    if dest_result.result['encoding'] == 'base64':
                        dest_contents = base64.b64decode(dest_contents)
                    else:
                        raise Exception("unknown encoding, failed: %s" % dest_result.result)
            xfered = self.runner._transfer_str(conn, tmp, 'src', resultant)

            # fix file permissions when the copy is done as a different user
            if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root':
                self.runner._remote_chmod(conn, 'a+r', xfered, tmp)

            # run the copy module
            new_module_args = dict(
                src=xfered,
                dest=dest,
                original_basename=os.path.basename(src),
            )
            module_args_tmp = utils.merge_module_args(module_args, new_module_args)

            if self.runner.noop_on_check(inject):
                return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
            else:
                res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject)
                res.diff = dict(after=resultant)
                return res
        else:
            # Checksums match: nothing to transfer, but still run the 'file'
            # module so ownership/permission arguments are enforced.
            # Fixed: the original passed src=xfered here, but 'xfered' is only
            # assigned in the branch above, so an up-to-date destination
            # always raised NameError.
            new_module_args = dict(
                dest=dest,
                original_basename=os.path.basename(src),
            )
            # make sure checkmod is passed on correctly
            if self.runner.noop_on_check(inject):
                new_module_args['CHECKMODE'] = True

            module_args_tmp = utils.merge_module_args(module_args, new_module_args)

            return self.runner._execute_module(conn, tmp, 'file', module_args_tmp, inject=inject)
| gpl-3.0 |
techdragon/django | tests/utils_tests/test_decorators.py | 319 | 4870 | from django.http import HttpResponse
from django.template import engines
from django.template.response import TemplateResponse
from django.test import RequestFactory, SimpleTestCase
from django.utils.decorators import classproperty, decorator_from_middleware
class ProcessViewMiddleware(object):
    # Minimal middleware implementing only the process_view hook.
    def process_view(self, request, view_func, view_args, view_kwargs):
        pass

process_view_dec = decorator_from_middleware(ProcessViewMiddleware)

@process_view_dec
def process_view(request):
    return HttpResponse()

class ClassProcessView(object):
    # Callable class: checks the decorator also wraps non-function views.
    def __call__(self, request):
        return HttpResponse()

class_process_view = process_view_dec(ClassProcessView())
class FullMiddleware(object):
    """Middleware implementing every hook; each hook leaves a marker
    attribute on the request so tests can verify which hooks ran."""

    def process_request(self, request):
        request.process_request_reached = True

    def process_view(self, request, view_func, view_args, view_kwargs):
        request.process_view_reached = True

    def process_template_response(self, request, response):
        request.process_template_response_reached = True
        return response

    def process_response(self, request, response):
        request.process_response_reached = True
        # Record the body exactly as seen here; callers assert the response
        # was already rendered by the time this hook fired.
        request.process_response_content = response.content
        return response
full_dec = decorator_from_middleware(FullMiddleware)
class DecoratorFromMiddlewareTests(SimpleTestCase):
    """
    Tests for view decorators created using
    ``django.utils.decorators.decorator_from_middleware``.
    """
    rf = RequestFactory()
    def test_process_view_middleware(self):
        """
        Test a middleware that implements process_view.
        """
        process_view(self.rf.get('/'))
    def test_callable_process_view_middleware(self):
        """
        Test a middleware that implements process_view, operating on a callable class.
        """
        class_process_view(self.rf.get('/'))
    def test_full_dec_normal(self):
        """
        Test that all methods of middleware are called for normal HttpResponses
        """
        @full_dec
        def normal_view(request):
            template = engines['django'].from_string("Hello world")
            return HttpResponse(template.render())
        request = self.rf.get('/')
        normal_view(request)
        self.assertTrue(getattr(request, 'process_request_reached', False))
        self.assertTrue(getattr(request, 'process_view_reached', False))
        # process_template_response must not be called for HttpResponse
        self.assertFalse(getattr(request, 'process_template_response_reached', False))
        self.assertTrue(getattr(request, 'process_response_reached', False))
    def test_full_dec_templateresponse(self):
        """
        Test that all methods of middleware are called for TemplateResponses in
        the right sequence.
        """
        @full_dec
        def template_response_view(request):
            template = engines['django'].from_string("Hello world")
            return TemplateResponse(request, template)
        request = self.rf.get('/')
        response = template_response_view(request)
        self.assertTrue(getattr(request, 'process_request_reached', False))
        self.assertTrue(getattr(request, 'process_view_reached', False))
        self.assertTrue(getattr(request, 'process_template_response_reached', False))
        # response must not be rendered yet.
        self.assertFalse(response._is_rendered)
        # process_response must not be called until after response is rendered,
        # otherwise some decorators like csrf_protect and gzip_page will not
        # work correctly. See #16004
        self.assertFalse(getattr(request, 'process_response_reached', False))
        response.render()
        self.assertTrue(getattr(request, 'process_response_reached', False))
        # Check that process_response saw the rendered content
        self.assertEqual(request.process_response_content, b"Hello world")
class ClassPropertyTest(SimpleTestCase):
    """Tests for ``django.utils.decorators.classproperty``."""
    def test_getter(self):
        class Foo(object):
            foo_attr = 123
            def __init__(self):
                self.foo_attr = 456
            @classproperty
            def foo(cls):
                return cls.foo_attr
        class Bar(object):
            bar = classproperty()
            @bar.getter
            def bar(cls):
                return 123
        # classproperty reads the *class* attribute, ignoring instance state.
        self.assertEqual(Foo.foo, 123)
        self.assertEqual(Foo().foo, 123)
        self.assertEqual(Bar.bar, 123)
        self.assertEqual(Bar().bar, 123)
    def test_override_getter(self):
        # A later .getter registration replaces the original getter.
        class Foo(object):
            @classproperty
            def foo(cls):
                return 123
            @foo.getter
            def foo(cls):
                return 456
        self.assertEqual(Foo.foo, 456)
        self.assertEqual(Foo().foo, 456)
| bsd-3-clause |
ncos/lisa | src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/chardet/eucjpprober.py | 8 | 3841 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import ProbingState, MachineState
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJP_SM_MODEL
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP.

    Combines a byte-level coding state machine with a context analysis
    and a character-distribution analysis; the final confidence is the
    maximum of the two analyzers.
    """
    def __init__(self):
        super(EUCJPProber, self).__init__()
        self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
        self.distribution_analyzer = EUCJPDistributionAnalysis()
        self.context_analyzer = EUCJPContextAnalysis()
        self.reset()
    def reset(self):
        """Reset inherited prober state plus the EUC-JP context analyzer."""
        super(EUCJPProber, self).reset()
        self.context_analyzer.reset()
    @property
    def charset_name(self):
        return "EUC-JP"
    @property
    def language(self):
        return "Japanese"
    def feed(self, byte_str):
        """Feed a chunk of bytes through the state machine and analyzers.

        Returns the prober's ProbingState after consuming the chunk.
        """
        for i in range(len(byte_str)):
            # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
            coding_state = self.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:
                # Illegal byte sequence for EUC-JP: rule this charset out.
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                # A complete character was recognized; feed it (and its
                # preceding byte) to both analyzers.
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    # Character straddles the previous chunk boundary, so use
                    # the byte saved from the last feed() call.
                    self._last_char[1] = byte_str[0]
                    self.context_analyzer.feed(self._last_char, char_len)
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.context_analyzer.feed(byte_str[i - 1:i + 1],
                                               char_len)
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)
        # Remember the last byte for the chunk-boundary case above.
        self._last_char[0] = byte_str[-1]
        if self.state == ProbingState.DETECTING:
            if (self.context_analyzer.got_enough_data() and
               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT
        return self.state
    def get_confidence(self):
        """Return the higher of the context and distribution confidences."""
        context_conf = self.context_analyzer.get_confidence()
        distrib_conf = self.distribution_analyzer.get_confidence()
        return max(context_conf, distrib_conf)
| mit |
HybridF5/jacket | jacket/tests/compute/functional/api_sample_tests/test_migrations.py | 1 | 7205 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from jacket import context
from jacket.objects import compute
from jacket.tests.compute.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'compute.api.openstack.compute.legacy_v2.extensions')
# NOTE(ShaoHe Feng) here I can not use uuidsentinel, it generate a random
# UUID. The uuid in doc/api_samples files is fixed.
INSTANCE_UUID_1 = "8600d31b-d1a1-4632-b2ff-45c2be1a70ff"
INSTANCE_UUID_2 = "9128d044-7b61-403e-b766-7547076ff6c1"
class MigrationsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21):
    """API sample test for the legacy os-migrations extension.

    Stubs the compute API's get_migrations() with two fixed resize
    records and verifies GET /os-migrations against the stored sample.
    """
    ADMIN_API = True
    extension_name = "os-migrations"
    def _get_flags(self):
        f = super(MigrationsSamplesJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'compute.api.openstack.compute.contrib.migrations.Migrations')
        return f
    def _stub_migrations(self, context, filters):
        # Replacement for API.get_migrations(); ignores the filters argument
        # and always returns the same two finished resize migrations.
        fake_migrations = [
            {
                'id': 1234,
                'source_node': 'node1',
                'dest_node': 'node2',
                'source_compute': 'compute1',
                'dest_compute': 'compute2',
                'dest_host': '1.2.3.4',
                'status': 'Done',
                'instance_uuid': 'instance_id_123',
                'old_instance_type_id': 1,
                'new_instance_type_id': 2,
                'migration_type': 'resize',
                'hidden': False,
                'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
                'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
                'deleted_at': None,
                'deleted': False
            },
            {
                'id': 5678,
                'source_node': 'node10',
                'dest_node': 'node20',
                'source_compute': 'compute10',
                'dest_compute': 'compute20',
                'dest_host': '5.6.7.8',
                'status': 'Done',
                'instance_uuid': 'instance_id_456',
                'old_instance_type_id': 5,
                'new_instance_type_id': 6,
                'migration_type': 'resize',
                'hidden': False,
                'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
                'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
                'deleted_at': None,
                'deleted': False
            }
        ]
        return fake_migrations
    def setUp(self):
        super(MigrationsSamplesJsonTest, self).setUp()
        self.stub_out('compute.compute.api.API.get_migrations',
                      self._stub_migrations)
    def test_get_migrations(self):
        response = self._do_get('os-migrations')
        self.assertEqual(200, response.status_code)
        self._verify_response('migrations-get', {}, response, 200)
class MigrationsSamplesJsonTestV2_23(api_sample_base.ApiSampleTestBaseV21):
    """API sample test for os-migrations at microversion 2.23.

    Seeds the database with four migration records (live-migration and
    resize, each in both in-progress and finished states) and verifies
    GET /os-migrations against the stored sample.

    Note: the month arguments below previously read ``0o1`` -- a
    mechanical 2to3 conversion of the py2 octal literal ``01``.  The
    value is identical; plain ``1`` (January) is used for readability.
    """
    ADMIN_API = True
    extension_name = "os-migrations"
    microversion = '2.23'
    scenarios = [('v2_23', {'api_major_version': 'v2.1'})]
    fake_migrations = [
        # in-progress live-migration.
        {
            'source_node': 'node1',
            'dest_node': 'node2',
            'source_compute': 'compute1',
            'dest_compute': 'compute2',
            'dest_host': '1.2.3.4',
            'status': 'running',
            'instance_uuid': INSTANCE_UUID_1,
            'old_instance_type_id': 1,
            'new_instance_type_id': 2,
            'migration_type': 'live-migration',
            'hidden': False,
            'created_at': datetime.datetime(2016, 1, 29, 13, 42, 2),
            'updated_at': datetime.datetime(2016, 1, 29, 13, 42, 2),
            'deleted_at': None,
            'deleted': False
        },
        # non in-progress live-migration.
        {
            'source_node': 'node1',
            'dest_node': 'node2',
            'source_compute': 'compute1',
            'dest_compute': 'compute2',
            'dest_host': '1.2.3.4',
            'status': 'error',
            'instance_uuid': INSTANCE_UUID_1,
            'old_instance_type_id': 1,
            'new_instance_type_id': 2,
            'migration_type': 'live-migration',
            'hidden': False,
            'created_at': datetime.datetime(2016, 1, 29, 13, 42, 2),
            'updated_at': datetime.datetime(2016, 1, 29, 13, 42, 2),
            'deleted_at': None,
            'deleted': False
        },
        # non in-progress resize.
        {
            'source_node': 'node10',
            'dest_node': 'node20',
            'source_compute': 'compute10',
            'dest_compute': 'compute20',
            'dest_host': '5.6.7.8',
            'status': 'error',
            'instance_uuid': INSTANCE_UUID_2,
            'old_instance_type_id': 5,
            'new_instance_type_id': 6,
            'migration_type': 'resize',
            'hidden': False,
            'created_at': datetime.datetime(2016, 1, 22, 13, 42, 2),
            'updated_at': datetime.datetime(2016, 1, 22, 13, 42, 2),
            'deleted_at': None,
            'deleted': False
        },
        # in-progress resize.
        {
            'source_node': 'node10',
            'dest_node': 'node20',
            'source_compute': 'compute10',
            'dest_compute': 'compute20',
            'dest_host': '5.6.7.8',
            'status': 'migrating',
            'instance_uuid': INSTANCE_UUID_2,
            'old_instance_type_id': 5,
            'new_instance_type_id': 6,
            'migration_type': 'resize',
            'hidden': False,
            'created_at': datetime.datetime(2016, 1, 22, 13, 42, 2),
            'updated_at': datetime.datetime(2016, 1, 22, 13, 42, 2),
            'deleted_at': None,
            'deleted': False
        }
    ]
    def setUp(self):
        super(MigrationsSamplesJsonTestV2_23, self).setUp()
        self.api.microversion = self.microversion
        fake_context = context.RequestContext('fake', 'fake')
        # Persist each fixture as a real Migration object.
        for mig in self.fake_migrations:
            mig_obj = compute.Migration(context=fake_context, **mig)
            mig_obj.create()
    def test_get_migrations_v2_23(self):
        response = self._do_get('os-migrations')
        self.assertEqual(200, response.status_code)
        self._verify_response(
            'migrations-get',
            {"instance_1": INSTANCE_UUID_1, "instance_2": INSTANCE_UUID_2},
            response, 200)
| apache-2.0 |
manasapte/pants | tests/python/pants_test/goal/test_products.py | 17 | 4205 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from contextlib import contextmanager
from pants.goal.products import MultipleRootedProducts, Products
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_open
from pants_test.base_test import BaseTest
class ProductsTest(BaseTest):
    """Unit tests for pants' Products / ProductMapping bookkeeping."""
    def setUp(self):
        super(ProductsTest, self).setUp()
        self.products = Products()
    def test_require(self):
        self.products.require('foo')
        self.assertTrue(self.products.isrequired('foo'))
        self.assertFalse(self.products.isrequired('bar'))
        # require should not cross-contaminate require_data
        self.assertFalse(self.products.is_required_data('foo'))
        self.assertFalse(self.products.is_required_data('bar'))
    def test_get(self):
        # get() must return the same ProductMapping instance for a given key.
        foo_product_mapping1 = self.products.get('foo')
        foo_product_mapping2 = self.products.get('foo')
        self.assertIsInstance(foo_product_mapping1, Products.ProductMapping)
        self.assertIs(foo_product_mapping1, foo_product_mapping2)
    def test_get_does_not_require(self):
        self.assertFalse(self.products.isrequired('foo'))
        self.products.get('foo')
        self.assertFalse(self.products.isrequired('foo'))
        self.products.require('foo')
        self.assertTrue(self.products.isrequired('foo'))
    def test_require_data(self):
        self.products.require_data('foo')
        self.assertTrue(self.products.is_required_data('foo'))
        self.assertFalse(self.products.is_required_data('bar'))
        # require_data should not cross-contaminate require
        self.assertFalse(self.products.isrequired('foo'))
        self.assertFalse(self.products.isrequired('bar'))
    def test_get_data(self):
        # get_data() with a factory memoizes the created value.
        self.assertIsNone(self.products.get_data('foo'))
        data1 = self.products.get_data('foo', dict)
        data2 = self.products.get_data('foo', dict)
        self.assertIsInstance(data1, dict)
        self.assertIs(data1, data2)
    def test_get_data_does_not_require_data(self):
        self.assertFalse(self.products.is_required_data('foo'))
        self.products.get_data('foo')
        self.assertFalse(self.products.is_required_data('foo'))
        self.products.require_data('foo')
        self.assertTrue(self.products.is_required_data('foo'))
    def test_empty_products(self):
        foo_product_mapping = self.products.get('foo')
        self.assertFalse(foo_product_mapping)
    @contextmanager
    def add_products(self, context_products, product_type, target, *products):
        # Helper: registers *products* under product_type/target, backed by
        # real files in a temporary directory that lives for the with-block.
        product_mapping = context_products.get(product_type)
        with temporary_dir() as outdir:
            def create_product(product):
                with safe_open(os.path.join(outdir, product), mode='w') as fp:
                    fp.write(product)
                return product
            product_mapping.add(target, outdir, map(create_product, products))
            # NOTE(review): this yields the temporary_dir *function*, not the
            # directory path; visible callers never bind the yielded value, so
            # it is harmless, but `yield outdir` was probably intended -- confirm.
            yield temporary_dir
    def test_non_empty_products(self):
        target = self.make_target('c')
        with self.add_products(self.products, 'foo', target, 'a.class'):
            foo_product_mapping = self.products.get('foo')
            self.assertTrue(foo_product_mapping)
    def test_empty_data(self):
        foo_product_mapping = self.products.get_data('foo')
        self.assertFalse(foo_product_mapping)
    @contextmanager
    def add_data(self, context_products, data_type, target, *products):
        # Same as add_products, but for the data-product API backed by
        # MultipleRootedProducts keyed on target.
        make_products = lambda: defaultdict(MultipleRootedProducts)
        data_by_target = context_products.get_data(data_type, make_products)
        with temporary_dir() as outdir:
            def create_product(product):
                abspath = os.path.join(outdir, product)
                with safe_open(abspath, mode='w') as fp:
                    fp.write(product)
                return abspath
            data_by_target[target].add_abs_paths(outdir, map(create_product, products))
            # NOTE(review): same `yield temporary_dir` quirk as add_products.
            yield temporary_dir
    def test_non_empty_data(self):
        target = self.make_target('c')
        with self.add_data(self.products, 'foo', target, 'a.class'):
            foo_product_mapping = self.products.get_data('foo')
            self.assertTrue(foo_product_mapping)
| apache-2.0 |
ivanovev/tmb | gui/RSW.py | 1 | 1830 |
from collections import OrderedDict as OD
from util import Data, control_cb, monitor_cb, alarm_trace_cb, dev_io_cb
def status_fmt_cb(val, read=True, n=0):
    """Format bit *n* of a hexadecimal status word as '1' or '0'.

    `val` is a hex string (e.g. '1f'); `read` is part of the common
    fmt_cb callback signature and is unused here.
    """
    bit = (int(val, 16) >> n) & 1
    return str(bit)
def get_ctrl(dev):
    """Build the 'Settings' control Data structure for the RSW device.

    `dev` is unused here; presumably kept for the common get_ctrl(dev)
    callback signature -- confirm against callers.
    """
    ctrl = Data(name='Settings', send=True, io_cb=dev_io_cb)
    # Channel selectors: the wire value '1' selects channel A, '0' channel B.
    ctrl.add('chup', label='Channel UP', wdgt='combo', state='readonly', value=OD([('Channel A', '1'), ('Channel B', '0')]))
    ctrl.add('chdn', label='Channel DOWN', wdgt='combo', state='readonly', value=OD([('Channel A', '1'), ('Channel B', '0')]))
    ctrl.add('test', label='Test signal', wdgt='combo', state='readonly', value=OD([('UC-HF', '0'), ('UC-IF', '1'), ('DC-HF', '2'), ('DC-IF', '3')]))
    ctrl.add_page('System')
    ctrl.add('commit', label='EFC commit enable', wdgt='combo', state='readonly', value=['ON', 'OFF'], text='ON')
    return ctrl
def get_mntr(dev):
    """Build the 'status' monitoring Data structure for the RSW device.

    `dev` is unused here; presumably kept for the common get_mntr(dev)
    callback signature -- confirm against callers.
    """
    mntr = Data(name='status', send=True, io_cb=dev_io_cb)
    mntr.add('status', wdgt='alarm', trace_cb=alarm_trace_cb, fmt_cb=status_fmt_cb, msg='Power supply')
    # Each statusN alarm displays bit N of the same status word; n is bound
    # as a parameter of add_status, so every fmt_cb lambda tests its own bit.
    add_status = lambda n, msg: mntr.add('status%d' % n, wdgt='alarm', send=False, trace_cb=alarm_trace_cb, fmt_cb=lambda val, read: status_fmt_cb(val,read,n), msg=msg)
    add_status(1, 'Ch1 (sig)')
    add_status(2, 'Ch2 (aux)')
    add_status(3, 'Ch3 (br)')
    add_status(4, 'Uch1 (sig) < LVL')
    add_status(5, 'Uch3 (br) < LVL')
    mntr.add_page('Vcc')
    # Supply-voltage readouts (read-only entries).
    mntr.add('3v', wdgt='entry', label='3V', state='readonly', msg='3V')
    mntr.add('11v', wdgt='entry', label='11V', state='readonly', msg='11V')
    mntr.add('2_5v1', wdgt='entry', label='UC 2.5V', state='readonly', msg='UC 2.5V')
    mntr.add('2_5v2', wdgt='entry', label='DC 2.5V', state='readonly', msg='DC 2.5V')
    return mntr
def get_menu(dev):
    """Return the ordered Control/Monitor menu entries for the RSW device."""
    return OD([('Control', control_cb), ('Monitor', monitor_cb)])
| gpl-3.0 |
moijes12/oh-mainline | mysite/customs/management/commands/customs_debugger.py | 15 | 3664 | # This file is part of OpenHatch.
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
from django.core.management.base import BaseCommand
from mysite.search.models import Bug
class Command(BaseCommand):
    """Management command with tools for inspecting/cleaning the Bug table."""
    help = "A bunch of tools for checking and cleaning the Bug database."
    def list_old_bugs(self, days, hours=0):
        """Print every Bug last polled more than days+hours ago, then a total."""
        count = 0
        x_days_ago = (datetime.datetime.now() -
                      datetime.timedelta(days=days, hours=hours))
        for bug in Bug.all_bugs.filter(last_polled__lt=x_days_ago):
            count += 1
            print "%d - %s" % (count, str(bug))
        print "There are a total of %d Bug objects that are %d days %d hours old." % (count, days, hours)
    def list_closed_bugs(self):
        """Print every Bug flagged looks_closed, then a total."""
        count = 0
        for bug in Bug.all_bugs.filter(looks_closed=True):
            count += 1
            print "%d - %s" % (count, str(bug))
        print "There are a total of %d closed Bug objects." % count
    def delete_old_bugs(self, days, hours=0):
        """Delete every Bug last polled more than days+hours ago (immediate)."""
        x_days_ago = (datetime.datetime.now() -
                      datetime.timedelta(days=days, hours=hours))
        Bug.all_bugs.filter(last_polled__lt=x_days_ago).delete()
    def delete_closed_bugs(self):
        """Delete every Bug flagged looks_closed (immediate)."""
        Bug.all_bugs.filter(looks_closed=True).delete()
    def delete_all_bugs(self):
        """Delete ALL Bug objects (immediate, irreversible)."""
        Bug.all_bugs.all().delete()
    def show_usage(self):
        print """
usage: ./manage.py customs_debugger COMMAND
The following commands are available:
list_old_bugs List all Bug objects older than one day plus one hour.
list_very_old_bugs List all Bug objects older than two days.
list_closed_bugs List all Bug objects that look closed.
delete_old_bugs Delete all Bug objects older than one day plus one hour.
delete_very_old_bugs Delete all Bug objects older than two days.
delete_closed_bugs Delete all Bug objects that look closed.
delete_all_bugs Delete ALL Bug objects. Period. Useful if you want to
test a bug import from scratch. Not so useful on a
production server.
NOTE: These commands are executed immediately, so make sure you are
executing what you want, especially with the deleting commands."""
    def handle(self, *args, **options):
        # Exactly one command is expected: more than one argument, an unknown
        # argument, or no argument at all falls through to the usage text.
        if len(args) > 1:
            self.show_usage()
        elif 'list_old_bugs' in args:
            self.list_old_bugs(days=1, hours=1)
        elif 'list_very_old_bugs' in args:
            self.list_old_bugs(days=2)
        elif 'list_closed_bugs' in args:
            self.list_closed_bugs()
        elif 'delete_old_bugs' in args:
            self.delete_old_bugs(days=1, hours=1)
        elif 'delete_very_old_bugs' in args:
            self.delete_old_bugs(days=2)
        elif 'delete_closed_bugs' in args:
            self.delete_closed_bugs()
        elif 'delete_all_bugs' in args:
            self.delete_all_bugs()
        else:
            self.show_usage()
| agpl-3.0 |
ClimbsRocks/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
    # Test BayesianRidge on diabetes
    # NOTE(review): the unconditional SkipTest below makes everything after
    # it unreachable; the body is kept as documentation of the intended
    # (currently xfailed) check.
    raise SkipTest("XFailed Test")
    diabetes = datasets.load_diabetes()
    X, y = diabetes.data, diabetes.target
    clf = BayesianRidge(compute_score=True)
    # Test with more samples than features
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
    # Test with more features than samples
    X = X[:5, :]
    y = y[:5]
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
    """BayesianRidge should approximately recover y = x on a tiny dataset."""
    X = np.array([[1], [2], [6], [8], [10]])
    Y = np.array([1, 2, 6, 8, 10])
    model = BayesianRidge(compute_score=True)
    model.fit(X, Y)
    probe = [[1], [3], [4]]
    # The fitted model should reproduce the identity mapping to ~2 decimals.
    assert_array_almost_equal(model.predict(probe), [1, 3, 4], 2)
def test_toy_ard_object():
    """ARDRegression should approximately recover y = x on a tiny dataset."""
    X = np.array([[1], [2], [3]])
    Y = np.array([1, 2, 3])
    model = ARDRegression(compute_score=True)
    model.fit(X, Y)
    probe = [[1], [3], [4]]
    # The fitted model should reproduce the identity mapping to ~2 decimals.
    assert_array_almost_equal(model.predict(probe), [1, 3, 4], 2)
| bsd-3-clause |
bbsan2k/nzbToMedia | libs/unidecode/x054.py | 251 | 4583 | data = (
'Mie ', # 0x00
'Xu ', # 0x01
'Mang ', # 0x02
'Chi ', # 0x03
'Ge ', # 0x04
'Xuan ', # 0x05
'Yao ', # 0x06
'Zi ', # 0x07
'He ', # 0x08
'Ji ', # 0x09
'Diao ', # 0x0a
'Cun ', # 0x0b
'Tong ', # 0x0c
'Ming ', # 0x0d
'Hou ', # 0x0e
'Li ', # 0x0f
'Tu ', # 0x10
'Xiang ', # 0x11
'Zha ', # 0x12
'Xia ', # 0x13
'Ye ', # 0x14
'Lu ', # 0x15
'A ', # 0x16
'Ma ', # 0x17
'Ou ', # 0x18
'Xue ', # 0x19
'Yi ', # 0x1a
'Jun ', # 0x1b
'Chou ', # 0x1c
'Lin ', # 0x1d
'Tun ', # 0x1e
'Yin ', # 0x1f
'Fei ', # 0x20
'Bi ', # 0x21
'Qin ', # 0x22
'Qin ', # 0x23
'Jie ', # 0x24
'Bu ', # 0x25
'Fou ', # 0x26
'Ba ', # 0x27
'Dun ', # 0x28
'Fen ', # 0x29
'E ', # 0x2a
'Han ', # 0x2b
'Ting ', # 0x2c
'Hang ', # 0x2d
'Shun ', # 0x2e
'Qi ', # 0x2f
'Hong ', # 0x30
'Zhi ', # 0x31
'Shen ', # 0x32
'Wu ', # 0x33
'Wu ', # 0x34
'Chao ', # 0x35
'Ne ', # 0x36
'Xue ', # 0x37
'Xi ', # 0x38
'Chui ', # 0x39
'Dou ', # 0x3a
'Wen ', # 0x3b
'Hou ', # 0x3c
'Ou ', # 0x3d
'Wu ', # 0x3e
'Gao ', # 0x3f
'Ya ', # 0x40
'Jun ', # 0x41
'Lu ', # 0x42
'E ', # 0x43
'Ge ', # 0x44
'Mei ', # 0x45
'Ai ', # 0x46
'Qi ', # 0x47
'Cheng ', # 0x48
'Wu ', # 0x49
'Gao ', # 0x4a
'Fu ', # 0x4b
'Jiao ', # 0x4c
'Hong ', # 0x4d
'Chi ', # 0x4e
'Sheng ', # 0x4f
'Ne ', # 0x50
'Tun ', # 0x51
'Fu ', # 0x52
'Yi ', # 0x53
'Dai ', # 0x54
'Ou ', # 0x55
'Li ', # 0x56
'Bai ', # 0x57
'Yuan ', # 0x58
'Kuai ', # 0x59
'[?] ', # 0x5a
'Qiang ', # 0x5b
'Wu ', # 0x5c
'E ', # 0x5d
'Shi ', # 0x5e
'Quan ', # 0x5f
'Pen ', # 0x60
'Wen ', # 0x61
'Ni ', # 0x62
'M ', # 0x63
'Ling ', # 0x64
'Ran ', # 0x65
'You ', # 0x66
'Di ', # 0x67
'Zhou ', # 0x68
'Shi ', # 0x69
'Zhou ', # 0x6a
'Tie ', # 0x6b
'Xi ', # 0x6c
'Yi ', # 0x6d
'Qi ', # 0x6e
'Ping ', # 0x6f
'Zi ', # 0x70
'Gu ', # 0x71
'Zi ', # 0x72
'Wei ', # 0x73
'Xu ', # 0x74
'He ', # 0x75
'Nao ', # 0x76
'Xia ', # 0x77
'Pei ', # 0x78
'Yi ', # 0x79
'Xiao ', # 0x7a
'Shen ', # 0x7b
'Hu ', # 0x7c
'Ming ', # 0x7d
'Da ', # 0x7e
'Qu ', # 0x7f
'Ju ', # 0x80
'Gem ', # 0x81
'Za ', # 0x82
'Tuo ', # 0x83
'Duo ', # 0x84
'Pou ', # 0x85
'Pao ', # 0x86
'Bi ', # 0x87
'Fu ', # 0x88
'Yang ', # 0x89
'He ', # 0x8a
'Zha ', # 0x8b
'He ', # 0x8c
'Hai ', # 0x8d
'Jiu ', # 0x8e
'Yong ', # 0x8f
'Fu ', # 0x90
'Que ', # 0x91
'Zhou ', # 0x92
'Wa ', # 0x93
'Ka ', # 0x94
'Gu ', # 0x95
'Ka ', # 0x96
'Zuo ', # 0x97
'Bu ', # 0x98
'Long ', # 0x99
'Dong ', # 0x9a
'Ning ', # 0x9b
'Tha ', # 0x9c
'Si ', # 0x9d
'Xian ', # 0x9e
'Huo ', # 0x9f
'Qi ', # 0xa0
'Er ', # 0xa1
'E ', # 0xa2
'Guang ', # 0xa3
'Zha ', # 0xa4
'Xi ', # 0xa5
'Yi ', # 0xa6
'Lie ', # 0xa7
'Zi ', # 0xa8
'Mie ', # 0xa9
'Mi ', # 0xaa
'Zhi ', # 0xab
'Yao ', # 0xac
'Ji ', # 0xad
'Zhou ', # 0xae
'Ge ', # 0xaf
'Shuai ', # 0xb0
'Zan ', # 0xb1
'Xiao ', # 0xb2
'Ke ', # 0xb3
'Hui ', # 0xb4
'Kua ', # 0xb5
'Huai ', # 0xb6
'Tao ', # 0xb7
'Xian ', # 0xb8
'E ', # 0xb9
'Xuan ', # 0xba
'Xiu ', # 0xbb
'Wai ', # 0xbc
'Yan ', # 0xbd
'Lao ', # 0xbe
'Yi ', # 0xbf
'Ai ', # 0xc0
'Pin ', # 0xc1
'Shen ', # 0xc2
'Tong ', # 0xc3
'Hong ', # 0xc4
'Xiong ', # 0xc5
'Chi ', # 0xc6
'Wa ', # 0xc7
'Ha ', # 0xc8
'Zai ', # 0xc9
'Yu ', # 0xca
'Di ', # 0xcb
'Pai ', # 0xcc
'Xiang ', # 0xcd
'Ai ', # 0xce
'Hen ', # 0xcf
'Kuang ', # 0xd0
'Ya ', # 0xd1
'Da ', # 0xd2
'Xiao ', # 0xd3
'Bi ', # 0xd4
'Yue ', # 0xd5
'[?] ', # 0xd6
'Hua ', # 0xd7
'Sasou ', # 0xd8
'Kuai ', # 0xd9
'Duo ', # 0xda
'[?] ', # 0xdb
'Ji ', # 0xdc
'Nong ', # 0xdd
'Mou ', # 0xde
'Yo ', # 0xdf
'Hao ', # 0xe0
'Yuan ', # 0xe1
'Long ', # 0xe2
'Pou ', # 0xe3
'Mang ', # 0xe4
'Ge ', # 0xe5
'E ', # 0xe6
'Chi ', # 0xe7
'Shao ', # 0xe8
'Li ', # 0xe9
'Na ', # 0xea
'Zu ', # 0xeb
'He ', # 0xec
'Ku ', # 0xed
'Xiao ', # 0xee
'Xian ', # 0xef
'Lao ', # 0xf0
'Bo ', # 0xf1
'Zhe ', # 0xf2
'Zha ', # 0xf3
'Liang ', # 0xf4
'Ba ', # 0xf5
'Mie ', # 0xf6
'Le ', # 0xf7
'Sui ', # 0xf8
'Fou ', # 0xf9
'Bu ', # 0xfa
'Han ', # 0xfb
'Heng ', # 0xfc
'Geng ', # 0xfd
'Shuo ', # 0xfe
'Ge ', # 0xff
)
| gpl-3.0 |
mheap/ansible | lib/ansible/modules/system/sefcontext.py | 15 | 8216 | #!/usr/bin/python
# (c) 2016, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: sefcontext
short_description: Manages SELinux file context mapping definitions
description:
- Manages SELinux file context mapping definitions.
- Similar to the C(semanage fcontext) command.
version_added: '2.2'
options:
target:
description:
- Target path (expression).
required: yes
aliases: [ path ]
ftype:
description:
- File type.
default: a
setype:
description:
- SELinux type for the specified target.
required: yes
seuser:
description:
- SELinux user for the specified target.
selevel:
description:
- SELinux range for the specified target.
aliases: [ serange ]
state:
description:
- Desired boolean value.
choices: [ absent, present ]
default: present
reload:
description:
- Reload SELinux policy after commit.
type: bool
default: 'yes'
notes:
- The changes are persistent across reboots
- The M(sefcontext) module does not modify existing files to the new
SELinux context(s), so it is advisable to first create the SELinux
file contexts before creating files, or run C(restorecon) manually
for the existing files that require the new SELinux file contexts.
requirements:
- libselinux-python
- policycoreutils-python
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
# Allow apache to modify files in /srv/git_repos
- sefcontext:
target: '/srv/git_repos(/.*)?'
setype: httpd_git_rw_content_t
state: present
'''
RETURN = r'''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_native
try:
import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
HAVE_SEOBJECT = False
# Add missing entries (backward compatible)
if HAVE_SEOBJECT:
    # Map the single-letter ftype module options to seobject's file-type
    # constants; update() preserves entries newer seobject versions define.
    seobject.file_types.update(dict(
        a=seobject.SEMANAGE_FCONTEXT_ALL,
        b=seobject.SEMANAGE_FCONTEXT_BLOCK,
        c=seobject.SEMANAGE_FCONTEXT_CHAR,
        d=seobject.SEMANAGE_FCONTEXT_DIR,
        f=seobject.SEMANAGE_FCONTEXT_REG,
        l=seobject.SEMANAGE_FCONTEXT_LINK,
        p=seobject.SEMANAGE_FCONTEXT_PIPE,
        s=seobject.SEMANAGE_FCONTEXT_SOCK,
    ))
# Make backward compatible
# Human-readable file-type strings as used in the keys returned by
# seobject.fcontextRecords.get_all() (see semanage_fcontext_exists).
option_to_file_type_str = dict(
    a='all files',
    b='block device',
    c='character device',
    d='directory',
    f='regular file',
    l='symbolic link',
    p='named pipe',
    s='socket file',
)
def semanage_fcontext_exists(sefcontext, target, ftype):
    '''Return the policy's file-context record for (target, ftype), or None.

    Records returned by fcontextRecords.get_all() are keyed on the target
    expression plus the *human readable* file-type string, so the single
    letter ftype option is translated first.
    '''
    lookup_key = (target, option_to_file_type_str[ftype])
    return sefcontext.get_all().get(lookup_key)
def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
    ''' Add or modify SELinux file context mapping definition to the policy.

    Updates an existing (target, ftype) record in place, or adds a new one
    with defaults seuser=system_u / serange=s0.  Honors check mode and
    produces a unified-diff preview when --diff is requested.  Exits the
    module via exit_json/fail_json; never returns.
    '''
    changed = False
    prepared_diff = ''
    try:
        sefcontext = seobject.fcontextRecords(sestore)
        sefcontext.set_reload(do_reload)
        exists = semanage_fcontext_exists(sefcontext, target, ftype)
        if exists:
            # Modify existing entry
            orig_seuser, orig_serole, orig_setype, orig_serange = exists
            # Unspecified fields inherit their current values from the record.
            if seuser is None:
                seuser = orig_seuser
            if serange is None:
                serange = orig_serange
            if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
                # In check mode, report the change without applying it.
                if not module.check_mode:
                    sefcontext.modify(target, setype, ftype, serange, seuser)
                changed = True
                if module._diff:
                    prepared_diff += '# Change to semanage file context mappings\n'
                    prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
                    prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
        else:
            # Add missing entry
            if seuser is None:
                seuser = 'system_u'
            if serange is None:
                serange = 's0'
            if not module.check_mode:
                sefcontext.add(target, setype, ftype, serange, seuser)
            changed = True
            if module._diff:
                prepared_diff += '# Addition to semanage file context mappings\n'
                prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
    except Exception:
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
    if module._diff and prepared_diff:
        result['diff'] = dict(prepared=prepared_diff)
    module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
    ''' Delete SELinux file context mapping definition from the policy. '''
    changed = False
    diff_text = ''
    try:
        fcontexts = seobject.fcontextRecords(sestore)
        fcontexts.set_reload(do_reload)
        current = semanage_fcontext_exists(fcontexts, target, ftype)
        if current:
            # A mapping exists for (target, ftype): remove it from policy.
            if not module.check_mode:
                fcontexts.delete(target, ftype)
            changed = True
            if module._diff:
                cur_seuser, cur_serole, cur_setype, cur_serange = current
                diff_text += '# Deletion to semanage file context mappings\n'
                diff_text += '-%s %s %s:%s:%s:%s\n' % (target, ftype, cur_seuser, cur_serole, cur_setype, cur_serange)
    except Exception:
        # Surface any libsemanage/seobject failure as a module failure.
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
    if module._diff and diff_text:
        result['diff'] = dict(prepared=diff_text)
    # exit_json terminates module execution; this function never returns.
    module.exit_json(changed=changed, **result)
def main():
    """Entry point: parse module arguments and apply the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            target=dict(required=True, aliases=['path']),
            # Single-letter file type keys as accepted by semanage fcontext.
            ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()),
            setype=dict(type='str', required=True),
            seuser=dict(type='str'),
            selevel=dict(type='str', aliases=['serange']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            reload=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    # Fail early when the required SELinux Python bindings are missing.
    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python")
    if not HAVE_SEOBJECT:
        module.fail_json(msg="This module requires policycoreutils-python")
    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")
    target = module.params['target']
    ftype = module.params['ftype']
    setype = module.params['setype']
    seuser = module.params['seuser']
    serange = module.params['selevel']
    state = module.params['state']
    do_reload = module.params['reload']
    result = dict(target=target, ftype=ftype, setype=setype, state=state)
    if state == 'present':
        semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
    elif state == 'absent':
        semanage_fcontext_delete(module, result, target, ftype, do_reload)
    else:
        # Defensive: unreachable, argument_spec restricts state to present/absent.
        module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))


if __name__ == '__main__':
    main()
| gpl-3.0 |
GiovanniConserva/TestDeploy | venv/Lib/warnings.py | 68 | 14748 | """Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
import types
__all__ = ["warn", "warn_explicit", "showwarning",
"formatwarning", "filterwarnings", "simplefilter",
"resetwarnings", "catch_warnings"]
def warnpy3k(message, category=None, stacklevel=1):
    """Issue a deprecation warning for Python 3.x related changes.

    Warnings are omitted unless Python is started with the -3 option.
    """
    # Only emit when the interpreter was launched with -3.
    if not sys.py3kwarning:
        return
    warn(message,
         DeprecationWarning if category is None else category,
         stacklevel + 1)
def _show_warning(message, category, filename, lineno, file=None, line=None):
    """Hook to write a warning to a file; replace if you like."""
    target = sys.stderr if file is None else file
    if target is None:
        # sys.stderr can itself be None (e.g. pythonw) - the warning is lost.
        return
    try:
        target.write(formatwarning(message, category, filename, lineno, line))
    except (IOError, UnicodeError):
        # The stream (probably stderr) is unusable; drop this warning.
        pass

# Keep a working version around in case the deprecation of the old API is
# triggered.
showwarning = _show_warning
def formatwarning(message, category, filename, lineno, line=None):
    """Function to format a warning the standard way.

    Returns "filename:lineno: category: message\n", optionally followed
    by the stripped source line.
    """
    try:
        # Python 2: enable unicode-aware handling when the builtin exists.
        unicodetype = unicode
    except NameError:
        # isinstance(x, ()) is always False, disabling the unicode paths.
        unicodetype = ()
    try:
        message = str(message)
    except UnicodeEncodeError:
        # Keep the original message object if it cannot be encoded.
        pass
    s = "%s: %s: %s\n" % (lineno, category.__name__, message)
    line = linecache.getline(filename, lineno) if line is None else line
    if line:
        line = line.strip()
        if isinstance(s, unicodetype) and isinstance(line, str):
            # Avoid mixed unicode/bytes concatenation; latin1 never fails.
            line = unicode(line, 'latin1')
        s += " %s\n" % line
    if isinstance(s, unicodetype) and isinstance(filename, str):
        enc = sys.getfilesystemencoding()
        if enc:
            try:
                filename = unicode(filename, enc)
            except UnicodeDecodeError:
                # Keep the byte string if it is not valid in the FS encoding.
                pass
    s = "%s:%s" % (filename, s)
    return s
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
                   append=0):
    """Insert an entry into the list of warnings filters (at the front).

    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'message' -- a regex that the warning message must match
    'category' -- a class that the warning must be a subclass of
    'module' -- a regex that the module name must match
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    import re
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(message, basestring), "message must be a string"
    assert isinstance(category, (type, types.ClassType)), \
           "category must be a class"
    assert issubclass(category, Warning), "category must be a Warning subclass"
    assert isinstance(module, basestring), "module must be a string"
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # Message matching is case-insensitive; module matching is not.
    item = (action, re.compile(message, re.I), category,
            re.compile(module), lineno)
    if append:
        filters.append(item)
    else:
        # Front insertion gives the newest filter highest precedence.
        filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=0):
    """Insert a simple entry into the list of warnings filters (at the front).

    A simple filter matches all modules and messages.
    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'category' -- a class that the warning must be a subclass of
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # A None message/module pattern matches everything.
    entry = (action, None, category, None, lineno)
    # Inserting at len(filters) is equivalent to appending.
    filters.insert(len(filters) if append else 0, entry)
def resetwarnings():
    """Clear the list of warning filters, so that no filters are active."""
    # Empty the shared list in place so existing aliases see the change.
    del filters[:]
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
    # Each element of *args* is one -W option value; malformed options are
    # reported on stderr and skipped rather than aborting interpreter startup.
    for arg in args:
        try:
            _setoption(arg)
        except _OptionError, msg:
            print >>sys.stderr, "Invalid -W option ignored:", msg
# Helper for _processoptions()
def _setoption(arg):
    """Parse one -W option string and register the resulting filter.

    The format is action[:message[:category[:module[:lineno]]]].
    Raises _OptionError for malformed input.
    """
    import re
    fields = arg.split(':')
    if len(fields) > 5:
        raise _OptionError("too many fields (max 5): %r" % (arg,))
    # Pad missing trailing fields with empty strings.
    while len(fields) < 5:
        fields.append('')
    action, message, category, module, lineno = [field.strip()
                                                 for field in fields]
    action = _getaction(action)
    message = re.escape(message)
    category = _getcategory(category)
    module = re.escape(module)
    if module:
        # Anchor the pattern so it must match the full module name.
        module = module + '$'
    if lineno:
        try:
            lineno = int(lineno)
            if lineno < 0:
                raise ValueError
        except (ValueError, OverflowError):
            raise _OptionError("invalid lineno %r" % (lineno,))
    else:
        lineno = 0
    filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
    """Issue a warning, or maybe ignore it or raise an exception."""
    # Check if message is already a Warning object
    if isinstance(message, Warning):
        category = message.__class__
    # Check category argument
    if category is None:
        category = UserWarning
    assert issubclass(category, Warning)
    # Get context information from the frame 'stacklevel' levels up the stack.
    try:
        caller = sys._getframe(stacklevel)
    except ValueError:
        # Not enough frames on the stack; fall back to sys module globals.
        globals = sys.__dict__
        lineno = 1
    else:
        globals = caller.f_globals
        lineno = caller.f_lineno
    if '__name__' in globals:
        module = globals['__name__']
    else:
        module = "<string>"
    filename = globals.get('__file__')
    if filename:
        fnl = filename.lower()
        if fnl.endswith((".pyc", ".pyo")):
            # Report the source file rather than the compiled one.
            filename = filename[:-1]
    else:
        if module == "__main__":
            try:
                filename = sys.argv[0]
            except AttributeError:
                # embedded interpreters don't have sys.argv, see bug #839151
                filename = '__main__'
        if not filename:
            filename = module
    # Per-module registry used to honour "once"/"module"/"default" actions.
    registry = globals.setdefault("__warningregistry__", {})
    warn_explicit(message, category, filename, lineno, module, registry,
                  globals)
def warn_explicit(message, category, filename, lineno,
                  module=None, registry=None, module_globals=None):
    """Low-level warning issuer: all context information is passed in.

    'registry' is the per-module __warningregistry__ dict used to
    suppress repeated warnings according to the matched filter action.
    """
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3] # XXX What about leading pathname?
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters; first match wins.
    for item in filters:
        action, msg, cat, mod, ln = item
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        # No filter matched: fall back to the process-wide default action.
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return
    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    linecache.getlines(filename, module_globals)
    if action == "error":
        raise message
    # Other actions
    if action == "once":
        # Show only the first occurrence, regardless of location.
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        # Show only the first occurrence per module (lineno ignored).
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))
    # Print message and context
    showwarning(message, category, filename, lineno)
class WarningMessage(object):
    """Holds the result of a single showwarning() call."""

    # Attribute names mirroring the showwarning() signature.
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        # Store each argument under its own name.
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        self._category_name = category.__name__ if category else None

    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
class catch_warnings(object):
    """A context manager that copies and restores the warnings filter upon
    exiting the context.

    When 'record' is true, warnings are captured by a replacement
    showwarning() implementation and appended to the list that __enter__
    returns; otherwise __enter__ returns None.  The entries of that list
    mirror the arguments of showwarning().

    'module' selects an alternative module object to patch instead of the
    imported 'warnings' module; this is only useful when testing the
    warnings machinery itself.
    """

    def __init__(self, record=False, module=None):
        """Specify whether to record warnings and if an alternative module
        should be used other than sys.modules['warnings'].

        For compatibility with Python 3.0, please consider all arguments to be
        keyword-only.
        """
        self._record = record
        if module is None:
            self._module = sys.modules['warnings']
        else:
            self._module = module
        self._entered = False

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    def __enter__(self):
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the live filter list and work on a copy of it.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if not self._record:
            return None
        log = []

        def record_hook(*args, **kwargs):
            log.append(WarningMessage(*args, **kwargs))

        self._module.showwarning = record_hook
        return log

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore the saved filter list and showwarning hook.
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warning, or 0 to mean any line
# If either if the compiled regexs are None, match anything.
_warnings_defaults = False
try:
    # Prefer the C implementation when available; it supplies the shared
    # filter list and default registries.
    from _warnings import (filters, default_action, once_registry,
                           warn, warn_explicit)
    defaultaction = default_action
    onceregistry = once_registry
    _warnings_defaults = True
except ImportError:
    # Pure-Python fallback state.
    filters = []
    defaultaction = "default"
    onceregistry = {}

# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
    # Mirror the default filters the C implementation would install.
    silence = [ImportWarning, PendingDeprecationWarning]
    # Don't silence DeprecationWarning if -3 or -Q was used.
    if not sys.py3kwarning and not sys.flags.division_warning:
        silence.append(DeprecationWarning)
    for cls in silence:
        simplefilter("ignore", category=cls)
    bytes_warning = sys.flags.bytes_warning
    if bytes_warning > 1:
        bytes_action = "error"
    elif bytes_warning:
        bytes_action = "default"
    else:
        bytes_action = "ignore"
    simplefilter(bytes_action, category=BytesWarning, append=1)

del _warnings_defaults
| bsd-3-clause |
Yen-Chung-En/2015cdb_g1_0420 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_loader.py | 738 | 49593 | import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('unittest.sdasfasfasdf')
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('', unittest)
except AttributeError as e:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignorning the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError as e:
self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
except AttributeError as e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''], unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError as e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest.TestCase):
@staticmethod
def foo():
return testcase_1
m.Foo = Foo
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest.TestSuite()])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
# Since cmp is now defunct, we simply verify that the results
# occur in the same order as they would with the default sort.
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
class Foo(unittest.TestCase):
def test_2(self): pass
def test_3(self): pass
def test_1(self): pass
test_names = ['test_2', 'test_3', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.suiteClass is unittest.TestSuite)
| gpl-3.0 |
adrienverge/linux | scripts/gdb/linux/utils.py | 509 | 4833 | #
# gdb helper commands and functions for Linux kernel debugging
#
# common utilities
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
class CachedType:
    """Lazily look up a gdb type by name and cache the result.

    The cache is dropped whenever a new objfile is loaded, since type
    layouts may differ between binaries.
    """

    def __init__(self, name):
        self._name = name
        self._type = None

    def _new_objfile_handler(self, event):
        # Invalidate the cache and detach; get_type() re-registers the
        # handler the next time it resolves the type.
        self._type = None
        gdb.events.new_objfile.disconnect(self._new_objfile_handler)

    def get_type(self):
        if self._type is not None:
            return self._type
        self._type = gdb.lookup_type(self._name)
        if self._type is None:
            raise gdb.GdbError(
                "cannot resolve type '{0}'".format(self._name))
        if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
            gdb.events.new_objfile.connect(self._new_objfile_handler)
        return self._type
# Module-level cache for the target's `long` type, shared by the helpers below.
long_type = CachedType("long")

def get_long_type():
    """Return the gdb.Type for the inferior's `long` (cached lookup)."""
    global long_type
    return long_type.get_type()
def offset_of(typeobj, field):
    """Return the byte offset of `field` within pointer type `typeobj`.

    Casts a NULL pointer to the type and reads the field's address; gdb
    may print the address with a trailing symbol annotation, so only the
    first whitespace-separated token is parsed (as hex).
    """
    element = gdb.Value(0).cast(typeobj)
    return int(str(element[field].address).split()[0], 16)

def container_of(ptr, typeobj, member):
    """Python equivalent of the kernel's container_of(): given `ptr` to
    `member`, return a pointer (of type `typeobj`) to the enclosing struct."""
    return (ptr.cast(get_long_type()) -
            offset_of(typeobj, member)).cast(typeobj)
class ContainerOf(gdb.Function):
    """Return pointer to containing data structure.

    $container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
    data structure of the type TYPE in which PTR is the address of ELEMENT.
    Note that TYPE and ELEMENT have to be quoted as strings."""
    def __init__(self):
        # Registers the function under the name $container_of in gdb.
        super(ContainerOf, self).__init__("container_of")
    def invoke(self, ptr, typename, elementname):
        # typename/elementname arrive as gdb.Value strings; unwrap them
        # and delegate to the Python-level container_of() helper above.
        return container_of(ptr, gdb.lookup_type(typename.string()).pointer(),
                            elementname.string())

# Instantiate once to register the convenience function with gdb.
ContainerOf()
BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
target_endianness = None  # cached result of get_target_endianness()

def get_target_endianness():
    """Return BIG_ENDIAN or LITTLE_ENDIAN for the debug target.

    Parses the output of `show endian` once and caches the result for
    subsequent calls.
    """
    global target_endianness
    if target_endianness is None:
        endian = gdb.execute("show endian", to_string=True)
        if "little endian" in endian:
            target_endianness = LITTLE_ENDIAN
        elif "big endian" in endian:
            target_endianness = BIG_ENDIAN
        else:
            raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
    return target_endianness
def read_memoryview(inf, start, length):
    """Read `length` bytes of inferior `inf` memory at `start` as a
    zero-copy memoryview."""
    return memoryview(inf.read_memory(start, length))
def read_u16(buffer):
    """Decode two bytes from `buffer` as an unsigned 16-bit integer,
    honouring the target's endianness.

    Elements may be single-character strings (older gdb/Python buffer
    behaviour — presumably Python 2) or plain ints; both are handled.
    """
    b0, b1 = buffer[0], buffer[1]
    if type(b0) is str:
        b0, b1 = ord(b0), ord(b1)
    if get_target_endianness() == LITTLE_ENDIAN:
        return b0 + (b1 << 8)
    return b1 + (b0 << 8)
def read_u32(buffer):
    """Decode four bytes from `buffer` as an unsigned 32-bit integer,
    combining two 16-bit halves according to target endianness."""
    first, second = read_u16(buffer[0:2]), read_u16(buffer[2:4])
    if get_target_endianness() == LITTLE_ENDIAN:
        return first + (second << 16)
    return second + (first << 16)
def read_u64(buffer):
    """Decode eight bytes from `buffer` as an unsigned 64-bit integer,
    combining two 32-bit halves according to target endianness."""
    first, second = read_u32(buffer[0:4]), read_u32(buffer[4:8])
    if get_target_endianness() == LITTLE_ENDIAN:
        return first + (second << 32)
    return second + (first << 32)
target_arch = None  # cached `show architecture` output (fallback path only)

def is_target_arch(arch):
    """Return True if `arch` is a substring of the target architecture name.

    Prefers the Frame.architecture() API when available; otherwise falls
    back to parsing `show architecture` once and caching the raw output.
    """
    if hasattr(gdb.Frame, 'architecture'):
        return arch in gdb.newest_frame().architecture().name()
    else:
        global target_arch
        if target_arch is None:
            target_arch = gdb.execute("show architecture", to_string=True)
        return arch in target_arch
GDBSERVER_QEMU = 0
GDBSERVER_KGDB = 1
gdbserver_type = None  # cached probe result; cleared when the inferior exits

def get_gdbserver_type():
    """Detect and cache which gdb server the session is attached to.

    Returns GDBSERVER_QEMU, GDBSERVER_KGDB, or None if neither probe
    succeeds. The cached value is reset on inferior exit so a new
    session gets re-probed.
    """
    def exit_handler(event):
        # Invalidate the cache and detach this one-shot handler.
        global gdbserver_type
        gdbserver_type = None
        gdb.events.exited.disconnect(exit_handler)
    def probe_qemu():
        # QEMU's gdbstub responds to `monitor info version`.
        try:
            return gdb.execute("monitor info version", to_string=True) != ""
        except:
            return False
    def probe_kgdb():
        # KGDB exposes shadow threads named shadowCPU<n>; probe thread 2.
        try:
            thread_info = gdb.execute("info thread 2", to_string=True)
            return "shadowCPU0" in thread_info
        except:
            return False
    global gdbserver_type
    if gdbserver_type is None:
        if probe_qemu():
            gdbserver_type = GDBSERVER_QEMU
        elif probe_kgdb():
            gdbserver_type = GDBSERVER_KGDB
        if gdbserver_type is not None and hasattr(gdb, 'events'):
            gdb.events.exited.connect(exit_handler)
    return gdbserver_type
def gdb_eval_or_none(expresssion):
    """Evaluate a gdb expression string, returning None on failure.

    NOTE: the parameter name is (mis)spelled `expresssion`; it is kept
    for backward compatibility with any keyword-argument callers.
    """
    try:
        return gdb.parse_and_eval(expresssion)
    except gdb.error:
        # Catch only gdb evaluation errors; the previous bare `except:`
        # also swallowed KeyboardInterrupt and programming errors.
        return None
def dentry_name(d):
    """Build the path of dentry `d` by recursing up the d_parent chain.

    The root dentry (its own parent, or a NULL parent) contributes the
    empty string, so a first-level entry yields "/<name>".
    """
    parent = d['d_parent']
    if parent == d or parent == 0:
        return ""
    return dentry_name(parent) + "/" + d['d_iname'].string()
| gpl-2.0 |
DeanSherwin/django-dynamic-scraper | dynamic_scraper/models.py | 1 | 17900 | #Stage 2 Update (Python 3)
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from builtins import range
from builtins import str
from builtins import object
import datetime
from django.db import models
from django.db.models import Q
@python_2_unicode_compatible
class ScrapedObjClass(models.Model):
    """Type of object to be scraped (e.g. an article or a product).

    Groups the object's attributes (see ScrapedObjAttr) and carries the
    default scheduler configuration strings for scrapers and checkers.
    """
    name = models.CharField(max_length=200)
    # Default scheduler conf for scrapers of this class; key/value lines.
    # The backslash continuations are kept flush left so no stray
    # indentation ends up inside the stored string.
    scraper_scheduler_conf = models.TextField(default='\
"MIN_TIME": 15,\n\
"MAX_TIME": 10080,\n\
"INITIAL_NEXT_ACTION_FACTOR": 10,\n\
"ZERO_ACTIONS_FACTOR_CHANGE": 20,\n\
"FACTOR_CHANGE_FACTOR": 1.3,\n')
    # Default scheduler conf for checkers (same format, longer MIN_TIME).
    checker_scheduler_conf = models.TextField(default='\
"MIN_TIME": 1440,\n\
"MAX_TIME": 10080,\n\
"INITIAL_NEXT_ACTION_FACTOR": 1,\n\
"ZERO_ACTIONS_FACTOR_CHANGE": 5,\n\
"FACTOR_CHANGE_FACTOR": 1.3,\n')
    comments = models.TextField(blank=True)

    def __str__(self):
        """Display representation: the class name."""
        return self.name

    class Meta(object):
        verbose_name = "Scraped object class"
        verbose_name_plural = "Scraped object classes"
        ordering = ['name',]
@python_2_unicode_compatible
class ScrapedObjAttr(models.Model):
    """Single attribute of a ScrapedObjClass (e.g. a title, URL or image)."""
    # Attribute kinds as exposed in the admin; single-letter codes are
    # stored in attr_type below.
    ATTR_TYPE_CHOICES = (
        ('S', 'STANDARD'),
        ('T', 'STANDARD (UPDATE)'),
        ('B', 'BASE'),
        ('U', 'DETAIL_PAGE_URL'),
        ('I', 'IMAGE'),
    )
    name = models.CharField(max_length=200)
    # Ordering weight within the object class (see Meta.ordering).
    order = models.IntegerField(default=100)
    obj_class = models.ForeignKey(ScrapedObjClass)
    attr_type = models.CharField(max_length=1, choices=ATTR_TYPE_CHOICES)
    # Whether this attribute takes part in object identification.
    id_field = models.BooleanField(default=False)
    save_to_db = models.BooleanField(default=True)

    def __str__(self):
        """Display as "<name> (<object class>)"."""
        return self.name + " (" + str(self.obj_class) + ")"

    class Meta(object):
        ordering = ['order',]
@python_2_unicode_compatible
class Scraper(models.Model):
STATUS_CHOICES = (
('A', 'ACTIVE'),
('M', 'MANUAL'),
('P', 'PAUSED'),
('I', 'INACTIVE'),
)
WORK_STATUS_CHOICES = (
('R2', 'REVISION NEEDED (MAJOR)'),
('R1', 'REVISION NEEDED (MINOR)'),
('UR', 'UNRESOLVED'),
('BR', 'BROKEN'),
('W', 'WORKING'),
('RC', 'RELEASE CANDIDATE'),
('BE', 'BETA'),
('A', 'ALPHA'),
('D', 'DRAFT'),
('S', 'SUSPENDED'),
('U', 'UNKNOWN'),
('N', 'NOT SET'),
)
CONTENT_TYPE_CHOICES = (
('H', 'HTML'),
('X', 'XML'),
('J', 'JSON'),
)
REQUEST_TYPE_CHOICES = (
('R', 'Request'),
('F', 'FormRequest'),
)
METHOD_CHOICES = (
('GET', 'GET'),
('POST', 'POST'),
)
PAGINATION_TYPE = (
('N', 'NONE'),
('R', 'RANGE_FUNCT (+FOLLOW)'),
('F', 'FREE_LIST (+FOLLOW)'),
('O', 'FOLLOW'),
)
name = models.CharField(max_length=200)
scraped_obj_class = models.ForeignKey(ScrapedObjClass)
help_text = "Runtime status of the scraper, used by scheduling mechanism."
status = models.CharField(max_length=1, choices=STATUS_CHOICES, default='P', help_text=help_text)
help_text = "Internal work/progress status of the scraper."
work_status = models.CharField(max_length=2, choices=WORK_STATUS_CHOICES, default='N', help_text=help_text)
help_text = "Optional owner when working on scrapers with various people"
owner = models.CharField(max_length=12, blank=True, help_text=help_text)
max_items_read = models.IntegerField(blank=True, null=True, help_text="Max number of items to be read (empty: unlimited).")
max_items_save = models.IntegerField(blank=True, null=True, help_text="Max number of items to be saved (empty: unlimited).")
pagination_type = models.CharField(max_length=1, choices=PAGINATION_TYPE, default='N')
pagination_on_start = models.BooleanField(default=False)
pagination_append_str = models.CharField(max_length=200, blank=True, help_text="Syntax: /somepartofurl/{page}/moreurlstuff.html")
pagination_page_replace = models.TextField(blank=True,
help_text="RANGE_FUNCT: uses Python range funct., syntax: [start], stop[, step], FREE_LIST: 'Replace text 1', 'Some other text 2', 'Maybe a number 3', ...")
help_text = "Optional, follow links from a single non-paginated or all statically paginated (RANGE_FUNCT, FREE_LIST) main pages"
follow_pages_url_xpath = models.TextField(blank=True, help_text=help_text)
help_text = "Optional additional XPath for the page number, can be used in {follow_page} placeholder."
follow_pages_page_xpath = models.TextField(blank=True, help_text=help_text)
help_text = "Optionally limit number of pages to follow (default: follow until XPath fails)"
num_pages_follow = models.IntegerField(blank=True, null=True, help_text=help_text)
last_scraper_save_alert_period = models.CharField(max_length=5, blank=True,
help_text="Optional, used for scraper monitoring with 'check_last_scraper_saves' management cmd, \
syntax: [HOURS]h or [DAYS]d or [WEEKS]w (e.g. '6h', '5d', '2w')")
next_last_scraper_save_alert = models.DateTimeField(default=datetime.datetime.now,
help_text="Next time the last scraper save will be alerted, normally set on management cmd run.",)
last_checker_delete_alert_period = models.CharField(max_length=5, blank=True,
help_text="Optional, used for scraper monitoring with 'check_last_checker_deletes' management cmd, \
syntax: [HOURS]h or [DAYS]d or [WEEKS]w (e.g. '6h', '5d', '2w')")
next_last_checker_delete_alert = models.DateTimeField(default=datetime.datetime.now,
help_text="Next time the last checker delete will be alerted, normally set on management cmd run.",)
comments = models.TextField(blank=True)
last_scraper_save = models.DateTimeField(null=True, blank=True)
last_checker_delete = models.DateTimeField(null=True, blank=True)
def get_alert_period_timedelta(self, attribute_str):
    """Parse the alert period stored in attribute ``attribute_str``.

    The attribute value is expected to look like ``'6h'``, ``'5d'`` or
    ``'2w'`` (hours / days / weeks). Returns a ``datetime.timedelta`` on
    success, or ``None`` for empty, too-short or malformed values.
    """
    value = getattr(self, attribute_str)
    if not value or len(value) < 2:
        return None
    unit, amount_str = value[-1], value[:-1]
    if unit not in ('h', 'd', 'w'):
        return None
    try:
        amount = int(amount_str)
    except ValueError:
        return None
    if unit == 'h':
        return datetime.timedelta(hours=amount)
    if unit == 'd':
        return datetime.timedelta(days=amount)
    return datetime.timedelta(weeks=amount)
def get_last_scraper_save_alert_period_timedelta(self):
    # Convenience wrapper: parse last_scraper_save_alert_period ('6h' etc.).
    return self.get_alert_period_timedelta('last_scraper_save_alert_period')
def get_last_checker_delete_alert_period_timedelta(self):
    # Convenience wrapper: parse last_checker_delete_alert_period ('2w' etc.).
    return self.get_alert_period_timedelta('last_checker_delete_alert_period')
def get_main_page_rpt(self):
    # The single main-page ('MP') RequestPageType; .get() raises if missing.
    return self.requestpagetype_set.get(page_type='MP')
def get_follow_page_rpts(self):
    # All follow-page ('FP') RequestPageTypes for this scraper.
    return self.requestpagetype_set.filter(page_type='FP')
def get_detail_page_rpts(self):
    """Return every RequestPageType of this scraper except the main page.

    Bug fix: the queryset was accessed through an undefined name ``s``
    instead of ``self``, so every call raised a NameError.
    """
    return self.requestpagetype_set.filter(~Q(page_type='MP'))
# Queryset helpers over the related RequestPageType and ScraperElem sets.
# The attr_type letters follow the method names: 'B' = base, 'S' = standard
# (fixed), 'T' = standard (update), 'U' = detail page URL, 'I' = image.
def get_rpt(self, page_type):
    # Single RequestPageType with the given page_type; raises if absent.
    return self.requestpagetype_set.get(page_type=page_type)
def get_rpt_for_scraped_obj_attr(self, soa):
    # RequestPageType bound to the given scraped object attribute.
    return self.requestpagetype_set.get(scraped_obj_attr=soa)
def get_base_elems(self):
    return self.scraperelem_set.filter(scraped_obj_attr__attr_type='B')
def get_base_elem(self):
    # Exactly one BASE element is expected per scraper; .get() enforces that.
    return self.scraperelem_set.get(scraped_obj_attr__attr_type='B')
def get_detail_page_url_elems(self):
    return self.scraperelem_set.filter(scraped_obj_attr__attr_type='U')
def get_detail_page_url_id_elems(self):
    # DETAIL_PAGE_URL elements that also act as item identifier fields.
    return self.scraperelem_set.filter(scraped_obj_attr__attr_type='U', scraped_obj_attr__id_field=True)
def get_standard_elems(self):
    # Standard elements, both fixed ('S') and update ('T') variants.
    q1 = Q(scraped_obj_attr__attr_type='S')
    q2 = Q(scraped_obj_attr__attr_type='T')
    return self.scraperelem_set.filter(q1 | q2)
def get_id_field_elems(self):
    q1 = Q(scraped_obj_attr__id_field=True)
    return self.scraperelem_set.filter(q1)
def get_standard_fixed_elems(self):
    return self.scraperelem_set.filter(scraped_obj_attr__attr_type='S')
def get_standard_update_elems(self):
    return self.scraperelem_set.filter(scraped_obj_attr__attr_type='T')
def get_standard_update_elems_from_detail_pages(self):
    # Update elements that are scraped from detail pages (not the main page).
    return self.scraperelem_set.filter(scraped_obj_attr__attr_type='T').filter(~Q(request_page_type='MP'))
def get_image_elems(self):
    return self.scraperelem_set.filter(scraped_obj_attr__attr_type='I')
def get_image_elem(self):
    return self.scraperelem_set.get(scraped_obj_attr__attr_type='I')
def get_scrape_elems(self):
    # All elements that actually scrape content (everything but BASE).
    q1 = Q(scraped_obj_attr__attr_type='S')
    q2 = Q(scraped_obj_attr__attr_type='T')
    q3 = Q(scraped_obj_attr__attr_type='U')
    q4 = Q(scraped_obj_attr__attr_type='I')
    return self.scraperelem_set.filter(q1 | q2 | q3 | q4)
def get_mandatory_scrape_elems(self):
    # Same as get_scrape_elems() but restricted to mandatory elements.
    q1 = Q(scraped_obj_attr__attr_type='S')
    q2 = Q(scraped_obj_attr__attr_type='T')
    q3 = Q(scraped_obj_attr__attr_type='U')
    q4 = Q(scraped_obj_attr__attr_type='I')
    return self.scraperelem_set.filter(q1 | q2 | q3 | q4).filter(mandatory=True)
def get_from_detail_pages_scrape_elems(self):
    # Every element whose content is taken from a non-main page.
    return self.scraperelem_set.filter(~Q(request_page_type='MP'))
def __str__(self):
    # Human-readable label, e.g. "Wikinews Scraper (Article)".
    return self.name + " (" + self.scraped_obj_class.name + ")"
class Meta(object):
    # Stable default ordering for admin listings and querysets.
    ordering = ['name', 'scraped_obj_class',]
@python_2_unicode_compatible
class RequestPageType(models.Model):
    """Per-page-type HTTP request configuration for a scraper.

    One RPT describes how a given page type (main page, follow page, or one
    of up to 25 detail pages) is fetched: content type, request class,
    method, headers/body/cookies and Scrapy meta attributes.
    """
    # "MP"/"FP" plus generated "DP1".."DP25" detail page types.
    TYPE_CHOICES = tuple([("MP", "Main Page"), ("FP", "Follow Page"),] + [("DP{n}".format(n=str(n)), "Detail Page {n}".format(n=str(n))) for n in list(range(1, 26))])
    CONTENT_TYPE_CHOICES = (
        ('H', 'HTML'),
        ('X', 'XML'),
        ('J', 'JSON'),
    )
    REQUEST_TYPE_CHOICES = (
        ('R', 'Request'),
        ('F', 'FormRequest'),
    )
    METHOD_CHOICES = (
        ('GET', 'GET'),
        ('POST', 'POST'),
    )
    # ``help_text`` is a throwaway class-body local used for the next field.
    help_text = "One main page RPT, an optional follow page RPT (if follow pagination is used) and detail page RPTs for all DETAIL_PAGE_URLs"
    page_type = models.CharField(max_length=3, choices=TYPE_CHOICES, help_text=help_text)
    scraped_obj_attr = models.ForeignKey(ScrapedObjAttr, blank=True, null=True, help_text="Empty for main page, attribute of type DETAIL_PAGE_URL scraped from main page for detail pages.")
    scraper = models.ForeignKey(Scraper)
    content_type = models.CharField(max_length=1, choices=CONTENT_TYPE_CHOICES, default='H', help_text="Data type format for scraped pages of page type (for JSON use JSONPath instead of XPath)")
    render_javascript = models.BooleanField(default=False, help_text="Render Javascript on pages (ScrapyJS/Splash deployment needed, careful: resource intense)")
    request_type = models.CharField(max_length=1, choices=REQUEST_TYPE_CHOICES, default='R', help_text="Normal (typically GET) request (default) or form request (typically POST), using Scrapys corresponding request classes (not used for checker).")
    method = models.CharField(max_length=10, choices=METHOD_CHOICES, default='GET', help_text="HTTP request via GET or POST.")
    headers = models.TextField(blank=True, help_text='Optional HTTP headers sent with each request, provided as a JSON dict (e.g. {"Referer":"http://referer_url"}, use double quotes!)), can use {main page attribute}, {page} and {follow_page} placeholders.')
    body = models.TextField(blank=True, help_text="Optional HTTP message body provided as a unicode string, can use {main page attribute}, {page} and {follow_page} placeholders.")
    cookies = models.TextField(blank=True, help_text="Optional cookies as JSON dict (use double quotes!), can use {main page attribute}, {page} and {follow_page} placeholders.")
    meta = models.TextField(blank=True, help_text="Optional Scrapy meta attributes as JSON dict (use double quotes!), see Scrapy docs for reference.")
    form_data = models.TextField(blank=True, help_text="Optional HTML form data as JSON dict (use double quotes!), only used with FormRequest request type, can use {main page attribute}, {page} and {follow_page} placeholders.")
    dont_filter = models.BooleanField(default=False, help_text="Do not filter duplicate requests, useful for some scenarios with requests falsely marked as being duplicate (e.g. uniform URL + pagination by HTTP header).")
    comments = models.TextField(blank=True)
    def __str__(self):
        # e.g. "Detail Page 2 (<scraped obj attr>)".
        ret_str = self.get_page_type_display()
        if self.scraped_obj_attr:
            ret_str += ' (' + str(self.scraped_obj_attr) + ')'
        return ret_str
@python_2_unicode_compatible
class Checker(models.Model):
    """Check definition used to detect removed items behind a detail URL.

    Per its choices, a checker either tests for a plain 404 ('4') or for a
    404-or-XPath-hit ('X') on the page behind a DETAIL_PAGE_URL attribute.
    """
    CHECKER_TYPE = (
        ('4', '404'),
        ('X', '404_OR_X_PATH'),
    )
    scraped_obj_attr = models.ForeignKey(ScrapedObjAttr, help_text="Attribute of type DETAIL_PAGE_URL, several checkers for same DETAIL_PAGE_URL attribute possible.")
    scraper = models.ForeignKey(Scraper)
    checker_type = models.CharField(max_length=1, choices=CHECKER_TYPE, default='4')
    # XPath expression and the result value signalling a removed item.
    checker_x_path = models.TextField(blank=True)
    checker_x_path_result = models.TextField(blank=True)
    # NOTE(review): presumably a reference URL for test-running the checker
    # -- confirm against the management commands that use it.
    checker_ref_url = models.URLField(max_length=500, blank=True)
    comments = models.TextField(blank=True)
    def __str__(self):
        return str(self.scraped_obj_attr) + ' > ' + self.get_checker_type_display()
@python_2_unicode_compatible
class ScraperElem(models.Model):
    """A single attribute-extraction rule of a scraper.

    Binds a scraped object attribute to the XPath/JSONPath expression,
    optional regex filter and processors used to extract it from a given
    request page type.
    """
    # "MP" plus generated "DP1".."DP25" detail page types.
    REQUEST_PAGE_TYPE_CHOICES = tuple([("MP", "Main Page")] + [("DP{n}".format(n=str(n)), "Detail Page {n}".format(n=str(n))) for n in list(range(1, 26))])
    # ``help_text`` is a throwaway class-body local used for the next field.
    help_text = "The different attributes to be scraped, exactly one attribute of type BASE necessary."
    scraped_obj_attr = models.ForeignKey(ScrapedObjAttr, help_text=help_text)
    scraper = models.ForeignKey(Scraper)
    x_path = models.TextField(blank=True, help_text='XPath or JSONPath expression, leave blank on "static" processor use.')
    reg_exp = models.TextField(blank=True, help_text="Optional filtering by regular expression (e.g. 'Scrape only (.*) the text in between').")
    help_text = "Corresponding Request Page Types created for this scraper."
    request_page_type = models.CharField(max_length=3, choices=REQUEST_PAGE_TYPE_CHOICES, default="MP", help_text=help_text)
    help_text = "Use the default processors (Scrapy TakeFirst, DDS string_strip) for convenience."
    use_default_procs = models.BooleanField(default=True, help_text=help_text)
    help_text = 'Optional comma-separated list of processors used (e.g. "pre_url, post_string").'
    processors = models.TextField(blank=True, help_text=help_text)
    help_text = "Comma-separated aditional context (depending on processor) (e.g. 'pre_url': 'http://append_before.org/', 'post_string': '?append_after=True')."
    proc_ctxt = models.TextField(blank=True, help_text=help_text)
    help_text = "Drop item if attribute could not be scraped."
    mandatory = models.BooleanField(default=True, help_text=help_text)
    def __str__(self):
        # e.g. "<scraper> > title Attribute (Main Page)".
        return '{s} > {soa} Attribute ({rpt})'.format(
            s=str(self.scraper),
            soa=self.scraped_obj_attr.name,
            rpt=self.get_request_page_type_display())
    class Meta(object):
        ordering = ['scraped_obj_attr__order',]
@python_2_unicode_compatible
class SchedulerRuntime(models.Model):
    """Scheduling state (next run time, backoff factor, idle-run counter)
    for a scraper or checker run."""
    TYPE = (
        ('S', 'SCRAPER'),
        ('C', 'CHECKER'),
    )
    # NOTE(review): the default 'P' is not one of the TYPE choices
    # ('S'/'C') -- looks like a latent bug; confirm the intended default
    # before changing it (it would affect migrations and existing rows).
    runtime_type = models.CharField(max_length=1, choices=TYPE, default='P')
    next_action_time = models.DateTimeField(default=datetime.datetime.now)
    next_action_factor = models.FloatField(blank=True, null=True)
    # Number of consecutive runs that produced zero actions.
    num_zero_actions = models.IntegerField(default=0)
    def __str__(self):
        return str(self.id)
    class Meta(object):
        ordering = ['next_action_time',]
class LogMarker(models.Model):
    """Rule that tags matching log messages with a type for easier triage."""
    TYPE_CHOICES = (
        ('PE', 'Planned Error'),
        ('DD', 'Dirty Data'),
        ('IM', 'Important'),
        ('IG', 'Ignore'),
        ('MI', 'Miscellaneous'),
        ('CU', 'Custom'),
    )
    # Substring to match against log messages.
    message_contains = models.CharField(max_length=255)
    # NOTE(review): this help_text is overwritten two lines below without
    # ever being attached to a field -- probably meant for ref_object.
    help_text = "Use the string format from the log messages"
    ref_object = models.CharField(max_length=200, blank=True)
    help_text = 'Choose "Custom" and enter your own type in the next field for a custom type'
    mark_with_type = models.CharField(max_length=2, choices=TYPE_CHOICES, help_text=help_text)
    custom_type = models.CharField(max_length=25, blank=True)
    spider_name = models.CharField(max_length=200, blank=True)
    scraper = models.ForeignKey(Scraper, blank=True, null=True)
class Log(models.Model):
    """A single persisted log record emitted by a spider/scraper run."""
    # Numeric values mirror the stdlib ``logging`` level constants.
    LEVEL_CHOICES = (
        (50, 'CRITICAL'),
        (40, 'ERROR'),
        (30, 'WARNING'),
        (20, 'INFO'),
        (10, 'DEBUG'),
    )
    message = models.CharField(max_length=255)
    ref_object = models.CharField(max_length=200)
    # Optional marker type (see LogMarker.mark_with_type / custom_type).
    type = models.CharField(max_length=25, blank=True)
    level = models.IntegerField(choices=LEVEL_CHOICES)
    spider_name = models.CharField(max_length=200)
    scraper = models.ForeignKey(Scraper, blank=True, null=True)
    date = models.DateTimeField(default=datetime.datetime.now)
    @staticmethod
    def numeric_level(level):
        # Map a level *name* (e.g. 'ERROR') to its numeric value;
        # returns 0 for unknown names.
        numeric_level = 0
        for choice in Log.LEVEL_CHOICES:
            if choice[1] == level:
                numeric_level = choice[0]
        return numeric_level
    class Meta(object):
        # Newest entries first.
        ordering = ['-date']
| bsd-3-clause |
agconti/njode | env/lib/python2.7/site-packages/pip/commands/show.py | 344 | 2767 | import os
from pip.basecommand import Command
from pip.log import logger
from pip._vendor import pkg_resources
class ShowCommand(Command):
    """Show information about one or more installed packages."""
    name = 'show'
    usage = """
      %prog [options] <package> ..."""
    summary = 'Show information about installed packages.'
    def __init__(self, *args, **kw):
        super(ShowCommand, self).__init__(*args, **kw)
        # -f/--files additionally prints every installed file per package.
        self.cmd_opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')
        self.parser.insert_option_group(0, self.cmd_opts)
    def run(self, options, args):
        # At least one package name is required on the command line.
        if not args:
            logger.warn('ERROR: Please provide a package name or names.')
            return
        query = args
        # search_packages_info yields one dict per found distribution.
        results = search_packages_info(query)
        print_results(results, options.files)
def search_packages_info(query):
    """
    Yield one info dict per installed distribution named in *query*.

    Matching is case-insensitive on the project name. Each dict carries
    'name', 'version', 'location' and 'requires'; when a pip-generated
    'installed-files.txt' exists in the distribution's '.egg-info'
    directory, its path is added under 'files'.
    """
    installed = {dist.project_name.lower(): dist
                 for dist in pkg_resources.working_set}
    for requested_name in query:
        dist = installed.get(requested_name.lower())
        if dist is None:
            continue
        info = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        filelist = os.path.join(
            dist.location,
            dist.egg_name() + '.egg-info',
            'installed-files.txt')
        if os.path.isfile(filelist):
            info['files'] = filelist
        yield info
def print_results(distributions, list_all_files):
    """
    Print the information from installed distributions found.

    :param distributions: iterable of info dicts as produced by
        ``search_packages_info``.
    :param list_all_files: when true, also print the installed file list.

    Fix: the 'installed-files.txt' handle is now opened with a context
    manager so it is always closed (it previously leaked).
    """
    for dist in distributions:
        logger.notify("---")
        logger.notify("Name: %s" % dist['name'])
        logger.notify("Version: %s" % dist['version'])
        logger.notify("Location: %s" % dist['location'])
        logger.notify("Requires: %s" % ', '.join(dist['requires']))
        if list_all_files:
            logger.notify("Files:")
            if 'files' in dist:
                # dist['files'] holds the *path* of installed-files.txt.
                with open(dist['files']) as filelist:
                    for line in filelist:
                        logger.notify("  %s" % line.strip())
            else:
                logger.notify("Cannot locate installed-files.txt")
| bsd-3-clause |
saguziel/incubator-airflow | airflow/contrib/operators/dataflow_operator.py | 22 | 9090 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
import uuid
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataFlowJavaOperator(BaseOperator):
    """
    Start a Java Cloud DataFlow batch job. The parameters of the operation
    will be passed to the job.
    It's a good practice to define dataflow_* parameters in the default_args of the dag
    like the project, zone and staging location.
    ```
    default_args = {
        'dataflow_default_options': {
            'project': 'my-gcp-project',
            'zone': 'europe-west1-d',
            'stagingLocation': 'gs://my-staging-bucket/staging/'
        }
    }
    ```
    You need to pass the path to your dataflow as a file reference with the ``jar``
    parameter, the jar needs to be a self executing jar. Use ``options`` to pass on
    options to your job.
    ```
    t1 = DataFlowJavaOperator(
        task_id='datapflow_example',
        jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
        options={
            'autoscalingAlgorithm': 'BASIC',
            'maxNumWorkers': '50',
            'start': '{{ds}}',
            'partitionType': 'DAY'
        },
        dag=my_dag)
    ```
    Both ``jar`` and ``options`` are templated so you can use variables in them.
    """
    # Fields rendered through Jinja templating before execution.
    template_fields = ['options', 'jar']
    ui_color = '#0273d4'
    @apply_defaults
    def __init__(
            self,
            jar,
            dataflow_default_options=None,
            options=None,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        Create a new DataFlowJavaOperator. Note that both
        dataflow_default_options and options will be merged to specify pipeline
        execution parameter, and dataflow_default_options is expected to save
        high-level options, for instances, project and zone information, which
        apply to all dataflow operators in the DAG.
        For more detail on job submission have a look at the reference:
        https://cloud.google.com/dataflow/pipelines/specifying-exec-params
        :param jar: The reference to a self executing DataFlow jar.
        :type jar: string
        :param dataflow_default_options: Map of default job options.
        :type dataflow_default_options: dict
        :param options: Map of job specific options.
        :type options: dict
        :param gcp_conn_id: The connection ID to use connecting to Google Cloud
        Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(DataFlowJavaOperator, self).__init__(*args, **kwargs)
        dataflow_default_options = dataflow_default_options or {}
        options = options or {}
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.jar = jar
        self.dataflow_default_options = dataflow_default_options
        self.options = options
    def execute(self, context):
        # Stage the jar locally if it lives in GCS, then hand off to the hook.
        bucket_helper = GoogleCloudBucketHelper(
            self.gcp_conn_id, self.delegate_to)
        self.jar = bucket_helper.google_cloud_to_local(self.jar)
        hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to)
        # Task-level options override the DAG-level defaults.
        dataflow_options = copy.copy(self.dataflow_default_options)
        dataflow_options.update(self.options)
        hook.start_java_dataflow(self.task_id, dataflow_options, self.jar)
class DataFlowPythonOperator(BaseOperator):
    """
    Start a Python Cloud DataFlow batch job; ``dataflow_default_options``
    and ``options`` are merged (task options win) and passed to the job.
    """
    @apply_defaults
    def __init__(
            self,
            py_file,
            py_options=None,
            dataflow_default_options=None,
            options=None,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        Create a new DataFlowPythonOperator. Note that both
        dataflow_default_options and options will be merged to specify pipeline
        execution parameter, and dataflow_default_options is expected to save
        high-level options, for instances, project and zone information, which
        apply to all dataflow operators in the DAG.
        For more detail on job submission have a look at the reference:
        https://cloud.google.com/dataflow/pipelines/specifying-exec-params
        :param py_file: Reference to the python dataflow pipleline file, e.g.,
        /some/local/file/path/to/your/python/pipeline/file.py.
        :type py_file: string
        :param py_options: Additional python options.
        :type py_options: list of strings, e.g., ["-m", "-v"].
        :param dataflow_default_options: Map of default job options.
        :type dataflow_default_options: dict
        :param options: Map of job specific options.
        :type options: dict
        :param gcp_conn_id: The connection ID to use connecting to Google Cloud
        Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(DataFlowPythonOperator, self).__init__(*args, **kwargs)
        self.py_file = py_file
        self.py_options = py_options or []
        self.dataflow_default_options = dataflow_default_options or {}
        self.options = options or {}
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to

    def execute(self, context):
        """Execute the python dataflow job."""
        # Stage the pipeline file locally if it lives in GCS.
        bucket_helper = GoogleCloudBucketHelper(
            self.gcp_conn_id, self.delegate_to)
        self.py_file = bucket_helper.google_cloud_to_local(self.py_file)
        hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to)
        # Task-level options override the DAG-level defaults.
        dataflow_options = self.dataflow_default_options.copy()
        dataflow_options.update(self.options)

        def camel_to_snake(name):
            # lowerCamelCase -> snake_case, e.g. 'maxNumWorkers' ->
            # 'max_num_workers'. (Was a PEP 8-discouraged lambda assignment.)
            return re.sub(r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)

        formatted_options = {camel_to_snake(key): dataflow_options[key]
                             for key in dataflow_options}
        hook.start_python_dataflow(
            self.task_id, formatted_options,
            self.py_file, self.py_options)
class GoogleCloudBucketHelper(object):
    """GoogleCloudStorageHook helper class to download GCS object."""

    # Length of the 'gs://' scheme prefix.
    GCS_PREFIX_LENGTH = 5

    def __init__(self,
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None):
        self._gcs_hook = GoogleCloudStorageHook(gcp_conn_id, delegate_to)

    def google_cloud_to_local(self, file_name):
        """
        Checks whether the file specified by file_name is stored in Google Cloud
        Storage (GCS), if so, downloads the file and saves it locally. The full
        path of the saved file will be returned. Otherwise the local file_name
        will be returned immediately.

        :param file_name: The full path of input file.
        :type file_name: string
        :return: The full path of local file.
        :type: string
        :raises Exception: if the GCS path has no object component or the
            downloaded object is empty.
        """
        if not file_name.startswith('gs://'):
            return file_name

        # Extracts bucket_id and object_id by first removing 'gs://' prefix
        # and then splitting the remainder by the path delimiter '/'.
        path_components = file_name[self.GCS_PREFIX_LENGTH:].split('/')
        # Bug fix: the original compared the *list* itself against 2
        # (`path_components < 2`), which is a TypeError on Python 3 and
        # never true on Python 2 -- the validation silently never fired.
        if len(path_components) < 2:
            raise Exception(
                'Invalid Google Cloud Storage (GCS) object path: {}.'
                .format(file_name))

        bucket_id = path_components[0]
        object_id = '/'.join(path_components[1:])
        local_file = '/tmp/dataflow{}-{}'.format(str(uuid.uuid1())[:8],
                                                 path_components[-1])
        file_size = self._gcs_hook.download(bucket_id, object_id, local_file)

        if file_size > 0:
            return local_file
        raise Exception(
            'Failed to download Google Cloud Storage GCS object: {}'
            .format(file_name))
| apache-2.0 |
ericmjl/chameleon | game/player.py | 1 | 3954 | from random import choice
class Player(object):
    """
    A base Python class to represent game players.

    ``chameleon`` is the card value that acts as the wildcard ("chameleon")
    for this player; cards and piles are supplied by the surrounding game.
    """
    def __init__(self, chameleon):
        super(Player, self).__init__()
        self.hand = [] # the five cards that the player has.
        self.penalty = [] # the penalty pile that belongs to the player.
        self.chameleon = chameleon # the card value that is the chameleon card
def deal(self, pile):
    """
    Every Player subclass must implement this method. This method
    specifies how the cards will be dealt to the pile.
    The naive implementation here iterates over the cards and returns the
    first card in the deck that matches either the suite or value criteria.
    More complex criteria can be added, such as implementing a strategy
    for keeping chameleon cards while sacrificing a small card.
    """
    if not self.has_cards():
        pass
    else:
        # 1) Prefer the lowest card matching the pile's current suite.
        if self.has_matching_suite(pile):
            c = sorted([c for c in self.hand if c.suite == pile.current_suite], key=lambda x:x.value)
            c = c[0]
            print('Dealing {0} of {1}'.format(c.value, c.suite))
            pile.receive_card(c, new_suite=None, is_using_chameleon=False)
            self.hand.remove(c)
        # 2) Otherwise play any card matching the pile's current value.
        elif self.has_matching_value(pile):
            c = choice([c for c in self.hand if c.value == pile.current_value])
            print('Dealing {0} of {1}'.format(c.value, c.suite))
            pile.receive_card(c, new_suite=None, is_using_chameleon=False)
            self.hand.remove(c)
        # 3) Otherwise play a chameleon/joker (value 14) and pick a new
        #    suite from the remaining non-wildcard cards.
        elif self.has_chameleon() or self.has_joker():
            c = choice([c for c in self.hand if c.value == self.chameleon or c.value == 14])
            new_suite = choice([c.suite for c in self.hand if c.value != self.chameleon and c.value != 14])
            print('Dealing {0} of {1}'.format(c.value, c.suite))
            # NOTE(review): is_using_chameleon is False even though a
            # chameleon/joker is being played here -- confirm against the
            # pile.receive_card contract; it looks like it should be True.
            pile.receive_card(c, new_suite=new_suite, is_using_chameleon=False)
            self.hand.remove(c)
        # 4) No playable card: move the lowest-value card to the penalty pile.
        else:
            # Choose a card of minimum value.
            c = sorted([c for c in self.hand], key = lambda x:x.value)
            new_suite = None
            c = c[0]
            print("No cards. Penalty with {0} of {1}.".format(c.value, c.suite))
            self.penalty.append(c)
            self.hand.remove(c)
def take_card(self, deck):
    """
    Draw the next card from *deck* into this player's hand, if any remain.
    """
    if not deck.has_cards():
        print('Deck has no more cards.')
        return
    self.hand.append(deck.deal())
def penalty_score(self):
    """
    Total of the card values accumulated in the penalty pile.
    """
    return sum(card.value for card in self.penalty)
def has_chameleon(self):
    """
    True when the player's designated chameleon value is present in hand.
    """
    return any(card.value == self.chameleon for card in self.hand)
def has_joker(self):
    """
    True when a joker (card value 14) is present in hand.
    """
    return any(card.value == 14 for card in self.hand)
def has_matching_suite(self, pile):
    """
    True when any card in hand matches the pile's current suite.
    """
    wanted = pile.current_suite
    return any(card.suite == wanted for card in self.hand)
def has_cards(self):
    """
    True while the player still holds at least one card.
    """
    return len(self.hand) > 0
def has_matching_value(self, pile):
    """
    True when any card in hand matches the pile's current value.
    """
    wanted = pile.current_value
    return any(card.value == wanted for card in self.hand)
def most_common_suite(self, value=None):
"""
Returns the most common suite on hand.
""" | mit |
openstack/zaqar | zaqar/conf/opts.py | 1 | 3623 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Single point of entry to generate the sample configuration file.
This module collects all the necessary info from the other modules in this
package. It is assumed that:
* Every other module in this package has a 'list_opts' function which
returns a dict where:
* The keys are strings which are the group names.
* The value of each key is a list of config options for that group.
* The conf package doesn't have further packages with config options.
* This module is only used in the context of sample file generation.
"""
import collections
import importlib
import os
import pkgutil
LIST_OPTS_FUNC_NAME = 'list_opts'
IGNORED_MODULES = ('opts', 'constants', 'utils')
def list_opts():
    """Collect config options from every sibling module, keyed by group."""
    grouped = collections.defaultdict(list)
    modules = _import_modules(_list_module_names())
    _append_config_options(modules, grouped)
    return _tupleize(grouped)
def list_opts_by_group():
    """Return (group_name, options) pairs; the DEFAULT group maps to None."""
    pairs = []
    for module in _import_modules(_list_module_names()):
        configs = module.list_opts()
        name = None if module.GROUP_NAME == 'DEFAULT' else module.GROUP_NAME
        pairs.append((name, configs[module.GROUP_NAME]))
    return pairs
def _tupleize(d):
    """Flatten a mapping into the list-of-(key, value)-tuples format."""
    return list(d.items())
def _list_module_names():
    """Names of sibling modules, skipping packages and ignored modules."""
    package_path = os.path.dirname(os.path.abspath(__file__))
    return [name
            for _, name, ispkg in pkgutil.iter_modules(path=[package_path])
            if not ispkg and name not in IGNORED_MODULES]
def _import_modules(module_names):
    """Import each named sibling module, verifying it exposes list_opts()."""
    modules = []
    for module_name in module_names:
        full_module_path = '.'.join(__name__.split('.')[:-1] + [module_name])
        module = importlib.import_module(full_module_path)
        if not hasattr(module, LIST_OPTS_FUNC_NAME):
            raise Exception(
                "The module '%s' should have a '%s' function which "
                "returns the config options." % (
                    full_module_path,
                    LIST_OPTS_FUNC_NAME))
        modules.append(module)
    return modules
def _process_old_opts(configs):
    """Normalize legacy 2-tuple config entries into a dict."""
    if isinstance(configs, tuple):
        return dict([configs])
    return dict(configs)
def _append_config_options(imported_modules, config_options):
    # Merge every module's options into *config_options* (a dict of lists),
    # normalizing legacy 2-tuple return values along the way.
    for module in imported_modules:
        configs = module.list_opts()
        # TODO(markus_z): Remove this compatibility shim once all list_opts()
        # functions have been updated to return dicts.
        if not isinstance(configs, dict):
            configs = _process_old_opts(configs)
        for key, val in configs.items():
            config_options[key].extend(val)
| apache-2.0 |
nwchandler/ansible | lib/ansible/modules/system/make.py | 69 | 4784 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: make
short_description: Run targets in a Makefile
requirements: [ make ]
version_added: "2.1"
author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
description:
- Run targets in a Makefile.
options:
target:
description:
- The target to run
required: false
default: none
params:
description:
- Any extra parameters to pass to make
required: false
default: none
chdir:
description:
- cd into this directory before running make
required: true
'''
EXAMPLES = '''
# Build the default target
- make:
chdir: /home/ubuntu/cool-project
# Run `install` target as root
- make:
chdir: /home/ubuntu/cool-project
target: install
become: yes
# Pass in extra arguments to build
- make:
chdir: /home/ubuntu/cool-project
target: all
params:
NUM_THREADS: 4
BACKEND: lapack
'''
# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
# fix this
RETURN = '''# '''
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
def run_command(command, module, check_rc=True):
    """
    Run *command* via the Ansible module helper inside the configured
    'chdir' directory and return sanitized output.

    :param command: list of command arguments
    :param module: Ansible make module instance
    :return: return code, stdout content, stderr content
    """
    rc, raw_out, raw_err = module.run_command(
        command, check_rc=check_rc, cwd=module.params['chdir'])
    return rc, sanitize_output(raw_out), sanitize_output(raw_err)
def sanitize_output(output):
    """
    Normalize command output before reporting: ``None`` becomes the empty
    string and trailing CR/LF characters are stripped.

    :param output: output to sanitize
    :return: sanitized output
    """
    return '' if output is None else output.rstrip("\r\n")
def main():
    """Module entry point: run the requested make target and report state."""
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            target=dict(required=False, default=None, type='str'),
            params=dict(required=False, default=None, type='dict'),
            chdir=dict(required=True, default=None, type='path'),
        ),
    )
    # Build up the invocation of `make` we are going to use
    make_path = module.get_bin_path('make', True)
    make_target = module.params['target']
    if module.params['params'] is not None:
        # Extra params become KEY=value arguments on the make command line.
        make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
    else:
        make_parameters = []
    base_command = [make_path, make_target]
    base_command.extend(make_parameters)
    # Check if the target is already up to date
    # (`make --question` exits non-zero when work would be done).
    rc, out, err = run_command(base_command + ['--question'], module, check_rc=False)
    if module.check_mode:
        # If we've been asked to do a dry run, we only need
        # to report whether or not the target is up to date
        changed = (rc != 0)
    else:
        if rc == 0:
            # The target is up to date, so we don't have to
            # do anything
            changed = False
        else:
            # The target isn't up to date, so we need to run it
            rc, out, err = run_command(base_command, module)
            changed = True
    # We don't report the return code, as if this module failed
    # we would be calling fail_json from run_command, so even if
    # we had a non-zero return code, we did not fail. However, if
    # we report a non-zero return code here, we will be marked as
    # failed regardless of what we signal using the failed= kwarg.
    module.exit_json(
        changed=changed,
        failed=False,
        stdout=out,
        stderr=err,
        target=module.params['target'],
        params=module.params['params'],
        chdir=module.params['chdir']
    )
if __name__ == '__main__':
    main()
| gpl-3.0 |
mgagne/nova | nova/cmd/console.py | 37 | 1284 | # Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Console Proxy."""
import sys
from oslo.config import cfg
from nova import config
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import version
CONF = cfg.CONF
CONF.import_opt('console_topic', 'nova.console.rpcapi')
def main():
    """Parse CLI/config args and run the nova-console service until stopped."""
    config.parse_args(sys.argv)
    logging.setup("nova")
    # register the Guru Meditation report handler (signal-triggered dump)
    gmr.TextGuruMeditation.setup_autorun(version)
    server = service.Service.create(binary='nova-console',
                                    topic=CONF.console_topic)
    service.serve(server)
    service.wait()
| apache-2.0 |
bowang/tensorflow | tensorflow/tools/pip_package/simple_console_for_windows.py | 605 | 1028 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
  """Run an interactive console."""
  # blocks until the user exits the REPL (EOF or SystemExit)
  code.interact()
  return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 |
megalithic/wee-slack | wee_slack.py | 1 | 88248 | # -*- coding: utf-8 -*-
#
from functools import wraps
import time
import json
import os
import pickle
import sha
import re
import urllib
import HTMLParser
import sys
import traceback
import collections
import ssl
from websocket import create_connection,WebSocketConnectionClosedException
# hack to make tests possible.. better way?
try:
import weechat as w
except:
pass
# weechat script registration metadata
SCRIPT_NAME = "slack_extension"
SCRIPT_AUTHOR = "Ryan Huber <rhuber@gmail.com>"
SCRIPT_VERSION = "0.99.9"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Extends weechat for typing notification/search/etc on slack.com"

# how many messages to request when loading a channel's history
BACKLOG_SIZE = 200
# maximum number of Message objects kept in memory per channel
SCROLLBACK_SIZE = 500
# bump to invalidate on-disk message caches with an incompatible layout
CACHE_VERSION = "4"

# maps the generic channel type ("channel"/"im"/"group") to the concrete
# Slack Web API method name for each operation
SLACK_API_TRANSLATOR = {
    "channel": {
        "history": "channels.history",
        "join": "channels.join",
        "leave": "channels.leave",
        "mark": "channels.mark",
        "info": "channels.info",
    },
    "im": {
        "history": "im.history",
        "join": "im.open",
        "leave": "im.close",
        "mark": "im.mark",
    },
    "group": {
        "history": "groups.history",
        "join": "channels.join",
        "leave": "groups.leave",
        "mark": "groups.mark",
    }
}

# nicklist group names; the numeric prefix controls sort order in weechat
NICK_GROUP_HERE = "0|Here"
NICK_GROUP_AWAY = "1|Away"

# TLS options for the websocket: use the system CA bundle when the ssl
# module can report one (get_default_verify_paths is missing on older
# interpreters -- hence the hasattr/callable guard)
sslopt_ca_certs = {}
if hasattr(ssl, "get_default_verify_paths") and callable(ssl.get_default_verify_paths):
    ssl_defaults = ssl.get_default_verify_paths()
    if ssl_defaults.cafile is not None:
        sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
def dbg(message, fout=False, main_buffer=False):
    """
    Send debug output to the slack-debug buffer and optionally append it
    to /tmp/debug.log.

    :param message: text to emit; prefixed with "DEBUG: "
    :param fout: when True, also append the line to /tmp/debug.log
    :param main_buffer: when True, print to the weechat core buffer
        instead of the slack-debug buffer
    """
    message = "DEBUG: {}".format(message)
    #message = message.encode('utf-8', 'replace')
    if fout:
        # BUG FIX: the old `file('/tmp/debug.log', 'a+')` call leaked an
        # open file handle on every invocation; a context manager closes it.
        with open('/tmp/debug.log', 'a+') as debug_log:
            debug_log.write(message + '\n')
    if main_buffer:
        w.prnt("", message)
    else:
        # slack_debug is a module-level buffer pointer created elsewhere
        if slack_debug is not None:
            w.prnt(slack_debug, message)
class SearchList(list):
    """
    A normal python list with some syntactic sugar for searchability:
    items can be looked up by any of their aliases (via get_aliases())
    through an internal hashtable, with a fallback to the items' __eq__.
    """

    def __init__(self):
        # maps every known alias -> item; rebuilt by update_hashtable()
        self.hashtable = {}
        # BUG FIX: list.__init__ was previously handed `self` as the
        # iterable to copy from; initialize an empty list instead.
        super(SearchList, self).__init__()

    def find(self, name):
        """Return the item matching `name` (by alias or __eq__), or None."""
        if name in self.hashtable:
            return self.hashtable[name]
        # fallback to __eq__ if the item isn't in the hashtable already
        if self.count(name) > 0:
            self.update_hashtable()
            return self[self.index(name)]
        return None

    def append(self, item, aliases=None):
        """
        Append `item` and re-index.

        `aliases` is accepted for interface compatibility but never used:
        aliases are always re-read from item.get_aliases() during
        update_hashtable(). (It was also a mutable default before.)
        """
        super(SearchList, self).append(item)
        self.update_hashtable()

    def update_hashtable(self):
        """Re-index every child that exposes get_aliases()."""
        for child in self:
            if hasattr(child, "get_aliases"):
                for alias in child.get_aliases():
                    if alias is not None:
                        self.hashtable[alias] = child

    def find_by_class(self, class_name):
        """Return all children whose class is exactly `class_name`."""
        return [child for child in self if child.__class__ == class_name]

    def find_by_class_deep(self, class_name, attribute):
        """
        Recursively collect children of class `class_name`, descending
        into nested SearchLists via the attribute named `attribute`.
        """
        items = []
        for child in self:
            if child.__class__ == self.__class__:
                items += child.find_by_class_deep(class_name, attribute)
            else:
                # getattr instead of eval: same lookup, no code execution
                items += getattr(child, attribute).find_by_class(class_name)
        return items
class SlackServer(object):
    """
    Root object used to represent connection and state of the connection to a slack group.

    Owns the websocket, the per-server user/bot/channel SearchLists, and
    the weechat server buffer. Construction immediately kicks off an
    async rtm.start request (connect_to_slack).
    """
    def __init__(self, token):
        # identity fields are populated later in connected_to_slack()
        self.nick = None
        self.name = None
        self.team = None
        self.domain = None
        self.server_buffer_name = None
        self.login_data = None
        self.buffer = None
        self.token = token
        # websocket connection and its weechat fd hook
        self.ws = None
        self.ws_hook = None
        self.users = SearchList()
        self.bots = SearchList()
        self.channels = SearchList()
        self.connecting = False
        self.connected = False
        # rolling request id for the websocket (see get_communication_id)
        self.communication_counter = 0
        # in-flight messages keyed by request id, resent after reconnect
        self.message_buffer = {}
        self.ping_hook = None
        self.alias = None
        self.identifier = None
        self.connect_to_slack()

    def __eq__(self, compare_str):
        # a server "equals" its domain, its API token, or its buffer pointer
        if compare_str == self.identifier or compare_str == self.token or compare_str == self.buffer:
            return True
        else:
            return False

    def __str__(self):
        return "{}".format(self.identifier)

    def __repr__(self):
        return "{}".format(self.identifier)

    def add_user(self, user):
        # register both per-server and in the module-global `users` list
        self.users.append(user, user.get_aliases())
        users.append(user, user.get_aliases())

    def add_bot(self, bot):
        self.bots.append(bot)

    def add_channel(self, channel):
        # register both per-server and in the module-global `channels` list
        self.channels.append(channel, channel.get_aliases())
        channels.append(channel, channel.get_aliases())

    def get_aliases(self):
        # filter(None, ...) drops aliases that are still unset
        aliases = filter(None, [self.identifier, self.token, self.buffer, self.alias])
        return aliases

    def find(self, name, attribute):
        # NOTE(review): eval on an attribute name; callers pass fixed
        # strings like "users"/"channels" -- getattr would be safer.
        attribute = eval("self." + attribute)
        return attribute.find(name)

    def get_communication_id(self):
        # wrap at 1000 so ids stay small; only uniqueness among recent
        # in-flight requests matters to slack
        if self.communication_counter > 999:
            self.communication_counter = 0
        self.communication_counter += 1
        return self.communication_counter

    def send_to_websocket(self, data, expect_reply=True):
        """Tag `data` with a request id and send it over the websocket;
        on any failure mark the server disconnected."""
        data["id"] = self.get_communication_id()
        message = json.dumps(data)
        try:
            if expect_reply:
                # remember the payload so it can be resent on reconnect
                self.message_buffer[data["id"]] = data
            self.ws.send(message)
            dbg("Sent {}...".format(message[:100]))
        except:
            dbg("Unexpected error: {}\nSent: {}".format(sys.exc_info()[0], data))
            self.connected = False

    def ping(self):
        request = {"type": "ping"}
        self.send_to_websocket(request)

    def connect_to_slack(self):
        # rtm.start returns the websocket URL plus a full state snapshot;
        # the reply is handled asynchronously by connected_to_slack()
        t = time.time()
        if not self.connecting:
            async_slack_api_request("slack.com", self.token, "rtm.start", {"ts": t})
            self.connecting = True

    def connected_to_slack(self, login_data):
        """
        Process the rtm.start payload: set identity, create the server
        buffer, open the websocket, build user/channel mappings and
        resend any messages queued while disconnected.
        """
        if login_data["ok"]:
            self.team = login_data["team"]["domain"]
            self.domain = login_data["team"]["domain"] + ".slack.com"
            dbg("connected to {}".format(self.domain))
            self.identifier = self.domain
            alias = w.config_get_plugin("server_alias.{}".format(login_data["team"]["domain"]))
            if alias:
                self.server_buffer_name = alias
                self.alias = alias
            else:
                self.server_buffer_name = self.domain
            self.nick = login_data["self"]["name"]
            self.create_local_buffer()
            if self.create_slack_websocket(login_data):
                # replace any stale ping timer from a previous connection
                if self.ping_hook:
                    w.unhook(self.ping_hook)
                    self.communication_counter = 0
                self.ping_hook = w.hook_timer(1000 * 5, 0, 0, "slack_ping_cb", self.domain)
                if len(self.users) == 0 or len(self.channels) == 0:
                    self.create_slack_mappings(login_data)
                self.connected = True
                self.connecting = False
                self.print_connection_info(login_data)
                # resend everything (except pings) queued while offline
                if len(self.message_buffer) > 0:
                    for message_id in self.message_buffer.keys():
                        if self.message_buffer[message_id]["type"] != 'ping':
                            resend = self.message_buffer.pop(message_id)
                            dbg("Resent failed message.")
                            self.send_to_websocket(resend)
                            #sleep to prevent being disconnected by websocket server
                            time.sleep(1)
                        else:
                            self.message_buffer.pop(message_id)
            return True
        else:
            w.prnt("", "\n!! slack.com login error: " + login_data["error"] + "\n Please check your API token with\n \"/set plugins.var.python.slack_extension.slack_api_token (token)\"\n\n ")
            self.connected = False

    def print_connection_info(self, login_data):
        self.buffer_prnt('Connected to Slack', backlog=True)
        self.buffer_prnt('{:<20} {}'.format("Websocket URL", login_data["url"]), backlog=True)
        self.buffer_prnt('{:<20} {}'.format("User name", login_data["self"]["name"]), backlog=True)
        self.buffer_prnt('{:<20} {}'.format("User ID", login_data["self"]["id"]), backlog=True)
        self.buffer_prnt('{:<20} {}'.format("Team name", login_data["team"]["name"]), backlog=True)
        self.buffer_prnt('{:<20} {}'.format("Team domain", login_data["team"]["domain"]), backlog=True)
        self.buffer_prnt('{:<20} {}'.format("Team id", login_data["team"]["id"]), backlog=True)

    def create_local_buffer(self):
        # create the per-server weechat buffer, optionally merged into core
        if not w.buffer_search("", self.server_buffer_name):
            self.buffer = w.buffer_new(self.server_buffer_name, "buffer_input_cb", "", "", "")
            if w.config_string(w.config_get('irc.look.server_buffer')) == 'merge_with_core':
                w.buffer_merge(self.buffer, w.buffer_search_main())
            w.buffer_set(self.buffer, "nicklist", "1")

    def create_slack_websocket(self, data):
        """Open the RTM websocket and hook its fd into weechat's loop.
        Returns True on success, False on any connection error."""
        web_socket_url = data['url']
        try:
            self.ws = create_connection(web_socket_url, sslopt=sslopt_ca_certs)
            self.ws_hook = w.hook_fd(self.ws.sock._sock.fileno(), 1, 0, 0, "slack_websocket_cb", self.identifier)
            # non-blocking so the weechat event loop is never stalled
            self.ws.sock.setblocking(0)
            return True
        except Exception as e:
            print("websocket connection error: {}".format(e))
            return False

    def create_slack_mappings(self, data):
        """Build User/Bot/Channel objects from the rtm.start snapshot."""
        for item in data["users"]:
            self.add_user(User(self, item["name"], item["id"], item["presence"], item["deleted"], is_bot=item.get('is_bot', False)))
        for item in data["bots"]:
            self.add_bot(Bot(self, item["name"], item["id"], item["deleted"]))
        for item in data["channels"]:
            # normalize optional fields before constructing the Channel
            if "last_read" not in item:
                item["last_read"] = 0
            if "members" not in item:
                item["members"] = []
            if "topic" not in item:
                item["topic"] = {}
                item["topic"]["value"] = ""
            if not item["is_archived"]:
                self.add_channel(Channel(self, item["name"], item["id"], item["is_member"], item["last_read"], "#", item["members"], item["topic"]["value"]))
        for item in data["groups"]:
            if "last_read" not in item:
                item["last_read"] = 0
            if not item["is_archived"]:
                # multi-party DMs arrive as groups named "mpdm-..."
                if item["name"].startswith("mpdm-"):
                    self.add_channel(MpdmChannel(self, item["name"], item["id"], item["is_open"], item["last_read"], "#", item["members"], item["topic"]["value"]))
                else:
                    self.add_channel(GroupChannel(self, item["name"], item["id"], item["is_open"], item["last_read"], "#", item["members"], item["topic"]["value"]))
        for item in data["ims"]:
            if "last_read" not in item:
                item["last_read"] = 0
            if item["unread_count"] > 0:
                item["is_open"] = True
            name = self.users.find(item["user"]).name
            self.add_channel(DmChannel(self, name, item["id"], item["is_open"], item["last_read"]))
        # apply the user's muted-channels preference
        for item in data['self']['prefs']['muted_channels'].split(','):
            if item == '':
                continue
            if self.channels.find(item) is not None:
                self.channels.find(item).muted = True
        for item in self.channels:
            item.get_history()

    def buffer_prnt(self, message='no message', user="SYSTEM", backlog=False):
        """Print a line to the server buffer; backlog lines are tagged so
        they neither highlight nor mark the buffer unread."""
        message = message.encode('ascii', 'ignore')
        if backlog:
            tags = "no_highlight,notify_none,logger_backlog_end"
        else:
            tags = ""
        if user == "SYSTEM":
            user = w.config_string(w.config_get('weechat.look.prefix_network'))
        if self.buffer:
            w.prnt_date_tags(self.buffer, 0, tags, "{}\t{}".format(user, message))
        else:
            pass
            #w.prnt("", "%s\t%s" % (user, message))
def buffer_input_cb(b, buffer, data):
    """
    weechat input callback: dispatch text typed into a slack buffer.

    Plain text is sent as a message; "[n]+:emoji:" / "[n]-:emoji:" adds or
    removes a reaction on the n-th most recent message; "s/old/new/" edits
    the previous own message (empty old and new deletes it).
    """
    channel = channels.find(buffer)
    if not channel:
        return w.WEECHAT_RC_OK_EAT
    reaction = re.match("(\d*)(\+|-):(.*):", data)
    if not reaction and not data.startswith('s/'):
        channel.send_message(data)
        #channel.buffer_prnt(channel.server.nick, data)
    elif reaction:
        # group(1) is the optional message offset, defaulting to 1 (latest)
        if reaction.group(2) == "+":
            channel.send_add_reaction(int(reaction.group(1) or 1), reaction.group(3))
        elif reaction.group(2) == "-":
            channel.send_remove_reaction(int(reaction.group(1) or 1), reaction.group(3))
    elif data.count('/') == 3:
        # "s/old/new/" -- exactly three slashes
        old, new = data.split('/')[1:3]
        channel.change_previous_message(old.decode("utf-8"), new.decode("utf-8"))
    channel.mark_read(True)
    # NOTE(review): returns WEECHAT_RC_ERROR even on the success paths --
    # confirm against the weechat plugin API whether this is intentional
    return w.WEECHAT_RC_ERROR
class Channel(object):
    """
    Represents a single channel and is the source of truth
    for channel <> weechat buffer
    """
    def __init__(self, server, name, identifier, active, last_read=0, prepend_name="", members=[], topic=""):
        # NOTE(review): mutable default `members=[]`; harmless here only
        # because set(members) copies it, but worth cleaning up.
        self.name = prepend_name + name
        self.current_short_name = prepend_name + name
        self.identifier = identifier
        self.active = active
        self.last_read = float(last_read)
        self.members = set(members)
        self.topic = topic
        self.members_table = {}
        self.channel_buffer = None
        self.type = "channel"
        self.server = server
        self.typing = {}  # user name -> timestamp of last typing event
        self.last_received = None
        self.messages = []  # Message cache, bounded by SCROLLBACK_SIZE
        self.scrolling = False
        self.last_active_user = None
        self.muted = False
        if active:
            self.create_buffer()
            self.attach_buffer()
            self.create_members_table()
            self.update_nicklist()
            self.set_topic(self.topic)
            buffer_list_update_next()

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name

    def __eq__(self, compare_str):
        # a channel matches its full name, name, slack id, name without
        # the "#" prefix, or its weechat buffer pointer
        if compare_str == self.fullname() or compare_str == self.name or compare_str == self.identifier or compare_str == self.name[1:] or (compare_str == self.channel_buffer and self.channel_buffer is not None):
            return True
        else:
            return False

    def get_aliases(self):
        aliases = [self.fullname(), self.name, self.identifier, self.name[1:], ]
        if self.channel_buffer is not None:
            aliases.append(self.channel_buffer)
        return aliases

    def create_members_table(self):
        # member user-id -> User object
        for user in self.members:
            self.members_table[user] = self.server.users.find(user)

    def create_buffer(self):
        """Create (or reuse) the weechat buffer for this channel."""
        channel_buffer = w.buffer_search("", "{}.{}".format(self.server.server_buffer_name, self.name))
        if channel_buffer:
            self.channel_buffer = channel_buffer
        else:
            self.channel_buffer = w.buffer_new("{}.{}".format(self.server.server_buffer_name, self.name), "buffer_input_cb", self.name, "", "")
            if self.type == "im":
                w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
            else:
                w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
            if self.server.alias:
                w.buffer_set(self.channel_buffer, "localvar_set_server", self.server.alias)
            else:
                w.buffer_set(self.channel_buffer, "localvar_set_server", self.server.team)
            w.buffer_set(self.channel_buffer, "localvar_set_channel", self.name)
            w.buffer_set(self.channel_buffer, "short_name", self.name)
            buffer_list_update_next()

    def attach_buffer(self):
        """Bind this channel to its existing weechat buffer, if any."""
        channel_buffer = w.buffer_search("", "{}.{}".format(self.server.server_buffer_name, self.name))
        if channel_buffer != main_weechat_buffer:
            self.channel_buffer = channel_buffer
            w.buffer_set(self.channel_buffer, "localvar_set_nick", self.server.nick)
            w.buffer_set(self.channel_buffer, "highlight_words", self.server.nick)
        else:
            self.channel_buffer = None
        # the buffer pointer is an alias, so re-index the SearchLists
        channels.update_hashtable()
        self.server.channels.update_hashtable()

    def detach_buffer(self):
        if self.channel_buffer is not None:
            w.buffer_close(self.channel_buffer)
            self.channel_buffer = None
        channels.update_hashtable()
        self.server.channels.update_hashtable()

    def update_nicklist(self, user=None):
        """Refresh the buffer's nicklist: the named user only, or --
        expensively -- the whole member list when `user` is None."""
        if not self.channel_buffer:
            return
        w.buffer_set(self.channel_buffer, "nicklist", "1")
        #create nicklists for the current channel if they don't exist
        #if they do, use the existing pointer
        here = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_HERE)
        if not here:
            here = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_HERE, "weechat.color.nicklist_group", 1)
        afk = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_AWAY)
        if not afk:
            afk = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_AWAY, "weechat.color.nicklist_group", 1)
        if user:
            user = self.members_table[user]
            nick = w.nicklist_search_nick(self.channel_buffer, "", user.name)
            #since this is a change just remove it regardless of where it is
            w.nicklist_remove_nick(self.channel_buffer, nick)
            #now add it back in to whichever..
            if user.presence == 'away':
                w.nicklist_add_nick(self.channel_buffer, afk, user.name, user.color_name, "", "", 1)
            else:
                w.nicklist_add_nick(self.channel_buffer, here, user.name, user.color_name, "", "", 1)
        #if we didn't get a user, build a complete list. this is expensive.
        else:
            try:
                for user in self.members:
                    user = self.members_table[user]
                    if user.deleted:
                        continue
                    if user.presence == 'away':
                        w.nicklist_add_nick(self.channel_buffer, afk, user.name, user.color_name, "", "", 1)
                    else:
                        w.nicklist_add_nick(self.channel_buffer, here, user.name, user.color_name, "", "", 1)
            except Exception as e:
                dbg("DEBUG: {} {} {}".format(self.identifier, self.name, e))

    def fullname(self):
        return "{}.{}".format(self.server.server_buffer_name, self.name)

    def has_user(self, name):
        return name in self.members

    def user_join(self, name):
        self.members.add(name)
        self.create_members_table()
        self.update_nicklist()

    def user_leave(self, name):
        if name in self.members:
            self.members.remove(name)
        self.create_members_table()
        self.update_nicklist()

    def set_active(self):
        self.active = True

    def set_inactive(self):
        self.active = False

    def set_typing(self, user):
        # only track typing for visible buffers; drives the ">" rename
        if self.channel_buffer:
            if w.buffer_get_integer(self.channel_buffer, "hidden") == 0:
                self.typing[user] = time.time()
                buffer_list_update_next()

    def unset_typing(self, user):
        if self.channel_buffer:
            if w.buffer_get_integer(self.channel_buffer, "hidden") == 0:
                try:
                    del self.typing[user]
                    buffer_list_update_next()
                except:
                    pass

    def send_message(self, message):
        message = self.linkify_text(message)
        dbg(message)
        request = {"type": "message", "channel": self.identifier, "text": message, "_server": self.server.domain}
        self.server.send_to_websocket(request)

    def linkify_text(self, message):
        """Rewrite @user/#channel mentions into slack's <@ID>/<#ID|name>
        markup before sending."""
        message = message.split(' ')
        for item in enumerate(message):
            if item[1].startswith('@') and len(item[1]) > 1:
                named = re.match('.*[@#]([\w.]+\w)(\W*)', item[1]).groups()
                # @group/@channel/@here are special broadcast keywords
                if named[0] in ["group", "channel", "here"]:
                    message[item[0]] = "<!{}>".format(named[0])
                if self.server.users.find(named[0]):
                    message[item[0]] = "<@{}>{}".format(self.server.users.find(named[0]).identifier, named[1])
            if item[1].startswith('#') and self.server.channels.find(item[1]):
                named = re.match('.*[@#](\w+)(\W*)', item[1]).groups()
                if self.server.channels.find(named[0]):
                    message[item[0]] = "<#{}|{}>{}".format(self.server.channels.find(named[0]).identifier, named[0], named[1])
        dbg(message)
        return " ".join(message)

    def set_topic(self, topic):
        topic = topic.encode('utf-8')
        w.buffer_set(self.channel_buffer, "title", topic)

    def open(self, update_remote=True):
        self.create_buffer()
        self.active = True
        self.get_history()
        if "info" in SLACK_API_TRANSLATOR[self.type]:
            async_slack_api_request(self.server.domain, self.server.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.name.lstrip("#")})
        if update_remote:
            if "join" in SLACK_API_TRANSLATOR[self.type]:
                async_slack_api_request(self.server.domain, self.server.token, SLACK_API_TRANSLATOR[self.type]["join"], {"name": self.name.lstrip("#")})
                # NOTE(review): this second call keys the join on a *user*
                # id (DM semantics); for a regular channel
                # users.find(self.name) would be None. Looks misplaced or
                # mis-indented -- confirm against upstream wee-slack.
                async_slack_api_request(self.server.domain, self.server.token, SLACK_API_TRANSLATOR[self.type]["join"], {"user": users.find(self.name).identifier})

    def close(self, update_remote=True):
        #remove from cache so messages don't reappear when reconnecting
        if self.active:
            self.active = False
            self.current_short_name = ""
            self.detach_buffer()
        if update_remote:
            t = time.time()
            async_slack_api_request(self.server.domain, self.server.token, SLACK_API_TRANSLATOR[self.type]["leave"], {"channel": self.identifier})

    def closed(self):
        # remote-initiated close: drop buffer state then run local close
        self.channel_buffer = None
        self.last_received = None
        self.close()

    def is_someone_typing(self):
        """True if any typing event is less than 4 seconds old; stale
        entries are flushed as a side effect."""
        for user in self.typing.keys():
            if self.typing[user] + 4 > time.time():
                return True
        if len(self.typing) > 0:
            self.typing = {}
            buffer_list_update_next()
        return False

    def get_typing_list(self):
        typing = []
        for user in self.typing.keys():
            if self.typing[user] + 4 > time.time():
                typing.append(user)
        return typing

    def mark_read(self, update_remote=True):
        t = time.time()
        if self.channel_buffer:
            w.buffer_set(self.channel_buffer, "unread", "")
        if update_remote:
            self.last_read = time.time()
            self.update_read_marker(self.last_read)

    def update_read_marker(self, time):
        async_slack_api_request(self.server.domain, self.server.token, SLACK_API_TRANSLATOR[self.type]["mark"], {"channel": self.identifier, "ts": time})

    def rename(self):
        # prefix ">" while someone is typing in the channel
        if self.is_someone_typing():
            new_name = ">{}".format(self.name[1:])
        else:
            new_name = self.name
        if self.channel_buffer:
            if self.current_short_name != new_name:
                self.current_short_name = new_name
                w.buffer_set(self.channel_buffer, "short_name", new_name)

    def buffer_prnt(self, user='unknown_user', message='no message', time=0):
        """
        writes output (message) to a buffer (channel)
        """
        set_read_marker = False
        time_float = float(time)
        tags = "nick_" + user
        # XXX: we should not set log1 for robots.
        if time_float != 0 and self.last_read >= time_float:
            tags += ",no_highlight,notify_none,logger_backlog_end"
            set_read_marker = True
        elif message.find(self.server.nick.encode('utf-8')) > -1:
            tags = ",notify_highlight,log1"
        elif user != self.server.nick and self.name in self.server.users:
            tags = ",notify_private,notify_message,log1,irc_privmsg"
        elif self.muted:
            tags = ",no_highlight,notify_none,logger_backlog_end"
        # py2-only syntax below: comprehension over the bare tuple
        # (w.prefix("join"), w.prefix("quit"))
        elif user in [x.strip() for x in w.prefix("join"), w.prefix("quit")]:
            tags = ",irc_smart_filter"
        else:
            tags = ",notify_message,log1,irc_privmsg"
        #don't write these to local log files
        #tags += ",no_log"
        time_int = int(time_float)
        if self.channel_buffer:
            prefix_same_nick = w.config_string(w.config_get('weechat.look.prefix_same_nick'))
            if user == self.last_active_user and prefix_same_nick != "":
                if colorize_nicks and self.server.users.find(user):
                    name = self.server.users.find(user).color + prefix_same_nick
                else:
                    name = prefix_same_nick
            else:
                nick_prefix = w.config_string(w.config_get('weechat.look.nick_prefix'))
                nick_suffix = w.config_string(w.config_get('weechat.look.nick_suffix'))
                if self.server.users.find(user):
                    name = self.server.users.find(user).formatted_name()
                    self.last_active_user = user
                # XXX: handle bots properly here.
                else:
                    name = user
                    self.last_active_user = None
                name = nick_prefix + name + nick_suffix
            name = name.decode('utf-8')
            #colorize nicks in each line
            chat_color = w.config_string(w.config_get('weechat.color.chat'))
            if type(message) is not unicode:
                message = message.decode('UTF-8', 'replace')
            curr_color = w.color(chat_color)
            if colorize_nicks and colorize_messages and self.server.users.find(user):
                curr_color = self.server.users.find(user).color
            message = curr_color + message
            for user in self.server.users:
                if user.name in message:
                    message = user.name_regex.sub(
                        r'\1\2{}\3'.format(user.formatted_name() + curr_color),
                        message)
            message = HTMLParser.HTMLParser().unescape(message)
            data = u"{}\t{}".format(name, message).encode('utf-8')
            w.prnt_date_tags(self.channel_buffer, time_int, tags, data)
            if set_read_marker:
                self.mark_read(False)
        else:
            self.open(False)
        self.last_received = time
        self.unset_typing(user)

    def buffer_redraw(self):
        # full redraw: clear and replay every cached message in order
        if self.channel_buffer and not self.scrolling:
            w.buffer_clear(self.channel_buffer)
            self.messages.sort()
            for message in self.messages:
                process_message(message.message_json, False)

    def set_scrolling(self):
        self.scrolling = True

    def unset_scrolling(self):
        self.scrolling = False

    def has_message(self, ts):
        return self.messages.count(ts) > 0

    def change_message(self, ts, text=None, suffix=''):
        if self.has_message(ts):
            message_index = self.messages.index(ts)
            if text is not None:
                self.messages[message_index].change_text(text)
            text = render_message(self.messages[message_index].message_json, True)
            #if there is only one message with this timestamp, modify it directly.
            #we do this because time resolution in weechat is less than slack
            int_time = int(float(ts))
            if self.messages.count(str(int_time)) == 1:
                modify_buffer_line(self.channel_buffer, text + suffix, int_time)
            #otherwise redraw the whole buffer, which is expensive
            else:
                self.buffer_redraw()
            return True

    def add_reaction(self, ts, reaction, user):
        if self.has_message(ts):
            message_index = self.messages.index(ts)
            self.messages[message_index].add_reaction(reaction, user)
            self.change_message(ts)
            return True

    def remove_reaction(self, ts, reaction, user):
        if self.has_message(ts):
            message_index = self.messages.index(ts)
            self.messages[message_index].remove_reaction(reaction, user)
            self.change_message(ts)
            return True

    def send_add_reaction(self, msg_number, reaction):
        self.send_change_reaction("reactions.add", msg_number, reaction)

    def send_remove_reaction(self, msg_number, reaction):
        self.send_change_reaction("reactions.remove", msg_number, reaction)

    def send_change_reaction(self, method, msg_number, reaction):
        # msg_number counts back from the most recent message
        if 0 < msg_number < len(self.messages):
            timestamp = self.messages[-msg_number].message_json["ts"]
            data = {"channel": self.identifier, "timestamp": timestamp, "name": reaction}
            async_slack_api_request(self.server.domain, self.server.token, method, data)

    def change_previous_message(self, old, new):
        """s/old/new/ on our own last message; empty old+new deletes it."""
        message = self.my_last_message()
        if new == "" and old == "":
            async_slack_api_request(self.server.domain, self.server.token, 'chat.delete', {"channel": self.identifier, "ts": message['ts']})
        else:
            new_message = message["text"].replace(old, new)
            async_slack_api_request(self.server.domain, self.server.token, 'chat.update', {"channel": self.identifier, "ts": message['ts'], "text": new_message.encode("utf-8")})

    def my_last_message(self):
        for message in reversed(self.messages):
            if "user" in message.message_json and "text" in message.message_json and message.message_json["user"] == self.server.users.find(self.server.nick).identifier:
                return message.message_json

    def cache_message(self, message_json, from_me=False):
        if from_me:
            message_json["user"] = self.server.users.find(self.server.nick).identifier
        self.messages.append(Message(message_json))
        # keep the cache bounded
        if len(self.messages) > SCROLLBACK_SIZE:
            self.messages = self.messages[-SCROLLBACK_SIZE:]

    def get_history(self):
        """Replay the local message cache, then request only what is
        newer than the last message we have."""
        if self.active:
            for message in message_cache[self.identifier]:
                process_message(json.loads(message), True)
            if self.last_received != None:
                async_slack_api_request(self.server.domain, self.server.token, SLACK_API_TRANSLATOR[self.type]["history"], {"channel": self.identifier, "oldest": self.last_received, "count": BACKLOG_SIZE})
            else:
                async_slack_api_request(self.server.domain, self.server.token, SLACK_API_TRANSLATOR[self.type]["history"], {"channel": self.identifier, "count": BACKLOG_SIZE})
class GroupChannel(Channel):
    """A private group: identical to Channel but routed via the groups.* API."""
    def __init__(self, server, name, identifier, active, last_read=0, prepend_name="", members=[], topic=""):
        super(GroupChannel, self).__init__(server, name, identifier, active, last_read, prepend_name, members, topic)
        self.type = "group"
class MpdmChannel(Channel):
    """A multi-party DM, displayed as a comma-separated member list."""
    def __init__(self, server, name, identifier, active, last_read=0, prepend_name="", members=[], topic=""):
        # raw name looks like "mpdm-user1--user2--user3-1": strip the
        # leading "mpdm" and trailing counter, then join users with ","
        name = ",".join("-".join(name.split("-")[1:-1]).split("--"))
        super(MpdmChannel, self).__init__(server, name, identifier, active, last_read, prepend_name, members, topic)
        self.type = "group"
class DmChannel(Channel):
    """A 1:1 direct-message conversation, named after the remote user."""
    def __init__(self, server, name, identifier, active, last_read=0, prepend_name=""):
        super(DmChannel, self).__init__(server, name, identifier, active, last_read, prepend_name)
        self.type = "im"

    def rename(self):
        global colorize_private_chats
        # short name carries a presence marker: "+" active, " " away
        if self.server.users.find(self.name).presence == "active":
            new_name = self.server.users.find(self.name).formatted_name('+', colorize_private_chats)
        else:
            new_name = self.server.users.find(self.name).formatted_name(' ', colorize_private_chats)
        if self.channel_buffer:
            if self.current_short_name != new_name:
                self.current_short_name = new_name
                w.buffer_set(self.channel_buffer, "short_name", new_name)

    def update_nicklist(self, user=None):
        # DMs have no nicklist
        pass
class User(object):
    """A slack team member: presence, nick coloring and nicklist entry."""
    def __init__(self, server, name, identifier, presence="away", deleted=False, is_bot=False):
        self.server = server
        self.name = name
        self.identifier = identifier
        self.deleted = deleted
        self.presence = presence
        # NOTE(review): `domain` here is a module-level name, not
        # self.server.domain -- confirm it is defined at construction time
        self.channel_buffer = w.info_get("irc_buffer", "{}.{}".format(domain, self.name))
        self.update_color()
        # matches the name as a whole word, optionally @-prefixed; used to
        # colorize mentions inside message bodies
        self.name_regex = re.compile(r"([\W]|\A)(@{0,1})" + self.name + "('s|[^'\w]|\Z)")
        self.is_bot = is_bot
        if deleted:
            return
        self.nicklist_pointer = w.nicklist_add_nick(server.buffer, "", self.name, self.color_name, "", "", 1)
        if self.presence == 'away':
            w.nicklist_nick_set(self.server.buffer, self.nicklist_pointer, "visible", "0")
        else:
            w.nicklist_nick_set(self.server.buffer, self.nicklist_pointer, "visible", "1")
        # w.nicklist_add_nick(server.buffer, "", self.formatted_name(), "", "", "", 1)

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name

    def __eq__(self, compare_str):
        try:
            if compare_str == self.name or compare_str == "@" + self.name or compare_str == self.identifier:
                return True
            else:
                return False
        except:
            return False

    def get_aliases(self):
        return [self.name, "@" + self.name, self.identifier]

    def set_active(self):
        """Mark the user active and refresh every nicklist they appear in."""
        if self.deleted:
            return
        self.presence = "active"
        for channel in self.server.channels:
            if channel.has_user(self.identifier):
                channel.update_nicklist(self.identifier)
        w.nicklist_nick_set(self.server.buffer, self.nicklist_pointer, "visible", "1")
        dm_channel = self.server.channels.find(self.name)
        if dm_channel and dm_channel.active:
            buffer_list_update_next()

    def set_inactive(self):
        """Mark the user away and refresh every nicklist they appear in."""
        if self.deleted:
            return
        self.presence = "away"
        for channel in self.server.channels:
            if channel.has_user(self.identifier):
                channel.update_nicklist(self.identifier)
        w.nicklist_nick_set(self.server.buffer, self.nicklist_pointer, "visible", "0")
        dm_channel = self.server.channels.find(self.name)
        if dm_channel and dm_channel.active:
            buffer_list_update_next()

    def update_color(self):
        # colorize_nicks is a module-level option flag
        if colorize_nicks:
            if self.name == self.server.nick:
                self.color_name = w.config_string(w.config_get('weechat.color.chat_nick_self'))
            else:
                self.color_name = w.info_get('irc_nick_color_name', self.name)
            self.color = w.color(self.color_name)
        else:
            self.color = ""
            self.color_name = ""

    def formatted_name(self, prepend="", enable_color=True):
        if colorize_nicks and enable_color:
            print_color = self.color
        else:
            print_color = ""
        return print_color + prepend + self.name

    def create_dm_channel(self):
        # reply is handled asynchronously and creates the DmChannel
        async_slack_api_request(self.server.domain, self.server.token, "im.open", {"user": self.identifier})
class Bot(object):
    """A slack bot integration: like User, but with no presence/nicklist."""
    def __init__(self, server, name, identifier, deleted=False):
        self.server = server
        self.name = name
        self.identifier = identifier
        self.deleted = deleted
        self.update_color()

    def __eq__(self, compare_str):
        if compare_str == self.identifier or compare_str == self.name:
            return True
        else:
            return False

    def __str__(self):
        return "{}".format(self.identifier)

    def __repr__(self):
        return "{}".format(self.identifier)

    def update_color(self):
        # colorize_nicks is a module-level option flag
        if colorize_nicks:
            self.color_name = w.info_get('irc_nick_color_name', self.name.encode('utf-8'))
            self.color = w.color(self.color_name)
        else:
            self.color_name = ""
            self.color = ""

    def formatted_name(self, prepend="", enable_color=True):
        if colorize_nicks and enable_color:
            print_color = self.color
        else:
            print_color = ""
        return print_color + prepend + self.name
class Message(object):
    """
    Wraps one slack message payload (the raw JSON dict) and exposes
    timestamp-based equality/ordering plus reaction bookkeeping.
    """
    def __init__(self, message_json):
        self.message_json = message_json
        self.ts = message_json['ts']
        # slack ts is "<epoch seconds>.<per-second counter>"
        self.ts_time, self.ts_counter = message_json['ts'].split('.')

    def change_text(self, new_text):
        if not isinstance(new_text, unicode):
            new_text = unicode(new_text, 'utf-8')
        self.message_json["text"] = new_text

    def add_reaction(self, reaction, user):
        """Record `user`'s reaction; idempotent per (reaction, user)."""
        if "reactions" in self.message_json:
            found = False
            for r in self.message_json["reactions"]:
                if r["name"] == reaction:
                    if user not in r["users"]:
                        r["users"].append(user)
                    # BUG FIX: `found` was previously only set when the
                    # user was appended, so reacting twice (or a second
                    # user's delayed event) duplicated the whole entry.
                    found = True
            if not found:
                self.message_json["reactions"].append({u"name": reaction, u"users": [user]})
        else:
            self.message_json["reactions"] = [{u"name": reaction, u"users": [user]}]

    def remove_reaction(self, reaction, user):
        """Drop `user` from the reaction's user list, if present."""
        if "reactions" in self.message_json:
            for r in self.message_json["reactions"]:
                if r["name"] == reaction and user in r["users"]:
                    r["users"].remove(user)
        else:
            pass

    def __eq__(self, other):
        # matches either the full ts or just its integer-seconds part
        return self.ts_time == other or self.ts == other

    def __repr__(self):
        return "{} {} {} {}\n".format(self.ts_time, self.ts_counter, self.ts, self.message_json)

    def __lt__(self, other):
        # BUG FIX: compare numerically; the old plain string comparison
        # ordered "9..." after "10..." (lexicographic, length-sensitive)
        return (int(self.ts_time), int(self.ts_counter)) < (int(other.ts_time), int(other.ts_counter))
def slack_command_cb(data, current_buffer, args):
    """
    Entry point for /slack: the first word selects a handler from the
    module-level `cmds` table; the remainder is passed as its argument.
    """
    a = args.split(' ', 1)
    if len(a) > 1:
        function_name, args = a[0], " ".join(a[1:])
    else:
        function_name, args = a[0], None
    try:
        command = cmds[function_name](current_buffer, args)
    except KeyError:
        w.prnt("", "Command not found: " + function_name)
    return w.WEECHAT_RC_OK
def me_command_cb(data, current_buffer, args):
    """Handle /me by sending the text wrapped in underscores (Slack italics).

    Fix: the original bound ``nick`` (channel.server.nick) and never used it;
    the dead local is removed.
    """
    channel = channels.find(current_buffer)
    if channel:
        message = "_{}_".format(args)
        buffer_input_cb("", current_buffer, message)
    return w.WEECHAT_RC_OK
def join_command_cb(data, current_buffer, args):
    # /join hook: open the named Slack channel, eating the command if handled.
    tokens = args.split()
    if len(tokens) < 2:
        w.prnt(current_buffer, "Missing channel argument")
        return w.WEECHAT_RC_OK_EAT
    if command_talk(current_buffer, tokens[1]):
        return w.WEECHAT_RC_OK_EAT
    return w.WEECHAT_RC_OK
def part_command_cb(data, current_buffer, args):
    # /part hook: close the named channel, or the current one when no name
    # is given. Only acts inside slack-owned buffers.
    if channels.find(current_buffer) or servers.find(current_buffer):
        args = args.split()
        if len(args) > 1:
            # NOTE(review): args[1:] is a *list* slice, not a single name,
            # and it is passed to channels.find() as-is — confirm find()
            # copes with a list, otherwise this likely wants args[1].
            channel = args[1:]
            servers.find(current_domain_name()).channels.find(channel).close(True)
        else:
            channels.find(current_buffer).close(True)
        return w.WEECHAT_RC_OK_EAT
    else:
        return w.WEECHAT_RC_OK
# Wrap command_ functions that require they be performed in a slack buffer
def slack_buffer_required(f):
    """Decorator: run *f* only when invoked from a buffer tied to a slack server."""
    @wraps(f)
    def wrapper(current_buffer, *args, **kwargs):
        if servers.find(current_domain_name()):
            return f(current_buffer, *args, **kwargs)
        w.prnt(current_buffer, "This command must be used in a slack buffer")
    return wrapper
@slack_buffer_required
def msg_command_cb(data, current_buffer, args):
    # /msg hook: open a conversation with *who*, optionally sending a message.
    dbg("msg_command_cb")
    pieces = args.split(None, 2)
    who = pieces[1]
    command_talk(current_buffer, who)
    if len(pieces) > 2:
        server = servers.find(current_domain_name())
        if server:
            target = server.channels.find(who)
            if target:
                target.send_message(pieces[2])
    return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
def command_upload(current_buffer, args):
    """
    Uploads a file to the current buffer
    /slack upload [file_path]
    """
    channel_name = current_buffer_name(short=True)
    domain = current_domain_name()
    token = servers.find(domain).token
    channel = servers.find(domain).channels.find(channel_name)
    if channel and channel.identifier:
        post_data = {
            "token": token,
            "channels": channel.identifier,
            "file": args,
        }
        async_slack_api_upload_request(token, "files.upload", post_data)
def command_talk(current_buffer, args):
    """
    Open a chat with the specified user
    /slack talk [user]
    """
    server = servers.find(current_domain_name())
    if not server:
        return False
    channel = server.channels.find(args)
    if channel is not None:
        # Known conversation: just (re)open it, optionally switching to it.
        channel.open()
        if w.config_get_plugin('switch_buffer_on_join') != '0':
            w.buffer_set(channel.channel_buffer, "display", "1")
        return True
    # No channel by that name: try to start a DM with a matching user.
    user = server.users.find(args)
    if user:
        user.create_dm_channel()
    else:
        server.buffer_prnt("User or channel {} not found.".format(args))
    return True
def command_join(current_buffer, args):
    """
    Join the specified channel
    /slack join [channel]
    """
    domain = current_domain_name()
    if domain == "":
        # Outside a server buffer the target team is ambiguous unless there
        # is exactly one connected server.
        if len(servers) == 1:
            domain = servers[0]
        else:
            w.prnt(current_buffer, "You are connected to multiple Slack instances, please execute /join from a server buffer. i.e. (domain).slack.com")
            return
    channel = servers.find(domain).channels.find(args)
    if channel is None:
        w.prnt(current_buffer, "Channel not found.")
    else:
        channel.open()
@slack_buffer_required
def command_channels(current_buffer, args):
    """
    List all the channels for the slack instance (name, id, active)
    /slack channels
    """
    server = servers.find(current_domain_name())
    for chan in server.channels:
        server.buffer_prnt("{:<25} {} {}".format(chan.name, chan.identifier, chan.active))
def command_nodistractions(current_buffer, args):
    # Toggle visibility of every channel listed in distracting_channels.
    global hide_distractions
    hide_distractions = not hide_distractions
    if distracting_channels != ['']:
        for name in distracting_channels:
            try:
                chan_buffer = channels.find(name).channel_buffer
                if chan_buffer:
                    w.buffer_set(chan_buffer, "hidden", str(int(hide_distractions)))
            except:
                # Best-effort: an unresolvable channel name is only logged.
                dbg("Can't hide channel {}".format(name), main_buffer=True)
def command_distracting(current_buffer, args):
    # Toggle the current channel's membership in the distracting_channels setting.
    global distracting_channels
    setting = w.config_get_plugin("distracting_channels")
    distracting_channels = [x.strip() for x in setting.split(',')]
    channel = channels.find(current_buffer)
    if channel is None:
        w.prnt(current_buffer, "This command must be used in a channel buffer")
        return
    fullname = channel.fullname()
    if fullname in distracting_channels:
        distracting_channels.remove(fullname)
    else:
        distracting_channels.append(fullname)
    w.config_set_plugin('distracting_channels', ','.join(distracting_channels))
@slack_buffer_required
def command_users(current_buffer, args):
    """
    List all the users for the slack instance (name, id, away)
    /slack users
    """
    server = servers.find(current_domain_name())
    for member in server.users:
        server.buffer_prnt("{:<40} {} {}".format(member.formatted_name(), member.identifier, member.presence))
def command_setallreadmarkers(current_buffer, args):
    """
    Sets the read marker for all channels
    /slack setallreadmarkers
    """
    for chan in channels:
        chan.mark_read()
def command_changetoken(current_buffer, args):
    # Store a replacement Slack API token in the plugin configuration.
    w.config_set_plugin('slack_api_token', args)
def command_test(current_buffer, args):
    # Smoke test: prove the dispatcher reaches this handler.
    w.prnt(current_buffer, "worked!")
@slack_buffer_required
def command_away(current_buffer, args):
    """
    Sets your status as 'away'
    /slack away
    """
    server = servers.find(current_domain_name())
    payload = {"presence": "away"}
    async_slack_api_request(server.domain, server.token, 'presence.set', payload)
@slack_buffer_required
def command_back(current_buffer, args):
    """
    Sets your status as 'back'
    /slack back
    """
    server = servers.find(current_domain_name())
    payload = {"presence": "active"}
    async_slack_api_request(server.domain, server.token, 'presence.set', payload)
@slack_buffer_required
def command_markread(current_buffer, args):
    """
    Marks current channel as read
    /slack markread
    """
    channel_name = current_buffer_name(short=True)
    domain = current_domain_name()
    channel = servers.find(domain).channels.find(channel_name)
    if channel:
        channel.mark_read()
def command_flushcache(current_buffer, args):
    # Drop the in-memory message cache, then persist the now-empty cache file.
    global message_cache
    message_cache = collections.defaultdict(list)
    cache_write_cb("", "")
def command_cachenow(current_buffer, args):
    # Force an immediate write of the message cache to disk.
    cache_write_cb("", "")
def command_neveraway(current_buffer, args):
    # Toggle the keep-alive that stops Slack from marking us away.
    global never_away
    never_away = not never_away
    if never_away:
        dbg("set never_away", main_buffer=True)
    else:
        dbg("unset never_away", main_buffer=True)
def command_printvar(current_buffer, args):
    # Debug helper: evaluate *args* as a Python expression and print it.
    # SECURITY: eval() of operator-typed text — acceptable only because this
    # is a local debug command, never fed remote input.
    w.prnt("", "{}".format(eval(args)))
def command_p(current_buffer, args):
    # Short alias for command_printvar: eval *args* and print the result.
    # SECURITY: same eval() caveat — local debug use only.
    w.prnt("", "{}".format(eval(args)))
def command_debug(current_buffer, args):
    # Open (or create) the dedicated slack debug buffer.
    create_slack_debug_buffer()
def command_debugstring(current_buffer, args):
    # Set (or clear, when called with no text) the debug-output filter string.
    global debug_string
    debug_string = args if args != '' else None
def command_search(current_buffer, args):
    # Search is currently disabled; the old synchronous implementation is kept
    # below for reference until an async replacement is written.
    pass
# if not slack_buffer:
#     create_slack_buffer()
# w.buffer_set(slack_buffer, "display", "1")
# query = args
# w.prnt(slack_buffer,"\nSearched for: %s\n\n" % (query))
# reply = slack_api_request('search.messages', {"query":query}).read()
# data = json.loads(reply)
# for message in data['messages']['matches']:
#     message["text"] = message["text"].encode('ascii', 'ignore')
#     formatted_message = "%s / %s:\t%s" % (message["channel"]["name"], message['username'], message['text'])
#     w.prnt(slack_buffer,str(formatted_message))
def command_nick(current_buffer, args):
    # Nick changing is not implemented; the sketch below (web-form scraping)
    # is kept for reference only.
    pass
# urllib.urlopen("https://%s/account/settings" % (domain))
# browser.select_form(nr=0)
# browser.form['username'] = args
# reply = browser.submit()
def command_help(current_buffer, args):
    # Print usage (taken from each handler's docstring) for every /slack
    # subcommand, or only for the one named in *args*.
    help_cmds = {k[8:]: v.__doc__ for k, v in globals().items() if k.startswith("command_")}
    if args:
        if args not in help_cmds:
            w.prnt("", "Command not found: " + args)
            return
        help_cmds = {args: help_cmds[args]}
    for cmd, helptext in help_cmds.items():
        w.prnt('', w.color("bold") + cmd)
        w.prnt('', (helptext or 'No help text').strip())
        w.prnt('', '')
# Websocket handling methods
def command_openweb(current_buffer, args):
    # Dual-purpose callback: invoked as the /slack openweb command (args is
    # None) AND, via the w.hook_timer below, as the timer callback that
    # restores the buffer title (args is then the json payload — see the
    # dirty-hack note).
    trigger = w.config_get_plugin('trigger_value')
    if trigger != "0":
        if args is None:
            channel = channels.find(current_buffer)
            url = "{}/messages/{}".format(channel.server.server_buffer_name, channel.name)
            topic = w.buffer_get_string(channel.channel_buffer, "title")
            # Briefly show "<trigger>:<url>" in the title so an external
            # watcher can open it; the timer restores the topic 1s later.
            w.buffer_set(channel.channel_buffer, "title", "{}:{}".format(trigger, url))
            w.hook_timer(1000, 0, 1, "command_openweb", json.dumps({"topic": topic, "buffer": current_buffer}))
        else:
            # TODO: fix this dirty hack because i don't know the right way to send multiple args.
            # When called from the timer, current_buffer holds the json blob.
            args = current_buffer
            data = json.loads(args)
            channel_buffer = channels.find(data["buffer"]).channel_buffer
            w.buffer_set(channel_buffer, "title", data["topic"])
    return w.WEECHAT_RC_OK
def topic_command_cb(data, current_buffer, args):
    # /topic hook: delegate to command_topic, eating the command when handled.
    if command_topic(current_buffer, args.split(None, 1)[1]):
        return w.WEECHAT_RC_OK_EAT
    return w.WEECHAT_RC_OK
def command_topic(current_buffer, args):
    """
    Change the topic of a channel
    /slack topic [<channel>] [<topic>|-delete]
    """
    server = servers.find(current_domain_name())
    if not server:
        return False
    pieces = args.split(None, 1)
    if pieces[0].startswith('#'):
        # Explicit "#channel topic..." form.
        channel = server.channels.find(pieces[0])
        topic = pieces[1]
    else:
        # No channel given: apply to the current buffer's channel.
        channel = server.channels.find(current_buffer)
        topic = args
    if not channel:
        return False
    new_topic = "" if topic == "-delete" else topic
    async_slack_api_request(server.domain, server.token, 'channels.setTopic', {"channel": channel.identifier, "topic": new_topic})
    return True
def slack_websocket_cb(server, fd):
    # fd-watch callback: read one frame from the server's RTM websocket and
    # dispatch it to the matching process_* handler via the proc table.
    try:
        data = servers.find(server).ws.recv()
        message_json = json.loads(data)
        # this magic attaches json that helps find the right dest
        message_json['_server'] = server
    except WebSocketConnectionClosedException:
        # Remote closed the socket; the reconnect timer will re-establish it.
        servers.find(server).ws.close()
        return w.WEECHAT_RC_OK
    except Exception:
        dbg("socket issue: {}\n".format(traceback.format_exc()))
        return w.WEECHAT_RC_OK
    # dispatch here
    if "reply_to" in message_json:
        function_name = "reply"
    elif "type" in message_json:
        function_name = message_json["type"]
    else:
        function_name = "unknown"
    try:
        proc[function_name](message_json)
    except KeyError:
        # Unhandled event types are only logged, never fatal.
        if function_name:
            dbg("Function not implemented: {}\n{}".format(function_name, message_json))
        else:
            dbg("Function not implemented\n{}".format(message_json))
    w.bar_item_update("slack_typing_notice")
    return w.WEECHAT_RC_OK
def process_reply(message_json):
    # Handle an RTM send-ack: pair it with the pending outbound message kept
    # in server.message_buffer, then cache and print the confirmed message.
    global unfurl_ignore_alt_text
    server = servers.find(message_json["_server"])
    identifier = message_json["reply_to"]
    item = server.message_buffer.pop(identifier)
    # Outbound text may still be a UTF-8 byte string (Python 2); normalize.
    if 'text' in item and type(item['text']) is not unicode:
        item['text'] = item['text'].decode('UTF-8', 'replace')
    if "type" in item:
        if item["type"] == "message" and "channel" in item.keys():
            # Adopt the server-assigned ts so later edits/reactions match it.
            item["ts"] = message_json["ts"]
            channels.find(item["channel"]).cache_message(item, from_me=True)
            text = unfurl_refs(item["text"], ignore_alt_text=unfurl_ignore_alt_text)
            channels.find(item["channel"]).buffer_prnt(item["user"], text, item["ts"])
    dbg("REPLY {}".format(item))
def process_pong(message_json):
    # Server answered our keep-alive ping; nothing further to do.
    pass
def process_pref_change(message_json):
    # React to a user-preference change pushed by Slack; only the mute list
    # is handled, other preference names are just logged.
    server = servers.find(message_json["_server"])
    if message_json['name'] != u'muted_channels':
        dbg("Preference change not implemented: {}\n".format(message_json['name']))
        return
    muted = message_json['value'].split(',')
    for chan in server.channels:
        chan.muted = chan.identifier in muted
def process_team_join(message_json):
    # A new member joined the team: register them and announce it.
    server = servers.find(message_json["_server"])
    info = message_json["user"]
    server.add_user(User(server, info["name"], info["id"], info["presence"]))
    server.buffer_prnt("New user joined: {}".format(info["name"]))
def process_manual_presence_change(message_json):
    # Manual changes carry the same payload shape as automatic ones.
    process_presence_change(message_json)
def process_presence_change(message_json):
    # Flip a user's active/away flag; falls back to our own nick when the
    # event names no user.
    server = servers.find(message_json["_server"])
    who = server.users.find(message_json.get("user", server.nick))
    if message_json["presence"] == 'active':
        who.set_active()
    else:
        who.set_inactive()
def process_channel_marked(message_json):
    # Another client read this channel: clear local unread + hotlist state.
    chan = channels.find(message_json["channel"])
    chan.mark_read(False)
    w.buffer_set(chan.channel_buffer, "hotlist", "-1")
def process_group_marked(message_json):
    # Private groups share read-marker semantics with public channels.
    chan = channels.find(message_json["channel"])
    chan.mark_read(False)
    w.buffer_set(chan.channel_buffer, "hotlist", "-1")
def process_channel_created(message_json):
    # A channel was created on the team: reopen it if already tracked,
    # otherwise register it, then announce the creation.
    server = servers.find(message_json["_server"])
    item = message_json["channel"]
    existing = server.channels.find(item["name"])
    if existing:
        existing.open(False)
    else:
        server.add_channel(Channel(server, item["name"], item["id"], False, prepend_name="#"))
    server.buffer_prnt("New channel created: {}".format(item["name"]))
def process_channel_left(message_json):
    # We left a channel: close the local buffer without re-notifying Slack.
    srv = servers.find(message_json["_server"])
    srv.channels.find(message_json["channel"]).close(False)
def process_channel_join(message_json):
    # Someone joined a channel we're in: print the join line, add the member.
    server = servers.find(message_json["_server"])
    chan = server.channels.find(message_json["channel"])
    join_text = unfurl_refs(message_json["text"], ignore_alt_text=False)
    chan.buffer_prnt(w.prefix("join").rstrip(), join_text, message_json["ts"])
    chan.user_join(message_json["user"])
def process_channel_topic(message_json):
    # Topic changed: print the notice line and update the stored topic.
    server = servers.find(message_json["_server"])
    chan = server.channels.find(message_json["channel"])
    notice = unfurl_refs(message_json["text"], ignore_alt_text=False)
    chan.buffer_prnt(w.prefix("network").rstrip(), notice, message_json["ts"])
    chan.set_topic(message_json["topic"])
def process_channel_joined(message_json):
    # We joined a channel: reopen the existing record or create a fresh one.
    server = servers.find(message_json["_server"])
    item = message_json["channel"]
    existing = server.channels.find(item["name"])
    if existing:
        existing.open(False)
    else:
        server.add_channel(Channel(server, item["name"], item["id"], item["is_open"], item["last_read"], "#", item["members"], item["topic"]["value"]))
def process_channel_leave(message_json):
    # Someone left a channel we're in: print the quit line, drop the member.
    server = servers.find(message_json["_server"])
    chan = server.channels.find(message_json["channel"])
    leave_text = unfurl_refs(message_json["text"], ignore_alt_text=False)
    chan.buffer_prnt(w.prefix("quit").rstrip(), leave_text, message_json["ts"])
    chan.user_leave(message_json["user"])
def process_channel_archive(message_json):
    # Channel was archived: detach its WeeChat buffer.
    server = servers.find(message_json["_server"])
    server.channels.find(message_json["channel"]).detach_buffer()
def process_group_join(message_json):
    # Group joins share the channel-join payload shape.
    process_channel_join(message_json)
def process_group_leave(message_json):
    # Group leaves share the channel-leave payload shape.
    process_channel_leave(message_json)
def process_group_topic(message_json):
    # Group topic changes share the channel-topic payload shape.
    process_channel_topic(message_json)
def process_group_left(message_json):
    # We left a private group: close its buffer without notifying Slack.
    srv = servers.find(message_json["_server"])
    srv.channels.find(message_json["channel"]).close(False)
def process_group_joined(message_json):
    # We joined a private group: reopen the existing record or register it.
    server = servers.find(message_json["_server"])
    item = message_json["channel"]
    existing = server.channels.find(item["name"])
    if existing:
        existing.open(False)
    else:
        server.add_channel(GroupChannel(server, item["name"], item["id"], item["is_open"], item["last_read"], "#", item["members"], item["topic"]["value"]))
def process_group_archive(message_json):
    """Handle a private-group archive event by detaching its buffer.

    Fix: the original referenced an undefined local ``server`` (NameError on
    every archive event); resolve the server from the event payload the same
    way every sibling process_* handler does.
    """
    server = servers.find(message_json["_server"])
    channel = server.channels.find(message_json["channel"])
    channel.detach_buffer()
def process_im_close(message_json):
    # DM closed elsewhere: close the local buffer without notifying Slack.
    srv = servers.find(message_json["_server"])
    srv.channels.find(message_json["channel"]).close(False)
def process_im_open(message_json):
    # DM opened elsewhere: open the local buffer too.
    srv = servers.find(message_json["_server"])
    srv.channels.find(message_json["channel"]).open()
def process_im_marked(message_json):
    # DM read elsewhere: clear unread state; the buffer may not exist yet.
    chan = channels.find(message_json["channel"])
    chan.mark_read(False)
    if chan.channel_buffer is not None:
        w.buffer_set(chan.channel_buffer, "hotlist", "-1")
def process_im_created(message_json):
    # A DM channel was created: open the existing record or register a new
    # DmChannel named after the remote user.
    server = servers.find(message_json["_server"])
    item = message_json["channel"]
    channel_name = server.users.find(item["user"]).name
    if server.channels.find(channel_name):
        server.channels.find(channel_name).open(False)
    else:
        item = message_json["channel"]
        server.add_channel(DmChannel(server, channel_name, item["id"], item["is_open"], item["last_read"]))
    # NOTE(review): item["name"] — this DM payload demonstrably carries
    # "user"/"id"/"is_open"/"last_read" above, but no "name" is read anywhere
    # else; confirm the key exists or this line can raise KeyError.
    server.buffer_prnt("New direct message channel created: {}".format(item["name"]))
def process_user_typing(message_json):
    # Record the typing user's name on the channel for the typing bar item.
    server = servers.find(message_json["_server"])
    chan = server.channels.find(message_json["channel"])
    if chan:
        chan.set_typing(server.users.find(message_json["user"]).name)
def process_bot_enable(message_json):
    # Enable and disable events share one notice-printing handler.
    process_bot_integration(message_json)
def process_bot_disable(message_json):
    # Enable and disable events share one notice-printing handler.
    process_bot_integration(message_json)
def process_bot_integration(message_json):
    # A bot integration was toggled: print the notice in the channel.
    server = servers.find(message_json["_server"])
    chan = server.channels.find(message_json["channel"])
    when = message_json['ts']
    notice = "{} {}".format(server.users.find(message_json['user']).formatted_name(),
                            render_message(message_json))
    bot_name = get_user(message_json, server).encode('utf-8')
    chan.buffer_prnt(bot_name, notice, when)
# todo: does this work?
def process_error(message_json):
    # RTM "error" frames are currently ignored.
    pass
def process_reaction_added(message_json):
    # Attach a reaction to the referenced message (message items only).
    target = message_json["item"]
    if target.get("type") != "message":
        dbg("Reaction to item type not supported: " + str(message_json))
        return
    channels.find(target["channel"]).add_reaction(target["ts"], message_json["reaction"], message_json["user"])
def process_reaction_removed(message_json):
    # Detach a reaction from the referenced message (message items only).
    target = message_json["item"]
    if target.get("type") != "message":
        dbg("Reaction to item type not supported: " + str(message_json))
        return
    channels.find(target["channel"]).remove_reaction(target["ts"], message_json["reaction"], message_json["user"])
def create_reaction_string(reactions):
    # Render a " [:emoji:count ...]" suffix for a message. Collapses to ""
    # whenever no reaction entry has any users — including the non-list
    # placeholder form, which never increments the counter.
    active = 0
    if not isinstance(reactions, list):
        rendered = " [{}]".format(reactions)
    else:
        parts = []
        for entry in reactions:
            if len(entry["users"]) > 0:
                active += 1
                if show_reaction_nicks:
                    nicks = [resolve_ref("@{}".format(u)) for u in entry["users"]]
                    users = "({})".format(",".join(nicks))
                else:
                    users = len(entry["users"])
                parts.append(":{}:{} ".format(entry["name"], users))
        rendered = (' [' + ''.join(parts))[:-1] + ']'
    if active == 0:
        rendered = ''
    return rendered
def modify_buffer_line(buffer, new_line, time):
    # Rewrite an already-printed buffer line in place (used for edits,
    # deletions and reactions) by walking WeeChat's hdata line list backwards
    # until a line with a matching timestamp is found.
    time = int(float(time))
    # get a pointer to this buffer's lines
    own_lines = w.hdata_pointer(w.hdata_get('buffer'), buffer, 'own_lines')
    if own_lines:
        #get a pointer to the last line
        line_pointer = w.hdata_pointer(w.hdata_get('lines'), own_lines, 'last_line')
        #hold the structure of a line and of line data
        struct_hdata_line = w.hdata_get('line')
        struct_hdata_line_data = w.hdata_get('line_data')
        while line_pointer:
            #get a pointer to the data in line_pointer via layout of struct_hdata_line
            data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
            if data:
                date = w.hdata_time(struct_hdata_line_data, data, 'date')
                prefix = w.hdata_string(struct_hdata_line_data, data, 'prefix')
                if int(date) == int(time):
                    #w.prnt("", "found matching time date is {}, time is {} ".format(date, time))
                    # Match on second-resolution timestamp; only the first
                    # (newest) matching line is rewritten.
                    w.hdata_update(struct_hdata_line_data, data, {"message": new_line})
                    break
                else:
                    pass
            #move backwards one line and try again - exit the while if you hit the end
            line_pointer = w.hdata_move(struct_hdata_line, line_pointer, -1)
    return w.WEECHAT_RC_OK
def render_message(message_json, force=False):
    # Build the printable text for a message — fallback/text plus unfurled
    # refs, attachments and reactions — memoizing the result on the message
    # dict under "_rendered_text" unless force is set.
    global unfurl_ignore_alt_text
    #If we already have a rendered version in the object, just return that.
    if not force and message_json.get("_rendered_text", ""):
        return message_json["_rendered_text"]
    else:
        # NOTE(review): server is looked up but not used below.
        server = servers.find(message_json["_server"])
        if "fallback" in message_json:
            text = message_json["fallback"]
        elif "text" in message_json:
            if message_json['text'] is not None:
                text = message_json["text"]
            else:
                text = u""
        else:
            text = u""
        text = unfurl_refs(text, ignore_alt_text=unfurl_ignore_alt_text)
        # Attachments get a leading newline only when body text precedes them.
        text_before = (len(text) > 0)
        text += unfurl_refs(unwrap_attachments(message_json, text_before), ignore_alt_text=unfurl_ignore_alt_text)
        text = text.lstrip()
        text = text.replace("\t", "    ")
        # WeeChat wants byte strings (Python 2).
        text = text.encode('utf-8')
        if "reactions" in message_json:
            text += create_reaction_string(message_json["reactions"])
        message_json["_rendered_text"] = text
        return text
def process_message(message_json, cache=True):
    # Central message handler: route known subtypes to their dedicated
    # handlers, otherwise render and print the message into its channel
    # buffer (and cache it for persistence/edits).
    try:
        # send these subtype messages elsewhere
        known_subtypes = ["message_changed", 'message_deleted', 'channel_join', 'channel_leave', 'channel_topic', 'group_join', 'group_leave', 'group_topic', 'bot_enable', 'bot_disable']
        if "subtype" in message_json and message_json["subtype"] in known_subtypes:
            proc[message_json["subtype"]](message_json)
        else:
            server = servers.find(message_json["_server"])
            channel = channels.find(message_json["channel"])
            #do not process messages in unexpected channels
            if not channel.active:
                channel.open(False)
                dbg("message came for closed channel {}".format(channel.name))
                return
            time = message_json['ts']
            text = render_message(message_json)
            name = get_user(message_json, server)
            name = name.encode('utf-8')
            #special case with actions.
            # "_text_" is the /me convention; print it as an action line,
            # prefixing the sender name unless it's our own nick.
            if text.startswith("_") and text.endswith("_"):
                text = text[1:-1]
                if name != channel.server.nick:
                    text = name + " " + text
                channel.buffer_prnt(w.prefix("action").rstrip(), text, time)
            else:
                suffix = ''
                if 'edited' in message_json:
                    suffix = ' (edited)'
                channel.buffer_prnt(name, text + suffix, time)
            if cache:
                channel.cache_message(message_json)
    except Exception:
        # Best effort: log the failure and fall back to printing raw text.
        channel = channels.find(message_json["channel"])
        dbg("cannot process message {}\n{}".format(message_json, traceback.format_exc()))
        if channel and ("text" in message_json) and message_json['text'] is not None:
            channel.buffer_prnt('unknown', message_json['text'])
def process_message_changed(message_json):
    # A message was edited: merge the inner "message" payload into the outer
    # event, re-render attachments, and rewrite the printed line in place.
    m = message_json["message"]
    # NOTE(review): m was subscripted above, so this membership test can never
    # be False here — a missing "message" key would already have raised.
    if "message" in message_json:
        if "attachments" in m:
            message_json["attachments"] = m["attachments"]
        if "text" in m:
            if "text" in message_json:
                # Append rather than replace, preserving earlier text.
                message_json["text"] += m["text"]
                dbg("added text!")
            else:
                message_json["text"] = m["text"]
        if "fallback" in m:
            if "fallback" in message_json:
                message_json["fallback"] += m["fallback"]
            else:
                message_json["fallback"] = m["fallback"]
    text_before = (len(m['text']) > 0)
    m["text"] += unwrap_attachments(message_json, text_before)
    channel = channels.find(message_json["channel"])
    if "edited" in m:
        channel.change_message(m["ts"], m["text"], ' (edited)')
    else:
        channel.change_message(m["ts"], m["text"])
def process_message_deleted(message_json):
    # Replace the deleted message's text with a tombstone marker.
    chan = channels.find(message_json["channel"])
    chan.change_message(message_json["deleted_ts"], "(deleted)")
def unwrap_attachments(message_json, text_before):
    # Flatten Slack "attachments" into plain text lines, roughly:
    #   $pretext
    #   $author: $title ($title_link)   -- or $from_url
    #   $author: $text                  -- author only if not consumed above
    #   $fields                          -- or $fallback when nothing else
    attachment_text = ''
    if "attachments" in message_json:
        if text_before:
            attachment_text = u'\n'
        for attachment in message_json["attachments"]:
            lines = []
            author_prefix = ''
            if 'author_name' in attachment:
                author_prefix = attachment['author_name'] + ": "
            if 'pretext' in attachment:
                lines.append(attachment['pretext'])
            if "title" in attachment:
                if 'title_link' in attachment:
                    lines.append('{}{} ({})'.format(author_prefix, attachment["title"], attachment["title_link"]))
                else:
                    lines.append(author_prefix + attachment["title"])
                author_prefix = ''
            elif "from_url" in attachment:
                lines.append(attachment["from_url"])
            if "text" in attachment:
                # Collapse runs of blank/padded lines inside the body text.
                body = re.sub(r' *\n[\n ]+', '\n', attachment["text"])
                lines.append(author_prefix + body)
                author_prefix = ''
            if 'fields' in attachment:
                for f in attachment['fields']:
                    if f['title'] != '':
                        lines.append('{} {}'.format(f['title'], f['value']))
                    else:
                        lines.append(f['value'])
            if not lines and "fallback" in attachment:
                lines.append(attachment["fallback"])
            attachment_text += "\n".join([x.strip() for x in lines if x])
    return attachment_text
def resolve_ref(ref):
    # Map "@U..." user ids and "#C..." channel ids to display names;
    # anything else (URLs, plain text) passes through untouched.
    if ref.startswith('@U'):
        match = users.find(ref[1:])
        if match:
            try:
                return "@{}".format(match.name)
            except:
                dbg("NAME: {}".format(ref))
    elif ref.startswith('#C'):
        match = channels.find(ref[1:])
        if match:
            try:
                return "{}".format(match.name)
            except:
                dbg("CHANNEL: {}".format(ref))
    # Something else, just return as-is
    return ref
def unfurl_ref(ref, ignore_alt_text=False):
    # Expand one "<id|alt>" reference body into display text.
    ref_id = ref.split('|')[0]
    if ref.find('|') == -1:
        # No alt text at all: resolve the whole ref.
        return resolve_ref(ref)
    if ignore_alt_text:
        return resolve_ref(ref_id)
    if ref_id.startswith("#C") or ref_id.startswith("@U"):
        # Channel/user refs already carry a readable alt text — use it.
        return ref.split('|')[1]
    url, desc = ref.split('|', 1)
    return u"{} ({})".format(url, desc)
def unfurl_refs(text, ignore_alt_text=False):
    """
    input : <@U096Q7CQM|someuser> has joined the channel
    ouput : someuser has joined the channel
    """
    # Find all strings enclosed by <>
    # - <https://example.com|example with spaces>
    # - <#C2147483705|#otherchannel>
    # - <@U2147483697|@othernick>
    # Test patterns lives in ./_pytest/test_unfurl.py
    for token in re.findall(r"(<[@#]?(?:[^<]*)>)", text):
        # Replace each <...> token with its human-readable expansion.
        text = text.replace(token, unfurl_ref(token[1:-1], ignore_alt_text))
    return text
def get_user(message_json, server):
    # Work out a display name for the message author, in priority order:
    # bot_id lookup, user lookup (bots tagged with " :]"), then the inline
    # username/service_name fields, else empty.
    if 'bot_id' in message_json and message_json['bot_id'] is not None:
        return u"{} :]".format(server.bots.find(message_json["bot_id"]).formatted_name())
    if 'user' in message_json:
        author = server.users.find(message_json['user'])
        if author.is_bot:
            return u"{} :]".format(author.formatted_name())
        return author.name
    if 'username' in message_json:
        return u"-{}-".format(message_json["username"])
    if 'service_name' in message_json:
        return u"-{}-".format(message_json["service_name"])
    return u""
# END Websocket handling methods
def typing_bar_item_cb(data, buffer, args):
    # Bar item: "typing: ..." listing typers in the current channel plus all
    # direct-message channels (prefixed "D/").
    if not [c for c in channels if c.is_someone_typing()]:
        return ""
    dm_typers = []
    for dm in channels.find_by_class(DmChannel):
        dm_typers.extend(dm.get_typing_list())
    dm_typers = ["D/" + name for name in dm_typers]
    here_typers = []
    current = w.current_buffer()
    chan = channels.find(current)
    try:
        if chan and chan.__class__ != DmChannel:
            here_typers = channels.find(current).get_typing_list()
    except:
        w.prnt("", "Bug on {}".format(chan))
    combined = ", ".join(here_typers + dm_typers)
    if len(combined) > 0:
        return w.color('yellow') + "typing: " + combined
    return ""
def typing_update_cb(data, remaining_calls):
    # Timer tick: refresh the typing-notice bar item.
    w.bar_item_update("slack_typing_notice")
    return w.WEECHAT_RC_OK
def buffer_list_update_cb(data, remaining_calls):
    # Timer callback: re-render channel buffer names after a change was
    # flagged via buffer_list_update_next(), throttled to once per second.
    global buffer_list_update
    now = time.time()
    # NOTE(review): previous_buffer_list_update is read here but never
    # advanced anywhere visible, so the 1s throttle may never actually block.
    if buffer_list_update and previous_buffer_list_update + 1 < now:
        # NOTE(review): gray_check is computed but unused below.
        gray_check = False
        if len(servers) > 1:
            gray_check = True
        for channel in channels:
            channel.rename()
        buffer_list_update = False
    return w.WEECHAT_RC_OK
def buffer_list_update_next():
    # Flag that buffer names need re-rendering on the next timer tick.
    global buffer_list_update
    buffer_list_update = True
def hotlist_cache_update_cb(data, remaining_calls):
    # this keeps the hotlist dupe up to date for the buffer switch, but is prob technically a race condition. (meh)
    global hotlist
    stale = hotlist
    hotlist = w.infolist_get("hotlist", "", "")
    w.infolist_free(stale)
    return w.WEECHAT_RC_OK
def buffer_closing_cb(signal, sig_type, data):
    # WeeChat is closing a buffer: let the owning channel clean itself up.
    chan = channels.find(data)
    if chan:
        chan.closed()
    return w.WEECHAT_RC_OK
def buffer_switch_cb(signal, sig_type, data):
    # On buffer change, mark the channel we're leaving as read.
    global previous_buffer, hotlist
    # this is to see if we need to gray out things in the buffer list
    prior = channels.find(previous_buffer)
    if prior:
        prior.mark_read()
    channel_name = current_buffer_name()
    previous_buffer = data
    return w.WEECHAT_RC_OK
def typing_notification_cb(signal, sig_type, data):
    # Send a Slack "typing" event, throttled to once every 4 seconds, and
    # only once the input line is long enough to look like a real message.
    if len(w.buffer_get_string(data, "input")) > 8:
        global typing_timer
        now = time.time()
        if typing_timer + 4 < now:
            chan = channels.find(current_buffer_name())
            if chan:
                payload = {"type": "typing", "channel": chan.identifier}
                chan.server.send_to_websocket(payload, expect_reply=False)
                typing_timer = now
    return w.WEECHAT_RC_OK
def slack_ping_cb(data, remaining):
    """Periodic websocket ping so broken connections get noticed."""
    servers.find(data).ping()
    return w.WEECHAT_RC_OK
def slack_connection_persistence_cb(data, remaining_calls):
    """Reconnect any server whose websocket connection has dropped."""
    for server in servers:
        if server.connected:
            continue
        server.buffer_prnt("Disconnected from slack, trying to reconnect..")
        # Drop the stale fd hook before reconnecting.
        if server.ws_hook is not None:
            w.unhook(server.ws_hook)
        server.connect_to_slack()
    return w.WEECHAT_RC_OK
def slack_never_away_cb(data, remaining):
    # When enabled, fake typing in the slackbot DM so Slack keeps us "active".
    global never_away
    if never_away:
        for server in servers:
            payload = {"type": "typing",
                       "channel": server.channels.find("slackbot").identifier}
            server.send_to_websocket(payload, expect_reply=False)
    return w.WEECHAT_RC_OK
def nick_completion_cb(data, completion_item, buffer, completion):
    """
    Adds all @-prefixed nicks to completion list
    """
    channel = channels.find(buffer)
    if channel is None or channel.members is None:
        return w.WEECHAT_RC_OK
    for member in channel.members:
        user = channel.server.users.find(member)
        w.hook_completion_list_add(completion, "@" + user.name, 1, w.WEECHAT_LIST_POS_SORT)
    return w.WEECHAT_RC_OK
def complete_next_cb(data, buffer, command):
    """Extract current word, if it is equal to a nick, prefix it with @ and
    rely on nick_completion_cb adding the @-prefixed versions to the
    completion lists, then let Weechat's internal completion do its
    thing
    """
    channel = channels.find(buffer)
    if channel is None or channel.members is None:
        return w.WEECHAT_RC_OK
    input = w.buffer_get_string(buffer, "input")
    current_pos = w.buffer_get_integer(buffer, "input_pos") - 1
    input_length = w.buffer_get_integer(buffer, "input_length")
    word_start = 0
    word_end = input_length
    # If we're on a non-word, look left for something to complete
    while current_pos >= 0 and input[current_pos] != '@' and not input[current_pos].isalnum():
        current_pos = current_pos - 1
    if current_pos < 0:
        current_pos = 0
    # Scan left for the start of the word under the cursor ('@' counts as
    # part of the word so an existing prefix isn't doubled).
    for l in range(current_pos, 0, -1):
        if input[l] != '@' and not input[l].isalnum():
            word_start = l + 1
            break
    # Scan right for the end of the word.
    for l in range(current_pos, input_length):
        if not input[l].isalnum():
            word_end = l
            break
    word = input[word_start:word_end]
    for m in channel.members:
        user = channel.server.users.find(m)
        if user.name == word:
            # Here, we cheat. Insert a @ in front and rely in the @
            # nicks being in the completion list
            w.buffer_set(buffer, "input", input[:word_start] + "@" + input[word_start:])
            w.buffer_set(buffer, "input_pos", str(w.buffer_get_integer(buffer, "input_pos") + 1))
            return w.WEECHAT_RC_OK_EAT
    return w.WEECHAT_RC_OK
# Slack specific requests
# NOTE: switched to async because sync slowed down the UI
def async_slack_api_request(domain, token, request, post_data, priority=False):
    # Fire a Slack Web API call through WeeChat's async URL transport; the
    # response lands in url_processor_cb together with the pickled context.
    if STOP_TALKING_TO_SLACK:
        return
    post_data["token"] = token
    url = 'url:https://{}/api/{}?{}'.format(domain, request, urllib.urlencode(post_data))
    context = pickle.dumps({"request": request, "token": token, "post_data": post_data})
    params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
    dbg("URL: {} context: {} params: {}".format(url, context, params))
    w.hook_process_hashtable(url, params, 20000, "url_processor_cb", context)
def async_slack_api_upload_request(token, request, post_data, priority=False):
    # Upload via curl since WeeChat's url transport can't do multipart posts.
    if STOP_TALKING_TO_SLACK:
        return
    url = 'https://slack.com/api/{}'.format(request)
    file_path = os.path.expanduser(post_data["file"])
    # SECURITY NOTE: file_path and token are interpolated into the command
    # line unescaped; paths containing spaces or shell metacharacters break.
    command = 'curl -F file=@{} -F channels={} -F token={} {}'.format(file_path, post_data["channels"], token, url)
    context = pickle.dumps({"request": request, "token": token, "post_data": post_data})
    w.hook_process(command, 20000, "url_processor_cb", context)
# Accumulator for partial HTTP responses keyed by request identifier — the
# async transport may deliver a reply in several chunks. (funny, right?)
big_data = {}
def url_processor_cb(data, command, return_code, out, err):
    """hook_process(_hashtable) callback: reassemble chunked HTTP output.

    data is the pickled request context set up by async_slack_api_request.
    Output chunks are accumulated in big_data until return_code signals the
    transfer finished (0) or failed (anything but -1, which means "still
    running"), then the JSON payload is parsed and dispatched.
    """
    global big_data
    data = pickle.loads(data)
    identifier = sha.sha("{}{}".format(data, command)).hexdigest()
    if identifier not in big_data:
        big_data[identifier] = ''
    big_data[identifier] += out
    if return_code == 0:
        try:
            my_json = json.loads(big_data[identifier])
        except ValueError:
            # Fix: narrowed from a bare except. json.loads signals a bad or
            # truncated payload with ValueError; anything else should surface.
            dbg("request failed, doing again...")
            dbg("response length: {} identifier {}\n{}".format(len(big_data[identifier]), identifier, data))
            my_json = False

        big_data.pop(identifier, None)
        if my_json:
            if data["request"] == 'rtm.start':
                servers.find(data["token"]).connected_to_slack(my_json)
                servers.update_hashtable()
            else:
                if "channel" in data["post_data"]:
                    channel = data["post_data"]["channel"]
                token = data["token"]
                if "messages" in my_json:
                    # Fix: list.reverse() reverses in place and returns None;
                    # the old code bound that None to an unused name, which
                    # read as if a reversed copy were being used.
                    my_json["messages"].reverse()
                    for message in my_json["messages"]:
                        message["_server"] = servers.find(token).domain
                        message["channel"] = servers.find(token).channels.find(channel).identifier
                        process_message(message)
                if "channel" in my_json:
                    if "members" in my_json["channel"]:
                        channels.find(my_json["channel"]["id"]).members = set(my_json["channel"]["members"])
    else:
        if return_code != -1:
            # A real failure: drop whatever partial output we collected.
            big_data.pop(identifier, None)
            dbg("return code: {}, data: {}, output: {}, error: {}".format(return_code, data, out, err))
    return w.WEECHAT_RC_OK
def cache_write_cb(data, remaining):
    """Timer callback: persist every active channel's messages to disk.

    Fix: the file handle was opened and never closed (resource leak, and
    data could stay unflushed on interpreter teardown). A with-statement
    closes and flushes it even if serialization raises.
    """
    with open("{}/{}".format(WEECHAT_HOME, CACHE_NAME), 'w') as cache_file:
        # First line is a format-version stamp checked by cache_load().
        cache_file.write(CACHE_VERSION + "\n")
        for channel in channels:
            if channel.active:
                for message in channel.messages:
                    cache_file.write("{}\n".format(json.dumps(message.message_json)))
    return w.WEECHAT_RC_OK
def cache_load():
    """Load cached messages into message_cache, keyed by channel id.

    Fix: close the cache file deterministically via a context manager;
    the old code leaked the handle until garbage collection.
    """
    global message_cache
    file_name = "{}/{}".format(WEECHAT_HOME, CACHE_NAME)
    try:
        with open(file_name, 'r') as cache_file:
            # Only trust caches written by this exact format version.
            if cache_file.readline() == CACHE_VERSION + "\n":
                dbg("Loading messages from cache.", main_buffer=True)
                for line in cache_file:
                    j = json.loads(line)
                    # Store the raw JSON line; consumers re-parse on replay.
                    message_cache[j["channel"]].append(line)
                dbg("Completed loading messages from cache.", main_buffer=True)
    except IOError:
        # First run, or the cache was removed: not an error.
        w.prnt("", "cache file not found")
# END Slack specific requests
# Utility Methods
def current_domain_name():
    """Return the Slack domain that owns the current buffer.

    Server buffers are resolved directly; for channel buffers the domain
    is recovered by stripping the last dot-separated component of the
    buffer name (e.g. "myteam.slack.com.general" -> "myteam.slack.com").
    """
    buffer = w.current_buffer()
    server = servers.find(buffer)
    if server:
        return server.domain
    #number = w.buffer_get_integer(buffer, "number")
    full_name = w.buffer_get_string(buffer, "name")
    return ".".join(full_name.split(".")[:-1])
def current_buffer_name(short=False):
    """Return the current buffer's full name.

    With short=True, only the last dot-separated component is returned
    (the bare channel name).

    Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to Exception while keeping the best-effort behavior.
    """
    buffer = w.current_buffer()
    #number = w.buffer_get_integer(buffer, "number")
    name = w.buffer_get_string(buffer, "name")
    if short:
        try:
            name = name.split('.')[-1]
        except Exception:
            pass
    return name
def closed_slack_buffer_cb(data, buffer):
    # Buffer-close callback: drop the reference so create_slack_buffer()
    # recreates the buffer next time it is needed.
    global slack_buffer
    slack_buffer = None
    return w.WEECHAT_RC_OK
def create_slack_buffer():
    # Create the main "slack" command/output buffer with notifications off.
    global slack_buffer
    slack_buffer = w.buffer_new("slack", "", "", "closed_slack_buffer_cb", "")
    w.buffer_set(slack_buffer, "notify", "0")
    #w.buffer_set(slack_buffer, "display", "1")
    return w.WEECHAT_RC_OK
def closed_slack_debug_buffer_cb(data, buffer):
    # Debug-buffer close callback: clear the handle so the buffer can be
    # lazily recreated by create_slack_debug_buffer().
    global slack_debug
    slack_debug = None
    return w.WEECHAT_RC_OK
def create_slack_debug_buffer():
    """Show the slack-debug buffer, creating it lazily on first use."""
    global slack_debug, debug_string
    if slack_debug is None:
        debug_string = None
        slack_debug = w.buffer_new("slack-debug", "", "", "closed_slack_debug_buffer_cb", "")
        w.buffer_set(slack_debug, "notify", "0")
    else:
        # Already exists: just bring it to the front.
        w.buffer_set(slack_debug, "display", "1")
def config_changed_cb(data, option, value):
    """Re-read every plugin option into module globals.

    Hooked on plugin config changes and also invoked once at script load.
    """
    global slack_api_token, distracting_channels, colorize_nicks, colorize_private_chats, slack_debug, debug_mode, \
        unfurl_ignore_alt_text, colorize_messages, show_reaction_nicks

    token_setting = w.config_get_plugin("slack_api_token")
    if token_setting.startswith('${sec.data'):
        # Resolve weechat secured-data references into the real token.
        token_setting = w.string_eval_expression(token_setting, {}, {}, {})
    slack_api_token = token_setting

    distracting_channels = [name.strip() for name in w.config_get_plugin("distracting_channels").split(',')]
    colorize_nicks = w.config_get_plugin('colorize_nicks') == "1"
    colorize_messages = w.config_get_plugin("colorize_messages") == "1"

    debug_mode = w.config_get_plugin("debug_mode").lower()
    if debug_mode not in ('', 'false'):
        create_slack_debug_buffer()

    colorize_private_chats = w.config_string_to_boolean(w.config_get_plugin("colorize_private_chats"))
    show_reaction_nicks = w.config_string_to_boolean(w.config_get_plugin("show_reaction_nicks"))
    unfurl_ignore_alt_text = w.config_get_plugin('unfurl_ignore_alt_text') != "0"

    return w.WEECHAT_RC_OK
def quit_notification_cb(signal, sig_type, data):
    """quit signal handler: flush the cache and stop Slack traffic.

    Fix: return w.WEECHAT_RC_OK like every other signal callback in this
    file (weechat expects a return-code constant from signal hooks; this
    one implicitly returned None).
    """
    stop_talking_to_slack()
    return w.WEECHAT_RC_OK
def script_unloaded():
    # Registered with w.register(); runs when the script is unloaded.
    stop_talking_to_slack()
    return w.WEECHAT_RC_OK
def stop_talking_to_slack():
    """
    Prevents a race condition where quitting closes buffers
    which triggers leaving the channel because of how close
    buffer is handled
    """
    global STOP_TALKING_TO_SLACK
    STOP_TALKING_TO_SLACK = True
    # Persist messages one last time before going silent.
    cache_write_cb("", "")
    return w.WEECHAT_RC_OK
def scrolled_cb(signal, sig_type, data):
    """window_scrolled handler: track whether the active channel is
    scrolled back in history (used to suppress read-marker updates).

    Fix: narrowed the bare `except:` — it also swallowed
    KeyboardInterrupt/SystemExit. Exception still covers the expected
    failure (channels.find() returning None for non-slack buffers).
    """
    try:
        channel = channels.find(w.current_buffer())
        if w.window_get_integer(data, "scrolling") == 1:
            channel.set_scrolling()
        else:
            channel.unset_scrolling()
    except Exception:
        # Current buffer is not a slack channel; nothing to track.
        pass
    return w.WEECHAT_RC_OK
# END Utility Methods
# Main
if __name__ == "__main__":

    # Register the script; "script_unloaded" runs when the script unloads.
    if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
                  SCRIPT_DESC, "script_unloaded", ""):

        version = w.info_get("version_number", "") or 0
        # hook_process_hashtable and friends require weechat >= 1.3.
        if int(version) < 0x1030000:
            w.prnt("", "\nERROR: Weechat version 1.3+ is required to use {}.\n\n".format(SCRIPT_NAME))
        else:
            WEECHAT_HOME = w.info_get("weechat_dir", "")
            CACHE_NAME = "slack.cache"
            STOP_TALKING_TO_SLACK = False

            # Seed default plugin settings on first run (unset options only).
            if not w.config_get_plugin('slack_api_token'):
                w.config_set_plugin('slack_api_token', "INSERT VALID KEY HERE!")
            if not w.config_get_plugin('distracting_channels'):
                w.config_set_plugin('distracting_channels', "")
            if not w.config_get_plugin('debug_mode'):
                w.config_set_plugin('debug_mode', "")
            if not w.config_get_plugin('colorize_nicks'):
                w.config_set_plugin('colorize_nicks', "1")
            if not w.config_get_plugin('colorize_messages'):
                w.config_set_plugin('colorize_messages', "0")
            if not w.config_get_plugin('colorize_private_chats'):
                w.config_set_plugin('colorize_private_chats', "0")
            if not w.config_get_plugin('trigger_value'):
                w.config_set_plugin('trigger_value', "0")
            if not w.config_get_plugin('unfurl_ignore_alt_text'):
                w.config_set_plugin('unfurl_ignore_alt_text', "0")
            if not w.config_get_plugin('switch_buffer_on_join'):
                w.config_set_plugin('switch_buffer_on_join', "1")
            if not w.config_get_plugin('show_reaction_nicks'):
                w.config_set_plugin('show_reaction_nicks', "0")
            # Obsolete option: remove it if present.
            if w.config_get_plugin('channels_not_on_current_server_color'):
                w.config_option_unset('channels_not_on_current_server_color')

            # Global var section
            slack_debug = None
            config_changed_cb("", "", "")

            # Discover /slack subcommands and message processors by naming
            # convention (command_* / process_* functions in this module).
            cmds = {k[8:]: v for k, v in globals().items() if k.startswith("command_")}
            proc = {k[8:]: v for k, v in globals().items() if k.startswith("process_")}

            typing_timer = time.time()
            domain = None
            previous_buffer = None
            slack_buffer = None
            buffer_list_update = False
            previous_buffer_list_update = 0
            never_away = False
            hide_distractions = False
            hotlist = w.infolist_get("hotlist", "", "")
            # Intentionally-nonexistent buffer name yields a stable sentinel.
            main_weechat_buffer = w.info_get("irc_buffer", "{}.{}".format(domain, "DOESNOTEXIST!@#$"))

            message_cache = collections.defaultdict(list)
            cache_load()

            # One SlackServer per comma-separated API token.
            servers = SearchList()
            for token in slack_api_token.split(','):
                server = SlackServer(token)
                servers.append(server)
            channels = SearchList()
            users = SearchList()

            w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_changed_cb", "")
            w.hook_timer(3000, 0, 0, "slack_connection_persistence_cb", "")

            # attach to the weechat hooks we need
            w.hook_timer(1000, 0, 0, "typing_update_cb", "")
            w.hook_timer(1000, 0, 0, "buffer_list_update_cb", "")
            w.hook_timer(1000, 0, 0, "hotlist_cache_update_cb", "")
            # 29-minute keepalive: Slack marks users idle after 30 minutes.
            w.hook_timer(1000 * 60 * 29, 0, 0, "slack_never_away_cb", "")
            w.hook_timer(1000 * 60 * 5, 0, 0, "cache_write_cb", "")
            w.hook_signal('buffer_closing', "buffer_closing_cb", "")
            w.hook_signal('buffer_switch', "buffer_switch_cb", "")
            w.hook_signal('window_switch', "buffer_switch_cb", "")
            w.hook_signal('input_text_changed', "typing_notification_cb", "")
            w.hook_signal('quit', "quit_notification_cb", "")
            w.hook_signal('window_scrolled', "scrolled_cb", "")
            w.hook_command(
                # Command name and description
                'slack', 'Plugin to allow typing notification and sync of read markers for slack.com',
                # Usage
                '[command] [command options]',
                # Description of arguments
                'Commands:\n' +
                '\n'.join(cmds.keys()) +
                '\nUse /slack help [command] to find out more\n',
                # Completions
                '|'.join(cmds.keys()),
                # Function name
                'slack_command_cb', '')
            # w.hook_command('me', 'me_command_cb', '')
            w.hook_command('me', '', 'stuff', 'stuff2', '', 'me_command_cb', '')
            w.hook_command_run('/query', 'join_command_cb', '')
            w.hook_command_run('/join', 'join_command_cb', '')
            w.hook_command_run('/part', 'part_command_cb', '')
            w.hook_command_run('/leave', 'part_command_cb', '')
            w.hook_command_run('/topic', 'topic_command_cb', '')
            w.hook_command_run('/msg', 'msg_command_cb', '')
            w.hook_command_run("/input complete_next", "complete_next_cb", "")
            w.hook_completion("nicks", "complete @-nicks for slack",
                              "nick_completion_cb", "")
            w.bar_item_new('slack_typing_notice', 'typing_bar_item_cb', '')
            # END attach to the weechat hooks we need
| mit |
mikekap/batchy | batchy/clients/memcached.py | 1 | 4404 | from collections import defaultdict
from itertools import chain
from ..compat import iteritems, itervalues
from ..runloop import coro_return, runloop_coroutine
from ..batch_coroutine import class_batch_coroutine
class BatchMemcachedClient(object):
    """Memcached client wrapper that batches concurrent single-key
    operations into *_multi calls via batchy's coroutine runloop.

    Single-key methods (get/set/add/delete) delegate to their batched
    *_multi counterparts; the class_batch_coroutine decorator collects
    every concurrent call's (args, kwargs) into one list and invokes the
    method once per tick.
    """

    def __init__(self, real_client):
        # real_client: any object exposing the python-memcached/pylibmc API.
        self.client = real_client

    @runloop_coroutine()
    def get(self, k):
        results = yield self.get_multi([k])
        # next(..., None): a missing key yields None rather than KeyError.
        coro_return(next(itervalues(results), None))

    @class_batch_coroutine(0)
    def get_multi(self, args_list):
        """get_multi(iterable_of_keys, key_prefix=b'')"""
        saved_key_lists = []
        for args, kwargs in args_list:
            # NOTE(review): 'str' + args raises TypeError if this assert ever
            # fires (args is a tuple) — the message should use format().
            assert len(args) == 1, 'get_multi only accepts a single argument: ' + args
            key_prefix = kwargs.pop('key_prefix', b'')
            assert not kwargs, 'get_multi only accepts the `key_prefix` kwarg'
            # In case args[0] is a generator, save the entire list for later merging.
            saved_key_lists.append([key_prefix + k for k in args[0]])
        # One backend round-trip for the union of every caller's keys.
        results = self.client.get_multi(frozenset(chain.from_iterable(saved_key_lists)))
        # One result dict per batched caller, in call order.
        coro_return([{k: results[k] for k in lst if k in results}
                     for lst in saved_key_lists])
        yield  # pragma: no cover

    @runloop_coroutine()
    def set(self, key, value, time=0):
        failed = yield self.set_multi({key: value}, time=time)
        # set_multi reports failed keys; success means ours is not among them.
        coro_return(key not in failed)

    @class_batch_coroutine(0)
    def set_multi(self, args):
        """set_multi(dict, key_prefix=b'', time=0)"""
        coro_return(self._do_set_command(self.client.set_multi, args))
        yield  # pragma: no cover

    @runloop_coroutine()
    def delete(self, key, time=None):
        yield self.delete_multi([key], time=time)

    @class_batch_coroutine(0)
    def delete_multi(self, args_list):
        """delete_multi(iterable, time=0, key_prefix=b'')"""
        # Group keys by their `time` argument so each distinct expiry
        # becomes a single backend delete_multi call.
        by_time = defaultdict(set)
        def fill_by_time(it, key_prefix=b'', time=None):
            by_time[time].update(key_prefix + k for k in it)
        for ar, kw in args_list:
            fill_by_time(*ar, **kw)
        for time, d in iteritems(by_time):
            # Only pass `time` through when the caller supplied one.
            self.client.delete_multi(d, **({'time': time} if time is not None else {}))
        coro_return(None)
        yield  # pragma: no cover

    @runloop_coroutine()
    def add(self, key, value, time=0):
        failed = yield self.add_multi({key: value}, time=time)
        coro_return(key not in failed)

    @class_batch_coroutine(0)
    def add_multi(self, args_list):
        """add_multi(dict, key_prefix=b'', time=0)"""
        coro_return(self._do_set_command(self.client.add_multi, args_list))
        yield  # pragma: no cover

    def _do_set_command(self, fn, args):
        """add & set implementation."""
        # Merge every caller's mapping, grouped by expiry time, then issue
        # one backend call per distinct expiry.
        by_time = defaultdict(dict)
        def fill_by_time(d, key_prefix=b'', time=0):
            by_time[time].update((key_prefix + k, v) for k, v in iteritems(d))
        for ar, kw in args:
            fill_by_time(*ar, **kw)
        failed_keys = frozenset(chain.from_iterable(
            fn(d, time=time)
            for time, d in iteritems(by_time)))
        # Report back, per caller, which of its own keys failed.
        return [list(failed_keys & frozenset(ar[0].keys())) for ar, _ in args]

    @runloop_coroutine()
    def incr(self, *args, **kwargs):
        coro_return(self.client.incr(*args, **kwargs))
        yield  # pragma: no cover

    @runloop_coroutine()
    def incr_multi(self, *args, **kwargs):
        """pylibmc's incr_multi is NOT a superset of incr - it does not return the new value."""
        coro_return(self.client.incr_multi(*args, **kwargs))
        yield  # pragma: no cover

    @runloop_coroutine()
    def decr(self, *args, **kwargs):
        coro_return(self.client.decr(*args, **kwargs))
        yield  # pragma: no cover

    @runloop_coroutine()
    def replace(self, *args, **kwargs):
        coro_return(self.client.replace(*args, **kwargs))
        yield  # pragma: no cover

    @runloop_coroutine()
    def append(self, *args, **kwargs):
        coro_return(self.client.append(*args, **kwargs))
        yield  # pragma: no cover

    @runloop_coroutine()
    def prepend(self, *args, **kwargs):
        coro_return(self.client.prepend(*args, **kwargs))
        yield  # pragma: no cover

    @runloop_coroutine()
    def flush_all(self):
        self.client.flush_all()
        yield  # pragma: no cover
| apache-2.0 |
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/sqlite3/dbapi2.py | 126 | 2687 | # pysqlite2/dbapi2.py: the DB-API 2.0 interface
#
# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import time
import collections.abc
from _sqlite3 import *
# DB-API 2.0 module attributes (PEP 249).
paramstyle = "qmark"  # SQL parameters use '?' placeholders
threadsafety = 1      # threads may share the module, but not connections
apilevel = "2.0"
# DB-API 2.0 type constructors (PEP 249): thin aliases over datetime.
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime

def DateFromTicks(ticks):
    """Construct a Date from a POSIX timestamp, in local time."""
    tm = time.localtime(ticks)
    return Date(tm[0], tm[1], tm[2])

def TimeFromTicks(ticks):
    """Construct a Time from a POSIX timestamp, in local time."""
    tm = time.localtime(ticks)
    return Time(tm[3], tm[4], tm[5])

def TimestampFromTicks(ticks):
    """Construct a Timestamp from a POSIX timestamp, in local time."""
    tm = time.localtime(ticks)
    return Timestamp(tm[0], tm[1], tm[2], tm[3], tm[4], tm[5])
# Version tuples parsed from the strings exported by _sqlite3.
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
# DB-API 2.0 binary wrapper type; Row is registered as a virtual Sequence.
Binary = memoryview
collections.abc.Sequence.register(Row)
def register_adapters_and_converters():
    """Install the default date/timestamp adapters and converters.

    Adapters serialize datetime.date/datetime.datetime to ISO-8601 text;
    converters parse the "date" and "timestamp" declared column types
    back into the corresponding datetime objects.
    """
    def _date_to_text(d):
        # "YYYY-MM-DD"
        return d.isoformat()

    def _datetime_to_text(dt):
        # "YYYY-MM-DD HH:MM:SS[.ffffff]" — space separator, not "T".
        return dt.isoformat(" ")

    def _text_to_date(data):
        y, m, d = (int(part) for part in data.split(b"-"))
        return datetime.date(y, m, d)

    def _text_to_timestamp(data):
        date_bytes, time_bytes = data.split(b" ")
        y, mo, d = (int(part) for part in date_bytes.split(b"-"))
        pieces = time_bytes.split(b".")
        h, mi, s = (int(part) for part in pieces[0].split(b":"))
        if len(pieces) == 2:
            # Normalize the fraction to exactly six digits (microseconds).
            us = int(pieces[1].decode().ljust(6, "0")[:6])
        else:
            us = 0
        return datetime.datetime(y, mo, d, h, mi, s, us)

    register_adapter(datetime.date, _date_to_text)
    register_adapter(datetime.datetime, _datetime_to_text)
    register_converter("date", _text_to_date)
    register_converter("timestamp", _text_to_timestamp)
# Install the default adapters/converters once at import time.
register_adapters_and_converters()
# Clean up namespace
del(register_adapters_and_converters)
| apache-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tseries/tests/test_frequencies.py | 9 | 25284 | from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
    """Compound frequency strings collapse to a single equivalent offset."""
    cases = [
        ('2h30min', offsets.Minute(150)),
        ('2h30min15s', offsets.Second(150 * 60 + 15)),
        ('2h 60min', offsets.Hour(3)),
        ('15l500u', offsets.Micro(15500)),
        ('10s75L', offsets.Milli(10075)),
        ('2800N', offsets.Nano(2800)),
    ]
    for spec, expected in cases:
        assert frequencies.to_offset(spec) == expected

    # Whitespace between components is ignored.
    assert frequencies.to_offset('2h30min') == frequencies.to_offset('2h 30min')

    # malformed
    try:
        frequencies.to_offset('2h20m')
    except ValueError:
        pass
    else:
        assert False
def test_to_offset_negative():
    """Negative frequency strings produce offsets with negative counts."""
    assert frequencies.to_offset('-1S').n == -1
    # Components are summed before the sign applies: -(5*60 + 10) seconds.
    assert frequencies.to_offset('-5min10s').n == -310
def test_to_offset_leading_zero():
    """Zero-valued and zero-padded components are tolerated."""
    assert frequencies.to_offset('00H 00T 01S').n == 1
    assert frequencies.to_offset('-00H 03T 14S').n == -194
def test_to_offset_pd_timedelta():
    """Timedelta -> offset conversion (GH #9064)."""
    cases = [
        (Timedelta(days=1, seconds=1), offsets.Second(86401)),
        (Timedelta(days=-1, seconds=1), offsets.Second(-86399)),
        (Timedelta(hours=1, minutes=10), offsets.Minute(70)),
        (Timedelta(hours=1, minutes=-10), offsets.Minute(50)),
        (Timedelta(weeks=1), offsets.Day(7)),
        (Timedelta(microseconds=1), offsets.Micro(1)),
    ]
    for td, expected in cases:
        assert frequencies.to_offset(td) == expected

    # An hour-sized Timedelta matches the equivalent string spec.
    assert frequencies.to_offset(Timedelta(hours=1)) == frequencies.to_offset('60min')

    # A zero-length Timedelta has no offset representation.
    tm.assertRaises(ValueError, lambda: frequencies.to_offset(Timedelta(microseconds=0)))
def test_anchored_shortcuts():
    """Unanchored aliases expand to their default anchors."""
    # 'W' means week ending Sunday.
    assert frequencies.to_offset('W') == frequencies.to_offset('W-SUN')

    # 'Q' means quarter ending in December.
    dec_quarter = offsets.QuarterEnd(startingMonth=12)
    assert frequencies.to_offset('Q') == dec_quarter
    assert frequencies.to_offset('Q-DEC') == dec_quarter

    # Explicit anchors are honored.
    assert frequencies.to_offset('Q-MAY') == offsets.QuarterEnd(startingMonth=5)
def test_get_rule_month():
    """_get_rule_month maps a frequency (string or offset) to its anchor month."""
    cases = [
        ('W', 'DEC'),
        (offsets.Week(), 'DEC'),
        ('D', 'DEC'),
        (offsets.Day(), 'DEC'),
        ('Q', 'DEC'),
        # BUG FIX: this case previously used print(result == 'DEC') instead
        # of an assertion, so a regression here could never fail the test.
        (offsets.QuarterEnd(startingMonth=12), 'DEC'),
        ('Q-JAN', 'JAN'),
        (offsets.QuarterEnd(startingMonth=1), 'JAN'),
        ('A-DEC', 'DEC'),
        (offsets.YearEnd(), 'DEC'),
        ('A-MAY', 'MAY'),
        (offsets.YearEnd(month=5), 'MAY'),
    ]
    for freq, expected in cases:
        assert frequencies._get_rule_month(freq) == expected
class TestFrequencyCode(tm.TestCase):
    """Checks for the numeric period/frequency code and group mapping."""

    def test_freq_code(self):
        # Annual codes ignore the multiplier and sign.
        self.assertEqual(frequencies.get_freq('A'), 1000)
        self.assertEqual(frequencies.get_freq('3A'), 1000)
        self.assertEqual(frequencies.get_freq('-1A'), 1000)

        # Weekly anchors add a weekday offset onto the 4000 group base.
        self.assertEqual(frequencies.get_freq('W'), 4000)
        self.assertEqual(frequencies.get_freq('W-MON'), 4001)
        self.assertEqual(frequencies.get_freq('W-FRI'), 4005)

        for freqstr, code in compat.iteritems(frequencies._period_code_map):
            result = frequencies.get_freq(freqstr)
            self.assertEqual(result, code)

            # The group is the code truncated to the nearest thousand,
            # whether looked up from the string or from the numeric code.
            result = frequencies.get_freq_group(freqstr)
            self.assertEqual(result, code // 1000 * 1000)

            result = frequencies.get_freq_group(code)
            self.assertEqual(result, code // 1000 * 1000)

    def test_freq_group(self):
        # All annual variants (multipliers, signs, anchors, offsets)
        # share group 1000.
        self.assertEqual(frequencies.get_freq_group('A'), 1000)
        self.assertEqual(frequencies.get_freq_group('3A'), 1000)
        self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
        self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
        self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
        self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
        self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
        self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)

        # All weekly variants share group 4000.
        self.assertEqual(frequencies.get_freq_group('W'), 4000)
        self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
        self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
        self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
        self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
        self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)

    def test_get_to_timestamp_base(self):
        # Daily and coarser frequencies resolve to the daily base;
        # sub-daily frequencies resolve to the second base.
        tsb = frequencies.get_to_timestamp_base

        self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
                         frequencies.get_freq_code('D')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
                         frequencies.get_freq_code('D')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
                         frequencies.get_freq_code('D')[0])

        self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
                         frequencies.get_freq_code('S')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
                         frequencies.get_freq_code('S')[0])
        self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
                         frequencies.get_freq_code('S')[0])

    def test_freq_to_reso(self):
        # Frequency letter -> human-readable resolution name, and back.
        Reso = frequencies.Resolution

        self.assertEqual(Reso.get_str_from_freq('A'), 'year')
        self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
        self.assertEqual(Reso.get_str_from_freq('M'), 'month')
        self.assertEqual(Reso.get_str_from_freq('D'), 'day')
        self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
        self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
        self.assertEqual(Reso.get_str_from_freq('S'), 'second')
        self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
        self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
        self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')

        for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
            # check roundtrip
            result = Reso.get_freq(Reso.get_str_from_freq(freq))
            self.assertEqual(freq, result)

        for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
            # Roundtrip through the numeric resolution value as well.
            result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
            self.assertEqual(freq, result)

    def test_get_freq_code(self):
        # freqstr
        self.assertEqual(frequencies.get_freq_code('A'),
                         (frequencies.get_freq('A'), 1))
        self.assertEqual(frequencies.get_freq_code('3D'),
                         (frequencies.get_freq('D'), 3))
        self.assertEqual(frequencies.get_freq_code('-2M'),
                         (frequencies.get_freq('M'), -2))

        # tuple
        self.assertEqual(frequencies.get_freq_code(('D', 1)),
                         (frequencies.get_freq('D'), 1))
        self.assertEqual(frequencies.get_freq_code(('A', 3)),
                         (frequencies.get_freq('A'), 3))
        self.assertEqual(frequencies.get_freq_code(('M', -2)),
                         (frequencies.get_freq('M'), -2))
        # numeric tuple
        self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))

        # offsets
        self.assertEqual(frequencies.get_freq_code(offsets.Day()),
                         (frequencies.get_freq('D'), 1))
        self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
                         (frequencies.get_freq('D'), 3))
        self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
                         (frequencies.get_freq('D'), -2))

        self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
                         (frequencies.get_freq('M'), 1))
        self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
                         (frequencies.get_freq('M'), 3))
        self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
                         (frequencies.get_freq('M'), -2))

        self.assertEqual(frequencies.get_freq_code(offsets.Week()),
                         (frequencies.get_freq('W'), 1))
        self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
                         (frequencies.get_freq('W'), 3))
        self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
                         (frequencies.get_freq('W'), -2))

        # monday is weekday=0
        self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
                         (frequencies.get_freq('W-TUE'), 1))
        self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
                         (frequencies.get_freq('W-MON'), 3))
        self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
                         (frequencies.get_freq('W-FRI'), -2))
# Short alias: the inference tests below construct DatetimeIndex constantly.
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
#All of these dates are on same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
    # GH 7905
    def hours(day, start, stop):
        # Hourly stamps on 2014-07-<day> from <start>:00 to <stop>:00.
        return ['2014-07-%02d %02d:00' % (day, h)
                for h in range(start, stop + 1)]

    # Hourly stamps inside a single calendar day infer plain 'H'.
    idx = DatetimeIndex(hours(1, 9, 14))
    self.assertEqual(idx.inferred_freq, 'H')
    # A full business day (09:00-16:00) rolling into the next morning
    # infers business-hour frequency.
    idx = DatetimeIndex(hours(1, 9, 16) + hours(2, 9, 11))
    self.assertEqual(idx.inferred_freq, 'BH')
    # The same holds across a weekend (Friday the 4th -> Monday the 7th).
    idx = DatetimeIndex(hours(4, 9, 16) + hours(7, 9, 11))
    self.assertEqual(idx.inferred_freq, 'BH')
    # And across several consecutive business days.
    idx = DatetimeIndex(hours(4, 9, 16) + hours(7, 9, 16) +
                        hours(8, 9, 16))
    self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
    # A reversed annual range infers a negative (descending) frequency.
    rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])[::-1]
    self.assertEqual(rng.inferred_freq, '-1A-JAN')
def test_non_datetimeindex(self):
    # infer_freq on a plain array of datetime objects must agree with the
    # frequency inferred from the index itself.
    rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
    result = frequencies.infer_freq(rng.to_pydatetime())
    self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
    # Numeric and period indexes cannot carry an inferable frequency.
    for idx in [tm.makeIntIndex(10),
                tm.makeFloatIndex(10),
                tm.makePeriodIndex(10)]:
        self.assertRaises(TypeError, lambda: frequencies.infer_freq(idx))
    # GH 10822: string-to-datetime conversion produces an odd error
    # message on windows, so only exercise it on other platforms.
    if not is_platform_windows():
        for idx in [tm.makeStringIndex(10), tm.makeUnicodeIndex(10)]:
            self.assertRaises(ValueError,
                              lambda: frequencies.infer_freq(idx))
def test_string_datetimelike_compat(self):
    # GH 6463: a plain list of date strings and an Index of the same
    # strings must infer the same frequency.
    months = ['2004-01', '2004-02', '2004-03', '2004-04']
    expected = frequencies.infer_freq(months)
    result = frequencies.infer_freq(Index(months))
    self.assertEqual(result, expected)
def test_series(self):
    # GH 6407: inferring frequency from a Series.
    # Series of plain ints/floats are not datetime-like -> TypeError.
    for ser in [Series(np.arange(10)), Series(np.arange(10.))]:
        self.assertRaises(TypeError, lambda: frequencies.infer_freq(ser))
    # A non-convertible string Series cannot be inferred either.
    self.assertRaises(ValueError,
                      lambda: frequencies.infer_freq(Series(['foo', 'bar'])))
    # Period-backed Series are rejected outright.
    for freq in [None, 'L']:
        ser = Series(period_range('2013', periods=10, freq=freq))
        self.assertRaises(TypeError, lambda: frequencies.infer_freq(ser))
    # 'Y' is a deprecated alias, so constructing the range warns first.
    for freq in ['Y']:
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            ser = Series(period_range('2013', periods=10, freq=freq))
        self.assertRaises(TypeError, lambda: frequencies.infer_freq(ser))
    # DatetimeIndex-backed Series round-trip their frequency.
    for freq in ['M', 'L', 'S']:
        ser = Series(date_range('20130101', periods=10, freq=freq))
        self.assertEqual(frequencies.infer_freq(ser), freq)
    ser = Series(date_range('20130101', '20130110'))
    self.assertEqual(frequencies.infer_freq(ser), 'D')
def test_legacy_offset_warnings(self):
    # Every legacy rule alias must still resolve - but only under a
    # FutureWarning - and must yield the same offset / date range as its
    # modern spelling.
    for legacy, modern in compat.iteritems(frequencies._rule_aliases):
        with tm.assert_produces_warning(FutureWarning):
            result = frequencies.get_offset(legacy)
        exp = frequencies.get_offset(modern)
        self.assertEqual(result, exp)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            idx = date_range('2011-01-01', periods=5, freq=legacy)
        exp = date_range('2011-01-01', periods=5, freq=modern)
        self.assert_index_equal(idx, exp)
# Three-letter month abbreviations in calendar order.
MONTHS = 'JAN FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC'.split()
def test_is_superperiod_subperiod():
    # Each (coarse, fine) pair must be symmetric: the coarse offset is a
    # superperiod of the fine one, and the fine one a subperiod of it.
    pairs = [(offsets.YearEnd(), offsets.MonthEnd()),
             (offsets.Hour(), offsets.Minute()),
             (offsets.Second(), offsets.Milli()),
             (offsets.Milli(), offsets.Micro()),
             (offsets.Micro(), offsets.Nano())]
    for coarse, fine in pairs:
        assert (frequencies.is_superperiod(coarse, fine))
        assert (frequencies.is_subperiod(fine, coarse))
# Run this test module directly under the nose test runner, dropping into
# pdb on errors/failures.
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
LandRegistry/address-search-api | service/es_access.py | 1 | 2191 | from elasticsearch import Elasticsearch # type: ignore
from elasticsearch_dsl import Search # type: ignore
from typing import Any, Dict, List, Tuple, Union
from service import app
ELASTICSEARCH_ENDPOINT = app.config['ELASTIC_SEARCH_ENDPOINT']
MAX_NUMBER_SEARCH_RESULTS = app.config['MAX_NUMBER_SEARCH_RESULTS']
SEARCH_RESULTS_PER_PAGE = app.config['SEARCH_RESULTS_PER_PAGE']
def _get_start_and_end_indexes(page_number: int, page_size: int) -> Tuple[int, int]:
start_index = page_number * page_size
end_index = start_index + page_size
return start_index, end_index
# TODO: write integration tests for this module
# Return one page of address hits for an exact postcode match, ordered
# street-first then building, with missing values sorted last.
def get_addresses_for_postcode(postcode: str, page_number: int, page_size: int):
search = create_search('address_by_postcode')
# Postcodes are indexed upper-case, so normalise the query term to match.
query = search.query("term", postcode=postcode.upper()).sort(
{'thoroughfare_name': {'missing': '_last'}},
{'dependent_thoroughfare_name': {'missing': '_last'}},
{'building_number': {'missing': '_last', 'order': 'asc'}},
{'building_name': {'missing': '_last'}},
{'sub_building_name': {'missing': '_last'}},
)
start_index, end_index = _get_start_and_end_indexes(page_number, page_size)
# Slicing the Search applies Elasticsearch from/size paging before execution.
return query[start_index:end_index].execute().hits
# Return one page of address hits for an exact phrase match against the
# pre-joined address fields, ordered building-first (note: the reverse of
# the postcode search's sort order), missing values last.
def get_addresses_for_phrase(phrase: str, page_number: int, page_size: int):
search = create_search('address_by_joined_fields')
# joined_fields is indexed lower-case, so normalise the query term to match.
query = search.filter('term', joined_fields=phrase.lower()).sort(
{'sub_building_name': {'missing': '_last'}},
{'building_name': {'missing': '_last'}},
{'building_number': {'missing': '_last'}},
{'dependent_thoroughfare_name': {'missing': '_last'}},
{'thoroughfare_name': {'missing': '_last'}},
)
start_index, end_index = _get_start_and_end_indexes(page_number, page_size)
# Slicing the Search applies Elasticsearch from/size paging before execution.
return query[start_index:end_index].execute().hits
# Build a Search over the address index for the given document type,
# hard-capped at MAX_NUMBER_SEARCH_RESULTS results.
def create_search(doc_type: str):
client = Elasticsearch([ELASTICSEARCH_ENDPOINT])
search = Search(using=client, index='address-search-api-index', doc_type=doc_type)
# Cap the result window so no query can page past the configured maximum.
search = search[0:MAX_NUMBER_SEARCH_RESULTS]
return search
# Return the Elasticsearch cluster info document for the configured endpoint.
def get_info():
return Elasticsearch([ELASTICSEARCH_ENDPOINT]).info()
| mit |
mrquim/repository.mrquim | script.kodilogemail/pyaes/util.py | 81 | 2050 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Why to_bufferable?
# Python 3 is very different from Python 2.x when it comes to strings of text
# and strings of bytes; in Python 3, strings of bytes do not exist, instead to
# represent arbitrary binary data, we must use the "bytes" object. This method
# ensures the object behaves as we need it to.
# Python 2 defaults: byte strings are already bufferable, and iterating a
# str yields 1-char strings whose byte value is read with ord().
def to_bufferable(binary):
return binary
def _get_byte(c):
return ord(c)
# On Python 3 (detected by the absence of xrange) swap in versions that
# coerce text to bytes and read byte values directly, since indexing a
# bytes object already yields ints there.
try:
xrange
except:
def to_bufferable(binary):
if isinstance(binary, bytes):
return binary
return bytes(ord(b) for b in binary)
def _get_byte(c):
return c
def append_PKCS7_padding(data):
    """Pad *data* to a 16-byte boundary per PKCS#7.

    Always appends between 1 and 16 bytes, each equal to the pad length,
    so the padding is unambiguously removable.
    """
    pad_length = 16 - (len(data) % 16)
    padding = chr(pad_length) * pad_length
    return data + to_bufferable(padding)
def strip_PKCS7_padding(data):
    """Remove PKCS#7 padding from *data* and return the unpadded bytes.

    Raises ValueError if the buffer is empty, its length is not a multiple
    of the 16-byte block size, or the final pad byte is outside 1..16.
    """
    # An empty buffer has no pad byte to read; reject it here instead of
    # letting data[-1] raise IndexError below.
    if not data or len(data) % 16 != 0:
        raise ValueError("invalid length")
    pad = _get_byte(data[-1])
    # A pad byte of 0 is illegal in PKCS#7; without the lower-bound check,
    # data[:-0] would silently return an empty buffer instead of failing.
    if pad < 1 or pad > 16:
        raise ValueError("invalid padding byte")
    return data[:-pad]
| gpl-2.0 |
anryko/ansible | lib/ansible/module_utils/facts/timeout.py | 62 | 2452 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import multiprocessing.pool as mp
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
GATHER_TIMEOUT = None
DEFAULT_GATHER_TIMEOUT = 10
# Raised by the timeout decorator below when a wrapped call exceeds its limit.
class TimeoutError(Exception):
pass
def timeout(seconds=None, error_message="Timer expired"):
    """
    Timeout decorator to expire after a set number of seconds.  Raises an
    ansible.module_utils.facts.TimeoutError if the limit is hit before the
    wrapped function completes.  When no limit is given, falls back to the
    module-level GATHER_TIMEOUT, then DEFAULT_GATHER_TIMEOUT.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            limit = seconds
            if limit is None:
                limit = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT

            # Run the call on a single worker thread so we can bound the wait.
            pool = mp.ThreadPool(processes=1)
            async_result = pool.apply_async(func, args, kwargs)
            pool.close()
            try:
                return async_result.get(limit)
            except multiprocessing.TimeoutError:
                # Translate into this module's own TimeoutError so callers
                # have a single exception type to catch.
                raise TimeoutError('Timer expired after %s seconds' % limit)

        return wrapper

    # Bare usage (@timeout): the first positional argument is actually the
    # function being decorated, not a number of seconds.
    if callable(seconds):
        func, seconds = seconds, None
        return decorator(func)

    # Parameterised usage (@timeout(...)): python applies the returned
    # decorator to the function itself.
    return decorator
compas-dev/compas | src/compas_rhino/forms/image.py | 1 | 3344 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
try:
basestring
except NameError:
basestring = str
from compas_rhino.forms.base import BaseForm
from System.Windows.Forms import PictureBox
from System.Windows.Forms import PictureBoxSizeMode
from System.Windows.Forms import DockStyle
from System.Drawing import Image
from System.Net import WebClient
from System.IO import MemoryStream
__all__ = ['ImageForm', 'image_from_remote', 'image_from_local']
def image_from_remote(source):
    """Construct an image from a remote source.

    Parameters
    ----------
    source : str
        The url of the remote source.

    Returns
    -------
    System.Drawing.Image
        Representation of an image in memory.

    Examples
    --------
    .. code-block:: python

        image = image_from_remote('http://block.arch.ethz.ch/brg/images/cache/dsc02360_ni-2_cropped_1528706473_624x351.jpg')

    """
    # Download the raw bytes and hand them to GDI+ via an in-memory stream.
    payload = WebClient().DownloadData(source)
    return Image.FromStream(MemoryStream(payload))
def image_from_local(source):
    """Construct an image from a local source.

    Parameters
    ----------
    source : str
        The path to the local source file.

    Returns
    -------
    System.Drawing.Image
        Representation of an image in memory.

    Examples
    --------
    .. code-block:: python

        image = image_from_local('theblock.jpg')

    """
    # GDI+ loads the file directly; the caller owns disposal of the handle.
    return Image.FromFile(source)
class ImageForm(BaseForm):
"""A form for displaying images.
Parameters
----------
image : {str, Image}
The image that should be displayed.
This can be a url of a remote image file,
or a local file path,
or an instance of ``System.Drawing.Image``.
title : str, optional
Title of the form.
Default is ``ImageForm``.
width : int, optional
Width of the form.
Default is ``None``.
height : int, optional
Height of the form.
Default is ``None``.
Examples
--------
.. code-block:: python
from compas_rhino.forms import ImageForm
form = ImageForm('http://block.arch.ethz.ch/brg/images/cache/dsc02360_ni-2_cropped_1528706473_624x351.jpg')
form.show()
"""
def __init__(self, image, title='Image', width=None, height=None):
# Assign through the property so strings get resolved to Image objects.
self._image = None
self.image = image
super(ImageForm, self).__init__(title, width, height)
@property
def image(self):
"""System.Drawing.Image: An instance of ``System.Drawing.Image``.
"""
return self._image
@image.setter
def image(self, image):
# Strings are treated as URLs when they start with 'http', and as
# local file paths otherwise; Image instances are stored as-is.
if isinstance(image, basestring):
if image.startswith('http'):
self._image = image_from_remote(image)
else:
self._image = image_from_local(image)
elif isinstance(image, Image):
self._image = image
else:
raise NotImplementedError
def init(self):
# BaseForm hook: fill the form with a single auto-sized picture box
# and shrink the client area to fit the image.
box = PictureBox()
box.Dock = DockStyle.Fill
box.SizeMode = PictureBoxSizeMode.AutoSize
box.Image = self.image
self.image = box.Image
self.Controls.Add(box)
self.ClientSize = box.Size
def on_form_closed(self, sender, e):
# Release the underlying image handle when the form closes.
self.image.Dispose()
| mit |
DroneMapp/powerlibs-django-restless-contrib | setup.py | 1 | 1772 | import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = '0.30.0'
def pip_git_to_setuptools_git(url):
    """Convert a pip-style VCS requirement into a setuptools dependency link.

    Requirements of the form ``git+https://github.com/<org>/<repo>.git@<tag>``
    are rewritten to the GitHub tarball URL for that tag; any other input is
    returned unchanged.
    """
    # Escape the literal dot before "git@" so e.g. "repoXgit@..." can't match.
    match = re.match(r'git\+https://github.com/(?P<organization>[^/]+)/(?P<repository>[^/]+)\.git@(?P<tag>.+)', url.strip())
    if match:
        # Fetch the tarball of the pinned tag; the previous version always
        # downloaded master, silently ignoring the version pin.
        url = 'http://github.com/{organization}/{repository}/tarball/{tag}#egg={tag}'.format(
            **match.groupdict()
        )
    return url
# Split declared requirements: plain specs go to install_requires, VCS URLs
# are converted to setuptools dependency links.
requires = []
dependency_links = []
with open('requirements/production.txt') as requirements_file:
for line in requirements_file:
if 'git+http' in line:
dependency_links.append(pip_git_to_setuptools_git(line))
else:
requires.append(line)
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
# NOTE(review): "moduls" below is a typo in the published description;
# fixing it would change package metadata, so it is only flagged here.
setup(
name='powerlibs-django-restless-contrib',
version=version,
description="Contrib moduls for Powerlibs Django Restless",
long_description=readme,
author='Cléber Zavadniak',
author_email='cleberman@gmail.com',
url='https://github.com/Dronemapp/powerlibs-django-restless-contrib',
license=license,
packages=['powerlibs.django.restless.contrib.endpoints'],
package_data={'': ['LICENSE', 'README.md']},
include_package_data=True,
install_requires=requires,
dependency_links=dependency_links,
zip_safe=False,
keywords='generic libraries',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules'
),
)
| mit |
Seek/RayTracing | blender_export/blender_scene_export.py | 1 | 2760 | import bpy
import bmesh
import gzip
# Date 12/28/15
# Wrriten for Blender ver. 2.76.2b
# This file will serve as a record of how we will transmit data from blender to our raytracer.
# These parameters are more or less mirrored information about BlendData types from:
# http://www.blender.org/api/blender_python_api_2_76_2/bpy.types.BlendData.html#bpy.types.BlendData
# TODO: Accept cmd line params for controlling output
scene = bpy.data.scenes[0]
def matrix_to_string(matrix):
    """Serialise a Blender matrix as comma-separated values, one row per
    line, each line terminated by a newline.

    Mirrors the original indexing: rows are iterated over len(matrix.col)
    and columns over len(matrix.row).
    """
    n_cols = len(matrix.row)
    n_rows = len(matrix.col)
    return ''.join(
        ','.join('{0}'.format(matrix[r][c]) for c in range(n_cols)) + '\n'
        for r in range(n_rows))
# NOTE(review): unimplemented stub (the name also carries a typo,
# "triagulated"); it always returns None, so callers must not rely on it.
def is_mesh_triagulated(mesh):
pass
# Get the current filename
# Derive the output filename from the .blend file name, e.g. "scene.blend"
# -> "scene_rt.txt".
CURRENT_FILE = bpy.path.basename(bpy.context.blend_data.filepath)
tmpp = CURRENT_FILE.find('.')
FILENAME = CURRENT_FILE[:tmpp] + '_rt.txt'
print(FILENAME)
# Get only the data we're interested in
cams = bpy.data.cameras
lights = bpy.data.lamps
meshes = bpy.data.meshes
buf = ''
# Export lights
# TODO Environment maps?
with open(FILENAME, 'wt') as f:
buf += "[Lights]\n"
f.write(buf)
# Serialise each lamp as a key=value "begin light"/"end light" record.
for l in lights:
buf = ''
lo = bpy.data.objects[l.name]
buf += 'begin light\n'
buf += 'name={0}\n'.format(l.name)
buf += 'type={0}\n'.format(l.type)
buf += 'color={0},{1},{2}\n'.format(l.color.r,
l.color.g, l.color.b)
buf += 'position={0},{1},{2}\n'.format(lo.location.x, lo.location.y,
lo.location.z)
buf += 'energy={0}\n'.format(l.energy)
buf += 'distance={0}\n'.format(l.distance)
buf += 'object_to_world={0}'.format(matrix_to_string(lo.matrix_world))
# Type-specific extras; HEMI/POINT/SUN export no additional fields.
if l.type == 'HEMI':
pass
elif l.type == 'POINT':
pass
elif l.type == 'AREA':
buf += 'shape={0}\n'.format(l.shape)
buf += 'sizex={0}\n'.format(l.size)
buf += 'sizey={0}\n'.format(l.size_y)
elif l.type == 'SPOT':
buf += 'spot_size={0}\n'.format(l.spot_size)
buf += 'spot_blend={0}\n'.format(l.spot_blend)
elif l.type == 'SUN':
pass
buf += 'end light\n'
f.write(buf)
# NOTE(review): the mesh loop below triangulates each mesh via bmesh but
# never fills or writes `buf`, and it triangulates `m` while removing the
# evaluated copy `mesh` - this section looks unfinished; confirm intent.
for m in meshes:
apply_modifiers = True
settings = 'PREVIEW'
buf = ''
mo = bpy.data.objects[m.name]
mesh = mo.to_mesh(scene, apply_modifiers, settings)
bm = bmesh.new()
bm.from_mesh(m)
bmesh.ops.triangulate(bm, faces=bm.faces)
bm.free()
bpy.data.meshes.remove(mesh)
| mit |
Azure/azure-sdk-for-python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_02_01/models/__init__.py | 1 | 5249 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AgentPool
from ._models_py3 import AgentPoolListResult
from ._models_py3 import CloudErrorBody
from ._models_py3 import ContainerServiceDiagnosticsProfile
from ._models_py3 import ContainerServiceLinuxProfile
from ._models_py3 import ContainerServiceMasterProfile
from ._models_py3 import ContainerServiceNetworkProfile
from ._models_py3 import ContainerServiceSshConfiguration
from ._models_py3 import ContainerServiceSshPublicKey
from ._models_py3 import ContainerServiceVMDiagnostics
from ._models_py3 import ContainerServiceWindowsProfile
from ._models_py3 import CredentialResult
from ._models_py3 import CredentialResults
from ._models_py3 import ManagedCluster
from ._models_py3 import ManagedClusterAADProfile
from ._models_py3 import ManagedClusterAccessProfile
from ._models_py3 import ManagedClusterAddonProfile
from ._models_py3 import ManagedClusterAgentPoolProfile
from ._models_py3 import ManagedClusterAgentPoolProfileProperties
from ._models_py3 import ManagedClusterListResult
from ._models_py3 import ManagedClusterPoolUpgradeProfile
from ._models_py3 import ManagedClusterServicePrincipalProfile
from ._models_py3 import ManagedClusterUpgradeProfile
from ._models_py3 import OperationListResult
from ._models_py3 import OperationValue
from ._models_py3 import OrchestratorProfile
from ._models_py3 import Resource
from ._models_py3 import SubResource
from ._models_py3 import TagsObject
except (SyntaxError, ImportError):
from ._models import AgentPool # type: ignore
from ._models import AgentPoolListResult # type: ignore
from ._models import CloudErrorBody # type: ignore
from ._models import ContainerServiceDiagnosticsProfile # type: ignore
from ._models import ContainerServiceLinuxProfile # type: ignore
from ._models import ContainerServiceMasterProfile # type: ignore
from ._models import ContainerServiceNetworkProfile # type: ignore
from ._models import ContainerServiceSshConfiguration # type: ignore
from ._models import ContainerServiceSshPublicKey # type: ignore
from ._models import ContainerServiceVMDiagnostics # type: ignore
from ._models import ContainerServiceWindowsProfile # type: ignore
from ._models import CredentialResult # type: ignore
from ._models import CredentialResults # type: ignore
from ._models import ManagedCluster # type: ignore
from ._models import ManagedClusterAADProfile # type: ignore
from ._models import ManagedClusterAccessProfile # type: ignore
from ._models import ManagedClusterAddonProfile # type: ignore
from ._models import ManagedClusterAgentPoolProfile # type: ignore
from ._models import ManagedClusterAgentPoolProfileProperties # type: ignore
from ._models import ManagedClusterListResult # type: ignore
from ._models import ManagedClusterPoolUpgradeProfile # type: ignore
from ._models import ManagedClusterServicePrincipalProfile # type: ignore
from ._models import ManagedClusterUpgradeProfile # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import OperationValue # type: ignore
from ._models import OrchestratorProfile # type: ignore
from ._models import Resource # type: ignore
from ._models import SubResource # type: ignore
from ._models import TagsObject # type: ignore
from ._container_service_client_enums import (
AgentPoolType,
ContainerServiceStorageProfileTypes,
ContainerServiceVMSizeTypes,
Count,
NetworkPlugin,
NetworkPolicy,
OSType,
)
__all__ = [
'AgentPool',
'AgentPoolListResult',
'CloudErrorBody',
'ContainerServiceDiagnosticsProfile',
'ContainerServiceLinuxProfile',
'ContainerServiceMasterProfile',
'ContainerServiceNetworkProfile',
'ContainerServiceSshConfiguration',
'ContainerServiceSshPublicKey',
'ContainerServiceVMDiagnostics',
'ContainerServiceWindowsProfile',
'CredentialResult',
'CredentialResults',
'ManagedCluster',
'ManagedClusterAADProfile',
'ManagedClusterAccessProfile',
'ManagedClusterAddonProfile',
'ManagedClusterAgentPoolProfile',
'ManagedClusterAgentPoolProfileProperties',
'ManagedClusterListResult',
'ManagedClusterPoolUpgradeProfile',
'ManagedClusterServicePrincipalProfile',
'ManagedClusterUpgradeProfile',
'OperationListResult',
'OperationValue',
'OrchestratorProfile',
'Resource',
'SubResource',
'TagsObject',
'AgentPoolType',
'ContainerServiceStorageProfileTypes',
'ContainerServiceVMSizeTypes',
'Count',
'NetworkPlugin',
'NetworkPolicy',
'OSType',
]
| mit |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/community/lukes.py | 1 | 8124 | """Lukes Algorithm for exact optimal weighted tree partitioning."""
from copy import deepcopy
from functools import lru_cache
from random import choice
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ["lukes_partitioning"]
D_EDGE_W = "weight"
D_EDGE_VALUE = 1.0
D_NODE_W = "weight"
D_NODE_VALUE = 1
PKEY = "partitions"
CLUSTER_EVAL_CACHE_SIZE = 2048
def _split_n_from(n: int, min_size_of_first_part: int):
# splits j in two parts of which the first is at least
# the second argument
assert n >= min_size_of_first_part
for p1 in range(min_size_of_first_part, n + 1):
yield p1, n - p1
def lukes_partitioning(G, max_size: int, node_weight=None, edge_weight=None) -> list:
"""Optimal partitioning of a weighted tree using the Lukes algorithm.
This algorithm partitions a connected, acyclic graph featuring integer
node weights and float edge weights. The resulting clusters are such
that the total weight of the nodes in each cluster does not exceed
max_size and that the weight of the edges that are cut by the partition
is minimum. The algorithm is based on LUKES[1].
Parameters
----------
G : graph
max_size : int
Maximum weight a partition can have in terms of sum of
node_weight for all nodes in the partition
edge_weight : key
Edge data key to use as weight. If None, the weights are all
set to one.
node_weight : key
Node data key to use as weight. If None, the weights are all
set to one. The data must be int.
Returns
-------
partition : list
A list of sets of nodes representing the clusters of the
partition.
Raises
-------
NotATree
If G is not a tree.
TypeError
If any of the values of node_weight is not int.
References
----------
.. Lukes, J. A. (1974).
"Efficient Algorithm for the Partitioning of Trees."
IBM Journal of Research and Development, 18(3), 217–224.
"""
# First sanity check and tree preparation
if not nx.is_tree(G):
raise nx.NotATree("lukes_partitioning works only on trees")
else:
if nx.is_directed(G):
root = [n for n, d in G.in_degree() if d == 0]
assert len(root) == 1
root = root[0]
t_G = deepcopy(G)
else:
# For undirected trees any node can serve as the DP root.
root = choice(list(G.nodes))
# this has the desirable side effect of not inheriting attributes
t_G = nx.dfs_tree(G, root)
# Since we do not want to screw up the original graph,
# if we have a blank attribute, we make a deepcopy
if edge_weight is None or node_weight is None:
safe_G = deepcopy(G)
if edge_weight is None:
nx.set_edge_attributes(safe_G, D_EDGE_VALUE, D_EDGE_W)
edge_weight = D_EDGE_W
if node_weight is None:
nx.set_node_attributes(safe_G, D_NODE_VALUE, D_NODE_W)
node_weight = D_NODE_W
else:
safe_G = G
# Second sanity check
# The values of node_weight MUST BE int.
# I cannot see any room for duck typing without incurring serious
# danger of subtle bugs.
all_n_attr = nx.get_node_attributes(safe_G, node_weight).values()
for x in all_n_attr:
if not isinstance(x, int):
raise TypeError(
"lukes_partitioning needs integer "
f"values for node_weight ({node_weight})"
)
# SUBROUTINES -----------------------
# these functions are defined here for two reasons:
# - brevity: we can leverage global "safe_G"
# - caching: signatures are hashable
@not_implemented_for("undirected")
# this is intended to be called only on t_G
def _leaves(gr):
for x in gr.nodes:
if not nx.descendants(gr, x):
yield x
@not_implemented_for("undirected")
def _a_parent_of_leaves_only(gr):
tleaves = set(_leaves(gr))
for n in set(gr.nodes) - tleaves:
if all([x in tleaves for x in nx.descendants(gr, n)]):
return n
@lru_cache(CLUSTER_EVAL_CACHE_SIZE)
def _value_of_cluster(cluster: frozenset):
# Value of a cluster = total weight of edges fully inside it.
valid_edges = [e for e in safe_G.edges if e[0] in cluster and e[1] in cluster]
return sum([safe_G.edges[e][edge_weight] for e in valid_edges])
def _value_of_partition(partition: list):
return sum([_value_of_cluster(frozenset(c)) for c in partition])
@lru_cache(CLUSTER_EVAL_CACHE_SIZE)
def _weight_of_cluster(cluster: frozenset):
return sum([safe_G.nodes[n][node_weight] for n in cluster])
def _pivot(partition: list, node):
# Locate the (unique) cluster of `partition` containing `node`.
ccx = [c for c in partition if node in c]
assert len(ccx) == 1
return ccx[0]
def _concatenate_or_merge(partition_1: list, partition_2: list, x, i, ref_weigth):
ccx = _pivot(partition_1, x)
cci = _pivot(partition_2, i)
merged_xi = ccx.union(cci)
# We first check if we can do the merge.
# If so, we do the actual calculations, otherwise we concatenate
if _weight_of_cluster(frozenset(merged_xi)) <= ref_weigth:
cp1 = list(filter(lambda x: x != ccx, partition_1))
cp2 = list(filter(lambda x: x != cci, partition_2))
option_2 = [merged_xi] + cp1 + cp2
return option_2, _value_of_partition(option_2)
else:
option_1 = partition_1 + partition_2
return option_1, _value_of_partition(option_1)
# INITIALIZATION -----------------------
# Each node carries a PKEY table mapping cluster-weight -> best partition
# found for that weight; slot 0 holds the node's overall best partition.
leaves = set(_leaves(t_G))
for lv in leaves:
t_G.nodes[lv][PKEY] = dict()
slot = safe_G.nodes[lv][node_weight]
t_G.nodes[lv][PKEY][slot] = [{lv}]
t_G.nodes[lv][PKEY][0] = [{lv}]
for inner in [x for x in t_G.nodes if x not in leaves]:
t_G.nodes[inner][PKEY] = dict()
slot = safe_G.nodes[inner][node_weight]
t_G.nodes[inner][PKEY][slot] = [{inner}]
# CORE ALGORITHM -----------------------
# Bottom-up dynamic programming: repeatedly pick a node whose remaining
# descendants are all leaves, fold their tables into it, then delete them.
while True:
x_node = _a_parent_of_leaves_only(t_G)
weight_of_x = safe_G.nodes[x_node][node_weight]
best_value = 0
best_partition = None
bp_buffer = dict()
x_descendants = nx.descendants(t_G, x_node)
for i_node in x_descendants:
for j in range(weight_of_x, max_size + 1):
for a, b in _split_n_from(j, weight_of_x):
if (
a not in t_G.nodes[x_node][PKEY].keys()
or b not in t_G.nodes[i_node][PKEY].keys()
):
# it's not possible to form this particular weight sum
continue
part1 = t_G.nodes[x_node][PKEY][a]
part2 = t_G.nodes[i_node][PKEY][b]
part, value = _concatenate_or_merge(part1, part2, x_node, i_node, j)
if j not in bp_buffer.keys() or bp_buffer[j][1] < value:
# we annotate in the buffer the best partition for j
bp_buffer[j] = part, value
# we also keep track of the overall best partition
if best_value <= value:
best_value = value
best_partition = part
# as illustrated in Lukes, once we finished a child, we can
# discharge the partitions we found into the graph
# (the key phrase is make all x == x')
# so that they are used by the subsequent children
for w, (best_part_for_vl, vl) in bp_buffer.items():
t_G.nodes[x_node][PKEY][w] = best_part_for_vl
bp_buffer.clear()
# the absolute best partition for this node
# across all weights has to be stored at 0
t_G.nodes[x_node][PKEY][0] = best_partition
t_G.remove_nodes_from(x_descendants)
if x_node == root:
# the 0-labeled partition of root
# is the optimal one for the whole tree
return t_G.nodes[root][PKEY][0]
| gpl-3.0 |
p0psicles/SickGear | sickbeard/notifiers/libnotify.py | 3 | 5011 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import os
import cgi
import sickbeard
from sickbeard import logger, common
def diagnose():
    '''
    Check the environment for reasons libnotify isn't working. Return a
    user-readable HTML message indicating possible issues: missing
    pynotify, no desktop session, no D-Bus session bus, or no running
    notification daemon.
    '''
    try:
        import pynotify  #@UnusedImport
    except ImportError:
        return (u"<p>Error: pynotify isn't installed. On Ubuntu/Debian, install the "
                u"<a href=\"apt:python-notify\">python-notify</a> package.")
    if 'DISPLAY' not in os.environ and 'DBUS_SESSION_BUS_ADDRESS' not in os.environ:
        return (u"<p>Error: Environment variables DISPLAY and DBUS_SESSION_BUS_ADDRESS "
                u"aren't set. libnotify will only work when you run SickGear "
                u"from a desktop login.")
    try:
        import dbus
    except ImportError:
        pass
    else:
        try:
            bus = dbus.SessionBus()
        except dbus.DBusException as e:
            # cgi.escape() requires a string; passing the exception object
            # itself raised AttributeError before a message was produced.
            return (u"<p>Error: unable to connect to D-Bus session bus: <code>%s</code>."
                    u"<p>Are you running SickGear in a desktop session?") % (cgi.escape(str(e)),)
        try:
            bus.get_object('org.freedesktop.Notifications',
                           '/org/freedesktop/Notifications')
        except dbus.DBusException as e:
            return (u"<p>Error: there doesn't seem to be a notification daemon available: <code>%s</code> "
                    u"<p>Try installing notification-daemon or notify-osd.") % (cgi.escape(str(e)),)
    return u"<p>Error: Unable to send notification."
# Desktop-notification backend built on pynotify/libnotify; both bindings
# are imported lazily so SickGear still runs where they are unavailable.
class LibnotifyNotifier:
def __init__(self):
# Lazily-bound modules; None until init_pynotify() succeeds.
self.pynotify = None
self.gobject = None
def init_pynotify(self):
# Import and initialise pynotify/gobject once; returns False (after
# logging) when either binding is missing or initialisation fails.
if self.pynotify is not None:
return True
try:
import pynotify
except ImportError:
logger.log(u"Unable to import pynotify. libnotify notifications won't work.", logger.ERROR)
return False
try:
import gobject
except ImportError:
logger.log(u"Unable to import gobject. We can't catch a GError in display.", logger.ERROR)
return False
if not pynotify.init('SickGear'):
logger.log(u"Initialization of pynotify failed. libnotify notifications won't work.", logger.ERROR)
return False
self.pynotify = pynotify
self.gobject = gobject
return True
def notify_snatch(self, ep_name):
if sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH:
self._notify(common.notifyStrings[common.NOTIFY_SNATCH], ep_name)
def notify_download(self, ep_name):
if sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD:
self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ": " + lang)
def notify_git_update(self, new_version = "??"):
if sickbeard.USE_LIBNOTIFY:
update_text=common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title=common.notifyStrings[common.NOTIFY_GIT_UPDATE]
self._notify(title, update_text + new_version)
def test_notify(self):
# force=True bypasses the USE_LIBNOTIFY setting for manual testing.
return self._notify('Test notification', "This is a test notification from SickGear", force=True)
def _notify(self, title, message, force=False):
# Returns True when the notification was shown, False otherwise.
if not sickbeard.USE_LIBNOTIFY and not force:
return False
if not self.init_pynotify():
return False
# Can't make this a global constant because PROG_DIR isn't available
# when the module is imported.
icon_path = os.path.join(sickbeard.PROG_DIR, "data/images/sickbeard_touch_icon.png")
icon_uri = 'file://' + os.path.abspath(icon_path)
# If the session bus can't be acquired here a bunch of warning messages
# will be printed but the call to show() will still return True.
# pynotify doesn't seem too keen on error handling.
n = self.pynotify.Notification(title, message, icon_uri)
try:
return n.show()
except self.gobject.GError:
return False
# Module-level hook: SickGear binds the class itself (not an instance) here.
notifier = LibnotifyNotifier
ErasRasmuson/LA | LogTestGen/LogTestGen.py | 1 | 34979 | # -*- coding: cp1252 -*-
"""
###############################################################################
HEADER: LogTestGen.py
AUTHOR: Esa Heikkinen
DATE: 13.10.2016
DOCUMENT: -
VERSION: "$Id$"
REFERENCES: -
PURPOSE:
CHANGES: "$Log$"
###############################################################################
"""
import argparse
import os.path
import sys
import time
from datetime import datetime, timedelta
import glob
import math
import random
import configparser
lib_path = os.path.abspath(os.path.join('..', 'LogCom'))
sys.path.append(lib_path)
#from LogGUI import *
from TestGen_BML import *
g_version = "$Id$"
generate_counter = 0
#******************************************************************************
#
# CLASS: TestModel
#
#******************************************************************************
class TestModel:
	"""Test model for log-analysis benchmarking.

	Builds a matrix of trace patterns (branch complexity x trace size
	variation), generates analysis test cases in the chosen language
	(currently BML) and writes synthetic log files plus Graphviz graphs
	of the generated trace patterns.
	"""
	def __init__(self,args):
		print("TestModel")
		# Start point of the generated timestamps. Hard-coded here; could
		# some day be turned into parameters ? 9.3.2018 Esa
		self.log_start_date = "09.03.2018"
		self.log_start_time = "10:00:00"
		# Convert the times into the proper form
		days,months,years=self.log_start_date.split(".")
		print("log start date = %s %s %s" % (days,months,years))
		hours,minutes,seconds=self.log_start_time.split(":")
		print("log start time = %s %s %s" % (hours,minutes,seconds))
		self.log_start_datetime = datetime(int(years),int(months),int(days),int(hours),int(minutes),int(seconds))
		self.branch_complexity_level_params = {}
		self.trace_size_variation_level_params = {}
		self.branch_complexity_level_number = 3
		self.trace_size_variation_level_number = 3
		self.trace_blocks = {}
		# event_table maps (track, number) -> Event; filled by create_trace_pattern().
		self.event_table = {}
		self.test_name=args.test_name
		self.ana_path=args.ana_path
		self.ana_lang=args.ana_lang
		self.log_path=args.log_path
		#self.date
		self.time_start=args.time_start
		self.time_ev_min=args.time_ev_min
		self.time_ev_max=args.time_ev_max
		# Branch-complexity parameter sets for the three matrix levels.
		self.branch_complexity_level_params[0] = self.Branch_complexity_params(args.b1_btre_min,args.b1_btre_max,
			args.b0_bmer_min,args.b0_bmer_max,args.b0_bctype)
		self.branch_complexity_level_params[1] = self.Branch_complexity_params(args.b2_btre_min,args.b2_btre_max,
			args.b0_bmer_min,args.b0_bmer_max,args.b0_bctype)
		self.branch_complexity_level_params[2] = self.Branch_complexity_params(args.b2_btre_min,args.b2_btre_max,
			args.b3_bmer_min,args.b3_bmer_max,args.b2_bctype)
		# Trace-size-variation parameter sets for the three matrix levels.
		self.trace_size_variation_level_params[0] = self.Trace_size_variation_params(args.t1_tle_min,args.t1_tle_max,
			args.t0_tnu_min,args.t0_tnu_max,args.t1_tble_min,args.t1_tble_max,args.t0_tbnu_min,args.t0_tbnu_max)
		self.trace_size_variation_level_params[1] = self.Trace_size_variation_params(args.t2_tle_min,args.t2_tle_max,
			args.t0_tnu_min,args.t0_tnu_max,args.t2_tble_min,args.t2_tble_max,args.t0_tbnu_min,args.t0_tbnu_max)
		self.trace_size_variation_level_params[2] = self.Trace_size_variation_params(args.t2_tle_min,args.t2_tle_max,
			args.t3_tnu_min,args.t3_tnu_max,args.t2_tble_min,args.t2_tble_max,args.t3_tbnu_min,args.t3_tbnu_max)
		# Create the test matrix
		self.test_matrix = self.TestMatrix(
			self.branch_complexity_level_number,
			self.trace_size_variation_level_number,
			self.branch_complexity_level_params,
			self.trace_size_variation_level_params)
		# Create the log file information
		self.log_files = self.LogFiles(args.lver,args.lsnoe,args.lsnof,args.lcnoi,args.lcmis,args.lcinc,args.lsrc,args.lmeta)
	def generate_analyzing(self):
		"""Generate analysis test cases for every element of the test matrix."""
		print("\ngenerate_analyzing\n")
		for x in range(int(self.branch_complexity_level_number)):
			for y in range(int(self.trace_size_variation_level_number)):
				(btre_min,btre_max,bmer_min,bmer_max,bctype) = self.test_matrix.test_pattern_blocks[x,y].b_parameters
				(tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max) = self.test_matrix.test_pattern_blocks[x,y].t_parameters
				# Create a two-dimensional event table, with tracks on the
				# X axis and (main) traces on the Y axis
				self.create_trace_pattern("Analyzing",btre_min,btre_max,bmer_min,bmer_max,bctype,
					tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max)
				# Print the trace pattern as a graph
				file_info = "matrix_%s_%s" % (x,y)
				self.print_trace_pattern(tble_max,tbnu_max,tle_max,tnu_max,btre_max,file_info)
				# Generate the test cases in the desired analysis language
				self.generate_analyzing_test_cases(x,y,btre_min,btre_max,bmer_min,bmer_max,bctype,
					tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max,
					self.event_table)
	def generate_logs(self):
		"""Generate the log files from the most complex matrix element."""
		print("\ngenerate_logs\n")
		(lver,lsnoe,lsnof,lcnoi,lcmis,lcinc,lsrc,lmeta)=self.log_files.parameters
		print("lver=%s, lsnoe=%s, lsnof=%s, lcnoi=%s, lcmis=%s, lcinc=%s, lsrc=%s, lmeta=%s" % (lver,lsnoe,lsnof,lcnoi,lcmis,lcinc,lsrc,lmeta))
		# Additionally, the analysis data of the most complex matrix
		# element (2,2 == level "3,3")
		(btre_min,btre_max,bmer_min,bmer_max,bctype) = self.test_matrix.test_pattern_blocks[2,2].b_parameters
		(tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max) = self.test_matrix.test_pattern_blocks[2,2].t_parameters
		# Create the log's trace pattern
		self.create_trace_pattern("Generating",btre_min,btre_max,bmer_min,bmer_max,bctype,
			tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max)
		# Print the trace pattern as a graph
		self.print_trace_pattern(tble_max,tbnu_max,tle_max,tnu_max,btre_max,"logs")
		# Write the logs based on the trace pattern
		self.write_trace_pattern_logs(tble_max,tbnu_max,tle_max,tnu_max,btre_max,"gen")
	def generate_analyzing_test_cases(self,matrix_x,matrix_y,btre_min,btre_max,bmer_min,bmer_max,bctype,
		tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max,
		event_table):
		"""Walk the generated event table and emit one analysis test case
		event per visited event, starting from each merged branch."""
		print("\n -- generate_analyzing_test_cases for matrix: x=%s, y=%s -- " % (matrix_x,matrix_y))
		# Initialize the analysis files -- UNFINISHED !!!
		#ana_file_name = "Ana_%s_%s_%s" % (self.test_name,matrix_x,matrix_y)
		#if self.ana_lang == "BML":
		#	ana_file_path_name = self.ana_path + self.test_name + "/" + ana_file_name + ".bml"
		#	print("write_file: %s" % login_file_path_name)
		#self.make_dir_if_no_exist(ana_file_path_name)
		#ana_fw = open(ana_file_path_name, 'w')
		#init_analy_file(ana_fw)
		self.event_count=0
		# Compute the maximum x,y size of the trace pattern
		track_max = tble_max * tle_max
		event_max = tbnu_max * tnu_max * btre_max
		# Iterate over every merged branch
		for bmer_cnt in range(bmer_max):
			event_track=0
			event_number=bmer_cnt
			# Recursive function that fetches the events of all tree branches
			self.search_next_event(track_max,event_track,event_number)
		print("\n")
	def search_next_event(self,track_max,event_track,event_number):
		"""Depth-first walk over the event table following target links,
		emitting a test-case event for every event found."""
		print("	*** track_max=%s, event_track=%s, event_number=%s, event_cnt=%s" % (track_max,event_track,event_number,self.event_count))
		if int(event_track) < track_max:
			event_data = self.get_event_data(int(event_track),int(event_number))
			#print("	*** event_data = %s" % event_data)
			# Generate the test-case event in the desired language
			if self.ana_lang == "BML":
				generate_BML_test_case_event(self.event_count,event_data,
					self.time_start,self.time_ev_min,self.time_ev_max)
			if event_data[0] == 1:
				# Walk through all events of the tree branch
				for target in event_data[5]:
					track,number = target.split(".")
					self.event_count += 1
					self.search_next_event(track_max,track,number)
			else:
				print("	*** Not found event!")
				return
		else:
			print("	*** Last track: %s, stops searching" % event_track)
			return
	def get_event_data(self,track,number):
		"""Return [found_flag, time, attr, data, sources, targets] for the
		event at (track, number); found_flag is 0 when the event is missing."""
		x=track
		y=number
		time=""
		attr=""
		data=""
		sources = []
		targets = []
		# The event and its data
		try:
			#track2 = self.event_table[x,y].event_id.track
			#number2 = self.event_table[x,y].event_id.id
			time = self.event_table[x,y].time
			attr = self.event_table[x,y].attr
			data = self.event_table[x,y].data
			for i in range(1,self.event_table[x,y].source_id_cnt+1):
				str = "%s.%s" % (self.event_table[x,y].source_ids[i].track,
					self.event_table[x,y].source_ids[i].id)
				sources.append(str)
			for i in range(1,self.event_table[x,y].target_id_cnt+1):
				str = "%s.%s" % (self.event_table[x,y].target_ids[i].track,
					self.event_table[x,y].target_ids[i].id)
				targets.append(str)
			#print("Found: x=%s ,y=%s" % (x,y))
			ret = 1
		except:
			# NOTE(review): a bare except also hides programming errors
			# (e.g. AttributeError); a KeyError check would be safer.
			print("Not found: x=%s ,y=%s" % (x,y))
			ret = 0
		return [ret,time,attr,data,sources,targets]
	def create_trace_pattern(self,mode,btre_min,btre_max,bmer_min,bmer_max,bctype,
		tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max):
		"""Rebuild self.event_table from TraceBlocks and connect the
		horizontally adjacent blocks with source/target links."""
		self.trace_blocks = {}
		self.event_table = {}
		print("create_event_table: %s" % mode)
		print("tble_min=%s,tble_max=%s,tbnu_min=%s,tbnu_max=%s,tle_min=%s,tle_max=%s,tnu_min=%s,tnu_max=%s" %
			(tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max))
		print("btre_min=%s,btre_max=%s,bmer_min=%s,bmer_max=%s,bctype=%s" %
			(btre_min,btre_max,bmer_min,bmer_max,bctype))
		# Event table with tracks on the x axis and (main) traces on the y axis
		for x in range(tble_max):
			print("\n ### Trace block x: %s / %s ----------------------------------- " % (x,tble_max-1))
			for y in range(tbnu_max):
				print("\n ### Trace block y: %s / %s ----------------------------------- " % (y,tbnu_max-1))
				self.trace_blocks[x,y]=self.TraceBlock(x,y,tle_min,tle_max,tnu_min,tnu_max,
					btre_min,btre_max,bmer_min,bmer_max,self.event_table,self.time_ev_max)
		# Make the event connections between horizontal blocks into the event table ?
		if tble_max > 1:
			print("\nConnect horizontal traceblocks -- ")
			for x in range(1,tble_max):
				x_prev = x - 1
				prev_outputs = []
				curr_inputs = []
				for y in range(tbnu_max):
					print("x_prev=%s ,x=%s ,y=%s" % (x_prev,x,y))
					if bctype == "All":
						prev_outputs.extend(self.trace_blocks[x_prev,y].get_output_events("A"))
					else:
						prev_outputs.extend(self.trace_blocks[x_prev,y].get_output_events("M"))
					curr_inputs.extend(self.trace_blocks[x,y].get_input_events())
				curr_input_len = len(curr_inputs)
				curr_input_cnt=0
				for event in prev_outputs:
					curr_track=curr_inputs[curr_input_cnt].event_id.track
					curr_id=curr_inputs[curr_input_cnt].event_id.id
					curr_time=curr_inputs[curr_input_cnt].time
					print("Event: %s.%s, Time: %s --> %s.%s, Time: %s" % (
						event.event_id.track,event.event_id.id,event.time,
						curr_track,curr_id,curr_time))
					# Connect the events
					self.event_table[curr_track,curr_id].add_source_id(event.event_id.track,event.event_id.id)
					# Also the other way round (eases generating the test analyses ?)
					self.event_table[event.event_id.track,event.event_id.id].add_target_id(curr_track,curr_id)
					curr_input_cnt+=1
					if curr_input_cnt >= curr_input_len:
						break
	def print_trace_pattern(self,tble_max,tbnu_max,tle_max,tnu_max,btre_max,file_info):
		"""Write the current event table as a Graphviz digraph (.gv file)."""
		# http://www.graphviz.org/content/switch
		print("\nprint_trace_pattern -- ")
		# Compute the maximum x,y size of the trace pattern
		x_max = tble_max * tle_max
		y_max = tbnu_max * tnu_max * btre_max
		print (" x_max=%s ,y_max=%s" % (x_max,y_max))
		# Graphviz file into which the graphs (traces) are printed visually
		graphviz_file = "LogTestGen_%s_%s.gv" % (self.test_name,file_info)
		print("write_file: %s" % graphviz_file)
		fw = open(graphviz_file, 'w')
		fw.write("digraph G {\n")
		fw.write("\tgraph [center=1 rankdir=LR bgcolor=\"#E0E0E0\"]\n")
		#fw.write("\tedge [dir=none]\n")
		#fw.write("\tnode [width=0.1 height=0.1 label=\"\"]\n")
		fw.write("\tnode [width=0.05 height=0.05]\n")
		fw.write("\n")
		# Walk through the events of the event table
		# (x starts at 1: track 0 events appear only as edge sources)
		for x in range(1,x_max):
			fw.write("\n")
			for y in range(y_max):
				# The event
				try:
					track = self.event_table[x,y].event_id.track
					number = self.event_table[x,y].event_id.id
					attr = self.event_table[x,y].attr
				except:
					print("Not found: x=%s ,y=%s" % (x,y))
					# Write the trace into the graphviz file
					#fw.write("{%s} -> %s [node style=invis]\n" % (node_prevs,node))
					continue
				node="%s.%s" % (track,number)
				# The event's source events
				node_prevs = ""
				for i in range(1,self.event_table[x,y].source_id_cnt+1):
					track_prev = self.event_table[x,y].source_ids[i].track
					number_prev = self.event_table[x,y].source_ids[i].id
					attr_prev = self.event_table[x,y].attr
					node_prevs += "%s.%s " % (track_prev,number_prev)
				# Main and side branches in different colors
				color="#000000"
				if attr=="M" and attr_prev=="M":
					color="#0000ff"
				# Connection traces between blocks in a different color
				if (x % tle_max) == 0:
					color="#ff0000"
				# Write the trace into the graphviz file
				#fw.write("{%s} -> %s\n" % (node_prevs,node))
				fw.write("{ edge [color=\"%s\"]\n {%s} -> %s\n}\n" % (color,node_prevs,node))
		fw.write("}\n")
		fw.close()
	def write_trace_pattern_logs(self,tble_max,tbnu_max,tle_max,tnu_max,btre_max,file_info):
		"""Write one CSV log file per track plus one combined file from the
		current event table."""
		print("\nwrite_trace_pattern_logs -- ")
		# Compute the maximum x,y size of the trace pattern
		x_max = tble_max * tle_max
		y_max = tbnu_max * tnu_max * btre_max
		#print (" x_max=%s ,y_max=%s" % (x_max,y_max))
		fw={}
		print("Inits logs and writes headers")
		# The log files and their headers. Also one file containing everything.
		for x in range(x_max+1):
			# Initialize the log files (index x_max is the combined file)
			log_file_name = "Log_%s_%s_track_%s" % (self.test_name,file_info,x)
			login_file_path_name = self.log_path + self.test_name + "/" + log_file_name + ".csv"
			print("write_file: %s" % login_file_path_name)
			self.make_dir_if_no_exist(login_file_path_name)
			fw[x] = open(login_file_path_name, 'w')
			header = "%s,%s,%s,%s,%s,%s\n" % ("TIME","ID","SOURCES","TARGETS","ATTR","DATA")
			fw[x].write(header)
		print("Writes data")
		# The log file rows
		for x in range(x_max):
			for y in range(y_max):
				# The event and its data
				try:
					track = self.event_table[x,y].event_id.track
					number = self.event_table[x,y].event_id.id
					time = self.event_table[x,y].time
					attr = self.event_table[x,y].attr
					data = self.event_table[x,y].data
					sources = ""
					for i in range(1,self.event_table[x,y].source_id_cnt+1):
						str = "%s.%s;" % (self.event_table[x,y].source_ids[i].track,
							self.event_table[x,y].source_ids[i].id)
						sources += str
					targets = ""
					for i in range(1,self.event_table[x,y].target_id_cnt+1):
						str = "%s.%s;" % (self.event_table[x,y].target_ids[i].track,
							self.event_table[x,y].target_ids[i].id)
						targets += str
					# Compute the log timestamp from the "time index". 9.3.2018 Esa
					log_timestamp = self.log_start_datetime + timedelta(seconds=time)
					log_timestamp_str = log_timestamp.strftime("%Y-%m-%d %H:%M:%S")
					line = "%s,%s.%s,%s,%s,%s,%s\n" % (log_timestamp_str,track,number,sources,targets,attr,data)
					#print("log_timestamp_str: %s"%log_timestamp_str)
					#line = "%s,%s.%s,%s,%s,%s,%s\n" % (time,track,number,sources,targets,attr,data)
					fw[x].write(line)
					# Also write everything into a single file. Added 24.4.2018 Esa
					# Note! Not completely in time order !!
					fw[x_max].write(line)
				except:
					print("Not found: x=%s ,y=%s" % (x,y))
					continue
			fw[x].close()
		fw[x_max].close()
	def make_dir_if_no_exist(self,file_path_name):
		"""Create the directory part of file_path_name if it does not exist."""
		# Python3
		#os.makedirs(os.path.dirname(file_path_name), exist_ok=True)
		# Python2
		if not os.path.exists(os.path.dirname(file_path_name)):
			try:
				os.makedirs(os.path.dirname(file_path_name))
			except OSError as exc:
				# NOTE(review): 'errno' is never imported in this module, so
				# this line raises NameError if makedirs fails — add
				# 'import errno' at the top of the file.
				if exc.errno != errno.EEXIST:
					raise
	class Branch_complexity_params:
		# Parameter bundle for one branch-complexity level of the matrix.
		def __init__(self,btre_min,btre_max,bmer_min,bmer_max,bctype):
			print("Branch_complexity_params:")
			self.btre_min=btre_min
			self.btre_max=btre_max
			self.bmer_min=bmer_min
			self.bmer_max=bmer_max
			self.bctype=bctype
	class Trace_size_variation_params:
		# Parameter bundle for one trace-size-variation level of the matrix.
		def __init__(self,tle_min,tle_max,tnu_min,tnu_max,tble_min,tble_max,tbnu_min,tbnu_max):
			print("Trace_size_variation_params:")
			self.tle_min=tle_min
			self.tle_max=tle_max
			self.tnu_min=tnu_min
			self.tnu_max=tnu_max
			self.tble_min=tble_min
			self.tble_max=tble_max
			self.tbnu_min=tbnu_min
			self.tbnu_max=tbnu_max
	class TestMatrix:
		"""Matrix of TracePatternBlocks indexed by (complexity, size) level."""
		test_pattern_blocks={}
		def __init__(self,matrix_x,matrix_y,branch_complexity_level_params,trace_size_variation_level_params):
			print("TestMatrix")
			# Create the matrix elements (patterns)
			for x in range(int(matrix_x)):
				btre_min = branch_complexity_level_params[x].btre_min
				btre_max = branch_complexity_level_params[x].btre_max
				bmer_min = branch_complexity_level_params[x].bmer_min
				bmer_max = branch_complexity_level_params[x].bmer_max
				bctype = branch_complexity_level_params[x].bctype
				for y in range(int(matrix_y)):
					tle_min = trace_size_variation_level_params[y].tle_min
					tle_max = trace_size_variation_level_params[y].tle_max
					tnu_min = trace_size_variation_level_params[y].tnu_min
					tnu_max = trace_size_variation_level_params[y].tnu_max
					tble_min = trace_size_variation_level_params[y].tble_min
					tble_max = trace_size_variation_level_params[y].tble_max
					tbnu_min = trace_size_variation_level_params[y].tbnu_min
					tbnu_max = trace_size_variation_level_params[y].tbnu_max
					self.test_pattern_blocks[x,y] = self.TracePatternBlocks(x,y,
						tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max,
						btre_min,btre_max,bmer_min,bmer_max,bctype)
		class TracePatternBlocks:
			# One matrix element: just stores the two parameter tuples.
			def __init__(self,x,y,tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max,btre_min,btre_max,bmer_min,bmer_max,bctype):
				print("TracePatternBlocks: x=%d, y=%d" % (x,y))
				print("tble_min=%s,tble_max=%s,tbnu_min=%s,tbnu_max=%s,tle_min=%s,tle_max=%s,tnu_min=%s,tnu_max=%s" %
					(tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max))
				print("btre_min=%s,btre_max=%s,bmer_min=%s,bmer_max=%s,bctype=%s" %
					(btre_min,btre_max,bmer_min,bmer_max,bctype))
				self.b_parameters = (btre_min,btre_max,bmer_min,bmer_max,bctype)
				self.t_parameters = (tble_min,tble_max,tbnu_min,tbnu_max,tle_min,tle_max,tnu_min,tnu_max)
	class LogFiles:
		# Bundle of log-file generation parameters.
		def __init__(self,lver,lsnoe,lsnof,lcnoi,lcmis,lcinc,lsrc,lmeta):
			print("LogFiles")
			print("lver=%s,lsnoe=%s,lsnof=%s,lcnoi=%s,lcmis=%s,lcinc=%s,lsrc=%s,lmeta=%s" % (lver,lsnoe,lsnof,lcnoi,lcmis,lcinc,lsrc,lmeta))
			self.parameters=(lver,lsnoe,lsnof,lcnoi,lcmis,lcinc,lsrc,lmeta)
	class TraceBlock:
		"""One trace block: generates its events into the shared event_table
		and remembers its input/output boundary events for inter-block links."""
		def __init__(self,tb_x,tb_y,tle_min,tle_max,tnu_min,tnu_max,btre_min,btre_max,bmer_min,bmer_max,
			event_table,time_ev_max):
			self.input_event_list=[]
			self.output_event_list=[]
			self.output_main_event_list=[]
			self.time_ev_max=time_ev_max
			# Generate the trace block's events into the event table, with
			# tracks on the x axis and (main) traces on the y axis.
			# Walk through the block's tracks
			for x in range(tle_max):
				track = x + tb_x * tle_max
				track_prev = track-1
				# If the first track
				if x == 0:
					print("\n First track")
					# Walk through the events
					for y in range(tnu_max):
						number_y = y + tb_y * tnu_max
						attr="-"
						data="D%s-%s" % (track,number_y)
						#timestamp = 1 + track*10 + number_y
						timestamp = 1 + track*self.time_ev_max + number_y
						event_table[track,number_y] = self.Event(track,number_y,attr,data,timestamp)
						event_table[track,number_y].set_attr("M") # Main branch event
						self.input_event_list.append(event_table[track,number_y])
				# If the second track
				elif x == 1:
					number_z=0 + tb_y * tnu_max * btre_max
					print("\n Second track: %s" % number_z)
					for y in range(tnu_max):
						number_y = y + tb_y * tnu_max
						# Walk through the tree branches
						for z in range(btre_max):
							attr="-"
							data="D%s-%s" % (track,number_z)
							#timestamp = 1 + track*10 + number_z
							timestamp = 1 + track*self.time_ev_max + number_z
							event_table[track,number_z] = self.Event(track,number_z,attr,data,timestamp)
							event_table[track,number_z].add_source_id(track_prev,number_y)
							# Also the other way round (eases generating the test analyses ?)
							event_table[track_prev,number_y].add_target_id(track,number_z)
							# Event type
							if (number_z % btre_max) == 0:
								event_table[track,number_z].set_attr("M")
							else:
								event_table[track,number_z].set_attr("B")
							number_z += 1
				# If the last track
				elif x == int(tle_max)-1:
					number_z=0 + tb_y * tnu_max * btre_max
					# Compute the number of events in the last track's block
					# (does not always work ? if the block has several merged branches ?)
					last_track_max=tnu_max*btre_max - bmer_max + 1
					#print("	last_track_max=%s" % last_track_max)
					number_last = 0 + tb_y * last_track_max
					print("\n Last track: %s, last:%s" % (number_z,number_last))
					number_z_list = []
					number_z_cnt = 0
					for y in range(tnu_max):
						main_trace=1
						for z in range(btre_max):
							if main_trace == 1:
								print("	Main trace: number_z_cnt=%s, number_z=%s, bmer_max=%s" % (number_z_cnt,number_z,bmer_max))
								number_z_list.append(number_z)
								if number_z_cnt >= bmer_max-1:
									print("	Merged events:")
									# NOTE(review): 'attr' is not assigned in
									# this branch — the value used below is
									# left over from an earlier track's loop.
									data="D%s-%s" % (track,number_last)
									#timestamp = 1 + track*10 + number_last
									timestamp = 1 + track*self.time_ev_max + number_last
									event_table[track,number_last] = self.Event(track,number_last,attr,data,timestamp)
									event_table[track,number_last].set_attr("M")
									self.output_event_list.append(event_table[track,number_last])
									self.output_main_event_list.append(event_table[track,number_last])
									for number_z_old in number_z_list:
										event_table[track,number_last].add_source_id(track_prev,number_z_old)
										# Also the other way round (eases generating the test analyses ?)
										event_table[track_prev,number_z_old].add_target_id(track,number_last)
									number_last += 1
									number_z_list = []
									number_z_cnt = 0
								else:
									number_z_cnt += 1
							else:
								print("	No main trace ")
								data="D%s-%s" % (track,number_last)
								#timestamp = 1 + track*10 + number_last
								timestamp = 1 + track*self.time_ev_max + number_last
								event_table[track,number_last] = self.Event(track,number_last,attr,data,timestamp)
								event_table[track,number_last].add_source_id(track_prev,number_z)
								# Also the other way round (eases generating the test analyses ?)
								event_table[track_prev,number_z].add_target_id(track,number_last)
								event_table[track,number_last].set_attr("B")
								self.output_event_list.append(event_table[track,number_last])
								number_last += 1
							number_z += 1
							main_trace=0
				# Otherwise the intermediate tracks
				else:
					number_z=0 + tb_y * tnu_max * btre_max
					print("\n Inter tracks: %s" % number_z)
					for y in range(tnu_max):
						for z in range(btre_max):
							attr="-"
							data="D%s-%s" % (track,number_z)
							#timestamp = 1 + track*10 + number_z
							timestamp = 1 + track*self.time_ev_max + number_z
							event_table[track,number_z] = self.Event(track,number_z,attr,data,timestamp)
							event_table[track,number_z].add_source_id(track_prev,number_z)
							# Also the other way round (eases generating the test analyses ?)
							event_table[track_prev,number_z].add_target_id(track,number_z)
							event_table[track,number_z].set_attr(event_table[track_prev,number_z].get_attr())
							number_z += 1
		def get_output_events(self,type):
			"""Return output boundary events; "M" selects main-branch only."""
			print("get_output_events: %s" % type)
			if type == "M":
				return self.output_main_event_list
			else:
				return self.output_event_list
		def get_input_events(self):
			"""Return the block's input boundary events (first track)."""
			print("get_input_events")
			return self.input_event_list
		class Event:
			"""One log event with identity, payload and source/target links."""
			def __init__(self,track,number,attr,data,time):
				print("	Event: Track: %s, Number: %s, Attr: %s, Data: %s, Time: %s" % (track,number,attr,data,time))
				self.event_id=self.Id(track,number)
				# Relationship by membership (aggregation ?)
				# The data is outside the event ?
				self.attr=attr
				self.data=data
				# Relationship by timing
				self.time=time
				self.source_ids={}
				self.target_ids={}
				self.source_id_cnt=0
				self.target_id_cnt=0
			# Relationship by cause ? (only the previous events, not the whole chain ?)
			def add_source_id(self,track,number):
				self.source_id_cnt+=1
				self.source_ids[self.source_id_cnt]=self.Id(track,number)
				print("	add sid: %s.%s for event: %s.%s" % (track,number,self.event_id.track,self.event_id.id))
			# Is this needed, since it predicts ? (needed at least for generating the test analyses ?)
			def add_target_id(self,track,number):
				self.target_id_cnt+=1
				self.target_ids[self.target_id_cnt]=self.Id(track,number)
				print("	add tid: %s.%s for event: %s.%s" % (track,number,self.event_id.track,self.event_id.id))
			def set_attr(self,attr):
				print("	set_attr: %s" % attr)
				self.attr=attr
			def get_attr(self):
				#print("get_attr")
				return self.attr
			class Id:
				# Event identity: (track, id) pair.
				def __init__(self,track,id):
					#print("	++ Event Id: Track: %s, Id: %s" % (track,id))
					self.track=track
					self.id=id
def set_test_model(args):
	"""Build the module-global TestModel instance from parsed CLI args."""
	global test_model
	test_model = TestModel(args)
def generate_analyzing():
	"""Run analysis test-case generation on the global test model."""
	global test_model
	test_model.generate_analyzing()
def generate_logs():
	"""Run log-file generation on the global test model."""
	global test_model
	test_model.generate_logs()
#******************************************************************************
#
# FUNCTION: main
#
#******************************************************************************
def main():
	"""Entry point.

	Parses the CLI arguments, echoes the full configuration, reads the
	geometry section of LogTestGen.ini, then either starts the (currently
	disabled) GUI or runs the generation pipeline:
	set_test_model -> generate_analyzing -> generate_logs.
	"""
	print("version: %s" % g_version)
	print("Python sys: %s\n" % sys.version)
	#print("Modules : %s\n" % sys.modules.keys())
	start_time = time.time()
	parser = argparse.ArgumentParser()
	parser.add_argument('-test_name','--test_name', dest='test_name', help='test_name')
	parser.add_argument('-log_path','--log_path', dest='log_path', help='log_path')
	parser.add_argument('-ana_path','--ana_path', dest='ana_path', help='ana_path')
	parser.add_argument('-ana_lang','--ana_lang', dest='ana_lang', help='ana_lang')
	# Branch complexity parameters
	# Default values
	parser.add_argument('-b0_bmer_min','--b0_bmer_min', dest='b0_bmer_min', type=int, default=1, help='b0_bmer_min')
	parser.add_argument('-b0_bmer_max','--b0_bmer_max', dest='b0_bmer_max', type=int, default=1, help='b0_bmer_max')
	parser.add_argument('-b0_bctype','--b0_bctype', dest='b0_bctype', help='b0_bctype')
	# Level 1 parameters
	parser.add_argument('-b1_btre_min','--b1_btre_min', dest='b1_btre_min', type=int, help='b1_btre_min')
	parser.add_argument('-b1_btre_max','--b1_btre_max', dest='b1_btre_max', type=int, help='b1_btre_max')
	# Level 2 parameters
	parser.add_argument('-b2_btre_min','--b2_btre_min', dest='b2_btre_min', type=int, help='b2_btre_min')
	parser.add_argument('-b2_btre_max','--b2_btre_max', dest='b2_btre_max', type=int, help='b2_btre_max')
	parser.add_argument('-b2_bctype','--b2_bctype', dest='b2_bctype', help='b2_bctype')
	# Level 3 parameters
	parser.add_argument('-b3_bmer_min','--b3_bmer_min', dest='b3_bmer_min', type=int, help='b3_bmer_min')
	parser.add_argument('-b3_bmer_max','--b3_bmer_max', dest='b3_bmer_max', type=int, help='b3_bmer_max')
	# Trace size variation parameters
	# Default values
	parser.add_argument('-t0_tbnu_min','--t0_tbnu_min', dest='t0_tbnu_min', type=int, default=1, help='t0_tbnu_min')
	parser.add_argument('-t0_tbnu_max','--t0_tbnu_max', dest='t0_tbnu_max', type=int, default=1, help='t0_tbnu_max')
	parser.add_argument('-t0_tnu_min','--t0_tnu_min', dest='t0_tnu_min', type=int, default=3, help='t0_tnu_min')
	parser.add_argument('-t0_tnu_max','--t0_tnu_max', dest='t0_tnu_max', type=int, default=3, help='t0_tnu_max')
	# Level 1 parameters
	parser.add_argument('-t1_tle_min','--t1_tle_min', dest='t1_tle_min', type=int, help='t1_tle_min')
	parser.add_argument('-t1_tle_max','--t1_tle_max', dest='t1_tle_max', type=int, help='t1_tle_max')
	parser.add_argument('-t1_tble_min','--t1_tble_min', dest='t1_tble_min', type=int, help='t1_tble_min')
	parser.add_argument('-t1_tble_max','--t1_tble_max', dest='t1_tble_max', type=int, help='t1_tble_max')
	# Level 2 parameters
	parser.add_argument('-t2_tle_min','--t2_tle_min', dest='t2_tle_min', type=int, help='t2_tle_min')
	parser.add_argument('-t2_tle_max','--t2_tle_max', dest='t2_tle_max', type=int, help='t2_tle_max')
	parser.add_argument('-t2_tble_min','--t2_tble_min', dest='t2_tble_min', type=int, help='t2_tble_min')
	parser.add_argument('-t2_tble_max','--t2_tble_max', dest='t2_tble_max', type=int, help='t2_tble_max')
	# Level 3 parameters
	parser.add_argument('-t3_tnu_min','--t3_tnu_min', dest='t3_tnu_min', type=int, help='t3_tnu_min')
	parser.add_argument('-t3_tnu_max','--t3_tnu_max', dest='t3_tnu_max', type=int, help='t3_tnu_max')
	parser.add_argument('-t3_tbnu_min','--t3_tbnu_min', dest='t3_tbnu_min', type=int, help='t3_tbnu_min')
	parser.add_argument('-t3_tbnu_max','--t3_tbnu_max', dest='t3_tbnu_max', type=int, help='t3_tbnu_max')
	# Log file parameters
	parser.add_argument('-lver','--lver', dest='lver', type=int, help='lver')
	parser.add_argument('-lsnoe','--lsnoe', dest='lsnoe', help='lsnoe')
	parser.add_argument('-lsnof','--lsnof', dest='lsnof', type=int, help='lsnof')
	parser.add_argument('-lcnoi','--lcnoi', dest='lcnoi', type=int, help='lcnoi')
	parser.add_argument('-lcmis','--lcmis', dest='lcmis', type=int, help='lcmis')
	parser.add_argument('-lcinc','--lcinc', dest='lcinc', type=int, help='lcinc')
	parser.add_argument('-lsrc','--lsrc', dest='lsrc', help='lsrc')
	parser.add_argument('-lmeta','--lmeta', dest='lmeta', help='lmeta')
	# Time parameters
	parser.add_argument('-time_start','--time_start', dest='time_start', type=int, help='time_start')
	parser.add_argument('-time_ev_min','--time_ev_min', dest='time_ev_min', type=int, help='time_ev_min')
	parser.add_argument('-time_ev_max','--time_ev_max', dest='time_ev_max', type=int, help='time_ev_max')
	# Other
	parser.add_argument('-gui_enable','--gui_enable', dest='gui_enable', type=int, help='gui_enable')
	# BUG FIX: the GUI branch below reads args.area_size, args.busstop_size
	# and args.gui_zoom, but they were never registered with argparse, so
	# '-gui_enable 1' always crashed with AttributeError. Register them with
	# harmless defaults (new optional flags: backward compatible).
	parser.add_argument('-area_size','--area_size', dest='area_size', default='800x600', help='GUI area size WxH')
	parser.add_argument('-busstop_size','--busstop_size', dest='busstop_size', default='10x10', help='GUI busstop size WxH')
	parser.add_argument('-gui_zoom','--gui_zoom', dest='gui_zoom', type=float, default=1.0, help='GUI zoom factor')
	args = parser.parse_args()
	print("test_name : %s " % args.test_name)
	print("log_path : %s " % args.log_path)
	print("ana_path : %s " % args.ana_path)
	print("ana_lang : %s " % args.ana_lang)
	print("\nBranch complexity level ---" )
	print("Default values:" )
	print("b0_bmer_min : %s" % args.b0_bmer_min)
	print("b0_bmer_max : %s" % args.b0_bmer_max)
	print("b0_bctype : %s" % args.b0_bctype)
	print("Level 1:" )
	print("b1_btre_min : %s" % args.b1_btre_min)
	print("b1_btre_max : %s" % args.b1_btre_max)
	print("Level 2:" )
	print("b2_btre_min : %s" % args.b2_btre_min)
	print("b2_btre_max : %s" % args.b2_btre_max)
	print("b2_bctype : %s" % args.b2_bctype)
	print("Level 3:" )
	print("b3_bmer_min : %s" % args.b3_bmer_min)
	print("b3_bmer_max : %s" % args.b3_bmer_max)
	print("\nTraces size variation level ---" )
	print("Default values:" )
	print("t0_tbnu_min : %s" % args.t0_tbnu_min)
	print("t0_tbnu_max : %s" % args.t0_tbnu_max)
	print("t0_tnu_min : %s" % args.t0_tnu_min)
	print("t0_tnu_max : %s" % args.t0_tnu_max)
	print("Level 1:" )
	print("t1_tle_min : %s" % args.t1_tle_min)
	print("t1_tle_max : %s" % args.t1_tle_max)
	print("t1_tble_min : %s" % args.t1_tble_min)
	print("t1_tble_max : %s" % args.t1_tble_max)
	print("Level 2:" )
	print("t2_tle_min : %s" % args.t2_tle_min)
	print("t2_tle_max : %s" % args.t2_tle_max)
	print("t2_tble_min : %s" % args.t2_tble_min)
	print("t2_tble_max : %s" % args.t2_tble_max)
	print("Level 3:" )
	print("t3_tnu_min : %s" % args.t3_tnu_min)
	print("t3_tnu_max : %s" % args.t3_tnu_max)
	print("t3_tbnu_min : %s" % args.t3_tbnu_min)
	print("t3_tbnu_max : %s" % args.t3_tbnu_max)
	print("\nLog files parameters ---" )
	print("lver : %s" % args.lver)
	print("lsnoe : %s" % args.lsnoe)
	print("lsnof : %s" % args.lsnof)
	print("lcnoi : %s" % args.lcnoi)
	print("lcmis : %s" % args.lcmis)
	print("lcinc : %s" % args.lcinc)
	print("lsrc : %s" % args.lsrc)
	print("lmeta : %s" % args.lmeta)
	print("\nTime parameters ---" )
	print("time_start : %s" % args.time_start)
	print("time_ev_min : %s" % args.time_ev_min)
	print("time_ev_max : %s" % args.time_ev_max)
	print("\nOther parameters ---" )
	print("gui_enable : %s" % args.gui_enable)
	# Window geometry comes from the ini file (raises if the file or the
	# GEOMETRY section is missing, as before).
	config = configparser.ConfigParser()
	config.read('LogTestGen.ini')
	testarea_x = int(config['GEOMETRY']['Testarea_x'])
	testarea_y = int(config['GEOMETRY']['Testarea_y'])
	logarea_x = int(config['GEOMETRY']['Logarea_x'])
	logarea_y = int(config['GEOMETRY']['Logarea_y'])
	print("Testarea_x = %s" % testarea_x)
	print("Testarea_y = %s" % testarea_y)
	print("Logarea_x = %s" % logarea_x)
	print("Logarea_y = %s" % logarea_y)
	# Start the GUI if needed
	if args.gui_enable == 1:
		print("GUI enabled\n")
		area_x,area_y = args.area_size.split("x")
		bstop_size_x,bstop_size_y = args.busstop_size.split("x")
		zoom_factor = args.gui_zoom
		area_x_int = int(area_x)
		area_y_int = int(area_y)
		bstop_size_x_int = int(bstop_size_x)
		bstop_size_y_int = int(bstop_size_y)
		x_width = area_x_int + bstop_size_x_int
		x_width_new = int(x_width * zoom_factor)
		y_height = area_y_int + bstop_size_y_int
		y_height_new = int(y_height * zoom_factor)
		x_offset = int(bstop_size_x_int / 2)
		y_offset = int(bstop_size_y_int / 2)
		x_offset_new = int(x_offset * zoom_factor)
		y_offset_new = int(y_offset * zoom_factor)
		print("zoom_factor = %s" % zoom_factor)
		print("x_width = %s" % x_width)
		print("y_height = %s" % y_height)
		print("x_width_new = %s" % x_width_new)
		print("y_height_new = %s" % y_height_new)
		print("x_offset = %s" % x_offset)
		print("y_offset = %s" % y_offset)
		print("x_offset_new = %s" % x_offset_new)
		print("y_offset_new = %s" % y_offset_new)
		# NOTE(review): QApplication is currently undefined because the
		# 'from LogGUI import *' line at the top of the file is commented
		# out — re-enable it before using the GUI branch.
		app = QApplication(sys.argv)
		#gui = GUI_TestArea(args,"Testarea",testarea_x,testarea_y,x_width_new,y_height_new,
		#				x_offset_new,y_offset_new,zoom_factor,generate_testarea)
		#gui2 = GUI_LogArea(args,"Logarea",logarea_x,logarea_y,700,1050,0,0,1.0,generate_bus_run_logs)
		#gui.show()
		#gui2.show()
		sys.exit(app.exec_())
	else:
		# Run the generation pipeline. (The stray 'self_value = ""' that
		# used to sit here was dead code and has been removed.)
		set_test_model(args)
		generate_analyzing()
		generate_logs()
	print("\n Total execution time: %.3f seconds" % (time.time() - start_time))
	# If the GUI is in use, stop only after enter is pressed
	if args.gui_enable == 1:
		user_input = input("Press enter to stop")
if __name__ == '__main__':
	main()
| gpl-3.0 |
dushu1203/chromium.src | remoting/tools/build/remoting_copy_locales.py | 142 | 5150 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to repack paks for a list of locales.
Gyp doesn't have any built-in looping capability, so this just provides a way to
loop over a list of locales when repacking pak files, thus avoiding a
proliferation of mostly duplicate, cut-n-paste gyp actions.
"""
import optparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..',
'tools', 'grit'))
from grit.format import data_pack
# Some build paths defined by gyp.
GRIT_DIR = None
INT_DIR = None
# The target platform. If it is not defined, sys.platform will be used.
OS = None
# Extra input files.
EXTRA_INPUT_FILES = []
class Usage(Exception):
  """Signals a command-line usage error; ``msg`` carries the detail text."""
  def __init__(self, msg):
    self.msg = msg
def calc_output(locale):
  """Determine the file that will be generated for the given locale."""
  #e.g. '<(INTERMEDIATE_DIR)/remoting_locales/da.pak',
  if OS in ('mac', 'ios'):
    # Cocoa locates the locale at runtime using '_' instead of '-'
    # (http://crbug.com/20441), so rewrite the locale name accordingly.
    lproj_dir = '%s.lproj' % locale.replace('-', '_')
    return os.path.join(INT_DIR, 'remoting', 'resources', lproj_dir,
                        'locale.pak')
  return os.path.join(INT_DIR, 'remoting_locales', locale + '.pak')
def calc_inputs(locale):
  """Determine the files that need processing for the given locale."""
  #e.g. '<(grit_out_dir)/remoting/resources/da.pak'
  main_pak = os.path.join(GRIT_DIR, 'remoting/resources/%s.pak' % locale)
  # Extra input files follow the '<base>_<locale>.pak' naming convention.
  extra_paks = ['%s_%s.pak' % (base, locale) for base in EXTRA_INPUT_FILES]
  return [main_pak] + extra_paks
def list_outputs(locales):
  """Returns the names of files that will be generated for the given locales.

  This is to provide gyp the list of output files, so build targets can
  properly track what needs to be built.
  """
  # Quote each element so filename spaces don't mess up gyp's attempt to
  # parse the result into a list.
  return " ".join('"%s"' % calc_output(locale) for locale in locales)
def list_inputs(locales):
  """Returns the names of files that will be processed for the given locales.

  This is to provide gyp the list of input files, so build targets can
  properly track their prerequisites.
  """
  all_inputs = []
  for locale in locales:
    all_inputs.extend(calc_inputs(locale))
  # Quote each element so filename spaces don't mess up gyp's attempt to
  # parse the result into a list.
  return " ".join('"%s"' % name for name in all_inputs)
def repack_locales(locales):
  """ Loop over and repack the given locales."""
  for locale in locales:
    # Merge all of this locale's input .pak files into one output pak.
    inputs = calc_inputs(locale)
    output = calc_output(locale)
    data_pack.DataPack.RePack(output, inputs)
def DoMain(argv):
  """Parse options, then print input/output lists or repack the locales.

  Returns the quoted input/output list as a string when "-i"/"-o" is given
  (for gyp to capture); otherwise repacks the pak files for every requested
  locale.  Exits via parser.error() on invalid option combinations.
  """
  global GRIT_DIR
  global INT_DIR
  global OS
  global EXTRA_INPUT_FILES
  parser = optparse.OptionParser("usage: %prog [options] locales")
  parser.add_option("-i", action="store_true", dest="inputs", default=False,
                    help="Print the expected input file list, then exit.")
  parser.add_option("-o", action="store_true", dest="outputs", default=False,
                    help="Print the expected output file list, then exit.")
  parser.add_option("-g", action="store", dest="grit_dir",
                    help="GRIT build files output directory.")
  parser.add_option("-x", action="store", dest="int_dir",
                    help="Intermediate build files output directory.")
  parser.add_option("-e", action="append", dest="extra_input", default=[],
                    help="Full path to an extra input pak file without the\
                         locale suffix and \".pak\" extension.")
  parser.add_option("-p", action="store", dest="os",
                    help="The target OS. (e.g. mac, linux, win, etc.)")
  options, locales = parser.parse_args(argv)
  if not locales:
    # Fixed typo in the user-facing message ("specificy" -> "specify").
    parser.error('Please specify at least one locale to process.\n')
  print_inputs = options.inputs
  print_outputs = options.outputs
  GRIT_DIR = options.grit_dir
  INT_DIR = options.int_dir
  EXTRA_INPUT_FILES = options.extra_input
  OS = options.os
  if not OS:
    # No explicit target OS: infer it from the host platform.
    if sys.platform == 'darwin':
      OS = 'mac'
    elif sys.platform.startswith('linux'):
      OS = 'linux'
    elif sys.platform in ('cygwin', 'win32'):
      OS = 'win'
    else:
      OS = sys.platform
  # Validate option combinations: "-i" needs "-g", "-o" needs "-x", and an
  # actual repack needs both.
  if print_inputs and print_outputs:
    parser.error('Please specify only one of "-i" or "-o".\n')
  if print_inputs and not GRIT_DIR:
    parser.error('Please specify "-g".\n')
  if print_outputs and not INT_DIR:
    parser.error('Please specify "-x".\n')
  if not (print_inputs or print_outputs or (GRIT_DIR and INT_DIR)):
    parser.error('Please specify both "-g" and "-x".\n')
  if print_inputs:
    return list_inputs(locales)
  if print_outputs:
    return list_outputs(locales)
  return repack_locales(locales)
if __name__ == '__main__':
  # When run directly, echo the requested input/output list on stdout so the
  # caller (gyp) can capture it; repack runs return nothing to print.
  results = DoMain(sys.argv[1:])
  if results:
    print results
| bsd-3-clause |
ponty/arduino-sketchbook | libacoll/xtests/test_examples.py | 1 | 2094 | from nose.tools import eq_, ok_
from path import path
from pyavrutils import support, arduino
from pyavrutils.arduino import Arduino, ArduinoCompileError
import logging
root = path(__file__).parent.parent
examples = support.find_examples(root)
# (sketch filename, mcu) pairs known to fail to build; combinations listed
# here are excluded from test generation below.
fails = [
         ('PWM.pde', 'atmega8'),
         ]
# def check_build(ex, hwpack, board):
# cc = Arduino(hwpack=hwpack, board=board)
# cc.extra_lib = root
# print cc.hwpack, cc.board, ex
# print (str(path(ex).name), cc.mcu_compiler())
# if (str(path(ex).name), cc.mcu_compiler()) in fails:
# class Dummy(TestCase):
# def runTest(self):
# pass
# Dummy().assertRaises(ArduinoCompileError, cc.build, cc, ex)
# else:
# cc.build(ex)
# assert cc.size().ok
def check_build(ex, hwpack, board):
    # Compile example sketch *ex* for the given hardware pack and board, and
    # assert that the resulting firmware passes the size check.
    cc = Arduino(hwpack=hwpack, board=board)
    # cc.extra_lib = root
    print cc.hwpack, cc.board, ex
    cc.build(ex)
    assert cc.size().ok
def generate(func, params, labels=None):
if not labels:
labels = params
if not hasattr(func, '_index'):
func._index = 0
func._index += 1
cmd = '''def test_{func._index}_{labels}(): {func.__name__}({params})'''.format(func=func,
params=','.join(
['"%s"' % x for x in params]),
labels='_'.join(labels))
logging.debug('cmd:' + cmd)
return cmd
# def test_build():
# for ex in examples:
# for cc in arduino.targets():
# cc.extra_lib = root
# if cc.hwpack=='arduino':
# yield check_build, ex, cc
# Dynamically generate one module-level test function per (example, board)
# combination for the 'arduino' hwpack, skipping combinations known to fail.
for ex in examples:
    for cc in arduino.targets():
        if cc.hwpack == 'arduino':
            if (str(path(ex).name), cc.mcu_compiler()) not in fails:
                exec generate(check_build,
                              [ex, cc.hwpack, cc.board],
                              [ex.namebase, cc.hwpack, cc.board])
| bsd-2-clause |
rosmo/boto | boto/ec2/autoscale/limits.py | 152 | 1958 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class AccountLimits(object):
    """Auto Scaling account limits parsed from an AWS XML response."""

    # Known integer-valued XML elements mapped onto attribute names.
    _INT_FIELDS = {
        'MaxNumberOfAutoScalingGroups': 'max_autoscaling_groups',
        'MaxNumberOfLaunchConfigurations': 'max_launch_configurations',
    }

    def __init__(self, connection=None):
        self.connection = connection
        self.max_autoscaling_groups = None
        self.max_launch_configurations = None

    def __repr__(self):
        return 'AccountLimits: [%s, %s]' % (self.max_autoscaling_groups,
                                            self.max_launch_configurations)

    def startElement(self, name, attrs, connection):
        # No nested elements to handle.
        return None

    def endElement(self, name, value, connection):
        # Recognized numeric fields are converted to int; the RequestId and
        # any unknown element are kept verbatim for forward compatibility.
        if name == 'RequestId':
            self.request_id = value
            return
        attr = self._INT_FIELDS.get(name)
        if attr is not None:
            setattr(self, attr, int(value))
        else:
            setattr(self, name, value)
| mit |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/tests/regressiontests/one_to_one_regress/models.py | 92 | 1180 | from django.db import models
class Place(models.Model):
    # Basic location record; the classes below each hold a one-to-one
    # relation back to a Place.
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)

    def __unicode__(self):
        return u"%s the place" % self.name
class Restaurant(models.Model):
    # One-to-one with Place: a place can host at most one restaurant.
    place = models.OneToOneField(Place)
    serves_hot_dogs = models.BooleanField()
    serves_pizza = models.BooleanField()

    def __unicode__(self):
        return u"%s the restaurant" % self.place.name
class Bar(models.Model):
    # Second one-to-one onto the same Place model as Restaurant.
    place = models.OneToOneField(Place)
    serves_cocktails = models.BooleanField()

    def __unicode__(self):
        return u"%s the bar" % self.place.name
class UndergroundBar(models.Model):
    # Unlike Bar, the one-to-one link here is nullable.
    place = models.OneToOneField(Place, null=True)
    serves_cocktails = models.BooleanField()
class Favorites(models.Model):
    # Many-to-many that targets a model which itself has a one-to-one field.
    name = models.CharField(max_length = 50)
    restaurants = models.ManyToManyField(Restaurant)

    def __unicode__(self):
        return u"Favorites for %s" % self.name
class Target(models.Model):
    # Empty model used only as the target of the one-to-one fields below.
    pass
class Pointer(models.Model):
    # One-to-one used as the primary key of the pointing model.
    other = models.OneToOneField(Target, primary_key=True)
class Pointer2(models.Model):
    # Plain one-to-one (non-primary-key) onto the same Target.
    other = models.OneToOneField(Target)
| apache-2.0 |
tdautc19841202/SmartQQ-for-Raspberry-Pi | HttpClient.py | 5 | 1871 | import cookielib, urllib, urllib2, socket
class HttpClient:
    """Cookie-aware HTTP helper built on urllib2 (Python 2 only).

    The cookie jar and opener are class-level, so every instance shares one
    session; the opener is also installed process-wide via install_opener.
    """
    __cookie = cookielib.CookieJar()
    __req = urllib2.build_opener(urllib2.HTTPCookieProcessor(__cookie))
    __req.addheaders = [
        ('Accept', 'application/javascript, */*;q=0.8'),
        ('User-Agent', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)')
    ]
    urllib2.install_opener(__req)

    def Get(self, url, refer=None):
        # GET *url*; returns the response body, the error page body on an
        # HTTP error, or '' on a socket timeout/error.
        try:
            req = urllib2.Request(url)
            if not (refer is None):
                req.add_header('Referer', refer)
            return urllib2.urlopen(req, timeout=120).read()
        except urllib2.HTTPError, e:
            return e.read()
        except socket.timeout, e:
            return ''
        except socket.error, e:
            return ''

    def Post(self, url, data, refer=None):
        # POST the url-encoded *data* dict; same return conventions as Get().
        try:
            req = urllib2.Request(url, urllib.urlencode(data))
            if not (refer is None):
                req.add_header('Referer', refer)
            return urllib2.urlopen(req, timeout=120).read()
        except urllib2.HTTPError, e:
            return e.read()
        except socket.timeout, e:
            return ''
        except socket.error, e:
            return ''

    def Download(self, url, file):
        # Fetch *url* and write the whole body to local path *file*.
        output = open(file, 'wb')
        output.write(urllib2.urlopen(url).read())
        output.close()

    # def urlencode(self, data):
    # return urllib.quote(data)

    def getCookie(self, key):
        # Value of the first cookie named *key* in the shared jar, or ''.
        for c in self.__cookie:
            if c.name == key:
                return c.value
        return ''

    def setCookie(self, key, val, domain):
        # Manufacture a session cookie for *domain* and add it to the jar.
        ck = cookielib.Cookie(version=0, name=key, value=val, port=None, port_specified=False, domain=domain, domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
        self.__cookie.set_cookie(ck)

    #self.__cookie.clear() clean cookie
# vim : tabstop=2 shiftwidth=2 softtabstop=2 expandtab | gpl-3.0 |
DavidLi2010/ramcloud | scripts/common.py | 7 | 10155 | #!/usr/bin/env python
# Copyright (c) 2010 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Misc utilities and variables for Python scripts."""
import contextlib
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import time
__all__ = ['sh', 'captureSh', 'Sandbox', 'getDumpstr']
def sh(command, bg=False, **kwargs):
    """Run *command* through the local shell.

    When bg is true the command is launched asynchronously and its Popen
    handle is returned; otherwise the call blocks until completion and a
    CalledProcessError is raised on a non-zero exit status.
    """
    kwargs['shell'] = True
    if not bg:
        subprocess.check_call(command, **kwargs)
        return None
    return subprocess.Popen(command, **kwargs)
def captureSh(command, **kwargs):
    """Execute a local command and capture its output.

    Raises CalledProcessError if the command exits non-zero.  A single
    trailing newline is stripped, mirroring shell $(...) substitution.
    """
    kwargs['shell'] = True
    kwargs['stdout'] = subprocess.PIPE
    p = subprocess.Popen(command, **kwargs)
    output = p.communicate()[0]
    if p.returncode:
        raise subprocess.CalledProcessError(p.returncode, command)
    # NOTE(review): under Python 3 'output' is bytes, so comparing against
    # the str '\n' would fail — this code presumably targets Python 2.
    if output.count('\n') and output[-1] == '\n':
        return output[:-1]
    else:
        return output
class Sandbox(object):
    """A context manager for launching and cleaning up remote processes."""

    class Process(object):
        # Lightweight record describing one remote command started via rsh().
        def __init__(self, host, command, kwargs, sonce, proc,
                     ignoreFailures, kill_on_exit, server_process):
            self.host = host
            self.command = command
            self.kwargs = kwargs
            # 'sonce' is a random id that the remote helper scripts use to
            # find and kill the remote pid later.
            self.sonce = sonce
            self.proc = proc
            self.ignoreFailures = ignoreFailures
            self.kill_on_exit = kill_on_exit
            self.server_process = server_process
        def __repr__(self):
            return repr(self.__dict__)

    def __init__(self, cleanup=True):
        # cleanup indicates whether this Sandbox needs to clean up
        # processes that are currently running as part of this run of
        # clusterperf or not.
        self.processes = []
        self.cleanup = cleanup

    def rsh(self, host, command, locator=None, ignoreFailures=False,
            is_server=False, kill_on_exit=True, bg=False, **kwargs):
        """Execute a remote command.

        @return: If bg is True then a Process corresponding to the command
                 which was run, otherwise None.
        """
        if bg:
            # Random lowercase nonce used to track the remote process.
            # NOTE(review): range(ord('a'), ord('z')) never yields 'z';
            # harmless for a nonce but presumably unintended.
            sonce = ''.join([chr(random.choice(range(ord('a'), ord('z'))))
                             for c in range(8)])
            server_process = is_server
            if is_server:
                # Assumes scripts are at same path on remote machine
                sh_command = ['ssh', host,
                              '%s/serverexec' % scripts_path,
                              host, os.getcwd(), "'%s'" % locator,
                              "'%s'" % command]
            else:
                # Assumes scripts are at same path on remote machine
                sh_command = ['ssh', host,
                              '%s/regexec' % scripts_path, sonce,
                              os.getcwd(), "'%s'" % command]
            p = subprocess.Popen(sh_command, **kwargs)
            process = self.Process(host, command, kwargs, sonce,
                                   p, ignoreFailures, kill_on_exit,
                                   server_process)
            self.processes.append(process)
            return process
        else:
            # Foreground: run synchronously via remoteexec.py and block.
            sh_command = ['ssh', host,
                          '%s/remoteexec.py' % scripts_path,
                          "'%s'" % command, os.getcwd()]
            subprocess.check_call(sh_command, **kwargs)
            return None

    def kill(self, process):
        """Kill a remote process started with rsh().

        @param process: A Process corresponding to the command to kill which
                        was created with rsh().
        """
        # Kill the remote side first (via its nonce), then the local ssh.
        killer = subprocess.Popen(['ssh', process.host,
                                   '%s/killpid' % scripts_path,
                                   process.sonce])
        killer.wait()
        try:
            process.proc.kill()
        except:
            pass
        process.proc.wait()
        self.processes.remove(process)

    def restart(self, process):
        # NOTE(review): these positional arguments bind
        # process.ignoreFailures to rsh()'s 'locator' parameter and True to
        # 'ignoreFailures' — this looks like a bug; confirm the intended
        # call uses keyword arguments (e.g. ignoreFailures=..., bg=True).
        self.kill(process)
        self.rsh(process.host, process.command, process.ignoreFailures, True, **process.kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Tear everything down with SIGINT/SIGTERM deferred so the cleanup
        # cannot be interrupted halfway through.
        with delayedInterrupts():
            killers = []
            for p in self.processes:
                # If this sandbox does not require a cleanup of its processes
                # now, don't do it. Currently only servers are started in the
                # context of such objects. They will be reaped later by some
                # object in whose context, this object was created.
                if not self.cleanup:
                    to_kill = '0'
                    killers.append(subprocess.Popen(['ssh', p.host,
                                                     '%s/killserver' % scripts_path,
                                                     to_kill, os.getcwd(), p.host]))
                # invoke killpid only for processes that are not servers.
                # server processes will be killed by killserver outside this
                # loop below.
                elif not p.server_process:
                    # Assumes scripts are at same path on remote machine
                    killers.append(subprocess.Popen(['ssh', p.host,
                                                     '%s/killpid' % scripts_path,
                                                     p.sonce]))

            if self.cleanup:
                chost = hosts[0] # coordinator
                killers.append(subprocess.Popen(['ssh', chost[0],
                                                 '%s/killcoord' % scripts_path]))
                path = '%s/logs/shm' % os.getcwd()
                # 'files' defaults to "" (an empty iterable) if the log/shm
                # directory cannot be listed.
                files = ""
                try:
                    files = sorted([f for f in os.listdir(path)
                                    if os.path.isfile( os.path.join(path, f) )])
                except:
                    pass

                # kill all the servers that are running
                for mhost in files:
                    if mhost != 'README' and not mhost.startswith("cluster"):
                        to_kill = '1'
                        killers.append(subprocess.Popen(['ssh', mhost.split('_')[0],
                                                         '%s/killserver' % scripts_path,
                                                         to_kill, os.getcwd(), mhost]))
                try:
                    os.remove('%s/logs/shm/README' % os.getcwd())
                    # remove the file that represents the name of the cluster.
                    # This is used so that new backups can be told whether
                    # or not to read data from their disks
                    for fname in os.listdir(path):
                        if fname.startswith("cluster"):
                            os.remove(os.path.join(path, fname))
                except:
                    pass

            for killer in killers:
                killer.wait()

        # a half-assed attempt to clean up zombies
        for p in self.processes:
            try:
                p.proc.kill()
            except:
                pass
            p.proc.wait()

    def checkFailures(self):
        """Raise exception if any process has exited with a non-zero status."""
        for p in self.processes:
            if (p.ignoreFailures == False):
                rc = p.proc.poll()
                if rc is not None and rc != 0:
                    # raise subprocess.CalledProcessError(rc, p.command)
                    # don't raise exception because the test may involve intentional
                    # crashing of the coordinator or master/backup servers
                    pass
@contextlib.contextmanager
def delayedInterrupts():
    """Block SIGINT and SIGTERM temporarily.

    Signals received inside the managed block are deferred; after the block
    exits and the original handlers are restored, a KeyboardInterrupt is
    raised if any signal arrived.  A second signal during the block aborts
    immediately.
    """
    pending = []

    def defer(sig, frame):
        if pending:
            # A signal already arrived once; the user really wants out.
            print ('Ctrl-C: Quitting during delayed interrupts section ' +
                   'because user insisted')
            raise KeyboardInterrupt
        pending.append((sig, frame))

    blocked = [signal.SIGINT, signal.SIGTERM]
    saved = [signal.signal(sig, defer) for sig in blocked]
    try:
        yield None
    finally:
        for sig, handler in zip(blocked, saved):
            signal.signal(sig, handler)
        if pending:
            raise KeyboardInterrupt(
                'Signal received while in delayed interrupts section')
# This stuff has to be here, rather than at the beginning of the file,
# because config needs some of the functions defined above.
from config import *
import config
__all__.extend(config.__all__)
def getDumpstr():
    """Returns an instance of Dumpstr for uploading reports.

    You should set dumpstr_base_url in your config file if you want to use this
    to upload reports. See the dumpstr README for more info. You might be able
    to find that README on the web at
    https://github.com/ongardie/dumpstr/blob/master/README.rst
    """
    from dumpstr import Dumpstr
    try:
        url = config.dumpstr_base_url
    except AttributeError:
        # No URL configured: return a stub whose upload_report explains the
        # problem instead of failing obscurely later.
        d = Dumpstr("")
        def error(*args, **kwargs):
            raise Exception(
                "You did not set your dumpstr_base_url "
                "in localconfig.py, so you can't upload reports.")
        d.upload_report = error
        return d
    else:
        return Dumpstr(url)
| isc |
szopu/datadiffs | datadiffs/freezing.py | 1 | 1812 | from collections import Mapping
class frozendict(Mapping):
    """An immutable, hashable mapping over a private copy of a dict.

    "Mutation" is expressed functionally: put()/delete() return new
    frozendict instances and never touch the receiver.
    """

    def __init__(self, input):
        # Accept plain dicts (and subclasses) only; note that another
        # frozendict is *not* a dict, so it is rejected — use .copy().
        if not isinstance(input, dict):
            raise TypeError('{0} is not type of dict'.format(type(input)))
        self.__data = input.copy()

    def __getitem__(self, key):
        return self.__data[key]

    def __len__(self):
        return len(self.__data)

    def __iter__(self):
        return iter(self.__data)

    def __contains__(self, item):
        return item in self.__data

    def __hash__(self):
        # Order-independent hash: the sum of the per-item hashes.
        return sum(hash(item) for item in self.__data.items())

    def __repr__(self):
        return '{0}({1!r})'.format(type(self).__name__, self.__data)

    def items(self):
        return self.__data.items()

    def keys(self):
        return self.__data.keys()

    def values(self):
        return self.__data.values()

    def get(self, key, default=None):
        return self.__data.get(key, default)

    def put(self, key, value):
        # Return a new frozendict with *key* set to *value*.
        updated = dict(self.__data)
        updated[key] = value
        return self.__class__(updated)

    def delete(self, key):
        # Return a new frozendict without *key*; raises KeyError if absent.
        remaining = dict(self.__data)
        del remaining[key]
        return self.__class__(remaining)

    def copy(self):
        return self.__class__(dict(self.__data))
def freeze_data(data):
    """Recursively convert dicts to frozendicts and lists/tuples to tuples."""
    if isinstance(data, (dict, frozendict)):
        frozen = {key: freeze_data(value) for key, value in data.items()}
        return frozendict(frozen)
    if isinstance(data, (tuple, list)):
        return tuple(freeze_data(element) for element in data)
    return data
def unfreeze_data(data):
    """Recursively convert frozendicts back to dicts and tuples to lists."""
    if isinstance(data, (dict, frozendict)):
        return dict((key, unfreeze_data(value)) for key, value in data.items())
    if isinstance(data, (tuple, list)):
        return [unfreeze_data(element) for element in data]
    return data
| mit |
jorik041/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/config.py | 126 | 6136 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrapper objects for WebKit-specific utility routines."""
# FIXME: This file needs to be unified with common/config/ports.py .
import logging
from webkitpy.common import webkit_finder
_log = logging.getLogger(__name__)
#
# FIXME: This is used to record if we've already hit the filesystem to look
# for a default configuration. We cache this to speed up the unit tests,
# but this can be reset with clear_cached_configuration(). This should be
# replaced with us consistently using MockConfigs() for tests that don't
# hit the filesystem at all and provide a reliable value.
#
_have_determined_configuration = False
_configuration = "Release"
def clear_cached_configuration():
    """Reset the module-level configuration cache (used by unit tests)."""
    global _have_determined_configuration, _configuration
    _have_determined_configuration = False
    _configuration = "Release"
class Config(object):
    """Resolves WebKit build directories and the default configuration."""

    # Maps a configuration name to the flag understood by the
    # webkit-build-directory script.
    _FLAGS_FROM_CONFIGURATIONS = {
        "Debug": "--debug",
        "Release": "--release",
    }

    def __init__(self, executive, filesystem, port_implementation=None):
        self._executive = executive
        self._filesystem = filesystem
        self._webkit_finder = webkit_finder.WebKitFinder(self._filesystem)
        self._default_configuration = None
        # Cache: configuration name -> build directory path.
        self._build_directories = {}
        self._port_implementation = port_implementation

    def build_directory(self, configuration):
        """Returns the path to the build directory for the configuration."""
        if configuration:
            flags = ["--configuration", self.flag_for_configuration(configuration)]
        else:
            configuration = ""
            flags = []
        if self._port_implementation:
            flags.append('--' + self._port_implementation)
        if not self._build_directories.get(configuration):
            # Ask the webkit-build-directory script: its first output line
            # is the top-level build dir, the optional second line the
            # configuration-specific dir.
            args = ["perl", self._webkit_finder.path_to_script("webkit-build-directory")] + flags
            output = self._executive.run_command(args, cwd=self._webkit_finder.webkit_base(), return_stderr=False).rstrip()
            parts = output.split("\n")
            self._build_directories[configuration] = parts[0]
            if len(parts) == 2:
                # Derive the configuration name from the relative suffix and
                # cache that mapping too.
                default_configuration = parts[1][len(parts[0]):]
                if default_configuration.startswith("/"):
                    default_configuration = default_configuration[1:]
                self._build_directories[default_configuration] = parts[1]
        return self._build_directories[configuration]

    def flag_for_configuration(self, configuration):
        return self._FLAGS_FROM_CONFIGURATIONS[configuration]

    def default_configuration(self):
        """Returns the default configuration for the user.

        Returns the value set by 'set-webkit-configuration', or "Release"
        if that has not been set. This mirrors the logic in webkitdirs.pm."""
        if not self._default_configuration:
            self._default_configuration = self._determine_configuration()
        if not self._default_configuration:
            self._default_configuration = 'Release'
        if self._default_configuration not in self._FLAGS_FROM_CONFIGURATIONS:
            _log.warn("Configuration \"%s\" is not a recognized value.\n" % self._default_configuration)
            _log.warn("Scripts may fail. See 'set-webkit-configuration --help'.")
        return self._default_configuration

    def _determine_configuration(self):
        # This mirrors the logic in webkitdirs.pm:determineConfiguration().
        #
        # FIXME: See the comment at the top of the file regarding unit tests
        # and our use of global mutable static variables.
        # FIXME: We should just @memoize this method and then this will only
        # be read once per object lifetime (which should be sufficiently fast).
        global _have_determined_configuration, _configuration
        if not _have_determined_configuration:
            contents = self._read_configuration()
            if not contents:
                contents = "Release"
            # Normalize legacy Xcode-style names onto Release/Debug.
            if contents == "Deployment":
                contents = "Release"
            if contents == "Development":
                contents = "Debug"
            _configuration = contents
            _have_determined_configuration = True
        return _configuration

    def _read_configuration(self):
        # Returns the contents of the build dir's Configuration file, or
        # None if it does not exist or cannot be located.
        try:
            configuration_path = self._filesystem.join(self.build_directory(None), "Configuration")
            if not self._filesystem.exists(configuration_path):
                return None
        except:
            return None
        return self._filesystem.read_text_file(configuration_path).rstrip()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_stats_cluster_methods.py | 6 | 8607 | # doc:slow-example
"""
.. _tut_stats_cluster_methods:
======================================================
Permutation t-test on toy data with spatial clustering
======================================================
Following the illustrative example of Ridgway et al. 2012,
this demonstrates some basic ideas behind both the "hat"
variance adjustment method, as well as threshold-free
cluster enhancement (TFCE) methods in mne-python.
This toy dataset consists of a 40 x 40 square with a "signal"
present in the center (at pixel [20, 20]) with white noise
added and a 5-pixel-SD normal smoothing kernel applied.
For more information, see:
Ridgway et al. 2012, "The problem of low variance voxels in
statistical parametric mapping; a new hat avoids a 'haircut'",
NeuroImage. 2012 Feb 1;59(3):2131-41.
Smith and Nichols 2009, "Threshold-free cluster enhancement:
addressing problems of smoothing, threshold dependence, and
localisation in cluster inference", NeuroImage 44 (2009) 83-98.
In the top row plot the T statistic over space, peaking toward the
center. Note that it has peaky edges. Second, with the "hat" variance
correction/regularization, the peak becomes correctly centered. Third,
the TFCE approach also corrects for these edge artifacts. Fourth, the
the two methods combined provide a tighter estimate, for better or
worse.
Now considering multiple-comparisons corrected statistics on these
variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
mis-localize the peak due to sharpness in the T statistic driven by
low-variance pixels toward the edge of the plateau. Standard clustering
(first plot in the second row) identifies the correct region, but the
whole area must be declared significant, so no peak analysis can be done.
Also, the peak is broad. In this method, all significances are
family-wise error rate (FWER) corrected, and the method is
non-parametric so assumptions of Gaussian data distributions (which do
actually hold for this example) don't need to be satisfied. Adding the
"hat" technique tightens the estimate of significant activity (second
plot). The TFCE approach (third plot) allows analyzing each significant
point independently, but still has a broadened estimate. Note that
this is also FWER corrected. Finally, combining the TFCE and "hat"
methods tightens the area declared significant (again FWER corrected),
and allows for evaluation of each point independently instead of as
a single, broad cluster.
Note that this example does quite a bit of processing, so even on a
fast machine it can take a few minutes to complete.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
from scipy import stats
from functools import partial
import matplotlib.pyplot as plt
# this changes hidden MPL vars:
from mpl_toolkits.mplot3d import Axes3D # noqa
from mne.stats import (spatio_temporal_cluster_1samp_test,
bonferroni_correction, ttest_1samp_no_p)
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
print(__doc__)
###############################################################################
# Set parameters
# --------------
width = 40  # simulated image is width x width pixels
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3  # sigma for the "hat" method
# One-tailed t threshold at p < 0.05 with n_subjects - 1 degrees of freedom.
threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)
threshold_tfce = dict(start=0, step=0.2)
n_permutations = 1024  # number of clustering permutations (1024 for exact)

###############################################################################
# Construct simulated data
# ------------------------
#
# Make the connectivity matrix just next-neighbor spatially
n_src = width * width
connectivity = grid_to_graph(width, width)

# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(42)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the dead center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel (separable: rows, then columns)
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
    for ri in range(X.shape[1]):
        X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
    for ci in range(X.shape[2]):
        X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# Do some statistics
# ------------------
#
# .. note::
# X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions:
X = X.reshape((n_subjects, 1, n_src))

###############################################################################
# Now let's do some clustering using the standard method.
#
# .. note::
#     Not specifying a connectivity matrix implies grid-like connectivity,
#     which we want here:
T_obs, clusters, p_values, H0 = \
    spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
                                       connectivity=connectivity,
                                       tail=1, n_permutations=n_permutations)

# Let's put the cluster data in a readable format
# NOTE(review): cl[1] is used as the spatial index array of each cluster
# (the time axis is a singleton here) — confirm against the mne API.
ps = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
    ps[cl[1]] = -np.log10(p)
ps = ps.reshape((width, width))
T_obs = T_obs.reshape((width, width))

# To do a Bonferroni correction on these data is simple:
p = stats.distributions.t.sf(T_obs, n_subjects - 1)
p_bon = -np.log10(bonferroni_correction(p)[1])

# Now let's do some clustering using the standard method with "hat":
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
T_obs_hat, clusters, p_values, H0 = \
    spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
                                       connectivity=connectivity,
                                       tail=1, n_permutations=n_permutations,
                                       stat_fun=stat_fun)

# Let's put the cluster data in a readable format
ps_hat = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
    ps_hat[cl[1]] = -np.log10(p)
ps_hat = ps_hat.reshape((width, width))
T_obs_hat = T_obs_hat.reshape((width, width))

# Now the threshold-free cluster enhancement method (TFCE):
T_obs_tfce, clusters, p_values, H0 = \
    spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
                                       connectivity=connectivity,
                                       tail=1, n_permutations=n_permutations)
T_obs_tfce = T_obs_tfce.reshape((width, width))
ps_tfce = -np.log10(p_values.reshape((width, width)))

# Now the TFCE with "hat" variance correction:
T_obs_tfce_hat, clusters, p_values, H0 = \
    spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
                                       connectivity=connectivity,
                                       tail=1, n_permutations=n_permutations,
                                       stat_fun=stat_fun)
T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
###############################################################################
# Visualize results
# -----------------
fig = plt.figure(facecolor='w')

x, y = np.mgrid[0:width, 0:width]
kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')

# Top row: the four raw statistic maps as 3D surfaces.
Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
for ii, (t, title) in enumerate(zip(Ts, titles)):
    ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
    ax.plot_surface(x, y, t, **kwargs)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(title)

# Bottom row: corrected -log10(p) maps.  The lower color limit 1.3 is
# approximately -log10(0.05), the conventional significance level.
p_lims = [1.3, -np.log10(1.0 / n_permutations)]
pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
titles = ['Standard clustering', 'Clust. w/"hat"',
          'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
axs = []
for ii, (p, title) in enumerate(zip(pvals, titles)):
    ax = fig.add_subplot(2, 4, 5 + ii)
    plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(title)
    axs.append(ax)
plt.tight_layout()
for ax in axs:
    cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
                        fraction=0.1, pad=0.025)
    cbar.set_label('-log10(p)')
    cbar.set_ticks(p_lims)
    cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
plt.show()
| bsd-3-clause |
ashishnitinpatil/django_appengine_project_template | django/core/management/__init__.py | 104 | 15442 | import collections
import os
import sys
from optparse import OptionParser, NO_DEFAULT
import imp
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError, handle_default_options
from django.core.management.color import color_style
from django.utils.importlib import import_module
from django.utils import six
# For backwards compatibility: get_version() used to be in this module.
from django import get_version
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
_commands = None
def find_commands(management_dir):
    """
    Return the names of all commands defined under ``management_dir``.

    A command is any ``*.py`` file inside the ``commands`` subdirectory
    whose name does not start with an underscore. Returns an empty list
    when that subdirectory is missing or unreadable.
    """
    commands_path = os.path.join(management_dir, 'commands')
    try:
        entries = os.listdir(commands_path)
    except OSError:
        # No 'commands' directory (or it is unreadable): no commands.
        return []
    return [entry[:-3] for entry in entries
            if entry.endswith('.py') and not entry.startswith('_')]
def find_management_module(app_name):
    """
    Determines the path to the management module for the given app_name,
    without actually importing the application or the management module.
    Raises ImportError if the management module cannot be found for any reason.
    """
    # Walk the dotted path one component at a time with imp.find_module()
    # so the app is never actually imported (imports can have side effects).
    parts = app_name.split('.')
    parts.append('management')
    parts.reverse()
    part = parts.pop()
    path = None
    # When using manage.py, the project module is added to the path,
    # loaded, then removed from the path. This means that
    # testproject.testapp.models can be loaded in future, even if
    # testproject isn't in the path. When looking for the management
    # module, we need look for the case where the project name is part
    # of the app_name but the project directory itself isn't on the path.
    try:
        f, path, descr = imp.find_module(part, path)
    except ImportError as e:
        # Tolerate the miss only when the first component is the current
        # working directory's own name (the manage.py case above).
        if os.path.basename(os.getcwd()) != part:
            raise e
    else:
        # imp.find_module may return an open file handle; release it,
        # we only needed the resolved path.
        if f:
            f.close()
    while parts:
        part = parts.pop()
        f, path, descr = imp.find_module(part, [path] if path else None)
        if f:
            f.close()
    # Filesystem path of the app's 'management' package directory.
    return path
def load_command_class(app_name, name):
    """
    Instantiate and return the Command class for ``name`` within
    ``app_name``.

    Any errors raised by the import (ImportError, AttributeError) are
    deliberately left to propagate to the caller.
    """
    dotted = '%s.management.commands.%s' % (app_name, name)
    return import_module(dotted).Command()
def get_commands():
    """
    Return a dict mapping each known command name to the application
    (dotted path string) that provides it.

    Django's own commands (under ``django.core``) are always present.
    When a settings module is configured, commands contributed by each
    app in ``INSTALLED_APPS`` are added as well. A value may also be an
    already instantiated command object instead of an app name (used,
    e.g., to load a specific version of the startapp command).

    The result is computed once and cached in the module-global
    ``_commands``; subsequent calls return the cached dict.
    """
    global _commands
    if _commands is not None:
        return _commands
    # Seed the cache with the built-in commands shipped in django.core.
    _commands = dict((name, 'django.core')
                     for name in find_commands(__path__[0]))
    from django.conf import settings
    try:
        installed_apps = settings.INSTALLED_APPS
    except ImproperlyConfigured:
        # Settings are broken or absent; keep only the core commands so
        # that commands like startproject and help still work.
        installed_apps = []
    # Merge in the commands provided by each installed application.
    for app_name in installed_apps:
        try:
            management_path = find_management_module(app_name)
            _commands.update(dict((name, app_name)
                                  for name in find_commands(management_path)))
        except ImportError:
            pass  # No management module - ignore this app
    return _commands
def call_command(name, *args, **options):
    """
    Run the management command ``name`` programmatically, with the given
    positional args and keyword options.

    This is the primary API for invoking specific commands from Python
    code. Some examples::

        call_command('syncdb')
        call_command('shell', plain=True)
        call_command('sqlall', 'myapp')

    Raises CommandError when ``name`` is not a known command.
    """
    try:
        app_name = get_commands()[name]
    except KeyError:
        raise CommandError("Unknown command: %r" % name)
    if isinstance(app_name, BaseCommand):
        # get_commands() may cache an instantiated command directly.
        command = app_name
    else:
        command = load_command_class(app_name, name)
    # optparse fills in option defaults when parsing a real command line,
    # but call_command bypasses parsing, so simulate the default loading
    # here (see #10080) and then let explicit keyword options win.
    defaults = dict(
        (opt.dest, None if opt.default is NO_DEFAULT else opt.default)
        for opt in command.option_list)
    defaults.update(options)
    return command.execute(*args, **defaults)
class LaxOptionParser(OptionParser):
    """
    An OptionParser variant that never raises on unknown options.

    This is needed because the --settings and --pythonpath options
    themselves determine which commands (and therefore which options)
    are available, so the first parsing pass must tolerate anything it
    does not recognize.
    """

    def error(self, msg):
        # Swallow parse errors instead of printing and exiting the way
        # the base OptionParser does.
        pass

    def print_help(self):
        """Output nothing.

        The lax options are included in the normal option parser, so under
        normal usage, we don't need to print the lax options.
        """
        pass

    def print_lax_help(self):
        """Output the basic options available to every command.

        This just redirects to the default print_help() behavior.
        """
        OptionParser.print_help(self)

    def _process_args(self, largs, rargs, values):
        """
        Consume every argument, keeping defaults and collecting the rest.

        Unlike the superclass implementation, which stops at the first
        unrecognized option, this keeps scanning so that default options
        appearing later are still handled; anything that fails to parse
        is stashed in ``largs`` instead.
        """
        while rargs:
            arg = rargs[0]
            try:
                if arg.startswith("--") and len(arg) > 2:
                    # A single long option (possibly with value(s)); the
                    # superclass pops it off rargs and raises if unknown.
                    self._process_long_opt(rargs, values)
                elif arg.startswith("-") and len(arg) > 1:
                    # A cluster of short options (only the last may take
                    # a value); again popped off rargs by the superclass.
                    self._process_short_opts(rargs, values)
                else:
                    # A plain argument (or non-default option): remove it
                    # and jump to the handler below so it lands in largs
                    # and we keep scanning for options.
                    del rargs[0]
                    raise Exception
            except:  # deliberately lax: keep whatever failed to parse
                largs.append(arg)
class ManagementUtility(object):
    """
    Encapsulates the logic of the django-admin.py and manage.py utilities.
    A ManagementUtility has a number of commands, which can be manipulated
    by editing the self.commands dictionary.
    """
    def __init__(self, argv=None):
        # Default to the real command line; slice-copy so the caller's
        # list is never mutated.
        self.argv = argv or sys.argv[:]
        # Program name (e.g. "manage.py"), used in help/error messages.
        self.prog_name = os.path.basename(self.argv[0])
    def main_help_text(self, commands_only=False):
        """
        Returns the script's main help text, as a string.
        """
        if commands_only:
            # Bare sorted list of command names (used by 'help --commands').
            usage = sorted(get_commands().keys())
        else:
            usage = [
                "",
                "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
                "",
                "Available subcommands:",
            ]
            # Group command names by the app that provides them, shortening
            # dotted app paths to their last component ('django.core' is
            # displayed simply as 'django').
            commands_dict = collections.defaultdict(lambda: [])
            for name, app in six.iteritems(get_commands()):
                if app == 'django.core':
                    app = 'django'
                else:
                    app = app.rpartition('.')[-1]
                commands_dict[app].append(name)
            style = color_style()
            for app in sorted(commands_dict.keys()):
                usage.append("")
                usage.append(style.NOTICE("[%s]" % app))
                for name in sorted(commands_dict[app]):
                    usage.append(" %s" % name)
        return '\n'.join(usage)
    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin.py" or "manage.py") if it can't be found.
        """
        # Get commands outside of try block to prevent swallowing exceptions
        commands = get_commands()
        try:
            app_name = commands[subcommand]
        except KeyError:
            # Unknown subcommand: report and exit non-zero.
            sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % \
                (subcommand, self.prog_name))
            sys.exit(1)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, subcommand)
        return klass
    def autocomplete(self):
        """
        Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMREPLY` variable and
        treated as completion suggestions. `COMREPLY` expects a space
        separated string as the result.
        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about this variables.
        Subcommand options are saved as pairs. A pair consists of
        the long option string (e.g. '--exclude') and a boolean
        value indicating if the option requires arguments. When printing to
        stdout, a equal sign is appended to options which require arguments.
        Note: If debugging this function, it is recommended to write the debug
        output in a separate file. Otherwise the debug output will be treated
        and formatted as potential completion suggestions.
        """
        # Don't complete if user hasn't sourced bash_completion file.
        if 'DJANGO_AUTO_COMPLETE' not in os.environ:
            return
        # Words already typed (minus the program name) and cursor position.
        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])
        try:
            curr = cwords[cword-1]
        except IndexError:
            curr = ''
        subcommands = list(get_commands()) + ['help']
        # (option_string, requires_argument) pairs; --help never takes one.
        options = [('--help', None)]
        # subcommand
        if cword == 1:
            # Completing the subcommand name itself.
            print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
        # subcommand options
        # special case: the 'help' subcommand has no options
        elif cwords[0] in subcommands and cwords[0] != 'help':
            subcommand_cls = self.fetch_command(cwords[0])
            # special case: 'runfcgi' stores additional options as
            # 'key=value' pairs
            if cwords[0] == 'runfcgi':
                from django.core.servers.fastcgi import FASTCGI_OPTIONS
                options += [(k, 1) for k in FASTCGI_OPTIONS]
            # special case: add the names of installed apps to options
            elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
                    'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
                try:
                    from django.conf import settings
                    # Get the last part of the dotted path as the app name.
                    options += [(a.split('.')[-1], 0) for a in settings.INSTALLED_APPS]
                except ImportError:
                    # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                    # user will find out once they execute the command.
                    pass
            options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
                        subcommand_cls.option_list]
            # filter out previously specified options from available options
            prev_opts = [x.split('=')[0] for x in cwords[1:cword-1]]
            options = [opt for opt in options if opt[0] not in prev_opts]
            # filter options by current input
            options = sorted([(k, v) for k, v in options if k.startswith(curr)])
            for option in options:
                opt_label = option[0]
                # append '=' to options which require args
                if option[1]:
                    opt_label += '='
                print(opt_label)
        # Exit with a non-zero status so BASH knows completion is done.
        sys.exit(1)
    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
        parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
                                 version=get_version(),
                                 option_list=BaseCommand.option_list)
        self.autocomplete()
        try:
            options, args = parser.parse_args(self.argv)
            handle_default_options(options)
        except:
            pass # Ignore any option errors at this point.
        try:
            subcommand = self.argv[1]
        except IndexError:
            subcommand = 'help' # Display help if no arguments were given.
        if subcommand == 'help':
            if len(args) <= 2:
                parser.print_lax_help()
                sys.stdout.write(self.main_help_text() + '\n')
            elif args[2] == '--commands':
                sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
            else:
                # 'help <subcommand>': delegate to that command's own help.
                self.fetch_command(args[2]).print_help(self.prog_name, args[2])
        elif subcommand == 'version':
            sys.stdout.write(parser.get_version() + '\n')
        # Special-cases: We want 'django-admin.py --version' and
        # 'django-admin.py --help' to work, for backwards compatibility.
        elif self.argv[1:] == ['--version']:
            # LaxOptionParser already takes care of printing the version.
            pass
        elif self.argv[1:] in (['--help'], ['-h']):
            parser.print_lax_help()
            sys.stdout.write(self.main_help_text() + '\n')
        else:
            # Normal path: hand the full argv to the chosen subcommand.
            self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
    """
    Convenience entry point: build a ManagementUtility for ``argv``
    (defaulting to sys.argv) and run it.
    """
    ManagementUtility(argv).execute()
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.