hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81d2e92136c305836487f7cd719abbdf22087fbd | 1,222 | py | Python | users/forms.py | KristenZuber/pawpals | e4da24c9e407544aecc56fa764b3fbf0d0dc865c | [
"MIT"
] | null | null | null | users/forms.py | KristenZuber/pawpals | e4da24c9e407544aecc56fa764b3fbf0d0dc865c | [
"MIT"
] | null | null | null | users/forms.py | KristenZuber/pawpals | e4da24c9e407544aecc56fa764b3fbf0d0dc865c | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from.models import Profile, Apply
# Define fields for the registration form
class UserRegistrationForm(UserCreationForm):
    """Sign-up form: username, optional email, password and confirmation.

    Extends Django's UserCreationForm, which supplies the password1/password2
    fields and their validation.
    """
    # PEP 8: no spaces around '=' when passing keyword arguments
    email = forms.EmailField(required=False)

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
# Define fields for the update form
class UserUpdateForm(forms.ModelForm):
    """Profile-update form for an existing User: username and optional email."""
    # PEP 8: no spaces around '=' when passing keyword arguments
    email = forms.EmailField(required=False)

    class Meta:
        model = User
        fields = ['username', 'email']
# Define fields for the apply form
class UserApplyForm(forms.ModelForm):
    """Pet-adoption application form backed by the Apply model.

    Collects applicant contact details plus three free-form screening
    questions (answer1..answer3).
    """
    # email is required here (unlike the registration/update forms)
    email = forms.EmailField()

    class Meta:
        model = Apply
        # human-readable labels rendered next to each field
        labels = {
            'first_name':'First Name',
            'last_name':'Last Name',
            'email':'Email',
            'phone_number':'Phone Number',
            'answer1':'Who are you interested in adopting? (name)',
            'answer2':'How many pets do you have at home?',
            'answer3':'How much time do you have to devote to a pet? (hours/day)',
        }
        fields = [
            'first_name', 'last_name', 'email', 'phone_number', 'answer1', 'answer2', 'answer3'
        ]
| 30.55 | 91 | 0.643208 |
92ec402ceffd6cc26e8edbb3abcfd3c720717c09 | 26,239 | py | Python | myuw/dao/visual_schedule.py | uw-it-aca/myuw | 3fa1fabeb3c09d81a049f7c1a8c94092d612438a | [
"Apache-2.0"
] | 18 | 2015-02-04T01:09:11.000Z | 2021-11-25T03:10:39.000Z | myuw/dao/visual_schedule.py | uw-it-aca/myuw | 3fa1fabeb3c09d81a049f7c1a8c94092d612438a | [
"Apache-2.0"
] | 2,323 | 2015-01-15T19:45:10.000Z | 2022-03-21T19:57:06.000Z | myuw/dao/visual_schedule.py | uw-it-aca/myuw | 3fa1fabeb3c09d81a049f7c1a8c94092d612438a | [
"Apache-2.0"
] | 9 | 2015-01-15T19:29:26.000Z | 2022-02-11T04:51:23.000Z | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
from myuw.dao.registration import get_schedule_by_term
from myuw.dao.instructor_schedule import get_instructor_schedule_by_term
from myuw.dao.term import get_current_quarter, get_current_summer_term
from myuw.dao.building import get_building_by_code
from restclients_core.exceptions import DataFailureException
from dateutil.relativedelta import *
from datetime import timedelta
import math
import copy
logger = logging.getLogger(__name__)
def get_schedule_json(visual_schedule, term, summer_term=None):
    """Serialize a visual schedule (list of SchedulePeriod) for the API.

    Returns a dict with 'periods', 'term' and 'off_term_trimmed' keys.
    """
    response = {}
    schedule_periods = []
    period_id = 0
    for period in visual_schedule:
        period_data = period.json_data()
        # partial weeks: tell the client which weekday columns to disable
        if period.meetings_trimmed_front:
            period_data['disabled_days'] = \
                _get_disabled_days(period.start_date, True)
        if period.meetings_trimmed_back:
            period_data['disabled_days'] = \
                _get_disabled_days(period.end_date, False)
        # finals keep a fixed id; normal periods are numbered sequentially
        if period.is_finals:
            period_data['id'] = 'finals'
        else:
            period_data['id'] = period_id
        schedule_periods.append(period_data)
        period_id += 1
        # attach map coordinates for meetings in known buildings
        for section in period_data["sections"]:
            for meeting in section["meetings"]:
                if 'building' in meeting:
                    building_code = meeting["building"]
                    building = get_building_by_code(building_code)
                    if building is not None:
                        meeting["latitude"] = building.latitude
                        meeting["longitude"] = building.longitude
    response['periods'] = schedule_periods
    # Add term data for schedule
    response['term'] = {
        'year': term.year,
        'quarter': term.quarter,
        'first_day_quarter': term.first_day_quarter,
        'last_day_instruction': term.last_day_instruction,
        'aterm_last_date': term.aterm_last_date,
        'bterm_first_date': term.bterm_first_date,
        'last_final_exam_date': term.last_final_exam_date,
        'summer_term': summer_term
    }
    response['off_term_trimmed'] = _get_off_term_trimmed(visual_schedule)
    return response
def _get_disabled_days(date, is_before):
    """Map a date to the weekday columns outside a partial week.

    With is_before=True the days preceding `date`'s weekday are disabled;
    otherwise the days following it.  Week runs Sunday..Saturday.
    """
    day_names = ['sunday', 'monday', 'tuesday', 'wednesday',
                 'thursday', 'friday', 'saturday']
    # convert Python weekday (Mon=0..Sun=6) into Sun=0..Sat=6
    day_index = (date.weekday() + 1) % 7
    if is_before:
        disabled = set(day_names[:day_index])
    else:
        disabled = set(day_names[day_index + 1:])
    return {name: name in disabled for name in day_names}
def _get_off_term_trimmed(visual_schedule):
    """List sections whose meetings were clamped to the term end.

    A section gains real_end_date when _adjust_off_term_dates clamps it;
    duplicates across periods collapse to a single entry per course slug.
    """
    real_ends = {}
    for period in visual_schedule:
        for section in period.sections:
            if hasattr(section, 'real_end_date'):
                slug = '{} {} {}'.format(section.curriculum_abbr,
                                         section.course_number,
                                         section.section_id)
                real_ends[slug] = section.real_end_date
    return [{'section': slug, 'end_date': end_date}
            for slug, end_date in real_ends.items()]
def get_future_visual_schedule(request, term, summer_term=None):
    """Build the visual schedule for a future term.

    Returns None when the user has no enrolled or teaching sections.
    """
    schedule = _get_combined_future_schedule(request, term, summer_term)
    if schedule is None or len(schedule.sections) == 0:
        return None
    vs = get_visual_schedule_from_schedule(request, schedule, summer_term)
    return vs
def get_current_visual_schedule(request):
    """Build the visual schedule for the current term.

    Returns (visual_schedule, term, summer_term); all three are None when
    the user has no sections this term.
    """
    schedule = _get_combined_schedule(request)
    if schedule is None or len(schedule.sections) == 0:
        return None, None, None
    summer_term = None
    if schedule.term.is_summer_quarter():
        summer_term = schedule.summer_term
    vs = get_visual_schedule_from_schedule(request, schedule, summer_term)
    return vs, schedule.term, summer_term
def get_visual_schedule_from_schedule(request, schedule, summer_term):
    """Convert a class schedule into a list of visual-schedule periods.

    For split summer quarters only the periods belonging to the requested
    summer term (expected lower case, e.g. 'a-term') are kept.
    """
    visual_schedule = _get_visual_schedule_from_schedule(
        schedule, request, summer_term)
    if summer_term and _is_split_summer(schedule):
        visual_schedule = _trim_summer_term(visual_schedule, summer_term)
    return visual_schedule
def _get_combined_schedule(request):
    """Fetch and merge the user's enrolled and teaching schedules.

    Either lookup may raise DataFailureException (e.g. the user is not a
    student, or not an instructor); a failed side is treated as absent.
    """
    try:
        student_schedule = get_schedule_by_term(request)
        _set_student_sections(student_schedule)
    except DataFailureException:
        student_schedule = None
    try:
        instructor_schedule = get_instructor_schedule_by_term(request)
        _set_instructor_sections(instructor_schedule)
    except DataFailureException:
        instructor_schedule = None
    return __combine_schedules(student_schedule, instructor_schedule)
def _get_combined_future_schedule(request, term, summer_term):
    """Fetch and merge enrolled and teaching schedules for a future term.

    Mirrors _get_combined_schedule but targets an explicit term/summer term;
    a DataFailureException on either lookup is treated as that schedule
    being absent.
    """
    try:
        student_schedule = get_schedule_by_term(
            request, term=term, summer_term=summer_term)
        _set_student_sections(student_schedule)
    except DataFailureException:
        student_schedule = None
    try:
        instructor_schedule = get_instructor_schedule_by_term(
            request, term=term, summer_term=summer_term)
        _set_instructor_sections(instructor_schedule)
    except DataFailureException:
        instructor_schedule = None
    return __combine_schedules(student_schedule, instructor_schedule)
def __combine_schedules(student_schedule, instructor_schedule):
    """Merge instructor sections into the student schedule.

    Falls back to the instructor schedule when the student side is missing
    or empty; the student schedule object is mutated and returned otherwise.
    """
    student_is_empty = (student_schedule is None or
                        len(student_schedule.sections) == 0)
    if student_is_empty:
        return instructor_schedule
    combined = student_schedule
    if instructor_schedule is not None:
        # in-place extend so aliases of the section list stay consistent
        combined.sections += instructor_schedule.sections
    return combined
def _set_instructor_sections(instructor_schedule):
    """Mark every section in the schedule as one the user teaches."""
    for taught_section in instructor_schedule.sections:
        taught_section.is_teaching = True
    return instructor_schedule
def _set_student_sections(student_schedule):
    """Mark every section in the schedule as one the user is enrolled in."""
    for enrolled_section in student_schedule.sections:
        enrolled_section.is_teaching = False
    return student_schedule
def _get_visual_schedule_from_schedule(schedule, request, summer_term):
    """Build the list of SchedulePeriods (weeks + finals) for a schedule.

    Split summers are processed as separate A-term and B-term week runs and
    concatenated; other quarters as a single run.  Returns None when the
    schedule lacks the date attributes needed to compute bounds.
    """
    # common courses default to term start/end dates
    _add_dates_to_sections(schedule)
    if _is_split_summer(schedule):
        _adjust_off_term_dates(schedule)
        a_bounds, b_bounds = get_summer_schedule_bounds(schedule)
        a_weeks = _get_weeks_from_bounds(a_bounds)
        for week in a_weeks:
            week.summer_term = "A-term"
        a_weeks = _add_sections_to_weeks(schedule.sections, a_weeks)
        a_consolidated = _consolidate_weeks(a_weeks)
        trim_summer_meetings(a_consolidated)
        # the last A-term week always ends mid-week at the term boundary
        a_consolidated[-1].meetings_trimmed_back = True
        b_weeks = _get_weeks_from_bounds(b_bounds)
        for week in b_weeks:
            week.summer_term = "B-term"
        b_weeks = _add_sections_to_weeks(schedule.sections, b_weeks)
        b_consolidated = _consolidate_weeks(b_weeks)
        trim_summer_meetings(b_consolidated)
        # the first B-term week always starts mid-week at the term boundary
        b_consolidated[0].meetings_trimmed_front = True
        consolidated = a_consolidated + b_consolidated
    else:
        try:
            # find sections beyond term
            bounds = get_schedule_bounds(schedule)
            weeks = _get_weeks_from_bounds(bounds)
            weeks = _add_qtr_start_data_to_weeks(weeks, schedule)
            weeks = _add_sections_to_weeks(schedule.sections, weeks)
            weeks = trim_section_meetings(weeks)
            weeks = trim_weeks_no_meetings(weeks)
            consolidated = _consolidate_weeks(weeks)
        except AttributeError:
            # missing start/end date data somewhere in the schedule
            return None
    _add_weekend_meeting_data(consolidated)
    consolidated = _remove_empty_periods(consolidated)
    _adjust_period_dates(consolidated)
    finals = _get_finals_period(schedule)
    if len(finals.sections) > 0:
        consolidated.append(finals)
    return consolidated
def _add_qtr_start_data_to_weeks(weeks, schedule):
    """Mark the week that contains the (non-summer) quarter start date."""
    if schedule.term.quarter == "summer":
        return weeks
    first_day = schedule.term.first_day_quarter
    for week in weeks:
        # strictly inside the week: boundary days are left unmarked
        if week.start_date < first_day < week.end_date:
            week.qtr_start = first_day
    return weeks
def _remove_empty_periods(schedule):
    """Return only the periods that carry at least one section.

    Periods lacking a sections attribute entirely are also dropped.
    """
    kept = []
    for period in schedule:
        try:
            has_sections = len(period.sections) > 0
        except AttributeError:
            has_sections = False
        if has_sections:
            kept.append(period)
    return kept
def _adjust_off_term_dates(schedule):
    """Clamp section end dates to the last day of instruction.

    The original end date is preserved on real_end_date so the client can
    still report sections running beyond the term.
    """
    last_day = schedule.term.last_day_instruction
    for section in schedule.sections:
        if section.end_date > last_day:
            section.real_end_date, section.end_date = section.end_date, last_day
def _adjust_period_dates(schedule):
    """Tighten each period's start/end dates to the days it actually meets.

    A quarter-start marker wins over everything for the start date; weeks
    with trimmed meetings are narrowed to their earliest/latest meeting day;
    otherwise weeks that never meet on Sunday/Saturday lose that day.
    (Fix: removed an unused loop counter that was dead code.)
    """
    for period in schedule:
        # --- start date ---
        if period.qtr_start:
            period.start_date = period.qtr_start
        elif period.meetings_trimmed_front:
            try:
                period.start_date = _get_earliest_start_from_period(period)
            except TypeError:
                # section has no meetings, leave date alone
                pass
        if not period.meets_sunday and not period.meetings_trimmed_front:
            period.start_date = period.start_date + timedelta(days=1)
        # --- end date ---
        if period.meetings_trimmed_back:
            try:
                period.end_date = _get_latest_end_from_period(period)
            except TypeError:
                # section has no meetings, leave date alone
                pass
        if not period.meets_saturday and not period.meetings_trimmed_back:
            period.end_date = period.end_date - timedelta(days=1)
def _get_earliest_start_from_period(period):
    """Return the date of the earliest weekly meeting within the period.

    Raises TypeError when no section has meeting days (earliest_meeting
    stays None); callers catch this and keep the original start date.
    """
    earliest_meeting = None  # weekday index: Mon=0..Sat=5, Sun=6
    for section in period.sections:
        for meeting in section.meetings:
            if meeting.wont_meet():
                # if a section has a NON mtg set start date to section start
                return section.start_date
            earliest_section_meeting = _get_earliest_meeting_day(meeting)
            if earliest_section_meeting is not None:
                if earliest_meeting is None:
                    earliest_meeting = earliest_section_meeting
                elif earliest_section_meeting < earliest_meeting:
                    earliest_meeting = earliest_section_meeting
    start_day = period.start_date.weekday()
    # Treat sunday as 'first' day
    if start_day == 6:
        # NOTE(review): for a period starting on Sunday this adds
        # earliest_meeting + 1 days; when the earliest meeting is itself
        # Sunday (index 6) that skips a full week -- confirm intended
        days_to_add = earliest_meeting + 1
    else:
        days_to_add = earliest_meeting - start_day
    start_date = (period.start_date + timedelta(days=days_to_add))
    return start_date
def _get_latest_end_from_period(period):
    """Return the date of the latest weekly meeting within the period.

    Raises TypeError when no section has meeting days (latest_meeting stays
    None); callers catch this and keep the original end date.
    """
    latest_meeting = None  # weekday index: Mon=0..Sat=5, Sun=6
    for section in period.sections:
        for meeting in section.meetings:
            if meeting.wont_meet():
                # if a section has a NON mtg set end date to section end
                return section.end_date
            latest_section_meeting = _get_latest_meeting_day(meeting)
            if latest_meeting is None:
                latest_meeting = latest_section_meeting
            elif latest_meeting < latest_section_meeting:
                latest_meeting = latest_section_meeting
    end_day = period.end_date.weekday()
    # NOTE(review): a Sunday-only meeting (index 6) makes this negative and
    # pushes the end date forward past the period -- confirm intended
    days_to_subtract = end_day - latest_meeting
    end_date = period.end_date - timedelta(days=days_to_subtract)
    return end_date
def _get_earliest_meeting_day(meeting):
    """Return the weekday index (Mon=0..Sat=5, Sun=6) of the meeting's
    earliest day, or None when it meets on no day.

    Days are checked from latest to earliest so the last match wins;
    Sunday is checked last because it is the first day of the display week.
    """
    day_index = None
    ordered_checks = [('meets_saturday', 5), ('meets_friday', 4),
                      ('meets_thursday', 3), ('meets_wednesday', 2),
                      ('meets_tuesday', 1), ('meets_monday', 0),
                      ('meets_sunday', 6)]
    for attr, index in ordered_checks:
        if getattr(meeting, attr):
            day_index = index
    return day_index
def _get_latest_meeting_day(meeting):
    """Return the weekday index (Mon=0..Sat=5, Sun=6) of the meeting's
    latest day, or None when it meets on no day.

    Days are checked from earliest to latest so the last match wins;
    Sunday is checked first because it is the first day of the display week.
    """
    day_index = None
    ordered_checks = [('meets_sunday', 6), ('meets_monday', 0),
                      ('meets_tuesday', 1), ('meets_wednesday', 2),
                      ('meets_thursday', 3), ('meets_friday', 4),
                      ('meets_saturday', 5)]
    for attr, index in ordered_checks:
        if getattr(meeting, attr):
            day_index = index
    return day_index
def _get_finals_period(schedule):
    """Build the finals SchedulePeriod holding copies of every section."""
    finals = SchedulePeriod()
    finals.is_finals = True
    # deep copy: later per-week meeting trims must not affect finals data
    finals.sections = copy.deepcopy(schedule.sections)
    return finals
def trim_weeks_no_meetings(weeks):
    """Return a new list of weeks, omitting those where nothing meets.

    A section counts as "meeting" when any of its meetings either is an
    explicit wont-meet marker or has at least one scheduled day.
    """
    def _section_meets(section):
        return any(m.wont_meet() or not m.no_meeting()
                   for m in section.meetings)

    kept = []
    for week in weeks:
        if any(_section_meets(section) for section in week.sections):
            kept.append(week)
    return kept
def trim_section_meetings(weeks):
    """Remove meeting days falling outside each section's own date range.

    Weeks where any days were removed are flagged with meetings_trimmed
    plus meetings_trimmed_front/_back so later passes know the week's
    start/end dates need adjusting.
    """
    for week in weeks:
        front_trim_count = 0
        back_trim_count = 0
        for section in week.sections:
            # section starts mid-week: drop meeting days before its start
            if section.start_date > week.start_date:
                trimmed = _trim_section_before(section, section.start_date)
                if trimmed:
                    front_trim_count += 1
            # section ends mid-week: drop meeting days after its end
            if section.end_date < week.end_date:
                trimmed = _trim_section_after(section, section.end_date)
                if trimmed:
                    back_trim_count += 1
        if front_trim_count > 0:
            week.meetings_trimmed = True
            week.meetings_trimmed_front = True
        if back_trim_count > 0:
            week.meetings_trimmed = True
            week.meetings_trimmed_back = True
    return weeks
def get_summer_schedule_bounds(schedule):
    """Return ((a_start, a_end), (b_start, b_end)) bounds for a split summer.

    A-term starts on the Sunday at/before the first day of the quarter and
    ends on the term's A-term last date; B-term starts on the term's B-term
    first date and ends on the Saturday at/after the last day of instruction.

    Fixes: strftime('%w') returns a *string*, so the old comparisons against
    the ints 0/6 were always true (harmless only because a zero-day shift is
    a no-op); also uses stdlib timedelta instead of dateutil.relativedelta,
    which is equivalent for whole-day arithmetic.
    """
    a_start = schedule.term.first_day_quarter
    # set start to first sunday (%w: Sunday=0 .. Saturday=6)
    start_day = int(a_start.strftime('%w'))
    if start_day != 0:
        a_start -= timedelta(days=start_day)
    b_end = schedule.term.last_day_instruction
    # set end to last saturday
    end_day = int(b_end.strftime('%w'))
    if end_day != 6:
        b_end += timedelta(days=6 - end_day)
    a_bounds = a_start, schedule.term.aterm_last_date
    b_bounds = schedule.term.bterm_first_date, b_end
    return a_bounds, b_bounds
def trim_summer_meetings(weeks):
    """Trim meeting days hanging over the A-term/B-term boundary.

    For A-term the last week is trimmed after its end date; for B-term the
    first week is trimmed before its start date.  Assumes all weeks carry
    the same summer_term value (set by the caller).
    """
    if weeks[0].summer_term == "A-term":
        week_to_trim = weeks[-1]
        week_to_trim.sections = _trim_sections_after(week_to_trim.sections,
                                                     week_to_trim.end_date)
    if weeks[0].summer_term == "B-term":
        week_to_trim = weeks[0]
        week_to_trim.sections = _trim_sections_before(week_to_trim.sections,
                                                      week_to_trim.start_date)
    return weeks
def _trim_sections_after(sections, date):
    """Clear meeting days after `date`'s weekday on every section.

    A-term sections running past `date` are left untouched so their
    overrun meetings stay visible.
    """
    cutoff_day = int(date.strftime('%w'))  # Sunday=0 .. Saturday=6
    # day attribute paired with the largest cutoff that still disables it
    trim_rules = [('meets_saturday', 5), ('meets_friday', 4),
                  ('meets_thursday', 3), ('meets_wednesday', 2),
                  ('meets_tuesday', 1), ('meets_monday', 0)]
    for section in sections:
        if section.summer_term == "A-term" and section.end_date > date:
            # preserve a-term course meetings that go beyond the term's last day
            continue
        for meeting in section.meetings:
            for attr, threshold in trim_rules:
                if cutoff_day <= threshold:
                    setattr(meeting, attr, False)
    return sections
def _trim_sections_before(sections, date):
    """Clear meeting days before `date`'s weekday on every section.

    B-term sections starting before `date` are left untouched so their
    early meetings stay visible.
    """
    cutoff_day = int(date.strftime('%w'))  # Sunday=0 .. Saturday=6
    # day attribute paired with the smallest cutoff that disables it
    trim_rules = [('meets_sunday', 1), ('meets_monday', 2),
                  ('meets_tuesday', 3), ('meets_wednesday', 4),
                  ('meets_thursday', 5), ('meets_friday', 6)]
    for section in sections:
        if section.summer_term == "B-term" and section.start_date < date:
            # preserve b-term course meetings that occur before the term's 1st day
            continue
        for meeting in section.meetings:
            for attr, threshold in trim_rules:
                if cutoff_day >= threshold:
                    setattr(meeting, attr, False)
    return sections
def _trim_section_after(section, date):
    """Clear this section's meeting days after `date`'s weekday.

    Returns True when at least one previously-set meeting day was cleared.
    """
    cutoff_day = int(date.strftime('%w'))  # Sunday=0 .. Saturday=6
    trim_rules = [('meets_saturday', 5), ('meets_friday', 4),
                  ('meets_thursday', 3), ('meets_wednesday', 2),
                  ('meets_tuesday', 1), ('meets_monday', 0)]
    trimmed = False
    for meeting in section.meetings:
        for attr, threshold in trim_rules:
            if cutoff_day <= threshold and getattr(meeting, attr):
                trimmed = True
                setattr(meeting, attr, False)
    return trimmed
def _trim_section_before(section, date):
    """Clear this section's meeting days before `date`'s weekday.

    Returns True when at least one previously-set meeting day was cleared.
    """
    cutoff_day = int(date.strftime('%w'))  # Sunday=0 .. Saturday=6
    trim_rules = [('meets_sunday', 1), ('meets_monday', 2),
                  ('meets_tuesday', 3), ('meets_wednesday', 4),
                  ('meets_thursday', 5), ('meets_friday', 6)]
    trimmed = False
    for meeting in section.meetings:
        for attr, threshold in trim_rules:
            if cutoff_day >= threshold and getattr(meeting, attr):
                trimmed = True
                setattr(meeting, attr, False)
    return trimmed
def _is_split_summer(schedule):
    """True for a summer schedule containing any non-full-term section.

    Fix: the old code fell off the end and implicitly returned None when
    every section was full-term; now returns an explicit False, which is
    truthiness-compatible for all callers.
    """
    if schedule.term.quarter != 'summer':
        return False
    return any(section.summer_term != "Full-term"
               for section in schedule.sections)
def _add_weekend_meeting_data(weeks):
    """Set meets_saturday/meets_sunday on each week from its meetings.

    Any AttributeError while walking a week's sections/meetings causes
    that week to be skipped silently (e.g. placeholder periods without
    section data).
    """
    for week in weeks:
        try:
            for section in week.sections:
                for meeting in section.meetings:
                    if meeting.meets_saturday:
                        week.meets_saturday = True
                    if meeting.meets_sunday:
                        week.meets_sunday = True
        except AttributeError:
            pass
    return weeks
def _add_sections_to_weeks(sections, weeks):
    """Attach a deep copy of every section overlapping each week's dates."""
    for week in weeks:
        for section in sections:
            overlaps = (section.start_date <= week.end_date and
                        section.end_date >= week.start_date)
            if overlaps:
                # copy, because each week trims its own meeting days later
                week.sections.append(copy.deepcopy(section))
    return weeks
def _consolidate_weeks(weeks):
    """Merge consecutive weeks that contain the same set of sections.

    Boundaries that must stay visible are never merged: the last A-term
    week, the second B-term week (so the partial first week stands alone),
    and any week adjacent to a week with trimmed meetings.
    """
    consolidated_weeks = []
    prev_week = None
    for week in weeks:
        if prev_week is None:
            prev_week = week
        else:
            will_merge = True
            # Don't merge last week of A-term
            if week.summer_term == "A-term" \
                    and weeks.index(week) == len(weeks) - 1:
                will_merge = False
            # Don't merge 2nd week of B term with 1st
            elif week.summer_term == "B-term" and weeks.index(week) == 1:
                will_merge = False
            else:
                # Merge weeks with same sections
                if _section_lists_are_same(prev_week.sections, week.sections):
                    will_merge = True
                else:
                    will_merge = False
            if week.meetings_trimmed or prev_week.meetings_trimmed:
                will_merge = False
            if will_merge:
                # absorb this week by extending the running block's range
                prev_week.end_date = week.end_date
            else:
                consolidated_weeks.append(prev_week)
                prev_week = week
    # Append last week block
    consolidated_weeks.append(prev_week)
    return consolidated_weeks
def _section_lists_are_same(list1, list2):
    """True when both lists contain matching sections (order-insensitive).

    Fix: the old code compared lengths with `is not`, i.e. int object
    identity, which is only reliable for CPython's small-int cache; `!=`
    is the correct value comparison.
    """
    if len(list1) != len(list2):
        return False
    for l1_section in list1:
        if not any(_sections_are_same(l1_section, l2_section)
                   for l2_section in list2):
            return False
    return True
def _sections_are_same(section1, section2):
    """Sections match when curriculum, course number and section id agree."""
    identity_attrs = ('curriculum_abbr', 'course_number', 'section_id')
    return all(getattr(section1, attr) == getattr(section2, attr)
               for attr in identity_attrs)
def _get_weeks_from_bounds(bounds):
    """Split a (start, end) date range into one SchedulePeriod per week.

    Weeks run Sunday..Saturday; the first and last periods may be partial
    when the bounds fall mid-week.
    """
    start, end = bounds
    periods = []
    # weeks between start>end dates, including first day
    schedule_length = math.ceil(((end-start).days + 1)/7.0)
    while schedule_length > 0:
        period = SchedulePeriod()
        period.start_date = start
        start_day = int(start.strftime('%w'))  # %w: Sunday=0 .. Saturday=6
        end_offset = 6-start_day
        end_date = (start + timedelta(days=end_offset))
        # handle case where week ends midweek
        if end_date > end:
            end_date = end
        period.end_date = end_date
        periods.append(period)
        # jump to the next Sunday
        next_start_offset = 7-start_day
        start = (start + timedelta(days=next_start_offset))
        schedule_length -= 1
    return periods
def get_schedule_bounds(schedule):
    """Return (start, end) dates spanning all sections, widened to Sun..Sat.

    Uses stdlib datetime.timedelta instead of dateutil.relativedelta: for
    whole-day arithmetic they are equivalent, and timedelta matches the
    module's other date math.
    """
    start = None
    end = None
    for section in schedule.sections:
        if start is None or section.start_date < start:
            start = section.start_date
        if end is None or section.end_date > end:
            end = section.end_date
    # set start to first sunday (%w: Sunday=0 .. Saturday=6)
    start_day = int(start.strftime('%w'))
    if start_day != 0:
        start = start - timedelta(days=start_day)
    # set end to last saturday
    end_day = int(end.strftime('%w'))
    if end_day != 6:
        end += timedelta(days=6 - end_day)
    return start, end
def _add_dates_to_sections(schedule):
    """Fill in missing section start/end dates from the term calendar.

    Non-PCE sections come without explicit dates; B-term sections start at
    the B-term boundary and A-term sections end at the A-term boundary,
    everything else spans the whole quarter.
    """
    term = schedule.term
    for section in schedule.sections:
        if section.start_date is None:
            section.start_date = (term.bterm_first_date
                                  if section.summer_term == "B-term"
                                  else term.first_day_quarter)
        if section.end_date is None:
            section.end_date = (term.aterm_last_date
                                if section.summer_term == "A-term"
                                else term.last_day_instruction)
    return schedule
def _trim_summer_term(schedule, summer_term):
    """Keep only periods for the requested summer term.

    summer_term is expected lower case (e.g. 'a-term'); periods without a
    summer_term are dropped.
    """
    return [period for period in schedule
            if period.summer_term is not None
            and period.summer_term.lower() == summer_term]
class SchedulePeriod():
    """One contiguous block of a visual schedule: a week, a merged run of
    identical weeks, or the finals period."""

    def __init__(self):
        self.start_date = None
        self.end_date = None
        self.sections = []
        self.meets_saturday = False
        self.meets_sunday = False
        self.is_finals = False
        self.qtr_start = None
        # sections will be either A term OR B term; full-term classes are
        # split into corresponding A- and B-term pieces
        self.summer_term = None
        self.meetings_trimmed = False
        self.meetings_trimmed_front = False
        self.meetings_trimmed_back = False

    def json_data(self):
        """Serialize the period and its sections for the schedule API."""
        serialized_sections = []
        for section in self.sections:
            entry = section.json_data()
            try:
                entry['color_id'] = section.color_id
            except AttributeError:
                # sections without an assigned display color are fine
                pass
            entry['is_teaching'] = section.is_teaching
            serialized_sections.append(entry)
        return {'start_date': self.start_date,
                'end_date': self.end_date,
                'meets_saturday': self.meets_saturday,
                'meets_sunday': self.meets_sunday,
                'sections': serialized_sections}
| 33.298223 | 78 | 0.623156 |
2c758ed187d6e17ad0937d52dc8deabe9a28d926 | 16,301 | py | Python | mbed_lstools/platform_database.py | bentcooke/mbed-ls | 04e57d6f632245ba636efbf00e8b75815871c4c5 | [
"Apache-2.0"
] | null | null | null | mbed_lstools/platform_database.py | bentcooke/mbed-ls | 04e57d6f632245ba636efbf00e8b75815871c4c5 | [
"Apache-2.0"
] | null | null | null | mbed_lstools/platform_database.py | bentcooke/mbed-ls | 04e57d6f632245ba636efbf00e8b75815871c4c5 | [
"Apache-2.0"
] | null | null | null | """
mbed SDK
Copyright (c) 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Functions that manage a platform database"""
import datetime
import json
import re
from collections import OrderedDict, defaultdict
from copy import copy
from io import open
from os import makedirs
from os.path import join, dirname, getmtime
from appdirs import user_data_dir
from fasteners import InterProcessLock
try:
unicode
except NameError:
unicode = str
import logging
logger = logging.getLogger("mbedls.platform_database")
logger.addHandler(logging.NullHandler())
del logging
LOCAL_PLATFORM_DATABASE = join(user_data_dir("mbedls"), "platforms.json")
LOCAL_MOCKS_DATABASE = join(user_data_dir("mbedls"), "mock.json")
DEFAULT_PLATFORM_DB = {
u'daplink': {
u'0001': u'LPC2368',
u'0002': u'LPC2368',
u'0003': u'LPC2368',
u'0004': u'LPC2368',
u'0005': u'LPC2368',
u'0006': u'LPC2368',
u'0007': u'LPC2368',
u'0100': u'LPC2368',
u'0183': u'UBLOX_C027',
u'0200': u'KL25Z',
u'0201': u'KW41Z',
u'0210': u'KL05Z',
u'0214': u'HEXIWEAR',
u'0217': u'K82F',
u'0218': u'KL82Z',
u'0220': u'KL46Z',
u'0227': u'MIMXRT1050_EVK',
u'0230': u'K20D50M',
u'0231': u'K22F',
u'0240': u'K64F',
u'0245': u'K64F',
u'0250': u'KW24D',
u'0261': u'KL27Z',
u'0262': u'KL43Z',
u'0300': u'MTS_GAMBIT',
u'0305': u'MTS_MDOT_F405RG',
u'0310': u'MTS_DRAGONFLY_F411RE',
u'0311': u'K66F',
u'0315': u'MTS_MDOT_F411RE',
u'0350': u'XDOT_L151CC',
u'0400': u'MAXWSNENV',
u'0405': u'MAX32600MBED',
u'0406': u'MAX32620MBED',
u'0407': u'MAX32620HSP',
u'0408': u'MAX32625NEXPAQ',
u'0409': u'MAX32630FTHR',
u'0415': u'MAX32625MBED',
u'0450': u'MTB_UBLOX_ODIN_W2',
u'0451': u'MTB_MXCHIP_EMW3166',
u'0452': u'MTB_LAIRD_BL600',
u'0453': u'MTB_MTS_XDOT',
u'0454': u'MTB_MTS_DRAGONFLY',
u'0455': u'MTB_UBLOX_NINA_B1',
u'0456': u'MTB_MURATA_ABZ',
u'0457': u'MTB_RAK811',
u'0458': u'MTB_ADV_WISE_1510',
u'0459': u'MTB_ADV_WISE_1530',
u'0460': u'MTB_ADV_WISE_1570',
u'0461': u'MTB_LAIRD_BL652',
u'0462': u'MTB_USI_WM_BN_BM_22',
u'0500': u'SPANSION_PLACEHOLDER',
u'0505': u'SPANSION_PLACEHOLDER',
u'0510': u'SPANSION_PLACEHOLDER',
u'0602': u'EV_COG_AD3029LZ',
u'0603': u'EV_COG_AD4050LZ',
u'0700': u'NUCLEO_F103RB',
u'0705': u'NUCLEO_F302R8',
u'0710': u'NUCLEO_L152RE',
u'0715': u'NUCLEO_L053R8',
u'0720': u'NUCLEO_F401RE',
u'0725': u'NUCLEO_F030R8',
u'0730': u'NUCLEO_F072RB',
u'0735': u'NUCLEO_F334R8',
u'0740': u'NUCLEO_F411RE',
u'0742': u'NUCLEO_F413ZH',
u'0743': u'DISCO_F413ZH',
u'0744': u'NUCLEO_F410RB',
u'0745': u'NUCLEO_F303RE',
u'0746': u'DISCO_F303VC',
u'0747': u'NUCLEO_F303ZE',
u'0750': u'NUCLEO_F091RC',
u'0755': u'NUCLEO_F070RB',
u'0760': u'NUCLEO_L073RZ',
u'0764': u'DISCO_L475VG_IOT01A',
u'0765': u'NUCLEO_L476RG',
u'0766': u'SILICA_SENSOR_NODE',
u'0770': u'NUCLEO_L432KC',
u'0775': u'NUCLEO_F303K8',
u'0777': u'NUCLEO_F446RE',
u'0778': u'NUCLEO_F446ZE',
u'0779': u'NUCLEO_L433RC_P',
u'0780': u'NUCLEO_L011K4',
u'0785': u'NUCLEO_F042K6',
u'0788': u'DISCO_F469NI',
u'0790': u'NUCLEO_L031K6',
u'0791': u'NUCLEO_F031K6',
u'0795': u'DISCO_F429ZI',
u'0796': u'NUCLEO_F429ZI',
u'0797': u'NUCLEO_F439ZI',
u'0799': u'ST_PLACEHOLDER',
u'0805': u'DISCO_L053C8',
u'0810': u'DISCO_F334C8',
u'0812': u'NUCLEO_F722ZE',
u'0813': u'NUCLEO_H743ZI',
u'0815': u'DISCO_F746NG',
u'0816': u'NUCLEO_F746ZG',
u'0817': u'DISCO_F769NI',
u'0818': u'NUCLEO_F767ZI',
u'0819': u'NUCLEO_F756ZG',
u'0820': u'DISCO_L476VG',
u'0821': u'NUCLEO_L452RE',
u'0822': u'DISCO_L496AG',
u'0823': u'NUCLEO_L496ZG',
u'0824': u'LPC824',
u'0826': u'NUCLEO_F412ZG',
u'0827': u'NUCLEO_L486RG',
u'0828': u'NUCLEO_L496ZG_P',
u'0829': u'NUCLEO_L452RE_P',
u'0830': u'DISCO_F407VG',
u'0833': u'DISCO_L072CZ_LRWAN1',
u'0835': u'NUCLEO_F207ZG',
u'0839': u'NUCLEO_WB55RG',
u'0840': u'B96B_F446VE',
u'0900': u'XPRO_SAMR21',
u'0905': u'XPRO_SAMW25',
u'0910': u'XPRO_SAML21',
u'0915': u'XPRO_SAMD21',
u'1000': u'LPC2368',
u'1001': u'LPC2368',
u'1010': u'LPC1768',
u'1017': u'HRM1017',
u'1018': u'SSCI824',
u'1019': u'TY51822R3',
u'1022': u'RO359B',
u'1034': u'LPC11U34',
u'1040': u'LPC11U24',
u'1045': u'LPC11U24',
u'1050': u'LPC812',
u'1054': u'LPC54114',
u'1056': u'LPC546XX',
u'1060': u'LPC4088',
u'1061': u'LPC11U35_401',
u'1062': u'LPC4088_DM',
u'1070': u'NRF51822',
u'1075': u'NRF51822_OTA',
u'1080': u'OC_MBUINO',
u'1090': u'RBLAB_NRF51822',
u'1095': u'RBLAB_BLENANO',
u'1100': u'NRF51_DK',
u'1101': u'NRF52_DK',
u'1102': u'NRF52840_DK',
u'1105': u'NRF51_DK_OTA',
u'1114': u'LPC1114',
u'1120': u'NRF51_DONGLE',
u'1130': u'NRF51822_SBK',
u'1140': u'WALLBOT_BLE',
u'1168': u'LPC11U68',
u'1200': u'NCS36510',
u'1234': u'UBLOX_C027',
u'1235': u'UBLOX_C027',
u'1236': u'UBLOX_EVK_ODIN_W2',
u'1237': u'UBLOX_EVK_NINA_B1',
u'1300': u'NUC472-NUTINY',
u'1301': u'NUMBED',
u'1302': u'NUMAKER_PFM_NUC472',
u'1303': u'NUMAKER_PFM_M453',
u'1304': u'NUMAKER_PFM_M487',
u'1305': u'NUMAKER_PFM_M2351',
u'1306': u'NUMAKER_PFM_NANO130',
u'1307': u'NUMAKER_PFM_NUC240',
u'1549': u'LPC1549',
u'1600': u'LPC4330_M4',
u'1605': u'LPC4330_M4',
u'2000': u'EFM32_G8XX_STK',
u'2005': u'EFM32HG_STK3400',
u'2010': u'EFM32WG_STK3800',
u'2015': u'EFM32GG_STK3700',
u'2020': u'EFM32LG_STK3600',
u'2025': u'EFM32TG_STK3300',
u'2030': u'EFM32ZG_STK3200',
u'2035': u'EFM32PG_STK3401',
u'2040': u'EFM32PG12_STK3402',
u'2041': u'TB_SENSE_12',
u'2045': u'TB_SENSE_1',
u'2100': u'XBED_LPC1768',
u'2201': u'WIZWIKI_W7500',
u'2202': u'WIZWIKI_W7500ECO',
u'2203': u'WIZWIKI_W7500P',
u'2500': u'ADV_WISE_1570',
u'3001': u'LPC11U24',
u'4000': u'LPC11U35_Y5_MBUG',
u'4005': u'NRF51822_Y5_MBUG',
u'4100': u'MOTE_L152RC',
u'4337': u'LPC4337',
u'4500': u'DELTA_DFCM_NNN40',
u'4501': u'DELTA_DFBM_NQ620',
u'4502': u'DELTA_DFCM_NNN50',
u'4600': u'REALTEK_RTL8195AM',
u'5000': u'ARM_MPS2',
u'5001': u'ARM_MPS2_M0',
u'5002': u'ARM_BEETLE_SOC',
u'5003': u'ARM_MPS2_M0P',
u'5005': u'ARM_MPS2_M0DS',
u'5007': u'ARM_MPS2_M1',
u'5009': u'ARM_MPS2_M3',
u'5011': u'ARM_MPS2_M4',
u'5015': u'ARM_MPS2_M7',
u'5020': u'HOME_GATEWAY_6LOWPAN',
u'5500': u'RZ_A1H',
u'5501': u'GR_LYCHEE',
u'6660': u'NZ32_SC151',
u'7010': u'BLUENINJA_CDP_TZ01B',
u'7011': u'TMPM066',
u'7013': u'TMPM46B',
u'7402': u'MBED_BR_HAT',
u'7778': u'TEENSY3_1',
u'8001': u'UNO_91H',
u'8002': u'UNO_81C',
u'8003': u'UNO_81AM',
u'8004': u'UNO_81A',
u'8080': u'FF1705_L151CC',
u'8081': u'FF_LPC546XX',
u'9001': u'LPC1347',
u'9002': u'LPC11U24',
u'9003': u'LPC1347',
u'9004': u'ARCH_PRO',
u'9006': u'LPC11U24',
u'9007': u'LPC11U35_501',
u'9008': u'XADOW_M0',
u'9009': u'ARCH_BLE',
u'9010': u'ARCH_GPRS',
u'9011': u'ARCH_MAX',
u'9012': u'SEEED_TINY_BLE',
u'9900': u'NRF51_MICROBIT',
u'C002': u'VK_RZ_A1H',
u'C005': u'MTM_MTCONNECT04S',
u'C006': u'VBLUNO51',
u'C008': u'SAKURAIO_EVB_01',
u'C030': u'UBLOX_C030_U201',
u'C031': u'UBLOX_C030_N211',
u'C032': u'UBLOX_C030_R404M',
u'C033': u'UBLOX_C030_R410M',
u'C034': u'UBLOX_C030_S200',
u'C035': u'UBLOX_C030_R3121',
u'FFFF': u'K20 BOOTLOADER',
u'RIOT': u'RIOT',
},
u'jlink': {
u'X349858SLYN': {
u'platform_name': u'NRF52_DK',
u'jlink_device_name': u'nRF52832_xxaa'
},
u'FRDM-KL25Z': {
u'platform_name': u'KL25Z',
u'jlink_device_name': u'MKL25Z128xxx4'
},
u'FRDM-KL27Z': {
u'platform_name': u'KL27Z',
u'jlink_device_name': u'MKL27Z64xxx4'
},
u'FRDM-KL43Z': {
u'platform_name': u'KL43Z',
u'jlink_device_name': u'MKL43Z256xxx4'
}
}
}
def _get_modified_time(path):
    """Return *path*'s modification time as a datetime.

    Unreadable or missing paths map to the epoch (timestamp 0) so they
    always compare as older than any real file.
    """
    try:
        stamp = getmtime(path)
    except OSError:
        stamp = 0
    return datetime.datetime.fromtimestamp(stamp)
def _older_than_me(path):
    # True when `path` was last modified before this module file was.
    return _get_modified_time(path) < _get_modified_time(__file__)
def _modify_data_format(data, verbose_data, simple_data_key='platform_name'):
    """Convert a database entry between its verbose (dict) and simple (bare value) forms.

    When `verbose_data` is requested, a bare value is wrapped into a dict under
    `simple_data_key`; otherwise a dict entry is collapsed to that key's value.
    """
    entry_is_dict = isinstance(data, dict)
    if verbose_data:
        return data if entry_is_dict else {simple_data_key: data}
    return data[simple_data_key] if entry_is_dict else data
def _overwrite_or_open(db):
    # Load the JSON platform database at path `db`.  If it is the local
    # database and is missing, corrupt or older than this module, it is
    # rewritten from DEFAULT_PLATFORM_DB; other databases fall back to {}.
    try:
        # a local DB older than this source file is treated as stale so that
        # newly added default entries get picked up
        if db is LOCAL_PLATFORM_DATABASE and _older_than_me(db):
            raise ValueError("Platform Database is out of date")
        with open(db, encoding="utf-8") as db_in:
            return json.load(db_in)
    except (IOError, ValueError) as exc:
        if db is LOCAL_PLATFORM_DATABASE:
            logger.warning(
                "Error loading database %s: %s; Recreating", db, str(exc))
            try:
                makedirs(dirname(db))
            except OSError:
                pass  # directory already exists (or cannot be created)
            try:
                with open(db, "w", encoding="utf-8") as out:
                    # NOTE(review): `unicode` presumably comes from a py2/py3
                    # compat alias defined elsewhere in this module -- confirm;
                    # plain Python 3 has no builtin `unicode`.
                    out.write(unicode(json.dumps(DEFAULT_PLATFORM_DB)))
            except IOError:
                pass  # best-effort rewrite; still return the defaults below
            return copy(DEFAULT_PLATFORM_DB)
        else:
            return {}
class PlatformDatabase(object):
    """Represents a union of multiple platform database files.
    Handles inter-process synchronization of database files.

    Each database maps a device type (e.g. 'daplink') to a dict of
    4-hex-digit target ids -> platform data.  Writes go to the "primary"
    database when one is unambiguous.
    """
    # valid target ids are exactly 4 hex digits
    target_id_pattern = re.compile(r'^[a-fA-F0-9]{4}$')
    def __init__(self, database_files, primary_database=None):
        """Construct a PlatformDatabase object from a series of platform database files"""
        self._prim_db = primary_database
        # a single source file implicitly becomes the primary database
        if not self._prim_db and len(database_files) == 1:
            self._prim_db = database_files[0]
        self._dbs = OrderedDict()
        self._keys = defaultdict(set)
        for db in database_files:
            new_db = _overwrite_or_open(db)
            # legacy flat databases ({id: name}) are wrapped under 'daplink'
            first_value = None
            if new_db.values():
                first_value = next(iter(new_db.values()))
            if not isinstance(first_value, dict):
                new_db = {
                    'daplink': new_db
                }
            if new_db:
                for device_type in new_db:
                    duplicates = self._keys[device_type].intersection(set(new_db[device_type].keys()))
                    duplicates = set(["%s.%s" % (device_type, k) for k in duplicates])
                    if duplicates:
                        logger.warning(
                            "Duplicate platform ids found: %s,"
                            " ignoring the definitions from %s",
                            " ".join(duplicates), db)
                    self._dbs[db] = new_db
                    self._keys[device_type] = self._keys[device_type].union(new_db[device_type].keys())
            else:
                self._dbs[db] = new_db
    def items(self, device_type='daplink'):
        """Yield (target_id, platform) pairs for `device_type` across all databases."""
        for db in self._dbs.values():
            for entry in db.get(device_type, {}).items():
                yield entry
    def all_ids(self, device_type='daplink'):
        """Return an iterator over every known target id for `device_type`."""
        return iter(self._keys[device_type])
    def get(self, index, default=None, device_type='daplink', verbose_data=False):
        """Standard lookup function. Works exactly like a dict. If 'verbose_data'
        is True, all data for the platform is returned as a dict."""
        for db in self._dbs.values():
            if device_type in db:
                maybe_answer = db[device_type].get(index, None)
                if maybe_answer:
                    return _modify_data_format(maybe_answer, verbose_data)
        return default
    def _update_db(self):
        """Write the primary database back to disk under an inter-process lock.

        Returns True on success; False when the lock cannot be acquired
        within 60 seconds or there is no unambiguous primary database.
        """
        if self._prim_db:
            lock = InterProcessLock("%s.lock" % self._prim_db)
            acquired = lock.acquire(blocking=False)
            if not acquired:
                logger.debug("Waiting 60 seconds for file lock")
                acquired = lock.acquire(blocking=True, timeout=60)
            if acquired:
                try:
                    with open(self._prim_db, "w", encoding="utf-8") as out:
                        out.write(unicode(
                            json.dumps(self._dbs[self._prim_db])))
                    return True
                finally:
                    lock.release()
            else:
                logger.error("Could not update platform database: "
                             "Lock acquire failed after 60 seconds")
                return False
        else:
            logger.error("Can't update platform database: "
                         "destination database is ambiguous")
            return False
    def add(self, id, platform_name, permanent=False, device_type='daplink'):
        """Add a platform to this database, optionally updating an origin
        database
        """
        if self.target_id_pattern.match(id):
            if self._prim_db:
                if device_type not in self._dbs[self._prim_db]:
                    self._dbs[self._prim_db][device_type] = {}
                self._dbs[self._prim_db][device_type][id] = platform_name
            else:
                # no primary db: write into the first database loaded
                cur_db = next(iter(self._dbs.values()))
                if device_type not in cur_db:
                    cur_db[device_type] = {}
                cur_db[device_type][id] = platform_name
            self._keys[device_type].add(id)
            if permanent:
                self._update_db()
        else:
            # bug fix: message previously misspelled as "Invald"
            raise ValueError("Invalid target id: %s" % id)
    def remove(self, id, permanent=False, device_type='daplink', verbose_data=False):
        """Remove a platform from this database, optionally updating an origin
        database. If 'verbose_data' is True, all data for the platform is returned
        as a dict.
        """
        logger.debug("Trying remove of %s", id)
        # bug fix: was `id is '*'` -- identity comparison with a string
        # literal, which is implementation-dependent and a SyntaxWarning
        # on Python >= 3.8; equality is what is meant here
        if id == '*' and device_type in self._dbs[self._prim_db]:
            self._dbs[self._prim_db][device_type] = {}
        for db in self._dbs.values():
            if device_type in db and id in db[device_type]:
                logger.debug("Removing id...")
                removed = db[device_type][id]
                del db[device_type][id]
                self._keys[device_type].remove(id)
                if permanent:
                    self._update_db()
                return _modify_data_format(removed, verbose_data)
| 34.245798 | 103 | 0.554751 |
fcbeaf9744d51a550160cb7231612f65b4a220c2 | 4,169 | py | Python | src/1-Phenotype-web/ext/wgetdl/csv/python/preTaskAnalysis.py | glasgowm148/osgenome_rccx | 08f82ddf85253f3911ca5f7ccfdc5ef6cd2b3220 | [
"MIT",
"Unlicense"
] | 8 | 2019-09-17T10:22:40.000Z | 2022-03-30T22:22:19.000Z | src/1-Phenotype-web/ext/wgetdl/csv/python/preTaskAnalysis.py | glasgowm148/osgenome_rccx | 08f82ddf85253f3911ca5f7ccfdc5ef6cd2b3220 | [
"MIT",
"Unlicense"
] | 4 | 2021-02-02T10:36:55.000Z | 2021-11-30T13:17:52.000Z | src/1-Phenotype-web/ext/wgetdl/csv/python/preTaskAnalysis.py | glasgowm148/osgenome_rccx | 08f82ddf85253f3911ca5f7ccfdc5ef6cd2b3220 | [
"MIT",
"Unlicense"
] | 2 | 2019-09-11T00:10:26.000Z | 2021-01-21T22:38:01.000Z | #Analysis of the pre-task results
#How to run:
#python preTaskAnalysis.py oldFileName newFileName_UserAnalysis newFileName2_QuestionAnalysis
import sys
# command-line arguments: input CSV, then the two output CSV paths
commandList = sys.argv
oldFileName = commandList[1]
newFileName = commandList[2]
newFileName2 = commandList[3]
# NOTE(review): processFile assigns a *local* numUsers, so this global is
# never actually updated -- confirm intent
numUsers = 0 #set when reading in the file
perfectScore = 0 #keeps track of the num of users with a perfect score
perfectScorers = [] #keeps track of all userIDs of users with a perfect score
qIncorrect = [0,0,0,0,0,0] #keeps track of the num of users who got a specific question
#incorrect. each place in the array corresponds to a question
def processFile(file):
    """Read a comma-separated results file and return a list of rows.

    Each row is a list of the comma-separated fields of one line.
    Fix: the original leaked the file handle (open(...).read() without
    close); a `with` block now guarantees it is released.  The dead local
    `numUsers` (which shadowed, not updated, the module global) is removed.
    """
    with open(file, "r") as fh:
        content = fh.read()
    allData = []
    for line in content.splitlines():
        row = line.split(",")
        # strip any stray newline from the final field
        row[len(row)-1] = row[len(row)-1].strip("\n")
        allData.append(row)
    return allData
# returns a 1 if the question is answered incorrectly, returns 0 if it is not
def processQuestion(user, question, answer):
    # Score one quiz question (Python 2 code -- note the print statements).
    # `user` is a row from processFile; answers start at index 2, so
    # question `q`'s response is user[q + 1].  A response matches if it
    # equals `answer` either bare or wrapped in double quotes.
    # Side effect: increments the global qIncorrect counter for this question.
    # Returns 1 when incorrect, 0 when correct.
    print (user[question + 1].lower())
    print answer
    print (user[question + 1].lower() != answer and (user[question + 1].lower()) != '"'+ answer + '"')
    if (user[question + 1].lower()) != answer and (user[question + 1].lower()) != '"'+ answer + '"':
        qIncorrect[question-1] = qIncorrect[question-1] + 1
        return 1
    else :
        return 0
#adds up the number of incorrect answers of a user, calls on the helper method above
def processOneUser(user):
    # Score all six questions for one user (expected answers are fixed below),
    # update the perfect-score globals, and append the incorrect count and
    # percentage to the user's row in place.
    numIncorrect = 0
    numIncorrect += processQuestion(user, 1, "false") #calls helper method above
    numIncorrect += processQuestion(user, 2, "99")
    numIncorrect += processQuestion(user, 3, "false")
    numIncorrect += processQuestion(user, 4, "true")
    numIncorrect += processQuestion(user, 5, "true")
    numIncorrect += processQuestion(user, 6, "true")
    print numIncorrect
    if numIncorrect == 0:
        global perfectScore
        perfectScore = perfectScore + 1
        perfectScorers.append(user[1])
    percentIncorrect = round(((float(numIncorrect)/6)*100), 2)
    #to calculate and round the percent to 2 decimals
    user.append(str(numIncorrect))
    user.append(str(percentIncorrect))
#writes the results in two files
#@params: allData takes in the user data that has been read in and will be processed
#file is the name of the file that all of the analysed data will be written into, if the
#file already exists, it will be overwritten, otherwise a new file is created
#file2 is the name of the file that all of the analysed data will be written into, if the
#file already exists, it will be overwritten, otherwise a new file is created
def writeResults(allData, file, file2):
    # Score every user and write two CSV reports:
    #  - `file`: one row per user with their answers plus incorrect count/%
    #  - `file2`: per-question incorrect counts and the perfect scorers
    # Existing files are overwritten.  Relies on the processOneUser /
    # qIncorrect / perfectScore globals being in their initial state.
    newFile = open(file, "w") #creates file to write the results of each user
    newFile.write(" , UserID, Q1, Q2, Q3, Q4, Q5, Q6, time, #Incorrect, %Incorrect, \n")
    for user in allData:
        processOneUser(user) #calls on helper method to process their data
        temp = ""
        for item in user:
            temp = temp + item + ", "
        temp = temp[0:len(temp)-2]  # drop the trailing ", "
        newFile.write(temp)
        newFile.write("\n")
    newFile.close()
    #writing num of ppl who got each question incorrect
    newFile2 = open(file2, "w") #creates file to write the analysis of the questions, etc
    newFile2.write("Q1, Q2, Q3, Q4, Q5, Q6, \n")
    temp2 = ""
    for datapoint in qIncorrect:
        temp2 = temp2 + str(datapoint) + ", "
    temp2 = temp2[0:len(temp2)-2]  # drop the trailing ", "
    newFile2.write(temp2)
    newFile2.write("\n")
    newFile2.write("#Users with Perfect Score: ," + str(perfectScore))
    newFile2.write("\nUsers with Perfect Score:\n")
    for userID in perfectScorers:
        newFile2.write(userID)
        newFile2.write("\n")
    newFile2.close()
def preTaskAnalysis(inFile, outFile1, outFile2):
    """Run the full pipeline: parse `inFile`, then write the per-user report
    to `outFile1` and the per-question report to `outFile2`."""
    writeResults(processFile(inFile), outFile1, outFile2)
# TESTING USER DATA
#preTaskAnalysis(oldFileName, newFileName, newFileName2)
#allUserData = processFile(oldFileName)
#writeResults(allUserData, newFileName, newFileName2)
#allUserData = processFile("pretask_4.csv")
#writeResults(allUserData, "pretask_4_Analysis.csv", "pretask_4_QuestionAnalysis.csv")
| 37.223214 | 99 | 0.730631 |
9a24691505909a174e09ac1cb4802106fadc0459 | 43,905 | py | Python | data_utils.py | glahr/RGAN | 91541616c1bb6ecc4db4aa8032c8667fc9758618 | [
"MIT"
] | null | null | null | data_utils.py | glahr/RGAN | 91541616c1bb6ecc4db4aa8032c8667fc9758618 | [
"MIT"
] | null | null | null | data_utils.py | glahr/RGAN | 91541616c1bb6ecc4db4aa8032c8667fc9758618 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pdb
import re
from time import time
import json
import random
import os
import model
import paths
from scipy.spatial.distance import pdist, squareform
from scipy.stats import multivariate_normal, invgamma, mode
from scipy.special import gamma
# from scipy.misc import imresize
from functools import partial
from math import ceil
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
import tensorflow as tf
# --- to do with loading --- #
def get_samples_and_labels(settings):
    """
    Parse settings options to load or generate correct type of data,
    perform test/train split as necessary, and reform into 'samples' and 'labels'
    dictionaries (each keyed by 'train'/'vali'/'test').

    Side effect: mutates `settings` (seq_length, num_samples, num_signals,
    num_generated_features, and possibly cond_dim/max_val) to match the data.
    Returns (samples, pdf, labels).
    """
    if settings['data_load_from']:
        data_path = './experiments/data/' + settings['data_load_from'] + '.data.npy'
        print('Loading data from', data_path)
        samples, pdf, labels = get_data('load', data_path)
        train, vali, test = samples['train'], samples['vali'], samples['test']
        train_labels, vali_labels, test_labels = labels['train'], labels['vali'], labels['test']
        del samples, labels
    elif settings['data'] == 'eICU_task':
        # always load eICU
        samples, pdf, labels = get_data('eICU_task', {})
        # del samples, labels
        train, vali, test = samples['train'], samples['vali'], samples['test']
        train_labels, vali_labels, test_labels = labels['train'], labels['vali'], labels['test']
        assert train_labels.shape[1] == settings['cond_dim']
        # normalise to between -1, 1
        train, vali, test = normalise_data(train, vali, test)
    elif settings['data'] == 'threading':
        samples, pdf, labels = get_data('threading', {})
        # pad all (variable-length) recordings to the longest one
        max_seq_len = 0
        for data_i in samples:
            max_seq_len = max(max_seq_len, len(data_i[:, 0]))
        samples = tf.keras.preprocessing.sequence.pad_sequences(samples, maxlen=max_seq_len, padding='post',
                                                                dtype='float32')
        train, test, train_labels, test_labels = train_test_split(samples, labels, test_size = 0.33, random_state = 42)
        vali, test, vali_labels, test_labels = train_test_split(test, test_labels, test_size = 0.5, random_state = 42)
    else:
        # generate the data
        data_vars = ['num_samples', 'seq_length', 'num_signals', 'freq_low',
                     'freq_high', 'amplitude_low', 'amplitude_high', 'scale',
                     'full_mnist']
        data_settings = dict((k, settings[k]) for k in data_vars if k in settings.keys())
        samples, pdf, labels = get_data(settings['data'], data_settings)
        if 'multivariate_mnist' in settings and settings['multivariate_mnist']:
            seq_length = samples.shape[1]
            samples = samples.reshape(-1, int(np.sqrt(seq_length)), int(np.sqrt(seq_length)))
        if 'normalise' in settings and settings['normalise']: # TODO this is a mess, fix
            print(settings['normalise'])
            norm = True
        else:
            norm = False
        if labels is None:
            train, vali, test = split(samples, [0.6, 0.2, 0.2], normalise=norm)
            train_labels, vali_labels, test_labels = None, None, None
        else:
            train, vali, test, labels_list = split(samples, [0.6, 0.2, 0.2], normalise=norm, labels=labels)
            train_labels, vali_labels, test_labels = labels_list
    labels = dict()
    if settings['data'] == 'threading':
        # threading labels are pandas objects; keep the raw arrays
        labels['train'], labels['vali'], labels['test'] = train_labels.values, vali_labels.values, test_labels.values
    else:
        labels['train'], labels['vali'], labels['test'] = train_labels, vali_labels, test_labels
    samples = dict()
    samples['train'], samples['vali'], samples['test'] = train, vali, test
    # futz around with labels
    # TODO refactor cause this is messy
    if 'one_hot' in settings and settings['one_hot'] and not settings['data_load_from']:
        if len(labels['train'].shape) == 1:
            # ASSUME labels go from 0 to max_val inclusive, find max-val
            max_val = int(np.max([labels['train'].max(), labels['test'].max(), labels['vali'].max()]))
            # now we have max_val + 1 dimensions
            print('Setting cond_dim to', max_val + 1, 'from', settings['cond_dim'])
            settings['cond_dim'] = max_val + 1
            print('Setting max_val to 1 from', settings['max_val'])
            settings['max_val'] = 1
            labels_oh = dict()
            for (k, v) in labels.items():
                A = np.zeros(shape=(len(v), settings['cond_dim']))
                A[np.arange(len(v)), (v).astype(int)] = 1
                labels_oh[k] = A
            labels = labels_oh
        else:
            assert settings['max_val'] == 1
            # this is already one-hot!
    if 'predict_labels' in settings and settings['predict_labels']:
        # bug fix: this module *is* data_utils -- the original called
        # data_utils.make_predict_labels, which raises NameError here
        samples, labels = make_predict_labels(samples, labels)
        print('Setting cond_dim to 0 from', settings['cond_dim'])
        settings['cond_dim'] = 0
    # update the settings dictionary to update erroneous settings
    # (mostly about the sequence length etc. - it gets set by the data!)
    settings['seq_length'] = samples['train'].shape[1]
    settings['num_samples'] = samples['train'].shape[0] + samples['vali'].shape[0] + samples['test'].shape[0]
    settings['num_signals'] = samples['train'].shape[2]
    settings['num_generated_features'] = samples['train'].shape[2]
    return samples, pdf, labels
def get_data(data_type, data_options=None):
    """
    Helper/wrapper function to get the requested data.

    data_type selects the loader/generator ('load', 'sine', 'mnist',
    'gp_rbf', 'linear', 'eICU_task', 'resampled_eICU', 'threading');
    data_options is a file path for 'load', or a dict of keyword arguments
    for the generators.  Returns (samples, pdf, labels); pdf and labels
    are None where not applicable.
    """
    labels = None
    pdf = None
    if data_type == 'load':
        # data_options is a path to a pickled dict with samples/pdf/labels
        data_dict = np.load(data_options).item()
        samples = data_dict['samples']
        pdf = data_dict['pdf']
        labels = data_dict['labels']
    elif data_type == 'sine':
        samples = sine_wave(**data_options)
    elif data_type == 'mnist':
        if data_options['full_mnist']:
            samples, labels = mnist()
        else:
            #samples, labels = load_resized_mnist_0_5(14)
            samples, labels = load_resized_mnist(14) # this is the 0-2 setting
    elif data_type == 'gp_rbf':
        print(data_options)
        samples, pdf = GP(**data_options, kernel='rbf')
    elif data_type == 'linear':
        samples, pdf = linear(**data_options)
    elif data_type == 'eICU_task':
        samples, labels = eICU_task()
    elif data_type == 'resampled_eICU':
        samples, labels = resampled_eICU(**data_options)
    elif data_type == 'threading':
        # KUKA threading-insertion dataset: force/torque + derived velocities
        features = ['fx', 'fy', 'fz', 'mx', 'my', 'mz']
        pos = ['x', 'y', 'z', 'rotx', 'roty', 'rotz']
        vel = ['vx', 'vy', 'vz', 'vrotx', 'vroty'] # , 'vrotz'] # not going to use this one for now
        dt = 0.012
        n_batches = 2
        def remove_offset(data):
            # subtract each force/torque channel's mean over its first 50
            # samples (sensor bias removal)
            for feature in features:
                # feature = 'fy'
                n = 50
                mean = np.mean(data[feature][:n])
                data[feature] = data[feature] - mean
            return data
        def generate_velocity(data):
            # finite-difference velocities from the pose columns (step dt)
            for feature in data[pos]:
                data['v' + feature] = data[feature].diff() / dt
                data['v' + feature][0] = 0.0
            return data
        def get_data_with_velocity():
            # load every batch/bolt CSV, de-bias, derive velocities, and
            # keep only the force + velocity columns
            forces = ['fx', 'fy', 'fz', 'mx', 'my', 'mz']
            vel = ['vx', 'vy', 'vz', 'vrotx', 'vroty']
            all_data = []
            for batch in range(n_batches):
                for bolt in range(40):
                    file = '~/kuka-ml-threading/dataset/dataset_new_iros21/new_dataset_with_linear_error/data_insertion/data_insertion_batch_' + \
                           str(batch).zfill(4) + '_bolt_' + str(bolt).zfill(2)
                    data = pd.read_csv(file + '.csv')
                    data = remove_offset(data)
                    # plt.plot(data[features])
                    # plt.legend(features)
                    # plt.title('Batch #' + str(batch) + ', Bolt #' + str(bolt))
                    # plt.show()
                    data = generate_velocity(data)
                    data.drop(columns=['Unnamed: 13'], inplace=True)
                    all_data.append(data[forces + vel].values)
            return all_data
        samples = get_data_with_velocity()
        labels = pd.read_csv(
            '~/kuka-ml-threading/dataset/dataset_new_iros21/new_dataset_with_linear_error/data_labels/labels.csv')
        # one label row per (batch, bolt) pair
        labels = labels.loc[:n_batches*40-1]
        # labels = OneHotEncoder().fit_transform(labels.values).toarray()
    else:
        raise ValueError(data_type)
    print('Generated/loaded', len(samples), 'samples from data-type', data_type)
    return samples, pdf, labels
def get_batch(samples, batch_size, batch_idx, labels=None):
    """
    Slice out batch number `batch_idx` (0-indexed) of `batch_size` samples.

    Returns (batch, None) when there are no labels, (batch, label_batch)
    for a single ndarray of labels, or a 3-tuple for a pair of label arrays.
    """
    lo = batch_idx * batch_size
    hi = lo + batch_size
    batch = samples[lo:hi]
    if labels is None:
        return batch, None
    if type(labels) == tuple:  # two sets of labels
        assert len(labels) == 2
        return batch, labels[0][lo:hi], labels[1][lo:hi]
    assert type(labels) == np.ndarray
    return batch, labels[lo:hi]
def normalise_data(train, vali, test, low=-1, high=1):
    """
    Whiten each signal channel to zero mean and unit standard deviation.

    Statistics are computed from train+vali only; the test split reuses
    them.  Data is num_samples x seq_length x num_signals.  The `low`/`high`
    parameters are accepted for interface compatibility but unused here.
    """
    pooled = np.vstack([train, vali])
    centre = np.mean(pooled, axis=(0, 1))
    spread = np.std(np.vstack([train - centre, vali - centre]), axis=(0, 1))
    return (train - centre) / spread, (vali - centre) / spread, (test - centre) / spread
def scale_data(train, vali, test, scale_range=(-1, 1)):
    """
    Min-max scale all three splits into `scale_range`, fitting the scaler
    on train+vali only (flattened to num_samples x seq_length*num_signals).
    """
    seq_len = train.shape[1]
    n_sig = train.shape[2]
    def _flat(arr):
        # collapse (n, seq, sig) -> (n, seq*sig) for the scaler
        return arr.reshape(-1, seq_len * n_sig)
    def _unflat(arr):
        return arr.reshape(-1, seq_len, n_sig)
    scaler = MinMaxScaler(feature_range=scale_range).fit(
        np.vstack([_flat(train), _flat(vali)]))
    return (_unflat(scaler.transform(_flat(train))),
            _unflat(scaler.transform(_flat(vali))),
            _unflat(scaler.transform(_flat(test))))
def split(samples, proportions, normalise=False, scale=False, labels=None, random_seed=None):
    """
    Return train/validation/test split.

    samples: array split along its first axis.
    proportions: [train, vali, test] fractions summing to 1.
    normalise / scale: mutually exclusive whitening options, applied with the
        same statistics across all three parts.
    labels: optional ndarray (or dict of ndarrays) split with the same
        permutation; when given, a fourth return value holds the split labels.
    random_seed: seeds both `random` and `numpy.random` for reproducibility.
    """
    if random_seed is not None:  # fix: was `!= None`
        random.seed(random_seed)
        np.random.seed(random_seed)
    # fix: use isclose -- proportions like [0.6, 0.2, 0.2] need not sum to
    # exactly 1 in floating point
    assert np.isclose(np.sum(proportions), 1)
    n_total = samples.shape[0]
    n_train = ceil(n_total*proportions[0])
    n_test = ceil(n_total*proportions[2])
    n_vali = n_total - (n_train + n_test)
    # permutation to shuffle the samples
    shuff = np.random.permutation(n_total)
    train_indices = shuff[:n_train]
    vali_indices = shuff[n_train:(n_train + n_vali)]
    test_indices = shuff[(n_train + n_vali):]
    # TODO when we want to scale we can just return the indices
    assert len(set(train_indices).intersection(vali_indices)) == 0
    assert len(set(train_indices).intersection(test_indices)) == 0
    assert len(set(vali_indices).intersection(test_indices)) == 0
    # split up the samples
    train = samples[train_indices]
    vali = samples[vali_indices]
    test = samples[test_indices]
    # apply the same normalisation scheme to all parts of the split
    if normalise:
        if scale: raise ValueError(normalise, scale) # mutually exclusive
        train, vali, test = normalise_data(train, vali, test)
    elif scale:
        train, vali, test = scale_data(train, vali, test)
    if labels is None:
        return train, vali, test
    else:
        print('Splitting labels...')
        if type(labels) == np.ndarray:
            train_labels = labels[train_indices]
            vali_labels = labels[vali_indices]
            test_labels = labels[test_indices]
            labels_split = [train_labels, vali_labels, test_labels]
        elif type(labels) == dict:
            # more than one set of labels! (weird case)
            labels_split = dict()
            for (label_name, label_set) in labels.items():
                train_labels = label_set[train_indices]
                vali_labels = label_set[vali_indices]
                test_labels = label_set[test_indices]
                labels_split[label_name] = [train_labels, vali_labels, test_labels]
        else:
            raise ValueError(type(labels))
        return train, vali, test, labels_split
def make_predict_labels(samples, labels):
    """
    Given dictionaries of samples and labels (already normalised/split),
    append each label -- rescaled from {0, 1} to {-1, +1} -- as additional
    constant-in-time signal channels, and null out the labels.
    """
    print('Appending label to samples')
    assert labels is not None
    label_example = labels['train']
    num_labels = label_example.shape[1] if len(label_example.shape) > 1 else 1
    seq_length = samples['train'].shape[1]
    num_signals = samples['train'].shape[2]
    new_samples = dict()
    new_labels = dict()
    for key, X in samples.items():
        lab = labels[key]
        # slow code because i am sick and don't want to try to be smart
        augmented = np.zeros(shape=(X.shape[0], seq_length, num_signals + num_labels))
        for idx in range(X.shape[0]):
            label_channel = np.array(seq_length*[(2*lab[idx] - 1).reshape(num_labels)])
            augmented[idx, :, :] = np.hstack([X[idx, :, :], label_channel])
        new_samples[key] = augmented
        new_labels[key] = None
    return new_samples, new_labels
# --- specific data-types --- #
def eICU_task(predict_label=False):
    """
    Load the eICU data for the extreme-value prediction task.

    NOTE(review): the path is redacted; `predict_label` is accepted but unused.
    Each sample array is reshaped to (n, 16, 4).
    """
    path = 'REDACTED'
    raw = np.load(path).item()
    # repackage into the samples/labels dict convention used elsewhere
    labels = {name: raw['Y_' + name] for name in ('train', 'vali', 'test')}
    samples = {name: raw['X_' + name].reshape(-1, 16, 4)
               for name in ('train', 'vali', 'test')}
    return samples, labels
def mnist(randomize=False):
    """ Load and serialise MNIST.

    Loads ./data/mnist_train.npy (creating it from the csv on first run,
    with pixels rescaled to [-1, 1]).  Returns (samples, labels) where
    samples has shape (n, 784, 1).  With randomize=True a fixed random
    permutation is applied to the pixel columns.
    """
    try:
        train = np.load('./data/mnist_train.npy')
        print('Loaded mnist from .npy')
    except IOError:
        print('Failed to load MNIST data from .npy, loading from csv')
        # read from the csv
        train = np.loadtxt(open('./data/mnist_train.csv', 'r'), delimiter=',')
        # scale samples from 0 to 1
        train[:, 1:] /= 255
        # scale from -1 to 1
        train[:, 1:] = 2*train[:, 1:] - 1
        # save to the npy
        np.save('./data/mnist_train.npy', train)
    # the first column is labels, kill them
    labels = train[:, 0]
    samples = train[:, 1:]
    if randomize:
        # not needed for GAN experiments...
        print('Applying fixed permutation to mnist digits.')
        fixed_permutation = np.random.permutation(28*28)
        # bug fix: the original permuted `train`, whose column 0 is the
        # label (and which has 785 columns) -- permute the pixel matrix
        samples = samples[:, fixed_permutation]
    samples = samples.reshape(-1, 28*28, 1) # add redundant additional signals
    return samples, labels
def load_resized_mnist_0_5(new_size, randomize=False):
    """ Load resised mnist digits from 0 to 5, rescaled to new_size x new_size
    and to the range [-1, 1]; results are cached to ./data/.

    NOTE(review): `imresize` is unresolvable here -- its import is commented
    out at the top of this file (and it was removed from SciPy >= 1.3), so
    the new_size != 28 branch raises NameError.  `randomize` is unused.
    """
    samples, labels = mnist()
    print('Resizing...')
    samples = samples[np.in1d(labels,[0,1,2,3,4,5])]
    labels = labels[np.in1d(labels,[0,1,2,3,4,5])]
    if new_size != 28:
        resized_imgs = [imresize(img.reshape([28,28]), [new_size,new_size], interp='lanczos').ravel()[np.newaxis].T
                        for img in samples]
        resized_imgs = np.array(resized_imgs)
        resized_imgs = resized_imgs.astype(float)
        resized_imgs /= 255.0
        resized_imgs = 2*resized_imgs - 1
        np.save('./data/resized_mnist_1_5_samples.npy', resized_imgs)
        np.save('./data/resized_mnist_1_5_labels.npy', labels)
        return resized_imgs, labels
    else:
        return samples, labels
def load_resized_mnist(new_size, from_to_digits=(0,2), randomize=False):
    """ Load resised mnist digits in the inclusive range from_to_digits,
    rescaled to new_size x new_size and to [-1, 1]; cached to ./data/.

    NOTE(review): as with load_resized_mnist_0_5, `imresize` is unresolvable
    (import commented out at file top; removed from SciPy >= 1.3), so the
    new_size != 28 branch raises NameError.  `randomize` is unused.
    """
    samples, labels = mnist()
    print('Resizing...')
    samples = samples[np.in1d(labels,np.arange(from_to_digits[0], from_to_digits[1]+1))]
    labels = labels[np.in1d(labels,np.arange(from_to_digits[0], from_to_digits[1]+1))]
    if new_size != 28:
        resized_imgs = [imresize(img.reshape([28,28]), [new_size,new_size], interp='lanczos').ravel()[np.newaxis].T
                        for img in samples]
        resized_imgs = np.array(resized_imgs)
        resized_imgs = resized_imgs.astype(float)
        resized_imgs /= 255.0
        resized_imgs = 2*resized_imgs - 1
        np.save('./data/resized_mnist_'+ str(from_to_digits[0]) + '_' + str(from_to_digits[1]) + '_5_samples.npy', resized_imgs)
        np.save('./data/resized_mnist_'+ str(from_to_digits[0]) + '_' + str(from_to_digits[1]) + '_labels.npy', labels)
        return resized_imgs, labels
    else:
        return samples, labels
def resampled_eICU(seq_length=16, resample_rate_in_min=15,
        variables=['sao2', 'heartrate', 'respiration', 'systemicmean'], **kwargs):
    """
    Load (or build and cache) eICU vital signs resampled every
    `resample_rate_in_min` minutes, keeping the first `seq_length`
    observations per patient.  Returns (samples, pids) where samples is
    [n_pats, seq_length, len(variables)].

    NOTE(review): `variables` is a mutable default argument -- it is only
    read here, but callers should not mutate the returned/shared list.
    """
    print('Getting resampled eICU data')
    try:
        data = np.load(paths.eICU_proc_dir + 'eICU_' + str(resample_rate_in_min) + '.npy').item()
        samples = data['samples']
        pids = data['pids']
        print('Loaded from file!')
        return samples, pids
    except FileNotFoundError:
        # in this case, we go into the main logic of the function
        pass
    resampled_data_path = paths.eICU_proc_dir + 'complete_resampled_pats_' + str(resample_rate_in_min) + 'min.csv'
    resampled_pids_path = paths.eICU_proc_dir + 'cohort_complete_resampled_pats_' + str(resample_rate_in_min) + 'min.csv'
    if not os.path.isfile(resampled_data_path):
        generate_eICU_resampled_patients(resample_factor_in_min=resample_rate_in_min, upto_in_minutes=None)
        get_cohort_of_complete_downsampled_patients(time_in_hours=1.5*resample_rate_in_min*seq_length, resample_factor_in_min=resample_rate_in_min)
    pids = set(np.loadtxt(resampled_pids_path, dtype=int))
    df = pd.read_csv(resampled_data_path)
    # restrict to variables
    df_restricted = df.loc[:, variables + ['offset', 'pid']]
    # restrict to patients in the "good list"
    df_restricted = df_restricted.where(df_restricted.pid.isin(pids)).dropna()
    # assert no negative offsets
    assert np.all(df_restricted.offset >= 0)
    # restrict to 1.5 time the region length
    # df_restricted = df_restricted.loc[np.all([df_restricted.offset <= 1.5*resample_rate_in_min*seq_length, df_restricted.offset >= 0], axis=0), :]
    df_restricted = df_restricted.loc[df_restricted.offset <= 1.5*resample_rate_in_min*seq_length, :]
    # for each patient, return the first seq_length observations
    patient_starts = df_restricted.groupby('pid').head(seq_length)
    n_pats_prefilter = len(set(patient_starts.pid))
    # filter out patients who have fewer than seq_length observations
    patient_starts = patient_starts.groupby('pid').filter(lambda x: x.pid.count() == seq_length)
    n_pats_postfilter = len(set(patient_starts.pid))
    print('Removed', n_pats_prefilter - n_pats_postfilter, 'patients with <', seq_length, 'observations in the first', 1.5*resample_rate_in_min*seq_length, 'minutes, leaving', n_pats_postfilter, 'patients remaining.')
    # convert to samples - shape is [n_pats, seq_length, num_signals]
    n_patients = n_pats_postfilter
    num_signals = len(variables)
    samples = np.empty(shape=(n_patients, seq_length, num_signals))
    pats_grouped = patient_starts.groupby('pid')
    pids = []
    for (i, patient) in enumerate(pats_grouped.groups):
        samples[i, :, :] = pats_grouped.get_group(patient).loc[:, variables].values
        pids.append(patient)
    assert i == n_patients - 1
    # NOTE(review): this asserts the *mean* of a boolean array (truthy unless
    # every entry is NaN) -- presumably np.mean(np.isnan(samples)) == 0 was
    # intended; confirm before relying on it as a NaN check
    assert np.mean(np.isnan(samples) == 0)
    np.save(paths.eICU_proc_dir + 'eICU_' + str(resample_rate_in_min) + '.npy', {'samples': samples, 'pids': pids})
    print('Saved to file!')
    return samples, pids
def sine_wave(seq_length=30, num_samples=28*5*100, num_signals=1,
              freq_low=1, freq_high=5, amplitude_low = 0.1, amplitude_high=0.9, **kwargs):
    """
    Generate sinusoids with per-signal random frequency in
    [freq_low, freq_high], amplitude in [amplitude_low, amplitude_high],
    and phase offset in [-pi, pi].

    Returns an array of shape num_samples x seq_length x num_signals.
    Fix: the original passed low=freq_high, high=freq_low (and likewise
    for amplitude) to np.random.uniform -- distribution-equivalent, but
    the keyword arguments were swapped.
    """
    ix = np.arange(seq_length) + 1
    samples = []
    for _ in range(num_samples):
        signals = []
        for _ in range(num_signals):  # fix: no longer shadows the outer loop variable
            f = np.random.uniform(low=freq_low, high=freq_high)              # frequency
            A = np.random.uniform(low=amplitude_low, high=amplitude_high)    # amplitude
            offset = np.random.uniform(low=-np.pi, high=np.pi)               # phase offset
            signals.append(A*np.sin(2*np.pi*f*ix/float(seq_length) + offset))
        samples.append(np.array(signals).T)
    # the shape of the samples is num_samples x seq_length x num_signals
    samples = np.array(samples)
    return samples
def periodic_kernel(T, f=1.45/30, gamma=7.0, A=0.1):
    """
    Gram matrix of the periodic kernel over all pairs of time points in T
    (there should be seq_length of those).

    f: frequency -- higher means more peaks.
    gamma: scale -- smaller makes the covariance peaks shallower (smoother).
    A: amplitude (the value on the diagonal).

    Heuristic for non-singular rbf:
    periodic_kernel(np.arange(len), f=1.0/(0.79*len), A=1.0, gamma=len/4.0)
    """
    pairwise = squareform(pdist(T.reshape(-1, 1)))
    return A * np.exp(-gamma * np.sin(2 * np.pi * pairwise * f) ** 2)
def GP(seq_length=30, num_samples=28*5*100, num_signals=1, scale=0.1, kernel='rbf', **kwargs):
    """
    Draw samples from a zero-mean Gaussian process over integer time points.

    kernel: 'rbf' (sklearn rbf_kernel with gamma=scale) or 'periodic'.
    The covariance is shrunk by a factor of 0.2; its determinant is printed
    as a singularity diagnostic.  Returns (samples, logpdf) where samples
    has shape num_samples x seq_length x num_signals.
    """
    samples = np.empty(shape=(num_samples, seq_length, num_signals))
    #T = np.arange(seq_length)/seq_length # note, between 0 and 1
    time_index = np.arange(seq_length)  # note, not between 0 and 1
    if kernel == 'periodic':
        gram = periodic_kernel(time_index)
    elif kernel == 'rbf':
        gram = rbf_kernel(time_index.reshape(-1, 1), gamma=scale)
    else:
        raise NotImplementedError
    gram *= 0.2  # scale the covariance
    mu = np.zeros(seq_length)
    print(np.linalg.det(gram))
    distribution = multivariate_normal(mean=np.zeros(gram.shape[0]), cov=gram)
    pdf = distribution.logpdf
    # fill each signal channel with independent GP draws
    for channel in range(num_signals):
        samples[:, :, channel] = distribution.rvs(size=num_samples)
    return samples, pdf
def linear_marginal_likelihood(Y, X, a0, b0, mu0, lambda0, log=True, **kwargs):
    """
    Marginal likelihood for linear model.
    See https://en.wikipedia.org/wiki/Bayesian_linear_regression pretty much

    Y: (num_samples, seq_length, 1) responses; X: (seq_length, k) design.
    a0/b0: inverse-gamma prior on the noise; mu0/lambda0: Gaussian prior
    mean and precision on the coefficients.  Returns per-sample (log)
    marginal likelihoods.
    """
    seq_length = Y.shape[1] # note, y is just a line (one channel) TODO
    n = seq_length
    an = a0 + 0.5*n  # posterior shape
    XtX = np.dot(X.T, X)
    lambdan = XtX + lambda0  # posterior precision
    prefactor = (2*np.pi)**(-0.5*n)
    dets = np.sqrt(np.linalg.det(lambda0)/np.linalg.det(lambdan))
    marginals = np.empty(Y.shape[0])
    for (i, y) in enumerate(Y):
        y_reshaped = y.reshape(seq_length)
        # OLS estimate, then posterior mean and rate
        betahat = np.dot(np.linalg.inv(XtX), np.dot(X.T, y_reshaped))
        mun = np.dot(np.linalg.inv(lambdan), np.dot(XtX, betahat) + np.dot(lambda0, mu0))
        bn = b0 + 0.5*(np.dot(y_reshaped.T, y_reshaped) + np.dot(np.dot(mu0.T, lambda0), mu0) - np.dot(np.dot(mun.T, lambdan), mun))
        bs = (b0**a0)/(bn**an)
        gammas = gamma(an)/gamma(a0)
        marginals[i] = prefactor*dets*bs*gammas
    if log:
        marginals = np.log(marginals)
    return marginals
def linear(seq_length=30, num_samples=28*5*100, a0=10, b0=0.01, k=2, **kwargs):
    """
    Generate data from linear trend from probabilistic model.
    The invgamma function in scipy corresponds to wiki defn. of inverse gamma:
    scipy a = wiki alpha = a0
    scipy scale = wiki beta = b0
    k is the number of regression coefficients (just 2 here, slope and intercept)

    Returns (samples, pdf): samples of shape (num_samples, seq_length, 1)
    and a partial of linear_marginal_likelihood.
    """
    # design matrix: [time index, intercept column]
    T = np.zeros(shape=(seq_length, 2))
    T[:, 0] = np.arange(seq_length)
    T[:, 1] = 1 # equivalent to X
    lambda0 = 0.01*np.eye(k) # diagonal covariance for beta
    y = np.zeros(shape=(num_samples, seq_length, 1))
    sigmasq = invgamma.rvs(a=a0, scale=b0, size=num_samples)
    increasing = np.random.choice([-1, 1], num_samples) # flip slope
    for n in range(num_samples):
        sigmasq_n = sigmasq[n]
        offset = np.random.uniform(low=-0.5, high=0.5) # todo limits
        mu0 = np.array([increasing[n]*(1.0-offset)/seq_length, offset])
        beta = multivariate_normal.rvs(mean=mu0, cov=sigmasq_n*lambda0)
        epsilon = np.random.normal(loc=0, scale=np.sqrt(sigmasq_n), size=seq_length)
        y[n, :, :] = (np.dot(T, beta) + epsilon).reshape(seq_length, 1)
    # NOTE(review): mu0 here is whatever the *last* loop iteration produced
    # (it is sample-specific and random) -- the bound marginal likelihood
    # therefore uses an arbitrary prior mean; confirm this is intended
    marginal = partial(linear_marginal_likelihood, X=T, a0=a0, b0=b0, mu0=mu0, lambda0=lambda0)
    samples = y
    pdf = marginal
    return samples, pdf
def changepoint_pdf(Y, cov_ms, cov_Ms):
    """
    Log-density of a single (seq_length, 1) sequence under every candidate
    changepoint m in [seq_length/2, seq_length - 1).

    The pre-break segment is scored as a zero-mean Gaussian with covariance
    cov_ms[i]; the post-break segment as a Gaussian whose mean is a line from
    the last pre-break value towards a scaled version of the pre-break minimum,
    with covariance cov_Ms[i].

    Returns a list of log-densities, one per candidate breakpoint.
    """
    seq_length = Y.shape[0]
    log_densities = []
    candidate_breaks = range(int(seq_length / 2), seq_length - 1)
    for idx, brk in enumerate(candidate_breaks):
        before = Y[:brk, 0]
        after = Y[brk:, 0]
        tail_len = seq_length - brk
        # Mean function for the post-break part: linear ramp from the last
        # observed value towards a scaled version of the pre-break minimum.
        lowest = np.min(before)
        start_val = before[-1]
        scale = (1.0 - tail_len / seq_length) if lowest > 1 else (1.0 + tail_len / seq_length)
        tail_mean = np.linspace(start_val, scale * lowest, tail_len)
        before_ll = multivariate_normal.logpdf(before, mean=np.zeros(brk), cov=cov_ms[idx])
        after_ll = multivariate_normal.logpdf(after, mean=tail_mean, cov=cov_Ms[idx])
        log_densities.append(before_ll + after_ll)
    return log_densities
def changepoint_cristobal(seq_length=30, num_samples=28*5*100):
    """
    Porting Cristobal's code for generating data with a changepoint.

    NOTE(review): this function raises NotImplementedError immediately, so
    everything below the raise is unreachable. The dead code also references
    names that are never defined here (n_samples, n_steps) and ignores the
    seq_length/num_samples arguments — it would need real porting work before
    the raise can be removed.
    """
    raise NotImplementedError
    # --- unreachable draft implementation below (kept verbatim) ---
    basal_values_signal_a = np.random.randn(n_samples) * 0.33
    trends_seed_a = np.random.randn(n_samples) * 0.005
    trends = np.array([i*trends_seed_a for i in range(51)[1:]]).T
    signal_a = (basal_values_signal_a + trends.T).T
    time_noise = np.random.randn(n_samples, n_steps) * 0.01
    signal_a = time_noise + signal_a
    basal_values_signal_b = np.random.randn(n_samples) * 0.33
    trends_seed_b = np.random.randn(n_samples) * 0.005
    trends = np.array([i*trends_seed_b for i in range(51)[1:]]).T
    signal_b = (basal_values_signal_b + trends.T).T
    time_noise = np.random.randn(n_samples, n_steps) * 0.01
    signal_b = time_noise + signal_b
    signal_a = np.clip(signal_a, -1, 1)
    signal_b = np.clip(signal_b, -1, 1)
    # the change in the trend is based on the top extreme values of each
    # signal in the first half
    time_steps_until_change = np.max(np.abs(signal_a), axis=1) + np.max(np.abs(signal_b), axis=1)*100
    # noise added to the starting point
    time_steps_until_change += np.random.randn(n_samples) * 5
    time_steps_until_change = np.round(time_steps_until_change)
    time_steps_until_change = np.clip(time_steps_until_change, 0, n_steps-1)
    time_steps_until_change = n_steps - 1 - time_steps_until_change
    trends = np.array([i*trends_seed_a for i in range(101)[51:]]).T
    signal_a_target = (basal_values_signal_a + trends.T).T
    time_noise = np.random.randn(n_samples, n_steps) * 0.01
    signal_a_target = time_noise + signal_a_target
    trends = np.array([i*trends_seed_b for i in range(101)[51:]]).T
    signal_b_target = (basal_values_signal_b + trends.T).T
    time_noise = np.random.randn(n_samples, n_steps) * 0.01
    signal_b_target = time_noise + signal_b_target
    signal_multipliers = []
    for ts in time_steps_until_change:
        signal_multiplier = []
        if ts > 0:
            for i in range(int(ts)):
                signal_multiplier.append(1)
                i += 1
        else:
            i = 0
        multiplier = 1.25
        while(i<n_steps):
            signal_multiplier.append(multiplier)
            multiplier += 0.25
            i+=1
        signal_multipliers.append(signal_multiplier)
    signal_multipliers = np.array(signal_multipliers)
    for s_idx, signal_choice in enumerate(basal_values_signal_b > basal_values_signal_a):
        if signal_choice == False:
            signal_a_target[s_idx] *= signal_multipliers[s_idx]
        else:
            signal_b_target[s_idx] *= signal_multipliers[s_idx]
    signal_a_target = np.clip(signal_a_target, -1, 1)
    signal_b_target = np.clip(signal_b_target, -1, 1)
    # merging signals
    signal_a = np.swapaxes(signal_a[np.newaxis].T, 0, 1)
    signal_b = np.swapaxes(signal_b[np.newaxis].T, 0, 1)
    signal_a_target = np.swapaxes(signal_a_target[np.newaxis].T, 0, 1)
    signal_b_target = np.swapaxes(signal_b_target[np.newaxis].T, 0, 1)
    input_seqs = np.dstack((signal_a,signal_b))
    target_seqs = np.dstack((signal_a_target,signal_b_target))
    return False
def changepoint(seq_length=30, num_samples=28*5*100):
    """
    Generate data from two GPs, roughly speaking.
    The first part (up to m) is as a normal GP.
    The second part (m to end) has a linear downwards trend conditioned on the
    first part.

    Returns:
        samples: (num_samples, seq_length, 1) generated sequences.
        pdf: changepoint_pdf with the per-breakpoint covariances bound in.
        m_s: the sampled breakpoint index per sample.
    """
    print('Generating samples from changepoint...')
    T = np.arange(seq_length)
    # sample breakpoint from latter half of sequence
    m_s = np.random.choice(np.arange(int(seq_length/2), seq_length-1), size=num_samples)
    samples = np.zeros(shape=(num_samples, seq_length, 1))
    # kernel parameters and stuff
    gamma=5.0/seq_length
    A = 0.01
    sigmasq = 0.8*A
    lamb = 0.0  # if non-zero, cov_M risks not being positive semidefinite...
    kernel = partial(rbf_kernel, gamma=gamma)
    # Precompute covariances once per candidate breakpoint m (multiple values per m).
    N_ms = []
    cov_ms = []
    cov_Ms = []
    pdfs = []
    for m in range(int(seq_length/2), seq_length-1):
        # first part: RBF-kernel covariance over the first m time steps
        M = seq_length - m
        T_m = T[:m].reshape(m, 1)
        cov_m = A*kernel(T_m.reshape(-1, 1), T_m.reshape(-1, 1))
        cov_ms.append(cov_m)
        # the second part: with lamb == 0 this reduces to sigmasq * I
        T_M = T[m:].reshape(M, 1)
        cov_mM = kernel(T_M.reshape(-1, 1), T_m.reshape(-1, 1))
        cov_M = sigmasq*(np.eye(M) - lamb*np.dot(np.dot(cov_mM, np.linalg.inv(cov_m)), cov_mM.T))
        cov_Ms.append(cov_M)
    for n in range(num_samples):
        m = m_s[n]
        M = seq_length-m
        # sample the first m
        cov_m = cov_ms[m - int(seq_length/2)]
        Xm = multivariate_normal.rvs(cov=cov_m)
        # generate mean function for second: ramp from the last observed value
        # towards a scaled version of the first-part minimum
        Xmin = np.min(Xm)
        initial_val = Xm[-1]
        if Xmin > 1:
            final_val = (1.0 - M/seq_length)*Xmin
        else:
            final_val = (1.0 + M/seq_length)*Xmin
        mu_M = np.linspace(initial_val, final_val, M)
        # sample the rest
        cov_M = cov_Ms[m - int(seq_length/2)]
        XM = multivariate_normal.rvs(mean=mu_M, cov=cov_M)
        # combine the sequence
        # NOTE: just one dimension
        samples[n, :, 0] = np.concatenate([Xm, XM])
    pdf = partial(changepoint_pdf, cov_ms=cov_ms, cov_Ms=cov_Ms)
    return samples, pdf, m_s
def resample_eICU_patient(pid, resample_factor_in_min, variables, upto_in_minutes):
    """
    Resample a *single* patient.

    Reads the patient's periodic vitals from the HDF store, resamples them to a
    fixed time grid (median over each bin) and returns the resulting frame with
    integer 'pid' and 'offset' (minutes) columns, or None if the patient has no
    vitals rows.
    """
    pat_df = pd.read_hdf(paths.eICU_hdf_dir + '/vitalPeriodic.h5',
                         where='patientunitstayid = ' + str(pid),
                         columns=['observationoffset', 'patientunitstayid'] + variables,
                         mode='r')
    # sometimes it's empty
    if pat_df.empty:
        return None
    if not upto_in_minutes is None:
        # NOTE(review): this slices on the default integer row index (before
        # observationoffset becomes the index), and multiplies by 60 although
        # the argument is named "in minutes" — confirm intended semantics.
        pat_df = pat_df.loc[0:upto_in_minutes*60]
    # convert the offset to a TimedeltaIndex (necessary for resampling)
    pat_df.observationoffset = pd.TimedeltaIndex(pat_df.observationoffset, unit='m')
    pat_df.set_index('observationoffset', inplace=True)
    pat_df.sort_index(inplace=True)
    # resample by time
    pat_df_resampled = pat_df.resample(str(resample_factor_in_min) + 'T').median()  # pandas ignores NA in median by default
    # rename pid, cast to int
    pat_df_resampled.rename(columns={'patientunitstayid': 'pid'}, inplace=True)
    pat_df_resampled['pid'] = np.int32(pat_df_resampled['pid'])
    # get offsets in minutes from index
    pat_df_resampled['offset'] = np.int32(pat_df_resampled.index.total_seconds()/60)
    return pat_df_resampled
def generate_eICU_resampled_patients(resample_factor_in_min=15,
                                     upto_in_minutes=None):
    """
    Generates a dataframe with resampled patients. One sample every
    "resample_factor_in_min" minutes.

    Streams each successfully resampled patient to
    resampled_pats<k>min.csv (header written once, with the first patient) and
    appends the pids of patients without vitals to pids_missing_vitals.txt.

    Returns:
        True when the sweep over all patients completes.
    """
    pids = set(np.loadtxt(paths.eICU_proc_dir + 'pids.txt', dtype=int))
    exclude_pids = set(np.loadtxt(paths.eICU_proc_dir + 'pids_missing_vitals.txt', dtype=int))
    print('Excluding', len(exclude_pids), 'patients for not having vitals information')
    pids = pids.difference(exclude_pids)
    variables = ['sao2', 'heartrate', 'respiration', 'systemicmean']
    num_pat = 0
    num_miss = 0
    f = None  # output CSV is opened lazily so the header is written exactly once
    f_miss = open(paths.eICU_proc_dir + 'pids_missing_vitals.txt', 'a')
    try:
        for pid in pids:  # have to go patient by patient
            pat_df_resampled = resample_eICU_patient(pid, resample_factor_in_min, variables, upto_in_minutes)
            if pat_df_resampled is None:
                f_miss.write(str(pid) + '\n')
                num_miss += 1
                continue
            if f is None:
                f = open(paths.eICU_proc_dir + 'resampled_pats' + str(resample_factor_in_min) + 'min.csv', 'w')
                pat_df_resampled.to_csv(f, header=True, index=False)
            else:
                pat_df_resampled.to_csv(f, header=False, index=False)
            num_pat += 1
            if num_pat % 100 == 0:
                print(num_pat)
                f.flush()
                f_miss.flush()
    finally:
        # BUG FIX: both file handles were previously left open for the process
        # lifetime (resource leak, and buffered rows could be lost on error).
        if f is not None:
            f.close()
        f_miss.close()
    print('Acquired data on', num_pat, 'patients.')
    print('Skipped', num_miss, 'patients.')
    return True
def get_cohort_of_complete_downsampled_patients(time_in_hours=4, resample_factor_in_min=15):
    """
    Finds the set of patients that have no missing data during the first "time_in_hours".

    Reads the resampled CSV produced by generate_eICU_resampled_patients, drops
    patients with any negative offset, restricts to the first time_in_hours,
    and keeps patients with no NaNs in the selected vitals. Saves both the
    cohort pids and the corresponding full rows.

    Returns:
        True on success, False if fewer than 2 patients qualify.
    """
    resampled_pats = pd.read_csv(paths.eICU_proc_dir + 'resampled_pats' + str(resample_factor_in_min) + 'min.csv')
    time_in_minutes = time_in_hours * 60
    # delete patients with any negative offset
    print('Deleting patients with negative offsets...')
    df_posoffset = resampled_pats.groupby('pid').filter(lambda x: np.all(x.offset >= 0))
    # restrict time consideration
    print('Restricting to offsets below', time_in_minutes)
    df = df_posoffset.loc[df_posoffset.offset <= time_in_minutes]
    #variables = ['sao2', 'heartrate', 'respiration', 'systemicmean']
    variables = ['sao2', 'heartrate', 'respiration']
    # patients with no missing values in those variables (this is slow)
    print('Finding patients with no missing values in', ','.join(variables))
    good_patients = df.groupby('pid').filter(lambda x: np.all(x.loc[:, variables].isnull().sum() == 0))
    # extract the pids, save the cohort
    cohort = good_patients.pid.drop_duplicates()
    if cohort.shape[0] < 2:
        print('ERROR: not enough patients in cohort.', cohort.shape[0])
        return False
    else:
        print('Saving...')
        cohort.to_csv(paths.eICU_proc_dir + 'cohort_complete_resampled_pats_' + str(resample_factor_in_min) + 'min.csv', header=False, index=False)
        # save the full data (not just cohort)
        good_patients.to_csv(paths.eICU_proc_dir + 'complete_resampled_pats_' + str(resample_factor_in_min) + 'min.csv', index=False)
        return True
def get_eICU_with_targets(use_age=False, use_gender=False, save=False):
    """
    Load resampled eICU data and get static prediction targets from demographics
    (patients) file.

    Args:
        use_age: include (rescaled) age as an additional real-valued target.
        use_gender: include binarised gender as an additional target.
        save: additionally dump one-hot targets + flattened features to CSV.

    Returns:
        (train_seqs, vali_seqs, test_seqs,
         train_targets, vali_targets, test_targets,
         train_targets_oh, vali_targets_oh, test_targets_oh)
    """
    if use_age: print('Using age!')
    if use_gender: print('Using gender!')
    if save: print('Save!')
    # load resampled eICU data (the labels are the patientunitstayids)
    samples, pdf, labels = get_data('resampled_eICU', {})
    # load patients static information
    eICU_dir = 'REDACTED'
    pat_dfs = pd.read_hdf(eICU_dir + '/patient.h5', mode='r')
    # keep only static information of patients that are in the resampled table
    pat_dfs = pat_dfs[pat_dfs.patientunitstayid.isin(labels)]
    # reordering df to have the same order as samples and labels
    pat_dfs.set_index('patientunitstayid', inplace=True)
    # BUG FIX: DataFrame.reindex returns a new frame (it is not in-place); the
    # result used to be discarded, so rows stayed in their original order and
    # the label-alignment assert below relied on luck.
    pat_dfs = pat_dfs.reindex(labels)
    # target variables to keep. For now we don't use hospitaldischargeoffset since it is the only integer variable.
    #target_vars = ['hospitaldischargeoffset', 'hospitaldischargestatus', 'apacheadmissiondx', 'hospitaldischargelocation', 'unittype', 'unitadmitsource']
    real_vars = ['age']
    binary_vars = ['hospitaldischargestatus', 'gender']
    categorical_vars = ['apacheadmissiondx', 'hospitaldischargelocation', 'unittype', 'unitadmitsource']
    target_vars = categorical_vars + ['hospitaldischargestatus']
    if use_age: target_vars += ['age']
    if use_gender: target_vars += ['gender']
    targets_df = pat_dfs.loc[:, target_vars]
    # remove patients by criteria:
    # missing data in any target
    targets_df.dropna(how='any', inplace=True)
    if use_age:
        # age below 18 or above 89
        targets_df = targets_df[targets_df.age != '> 89']  # yes, some ages are strings
        targets_df.age = list(map(int, targets_df.age))
        targets_df = targets_df[targets_df.age >= 18]
    if use_gender:
        # remove non-binary genders (sorry!)
        targets_df['gender'] = targets_df['gender'].replace(['Female', 'Male', 'Other', 'Unknown'], [0, 1, -1, -1])
        targets_df = targets_df[targets_df.gender >= 0]
    # record patients to keep
    keep_indices = [i for (i, pid) in enumerate(labels) if pid in targets_df.index]
    assert len(keep_indices) == targets_df.shape[0]
    new_samples = samples[keep_indices]
    new_labels = np.array(labels)[keep_indices]
    # triple check the labels are correct
    assert np.array_equal(targets_df.index, new_labels)
    # get non-one-hot targets (strings)
    targets = targets_df.values
    # one hot encoding of categorical variables
    dummies = pd.get_dummies(targets_df[categorical_vars], dummy_na=True)
    targets_df_oh = pd.DataFrame()
    targets_df_oh[dummies.columns] = dummies
    # convert binary variables to one-hot, too
    targets_df_oh['hospitaldischargestatus'] = targets_df['hospitaldischargestatus'].replace(['Alive', 'Expired'], [1, 0])
    if use_gender:
        targets_df_oh['gender'] = targets_df['gender']  # already binarised
    if use_age:
        targets_df_oh['age'] = 2*targets_df['age']/89 - 1  # 89 is max
    # drop dummy columns marking missing data (they should be empty)
    nancols = [col for col in targets_df_oh.columns if col.endswith('nan')]
    assert np.all(targets_df_oh[nancols].sum() == 0)
    targets_df_oh.drop(nancols, axis=1, inplace=True)
    targets_oh = targets_df_oh.values
    if save:
        # merge targets with flattened training data, for LR saving
        assert new_samples.shape[0] == targets_df_oh.shape[0]
        flat_samples = new_samples.reshape(new_samples.shape[0], -1)
        features_df = pd.DataFrame(flat_samples)
        features_df.index = targets_df_oh.index
        features_df.columns = ['feature_' + str(i) for i in range(features_df.shape[1])]
        all_data = pd.concat([targets_df_oh, features_df], axis=1)
        all_data.to_csv('./data/eICU_with_targets.csv')
    # do the split
    proportions = [0.6, 0.2, 0.2]
    labels = {'targets': targets, 'targets_oh': targets_oh}
    train_seqs, vali_seqs, test_seqs, labels_split = split(new_samples, proportions, scale=True, labels=labels)
    train_targets, vali_targets, test_targets = labels_split['targets']
    train_targets_oh, vali_targets_oh, test_targets_oh = labels_split['targets_oh']
    return train_seqs, vali_seqs, test_seqs, train_targets, vali_targets, test_targets, train_targets_oh, vali_targets_oh, test_targets_oh
### --- TSTR ---- ####
def generate_synthetic(identifier, epoch, n_train, predict_labels=False):
    """
    - Load a CGAN pretrained model
    - Load its corresponding test data (+ labels)
    - Generate num_examples synthetic training data (+labels)
    - Save to format easy for training classifier on (see Eval)

    Saves everything into ./experiments/tstr/<identifier>_<epoch>.data.npy
    and returns True.
    """
    settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
    if not settings['cond_dim'] > 0:
        # no conditioning dimension => labels must be predicted by the model
        assert settings['predict_labels']
        assert predict_labels
    # get the test data
    print('Loading test (real) data for', identifier)
    # NOTE(review): np.load on a pickled dict may need allow_pickle=True on
    # newer numpy versions — confirm against the numpy version pinned here.
    data_dict = np.load('./experiments/data/' + identifier + '.data.npy').item()
    test_data = data_dict['samples']['test']
    test_labels = data_dict['labels']['test']
    train_data = data_dict['samples']['train']
    train_labels = data_dict['labels']['train']
    print('Loaded', test_data.shape[0], 'test examples')
    print('Sampling', n_train, 'train examples from the model')
    if not predict_labels:
        assert test_data.shape[0] == test_labels.shape[0]
        if 'eICU' in settings['data']:
            # bootstrap conditioning labels from the real training labels
            synth_labels = train_labels[np.random.choice(train_labels.shape[0], n_train), :]
        else:
            # this doesn't really work for eICU...
            synth_labels = model.sample_C(n_train, settings['cond_dim'], settings['max_val'], settings['one_hot'])
        synth_data = model.sample_trained_model(settings, epoch, n_train, Z_samples=None, cond_dim=settings['cond_dim'], C_samples=synth_labels)
    else:
        assert settings['predict_labels']
        synth_data = model.sample_trained_model(settings, epoch, n_train, Z_samples=None, cond_dim=0)
        # extract the labels (appended as trailing channels of each sequence)
        if 'eICU' in settings['data']:
            n_labels = 7
            synth_labels = synth_data[:, :, -n_labels:]
            train_labels = train_data[:, :, -n_labels:]
            test_labels = test_data[:, :, -n_labels:]
        else:
            n_labels = 6  # mnist
            # per-sequence label = mode over time of the argmax label channel
            synth_labels, _ = mode(np.argmax(synth_data[:, :, -n_labels:], axis=2), axis=1)
            train_labels, _ = mode(np.argmax(train_data[:, :, -n_labels:], axis=2), axis=1)
            test_labels, _ = mode(np.argmax(test_data[:, :, -n_labels:], axis=2), axis=1)
        synth_data = synth_data[:, :, :-n_labels]
        train_data = train_data[:, :, :-n_labels]
        test_data = test_data[:, :, :-n_labels]
    # package up, save
    exp_data = dict()
    exp_data['test_data'] = test_data
    exp_data['test_labels'] = test_labels
    exp_data['train_data'] = train_data
    exp_data['train_labels'] = train_labels
    exp_data['synth_data'] = synth_data
    exp_data['synth_labels'] = synth_labels
    # save it all up
    np.save('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy', exp_data)
    return True
| 43.817365 | 217 | 0.645348 |
23eafa61085f883bdf5c8f10f88dee3b0dc1b00e | 9,979 | py | Python | train.py | saravana611/Cardiac-Image-Segmentation | 9b6a5e0762c5a8829415d3ec50f9c33220d70d9c | [
"MIT"
] | 3 | 2022-01-12T07:11:13.000Z | 2022-02-20T16:52:18.000Z | train.py | saravana611/Cardiac-Image-Segmentation | 9b6a5e0762c5a8829415d3ec50f9c33220d70d9c | [
"MIT"
] | null | null | null | train.py | saravana611/Cardiac-Image-Segmentation | 9b6a5e0762c5a8829415d3ec50f9c33220d70d9c | [
"MIT"
] | 1 | 2022-03-28T07:53:58.000Z | 2022-03-28T07:53:58.000Z | import os
import argparse
import datetime
import uuid
import tensorflow as tf
import matplotlib.pyplot as plt
from azureml.core.run import Run
from azureml.core import Datastore
from azureml.core.model import Model, Dataset
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (
Flatten, Dense, Reshape, Conv2D, MaxPool2D, Conv2DTranspose)
class DisplayCallback(tf.keras.callbacks.Callback):
    """Keras callback that prints a marker line at the end of each epoch.

    An instance is passed to model.fit() in Train.main().
    """

    def on_epoch_end(self, epoch, logs=None):
        # epoch is 0-based, so report epoch+1 for human-friendly numbering
        print ('\nSample Prediction after epoch {}\n'.format(epoch+1))
class Train():
    """End-to-end Azure ML training job for a 2-class image-segmentation model.

    Loads train/val TFRecord sets, builds a small conv/deconv network over
    256x256 single-channel images, trains it, logs metrics and example images
    to the Azure ML run, and registers the resulting model in the workspace.
    """

    def __init__(self):
        """Parse CLI args, grab the AML run context and prepare the datasets."""
        self._parser = argparse.ArgumentParser("train")
        self._parser.add_argument("--model_name", type=str, help="Name of the tf model")
        self._args = self._parser.parse_args()
        self._run = Run.get_context()
        self._exp = self._run.experiment
        self._ws = self._run.experiment.workspace
        # Schema used to parse each serialized TFRecord example.
        self._image_feature_description = {
            'height': tf.io.FixedLenFeature([], tf.int64),
            'width': tf.io.FixedLenFeature([], tf.int64),
            'depth': tf.io.FixedLenFeature([], tf.int64),
            'name' : tf.io.FixedLenFeature([], tf.string),
            'image_raw': tf.io.FixedLenFeature([], tf.string),
            'label_raw': tf.io.FixedLenFeature([], tf.string),
        }
        self._model = self.__get_model()
        self._parsed_training_dataset, self._parsed_val_dataset = self.__load_dataset()
        # Materializing the dataset once to count records; fine for small sets.
        self.__steps_per_epoch = len(list(self._parsed_training_dataset))
        self._buffer_size = 10
        self._batch_size = 1
        self.__epochs = 30

    def main(self):
        """Train, evaluate, log metrics/plots and register the model."""
        plt.rcParams['image.cmap'] = 'Greys_r'
        tf_autotune = tf.data.experimental.AUTOTUNE
        train = self._parsed_training_dataset.map(
            self.__read_and_decode, num_parallel_calls=tf_autotune)
        val = self._parsed_val_dataset.map(self.__read_and_decode)
        train_dataset = train.cache().shuffle(self._buffer_size).batch(self._batch_size).repeat()
        train_dataset = train_dataset.prefetch(buffer_size=tf_autotune)
        test_dataset = val.batch(self._batch_size)
        # Log a couple of example images from each split.
        for image, label in train.take(2):
            sample_image, sample_label = image, label
        self.__display("Training Images", [sample_image, sample_label])
        for image, label in val.take(2):
            sample_image, sample_label = image, label
        self.__display("Eval Images", [sample_image, sample_label])
        logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        # NOTE(review): the TensorBoard callback is created but never passed to
        # fit(), so no TB logs are written — confirm whether this is intended.
        tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
        # Rebuild the model from scratch for the actual training run.
        tf.keras.backend.clear_session()
        self._model = self.__get_model()
        model_history = self._model.fit(train_dataset, epochs=self.__epochs,
                                        steps_per_epoch=self.__steps_per_epoch,
                                        validation_data=test_dataset,
                                        callbacks=[DisplayCallback()])
        # evaluate() returns [loss, dice, accuracy, f1, precision, recall]
        # (loss first, then metrics in compile order).
        metrics_results = self._model.evaluate(test_dataset)
        # BUG FIX: indices were off by one — [0] is the loss, not the dice.
        self._run.log("DICE", "{:.2f}%".format(metrics_results[1]))
        self._run.log("Accuracy", "{:.2f}%".format(metrics_results[2]))
        self.__plot_training_logs(model_history)
        self.__show_predictions(test_dataset, 5)
        self.__register_model(metrics_results)

    def __parse_image_function(self, example_proto):
        """Parse one serialized example according to the feature schema."""
        return tf.io.parse_single_example(example_proto, self._image_feature_description)

    def __load_dataset(self):
        """Load and parse the train/val TFRecord files."""
        raw_training_dataset = tf.data.TFRecordDataset('data/train_images.tfrecords')
        raw_val_dataset = tf.data.TFRecordDataset('data/val_images.tfrecords')
        parsed_training_dataset = raw_training_dataset.map(self.__parse_image_function)
        parsed_val_dataset = raw_val_dataset.map(self.__parse_image_function)
        return parsed_training_dataset, parsed_val_dataset

    @tf.function
    def __read_and_decode(self, example):
        """Decode raw image/label bytes into (256, 256, 1) tensors."""
        image_raw = tf.io.decode_raw(example['image_raw'], tf.int64)
        image_raw.set_shape([65536])
        image = tf.reshape(image_raw, [256, 256, 1])
        # Scale intensities down by the assumed max value of 1024.
        image = tf.cast(image, tf.float32) * (1. / 1024)
        label_raw = tf.io.decode_raw(example['label_raw'], tf.uint8)
        label_raw.set_shape([65536])
        label = tf.reshape(label_raw, [256, 256, 1])
        return image, label

    def __display(self, image_title, display_list):
        """Log a row of images (input / label / prediction) to the AML run."""
        plt.figure(figsize=(10, 10))
        title = ['Input Image', 'Label', 'Predicted Label']
        for i in range(len(display_list)):
            display_resized = tf.reshape(display_list[i], [256, 256])
            plt.subplot(1, len(display_list), i+1)
            plt.title(title[i])
            plt.imshow(display_resized)
            plt.axis('off')
        # Figures are logged under a random uuid; image_title is currently unused.
        title = uuid.uuid4()
        self._run.log_image(f'{title}', plot=plt)

    def __create_mask(self, pred_mask):
        """Collapse per-class logits into a single-channel class-index mask."""
        pred_mask = tf.argmax(pred_mask, axis=-1)
        pred_mask = pred_mask[..., tf.newaxis]
        return pred_mask[0]

    def __show_predictions(self, dataset=None, num=1):
        """Log `num` example predictions from `dataset` to the AML run."""
        if dataset:
            for image, label in dataset.take(num):
                pred_mask = self._model.predict(image)
                self.__display("Show predictions", [image[0], label[0], self.__create_mask(pred_mask)])
        else:
            # BUG FIX: was `self._.predict(...)`, which raised AttributeError.
            # NOTE(review): sample_image/sample_label are not defined in this
            # scope — this fallback branch appears dead; confirm before using.
            prediction = self.__create_mask(self._model.predict(sample_image[tf.newaxis, ...]))
            self.__display("Show predictions sample image", [sample_image, sample_label, prediction])

    def __get_dice_coef(self, y_true, y_pred, smooth=1):
        """Dice coefficient between the label mask and the argmax of y_pred."""
        indices = K.argmax(y_pred, 3)
        indices = K.reshape(indices, [-1, 256, 256, 1])
        true_cast = y_true
        indices_cast = K.cast(indices, dtype='float32')
        axis = [1, 2, 3]
        intersection = K.sum(true_cast * indices_cast, axis=axis)
        union = K.sum(true_cast, axis=axis) + K.sum(indices_cast, axis=axis)
        # `smooth` keeps the ratio defined when both masks are empty.
        dice = K.mean((2. * intersection + smooth)/(union + smooth), axis=0)
        return dice

    def __get_model(self):
        """Build and compile the conv/deconv segmentation network."""
        layers = [
            # Downsampling path (stride-2 convs + pooling)...
            Conv2D(input_shape=[256, 256, 1],
                   filters=100,
                   kernel_size=5,
                   strides=2,
                   padding="same",
                   activation=tf.nn.relu,
                   name="Conv1"),
            MaxPool2D(pool_size=2, strides=2, padding="same"),
            Conv2D(filters=200,
                   kernel_size=5,
                   strides=2,
                   padding="same",
                   activation=tf.nn.relu),
            MaxPool2D(pool_size=2, strides=2, padding="same"),
            Conv2D(filters=300,
                   kernel_size=3,
                   strides=1,
                   padding="same",
                   activation=tf.nn.relu),
            Conv2D(filters=300,
                   kernel_size=3,
                   strides=1,
                   padding="same",
                   activation=tf.nn.relu),
            Conv2D(filters=2,
                   kernel_size=1,
                   strides=1,
                   padding="same",
                   activation=tf.nn.relu),
            # ...then upsample back to 256x256 with 2 output class channels.
            Conv2DTranspose(filters=2, kernel_size=31, strides=16, padding="same")
        ]
        tf.keras.backend.clear_session()
        model = tf.keras.models.Sequential(layers)
        model.compile(
            optimizer='adam',
            loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=[self.__get_dice_coef, 'accuracy', self.__f1_score,
                     self.__precision, self.__recall])
        return model

    def __plot_training_logs(self, model_history):
        """Log the loss / dice training curves figure to the AML run."""
        loss = model_history.history['loss']
        val_loss = model_history.history['val_loss']
        accuracy = model_history.history['accuracy']
        val_accuracy = model_history.history['val_accuracy']
        # NOTE(review): this key must match the name Keras assigns the custom
        # metric — confirm '__get_dice_coef' appears in history as spelled.
        dice = model_history.history['__get_dice_coef']
        epochs = range(self.__epochs)
        plt.figure()
        plt.plot(epochs, loss, 'r', label='Training loss')
        plt.plot(epochs, val_loss, 'bo', label='Validation loss')
        plt.plot(epochs, dice, 'go', label='Dice Coefficient')
        plt.title('Training and Validation Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss Value')
        plt.ylim([0, 1])
        plt.legend()
        self._run.log_image("Training and Validation Loss", plot=plt)

    def __recall(self, y_true, y_pred):
        """Recall metric; note y_true is replaced with an all-ones mask."""
        y_true = K.ones_like(y_true)
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        all_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (all_positives + K.epsilon())
        return recall

    def __precision(self, y_true, y_pred):
        """Precision metric; note y_true is replaced with an all-ones mask."""
        y_true = K.ones_like(y_true)
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    def __f1_score(self, y_true, y_pred):
        """Harmonic mean of the precision and recall metrics above."""
        precision = self.__precision(y_true, y_pred)
        recall = self.__recall(y_true, y_pred)
        return 2*((precision*recall)/(precision+recall+K.epsilon()))

    def __register_model(self, metrics_results):
        """Save the trained model to ./model and register it in the workspace."""
        # BUG FIX: save_format expects the string "tf", not the tensorflow
        # module object (which would fail Keras' format validation).
        tf.keras.models.save_model(
            self._model, "./model", overwrite=True, include_optimizer=True, save_format="tf",
            signatures=None, options=None)
        Model.register(workspace=self._ws,
                       model_path="./model",
                       model_name=self._args.model_name,
                       properties = {"run_id": self._run.id,
                                     "experiment": self._run.experiment.name},
                       tags={
                           # BUG FIX: align with evaluate() ordering (see main):
                           # [1] = dice, [2] = accuracy; [0] is the loss.
                           "DICE": float(metrics_results[1]),
                           "Accuracy": float(metrics_results[2])
                       })
if __name__ == '__main__':
    # Script entry point: build the trainer and run the full pipeline.
    Train().main()
a44d73f5bc986a8fa986dd82d7e5409a5ed60964 | 1,238 | py | Python | remotedocker/manage/handler.py | Plesoun/remotedocker | 14ee1bf4095199650c2e976d3b48ee022a207ee7 | [
"Apache-2.0"
] | null | null | null | remotedocker/manage/handler.py | Plesoun/remotedocker | 14ee1bf4095199650c2e976d3b48ee022a207ee7 | [
"Apache-2.0"
] | null | null | null | remotedocker/manage/handler.py | Plesoun/remotedocker | 14ee1bf4095199650c2e976d3b48ee022a207ee7 | [
"Apache-2.0"
] | null | null | null | import logging
import asab
import asab.web.rest
L = logging.getLogger(__name__)
class ManageWebHandler:
    """Web handler exposing the manage service under /manage/cli and /manage/api.

    Both routes take a command name and its arguments from the URL path and
    delegate to the corresponding method on the manage service.
    """

    def __init__(self, app, svc):
        self.ManageWebservice = svc
        router = app.WebContainer.WebApp.router
        router.add_get(
            r"/manage/cli/{manage_command}/{manage_arguments}", self.manage_cli
        )
        router.add_get(
            r"/manage/api/{manage_command}/{manage_arguments}", self.manage_api
        )

    @staticmethod
    def _respond(request, result):
        """Serialise a service result; False means the command was unknown."""
        if result is False:
            return asab.web.rest.json_response(request, {"result": "Command not found"})
        return asab.web.rest.json_response(request, result)

    async def manage_cli(self, request):
        """Run a CLI-style manage command taken from the URL path."""
        # Could send multiple commands in the future
        command = request.match_info["manage_command"]
        arguments = request.match_info["manage_arguments"]
        result = await self.ManageWebservice.run_manage_cli_command(
            command, arguments
        )
        return self._respond(request, result)

    async def manage_api(self, request):
        """Run an API-style manage command taken from the URL path."""
        command = request.match_info["manage_command"]
        arguments = request.match_info["manage_arguments"]
        result = await self.ManageWebservice.run_manage_api_command(
            command, arguments
        )
        return self._respond(request, result)
| 30.95 | 79 | 0.760905 |
ff6abce402a494f7ee18cc60f7257cb3fdec2178 | 688 | py | Python | manage.py | vijujo/Assignment | a59c3834c95eccc48079c69d7c3168e7bc112352 | [
"MIT"
] | null | null | null | manage.py | vijujo/Assignment | a59c3834c95eccc48079c69d7c3168e7bc112352 | [
"MIT"
] | null | null | null | manage.py | vijujo/Assignment | a59c3834c95eccc48079c69d7c3168e7bc112352 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Assignment.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
f420b34f064b3d4b21d2f4dd19a3776f3dc0fc46 | 16,136 | py | Python | lip_model/training_graph.py | danoneata/deep_lip_reading | bb46cd7ee2764e1d932d9ea95cc405bef0934332 | [
"Apache-2.0"
] | null | null | null | lip_model/training_graph.py | danoneata/deep_lip_reading | bb46cd7ee2764e1d932d9ea95cc405bef0934332 | [
"Apache-2.0"
] | null | null | null | lip_model/training_graph.py | danoneata/deep_lip_reading | bb46cd7ee2764e1d932d9ea95cc405bef0934332 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
from config import load_args
from lip_model.losses import cer
from lip_model.modules import (
embedding,
sinusoid_encoding,
multihead_attention,
feedforward,
label_smoothing,
)
from lip_model.visual_frontend import VisualFrontend
from util.tf_util import shape_list
config = load_args()
class TransformerTrainGraph:
def __init__(
    self,
    x,
    y,
    is_training=True,
    reuse=None,
    embed_input=False,
    go_token_index=2,
    chars=None,
):
    """Build the full transformer training graph.

    Args:
        x: input video tensor, or a tuple/list whose first element is the video.
        y: training transcription, or at inference a (partial_preds, ground_truth)
           pair used for autoregressive decoding.
        is_training: build training vs. inference graph.
        reuse: passed to tf.variable_scope for weight sharing.
        embed_input: unused here; kept for interface compatibility.
        go_token_index: vocabulary index of the <GO>/<S> token.
        chars: index-to-character list, used for summaries / external LM decoding.
    """
    self.is_training = is_training
    self.x = x
    if config.featurizer:
        vid_inp = x[0] if type(x) is tuple or type(x) is list else x
        # Frames that are all-zero are treated as padding.
        istarget = tf.not_equal(vid_inp, 0)
        self.padding_mask = tf.reduce_any(istarget, axis=[2, 3, 4])
        self.padding_mask = tf.cast(self.padding_mask, dtype=tf.float32)
        with tf.variable_scope("visual_frontend", reuse=reuse):
            self.visual_frontend = VisualFrontend(vid_inp)
            vid_inp = self.visual_frontend.output
        # Zero out features coming from padded frames.
        vid_inp = vid_inp * tf.expand_dims(self.padding_mask, -1)
        # pad = 30
        # x = tf.keras.layers.ZeroPadding1D(padding=(pad, pad))(x)
        if type(x) is tuple or type(x) is list:
            x = [vid_inp] + list(x[1:])
        else:
            x = vid_inp
    if is_training:
        self.prev = y
        self.y = y
    else:
        # This is the partial prediction used for the autoregression -
        # augmented by one more element on every step when autoregression
        # is on:
        self.prev = y[0]
        self.y = y[1]  # This is the whole ground truth transcription
    self.alignment_history = {}  # to be filled in by decoder
    self.go_token_idx = go_token_index
    # define decoder inputs: shift right and prepend the <GO> token
    self.decoder_inputs = tf.concat(
        (tf.ones_like(self.prev[:, :1]) * go_token_index, self.prev[:, :-1]), -1
    )  # 2:<S>
    # Encoder
    self.enc = x
    with tf.variable_scope("encoder", reuse=reuse) as scope:
        self.enc = self.encoder_body(self.enc, is_training)
    # import ipdb; ipdb.set_trace()
    # Decoder
    self.dec = self.decoder_inputs
    # This is a hack to be able to use same model:
    top_scope = tf.get_variable_scope()
    self.chars = chars  # needed for decoding with external LM
    # --------------- index to char dict for summaries --------------------------------
    if chars is not None:
        keys = tf.constant(np.arange(len(chars)), dtype=tf.int64)
        values = tf.constant(chars, dtype=tf.string)
        self.char_table = tf.contrib.lookup.HashTable(
            tf.contrib.lookup.KeyValueTensorInitializer(keys, values), ""
        )
    with tf.variable_scope("decoder", reuse=reuse) as scope:
        self.dec = self.decoder_body(
            self.enc, self.dec, is_training, top_scope=top_scope
        )
    if type(self.dec) == tuple:
        self.preds, self.scores, self.dec = self.dec  # Inference graph output
    self.add_loss_and_metrics(reuse, is_training)
    if config.tb_eval:
        self.add_tb_summaries()
        self.tb_sum = tf.summary.merge_all()
def project_output(self):
    """Whether the decoder output should be projected to vocabulary logits.

    Always True for this graph; subclasses may override.
    """
    return True
def decoder_body(self, enc, dec, is_training, top_scope=None):
    """Transformer decoder stack: embeddings + N blocks of (masked self-attention,
    encoder-decoder attention, feed-forward).

    Args:
        enc: encoder output memory.
        dec: decoder input token ids.
        is_training: enables dropout.
        top_scope: unused here; kept for interface compatibility with subclasses.
    """
    # Initialize the masks for the pads from here,
    # because after positional embeddings are added, nothing will be 0
    if config.mask_pads:  # Guard this for backwards compatibility
        key_masks_enc = tf.sign(tf.abs(tf.reduce_sum(enc, axis=-1)))  # (N, T_k)
        key_masks_dec = tf.cast(tf.sign(tf.abs(dec)), "float32")  # (N, T_k)
        query_masks_dec = tf.cast(tf.sign(tf.abs(dec)), "float32")  # (N, T_k)
    else:
        key_masks_enc = key_masks_dec = query_masks_dec = None
    # Embedding (token + positional + dropout)
    dec = self.decoder_embeddings(dec, is_training)
    for i in range(config.num_blocks):
        with tf.variable_scope("num_blocks_{}".format(i)):
            # self-attention (causal: each position only attends to its past)
            dec, alignmets = multihead_attention(
                queries=dec,
                query_masks=query_masks_dec,
                keys=dec,
                key_masks=key_masks_dec,
                num_units=config.hidden_units,
                num_heads=config.num_heads,
                dropout_rate=config.dropout_rate,
                is_training=is_training,
                causality=True,
                scope="self_attention",
            )
            # self.alignment_history["dec_self_att_{}".format(i)] = alignmets # save for tb
            # vanilla attention over the encoder memory
            dec, alignmets = multihead_attention(
                queries=dec,
                query_masks=query_masks_dec,
                keys=enc,
                key_masks=key_masks_enc,
                num_units=config.hidden_units,
                num_heads=config.num_heads,
                dropout_rate=config.dropout_rate,
                is_training=is_training,
                causality=False,
                scope="vanilla_attention",
            )
            self.alignment_history[
                "enc_dec_attention_{}".format(i)
            ] = alignmets  # save for tb
            # Feed Forward
            dec = feedforward(
                dec, num_units=[4 * config.hidden_units, config.hidden_units]
            )
    return dec
def decoder_embeddings(self, decoder_inputs, is_training):
    """Embed decoder token ids, add positional encoding and apply dropout."""
    dec = embedding(
        decoder_inputs,
        vocab_size=config.n_labels,
        num_units=config.hidden_units,
        scale=True,
        scope="dec_embed",
    )
    # if self.is_training:
    # dec = dec[:,:self.out_last_non_pad_idx]
    # Positional Encoding
    pos = self.positional_encoding(decoder_inputs, scope="dec_pe")
    # if self.is_training:
    # pos = pos[:,:self.out_last_non_pad_idx]
    dec += pos
    # Dropout (active only when is_training is true)
    dec = tf.layers.dropout(
        dec, rate=config.dropout_rate, training=tf.convert_to_tensor(is_training)
    )
    return dec
def positional_encoding(self, inp, scope):
if config.sinusoid:
return sinusoid_encoding(
inp,
num_units=config.hidden_units,
zero_pad=False,
scale=False,
scope=scope,
T=config.maxlen,
)
else:
return embedding(
tf.tile(
tf.expand_dims(tf.range(tf.shape(inp)[1]), 0), [tf.shape(inp)[0], 1]
),
vocab_size=config.maxlen,
num_units=config.hidden_units,
zero_pad=False,
scale=False,
scope="dec_pe",
)
    def encoder_body(self, enc, is_training):
        """Build the transformer encoder stack.

        Args:
            enc: dense input features; assumed (batch, T_enc, feat) -- TODO confirm
            is_training: bool (or bool tensor) toggling dropout

        Returns:
            Encoder hidden states after `config.num_blocks` blocks of
            self-attention and feed-forward layers.
        """
        num_blocks = config.num_blocks
        if config.mask_pads: # Guard this for backwards compatibility
            # Initialize the masks for the pads from here,
            # because after positional embeddings are added, nothing will be 0
            key_masks = tf.sign(tf.abs(tf.reduce_sum(enc, axis=-1))) # (N, T_k)
            query_masks = tf.sign(tf.abs(tf.reduce_sum(enc, axis=-1))) # (N, T_k)
        else:
            key_masks = query_masks = None
        enc = self.encoder_embeddings(enc, is_training)
        for i in range(num_blocks):
            with tf.variable_scope("num_blocks_{}".format(i)):
                # Multihead Attention (non-causal self-attention)
                enc, alignmets = multihead_attention(
                    queries=enc,
                    query_masks=query_masks,
                    keys=enc,
                    key_masks=key_masks,
                    num_units=config.hidden_units,
                    num_heads=config.num_heads,
                    dropout_rate=config.dropout_rate,
                    is_training=is_training,
                    causality=False,
                )
                # key_masks = query_masks = None #
                # Feed Forward
                enc = feedforward(
                    enc, num_units=[4 * config.hidden_units, config.hidden_units]
                )
        # self.alignment_history["enc_self_att_{}".format(i)] = alignmets # save for tb
        return enc
    def encoder_embeddings(self, x, is_training, embed_input=0):
        """Prepare encoder inputs: optional token embedding, projection to the
        transformer width, positional encodings, and (embedding path only) dropout.

        Args:
            x: integer ids when `embed_input` is truthy, otherwise dense
               feature vectors (e.g. a visual-frontend output).
            is_training: bool (or bool tensor) controlling dropout
            embed_input: when truthy, run x through a learned embedding table

        Returns:
            Encoder input tensor with trailing dimension `config.hidden_units`.
        """
        # Embedding
        if embed_input:
            enc = embedding(
                x,
                vocab_size=config.n_input_vocab,
                num_units=config.hidden_units,
                scale=True,
                scope="enc_embed",
            )
        else:
            enc = x
        # Positional Encoding
        feat_dim = shape_list(enc)[-1]
        # if input features are not same size as transformer units, make a linear projection
        if not feat_dim == config.hidden_units:
            enc = tf.layers.dense(enc, config.hidden_units)
        enc += self.positional_encoding(enc, scope="enc_pe")
        if embed_input:
            # Dropout (only applied in the token-embedding path)
            enc = tf.layers.dropout(
                enc,
                rate=config.dropout_rate,
                training=tf.convert_to_tensor(is_training),
            )
        return enc
    def add_loss_and_metrics(self, reuse, is_training):
        """Add output projection, smoothed cross-entropy loss, CER metric and
        (when training) the Adam train op; also registers scalar summaries.

        Args:
            reuse: variable-scope reuse flag for the output projection
            is_training: Python bool selecting whether to build the optimizer

        Side effects: sets self.logits, self.istarget, self.y_one_hot,
        self.y_smoothed, self.loss, self.mean_loss, self.logprobs, self.preds,
        self.cer, self.cer_per_sample and (training only) self.train_op.
        """
        # Final linear projection
        if self.project_output():
            # with tf.variable_scope("ctc_conv1d_net/ctc_probs", reuse=reuse) as scope:
            self.logits = tf.layers.dense(self.dec, config.n_labels, reuse=reuse)
        else:
            assert self.dec.get_shape()[-1].value == config.n_labels
            self.logits = self.dec
        if config.test_aug_times:
            # Test-time augmentation: average logits over the augmented copies.
            self.logits_aug = self.logits
            self.logits = tf.reduce_mean(self.logits, 0, keep_dims=True)
        self.istarget = tf.to_float(tf.not_equal(self.y, 0))
        # Loss
        self.y_one_hot = tf.one_hot(self.y, depth=config.n_labels)
        self.y_smoothed = label_smoothing(self.y_one_hot)
        self.loss = tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits, labels=self.y_smoothed
        )
        # we want to know when to stop so learn padding as well
        self.mean_loss = tf.reduce_sum(self.loss) / (tf.reduce_sum(self.istarget))
        self.logprobs = tf.log(tf.nn.softmax(self.logits))
        # fmt: off
        if not "infer" in config.graph_type:
            # Greedy predictions straight from the logits.
            self.preds = tf.to_int32(tf.argmax(self.logits, axis=-1))
            self.cer, self.cer_per_sample = cer(self.y_one_hot, self.logits, return_all=True)
        else:
            # Inference graph: self.preds is assumed to be set elsewhere
            # (e.g. by a beam-search decoder) -- TODO confirm.
            self.preds = tf.to_int32(self.preds)
            one_hot_from_preds = tf.one_hot(self.preds, depth=config.n_labels)
            self.cer, self.cer_per_sample = cer(self.y_one_hot, one_hot_from_preds, return_all=True)
        # fmt: on
        if is_training:
            with tf.name_scope("optimizer"):
                optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
                self.train_op = optimizer.minimize(self.mean_loss)
        # instrument tensorboard
        tf.summary.histogram("logits", self.logits)
        tf.summary.histogram("loss", self.loss)
        tf.summary.scalar("cer", self.cer)
        self.add_text_summaries()
    def add_text_summaries(self):
        """Log "ground truth → prediction" string pairs to TensorBoard.

        Predictions and labels are mapped through the id→char lookup table and
        joined along the time axis into one string per batch sample.
        """
        p = self.char_table.lookup(tf.cast(self.preds, tf.int64))
        p = tf.string_join(tf.split(p, p.shape[1], 1))[:, 0]
        g = self.char_table.lookup(tf.cast(self.y, tf.int64))
        g = tf.string_join(tf.split(g, g.shape[1], 1))[:, 0]
        j = tf.string_join([g, p], " → ")
        tf.summary.text("Predictions", j)
    def add_tb_summaries(self):
        """Add rich TensorBoard summaries: input video GIFs, text predictions,
        averaged encoder-decoder attention images and a subtitled video.

        Only the first batch sample (b_id = 0) is visualized; input paddings
        are masked out using self.padding_mask.
        """
        from util.tb_util import add_gif_summary, colorize_image
        fps = 10
        timeline = False
        # ---------------- Add video summaries -------------------------------
        bs = int(self.visual_frontend.output.shape[0])
        b_id = 0
        non_pad_inds = tf.cast(tf.where(self.padding_mask[b_id] > 0)[:, 0], tf.int64)
        fr_in, to_in = (
            non_pad_inds[0],
            non_pad_inds[-1] + 1,
        ) # For masking out input paddings
        add_gif_summary(
            "1-video_input",
            self.visual_frontend.input[b_id][fr_in:to_in],
            fps=fps,
            timeline=timeline,
        )
        if not config.test_aug_times:
            add_gif_summary(
                "2-input_to_resnet",
                self.visual_frontend.aug_out[b_id][fr_in:to_in],
                fps=fps,
                timeline=timeline,
            )
        else:
            # Viz the different test augmentations
            # NOTE(review): this loop comprehension rebinds b_id; `xrange` is
            # Python-2 only.
            add_gif_summary(
                "2-input_to_resnet",
                tf.concat(
                    [
                        self.visual_frontend.aug_out[b_id][fr_in:to_in]
                        for b_id in xrange(bs)
                    ],
                    axis=2,
                ),
                fps=fps,
                timeline=timeline,
            )
        # ---------------- Add text summaries -------------------------------
        self.add_text_summaries()
        # ---------------- Add image summaries -------------------------------
        all_atts = []
        for layer_name, alignment_history in self.alignment_history.items():
            for att_head_idx, attention_images in enumerate(alignment_history):
                all_atts.append(attention_images)
        # Geometric mean over all layers/heads of the stored alignments.
        avg_att = tf.exp(tf.reduce_mean(tf.log(all_atts), axis=0))
        # Permute and reshape (batch, t_dec, t_enc) --> (batch, t_enc, t_dec, 1)
        attention_img = tf.expand_dims(tf.transpose(avg_att, [0, 2, 1]), -1)
        attention_img *= 255 # Scale to range [0, 255]
        b_id = 0 # visualize only the first sample of the batch
        to_out = (
            tf.where(self.preds[b_id] > 0)[-1][0] + 1
        ) # To mask output paddings |~
        color_img = tf.map_fn(colorize_image, (attention_img[:, fr_in:to_in, :to_out]))
        tf.summary.image("3-enc_dec_attention", color_img)
        # ---------------- Add image with subs summaries -------------------------------
        # import ipdb; ipdb.set_trace()
        # NOTE(review): `joined_pred` is not defined anywhere in this method or
        # in visible scope -- reaching this call would raise NameError.
        # Presumably the joined prediction strings built in add_text_summaries
        # were meant to be reused here; confirm before relying on this path.
        add_gif_summary(
            "4-subs",
            self.visual_frontend.input[b_id][fr_in:to_in],
            fps=fps,
            timeline=timeline,
            attention=attention_img[b_id][fr_in:to_in, :to_out, 0],
            pred=joined_pred[b_id],
        )
@classmethod
def get_input_shapes_and_types(cls, batch=0):
input_types = []
input_shape = []
if config.featurizer:
input_shape += [
(
config.time_dim,
config.img_width,
config.img_height,
config.img_channels,
)
]
input_types += ["float32"]
else:
input_shape += [(config.time_dim, config.feat_dim)]
input_types += ["float32"]
if batch:
input_shape = [(config.batch_size,) + shape for shape in input_shape]
return input_shape, input_types
@classmethod
def get_target_shapes_and_types(cls, batch=0):
target_shape = [(config.time_dim,)]
if batch:
target_shape = [(config.batch_size,) + shape for shape in target_shape]
target_types = ["int64"]
return target_shape, target_types
@classmethod
def get_model_input_target_shapes_and_types(cls, batch_dims=1):
return (
cls.get_input_shapes_and_types(batch=batch_dims),
cls.get_target_shapes_and_types(batch=batch_dims),
)
| 35.699115 | 165 | 0.542885 |
d11fa03986d691a17bddcda5a687a6b525c2e106 | 1,149 | py | Python | tresearcher/researcher/test_records.py | Lewington-pitsos/tabularresearcher | bb69d15e24a1734e6c7293e00867a25fd5fb99a3 | [
"MIT"
] | null | null | null | tresearcher/researcher/test_records.py | Lewington-pitsos/tabularresearcher | bb69d15e24a1734e6c7293e00867a25fd5fb99a3 | [
"MIT"
] | null | null | null | tresearcher/researcher/test_records.py | Lewington-pitsos/tabularresearcher | bb69d15e24a1734e6c7293e00867a25fd5fb99a3 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from tresearcher.researcher.records import *
class TestRecordManagement(unittest.TestCase):
def setUp(self):
self.data_path = "tresearcher/researcher/data/"
def test_does_not_mutate_params(self):
params = {"a": 4, "b": 8, "c": [5, 6, 7, ]}
expected = {"a": 4, "b": 8, "c": [5, 6, 7, ]}
save_experiment(self.data_path, "somename", params, {"loss": [0.1, 0.4, 0.231]})
self.assertDictEqual(params, expected)
def test_handles_floats(self):
params = {"a": 4, "b": 8, "c": [5, 6, 7, ]}
expected = {"a": 4, "b": 8, "c": [5, 6, 7, ]}
save_experiment(self.data_path, "somename", params, {"loss": [np.float32(0.1), 0.4, 0.231]})
def test_saves_correctly(self):
params = {"a": 4, "b": 8, "c": [5, 6, 7, ]}
expected = {"a": 4, "b": 8, "c": [5, 6, 7, ], "results": {"loss": [0.1, 0.4, 0.231]}}
save_experiment(self.data_path, "somename", params, {"loss": [0.1, 0.4, 0.231]})
with open(self.data_path + "somename.json") as f:
saved = json.load(f)
self.assertDictEqual(saved, expected) | 34.818182 | 100 | 0.557006 |
deafcc77d9540673b7c3960a7a999e31536adfd7 | 38,833 | py | Python | umsgpack.py | wzab/micropython-msgpack | 6d50c6d393939b694b53bf832e72ad7d4b5e457c | [
"MIT"
] | null | null | null | umsgpack.py | wzab/micropython-msgpack | 6d50c6d393939b694b53bf832e72ad7d4b5e457c | [
"MIT"
] | null | null | null | umsgpack.py | wzab/micropython-msgpack | 6d50c6d393939b694b53bf832e72ad7d4b5e457c | [
"MIT"
] | 1 | 2021-07-11T13:35:54.000Z | 2021-07-11T13:35:54.000Z | # u-msgpack-python v2.5.1 - v at sergeev.io
# https://github.com/vsergeev/u-msgpack-python
#
# u-msgpack-python is a lightweight MessagePack serializer and deserializer
# module, compatible with both Python 2 and 3, as well CPython and PyPy
# implementations of Python. u-msgpack-python is fully compliant with the
# latest MessagePack specification.com/msgpack/msgpack/blob/master/spec.md). In
# particular, it supports the new binary, UTF-8 string, and application ext
# types.
#
# MIT License
#
# Copyright (c) 2013-2016 vsergeev / Ivan (Vanya) A. Sergeev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
u-msgpack-python v2.5.1 - v at sergeev.io
https://github.com/vsergeev/u-msgpack-python
u-msgpack-python is a lightweight MessagePack serializer and deserializer
module, compatible with both Python 2 and 3, as well CPython and PyPy
implementations of Python. u-msgpack-python is fully compliant with the
latest MessagePack specification (https://github.com/msgpack/msgpack/blob/master/spec.md). In
particular, it supports the new binary, UTF-8 string, and application ext
types.
License: MIT
"""
import struct
import collections
import datetime
import sys
import io
# Package version, exposed both as a human-readable string and as a tuple
# usable for programmatic comparisons.
__version__ = "2.5.1"
"Module version string"
version = (2, 5, 1)
"Module version tuple"
##############################################################################
# Ext Class
##############################################################################
# Extension type for application-defined types and data
class Ext(object):
    """
    Serializable MessagePack extension object: an application-defined integer
    type code paired with an opaque byte payload.
    """

    def __init__(self, type, data):
        """
        Create a new Ext object.

        Args:
            type: application-defined type integer
            data: application-defined payload as a byte string

        Raises:
            TypeError: if `type` is not an int, or `data` is not a byte
                string (``bytes`` on Python 3, ``str`` on Python 2).

        Example:
            >>> foo = umsgpack.Ext(0x05, b"\x01\x02\x03")
            >>> umsgpack.packb({u"special stuff": foo, u"awesome": True})
            '\x82\xa7awesome\xc3\xadspecial stuff\xc7\x03\x05\x01\x02\x03'
        """
        if not isinstance(type, int):
            raise TypeError("ext type is not type integer")
        if sys.version_info[0] == 3 and not isinstance(data, bytes):
            raise TypeError("ext data is not type \'bytes\'")
        if sys.version_info[0] == 2 and not isinstance(data, str):
            raise TypeError("ext data is not type \'str\'")
        self.type = type
        self.data = data

    def __eq__(self, other):
        """Two Ext objects are equal iff both type code and payload match."""
        if not isinstance(other, self.__class__):
            return False
        return self.type == other.type and self.data == other.data

    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not self.__eq__(other)

    def __str__(self):
        """Human-readable summary showing the type code and up to 8 data bytes."""
        preview = " ".join(["0x%02x" % ord(self.data[i:i + 1])
                            for i in xrange(min(len(self.data), 8))])
        out = "Ext Object (Type: 0x%02x, Data: " % self.type + preview
        if len(self.data) > 8:
            out += " ..."
        return out + ")"

    def __hash__(self):
        """Hash consistent with __eq__: the hash of the (type, data) pair."""
        return hash((self.type, self.data))
class InvalidString(bytes):
    """Subclass of bytes to hold invalid UTF-8 strings.

    Returned by the unpacker (when the ``allow_invalid_utf8`` option is set)
    so callers can still access the raw bytes of a malformed string.
    """
    pass
##############################################################################
# Exceptions
##############################################################################
# Base Exception classes
class PackException(Exception):
    """Base class for all exceptions raised while serializing (packing)."""
    pass
class UnpackException(Exception):
    """Base class for all exceptions raised while deserializing (unpacking)."""
    pass
# Packing error
class UnsupportedTypeException(PackException):
    """Raised when an object's type (or size) cannot be represented in MessagePack."""
    pass
# Unpacking error
class InsufficientDataException(UnpackException):
    """Raised when the input ends before a complete object could be unpacked."""
    pass
class InvalidStringException(UnpackException):
    """Raised when a serialized string is not valid UTF-8 (and invalid strings are disallowed)."""
    pass
class UnsupportedTimestampException(UnpackException):
    """Raised when a timestamp ext payload has an unsupported length/format."""
    pass
class ReservedCodeException(UnpackException):
    """Raised when the reserved format code 0xc1 is encountered during unpacking."""
    pass
class UnhashableKeyException(UnpackException):
    """
    Unhashable key encountered during map unpacking.
    The serialized map cannot be deserialized into a Python dictionary.
    """
    pass
class DuplicateKeyException(UnpackException):
    """Raised when the same key occurs more than once in a serialized map."""
    pass
# Backwards compatibility
# Old exception names kept as aliases so code written against earlier
# releases keeps working.
KeyNotPrimitiveException = UnhashableKeyException
KeyDuplicateException = DuplicateKeyException
#############################################################################
# Exported Functions and Glob
#############################################################################
# Exported functions and variables, set up in __init()
# Public API entry points. These start as None and are bound to the
# Python-2 or Python-3 implementations by the module initializer at import
# time; dump/dumps/load/loads are json-style aliases of pack/packb/unpack/unpackb.
pack = None
packb = None
unpack = None
unpackb = None
dump = None
dumps = None
load = None
loads = None
compatibility = False
"""
Compatibility mode boolean.
When compatibility mode is enabled, u-msgpack-python will serialize both
unicode strings and bytes into the old "raw" msgpack type, and deserialize the
"raw" msgpack type into bytes. This provides backwards compatibility with the
old MessagePack specification.
Example:
>>> umsgpack.compatibility = True
>>>
>>> umsgpack.packb([u"some string", b"some bytes"])
b'\x92\xabsome string\xaasome bytes'
>>> umsgpack.unpackb(_)
[b'some string', b'some bytes']
>>>
"""
##############################################################################
# Packing
##############################################################################
# You may notice struct.pack("B", obj) instead of the simpler chr(obj) in the
# code below. This is to allow for seamless Python 2 and 3 compatibility, as
# chr(obj) has a str return type instead of bytes in Python 3, and
# struct.pack(...) has the right return type in both versions.
def _pack_integer(obj, fp, options):
if obj < 0:
if obj >= -32:
fp.write(struct.pack("b", obj))
elif obj >= -2**(8 - 1):
fp.write(b"\xd0" + struct.pack("b", obj))
elif obj >= -2**(16 - 1):
fp.write(b"\xd1" + struct.pack(">h", obj))
elif obj >= -2**(32 - 1):
fp.write(b"\xd2" + struct.pack(">i", obj))
elif obj >= -2**(64 - 1):
fp.write(b"\xd3" + struct.pack(">q", obj))
else:
raise UnsupportedTypeException("huge signed int")
else:
if obj < 128:
fp.write(struct.pack("B", obj))
elif obj < 2**8:
fp.write(b"\xcc" + struct.pack("B", obj))
elif obj < 2**16:
fp.write(b"\xcd" + struct.pack(">H", obj))
elif obj < 2**32:
fp.write(b"\xce" + struct.pack(">I", obj))
elif obj < 2**64:
fp.write(b"\xcf" + struct.pack(">Q", obj))
else:
raise UnsupportedTypeException("huge unsigned int")
def _pack_nil(obj, fp, options):
    # MessagePack nil is the single byte 0xc0; the value itself is ignored.
    fp.write(b"\xc0")
def _pack_boolean(obj, fp, options):
fp.write(b"\xc3" if obj else b"\xc2")
def _pack_float(obj, fp, options):
    """Serialize a float as IEEE-754 single (0xca) or double (0xcb) precision.

    The precision comes from the 'force_float_precision' option when given,
    otherwise from the module-level auto-detected _float_precision.
    """
    precision = options.get('force_float_precision', _float_precision)
    if precision == "double":
        code, fmt = b"\xcb", ">d"
    elif precision == "single":
        code, fmt = b"\xca", ">f"
    else:
        raise ValueError("invalid float precision")
    fp.write(code + struct.pack(fmt, obj))
def _pack_string(obj, fp, options):
obj = obj.encode('utf-8')
obj_len = len(obj)
if obj_len < 32:
fp.write(struct.pack("B", 0xa0 | obj_len) + obj)
elif obj_len < 2**8:
fp.write(b"\xd9" + struct.pack("B", obj_len) + obj)
elif obj_len < 2**16:
fp.write(b"\xda" + struct.pack(">H", obj_len) + obj)
elif obj_len < 2**32:
fp.write(b"\xdb" + struct.pack(">I", obj_len) + obj)
else:
raise UnsupportedTypeException("huge string")
def _pack_binary(obj, fp, options):
obj_len = len(obj)
if obj_len < 2**8:
fp.write(b"\xc4" + struct.pack("B", obj_len) + obj)
elif obj_len < 2**16:
fp.write(b"\xc5" + struct.pack(">H", obj_len) + obj)
elif obj_len < 2**32:
fp.write(b"\xc6" + struct.pack(">I", obj_len) + obj)
else:
raise UnsupportedTypeException("huge binary string")
def _pack_oldspec_raw(obj, fp, options):
obj_len = len(obj)
if obj_len < 32:
fp.write(struct.pack("B", 0xa0 | obj_len) + obj)
elif obj_len < 2**16:
fp.write(b"\xda" + struct.pack(">H", obj_len) + obj)
elif obj_len < 2**32:
fp.write(b"\xdb" + struct.pack(">I", obj_len) + obj)
else:
raise UnsupportedTypeException("huge raw string")
def _pack_ext(obj, fp, options):
obj_len = len(obj.data)
if obj_len == 1:
fp.write(b"\xd4" + struct.pack("B", obj.type & 0xff) + obj.data)
elif obj_len == 2:
fp.write(b"\xd5" + struct.pack("B", obj.type & 0xff) + obj.data)
elif obj_len == 4:
fp.write(b"\xd6" + struct.pack("B", obj.type & 0xff) + obj.data)
elif obj_len == 8:
fp.write(b"\xd7" + struct.pack("B", obj.type & 0xff) + obj.data)
elif obj_len == 16:
fp.write(b"\xd8" + struct.pack("B", obj.type & 0xff) + obj.data)
elif obj_len < 2**8:
fp.write(b"\xc7" +
struct.pack("BB", obj_len, obj.type & 0xff) + obj.data)
elif obj_len < 2**16:
fp.write(b"\xc8" +
struct.pack(">HB", obj_len, obj.type & 0xff) + obj.data)
elif obj_len < 2**32:
fp.write(b"\xc9" +
struct.pack(">IB", obj_len, obj.type & 0xff) + obj.data)
else:
raise UnsupportedTypeException("huge ext data")
def _pack_ext_timestamp(obj, fp, options):
    """Serialize a datetime as the MessagePack timestamp extension (type -1).

    Naive datetimes are assumed to be UTC. The smallest of the three spec
    formats (32-, 64- or 96-bit) able to represent the value is chosen.
    Sub-microsecond precision is not representable by datetime, so the
    nanoseconds field is always microseconds * 1000.
    """
    if not obj.tzinfo:
        # Object is naive datetime, convert to aware date time,
        # assuming UTC timezone
        delta = obj.replace(tzinfo=_utc_tzinfo) - _epoch
    else:
        # Object is aware datetime
        delta = obj - _epoch
    seconds = delta.seconds + delta.days * 86400
    microseconds = delta.microseconds
    if microseconds == 0 and 0 <= seconds <= 2**32 - 1:
        # 32-bit timestamp: unsigned seconds only
        fp.write(b"\xd6\xff" +
                 struct.pack(">I", seconds))
    elif 0 <= seconds <= 2**34 - 1:
        # 64-bit timestamp: 30-bit nanoseconds | 34-bit seconds
        value = ((microseconds * 1000) << 34) | seconds
        fp.write(b"\xd7\xff" +
                 struct.pack(">Q", value))
    elif -2**63 <= seconds <= 2**63 - 1:
        # 96-bit timestamp: 32-bit nanoseconds + 64-bit signed seconds.
        # Fix: the range test previously used abs(seconds), which wrongly
        # rejected seconds == -2**63 even though ">q" can represent it.
        fp.write(b"\xc7\x0c\xff" +
                 struct.pack(">I", microseconds * 1000) +
                 struct.pack(">q", seconds))
    else:
        raise UnsupportedTypeException("huge timestamp")
def _pack_array(obj, fp, options):
obj_len = len(obj)
if obj_len < 16:
fp.write(struct.pack("B", 0x90 | obj_len))
elif obj_len < 2**16:
fp.write(b"\xdc" + struct.pack(">H", obj_len))
elif obj_len < 2**32:
fp.write(b"\xdd" + struct.pack(">I", obj_len))
else:
raise UnsupportedTypeException("huge array")
for e in obj:
pack(e, fp, **options)
def _pack_map(obj, fp, options):
obj_len = len(obj)
if obj_len < 16:
fp.write(struct.pack("B", 0x80 | obj_len))
elif obj_len < 2**16:
fp.write(b"\xde" + struct.pack(">H", obj_len))
elif obj_len < 2**32:
fp.write(b"\xdf" + struct.pack(">I", obj_len))
else:
raise UnsupportedTypeException("huge array")
for k, v in obj.items():
pack(k, fp, **options)
pack(v, fp, **options)
########################################
# Pack for Python 2, with 'unicode' type, 'str' type, and 'long' type
def _pack2(obj, fp, **options):
    """
    Serialize a Python object into MessagePack bytes (Python 2 variant:
    dispatches on the 'unicode', 'str' and 'long' types that only exist there).
    Args:
        obj: a Python object
        fp: a .write()-supporting file-like object
    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
                             to a callable that packs an instance of the type
                             into an Ext object
        force_float_precision (str): "single" to force packing floats as
                                     IEEE-754 single-precision floats,
                                     "double" to force packing floats as
                                     IEEE-754 double-precision floats.
    Returns:
        None.
    Raises:
        UnsupportedType(PackException):
            Object type not supported for packing.
    Example:
    >>> f = open('test.bin', 'wb')
    >>> umsgpack.pack({u"compact": True, u"schema": 0}, f)
    >>>
    """
    global compatibility
    # An exact handler-class match takes precedence over the built-in types.
    ext_handlers = options.get("ext_handlers")
    if obj is None:
        _pack_nil(obj, fp, options)
    elif ext_handlers and obj.__class__ in ext_handlers:
        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)
    elif isinstance(obj, bool):
        # bool must be tested before int: bool is a subclass of int.
        _pack_boolean(obj, fp, options)
    elif isinstance(obj, (int, long)):
        _pack_integer(obj, fp, options)
    elif isinstance(obj, float):
        _pack_float(obj, fp, options)
    elif compatibility and isinstance(obj, unicode):
        # Old-spec mode: both text and bytes become "raw" objects.
        _pack_oldspec_raw(bytes(obj), fp, options)
    elif compatibility and isinstance(obj, bytes):
        _pack_oldspec_raw(obj, fp, options)
    elif isinstance(obj, unicode):
        _pack_string(obj, fp, options)
    elif isinstance(obj, str):
        _pack_binary(obj, fp, options)
    elif isinstance(obj, (list, tuple)):
        _pack_array(obj, fp, options)
    elif isinstance(obj, dict):
        _pack_map(obj, fp, options)
    elif isinstance(obj, datetime.datetime):
        _pack_ext_timestamp(obj, fp, options)
    elif isinstance(obj, Ext):
        _pack_ext(obj, fp, options)
    elif ext_handlers:
        # Linear search for superclass
        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
        if t:
            _pack_ext(ext_handlers[t](obj), fp, options)
        else:
            raise UnsupportedTypeException(
                "unsupported type: %s" % str(type(obj)))
    else:
        raise UnsupportedTypeException("unsupported type: %s" % str(type(obj)))
# Pack for Python 3, with unicode 'str' type, 'bytes' type, and no 'long' type
def _pack3(obj, fp, **options):
    """
    Serialize a Python object into MessagePack bytes (Python 3 variant:
    unicode 'str' type, 'bytes' type, no 'long' type).
    Args:
        obj: a Python object
        fp: a .write()-supporting file-like object
    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
                             to a callable that packs an instance of the type
                             into an Ext object
        force_float_precision (str): "single" to force packing floats as
                                     IEEE-754 single-precision floats,
                                     "double" to force packing floats as
                                     IEEE-754 double-precision floats.
    Returns:
        None.
    Raises:
        UnsupportedType(PackException):
            Object type not supported for packing.
    Example:
    >>> f = open('test.bin', 'wb')
    >>> umsgpack.pack({u"compact": True, u"schema": 0}, f)
    >>>
    """
    global compatibility
    # An exact handler-class match takes precedence over the built-in types.
    ext_handlers = options.get("ext_handlers")
    if obj is None:
        _pack_nil(obj, fp, options)
    elif ext_handlers and obj.__class__ in ext_handlers:
        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)
    elif isinstance(obj, bool):
        # bool must be tested before int: bool is a subclass of int.
        _pack_boolean(obj, fp, options)
    elif isinstance(obj, int):
        _pack_integer(obj, fp, options)
    elif isinstance(obj, float):
        _pack_float(obj, fp, options)
    elif compatibility and isinstance(obj, str):
        # Old-spec mode: both text and bytes become "raw" objects.
        _pack_oldspec_raw(obj.encode('utf-8'), fp, options)
    elif compatibility and isinstance(obj, bytes):
        _pack_oldspec_raw(obj, fp, options)
    elif isinstance(obj, str):
        _pack_string(obj, fp, options)
    elif isinstance(obj, bytes):
        _pack_binary(obj, fp, options)
    elif isinstance(obj, (list, tuple)):
        _pack_array(obj, fp, options)
    elif isinstance(obj, dict):
        _pack_map(obj, fp, options)
    elif isinstance(obj, datetime.datetime):
        _pack_ext_timestamp(obj, fp, options)
    elif isinstance(obj, Ext):
        _pack_ext(obj, fp, options)
    elif ext_handlers:
        # Linear search for superclass
        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
        if t:
            _pack_ext(ext_handlers[t](obj), fp, options)
        else:
            raise UnsupportedTypeException(
                "unsupported type: %s" % str(type(obj)))
    else:
        raise UnsupportedTypeException(
            "unsupported type: %s" % str(type(obj)))
def _packb2(obj, **options):
    """
    Serialize a Python object into MessagePack bytes (Python 2 variant).

    Args:
        obj: a Python object

    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
                             to a callable that packs an instance of the type
                             into an Ext object
        force_float_precision (str): "single" or "double" to force the
                                     IEEE-754 precision used for floats

    Returns:
        A 'str' containing serialized MessagePack bytes.

    Raises:
        UnsupportedType(PackException):
            Object type not supported for packing.

    Example:
    >>> umsgpack.packb({u"compact": True, u"schema": 0})
    '\x82\xa7compact\xc3\xa6schema\x00'
    >>>
    """
    buffer = io.BytesIO()
    _pack2(obj, buffer, **options)
    return buffer.getvalue()
def _packb3(obj, **options):
    """
    Serialize a Python object into MessagePack bytes (Python 3 variant).

    Args:
        obj: a Python object

    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
                             to a callable that packs an instance of the type
                             into an Ext object
        force_float_precision (str): "single" or "double" to force the
                                     IEEE-754 precision used for floats

    Returns:
        A 'bytes' containing serialized MessagePack bytes.

    Raises:
        UnsupportedType(PackException):
            Object type not supported for packing.

    Example:
    >>> umsgpack.packb({u"compact": True, u"schema": 0})
    b'\x82\xa7compact\xc3\xa6schema\x00'
    >>>
    """
    buffer = io.BytesIO()
    _pack3(obj, buffer, **options)
    return buffer.getvalue()
#############################################################################
# Unpacking
#############################################################################
def _read_except(fp, n):
if n == 0:
return b""
data = fp.read(n)
if len(data) == 0:
raise InsufficientDataException()
while len(data) < n:
chunk = fp.read(n - len(data))
if len(chunk) == 0:
raise InsufficientDataException()
data += chunk
return data
def _unpack_integer(code, fp, options):
if (ord(code) & 0xe0) == 0xe0:
return struct.unpack("b", code)[0]
elif code == b'\xd0':
return struct.unpack("b", _read_except(fp, 1))[0]
elif code == b'\xd1':
return struct.unpack(">h", _read_except(fp, 2))[0]
elif code == b'\xd2':
return struct.unpack(">i", _read_except(fp, 4))[0]
elif code == b'\xd3':
return struct.unpack(">q", _read_except(fp, 8))[0]
elif (ord(code) & 0x80) == 0x00:
return struct.unpack("B", code)[0]
elif code == b'\xcc':
return struct.unpack("B", _read_except(fp, 1))[0]
elif code == b'\xcd':
return struct.unpack(">H", _read_except(fp, 2))[0]
elif code == b'\xce':
return struct.unpack(">I", _read_except(fp, 4))[0]
elif code == b'\xcf':
return struct.unpack(">Q", _read_except(fp, 8))[0]
raise Exception("logic error, not int: 0x%02x" % ord(code))
def _unpack_reserved(code, fp, options):
    """Handle the reserved format code 0xc1 by raising ReservedCodeException."""
    if code == b'\xc1':
        raise ReservedCodeException(
            "encountered reserved code: 0x%02x" % ord(code))
    raise Exception(
        "logic error, not reserved code: 0x%02x" % ord(code))
def _unpack_nil(code, fp, options):
if code == b'\xc0':
return None
raise Exception("logic error, not nil: 0x%02x" % ord(code))
def _unpack_boolean(code, fp, options):
if code == b'\xc2':
return False
elif code == b'\xc3':
return True
raise Exception("logic error, not boolean: 0x%02x" % ord(code))
def _unpack_float(code, fp, options):
    """Deserialize a MessagePack float32 (0xca) or float64 (0xcb)."""
    if code == b'\xca':
        fmt, size = ">f", 4
    elif code == b'\xcb':
        fmt, size = ">d", 8
    else:
        raise Exception("logic error, not float: 0x%02x" % ord(code))
    return struct.unpack(fmt, _read_except(fp, size))[0]
def _unpack_string(code, fp, options):
    """Deserialize a MessagePack string (fixstr / str8 / str16 / str32).

    Returns a unicode string normally, raw bytes in compatibility mode, or
    an InvalidString instance when allow_invalid_utf8 is set and the payload
    is not valid UTF-8.
    """
    first = ord(code)
    if first & 0xe0 == 0xa0:
        length = first & ~0xe0
    elif code == b'\xd9':
        length = struct.unpack("B", _read_except(fp, 1))[0]
    elif code == b'\xda':
        length = struct.unpack(">H", _read_except(fp, 2))[0]
    elif code == b'\xdb':
        length = struct.unpack(">I", _read_except(fp, 4))[0]
    else:
        raise Exception("logic error, not string: 0x%02x" % first)

    # Always return raw bytes in compatibility mode
    global compatibility
    if compatibility:
        return _read_except(fp, length)

    data = _read_except(fp, length)
    try:
        return bytes.decode(data, 'utf-8')
    except UnicodeDecodeError:
        if options.get("allow_invalid_utf8"):
            return InvalidString(data)
        raise InvalidStringException("unpacked string is invalid utf-8")
def _unpack_binary(code, fp, options):
    """Deserialize a MessagePack bin object (bin8/bin16/bin32) into bytes."""
    headers = {b'\xc4': ("B", 1), b'\xc5': (">H", 2), b'\xc6': (">I", 4)}
    if code not in headers:
        raise Exception("logic error, not binary: 0x%02x" % ord(code))
    fmt, width = headers[code]
    length = struct.unpack(fmt, _read_except(fp, width))[0]
    return _read_except(fp, length)
def _unpack_ext(code, fp, options):
    """Deserialize a MessagePack ext object.

    Applies a matching ext_handlers callback when configured, converts the
    built-in timestamp extension (type -1) to a datetime, and otherwise
    returns an Ext instance.
    """
    fixed_lengths = {b'\xd4': 1, b'\xd5': 2, b'\xd6': 4, b'\xd7': 8, b'\xd8': 16}
    if code in fixed_lengths:
        length = fixed_lengths[code]
    elif code == b'\xc7':
        length = struct.unpack("B", _read_except(fp, 1))[0]
    elif code == b'\xc8':
        length = struct.unpack(">H", _read_except(fp, 2))[0]
    elif code == b'\xc9':
        length = struct.unpack(">I", _read_except(fp, 4))[0]
    else:
        raise Exception("logic error, not ext: 0x%02x" % ord(code))

    ext_type = struct.unpack("b", _read_except(fp, 1))[0]
    ext_data = _read_except(fp, length)

    # Create extension object
    ext = Ext(ext_type, ext_data)

    # Application-provided handler takes precedence
    ext_handlers = options.get("ext_handlers")
    if ext_handlers and ext.type in ext_handlers:
        return ext_handlers[ext.type](ext)

    # Built-in timestamp extension
    if ext.type == -1:
        return _unpack_ext_timestamp(ext, options)

    return ext
def _unpack_ext_timestamp(ext, options):
    """Convert a timestamp ext (type -1) payload into an aware datetime.

    Supports the three spec formats: 32-bit (unsigned seconds), 64-bit
    (30-bit nanoseconds | 34-bit seconds) and 96-bit (32-bit nanoseconds +
    64-bit signed seconds). Nanoseconds are truncated to microseconds,
    datetime's finest resolution.
    """
    obj_len = len(ext.data)
    if obj_len == 4:
        # 32-bit timestamp
        seconds = struct.unpack(">I", ext.data)[0]
        microseconds = 0
    elif obj_len == 8:
        # 64-bit timestamp
        value = struct.unpack(">Q", ext.data)[0]
        # low 34 bits are seconds, high 30 bits are nanoseconds
        seconds = value & 0x3ffffffff
        microseconds = (value >> 34) // 1000
    elif obj_len == 12:
        # 96-bit timestamp
        seconds = struct.unpack(">q", ext.data[4:12])[0]
        microseconds = struct.unpack(">I", ext.data[0:4])[0] // 1000
    else:
        raise UnsupportedTimestampException(
            "unsupported timestamp with data length %d" % len(ext.data))
    return _epoch + datetime.timedelta(seconds=seconds,
                                       microseconds=microseconds)
def _unpack_array(code, fp, options):
    """Deserialize a MessagePack array (fixarray/array16/array32) into a list,
    unpacking each element recursively."""
    first = ord(code)
    if first & 0xf0 == 0x90:
        length = first & ~0xf0
    elif code == b'\xdc':
        length = struct.unpack(">H", _read_except(fp, 2))[0]
    elif code == b'\xdd':
        length = struct.unpack(">I", _read_except(fp, 4))[0]
    else:
        raise Exception("logic error, not array: 0x%02x" % first)
    items = []
    for _ in xrange(length):
        items.append(_unpack(fp, options))
    return items
def _deep_list_to_tuple(obj):
if isinstance(obj, list):
return tuple([_deep_list_to_tuple(e) for e in obj])
return obj
def _unpack_map(code, fp, options):
    """Deserialize a MessagePack map (fixmap/map16/map32) into a dict
    (an OrderedDict when the 'use_ordered_dict' option is set).

    Raises:
        UnhashableKeyException: if a key cannot be used as a dict key
        DuplicateKeyException: if the same key appears twice
    """
    # Fix: collections.Hashable was removed in Python 3.10; resolve the ABC
    # from collections.abc on Python 3 and fall back for Python 2.
    try:
        from collections.abc import Hashable as _hashable
    except ImportError:  # Python 2
        from collections import Hashable as _hashable

    if (ord(code) & 0xf0) == 0x80:
        length = (ord(code) & ~0xf0)
    elif code == b'\xde':
        length = struct.unpack(">H", _read_except(fp, 2))[0]
    elif code == b'\xdf':
        length = struct.unpack(">I", _read_except(fp, 4))[0]
    else:
        raise Exception("logic error, not map: 0x%02x" % ord(code))

    d = {} if not options.get('use_ordered_dict') \
        else collections.OrderedDict()
    for _ in xrange(length):
        # Unpack key
        k = _unpack(fp, options)

        if isinstance(k, list):
            # Attempt to convert list into a hashable tuple
            k = _deep_list_to_tuple(k)
        elif not isinstance(k, _hashable):
            raise UnhashableKeyException(
                "encountered unhashable key: %s, %s" % (str(k), str(type(k))))
        elif k in d:
            raise DuplicateKeyException(
                "encountered duplicate key: %s, %s" % (str(k), str(type(k))))

        # Unpack value
        v = _unpack(fp, options)

        # A tuple key may still contain unhashable members; the insert is the
        # final arbiter.
        try:
            d[k] = v
        except TypeError:
            raise UnhashableKeyException(
                "encountered unhashable key: %s" % str(k))
    return d
def _unpack(fp, options):
    """Read one type byte from `fp` and dispatch to the matching unpacker."""
    opcode = _read_except(fp, 1)
    handler = _unpack_dispatch_table[opcode]
    return handler(opcode, fp, options)
########################################
def _unpack2(fp, **options):
    """
    Deserialize MessagePack bytes into a Python object (Python 2 variant;
    bound to the public names `unpack`/`load` by `__init`).
    Args:
        fp: a .read()-supporting file-like object
    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext
                             type to a callable that unpacks an instance of
                             Ext into an object
        use_ordered_dict (bool): unpack maps into OrderedDict, instead of
                                 unordered dict (default False)
        allow_invalid_utf8 (bool): unpack invalid strings into instances of
                                   InvalidString, for access to the bytes
                                   (default False)
    Returns:
        A Python object.
    Raises:
        InsufficientDataException(UnpackException):
            Insufficient data to unpack the serialized object.
        InvalidStringException(UnpackException):
            Invalid UTF-8 string encountered during unpacking.
        UnsupportedTimestampException(UnpackException):
            Unsupported timestamp format encountered during unpacking.
        ReservedCodeException(UnpackException):
            Reserved code encountered during unpacking.
        UnhashableKeyException(UnpackException):
            Unhashable key encountered during map unpacking.
            The serialized map cannot be deserialized into a Python dictionary.
        DuplicateKeyException(UnpackException):
            Duplicate key encountered during map unpacking.
    Example:
    >>> f = open('test.bin', 'rb')
    >>> umsgpack.unpackb(f)
    {u'compact': True, u'schema': 0}
    >>>
    """
    return _unpack(fp, options)
def _unpack3(fp, **options):
    """
    Deserialize MessagePack bytes into a Python object (Python 3 variant;
    bound to the public names `unpack`/`load` by `__init`).
    Args:
        fp: a .read()-supporting file-like object
    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext
                             type to a callable that unpacks an instance of
                             Ext into an object
        use_ordered_dict (bool): unpack maps into OrderedDict, instead of
                                 unordered dict (default False)
        allow_invalid_utf8 (bool): unpack invalid strings into instances of
                                   InvalidString, for access to the bytes
                                   (default False)
    Returns:
        A Python object.
    Raises:
        InsufficientDataException(UnpackException):
            Insufficient data to unpack the serialized object.
        InvalidStringException(UnpackException):
            Invalid UTF-8 string encountered during unpacking.
        UnsupportedTimestampException(UnpackException):
            Unsupported timestamp format encountered during unpacking.
        ReservedCodeException(UnpackException):
            Reserved code encountered during unpacking.
        UnhashableKeyException(UnpackException):
            Unhashable key encountered during map unpacking.
            The serialized map cannot be deserialized into a Python dictionary.
        DuplicateKeyException(UnpackException):
            Duplicate key encountered during map unpacking.
    Example:
    >>> f = open('test.bin', 'rb')
    >>> umsgpack.unpackb(f)
    {'compact': True, 'schema': 0}
    >>>
    """
    return _unpack(fp, options)
# For Python 2, expects a str object
def _unpackb2(s, **options):
    """
    Deserialize MessagePack bytes into a Python object (Python 2 variant;
    bound to the public names `unpackb`/`loads` by `__init`).

    Args:
        s: a 'str' or 'bytearray' containing serialized MessagePack bytes

    Kwargs:
        ext_handlers (dict): maps integer Ext type to a callable that unpacks
                             an instance of Ext into an object
        use_ordered_dict (bool): unpack maps into OrderedDict instead of an
                                 unordered dict (default False)
        allow_invalid_utf8 (bool): unpack invalid strings into instances of
                                   InvalidString (default False)

    Returns:
        A Python object.

    Raises:
        TypeError: Packed data type is neither 'str' nor 'bytearray'.
        UnpackException subclasses: InsufficientDataException,
            InvalidStringException, UnsupportedTimestampException,
            ReservedCodeException, UnhashableKeyException,
            DuplicateKeyException.

    Example:
    >>> umsgpack.unpackb(b'\x82\xa7compact\xc3\xa6schema\x00')
    {u'compact': True, u'schema': 0}
    >>>
    """
    if isinstance(s, (str, bytearray)):
        return _unpack(io.BytesIO(s), options)
    raise TypeError("packed data must be type 'str' or 'bytearray'")
# For Python 3, expects a bytes object
def _unpackb3(s, **options):
    """
    Deserialize MessagePack bytes into a Python object (Python 3 variant;
    bound to the public names `unpackb`/`loads` by `__init`).

    Args:
        s: a 'bytes' or 'bytearray' containing serialized MessagePack bytes

    Kwargs:
        ext_handlers (dict): maps integer Ext type to a callable that unpacks
                             an instance of Ext into an object
        use_ordered_dict (bool): unpack maps into OrderedDict instead of an
                                 unordered dict (default False)
        allow_invalid_utf8 (bool): unpack invalid strings into instances of
                                   InvalidString (default False)

    Returns:
        A Python object.

    Raises:
        TypeError: Packed data type is neither 'bytes' nor 'bytearray'.
        UnpackException subclasses: InsufficientDataException,
            InvalidStringException, UnsupportedTimestampException,
            ReservedCodeException, UnhashableKeyException,
            DuplicateKeyException.

    Example:
    >>> umsgpack.unpackb(b'\x82\xa7compact\xc3\xa6schema\x00')
    {'compact': True, 'schema': 0}
    >>>
    """
    if isinstance(s, (bytes, bytearray)):
        return _unpack(io.BytesIO(s), options)
    raise TypeError("packed data must be type 'bytes' or 'bytearray'")
#############################################################################
# Module Initialization
#############################################################################
def __init():
    """Initialize module state once at import time.

    Selects the Python 2 vs. 3 implementations for the public API names,
    computes an aware UNIX epoch, auto-detects float precision, and builds
    the type-byte -> unpacker dispatch table.
    """
    global pack
    global packb
    global unpack
    global unpackb
    global dump
    global dumps
    global load
    global loads
    global compatibility
    global _epoch
    global _utc_tzinfo
    global _float_precision
    global _unpack_dispatch_table
    global xrange
    # Compatibility mode for handling strings/bytes with the old specification
    compatibility = False
    if sys.version_info[0] == 3:
        _utc_tzinfo = datetime.timezone.utc
    else:
        # Python 2 has no datetime.timezone; provide a minimal UTC tzinfo.
        class UTC(datetime.tzinfo):
            ZERO = datetime.timedelta(0)
            def utcoffset(self, dt):
                return UTC.ZERO
            def tzname(self, dt):
                return "UTC"
            def dst(self, dt):
                return UTC.ZERO
        _utc_tzinfo = UTC()
    # Calculate an aware epoch datetime (used for timestamp ext decoding)
    _epoch = datetime.datetime(1970, 1, 1, tzinfo=_utc_tzinfo)
    # Auto-detect system float precision
    if sys.float_info.mant_dig == 53:
        _float_precision = "double"
    else:
        _float_precision = "single"
    # Map packb and unpackb to the appropriate version
    if sys.version_info[0] == 3:
        pack = _pack3
        packb = _packb3
        dump = _pack3
        dumps = _packb3
        unpack = _unpack3
        unpackb = _unpackb3
        load = _unpack3
        loads = _unpackb3
        # Python 3 has no xrange; alias to range for the unpack helpers.
        xrange = range
    else:
        pack = _pack2
        packb = _packb2
        dump = _pack2
        dumps = _packb2
        unpack = _unpack2
        unpackb = _unpackb2
        load = _unpack2
        loads = _unpackb2
    # Build a dispatch table for fast lookup of unpacking function,
    # keyed by the single MessagePack type byte.
    _unpack_dispatch_table = {}
    # Positive fixint (0x00 - 0x7f)
    for code in range(0, 0x7f + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer
    # Fixmap (0x80 - 0x8f)
    for code in range(0x80, 0x8f + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_map
    # Fixarray (0x90 - 0x9f)
    for code in range(0x90, 0x9f + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_array
    # Fixstr (0xa0 - 0xbf)
    for code in range(0xa0, 0xbf + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_string
    # Nil
    _unpack_dispatch_table[b'\xc0'] = _unpack_nil
    # Reserved
    _unpack_dispatch_table[b'\xc1'] = _unpack_reserved
    # Boolean
    _unpack_dispatch_table[b'\xc2'] = _unpack_boolean
    _unpack_dispatch_table[b'\xc3'] = _unpack_boolean
    # Bin 8/16/32
    for code in range(0xc4, 0xc6 + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_binary
    # Ext 8/16/32
    for code in range(0xc7, 0xc9 + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_ext
    # Float 32/64
    _unpack_dispatch_table[b'\xca'] = _unpack_float
    _unpack_dispatch_table[b'\xcb'] = _unpack_float
    # Uint 8/16/32/64
    for code in range(0xcc, 0xcf + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer
    # Int 8/16/32/64
    for code in range(0xd0, 0xd3 + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer
    # Fixext 1/2/4/8/16
    for code in range(0xd4, 0xd8 + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_ext
    # Str 8/16/32
    for code in range(0xd9, 0xdb + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_string
    # Array 16/32
    _unpack_dispatch_table[b'\xdc'] = _unpack_array
    _unpack_dispatch_table[b'\xdd'] = _unpack_array
    # Map 16/32
    _unpack_dispatch_table[b'\xde'] = _unpack_map
    _unpack_dispatch_table[b'\xdf'] = _unpack_map
    # Negative fixint (0xe0 - 0xff)
    for code in range(0xe0, 0xff + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer
# Initialize module state at import time.
__init()
| 33.133959 | 79 | 0.595576 |
52b1f41cadf3c452cb66dda411edb0199e475466 | 2,349 | py | Python | som.py | arnav1598/som0 | f24ae597bfd0577f560615f869f2edb5f3b07119 | [
"CC-BY-3.0"
] | 5 | 2018-11-10T11:33:02.000Z | 2018-12-15T20:44:30.000Z | som.py | arnav1598/som0 | f24ae597bfd0577f560615f869f2edb5f3b07119 | [
"CC-BY-3.0"
] | null | null | null | som.py | arnav1598/som0 | f24ae597bfd0577f560615f869f2edb5f3b07119 | [
"CC-BY-3.0"
] | null | null | null | # Self Organizing Map
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Untitled form.csv')
X = dataset.iloc[:, 3:].values
y = dataset.iloc[:, 1].values
#Encoding Categorical Data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X0 = LabelEncoder()
X[:, 0] = labelencoder_X0.fit_transform(X[:, 0])
labelencoder_X1 = LabelEncoder()
X[:, 1] = labelencoder_X1.fit_transform(X[:, 1])
labelencoder_X2 = LabelEncoder()
X[:, 2] = labelencoder_X2.fit_transform(X[:, 2])
labelencoder_X3 = LabelEncoder()
X[:, 3] = labelencoder_X3.fit_transform(X[:, 3])
labelencoder_X4 = LabelEncoder()
X[:, 4] = labelencoder_X4.fit_transform(X[:, 4])
labelencoder_X5 = LabelEncoder()
X[:, 5] = labelencoder_X5.fit_transform(X[:, 5])
labelencoder_X6 = LabelEncoder()
X[:, 6] = labelencoder_X6.fit_transform(X[:, 6])
labelencoder_X7 = LabelEncoder()
X[:, 7] = labelencoder_X7.fit_transform(X[:, 7])
onehotencoder = OneHotEncoder(categorical_features = [0, 1, 2, 3, 4, 5, 6, 7])
X = onehotencoder.fit_transform(X).toarray()
X=X[:, [0, 2, 4, 6, 8, 10, 12, 14, 15, 17, 18, 19, 20, 21, 22, 23]]
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
X = sc.fit_transform(X)
# Training the SOM
map_dimx=20
map_dimy=15
from minisom import MiniSom
som = MiniSom(x = map_dimx, y = map_dimy, input_len = 16, sigma = 1.0, learning_rate = 0.3)
som.random_weights_init(X)
som.train_random(data = X, num_iteration = 1000)
#Creating the map
from collections import defaultdict
mappings = defaultdict(list)
for i, x in enumerate(X):
mappings[som.winner(x)].append(y[i])
#Visualisation of the mapping
fact=2.
plt.figure(figsize=(map_dimx*fact, map_dimy*fact))
for i, x in enumerate(X):
winnin_position = som.winner(x)
plt.text(x=winnin_position[0]*fact+fact/2,
y=winnin_position[1]*fact+np.random.rand()*(fact-0.1),
horizontalalignment='center',
fontsize='large',
s=y[i],
color=(winnin_position[0]/map_dimx, 1-(winnin_position[0]/map_dimx), winnin_position[1]/map_dimy, 1))
plt.xlim([0, map_dimx*fact])
plt.ylim([0, map_dimy*fact])
plt.axis('off')
plt.plot()
| 34.043478 | 115 | 0.684121 |
d6b9d91db56ca222979fb0b61f438d0b5ecb51d0 | 24,036 | py | Python | rasa/model.py | City-of-Turku/PaohRasaForBotfront | ea0eb32405be7a7359d1db717f30e090aea63f52 | [
"Apache-2.0",
"MIT"
] | null | null | null | rasa/model.py | City-of-Turku/PaohRasaForBotfront | ea0eb32405be7a7359d1db717f30e090aea63f52 | [
"Apache-2.0",
"MIT"
] | null | null | null | rasa/model.py | City-of-Turku/PaohRasaForBotfront | ea0eb32405be7a7359d1db717f30e090aea63f52 | [
"Apache-2.0",
"MIT"
] | null | null | null | import copy
import glob
import hashlib
import logging
import os
import shutil
from subprocess import CalledProcessError, DEVNULL, check_output # skipcq:BAN-B404
import tempfile
import typing
from packaging import version
from pathlib import Path
from typing import Any, Text, Tuple, Union, Optional, List, Dict, NamedTuple
from packaging import version
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
import rasa.shared.utils.io
import rasa.utils.io
from rasa.cli.utils import create_output_path
from rasa.shared.utils.cli import print_success
from rasa.shared.constants import (
CONFIG_KEYS_CORE,
CONFIG_KEYS_NLU,
CONFIG_KEYS,
DEFAULT_DOMAIN_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_CORE_SUBDIRECTORY_NAME,
DEFAULT_NLU_SUBDIRECTORY_NAME,
)
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
from rasa.exceptions import ModelNotFound
from rasa.utils.common import TempDirectoryPath
if typing.TYPE_CHECKING:
from rasa.shared.importers.importer import TrainingDataImporter
logger = logging.getLogger(__name__)
# Type alias for the fingerprint: maps a fingerprint key to the hashed value
# (or per-language dict / timestamp) recorded for a trained model.
Fingerprint = Dict[Text, Union[Optional[Text], List[Text], int, float]]
# Name of the JSON file the fingerprint is persisted to inside a model archive.
FINGERPRINT_FILE_PATH = "fingerprint.json"
# Keys used inside the fingerprint dictionary.
FINGERPRINT_CONFIG_KEY = "config"
FINGERPRINT_CONFIG_CORE_KEY = "core-config"
FINGERPRINT_CONFIG_NLU_KEY = "nlu-config"
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY = "config-without-epochs"
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain"
FINGERPRINT_NLG_KEY = "nlg"
FINGERPRINT_RASA_VERSION_KEY = "version"
FINGERPRINT_STORIES_KEY = "stories"
FINGERPRINT_NLU_DATA_KEY = "messages"
FINGERPRINT_NLU_LABELS_KEY = "nlu_labels"
FINGERPRINT_PROJECT = "project"
# Training timestamp (epoch seconds), see `model_fingerprint`.
FINGERPRINT_TRAINED_AT_KEY = "trained_at"
class Section(NamedTuple):
    """Specifies which fingerprint keys decide whether this sub-model is retrained."""
    # Human-readable name used in log messages (e.g. "Core model").
    name: Text
    # Fingerprint dictionary keys that are relevant for this section.
    relevant_keys: List[Text]
# Fingerprint keys whose change forces a Core model retraining.
SECTION_CORE = Section(
    name="Core model",
    relevant_keys=[
        FINGERPRINT_CONFIG_KEY,
        FINGERPRINT_CONFIG_CORE_KEY,
        FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY,
        FINGERPRINT_STORIES_KEY,
        FINGERPRINT_RASA_VERSION_KEY,
    ],
)
# Fingerprint keys whose change forces an NLU model retraining.
SECTION_NLU = Section(
    name="NLU model",
    relevant_keys=[
        FINGERPRINT_CONFIG_KEY,
        FINGERPRINT_CONFIG_NLU_KEY,
        FINGERPRINT_NLU_DATA_KEY,
        FINGERPRINT_RASA_VERSION_KEY,
    ],
)
# Fingerprint key whose change only requires updating the domain responses.
SECTION_NLG = Section(name="NLG responses", relevant_keys=[FINGERPRINT_NLG_KEY])
class FingerprintComparisonResult:
    """Container for the results of a fingerprint comparison.

    Note: `nlu` may also hold a list of language codes to retrain (set by the
    Botfront-modified `should_retrain`), so the flags are evaluated for
    truthiness rather than strict booleans.
    """

    def __init__(
        self,
        nlu: bool = True,
        core: bool = True,
        nlg: bool = True,
        force_training: bool = False,
    ):
        """Creates a `FingerprintComparisonResult` instance.

        Args:
            nlu: `True` if the NLU model should be retrained.
            core: `True` if the Core model should be retrained.
            nlg: `True` if the responses in the domain should be updated.
            force_training: `True` if a training of all parts is forced.
        """
        self.nlu = nlu
        self.core = core
        self.nlg = nlg
        self.force_training = force_training

    def is_training_required(self) -> bool:
        """Check if anything has to be retrained."""
        return any((self.nlg, self.nlu, self.core, self.force_training))

    def should_retrain_core(self) -> bool:
        """Check if the Core model has to be updated."""
        if self.force_training:
            return True
        return self.core

    def should_retrain_nlg(self) -> bool:
        """Check if the responses have to be updated."""
        if self.should_retrain_core():
            return True
        return self.nlg

    def should_retrain_nlu(self) -> bool:
        """Check if the NLU model has to be updated."""
        if self.force_training:
            return True
        return self.nlu
def get_local_model(model_path: Text = DEFAULT_MODELS_PATH) -> Text:
    """Returns verified path to local model archive.

    Args:
        model_path: Path to the zipped model. If it's a directory, the latest
                    trained model is returned.

    Returns:
        Path to the zipped model. If it's a directory, the latest
        trained model is returned.

    Raises:
        ModelNotFound Exception: When no model could be found at the provided path.

    """
    if not model_path:
        raise ModelNotFound("No path specified.")
    elif not os.path.exists(model_path):
        raise ModelNotFound(f"No file or directory at '{model_path}'.")

    if os.path.isdir(model_path):
        # Keep the directory path for the error message; the previous
        # implementation overwrote `model_path` with `None` first, producing
        # "Could not find any Rasa model files in 'None'".
        latest_model = get_latest_model(model_path)
        if not latest_model:
            raise ModelNotFound(
                f"Could not find any Rasa model files in '{model_path}'."
            )
        model_path = latest_model
    elif not model_path.endswith(".tar.gz"):
        raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.")

    return model_path
def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath:
    """Gets a model and unpacks it.

    Args:
        model_path: Path to the zipped model. If it's a directory, the latest
                    trained model is returned.

    Returns:
        Path to the unpacked model.

    Raises:
        ModelNotFound Exception: When no model could be found at the provided path.

    """
    validated_path = get_local_model(model_path)

    try:
        display_path = os.path.relpath(validated_path)
    except ValueError:
        # relpath can fail (e.g. across Windows drives); fall back to the
        # path as given.
        display_path = validated_path

    logger.info(f"Loading model {display_path}...")
    return unpack_model(validated_path)
def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]:
    """Get the latest model from a path.

    Args:
        model_path: Path to a directory containing zipped models.

    Returns:
        Path to latest model in the given directory, or `None` if there is
        no `*.tar.gz` archive.
    """
    if not os.path.exists(model_path) or os.path.isfile(model_path):
        # A file (or missing path) was given; look in its parent directory.
        model_path = os.path.dirname(model_path)

    candidates = glob.glob(os.path.join(model_path, "*.tar.gz"))
    if not candidates:
        return None

    # "Latest" is decided by filesystem creation/change time.
    return max(candidates, key=os.path.getctime)
def unpack_model(
    model_file: Text, working_directory: Optional[Union[Path, Text]] = None
) -> TempDirectoryPath:
    """Unpack a zipped Rasa model.

    Args:
        model_file: Path to zipped model.
        working_directory: Location where the model should be unpacked to.
                           If `None` a temporary directory will be created.

    Returns:
        Path to unpacked Rasa model.

    Raises:
        tarfile.TarError / ValueError: if extraction fails (re-raised after
            logging).
    """
    import tarfile

    if working_directory is None:
        working_directory = tempfile.mkdtemp()

    # All files are in a subdirectory.
    try:
        with tarfile.open(model_file, mode="r:gz") as tar:
            # NOTE(review): `extractall` without a member filter is vulnerable
            # to path traversal for untrusted archives (CVE-2007-4559);
            # Python 3.12's `filter="data"` mitigates this — confirm archives
            # here are always locally produced.
            tar.extractall(working_directory)
        logger.debug(f"Extracted model to '{working_directory}'.")
    except (tarfile.TarError, ValueError) as e:
        logger.error(f"Failed to extract model at {model_file}. Error: {e}")
        raise

    return TempDirectoryPath(working_directory)
def get_model_subdirectories(
    unpacked_model_path: Text,
) -> Tuple[Optional[Text], Optional[Text]]:
    """Return paths for Core and NLU model directories, if they exist.
    If neither directories exist, a `ModelNotFound` exception is raised.

    Note: the Botfront modification below returns a *dict* mapping language
    code -> NLU model path as the second element, not a single path; the
    annotated return type is kept for compatibility but no longer accurate.

    Args:
        unpacked_model_path: Path to unpacked Rasa model.

    Returns:
        Tuple (path to Core subdirectory if it exists or `None` otherwise,
        dict of language -> NLU subdirectory path, `None` for languages whose
        model directory is missing).
    """
    core_path = os.path.join(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
    # bf mod
    # nlu_path = os.path.join(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME)
    # NLU models live in per-language directories named "nlu-<lang>".
    nlu_models = list(
        filter(lambda d: d.startswith("nlu"), os.listdir(unpacked_model_path))
    )
    models_fingerprint = fingerprint_from_path(unpacked_model_path)
    # Pre-seed with every language the fingerprint declares so missing model
    # directories still show up (as None).
    nlu_paths = {
        lang: None
        for lang in models_fingerprint.get(FINGERPRINT_CONFIG_NLU_KEY, {}).keys()
    }
    try:
        for model in nlu_models:
            # Directory naming convention: "nlu-<lang>".
            lang = model.split("-")[1]
            nlu_paths[lang] = os.path.join(unpacked_model_path, model)
    except Exception:
        # Best-effort: malformed directory names are ignored.
        pass
    if not os.path.isdir(core_path):
        core_path = None
    if not core_path and not len(nlu_paths):
        raise ModelNotFound(
            "No NLU or Core data for unpacked model at: '{}'.".format(
                unpacked_model_path
            )
        )
    return core_path, nlu_paths
# /bf mod
def create_package_rasa(
    training_directory: Text,
    output_filename: Text,
    fingerprint: Optional[Fingerprint] = None,
) -> Text:
    """Create a zipped Rasa model from trained model files.

    The training directory is deleted after it has been archived.

    Args:
        training_directory: Path to the directory which contains the trained
                            model files.
        output_filename: Name of the zipped model file to be created.
        fingerprint: A unique fingerprint to identify the model version.

    Returns:
        Path to zipped model.

    """
    import tarfile

    if fingerprint:
        persist_fingerprint(training_directory, fingerprint)

    output_directory = os.path.dirname(output_filename)
    # `exist_ok=True` avoids the check-then-create race of the previous
    # implementation; the truthiness guard avoids `os.makedirs("")`, which
    # raises when `output_filename` has no directory component.
    if output_directory:
        os.makedirs(output_directory, exist_ok=True)

    with tarfile.open(output_filename, "w:gz") as tar:
        for elem in os.scandir(training_directory):
            tar.add(elem.path, arcname=elem.name)

    shutil.rmtree(training_directory)
    return output_filename
def project_fingerprint() -> Optional[Text]:
    """Create a hash for the project in the current working directory.

    The hash is the SHA-256 of the git remote URL of `origin`.

    Returns:
        Project hash, or `None` if the remote URL cannot be determined
        (not a git repo, no `origin`, or git is unavailable).
    """
    try:
        remote_url = check_output(  # skipcq:BAN-B607,BAN-B603
            ["git", "remote", "get-url", "origin"], stderr=DEVNULL
        )
    except (CalledProcessError, OSError):
        return None
    return hashlib.sha256(remote_url).hexdigest()
async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprint:
    """Create a model fingerprint from its used configuration and training data.

    The Botfront modification computes per-language sub-fingerprints for the
    NLU config and NLU data (dicts keyed by language code) instead of single
    hashes.

    Args:
        file_importer: File importer which provides the training data and model config.

    Returns:
        The fingerprint.

    """
    import time

    # bf mod
    config = await file_importer.get_config()
    domain = await file_importer.get_domain()
    stories = await file_importer.get_stories()
    # stories_hash = await file_importer.get_stories_hash()
    nlu_data = await file_importer.get_nlu_data()
    nlu_config = await file_importer.get_nlu_config()

    responses = domain.responses

    # Do a copy of the domain to not change the actual domain (shallow is enough)
    domain = copy.copy(domain)
    # don't include the response texts in the fingerprint.
    # Their fingerprint is separate.
    domain.responses = {}

    return {
        FINGERPRINT_CONFIG_KEY: _get_fingerprint_of_config(
            config, exclude_keys=CONFIG_KEYS
        ),
        FINGERPRINT_CONFIG_CORE_KEY: _get_fingerprint_of_config(
            config, include_keys=CONFIG_KEYS_CORE
        ),
        # Per-language NLU config fingerprints; empty string when there is no
        # NLU config at all.
        FINGERPRINT_CONFIG_NLU_KEY: {
            lang: _get_fingerprint_of_config(config, include_keys=CONFIG_KEYS_NLU)
            for (lang, config) in nlu_config.items()
        }
        if len(nlu_config)
        else "",
        FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY: _get_fingerprint_of_config_without_epochs(
            config
        ),
        FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: domain.fingerprint(),
        FINGERPRINT_NLG_KEY: rasa.shared.utils.io.deep_container_fingerprint(responses),
        FINGERPRINT_PROJECT: project_fingerprint(),
        # Per-language NLU data and label fingerprints.
        FINGERPRINT_NLU_DATA_KEY: {
            lang: nlu_data[lang].fingerprint() for lang in nlu_data
        },
        FINGERPRINT_NLU_LABELS_KEY: {
            lang: nlu_data[lang].label_fingerprint() for lang in nlu_data
        },
        FINGERPRINT_STORIES_KEY: stories.fingerprint(),  # stories_hash,
        FINGERPRINT_TRAINED_AT_KEY: time.time(),
        FINGERPRINT_RASA_VERSION_KEY: rasa.__version__,
    }
# /bf mod
def _get_fingerprint_of_config(
    config: Optional[Dict[Text, Any]],
    include_keys: Optional[List[Text]] = None,
    exclude_keys: Optional[List[Text]] = None,
) -> Text:
    """Fingerprint the sub-config selected by `include_keys`/`exclude_keys`.

    Args:
        config: The model configuration (may be `None` or empty).
        include_keys: If given, only these keys are fingerprinted.
        exclude_keys: Otherwise, all keys except these are fingerprinted.

    Returns:
        Fingerprint of the selected sub-config, or "" for an empty config.
    """
    if not config:
        return ""

    # Default `exclude_keys` to an empty list: the previous implementation
    # raised `TypeError: argument of type 'NoneType' is not iterable` when
    # neither keyword argument was supplied.
    keys = include_keys or [k for k in config if k not in (exclude_keys or [])]

    sub_config = {k: config[k] for k in keys if k in config}

    return rasa.shared.utils.io.deep_container_fingerprint(sub_config)
def _get_fingerprint_of_config_without_epochs(
    config: Optional[Dict[Text, Any]],
) -> Text:
    """Fingerprint the config with all `epochs` entries removed.

    Used to decide whether incremental (finetune) training is possible:
    changing only the number of epochs should not invalidate the model.
    """
    if not config:
        return ""

    stripped_config = copy.deepcopy(config)

    for section in ("pipeline", "policies"):
        components = stripped_config.get(section) or []
        for component in components:
            component.pop("epochs", None)

    return rasa.shared.utils.io.deep_container_fingerprint(stripped_config)
def fingerprint_from_path(model_path: Text) -> Fingerprint:
    """Load a persisted fingerprint.

    Args:
        model_path: Path to directory containing the fingerprint.

    Returns:
        The fingerprint or an empty dict if no fingerprint was found.
    """
    # Guard clauses: missing directory or missing fingerprint file both yield
    # an empty fingerprint.
    if not model_path or not os.path.exists(model_path):
        return {}

    fingerprint_path = os.path.join(model_path, FINGERPRINT_FILE_PATH)
    if not os.path.isfile(fingerprint_path):
        return {}

    return rasa.shared.utils.io.read_json_file(fingerprint_path)
def persist_fingerprint(output_path: Text, fingerprint: Fingerprint) -> None:
    """Persist a model fingerprint.

    Writes the fingerprint as JSON to `FINGERPRINT_FILE_PATH` inside
    `output_path`.

    Args:
        output_path: Directory in which the fingerprint should be saved.
        fingerprint: The fingerprint to be persisted.
    """
    fingerprint_file = os.path.join(output_path, FINGERPRINT_FILE_PATH)
    rasa.shared.utils.io.dump_obj_as_json_to_file(fingerprint_file, fingerprint)
def did_section_fingerprint_change(
    fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section
) -> bool:
    """Check whether the fingerprint of a section has changed.

    Note: for the NLU section (Botfront modification) this returns a *list*
    of language codes that need retraining, not a bool; the annotated return
    type is kept for compatibility but is no longer accurate.
    """
    # bf mod >
    if section.name == "NLU model":
        # Union of all languages mentioned in either fingerprint's NLU data
        # or NLU config.
        all_languages = set(
            list(fingerprint1.get(FINGERPRINT_NLU_DATA_KEY).keys())
            + list(fingerprint1.get(FINGERPRINT_CONFIG_NLU_KEY).keys())
            + list(fingerprint2.get(FINGERPRINT_NLU_DATA_KEY).keys())
            + list(fingerprint2.get(FINGERPRINT_CONFIG_NLU_KEY).keys())
        )
        languages_in_new_model = set(
            list(fingerprint2.get(FINGERPRINT_NLU_DATA_KEY).keys())
            + list(fingerprint2.get(FINGERPRINT_CONFIG_NLU_KEY).keys())
        )
        languages_in_old_model = set(
            list(fingerprint1.get(FINGERPRINT_NLU_DATA_KEY).keys())
            + list(fingerprint1.get(FINGERPRINT_CONFIG_NLU_KEY).keys())
        )
        languages_added = list(languages_in_new_model - languages_in_old_model)
        languages_removed = list(languages_in_old_model - languages_in_new_model)
        languages_to_retrain = set()
        for k in section.relevant_keys:
            if not isinstance(fingerprint1.get(k), dict):
                # A non-per-language key changed -> all languages must retrain.
                if fingerprint1.get(k) != fingerprint2.get(k):
                    logger.info("Data ({}) for NLU model changed.".format(k))
                    return list(all_languages)
            else:
                # Per-language key: compare each language individually.
                for lang in fingerprint1.get(k).keys():
                    if fingerprint1.get(k).get(lang) != fingerprint2.get(k).get(lang):
                        languages_to_retrain.add(lang)
        # New languages always need training; removed ones never do.
        for l in languages_added:
            languages_to_retrain.add(l)
        for l in languages_removed:
            if l in languages_to_retrain:
                languages_to_retrain.remove(l)
        return list(languages_to_retrain)
    # </ bf mod
    for k in section.relevant_keys:
        if fingerprint1.get(k) != fingerprint2.get(k):
            logger.info(f"Data ({k}) for {section.name} section changed.")
            return True
    return False
def move_model(source: Text, target: Text) -> bool:
    """Move a model directory to a new location.

    Args:
        source: The original folder which should be merged in another.
        target: The destination folder where it should be moved to.

    Returns:
        `True` if the merge was successful, else `False`.
    """
    try:
        shutil.move(source, target)
    except Exception as e:
        # Best-effort by design: callers fall back to retraining when the
        # old model cannot be reused.
        logging.debug(f"Could not merge model: {e}")
        return False
    return True
def should_retrain(
    new_fingerprint: Fingerprint,
    old_model: Optional[Text],
    train_path: Text,
    has_e2e_examples: bool = False,
    force_training: bool = False,
    nlu_untrainable: Optional[List[Text]] = None,  # bf
) -> FingerprintComparisonResult:
    """Check which components of a model should be retrained.

    Args:
        new_fingerprint: The fingerprint of the new model to be trained.
        old_model: Path to the old zipped model file.
        train_path: Path to the directory in which the new model will be trained.
        has_e2e_examples: Whether the new training data contains e2e examples.
        force_training: Indicates if the model needs to be retrained even if the data
            has not changed.
        nlu_untrainable: Language codes that must never be (re)trained.

    Returns:
        A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa
        NLU needs to be retrained or not.
    """
    # Avoid a mutable default argument; `None` means "no language excluded".
    if nlu_untrainable is None:
        nlu_untrainable = []

    fingerprint_comparison = FingerprintComparisonResult()

    if old_model is None or not os.path.exists(old_model):
        return fingerprint_comparison

    # The previous implementation unpacked the model twice (leaking the first
    # temp directory) and used a bare `except:` which also swallowed
    # KeyboardInterrupt/SystemExit.
    try:
        unpacked_model = unpack_model(old_model)
    except Exception:
        # A corrupt/unreadable archive means everything has to be retrained.
        return fingerprint_comparison

    with unpacked_model as unpacked:
        last_fingerprint = fingerprint_from_path(unpacked)
        old_core, old_nlu = get_model_subdirectories(unpacked)

        # Models trained with an incompatible Rasa version are always retrained.
        model_outdated = version.parse(last_fingerprint.get("version")) < version.parse(
            MINIMUM_COMPATIBLE_VERSION
        )
        fingerprint_comparison = FingerprintComparisonResult(
            core=did_section_fingerprint_change(
                last_fingerprint, new_fingerprint, SECTION_CORE
            ),
            nlu=did_section_fingerprint_change(
                last_fingerprint, new_fingerprint, SECTION_NLU
            ),
            nlg=did_section_fingerprint_change(
                last_fingerprint, new_fingerprint, SECTION_NLG
            ),
            force_training=force_training or model_outdated,
        )

        # We should retrain core if nlu data changes and there are e2e stories.
        if has_e2e_examples and fingerprint_comparison.should_retrain_nlu():
            fingerprint_comparison.core = True

        core_merge_failed = False
        if not fingerprint_comparison.should_retrain_core():
            # Reuse the old Core model by moving it into the new train path.
            target_path = os.path.join(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
            core_merge_failed = not move_model(old_core, target_path)
            fingerprint_comparison.core = core_merge_failed

        if not fingerprint_comparison.should_retrain_nlg() and core_merge_failed:
            # If moving the Core model failed, we should also retrain NLG
            fingerprint_comparison.nlg = True

        # bf mod >
        # `should_retrain_nlu` returns either `True` (retrain everything) or
        # a list of language codes that changed.
        languages_to_train = fingerprint_comparison.should_retrain_nlu()
        if languages_to_train is True:  # replace True with list of all langs
            languages_to_train = [
                l
                for l in new_fingerprint.get("nlu-config", {}).keys()
                if l not in nlu_untrainable
            ]
        for lang in old_nlu.keys():
            target_path = os.path.join(train_path, "nlu-{}".format(lang))
            if (
                lang in new_fingerprint.get("nlu-config").keys()
                and lang not in nlu_untrainable
            ):
                # only attempt move if language is still in new fingerprints
                # that way new model will not include that old lang
                if not move_model(old_nlu.get(lang), target_path):
                    languages_to_train.append(lang)
            else:
                # remove lang model (shutil is imported at module level)
                shutil.rmtree(target_path, True)
        fingerprint_comparison.nlu = languages_to_train
        # </ bf mod
        return fingerprint_comparison
def can_finetune(
    last_fingerprint: Fingerprint,
    new_fingerprint: Fingerprint,
    core: bool = False,
    nlu: bool = False,
) -> bool:
    """Checks if components of a model can be finetuned with incremental training.

    Args:
        last_fingerprint: The fingerprint of the old model to potentially be fine-tuned.
        new_fingerprint: The fingerprint of the new model.
        core: Check sections for finetuning a core model.
        nlu: Check sections for finetuning an nlu model.

    Returns:
        `True` if the old model can be finetuned, `False` otherwise.
    """
    # The epoch-stripped config must always match; domain/labels only for the
    # sub-model being finetuned.
    relevant_keys = [FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY]
    if core:
        relevant_keys.append(FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY)
    if nlu:
        relevant_keys.append(FINGERPRINT_NLU_LABELS_KEY)

    fingerprint_changed = did_section_fingerprint_change(
        last_fingerprint,
        new_fingerprint,
        Section(name="finetune", relevant_keys=relevant_keys),
    )

    old_version = version.parse(last_fingerprint.get(FINGERPRINT_RASA_VERSION_KEY))
    old_model_above_min_version = old_version >= version.parse(
        MINIMUM_COMPATIBLE_VERSION
    )
    return old_model_above_min_version and not fingerprint_changed
def package_model(
    fingerprint: Fingerprint,
    output_directory: Text,
    train_path: Text,
    fixed_model_name: Optional[Text] = None,
    model_prefix: Text = "",
) -> Text:
    """
    Compress a trained model.

    Args:
        fingerprint: fingerprint of the model
        output_directory: path to the directory in which the model should be stored
        train_path: path to uncompressed model
        fixed_model_name: name of the compressed model file
        model_prefix: prefix of the compressed model file

    Returns: path to 'tar.gz' model file
    """
    output_directory = create_output_path(
        output_directory, prefix=model_prefix, fixed_name=fixed_model_name
    )
    create_package_rasa(train_path, output_directory, fingerprint)

    absolute_output_path = os.path.abspath(output_directory)
    print_success(f"Your Rasa model is trained and saved at '{absolute_output_path}'.")

    return output_directory
async def update_model_with_new_domain(
    importer: "TrainingDataImporter", unpacked_model_path: Union[Path, Text]
) -> None:
    """Overwrites the domain of an unpacked model with a new domain.

    Args:
        importer: Importer which provides the new domain.
        unpacked_model_path: Path to the unpacked model.
    """
    core_directory = Path(unpacked_model_path) / DEFAULT_CORE_SUBDIRECTORY_NAME
    new_domain = await importer.get_domain()
    new_domain.persist(core_directory / DEFAULT_DOMAIN_PATH)
def get_model_for_finetuning(
    previous_model_file: Optional[Union[Path, Text]]
) -> Optional[Text]:
    """Gets validated path for model to finetune.

    Args:
        previous_model_file: Path to model file which should be used for
            finetuning or a directory in case the latest trained model
            should be used. May be ``None`` if no model was supplied.

    Returns:
        Path to model archive. `None` if there is no model.
    """
    # Bug fix: `Path(None)` raises TypeError although the parameter is
    # declared Optional and the docstring promises `None` in that case.
    if not previous_model_file:
        return None

    path = Path(previous_model_file)
    if path.is_dir():
        logger.debug(
            f"Trying to load latest model from '{previous_model_file}' for "
            f"finetuning."
        )
        return get_latest_model(previous_model_file)

    if path.is_file():
        return previous_model_file

    logger.debug(
        "No valid model for finetuning found as directory either "
        "contains no model or model file cannot be found."
    )
    return None
| 32.437247 | 89 | 0.671493 |
18464535bb8cff18949ddcc2a25b271a0462f73f | 1,579 | py | Python | game/renderer/pantool.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | 5 | 2021-06-25T16:44:38.000Z | 2021-12-31T01:29:00.000Z | game/renderer/pantool.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | null | null | null | game/renderer/pantool.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
class PanTool:
    """Camera helper that tracks pan offsets and eases the zoom value
    towards a discrete set of preset zoom levels."""

    def __init__(self, size):
        self.total_x = 0.0
        self.total_y = 0.0
        self.zoom = 800.0
        self.zoom_level = 4
        self.zoom_options = [12000, 7000, 4600, 3000, 2000, 1200, 800, 100]
        self.size = size

    def on_tick(self, time, frame_time):
        """Ease `zoom` towards the currently selected zoom option."""
        frame_time = max(0.001, frame_time)
        goal = self.zoom_options[self.zoom_level]
        step = goal - self.zoom
        # Snap when close enough to avoid endless tiny corrections.
        if abs(step) < 2:
            self.zoom = goal
            return
        # Enforce a minimum easing speed near the goal.
        if abs(step) < 10:
            step = -10 if step < 0 else 10
        self.zoom += step * frame_time * 4

    def zoom_in(self):
        """Select the next zoom option (clamped at the last entry)."""
        last = len(self.zoom_options) - 1
        self.zoom_level = min(self.zoom_level + 1, last)

    def zoom_out(self):
        """Select the previous zoom option (clamped at the first entry)."""
        self.zoom_level = max(self.zoom_level - 1, 0)

    def zoom_encounter(self):
        """Jump straight to the closest zoom option."""
        self.zoom_level = len(self.zoom_options) - 1

    @property
    def warp_x(self):
        return self.size[0] / 320

    @property
    def warp_y(self):
        return self.size[1] / 320

    @property
    def pan_value(self):
        # Pan offsets in screen units (16:9 scaling of the accumulated pan).
        return (
            round(16 * 5 * self.total_x),
            round(9 * -(5 * self.total_y)),
        )

    @property
    def zoom_value(self):
        scale = 100 / self.zoom
        return scale, scale
| 25.467742 | 78 | 0.529449 |
c05f1e17fe09ef7fd4f2eb29f937f0d6dffefa19 | 3,167 | py | Python | qikify/tests/test_controllers.py | brucedispassion/qikify | ed1384a1cbaf57fe4d570937e8a5859ab0858fde | [
"MIT"
] | 1 | 2019-09-07T12:16:12.000Z | 2019-09-07T12:16:12.000Z | qikify/tests/test_controllers.py | brucedispassion/qikify | ed1384a1cbaf57fe4d570937e8a5859ab0858fde | [
"MIT"
] | null | null | null | qikify/tests/test_controllers.py | brucedispassion/qikify | ed1384a1cbaf57fe4d570937e8a5859ab0858fde | [
"MIT"
] | 4 | 2017-01-09T02:44:43.000Z | 2022-02-15T03:30:32.000Z | import pandas
import numpy as np
from scipy.stats.stats import kurtosis
from scipy import c_, r_
from qikify.helpers.identify_outliers import identify_outliers
from qikify.controllers.KNN import KNN
from qikify.controllers.KDE import KDE
from qikify.controllers.LSFS import LSFS
from qikify.models.chip import Chip
from qikify.models.specs import Specs
def test_identify_outliers():
    """Test for the identifyOutliers controller.

    TODO: At the moment, this only tests the mu +/- k sigma outlier filter,
    not the spec-based outlier filter.
    """
    # 100 identical rows with one injected extreme value in the last row;
    # only that row should be flagged at k = 6 standard deviations.
    A = pandas.DataFrame(np.ones((100, 3)))
    # Bug fix: DataFrame.ix was deprecated in pandas 0.20 and removed in
    # pandas 1.0; .iloc is the positional equivalent.
    A.iloc[99, 2] = 30
    assert identify_outliers(A, 6).tolist() == [True] * 99 + [False]
def test_kde():
    """
    Tests for kernel density estimation. To test standard KDE, we use a random
    normal to learn the density, generate 1000 samples, and then check that the
    means/standard deviations/kurtosis figures are similar to within a margin
    epsilon.
    TODO: Need a test that adjusts the bandwidth factor a.
    """
    eps = 0.1
    kde = KDE()
    # Standard KDE: samples drawn from the learned density should roughly
    # reproduce the moments of the training data.
    X = pandas.DataFrame(np.random.multivariate_normal([0, 0], \
                                                       np.eye(2), (1000, )))
    S = kde.run(X, n_samples = 1000)
    assert np.mean( S.std(0) - X.std(0)) < eps
    assert np.mean( S.mean(0) - X.mean(0)) < eps
    assert np.mean(kurtosis(S, 0) - kurtosis(X, 0)) < eps
    # Partitioned KDE: sampling is split into good/critical/fail regions
    # derived from the spec limits.
    counts = {'nGood': 250, 'nCritical': 100, 'nFail': 100}
    columns = ['A', 'B']
    spec_lims = pandas.DataFrame({columns[0]: np.array([-2.0, 2.0]), \
                                  columns[1]: np.array([-2.0, 2.0])})
    specs = Specs(specs=spec_lims).gen_crit_region(5.5/6, 6.5/6)
    A = pandas.DataFrame(np.random.multivariate_normal([0, 0], \
                                                       np.eye(2), (1000, )), columns=columns)
    S = kde.run(A, specs=specs, counts=counts)
    # NOTE(review): the 1.3 factor presumably reflects the widened spread of
    # partitioned sampling — confirm against the KDE implementation.
    assert np.mean( S.std(0) - 1.3 * A.std(0)) < eps
    assert np.mean( S.mean(0) - A.mean(0)) < eps
def test_lsfs():
    """Tests for Laplacian score feature selection. We create a matrix where
    the first two columns are very good discriminators of the class label, and
    all the remaining features are random. Then run LSFS and ensure the scores
    for the first two features are close to 1.0, and the remaining scores are
    small (less than 0.25).
    """
    lsfs = LSFS()
    # Two tight clusters centered at (1, 1) and (-1, -1): columns 0-1 separate
    # the classes perfectly; columns 2-11 are pure noise.
    Xa = (np.random.randn(500, 2) / 10) + [1, 1]
    Xb = (np.random.randn(500, 2) / 10) + [-1, -1]
    y = r_[np.ones((500, )), np.zeros((500, ))]
    X = c_[r_[Xa, Xb], np.random.randn(1000, 10)]
    lsfs.run(X, y)
    assert abs(sum(lsfs.scores[0:2]) - 2.0) < 0.2
    assert np.mean(lsfs.scores[2:]) < 0.25
def test_knn():
    """A 1-nearest-neighbour classifier trained on two opposite chips should
    label a chip near (0.9, 0.9) with the class of the (1, 1) chip."""
    knn = KNN(n_neighbors=1)
    chip_data1 = {'ORB_a':1, 'ORB_b':1, 'gnd':1}
    chip_data2 = {'ORB_a':-1, 'ORB_b':-1, 'gnd':-1}
    chip1 = Chip(chip_data1, LCT_prefix='ORB')
    chip2 = Chip(chip_data2, LCT_prefix='ORB')
    chips = [chip1, chip2]
    knn.fit(chips)
    # Query point closest to chip1 -> expect its ground-truth label (1).
    chip3 = Chip({'ORB_a':0.9, 'ORB_b':0.9, 'gnd':1}, LCT_prefix='ORB')
    assert knn.predict(chip3) == 1, 'fail: prediction not correct.'
93c820b0969f704c19e953a1f3a4fea86a9f7ec0 | 634 | py | Python | src/utils/logging/handlers.py | gcdevops/OdooImport | 44b2396d48dfb2eed0843554b59a738497af103d | [
"MIT"
] | null | null | null | src/utils/logging/handlers.py | gcdevops/OdooImport | 44b2396d48dfb2eed0843554b59a738497af103d | [
"MIT"
] | null | null | null | src/utils/logging/handlers.py | gcdevops/OdooImport | 44b2396d48dfb2eed0843554b59a738497af103d | [
"MIT"
] | null | null | null | import os
import requests
import json
from logging import Handler, Formatter, ERROR
class SlackHandler(Handler):
    """Logging handler that POSTs formatted records to a Slack webhook.

    If no webhook URL is supplied, the ``SLACK_URL`` environment variable is
    used; when neither is set, records are silently dropped.
    """

    def __init__(self, level=ERROR, slack_url=None):
        # Bug fix: `level` was previously ignored because it was never
        # forwarded to Handler.__init__, so the handler always used the
        # default level regardless of the argument.
        super().__init__(level)
        if slack_url is None:
            self.slack_url = os.environ.get("SLACK_URL")
        else:
            self.slack_url = slack_url

    def emit(self, record):
        """Format the record and send it to the configured webhook."""
        if self.slack_url:
            formatted_log = self.format(record)
            requests.post(
                self.slack_url,
                json.dumps({"text": formatted_log}),
                headers={"Content-Type": "application/json"},
                # Avoid blocking the logging call indefinitely on a slow
                # or unreachable webhook.
                timeout=10,
            )
817e659b7c8fe3e62e1a3ee8f490f81cc7586018 | 2,313 | py | Python | presidio-analyzer/analyzer/predefined_recognizers/iban_recognizer.py | eliperkins/presidio | 5c73e679c2982c047e2198895d153260406cb162 | [
"MIT"
] | 1 | 2019-08-31T19:57:55.000Z | 2019-08-31T19:57:55.000Z | presidio-analyzer/analyzer/predefined_recognizers/iban_recognizer.py | eliperkins/presidio | 5c73e679c2982c047e2198895d153260406cb162 | [
"MIT"
] | null | null | null | presidio-analyzer/analyzer/predefined_recognizers/iban_recognizer.py | eliperkins/presidio | 5c73e679c2982c047e2198895d153260406cb162 | [
"MIT"
] | null | null | null | import string
from analyzer.predefined_recognizers.iban_patterns import regex_per_country
from analyzer import Pattern, PatternRecognizer
from analyzer.entity_recognizer import EntityRecognizer
# Import 're2' regex engine if installed, if not- import 'regex'
try:
import re2 as re
except ImportError:
import regex as re
# Generic IBAN shape: 2-letter country code, 2 check digits, then 11-28
# alphanumeric characters, each optionally followed by a space.
IBAN_GENERIC_REGEX = r'\b[A-Z]{2}[0-9]{2}[ ]?([a-zA-Z0-9][ ]?){11,28}\b'
IBAN_GENERIC_SCORE = 0.5
CONTEXT = ["iban", "bank", "transaction"]
# Maps the ordinal of '0'-'9' and 'A'-'Z' to its IBAN numeric value
# ('0' -> "0", ..., 'A' -> "10", ..., 'Z' -> "35") for str.translate().
LETTERS = {
    ord(d): str(i)
    for i, d in enumerate(string.digits + string.ascii_uppercase)
}
class IbanRecognizer(PatternRecognizer):
    """
    Recognizes IBAN codes using a generic regex pattern plus an
    ISO 7064 mod-97 checksum and per-country format validation.
    """

    def __init__(self):
        patterns = [Pattern('IBAN Generic',
                            IBAN_GENERIC_REGEX,
                            IBAN_GENERIC_SCORE)]
        super().__init__(supported_entity="IBAN_CODE",
                         patterns=patterns,
                         context=CONTEXT)

    def validate_result(self, pattern_text, pattern_result):
        """Re-score a regex match: MAX_SCORE when checksum and country
        format both pass, IBAN_GENERIC_SCORE when only the uppercased text
        matches the country format, MIN_SCORE otherwise."""
        # Spaces are allowed in the printed form of an IBAN; strip them
        # before checksum/format validation.
        pattern_text = pattern_text.replace(' ', '')
        is_valid_checksum = (IbanRecognizer.__generate_iban_check_digits(
            pattern_text) == pattern_text[2:4])
        score = EntityRecognizer.MIN_SCORE
        if is_valid_checksum:
            if IbanRecognizer.__is_valid_format(pattern_text):
                score = EntityRecognizer.MAX_SCORE
            elif IbanRecognizer.__is_valid_format(pattern_text.upper()):
                score = IBAN_GENERIC_SCORE
        pattern_result.score = score
        return pattern_result

    @staticmethod
    def __number_iban(iban):
        # Move country code + check digits to the end and convert every
        # character to its numeric IBAN value (A=10 ... Z=35).
        return (iban[4:] + iban[:4]).translate(LETTERS)

    @staticmethod
    def __generate_iban_check_digits(iban):
        # ISO 7064 mod-97-10: zero the check digits, then 98 - (n mod 97),
        # left-padded to two digits.
        transformed_iban = (iban[:2] + '00' + iban[4:]).upper()
        number_iban = IbanRecognizer.__number_iban(transformed_iban)
        return '{:0>2}'.format(98 - (int(number_iban) % 97))

    @staticmethod
    def __is_valid_format(iban):
        # Validate against the country-specific pattern when one exists;
        # unknown country codes are treated as invalid.
        country_code = iban[:2]
        if country_code in regex_per_country:
            country_regex = regex_per_country[country_code]
            return country_regex and re.match(country_regex, iban,
                                              flags=re.DOTALL | re.MULTILINE)
        return False
b8ef5fb423fff03b0888000970ffc56a34ff3c59 | 25,347 | py | Python | assignment2/cs231n/layers.py | halimacc/CS231n-assignments | e2095450c42780a090d596e7790daf59ac80712b | [
"Unlicense"
] | null | null | null | assignment2/cs231n/layers.py | halimacc/CS231n-assignments | e2095450c42780a090d596e7790daf59ac80712b | [
"Unlicense"
] | null | null | null | assignment2/cs231n/layers.py | halimacc/CS231n-assignments | e2095450c42780a090d596e7790daf59ac80712b | [
"Unlicense"
] | null | null | null | from builtins import range
import numpy as np
def affine_forward(x, w, b):
    """Forward pass for a fully-connected (affine) layer.

    Each example x[i] of shape (d_1, ..., d_k) is flattened into a row
    vector of dimension D = d_1 * ... * d_k, then mapped to M outputs.

    Inputs:
    - x: input batch, shape (N, d_1, ..., d_k)
    - w: weights, shape (D, M)
    - b: biases, shape (M,)

    Returns a tuple of:
    - out: output, shape (N, M)
    - cache: (x, w, b), saved for the backward pass
    """
    num_examples = x.shape[0]
    flat_dim = w.shape[0]
    x_rows = x.reshape(num_examples, flat_dim)
    out = x_rows.dot(w) + b
    cache = (x, w, b)
    return out, cache
def affine_backward(dout, cache):
    """Backward pass for a fully-connected (affine) layer.

    Inputs:
    - dout: upstream gradient, shape (N, M)
    - cache: (x, w, b) from the forward pass

    Returns a tuple of:
    - dx: gradient w.r.t. x, shape (N, d_1, ..., d_k)
    - dw: gradient w.r.t. w, shape (D, M)
    - db: gradient w.r.t. b, shape (M,)
    """
    x, w, b = cache
    num_examples = x.shape[0]
    flat_dim = w.shape[0]
    x_rows = x.reshape(num_examples, flat_dim)
    # Gradient flows back through out = x_rows @ w + b.
    dx = dout.dot(w.T).reshape(x.shape)
    dw = x_rows.T.dot(dout)
    db = dout.sum(axis=0)
    return dx, dw, db
def relu_forward(x):
    """Forward pass for a rectified linear unit (ReLU).

    Input:
    - x: inputs, any shape

    Returns a tuple of:
    - out: element-wise max(x, 0), same shape as x
    - cache: x, saved for the backward pass
    """
    out = np.maximum(x, 0)
    cache = x
    return out, cache
def relu_backward(dout, cache):
    """Backward pass for a rectified linear unit (ReLU).

    Input:
    - dout: upstream gradient, any shape
    - cache: input x from the forward pass, same shape as dout

    Returns:
    - dx: upstream gradient where x was non-negative, zero elsewhere
    """
    x = cache
    dx = np.where(x < 0, 0, dout)
    return dx
def batchnorm_forward(x, gamma, beta, bn_param):
    """
    Forward pass for batch normalization.
    During training the sample mean and (uncorrected) sample variance are
    computed from minibatch statistics and used to normalize the incoming data.
    During training we also keep an exponentially decaying running mean of the
    mean and variance of each feature, and these averages are used to normalize
    data at test-time.
    At each timestep we update the running averages for mean and variance using
    an exponential decay based on the momentum parameter:
    running_mean = momentum * running_mean + (1 - momentum) * sample_mean
    running_var = momentum * running_var + (1 - momentum) * sample_var
    Note that the batch normalization paper suggests a different test-time
    behavior: they compute sample mean and variance for each feature using a
    large number of training images rather than using a running average. For
    this implementation we have chosen to use running averages instead since
    they do not require an additional estimation step; the torch7
    implementation of batch normalization also uses running averages.
    Input:
    - x: Data of shape (N, D)
    - gamma: Scale parameter of shape (D,)
    - beta: Shift paremeter of shape (D,)
    - bn_param: Dictionary with the following keys:
      - mode: 'train' or 'test'; required
      - eps: Constant for numeric stability
      - momentum: Constant for running mean / variance.
      - running_mean: Array of shape (D,) giving running mean of features
      - running_var Array of shape (D,) giving running variance of features
    Returns a tuple of:
    - out: of shape (N, D)
    - cache: A tuple of values needed in the backward pass
    """
    mode = bn_param['mode']
    eps = bn_param.get('eps', 1e-5)
    momentum = bn_param.get('momentum', 0.9)
    N, D = x.shape
    running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
    out, cache = None, None
    if mode == 'train':
        # Staged computation; each intermediate is kept because the
        # backward pass walks this graph node by node.
        sample_mean = np.mean(x, 0)
        sample_var = np.mean(x_mean_sqr, 0)
        x_mean = x - sample_mean            # centered input
        x_mean_sqr = np.square(x_mean)
        sample_var_sqrt = np.sqrt(sample_var + eps)
        inv_svs = 1 / sample_var_sqrt       # 1 / std
        x_norm = x_mean * inv_svs           # normalized input
        out = gamma * x_norm + beta
        # Exponentially-decayed running statistics for test-time use.
        running_mean = momentum * running_mean + (1 - momentum) * sample_mean
        running_var = momentum * running_var + (1 - momentum) * sample_var
        # Cache order is relied upon by batchnorm_backward/_alt.
        cache = (x, x_norm, gamma, x_mean, inv_svs, sample_var_sqrt, x_mean_sqr)
    elif mode == 'test':
        # Normalize with the running statistics accumulated during training.
        x_norm = (x - running_mean) / np.sqrt(running_var + eps)
        out = gamma * x_norm + beta
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
    # Store the updated running means back into bn_param
    bn_param['running_mean'] = running_mean
    bn_param['running_var'] = running_var
    return out, cache
def batchnorm_backward(dout, cache):
    """
    Backward pass for batch normalization.
    For this implementation, you should write out a computation graph for
    batch normalization on paper and propagate gradients backward through
    intermediate nodes.
    Inputs:
    - dout: Upstream derivatives, of shape (N, D)
    - cache: Variable of intermediates from batchnorm_forward.
    Returns a tuple of:
    - dx: Gradient with respect to inputs x, of shape (N, D)
    - dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
    - dbeta: Gradient with respect to shift parameter beta, of shape (D,)
    """
    dx, dgamma, dbeta = None, None, None
    # Walk the forward computation graph in reverse, one node at a time.
    N = dout.shape[0]
    x, x_norm, gamma, x_mean, inv_svs, sample_var_sqrt, x_mean_sqr = cache
    # out = gamma * x_norm + beta
    dbeta = np.sum(dout, 0)
    dgamma = np.sum(dout * x_norm, 0)
    dx_norm = dout * gamma
    # x_norm = x_mean * inv_svs  (branch into inv_svs)
    dinv_svs = np.sum(dx_norm * x_mean, 0)
    # inv_svs = 1 / sample_var_sqrt
    dsample_var_sqrt = dinv_svs * -np.square(inv_svs)
    # sample_var_sqrt = sqrt(sample_var + eps)
    dsample_var = dsample_var_sqrt * 1 / (2 * sample_var_sqrt)
    # sample_var = mean(x_mean_sqr, 0): broadcast 1/N back over rows
    dx_mean_sqr = dsample_var / N * np.ones_like(x_mean_sqr)
    # x_norm = x_mean * inv_svs  (direct branch) plus x_mean_sqr = x_mean**2
    dx_mean = dx_norm * inv_svs
    dx_mean += dx_mean_sqr * 2 * x_mean
    # x_mean = x - sample_mean; sample_mean = mean(x, 0)
    dsample_mean = -np.sum(dx_mean, 0)
    dx = dx_mean
    dx += dsample_mean / N
    return dx, dgamma, dbeta
def batchnorm_backward_alt(dout, cache):
    """
    Alternative backward pass for batch normalization.

    Uses the closed-form simplification of the batch-norm gradient:

        dx = (gamma / (N * std)) *
             (N * dout - sum_rows(dout) - x_norm * sum_rows(dout * x_norm))

    Bug fix: the previous version kept only the diagonal terms of
    d(x_norm)/dx, ignoring that every output depends on the batch mean and
    variance (i.e. on every input row), so it disagreed with
    batchnorm_backward. This version is verified against a numerical
    gradient.

    Note: This implementation should expect to receive the same cache variable
    as batchnorm_backward, but might not use all of the values in the cache.
    Inputs / outputs: Same as batchnorm_backward
    """
    N = dout.shape[0]
    x, x_norm, gamma, x_mean, inv_svs, sample_var_sqrt, x_mean_sqr = cache

    # Gradients of the scale/shift parameters.
    dbeta = np.sum(dout, 0)
    dgamma = np.sum(dout * x_norm, 0)

    # Closed-form gradient w.r.t. the input (inv_svs == 1 / std).
    dx_norm = dout * gamma
    dx = (inv_svs / N) * (
        N * dx_norm
        - np.sum(dx_norm, 0)
        - x_norm * np.sum(dx_norm * x_norm, 0)
    )
    return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
    """
    Performs the forward pass for (inverted) dropout.
    Inputs:
    - x: Input data, of any shape
    - dropout_param: A dictionary with the following keys:
      - p: Dropout parameter. We drop each neuron output with probability p.
      - mode: 'test' or 'train'. If the mode is train, then perform dropout;
        if the mode is test, then just return the input.
      - seed: Seed for the random number generator. Passing seed makes this
        function deterministic, which is needed for gradient checking but not
        in real networks.
    Outputs:
    - out: Array of the same shape as x.
    - cache: tuple (dropout_param, mask). In training mode, mask is the dropout
      mask that was used to multiply the input; in test mode, mask is None.
    """
    p, mode = dropout_param['p'], dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])
    mask = None
    out = None
    if mode == 'train':
        # TODO (assignment stub): training-phase inverted dropout;
        # store the dropout mask in `mask`.
        pass
    elif mode == 'test':
        # TODO (assignment stub): test-phase forward pass (identity).
        pass
    cache = (dropout_param, mask)
    # NOTE(review): until the TODOs above are implemented, `out` is still
    # None here and this line raises AttributeError.
    out = out.astype(x.dtype, copy=False)
    return out, cache
def dropout_backward(dout, cache):
    """
    Perform the backward pass for (inverted) dropout.
    Inputs:
    - dout: Upstream derivatives, of any shape
    - cache: (dropout_param, mask) from dropout_forward.
    """
    dropout_param, mask = cache
    mode = dropout_param['mode']
    dx = None
    if mode == 'train':
        # TODO (assignment stub): training-phase backward pass for
        # inverted dropout (apply the cached mask to dout).
        pass
    elif mode == 'test':
        # In test mode dropout is the identity, so the gradient passes
        # through unchanged.
        dx = dout
    return dx
def conv_forward_naive(x, w, b, conv_param):
    """
    A naive implementation of the forward pass for a convolutional layer.
    The input consists of N data points, each with C channels, height H and
    width W. We convolve each input with F different filters, where each filter
    spans all C channels and has height HH and width HH.
    Input:
    - x: Input data of shape (N, C, H, W)
    - w: Filter weights of shape (F, C, HH, WW)
    - b: Biases, of shape (F,)
    - conv_param: A dictionary with the following keys:
      - 'stride': The number of pixels between adjacent receptive fields in the
        horizontal and vertical directions.
      - 'pad': The number of pixels that will be used to zero-pad the input.
    Returns a tuple of:
    - out: Output data, of shape (N, F, H', W') where H' and W' are given by
      H' = 1 + (H + 2 * pad - HH) / stride
      W' = 1 + (W + 2 * pad - WW) / stride
    - cache: (x, w, b, conv_param)
    """
    out = None
    # TODO (assignment stub): implement the convolutional forward pass.
    # Hint: np.pad can be used for zero-padding.
    pass
    cache = (x, w, b, conv_param)
    return out, cache
def conv_backward_naive(dout, cache):
    """
    A naive implementation of the backward pass for a convolutional layer.
    Inputs:
    - dout: Upstream derivatives.
    - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
    Returns a tuple of:
    - dx: Gradient with respect to x
    - dw: Gradient with respect to w
    - db: Gradient with respect to b
    """
    dx, dw, db = None, None, None
    # TODO (assignment stub): implement the convolutional backward pass.
    pass
    return dx, dw, db
def max_pool_forward_naive(x, pool_param):
    """
    A naive implementation of the forward pass for a max pooling layer.
    Inputs:
    - x: Input data, of shape (N, C, H, W)
    - pool_param: dictionary with the following keys:
      - 'pool_height': The height of each pooling region
      - 'pool_width': The width of each pooling region
      - 'stride': The distance between adjacent pooling regions
    Returns a tuple of:
    - out: Output data
    - cache: (x, pool_param)
    """
    out = None
    # TODO (assignment stub): implement the max pooling forward pass.
    pass
    cache = (x, pool_param)
    return out, cache
def max_pool_backward_naive(dout, cache):
    """
    A naive implementation of the backward pass for a max pooling layer.
    Inputs:
    - dout: Upstream derivatives
    - cache: A tuple of (x, pool_param) as in the forward pass.
    Returns:
    - dx: Gradient with respect to x
    """
    dx = None
    # TODO (assignment stub): implement the max pooling backward pass.
    pass
    return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
    """
    Computes the forward pass for spatial batch normalization.
    Inputs:
    - x: Input data of shape (N, C, H, W)
    - gamma: Scale parameter, of shape (C,)
    - beta: Shift parameter, of shape (C,)
    - bn_param: Dictionary with the following keys:
      - mode: 'train' or 'test'; required
      - eps: Constant for numeric stability
      - momentum: Constant for running mean / variance. momentum=0 means that
        old information is discarded completely at every time step, while
        momentum=1 means that new information is never incorporated. The
        default of momentum=0.9 should work well in most situations.
      - running_mean: Array of shape (D,) giving running mean of features
      - running_var Array of shape (D,) giving running variance of features
    Returns a tuple of:
    - out: Output data, of shape (N, C, H, W)
    - cache: Values needed for the backward pass
    """
    out, cache = None, None
    # TODO (assignment stub): implement spatial batch normalization.
    # Hint: can be expressed via the vanilla batchnorm_forward above
    # in just a few lines.
    pass
    return out, cache
def spatial_batchnorm_backward(dout, cache):
    """
    Computes the backward pass for spatial batch normalization.
    Inputs:
    - dout: Upstream derivatives, of shape (N, C, H, W)
    - cache: Values from the forward pass
    Returns a tuple of:
    - dx: Gradient with respect to inputs, of shape (N, C, H, W)
    - dgamma: Gradient with respect to scale parameter, of shape (C,)
    - dbeta: Gradient with respect to shift parameter, of shape (C,)
    """
    dx, dgamma, dbeta = None, None, None
    # TODO (assignment stub): implement the spatial batch normalization
    # backward pass. Hint: can be expressed via the vanilla
    # batchnorm_backward above in just a few lines.
    pass
    return dx, dgamma, dbeta
def svm_loss(x, y):
    """Multiclass SVM (hinge) loss and gradient.

    Inputs:
    - x: scores, shape (N, C); x[i, j] is the score of class j for input i
    - y: labels, shape (N,), with 0 <= y[i] < C

    Returns a tuple of:
    - loss: scalar hinge loss averaged over the batch
    - dx: gradient of the loss with respect to x, shape (N, C)
    """
    N = x.shape[0]
    rows = np.arange(N)
    # Hinge margins with a margin of 1; the correct class contributes 0.
    correct = x[rows, y][:, None]
    margins = np.maximum(0, x - correct + 1.0)
    margins[rows, y] = 0
    loss = margins.sum() / N

    # Each violated margin pushes its class up by 1 and the correct
    # class down by the number of violations in that row.
    dx = (margins > 0).astype(x.dtype)
    dx[rows, y] -= dx.sum(axis=1)
    dx /= N
    return loss, dx
def softmax_loss(x, y):
    """Softmax (cross-entropy) loss and gradient.

    Inputs:
    - x: scores, shape (N, C); x[i, j] is the score of class j for input i
    - y: labels, shape (N,), with 0 <= y[i] < C

    Returns a tuple of:
    - loss: scalar cross-entropy loss averaged over the batch
    - dx: gradient of the loss with respect to x, shape (N, C)
    """
    N = x.shape[0]
    # Shift by the row max for numerical stability before exponentiating.
    shifted = x - x.max(axis=1, keepdims=True)
    log_z = np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    log_probs = shifted - log_z
    probs = np.exp(log_probs)

    loss = -log_probs[np.arange(N), y].sum() / N
    # d(loss)/dx = softmax(x) - one_hot(y), averaged over the batch.
    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    dx /= N
    return loss, dx
| 42.104651 | 83 | 0.458121 |
3251bda8209f79bcfc51a0fcf97c533d8548a6bc | 2,182 | py | Python | budget/templatetags/budget.py | kwabena-aboah/ofos | b0ed9b36a7f2364777aa12abf398324ecf5e2ef2 | [
"MIT"
] | null | null | null | budget/templatetags/budget.py | kwabena-aboah/ofos | b0ed9b36a7f2364777aa12abf398324ecf5e2ef2 | [
"MIT"
] | 1 | 2021-06-08T19:39:41.000Z | 2021-06-08T19:39:41.000Z | budget/templatetags/budget.py | kwabena-aboah/ofos | b0ed9b36a7f2364777aa12abf398324ecf5e2ef2 | [
"MIT"
] | null | null | null | from decimal import Decimal
from django import template
from django.conf import settings
# Tag library this module registers its template tags with.
register = template.Library()

# To override, copy to your settings file. Make sure to keep the tuples in
# descending order by percentage: the first threshold that the spend ratio
# meets or exceeds determines the CSS class.
BUDGET_DEFAULT_COLORS = (
    # (percentage, CSS color class)
    (1.001, 'red'),
    (0.75, 'yellow'),
    (0.0, 'green'),
)
class ColorizeAmountNode(template.Node):
    """Template node that renders a CSS color class indicating how the
    actual spend compares to the estimated amount."""

    def __init__(self, estimated_amount, actual_amount):
        self.estimated_amount = template.Variable(estimated_amount)
        self.actual_amount = template.Variable(actual_amount)

    def render(self, context):
        """Return the color class for actual/estimate, or '' when the
        variables are missing, the estimate is zero, or no threshold matches.

        Bug fix: previously fell off the end and returned None (which Django
        renders literally as "None") when the spend ratio was below every
        configured threshold.
        """
        colors = getattr(settings, 'BUDGET_DEFAULT_COLORS',
                         BUDGET_DEFAULT_COLORS)
        try:
            estimate = make_decimal(self.estimated_amount.resolve(context))
            # A zero estimate makes the ratio undefined; render nothing.
            if estimate == 0:
                return ''
            actual = make_decimal(self.actual_amount.resolve(context))
            percentage = actual / estimate
            # Thresholds are in descending order; first match wins.
            for threshold, css_class in colors:
                if percentage >= make_decimal(threshold):
                    return css_class
        except template.VariableDoesNotExist:
            pass
        return ''
def make_decimal(amount):
    """Coerce *amount* to a Decimal; existing Decimals pass through unchanged.

    Non-Decimal values are stringified first, so floats convert via their
    repr instead of their binary expansion.
    """
    if isinstance(amount, Decimal):
        return amount
    return Decimal(str(amount))
def colorize_amount(parser, token):
    """Template tag: pick a CSS colour class for actual vs. estimated spend.

    Usage::

        {% colorize_amount estimated_amount actual_amount %}
    """
    bits = token.split_contents()
    if len(bits) != 3:
        raise template.TemplateSyntaxError("%r tag requires exactly two arguments" % token.contents.split()[0])
    _tag_name, estimate_expr, actual_expr = bits
    return ColorizeAmountNode(estimate_expr, actual_expr)
register.tag('colorize_amount', colorize_amount)
| 28.710526 | 112 | 0.631531 |
8387182a627a7af8563cf07273718e3a7e506ace | 9,061 | py | Python | src/files/scripts/neutron_offline_network_type_update.py | openstack/charm-neutron-api-plugin-ovn | a676eb9d90ea576b0036f3f4a0e8752d1a934992 | [
"Apache-2.0"
] | 1 | 2020-07-27T09:07:46.000Z | 2020-07-27T09:07:46.000Z | src/files/scripts/neutron_offline_network_type_update.py | openstack/charm-neutron-api-plugin-ovn | a676eb9d90ea576b0036f3f4a0e8752d1a934992 | [
"Apache-2.0"
] | null | null | null | src/files/scripts/neutron_offline_network_type_update.py | openstack/charm-neutron-api-plugin-ovn | a676eb9d90ea576b0036f3f4a0e8752d1a934992 | [
"Apache-2.0"
] | 1 | 2019-10-30T15:46:53.000Z | 2019-10-30T15:46:53.000Z | #!/usr/bin/env python3
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""neutron_offline_network_type_update
The purpose of this module is to provide a tool that allow the user to perform
Neutron database surgery to change the type of tunnel networks from 'gre' and
'vxlan' to 'geneve'.
It is an optional part of a migration from a legacy Neutron ML2+OVS to ML2+OVN
deployment.
At the time of this writing the Neutron OVN ML2 driver will assume that all
chassis participating in a network to use the 'geneve' tunnel protocol and it
will ignore the value of the `network_type` field in any non-physical network
in the Neutron database. It will also ignore the `segmentation_id` field and
let OVN assign the VNIs [0].
The Neutron API currently does not support changing the type of a network, so
when doing a migration the above described behaviour is actually a welcomed
one.
However, after the migration is done and all the primary functions are working,
the end user of the cloud will be left with the false impression of their
existing 'gre' or 'vxlan' typed networks still being operational on said tunnel
protocols. In reality 'geneve' is used under the hood.
The end user will also run into issues with modifying any existing networks
with `openstack network set` throwing error messages about networks of type
'gre' or 'vxlan' not being supported.
After running this script said networks will have their `network_type` field
changed to 'geneve' which will fix the above described problems.
NOTE: Use this script with caution, it is of absolute importance that the
`neutron-server` process is stopped while the script is running.
NOTE: While we regularly exercise the script as part of our functional testing
of the charmed migration path and the script is touching fundamental data
structures that are not likely to have their definition changed much in
the Neutron database, we would still advise you to take a fresh backup of
the Neutron database and keep it for a while just in case.
0: https://github.com/ovn-org/ovn/blob/1e07781310d8155997672bdce01a2ff4f5a93e83/northd/ovn-northd.c#L1188-L1268
""" # noqa
import os
import sys
from oslo_db.sqlalchemy import session
import sqlalchemy
class NotFound(Exception):
    """Raised when no free VNI of the requested type can be allocated."""
    pass
def main(argv):
    """Main function.

    Runs the gre/vxlan -> geneve morph inside a single DB session.  Unless
    ``argv[2]`` is the literal string 'morph' the session is never committed
    (autocommit is off), so the run is effectively a dry run and the program
    exits with ``EX_USAGE``.

    :param argv: Argument list
    :type argv: List[str]
    :returns: POSIX exit code
    :rtype: int
    """
    program = os.path.basename(argv[0])
    if len(argv) < 2:
        usage(program)
        return os.EX_USAGE
    elif len(argv) < 3 or argv[2] != 'morph':
        print('DRY-RUN, WILL NOT COMMIT TRANSACTION')
    db_engine = session.create_engine(argv[1])
    db_maker = session.get_maker(db_engine, autocommit=False)
    db_session = db_maker(bind=db_engine)
    to_network_type = 'geneve'
    for network_type in ('gre', 'vxlan'):
        n_morphed = morph_networks(db_session, network_type, to_network_type)
        print('Morphed {} networks of type {} to {}.'
              .format(n_morphed, network_type, to_network_type))
    # Repeat the dry-run warning after the work and bail out before commit();
    # the uncommitted session is simply discarded.
    if len(argv) < 3 or argv[2] != 'morph':
        print('DRY-RUN, WILL NOT COMMIT TRANSACTION')
        return os.EX_USAGE
    db_session.commit()
    db_session.close()
    db_engine.dispose()
    return os.EX_OK
def usage(program):
    """Write the help text for *program* to stderr.

    :param program: Name of program
    :type program: str
    """
    help_text = (
        'usage {} db-connection-string [morph]\n'
        '\n'
        'Morph non-physical networks of type "gre" and "vxlan" into '
        'geneve networks.\n'
        '\n'
        'The Neutron database must already have enough free "geneve" VNIs\n'
        'before running this tool. If the process stops because there are\n'
        'no more VNIs, increase the VNI range with the `vni_ranges`\n'
        'configuration option on the `ml2_type_geneve` section and then\n'
        'start and stop the neutron-server before trying again.\n'
        '\n'
        'The second argument must be the literal string "morph" for the\n'
        'tool to perform an action, otherwise it will not commit the\n'
        'transaction to the database, effectively performing a dry run.\n'
        ''
    ).format(program)
    print(help_text, file=sys.stderr)
def vni_row_name(network_type):
    """Return the VNI column name used in *network_type*'s allocation table.

    :param network_type: Network type to determine row name for.
    :type network_type: str
    :returns: Row name
    :rtype: str
    :raises: ValueError
    """
    # 'gre' historically uses a *_id column; the others use *_vni.
    suffix_by_type = {'gre': '_id', 'geneve': '_vni', 'vxlan': '_vni'}
    if network_type not in suffix_by_type:
        raise ValueError('Unsupported network_type: {}'.format(network_type))
    return network_type + suffix_by_type[network_type]
def allocate_segment(db_session, network_type):
    """Allocate VNI for network_type.

    :param db_session: SQLAlchemy DB Session object.
    :type db_session: SQLAlchemy DB Session object.
    :param network_type: Network type to allocate vni for.
    :type network_type: str
    :returns: Allocated VNI
    :rtype: int
    :raises: NotFound when no unallocated VNI remains for *network_type*.
    """
    alloc_table = 'ml2_{}_allocations'.format(network_type)
    vni_row = vni_row_name(network_type)
    # Get next available VNI
    vni = None
    stmt = sqlalchemy.text(
        'SELECT MIN({}) FROM {} WHERE allocated=0'
        .format(vni_row, alloc_table))
    rs = db_session.execute(stmt)
    for row in rs:
        # The query selects a single column (MIN(...)), so index it
        # positionally.  (The previous next(row.itervalues()) relied on a
        # RowProxy method removed in SQLAlchemy 1.4.)
        vni = row[0]
        # An aggregated query will always provide a result, check for NULL
        if vni is None:
            raise NotFound(
                'unable to allocate "{}" segment.'.format(network_type))
        break
    # Allocate VNI
    stmt = sqlalchemy.text(
        'UPDATE {} SET allocated=1 WHERE {}=:vni'.format(alloc_table, vni_row))
    db_session.execute(stmt, {'vni': vni})
    return vni
def deallocate_segment(db_session, network_type, vni):
    """Mark *vni* as free again in *network_type*'s allocation table.

    :param db_session: SQLAlchemy DB Session object.
    :type db_session: SQLAlchemy DB Session object.
    :param network_type: Network type to de-allocate vni for.
    :type network_type: str
    :param vni: VNI
    :type vni: int
    """
    table = 'ml2_{}_allocations'.format(network_type)
    column = vni_row_name(network_type)
    release_stmt = sqlalchemy.text(
        'UPDATE {} SET allocated=0 WHERE {}=:vni'.format(table, column))
    db_session.execute(release_stmt, {'vni': vni})
def get_network_segments(db_session, network_type):
    """Yield tunnel network segments of a given type.

    Only rows whose ``physical_network`` is NULL (i.e. non-physical, tunnel
    networks) are selected.

    :param db_session: SQLAlchemy DB Session object.
    :type db_session: SQLAlchemy DB Session object.
    :param network_type: Network type to iterate over.
    :type network_type: str
    :returns: Iterator of (id, network_id, network_type, segmentation_id)
    :rtype: Iterator[str,str,str,int]
    """
    query = sqlalchemy.text(
        'SELECT id,network_id,network_type,segmentation_id '
        'FROM networksegments '
        'WHERE physical_network IS NULL AND '
        '      network_type=:network_type')
    result = db_session.execute(query, {'network_type': network_type})
    for record in result:
        yield record.values()
def morph_networks(db_session, from_network_type, to_network_type):
    """Morph all networks of one network type to another.

    For each non-physical segment of *from_network_type*, a fresh VNI of
    *to_network_type* is allocated first, the segment row is rewritten, and
    only then is the old VNI released back to its allocation table.

    :param db_session: SQLAlchemy DB Session object.
    :type db_session: SQLAlchemy DB Session object.
    :param from_network_type: Network type to morph from.
    :type from_network_type: str
    :param to_network_type: Network type to morph to.
    :type to_network_type: str
    :returns: Number of networks morphed
    :rtype: int
    """
    stmt = sqlalchemy.text(
        'UPDATE networksegments '
        'SET network_type=:new_network_type,segmentation_id=:new_vni '
        'WHERE id=:id')
    n_morphed = 0
    for segment_id, network_id, network_type, vni in get_network_segments(
            db_session, from_network_type):
        # Allocate the replacement VNI before touching the segment row;
        # allocate_segment() raises NotFound if the target range is exhausted.
        new_vni = allocate_segment(db_session, to_network_type)
        db_session.execute(stmt, {
            'new_network_type': to_network_type,
            'new_vni': new_vni,
            'id': segment_id,
        })
        print('segment {} for network {} changed from {}:{} to {}:{}'
              .format(segment_id, network_id, network_type, vni,
                      to_network_type, new_vni))
        # The old VNI is only freed after the segment row has been rewritten.
        deallocate_segment(db_session, from_network_type, vni)
        n_morphed += 1
    return n_morphed
if __name__ == '__main__':
    # Propagate main()'s POSIX exit code to the shell.
    sys.exit(main(sys.argv))
| 35.394531 | 111 | 0.691094 |
21fb0d3896a0abe4ca4f7145c31526b23f153848 | 8,555 | py | Python | oneflow/compatible_single_client_python/ops/math_binary_elementwise_ops.py | xcnick/oneflow | 7b786b27069dec35d2493256011e773988c91f56 | [
"Apache-2.0"
] | null | null | null | oneflow/compatible_single_client_python/ops/math_binary_elementwise_ops.py | xcnick/oneflow | 7b786b27069dec35d2493256011e773988c91f56 | [
"Apache-2.0"
] | null | null | null | oneflow/compatible_single_client_python/ops/math_binary_elementwise_ops.py | xcnick/oneflow | 7b786b27069dec35d2493256011e773988c91f56 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
from typing import Optional, Union
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.python.framework import id_util as id_util
from oneflow.compatible.single_client.python.framework import (
remote_blob as remote_blob_util,
)
from oneflow.compatible.single_client.python.oneflow_export import oneflow_export
import oneflow._oneflow_internal
def build_math_binary_elementwise_op(math_op, x, y, name=None):
    """Construct and run a binary elementwise math user-op.

    ``math_op`` is the registered op type name (e.g. "atan2"); ``x`` and
    ``y`` are its two input blobs.  When ``name`` is omitted a unique op
    name is generated.  Returns the op's single output blob ("z").
    """
    op_name = name if name is not None else id_util.UniqueStr(math_op + "_")
    builder = flow.user_op_builder(op_name).Op(math_op)
    builder = builder.Input("x", [x]).Input("y", [y]).Output("z")
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.atan2")
def atan2(
    x: oneflow._oneflow_internal.BlobDesc,
    y: oneflow._oneflow_internal.BlobDesc,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Elementwise arc tangent of ``x / y``.

    The equation is:

    .. math::

        out = arctan(\frac{x}{y})

    Args:
        x (oneflow._oneflow_internal.BlobDesc): A Blob
        y (oneflow._oneflow_internal.BlobDesc): A Blob
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp

        @flow.global_function()
        def atan2Job(x: tp.Numpy.Placeholder((3,)), y: tp.Numpy.Placeholder((3,))
        ) -> tp.Numpy:
            return flow.math.atan2(x, y)

        x = np.array([1, 2, 3]).astype(np.float32)
        y = np.array([4, 4, 4]).astype(np.float32)
        out = atan2Job(x, y)

        # out [0.24497867 0.4636476  0.6435011 ]
        # e.g. out[0] = arctan(1/4) ~= 0.24497867 radians
    """
    return build_math_binary_elementwise_op("atan2", x, y, name)
@oneflow_export("math.pow")
def pow(
    x: oneflow._oneflow_internal.BlobDesc,
    y: Union[oneflow._oneflow_internal.BlobDesc, float],
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Elementwise power.

    The equation is:

    .. math::

        out = x^y

    ``y`` may be another Blob (elementwise exponents) or a Python scalar, in
    which case a ``scalar_pow`` op is emitted instead of the binary op.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): A Blob
        y (Union[oneflow._oneflow_internal.BlobDesc, float]): A Blob or float value, the exponential factor of Pow
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob

    For example:

    .. code-block:: python

        # Blob exponent: flow.math.pow(x, y)
        #   x = [2, 3, 4], y = [2, 3, 4]  ->  [4., 27., 256.]
        # Scalar exponent: flow.math.pow(x, 2.0)
        #   x = [1, 2, 3]                 ->  [1., 4., 9.]
    """
    op_name = name if name is not None else id_util.UniqueStr("Pow_")
    if not isinstance(y, (int, float)):
        # Blob exponent: defer to the generic binary elementwise builder.
        return build_math_binary_elementwise_op("pow", x, y, op_name)
    return (
        flow.user_op_builder(op_name)
        .Op("scalar_pow")
        .Input("in", [x])
        .Attr("exponent", float(y))
        .Output("out")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("math.floordiv")
def floordiv(
    x: oneflow._oneflow_internal.BlobDesc,
    y: oneflow._oneflow_internal.BlobDesc,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Elementwise division of ``x`` by ``y``, rounding toward the most
    negative integer value.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): A Blob
        y (oneflow._oneflow_internal.BlobDesc): A Blob
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob

    For example:

    .. code-block:: python

        # flow.math.floordiv(x, y)
        #   x = [4, 3, 5], y = [3, 2, 2]  ->  [1., 1., 2.]
    """
    return build_math_binary_elementwise_op("floordiv", x, y, name)
@oneflow_export("math.xdivy")
def xdivy(
    x: oneflow._oneflow_internal.BlobDesc,
    y: oneflow._oneflow_internal.BlobDesc,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Elementwise quotient :math:`x/y` via the ``xdivy`` user-op.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): A Blob
        y (oneflow._oneflow_internal.BlobDesc): A Blob
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob

    For example:

    .. code-block:: python

        # flow.math.xdivy(x, y)
        #   x = [4, 3, 5], y = [3, 2, 2]  ->  [1.3333334, 1.5, 2.5]
    """
    return build_math_binary_elementwise_op("xdivy", x, y, name)
@oneflow_export("math.xlogy")
def xlogy(
    x: oneflow._oneflow_internal.BlobDesc,
    y: oneflow._oneflow_internal.BlobDesc,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Elementwise :math:`x * log(y)` via the ``xlogy`` user-op.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): A Blob
        y (oneflow._oneflow_internal.BlobDesc): A Blob
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob

    For example:

    .. code-block:: python

        # flow.math.xlogy(x, y)
        #   x = [2, 2, 2], y = [4, 8, 16]  ->  [2.7725887, 4.158883, 5.5451775]
    """
    return build_math_binary_elementwise_op("xlogy", x, y, name)
| 28.516667 | 114 | 0.630392 |
4418e830c2457fdf55ce7302adc9f341a9c1849a | 4,462 | py | Python | codebase/train.py | RuiShu/dirt-t | 2da404a0c67d4eadbec0fd223f40fcfa07cdfb2d | [
"MIT"
] | 167 | 2018-03-18T05:52:45.000Z | 2022-03-20T15:29:36.000Z | codebase/train.py | RuiShu/dirt-t | 2da404a0c67d4eadbec0fd223f40fcfa07cdfb2d | [
"MIT"
] | 4 | 2018-06-17T04:34:39.000Z | 2021-05-19T14:10:23.000Z | codebase/train.py | RuiShu/dirt-t | 2da404a0c67d4eadbec0fd223f40fcfa07cdfb2d | [
"MIT"
] | 36 | 2018-03-20T05:39:01.000Z | 2022-03-28T08:15:14.000Z | import tensorflow as tf
import tensorbayes as tb
from codebase.args import args
from codebase.datasets import PseudoData, get_info
from utils import delete_existing, save_value, save_model
import os
import sys
import numpy as np
def update_dict(M, feed_dict, src=None, trg=None, bs=100):
    """Refresh feed_dict in place with a fresh mini-batch per domain.

    M         - (TensorDict) the model; provides the placeholder tensors
    feed_dict - (dict) tensorflow feed dict, mutated in place
    src       - (obj) source domain with a train Data obj, or None to skip
    trg       - (obj) target domain with a train Data obj, or None to skip
    bs        - (int) mini-batch size
    """
    if src:
        x_batch, y_batch = src.train.next_batch(bs)
        feed_dict[M.src_x] = x_batch
        feed_dict[M.src_y] = y_batch
    if trg:
        x_batch, y_batch = trg.train.next_batch(bs)
        feed_dict[M.trg_x] = x_batch
        feed_dict[M.trg_y] = y_batch
def train(M, src=None, trg=None, has_disc=True, saver=None, model_name=None):
    """Main training function
    Creates log file, manages datasets, trains model

    NOTE: this module is Python 2 (print statements, xrange).

    M - (TensorDict) the model
    src - (obj) source domain. Contains train/test Data obj
    trg - (obj) target domain. Contains train/test Data obj
    has_disc - (bool) whether model requires a discriminator update
    saver - (Saver) saves models during training
    model_name - (str) name of the model being run with relevant parms info
    """
    # Training settings
    bs = 64
    iterep = 1000       # iterations per "epoch" (logging period)
    itersave = 20000    # checkpoint period in iterations
    n_epoch = 80
    epoch = 0
    feed_dict = {}
    # Create a log directory and FileWriter
    log_dir = os.path.join(args.logdir, model_name)
    delete_existing(log_dir)
    train_writer = tf.summary.FileWriter(log_dir)
    # Create a save directory
    if saver:
        model_dir = os.path.join('checkpoints', model_name)
        delete_existing(model_dir)
        os.makedirs(model_dir)
    # Replace src domain with psuedolabeled trg (DIRT-T phase): the teacher
    # network provides pseudolabels and is synced from the current model.
    if args.dirt > 0:
        print "Setting backup and updating backup model"
        src = PseudoData(args.trg, trg, M.teacher)
        M.sess.run(M.update_teacher)
    # Sanity check model: record initial EMA accuracies before training.
    print_list = []
    if src:
        save_value(M.fn_ema_acc, 'test/src_test_ema_1k',
                   src.test, train_writer, 0, print_list, full=False)
    if trg:
        save_value(M.fn_ema_acc, 'test/trg_test_ema',
                   trg.test, train_writer, 0, print_list)
        save_value(M.fn_ema_acc, 'test/trg_train_ema_1k',
                   trg.train, train_writer, 0, print_list, full=False)
    print print_list
    if src: get_info(args.src, src)
    if trg: get_info(args.trg, trg)
    print "Batch size:", bs
    print "Iterep:", iterep
    print "Total iterations:", n_epoch * iterep
    print "Log directory:", log_dir
    for i in xrange(n_epoch * iterep):
        # Run discriminator optimizer
        if has_disc:
            update_dict(M, feed_dict, src, trg, bs)
            summary, _ = M.sess.run(M.ops_disc, feed_dict)
            train_writer.add_summary(summary, i + 1)
        # Run main optimizer (fresh mini-batch, separate from the disc step)
        update_dict(M, feed_dict, src, trg, bs)
        summary, _ = M.sess.run(M.ops_main, feed_dict)
        train_writer.add_summary(summary, i + 1)
        train_writer.flush()
        end_epoch, epoch = tb.utils.progbar(i, iterep,
                                            message='{}/{}'.format(epoch, i),
                                            display=args.run >= 999)
        # Update pseudolabeler (refresh the teacher every args.dirt iters)
        if args.dirt and (i + 1) % args.dirt == 0:
            print "Updating teacher model"
            M.sess.run(M.update_teacher)
        # Log end-of-epoch values
        if end_epoch:
            print_list = M.sess.run(M.ops_print, feed_dict)
            if src:
                save_value(M.fn_ema_acc, 'test/src_test_ema_1k',
                           src.test, train_writer, i + 1, print_list, full=False)
            if trg:
                save_value(M.fn_ema_acc, 'test/trg_test_ema',
                           trg.test, train_writer, i + 1, print_list)
                save_value(M.fn_ema_acc, 'test/trg_train_ema_1k',
                           trg.train, train_writer, i + 1, print_list, full=False)
            print_list += ['epoch', epoch]
            print print_list
        if saver and (i + 1) % itersave == 0:
            save_model(saver, M, model_dir, i + 1)
    # Saving final model
    if saver:
        save_model(saver, M, model_dir, i + 1)
| 34.323077 | 80 | 0.598162 |
a3481edcf3aa45c42f182bb2eb2f817e4781fa17 | 4,708 | py | Python | updated_pr.py | sahabi/opt | 79f82bd9e0db77dada9554950b0b95d4e1435ca0 | [
"MIT"
] | null | null | null | updated_pr.py | sahabi/opt | 79f82bd9e0db77dada9554950b0b95d4e1435ca0 | [
"MIT"
] | null | null | null | updated_pr.py | sahabi/opt | 79f82bd9e0db77dada9554950b0b95d4e1435ca0 | [
"MIT"
] | null | null | null | # This algorith is based around DQN, parameterised like so:
# Q-function is a dense network with 2x100 node hidden layers
# experience replay contained the most recent 10000 state, action, reward triplets.
# learning took place after every episode using a minibatch size of 100
# learning rate = 0.01
# gamma = 0.99
# eGreediness = 0.05
from collections import deque
import env_road as env_m
#from gym import wrappers
import numpy as np
from agent_pr import agent
import random
import time
from hyperopt import STATUS_OK
import gym
def send_email(body):
    """Send a best-effort notification email with *body* as the message text.

    Failures are reported on stdout but never raised, so the caller's
    training loop is not interrupted by mail problems.
    """
    import smtplib
    # SECURITY: credentials are hard-coded here; move them to environment
    # variables or a config file before sharing this script.
    gmail_user = 'sahabi'
    gmail_pwd = 'b0nit0'
    FROM = 'sahabi@gmail.com'
    # Keep recipients as a list: sendmail() accepts one directly, and joining
    # a bare string would interleave ", " between every *character* of the
    # address (the original bug).
    TO = ['malshiekh@utexas.edu']
    SUBJECT = 'Found One!'
    TEXT = body
    # Prepare actual message
    message = """From: %s\nTo: %s\nSubject: %s\n\n%s
    """ % (FROM, ", ".join(TO), SUBJECT, TEXT)
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.ehlo()
        server.starttls()
        server.login(gmail_user, gmail_pwd)
        server.sendmail(FROM, TO, message)
        server.close()
        print ('successfully sent the mail')
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        print ("failed to send mail")
def objective(args):
    """Hyperopt objective: train a DQN agent and score the hyperparameters.

    args is a 5-tuple (learning rate LR, memory size MS, batch size BS,
    target-network update interval WE, open file handle for logging).
    Returns a dict in the format hyperopt expects, with
    loss = -max rolling-average reward.
    """
    LR, MS, BS, WE, target = args
    MAX_TIMESTEPS = 500
    blob = agent(4, [i for i in range(0, 8)], epsilon=1, learningRate=LR,
                 memorySize=MS, batch_size=BS, WE=WE)
    env = env_m.Env()
    #env = wrappers.Monitor(env, '/tmp/cartpole-experiment-v1',force=True)
    t = 0
    avgreward = deque([], 100)   # rolling window of episode returns
    avgQ = deque([], 100)        # rolling window of mean max-Q estimates
    trials = 100000
    # Rolling x/y traces retained for (currently unused) plotting hooks.
    x = deque([], 500)
    x.append(0)
    y = deque([], 500)
    y.append(-1)
    xQ = deque([], 500)
    xQ.append(0)
    yQ = deque([], 500)
    yQ.append(-1)
    maxsofar = 0
    maxQsofar = 0
    viz_flag = False
    S_list = []
    q_est_trials = 1000
    # Warm-up phase: act randomly to fill the replay memory and collect a
    # fixed state sample (S_list) used later to track the average max-Q.
    for i_episode in range(q_est_trials):
        S = env.reset(blob.Q_est, t, viz_flag)
        done = False
        t = 0
        tot_R = 0
        while not done:
            t += 1
            S_list.append(S)
            A = random.choice([0, 1, 2, 3, 4, 5, 6, 7])
            S_dash, R, done = env.step(A)
            blob.observe(S, A, R, S_dash)
            tot_R += R
            S = np.copy(S_dash)
    # Main training loop.
    for i_episode in range(trials):
        S = env.reset(blob.Q_est, t, viz_flag)
        done = False
        t = 0
        tot_R = 0
        while not done:
            t += 1
            A = blob.act(S)
            S_dash, R, done = env.step(A)
            blob.observe(S, A, R, S_dash)
            tot_R += R
            S = np.copy(S_dash)
        # every now and then stop, and think things through:
        if i_episode > 55:
            blob.reflect(i_episode)
        # when the episode ends the agent will have hit a terminal state, so
        # give a terminal reward: 0 on early failure, 1 on surviving the cap.
        if t < MAX_TIMESTEPS:
            blob.observe(S, A, 0., None)
        else:
            blob.observe(S, A, 1., None)
        avgreward.append(tot_R)
        avg_Q = 100 * np.average(np.amax(blob.Q.model.predict(np.array(S_list)), axis=1))
        avgQ.append(avg_Q)
        avg_reward = np.mean(avgreward)
        viz_flag = True if avg_reward > .5 else False
        # update the xy data
        yQ.append(np.mean(avgQ))
        x.append(i_episode)
        y.append(avg_reward)
        if len(avgreward) > 10:
            maxsofar = max(maxsofar, np.mean(avgreward))
        if len(avgQ) > 85:
            maxQsofar = max(maxQsofar, np.mean(avgQ))
        if i_episode % 1000 == 0:
            print('Learning rate: {}, Memory size: {}, Batch size: {}, Q update: {}'.format(LR, MS, BS, WE))
            print("episode: {}, average reward: {}, Reward: {:.2f}, Memory: {}/{}, Epsilon: {:.2f}, Max: {:.2f}, Q: {:.2f}".format(i_episode,str(np.round(np.mean(avgreward),3)),tot_R, len(blob.experience_pr._experience), MS, blob.policy.epsilon,maxsofar,np.mean(avgQ)))
            blob.Q_est.model.save('model_{}_{}_{}_{}.h5'.format(LR, MS, BS, WE))
    string = 'Args: ' + str(args[:-1]) + '\n'
    string += 'Max R: ' + str(maxsofar) + '\n'
    string += 'Max Q: ' + str(maxQsofar) + '\n'
    target.write(string)
    if maxsofar > 0.25:
        send_email(string)
        # BUGFIX: previously `model.save(...)` raised NameError (no `model`
        # in scope); save the trained estimator network instead.
        blob.Q_est.model.save('my_model.h5')
    res = {
        'loss': -1 * maxsofar,
        'status': STATUS_OK,
        # -- store other results like this
        'eval_time': time.time(),
        'maxQ': maxQsofar
    }
    print(res)
    return res
objective() | 31.597315 | 269 | 0.566058 |
0de744a4bb705147ff8173f9e9c01029aabd464e | 15,980 | py | Python | test_elasticsearch/test_server/test_helpers.py | turtle321/elasticsearch-py | 80cd96ef96f34e3bb291fdf4f643da5a1016a8d7 | [
"Apache-2.0"
] | null | null | null | test_elasticsearch/test_server/test_helpers.py | turtle321/elasticsearch-py | 80cd96ef96f34e3bb291fdf4f643da5a1016a8d7 | [
"Apache-2.0"
] | null | null | null | test_elasticsearch/test_server/test_helpers.py | turtle321/elasticsearch-py | 80cd96ef96f34e3bb291fdf4f643da5a1016a8d7 | [
"Apache-2.0"
] | null | null | null | from elasticsearch6 import helpers, TransportError
from . import ElasticsearchTestCase
from ..test_cases import SkipTest
class FailingBulkClient(object):
def __init__(self, client, fail_at=(2, ), fail_with=TransportError(599, "Error!", {})):
self.client = client
self._called = 0
self._fail_at = fail_at
self.transport = client.transport
self._fail_with = fail_with
def bulk(self, *args, **kwargs):
self._called += 1
if self._called in self._fail_at:
raise self._fail_with
return self.client.bulk(*args, **kwargs)
class TestStreamingBulk(ElasticsearchTestCase):
def test_actions_remain_unchanged(self):
actions = [{'_id': 1}, {'_id': 2}]
for ok, item in helpers.streaming_bulk(self.client, actions, index='test-index', doc_type='answers'):
self.assertTrue(ok)
self.assertEquals([{'_id': 1}, {'_id': 2}], actions)
def test_all_documents_get_inserted(self):
docs = [{"answer": x, '_id': x} for x in range(100)]
for ok, item in helpers.streaming_bulk(self.client, docs, index='test-index', doc_type='answers', refresh=True):
self.assertTrue(ok)
self.assertEquals(100, self.client.count(index='test-index', doc_type='answers')['count'])
self.assertEquals({"answer": 42}, self.client.get(index='test-index', doc_type='answers', id=42)['_source'])
def test_all_errors_from_chunk_are_raised_on_failure(self):
self.client.indices.create("i",
{
"mappings": {"t": {"properties": {"a": {"type": "integer"}}}},
"settings": {"number_of_shards": 1, "number_of_replicas": 0}
})
self.client.cluster.health(wait_for_status="yellow")
try:
for ok, item in helpers.streaming_bulk(self.client, [{"a": "b"},
{"a": "c"}], index="i", doc_type="t", raise_on_error=True):
self.assertTrue(ok)
except helpers.BulkIndexError as e:
self.assertEquals(2, len(e.errors))
else:
assert False, "exception should have been raised"
def test_different_op_types(self):
if self.es_version < (0, 90, 1):
raise SkipTest('update supported since 0.90.1')
self.client.index(index='i', doc_type='t', id=45, body={})
self.client.index(index='i', doc_type='t', id=42, body={})
docs = [
{'_index': 'i', '_type': 't', '_id': 47, 'f': 'v'},
{'_op_type': 'delete', '_index': 'i', '_type': 't', '_id': 45},
{'_op_type': 'update', '_index': 'i', '_type': 't', '_id': 42, 'doc': {'answer': 42}}
]
for ok, item in helpers.streaming_bulk(self.client, docs):
self.assertTrue(ok)
self.assertFalse(self.client.exists(index='i', doc_type='t', id=45))
self.assertEquals({'answer': 42}, self.client.get(index='i', doc_type='t', id=42)['_source'])
self.assertEquals({'f': 'v'}, self.client.get(index='i', doc_type='t', id=47)['_source'])
def test_transport_error_can_becaught(self):
failing_client = FailingBulkClient(self.client)
docs = [
{'_index': 'i', '_type': 't', '_id': 47, 'f': 'v'},
{'_index': 'i', '_type': 't', '_id': 45, 'f': 'v'},
{'_index': 'i', '_type': 't', '_id': 42, 'f': 'v'},
]
results = list(helpers.streaming_bulk(failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1))
self.assertEquals(3, len(results))
self.assertEquals([True, False, True], [r[0] for r in results])
exc = results[1][1]['index'].pop('exception')
self.assertIsInstance(exc, TransportError)
self.assertEquals(599, exc.status_code)
self.assertEquals(
{
'index': {
'_index': 'i',
'_type': 't',
'_id': 45,
'data': {'f': 'v'},
'error': "TransportError(599, 'Error!')",
'status': 599
}
},
results[1][1]
)
def test_rejected_documents_are_retried(self):
failing_client = FailingBulkClient(self.client, fail_with=TransportError(429, 'Rejected!', {}))
docs = [
{'_index': 'i', '_type': 't', '_id': 47, 'f': 'v'},
{'_index': 'i', '_type': 't', '_id': 45, 'f': 'v'},
{'_index': 'i', '_type': 't', '_id': 42, 'f': 'v'},
]
results = list(helpers.streaming_bulk(failing_client, docs,
raise_on_exception=False,
raise_on_error=False,
chunk_size=1, max_retries=1,
initial_backoff=0))
self.assertEquals(3, len(results))
self.assertEquals([True, True, True], [r[0] for r in results])
self.client.indices.refresh(index='i')
res = self.client.search(index='i')
self.assertEquals(3, res['hits']['total'])
self.assertEquals(4, failing_client._called)
def test_rejected_documents_are_retried_at_most_max_retries_times(self):
failing_client = FailingBulkClient(self.client, fail_at=(1, 2, ),
fail_with=TransportError(429, 'Rejected!', {}))
docs = [
{'_index': 'i', '_type': 't', '_id': 47, 'f': 'v'},
{'_index': 'i', '_type': 't', '_id': 45, 'f': 'v'},
{'_index': 'i', '_type': 't', '_id': 42, 'f': 'v'},
]
results = list(helpers.streaming_bulk(failing_client, docs,
raise_on_exception=False,
raise_on_error=False,
chunk_size=1, max_retries=1,
initial_backoff=0))
self.assertEquals(3, len(results))
self.assertEquals([False, True, True], [r[0] for r in results])
self.client.indices.refresh(index='i')
res = self.client.search(index='i')
self.assertEquals(2, res['hits']['total'])
self.assertEquals(4, failing_client._called)
def test_transport_error_is_raised_with_max_retries(self):
failing_client = FailingBulkClient(self.client, fail_at=(1, 2, 3, 4, ),
fail_with=TransportError(429, 'Rejected!', {}))
def streaming_bulk():
results = list(helpers.streaming_bulk(
failing_client,
[{"a": 42}, {"a": 39}],
raise_on_exception=True,
max_retries=3,
initial_backoff=0
))
return results
self.assertRaises(TransportError, streaming_bulk)
self.assertEquals(4, failing_client._called)
class TestBulk(ElasticsearchTestCase):
    """Integration tests for helpers.bulk()."""

    def test_bulk_works_with_single_item(self):
        docs = [{"answer": 42, '_id': 1}]
        success, failed = helpers.bulk(self.client, docs, index='test-index', doc_type='answers', refresh=True)
        # FIX (applies throughout this class): assertEquals is a deprecated
        # unittest alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(1, success)
        self.assertFalse(failed)
        self.assertEqual(1, self.client.count(index='test-index', doc_type='answers')['count'])
        self.assertEqual({"answer": 42},
                         self.client.get(index='test-index', doc_type='answers', id=1)['_source'])

    def test_all_documents_get_inserted(self):
        docs = [{"answer": x, '_id': x} for x in range(100)]
        success, failed = helpers.bulk(self.client, docs, index='test-index', doc_type='answers', refresh=True)
        self.assertEqual(100, success)
        self.assertFalse(failed)
        self.assertEqual(100, self.client.count(index='test-index', doc_type='answers')['count'])
        self.assertEqual({"answer": 42},
                         self.client.get(index='test-index', doc_type='answers', id=42)['_source'])

    def test_stats_only_reports_numbers(self):
        # stats_only=True returns a failure *count* instead of a list of errors.
        docs = [{"answer": x} for x in range(100)]
        success, failed = helpers.bulk(self.client, docs, index='test-index', doc_type='answers',
                                       refresh=True, stats_only=True)
        self.assertEqual(100, success)
        self.assertEqual(0, failed)
        self.assertEqual(100, self.client.count(index='test-index', doc_type='answers')['count'])

    def test_errors_are_reported_correctly(self):
        self.client.indices.create("i",
            {
                "mappings": {"t": {"properties": {"a": {"type": "integer"}}}},
                "settings": {"number_of_shards": 1, "number_of_replicas": 0}
            })
        self.client.cluster.health(wait_for_status="yellow")

        success, failed = helpers.bulk(
            self.client,
            [{"a": 42}, {"a": "c", '_id': 42}],
            index="i",
            doc_type="t",
            raise_on_error=False
        )
        self.assertEqual(1, success)
        self.assertEqual(1, len(failed))
        error = failed[0]
        self.assertEqual('42', error['index']['_id'])
        self.assertEqual('t', error['index']['_type'])
        self.assertEqual('i', error['index']['_index'])
        # FIX: removed leftover debug print(error['index']['error']).
        # Error naming differs across ES versions (CamelCase vs snake_case).
        self.assertTrue('MapperParsingException' in repr(error['index']['error'])
                        or 'mapper_parsing_exception' in repr(error['index']['error']))

    def test_error_is_raised(self):
        self.client.indices.create("i",
            {
                "mappings": {"t": {"properties": {"a": {"type": "integer"}}}},
                "settings": {"number_of_shards": 1, "number_of_replicas": 0}
            })
        self.client.cluster.health(wait_for_status="yellow")

        self.assertRaises(helpers.BulkIndexError, helpers.bulk,
            self.client,
            [{"a": 42}, {"a": "c"}],
            index="i",
            doc_type="t"
        )

    def test_errors_are_collected_properly(self):
        self.client.indices.create("i",
            {
                "mappings": {"t": {"properties": {"a": {"type": "integer"}}}},
                "settings": {"number_of_shards": 1, "number_of_replicas": 0}
            })
        self.client.cluster.health(wait_for_status="yellow")

        success, failed = helpers.bulk(
            self.client,
            [{"a": 42}, {"a": "c"}],
            index="i",
            doc_type="t",
            stats_only=True,
            raise_on_error=False
        )
        self.assertEqual(1, success)
        self.assertEqual(1, failed)
class TestScan(ElasticsearchTestCase):
    """Integration tests for helpers.scan()."""

    def test_order_can_be_preserved(self):
        bulk = []
        for x in range(100):
            bulk.append({"index": {"_index": "test_index", "_type": "answers", "_id": x}})
            bulk.append({"answer": x, "correct": x == 42})
        self.client.bulk(bulk, refresh=True)

        docs = list(helpers.scan(self.client, index="test_index", doc_type="answers",
                                 query={"sort": "answer"}, preserve_order=True))

        # FIX: assertEquals -> assertEqual (deprecated alias, removed in 3.12).
        self.assertEqual(100, len(docs))
        # preserve_order=True must yield documents in sorted order, so compare lists.
        self.assertEqual(list(map(str, range(100))), list(d['_id'] for d in docs))
        self.assertEqual(list(range(100)), list(d['_source']['answer'] for d in docs))

    def test_all_documents_are_read(self):
        bulk = []
        for x in range(100):
            bulk.append({"index": {"_index": "test_index", "_type": "answers", "_id": x}})
            bulk.append({"answer": x, "correct": x == 42})
        self.client.bulk(bulk, refresh=True)

        docs = list(helpers.scan(self.client, index="test_index", doc_type="answers", size=2))

        self.assertEqual(100, len(docs))
        # order is not guaranteed here, so compare sets.
        self.assertEqual(set(map(str, range(100))), set(d['_id'] for d in docs))
        self.assertEqual(set(range(100)), set(d['_source']['answer'] for d in docs))
class TestReindex(ElasticsearchTestCase):
    """Integration tests for helpers.reindex()."""

    def setUp(self):
        super(TestReindex, self).setUp()
        # 100 docs, half tagged "answers", half "questions".
        bulk = []
        for x in range(100):
            bulk.append({"index": {"_index": "test_index", "_type": "post", "_id": x}})
            bulk.append({"answer": x, "correct": x == 42,
                         "type": "answers" if x % 2 == 0 else "questions"})
        self.client.bulk(bulk, refresh=True)

    def test_reindex_passes_kwargs_to_scan_and_bulk(self):
        helpers.reindex(self.client, "test_index", "prod_index",
                        scan_kwargs={'q': 'type:answers'}, bulk_kwargs={'refresh': True})

        self.assertTrue(self.client.indices.exists("prod_index"))
        # FIX: assertEquals -> assertEqual (deprecated alias, removed in 3.12).
        self.assertEqual(50, self.client.count(index='prod_index', q='type:answers')['count'])
        self.assertEqual({"answer": 42, "correct": True, "type": "answers"},
                         self.client.get(index="prod_index", doc_type="post", id=42)['_source'])

    def test_reindex_accepts_a_query(self):
        helpers.reindex(self.client, "test_index", "prod_index",
                        query={"query": {"bool": {"filter": {"term": {"type": "answers"}}}}})
        self.client.indices.refresh()

        self.assertTrue(self.client.indices.exists("prod_index"))
        self.assertEqual(50, self.client.count(index='prod_index', q='type:answers')['count'])
        self.assertEqual({"answer": 42, "correct": True, "type": "answers"},
                         self.client.get(index="prod_index", doc_type="post", id=42)['_source'])

    def test_all_documents_get_moved(self):
        helpers.reindex(self.client, "test_index", "prod_index")
        self.client.indices.refresh()

        self.assertTrue(self.client.indices.exists("prod_index"))
        self.assertEqual(50, self.client.count(index='prod_index', q='type:questions')['count'])
        self.assertEqual(50, self.client.count(index='prod_index', q='type:answers')['count'])
        self.assertEqual({"answer": 42, "correct": True, "type": "answers"},
                         self.client.get(index="prod_index", doc_type="post", id=42)['_source'])
class TestParentChildReindex(ElasticsearchTestCase):
    """reindex() must carry parent/child (join-field) documents across."""

    def setUp(self):
        super(TestParentChildReindex, self).setUp()
        body = {
            'settings': {"number_of_shards": 1, "number_of_replicas": 0},
            'mappings': {
                'post': {
                    'properties': {
                        'question_answer': {
                            'type': 'join',
                            'relations': {'question': 'answer'}
                        }
                    }
                }
            }
        }
        self.client.indices.create(index='test-index', body=body)
        self.client.indices.create(index='real-index', body=body)

        # parent doc
        self.client.index(
            index='test-index',
            doc_type='post',
            id=42,
            body={'question_answer': 'question'},
        )
        # child doc, routed to its parent's shard
        self.client.index(
            index='test-index',
            doc_type='post',
            id=47,
            routing=42,
            body={'some': 'data', 'question_answer': {'name': 'answer', 'parent': 42}},
        )
        self.client.indices.refresh(index='test-index')

    def test_children_are_reindexed_correctly(self):
        helpers.reindex(self.client, 'test-index', 'real-index')

        q = self.client.get(
            index='real-index',
            doc_type='post',
            id=42
        )
        # FIX: assertEquals -> assertEqual (deprecated alias, removed in 3.12).
        self.assertEqual(
            {
                '_id': '42',
                '_index': 'real-index',
                '_source': {'question_answer': 'question'},
                '_type': 'post',
                '_version': 1,
                'found': True
            }, q
        )
        # NOTE(review): this second get re-checks the child in the *source*
        # index ('test-index'); presumably the reindexed child in 'real-index'
        # should also be verified — confirm intent before changing.
        q = self.client.get(
            index='test-index',
            doc_type='post',
            id=47,
            routing=42
        )
        self.assertEqual(
            {
                '_routing': '42',
                '_id': '47',
                '_index': 'test-index',
                '_source': {'some': 'data', 'question_answer': {'name': 'answer', 'parent': 42}},
                '_type': 'post',
                '_version': 1,
                'found': True
            }, q
        )
| 42.613333 | 149 | 0.548874 |
40ab58f847fd18fe9b9cfdb2441e9660eeaad600 | 4,118 | py | Python | metricbeat/module/haproxy/test_haproxy.py | ContinuumLLC/beats | 4b9bc97d7e95c187a0326ba52d5fb052dd5d5a30 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2018-05-29T06:02:52.000Z | 2020-09-16T00:59:05.000Z | metricbeat/module/haproxy/test_haproxy.py | ContinuumLLC/beats | 4b9bc97d7e95c187a0326ba52d5fb052dd5d5a30 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2021-06-08T22:35:14.000Z | 2022-01-13T03:22:26.000Z | metricbeat/module/haproxy/test_haproxy.py | ContinuumLLC/beats | 4b9bc97d7e95c187a0326ba52d5fb052dd5d5a30 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-03-04T03:40:53.000Z | 2020-03-05T12:44:51.000Z | import os
import sys
import unittest
from nose.plugins.attrib import attr
sys.path.append(os.path.join(os.path.dirname(__file__), '../../tests/system'))
import metricbeat
HAPROXY_FIELDS = metricbeat.COMMON_FIELDS + ["haproxy"]
@metricbeat.parameterized_with_supported_versions
class HaproxyTest(metricbeat.BaseTest):
    """Integration tests for the Metricbeat haproxy module (info/stat metricsets)."""

    COMPOSE_SERVICES = ['haproxy']

    def _test_info(self):
        # Run metricbeat until one event is written, then validate its shape.
        proc = self.start_beat()
        self.wait_until(lambda: self.output_lines() > 0)
        proc.check_kill_and_wait()
        self.assert_no_logged_warnings()

        output = self.read_output_json()
        self.assertEqual(len(output), 1)
        evt = output[0]

        self.assertCountEqual(self.de_dot(HAPROXY_FIELDS + ["process"]), evt.keys(), evt)
        self.assert_fields_are_documented(evt)

    @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
    def test_info_socket(self):
        """
        haproxy info unix socket metricset test
        """
        self.render_config_template(modules=[{
            "name": "haproxy",
            "metricsets": ["info"],
            "hosts": ["tcp://%s" % (self.compose_host(port="14567/tcp"))],
            "period": "5s"
        }])
        self._test_info()

    def _test_stat(self):
        proc = self.start_beat()
        self.wait_until(lambda: self.output_lines() > 0)
        proc.check_kill_and_wait()
        self.assert_no_logged_warnings(replace=['SSL/TLS verifications disabled.'])

        output = self.read_output_json()
        self.assertGreater(len(output), 0)

        for evt in output:
            # FIX: removed leftover debug print(evt) that polluted test output.
            self.assertCountEqual(self.de_dot(HAPROXY_FIELDS + ["process"]), evt.keys(), evt)
            self.assert_fields_are_documented(evt)

    @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
    def test_stat_socket(self):
        """
        haproxy stat unix socket metricset test
        """
        self.render_config_template(modules=[{
            "name": "haproxy",
            "metricsets": ["stat"],
            "hosts": ["tcp://%s" % (self.compose_host(port="14567/tcp"))],
            "period": "5s"
        }])
        self._test_stat()

    @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
    def test_stat_http(self):
        """
        haproxy stat http metricset test
        """
        self.render_config_template(modules=[{
            "name": "haproxy",
            "metricsets": ["stat"],
            "hosts": ["http://%s/stats" % (self.compose_host(port="14568/tcp"))],
            "period": "5s"
        }])
        self._test_stat()

    @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
    def test_stat_https(self):
        """
        haproxy stat https metricset test
        """
        self.render_config_template(modules=[{
            "name": "haproxy",
            "metricsets": ["stat"],
            "hosts": ["https://%s/stats" % (self.compose_host(port="14570/tcp"))],
            "period": "5s",
            "extras": {
                "ssl.certificate_authorities": [os.path.join(os.path.dirname(__file__), '_meta/certs/ca.pem')],
                "ssl.certificate": os.path.join(os.path.dirname(__file__), '_meta/certs/client.pem'),
                "ssl.key": os.path.join(os.path.dirname(__file__), '_meta/certs/client.key'),
                # TODO: verification_mode: "certificate"
                # compose uses dynamic IP addresses and there are no IP SAN records in the certificate
                "ssl.verification_mode": "none"
            }
        }])
        self._test_stat()

    @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
    def test_stat_http_auth(self):
        """
        haproxy stat http basic auth metricset test
        """
        self.render_config_template(modules=[{
            "name": "haproxy",
            "metricsets": ["stat"],
            "username": "admin",
            "password": "admin",
            "hosts": ["http://%s/stats" % (self.compose_host(port="14569/tcp"))],
            "period": "5s"
        }])
        self._test_stat()
| 34.316667 | 111 | 0.58815 |
20e9c7a28db07ca3013ded9f7e97289464755668 | 38 | py | Python | tri_distance/__init__.py | EdwardSmith1884/GEOMetrics | a39d4a45dfd33c257ff0f68069a5a3072bda7071 | [
"MIT"
] | 123 | 2019-02-01T03:34:04.000Z | 2022-02-20T21:02:17.000Z | tri_distance/__init__.py | EdwardSmith1884/GEOMetrics | a39d4a45dfd33c257ff0f68069a5a3072bda7071 | [
"MIT"
] | 18 | 2019-02-20T21:22:45.000Z | 2020-10-13T12:57:05.000Z | tri_distance/__init__.py | EdwardSmith1884/GEOMetrics | a39d4a45dfd33c257ff0f68069a5a3072bda7071 | [
"MIT"
] | 15 | 2019-04-17T13:50:04.000Z | 2021-02-07T20:34:54.000Z | from .tri_distance import TriDistance
| 19 | 37 | 0.868421 |
b6dbb64bb82e1b459f6b554c1b37c50c884799bd | 5,127 | py | Python | tools/gen_length_codes.py | selavy/plszip | 638bc10f715b77fdf3e1fb50325013c94594fd79 | [
"MIT"
] | null | null | null | tools/gen_length_codes.py | selavy/plszip | 638bc10f715b77fdf3e1fb50325013c94594fd79 | [
"MIT"
] | null | null | null | tools/gen_length_codes.py | selavy/plszip | 638bc10f715b77fdf3e1fb50325013c94594fd79 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# DEFLATE (RFC 1951, section 3.2.5) length-code table.
# Each row maps a length code (257..285) to the number of extra bits it
# carries and the inclusive range of match lengths [start, stop] it covers.
length_info = (
    # code, extra_bits, start, stop
    ( 257, 0, 3, 3, ),
    ( 258, 0, 4, 4, ),
    ( 259, 0, 5, 5, ),
    ( 260, 0, 6, 6, ),
    ( 261, 0, 7, 7, ),
    ( 262, 0, 8, 8, ),
    ( 263, 0, 9, 9, ),
    ( 264, 0, 10, 10, ),
    ( 265, 1, 11, 12, ),
    ( 266, 1, 13, 14, ),
    ( 267, 1, 15, 16, ),
    ( 268, 1, 17, 18, ),
    ( 269, 2, 19, 22, ),
    ( 270, 2, 23, 26, ),
    ( 271, 2, 27, 30, ),
    ( 272, 2, 31, 34, ),
    ( 273, 3, 35, 42, ),
    ( 274, 3, 43, 50, ),
    ( 275, 3, 51, 58, ),
    ( 276, 3, 59, 66, ),
    ( 277, 4, 67, 82, ),
    ( 278, 4, 83, 98, ),
    ( 279, 4, 99, 114, ),
    ( 280, 4, 115, 130, ),
    ( 281, 5, 131, 162, ),
    ( 282, 5, 163, 194, ),
    ( 283, 5, 195, 226, ),
    ( 284, 5, 227, 257, ),
    ( 285, 0, 258, 258, ),
)
# DEFLATE (RFC 1951, section 3.2.5) distance-code table.
# Each row maps a distance code (0..29) to its extra-bit count and the
# inclusive range of backreference distances [start, stop] it covers.
distance_info = (
    # code, extra_bits, start, stop
    ( 0, 0, 1, 1, ),
    ( 1, 0, 2, 2, ),
    ( 2, 0, 3, 3, ),
    ( 3, 0, 4, 4, ),
    ( 4, 1, 5, 6, ),
    ( 5, 1, 7, 8, ),
    ( 6, 2, 9, 12, ),
    ( 7, 2, 13, 16, ),
    ( 8, 3, 17, 24, ),
    ( 9, 3, 25, 32, ),
    ( 10, 4, 33, 48, ),
    ( 11, 4, 49, 64, ),
    ( 12, 5, 65, 96, ),
    ( 13, 5, 97, 128, ),
    ( 14, 6, 129, 192, ),
    ( 15, 6, 193, 256, ),
    ( 16, 7, 257, 384, ),
    ( 17, 7, 385, 512, ),
    ( 18, 8, 513, 768, ),
    ( 19, 8, 769, 1024, ),
    ( 20, 9, 1025, 1536, ),
    ( 21, 9, 1537, 2048, ),
    ( 22, 10, 2049, 3072, ),
    ( 23, 10, 3073, 4096, ),
    ( 24, 11, 4097, 6144, ),
    ( 25, 11, 6145, 8192, ),
    ( 26, 12, 8193, 12288, ),
    ( 27, 12, 12289, 16384, ),
    ( 28, 13, 16385, 24576, ),
    ( 29, 13, 24577, 32768, ),
)
def get_distance_extra_bits(dst):
    """Return the extra-bit count for backreference distance ``dst``.

    Raises:
        ValueError: if ``dst`` is outside 1..32768.
    """
    for code, extra_bits, start, stop in distance_info:
        if start <= dst <= stop:
            return extra_bits
    # FIX: the message was a plain string, so "{dst}" was emitted literally;
    # add the f-prefix so the offending value is interpolated (matches the
    # f-strings used by the sibling helpers).
    raise ValueError(f"invalid distance: {dst}")
def get_distance_base(x):
    """Return the smallest distance of the code range containing *x*."""
    row = next((r for r in distance_info if r[2] <= x <= r[3]), None)
    if row is None:
        raise ValueError(f"invalid distance: {x}")
    return row[2]
def get_distance_code(x):
    """Return the DEFLATE distance code whose range contains *x*."""
    for code, _extra, lo, hi in distance_info:
        if lo <= x <= hi:
            return code
    raise ValueError(f"invalid distance: {x}")
def get_extra_bits_from_distance_code(dst_code):
    """Return the extra-bit count for a distance *code* (not a distance)."""
    lookup = {row[0]: row[1] for row in distance_info}
    try:
        return lookup[dst_code]
    except KeyError:
        raise ValueError(f"invalid distance code: {dst_code}") from None
def get_extra_bits_from_literal(lit):
    """Extra bits for a literal/length symbol; plain literals (<257) carry none."""
    if lit < 257:
        return 0
    for row in length_info:
        if row[0] == lit:
            return row[1]
    raise ValueError(f"invalid literal: {lit}")
def get_length_code(x):
    """Return the DEFLATE length code whose range contains *x*."""
    for code, _extra, lo, hi in length_info:
        if lo <= x <= hi:
            return code
    raise ValueError(f"invalid length: {x}")
def get_length_base(x):
    """Return the smallest match length of the code range containing *x*."""
    row = next((r for r in length_info if r[2] <= x <= r[3]), None)
    if row is None:
        raise ValueError(f"invalid length: {x}")
    return row[2]
def get_length_extra(x):
    """Return the extra-bit count for match length *x*."""
    row = next((r for r in length_info if r[2] <= x <= r[3]), None)
    if row is None:
        raise ValueError(f"invalid length: {x}")
    return row[1]
# Length Tables: indexed by match length (valid lengths are 3..258);
# the first three slots stay -1 as "invalid" sentinels.
N = 258 + 1
length_codes = [-1, -1, -1] + [get_length_code(n) for n in range(3, N)]
length_bases = [-1, -1, -1] + [get_length_base(n) for n in range(3, N)]
length_extra = [-1, -1, -1] + [get_length_extra(n) for n in range(3, N)]

# Extra bits per literal/length symbol (0..285) and per distance code (0..29).
literal_to_extra_bits = [get_extra_bits_from_literal(lit) for lit in range(286)]
distance_code_to_extra_bits = [
    get_extra_bits_from_distance_code(c) for c in range(30)
]
# Distance Tables
def dist_index(dst):
    """Map a distance (1-based) to a compact table index: distances up to
    256 index directly, larger ones fall into 128-wide buckets."""
    return dst - 1 if dst <= 256 else 256 + ((dst - 1) >> 7)
# Build compact distance tables indexed by dist_index(dst).
max_dist = 32768-1
max_dist_index = dist_index(max_dist)
# -1 is the "not yet written" sentinel used by the consistency asserts below.
distance_codes = [-1]*(max_dist_index+1)
distance_bases = [-1]*(max_dist_index+1)
distance_extra = [-1]*(max_dist_index+1)
for dst in range(1, max_dist+1):
    index = dist_index(dst)
    code = get_distance_code(dst)
    base = get_distance_base(dst)
    extra = get_distance_extra_bits(dst)
    # Every distance that maps to the same bucket must agree on its
    # code/base/extra, otherwise the compact indexing scheme is broken.
    assert distance_codes[index] == -1 or distance_codes[index] == code
    assert distance_bases[index] == -1 or distance_bases[index] == base
    assert distance_extra[index] == -1 or distance_extra[index] == extra
    distance_codes[index] = code
    distance_bases[index] = base
    distance_extra[index] = extra
# Header Tables: extra bits for the 19 code-length alphabet symbols;
# only symbols 16, 17 and 18 carry extra bits (2, 3 and 7 respectively).
_HEADER_EXTRA = {16: 2, 17: 3, 18: 7}
header_extra_bits = [_HEADER_EXTRA.get(sym, 0) for sym in range(19)]
| 27.86413 | 78 | 0.503998 |
6911b9930d99e11e39cfd04df0aded504c63f7cf | 1,025 | py | Python | schedulers/mosaic_schedulers/__init__.py | nasa/MOSAIC | af396ec450bd9f6f95fc5c603e13964035e05cd6 | [
"Apache-2.0"
] | 18 | 2019-04-01T02:58:38.000Z | 2022-01-02T07:31:03.000Z | schedulers/mosaic_schedulers/__init__.py | nasa/MOSAIC | af396ec450bd9f6f95fc5c603e13964035e05cd6 | [
"Apache-2.0"
] | null | null | null | schedulers/mosaic_schedulers/__init__.py | nasa/MOSAIC | af396ec450bd9f6f95fc5c603e13964035e05cd6 | [
"Apache-2.0"
] | 3 | 2019-11-04T01:52:50.000Z | 2021-09-13T01:52:50.000Z | """
Copyright 2019 by California Institute of Technology. ALL RIGHTS RESERVED.
United States Government sponsorship acknowledged. Any commercial use
must be negotiated with the Office of Technology Transfer at the
California Institute of Technology.
This software may be subject to U.S. export control laws and regulations.
By accepting this document, the user agrees to comply with all applicable
U.S. export laws and regulations. User has the responsibility to obtain
export licenses, or other export authority as may be required before
exporting such information to foreign countries or providing access to
foreign persons.
This software is a copy and may not be current. The latest version is
maintained by and may be obtained from the Mobility and Robotics Sytstem
Section (347) at the Jet Propulsion Laboratory. Suggestions and patches
are welcome and should be sent to the software's maintainer.
"""
from . import schedulers
from . import common
| 44.565217 | 76 | 0.76 |
62533a878cb5174178126f3a1183e5aeb87c4985 | 28,120 | py | Python | pytorch_lightning/core/step_result.py | shivdhar/pytorch-lightning | e4f7223c6384a65520c958f64b02db1952c61430 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/core/step_result.py | shivdhar/pytorch-lightning | e4f7223c6384a65520c958f64b02db1952c61430 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/core/step_result.py | shivdhar/pytorch-lightning | e4f7223c6384a65520c958f64b02db1952c61430 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from copy import copy
from typing import Optional, Dict, Union, Sequence, Callable, MutableMapping, Any, List, Tuple
import torch
from torch import Tensor
import os
from pytorch_lightning.metrics.converters import _sync_ddp_if_available
class Result(Dict):
    """Dict-based container for the values returned from a Lightning step.

    Logged values are stored as plain dict entries; per-key routing/reduction
    options (progress bar vs. logger, per-step vs. per-epoch, reduce fn,
    TBPTT options) are stored under ``self['meta']``.
    """

    def __init__(
        self,
        minimize: Optional[Tensor] = None,
        early_stop_on: Optional[Tensor] = None,
        checkpoint_on: Union[Tensor, bool, None] = None,
        hiddens: Optional[Tensor] = None,
    ):
        super().__init__()

        # temporary until dict results are deprecated
        os.environ['PL_USING_RESULT_OBJ'] = '1'

        if early_stop_on is not None:
            self.early_stop_on = early_stop_on
        if checkpoint_on is not None and checkpoint_on:
            self.checkpoint_on = checkpoint_on
        if hiddens is not None:
            self.hiddens = hiddens.detach()
        if minimize is not None:
            err = 'Minimize can only be used in training_step, training_step_end, training_epoch_end'
            self._assert_grad_tensor_metric('minimize', minimize, err)
            self.minimize = minimize

        # default the checkpoint metric to the (detached) training loss
        if minimize is not None and checkpoint_on is None:
            self.checkpoint_on = minimize.detach()

        self['meta'] = {'_internal': {'_reduce_on_epoch': False, 'batch_sizes': []}}

    def __getitem__(self, key: Union[str, Any]) -> Any:
        # fall back to the 'step_' variant created when a metric is logged
        # with both on_step=True and on_epoch=True (see log()).
        try:
            return super().__getitem__(key)
        except KeyError:
            return super().__getitem__(f'step_{key}')

    def __getattr__(self, key: str) -> Any:
        # attribute access is routed through the dict; unknown keys yield None
        try:
            if key == 'callback_metrics':
                return self.get_callback_metrics()
            elif key == 'batch_log_metrics':
                return self.get_batch_log_metrics()
            elif key == 'batch_pbar_metrics':
                return self.get_batch_pbar_metrics()
            elif key == 'epoch_log_metrics':
                return self.get_epoch_log_metrics()
            elif key == 'epoch_pbar_metrics':
                return self.get_epoch_pbar_metrics()
            else:
                return self[key]
        except KeyError:
            return None

    def __setattr__(self, key: str, val: Union[Tensor, Any]):
        # ensure reserve keys are tensors and detached
        if key in {'checkpoint_on', 'early_stop_on'}:
            self._assert_tensor_metric(key, val)
            if val is not None and isinstance(val, torch.Tensor):
                val = val.detach()

        # ensure anything else that is a tensor is detached
        elif isinstance(val, torch.Tensor) and key != 'minimize':
            val = val.detach()

        self[key] = val

    def _assert_tensor_metric(self, name: str, potential_metric: Union[bool, Tensor, None, Any]):
        # bool is allowed so checkpoint_on=True/False can be passed through
        if potential_metric is not None and not isinstance(potential_metric, bool):
            assert isinstance(potential_metric, Tensor), f'{name} must be a torch.Tensor'

    def _assert_grad_tensor_metric(self, name: str, x: Union[torch.Tensor, Any], additional_err: str = ''):
        # the minimized metric must still carry its computational graph
        if x is not None:
            assert isinstance(x, Tensor), f'{name} must be a torch.Tensor'
            m = f'{name} must have a computational graph.'

            if additional_err:
                m += f' {additional_err}'
            assert x.grad_fn is not None, m

    def log(
        self,
        name: str,
        value: Any,
        prog_bar: bool = False,
        logger: bool = True,
        on_step: bool = False,
        on_epoch: bool = True,
        reduce_fx: Callable = torch.mean,
        tbptt_reduce_fx: Callable = torch.mean,
        tbptt_pad_token: int = 0,
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_op: Union[Any, str] = 'mean',
        sync_dist_group: Optional[Any] = None,
    ):
        """Store ``value`` under ``name`` and record its routing/reduction meta."""
        # no metrics should be logged with graphs
        if not enable_graph and isinstance(value, torch.Tensor):
            value = value.detach()

        # sync across ddp
        if sync_dist and isinstance(value, (torch.Tensor, numbers.Number)):
            value = _sync_ddp_if_available(value, group=sync_dist_group, reduce_op=sync_dist_op)

        if 'meta' not in self:
            self.__setitem__('meta', {})

        # if user requests both step and epoch, then we split the metric in two automatically
        # one will be logged per step. the other per epoch
        if on_step and on_epoch:
            # set step version
            step_name = f'step_{name}'
            self.__set_meta(
                step_name,
                value,
                prog_bar,
                logger,
                on_step=True,
                on_epoch=False,
                reduce_fx=reduce_fx,
                tbptt_reduce_fx=tbptt_reduce_fx,
                tbptt_pad_token=tbptt_pad_token,
            )
            self.__setitem__(step_name, value)

            # set epoch version
            epoch_name = f'epoch_{name}'
            self.__set_meta(
                epoch_name,
                value,
                prog_bar,
                logger,
                on_step=False,
                on_epoch=True,
                reduce_fx=reduce_fx,
                tbptt_reduce_fx=tbptt_reduce_fx,
                tbptt_pad_token=tbptt_pad_token,
            )
            self.__setitem__(epoch_name, value)
        else:
            self.__set_meta(
                name,
                value,
                prog_bar,
                logger,
                on_step,
                on_epoch,
                reduce_fx,
                tbptt_reduce_fx=tbptt_reduce_fx,
                tbptt_pad_token=tbptt_pad_token,
            )

            # set the value
            self.__setitem__(name, value)

    def __set_meta(
        self,
        name: str,
        value: Any,
        prog_bar: bool,
        logger: bool,
        on_step: bool,
        on_epoch: bool,
        reduce_fx: Callable,
        tbptt_pad_token: int,
        tbptt_reduce_fx: Callable,
    ):
        # set the meta for the item
        meta_value = value
        meta = dict(
            prog_bar=prog_bar,
            logger=logger,
            on_step=on_step,
            on_epoch=on_epoch,
            reduce_fx=reduce_fx,
            value=meta_value,
            tbptt_reduce_fx=tbptt_reduce_fx,
            tbptt_pad_token=tbptt_pad_token,
        )
        self['meta'][name] = meta

        # track whether any input requires reduction on epoch end
        _internal = self['meta']['_internal']
        _internal['_reduce_on_epoch'] = max(_internal['_reduce_on_epoch'], on_epoch)

    def track_batch_size(self, batch_size):
        # batch sizes are accumulated so epoch reduction can weight by them
        meta = self['meta']
        meta['_internal']['batch_sizes'].append(batch_size)

    def get_batch_sizes(self):
        meta = self['meta']
        return torch.tensor(meta['_internal']['batch_sizes'])

    def get_callback_metrics(self) -> dict:
        result = {'early_stop_on': self.early_stop_on, 'checkpoint_on': self.checkpoint_on}

        return result

    def get_batch_log_metrics(self) -> dict:
        """
        Gets the metrics to log at the end of the batch step
        """
        result = {}

        meta = self['meta']
        for k, options in meta.items():
            if k == '_internal':
                continue
            if options['logger'] and options['on_step']:
                result[k] = self[k]

        return result

    def get_epoch_log_metrics(self) -> dict:
        """
        Gets the metrics to log at the end of the epoch
        """
        result = {}

        meta = self['meta']
        for k, options in meta.items():
            if k == '_internal':
                continue
            if options['logger'] and options['on_epoch']:
                result[k] = self[k]

        return result

    def get_epoch_pbar_metrics(self):
        """
        Gets the metrics to show in the progress bar at the end of the epoch
        """
        result = {}

        meta = self['meta']
        for k, options in meta.items():
            if k == '_internal':
                continue
            if options['prog_bar'] and options['on_epoch']:
                result[k] = self[k]

        return result

    def get_batch_pbar_metrics(self):
        """
        Gets the metrics to show in the progress bar at the end of the batch step
        """
        result = {}

        meta = self['meta']
        for k, options in meta.items():
            if k == '_internal':
                continue
            if options['prog_bar'] and options['on_step']:
                result[k] = self[k]

        return result

    def detach(self):
        # detach every stored tensor in place
        for k, v in self.items():
            if isinstance(v, torch.Tensor):
                self.__setitem__(k, v.detach())

    def __repr__(self):
        # hide the internal 'meta' bookkeeping from the printed form
        self_copy = self.copy()

        if 'meta' in self_copy:
            del self_copy['meta']

        return str(self_copy)

    def __str__(self):
        copy = self.copy()
        del copy['meta']

        return str(copy)

    def __copy__(self):
        newone = type(self)()
        for k, v in self.items():
            newone[k] = copy(v)
        return newone

    @classmethod
    def gather(cls, outputs):
        # merge a list of Results into one, stacking each key's values
        meta = outputs[0].get('meta')
        result = cls()
        result = recursive_gather(outputs, result)
        recursive_stack(result)

        if meta:
            result['meta'] = meta
        return result

    @classmethod
    def padded_gather(cls, outputs):
        # like gather(), but pads variable-length tensors (TBPTT) before stacking
        meta = outputs[0].get('meta')
        result = cls()
        result = recursive_gather(outputs, result)

        # find the padding used for other values
        default_padding_idx = 0
        for name, value in result.items():
            if isinstance(value, list) and len(value) > 0 and isinstance(value[0], torch.Tensor):
                if name not in {'checkpoint_on', 'early_stop_on', 'minimize'}:
                    default_padding_idx = meta[name]['tbptt_pad_token']
                    break

        # pad across each key individually
        for name, value in result.items():
            is_reserved = name in {'checkpoint_on', 'early_stop_on', 'minimize'}
            if isinstance(value, list) and len(value) > 0 and isinstance(value[0], torch.Tensor):
                if is_reserved:
                    padding_key = default_padding_idx
                else:
                    padding_key = meta[name]['tbptt_pad_token']

                padded = torch.nn.utils.rnn.pad_sequence(value, batch_first=True, padding_value=padding_key)
                result[name] = padded

                # also update the result
                if meta and not is_reserved:
                    meta[name]['value'] = padded
        if meta:
            result['meta'] = meta
        return result

    @classmethod
    def reduce_on_epoch_end(cls, outputs):
        # get the batch sizes for all outputs
        batch_sizes = torch.stack([x.get_batch_sizes() for x in outputs]).view(-1)

        meta = outputs[0]['meta']
        result = cls()
        result = recursive_gather(outputs, result)
        recursive_stack(result)

        for k, option in meta.items():
            if k == '_internal':
                continue

            if option['on_epoch']:
                fx = option['reduce_fx']
                if fx == torch.mean:
                    # default reduction is a batch-size-weighted mean
                    reduced_val = weighted_mean(result[k], batch_sizes)
                else:
                    reduced_val = fx(result[k])

                result[k] = reduced_val

        result['meta'] = meta
        return result

    @classmethod
    def reduce_across_time(cls, time_outputs):
        # auto-reduce across time for tbptt
        meta = time_outputs[0]['meta']
        result = cls()
        result = recursive_gather(time_outputs, result)
        recursive_stack(result)

        for k, value in result.items():
            if k == 'meta':
                continue

            # pick the reduce fx
            if k in ['checkpoint_on', 'early_stop_on', 'minimize']:
                tbptt_reduce_fx = torch.mean
            else:
                tbptt_reduce_fx = meta[k]['tbptt_reduce_fx']
            result[k] = tbptt_reduce_fx(value)

        result['meta'] = meta
        return result

    def dp_reduce(self):
        # average the per-device values produced by DataParallel
        for k, value in self.items():
            if k == 'meta':
                continue

            if isinstance(value, list):
                value = torch.tensor(value)

            self[k] = value.mean(dim=-1)

    @property
    def should_reduce_on_epoch_end(self) -> bool:
        return self['meta']['_internal']['_reduce_on_epoch']

    def drop_hiddens(self):
        if 'hiddens' in self:
            del self['hiddens']

    def rename_keys(self, map_dict: dict):
        """
        Maps key values to the target values. Useful when renaming variables in mass.

        Args:
            map_dict: mapping of ``source_key -> destination_key``; each source
                key is renamed both in the result dict and in its meta entry.
        """
        meta = self.meta
        for source, dest in map_dict.items():
            # map the main keys
            self[dest] = self[source]
            del self[source]

            # map meta
            meta[dest] = meta[source]
            del meta[source]
def recursive_gather(outputs: Sequence[dict], result: Optional[MutableMapping] = None) -> Optional[MutableMapping]:
    # Merge a list of result dicts into `result`, collecting each key's values
    # into a list under that key.  WARNING: mutates the inputs — 'meta' is
    # deleted from every element of `outputs`.  Nested dicts are gathered
    # recursively into the same shared `result` mapping.
    # NOTE(review): the `result=None` default would crash on `k not in result`;
    # callers in this file always pass a Result instance — confirm before use.
    for out in outputs:
        if 'meta' in out:
            del out['meta']

        for k, v in out.items():
            if isinstance(v, dict):
                v = recursive_gather([v], result)

            if k not in result:
                result[k] = []

            result[k].append(v)

    return result
def recursive_stack(result: MutableMapping):
    """Collate every value of ``result`` in place, descending into nested dicts first."""
    for key in result:
        value = result[key]
        if isinstance(value, dict):
            recursive_stack(value)
        # non-collatable values (including dicts) pass through unchanged
        result[key] = collate_tensors(value)
def collate_tensors(items: Union[List, Tuple]) -> Union[Tensor, List, Tuple]:
    """Collate a sequence of tensors into a single tensor when possible.

    Scalars are stacked, same-trailing-shape tensors are concatenated along
    dim 0; anything else (non-sequences, empties, mixed content) is returned
    unchanged.
    """
    if not items or not isinstance(items, (list, tuple)) or any(
        not isinstance(entry, Tensor) for entry in items
    ):
        # not a sequence, empty, or contains non-tensors: pass through
        return items

    first = items[0]
    if all(entry.ndim == 0 for entry in items):
        # every tensor is a scalar -> stack into a 1-D tensor
        return torch.stack(items)

    if all(entry.ndim >= 1 and entry.shape[1:] == first.shape[1:] for entry in items):
        # trailing shapes agree -> concatenate along the first dimension
        return torch.cat(items)

    return items
class TrainResult(Result):
    """Result object returned from ``training_step``.

    Auto-logs to the logger / progress bar without requiring a
    ``training_step_end`` or ``training_epoch_end`` hook.  Training defaults
    log per step (``on_step=True``, ``on_epoch=False``).

    Example::

        def training_step(self, batch, batch_idx):
            loss = ...
            result = pl.TrainResult(loss, early_stop_on=loss, checkpoint_on=loss)
            result.log('train_loss', loss)
            return result
    """

    def __init__(
        self,
        minimize: Optional[Tensor] = None,
        early_stop_on: Tensor = None,
        checkpoint_on: Union[Tensor, bool] = None,
        hiddens: Optional[Tensor] = None,
    ):
        """
        Args:
            minimize: metric currently being minimized.
            early_stop_on: metric to early stop on.
            checkpoint_on: metric to checkpoint on.
            hiddens: hidden states to carry over (e.g. for TBPTT).
        """
        super().__init__(minimize, early_stop_on, checkpoint_on, hiddens)

    def log(
        self,
        name,
        value,
        prog_bar: bool = False,
        logger: bool = True,
        on_step: bool = True,
        on_epoch: bool = False,
        reduce_fx: Callable = torch.mean,
        tbptt_reduce_fx: Callable = torch.mean,
        tbptt_pad_token: int = 0,
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_op: Union[Any, str] = 'mean',
        sync_dist_group: Optional[Any] = None,
    ):
        """Log a single key/value pair.

        Args:
            name: key name
            value: value to log
            prog_bar: if True, logs to the progress bar
            logger: if True, logs to the logger
            on_step: if True, logs the value at this step
            on_epoch: if True, logs the epoch-aggregated value
            reduce_fx: reduction used for epoch aggregation (mean by default)
            tbptt_reduce_fx: reduction across truncated back-prop steps
            tbptt_pad_token: padding token for TBPTT sequences
            enable_graph: if True, does not auto-detach the graph
            sync_dist: if True, reduces the metric across GPUs/TPUs
            sync_dist_op: op used for the distributed reduction
            sync_dist_group: the ddp group
        """
        super().log(
            name=name,
            value=value,
            prog_bar=prog_bar,
            logger=logger,
            on_step=on_step,
            on_epoch=on_epoch,
            reduce_fx=reduce_fx,
            enable_graph=enable_graph,
            sync_dist=sync_dist,
            sync_dist_group=sync_dist_group,
            sync_dist_op=sync_dist_op,
            tbptt_pad_token=tbptt_pad_token,
            tbptt_reduce_fx=tbptt_reduce_fx,
        )

    def log_dict(
        self,
        dictionary: dict,
        prog_bar: bool = False,
        logger: bool = True,
        on_step: bool = False,
        on_epoch: bool = True,
        reduce_fx: Callable = torch.mean,
        tbptt_reduce_fx: Callable = torch.mean,
        tbptt_pad_token: int = 0,
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_op: Union[Any, str] = 'mean',
        sync_dist_group: Optional[Any] = None,
    ):
        """Log every (key, value) pair of ``dictionary`` with shared options.

        Example::

            values = {'loss': loss, 'acc': acc}
            result.log_dict(values)

        Args: same as :meth:`log`, applied to every entry.
        """
        for key, val in dictionary.items():
            self.log(
                name=key,
                value=val,
                prog_bar=prog_bar,
                logger=logger,
                on_step=on_step,
                on_epoch=on_epoch,
                reduce_fx=reduce_fx,
                enable_graph=enable_graph,
                sync_dist=sync_dist,
                sync_dist_group=sync_dist_group,
                sync_dist_op=sync_dist_op,
                tbptt_pad_token=tbptt_pad_token,
                tbptt_reduce_fx=tbptt_reduce_fx,
            )
class EvalResult(Result):
    def __init__(
        self,
        early_stop_on: Optional[Tensor] = None,
        checkpoint_on: Optional[Tensor] = None,
        hiddens: Optional[Tensor] = None,
    ):
        """
        Used in val/train loop to auto-log to a logger or progress bar without needing to define
        a _step_end or _epoch_end method
        Example::
            def validation_step(self, batch, batch_idx):
                loss = ...
                result = EvalResult()
                result.log('val_loss', loss)
                return result
            def test_step(self, batch, batch_idx):
                loss = ...
                result = EvalResult()
                result.log('test_loss', loss)
                return result
        Args:
            early_stop_on: Metric to early stop on.
            checkpoint_on: Metric to checkpoint on.
            hiddens:
        """
        # Evaluation results never carry a loss to minimize, hence None.
        super().__init__(None, early_stop_on, checkpoint_on, hiddens)
    def log(
        self,
        name,
        value,
        prog_bar: bool = False,
        logger: bool = True,
        on_step: bool = False,
        on_epoch: bool = True,
        reduce_fx: Callable = torch.mean,
        tbptt_reduce_fx: Callable = torch.mean,
        tbptt_pad_token: int = 0,
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_op: Union[Any, str] = 'mean',
        sync_dist_group: Optional[Any] = None,
    ):
        """
        Log a key, value
        Example::
            result.log('val_loss', loss)
            # defaults used
            result.log(
                name,
                value,
                on_step=False,
                on_epoch=True,
                logger=True,
                prog_bar=False,
                reduce_fx=torch.mean
            )
        Args:
            name: key name
            value: value name
            prog_bar: if True logs to the progress base
            logger: if True logs to the logger
            on_step: if True logs the output of validation_step or test_step
            on_epoch: if True, logs the output of the training loop aggregated
            reduce_fx: Torch.mean by default
            tbptt_reduce_fx: function to reduce on truncated back prop
            tbptt_pad_token: token to use for padding
            enable_graph: if True, will not auto detach the graph
            sync_dist: if True, reduces the metric across GPUs/TPUs
            sync_dist_op: the op to sync across
            sync_dist_group: the ddp group
        """
        # Note the eval defaults differ from TrainResult.log: per-epoch
        # logging is on, per-step logging is off.
        super().log(
            name=name,
            value=value,
            prog_bar=prog_bar,
            logger=logger,
            on_step=on_step,
            on_epoch=on_epoch,
            reduce_fx=reduce_fx,
            enable_graph=enable_graph,
            sync_dist=sync_dist,
            sync_dist_group=sync_dist_group,
            sync_dist_op=sync_dist_op,
            tbptt_pad_token=tbptt_pad_token,
            tbptt_reduce_fx=tbptt_reduce_fx,
        )
    def log_dict(
        self,
        dictionary: dict,
        prog_bar: bool = False,
        logger: bool = True,
        on_step: bool = False,
        on_epoch: bool = True,
        reduce_fx: Callable = torch.mean,
        tbptt_reduce_fx: Callable = torch.mean,
        tbptt_pad_token: int = 0,
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_op: Union[Any, str] = 'mean',
        sync_dist_group: Optional[Any] = None,
    ):
        """
        Log a dictionary of values at once
        Example::
            values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
            result.log_dict(values)
        Args:
            dictionary: key value pairs (str, tensors)
            prog_bar: if True logs to the progress base
            logger: if True logs to the logger
            on_step: if True logs the output of validation_step or test_step
            on_epoch: if True, logs the output of the training loop aggregated
            reduce_fx: Torch.mean by default
            tbptt_reduce_fx: function to reduce on truncated back prop
            tbptt_pad_token: token to use for padding
            enable_graph: if True, will not auto detach the graph
            sync_dist: if True, reduces the metric across GPUs/TPUs
            sync_dist_op: the op to sync across
            sync_dist_group: the ddp group
        """
        # Every entry is logged with the same options.
        for k, v in dictionary.items():
            self.log(
                name=k,
                value=v,
                prog_bar=prog_bar,
                logger=logger,
                on_step=on_step,
                on_epoch=on_epoch,
                reduce_fx=reduce_fx,
                enable_graph=enable_graph,
                sync_dist=sync_dist,
                sync_dist_group=sync_dist_group,
                sync_dist_op=sync_dist_op,
                tbptt_pad_token=tbptt_pad_token,
                tbptt_reduce_fx=tbptt_reduce_fx,
            )
    def get_callback_metrics(self) -> dict:
        """Return the metrics driving early stopping and checkpointing."""
        result = {'val_early_stop_on': self.early_stop_on, 'val_checkpoint_on': self.checkpoint_on}
        return result
    def write(self, name: str, values: Union[Tensor, list], filename: str = 'predictions.pt'):
        """Add feature name and value pair to collection of predictions that will be written to disk on
        `validation_end` or `test_end`. If running on multiple GPUs, you will get separate `n_gpu`
        prediction files with the rank prepended onto filename.
        Example::
            result = pl.EvalResult()
            result.write('ids', [0, 1, 2])
            result.write('preds', ['cat', 'dog', 'dog'])
        Args:
            name: Feature name that will turn into column header of predictions file
            values: Flat tensor or list of row values for given feature column 'name'.
            filename: Filepath where your predictions will be saved. Defaults to 'predictions.pt'.
        """
        # Type check the incoming arguments
        if not isinstance(name, str):
            raise ValueError(f"Expected str for 'name' but got {type(name)}")
        if not isinstance(filename, str):
            # bugfix: report the type of `filename`, not of `name`
            raise ValueError(f"Expected str for 'filename' but got {type(filename)}")
        if isinstance(values, Tensor):
            # Detach so stored predictions do not keep the autograd graph alive.
            values = values.detach()
        preds = getattr(self, 'predictions', None)
        if preds is None:
            # First write ever: create the whole predictions structure.
            self.predictions = {filename: {name: values}}
        elif filename not in preds:
            preds[filename] = {name: values}
        elif name not in preds[filename]:
            preds[filename][name] = values
        elif isinstance(values, Tensor):
            # Column already exists: append to the stored tensor.
            preds[filename][name] = torch.cat((preds[filename][name], values))
        elif isinstance(values, list):
            # Column already exists: append to the stored list.
            preds[filename][name].extend(values)
    def write_dict(self, predictions_dict, filename='predictions.pt'):
        """Calls EvalResult.write() for each key-value pair in predictions_dict.
        It is recommended that you use this function call instead of .write if you need to
        store more than one column of predictions in your output file.
        Example::
            predictions_to_write = {'preds': ['cat', 'dog'], 'ids': tensor([0, 1])}
            result.write_dict(predictions_to_write)
        Args:
            predictions_dict ([type]): Dict of predictions to store and then write to filename at eval end.
            filename (str, optional): File where your predictions will be stored. Defaults to './predictions.pt'.
        """
        for k, v in predictions_dict.items():
            self.write(k, v, filename)
def weighted_mean(result, weights):
    """Return the weighted average of ``result`` using ``weights``.

    ``weights`` is moved onto ``result``'s device before the reduction;
    both tensors are cast to float for the dot product.
    """
    weights = weights.to(result.device)
    weighted_sum = torch.dot(result.float(), weights.t().float())
    return weighted_sum / weights.sum().float()
| 33.121319 | 115 | 0.56394 |
541101bb86b65efbe25cfa0f4aaf8bd63e460f4a | 356 | py | Python | default_cfg.py | mantoshkumar1/daily_task_automation | 113615938d81a39c9237e089ee300a64e767dd0e | [
"MIT"
] | null | null | null | default_cfg.py | mantoshkumar1/daily_task_automation | 113615938d81a39c9237e089ee300a64e767dd0e | [
"MIT"
] | null | null | null | default_cfg.py | mantoshkumar1/daily_task_automation | 113615938d81a39c9237e089ee300a64e767dd0e | [
"MIT"
] | null | null | null | from logics.app_constants import *
import os
# Default application configuration.  The key constants (HOSTNAME, USERNAME,
# etc.) come from the star import of logics.app_constants at the top of the
# file -- their exact values are not visible here.
APP_CFG = {
    HOSTNAME: "10.100.57.99",  # presumably the remote host to connect to -- TODO confirm
    USERNAME: 'root',
    PASSWORD: '',  # empty by default; expected to be filled in by the user -- TODO confirm
    DEFAULT_LOCAL_FILES_DIR: os.path.join(os.getcwd(), 'files_container'),  # local staging dir
    REMOTE_DIR_PATH: '/tmp',  # destination directory on the remote host
    UPLOAD_FILE_NAMES: ['upgmgr', 'dcoField.tar.gz'],  # files taken from the local dir
    LOG_LOC: os.path.join(os.getcwd(), 'logs') # todo?? incomplete
}
| 27.384615 | 74 | 0.657303 |
016583c7ec3b87971245a34a1580f9c7058f1e37 | 106 | py | Python | csat/collectors/pipermail/admin.py | GaretJax/csat | db63441136369436140a91c9657444353c8944e6 | [
"MIT"
] | null | null | null | csat/collectors/pipermail/admin.py | GaretJax/csat | db63441136369436140a91c9657444353c8944e6 | [
"MIT"
] | 7 | 2020-06-05T17:15:29.000Z | 2022-02-11T03:38:15.000Z | csat/collectors/pipermail/admin.py | GaretJax/csat | db63441136369436140a91c9657444353c8944e6 | [
"MIT"
] | null | null | null | from csat.acquisition import admin
class PipermailConfigAdmin(admin.DataCollectorConfigAdmin):
    """Admin class for the pipermail data-collector configuration.

    Inherits all behaviour unchanged from DataCollectorConfigAdmin; it
    exists only to register the pipermail collector with the admin layer.
    """
    pass
| 17.666667 | 59 | 0.830189 |
681b53f9d3856fe14cb1ce61fc9415ada10d07a9 | 960 | py | Python | setup.py | kamicut/tilepie | 103ae2be1c3c4e6f7ec4a3bdd265ffcddee92b96 | [
"MIT"
] | 9 | 2017-12-14T17:38:26.000Z | 2021-04-06T18:30:46.000Z | setup.py | kamicut/tilepie | 103ae2be1c3c4e6f7ec4a3bdd265ffcddee92b96 | [
"MIT"
] | 5 | 2017-12-20T20:40:39.000Z | 2018-01-17T22:42:35.000Z | setup.py | kamicut/tilepie | 103ae2be1c3c4e6f7ec4a3bdd265ffcddee92b96 | [
"MIT"
] | 2 | 2017-12-20T20:40:56.000Z | 2018-09-21T04:14:16.000Z | from setuptools import setup
from codecs import open
from os import path
# Long description shown on PyPI (reStructuredText).
long_description='''
tilepie
=======
Simple ``.mbtiles`` processor for python. Built with the `QA Tiles <https://osmlab.github.io/osm-qa-tiles/>`_ in mind.
Based on the map/reduce/end structure in `@mapbox/tilereduce <https://github.com/mapbox/tile-reduce>`_.
Acknowledgements & pevious work
--------------------------------
- `mapbox/tilereduce <https://github.com/mapbox/tile-reduce>`_
- `jwass/tile-reduce-py: <https://github.com/jwass/tile-reduce-py/>`_
- `makinacorpus/landez: <https://github.com/makinacorpus/landez/>`_
'''
# Package metadata; no runtime dependencies are declared.
setup(name='tilepie',
      version='0.2.1',
      description='multiproc mbtile processing',
      long_description=long_description,
      url='https://github.com/kamicut/tilepie',
      author='Marc Farra',
      author_email='marcfarra@gmail.com',
      license='MIT',
      packages=['tilepie'],
      install_requires = [],
      zip_safe=False)
| 32 | 118 | 0.673958 |
34aa102172500f27be027593c14874c03bb5e37b | 1,171 | py | Python | pyvisdk/do/vm_config_missing_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/vm_config_missing_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/vm_config_missing_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VmConfigMissingEvent(vim, *args, **kwargs):
    '''This event records if the configuration file can not be found.

    :param vim: connection whose ``client.factory`` builds the SOAP object.
    :param args: positional values assigned to the required properties
        (template, chainId, createdTime, key, userName) followed by any
        optional ones, in declaration order.
    :param kwargs: property values given by name; an unknown name raises
        InvalidArgumentError.
    :raises IndexError: if fewer than 5 values are supplied in total.
    '''

    obj = vim.client.factory.create('{urn:vim25}VmConfigMissingEvent')

    # do some validation checking...
    # There are 5 required properties; count args and kwargs together so the
    # reported number matches the check (the old message claimed 6 and only
    # counted positional arguments).
    if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d'
                         % (len(args) + len(kwargs)))

    required = [ 'template', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]

    # Positional values are matched to the property names in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword values may target any known property; reject unknown names.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| 34.441176 | 124 | 0.607173 |
70805bfcf649e63cdaab32468b761cea705d3868 | 1,492 | py | Python | Sentiment Detection/ConfusionMatrix.py | nafiul-araf/Sentiment-Detection | 51080bb9b8f1735dc765e01b3ca1cb7afa10c87f | [
"Apache-2.0"
] | null | null | null | Sentiment Detection/ConfusionMatrix.py | nafiul-araf/Sentiment-Detection | 51080bb9b8f1735dc765e01b3ca1cb7afa10c87f | [
"Apache-2.0"
] | null | null | null | Sentiment Detection/ConfusionMatrix.py | nafiul-araf/Sentiment-Detection | 51080bb9b8f1735dc765e01b3ca1cb7afa10c87f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.RdBu):
    """
    Source:
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    Input:
       cm: confusion matrix (2-d array, rows = true labels)
       classes: output classes name
       normalize: normalization can be applied by setting `normalize=True`
    Output:
       This function prints and plots the confusion matrix.
    """
    # Normalize BEFORE drawing, so the rendered image and the cell labels
    # agree (the original normalized after plt.imshow, leaving the plotted
    # image un-normalized).
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.figure(figsize=(20,10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate every cell with its value; colour flips past the midpoint for
    # contrast.  Nested loops replace itertools.product, which was used
    # without ever importing itertools (NameError at runtime).
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, cm[i, j],
                     horizontalalignment="center",
                     color="green" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# In[ ]:
| 25.288136 | 101 | 0.609249 |
d505b49c812b599e5706a510c44721645e607a38 | 1,731 | py | Python | egs/wham/TwoStep/local/preprocess_wham.py | michelolzam/asteroid | d69e1567577f8c830af9d4a942eb3e1a40eb8f9b | [
"MIT"
] | 2 | 2020-06-22T14:06:40.000Z | 2020-07-15T00:46:27.000Z | egs/wham/TwoStep/local/preprocess_wham.py | michelolzam/asteroid | d69e1567577f8c830af9d4a942eb3e1a40eb8f9b | [
"MIT"
] | null | null | null | egs/wham/TwoStep/local/preprocess_wham.py | michelolzam/asteroid | d69e1567577f8c830af9d4a942eb3e1a40eb8f9b | [
"MIT"
] | null | null | null | """
WHAM directory preprocessing
@author : Manuel Pariente, Inria-Nancy
Modified and extended from github.com/kaituoxu/Conv-TasNet/
MIT Copyright (c) 2018 Kaituo XU
"""
import argparse
import json
import os
import soundfile as sf
def preprocess_one_dir(in_dir, out_dir, out_filename):
    """ Create .json file for one condition.

    Scans ``in_dir`` for ``.wav`` files (sorted by name) and writes a list
    of ``(absolute_path, n_frames)`` pairs to ``out_dir/out_filename.json``,
    creating ``out_dir`` if needed.
    """
    file_infos = []
    in_dir = os.path.abspath(in_dir)
    wav_list = os.listdir(in_dir)
    wav_list.sort()
    for wav_file in wav_list:
        if not wav_file.endswith('.wav'):
            continue
        wav_path = os.path.join(in_dir, wav_file)
        # Open just to read the length; the context manager closes the
        # handle again (the original leaked one file handle per wav).
        with sf.SoundFile(wav_path) as samples:
            file_infos.append((wav_path, len(samples)))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    with open(os.path.join(out_dir, out_filename + '.json'), 'w') as f:
        json.dump(file_infos, f, indent=4)
def preprocess(inp_args):
    """ Create .json files for all conditions."""
    conditions = ['mix_both', 'mix_clean',
                  'mix_single', 's1', 's2', 'noise']
    # One output directory per dataset split; one json per condition.
    for subset in ['tr', 'cv', 'tt']:
        subset_out = os.path.join(inp_args.out_dir, subset)
        for condition in conditions:
            preprocess_one_dir(os.path.join(inp_args.in_dir, subset, condition),
                               subset_out,
                               condition)
if __name__ == "__main__":
    # Command-line entry point: take the WHAM root directory and the
    # destination for the generated .json metadata files.
    parser = argparse.ArgumentParser("WHAM data preprocessing")
    parser.add_argument('--in_dir', type=str, default=None,
                        help='Directory path of wham including tr, cv and tt')
    parser.add_argument('--out_dir', type=str, default=None,
                        help='Directory path to put output files')
    args = parser.parse_args()
    print(args)
    preprocess(args)
| 33.288462 | 78 | 0.626228 |
e0605dac2ffd70fabf12b3dfe09acd0e25481ca3 | 11,630 | py | Python | tests/test_visibility.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 1 | 2019-09-14T03:24:03.000Z | 2019-09-14T03:24:03.000Z | tests/test_visibility.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 4 | 2020-03-04T23:47:05.000Z | 2021-12-09T21:41:44.000Z | tests/test_visibility.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from twisted.internet.defer import succeed
from synapse.api.room_versions import RoomVersions
from synapse.events import FrozenEvent
from synapse.visibility import filter_events_for_server
import tests.unittest
from tests.utils import create_room, setup_test_homeserver
logger = logging.getLogger(__name__)
TEST_ROOM_ID = "!TEST:ROOM"
class FilterEventsForServerTestCase(tests.unittest.TestCase):
    # Tests for synapse.visibility.filter_events_for_server: events that the
    # target server must not see should come back redacted (content
    # stripped), in the same order as the input list.
    @defer.inlineCallbacks
    def setUp(self):
        # Build a test homeserver and create a room to inject events into.
        self.hs = yield setup_test_homeserver(self.addCleanup)
        self.event_creation_handler = self.hs.get_event_creation_handler()
        self.event_builder_factory = self.hs.get_event_builder_factory()
        self.store = self.hs.get_datastore()
        yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
    @defer.inlineCallbacks
    def test_filtering(self):
        #
        # The events to be filtered consist of 10 membership events (it doesn't
        # really matter if they are joins or leaves, so let's make them joins).
        # One of those membership events is going to be for a user on the
        # server we are filtering for (so we can check the filtering is doing
        # the right thing).
        #
        # before we do that, we persist some other events to act as state.
        # NOTE(review): inject_visibility returns a Deferred that is not
        # yielded here -- presumably it resolves synchronously under the test
        # reactor; confirm.
        self.inject_visibility("@admin:hs", "joined")
        for i in range(0, 10):
            yield self.inject_room_member("@resident%i:hs" % i)
        events_to_filter = []
        for i in range(0, 10):
            # user 5 is on the server being filtered for; the rest are remote.
            user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
            evt = yield self.inject_room_member(user, extra_content={"a": "b"})
            events_to_filter.append(evt)
        filtered = yield filter_events_for_server(
            self.store, "test_server", events_to_filter
        )
        # the result should be 5 redacted events, and 5 unredacted events.
        for i in range(0, 5):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertNotIn("a", filtered[i].content)
        for i in range(5, 10):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertEqual(filtered[i].content["a"], "b")
    @tests.unittest.DEBUG
    @defer.inlineCallbacks
    def test_erased_user(self):
        # 4 message events, from erased and unerased users, with a membership
        # change in the middle of them.
        events_to_filter = []
        evt = yield self.inject_message("@unerased:local_hs")
        events_to_filter.append(evt)
        evt = yield self.inject_message("@erased:local_hs")
        events_to_filter.append(evt)
        evt = yield self.inject_room_member("@joiner:remote_hs")
        events_to_filter.append(evt)
        evt = yield self.inject_message("@unerased:local_hs")
        events_to_filter.append(evt)
        evt = yield self.inject_message("@erased:local_hs")
        events_to_filter.append(evt)
        # the erasey user gets erased
        yield self.hs.get_datastore().mark_user_erased("@erased:local_hs")
        # ... and the filtering happens.
        filtered = yield filter_events_for_server(
            self.store, "test_server", events_to_filter
        )
        # Ordering must be preserved.
        for i in range(0, len(events_to_filter)):
            self.assertEqual(
                events_to_filter[i].event_id,
                filtered[i].event_id,
                "Unexpected event at result position %i" % (i,),
            )
        # Messages from the unerased user (positions 0 and 3) keep their body.
        for i in (0, 3):
            self.assertEqual(
                events_to_filter[i].content["body"],
                filtered[i].content["body"],
                "Unexpected event content at result position %i" % (i,),
            )
        # Messages from the erased user (positions 1 and 4) lose their body.
        for i in (1, 4):
            self.assertNotIn("body", filtered[i].content)
    # -- helpers to build and persist events into the test room --------------
    @defer.inlineCallbacks
    def inject_visibility(self, user_id, visibility):
        # Persist an m.room.history_visibility state event; returns the event.
        content = {"history_visibility": visibility}
        builder = self.event_builder_factory.for_room_version(
            RoomVersions.V1,
            {
                "type": "m.room.history_visibility",
                "sender": user_id,
                "state_key": "",
                "room_id": TEST_ROOM_ID,
                "content": content,
            },
        )
        event, context = yield self.event_creation_handler.create_new_client_event(
            builder
        )
        yield self.hs.get_datastore().persist_event(event, context)
        return event
    @defer.inlineCallbacks
    def inject_room_member(self, user_id, membership="join", extra_content={}):
        # Persist an m.room.member state event for user_id; returns the event.
        # NOTE(review): mutable default for extra_content is only safe because
        # it is never mutated here (content.update reads from it).
        content = {"membership": membership}
        content.update(extra_content)
        builder = self.event_builder_factory.for_room_version(
            RoomVersions.V1,
            {
                "type": "m.room.member",
                "sender": user_id,
                "state_key": user_id,
                "room_id": TEST_ROOM_ID,
                "content": content,
            },
        )
        event, context = yield self.event_creation_handler.create_new_client_event(
            builder
        )
        yield self.hs.get_datastore().persist_event(event, context)
        return event
    @defer.inlineCallbacks
    def inject_message(self, user_id, content=None):
        # Persist an m.room.message event (defaults to a simple text message).
        if content is None:
            content = {"body": "testytest", "msgtype": "m.text"}
        builder = self.event_builder_factory.for_room_version(
            RoomVersions.V1,
            {
                "type": "m.room.message",
                "sender": user_id,
                "room_id": TEST_ROOM_ID,
                "content": content,
            },
        )
        event, context = yield self.event_creation_handler.create_new_client_event(
            builder
        )
        yield self.hs.get_datastore().persist_event(event, context)
        return event
    @defer.inlineCallbacks
    def test_large_room(self):
        # see what happens when we have a large room with hundreds of thousands
        # of membership events
        # As above, the events to be filtered consist of 10 membership events,
        # where one of them is for a user on the server we are filtering for.
        import cProfile
        import pstats
        import time
        # we stub out the store, because building up all that state the normal
        # way is very slow.
        test_store = _TestStore()
        # our initial state is 100000 membership events and one
        # history_visibility event.
        room_state = []
        history_visibility_evt = FrozenEvent(
            {
                "event_id": "$history_vis",
                "type": "m.room.history_visibility",
                "sender": "@resident_user_0:test.com",
                "state_key": "",
                "room_id": TEST_ROOM_ID,
                "content": {"history_visibility": "joined"},
            }
        )
        room_state.append(history_visibility_evt)
        test_store.add_event(history_visibility_evt)
        for i in range(0, 100000):
            user = "@resident_user_%i:test.com" % (i,)
            evt = FrozenEvent(
                {
                    "event_id": "$res_event_%i" % (i,),
                    "type": "m.room.member",
                    "state_key": user,
                    "sender": user,
                    "room_id": TEST_ROOM_ID,
                    "content": {"membership": "join", "extra": "zzz,"},
                }
            )
            room_state.append(evt)
            test_store.add_event(evt)
        events_to_filter = []
        for i in range(0, 10):
            user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
            evt = FrozenEvent(
                {
                    "event_id": "$evt%i" % (i,),
                    "type": "m.room.member",
                    "state_key": user,
                    "sender": user,
                    "room_id": TEST_ROOM_ID,
                    "content": {"membership": "join", "extra": "zzz"},
                }
            )
            events_to_filter.append(evt)
            room_state.append(evt)
            test_store.add_event(evt)
            test_store.set_state_ids_for_event(
                evt, {(e.type, e.state_key): e.event_id for e in room_state}
            )
        # Profile the filtering pass and dump the stats to a file.
        pr = cProfile.Profile()
        pr.enable()
        logger.info("Starting filtering")
        start = time.time()
        filtered = yield filter_events_for_server(
            test_store, "test_server", events_to_filter
        )
        logger.info("Filtering took %f seconds", time.time() - start)
        pr.disable()
        with open("filter_events_for_server.profile", "w+") as f:
            ps = pstats.Stats(pr, stream=f).sort_stats("cumulative")
            ps.print_stats()
        # the result should be 5 redacted events, and 5 unredacted events.
        for i in range(0, 5):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertNotIn("extra", filtered[i].content)
        for i in range(5, 10):
            self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
            self.assertEqual(filtered[i].content["extra"], "zzz")
    test_large_room.skip = "Disabled by default because it's slow"
class _TestStore(object):
"""Implements a few methods of the DataStore, so that we can test
filter_events_for_server
"""
def __init__(self):
# data for get_events: a map from event_id to event
self.events = {}
# data for get_state_ids_for_events mock: a map from event_id to
# a map from (type_state_key) -> event_id for the state at that
# event
self.state_ids_for_events = {}
def add_event(self, event):
self.events[event.event_id] = event
def set_state_ids_for_event(self, event, state):
self.state_ids_for_events[event.event_id] = state
def get_state_ids_for_events(self, events, types):
res = {}
include_memberships = False
for (type, state_key) in types:
if type == "m.room.history_visibility":
continue
if type != "m.room.member" or state_key is not None:
raise RuntimeError(
"Unimplemented: get_state_ids with type (%s, %s)"
% (type, state_key)
)
include_memberships = True
if include_memberships:
for event_id in events:
res[event_id] = self.state_ids_for_events[event_id]
else:
k = ("m.room.history_visibility", "")
for event_id in events:
hve = self.state_ids_for_events[event_id][k]
res[event_id] = {k: hve}
return succeed(res)
def get_events(self, events):
return succeed({event_id: self.events[event_id] for event_id in events})
def are_users_erased(self, users):
return succeed({u: False for u in users})
| 34.820359 | 83 | 0.591745 |
1df23406d6524513058e9b84d79ce3c3ad281875 | 1,175 | py | Python | pysubunit/tests/sample-script.py | mtreinish/pysubunit | 5588ae7308dd51abb748212373eec00652179504 | [
"Apache-2.0"
] | null | null | null | pysubunit/tests/sample-script.py | mtreinish/pysubunit | 5588ae7308dd51abb748212373eec00652179504 | [
"Apache-2.0"
] | null | null | null | pysubunit/tests/sample-script.py | mtreinish/pysubunit | 5588ae7308dd51abb748212373eec00652179504 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
# On Windows, put stdout into binary mode so the subunit stream is not
# mangled by newline translation.
if sys.platform == "win32":
    import msvcrt
    import os
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
if len(sys.argv) == 2:
    # subunit.tests.test_test_protocol.TestExecTestCase.test_sample_method_args
    # uses this code path to be sure that the arguments were passed to
    # sample-script.py
    print("test fail")
    print("error fail")
    sys.exit(0)
# Default path: emit a small canned subunit v1 stream with one success,
# one failure (with details) and one error.
print("test old mcdonald")
print("success old mcdonald")
print("test bing crosby")
print("failure bing crosby [")
print("foo.c:53:ERROR invalid state")
print("]")
print("test an error")
print("error an error")
sys.exit(0)
| 31.756757 | 79 | 0.729362 |
55d5dd8a97b8ff713e5e6792aaf9ae491895931f | 953 | py | Python | xlsxwriter/test/comparison/test_table03.py | edparcell/XlsxWriter | d6a5df232ac0091017ae5c65f592bcc776d296ea | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-01-09T19:43:43.000Z | 2019-01-09T19:43:43.000Z | xlsxwriter/test/comparison/test_table03.py | edparcell/XlsxWriter | d6a5df232ac0091017ae5c65f592bcc776d296ea | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_table03.py | edparcell/XlsxWriter | d6a5df232ac0091017ae5c65f592bcc776d296ea | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2018, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Compare a workbook produced by XlsxWriter against a reference file
    created by Excel.
    """
    def setUp(self):
        self.set_filename('table03.xlsx')
    def test_create_file(self):
        """Write a worksheet containing a table and verify it matches Excel's output."""
        book = Workbook(self.got_filename)
        # The default URL format would perturb the comparison, so drop it.
        book.default_url_format = None
        sheet = book.add_worksheet()
        sheet.set_column('C:F', 10.288)
        sheet.add_table('C3:F13')
        sheet.write('A1', 'http://perl.com/')
        book.close()
        self.assertExcelEqual()
| 23.243902 | 79 | 0.609654 |
67977a64c382aa8736fc174c2467b67eb6120245 | 465 | pyw | Python | py/hGen_19000101_Test Example Person.pyw | pinecoding/scrappydox | 274b8c0b046ae66b4f637acf3cedaa3b0def7afe | [
"MIT"
] | null | null | null | py/hGen_19000101_Test Example Person.pyw | pinecoding/scrappydox | 274b8c0b046ae66b4f637acf3cedaa3b0def7afe | [
"MIT"
] | 55 | 2015-12-19T16:13:14.000Z | 2016-09-13T22:44:58.000Z | py/hGen_19000101_Test Example Person.pyw | pinecoding/scrappydox | 274b8c0b046ae66b4f637acf3cedaa3b0def7afe | [
"MIT"
] | null | null | null | import person
# Record identifier for this person entry.
# NOTE(review): `id` shadows the builtin of the same name; harmless in this
# small script but worth renaming if it grows.
id = "hGen_19000101_Test"
# Build the Person from two templates: the first (an f-string) holds the
# structured record data; the second is a narrative template whose
# {p[...]} placeholders are presumably expanded later by person.Person --
# TODO confirm against the person module.
this = person.Person(f"""\
ID: {id}
Name: Example Person
Date of Birth: 1 January 1900
Location of Birth: Berkeley, CA
Date of Death: 01 January 1995
Location of Death: Los Angeles, CA
Children:
- type: daughter
  file: hGen_19250101_Test2
""",
"""\
Born {p["Date of Birth"]}, {p["Location of Birth"]}. Died {p["Date of Death"]}, {p["Location of Death"]}.
""")
# Script entry point: render/run this person record.
if __name__ == '__main__':
    this.run()
| 24.473684 | 106 | 0.647312 |
d093639e6eba0376b858264b1396ddc82ff8a81a | 10,694 | py | Python | vistrails/core/inspector.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 83 | 2015-01-05T14:50:50.000Z | 2021-09-17T19:45:26.000Z | vistrails/core/inspector.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 254 | 2015-01-02T20:39:19.000Z | 2018-11-28T17:16:44.000Z | vistrails/core/inspector.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 40 | 2015-04-17T16:46:36.000Z | 2021-09-28T22:43:24.000Z | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Helper classes for inspecting vistrails/pipelines at runtime"""
from __future__ import division
from vistrails.core.modules.module_registry import get_module_registry
################################################################################
class PipelineInspector(object):
    """
    PipelineInspector inspects a pipeline to get information such as
    the number of spreadsheet cells or compatibility for sub-modules.

    Usage: construct, call inspect(pipeline), then read the attributes
    populated in __init__.

    Note: dict iteration now uses the Python-2/3-compatible spellings
    (.items(), `in`, next()) instead of the removed .iteritems(),
    .iterkeys(), .has_key() and .next().
    """

    def __init__(self):
        """ PipelineInspector() -> None
        Initialize empty pipeline information.
        """
        # A dict of input port module ids to its name/type
        self.input_ports = {}
        self.input_port_by_name = {}
        # A dict of output port module ids to its name/type
        self.output_ports = {}
        self.output_port_by_name = {}
        # A list of ids of module of type cell; each entry is a path of
        # module ids so that cells nested in subworkflows can be located
        self.spreadsheet_cells = []
        # A dict of ambiguous modules mapped to their annotated id
        self.annotated_modules = {}

    def inspect(self, pipeline):
        """ inspect(pipeline: Pipeline) -> None
        Inspect a pipeline and update all collected information.
        """
        self.inspect_input_output_ports(pipeline)
        self.inspect_spreadsheet_cells(pipeline)
        self.inspect_ambiguous_modules(pipeline)

    def has_input_ports(self):
        """ has_input_ports() -> bool
        Check if the inspected pipeline has any input ports.
        """
        return len(self.input_ports) > 0

    def has_output_ports(self):
        """ has_output_ports() -> bool
        Check if the inspected pipeline has any output ports.
        """
        return len(self.output_ports) > 0

    def number_of_cells(self):
        """ number_of_cells() -> int
        Return the number of cells that will be occupied on the spreadsheet.
        """
        return len(self.spreadsheet_cells)

    def is_sub_module(self):
        """ is_sub_module() -> bool
        Check whether or not this pipeline is a sub-module, i.e. whether
        it exposes at least one input or output port.
        """
        return self.has_input_ports() or self.has_output_ports()

    def inspect_input_output_ports(self, pipeline):
        """ inspect_input_output_ports(pipeline: Pipeline) -> None
        Inspect the pipeline input/output ports, useful for submodules.
        """
        registry = get_module_registry()
        self.input_ports = {}
        self.input_port_by_name = {}
        self.output_ports = {}
        self.output_port_by_name = {}
        if not pipeline:
            return
        for cId, conn in pipeline.connections.items():
            src_module = pipeline.modules[conn.source.moduleId]
            dst_module = pipeline.modules[conn.destination.moduleId]
            if src_module.name == 'InputPort':
                spec = registry.getInputPortSpec(dst_module,
                                                 conn.destination.name)
                name = self.get_port_name(src_module)
                if name == '':
                    # Fall back to the name of the connected port
                    name = conn.destination.name
                self.input_ports[src_module.id] = (name, spec[0])
                self.input_port_by_name[name] = src_module.id
            if dst_module.name == 'OutputPort':
                spec = registry.getOutputPortSpec(src_module,
                                                  conn.source.name)
                name = self.get_port_name(dst_module)
                if name == '':
                    name = conn.source.name
                self.output_ports[dst_module.id] = (name, spec[0])
                self.output_port_by_name[name] = dst_module.id

    def get_port_name(self, module):
        """ get_port_name(module: InputPort/OutputPort) -> str
        Return the real name of the port module based on its 'name'
        function, or '' if no such function parameter exists.
        """
        for f in module.functions:
            if f.name == 'name' and f.params:
                return f.params[0].strValue
        return ''

    def inspect_spreadsheet_cells(self, pipeline):
        """ inspect_spreadsheet_cells(pipeline: Pipeline) -> None
        Inspect the pipeline to see how many cells are needed.
        """
        self.spreadsheet_cells = []
        if not pipeline:
            return
        registry = get_module_registry()
        # Sometimes we run without the spreadsheet!
        if not registry.has_module('org.vistrails.vistrails.spreadsheet',
                                   'SpreadsheetCell'):
            return
        cell_desc = registry.get_descriptor_by_name(
            'org.vistrails.vistrails.spreadsheet',
            'SpreadsheetCell')
        output_desc = registry.get_descriptor_by_name(
            'org.vistrails.vistrails.basic',
            'OutputModule')

        def find_spreadsheet_cells(pipeline, root_id=None):
            # root_id is the path of enclosing subworkflow module ids
            if root_id is None:
                root_id = []
            for mId, module in pipeline.modules.items():
                desc = registry.get_descriptor_by_name(module.package,
                                                       module.name,
                                                       module.namespace)
                # SpreadsheetCell subclasses
                if registry.is_descriptor_subclass(desc, cell_desc):
                    self.spreadsheet_cells.append(root_id + [mId])
                # Output modules with a 'spreadsheet' mode
                elif registry.is_descriptor_subclass(desc, output_desc):
                    if desc.module.get_mode_class('spreadsheet') is not None:
                        self.spreadsheet_cells.append(root_id + [mId])
            # Recurse into groups/abstractions so nested cells are counted
            for subworkflow_id in self.find_subworkflows(pipeline):
                subworkflow = pipeline.modules[subworkflow_id]
                if subworkflow.pipeline is not None:
                    find_spreadsheet_cells(subworkflow.pipeline,
                                           root_id + [subworkflow_id])

        find_spreadsheet_cells(pipeline)

    def find_subworkflows(self, pipeline):
        """ find_subworkflows(pipeline: Pipeline) -> [module id]
        Return the ids of all group/abstraction modules in the pipeline.
        Returns [] for a missing pipeline (previously None, which would
        break callers that iterate over the result).
        """
        if not pipeline:
            return []
        subworkflows = []
        for m_id, module in pipeline.modules.items():
            if module.is_abstraction() or module.is_group():
                subworkflows.append(m_id)
        return subworkflows

    def inspect_ambiguous_modules(self, pipeline):
        """ inspect_ambiguous_modules(pipeline: Pipeline) -> None
        inspect_ambiguous_modules builds a dict of ambiguous modules,
        i.e. those whose exact module cannot be determined by giving
        just its name. In each group of duplicate modules, a set of
        annotated ids is generated for them, sorted based on their id.
        The annotated_modules dictionary maps the actual module id to
        its annotated one (if it is ambiguous).
        """
        self.annotated_modules = {}
        if not pipeline:
            return
        orig_pipeline = pipeline
        count = {}
        module_name = {}
        for moduleId, module in pipeline.modules.items():
            if module.name in module_name:  # ambiguous
                if count[module.name] == 1:
                    # Retroactively annotate the first occurrence too
                    self.annotated_modules[module_name[module.name]] = 1
                count[module.name] += 1
                self.annotated_modules[moduleId] = count[module.name]
            else:
                module_name[module.name] = moduleId
                count[module.name] = 1
        for id_list in self.spreadsheet_cells:
            pipeline = orig_pipeline
            # only need to worry about nested cells here
            if len(id_list) >= 2:
                id_iter = iter(id_list)
                # Walk down the subworkflow path to the innermost module
                m = pipeline.modules[next(id_iter)]
                for m_id in id_iter:
                    pipeline = m.pipeline
                    m = pipeline.modules[m_id]
                if m.name in module_name:
                    if count[m.name] == 1:
                        self.annotated_modules[module_name[m.name]] = 1
                    count[m.name] += 1
                    self.annotated_modules[tuple(id_list)] = count[m.name]
                else:
                    module_name[m.name] = tuple(id_list)
                    count[m.name] = 1
# if __name__ == '__main__':
# from core.startup import VistrailsStartup
# from core.xml_parser import XMLParser
# xmlFile = 'C:/cygwin/home/stew/src/vistrails/trunk/examples/vtk.xml'
# vs = VistrailsStartup()
# vs.init()
# parser = XMLParser()
# parser.openVistrail(xmlFile)
# vistrail = parser.getVistrail()
# pipeline = vistrail.getPipeline('Single Renderer')
# print vistrail.latestTime
| 41.449612 | 80 | 0.590331 |
bf936492ceaf4a3fd0404f4bb605aa16011e8a05 | 624 | py | Python | ros/build/waypoint_follower/catkin_generated/pkg.installspace.context.pc.py | mahfuz195/sdc_capstone | 9a8c09c65bd6c5abcb243d83d8ec51df17cb2385 | [
"MIT"
] | null | null | null | ros/build/waypoint_follower/catkin_generated/pkg.installspace.context.pc.py | mahfuz195/sdc_capstone | 9a8c09c65bd6c5abcb243d83d8ec51df17cb2385 | [
"MIT"
] | null | null | null | ros/build/waypoint_follower/catkin_generated/pkg.installspace.context.pc.py | mahfuz195/sdc_capstone | 9a8c09c65bd6c5abcb243d83d8ec51df17cb2385 | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/student/Desktop/git_folder/sdc_capstone/ros/install/include".split(';') if "/home/student/Desktop/git_folder/sdc_capstone/ros/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;tf;geometry_msgs;styx_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llibwaypoint_follower".split(';') if "-llibwaypoint_follower" != "" else []
PROJECT_NAME = "waypoint_follower"
PROJECT_SPACE_DIR = "/home/student/Desktop/git_folder/sdc_capstone/ros/install"
PROJECT_VERSION = "0.0.0"
| 69.333333 | 197 | 0.783654 |
b3ea272e3e5095daeb2a77409e0556eaa1b55e07 | 243 | py | Python | unicaps/__init__.py | sergey-scat/unicaps | 8f4a3c3f802c58464e93f953bcf11ecf44ef8f3b | [
"Apache-2.0"
] | 8 | 2020-07-27T19:18:27.000Z | 2022-02-23T04:05:56.000Z | unicaps/__init__.py | sergey-scat/unicaps | 8f4a3c3f802c58464e93f953bcf11ecf44ef8f3b | [
"Apache-2.0"
] | 2 | 2021-01-19T07:06:03.000Z | 2021-09-03T13:27:12.000Z | unicaps/__init__.py | sergey-scat/unicaps | 8f4a3c3f802c58464e93f953bcf11ecf44ef8f3b | [
"Apache-2.0"
] | 5 | 2021-03-22T23:09:05.000Z | 2022-01-21T09:00:14.000Z | # -*- coding: UTF-8 -*-
"""
Unicaps package
~~~~~~~~~~~~~~~
"""
# pylint: disable=unused-import,import-error
from ._solver import CaptchaSolver
from ._service import CaptchaSolvingService
__all__ = ('CaptchaSolver', 'CaptchaSolvingService')
| 20.25 | 52 | 0.699588 |
8673d9c7840f20408fd7aff2d180eba7ddc30548 | 7,135 | py | Python | ptolemy.py | yunruse/ptolemy | e672dc275bf88975bbc86ec3c28520db567b9692 | [
"MIT"
] | null | null | null | ptolemy.py | yunruse/ptolemy | e672dc275bf88975bbc86ec3c28520db567b9692 | [
"MIT"
] | null | null | null | ptolemy.py | yunruse/ptolemy | e672dc275bf88975bbc86ec3c28520db567b9692 | [
"MIT"
] | null | null | null | '''
Ptolemy: a mapping tile fetch-and-stitch tool.
Tiles are fetches to the tiles/ folder for caching/use,
and the output is stitched together with optional debugging symbols.
'''
import argparse
import csv
import os
from itertools import product
import urllib.request
import sys
import numpy as np
from PIL import Image, ImageDraw
from projections import PROJECTIONS, project
class Tilemap:
    """A map tile source: downloads tiles and caches them on disk."""

    def __init__(self, kind, url, size):
        self.kind = kind
        self.url = url
        self.tile_size = int(size)
        self.user_agent = None
        # TODO: API keys?

    def grab_file(self, to_fmt, redownload=False, **fmt):
        """Return the local path of one tile, downloading it unless cached.

        On an HTTP error the URL and error are printed and the sentinel
        string 'empty' is returned instead of a path.
        """
        target = to_fmt.format(**fmt)
        if not redownload and os.path.isfile(target):
            return target
        parent, _ = os.path.split(target)
        if not os.path.exists(parent):
            os.makedirs(parent)
        tile_url = self.url.format(**fmt)
        request_headers = {}
        if self.user_agent:
            request_headers['User-Agent'] = self.user_agent
        try:
            req = urllib.request.Request(tile_url, headers=request_headers)
            with urllib.request.urlopen(req) as response:
                with open(target, 'wb') as cache_file:
                    cache_file.write(response.read())
        except urllib.error.HTTPError as err:
            print(tile_url)
            print(err)
            return 'empty'
        return target

    def get_tiles(self, bounds, zoom, callback=lambda x: None):
        """Fetch every tile in the half-open rectangle given by bounds.

        Returns a dict mapping (x, y) tile coordinates to local paths
        (or the 'empty' sentinel); callback is invoked with a running
        counter after each tile for progress reporting.
        """
        (x0, y0), (x1, y1) = bounds
        tiles = {}
        for index, coord in enumerate(product(range(x0, x1), range(y0, y1))):
            tiles[coord] = self.grab_file(
                STORAGE, x=coord[0], y=coord[1], z=zoom, kind=self.kind)
            callback(index)
        return tiles
# Load the available tile sources from styles.txt; each row is
# (kind, url template, tile size in pixels).
with open('styles.txt') as f:
    STYLES = {
        kind: Tilemap(kind, url, size)
        for (kind, url, size)
        in csv.reader(f)
    }

# Local cache path template for downloaded tiles.
STORAGE = "tiles/{kind}/{z}/{x}/{y}.jpg"
def paint(args):
    '''
    Stitch tiles given zoom and bounds.

    Fetches every tile for each requested style, composites the styles
    as layers (in command-line order), optionally draws coordinate
    indicators, optionally reprojects, and saves to args.out.
    '''
    styles = []
    for s in args.styles:
        # TODO: custom zoom for each style?
        styles.append(STYLES[s])
    # Tile-coordinate bounding box; rescale it so the same geographic
    # area is kept when --scale shifts the effective zoom level.
    bounds = np.array([[args.x0, args.y0], [args.x1, args.y1]], dtype=int)
    if args.scale < 0:
        bounds //= int(2 ** -args.scale)
    else:
        bounds *= int(2 ** args.scale)
    size = (bounds[1]-bounds[0])
    N = size[0] * size[1]  # number of tiles per style
    zoom = args.zoom + args.scale
    S = styles[0].tile_size  # output tile size in pixels (first style wins)
    print(f'drawing {N} tiles')
    print(f'zoom: {zoom}')
    print(f'top left: {bounds[0]}')
    print(f'size: {size}')
    img = Image.new('RGBA', tuple(size * S))
    draw = ImageDraw.Draw(img)
    # Composite each style as a layer over the previous ones.
    for style in styles:
        print(f'fetching {style.kind}')
        style.user_agent = args.user_agent
        tiles = style.get_tiles(
            bounds, zoom, lambda i: print(f'{i/N*100:>6.2f}%'))
        for c, path in tiles.items():
            if path == 'empty':
                # TODO: fill with default sea color
                continue
            tile = Image.open(path).convert('RGBA')
            if style.tile_size != S:
                # Normalize differing tile resolutions to the output size
                tile = tile.resize((S, S))
            x, y = S * (c - bounds[0])
            img.alpha_composite(tile, (x, y))
    if args.indicators:
        # NOTE(review): 'tiles' and 'style' here are leftovers from the
        # last iteration of the loop above, so indicators are drawn only
        # for the final style's tiles -- confirm this is intended.
        for c in tiles.keys():
            x, y = S * (c - bounds[0])
            text = "{}, {}".format(*c)
            draw.rectangle(
                (x, y, x+6*len(text)+1, y+10),
                fill='white')
            draw.rectangle(
                (x, y, x+style.tile_size, y+style.tile_size),
                fill=None, outline='red', width=1)
            draw.text((x+2, y), text, fill='red')
    # Only meaningful when the image covers the whole world.
    if f := PROJECTIONS.get(args.project):
        print(f'Projecting to {args.project}...')
        img = project(img, f)
    img.save(args.out)
# Command-line interface. Coordinates are in tile units at the chosen
# zoom level (0 .. 2**zoom); see the epilog for the precedence between
# the various ways of specifying the rectangle.
epilog = '''\
Coordinates are from 0 to 2 ^ zoom.
If you want to keep the same coordinates but increase the tile resolution,
increas scale; if you increase zoom by 1 you must also double the coordinates
(and so on.)
x and y must be provided. (x1,y1) and (width, height) must be provided together.
If multiple options for size are provided, precedence is established in the order
(x1, y1) > (width, height) > radius.
'''

parser = argparse.ArgumentParser(description=__doc__, epilog=epilog)
OPTINT = dict(type=int, default=None)
parser.add_argument('--zoom', '-z', type=int, default=0, help='zoom factor (doubles per unit)')
parser.add_argument('-x', **OPTINT, help='x of top-left tile')
parser.add_argument('-y', **OPTINT, help='y of top-left tile')
parser.add_argument('--x1', **OPTINT, help='x of bottom-right tile')
parser.add_argument('--y1', **OPTINT, help='y of bottom-right tile')
parser.add_argument('--width', '-W', **OPTINT, help='width of image')
# NOTE(review): the help text below looks like a copy-paste slip -- it
# should presumably read 'height of image'.
parser.add_argument('--height', '-H', **OPTINT, help='y of bottom-right tile')
parser.add_argument('--radius', '-r', **OPTINT,
                    help='Treat (x,y) as the centre of a square and use RADIUS'
                    ' as its width.')
parser.add_argument('styles', type=str, nargs='+', choices=STYLES.keys(),
                    help='Map tile source to use, as defined in styles.txt.',)
parser.add_argument('--indicators', '-i', action='store_true',
                    help='Draw helpful coordinate indicators.'
                    'Useful for finding exactly what size you want.')
parser.add_argument('--scale', '-s', metavar='dz', type=int, default=0,
                    help='Zoom factor to scale in by. ')
parser.add_argument('--user-agent', '-u', type=str, default=None,
                    help='HTTP user agent. Provide `_` in place of `/`.')
parser.add_argument('--out', '-o', default=None,
                    help='File to output result to.'
                    ' Defaults to the arguments provided in the current folder,'
                    ' for easy comparison of different options.')
parser.add_argument('--project', '-p', choices=PROJECTIONS.keys(),
                    help='Project to a certain map projection. Works only if viewing whole Earth.')
def exit(code, message):
    """Print *message* to stderr and terminate with exit status *code*.

    Intentionally shadows the builtin exit() for CLI validation errors.
    The original implementation recursed into itself forever (it called
    this same function) and passed sys.stdout as a *positional* print
    argument, printing the stream object instead of routing output.
    """
    print(message, file=sys.stderr)
    sys.exit(code)
if __name__ == '__main__':
    args = parser.parse_args()

    # Resolve the various ways of specifying the tile rectangle into
    # (x0, y0)-(x1, y1), using the precedence documented in the epilog:
    # explicit corner > width/height > radius.
    args.x0 = args.x
    args.y0 = args.y
    # Compare against None rather than truthiness: 0 is a valid tile
    # coordinate and must not be treated as "not provided".
    if args.x is None or args.y is None:
        # No origin given: default to the whole map at this zoom.
        args.x0 = args.y0 = 0
        args.x1 = args.y1 = 1
    elif args.x1 or args.y1:
        if not (args.x1 and args.y1):
            exit(2, "--x1 and --y1 must both be present")
    elif args.width or args.height:
        if not (args.width and args.height):
            exit(2, "--width and --height must both be present")
        args.x1 = args.x + args.width
        # Fixed: was "args.y + args.width", which ignored --height and
        # forced a square output.
        args.y1 = args.y + args.height
    elif args.radius:
        args.x0 = args.x - args.radius
        args.y0 = args.y - args.radius
        args.x1 = args.x + args.radius
        args.y1 = args.y + args.radius
    # NOTE(review): if x/y are given but none of x1/width/radius, x1/y1
    # stay None and paint() will fail -- consider requiring a size.
    if args.out is None:
        # Default output name encodes the CLI arguments for easy
        # side-by-side comparison of different runs.
        from sys import argv
        args.out = ' '.join(argv[1:]).replace('/', '_') + '.png'
    if args.user_agent is not None:
        args.user_agent = args.user_agent.replace('_', '/')
    paint(args)
| 35.147783 | 99 | 0.568605 |
2d15c6aa239f61970192f728fccbeb1254fc2d7e | 2,395 | py | Python | cloudformation/tests/test_cloudformation.py | semyonmor/aqua-aws | 81a707e62a5cf2172a3e50269f919945ce1b4d7e | [
"Apache-2.0"
] | null | null | null | cloudformation/tests/test_cloudformation.py | semyonmor/aqua-aws | 81a707e62a5cf2172a3e50269f919945ce1b4d7e | [
"Apache-2.0"
] | null | null | null | cloudformation/tests/test_cloudformation.py | semyonmor/aqua-aws | 81a707e62a5cf2172a3e50269f919945ce1b4d7e | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import os
# import pytest
# Main
from dep_aws import cloudformation
from dep_aws import boto3
logging.root.setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
parser = argparse.ArgumentParser(description='Personal information')
parser.add_argument('--aws_access_key_id', dest='aws_access_key_id', type=str, help='aws_access_key_id', default=os.environ.get("AWS_ACCESS_KEY_ID"))
parser.add_argument('--aws_secret_access_key', dest='aws_secret_access_key', type=str, help='aws_secret_access_key', default=os.environ.get("AWS_SECRET_ACCESS_KEY"))
parser.add_argument('--region', dest='region', type=str, help='region', default=os.environ.get("AWS_REGION"))
parser.add_argument('--filename', dest='filename', type=str, help='cloudformation file name')
args = parser.parse_args()
aws_access_key_id = args.aws_access_key_id
aws_secret_access_key = args.aws_secret_access_key
region = args.region
filename = args.filename
# Fixed test fixtures for the CloudFormation verification below.
cluster_name = "test"
parameters = ["test"]

# Fail fast with an explicit message if any required credential or
# configuration value is missing (from CLI args or the environment).
if aws_access_key_id is None:
    # logging.error("Missing aws_access_key_id arg, please add it with --aws_access_key_id or with env as: AWS_ACCESS_KEY_ID")
    raise Exception("Missing aws_access_key_id arg, please add it with --aws_access_key_id or with env as: AWS_ACCESS_KEY_ID")

if aws_secret_access_key is None:
    # logging.error("Missing aws_secret_access_key arg, please add it with --aws_secret_access_key or with env as: AWS_SECRET_ACCESS_KEY")
    raise Exception("Missing aws_secret_access_key arg, please add it with --aws_secret_access_key or with env as: AWS_SECRET_ACCESS_KEY")

if region is None:
    # logging.error("Missing region arg, please add it with --region or with env as: REGION")
    raise Exception("Missing region arg, please add it with --region or with env as: REGION")

if filename is None:
    # logging.error("Missing region arg, please add it with --region or with env as: REGION")
    raise Exception("Missing filename arg, please add it with --filename")

logging.info("Succeeded to get cred and region")

# Build AWS clients and verify the given CloudFormation template.
session = boto3.Boto3(aws_access_key_id, aws_secret_access_key, region)
ecs_client = session.create_client("ecs")
cf_client = session.create_client("cloudformation")
cf = cloudformation.Cloudformation(cf_client, filename, cluster_name, parameters)
cf.verify_cloudformation_template()
6441019136a953d1718e547a675878a570e7bdf0 | 4,049 | py | Python | RaftEscortSim/states/State.py | gluver/RAFTEscortSim | 3a37b6e78fabe8503278d648df6078e28d25b1b0 | [
"MIT"
] | null | null | null | RaftEscortSim/states/State.py | gluver/RAFTEscortSim | 3a37b6e78fabe8503278d648df6078e28d25b1b0 | [
"MIT"
] | null | null | null | RaftEscortSim/states/State.py | gluver/RAFTEscortSim | 3a37b6e78fabe8503278d648df6078e28d25b1b0 | [
"MIT"
] | null | null | null | from RaftEscortSim.messages.LogRP import LogRP
from RaftEscortSim.messages.LogRQ import LogRQ
from RaftEscortSim.messages.VoteResponseRP import VoteRequestRP
from RaftEscortSim.messages.BaseMessage import BaseMessage
# from RaftEscortSim.nodes.ServerNode import Node
from RaftEscortSim.messages.VoteRequestRQ import VoteRequestRQ
import time,random
ELECTION_TIMEOUT=5000 #ms
class State():
    '''
    Class Summary:
        Responsible for the behaviour logic of the nodes, while the
        ServerNode class is in charge of network config initialization.

    Implements (part of) the Raft consensus message handling: vote
    request/response and log (append-entries) request/response.
    Several handlers are stubs, presumably overridden or filled in by
    role-specific subclasses -- confirm against the rest of the package.
    '''
    def __init__(self,node):
        # Back-reference to the owning ServerNode
        self.node=node
        # Randomized timeout in seconds, in [T, 2T), to avoid split votes
        self.election_timeout=random.uniform(ELECTION_TIMEOUT,ELECTION_TIMEOUT*2)/1000

    def handle_message(self,msg:BaseMessage):
        # Dispatch on the message's declared type string.
        if msg.type=='BaseMessage':
            print(f"Message from {msg.senderId} handled")
        if msg.type=='VoteRequestRQ':
            self.handle_vote_request(msg)
        if msg.type=='VoteRequestRP':
            self.handle_vote_response(msg)
        if msg.type=="LogRP":
            self.handle_log_response(msg)
        if msg.type=='LogRQ':
            self.handle_log_request(msg)

    def handle_vote_request(self,msg:VoteRequestRQ):
        # Grant the vote only if the candidate's log is at least as
        # up-to-date as ours and its term/our vote allow it.
        my_logterm=self.node.log[-1].term
        log_ok=(msg.c_lastterm>my_logterm) or \
            (msg.c_lastterm==my_logterm and msg.c_loglen>=len(self.node.log))
        term_ok=(msg.c_term>self.node.current_term) or \
            (msg.c_term==self.node.current_term and \
                (self.node.vote_for==None or self.node.vote_for==msg.c_id))
        if log_ok and term_ok:
            # Adopt the candidate's term, step down, and record the vote.
            self.node.current_term=msg.c_term
            if self.node.state_str !='Follower':
                self.node.change_state('Follower')
            self.node.vote_for=msg.c_id
            response=VoteRequestRP(self.node.node_id,self.node.current_term,True,msg.c_id)
        else:
            response=VoteRequestRP(self.node.node_id,self.node.current_term,False,msg.c_id)
        self.node.queue.put(response)

    def handle_vote_response(self,msg):
        '''Stub: vote-response handling is role-specific (candidate).'''
        pass

    def send_vote_request(self,msg):
        '''Stub: sending is handled elsewhere (see call_election).'''
        pass

    def handle_log_request(self, msg:LogRQ):
        # print(f"{msg.f_id} handling {msg.type} from {msg.senderId}, ")
        # Only act on requests addressed to this follower.
        if msg.f_id==self.node.node_id:
            if msg.l_term>self.node.current_term:
                # Newer term seen: adopt it and step down to follower.
                self.node.current_term=msg.l_term
                self.node.vote_for=None
                if self.node.state_str !='Follower':
                    self.node.change_state('Follower')
                self.node.current_leader=msg.senderId
            if msg.l_term==self.node.current_term and self.node.state_str=='Candidate':
                # A leader exists for our term: abandon the candidacy.
                self.node.current_leader=msg.senderId
                self.node.change_state('Follower')
            # Log consistency check: we must hold at least log_len
            # entries, and the terms must match at the boundary.
            logOk=(len(self.node.log)>=msg.log_len) and (msg.log_len==0 or msg.l_term==self.node.log[-1].term)
            if msg.l_term==self.node.current_term and logOk:
                # NOTE(review): append_entiries [sic] is not defined in
                # this class -- presumably provided by a subclass; verify.
                self.append_entiries(msg.log_len,msg.l_commitlen,msg.entries)
                ack=msg.log_len+len(msg.entries)
                response=LogRP(self.node.node_id,self.node.current_term,ack,True)
            else:
                response=LogRP(self.node.node_id,self.node.current_term,0,False)
            self.node.queue.put(response)

    def handle_log_response(self,msg):
        '''Stub: log-response handling is role-specific (leader).'''
        pass

    def call_election(self):
        # Start a new election: bump the term, vote for ourselves, and
        # broadcast a vote request carrying our log position.
        print(f"{self.node.node_id} calling election")
        self.node.last_term=self.node.current_term
        self.node.current_term+=1
        self.node.vote_for=self.node.node_id
        self.node.votes_received.append(self.node.node_id)
        if len(self.node.log)>0 :
            self.node.last_term=self.node.log[-1].term
        msg=VoteRequestRQ(self.node.node_id,self.node.current_term,
                            len(self.node.log),self.node.last_term)
        self.node.queue.put(msg)
        if self.node.state_str=="Follower":
            self.node.change_state('Candidate')
        self.node.last_update=time.time()
| 42.177083 | 118 | 0.642381 |
a477cf8962e26bdf54210d85cf1df3fa09ae52aa | 3,594 | py | Python | tests/integration/docusaurus/connecting_to_your_data/database/redshift_python_example.py | UtahDave/great_expectations | 99a54370f7ebeea5d95bca726200db01c7326d68 | [
"Apache-2.0"
] | 2 | 2022-01-28T15:51:32.000Z | 2022-02-02T05:07:58.000Z | tests/integration/docusaurus/connecting_to_your_data/database/redshift_python_example.py | UtahDave/great_expectations | 99a54370f7ebeea5d95bca726200db01c7326d68 | [
"Apache-2.0"
] | null | null | null | tests/integration/docusaurus/connecting_to_your_data/database/redshift_python_example.py | UtahDave/great_expectations | 99a54370f7ebeea5d95bca726200db01c7326d68 | [
"Apache-2.0"
] | 1 | 2021-10-08T01:24:50.000Z | 2021-10-08T01:24:50.000Z | import os
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest
# Redshift connection settings come from the environment so that no
# credentials are committed with this example.
redshift_username = os.environ.get("REDSHIFT_USERNAME")
redshift_password = os.environ.get("REDSHIFT_PASSWORD")
redshift_host = os.environ.get("REDSHIFT_HOST")
redshift_port = os.environ.get("REDSHIFT_PORT")
redshift_database = os.environ.get("REDSHIFT_DATABASE")
redshift_sslmode = os.environ.get("REDSHIFT_SSLMODE")
# SQLAlchemy URL; Redshift speaks the PostgreSQL wire protocol.
CONNECTION_STRING = f"postgresql+psycopg2://{redshift_username}:{redshift_password}@{redshift_host}:{redshift_port}/{redshift_database}?sslmode={redshift_sslmode}"
# This utility is not for general use. It is only to support testing.
from util import load_data_into_database
load_data_into_database(
table_name="taxi_data",
csv_path="./data/yellow_trip_data_sample_2019-01.csv",
connection_string=CONNECTION_STRING,
)
context = ge.get_context()
datasource_config = {
"name": "my_redshift_datasource",
"class_name": "Datasource",
"execution_engine": {
"class_name": "SqlAlchemyExecutionEngine",
"connection_string": "postgresql+psycopg2://<USER_NAME>:<PASSWORD>@<HOST>:<PORT>/<DATABASE>?sslmode=<SSLMODE>",
},
"data_connectors": {
"default_runtime_data_connector_name": {
"class_name": "RuntimeDataConnector",
"batch_identifiers": ["default_identifier_name"],
},
"default_inferred_data_connector_name": {
"class_name": "InferredAssetSqlDataConnector",
"name": "whole_table",
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_config["execution_engine"]["connection_string"] = CONNECTION_STRING
context.test_yaml_config(yaml.dump(datasource_config))
context.add_datasource(**datasource_config)
# First test for RuntimeBatchRequest using a query
batch_request = RuntimeBatchRequest(
datasource_name="my_redshift_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_name", # this can be anything that identifies this data
runtime_parameters={"query": "SELECT * from taxi_data LIMIT 10"},
batch_identifiers={"default_identifier_name": "something_something"},
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
# Second test for BatchRequest naming a table
batch_request = BatchRequest(
datasource_name="my_redshift_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="taxi_data", # this is the name of the table you want to retrieve
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["my_redshift_datasource"]
assert "taxi_data" in set(
context.get_available_data_asset_names()["my_redshift_datasource"][
"default_inferred_data_connector_name"
]
)
| 37.051546 | 163 | 0.766834 |
805585238638fd582d6cca0c278a2ec190a6da89 | 1,133 | py | Python | wd-orgs/main.py | tocororo/py-labs | ba16ae26df90f38030e6094c8740edb814150469 | [
"MIT"
] | null | null | null | wd-orgs/main.py | tocororo/py-labs | ba16ae26df90f38030e6094c8740edb814150469 | [
"MIT"
] | null | null | null | wd-orgs/main.py | tocororo/py-labs | ba16ae26df90f38030e6094c8740edb814150469 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from DataCollect import collect, getDataInstance
from Database.instanceOfDao import InstanceOfDao
import sys
value = ''
# Q43229
# NOTE(review): this menu loop runs at module import time; it should
# probably live under the __main__ guard at the bottom of the file.
# Loop until the user picks option 7 ("Salir"). The original condition
# was "value != '6'", which silently terminated the program right after
# generating the JSON (option 6), contradicting the menu text.
while value != '7':
    value = input("""
    1 - Crear tablas instanceOf y subClass
    2 - Obtener instancias y subclases de wikidata.
    3 - Actualizar informacion sobre instancias.
    4 - Crear copia de instancias y actualizar informacion en ella.
    5 - Eliminar tablas
    6 - Generar JSON instanceOf
    7 - Salir.
    Elija una opcion del 1-7: """)
    if value == '1':
        InstanceOfDao.createTableInstance()
        InstanceOfDao.createTableSubclass()
    elif value == '2':
        _class = input('Provea un id de clase de wikidata: ')
        collect(_class)
    elif value == '3':
        getDataInstance('original')
    elif value == '4':
        # Work on a copy so the original instance table stays untouched.
        InstanceOfDao.createInstanceCopy()
        getDataInstance('copy')
    elif value == '5':
        InstanceOfDao.dropTables()
        InstanceOfDao.dropFunctions()
    elif value == '6':
        InstanceOfDao.generateJSON()
    elif value == '7':
        sys.exit()


if __name__ == '__main__':
    ...
a7ae2fc6dcd8e8e4dc9579e5bbc57a146b5b522e | 800 | py | Python | game_data.py | KomalKalyan/connect4 | 08e9abc550b4ed8a73798bac43209ca338bcf3f2 | [
"MIT"
] | 16 | 2019-11-13T15:08:47.000Z | 2021-04-16T11:40:07.000Z | game_data.py | KomalKalyan/connect4 | 08e9abc550b4ed8a73798bac43209ca338bcf3f2 | [
"MIT"
] | 38 | 2019-10-06T05:36:15.000Z | 2021-06-01T22:03:42.000Z | game_data.py | KomalKalyan/connect4 | 08e9abc550b4ed8a73798bac43209ca338bcf3f2 | [
"MIT"
] | 35 | 2019-10-06T00:39:59.000Z | 2021-03-27T04:46:47.000Z | from typing import Tuple
from game_board import GameBoard
class GameData:
    """
    Mutable container for all state of a single game session: board
    geometry (pixel sizes) and game progress (turn, moves, winner).
    """

    radius: int
    height: int
    width: int
    sq_size: int
    size: Tuple[int, int]
    game_over: bool
    turn: int
    last_move_row: [int]
    last_move_col: [int]
    game_board: GameBoard

    def __init__(self):
        # Board geometry: a 7x7 grid of square cells.
        square = 100
        self.sq_size: int = square
        self.width: int = 7 * square
        self.height: int = 7 * square
        self.size: Tuple[int, int] = (self.width, self.height)
        self.radius: int = int(square / 2 - 5)
        # Game progress state.
        self.game_over = False
        self.turn = 0
        self.last_move_row = []
        self.last_move_col = []
        self.game_board = GameBoard()
        self.action = None
4d5f71a9b1a4d8199d162fe51e36c29d021ed1fe | 18,690 | py | Python | test/test.py | thsuanwu/aegea | 6ccc386f7615d6a380c6f827b1a20676cd4e6b33 | [
"Apache-2.0"
] | null | null | null | test/test.py | thsuanwu/aegea | 6ccc386f7615d6a380c6f827b1a20676cd4e6b33 | [
"Apache-2.0"
] | null | null | null | test/test.py | thsuanwu/aegea | 6ccc386f7615d6a380c6f827b1a20676cd4e6b33 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, unittest, argparse, collections, copy, re, subprocess, importlib, pkgutil, json, datetime, glob, time
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
import aegea, aegea.util.aws.dns
from aegea.util import Timestamp
from aegea.util.cloudinit import get_user_data
from aegea.util.aws import (resolve_ami, locate_ami, get_ondemand_price_usd, ARN,
get_public_ip_ranges, ensure_s3_bucket, encode_tags, decode_tags, filter_by_tags,
clients, resources, get_bdm, get_iam_role_for_instance, make_waiter)
from aegea.util.aws.iam import IAMPolicyBuilder
from aegea.util.aws.batch import ensure_job_definition
from aegea.util.aws.spot import SpotFleetBuilder
from aegea.util.compat import USING_PYTHON2, str
from aegea.util.exceptions import AegeaException
from aegea.util.git import private_submodules
for importer, modname, is_pkg in pkgutil.iter_modules(aegea.__path__):
importlib.import_module((aegea.__package__ or "aegea") + "." + modname)
class TestAegea(unittest.TestCase):
SubprocessResult = collections.namedtuple("SubprocessResult", "stdout stderr returncode")
    def setUp(self):
        # No shared fixtures needed; each test drives the aegea CLI directly.
        pass
def call(self, cmd, **kwargs):
print('Running "{}"'.format(cmd), file=sys.stderr)
expect = kwargs.pop("expect", [dict(return_codes=[os.EX_OK], stdout=None, stderr=None)])
process = subprocess.Popen(cmd, stdin=kwargs.get("stdin", subprocess.PIPE), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
out, err = process.communicate()
return_code = process.poll()
out = out.decode(sys.stdin.encoding)
err = err.decode(sys.stdin.encoding)
def match(return_code, out, err, expected):
exit_ok = return_code in expected["return_codes"]
stdout_ok = re.search(expected.get("stdout") or "", out)
stderr_ok = re.search(expected.get("stderr") or "", err)
return exit_ok and stdout_ok and stderr_ok
if not any(match(return_code, out, err, exp) for exp in expect):
print(err)
e = subprocess.CalledProcessError(return_code, cmd, output=out)
e.stdout, e.stderr = out, err
raise e
return self.SubprocessResult(out, err, return_code)
    @unittest.skipIf("GITHUB_ACTIONS" in os.environ, "suffers from rate limits in CI")
    def test_pricing_commands(self):
        # Smoke-test the pricing subcommand against two AWS service catalogs.
        self.call(["aegea", "pricing", "AmazonEC2"])
        self.call(["aegea", "pricing", "AmazonRDS"])
    def test_basic_aegea_commands(self):
        """Smoke-test every aegea subcommand with minimal arguments.

        Most calls accept either success or an AWS authorization error,
        since the test account may lack permissions for some services.
        """
        self.call(["aegea"], expect=[dict(return_codes=[1])])
        self.call(["aegea", "--help"])
        self.call(["aegea", "--version"])
        self.call(["aegea", "pricing"])
        self.call(["aegea", "ls", "-w9"])
        for ssh_cmd in "ssh", "scp":
            self.call(["aegea", ssh_cmd, "nonexistent_instance:"],
                      expect=[dict(return_codes=[1, os.EX_SOFTWARE], stderr="AegeaException: Could not resolve")])
        # Grab a real instance id to feed into the per-instance commands.
        instance_id = json.loads(self.call(["aegea", "ls", "--json"]).stdout)[0]["id"]
        # Iterate over every registered subcommand, supplying just enough
        # arguments for each to run (or --help where running is unsafe).
        for subcommand in aegea.parser._actions[-1].choices:
            expect = [dict(return_codes=[os.EX_OK]),
                      dict(return_codes=[1, os.EX_SOFTWARE],
                           stderr="(UnauthorizedOperation|AccessDenied|DryRunOperation)")]
            args = []
            if subcommand in ("ssh", "scp", "run", "put-alarm", "batch", "rm"):
                args += ["--help"]
            elif subcommand == "top" and sys.version_info < (3, 5):
                continue  # concurrent.futures.ThreadPoolExecutor thread count autotune introduced in 3.5
            elif "_" in subcommand:
                continue
            elif subcommand == "build-docker-image":
                args += ["--dry-run", "docker-example"]
            elif subcommand == "console":
                args += [instance_id]
            elif subcommand == "iam":
                args += ["users"]
            elif subcommand in ("start", "stop", "reboot", "terminate", "rename"):
                args += [instance_id, instance_id, "--dry-run"]
            elif subcommand in ("grep", "filter"):
                args += ["--help"] if USING_PYTHON2 else ["error", "syslog", "--start-time=-2h", "--end-time=-5m"]
                expect.append(dict(return_codes=[os.EX_DATAERR]))
            elif subcommand == "launch":
                args += ["--no-verify-ssh-key-pem-file", "--dry-run", "test", "--ubuntu-linux-ami"]
            elif subcommand == "build-ami":
                args += ["--no-verify-ssh-key-pem-file", "--dry-run", "test"]
            elif subcommand == "s3":
                args += ["buckets"]
            elif subcommand in ("secrets", "rds", "elb", "flow-logs", "deploy", "zones", "ebs", "efs",
                                "ecr", "lambda", "configure", "sfn"):
                args += ["ls"]
            elif subcommand == "pricing":
                args += ["AmazonS3", "--json"]
            elif subcommand == "billing":
                continue  # FIXME
                # NOTE(review): the lines below are unreachable dead code
                # while the "continue # FIXME" above remains.
                args += ["ls", "--min-cost", "0.1"]
                if "AWS_BILLING_REPORTS_BUCKET" in os.environ:
                    args += ["--billing-reports-bucket", os.environ["AWS_BILLING_REPORTS_BUCKET"]]
            elif subcommand == "ls":
                args += ["--filter", "state=running"]
            elif subcommand == "tag":
                args += [instance_id, "test=test test2=test"]
            elif subcommand == "untag":
                args += [instance_id, "test test2"]
            elif subcommand == "ecs":
                args += ["clusters"]
            self.call(["aegea", subcommand] + args, expect=expect)
    def test_dry_run_commands(self):
        """Smoke-test launch/build-ami/batch/ecs subcommands with --dry-run.

        Each invocation must either succeed, or fail with an
        UnauthorizedOperation error (the CI credentials may lack the EC2
        permissions required for even a dry run).
        """
        # Accept a clean exit, or a failure whose stderr shows a permission error.
        unauthorized_ok = [dict(return_codes=[os.EX_OK]),
                           dict(return_codes=[1, os.EX_SOFTWARE], stderr="UnauthorizedOperation")]
        self.call("aegea launch unittest --dry-run --storage /x=512 /y=1024 --no-verify --ubuntu-linux-ami",
                  shell=True, expect=unauthorized_ok)
        self.call("aegea launch unittest --dry-run --no-verify-ssh-key-pem-file --ubuntu-linux-ami",
                  shell=True, expect=unauthorized_ok)
        self.call("aegea launch unittest --dry-run --spot --no-verify-ssh-key-pem-file --amazon-linux-ami",
                  shell=True, expect=unauthorized_ok)
        self.call("aegea launch unittest --dry-run --duration-hours 1 --no-verify-ssh-key-pem-file --amazon-linux-ami",
                  shell=True, expect=unauthorized_ok)
        self.call(("aegea launch unittest --duration 0.5 --min-mem 6 --cores 2 --dry-run --no-verify --client-token t "
                   "--amazon-linux-ami"),
                  shell=True, expect=unauthorized_ok)
        self.call("aegea build-ami i --dry-run --no-verify-ssh-key-pem-file",
                  shell=True, expect=unauthorized_ok)
        self.call("aegea batch submit --command pwd --dry-run", shell=True)
        self.call("echo pwd > run.sh && aegea batch submit --execute run.sh --dry-run", shell=True)
        # The companion .wdl fixture is expected to live next to this test file.
        self.call("aegea batch submit --wdl '{}' --dry-run".format(__file__.replace(".py", ".wdl")), shell=True)
        self.call("aegea ecs run --command pwd --dry-run", shell=True)
    @unittest.skipIf(sys.version_info < (3, 8), "Skipping test which is prone to rate limiting")
    def test_spot_fleet_builder(self):
        """SpotFleetBuilder should pick the expected instance-type sets for
        various core/memory/GPU/storage constraints, and reject a
        min_cores_per_instance larger than the requested core count."""
        builder = SpotFleetBuilder(launch_spec={})
        self.assertEqual(set(spec["InstanceType"] for spec in builder.launch_specs()),
                         {"c3.large", "c4.large", "m3.large", "m4.large", "m3.medium"})
        # Allowing overprovisioning widens the candidate pool to larger sizes.
        self.assertEqual(set(spec["InstanceType"] for spec in builder.launch_specs(max_overprovision=4)),
                         {"c3.large", "c4.large", "m3.large", "m4.large", "m3.medium", "m4.xlarge", "c3.xlarge",
                          "c4.xlarge", "m3.xlarge"})
        # min_cores_per_instance (16) above the default core count is invalid.
        with self.assertRaises(AegeaException):
            builder = SpotFleetBuilder(launch_spec={}, min_cores_per_instance=16)
        builder = SpotFleetBuilder(launch_spec={}, cores=16, min_cores_per_instance=16)
        self.assertEqual(set(spec["InstanceType"] for spec in builder.launch_specs()),
                         {'c3.4xlarge', 'c4.8xlarge', 'c4.4xlarge', 'm4.10xlarge', 'c3.8xlarge', 'm4.4xlarge'})
        builder = SpotFleetBuilder(launch_spec={}, cores=16, min_cores_per_instance=16, min_mem_per_core_gb=6)
        self.assertEqual(set(spec["InstanceType"] for spec in builder.launch_specs()),
                         {'r3.4xlarge', 'r3.8xlarge', 'd2.4xlarge', 'i2.8xlarge', 'd2.8xlarge', 'i2.4xlarge',
                          'i3.4xlarge', 'r4.4xlarge', 'i3.8xlarge', 'r4.8xlarge'})
        builder = SpotFleetBuilder(launch_spec={}, cores=32, min_cores_per_instance=32, min_mem_per_core_gb=6)
        self.assertEqual(set(spec["InstanceType"] for spec in builder.launch_specs()),
                         {'r3.8xlarge', 'i2.8xlarge', 'd2.8xlarge', 'i3.8xlarge', 'r4.16xlarge', 'i3.16xlarge',
                          'r4.8xlarge'})
        # TODO: This will need updating when X1s come out
        # No instance type satisfies 8 GB/core at 16+ cores, so the set is empty.
        builder = SpotFleetBuilder(launch_spec={}, cores=32, min_cores_per_instance=16, min_mem_per_core_gb=8)
        self.assertFalse(set(spec["InstanceType"] for spec in builder.launch_specs()))
        builder = SpotFleetBuilder(launch_spec={}, cores=4, gpus_per_instance=1)
        self.assertEqual(set(spec["InstanceType"] for spec in builder.launch_specs()), {"g2.2xlarge"})
        builder = SpotFleetBuilder(launch_spec={}, cores=16, gpus_per_instance=4, client_token="t")
        self.assertEqual(set(spec["InstanceType"] for spec in builder.launch_specs()), {"g2.8xlarge"})
        builder = SpotFleetBuilder(launch_spec={}, min_ephemeral_storage_gb=1)
        self.assertEqual(set(spec["InstanceType"] for spec in builder.launch_specs()),
                         {'m3.large', 'c3.large', 'm3.medium'})
    def test_iam_policy_builder(self):
        """IAMPolicyBuilder: statements can be built incrementally, a policy
        can be round-tripped through its dict form, and adding a statement
        that is a duplicate of (or subset of) an existing one must not grow
        the statement list."""
        policy = IAMPolicyBuilder(principal="arn:aws:iam::account-id:user/foo", action="s3:GetObject")
        policy.add_action("s3:PutObject")
        policy.add_resource("arn:aws:s3:::examplebucket")
        policy.add_statement(effect="Deny")
        expected = {"Version": "2012-10-17",
                    "Statement": [{"Action": ["s3:GetObject", "s3:PutObject"],
                                   "Resource": ["arn:aws:s3:::examplebucket"],
                                   "Effect": "Allow",
                                   "Principal": {"AWS": "arn:aws:iam::account-id:user/foo"}},
                                  {"Action": [], "Effect": "Deny"}]}
        self.assertEqual(json.loads(str(policy)), expected)
        # Constructing from an existing policy dict must round-trip exactly.
        policy = IAMPolicyBuilder(expected)
        self.assertEqual(json.loads(str(policy)), expected)
        self.assertEqual(len(policy.policy["Statement"]), 2)
        # Exact duplicate of statement 1 -> no growth.
        policy.add_statement(principal="arn:aws:iam::account-id:user/foo",
                             action=["s3:GetObject", "s3:PutObject"],
                             resource=["arn:aws:s3:::examplebucket"])
        self.assertEqual(len(policy.policy["Statement"]), 2)
        # Subset of an existing statement -> no growth.
        policy.add_statement(principal="arn:aws:iam::account-id:user/foo",
                             action="s3:GetObject",
                             resource="arn:aws:s3:::examplebucket")
        self.assertEqual(len(policy.policy["Statement"]), 2)
        # Superset of actions -> new statement is appended.
        policy.add_statement(principal="arn:aws:iam::account-id:user/foo",
                             action=["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
                             resource=["arn:aws:s3:::examplebucket"])
        self.assertEqual(len(policy.policy["Statement"]), 3)
        # Re-adding the same superset -> deduplicated, no growth.
        policy.add_statement(principal="arn:aws:iam::account-id:user/foo",
                             action=["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
                             resource=["arn:aws:s3:::examplebucket"])
        self.assertEqual(len(policy.policy["Statement"]), 3)
        # Different resource -> new statement.
        policy.add_statement(principal="arn:aws:iam::account-id:user/foo",
                             action=["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
                             resource=["arn:aws:s3:::examplebucket2"])
        self.assertEqual(len(policy.policy["Statement"]), 4)
        # Different principal -> new statement.
        policy.add_statement(principal="arn:aws:iam::account-id:user/bar",
                             action=["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
                             resource=["arn:aws:s3:::examplebucket2"])
        self.assertEqual(len(policy.policy["Statement"]), 5)
        policy.add_statement(effect="Deny")
        self.assertEqual(len(policy.policy["Statement"]), 6)
        policy.add_statement()
        self.assertEqual(len(policy.policy["Statement"]), 7)
def test_aws_utils(self):
if not USING_PYTHON2:
self.assertTrue(isinstance(get_ondemand_price_usd("us-east-1", "t2.micro"), str))
self.assertEquals(str(ARN(region="", account_id="")), "arn:aws::::")
self.assertTrue(str(ARN()).startswith("arn:aws:"))
self.assertEquals(str(ARN("arn:aws:foo:bar:xyz:zzt")), "arn:aws:foo:bar:xyz:zzt")
self.assertEquals(str(ARN("arn:aws:a:b:c:d", service="x", region="us-west-1", account_id="1", resource="2")),
"arn:aws:x:us-west-1:1:2")
get_user_data(commands=["ls"], packages=["foo"], files=["bar"])
# Test serialization of tweak.Config objects
from tweak import Config
d = dict(x={}, y=[1, 2])
c = Config(save_on_exit=False, _parent=self, _data=d)
aegea.util.aws.dns.config = c
c.dns = {}
c.dns.private_zone = "aegea.test"
with self.assertRaises(AegeaException):
aegea.util.aws.dns.DNSZone(create_default_private_zone=False)
with self.assertRaises(AegeaException):
aegea.util.aws.dns.DNSZone(zone_name="foobar", create_default_private_zone=False)
self.assertEquals(get_user_data(foo=c, bar=2), get_user_data(bar=2, foo=c))
# ensure_s3_bucket()
self.assertEqual(encode_tags(["foo=bar"]), [{'Key': 'foo', 'Value': 'bar'}])
self.assertEqual(encode_tags(dict(foo="bar")), [{'Key': 'foo', 'Value': 'bar'}])
self.assertEqual(decode_tags([dict(Key="foo", Value="bar")]), {'foo': 'bar'})
filter_by_tags(resources.ec2.instances, Name="")
self.assertEqual(get_bdm(),
[dict(VirtualName="ephemeral" + str(i), DeviceName="xvd" + chr(ord("b") + i))
for i in range(12)])
# for instance in resources.ec2.instances.all():
# get_iam_role_for_instance(instance.id)
# break
make_waiter(clients.efs.describe_file_systems, "FileSystems[].LifeCycleState", "available", "pathAny")
    def test_locate_ami(self):
        """locate_ami should resolve Ubuntu and Amazon Linux products to an
        AMI ID (both positional and keyword-argument call styles)."""
        self.assertTrue(locate_ami("com.ubuntu.cloud:server:16.04:amd64", "us-east-1").startswith("ami-"))
        ami = locate_ami(product="com.ubuntu.cloud:server:16.04:amd64", channel="releases", stream="released",
                         region="us-west-2")
        self.assertTrue(ami.startswith("ami-"))
        self.assertTrue(locate_ami("Amazon Linux AMI 2016.09").startswith("ami-"))
    def test_ip_ranges(self):
        """Smoke test: fetch the public AWS IP ranges, unfiltered and
        filtered by region and service."""
        get_public_ip_ranges()
        get_public_ip_ranges(region="us-east-1", service="ROUTE53_HEALTHCHECKS")
def test_date_utils(self):
with self.assertRaises(TypeError):
Timestamp()
self.assertEqual(str(Timestamp(12345)), "1970-01-01 00:00:12+00:00")
self.assertEqual(str(Timestamp(1466533609099)), "2016-06-21 18:26:49+00:00")
for valid_input in "5s", "-5s", "5m", "-5m", "5h", "-5h", "5d", "-5d", "5w", "-5w", "2016-06-21 18:26:49":
self.assertTrue(isinstance(Timestamp(valid_input), datetime.datetime))
for invalid_input in None, "", {}, []:
with self.assertRaises(Exception):
print(Timestamp(invalid_input))
    @unittest.skipIf(USING_PYTHON2, "requires Python 3 dependencies")
    def test_deploy_utils(self):
        """The bundled aegea* helper scripts must at least respond to --help,
        and the two scripts that take arguments must complain when invoked
        without them."""
        deploy_utils_bindir = os.path.join(pkg_root, "aegea", "rootfs.skel", "usr", "bin")
        for script in glob.glob(deploy_utils_bindir + "/aegea*"):
            self.call([script, "--help"], expect=[dict(return_codes=[0, 1])])
        for script in "aegea-deploy-pilot", "aegea-git-ssh-helper":
            # argparse exits with code 2 and a "required"/"too few" message.
            self.call(os.path.join(deploy_utils_bindir, script),
                      expect=[dict(return_codes=[2], stderr="(required|too few)")])
    @unittest.skipIf(sys.version_info[:2] != (3, 9), "Skipping test which is prone to race conditions")
    def test_secrets(self):
        """Round-trip a uniquely named secret through put/ls/get/delete.

        Every call may also fail with an access error when the CI
        credentials lack the needed IAM/S3 permissions.
        """
        unauthorized_ok = [dict(return_codes=[os.EX_OK]),
                           dict(return_codes=[1, os.EX_SOFTWARE], stderr="(AccessDenied|NoSuchKey)")]
        # Timestamped name avoids collisions between concurrent test runs.
        secret_name = "test_secret_{}".format(int(time.time()))
        self.call("{s}=test aegea secrets put {s} --iam-role aegea.launch".format(s=secret_name),
                  shell=True, expect=unauthorized_ok)
        self.call("aegea secrets put {s} --generate-ssh-key --iam-role aegea.launch".format(s=secret_name),
                  shell=True, expect=unauthorized_ok)
        self.call("aegea secrets ls", shell=True, expect=unauthorized_ok)
        self.call("aegea secrets ls --json", shell=True, expect=unauthorized_ok)
        self.call("aegea secrets get {s} --iam-role aegea.launch".format(s=secret_name), shell=True,
                  expect=unauthorized_ok)
        self.call("aegea secrets delete {s} --iam-role aegea.launch".format(s=secret_name), shell=True,
                  expect=unauthorized_ok)
@unittest.skipIf("GITHUB_ACTIONS" in os.environ, "does not correctly run in CI")
def test_ensure_job_definition(self):
from aegea.batch import submit_parser
args = submit_parser.parse_args(["--command", ""])
args.default_job_role_iam_policies = []
args.user, args.job_role, args.default_job_role_iam_policies = "0", "aegea.batch.worker", []
jd1 = ensure_job_definition(args)
jd2 = ensure_job_definition(args)
self.assertEqual(jd1["jobDefinitionArn"], jd2["jobDefinitionArn"])
@unittest.skipUnless("GH_AUTH" in os.environ, "requires GitHub credentials")
def test_git_utils(self):
for submodule in private_submodules("git@github.com:ansible/ansible.git"):
print(submodule)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 58.773585 | 119 | 0.604815 |
0603055312d7e5060a3097e0891a49d96fb12656 | 5,038 | py | Python | content/solving-traveling-salesmans-problem-with-genetic-algorithm/example.py | root-11/root-11.github.io | 8099f32abb46a21fce51dac326be78ea1e613d79 | [
"MIT"
] | 2 | 2021-12-20T11:03:04.000Z | 2022-02-23T23:53:36.000Z | content/solving-traveling-salesmans-problem-with-genetic-algorithm/example.py | root-11/root-11.github.io | 8099f32abb46a21fce51dac326be78ea1e613d79 | [
"MIT"
] | null | null | null | content/solving-traveling-salesmans-problem-with-genetic-algorithm/example.py | root-11/root-11.github.io | 8099f32abb46a21fce51dac326be78ea1e613d79 | [
"MIT"
] | null | null | null | # Understanding the principles of genes and mutation as the driving mechanism
# for evolution is common today. Less common is the availability of a minimal
# viable example, that showcases the method.
# So here's an example I've used to enlighten friends, where I deliberately
# deviate from pep-8 to only introduce imports when they're needed.
#
#
# To solve the Traveling Salesmans Problem (TSP), we need cities to travel to,
# and to keep the world simple, we have only `x` and `y` to worry about, and
# use a straight line distance:
from collections import namedtuple
# A city is an immutable point; its `id` string ("x,y") doubles as a map key.
City = namedtuple("City", ["x", "y", "id"])
def distance(a, b):
    """Straight-line (Euclidean) distance between two cities `a` and `b`."""
    dx = a.x - b.x
    dy = a.y - b.y
    return (dx * dx + dy * dy) ** 0.5
# With this out of the way, the next element is a map:
#
# 1. We want to keep the cities in a dictionary, for easy of lookup.
# 2. We want the cities to be on the map, so we set a max `x` and a max `y` value.
# 2. We want to be able to generate the cities somewhat random.
import random
random.seed(43) # set the seed for repeatability.
def make_map(n_cities, x_max=1200, y_max=600):
    """Scatter `n_cities` cities at random integer coordinates on an
    x_max-by-y_max map and return them keyed by their "x,y" id.

    Note: two cities landing on the same coordinates share an id, so the
    later one silently replaces the earlier (same as the original code).
    """
    placed = {}
    for _ in range(n_cities):
        cx = random.randint(0, x_max)
        cy = random.randint(0, y_max)
        town = City(cx, cy, id=f"{cx},{cy}")
        placed[town.id] = town
    return placed
# We can now make with any number of cities in a single line:
# Build a shared 5-city world; the fixed seed above makes runs repeatable.
city_map = make_map(n_cities=5)
# Making a random route visiting all cities, should also be easy, as we can choose
# any random sequence that contains all towns:
def make_random_route(city_map):
    """Return the ids of all cities in `city_map`, shuffled into a random
    visiting order (an initial tour for the salesman)."""
    tour = [city_id for city_id in city_map]
    random.shuffle(tour)
    return tour
# To determine which of two routes is the shorter, it is nice to have a function
# that does the work for us. Just remember one thing:
# The TSP returns to start after traveling through all cities, so the distance
# must include the consideration that it returns to start after all cities have
# been visited.
def route_length(citymap, route):
    """Total length of the closed tour `route` over `citymap`.

    The salesman returns to the starting city, so the closing leg (last
    city back to the first) is included.  The result is truncated to an
    int, matching how lengths are displayed in the plots.

    (Fixes the previous docstring, which was copy-pasted from
    `make_random_route` and described the wrong function.)
    """
    total = 0.0
    # Pair each city with its successor; appending route[:1] closes the loop.
    for here, there in zip(route, route[1:] + route[:1]):
        total += distance(citymap[here], citymap[there])
    return int(total)
# Let's make a helper to look at it:
from matplotlib import pyplot as plt
from itertools import count
map_no = count()
def plot(citymap, route):
    """Draw all cities as dots and the closed tour `route` as connected
    line segments, titling the figure with a running plot number and the
    tour's total length, then show it."""
    plt.figure()
    xs = [c.x for c in citymap.values()]
    ys = [c.y for c in citymap.values()]
    plt.plot(xs, ys, 'o')
    a = route[0]
    # Walk consecutive city pairs; appending route[:1] closes the loop.
    for b in route[1:] + route[:1]:
        city_a, city_b = citymap[a], citymap[b]
        plt.plot([city_a.x, city_b.x], [city_a.y, city_b.y], 'bo-', clip_on=False)
        a = b
    plt.title("({}) length: {:,}".format(next(map_no), route_length(citymap, route)))
    plt.show()
# Then plot it:
# Generate a starting tour at random and visualise it.
first_route = make_random_route(city_map)
plot(city_map, first_route)
# 
# -----------
# From this point we can **_mutate_** our `first_route` simply by changing the order
# in which we visit the different cities. It would work this way:
#
# If we have 9 cities as a list like this:
#
# [1,2,3,4,5,6,7,8,9]
#
# We can select a random index point in the list and swap the numbers in the position
# before and after the index point
#
# [1,2,3,4,5,6,7,8,9]
# ^--- here.
#
# The position with the one after:
#
# before \[1,2,3,4,_**5,6**_,7,8,9]
# after \[1,2,3,4,_**6,5**_,7,8,9]
#
# We can express this change as a very simple function:
def mutate(route):
    """Return a copy of `route` with one randomly chosen pair of adjacent
    cities swapped; the input list itself is left untouched.

    The swap index is drawn from [1, len(route) - 2], so the first city
    never moves (harmless for a cyclic tour).
    """
    child = list(route)
    pivot = random.randint(1, len(route) - 2)
    child[pivot], child[pivot + 1] = child[pivot + 1], child[pivot]
    return child
# The one thing that remains to be discussed is the relationship between fitness
# and evolution. We want the "fittest" to be the shortest path.
# Let's try it out and check if the new route is better. Spoiler alert: it won't be.
# Apply a single random mutation and compare the two tours' lengths.
new_route = mutate(first_route)
plot(city_map, new_route)
# ![images not supported](content/solving-traveling-salesmans-problem-with-genetic-algorithm/map2.png)
before = route_length(city_map, first_route)
after = route_length(city_map, new_route)
print("distance before:", before, ", distance after:", after)
# >>> distance before: 2701, distance after: 3480
# As you observe, the mutation is not better than the first randomly created
# route. This leads us to acknowledge that randomised evolution is quite wasteful,
# despite that it can find a good solution. To overcome this we require retention
# of the _fittest_ solution:
# Hill climbing: repeatedly mutate the best tour found so far, keeping a
# mutation only when it shortens the total distance.
generations = 30 # number of generations to explore
shortest_distance = float('inf')
for _ in range(generations):
    new_route = mutate(first_route) # make mutation
    dist = route_length(city_map, new_route) # measure fitness.
    if dist < shortest_distance: # retain fittest solution.
        first_route = new_route
        shortest_distance = dist
print("shortest distance after", generations, ":", shortest_distance)
plot(city_map, first_route)
# 
| 28.788571 | 85 | 0.672092 |
5cb4b4cf24196c14bac7e05e971d2f816ce518b1 | 12,816 | py | Python | transition_sampling/engines/gromacs/gromacs_engine.py | UWPRG/transition-sampling | 6602bd87a66b70fb74711685f0ecfcb2d7bcbfe9 | [
"MIT"
] | 2 | 2020-12-04T01:54:44.000Z | 2021-03-09T18:44:15.000Z | transition_sampling/engines/gromacs/gromacs_engine.py | UWPRG/transition-sampling | 6602bd87a66b70fb74711685f0ecfcb2d7bcbfe9 | [
"MIT"
] | 10 | 2020-11-26T18:32:43.000Z | 2021-03-14T00:09:24.000Z | transition_sampling/engines/gromacs/gromacs_engine.py | UWPRG/transition-sampling | 6602bd87a66b70fb74711685f0ecfcb2d7bcbfe9 | [
"MIT"
] | null | null | null | """
Engine implementation of GROMACS
"""
from __future__ import annotations
import asyncio
import os
import subprocess
from typing import Sequence
import numpy as np
import parmed
from parmed.gromacs import GromacsGroFile
from mdtraj.formats import TRRTrajectoryFile
from .mdp import MDPHandler
from .. import AbstractEngine
from ..plumed import PlumedOutputHandler
class GromacsEngine(AbstractEngine):
    """
    Engine implementation of Gromacs.

    Relevant docstrings for overridden methods can be found in the base class.

    Parameters
    ----------
    inputs
        In addition to the inputs required by AbstractEngine, GromacsEngine also
        requires

        - mdp_file : str
            The path to the .mdp file to use as a template for the simulations.
            This file will not be modified
        - gro_file : str
            The path to a .gro file of the system. The positions and velocities
            are present are taken as initial values and can be changed, but the
            atoms and their indices are fixed by this file. This file will not
            be modified
        - top_file : str
            The path to the .top topology file of the system. This file will not
            be modified.
        - grompp_cmd : str
            Command to call grompp and compile the simulation parameters.
            Additional leading arguments such as mpirun can be included, but any
            arguments following grompp should be excluded.
            Example: "gmx grompp"
        - should_pin : bool
            If true, each instance of an mdrun have its threads pinned to an
            set of cores, minimizing overlap with threads from other instances.
            This includes within this engine (forwards and reverse) and between
            other engines that may be running. If this option is used, the
            number of threads for each mdrun should still be set manually in
            `md_cmd` with `-nt <# threads>`. If false, no pinning will be done,
            and resource isolation is the responsibility of `md_cmd.`

        Example: If two Gromacs Engines are run in parallel, there are 4
        parallel mdruns occurring at once. Setting should_pin=True would assign
        the threads of <engine_0_fwd> cores (0, 4, 8..),
        <engine_0_rev> cores (1, 5, 9..), <engine_1_fwd> (2, 6, 10) and
        <engine_1_rev> cores (3, 7, 11..)

    Attributes
    ----------
    grompp_cmd : list[str]
        Similar to `md_cmd`, the command to be used to compile input files to a
        .tpr, e.g. "gmx grompp", split into argument tokens
    mdp : MDPHandler
        Stores the original passed MDP file and provides methods to modify and
        write it
    gro_struct : parmed.Structure
        Stores atoms, positions and velocities given by the template GRO file.
        These positions and velocities can then be modified via assignment, and
        a new GRO file written.
    topology : str
        Raw string of the template topology file. This does not need to be
        modified, so it's just written to new locations as needed
    """

    def __init__(self, inputs: dict, working_dir: str = None):
        super().__init__(inputs, working_dir)

        self.grompp_cmd = inputs["grompp_cmd"].split()
        self.mdp = MDPHandler(inputs["mdp_file"])
        self.gro_struct = GromacsGroFile.parse(inputs["gro_file"], skip_bonds=True)

        # This is a hacky way of getting around parmed's Structure. Structure
        # implements a correct deep copy in __copy__, but does not implement
        # __deepcopy__, and the default behavior is incorrect. Since
        # GromacsEngine gets deep copied, we need the correct version to be called.
        # See https://github.com/ParmEd/ParmEd/issues/1205 for if this can be
        # safely removed
        self.gro_struct.__deepcopy__ = lambda memo_dict: self.gro_struct.__copy__()

        with open(inputs["top_file"], "r") as file:
            self.topology = file.read()

        self.set_delta_t(inputs["delta_t"])
        self.should_pin = inputs["should_pin"]

    @property
    def atoms(self) -> Sequence[str]:
        """Element names of all atoms, in the order fixed by the GRO file."""
        return [atom.element_name for atom in self.gro_struct.atoms]

    @property
    def box_size(self) -> tuple[float, float, float]:
        """Box edge lengths in Angstroms (parmed convention)."""
        # Parmed uses A. First 3 are box lengths, 2nd 3 are angles (90, 90, 90)
        return tuple(self.gro_struct.box[:3])

    def set_positions(self, positions: np.ndarray) -> None:
        # Check positions are valid by passing to base class
        super().set_positions(positions)

        self.gro_struct.positions = positions if False else positions  # noqa: see below
        self.gro_struct.coordinates = positions

    def set_velocities(self, velocities: np.ndarray) -> None:
        # Check velocities are valid by passing to base class
        super().set_velocities(velocities)

        # Convert from m/s to gromacs km/s (nm/ps). Use an out-of-place
        # division: the previous in-place `velocities /= 1000` silently
        # mutated the caller's array as a side effect.
        self.gro_struct.velocities = velocities / 1000

    def validate_inputs(self, inputs: dict) -> (bool, str):
        """Check the gromacs-specific required keys, then defer to the base
        class for the common ones. Returns (ok, error message)."""
        if "mdp_file" not in inputs:
            return False, "mdp_file required for gromacs"

        if "gro_file" not in inputs:
            return False, "gro_file required for gromacs"

        if "top_file" not in inputs:
            return False, "top_file required for gromacs"

        if "grompp_cmd" not in inputs:
            return False, "grompp_cmd required for gromacs"

        if "should_pin" not in inputs:
            return False, "should_pin required for gromacs"

        # Otherwise let the base class validate
        return super().validate_inputs(inputs)

    def set_delta_t(self, value: float) -> None:
        # Make gromacs print trajectory after every delta_t amount of time
        # rounded to the nearest frame. We can then retrieve multiples of
        # delta_t by looking at printed frames
        frames_in_dt = int(np.round(value / self.mdp.timestep))
        self.logger.info("dt of %s fs set, corresponding to %s md frames",
                         value, frames_in_dt)
        self.mdp.set_traj_print_freq(frames_in_dt)

    def get_engine_str(self) -> str:
        return "gromacs"

    def flip_velocity(self) -> None:
        """Negate all velocities in place (used to launch the reverse shot)."""
        self.gro_struct.velocities *= -1

    async def _launch_traj_fwd(self, projname: str):
        """Launch the forward trajectory, pinned to even core offsets."""
        # forward gets assigned an offset of instance * 2
        self.pin_offset = self.instance * 2
        return await super()._launch_traj_fwd(projname)

    async def _launch_traj_rev(self, projname: str):
        """Launch the reverse trajectory, pinned to odd core offsets."""
        # reverse gets assigned an offset of (instance * 2) + 1
        self.pin_offset = self.instance * 2 + 1
        return await super()._launch_traj_rev(projname)

    async def _run_grompp(self, projname: str) -> str:
        """Write the .gro/.top/.mdp inputs for `projname` and compile them
        with grompp into a .tpr, waiting asynchronously for completion.

        Returns the path of the compiled .tpr file; raises RuntimeError on
        a non-zero grompp exit.
        """
        # Writing files for grompp
        gro_path = os.path.join(self.working_dir, f"{projname}.gro")
        top_path = os.path.join(self.working_dir, f"{projname}.top")
        mdp_path = os.path.join(self.working_dir, f"{projname}.mdp")
        tpr_path = os.path.join(self.working_dir, f"{projname}.tpr")

        GromacsGroFile.write(self.gro_struct, gro_path)
        with open(top_path, "w") as file:
            file.write(self.topology)
        self.mdp.write_mdp(mdp_path)

        command_list = [*self.grompp_cmd, "-f", mdp_path, "-c",
                        gro_path, "-p", top_path, "-o", tpr_path]

        self.logger.debug("grompp-ing trajectory %s with command %s", projname,
                          command_list)

        grompp_proc = subprocess.Popen(command_list, cwd=self.working_dir,
                                       stderr=subprocess.PIPE,
                                       stdout=subprocess.PIPE)

        # Wait for it to finish
        while grompp_proc.poll() is None:
            # Non-blocking sleep
            await asyncio.sleep(0.1)

        if grompp_proc.returncode != 0:
            stdout, stderr = grompp_proc.communicate()
            stdout_msg = stdout.decode('ascii')
            stderror_msg = stderr.decode('ascii')

            self.logger.error("Trajectory %s exited fatally when grompp-ing:\n"
                              "stdout: %s\n stderr: %s", projname, stdout_msg,
                              stderror_msg)

            raise RuntimeError(f"grompp of {projname} failed")

        return tpr_path

    async def _launch_traj(self, projname: str) -> dict:
        """Launch a trajectory with the current state to completion.

        Launch a trajectory using the current state with the given command in
        a new process. Runs in the given working directory. Waits for its
        completion with async, then checks for failures or warnings.

        Parameters
        ----------
        projname
            The unique project name. No other project should have this name

        Returns
        -------
        A dictionary with the keys:
            "commit": basin integer the trajectory committed to or None if it
                did not commit
            "frames": np.array with the +delta_t and +2delta_t xyz frames. Has
                the shape (2, n_atoms, 3)
        Returns None when the required frames could not be read back.

        Raises
        ------
        RuntimeError
            If the MD engine fails to run.
        """
        # We are saving the state of the class before calling a method with
        # async.sleep in a local variable so it is not changed out from underneath
        # us. Any call to async.sleep gives an opportunity for another async method
        # to modify this class. All other variables are safe, but the pin_offset
        # is in contention between the forwards and reverse, so we save it here.
        pin_offset = str(self.pin_offset)

        tpr_path = await self._run_grompp(projname)

        # Set the name for the committor output and write the unique plumed file
        plumed_out_name = f"{projname}_plumed.out"
        plumed_in_path = os.path.join(self.working_dir,
                                      f"{projname}_plumed.dat")
        self.plumed_handler.write_plumed(plumed_in_path, plumed_out_name)

        command_list = ["-s", tpr_path, "-plumed", plumed_in_path, "-deffnm", projname]

        if self.should_pin:
            # total_instances * 2 because each has a forward and reverse mdrun
            command_list.extend(["-pinoffset", pin_offset, "-pinstride",
                                 str(self.total_instances * 2), "-pin", "on"])

        # run
        proc = await self._open_md_and_wait(command_list, projname)

        plumed_out_path = os.path.join(self.working_dir, plumed_out_name)

        # Check if there was a fatal error that wasn't caused by a committing
        # basin
        if proc.returncode != 0:
            stdout, stderr = proc.communicate()
            stdout_msg = stdout.decode('ascii')
            stderror_msg = stderr.decode('ascii')

            # Copy the output file to a place we can see it
            failed_log = os.path.join(self.working_dir, f"{projname}.log")
            copied_log = f"{projname}_FATAL.log"
            with open(copied_log, "a") as out:
                with open(failed_log, "r") as f:
                    out.write(f.read())
                out.write("\nFAILURE \n")
                out.write("STDOUT: \n")
                out.write(stdout_msg)
                out.write("\nSTDERR: \n")
                out.write(stderror_msg)

            self.logger.warning("Trajectory %s exited fatally:\n stdout: %s\n stderr: %s",
                                projname, stdout_msg, stderror_msg)

            raise RuntimeError(f"Trajectory {projname} failed")

        # TODO: check warnings in gromacs log file
        parser = PlumedOutputHandler(plumed_out_path)
        basin = parser.check_basin()

        if basin is not None:
            self.logger.info("Trajectory %s committed to basin %s", projname,
                             basin)
        else:
            self.logger.info("Trajectory %s did not commit before simulation ended",
                             projname)

        try:
            traj_path = os.path.join(self.working_dir, f"{projname}.trr")
            with TRRTrajectoryFile(traj_path, "r") as file:
                # Only the coordinates are needed; the box (previously read
                # and scaled here) was never used, so it is discarded.
                xyz, _, _, _, _ = file.read(3, stride=1)

            # Convert from nm read to A
            xyz *= 10

            # return last two frames of the three read
            return {"commit": basin,
                    "frames": xyz[1:, :, :]}
        except EOFError:
            self.logger.warning("Required frames could not be be read from the"
                                " output trajectory. This may be cased by a delta_t"
                                " that is too large where the traj committed to a"
                                " basin before 2*delta_t fs or a simulation wall time"
                                " that is too short and exited before reaching 2*delta_t fs")
            return None
| 40.175549 | 93 | 0.612516 |
95bc48431dfedb4813e88ada7667a66eebe785df | 398 | py | Python | pyethmobisir/__init__.py | balakrishnan2/pyethmobisir | 208083afe169e2178c07bf52209cbe0844caf0c9 | [
"Unlicense"
] | null | null | null | pyethmobisir/__init__.py | balakrishnan2/pyethmobisir | 208083afe169e2178c07bf52209cbe0844caf0c9 | [
"Unlicense"
] | null | null | null | pyethmobisir/__init__.py | balakrishnan2/pyethmobisir | 208083afe169e2178c07bf52209cbe0844caf0c9 | [
"Unlicense"
] | null | null | null | from pyethmobisir.client import (EthJsonRpc, ParityEthJsonRpc,
ETH_DEFAULT_RPC_PORT, GETH_DEFAULT_RPC_PORT,
PYETHAPP_DEFAULT_RPC_PORT)
from pyethmobisir.exceptions import (ConnectionError, BadStatusCodeError,
BadJsonError, BadResponseError)
from pyethmobisir.utils import wei_to_ether, ether_to_wei
| 44.222222 | 75 | 0.663317 |
77a7cbe68714ba66dd555d7f0813ae7ad403c2cb | 27,936 | py | Python | src/cuisine.py | ghuntley/cuisine | 69b470e7acb7d613f18ddae9d2f6bc74166dc03f | [
"BSD-3-Clause"
] | 1 | 2019-06-27T11:39:30.000Z | 2019-06-27T11:39:30.000Z | src/cuisine.py | ghuntley/cuisine | 69b470e7acb7d613f18ddae9d2f6bc74166dc03f | [
"BSD-3-Clause"
] | null | null | null | src/cuisine.py | ghuntley/cuisine | 69b470e7acb7d613f18ddae9d2f6bc74166dc03f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Project : Cuisine - Functions to write Fabric recipes
# -----------------------------------------------------------------------------
# Author : Sebastien Pierre <sebastien@ffctn.com>
# Author : Thierry Stiegler (gentoo port) <thierry.stiegler@gmail.com>
# Author : Jim McCoy (distro checks and rpm port) <jim.mccoy@gmail.com>
# License : Revised BSD License
# -----------------------------------------------------------------------------
# Creation : 26-Apr-2010
# Last mod : 14-Mar-2012
# -----------------------------------------------------------------------------
"""
`cuisine` makes it easy to write automatic server installation
and configuration recipes by wrapping common administrative tasks
(installing packages, creating users and groups) in Python
functions.
`cuisine` is designed to work with Fabric and provide all you
need for getting your new server up and running in minutes.
Note, that right now, Cuisine only supports Debian-based Linux
systems.
See also:
- Deploying Django with Fabric
<http://lethain.com/entry/2008/nov/04/deploying-django-with-fabric>
- Notes on Python Fabric 0.9b1
<http://www.saltycrane.com/blog/2009/10/notes-python-fabric-09b1>`_
- EC2, fabric, and "err: stdin: is not a tty"
<http://blog.markfeeney.com/2009/12/ec2-fabric-and-err-stdin-is-not-tty.html>`_
:copyright: (c) 2011,2012 by Sébastien Pierre.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import base64, bz2, hashlib, os, random, sys, re, string, tempfile, subprocess, types, functools
import fabric, fabric.api, fabric.operations, fabric.context_managers
VERSION = "0.2.4"
RE_SPACES = re.compile("[\s\t]+")
MAC_EOL = "\n"
UNIX_EOL = "\n"
WINDOWS_EOL = "\r\n"
# FIXME: MODE should be in the fabric env, as this is definitely not thread-safe
MODE_LOCAL = False
MODE_SUDO = False
SUDO_PASSWORD = None
DEFAULT_OPTIONS = dict(
package="apt"
)
# context managers and wrappers around fabric's run/sudo; used to
# either execute cuisine functions with sudo or as current user:
#
# with mode_sudo():
# pass
def sudo_password(password):
    """Sets the password used by local-mode sudo commands (see `mode_local`);
    when left as None, sudo is invoked without a piped password."""
    global SUDO_PASSWORD
    SUDO_PASSWORD= password
def mode_local():
    """Sets Cuisine into local mode, where run/sudo won't go through
    Fabric's API, but directly through a popen. This allows you to
    easily test your Cuisine scripts without using Fabric.

    Returns True if the switch happened, False if local mode was already
    active. The original run/sudo are saved in MODE_LOCAL so that
    `mode_remote()` can restore them."""
    global MODE_LOCAL, SUDO_PASSWORD
    sudo_cmd = "sudo "
    if not SUDO_PASSWORD is None:
        # NOTE: piping the password through echo exposes it on the local
        # process command line (visible in `ps` output).
        sudo_cmd= "echo %s|sudo -S -p '' "%SUDO_PASSWORD
    if MODE_LOCAL is False:
        # Local replacements: run os.popen and strip the trailing newline.
        def custom_run( cmd ):
            global MODE_SUDO
            if MODE_SUDO:
                return os.popen(sudo_cmd + cmd).read()[:-1]
            else:
                return os.popen(cmd).read()[:-1]
        def custom_sudo( cmd ):
            return os.popen(sudo_cmd + cmd).read()[:-1]
        # Swap this module's run/sudo attributes, keeping the originals so
        # the swap can be undone by mode_remote().
        module = sys.modules[__name__]
        old_run = getattr(module, "run")
        old_sudo = getattr(module, "sudo")
        setattr(module, "run", custom_run)
        setattr(module, "sudo", custom_sudo)
        MODE_LOCAL = (old_run, old_sudo)
        return True
    else:
        return False
def mode_remote():
    """Comes back to Fabric's API for run/sudo, reverting the effect of a
    previous `mode_local()` call. Returns True if the switch happened,
    False if the engine was not in local mode."""
    global MODE_LOCAL
    if MODE_LOCAL is False:
        return False
    saved_run, saved_sudo = MODE_LOCAL
    module = sys.modules[__name__]
    setattr(module, "run", saved_run)
    setattr(module, "sudo", saved_sudo)
    MODE_LOCAL = False
    return True
class mode_user(object):
    """Context manager under which Cuisine functions run as the plain
    (non-sudo) user; the previous mode is restored on exit.

    Note: the switch happens in __init__, so merely constructing the
    object already changes the mode."""
    def __init__(self):
        global MODE_SUDO
        self._previous = MODE_SUDO
        MODE_SUDO = False

    def __enter__(self):
        pass

    def __exit__(self, *args, **kws):
        global MODE_SUDO
        MODE_SUDO = self._previous
class mode_sudo(object):
    """Context manager under which Cuisine functions run with sudo; the
    previous mode is restored on exit.

    Note: the switch happens in __init__, so merely constructing the
    object already changes the mode."""
    def __init__(self):
        global MODE_SUDO
        self._previous = MODE_SUDO
        MODE_SUDO = True

    def __enter__(self):
        pass

    def __exit__(self, *args, **kws):
        global MODE_SUDO
        MODE_SUDO = self._previous
# =============================================================================
#
# OPTIONS
#
# =============================================================================
def select_package(option=None):
    """Select the package backend ("apt" or "yum") when *option* is given;
    always returns the tuple (current backend, supported backends)."""
    supported = ["apt", "yum"]
    if option is not None:
        assert option in supported, "Option must be one of: %s" % (supported)
        fabric.api.env["option_package"] = option
    return (fabric.api.env["option_package"], supported)
# =============================================================================
#
# RUN/SUDO METHODS
#
# =============================================================================
def run(*args, **kwargs):
    """A wrapper to Fabric's run/sudo commands, using the
    'cuisine.MODE' global to tell whether the command should be run as
    regular user or sudo."""
    backend = fabric.api.sudo if MODE_SUDO else fabric.api.run
    return backend(*args, **kwargs)
def run_local(command):
    """A wrapper around subprocess.

    NOTE(review): the pipe is fully read into `res` (which is then
    discarded) and the *exhausted* pipe object is returned, so callers
    cannot read any output from the return value -- confirm whether
    returning `res` was intended."""
    pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout
    res = pipe.read()
    # FIXME: Should stream the pipe, and only print it if fabric's properties allow it
    # print res
    return pipe
def sudo(*args, **kwargs):
    """A wrapper to Fabric's run/sudo commands, using the
    'cuisine.MODE_SUDO' global to tell whether the command should be run as
    regular user or sudo.

    NOTE(review): unlike run(), this always invokes fabric.api.sudo
    regardless of MODE_SUDO."""
    return fabric.api.sudo(*args, **kwargs)
# =============================================================================
#
# DECORATORS
#
# =============================================================================
def dispatch(prefix=None):
    """Dispatches the current function to specific implementation. The `prefix`
    parameter indicates the common option prefix, and the `option_select()`
    function will determine the function suffix.
    For instance the package functions are defined like that:
    {{{
    @dispatch("package")
    def package_ensure(...):
        ...
    def package_ensure_apt(...):
        ...
    def package_ensure_yum(...):
        ...
    }}}
    and then when a user does
    {{{
    cuisine.option_select("package", "yum")
    cuisine.package_ensure(...)
    }}}
    then the `dispatch` function will dispatch `package_ensure` to
    `package_ensure_yum`.
    If your prefix is the first word of the function name before the
    first `_` then you can simply use `@dispatch` without parameters.
    """
    def dispatch_wrapper(function, prefix=prefix):
        def wrapper(*args, **kwargs):
            function_name = function.__name__
            # Default prefix: first "_"-separated word of the function name.
            _prefix = prefix or function_name.split("_")[0]
            # The active backend for this prefix is stored in fabric's env
            # by select_<prefix>() (e.g. select_package).
            select = fabric.api.env.get("option_" + _prefix)
            assert select, "No option defined for: %s, call select_%s(<YOUR OPTION>) to set it" % (_prefix, prefix)
            function_name = function.__name__ + "_" + select
            # NOTE(review): eval() resolves "<name>_<backend>" in this
            # module's namespace; `select` comes from a validated closed set.
            specific = eval(function_name)
            if specific:
                if type(specific) == types.FunctionType:
                    return specific(*args, **kwargs)
                else:
                    raise Exception("Function expected for: " + function_name)
            else:
                raise Exception("Function variant not defined: " + function_name)
        # We copy name and docstring
        functools.update_wrapper(wrapper, function)
        return wrapper
    # Bare "@dispatch" (no parentheses): `prefix` is actually the function.
    if type(prefix) == types.FunctionType:
        return dispatch_wrapper(prefix, None)
    else:
        return dispatch_wrapper
# =============================================================================
#
# TEXT PROCESSING
#
# =============================================================================
def text_detect_eol(text):
    """Detect the end-of-line convention used in *text*, checking the
    two-character Windows marker before the single-character ones.
    Falls back to "\\n" when no EOL is present."""
    # FIXME: Should look at the first line
    for marker, eol in (("\r\n", WINDOWS_EOL), ("\n", UNIX_EOL), ("\r", MAC_EOL)):
        if marker in text:
            return eol
    return "\n"
def text_get_line(text, predicate):
    """Return the first line of *text* that satisfies *predicate*, or an
    empty string when no line matches."""
    return next((line for line in text.split("\n") if predicate(line)), "")
def text_normalize(text):
    """Converts tabs and spaces to single space and strips the text."""
    # RE_SPACES is defined elsewhere in this module; presumably it matches
    # runs of whitespace -- TODO confirm its newline handling before relying on it.
    return RE_SPACES.sub(" ", text).strip()
def text_nospace(text):
    """Removes all tabs and spaces from the text and strips it (the
    previous docstring, copied from text_normalize, was inaccurate)."""
    return RE_SPACES.sub("", text).strip()
def text_replace_line(text, old, new, find=lambda old, new: old == new, process=lambda _: _):
    """Replaces lines equal to 'old' with 'new', returning the new
    text and the count of replacements. `process` normalizes each line
    before comparison; `find` decides whether two processed lines match."""
    eol = text_detect_eol(text)
    result = []
    count = 0
    for current in text.split(eol):
        if find(process(current), process(old)):
            result.append(new)
            count += 1
        else:
            result.append(current)
    return eol.join(result), count
def text_ensure_line(text, *lines):
    """Ensures that the given lines are present in the given text,
    otherwise appends the lines that are not already in the text at
    the end of it."""
    eol = text_detect_eol(text)
    res = list(text.split(eol))
    # Use equality, not identity: the previous `res[0] is ''` / `len(res) is 1`
    # only worked by accident of CPython interning and raises SyntaxWarning
    # on modern Pythons.
    if len(res) == 1 and res[0] == '':
        res = list()
    for line in lines:
        assert line.find(eol) == -1, "No EOL allowed in lines parameter: " + repr(line)
        # Membership test replaces the manual found-flag loop.
        if line not in res:
            res.append(line)
    return eol.join(res)
def text_strip_margin(text, margin="|"):
    """Strip everything up to and including the first *margin* character
    on each line; lines without a margin are kept unchanged."""
    eol = text_detect_eol(text)
    stripped = []
    for raw in text.split(eol):
        parts = raw.split(margin, 1)
        stripped.append(parts[1] if len(parts) == 2 else raw)
    return eol.join(stripped)
def text_template(text, variables):
    """Substitutes '${PLACEHOLDER}'s within the text with the
    corresponding values from variables; unknown placeholders are left
    untouched (safe_substitute)."""
    return string.Template(text).safe_substitute(variables)
# =============================================================================
#
# FILE OPERATIONS
#
# =============================================================================
def file_local_read(location):
    """Reads a *local* file from the given location, expanding '~' and
    shell variables, and returns its (binary) contents.

    Uses open() with a context manager: the previous file() builtin is
    Python-2-only and leaked the handle on a read error."""
    path = os.path.expandvars(os.path.expanduser(location))
    with open(path, 'rb') as handle:
        return handle.read()
def file_read(location):
    """Reads the *remote* file at the given location and returns its
    decoded contents (bytes on Python 3)."""
    # NOTE: We use base64 here to be sure to preserve the encoding (UNIX/DOC/MAC) of EOLs
    return base64.b64decode(run('cat "%s" | base64' % (location)))
def file_exists(location):
    """Tests if there is a *remote* file at the given location."""
    # endswith() (matching dir_exists below) tolerates stray output, e.g.
    # sudo warnings, printed before the OK marker; strict equality did not.
    return run('test -e "%s" && echo OK ; true' % (location)).endswith("OK")
def file_is_file(location):
    """Tests if the remote path is a regular file."""
    return run("test -f '%s' && echo OK ; true" % (location)).endswith("OK")
def file_is_dir(location):
    """Tests if the remote path is a directory."""
    return run("test -d '%s' && echo OK ; true" % (location)).endswith("OK")
def file_is_link(location):
    """Tests if the remote path is a symbolic link."""
    return run("test -L '%s' && echo OK ; true" % (location)).endswith("OK")
def file_attribs(location, mode=None, owner=None, group=None, recursive=False):
    """Updates the mode/owner/group for the remote file at the given
    location; when `recursive` is true the change is applied with -R."""
    # Conditional expression replaces the old `and/or` truthiness trick.
    recursive = "-R " if recursive else ""
    if mode:
        run('chmod %s %s "%s"' % (recursive, mode, location))
    if owner:
        run('chown %s %s "%s"' % (recursive, owner, location))
    if group:
        run('chgrp %s %s "%s"' % (recursive, group, location))
def file_attribs_get(location):
    """Return mode, owner, and group for remote path.
    Return mode, owner, and group if remote path exists, 'None'
    otherwise.
    """
    if not file_exists(location):
        return None
    # stat prints e.g. '644 root root' with the given format string.
    raw = run('stat %s %s' % (location, '--format="%a %U %G"'))
    mode, owner, group = raw.split(' ')
    return {'mode': mode, 'owner': owner, 'group': group}
def file_write(location, content, mode=None, owner=None, group=None, sudo=None):
    """Writes the given content to the file at the given remote
    location, optionally setting mode/owner/group. The upload is skipped
    when the remote file already has the same SHA-256 signature."""
    # FIXME: Big files are never transferred properly!
    # Gets the content signature and write it to a secure tempfile
    sig = hashlib.sha256(content).hexdigest()
    fd, local_path = tempfile.mkstemp()
    os.write(fd, content)
    # Upload the content if necessary
    if not file_exists(location) or sig != file_sha256(location):
        if MODE_LOCAL:
            run('cp "%s" "%s"' % (local_path, location))
        else:
            if MODE_SUDO:
                sudo = MODE_SUDO
            try:
                fabric.operations.put(local_path, location, use_sudo=sudo)
            # "except X as e" replaces the Python-2-only "except X, e"
            # syntax; the exception is now included in the message instead
            # of being silently dropped.
            except Exception as e:
                print("cuisine.file_write exception: %s" % (e,))
    # Remove the local temp file
    os.close(fd)
    os.unlink(local_path)
    # Ensures that the signature matches
    assert sig == file_sha256(location)
    file_attribs(location, mode=mode, owner=owner, group=group)
def file_ensure(location, mode=None, owner=None, group=None, recursive=False):
    """Updates the mode/owner/group for the remote file at the given
    location, creating it empty first when it does not exist."""
    if file_exists(location):
        # Forward `recursive`: it was previously accepted but ignored.
        file_attribs(location, mode=mode, owner=owner, group=group, recursive=recursive)
    else:
        file_write(location, "", mode=mode, owner=owner, group=group)
def file_upload(remote, local, sudo=None):
    """Uploads the local file to the remote location only if the remote location does not
    exists or the content are different."""
    # FIXME: Big files are never transferred properly!
    # open() + context manager replaces the Python-2-only file() builtin.
    with open(local, 'rb') as f:
        content = f.read()
    sig = hashlib.sha256(content).hexdigest()
    if MODE_SUDO:
        sudo = MODE_SUDO
    if not file_exists(remote) or sig != file_sha256(remote):
        if MODE_LOCAL:
            copy_cmd = 'cp "%s" "%s"' % (local, remote)
            if sudo:
                # The `sudo` parameter shadows the module-level sudo()
                # function; calling the bare name here would invoke a
                # boolean flag. Look the function up explicitly.
                globals()["sudo"](copy_cmd)
            else:
                run(copy_cmd)
        else:
            fabric.operations.put(local, remote, use_sudo=sudo)
def file_update(location, updater=lambda x: x):
    """Updates the content of the given by passing the existing
    content of the remote file at the given location to the 'updater'
    function.
    For instance, if you'd like to convert an existing file to all
    uppercase, simply do:
    > file_update("/etc/myfile", lambda _:_.upper())
    """
    assert file_exists(location), "File does not exists: " + location
    new_content = updater(file_read(location))
    # assert type(new_content) in (str, unicode, fabric.operations._AttributeString), "Updater must be like (string)->string, got: %s() = %s" % (updater, type(new_content))
    # base64 round-trip keeps arbitrary content (quotes, EOLs) intact
    # through the remote shell.
    run('echo "%s" | base64 -d > "%s"' % (base64.b64encode(new_content), location))
def file_append(location, content, mode=None, owner=None, group=None):
    """Appends the given content to the remote file at the given
    location, optionally updating its mode/owner/group."""
    # base64 round-trip keeps arbitrary content intact through the shell.
    run('echo "%s" | base64 -d >> "%s"' % (base64.b64encode(content), location))
    file_attribs(location, mode, owner, group)
def file_unlink(path):
    """Removes the remote file/link at *path* if it exists."""
    if file_exists(path):
        run("unlink '%s'" % (path))
def file_link(source, destination, symbolic=True, mode=None, owner=None, group=None):
    """Creates a (symbolic) link between source and destination on the remote host,
    optionally setting its mode/owner/group. An existing link at the
    destination is replaced; any other existing file raises."""
    if file_exists(destination) and (not file_is_link(destination)):
        raise Exception("Destination already exists and is not a link: %s" % (destination))
    if file_is_link(destination):
        file_unlink(destination)
    flag = "-sf" if symbolic else "-f"
    run('ln %s "%s" "%s"' % (flag, source, destination))
    file_attribs(destination, mode, owner, group)
def file_sha256(location):
    """Returns the SHA-256 sum (as a hex string) for the remote file at the given location."""
    # NOTE: In some cases, sudo can output errors in here -- but the errors will
    # appear before the result, so we simply split and get the last line to
    # be on the safe side.
    return run('sha256sum "%s" | cut -d" " -f1' % (location)).split("\n")[-1]
# =============================================================================
#
# DIRECTORY OPERATIONS
#
# =============================================================================
def dir_attribs(location, mode=None, owner=None, group=None, recursive=False):
    """Updates the mode/owner/group for the given remote directory."""
    # Delegates to file_attribs: chmod/chown/chgrp work the same on directories.
    file_attribs(location, mode, owner, group, recursive)
def dir_exists(location):
    """Tells if there is a remote directory at the given location."""
    # endswith() tolerates noise (e.g. sudo warnings) printed before "OK".
    return run('test -d "%s" && echo OK ; true' % (location)).endswith("OK")
def dir_ensure(location, recursive=False, mode=None, owner=None, group=None):
    """Ensures that there is a remote directory at the given location,
    optionally updating its mode/owner/group.
    If we are not updating the owner/group then this can be done as a single
    ssh call, so use that method, otherwise set owner/group after creation."""
    if not dir_exists(location):
        # Conditional expression replaces the old `and/or` truthiness trick.
        run('mkdir %s "%s" && echo OK ; true' % ("-p" if recursive else "", location))
    if owner or group or mode:
        dir_attribs(location, owner=owner, group=group, mode=mode)
# =============================================================================
#
# PACKAGE OPERATIONS
#
# =============================================================================
@dispatch
def package_upgrade():
    """Updates every package present on the system."""
@dispatch
def package_update(package=None):
    """Updates the package database (when no argument) or update the package
    or list of packages given as argument.

    Note: this stub was previously defined twice (the duplicate carried
    the misleading docstring "Upgrade the system"); the redundant
    definition has been removed -- dispatch behavior is unchanged."""
@dispatch
def package_install(package, update=False):
    """Installs the given package/list of package, optionally updating
    the package database."""
@dispatch
def package_ensure(package, update=False):
    """Tests if the given package is installed, and installs it in
    case it's not already there. If `update` is true, then the
    package will be updated if it already exists."""
@dispatch
def package_clean(package=None):
    """Cleans the repository of unneeded files."""
# -----------------------------------------------------------------------------
# APT PACKAGE (DEBIAN/UBUNTU)
# -----------------------------------------------------------------------------
def repository_ensure_apt(repository):
    """Registers the given APT repository via add-apt-repository."""
    sudo("add-apt-repository " + repository)
def package_update_apt(package=None):
    """Updates the APT index (no argument) or upgrades the given
    package/list of packages."""
    if package is None:
        sudo("apt-get --yes update")
    else:
        if isinstance(package, (list, tuple)):
            package = " ".join(package)
        sudo("apt-get --yes upgrade " + package)
def package_upgrade_apt(package=None):
    """Upgrades every installed package. The `package` argument is
    accepted for dispatch compatibility but not used (a duplicate
    zero-argument definition that this one shadowed has been removed)."""
    sudo("apt-get --yes upgrade")
def package_install_apt(package, update=False):
    """Installs the given package/list of packages, optionally
    refreshing the package index first."""
    if update:
        sudo("apt-get --yes update")
    if isinstance(package, (list, tuple)):
        package = " ".join(package)
    sudo("apt-get --yes install %s" % (package))
def package_ensure_apt(package, update=False):
    """Installs the package when missing; returns True when it was
    already installed (optionally updating it), False when it had to be
    installed."""
    status = run("dpkg-query -W -f='${Status}' %s ; true" % package)
    if status.find("not-installed") != -1 or status.find("installed") == -1:
        package_install(package)
        return False
    else:
        if update:
            package_update(package)
        return True
def package_clean_apt(package=None):
    """No-op for APT (nothing to clean here)."""
    pass
# -----------------------------------------------------------------------------
# YUM PACKAGE (RedHat, CentOS)
# added by Prune - 20120408 - v1.0
# -----------------------------------------------------------------------------
def repository_ensure_yum(repository):
    """Not implemented for yum (placeholder for dispatch symmetry)."""
    pass
def package_update_yum(package=None):
    """Updates the yum metadata (no argument) or upgrades the given
    package/list of packages."""
    if package is None:
        sudo("yum --assumeyes update")
    else:
        if isinstance(package, (list, tuple)):
            package = " ".join(package)
        sudo("yum --assumeyes upgrade " + package)
def package_upgrade_yum(package=None):
    """Upgrades every installed package. The `package` argument is
    accepted for dispatch compatibility but not used (a duplicate
    zero-argument definition that this one shadowed has been removed)."""
    sudo("yum --assumeyes upgrade")
def package_install_yum(package, update=False):
    """Installs the given package/list of packages, optionally updating
    the metadata first."""
    if update:
        sudo("yum --assumeyes update")
    if isinstance(package, (list, tuple)):
        package = " ".join(package)
    sudo("yum --assumeyes install %s" % (package))
def package_ensure_yum(package, update=False):
    """Installs the package when missing; returns True when it was
    already installed (optionally updating it), False otherwise.

    NOTE(review): some yum versions print "No matching Packages to list"
    with different capitalisation -- confirm the marker string."""
    status = run("yum list installed %s ; true" % package)
    if status.find("No matching Packages") != -1 or status.find(package) == -1:
        package_install(package)
        return False
    else:
        if update:
            package_update(package)
        return True
def package_clean_yum(package=None):
    """Cleans all cached yum data."""
    sudo("yum --assumeyes clean all")
# =============================================================================
#
# SHELL COMMANDS
#
# =============================================================================
def command_check(command):
    """Tests if the given command is available on the system."""
    # NOTE(review): ">&" is csh-style redirection; under a plain POSIX sh
    # it may not silence stderr as intended -- confirm the remote shell.
    return run("which '%s' >& /dev/null && echo OK ; true" % command).endswith("OK")
def command_ensure(command, package=None):
    """Ensures that the given command is present, if not installs the
    package with the given name, which is the same as the command by
    default."""
    package = command if package is None else package
    if not command_check(command):
        package_install(package)
    assert command_check(command), \
        "Command was not installed, check for errors: %s" % (command)
# =============================================================================
#
# USER OPERATIONS
#
# =============================================================================
def user_passwd(name, passwd):
    """Sets the given user password."""
    # base64 round-trip avoids quoting/escaping issues in the
    # "name:password" pair fed to chpasswd.
    encoded_password = base64.b64encode("%s:%s" % (name, passwd))
    sudo("echo %s | base64 --decode | chpasswd" % encoded_password)
def user_create(name, passwd=None, home=None, uid=None, gid=None, shell=None,
                uid_min=None, uid_max=None):
    """Creates the user with the given name, optionally giving a
    specific password/home/uid/gid/shell."""
    options = ["-m"]  # always create the home directory
    if home:
        options.append("-d '%s'" % (home))
    if uid:
        options.append("-u '%s'" % (uid))
    #if group exists already but is not specified, useradd fails
    if not gid and group_check(name):
        gid = name
    if gid:
        options.append("-g '%s'" % (gid))
    if shell:
        options.append("-s '%s'" % (shell))
    # -K overrides login.defs limits for this invocation only.
    if uid_min:
        options.append("-K UID_MIN='%s'" % (uid_min))
    if uid_max:
        options.append("-K UID_MAX='%s'" % (uid_max))
    sudo("useradd %s '%s'" % (" ".join(options), name))
    if passwd:
        # Password is set separately via chpasswd (useradd -p expects a hash).
        user_passwd(name, passwd)
def user_check(name):
    """Checks if there is a user defined with the given name,
    returning its information as a
    '{"name":<str>,"uid":<str>,"gid":<str>,"home":<str>,"shell":<str>}'
    or 'None' if the user does not exists."""
    d = sudo("cat /etc/passwd | egrep '^%s:' ; true" % (name))
    # Second field of /etc/shadow is the password hash (requires root).
    s = sudo("cat /etc/shadow | egrep '^%s:' | awk -F':' '{print $2}'" % (name))
    results = {}
    if d:
        d = d.split(":")
        assert len(d) >= 7, "/etc/passwd entry is expected to have at least 7 fields, got %s in: %s" % (len(d), ":".join(d))
        results = dict(name=d[0], uid=d[2], gid=d[3], home=d[5], shell=d[6])
    if s:
        results['passwd'] = s
    if results:
        return results
    else:
        return None
def user_ensure(name, passwd=None, home=None, uid=None, gid=None, shell=None):
    """Ensures that the given users exists, optionally updating their
    passwd/home/uid/gid/shell."""
    d = user_check(name)
    if not d:
        user_create(name, passwd, home, uid, gid, shell)
    else:
        # Only pass usermod the attributes that actually differ.
        options = []
        if home != None and d.get("home") != home:
            options.append("-d '%s'" % (home))
        if uid != None and d.get("uid") != uid:
            options.append("-u '%s'" % (uid))
        if gid != None and d.get("gid") != gid:
            options.append("-g '%s'" % (gid))
        if shell != None and d.get("shell") != shell:
            options.append("-s '%s'" % (shell))
        if options:
            sudo("usermod %s '%s'" % (" ".join(options), name))
        if passwd:
            # NOTE(review): the password is always reset when given, even
            # if unchanged -- user_check's hash is not compared here.
            user_passwd(name, passwd)
def user_remove(name, rmhome=None):
    """Removes the user with the given name, optionally
    removing the home directory and mail spool."""
    flags = "-f -r" if rmhome else "-f"
    sudo("userdel %s '%s'" % (flags, name))
# =============================================================================
#
# GROUP OPERATIONS
#
# =============================================================================
def group_create(name, gid=None):
    """Creates a group with the given name, and optionally given gid."""
    opts = "-g '%s'" % (gid,) if gid else ""
    sudo("groupadd %s '%s'" % (opts, name))
def group_check(name):
    """Checks if there is a group defined with the given name,
    returning its information as a
    '{"name":<str>,"gid":<str>,"members":<list[str]>}' or 'None' if
    the group does not exists."""
    group_data = run("cat /etc/group | egrep '^%s:' ; true" % (name))
    if not group_data:
        return None
    # /etc/group rows have exactly 4 fields: name:password:gid:member-list;
    # maxsplit=3 expresses that (the previous maxsplit of 4 was off by one,
    # though harmless on well-formed rows).
    name, _, gid, members = group_data.split(":", 3)
    return dict(name=name, gid=gid,
                members=tuple(m.strip() for m in members.split(",")))
def group_ensure(name, gid=None):
    """Ensures that the group with the given name (and optional gid)
    exists."""
    d = group_check(name)
    if not d:
        group_create(name, gid)
    else:
        # Existing group: only adjust the gid if it differs.
        if gid != None and d.get("gid") != gid:
            sudo("groupmod -g %s '%s'" % (gid, name))
def group_user_check(group, user):
    """Checks if the given user is a member of the given group. It
    will return 'False' if the group does not exist."""
    entry = group_check(group)
    return False if entry is None else user in entry["members"]
def group_user_add(group, user):
    """Adds the given user/list of users to the given group/groups."""
    assert group_check(group), "Group does not exist: %s" % (group)
    if not group_user_check(group, user):
        # -a appends to supplementary groups without dropping the others.
        sudo("usermod -a -G '%s' '%s'" % (group, user))
def group_user_ensure(group, user):
    """Ensure that a given user is a member of a given group."""
    # Use group_user_check() so a missing group is handled consistently:
    # the previous direct d["members"] lookup raised TypeError on None.
    if not group_user_check(group, user):
        group_user_add(group, user)
### ssh_<operation> functions
def ssh_keygen(user, keytype="dsa"):
    """Generates a pair of ssh keys in the user's home .ssh directory
    (when not already present) and returns the public key path.

    NOTE(review): "dsa" is a legacy default; modern OpenSSH rejects DSA
    keys -- consider passing "rsa" or "ed25519"."""
    d = user_check(user)
    assert d, "User does not exist: %s" % (user)
    home = d["home"]
    key_file = home + "/.ssh/id_%s.pub" % keytype
    if not file_exists(key_file):
        dir_ensure(home + "/.ssh", mode="0700", owner=user, group=user)
        run("ssh-keygen -q -t %s -f '%s/.ssh/id_%s' -N ''" %
            (keytype, home, keytype))
        file_attribs(home + "/.ssh/id_%s" % keytype, owner=user, group=user)
        file_attribs(home + "/.ssh/id_%s.pub" % keytype, owner=user, group=user)
    # Single exit point replaces the redundant if/else returns.
    return key_file
# =============================================================================
#
# MISC
#
# =============================================================================
def ssh_authorize(user, key):
    """Adds the given key to the '.ssh/authorized_keys' for the given
    user. Returns True when the key was already present, False when it
    was added (or the file was created)."""
    d = user_check(user)
    keyf = d["home"] + "/.ssh/authorized_keys"
    if key[-1] != "\n":
        key += "\n"
    if file_exists(keyf):
        # Read the remote file once (it was previously fetched twice).
        existing = file_read(keyf)
        if existing.find(key[:-1]) == -1:
            file_append(keyf, key)
            return False
        return True
    # Make sure that .ssh directory exists, see #42
    dir_ensure(os.path.dirname(keyf), owner=user, group=user, mode="700")
    file_write(keyf, key, owner=user, group=user, mode="600")
    return False
def upstart_ensure(name):
    """Ensures that the given upstart service is running, restarting
    it if necessary."""
    with fabric.api.settings(warn_only=True):
        # warn_only: a stopped service makes "status" exit non-zero
        # without aborting the fabric task.
        status = sudo("service %s status" % name)
        if status.failed:
            sudo("service %s start" % name)
        else:
            sudo("service %s restart" % name)
def system_uuid_alias_add():
    """Adds system UUID alias to /etc/hosts.
    Some tools/processes rely/want the hostname as an alias in
    /etc/hosts e.g. `127.0.0.1 localhost <hostname>`.
    """
    with mode_sudo():
        # Rewrite the loopback line in place, appending the machine UUID.
        old = "127.0.0.1 localhost"
        new = old + " " + system_uuid()
        file_update('/etc/hosts', lambda x: text_replace_line(x, old, new)[0])
def system_uuid():
    """Gets a machines UUID (Universally Unique Identifier)."""
    # dmidecode needs root; tr lowercases the vendor-formatted UUID.
    return sudo('dmidecode -s system-uuid | tr "[A-Z]" "[a-z]"')
#Only tested on Ubuntu!
def locale_check(locale):
    """Check whether *locale* is installed (exact match against the
    output of `locale -a`)."""
    found = sudo("locale -a | egrep '^%s$' ; true" % (locale,))
    return found == locale
def locale_ensure(locale):
    """Ensures the given locale is installed (Ubuntu-specific, per the
    module note above)."""
    if locale_check(locale):
        return
    with fabric.context_managers.settings(warn_only=True):
        # warn_only: the language pack may already be partially present.
        sudo("/usr/share/locales/install-language-pack %s" % (locale,))
    sudo("dpkg-reconfigure locales")
# Sets up the default options so that @dispatch'ed functions work
# eval() only ever sees "select_" + a key of the module-defined
# DEFAULT_OPTIONS dict (e.g. select_package), so no untrusted input
# reaches it.
for option, value in DEFAULT_OPTIONS.items():
    eval("select_" + option)(value)
# EOF - vim: ts=4 sw=4 noet
| 32.445993 | 170 | 0.628078 |
f04ae0f96ffa44f86da23dfad82a859d0ec77f66 | 3,369 | py | Python | profiles_project/settings.py | SahilShingari/profiles-rest-api | cae36c8ea340c78b61727cd4ec194b3ec2a53019 | [
"MIT"
] | null | null | null | profiles_project/settings.py | SahilShingari/profiles-rest-api | cae36c8ea340c78b61727cd4ec194b3ec2a53019 | [
"MIT"
] | 6 | 2019-12-05T00:33:17.000Z | 2021-06-10T19:04:56.000Z | profiles_project/settings.py | SahilShingari/profiles-rest-api | cae36c8ea340c78b61727cd4ec194b3ec2a53019 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uy97q&i0z1pxh=(ym8ihi4k!jroed7f$tbv^i_0h2(i97$4e-_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
ALLOWED_HOSTS = [
'ec2-18-130-122-213.eu-west-2.compute.amazonaws.com',
'127.0.0.1'
]
# Application definition
# Django contrib apps plus Django REST framework (with token auth) and
# the local profiles_api app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'profiles_api',
]
# Request/response middleware, applied in order.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
# Template engine configuration (per-app template directories enabled).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): SQLite is fine for development; use a server database in
# production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Use the custom user model from profiles_api instead of the default
# django.contrib.auth User.
AUTH_USER_MODEL = 'profiles_api.UserProfile'
# Directory that `collectstatic` gathers static assets into.
STATIC_ROOT = 'static/'
730f2c1d58571063fe02a5387733ac0fc410f7da | 81 | py | Python | setup.py | derek1906/YouTube2Spotify | d16244a0a7327195261c8f045c7c8125c8d11866 | [
"MIT"
] | 1 | 2017-11-24T14:08:31.000Z | 2017-11-24T14:08:31.000Z | setup.py | derek1906/YouTube2Spotify | d16244a0a7327195261c8f045c7c8125c8d11866 | [
"MIT"
] | null | null | null | setup.py | derek1906/YouTube2Spotify | d16244a0a7327195261c8f045c7c8125c8d11866 | [
"MIT"
] | null | null | null | """Setup"""
from setuptools import setup
setup(name="apis", packages=["apis"])
| 13.5 | 37 | 0.679012 |
c36d518ef32ca854db4a7af849f2b9612de92543 | 4,177 | py | Python | package/spack-gettext/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-gettext/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-gettext/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gettext(AutotoolsPackage):
    """GNU internationalization (i18n) and localization (l10n) library."""
    homepage = "https://www.gnu.org/software/gettext/"
    url = "http://ftpmirror.gnu.org/gettext/gettext-0.19.7.tar.xz"
    version('0.19.8.1', 'df3f5690eaa30fd228537b00cb7b7590')
    version('0.19.7', 'f81e50556da41b44c1d59ac93474dca5')
    # Recommended variants
    variant('curses', default=True, description='Use libncurses')
    variant('libxml2', default=True, description='Use libxml2')
    variant('git', default=True, description='Enable git support')
    variant('tar', default=True, description='Enable tar support')
    variant('bzip2', default=True, description='Enable bzip2 support')
    variant('xz', default=True, description='Enable xz support')
    # Optional variants
    variant('libunistring', default=False, description='Use libunistring')
    # Recommended dependencies
    depends_on('ncurses', when='+curses')
    depends_on('libxml2', when='+libxml2')
    # Java runtime and compiler (e.g. GNU gcj or kaffe)
    # C# runtime and compiler (e.g. pnet or mono)
    depends_on('tar', when='+tar')
    # depends_on('gzip', when='+gzip')
    depends_on('bzip2', when='+bzip2')
    depends_on('xz', when='+xz')
    # Optional dependencies
    # depends_on('glib') # circular dependency?
    # depends_on('libcroco@0.6.1:')
    depends_on('libunistring', when='+libunistring')
    # depends_on('cvs')
    patch('test-verify-parallel-make-check.patch', when='@:0.19.8.1')
    def configure_args(self):
        """Build the ./configure argument list from the active variants."""
        spec = self.spec
        # Baseline: disable optional language bindings and use bundled
        # copies of glib/gettext/libcroco.
        config_args = [
            '--disable-java',
            '--disable-csharp',
            '--with-included-glib',
            '--with-included-gettext',
            '--with-included-libcroco',
            '--without-emacs',
            '--with-lispdir=%s/emacs/site-lisp/gettext' % self.prefix.share,
            '--without-cvs'
        ]
        if '+curses' in spec:
            config_args.append('--with-ncurses-prefix={0}'.format(
                spec['ncurses'].prefix))
        else:
            config_args.append('--disable-curses')
        if '+libxml2' in spec:
            # Point the build at Spack's libxml2 and bake its lib dir into
            # the rpath so the installed binaries find it at run time.
            config_args.append('CPPFLAGS=-I{0}/include'.format(
                spec['libxml2'].prefix))
            config_args.append('LDFLAGS=-L{0} -Wl,-rpath,{0}'.format(
                spec['libxml2'].libs.directories[0]))
        else:
            config_args.append('--with-included-libxml')
        if '+bzip2' not in spec:
            config_args.append('--without-bzip2')
        if '+xz' not in spec:
            config_args.append('--without-xz')
        if '+libunistring' in spec:
            config_args.append('--with-libunistring-prefix={0}'.format(
                spec['libunistring'].prefix))
        else:
            config_args.append('--with-included-libunistring')
        return config_args
| 39.037383 | 78 | 0.618865 |
1c69eb1ccdf56dde4659304634fd471f30e86c9a | 14,641 | py | Python | morecantile/scripts/cli.py | geospatial-jeff/morecantile | 4f1041daae3cadb6dcf6c11eee515c521b637665 | [
"MIT"
] | null | null | null | morecantile/scripts/cli.py | geospatial-jeff/morecantile | 4f1041daae3cadb6dcf6c11eee515c521b637665 | [
"MIT"
] | null | null | null | morecantile/scripts/cli.py | geospatial-jeff/morecantile | 4f1041daae3cadb6dcf6c11eee515c521b637665 | [
"MIT"
] | null | null | null | """Morecantile command line interface"""
import json
import logging
import click
from mercantile.scripts import configure_logging, coords, iter_lines, normalize_input
from rasterio.crs import CRS
import morecantile
logger = logging.getLogger(__name__)
def normalize_source(input):
    """Yield features from GeoJSON source.

    Supports two input framings: an RS (``\\x1e``) delimited JSON text
    sequence, detected from the first line, or plain newline-delimited
    JSON. Returns a generator of parsed JSON objects.
    """
    lines = iter(normalize_input(input))
    first_line = next(lines)

    # If input is RS-delimited JSON sequence.
    if first_line.startswith(u"\x1e"):

        def feature_gen():
            # Accumulate lines of one (possibly pretty-printed) record
            # until the next RS marker, then parse and emit it.
            buffer = first_line.strip(u"\x1e")
            for line in lines:
                if not line.startswith(u"\x1e"):
                    buffer += line
                    continue
                if buffer:
                    yield json.loads(buffer)
                buffer = line.strip(u"\x1e")
            # Flush the final record once the input is exhausted.
            yield json.loads(buffer)

        return feature_gen()

    def feature_gen():
        # Newline-delimited: every line is a complete JSON document.
        yield json.loads(first_line)
        for line in lines:
            yield json.loads(line)

    return feature_gen()
# The CLI command group.
@click.group(help="Command line interface for the Morecantile Python package.")
@click.option("--verbose", "-v", count=True, help="Increase verbosity.")
@click.option("--quiet", "-q", count=True, help="Decrease verbosity.")
@click.version_option(version=morecantile.__version__, message="%(version)s")
@click.pass_context
def cli(ctx, verbose, quiet):
    """Execute the main morecantile command"""
    # Net verbosity: each -v adds one, each -q subtracts one.
    verbosity = verbose - quiet
    configure_logging(verbosity)
    # Share state with subcommands through the click context object.
    ctx.obj = {}
    ctx.obj["verbosity"] = verbosity
################################################################################
# The shapes command.
@cli.command(short_help="Print the shapes of tiles as GeoJSON.")
# This input is either a filename, stdin, or a string.
@click.argument("input", default="-", required=False)
@click.option(
    "--identifier",
    type=click.Choice(morecantile.tms.list()),
    default="WebMercatorQuad",
    help="TileMatrixSet identifier.",
)
# Coordinate precision option.
@click.option(
    "--precision", type=int, default=None, help="Decimal precision of coordinates."
)
# JSON formatting options.
@click.option(
    "--indent", default=None, type=int, help="Indentation level for JSON output"
)
@click.option(
    "--compact/--no-compact", default=False, help="Use compact separators (',', ':')."
)
@click.option(
    "--projected/--geographic",
    "projected",
    default=False,
    help="Output coordinate system",
)
@click.option(
    "--seq",
    is_flag=True,
    default=False,
    help="Write a RS-delimited JSON sequence (default is LF).",
)
# GeoJSON feature (default) or collection switch. Meaningful only
# when --x-json-seq is used.
@click.option(
    "--feature",
    "output_mode",
    flag_value="feature",
    default=True,
    help="Output as sequence of GeoJSON features (the default).",
)
@click.option(
    "--bbox",
    "output_mode",
    flag_value="bbox",
    help="Output as sequence of GeoJSON bbox arrays.",
)
@click.option(
    "--collect",
    is_flag=True,
    default=False,
    help="Output as a GeoJSON feature collections.",
)
# Optionally write out bboxen in a form that goes
# straight into GDAL utilities like gdalwarp.
@click.option(
    "--extents/--no-extents",
    default=False,
    help="Write shape extents as ws-separated strings (default is " "False).",
)
# Optionally buffer the shapes by shifting the x and y values of each
# vertex by a constant number of decimal degrees or meters (depending
# on whether --geographic or --mercator is in effect).
@click.option(
    "--buffer",
    type=float,
    default=None,
    help="Shift shape x and y values by a constant number",
)
@click.pass_context
def shapes(
    ctx,
    input,
    identifier,
    precision,
    indent,
    compact,
    projected,
    seq,
    output_mode,
    collect,
    extents,
    buffer,
):
    """
    Reads one or more Web Mercator tile descriptions
    from stdin and writes either a GeoJSON feature collection (the
    default) or a JSON sequence of GeoJSON features/collections to
    stdout.

    Input may be a compact newline-delimited sequences of JSON or
    a pretty-printed ASCII RS-delimited sequence of JSON (like
    https://tools.ietf.org/html/rfc8142 and
    https://tools.ietf.org/html/rfc7159).

    Tile descriptions may be either an [x, y, z] array or a JSON
    object of the form {"tile": [x, y, z], "properties": {"name": "foo", ...}}

    In the latter case, the properties object will be used to update
    the properties object of the output feature.
    """
    # Resolve the tile scheme used to compute tile geometries.
    tms = morecantile.tms.get(identifier)

    # Keyword arguments forwarded to every json.dumps call below.
    dump_kwds = {"sort_keys": True}
    if indent:
        dump_kwds["indent"] = indent
    if compact:
        dump_kwds["separators"] = (",", ":")

    src = normalize_input(input)
    features = []
    # Running x/y extremes across all tiles, used for the collection bbox.
    col_xs = []
    col_ys = []
    # NOTE(review): loop index `i` is unused.
    for i, line in enumerate(iter_lines(src)):
        obj = json.loads(line)
        # Accept either the object form {"tile": [...], ...} or a bare array.
        if isinstance(obj, dict):
            x, y, z = obj["tile"][:3]
            props = obj.get("properties")
            fid = obj.get("id")
        elif isinstance(obj, list):
            x, y, z = obj[:3]
            props = {}
            fid = None
        else:
            raise click.BadParameter("{0}".format(obj), param=input, param_hint="input")
        feature = tms.feature(
            (x, y, z),
            fid=fid,
            props=props,
            projected=projected,
            buffer=buffer,
            precision=precision,
        )
        bbox = feature["bbox"]
        w, s, e, n = bbox
        col_xs.extend([w, e])
        col_ys.extend([s, n])
        # Three output modes: accumulate for a collection, bare extents,
        # or emit each feature/bbox immediately (optionally RS-delimited).
        if collect:
            features.append(feature)
        elif extents:
            click.echo(" ".join(map(str, bbox)))
        else:
            if seq:
                click.echo(u"\x1e")
            if output_mode == "bbox":
                click.echo(json.dumps(bbox, **dump_kwds))
            elif output_mode == "feature":
                click.echo(json.dumps(feature, **dump_kwds))
    # Emit everything as one FeatureCollection when --collect was given.
    if collect and features:
        bbox = [min(col_xs), min(col_ys), max(col_xs), max(col_ys)]
        click.echo(
            json.dumps(
                {"type": "FeatureCollection", "bbox": bbox, "features": features},
                **dump_kwds
            )
        )
################################################################################
# The tiles command.
@cli.command(
    short_help=(
        "Print tiles that overlap or contain a lng/lat point, "
        "bounding box, or GeoJSON objects."
    )
)
# Mandatory Mercator zoom level argument.
@click.argument("zoom", type=int, default=-1)
# This input is either a filename, stdin, or a string.
# Has to follow the zoom arg.
@click.argument("input", default="-", required=False)
@click.option(
    "--identifier",
    type=click.Choice(morecantile.tms.list()),
    default="WebMercatorQuad",
    help="TileMatrixSet identifier.",
)
@click.option(
    "--seq/--lf",
    default=False,
    help="Write a RS-delimited JSON sequence (default is LF).",
)
@click.pass_context
def tiles(ctx, zoom, input, identifier, seq):
    """
    Lists TMS tiles at ZOOM level intersecting
    GeoJSON [west, south, east, north] bounding boxen, features, or
    collections read from stdin. Output is a JSON
    [x, y, z] array.

    Input may be a compact newline-delimited sequences of JSON or
    a pretty-printed ASCII RS-delimited sequence of JSON (like
    https://tools.ietf.org/html/rfc8142 and
    https://tools.ietf.org/html/rfc7159).

    Example:

    $ echo "[-105.05, 39.95, -105, 40]" | morecantiles tiles 12

    Output:

    [852, 1550, 12]
    [852, 1551, 12]
    [853, 1550, 12]
    [853, 1551, 12]
    """
    tms = morecantile.tms.get(identifier)
    for obj in normalize_source(input):
        if isinstance(obj, list):
            # Bare array: either [lng, lat] (duplicated into a degenerate
            # bbox) or a full [west, south, east, north] bbox.
            bbox = obj
            if len(bbox) == 2:
                bbox += bbox
            if len(bbox) != 4:
                raise click.BadParameter(
                    "{0}".format(bbox), param=input, param_hint="input"
                )
        elif isinstance(obj, dict):
            # GeoJSON object: use its bbox if present, otherwise compute
            # one from the coordinates of its feature(s).
            if "bbox" in obj:
                bbox = obj["bbox"]
            else:
                box_xs = []
                box_ys = []
                for feat in obj.get("features", [obj]):
                    lngs, lats = zip(*list(coords(feat)))
                    box_xs.extend([min(lngs), max(lngs)])
                    box_ys.extend([min(lats), max(lats)])
                bbox = min(box_xs), min(box_ys), max(box_xs), max(box_ys)
        west, south, east, north = bbox
        epsilon = 1.0e-10
        if east != west and north != south:
            # 2D bbox
            # shrink the bounds a small amount so that
            # shapes/tiles round trip.
            west += epsilon
            south += epsilon
            east -= epsilon
            north -= epsilon
        for tile in tms.tiles(west, south, east, north, [zoom], truncate=False):
            vals = (tile.x, tile.y, zoom)
            output = json.dumps(vals)
            if seq:
                click.echo(u"\x1e")
            click.echo(output)
################################################################################
# The tms command.
@cli.command(short_help="Print TileMatrixSet JSON document.")
@click.option(
    "--identifier",
    type=click.Choice(morecantile.tms.list()),
    help="TileMatrixSet identifier.",
    required=True,
)
def tms(identifier):
    """Print TMS JSON."""
    # NOTE(review): the local variable shadows the command function's own
    # name `tms`; harmless because the command is only invoked via click.
    tms = morecantile.tms.get(identifier)
    click.echo(tms.json(exclude_none=True))
################################################################################
# The custom command.
@cli.command(short_help="Create Custom TileMatrixSet")
@click.option(
    "--epsg", type=int, help="EPSG number.", required=True,
)
@click.option(
    "--extent",
    type=float,
    nargs=4,
    help="left, bottom, right, top Bounding box of the Tile Matrix Set.",
    required=True,
)
@click.option(
    "--name",
    type=str,
    help="Identifier of the custom TMS.",
    default="CustomTileMatrixSet",
)
@click.option("--minzoom", type=int, default=0, help="Minumum Zoom level.")
@click.option("--maxzoom", type=int, default=24, help="Maximum Zoom level.")
@click.option("--tile-width", type=int, default=256, help="Width of each tile.")
@click.option("--tile-height", type=int, default=256, help="Height of each tile.")
@click.option(
    "--extent-epsg", type=int, help="EPSG number for the bounding box.",
)
def custom(epsg, extent, name, minzoom, maxzoom, tile_width, tile_height, extent_epsg):
    """Create Custom TMS."""
    # The extent may be given in a different CRS than the grid itself;
    # when --extent-epsg is omitted, None lets the library use its default.
    extent_crs = CRS.from_epsg(extent_epsg) if extent_epsg else None
    tms = morecantile.TileMatrixSet.custom(
        extent,
        CRS.from_epsg(epsg),
        identifier=name,
        minzoom=minzoom,
        maxzoom=maxzoom,
        tile_width=tile_width,
        tile_height=tile_height,
        extent_crs=extent_crs,
    )
    # Print the resulting TileMatrixSet as JSON on stdout.
    click.echo(tms.json(exclude_none=True))
################################################################################
# The tms_to_geojson command.
@cli.command(short_help="Print TileMatrixSet MatrixSet as GeoJSON.")
@click.argument("input", type=click.File(mode="r"), default="-", required=False)
@click.option("--level", type=int, required=True, help="Zoom/Matrix level.")
# Coordinate precision option.
@click.option(
    "--precision", type=int, default=None, help="Decimal precision of coordinates."
)
# JSON formatting options.
@click.option(
    "--indent", default=None, type=int, help="Indentation level for JSON output"
)
@click.option(
    "--compact/--no-compact", default=False, help="Use compact separators (',', ':')."
)
@click.option(
    "--projected/--geographic",
    "projected",
    default=False,
    help="Output coordinate system",
)
@click.option(
    "--seq",
    is_flag=True,
    default=False,
    help="Write a RS-delimited JSON sequence (default is LF).",
)
# GeoJSON feature (default) or collection switch. Meaningful only
# when --x-json-seq is used.
@click.option(
    "--feature",
    "output_mode",
    flag_value="feature",
    default=True,
    help="Output as sequence of GeoJSON features (the default).",
)
@click.option(
    "--bbox",
    "output_mode",
    flag_value="bbox",
    help="Output as sequence of GeoJSON bbox arrays.",
)
@click.option(
    "--collect",
    is_flag=True,
    default=False,
    help="Output as a GeoJSON feature collections.",
)
# Optionally write out bboxen in a form that goes
# straight into GDAL utilities like gdalwarp.
@click.option(
    "--extents/--no-extents",
    default=False,
    help="Write shape extents as ws-separated strings (default is " "False).",
)
# Optionally buffer the shapes by shifting the x and y values of each
# vertex by a constant number of decimal degrees or meters (depending
# on whether --geographic or --mercator is in effect).
@click.option(
    "--buffer",
    type=float,
    default=None,
    help="Shift shape x and y values by a constant number",
)
def tms_to_geojson(
    input,
    level,
    precision,
    indent,
    compact,
    projected,
    seq,
    output_mode,
    collect,
    extents,
    buffer,
):
    """Print TMS document as GeoJSON.

    Reads a TileMatrixSet JSON document from INPUT and emits the shape of
    every tile of the matrix at --level, using the same output modes as
    the `shapes` command (feature/bbox/extents, sequence or collection).
    """
    # Parse the TMS document and select the requested matrix level.
    tms = morecantile.TileMatrixSet(**json.load(input))
    matrix = tms.matrix(level)

    # Keyword arguments forwarded to every json.dumps call below.
    dump_kwds = {"sort_keys": True}
    if indent:
        dump_kwds["indent"] = indent
    if compact:
        dump_kwds["separators"] = (",", ":")

    features = []
    # Running x/y extremes across all tiles, used for the collection bbox.
    col_xs = []
    col_ys = []
    # Walk every tile of the matrix at this level.
    for x in range(0, matrix.matrixWidth):
        for y in range(0, matrix.matrixHeight):
            feature = tms.feature(
                (x, y, level), projected=projected, buffer=buffer, precision=precision,
            )
            bbox = feature["bbox"]
            w, s, e, n = bbox
            col_xs.extend([w, e])
            col_ys.extend([s, n])
            # Same three output modes as the `shapes` command.
            if collect:
                features.append(feature)
            elif extents:
                click.echo(" ".join(map(str, bbox)))
            else:
                if seq:
                    click.echo(u"\x1e")
                if output_mode == "bbox":
                    click.echo(json.dumps(bbox, **dump_kwds))
                elif output_mode == "feature":
                    click.echo(json.dumps(feature, **dump_kwds))
    # Emit everything as one FeatureCollection when --collect was given.
    if collect and features:
        bbox = [min(col_xs), min(col_ys), max(col_xs), max(col_ys)]
        feature_collection = {
            "type": "FeatureCollection",
            "bbox": bbox,
            "features": features,
        }
        click.echo(json.dumps(feature_collection, **dump_kwds))
| 29.458753 | 88 | 0.587255 |
13df792e3fccd2f930c81cb349eb146d2435498e | 1,480 | py | Python | pycon/finaid/context_processors.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | null | null | null | pycon/finaid/context_processors.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | null | null | null | pycon/finaid/context_processors.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | null | null | null | from pycon.finaid.utils import applications_open, has_application, is_reviewer,\
offer_accepted, has_withdrawn_application
def financial_aid(request):
    """Context processor exposing show/hide flags for the financial-aid UI.

    Returns one entry per button/form plus an aggregate
    ``show_financial_aid_section`` flag that toggles the whole section.
    """
    user = request.user
    accepting = applications_open()
    # The user's application, if one exists (None otherwise).
    application = user.financial_aid if has_application(user) else None

    ctx = {
        "show_finaid_apply_button": accepting and (not application or has_withdrawn_application(user)),
        "show_finaid_edit_button": application and application.show_edit_button,
        "show_finaid_status_button": application and application.show_status_button,
        "show_finaid_review_button": is_reviewer(user),
        "show_finaid_download_button": is_reviewer(user),
        # `and False` forces the receipt form off while still evaluating
        # offer_accepted — preserved as-is (NOTE(review): looks intentional).
        "show_finaid_receipt_form": offer_accepted(user) and False,
        "show_finaid_withdraw_button": application and application.show_withdraw_button,
        "show_finaid_accept_button": application and application.show_accept_button,
        "show_finaid_decline_button": application and application.show_decline_button,
        "show_finaid_request_more_button": application and application.show_request_more_button,
        "show_finaid_provide_info_button": application and application.show_provide_info_button,
    }
    # Show the section if any of its primary entry points is visible.
    ctx["show_financial_aid_section"] = (
        ctx["show_finaid_apply_button"]
        or ctx["show_finaid_edit_button"]
        or ctx["show_finaid_status_button"]
        or ctx["show_finaid_review_button"]
    )
    return ctx
| 47.741935 | 106 | 0.762162 |
db4dd58e7aec13e04a2fce9a7152df4053ae8e0c | 4,477 | py | Python | tests/test_client_server.py | zsheep5/pyModbusTCP | 1f12010bffae56e81ae406690008213e34d03768 | [
"MIT"
] | 177 | 2015-02-25T19:00:35.000Z | 2022-03-31T16:58:22.000Z | tests/test_client_server.py | zsheep5/pyModbusTCP | 1f12010bffae56e81ae406690008213e34d03768 | [
"MIT"
] | 52 | 2015-01-22T12:18:39.000Z | 2022-03-24T16:39:24.000Z | tests/test_client_server.py | zsheep5/pyModbusTCP | 1f12010bffae56e81ae406690008213e34d03768 | [
"MIT"
] | 90 | 2015-01-19T15:44:19.000Z | 2022-03-31T16:58:24.000Z | # -*- coding: utf-8 -*-
import unittest
from random import randint, getrandbits
from pyModbusTCP.server import ModbusServer
from pyModbusTCP.client import ModbusClient
class TestModbusClient(unittest.TestCase):
    """Unit tests for ModbusClient constructor validation and accessors."""

    def test_except_init_host(self):
        # should raise an exception for bad hostname
        self.assertRaises(ValueError, ModbusClient, host='wrong@host')

    def test_except_init_port(self):
        # should raise an exception for bad port
        self.assertRaises(ValueError, ModbusClient, port=-1)

    def test_except_unit_id(self):
        # should raise an exception for bad unit_id
        self.assertRaises(ValueError, ModbusClient, unit_id=420)

    def test_host(self):
        # test valid/invalid cases for host()
        c = ModbusClient()
        self.assertEqual(c.host(), 'localhost', 'default host is localhost')
        self.assertEqual(c.host('wrong@host'), None)
        self.assertEqual(c.host('my.good.host'), 'my.good.host')
        self.assertEqual(c.host('127.0.0.1'), '127.0.0.1')
        self.assertEqual(c.host('::1'), '::1')

    def test_port(self):
        # test valid/invalid cases for port()
        c = ModbusClient()
        self.assertEqual(c.port(), 502, 'default modbus/TCP port is 502')
        self.assertEqual(c.port(-1), None)
        self.assertEqual(c.port(42), 42)

    def test_debug(self):
        # test valid/invalid cases for debug()
        c = ModbusClient()
        self.assertEqual(c.debug(), False, 'debug default is off')
        self.assertEqual(c.debug(False), False)
        self.assertEqual(c.debug(True), True)

    def test_unit_id(self):
        # test valid/invalid cases for unit_id()
        c = ModbusClient()
        self.assertEqual(c.unit_id(), 1, 'default unit_id is 1')
        self.assertEqual(c.unit_id(42), 42)
        self.assertEqual(c.unit_id(0), 0)
        self.assertEqual(c.unit_id(420), None)
# TODO improve this basic test
class TestClientServer(unittest.TestCase):
    """End-to-end tests driving a ModbusClient against an in-process server."""

    def setUp(self):
        """Start a background Modbus server and connect a client to it."""
        # modbus server (no_block=True runs the server without blocking)
        self.server = ModbusServer(port=5020, no_block=True)
        self.server.start()
        # modbus client
        self.client = ModbusClient(port=5020)
        self.client.open()

    def tearDown(self):
        """Release network resources so the next test can rebind port 5020."""
        self.client.close()
        # Bug fix: the server started in setUp was never shut down, leaking
        # its listening socket/thread for the remainder of the test run.
        self.server.stop()

    def test_read_and_write(self):
        """Exercise register (word) and coil/discrete-input (bit) round trips."""
        # word space
        self.assertEqual(self.client.read_holding_registers(0), [0], 'Default value is 0 when server start')
        self.assertEqual(self.client.read_input_registers(0), [0], 'Default value is 0 when server start')
        # single read/write
        self.assertEqual(self.client.write_single_register(0, 0xffff), True)
        self.assertEqual(self.client.read_input_registers(0), [0xffff])
        # multi-write at max size (0x7b registers per Modbus request)
        words_l = [randint(0, 0xffff)] * 0x7b
        self.assertEqual(self.client.write_multiple_registers(0, words_l), True)
        self.assertEqual(self.client.read_holding_registers(0, len(words_l)), words_l)
        self.assertEqual(self.client.read_input_registers(0, len(words_l)), words_l)
        # write over sized request must be rejected (returns None)
        words_l = [randint(0, 0xffff)] * 0x7c
        self.assertEqual(self.client.write_multiple_registers(0, words_l), None)
        # bit space
        self.assertEqual(self.client.read_coils(0), [False], 'Default value is False when server start')
        self.assertEqual(self.client.read_discrete_inputs(0), [False], 'Default value is False when server start')
        # single read/write
        self.assertEqual(self.client.write_single_coil(0, True), True)
        self.assertEqual(self.client.read_coils(0), [True])
        self.assertEqual(self.client.read_discrete_inputs(0), [True])
        # multi-write at min size
        bits_l = [getrandbits(1)] * 0x1
        self.assertEqual(self.client.write_multiple_coils(0, bits_l), True)
        self.assertEqual(self.client.read_coils(0, len(bits_l)), bits_l)
        self.assertEqual(self.client.read_discrete_inputs(0, len(bits_l)), bits_l)
        # multi-write at max size (0x7b0 coils per Modbus request)
        bits_l = [getrandbits(1)] * 0x7b0
        self.assertEqual(self.client.write_multiple_coils(0, bits_l), True)
        self.assertEqual(self.client.read_coils(0, len(bits_l)), bits_l)
        self.assertEqual(self.client.read_discrete_inputs(0, len(bits_l)), bits_l)
        # multi-write over sized request must be rejected (returns None)
        bits_l = [getrandbits(1)] * 0x7b1
        self.assertEqual(self.client.write_multiple_coils(0, bits_l), None)
# Allow running this test module directly (python test_client_server.py).
if __name__ == '__main__':
    unittest.main()
| 41.841121 | 114 | 0.665848 |
61b5a03f43cb1be517957f5b40da103fe25e0918 | 9,961 | py | Python | tests/notrun_test_lme_forecast_verbose.py | ramittal/lme-for-forecast | 17b76f131ae00519ee66d6913246f4ed109e507b | [
"BSD-2-Clause"
] | null | null | null | tests/notrun_test_lme_forecast_verbose.py | ramittal/lme-for-forecast | 17b76f131ae00519ee66d6913246f4ed109e507b | [
"BSD-2-Clause"
] | null | null | null | tests/notrun_test_lme_forecast_verbose.py | ramittal/lme-for-forecast | 17b76f131ae00519ee66d6913246f4ed109e507b | [
"BSD-2-Clause"
] | 1 | 2020-10-23T04:24:47.000Z | 2020-10-23T04:24:47.000Z | import numpy as np
import pytest
from flme.lme_forecast_verbose import LME
import flme.rutils as rutils
class TestLME:
    """Tests for `lme.lme_forecast_verbose` """

    @pytest.mark.parametrize("dimensions", [[5, 4, 3, 2], [5, 4, 1, 2]])
    @pytest.mark.parametrize("random_intercept", [[5, 4, 1, 1], [5, 1, 1, 1], [5, 1, 1, 2]])
    def test_random_intercept(self, dimensions, random_intercept):
        """
        Test if random intercept matrix is built correctly.
        """
        dct = {'intercept': ([random_intercept[j] == dimensions[j] for j in range(len(dimensions))], None)}
        y = np.random.randn(np.prod(dimensions))
        model = LME(dimensions, 1, y, {}, {}, {}, True, dct)
        # Expected Z: Kronecker structure over trailing dims, tiled across dim 0.
        Z = np.tile(rutils.kronecker(random_intercept[1:], dimensions, 1), (dimensions[0], 1))
        model.buildZ()
        assert np.linalg.norm(Z - model.Z) == 0.0

    @pytest.mark.parametrize("dimensions", [[5, 4, 3, 2], [5, 4, 1, 2]])
    @pytest.mark.parametrize("indicator", [[5, 1, 1, 2], [1, 4, 1, 1], [1, 1, 1, 1]])
    def test_indicators(self, dimensions, indicator):
        """
        Test if indicator matrix is built correctly.
        """
        dct = {'intercept': [indicator[j] == dimensions[j] for j in range(len(dimensions))]}
        y = np.random.randn(np.prod(dimensions))
        model = LME(dimensions, 0, y, {}, dct, {}, False, {})
        Z = rutils.kronecker(indicator, dimensions, 0)
        x = np.random.randn(np.prod(indicator))
        # model.X / model.XT must act like multiplication by Z / Z^T.
        assert (np.linalg.norm(model.X(x) - Z.dot(x)) < 1e-10) and \
            (np.linalg.norm(model.XT(y) - np.transpose(Z).dot(y)) < 1e-10)

    @pytest.mark.parametrize("dimensions", [[5, 4, 3, 2], [5, 4, 1, 2]])
    @pytest.mark.parametrize("cov_dim", [[5, 1, 1, 2], [1, 4, 1, 1], [1, 1, 1, 1]])
    def test_repeat_covariate(self, dimensions, cov_dim):
        """Check that a covariate repeated over dimensions yields the expected design matrix."""
        N = np.prod(dimensions)
        X = np.ones((N, 2))  # 1st column is intercept
        cov = np.random.randn(np.prod(cov_dim))
        cov_dim_bool = [cov_dim[i] == dimensions[i] for i in range(len(dimensions))]
        Z = rutils.kronecker(cov_dim, dimensions, 0)
        X[:, 1] = Z.dot(cov)
        beta_true = [1., -0.6]  # beta_0 for intercept
        Y = X.dot(beta_true)
        model = LME(dimensions, 0, Y, {'cov1': (cov, cov_dim_bool)}, {}, {'cov1': [-float('inf'), float('inf')]},
                    True, {})
        beta = np.random.randn(2)
        # Matrix-free application must match the explicit design matrix.
        assert np.linalg.norm(model.X(beta) - X.dot(beta)) < 1e-10
        y = np.random.randn(N)
        assert np.linalg.norm(model.XT(y) - np.transpose(X).dot(y)) < 1e-10
        model._buildX()
        assert np.linalg.norm(model.Xm - X) < 1e-10

    @pytest.mark.parametrize("bounds", [[0, 1], [-1, 1], [-2, -1]])
    def test_global_cov_bounds(self, bounds):
        """The fitted global coefficient must respect the given box bounds."""
        dimensions = [100]
        N = np.prod(dimensions)
        X = np.random.randn(N, 1)
        beta_true = [-0.6]
        Y_true = X.dot(beta_true)
        delta_true = .005
        Y = Y_true + np.random.randn(N) * np.sqrt(delta_true)
        model = LME(dimensions, 0, Y, {'cov1': (X[:, 0], [True] * len(dimensions))}, {},
                    {'cov1': bounds}, False, {})
        model.optimize(inner_print_level=0)
        beta_soln = model.beta_soln[0]
        assert beta_soln >= bounds[0]
        assert beta_soln <= bounds[1]

    def test_post_var_global(self):
        """Both posterior-variance implementations must agree on var_beta."""
        dimensions = [100]
        N = np.prod(dimensions)
        X = np.random.randn(N, 2)
        beta_true = [.5, -0.6]
        Y_true = X.dot(beta_true)
        delta_true = .005
        Y = Y_true + np.random.randn(N)*np.sqrt(delta_true)
        model = LME(dimensions, 0, Y, {'cov1': (X[:, 0], [True]*len(dimensions)),
                                       'cov2': (X[:, 1], [True]*len(dimensions))}, {},
                    {'cov1': [-float('inf'), float('inf')], 'cov2': [-float('inf'), float('inf')]}, False, {})
        model.optimize(inner_print_level=0)
        # With no random effects, gamma should stay at its lower bound.
        assert model.gamma_soln == 1e-8
        model.postVarGlobal()
        varmat1 = model.var_beta
        model._postVarGlobal()
        varmat2 = model.var_beta
        assert np.linalg.norm(varmat1 - varmat2) < 1e-10

    @pytest.mark.parametrize("random_effect", [[200, 1, 1, 1], [200, 2, 1, 1], [200, 1, 3, 1]])
    @pytest.mark.parametrize("sd", [.05, .1, .5])
    def test_random_effect_with_gaussian_prior(self, random_effect, sd):
        """A Gaussian prior on the random effect must shrink gamma and the u estimates."""
        np.random.seed(127)
        dimensions = [200, 2, 3, 2]
        N = np.prod(dimensions)
        Y_true = np.zeros(N)
        Z = rutils.kronecker(random_effect, dimensions, 0)
        u = np.random.randn(np.prod(random_effect))*.5
        # Same random-effect structure, without (dct1) and with (dct2) a prior sd.
        dct1 = {'intercept': ([random_effect[j] == dimensions[j] for j in range(len(dimensions))], None)}
        dct2 = {'intercept': ([random_effect[j] == dimensions[j] for j in range(len(dimensions))], sd)}
        delta_true = 0.005
        Y_true += Z.dot(u)
        Y = Y_true + np.random.randn(N) * np.sqrt(delta_true)
        model1 = LME(dimensions, 1, Y, {},
                     {}, {}, False, random_effects=dct1)
        model1.optimize(inner_print_level=0)
        gamma1 = model1.gamma_soln
        u_var1 = np.var(model1.u_soln)
        model2 = LME(dimensions, 1, Y, {},
                     {}, {}, False, random_effects=dct2)
        model2.optimize(inner_print_level=0)
        gamma2 = model2.gamma_soln
        u_var2 = np.var(model2.u_soln)
        # The prior should shrink both the variance parameter and the u spread.
        assert all(gamma1 > gamma2)
        assert u_var1 > u_var2

    @pytest.mark.parametrize("random_effects", [[[9, 1, 2, 1], [9, 3, 1, 1]], [[9, 1, 1, 1], [9, 1, 2, 1]]])
    def test_draw_random_only(self, random_effects):
        """Posterior draws of random effects must be centered on the point estimates."""
        np.random.seed(127)
        dimensions = [9, 3, 2, 2]
        N = np.prod(dimensions)
        Y_true = np.zeros(N)
        dct = {}
        for i, effect in enumerate(random_effects):
            Z = rutils.kronecker(effect, dimensions, 0)
            u = np.random.randn(np.prod(effect))
            Y_true += Z.dot(u)
            dct['intercept'+str(i)] = ([effect[j] == dimensions[j] for j in range(len(dimensions))], None)
        delta_true = .005
        Y = Y_true + np.random.randn(N)*np.sqrt(delta_true)
        model = LME(dimensions, 1, Y, {},
                    {}, {}, False, random_effects=dct)
        model.optimize(inner_print_level=0)
        model.postVarRandom()
        n_draws = 1000
        _, u_samples = model.draw(n_draws=n_draws)
        # Sample mean of draws for the first effect vs. its solution (5% rel. tol.).
        u1 = np.concatenate([u[:np.prod(random_effects[0][1:])] for u in model.u_soln])
        u1_sample_mean = np.mean(u_samples[0].reshape((-1, n_draws)), axis=1)
        assert np.linalg.norm(u1 - u1_sample_mean)/np.linalg.norm(u1) < .05
        # Same check for the second effect's slice of u_soln.
        u2 = np.concatenate([u[np.prod(random_effects[0][1:]):np.prod(random_effects[0][1:])
                            + np.prod(random_effects[1][1:])] for u in model.u_soln])
        u2_sample_mean = np.mean(u_samples[1].reshape((-1, n_draws)), axis=1)
        assert np.linalg.norm(u2 - u2_sample_mean)/np.linalg.norm(u2) < .05
        model.outputDraws()
        return

    @pytest.mark.parametrize("dimensions", [[5, 4, 3, 2], [5, 4, 1, 2]])
    @pytest.mark.parametrize("random_effects", [[[5, 4, 1, 1], [5, 1, 1, 2]], [[5, 1, 1, 1], [5, 1, 1, 2]], []])
    def test_draw(self, dimensions, random_effects):
        """Joint draws of beta and u must be centered on the fitted solution."""
        np.random.seed(127)
        #dimensions = [9, 3, 2, 2]
        N = np.prod(dimensions)
        X = np.ones((N, 2))
        X[:, 1] = np.random.randn(N)
        beta_true = [1., -0.6]
        Y_true = X.dot(beta_true)
        dct = {}
        for i, effect in enumerate(random_effects):
            Z = rutils.kronecker(effect, dimensions, 0)
            u = np.random.randn(np.prod(effect))
            Y_true += Z.dot(u)
            dct['intercept'+str(i)] = ([effect[j] == dimensions[j] for j in range(len(dimensions))], None)
        delta_true = .005
        Y = Y_true + np.random.randn(N)*np.sqrt(delta_true)
        model = LME(dimensions, 1, Y, {'cov': (X[:, 1], [True]*len(dimensions))},
                    {}, {'cov': [-float('inf'), float('inf')]}, True, random_effects=dct)
        model.optimize(inner_print_level=0)
        model.postVarGlobal()
        if len(random_effects) > 0:
            model.postVarRandom()
        n_draws = 1000
        beta_samples, u_samples = model.draw(n_draws=n_draws)
        beta_sample_mean = np.mean(beta_samples, axis=1)
        # Fixed-effect draws must track the solution within 2% relative error.
        assert np.linalg.norm(beta_sample_mean - model.beta_soln)/np.linalg.norm(model.beta_soln) < .02
        if len(random_effects) > 0:
            u1 = np.concatenate([u[:np.prod(random_effects[0][1:])] for u in model.u_soln])
            u1_sample_mean = np.mean(u_samples[0].reshape((-1, n_draws)), axis=1)
            assert np.linalg.norm(u1 - u1_sample_mean)/np.linalg.norm(u1) < .05
            u2 = np.concatenate([u[np.prod(random_effects[0][1:]):np.prod(random_effects[0][1:])
                                + np.prod(random_effects[1][1:])] for u in model.u_soln])
            u2_sample_mean = np.mean(u_samples[1].reshape((-1, n_draws)), axis=1)
            assert np.linalg.norm(u2 - u2_sample_mean)/np.linalg.norm(u2) < .05
        model.outputDraws()

    @pytest.mark.parametrize("bounds", [[-1, 2], [0, 2], [-1, 0], [2, 3]])
    def test_draw_with_bounds(self, bounds):
        """Beta draws must respect the box bounds placed on the coefficients."""
        dimensions = [5, 4, 3, 2]
        N = np.prod(dimensions)
        X = np.random.randn(N, 2)
        beta_true = [1., -0.6]
        Y_true = X.dot(beta_true)
        delta_true = .005
        Y = Y_true + np.random.randn(N) * np.sqrt(delta_true)
        model = LME(dimensions, 1, Y, {'cov1': (X[:, 0], [True] * len(dimensions)),
                                       'cov2': (X[:, 1], [True] * len(dimensions))}, {},
                    {'cov1': bounds, 'cov2': bounds}, False, {})
        model.optimize(inner_print_level=0)
        model.postVarGlobal()
        n_draws = 1000
        beta_samples = model._drawBeta(n_draws)
        assert beta_samples.shape[1] == n_draws
        # Every draw of every coefficient stays inside [lower, upper].
        assert np.all(beta_samples >= bounds[0]) and np.all(beta_samples <= bounds[1])
c2a2860e107ce3de9e6c67d53a2ef0d9ef61844e | 561 | py | Python | django_tasker/migrations/0009_auto_20170512_2240.py | wooyek/django-tasker | dc411dd0438f523afb770d4f82c4369f93ea34ed | [
"MIT"
] | 2 | 2018-05-05T16:57:59.000Z | 2019-03-05T10:40:54.000Z | django_tasker/migrations/0009_auto_20170512_2240.py | wooyek/django-tasker | dc411dd0438f523afb770d4f82c4369f93ea34ed | [
"MIT"
] | 5 | 2017-03-09T17:30:04.000Z | 2019-04-19T20:22:31.000Z | django_tasker/migrations/0009_auto_20170512_2240.py | wooyek/django-tasker | dc411dd0438f523afb770d4f82c4369f93ea34ed | [
"MIT"
] | 1 | 2022-03-18T20:31:51.000Z | 2022-03-18T20:31:51.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-12 20:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rebuild the composite index set on TaskInfo."""

    dependencies = [
        ('django_tasker', '0008_taskinfo_name'),
    ]

    operations = [
        migrations.AlterIndexTogether(
            name='taskinfo',
            # Composite indexes supporting the queue's status/eta/target lookups.
            index_together=set([('status', 'eta'), ('target', 'status'), ('id', 'target', 'status', 'eta'), ('target', 'eta'), ('id', 'target'), ('id', 'eta', 'status')]),
        ),
    ]
| 28.05 | 172 | 0.566845 |
76d570f7dea44aef275599d24d9fea3e7fbee879 | 490 | py | Python | pessoas/migrations/0012_auto_20200304_2251.py | iamgomes/ichurch | c6569aeb0388102affc8cf17c5fc0c781b2c2732 | [
"MIT"
] | 2 | 2020-02-26T22:43:27.000Z | 2022-01-07T01:26:00.000Z | pessoas/migrations/0012_auto_20200304_2251.py | iamgomes/ichurch | c6569aeb0388102affc8cf17c5fc0c781b2c2732 | [
"MIT"
] | 9 | 2020-02-11T23:41:49.000Z | 2021-09-22T18:26:57.000Z | pessoas/migrations/0012_auto_20200304_2251.py | iamgomes/ichurch | c6569aeb0388102affc8cf17c5fc0c781b2c2732 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.1 on 2020-03-05 01:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop Pessoa.situacao and add a boolean Pessoa.ativo flag."""

    dependencies = [
        ('pessoas', '0011_auto_20200302_0205'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='pessoa',
            name='situacao',
        ),
        migrations.AddField(
            model_name='pessoa',
            name='ativo',
            # New records default to active.
            field=models.BooleanField(default=True),
        ),
    ]
| 21.304348 | 52 | 0.569388 |
1b978030ef56219b744a9a6200471337f5455917 | 1,305 | py | Python | Youtube_Downloader.py | ThunderboltSH/Youtube-Video-Downloader-Program-with-GUI | 5bbd3b246277a1d189feedfba27a48c94f315767 | [
"Apache-2.0"
] | 3 | 2021-05-16T14:38:50.000Z | 2021-05-17T10:01:43.000Z | Youtube_Downloader.py | ThunderboltSH/Youtube-Video-Downloader-Program-with-GUI | 5bbd3b246277a1d189feedfba27a48c94f315767 | [
"Apache-2.0"
] | null | null | null | Youtube_Downloader.py | ThunderboltSH/Youtube-Video-Downloader-Program-with-GUI | 5bbd3b246277a1d189feedfba27a48c94f315767 | [
"Apache-2.0"
] | null | null | null | #Youtube Downloader GUI
#ThunderboltSH
#Limitations - no validation for invalid URLs but a fix will be coming soon
#Program will be reuploaded with more comments later
from tkinter import *
from pytube import YouTube as yt
# Main application window: titled and fixed-size.
root = Tk()
root.title("YouTube Video Downloader")
root.resizable(False, False)
def error():
    """Report a failed download in the shared status label."""
    statusLabel["text"] = "DOWNLOAD UNSUCCESSFUL! INVALID URL!"
def download():
    """Download the first available stream of the URL in the entry box.

    Shows the outcome in the shared status label; an empty or invalid URL
    reports an error instead of crashing the GUI.
    """
    url = urlEntry.get()
    if url == "":
        error()
    else:
        try:
            # First stream = default progressive stream chosen by pytube;
            # saved into the program's working directory.
            yt(url).streams.first().download()
        except Exception:
            # Bug fix (see header TODO): an invalid/unreachable URL used to
            # raise out of the Tk callback; surface it in the UI instead.
            error()
        else:
            statusLabel["text"] = "DOWNLOAD SUCCESSFUL! CHECK THE PROGRAM FOLDER"
# Tk variable backing the URL entry widget.
url = StringVar()
# Example URL for manual testing:
#https://www.youtube.com/watch?v=BWqus0Yp3M8
instructionLabel = Label(root, font=("Agentcy FB",20,"bold"), text="Paste youtube url here:")
instructionLabel.grid(row=0, column=0, sticky=S+N+E+W )
urlEntry = Entry(root, font=("Agentcy FB",20,"bold"), textvariable=url, insertwidth=1, width=70)
urlEntry.grid(row=1, column=0, sticky=S+N+E+W )
downloadButton = Button(root, padx=16, pady=16, font=("Agentcy FB",20,"bold"), text="DOWNLOAD", command=lambda: download())
downloadButton.grid(row=2, column=0, sticky=S+N+E+W )
# Status label updated by download()/error().
statusLabel = Label(root, font=("Agentcy FB",20,"bold"), text="")
statusLabel.grid(row=3, column=0, sticky=S+N+E+W )
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
beee2799112ba9cf8de32c23a028f44687aa167c | 1,310 | py | Python | mysite/urls.py | bayazidtamim/Safaesying-verssion-0.0.0 | eaf86a211a26ea8e47326c15475eb76bb4e42214 | [
"Apache-2.0"
] | null | null | null | mysite/urls.py | bayazidtamim/Safaesying-verssion-0.0.0 | eaf86a211a26ea8e47326c15475eb76bb4e42214 | [
"Apache-2.0"
] | 2 | 2020-07-27T18:42:54.000Z | 2020-07-27T18:42:54.000Z | mysite/urls.py | bayazidtamim/Safaesying-verssion-0.0.0 | eaf86a211a26ea8e47326c15475eb76bb4e42214 | [
"Apache-2.0"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from blog.views.register import RegisterView
from blog.views.about import about,contact
urlpatterns = [
path('', include('blog.urls')),
path('admin/', admin.site.urls),
path('login/', auth_views.LoginView.as_view(template_name='blog/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(next_page='blog:home')),
path('register/', RegisterView.as_view(), name='register'),
path('', include('django.contrib.auth.urls')),
path('/about',about, name='about'),
path('/contact',contact, name='contact'),
]
| 38.529412 | 96 | 0.70687 |
febfa1ac7faa120472fbc6623731ffe2530782c1 | 2,611 | py | Python | src/gtk_helper.py | RipcordSoftware/avancedb-replication-monitor | bfa59cc36f5d8aa383a43ffeb6c82e938625ce34 | [
"MIT"
] | 8 | 2016-03-01T15:29:34.000Z | 2019-06-16T21:15:45.000Z | src/gtk_helper.py | RipcordSoftware/avancedb-replication-monitor | bfa59cc36f5d8aa383a43ffeb6c82e938625ce34 | [
"MIT"
] | 38 | 2015-12-27T20:34:25.000Z | 2016-02-19T15:48:12.000Z | src/gtk_helper.py | RipcordSoftware/replication-monitor | bfa59cc36f5d8aa383a43ffeb6c82e938625ce34 | [
"MIT"
] | null | null | null | import threading
from gi.repository import Gtk, GObject
class GtkHelper:
"""
A class which makes living with GTK and multiple threads slightly easier
"""
@staticmethod
def is_gtk_thread():
"""
Determines if the current thread is the main GTK thread
:return: True if the current thread is the main GTK thread, False otherwise
"""
return threading.current_thread().name is 'MainThread'
@staticmethod
def invoke(func, async=True):
"""
Invokes a callable func on the main GTK thread
:param func: The callable to invoke
:param async: When True the callable will execute asynchronously
:return: if executed on the main thread or synchronously then the returns the result of func, otherwise None
"""
result = None
if GtkHelper.is_gtk_thread():
result = func()
else:
event = threading.Event() if async is not True else None
def task():
nonlocal func, result
result = func()
if event is not None:
event.set()
GObject.idle_add(task)
if event is not None:
event.wait()
return result
@staticmethod
def idle(task):
"""
Adds a task to the Gtk queue for processing
:param task: the task (function/lambda) to run
:return: nothing
"""
GObject.idle_add(task)
@staticmethod
def invoke_func(func):
"""
A decorator for functions which should be run on the main Gtk thread. The function is
executed asynchronously
:param func: The callable to run on the UI thread
:return: nothing
"""
def inner(*args, **kwargs):
GtkHelper.invoke(lambda: func(*args, **kwargs))
return inner
@staticmethod
def invoke_func_sync(func):
"""
A decorator for functions which should be run on the main Gtk thread. If run from a non-UI
thread the caller will block until the function completes
:param func: The callable to run on the UI thread
:return: The value returned by the callable
"""
def inner(*args, **kwargs):
return GtkHelper.invoke(lambda: func(*args, **kwargs), False)
return inner
@staticmethod
def run_dialog(win, message_type, buttons_type, msg):
dialog = Gtk.MessageDialog(win, 0, message_type, buttons_type, msg)
response = dialog.run()
dialog.destroy()
return response
| 30.360465 | 116 | 0.599387 |
1b057b7d04ec9baf1829ee5a6861424dfcb3ff05 | 247 | py | Python | module1-web-application-development-with-flask/assignment1.py | Nburkhal/DS-Unit-3-Sprint-3-Productization-and-Cloud | 64ea058d0584b442b48e091ca3001cd3bf9c9a06 | [
"MIT"
] | null | null | null | module1-web-application-development-with-flask/assignment1.py | Nburkhal/DS-Unit-3-Sprint-3-Productization-and-Cloud | 64ea058d0584b442b48e091ca3001cd3bf9c9a06 | [
"MIT"
] | null | null | null | module1-web-application-development-with-flask/assignment1.py | Nburkhal/DS-Unit-3-Sprint-3-Productization-and-Cloud | 64ea058d0584b442b48e091ca3001cd3bf9c9a06 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/about")
def pred():
return render_template('about.html')
if __name__ == "__main__":
app.run(debug=True) | 16.466667 | 40 | 0.672065 |
01d4e08b18a17c2b9d02b077a6a673c7ee7c4c6b | 6,542 | py | Python | feeder_utilities/rpc_message_processor.py | LandRegistry/feeder-utilities | 580f2ba09a8aa62c52103e46c4c105210c05fcbb | [
"MIT"
] | null | null | null | feeder_utilities/rpc_message_processor.py | LandRegistry/feeder-utilities | 580f2ba09a8aa62c52103e46c4c105210c05fcbb | [
"MIT"
] | null | null | null | feeder_utilities/rpc_message_processor.py | LandRegistry/feeder-utilities | 580f2ba09a8aa62c52103e46c4c105210c05fcbb | [
"MIT"
] | 1 | 2021-04-11T05:25:09.000Z | 2021-04-11T05:25:09.000Z | from feeder_utilities.health import FeederHealth
from feeder_utilities.exceptions import RpcMessageProcessingException
from feeder_utilities.dependencies.rabbitmq import ErrorQueueClient
from feeder_utilities.dependencies import rabbitmq
from feeder_utilities.dependencies.register import Register
class RpcMessageProcessor:
def __init__(self, logger, app_name, integrity_check, rabbitmq_url, queue_name, rpc_queue_name, error_queue_name,
register_url, routing_key):
self.logger = logger
self.app_name = app_name
self.integrity_check = integrity_check
self.rabbitmq_url = rabbitmq_url
self.queue_name = queue_name
self.rpc_queue_name = rpc_queue_name
self.error_queue_name = error_queue_name
self.register_url = register_url
self.routing_key = routing_key
def startup_integrity_check(self, requests):
self.logger.info("Checking database integrity")
max_entry = Register(self.register_url, self.routing_key, requests).max_entry()
missing_entries = self.integrity_check.check_integrity(max_entry)
result = None
if missing_entries:
self.logger.error("Detected missing_entries: {}".format(missing_entries))
self.logger.error("Requesting missing_entries from register")
result = Register(self.register_url,
self.routing_key, requests).republish_entries(missing_entries)
return result
def process_rpc_message(self, body, message, requests):
self.logger.info("Processing rpc message")
reply_to = message.properties.get('reply_to', None)
correlation_id = message.properties.get('correlation_id', None)
if not reply_to or not correlation_id:
self.logger.error("Message must have reply_to and correlation_id")
message.reject()
else:
try:
rpc_response = None
if 'method' not in body or not body['method']:
raise RpcMessageProcessingException("Message body must contain method name")
if body['method'] == 'health':
self.logger.info("Processing health check message")
rpc_result = FeederHealth(self.app_name, self.rabbitmq_url, self.queue_name, self.rpc_queue_name,
self.error_queue_name).generate_health_msg()
if 'status' in rpc_result and rpc_result['status'] == 'BAD':
self.logger.error("Feeder reporting BAD status: {}".format(rpc_result))
elif body['method'] == 'integrity_check':
self.logger.info("Detecting gaps in entry sequence")
max_entry = Register(self.register_url, self.routing_key, requests).max_entry()
rpc_result = {"missing_entries": self.integrity_check.check_integrity(max_entry)}
if rpc_result["missing_entries"]:
self.logger.error("Detected missing_entries: {}".format(rpc_result))
elif body['method'] == 'integrity_fix':
self.logger.info("Fixing database integrity")
max_entry = Register(self.register_url, self.routing_key, requests).max_entry()
missing_entries = self.integrity_check.check_integrity(max_entry)
if missing_entries:
self.logger.error("Detected missing_entries: {}".format(missing_entries))
self.logger.error("Requesting missing_entries from register")
rpc_result = Register(self.register_url,
self.routing_key, requests).republish_entries(missing_entries)
else:
self.logger.info("No missing entries detected")
rpc_result = {"entries_not_found": [], "republished_entries": []}
elif body['method'] == 'dump_error_queue':
self.logger.info("Dumping contents of error queue")
rpc_result = {
"error_messages": ErrorQueueClient(self.logger, self.rabbitmq_url, self.queue_name,
self.error_queue_name).retrieve_messages()}
self.logger.info("Dumping {} error messages".format(len(rpc_result)))
elif body['method'] == 'requeue_errors':
self.logger.info("Requeuing contents of error queue")
rpc_result = {
"requeued_messages": ErrorQueueClient(self.logger, self.rabbitmq_url, self.queue_name,
self.error_queue_name).requeue_messages()}
self.logger.info("Requeued {} error messages".format(len(rpc_result)))
elif body['method'] == 'delete_errors':
self.logger.info("Deleting contents of error queue")
rpc_result = {
"deleted_messages": ErrorQueueClient(self.logger, self.rabbitmq_url, self.queue_name,
self.error_queue_name).delete_messages()}
self.logger.info("Deleted {} error messages".format(len(rpc_result)))
else:
raise RpcMessageProcessingException("Unknown method '{}'".format(body['method']))
rpc_response = {"success": True, "result": rpc_result, "error": None}
self.logger.info("Publishing rpc response message")
rabbitmq.publish_message(self.logger, rpc_response, self.rabbitmq_url, '', reply_to,
correlation_id=correlation_id)
message.ack()
except Exception as e:
self.logger.exception("Failed to process rpc message")
rpc_response = {
"success": False,
"result": None,
"error": {
"error_message": "Exception occured: {}".format(
repr(e))}}
rabbitmq.publish_message(self.logger, rpc_response, self.rabbitmq_url, '', reply_to,
correlation_id=correlation_id)
self.logger.error("Failure message sent to queue '{}'".format(reply_to))
message.reject()
| 58.936937 | 117 | 0.585906 |
85eb6d4d75c5ec87b9f02732fe9e31db36a596a5 | 6,936 | py | Python | jython_kernel.py | birnam/IJython | 3bd55fe20eeb99e65244e0ee0b4f368f4e53ad02 | [
"MIT"
] | null | null | null | jython_kernel.py | birnam/IJython | 3bd55fe20eeb99e65244e0ee0b4f368f4e53ad02 | [
"MIT"
] | null | null | null | jython_kernel.py | birnam/IJython | 3bd55fe20eeb99e65244e0ee0b4f368f4e53ad02 | [
"MIT"
] | null | null | null | from ipykernel.kernelbase import Kernel
from IPython.paths import locate_profile
from pexpect import replwrap,EOF,spawn
import signal
import re
import os
from distutils.spawn import find_executable
import sys
__version__ = '1.0.1'
class JythonKernel(Kernel):
implementation = 'Jython Kernel'
implementation_version = __version__
language = 'jython'
language_version = '2.7.0'
language_info = {'mimetype': 'text/x-python','name':'jython','file_extension':'.py','codemirror_mode':{'version':2,'name':'text/x-python'},'pygments_lexer':'python','help_links':[{'text':'Jython', 'url': 'www.jython.org'},{'text':'Jython Kernel Help','url':'https://github.com/suvarchal/IJython'}]}
banner = "Jython Kernel"
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self._start_jython()
try:
self.hist_file = os.path.join(locate_profile(),'jython_kernel.hist')
except:
self.hist_file = None
self.log.warn('No default profile found, history unavailable')
self.max_hist_cache = 1000
self.hist_cache = []
def _start_jython(self):
sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
#for some reason kernel needs two excepts with jython executable so using only jython.jar
try:
if not find_executable("jython")==None:
self._executable=find_executable("jython")
elif "JYTHON_HOME" in os.environ and "JAVA_HOME" in os.environ :
self._executable=os.environ['JAVA_HOME']+"/bin/java -jar "+os.environ['JYTHON_HOME']+"/jython.jar"
else:
raise Exception("JYTHON_HOME not set or jython not found")
self._child = spawn(self._executable,timeout = None)
self._child.waitnoecho(True)
self._child.expect(u">>> ")
self._child.setwinsize(600,400)
finally:
signal.signal(signal.SIGINT, sig)
def do_execute(self, code, silent, store_history=False, user_expressions=None,
allow_stdin=False):
code = code.strip()
abort_msg = {'status': 'abort',
'execution_count': self.execution_count}
interrupt = False
try:
output = self.jyrepl(code, timeout=None)
output = '\n'.join([line for line in output.splitlines()])+'\n'
except KeyboardInterrupt:
self._child.sendintr()
output = self._child.before+output+'\n Current Jython cannot interrupt so restarting Jython'
interrupt = True
self._start_jython()
except EOF:
output = self._child.before + 'Reached EOF Restarting Jython'
self._start_jython()
if not silent:
stream_content = {'name': 'stdout', 'text': output}
self.send_response(self.iopub_socket, 'stream', stream_content)
if code.strip() and store_history:
self.hist_cache.append(code.strip())
if interrupt:
return {'status': 'abort', 'execution_count': self.execution_count}
return {'status': 'ok','execution_count': self.execution_count,'payload': [],'user_expressions': {}}
def do_complete(self, code, cursor_pos):
code = code[:cursor_pos]
default = {'matches': [], 'cursor_start': 0,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
if not code or code[-1] == ' ':
return default
tokens = code.split()
if not tokens:
return default
token = tokens[-1]
start = cursor_pos - len(token)
matches = []
if len(re.split(r"[^\w]",token)) > 1:
cmd="dir("+re.split(r"[^\w]",token)[-2]+")"
output=self.jyrepl(cmd,timeout=None)
matches.extend([e for e in re.split(r"[^\w]",output)[2:] if not e.strip()=="" and not e.strip().startswith("__")])
token=re.split(r"[^\w]",token)[-1]
start = cursor_pos - len(token)
else:
cmd=("import sys;sys.builtins.keys()")
output=self.jyrepl(cmd,timeout=None)
matches.extend([e for e in re.split(r"[^\w]",output)[2:] if not e.strip()=="" and not e.strip().startswith("__")])
if not matches:
return default
matches = [m for m in matches if m.startswith(token)]
return {'matches': sorted(matches), 'cursor_start': start,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
def do_history(self,hist_access_type,output,raw,session=None,start=None,stoop=None,n=None,pattern=None,unique=None):
if not self.hist_file:
return {'history':[]}
if not os.path.exists(self.hist_file):
with open(self.hist_file, 'wb') as f:
f.write('')
with open(self.hist_file, 'rb') as f:
history = f.readlines()
history = history[:self.max_hist_cache]
self.hist_cache = history
self.log.debug('**HISTORY:')
self.log.debug(history)
history = [(None, None, h) for h in history]
return {'history': history}
def do_shutdown(self,restart):
try:
self.send("exit()")
except:
self._child.kill(signal.SIGKILL)
return {'status':'ok', 'restart':restart}
def jyrepl(self,code,timeout=None):
out=""
#this if is needed for printing output if command entered is "variable" or fucntions like abc(var) and for code completion
# if (len(re.split(r"\=",code.strip()))==1) and (len(re.split(r"[\ ]",code.strip()))==1):
# code='eval('+repr(code.strip())+')'
# self._child.sendline(code)
# now_prompt=self._child.expect_exact([u">>> ",u"... "])
# if len(self._child.before.splitlines())>1: out+='\n'.join(self._child.before.splitlines()[1:])+'\n'
# now_prompt=self._child.expect_exact([u">>> ",u"... "])
# else:
# code='exec('+repr(code)+')'
# for line in code.splitlines():
# self._child.sendline(line)
# now_prompt=self._child.expect_exact([u">>> ",u"... "])
# if len(self._child.before.splitlines())>1: out+='\n'.join(self._child.before.splitlines()[1:])+'\n'
# now_prompt=self._child.expect_exact([u">>> ",u"... "])
code='exec('+repr(code)+')'
for line in code.splitlines():
self._child.sendline(line)
now_prompt=self._child.expect_exact([u">>> ",u"... "])
if len(self._child.before.splitlines())>1: out+='\n'.join(self._child.before.decode('UTF-8').splitlines()[1:])+'\n'
return out
if __name__ == '__main__':
from ipykernel.kernelapp import IPKernelApp
IPKernelApp.launch_instance(kernel_class=JythonKernel)
| 42.292683 | 302 | 0.581027 |
25a1800a57f62dfe1799b7e50d190497fd37ecd1 | 822 | py | Python | openzgy/__init__.py | equinor/pyzgy | 94cd3d9050c3027d042a83b98779da9182041137 | [
"Apache-2.0"
] | null | null | null | openzgy/__init__.py | equinor/pyzgy | 94cd3d9050c3027d042a83b98779da9182041137 | [
"Apache-2.0"
] | null | null | null | openzgy/__init__.py | equinor/pyzgy | 94cd3d9050c3027d042a83b98779da9182041137 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The purpose of the line below was to have the main part of the API directly
# below the "openzgy" namespace. But this messes with the coverasge report.
#from .api import ZgyReader, SampleDataType, UnitDimension
| 39.142857 | 77 | 0.765207 |
3f85c39e329a824aad3ed9a7b3b7a31257a1b4fc | 2,294 | py | Python | slot_language/object_slot/unet_params.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | slot_language/object_slot/unet_params.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | slot_language/object_slot/unet_params.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from typing import Tuple
import attr
@attr.s(auto_attribs=True)
class SlotAttentionParams:
# model configs
resolution: Tuple[int, int] = (128, 128) # since we not using ViT
num_slots: int = 7 # at most 6 obj per image/video
# dim of slots embedding
slot_size: int = 64
num_iterations: int = 3
# MLP hidden size in Slot Attention
slot_mlp_size: int = 128 # FFN after cross attention
dec_resolution: Tuple[int, int] = (8, 8)
kernel_size: int = 5
enc_channels: Tuple[int, ...] = (64, 64, 64, 64, 64)
dec_channels: Tuple[int, ...] = (64, 64, 64, 64, 64)
# use self-entropy loss to masks
use_entropy_loss: bool = False
entropy_loss_w: float = 1.0
# whether treat bg slot separately
use_bg_sep_slot: bool = False
# pos emb for each slot
use_slot_pos_emb: bool = False
num_pos_slot: int = 4
share_pos_slot: bool = True # slots share the same positional embedding!
# architecture of CLIP pre-trained model
use_clip_vision: bool = False
clip_arch: str = 'ViT-B/32'
clip_vision_channel: int = 64
clip_text_channel: int = 512
enc_pos_enc: bool = True
# Text2Slot model
use_text2slot: bool = True
text2slot_arch: str = 'MLP' # or 'Transformer' or 'DETR'
# for MLP
text2slot_hidden_sizes: Tuple[int] = (512, )
# data
# data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/CLEVR_viewpoint_video_4obj"
# data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/CLEVR_viewpoint_video"
data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/clevr_video/train/"
shuffle_obj: bool = False
# Normalization for natural img or original slot attention one
simple_normalize: bool = True # since we not using ViT
# training settings
gpus: int = 4
lr: float = 0.001
batch_size: int = 64 * 4
val_batch_size: int = 64 * 4
max_epochs: int = 16
num_sanity_val_steps: int = 1
scheduler_gamma: float = 0.5
weight_decay: float = 0.0
num_train_images: Optional[int] = None
num_val_images: Optional[int] = None
is_logger_enabled: bool = True
is_verbose: bool = True
num_workers: int = 6
n_samples: int = 5
warmup_steps_pct: float = 0.02
decay_steps_pct: float = 0.2
| 32.771429 | 87 | 0.672624 |
05032aa8420dca4efde4f65aafc62d5de043cdac | 52 | py | Python | project/ctnotify/__init__.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
] | null | null | null | project/ctnotify/__init__.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
] | null | null | null | project/ctnotify/__init__.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Kay application: ctnotify
| 17.333333 | 27 | 0.615385 |
202f6e4bf55eaf16230d634a607eacd8624379eb | 1,012 | py | Python | derp/__init__.py | bfolie/derp | 2f69f9f2d867988d4433221b24e1fa5c0b4f3bcc | [
"MIT"
] | 1 | 2021-05-27T21:30:10.000Z | 2021-05-27T21:30:10.000Z | derp/__init__.py | bfolie/derp | 2f69f9f2d867988d4433221b24e1fa5c0b4f3bcc | [
"MIT"
] | 1 | 2021-07-13T23:48:22.000Z | 2021-07-13T23:48:22.000Z | derp/__init__.py | bfolie/derp | 2f69f9f2d867988d4433221b24e1fa5c0b4f3bcc | [
"MIT"
] | null | null | null | import ast
from derp.deprecation import PythonDeprecation
"""Tuple of ast node types that could contain deprecation warnings, either on themselves
or on a node some levels down. This (functions and classes) captures the typical cases.
I would not be surprised if there are situations in which a deprecation warning is missed
by this simplification, and this list is eventually expanded.
The safest thing to do would be to walk the entire ast tree. On the modestly-sized library
I tested derp on, walking the entire ast tree for every module took ~80 ms. Limiting the walker
to the relvant node types listed below caused it to take ~30 ms.
"""
RELEVANT_NODE_TYPES = (ast.Module, ast.ClassDef, ast.FunctionDef)
"""List of classes corresponding to ways a developer might mark a deprecation.
This currently only supports the @deprecated decorator from python's deprecation
library. For a discussion of how to add more possibilities, see discussion in deprecation.py
"""
DEPRECATION_TYPE_LIST = [PythonDeprecation]
| 53.263158 | 95 | 0.80336 |
6531c9228e04d21058fe27f765814353ae5c80a7 | 1,730 | py | Python | scraping/drymask.py | Asyikin98/SkinFerm | 72fd1ad6339c96adf5ec154bde566de9eb1472c3 | [
"MIT"
] | null | null | null | scraping/drymask.py | Asyikin98/SkinFerm | 72fd1ad6339c96adf5ec154bde566de9eb1472c3 | [
"MIT"
] | 2 | 2021-02-03T01:55:13.000Z | 2021-04-30T12:46:33.000Z | scraping/drymask.py | Asyikin98/SkinFerm | 72fd1ad6339c96adf5ec154bde566de9eb1472c3 | [
"MIT"
] | null | null | null | import urllib.request
import random
from bs4 import BeautifulSoup
from requests import get
import mysql.connector
conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product")
cursor = conn.cursor()
sql = """INSERT INTO drymask (image, name, price, rating) VALUES (%s, %s, %s, %s)"""
def crawl_url(pageUrl, maskdry_arr):
url = 'https://www.ry.com.au/skin-care/dry-skin.list?pageNumber=1&facetFilters=en_skincareproducttype_content:Mask|averageReviewScore_auto_content:%5B4+TO+5%5D'
page = get(url)
soup = BeautifulSoup(page.text, 'html.parser')
type(soup)
#######################################################for product 1############################################################################
mask = soup.find_all('li', class_='productListProducts_product')
try:
for masks in mask :
first_product_image = masks.find('img')['src']
img_name = random.randrange(1,500)
full_name = str(img_name) + ".jpg"
urllib.request.urlretrieve(first_product_image, full_name)
first_product_name = masks.find("h3",{"class":"productBlock_productName"}).get_text().strip()
first_product_price = masks.find("div",{"class":"productBlock_price"}).get_text().strip()
first_product_rating = masks.find("span",{"class":"visually-hidden productBlock_rating_hiddenLabel"}).get_text().strip()
maskdry_arr.append((first_product_image, first_product_name, first_product_price, first_product_rating))
finally:
return maskdry_arr
maskdry_arr = crawl_url("", [])
print(len(maskdry_arr))
cursor.executemany(sql, maskdry_arr)
conn.commit()
cursor.close()
conn.close()
| 37.608696 | 164 | 0.643353 |
8f9adf575a0c076c066b2ff151012ff9bc8bf5c2 | 2,753 | py | Python | pwn/middleagedheap/solve/solve.py | b01lers/b01lers-ctf-2021 | 1b6eb358bd0a36458fbefc72da2a88d9b3103faf | [
"MIT"
] | 6 | 2021-04-10T10:46:01.000Z | 2022-03-15T08:04:53.000Z | pwn/middleagedheap/solve/solve.py | b01lers/b01lers-ctf-2021 | 1b6eb358bd0a36458fbefc72da2a88d9b3103faf | [
"MIT"
] | 1 | 2021-05-20T03:38:52.000Z | 2021-05-20T03:44:55.000Z | pwn/middleagedheap/solve/solve.py | b01lers/b01lers-ctf-2021 | 1b6eb358bd0a36458fbefc72da2a88d9b3103faf | [
"MIT"
] | 3 | 2021-04-10T12:10:01.000Z | 2021-11-05T18:05:44.000Z | from pwn import *
import time
context.terminal = ['tmux', 'splitw', '-h']
# gcc - libc_start is 0x5bcff0 after the alloc at 0
elf_header_ptr = 0x2edff0
print("elf_header_ptr: {}".format(hex(elf_header_ptr)))
# readelf -S /libc
gnu_hash = elf_header_ptr + 0x00003930
dynsym = elf_header_ptr + 0x00007660
# readelf --dyn-sym /libc
# exit_address = elf_header_ptr + 0x44be0
# root@df0ceb4b0d2e:/home/pwn# readelf --dyn-sym /lib/x86_64-linux-gnu/libc.so.6 | grep exit
# 138: 0000000000044be0 32 FUNC GLOBAL DEFAULT 16 exit@@GLIBC_2.2.5
# entry #138, each entry is 6 4-byte ints
exit_symbol_entry = dynsym + 138 * 6 * 4
# pwndbg> x/5xw 0x7fc23dfc7000 + 0x00007660 + 138 * 6 * 4
# 0x7fc23dfcf350: 0x00003031 0x00100012 0x00044be0 0x00000000
# 0x7fc23dfcf360: 0x00000020
new_exit_entry = b''
new_exit_entry += p32(0x00003031)
new_exit_entry += p32(0x00100012)
#new_exit_entry += p32(0x00044be0) # exit
new_exit_entry += p32(0x000df54f) # one gadget
new_exit_entry += p32(0x00000000)
new_exit_entry += p32(0x00000020)
new_exit_entry += p32(0x00000000)
elf_bitmask_ptr = elf_header_ptr + 0
print('exit offset from input: ', hex(exit_symbol_entry))
print('exit offset from libc: ', hex(exit_symbol_entry - elf_header_ptr))
# p = process('./pwn')
p = remote('172.19.0.3', 1337)
# gdb.attach(p, """
# c
# """)
def allocate(idx, size):
p.sendlineafter("1: alloc; 2: free; 3: write> ", "1")
p.sendlineafter("idx> ", str(idx))
p.sendlineafter("size> ", str(size))
print("Allocated chunk:\n idx {}\n size {}".format(idx, size))
def free(idx):
p.sendlineafter("1: alloc; 2: free; 3: write> ", "2")
p.sendlineafter("idx> ", str(idx))
print("Freed chunk:\n idx {}".format(idx))
def write(idx, offset, content: bytes):
p.sendlineafter("1: alloc; 2: free; 3: write> ", "3")
time.sleep(1)
p.sendlineafter("idx> ", str(idx))
p.sendlineafter("offset> ", str(offset))
p.sendline(content)
print("Wrote to chunk:\n idx: {}, offset: {}, content: {}".format(idx, offset, enhex(content)))
mmap_size = 0xfffe8
free(1) # Just ensure that memset has been called/loaded.
allocate(1, mmap_size) # e8 is max to get size of 0x101000
allocate(2, mmap_size)
allocate(3, mmap_size)
size = 0x100000 * 2
overwrite_amount = 0x15000
write(3, mmap_size, cyclic(8) + p64(0x100000) + p64(size + overwrite_amount + 2))
free(2)
allocate(0, 0x300000)
write(0, exit_symbol_entry, new_exit_entry)
print(p64(0x010220a044103081))
write(0, elf_header_ptr + 0x3940, p64(0x010220a044103081))
write(0, elf_header_ptr + 0x4100, p64(0xf000028c0200130e))
write(0, elf_header_ptr + 0x4228, p64(0x0000000000000089))
write(0, elf_header_ptr + 0x52f8, p64(0x7c967e3e7c93f2a0) + p64(0xa36b4f2b7c967e3e))
p.interactive()
| 32.388235 | 100 | 0.701053 |
49202e93bb60483a4754f707aa8d12e8351e108b | 3,475 | py | Python | app/app/settings.py | ogelinas/recipe-app-api | 5da3b7801eb77e79e6008a42487135810cb21f61 | [
"MIT"
] | null | null | null | app/app/settings.py | ogelinas/recipe-app-api | 5da3b7801eb77e79e6008a42487135810cb21f61 | [
"MIT"
] | null | null | null | app/app/settings.py | ogelinas/recipe-app-api | 5da3b7801eb77e79e6008a42487135810cb21f61 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&run_ly)pkr49un%@y14fk11nffonm^oaw6&q_1-ye_e7ncx%u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Custom
AUTH_USER_MODEL = 'core.User' | 24.64539 | 91 | 0.678849 |
05af2ecf65b342563377e4c91356e76a8249ac94 | 11,717 | py | Python | python/cucim/src/cucim/skimage/transform/pyramids.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 131 | 2021-04-09T19:02:10.000Z | 2022-03-25T08:49:11.000Z | python/cucim/src/cucim/skimage/transform/pyramids.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 222 | 2021-04-12T07:15:14.000Z | 2022-03-31T20:01:01.000Z | python/cucim/src/cucim/skimage/transform/pyramids.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 34 | 2021-04-09T18:54:13.000Z | 2022-03-29T12:59:26.000Z | import math
from functools import reduce
import cupy as cp
from cupyx.scipy import ndimage as ndi
from .._shared.utils import convert_to_float
from ..transform import resize
def _smooth(image, sigma, mode, cval, multichannel=None):
"""Return image with each channel smoothed by the Gaussian filter."""
smoothed = cp.empty_like(image)
# apply Gaussian filter to all channels independently
if multichannel:
sigma = (sigma,) * (image.ndim - 1) + (0,)
ndi.gaussian_filter(image, sigma, output=smoothed,
mode=mode, cval=cval)
return smoothed
def _check_factor(factor):
if factor <= 1:
raise ValueError('scale factor must be greater than 1')
def pyramid_reduce(image, downscale=2, sigma=None, order=1,
mode='reflect', cval=0, multichannel=False,
preserve_range=False):
"""Smooth and then downsample image.
Parameters
----------
image : ndarray
Input image.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
out : array
Smoothed and downsampled float image.
References
----------
.. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
"""
_check_factor(downscale)
image = convert_to_float(image, preserve_range)
out_shape = tuple([math.ceil(d / float(downscale)) for d in image.shape])
if multichannel:
out_shape = out_shape[:-1]
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * downscale / 6.0
smoothed = _smooth(image, sigma, mode, cval, multichannel)
out = resize(smoothed, out_shape, order=order, mode=mode, cval=cval,
anti_aliasing=False)
return out
def pyramid_expand(image, upscale=2, sigma=None, order=1,
mode='reflect', cval=0, multichannel=False,
preserve_range=False):
"""Upsample and then smooth image.
Parameters
----------
image : ndarray
Input image.
upscale : float, optional
Upscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * upscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of upsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
out : array
Upsampled and smoothed float image.
References
----------
.. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
"""
_check_factor(upscale)
image = convert_to_float(image, preserve_range)
out_shape = tuple([math.ceil(upscale * d) for d in image.shape])
if multichannel:
out_shape = out_shape[:-1]
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * upscale / 6.0
resized = resize(image, out_shape, order=order,
mode=mode, cval=cval, anti_aliasing=False)
out = _smooth(resized, sigma, mode, cval, multichannel)
return out
def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1,
mode='reflect', cval=0, multichannel=False,
preserve_range=False):
"""Yield images of the Gaussian pyramid formed by the input image.
Recursively applies the `pyramid_reduce` function to the image, and yields
the downscaled images.
Note that the first image of the pyramid will be the original, unscaled
image. The total number of images is `max_layer + 1`. In case all layers
are computed, the last image is either a one-pixel image or the image where
the reduction does not change its shape.
Parameters
----------
image : ndarray
Input image.
max_layer : int, optional
Number of layers for the pyramid. 0th layer is the original image.
Default is -1 which builds all possible layers.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
pyramid : generator
Generator yielding pyramid layers as float images.
References
----------
.. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
"""
_check_factor(downscale)
# cast to float for consistent data type in pyramid
image = convert_to_float(image, preserve_range)
layer = 0
current_shape = image.shape
prev_layer_image = image
yield image
# build downsampled images until max_layer is reached or downscale process
# does not change image size
while layer != max_layer:
layer += 1
layer_image = pyramid_reduce(prev_layer_image, downscale, sigma, order,
mode, cval, multichannel=multichannel)
prev_shape = current_shape
prev_layer_image = layer_image
current_shape = layer_image.shape
# no change to previous pyramid layer
if current_shape == prev_shape:
break
yield layer_image
def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1,
mode='reflect', cval=0, multichannel=False,
preserve_range=False):
"""Yield images of the laplacian pyramid formed by the input image.
Each layer contains the difference between the downsampled and the
downsampled, smoothed image::
layer = resize(prev_layer) - smooth(resize(prev_layer))
Note that the first image of the pyramid will be the difference between the
original, unscaled image and its smoothed version. The total number of
images is `max_layer + 1`. In case all layers are computed, the last image
is either a one-pixel image or the image where the reduction does not
change its shape.
Parameters
----------
image : ndarray
Input image.
max_layer : int, optional
Number of layers for the pyramid. 0th layer is the original image.
Default is -1 which builds all possible layers.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
pyramid : generator
Generator yielding pyramid layers as float images.
References
----------
.. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
.. [2] http://sepwww.stanford.edu/data/media/public/sep/morgan/texturematch/paper_html/node3.html
""" # noqa
_check_factor(downscale)
# cast to float for consistent data type in pyramid
image = convert_to_float(image, preserve_range)
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * downscale / 6.0
current_shape = image.shape
smoothed_image = _smooth(image, sigma, mode, cval, multichannel)
yield image - smoothed_image
# build downsampled images until max_layer is reached or downscale process
# does not change image size
if max_layer == -1:
max_layer = math.ceil(math.log(reduce(max, current_shape), downscale))
for layer in range(max_layer):
out_shape = tuple(
[math.ceil(d / float(downscale)) for d in current_shape])
if multichannel:
out_shape = out_shape[:-1]
resized_image = resize(smoothed_image, out_shape, order=order,
mode=mode, cval=cval, anti_aliasing=False)
smoothed_image = _smooth(resized_image, sigma, mode, cval,
multichannel)
current_shape = cp.asarray(resized_image.shape)
yield resized_image - smoothed_image
| 36.501558 | 101 | 0.661603 |
706d67f2affc6d88216f0ce914577f690eb84d4d | 155 | py | Python | ponyexpress/api/lib/providers/provider.py | TelekomCloud/pony-express | a825b518687719be5dfe95692008c2129db115cd | [
"Apache-2.0"
] | null | null | null | ponyexpress/api/lib/providers/provider.py | TelekomCloud/pony-express | a825b518687719be5dfe95692008c2129db115cd | [
"Apache-2.0"
] | null | null | null | ponyexpress/api/lib/providers/provider.py | TelekomCloud/pony-express | a825b518687719be5dfe95692008c2129db115cd | [
"Apache-2.0"
] | null | null | null | # Base class
class Provider:
def __init__(self):
pass
def set_url(self, url):
pass
def fetch_metadata(self):
pass
| 11.071429 | 29 | 0.56129 |
be3c5640c355a22baa418702c757e9e766adbf21 | 71 | py | Python | insta.py | shubhamg0sai/hack-wifi | bb200d57b102f93bd0a64558086ed99949920a35 | [
"MIT"
] | 13 | 2022-01-09T10:20:03.000Z | 2022-03-08T07:17:44.000Z | insta.py | shubhamg0sai/hack-wifi | bb200d57b102f93bd0a64558086ed99949920a35 | [
"MIT"
] | 1 | 2022-01-03T17:42:21.000Z | 2022-01-09T17:46:10.000Z | insta.py | shubhamg0sai/hack-wifi | bb200d57b102f93bd0a64558086ed99949920a35 | [
"MIT"
] | null | null | null | import os
os.system('xdg-open https://www.instagram.com/shubhamg0sai')
| 23.666667 | 60 | 0.774648 |
687b649dc01d4b776822211dd2ac4bcfedbd03b5 | 1,753 | py | Python | config/wsgi.py | kirmishov/redesigned-winner | 6d23a334665ae20ead031a656855d7364ddcb35a | [
"MIT"
] | null | null | null | config/wsgi.py | kirmishov/redesigned-winner | 6d23a334665ae20ead031a656855d7364ddcb35a | [
"MIT"
] | null | null | null | config/wsgi.py | kirmishov/redesigned-winner | 6d23a334665ae20ead031a656855d7364ddcb35a | [
"MIT"
] | null | null | null | """
WSGI config for My cookiecutter-Django project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# my_cookiecutter_django_project directory.
app_path = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
)
sys.path.append(os.path.join(app_path, "my_cookiecutter_django_project"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 43.825 | 79 | 0.803195 |
cf3b157e3db878a2947c7c8fc63039318614069e | 5,072 | py | Python | tensorflow/lite/testing/op_tests/strided_slice_np_style.py | ashutom/tensorflow-upstream | c16069c19de9e286dd664abb78d0ea421e9f32d4 | [
"Apache-2.0"
] | 10 | 2021-05-25T17:43:04.000Z | 2022-03-08T10:46:09.000Z | tensorflow/lite/testing/op_tests/strided_slice_np_style.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | tensorflow/lite/testing/op_tests/strided_slice_np_style.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 6 | 2016-09-07T04:00:15.000Z | 2022-01-12T01:47:38.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for strided_slice_np_style."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
# TODO(b/137615945): Expand the test coverage of this one and remove the old
# ones.
@register_make_test_function()
def make_strided_slice_np_style_tests(options):
"""Make a set of tests to test strided_slice in np style."""
test_parameters = [
{
"dtype": [tf.float32],
"shape": [[12, 7], [33, 1]],
"spec": [[slice(3, 7, 2), slice(None)],
[tf.newaxis,
slice(3, 7, 1), tf.newaxis,
slice(None)], [slice(1, 5, 1), slice(None)]],
},
# 1-D case
{
"dtype": [tf.float32],
"shape": [[44]],
"spec": [[slice(3, 7, 2)], [tf.newaxis, slice(None)]],
},
# Shrink mask.
{
"dtype": [tf.float32],
"shape": [[21, 15, 7]],
"spec": [[slice(3, 7, 2), slice(None), 2]],
},
# Ellipsis 3d.
{
"dtype": [tf.float32],
"shape": [[21, 15, 7]],
"spec": [[slice(3, 7, 2), Ellipsis],
[slice(1, 11, 3), Ellipsis,
slice(3, 7, 2)]],
},
# Ellipsis 4d.
{
"dtype": [tf.float32],
"shape": [[21, 15, 7, 9]],
"spec": [[slice(3, 7, 2), Ellipsis]],
},
# Ellipsis 5d.
{
"dtype": [tf.float32],
"shape": [[11, 21, 15, 7, 9]],
"spec": [[
slice(3, 7, 2),
slice(None),
slice(None),
slice(None),
slice(None)
]],
},
# Ellipsis + Shrink Mask
{
"dtype": [tf.float32],
"shape": [[22, 15, 7]],
"spec": [
[
2, # shrink before ellipsis
Ellipsis
],
],
},
# Ellipsis + New Axis Mask
{
"dtype": [tf.float32],
"shape": [[23, 15, 7]],
"spec": [
[
tf.newaxis, # new_axis before ellipsis
slice(3, 7, 2),
slice(None),
Ellipsis
],
[
tf.newaxis, # new_axis after (and before) ellipsis
slice(3, 7, 2),
slice(None),
Ellipsis,
tf.newaxis
]
],
},
]
if options.use_experimental_converter:
# The case when Ellipsis is expanded to multiple dimension is only supported
# by MLIR converter (b/183902491).
test_parameters = test_parameters + [
# Ellipsis 3d.
{
"dtype": [tf.float32],
"shape": [[21, 15, 7]],
"spec": [[Ellipsis, slice(3, 7, 2)]],
},
# Ellipsis 4d.
{
"dtype": [tf.float32],
"shape": [[21, 15, 7, 9]],
"spec": [[Ellipsis, slice(3, 7, 2)],
[slice(1, 11, 3), Ellipsis,
slice(3, 7, 2)]],
},
# Ellipsis 5d.
{
"dtype": [tf.float32],
"shape": [[11, 21, 15, 7, 9]],
"spec": [[Ellipsis, slice(3, 7, 2)]],
},
# Ellipsis + Shrink Mask
{
"dtype": [tf.float32],
"shape": [[22, 15, 7]],
"spec": [[
Ellipsis, # shrink after ellipsis
2
]],
},
]
def build_graph(parameters):
"""Build a simple graph with np style strided_slice."""
input_value = tf.compat.v1.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"])
out = input_value.__getitem__(parameters["spec"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"], parameters["shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| 31.7 | 80 | 0.500197 |
ceb1be692b75965215e6ea72edf6ebc043d04efb | 18,392 | py | Python | tests/examples/minlplib/smallinvDAXr4b200-220.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | 2 | 2021-07-03T13:19:10.000Z | 2022-02-06T10:48:13.000Z | tests/examples/minlplib/smallinvDAXr4b200-220.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | 1 | 2021-07-04T14:52:14.000Z | 2021-07-15T10:17:11.000Z | tests/examples/minlplib/smallinvDAXr4b200-220.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | null | null | null | # MINLP written by GAMS Convert at 04/21/18 13:54:14
#
# Equation counts
# Total E G L N X C B
# 4 0 2 2 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 31 1 0 30 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 121 91 30 0
from pyomo.environ import *
model = m = ConcreteModel()
m.i1 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i2 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i4 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i5 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i6 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i7 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i8 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i9 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i10 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i11 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i12 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i13 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i14 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i15 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i16 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i17 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i18 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i19 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i20 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i21 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i22 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i23 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i24 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i25 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i26 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i27 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i28 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i29 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.i30 = Var(within=Integers,bounds=(0,1E20),initialize=0)
m.x31 = Var(within=Reals,bounds=(None,None),initialize=0)
m.obj = Objective(expr=m.x31, sense=minimize)
m.c1 = Constraint(expr=0.00558391*m.i1**2 + 0.0103737*m.i2**2 + 0.0221075*m.i3**2 + 0.00399551*m.i4**2 + 0.00267064*m.i5
**2 + 0.00516451*m.i6**2 + 0.00421051*m.i7**2 + 0.00368008*m.i8**2 + 0.00372788*m.i9**2 +
0.00668969*m.i10**2 + 0.00613034*m.i11**2 + 0.0129271*m.i12**2 + 0.00697595*m.i13**2 + 0.0104282*
m.i14**2 + 0.00899168*m.i15**2 + 0.0206685*m.i16**2 + 0.0488863*m.i17**2 + 0.00894867*m.i18**2 +
0.0124118*m.i19**2 + 0.0122291*m.i20**2 + 0.0128973*m.i21**2 + 0.00668043*m.i22**2 + 0.0153425*
m.i23**2 + 0.0128605*m.i24**2 + 0.00718697*m.i25**2 + 0.0102616*m.i26**2 + 0.0123235*m.i27**2 +
0.00569465*m.i28**2 + 0.00818114*m.i29**2 + 0.00469456*m.i30**2 + 0.00901964*m.i1*m.i2 +
0.00860972*m.i1*m.i3 + 0.00248004*m.i1*m.i4 + 0.001821292*m.i1*m.i5 + 0.00561078*m.i1*m.i6 +
0.0051287*m.i1*m.i7 + 0.000691196*m.i1*m.i8 + 0.000805982*m.i1*m.i9 + 0.00531452*m.i1*m.i10 +
0.00556768*m.i1*m.i11 + 0.00745224*m.i1*m.i12 + 0.00478224*m.i1*m.i13 + 0.00610824*m.i1*m.i14 +
0.00577634*m.i1*m.i15 + 0.00613326*m.i1*m.i16 + 0.01432596*m.i1*m.i17 + 0.007501*m.i1*m.i18 +
0.00716212*m.i1*m.i19 + 0.00512922*m.i1*m.i20 + 0.0087283*m.i1*m.i21 + 0.00245846*m.i1*m.i22 +
0.0071572*m.i1*m.i23 + 0.00543966*m.i1*m.i24 + 0.00708258*m.i1*m.i25 + 0.00243422*m.i1*m.i26 +
0.00729094*m.i1*m.i27 + 0.00386642*m.i1*m.i28 + 0.0061908*m.i1*m.i29 + 0.00366754*m.i1*m.i30 +
0.01583972*m.i2*m.i3 + 0.00394608*m.i2*m.i4 + 0.001773554*m.i2*m.i5 + 0.00861376*m.i2*m.i6 +
0.00604454*m.i2*m.i7 + 0.00312866*m.i2*m.i8 + 0.00184686*m.i2*m.i9 + 0.00924638*m.i2*m.i10 +
0.01131902*m.i2*m.i11 + 0.01253232*m.i2*m.i12 + 0.00675858*m.i2*m.i13 + 0.00804604*m.i2*m.i14 +
0.00869872*m.i2*m.i15 + 0.0094047*m.i2*m.i16 + 0.0251538*m.i2*m.i17 + 0.01321532*m.i2*m.i18 +
0.01127964*m.i2*m.i19 + 0.0096635*m.i2*m.i20 + 0.0160783*m.i2*m.i21 + 0.00271*m.i2*m.i22 +
0.01486022*m.i2*m.i23 + 0.01091018*m.i2*m.i24 + 0.01009426*m.i2*m.i25 + 0.00754144*m.i2*m.i26 +
0.01408844*m.i2*m.i27 + 0.00544162*m.i2*m.i28 + 0.01096178*m.i2*m.i29 + 0.00574964*m.i2*m.i30 +
0.00299428*m.i3*m.i4 + 0.001239314*m.i3*m.i5 + 0.01256412*m.i3*m.i6 + 0.00899714*m.i3*m.i7 +
0.00444448*m.i3*m.i8 + 0.00616612*m.i3*m.i9 + 0.0146019*m.i3*m.i10 + 0.01249836*m.i3*m.i11 +
0.0264968*m.i3*m.i12 + 0.01266506*m.i3*m.i13 + 0.01358566*m.i3*m.i14 + 0.01419766*m.i3*m.i15 +
0.01033796*m.i3*m.i16 + 0.040104*m.i3*m.i17 + 0.01504214*m.i3*m.i18 + 0.0210518*m.i3*m.i19 +
0.0169342*m.i3*m.i20 + 0.020394*m.i3*m.i21 + 0.006361*m.i3*m.i22 + 0.0173249*m.i3*m.i23 +
0.01157254*m.i3*m.i24 + 0.01601196*m.i3*m.i25 + 0.01305808*m.i3*m.i26 + 0.018918*m.i3*m.i27 +
0.0100768*m.i3*m.i28 + 0.01415258*m.i3*m.i29 + 0.00890208*m.i3*m.i30 + 0.00365082*m.i4*m.i5 +
0.0031533*m.i4*m.i6 + 0.001664882*m.i4*m.i7 + 0.000487746*m.i4*m.i8 + 0.00074873*m.i4*m.i9 +
0.00279536*m.i4*m.i10 + 0.000948078*m.i4*m.i11 + 0.00218644*m.i4*m.i12 + 0.001471884*m.i4*m.i13
+ 0.001764448*m.i4*m.i14 + 0.001707856*m.i4*m.i15 + 0.00415534*m.i4*m.i16 + 0.00552118*m.i4*
m.i17 + 0.00298928*m.i4*m.i18 + 0.000446818*m.i4*m.i19 + 0.0042709*m.i4*m.i20 + 0.00437068*m.i4*
m.i21 + 0.001584414*m.i4*m.i22 + 0.0028495*m.i4*m.i23 + 0.00550266*m.i4*m.i24 + 0.0019381*m.i4*
m.i25 - 0.000779792*m.i4*m.i26 + 0.00383714*m.i4*m.i27 + 0.00170793*m.i4*m.i28 + 0.00220852*m.i4*
m.i29 + 0.001897386*m.i4*m.i30 + 0.00226608*m.i5*m.i6 + 0.001391572*m.i5*m.i7 + 0.001434726*m.i5*
m.i8 + 0.000718962*m.i5*m.i9 + 0.00117417*m.i5*m.i10 + 0.001240914*m.i5*m.i11 + 0.000587866*m.i5*
m.i12 + 0.0020154*m.i5*m.i13 + 0.00126883*m.i5*m.i14 + 0.000645164*m.i5*m.i15 + 0.0001425196*m.i5
*m.i16 + 0.001199014*m.i5*m.i17 + 0.001896292*m.i5*m.i18 - 0.000289412*m.i5*m.i19 + 0.001457998*
m.i5*m.i20 + 0.00199702*m.i5*m.i21 + 0.001266598*m.i5*m.i22 + 0.000764624*m.i5*m.i23 +
0.001961312*m.i5*m.i24 + 0.001748826*m.i5*m.i25 - 0.00122625*m.i5*m.i26 + 0.000753266*m.i5*m.i27
+ 0.00063941*m.i5*m.i28 + 0.001644068*m.i5*m.i29 + 0.001587886*m.i5*m.i30 + 0.00454154*m.i6*m.i7
+ 0.001157686*m.i6*m.i8 + 0.0032018*m.i6*m.i9 + 0.00727798*m.i6*m.i10 + 0.0064553*m.i6*m.i11 +
0.00791618*m.i6*m.i12 + 0.00687526*m.i6*m.i13 + 0.00638032*m.i6*m.i14 + 0.00425538*m.i6*m.i15 +
0.00583332*m.i6*m.i16 + 0.01491304*m.i6*m.i17 + 0.00876772*m.i6*m.i18 + 0.00814434*m.i6*m.i19 +
0.00549208*m.i6*m.i20 + 0.0103848*m.i6*m.i21 + 0.001352278*m.i6*m.i22 + 0.0063097*m.i6*m.i23 +
0.0052012*m.i6*m.i24 + 0.00808494*m.i6*m.i25 + 0.00595234*m.i6*m.i26 + 0.00960786*m.i6*m.i27 +
0.0035648*m.i6*m.i28 + 0.00730486*m.i6*m.i29 + 0.0036145*m.i6*m.i30 + 0.0027426*m.i7*m.i8 +
0.00224138*m.i7*m.i9 + 0.00558948*m.i7*m.i10 + 0.00489378*m.i7*m.i11 + 0.0073565*m.i7*m.i12 +
0.0050794*m.i7*m.i13 + 0.00363244*m.i7*m.i14 + 0.00634576*m.i7*m.i15 + 0.001588982*m.i7*m.i16 +
0.00877926*m.i7*m.i17 + 0.00710862*m.i7*m.i18 + 0.00675396*m.i7*m.i19 + 0.00621206*m.i7*m.i20 +
0.00746652*m.i7*m.i21 + 0.001927036*m.i7*m.i22 + 0.00410122*m.i7*m.i23 + 0.00344774*m.i7*m.i24 +
0.00594546*m.i7*m.i25 + 0.00461784*m.i7*m.i26 + 0.00530234*m.i7*m.i27 + 0.00320122*m.i7*m.i28 +
0.00474356*m.i7*m.i29 + 0.00341222*m.i7*m.i30 + 0.00105347*m.i8*m.i9 + 0.001879822*m.i8*m.i10 +
0.00290244*m.i8*m.i11 + 0.00353818*m.i8*m.i12 + 0.0035513*m.i8*m.i13 + 0.00294406*m.i8*m.i14 +
0.00389942*m.i8*m.i15 + 0.00286866*m.i8*m.i16 + 0.000920126*m.i8*m.i17 + 0.00274282*m.i8*m.i18 +
0.0027675*m.i8*m.i19 + 0.00464592*m.i8*m.i20 + 0.001093444*m.i8*m.i21 + 0.000948594*m.i8*m.i22 +
0.00275316*m.i8*m.i23 + 0.001626794*m.i8*m.i24 + 0.00209498*m.i8*m.i25 + 0.0031962*m.i8*m.i26 +
0.001767658*m.i8*m.i27 + 0.00109948*m.i8*m.i28 + 0.00292004*m.i8*m.i29 + 0.00215496*m.i8*m.i30 +
0.00329222*m.i9*m.i10 + 0.00239978*m.i9*m.i11 + 0.00365066*m.i9*m.i12 + 0.00463422*m.i9*m.i13 +
0.00260888*m.i9*m.i14 + 0.00330432*m.i9*m.i15 + 0.000950274*m.i9*m.i16 + 0.00309664*m.i9*m.i17 +
0.00325462*m.i9*m.i18 + 0.00494078*m.i9*m.i19 + 0.00339202*m.i9*m.i20 + 0.00283784*m.i9*m.i21 +
0.001862472*m.i9*m.i22 + 0.001457294*m.i9*m.i23 + 0.000292408*m.i9*m.i24 + 0.00434258*m.i9*m.i25
+ 0.0051917*m.i9*m.i26 + 0.00442724*m.i9*m.i27 + 0.00235362*m.i9*m.i28 + 0.0023207*m.i9*m.i29 +
0.00232972*m.i9*m.i30 + 0.00661128*m.i10*m.i11 + 0.0099349*m.i10*m.i12 + 0.00670728*m.i10*m.i13
+ 0.00688756*m.i10*m.i14 + 0.00814804*m.i10*m.i15 + 0.00387536*m.i10*m.i16 + 0.01709622*m.i10*
m.i17 + 0.00921546*m.i10*m.i18 + 0.01138012*m.i10*m.i19 + 0.0073598*m.i10*m.i20 + 0.012047*m.i10*
m.i21 + 0.001953884*m.i10*m.i22 + 0.01110682*m.i10*m.i23 + 0.00744232*m.i10*m.i24 + 0.00846572*
m.i10*m.i25 + 0.00811902*m.i10*m.i26 + 0.01093528*m.i10*m.i27 + 0.00642736*m.i10*m.i28 +
0.00817838*m.i10*m.i29 + 0.00467066*m.i10*m.i30 + 0.01089978*m.i11*m.i12 + 0.00580646*m.i11*m.i13
+ 0.00479126*m.i11*m.i14 + 0.00655088*m.i11*m.i15 + 0.00784072*m.i11*m.i16 + 0.0171429*m.i11*
m.i17 + 0.0099023*m.i11*m.i18 + 0.00881158*m.i11*m.i19 + 0.0065332*m.i11*m.i20 + 0.01111462*m.i11
*m.i21 + 0.00238226*m.i11*m.i22 + 0.00942038*m.i11*m.i23 + 0.00509366*m.i11*m.i24 + 0.0079177*
m.i11*m.i25 + 0.00653764*m.i11*m.i26 + 0.00963386*m.i11*m.i27 + 0.00518254*m.i11*m.i28 +
0.00839924*m.i11*m.i29 + 0.00396162*m.i11*m.i30 + 0.00812884*m.i12*m.i13 + 0.00932748*m.i12*m.i14
+ 0.01172114*m.i12*m.i15 + 0.00937084*m.i12*m.i16 + 0.033621*m.i12*m.i17 + 0.0125625*m.i12*m.i18
+ 0.01635358*m.i12*m.i19 + 0.01460644*m.i12*m.i20 + 0.01374474*m.i12*m.i21 + 0.00526496*m.i12*
m.i22 + 0.01402198*m.i12*m.i23 + 0.00931776*m.i12*m.i24 + 0.01195866*m.i12*m.i25 + 0.00822682*
m.i12*m.i26 + 0.01241788*m.i12*m.i27 + 0.00706034*m.i12*m.i28 + 0.01219462*m.i12*m.i29 +
0.00598988*m.i12*m.i30 + 0.0068538*m.i13*m.i14 + 0.00620178*m.i13*m.i15 + 0.00379406*m.i13*m.i16
+ 0.00889862*m.i13*m.i17 + 0.00816594*m.i13*m.i18 + 0.01033824*m.i13*m.i19 + 0.00577162*m.i13*
m.i20 + 0.00736548*m.i13*m.i21 + 0.00410776*m.i13*m.i22 + 0.00580558*m.i13*m.i23 + 0.00459074*
m.i13*m.i24 + 0.0072167*m.i13*m.i25 + 0.00956086*m.i13*m.i26 + 0.00943468*m.i13*m.i27 +
0.00587164*m.i13*m.i28 + 0.00902842*m.i13*m.i29 + 0.00550608*m.i13*m.i30 + 0.00635356*m.i14*m.i15
+ 0.00709628*m.i14*m.i16 + 0.01555038*m.i14*m.i17 + 0.00826722*m.i14*m.i18 + 0.00751614*m.i14*
m.i19 + 0.00814342*m.i14*m.i20 + 0.00995652*m.i14*m.i21 + 0.00477798*m.i14*m.i22 + 0.0076843*
m.i14*m.i23 + 0.00817698*m.i14*m.i24 + 0.00886056*m.i14*m.i25 + 0.00579636*m.i14*m.i26 +
0.01128084*m.i14*m.i27 + 0.00483444*m.i14*m.i28 + 0.0068342*m.i14*m.i29 + 0.0077372*m.i14*m.i30
+ 0.00973548*m.i15*m.i16 + 0.01556958*m.i15*m.i17 + 0.00926266*m.i15*m.i18 + 0.01281188*m.i15*
m.i19 + 0.00669072*m.i15*m.i20 + 0.00937684*m.i15*m.i21 + 0.00639856*m.i15*m.i22 + 0.00611934*
m.i15*m.i23 + 0.00853942*m.i15*m.i24 + 0.00964296*m.i15*m.i25 + 0.00704584*m.i15*m.i26 +
0.0119279*m.i15*m.i27 + 0.00648174*m.i15*m.i28 + 0.01050128*m.i15*m.i29 + 0.00502696*m.i15*m.i30
+ 0.01809222*m.i16*m.i17 + 0.00823288*m.i16*m.i18 + 0.01161214*m.i16*m.i19 + 0.00533676*m.i16*
m.i20 + 0.01233794*m.i16*m.i21 + 0.00512778*m.i16*m.i22 + 0.00722276*m.i16*m.i23 + 0.01715638*
m.i16*m.i24 + 0.00677738*m.i16*m.i25 + 0.0069565*m.i16*m.i26 + 0.01691522*m.i16*m.i27 +
0.00246824*m.i16*m.i28 + 0.00934088*m.i16*m.i29 + 0.00393866*m.i16*m.i30 + 0.01858542*m.i17*m.i18
+ 0.0224912*m.i17*m.i19 + 0.01793624*m.i17*m.i20 + 0.0270204*m.i17*m.i21 + 0.01083832*m.i17*
m.i22 + 0.0216678*m.i17*m.i23 + 0.0183347*m.i17*m.i24 + 0.01893*m.i17*m.i25 + 0.01089098*m.i17*
m.i26 + 0.0209142*m.i17*m.i27 + 0.01273162*m.i17*m.i28 + 0.0200902*m.i17*m.i29 + 0.00774366*m.i17
*m.i30 + 0.01171594*m.i18*m.i19 + 0.00861454*m.i18*m.i20 + 0.01414322*m.i18*m.i21 + 0.001961404*
m.i18*m.i22 + 0.00910214*m.i18*m.i23 + 0.01003468*m.i18*m.i24 + 0.0094743*m.i18*m.i25 +
0.00825794*m.i18*m.i26 + 0.01336058*m.i18*m.i27 + 0.00607998*m.i18*m.i28 + 0.01070732*m.i18*m.i29
+ 0.00492858*m.i18*m.i30 + 0.0082848*m.i19*m.i20 + 0.0126004*m.i19*m.i21 + 0.00407366*m.i19*
m.i22 + 0.01381284*m.i19*m.i23 + 0.00838908*m.i19*m.i24 + 0.01198264*m.i19*m.i25 + 0.01583126*
m.i19*m.i26 + 0.01664044*m.i19*m.i27 + 0.00924324*m.i19*m.i28 + 0.01214842*m.i19*m.i29 +
0.00592778*m.i19*m.i30 + 0.01071434*m.i20*m.i21 + 0.00296964*m.i20*m.i22 + 0.00736528*m.i20*m.i23
+ 0.00606396*m.i20*m.i24 + 0.00628822*m.i20*m.i25 + 0.00817696*m.i20*m.i26 + 0.00776894*m.i20*
m.i27 + 0.0026202*m.i20*m.i28 + 0.00717342*m.i20*m.i29 + 0.00579184*m.i20*m.i30 + 0.00469936*
m.i21*m.i22 + 0.0138599*m.i21*m.i23 + 0.0125037*m.i21*m.i24 + 0.01211002*m.i21*m.i25 + 0.00836436
*m.i21*m.i26 + 0.016494*m.i21*m.i27 + 0.00602872*m.i21*m.i28 + 0.01180462*m.i21*m.i29 +
0.00570478*m.i21*m.i30 + 0.0032176*m.i22*m.i23 + 0.00379112*m.i22*m.i24 + 0.00301976*m.i22*m.i25
+ 0.00308424*m.i22*m.i26 + 0.00369962*m.i22*m.i27 + 0.00278784*m.i22*m.i28 + 0.00465846*m.i22*
m.i29 + 0.00297212*m.i22*m.i30 + 0.01019176*m.i23*m.i24 + 0.00779098*m.i23*m.i25 + 0.00577776*
m.i23*m.i26 + 0.01267514*m.i23*m.i27 + 0.00735432*m.i23*m.i28 + 0.00786386*m.i23*m.i29 +
0.00559972*m.i23*m.i30 + 0.00725022*m.i24*m.i25 + 0.00455648*m.i24*m.i26 + 0.0157223*m.i24*m.i27
+ 0.00579512*m.i24*m.i28 + 0.00792398*m.i24*m.i29 + 0.0045755*m.i24*m.i30 + 0.00723442*m.i25*
m.i26 + 0.01196012*m.i25*m.i27 + 0.0063273*m.i25*m.i28 + 0.0099815*m.i25*m.i29 + 0.0041794*m.i25*
m.i30 + 0.01139894*m.i26*m.i27 + 0.0080092*m.i26*m.i28 + 0.0080044*m.i26*m.i29 + 0.00493602*m.i26
*m.i30 + 0.00826208*m.i27*m.i28 + 0.01246152*m.i27*m.i29 + 0.0067556*m.i27*m.i30 + 0.00575648*
m.i28*m.i29 + 0.0044929*m.i28*m.i30 + 0.00469952*m.i29*m.i30 - m.x31 <= 0)
m.c2 = Constraint(expr= 0.00318236*m.i1 - 0.00015175*m.i2 - 0.00695114*m.i3 + 0.012626*m.i4 + 0.0094202*m.i5
+ 0.00596825*m.i6 + 0.00354677*m.i7 - 0.00134318*m.i8 - 0.00670481*m.i9 - 0.00123195*m.i10
- 0.00051299*m.i11 - 0.00348068*m.i12 + 0.00486372*m.i13 - 0.0102346*m.i14 - 0.00211219*m.i15
+ 0.00457072*m.i16 - 0.00828342*m.i17 - 0.00168175*m.i18 - 0.00531856*m.i19 - 0.00111388*m.i20
+ 0.0142599*m.i21 + 0.00545572*m.i22 + 0.0077442*m.i23 + 0.0195808*m.i24 + 0.00260132*m.i25
- 0.00524596*m.i26 + 0.00306*m.i27 + 0.00329918*m.i28 + 0.00265023*m.i29 + 0.0045383*m.i30 >= 0)
m.c3 = Constraint(expr= 39.19*m.i1 + 41.47*m.i2 + 5.71*m.i3 + 53.59*m.i4 + 43.65*m.i5 + 85.46*m.i6 + 39.7*m.i7
+ 44.91*m.i8 + 9.6*m.i9 + 11.26*m.i10 + 39.56*m.i11 + 46*m.i12 + 45.25*m.i13 + 21.9*m.i14
+ 11.85*m.i15 + 37.4*m.i16 + 4.75*m.i17 + 44.44*m.i18 + 80.5*m.i19 + 49.46*m.i20 + 67.02*m.i21
+ 59.25*m.i22 + 71.5*m.i23 + 48.8*m.i24 + 73.22*m.i25 + 101.9*m.i26 + 20.06*m.i27 + 36.33*m.i28
+ 41.31*m.i29 + 53.09*m.i30 >= 20000)
m.c4 = Constraint(expr= 39.19*m.i1 + 41.47*m.i2 + 5.71*m.i3 + 53.59*m.i4 + 43.65*m.i5 + 85.46*m.i6 + 39.7*m.i7
+ 44.91*m.i8 + 9.6*m.i9 + 11.26*m.i10 + 39.56*m.i11 + 46*m.i12 + 45.25*m.i13 + 21.9*m.i14
+ 11.85*m.i15 + 37.4*m.i16 + 4.75*m.i17 + 44.44*m.i18 + 80.5*m.i19 + 49.46*m.i20 + 67.02*m.i21
+ 59.25*m.i22 + 71.5*m.i23 + 48.8*m.i24 + 73.22*m.i25 + 101.9*m.i26 + 20.06*m.i27 + 36.33*m.i28
+ 41.31*m.i29 + 53.09*m.i30 <= 22000)
| 94.317949 | 120 | 0.54241 |
8bfabb3a26d1109b23091cf41cae4efce0005b07 | 21,550 | py | Python | api_tests/identifiers/views/test_identifier_list.py | felliott/osf.io | b3221387f3995cdc7d7ae2da2e631f258d4afc86 | [
"Apache-2.0"
] | null | null | null | api_tests/identifiers/views/test_identifier_list.py | felliott/osf.io | b3221387f3995cdc7d7ae2da2e631f258d4afc86 | [
"Apache-2.0"
] | 72 | 2016-06-16T18:59:37.000Z | 2022-03-08T23:40:07.000Z | api_tests/identifiers/views/test_identifier_list.py | Johnetordoff/osf.io | de10bf249c46cede04c78f7e6f7e352c69e6e6b5 | [
"Apache-2.0"
] | null | null | null | import pytest
from future.moves.urllib.parse import urlparse
import responses
from django.utils import timezone
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from osf.models import Identifier
from osf_tests.factories import (
RegistrationFactory,
AuthUserFactory,
IdentifierFactory,
NodeFactory,
PreprintFactory,
WithdrawnRegistrationFactory,
)
from osf.utils.permissions import READ, WRITE
from osf.utils.workflows import DefaultStates
from tests.utils import assert_equals
from website.identifiers.clients import DataCiteClient
from website import settings
@pytest.fixture()
def user():
    # Shared fixture: a user whose `.auth` credentials the tests below use for
    # authenticated API requests.
    return AuthUserFactory()
@pytest.mark.django_db
class TestRegistrationIdentifierList:
    """GET /registrations/{id}/identifiers/ — listing a registration's identifiers."""

    @pytest.fixture()
    def node(self, user):
        return NodeFactory(creator=user, is_public=True)

    @pytest.fixture()
    def identifier_node(self, node):
        return IdentifierFactory(referent=node)

    @pytest.fixture()
    def registration(self, user):
        return RegistrationFactory(creator=user, is_public=True)

    @pytest.fixture()
    def identifier_registration(self, registration):
        return IdentifierFactory(referent=registration)

    @pytest.fixture()
    def url_registration_identifiers(self, registration):
        return '/{}registrations/{}/identifiers/'.format(
            API_BASE, registration._id)

    @pytest.fixture()
    def res_registration_identifiers(self, app, url_registration_identifiers):
        return app.get(url_registration_identifiers)

    @pytest.fixture()
    def data_registration_identifiers(self, res_registration_identifiers):
        return res_registration_identifiers.json['data']

    def test_identifier_list_success(self, res_registration_identifiers):
        assert res_registration_identifiers.status_code == 200
        assert res_registration_identifiers.content_type == 'application/vnd.api+json'

    def test_identifier_list_returns_correct_number_and_referent(
            self, registration, identifier_registration,
            data_registration_identifiers, res_registration_identifiers,
    ):
        # test_identifier_list_returns_correct_number
        total = res_registration_identifiers.json['links']['meta']['total']
        assert total == Identifier.objects.filter(object_id=registration.id).count()

        # test_identifier_list_returns_correct_referent
        paths = [
            urlparse(
                item['relationships']['referent']['links']['related']['href']
            ).path for item in data_registration_identifiers
        ]
        assert '/{}registrations/{}/'.format(API_BASE,
                                             registration._id) in paths

    def test_identifier_list_returns_correct_categories_and_values(self, data_registration_identifiers):
        # test_identifier_list_returns_correct_categories
        categories = [identifier.category for identifier in Identifier.objects.all()]
        categories_in_response = [identifier['attributes']['category']
                                  for identifier in data_registration_identifiers]
        assert_equals(categories_in_response, categories)

        # test_identifier_list_returns_correct_values
        values = [identifier.value for identifier in Identifier.objects.all()]
        values_in_response = [identifier['attributes']['value']
                              for identifier in data_registration_identifiers]
        assert_equals(values_in_response, values)

    def test_identifier_filter_by_category(
            self, app, registration, identifier_registration,
            url_registration_identifiers
    ):
        # 'carpid' comes from IdentifierFactory's default category; add a second
        # category so the filter has something to exclude.
        IdentifierFactory(referent=registration, category='nopeid')
        identifiers_for_registration = registration.identifiers
        assert identifiers_for_registration.count() == 2
        assert_equals(
            list(
                identifiers_for_registration.values_list(
                    'category',
                    flat=True
                )
            ), ['carpid', 'nopeid']
        )
        filter_url = '{}?filter[category]=carpid'.format(
            url_registration_identifiers)
        new_res = app.get(filter_url)

        carpid_total = Identifier.objects.filter(category='carpid').count()
        total = new_res.json['links']['meta']['total']
        assert total == carpid_total

    def test_node_identifier_not_returned_from_registration_endpoint(
            self, identifier_node, identifier_registration,
            res_registration_identifiers,
            data_registration_identifiers
    ):
        assert res_registration_identifiers.status_code == 200
        assert len(data_registration_identifiers) == 1
        assert identifier_registration._id == data_registration_identifiers[0]['id']
        assert identifier_node._id != data_registration_identifiers[0]['id']

    def test_node_not_allowed_from_registrations_endpoint(
            self, app, node):
        # A plain node id under /registrations/ should 404.
        url = '/{}registrations/{}/identifiers/'.format(API_BASE, node._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404

    def test_do_not_return_deleted_identifier(
            self, app, registration):
        registration.is_deleted = True
        registration.save()
        # Deleted resources respond 410 Gone.
        url = '/{}registrations/{}/identifiers/'.format(API_BASE, registration._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 410
@pytest.mark.django_db
class TestNodeIdentifierList:
    """GET /nodes/{id}/identifiers/ — listing a node's identifiers."""

    @pytest.fixture()
    def node(self, user):
        return NodeFactory(creator=user, is_public=True)

    @pytest.fixture()
    def identifier_node(self, node):
        return IdentifierFactory(referent=node)

    @pytest.fixture()
    def url_node_identifiers(self, node):
        return '/{}nodes/{}/identifiers/'.format(API_BASE, node._id)

    @pytest.fixture()
    def res_node_identifiers(self, app, url_node_identifiers):
        return app.get(url_node_identifiers)

    @pytest.fixture()
    def data_node_identifiers(self, res_node_identifiers):
        return res_node_identifiers.json['data']

    @pytest.fixture()
    def registration(self, user):
        return RegistrationFactory(creator=user, is_public=True)

    @pytest.fixture()
    def identifier_registration(self, registration):
        return IdentifierFactory(referent=registration)

    def test_identifier_list_success(self, res_node_identifiers):
        assert res_node_identifiers.status_code == 200
        assert res_node_identifiers.content_type == 'application/vnd.api+json'

    def test_identifier_list_returns_correct_number_and_referent(
            self, node, identifier_node, res_node_identifiers,
            data_node_identifiers
    ):
        # test_identifier_list_returns_correct_number
        total = res_node_identifiers.json['links']['meta']['total']
        assert total == Identifier.objects.all().count()

        # test_identifier_list_returns_correct_referent
        paths = [
            urlparse(
                item['relationships']['referent']['links']['related']['href']
            ).path for item in data_node_identifiers
        ]
        assert '/{}nodes/{}/'.format(API_BASE, node._id) in paths

    def test_identifier_list_returns_correct_categories_and_values(
            self, data_node_identifiers):
        # test_identifier_list_returns_correct_categories
        categories = [identifier.category for identifier in Identifier.objects.all()]
        categories_in_response = [
            identifier['attributes']['category'] for identifier in data_node_identifiers]
        assert_equals(categories_in_response, categories)

        # test_identifier_list_returns_correct_values
        values = [identifier.value for identifier in Identifier.objects.all()]
        values_in_response = [
            identifier['attributes']['value'] for identifier in data_node_identifiers
        ]
        assert_equals(values_in_response, values)

    def test_identifier_filter_by_category(
            self, app, node, identifier_node, url_node_identifiers):
        IdentifierFactory(referent=node, category='nopeid')
        identifiers_for_node = Identifier.objects.filter(object_id=node.id)

        assert identifiers_for_node.count() == 2
        assert_equals(
            [identifier.category for identifier in identifiers_for_node],
            ['carpid', 'nopeid']
        )

        filter_url = '{}?filter[category]=carpid'.format(url_node_identifiers)
        new_res = app.get(filter_url)

        carpid_total = Identifier.objects.filter(category='carpid').count()

        total = new_res.json['links']['meta']['total']
        assert total == carpid_total

    def test_registration_identifier_not_returned_from_registration_endpoint(
            self, identifier_node, identifier_registration,
            res_node_identifiers, data_node_identifiers
    ):
        # NOTE(review): despite the name, this hits the *nodes* endpoint and
        # asserts registration identifiers are excluded from it.
        assert res_node_identifiers.status_code == 200
        assert len(data_node_identifiers) == 1
        assert identifier_node._id == data_node_identifiers[0]['id']
        assert identifier_registration._id != data_node_identifiers[0]['id']

    def test_registration_not_allowed_from_nodes_endpoint(
            self, app, registration):
        # A registration id under /nodes/ should 404.
        url = '/{}nodes/{}/identifiers/'.format(API_BASE, registration._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404

    def test_do_not_return_deleted_identifier(
            self, app, node):
        node.is_deleted = True
        node.save()
        # Deleted resources respond 410 Gone.
        url = '/{}nodes/{}/identifiers/'.format(API_BASE, node._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 410
@pytest.mark.django_db
class TestPreprintIdentifierList:
    """GET /preprints/{id}/identifiers/ — listing and permission checks."""

    @pytest.fixture()
    def preprint(self, user):
        return PreprintFactory(creator=user)

    @pytest.fixture()
    def url_preprint_identifier(self, preprint):
        return '/{}preprints/{}/identifiers/'.format(API_BASE, preprint._id)

    @pytest.fixture()
    def res_preprint_identifier(self, app, url_preprint_identifier):
        return app.get(url_preprint_identifier)

    @pytest.fixture()
    def data_preprint_identifier(self, res_preprint_identifier):
        return res_preprint_identifier.json['data']

    def test_identifier_list_success(self, res_preprint_identifier):
        assert res_preprint_identifier.status_code == 200
        assert res_preprint_identifier.content_type == 'application/vnd.api+json'

    def test_identifier_list_returns_correct_number_and_referent(
            self, preprint, res_preprint_identifier,
            data_preprint_identifier, user
    ):
        # add another preprint so there are more identifiers
        PreprintFactory(creator=user)

        # test_identifier_list_returns_correct_number
        total = res_preprint_identifier.json['links']['meta']['total']
        assert total == Identifier.objects.filter(
            object_id=preprint.id
        ).count()

        # test_identifier_list_returns_correct_referent
        paths = [
            urlparse(
                item['relationships']['referent']['links']['related']['href']
            ).path for item in data_preprint_identifier
        ]
        assert '/{}preprints/{}/'.format(API_BASE, preprint._id) in paths

    def test_identifier_list_returns_correct_categories_and_values(
            self, data_preprint_identifier):
        # test_identifier_list_returns_correct_categories
        categories = Identifier.objects.all().values_list('category', flat=True)
        categories_in_response = [identifier['attributes']['category']
                                  for identifier in data_preprint_identifier]
        assert_equals(categories_in_response, list(categories))

        # test_identifier_list_returns_correct_values
        values = Identifier.objects.all().values_list('value', flat=True)
        values_in_response = [identifier['attributes']['value']
                              for identifier in data_preprint_identifier]
        assert_equals(values_in_response, list(values))

    def test_preprint_identifier_list_permissions_unpublished(
            self, app, user, data_preprint_identifier, preprint, url_preprint_identifier):
        # Unpublished: anonymous 401, non-contrib 403, contributors 200.
        preprint.is_published = False
        preprint.save()

        # test_unpublished_preprint_identifier_unauthenticated
        res = app.get(url_preprint_identifier, expect_errors=True)
        assert res.status_code == 401

        # test_unpublished_preprint_identifier_noncontrib_authenticated
        non_contrib = AuthUserFactory()
        res = app.get(url_preprint_identifier, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_preprint_identifier_admin_authenticated
        res = app.get(url_preprint_identifier, auth=user.auth)
        assert res.status_code == 200

        # test_unpublished_preprint_identifier_readcontrib_authenticated
        read_user = AuthUserFactory()
        preprint.add_contributor(read_user, READ, save=True)
        res = app.get(url_preprint_identifier, auth=read_user.auth, expect_errors=True)
        assert res.status_code == 200

        # test_published_preprint_identifier_unauthenticated
        preprint.set_published(True, Auth(user))
        preprint.save()
        res = app.get(url_preprint_identifier)
        assert res.status_code == 200

    def test_preprint_identifier_list_permissions_private(
            self, app, user, data_preprint_identifier, preprint, url_preprint_identifier):
        # Private: same access matrix as unpublished.
        preprint.is_public = False
        preprint.save()

        # test_unpublished_preprint_identifier_unauthenticated
        res = app.get(url_preprint_identifier, expect_errors=True)
        assert res.status_code == 401

        # test_unpublished_preprint_identifier_noncontrib_authenticated
        non_contrib = AuthUserFactory()
        res = app.get(url_preprint_identifier, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_preprint_identifier_admin_authenticated
        res = app.get(url_preprint_identifier, auth=user.auth)
        assert res.status_code == 200

        # test_unpublished_preprint_identifier_readcontrib_authenticated
        read_user = AuthUserFactory()
        preprint.add_contributor(read_user, READ, save=True)
        res = app.get(url_preprint_identifier, auth=read_user.auth, expect_errors=True)
        assert res.status_code == 200

        # test_published_preprint_identifier_unauthenticated
        preprint.set_published(True, Auth(user))
        preprint.save()
        res = app.get(url_preprint_identifier)
        assert res.status_code == 200

    def test_preprint_identifier_list_permissions_deleted(
            self, app, user, data_preprint_identifier, preprint, url_preprint_identifier):
        # Deleted: 404 for everyone, including contributors.
        preprint.deleted = timezone.now()
        preprint.save()

        # test_unpublished_preprint_identifier_unauthenticated
        res = app.get(url_preprint_identifier, expect_errors=True)
        assert res.status_code == 404

        # test_unpublished_preprint_identifier_noncontrib_authenticated
        non_contrib = AuthUserFactory()
        res = app.get(url_preprint_identifier, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 404

        # test_unpublished_preprint_identifier_admin_authenticated
        res = app.get(url_preprint_identifier, auth=user.auth, expect_errors=True)
        assert res.status_code == 404

        # test_unpublished_preprint_identifier_readcontrib_authenticated
        read_user = AuthUserFactory()
        preprint.add_contributor(read_user, READ, save=True)
        res = app.get(url_preprint_identifier, auth=read_user.auth, expect_errors=True)
        assert res.status_code == 404

        # test_published_preprint_identifier_unauthenticated
        res = app.get(url_preprint_identifier, expect_errors=True)
        assert res.status_code == 404

    def test_preprint_identifier_list_permissions_abandoned(
            self, app, user, data_preprint_identifier, preprint, url_preprint_identifier):
        # Abandoned (initial machine state): only the admin can see it.
        preprint.machine_state = DefaultStates.INITIAL.value
        preprint.save()

        # test_unpublished_preprint_identifier_unauthenticated
        res = app.get(url_preprint_identifier, expect_errors=True)
        assert res.status_code == 401

        # test_unpublished_preprint_identifier_noncontrib_authenticated
        non_contrib = AuthUserFactory()
        res = app.get(url_preprint_identifier, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_preprint_identifier_readcontrib_authenticated
        read_user = AuthUserFactory()
        preprint.add_contributor(read_user, READ, save=True)
        res = app.get(url_preprint_identifier, auth=read_user.auth, expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_preprint_identifier_admin_authenticated
        res = app.get(url_preprint_identifier, auth=user.auth)
        assert res.status_code == 200
@pytest.mark.django_db
class TestNodeIdentifierCreate:
    """POST /nodes/{id}/identifiers/ — minting DOIs via the mocked DataCite API."""

    @pytest.fixture()
    def resource(self, user):
        return NodeFactory(creator=user, is_public=True)

    @pytest.fixture()
    def write_contributor(self, resource):
        user = AuthUserFactory()
        resource.add_contributor(user, WRITE)
        resource.save()
        return user

    @pytest.fixture()
    def read_contributor(self, resource):
        user = AuthUserFactory()
        resource.add_contributor(user, READ)
        resource.save()
        return user

    @pytest.fixture()
    def identifier_url(self, resource):
        # Derives 'nodes' or 'registrations' from the resource class name, so
        # the subclass below reuses this fixture unchanged.
        return '/{}{}s/{}/identifiers/'.format(API_BASE, resource.__class__.__name__.lower(), resource._id)

    @pytest.fixture()
    def identifier_payload(self):
        return {
            'data': {
                'type': 'identifiers',
                'attributes': {
                    'category': 'doi'
                }
            }
        }

    @pytest.fixture()
    def ark_payload(self):
        # Non-DOI category: used to assert only DOIs may be minted.
        return {
            'data': {
                'type': 'identifiers',
                'attributes': {
                    'category': 'ark'
                }
            }
        }

    @pytest.fixture()
    def client(self, resource):
        return DataCiteClient(resource)

    @responses.activate
    def test_create_identifier(self, app, resource, client, identifier_url, identifier_payload, user,
                               write_contributor, read_contributor, ark_payload):
        # Stub both DataCite endpoints (metadata upload and DOI mint).
        responses.add(
            responses.Response(
                responses.POST,
                f'{settings.DATACITE_URL}/metadata/{client.build_doi(resource)}',
                body='OK (10.70102/FK2osf.io/dp438)',
                status=201,
            )
        )
        responses.add(
            responses.Response(
                responses.POST,
                f'{settings.DATACITE_URL}/doi',
                body='OK (10.70102/FK2osf.io/dp438)',
                status=201,
            )
        )

        # Can only mint DOI's
        res = app.post_json_api(identifier_url, ark_payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'You can only mint a DOI, not a different type of identifier.'

        res = app.post_json_api(identifier_url, identifier_payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 201
        assert res.json['data']['attributes']['category'] == 'doi'
        assert res.json['data']['attributes']['value'] == resource.get_identifier_value('doi')
        assert res.json['data']['id'] == resource.identifiers.first()._id
        assert res.json['data']['type'] == 'identifiers'
        assert resource.logs.first().action == 'external_ids_added'
        assert resource.identifiers.count() == 1

        res = app.post_json_api(identifier_url, identifier_payload, auth=user.auth, expect_errors=True)
        resource.reload()

        # cannot request a DOI when one already exists
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'A DOI already exists for this resource.'

        # write contributor cannot create identifier
        res = app.post_json_api(identifier_url, identifier_payload, auth=write_contributor.auth, expect_errors=True)
        assert res.status_code == 403

        # read contributor cannot create identifier
        res = app.post_json_api(identifier_url, identifier_payload, auth=read_contributor.auth, expect_errors=True)
        assert res.status_code == 403

        # cannot request a DOI for a private resource
        resource.is_public = False
        resource.save()
        res = app.post_json_api(identifier_url, identifier_payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 403
@pytest.mark.django_db
class TestRegistrationIdentifierCreate(TestNodeIdentifierCreate):
    """Reruns the node DOI-creation suite against a registration resource,
    plus a withdrawn-registration check."""

    @pytest.fixture()
    def resource(self, user):
        # Overrides the parent fixture so every inherited test targets a registration.
        return RegistrationFactory(creator=user, is_public=True)

    @pytest.fixture()
    def retraction(self, resource, user):
        return WithdrawnRegistrationFactory(registration=resource)

    def test_create_doi_for_withdrawn_registration(self, app, user, retraction, identifier_url, identifier_payload):
        # Minting on a withdrawn registration is forbidden even for the admin.
        res = app.post_json_api(identifier_url, identifier_payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 403
| 39.324818 | 116 | 0.686636 |
02e506e15e4d3be220173e04bdf1885da41524d3 | 102 | py | Python | CodingBat/Python/List-1 > max_end3.py | JLJTECH/TutorialTesting | f2dbbd49a86b3b086d0fc156ac3369fb74727f86 | [
"MIT"
] | null | null | null | CodingBat/Python/List-1 > max_end3.py | JLJTECH/TutorialTesting | f2dbbd49a86b3b086d0fc156ac3369fb74727f86 | [
"MIT"
] | null | null | null | CodingBat/Python/List-1 > max_end3.py | JLJTECH/TutorialTesting | f2dbbd49a86b3b086d0fc156ac3369fb74727f86 | [
"MIT"
def max_end3(nums):
    """Return a list of three copies of the larger of the first and last
    elements of `nums` (CodingBat List-1 "max_end3").

    Assumes `nums` is non-empty; like the original, an empty list raises
    IndexError. Uses builtin max() instead of a manual comparison branch.
    """
    largest_end = max(nums[0], nums[-1])
    return [largest_end] * 3
fead5b9f97689452bd9790e99f9ae02c215bdf74 | 7,296 | py | Python | experiment.py | zackchase/icu_rnn | 766ee8355f74b8a43c4fa78d2c0ebfde24ce4e49 | [
"MIT"
] | 43 | 2015-12-22T07:28:16.000Z | 2021-04-09T11:37:49.000Z | experiment.py | jiangzhongkai/icu_rnn | 766ee8355f74b8a43c4fa78d2c0ebfde24ce4e49 | [
"MIT"
] | 1 | 2016-11-24T02:36:33.000Z | 2017-11-15T22:18:49.000Z | experiment.py | jiangzhongkai/icu_rnn | 766ee8355f74b8a43c4fa78d2c0ebfde24ce4e49 | [
"MIT"
] | 27 | 2016-03-22T14:42:57.000Z | 2020-01-22T04:56:59.000Z | import cPickle
import numpy as np
import os
import re
import sys
from progressbar import ProgressBar
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from evaluation import compute_micro_evaluations
from lib import floatX
from m2m_rnn import M2M_RNN
def train_linear(X, Y, splits, model_config, results_dir, best_k=10, validation_score='f1',
                 threshold_score='f1', threshold_criterion='zack', fn_prefix='', label_idx=None):
    """Grid-search C for a one-vs-rest logistic regression, keep the model
    with the best validation `validation_score`, and pickle it under
    `results_dir`.

    :param X: feature matrix indexable by the index arrays in `splits`.
    :param Y: binary label matrix (n_samples x n_labels).
    :param splits: (train_idx, valid_idx) index arrays.
    :param model_config: dict; the chosen C is written back into it.
    :param label_idx: label columns to evaluate on (default: all).
    :return: (best_model, model_config)
    """
    label_idx = np.arange(Y.shape[1]) if label_idx is None else label_idx
    best_perf = None
    best_C = None
    best_model = None
    for C in np.logspace(-3,3, num=20):
        # FIX: the old message said "Ridge Regression" but the estimator is a
        # one-vs-rest LogisticRegression.
        sys.stdout.write('Training Logistic Regression with C={0}...'.format(C))
        sys.stdout.flush()
        model = OneVsRestClassifier(LogisticRegression(C=C))
        try:
            model.fit(X[splits[0]], Y[splits[0]])
        except KeyboardInterrupt:
            # Allow the grid search to be cut short; the best model so far wins.
            sys.stdout.write('training interrupted...')
            break
        # (removed a redundant `except: raise` clause -- unhandled exceptions
        # propagate on their own)
        Yp = model.predict_proba(X[splits[1]])
        perf = compute_micro_evaluations(Y[splits[1]][:,label_idx], Yp[:,label_idx], k=best_k,
                                         threshold_score=threshold_score, criterion=threshold_criterion)
        sys.stdout.write(' {0}={1:.4f}'.format(validation_score, perf[validation_score]))
        sys.stdout.flush()
        if best_perf is None or perf[validation_score] > best_perf[validation_score]:
            best_perf = perf
            best_model = model
            best_C = C
            sys.stdout.write(' *BEST')
        sys.stdout.write('\n')
    model_config['C'] = best_C
    # FIX: close the pickle file handle deterministically instead of leaking it.
    with open(os.path.join(results_dir, fn_prefix + '-model.pkl'), 'wb') as model_file:
        cPickle.dump(best_model, model_file)
    return best_model, model_config
def train_lstm(X, Y, splits, model_config, results_dir, best_k=10, validation_score='f1',
               threshold_score='f1', threshold_criterion='zack', fn_prefix='', label_idx=None):
    """Train an M2M_RNN on per-episode sequences X with episode labels Y,
    checkpointing every epoch and restoring the epoch with the best
    validation `validation_score` at the end.

    Returns (model, model_config, perf_hist) where perf_hist stacks the
    per-epoch validation/training performance rows.
    """
    Y = floatX(Y)
    label_idx = np.arange(Y.shape[1]) if label_idx is None else label_idx

    # Defaults; anything supplied in model_config overrides them.
    config = { 'nb_hidden': [64, 64], 'nb_epochs': 1000,
               'alpha': 0.0, 'lambda2': 0.0,
               'clip_at': 0.0, 'scale_norm': 0.0,
               'starting_eta': 32.0, 'minimum_eta': 1.0,
               'half_eta_every': 10 }
    config.update(model_config)
    P = config['nb_input'] = X[0].shape[1]
    K = config['nb_output'] = Y.shape[1]
    config['results_dir'] = results_dir

    print 'LSTM Model Configuration\n----------'
    for k in sorted(config):
        print k, ':', config[k]
    print '----------', '\n'

    nb_hidden = config['nb_hidden']
    nb_epochs = config['nb_epochs']
    eta = config['starting_eta']
    min_eta = config['minimum_eta']
    half_every = config['half_eta_every']
    alpha = config['alpha']
    lambda2 = config['lambda2']
    clip_at = config['clip_at']
    scale_norm = config['scale_norm']

    model = M2M_RNN(num_input=P, num_hidden=nb_hidden, num_output=K, clip_at=clip_at, scale_norm=scale_norm)

    perf_hist = []
    best_perf = None
    best_epoch = 0
    train_idx = splits[0]
    valid_idx = splits[1]
    try:
        for epoch in range(1, nb_epochs+1):
            # Learning-rate schedule: halve every `half_every` epochs, floored at min_eta.
            if np.mod(epoch, half_every) == 0:
                eta = np.max([eta/2., min_eta])
            np.random.shuffle(train_idx)
            running_total = 0.
            it = 0
            for idx in train_idx:
                it += 1
                # Target replication: the episode label is tiled across every time step.
                cost, last_step_cost = model.train(X[idx], np.tile(Y[idx], (len(X[idx]), 1)), eta, alpha, lambda2)
                cost = float(cost)
                last_step_cost = float(last_step_cost)
                running_total += last_step_cost
                running_avg = running_total / float(it)
                sys.stdout.write('\repoch {5} (eta={6:.2f}): {0:5d}/{1:5d}, cost: {2:.4f}, last: {3:.4f}, avg: {4:.4f}'.format(it, train_idx.shape[0], cost,
                                                                                                                              last_step_cost, running_avg,
                                                                                                                              epoch, eta))

            ## Save model to file ##
            sys.stdout.write('...saving...')
            sys.stdout.flush()
            model.save_model_params_dumb(os.path.join(results_dir, fn_prefix + '-model-epoch{0:04d}.pkl.gz'.format(epoch)))
            sys.stdout.write('\n')

            ## Get validation set performance ##
            sys.stdout.write('epoch {0}: avg: {1:.4f}'.format(epoch, running_avg))
            sys.stdout.flush()
            Yp = np.vstack([ model.predict(X[idx]) for idx in valid_idx ])
            perf = compute_micro_evaluations(Y[valid_idx][:,label_idx], Yp[:,label_idx], k=10, threshold_score=threshold_score, criterion=threshold_criterion)
            sys.stdout.write(' valid: {0:.4f} {1:.4f} {2:.4f}'.format(perf['auroc'], perf['auprc'], perf['f1']))
            if best_perf is None or perf[validation_score] > best_perf[validation_score]:
                best_perf = perf
                best_epoch = epoch
                sys.stdout.write(' *BEST')

            perfs = [ perf ]

            ## Get training set performance every 5 epochs ##
            if np.mod(epoch, 5) == 0:
                Yp = np.vstack([ model.predict(X[idx]) for idx in train_idx ])
                perf = compute_micro_evaluations(Y[train_idx][:,label_idx], Yp[:,label_idx], k=10, threshold_score=threshold_score, criterion=threshold_criterion)
                sys.stdout.write(' train: {0:.4f} {1:.4f} {2:.4f}'.format(perf['auroc'], perf['auprc'], perf['f1']))
            else:
                # Placeholder row (NaNs) so every epoch contributes the same shape.
                perf = np.zeros(perfs[0].shape) + np.nan
            perfs.append(perf)
            perf_hist.append(np.vstack(perfs))

            sys.stdout.write('\n')
    except KeyboardInterrupt:
        # Keep the last checkpoint on interruption so training can resume.
        print 'training interrupted'
        model.save_model_params_dumb(os.path.join(results_dir, fn_prefix + '-model-epoch{0:04d}.pkl.gz'.format(epoch)))
    except:
        raise

    # Restore the best-validation epoch and re-save it under a stable name.
    model.load_model_params_dumb(os.path.join(results_dir, fn_prefix + '-model-epoch{0:04d}.pkl.gz'.format(best_epoch)))
    model.save_model_params_dumb(os.path.join(results_dir, fn_prefix + '-model-best.pkl.gz'))

    perf_hist = np.dstack(perf_hist) if len(perf_hist) > 0 else np.array([])
    np.savez(os.path.join(results_dir, fn_prefix + 'performance-history.npz'), perf_hist=perf_hist, best_epoch=best_epoch)
    return model, model_config, perf_hist
def load_model(model_type, model_config, model_fn):
    """Load a previously trained model from disk.

    :param model_type: 'lstm' loads an M2M_RNN checkpoint; anything else is
        treated as a pickled estimator.
    :param model_config: dict of hyperparameters; for 'lstm' it must supply at
        least 'nb_input' and 'nb_output' (other keys default below).
    :param model_fn: path to the saved model file.
    :return: the loaded model object.
    """
    sys.stdout.write('Loading saved ' + model_type + ' from file: ' + model_fn + '...')
    sys.stdout.flush()
    if model_type == 'lstm':
        config = { 'nb_hidden': 64, 'nb_epochs': 1000,
                   'eta': 100., 'alpha': 0.0, 'lambda2': 0.000001,
                   'clip_at': 0.0, 'scale_norm': 0.0}
        config.update(model_config)
        model = M2M_RNN(num_input=config['nb_input'], num_hidden=config['nb_hidden'], num_output=config['nb_output'],
                        clip_at=config['clip_at'], scale_norm=config['scale_norm'])
        model.load_model_params_dumb(model_fn)
    else:
        # BUG FIX: cPickle.load() takes an open file object, not a path string;
        # the old code passed the filename and would raise a TypeError.
        with open(model_fn, 'rb') as model_file:
            model = cPickle.load(model_file)
    sys.stdout.write('DONE!\n')
    return model
| 44.218182 | 162 | 0.590598 |
b105cb386d09d1b3b75ac779ad0d713362cc6eab | 1,223 | py | Python | generateattcks/generateattcks/c2matrix.py | frbor/pyattck | 36339ce67b7e4ddeba1a7832892cf08936f7e99d | [
"MIT"
] | 377 | 2019-07-11T20:26:19.000Z | 2022-03-30T00:35:19.000Z | generateattcks/generateattcks/c2matrix.py | frbor/pyattck | 36339ce67b7e4ddeba1a7832892cf08936f7e99d | [
"MIT"
] | 68 | 2019-07-22T15:24:29.000Z | 2022-02-25T10:14:48.000Z | generateattcks/generateattcks/c2matrix.py | frbor/pyattck | 36339ce67b7e4ddeba1a7832892cf08936f7e99d | [
"MIT"
] | 78 | 2019-06-14T05:41:26.000Z | 2022-03-18T19:13:41.000Z | import csv
import requests
from .attacktemplate import AttackTemplate
from .base import Base
class C2Matrix(Base):
    """
    Data Source: https://www.thec2matrix.com/

    Authors:

    - [Jorge Orchilles](https://twitter.com/jorgeorchilles)
    - [Bryson Bort](https://twitter.com/brysonbort)
    - [Adam Mashinchi](https://twitter.com/adam_mashinchi)

    Wraps the C2 Matrix spreadsheet — a data set describing Command &
    Control (C2) tools/software — and converts each row into a record on an
    AttackTemplate.
    """

    _URL = 'https://docs.google.com/spreadsheet/ccc?key=1b4mUxa6cDQuTV2BPC6aA-GR4zGZi0ooPYtBe4IgPsSc&output=csv'
    OFFSET = 1

    def get(self):
        """Download the spreadsheet as CSV and return the parsed template data."""
        raw_csv = requests.get(self._URL).text
        return self._parse(raw_csv)

    def _parse(self, data):
        """Turn the raw CSV text into template entries.

        Row 1 is skipped, row 2 supplies the column headers, and every later
        row becomes one C2 record keyed by its 'Name' column.
        """
        template = AttackTemplate()
        headers = None
        for row_number, row in enumerate(csv.reader(data.splitlines()), start=1):
            if row_number == 1:
                continue
            if row_number == 2:
                headers = row
                continue
            record = dict(zip(headers, row))
            template.add_c2_data(record['Name'], record)
        return template.get()
68ce7c7c945a2cc5691805416002dfebee341f4c | 6,758 | py | Python | video_server/views/room.py | jgeneaguilar/video_server | da16bc287a5c32638195e1efe4626ecf4d324d88 | [
"MIT"
] | null | null | null | video_server/views/room.py | jgeneaguilar/video_server | da16bc287a5c32638195e1efe4626ecf4d324d88 | [
"MIT"
] | null | null | null | video_server/views/room.py | jgeneaguilar/video_server | da16bc287a5c32638195e1efe4626ecf4d324d88 | [
"MIT"
] | null | null | null | from pyramid.view import view_config
from pyramid.httpexceptions import HTTPForbidden, HTTPNotFound, HTTPBadRequest
from paginate_sqlalchemy import SqlalchemyOrmPage
from sqlalchemy import desc, asc
from ..models import Room, RoomMembership, User
from ..services import encoding
# Room public views
from ..services.helpers import to_int
@view_config(
    route_name="rooms", request_method="GET", renderer="json",
)
def get_rooms(request):
    """Retrieve a list of rooms.

    Query params: page, limit (paging); name (ilike substring), host_id,
    capacity (filters); sort (Room column, default created_at), sort_order
    ('desc' for descending, anything else ascending).
    """
    page = to_int(request.GET.get("page"), 1)
    limit = to_int(request.GET.get("limit"), 10)
    name = request.GET.get("name")
    host_id = request.GET.get("host_id")
    capacity = request.GET.get("capacity")
    sort = request.GET.get("sort", "created_at")
    sort_order = request.GET.get("sort_order")
    session = request.dbsession

    query = session.query(Room)

    # Filter
    if host_id is not None:
        query = query.filter_by(host_id=host_id)
    if capacity is not None:
        query = query.filter_by(capacity=capacity)
    if name is not None:
        # case-insensitive substring match
        query = query.filter(Room.name.ilike("%" + name + "%"))

    # Sorting -- an unknown column name makes getattr raise AttributeError,
    # which is surfaced as a 400.
    try:
        if sort is not None:
            order = desc if sort_order == "desc" else asc
            query = query.order_by(order(getattr(Room, sort)))
    except AttributeError:
        raise HTTPBadRequest("Invalid sort params")

    # Paging (note: `page` is rebound from the page number to the page object)
    page = SqlalchemyOrmPage(query, page=page, items_per_page=limit, db_session=session)
    rooms = [encoding.encode_room(room) for room in page.items]
    return {"data": rooms, "total": page.item_count}
@view_config(
    route_name="room", request_method="GET", renderer="json",
)
def get_room_by_id(request):
    """Retrieve information about a room.

    Return:
        dict of id(uuid), name(string), host_id(uuid), capacity(int), members(list)
    """
    room_id = request.matchdict["room_id"]
    room = request.dbsession.query(Room).filter(Room.id == room_id).first()
    # Guard clause: unknown room id -> 404.
    if room is None:
        raise HTTPNotFound()
    member_list = [encoding.encode_user(member) for member in room.users]
    return {"data": encoding.encode_room(room, members=member_list)}
# Room auth views
@view_config(
    route_name="rooms", request_method="POST", renderer="json", permission="auth",
)
def create_room(request):
    """Create a room for an authenticated user and set user as the host.

    Params:
        name: string
        capacity: int (optional; must be within [2, 50] when given)
    Return:
        dict of id(uuid), name(string), host_id(uuid), capacity(int)
    Raises:
        HTTPBadRequest: if capacity is outside the allowed range.
    """
    user_id = request.authenticated_userid
    name = request.json_body.get("name")
    capacity = request.json_body.get("capacity")  # 5 as default
    if capacity is not None:
        if capacity < 2:
            raise HTTPBadRequest("Room capacity must be at least 2.")
        # BUG FIX: the message states the maximum is 50, but the old check
        # (`capacity >= 50`) rejected exactly 50; accept capacity == 50.
        if capacity > 50:
            raise HTTPBadRequest("Maximum room capacity is 50.")
    session = request.dbsession
    new_room = Room(name=name, capacity=capacity, host_id=user_id)
    session.add(new_room)
    session.flush()  # flush so new_room.id is available for the membership row

    # add host as member
    new_member = RoomMembership(user_id=user_id, room_id=new_room.id)
    session.add(new_member)
    session.flush()

    return {"data": encoding.encode_room(new_room)}
@view_config(
    route_name="host", request_method="PATCH", renderer="json", permission="auth",
)
def change_host(request):
    """Change the room host. Current user must be a host.

    Params:
        new_host_id: string (uuid) -- must already be a member of the room.
    Return:
        dict of id(uuid), name(string), host_id(uuid), capacity(int)
    Raises:
        HTTPNotFound: missing/invalid new_host_id or unknown room.
        HTTPForbidden: caller is not the host, or new host == current host.
    """
    user_id = request.authenticated_userid
    room_id = request.matchdict["room_id"]
    # NOTE(review): this stores the parsed body on the request object itself;
    # a plain local variable would suffice -- confirm nothing downstream reads it.
    request.json = request.json_body or {}
    new_host_id = request.json.get("new_host_id")

    if new_host_id is None:
        raise HTTPNotFound("Please enter a valid new host id.")

    session = request.dbsession
    room = session.query(Room).filter_by(id=room_id).first()
    # check if new_host is a member of the room
    new_host_membership_id = (
        session.query(RoomMembership.user_id)
        .filter(
            RoomMembership.user_id == new_host_id, RoomMembership.room_id == room_id
        )
        .scalar()
    )

    if room is None:
        raise HTTPNotFound("The room cannot be found.")
    if new_host_membership_id is None:
        raise HTTPNotFound("Please enter a valid user as host.")
    if new_host_membership_id is not None and room is not None:
        if user_id == new_host_membership_id:
            raise HTTPForbidden("Current user is already the host.")
        # Only the current host may transfer hosting rights.
        if user_id == str(room.host_id):
            room.host_id = new_host_membership_id
            return {"data": encoding.encode_room(room)}
        else:
            raise HTTPForbidden("Current user is not the host.")
@view_config(
    route_name="room_members",
    request_method="POST",
    renderer="json",
    permission="auth",
)
def join_room(request):
    """Enable the user to join a room if still within room capacity and if user is not already a member."""
    user_id = request.authenticated_userid
    room_id = request.matchdict["room_id"]
    session = request.dbsession
    # ids of current members, stringified for comparison against user_id
    members = [
        str(i[0])
        for i in session.query(User.id)
        .join(RoomMembership)
        .filter(RoomMembership.room_id == room_id)
        .all()
    ]
    room_capacity = session.query(Room.capacity).filter_by(id=room_id).scalar()

    # NOTE(review): the membership/capacity check and the insert are not
    # atomic; two concurrent joins could overfill the room -- confirm whether
    # a DB-level constraint covers this.
    if user_id not in members and len(members) < room_capacity:
        new_member = RoomMembership(user_id=user_id, room_id=room_id)
        session.add(new_member)
        session.flush()
        room = session.query(Room).filter_by(id=room_id).first()
        room_data = encoding.encode_room(
            room, members=[encoding.encode_user(user) for user in room.users]
        )
        return {"data": room_data}
    elif user_id in members:
        raise HTTPForbidden("You have already joined the room.")
    elif not len(members) < room_capacity:
        raise HTTPForbidden("The room is already full.")
@view_config(
    route_name="room_members",
    request_method="DELETE",
    renderer="json",
    permission="auth",
)
def leave_room(request):
    """Delete the user membership record."""
    session = request.dbsession
    # Look up the membership row linking the authenticated user to this room.
    membership = (
        session.query(RoomMembership)
        .filter(
            RoomMembership.room_id == request.matchdict["room_id"],
            RoomMembership.user_id == request.authenticated_userid,
        )
        .first()
    )
    if membership is None:
        raise HTTPNotFound("You have not joined this room.")
    session.delete(membership)
    return "Success"
| 31.877358 | 107 | 0.660846 |
fa196f1d56443d7d357af0244cf5a7b99a7a07c5 | 14,740 | py | Python | salt/returners/local_cache.py | jwagoner0/salt | ece5b8867300143f7a2b859d55200b1800f28bf2 | [
"Apache-2.0"
] | null | null | null | salt/returners/local_cache.py | jwagoner0/salt | ece5b8867300143f7a2b859d55200b1800f28bf2 | [
"Apache-2.0"
] | null | null | null | salt/returners/local_cache.py | jwagoner0/salt | ece5b8867300143f7a2b859d55200b1800f28bf2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Return data to local job cache
'''
from __future__ import absolute_import
# Import python libs
import errno
import glob
import logging
import os
import shutil
import time
import hashlib
import bisect
# Import salt libs
import salt.payload
import salt.utils
import salt.utils.files
import salt.utils.jid
import salt.exceptions
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
# Filenames used inside each per-job cache directory.
# load is the published job
LOAD_P = '.load.p'
# the list of minions that the job is targeted to (best effort match on the
# master side)
MINIONS_P = '.minions.p'
# format string for minion lists forwarded from syndic masters (the placeholder
# will be replaced with the syndic master's id)
SYNDIC_MINIONS_P = '.minions.{0}.p'
# return is the "return" from the minion data
RETURN_P = 'return.p'
# out is the "out" from the minion data
OUT_P = 'out.p'
# endtime is the end time for a job, not stored as msgpack
ENDTIME = 'endtime'
def _job_dir():
    '''
    Return the root directory of the master's job cache
    (``<cachedir>/jobs``).
    '''
    return os.path.join(__opts__['cachedir'], 'jobs')
def _jid_dir(jid):
    '''
    Return the cache directory for the given job id.

    The jid is hashed with the configured ``hash_type``; the hex digest is
    split into a two-character prefix directory plus a remainder directory.
    '''
    hash_func = getattr(hashlib, __opts__['hash_type'])
    if six.PY3:
        digest = hash_func(jid.encode('utf-8')).hexdigest()
    else:
        digest = hash_func(str(jid)).hexdigest()
    return os.path.join(_job_dir(), digest[:2], digest[2:])
def _walk_through(job_dir):
    '''
    Walk through the jid dir and yield ``(jid, job, t_path, final)`` for
    every cached job found under ``job_dir``.
    '''
    serial = salt.payload.Serial(__opts__)
    # Layout is <job_dir>/<2-char hash prefix>/<hash remainder>/.load.p
    for top in os.listdir(job_dir):
        t_path = os.path.join(job_dir, top)
        for final in os.listdir(t_path):
            load_path = os.path.join(t_path, final, LOAD_P)
            # Skip directories without a serialized job payload
            if not os.path.isfile(load_path):
                continue
            job = serial.load(salt.utils.fopen(load_path, 'rb'))
            jid = job['jid']
            yield jid, job, t_path, final
#TODO: add to returner docs-- this is a new one
def prep_jid(nocache=False, passed_jid=None, recurse_count=0):
    '''
    Return a job id and prepare the job id directory.
    This is the function responsible for making sure jids don't collide (unless
    it is passed a jid).
    So do what you have to do to make sure that stays the case

    :param nocache: when True, drop a ``nocache`` marker file so returns for
        this job are not cached by ``returner()``.
    :param passed_jid: reuse this jid instead of generating a fresh one.
    :param recurse_count: internal retry counter; gives up after 5 attempts.
    '''
    if recurse_count >= 5:
        err = 'prep_jid could not store a jid after {0} tries.'.format(recurse_count)
        log.error(err)
        raise salt.exceptions.SaltCacheError(err)
    if passed_jid is None:  # this can be a None or an empty string.
        jid = salt.utils.jid.gen_jid()
    else:
        jid = passed_jid
    jid_dir_ = _jid_dir(jid)
    # Make sure we create the jid dir, otherwise someone else is using it,
    # meaning we need a new jid.
    if not os.path.isdir(jid_dir_):
        try:
            os.makedirs(jid_dir_)
        except OSError:
            # Collision (or transient FS error): retry with a fresh jid, but
            # only when the caller did not pin a specific jid.
            time.sleep(0.1)
            if passed_jid is None:
                return prep_jid(nocache=nocache, recurse_count=recurse_count+1)
    try:
        with salt.utils.fopen(os.path.join(jid_dir_, 'jid'), 'wb+') as fn_:
            if six.PY2:
                fn_.write(jid)
            else:
                fn_.write(bytes(jid, 'utf-8'))
        if nocache:
            # Empty marker file checked by returner() before caching data
            with salt.utils.fopen(os.path.join(jid_dir_, 'nocache'), 'wb+') as fn_:
                fn_.write(b'')
    except IOError:
        log.warning('Could not write out jid file for job {0}. Retrying.'.format(jid))
        time.sleep(0.1)
        return prep_jid(passed_jid=jid, nocache=nocache,
                        recurse_count=recurse_count+1)
    return jid
def returner(load):
    '''
    Return data to the local job cache

    ``load`` is the return payload from a minion; keys read here are
    ``jid``, ``id``, ``return`` and optionally ``out`` / ``nocache``.
    '''
    serial = salt.payload.Serial(__opts__)
    # if a minion is returning a standalone job, get a jobid
    if load['jid'] == 'req':
        load['jid'] = prep_jid(nocache=load.get('nocache', False))
    jid_dir = _jid_dir(load['jid'])
    # A 'nocache' marker written by prep_jid disables caching for this job
    if os.path.exists(os.path.join(jid_dir, 'nocache')):
        return
    hn_dir = os.path.join(jid_dir, load['id'])
    try:
        os.makedirs(hn_dir)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # Minion has already returned this jid and it should be dropped
            log.error(
                'An extra return was detected from minion {0}, please verify '
                'the minion, this could be a replay attack'.format(
                    load['id']
                )
            )
            return False
        elif err.errno == errno.ENOENT:
            log.error(
                'An inconsistency occurred, a job was received with a job id '
                'that is not present in the local cache: {jid}'.format(**load)
            )
            return False
        raise
    serial.dump(
        load['return'],
        # Use atomic open here to avoid the file being read before it's
        # completely written to. Refs #1935
        salt.utils.atomicfile.atomic_open(
            os.path.join(hn_dir, RETURN_P), 'w+b'
        )
    )
    if 'out' in load:
        serial.dump(
            load['out'],
            # Use atomic open here to avoid the file being read before
            # it's completely written to. Refs #1935
            salt.utils.atomicfile.atomic_open(
                os.path.join(hn_dir, OUT_P), 'w+b'
            )
        )
def save_load(jid, clear_load, minions=None, recurse_count=0):
    '''
    Save the load to the specified jid
    minions argument is to provide a pre-computed list of matched minions for
    the job, for cases when this function can't compute that list itself (such
    as for salt-ssh)

    :param recurse_count: internal retry counter; gives up after 5 attempts.
    '''
    if recurse_count >= 5:
        err = ('save_load could not write job cache file after {0} retries.'
               .format(recurse_count))
        log.error(err)
        raise salt.exceptions.SaltCacheError(err)
    jid_dir = _jid_dir(jid)
    serial = salt.payload.Serial(__opts__)
    # Save the invocation information
    try:
        if not os.path.exists(jid_dir):
            os.makedirs(jid_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # rarely, the directory can be already concurrently created between
            # the os.path.exists and the os.makedirs lines above
            pass
        else:
            raise
    try:
        serial.dump(
            clear_load,
            salt.utils.fopen(os.path.join(jid_dir, LOAD_P), 'w+b')
        )
    except IOError as exc:
        # Transient write failure: back off briefly and retry recursively
        log.warning(
            'Could not write job invocation cache file: %s', exc
        )
        time.sleep(0.1)
        return save_load(jid=jid, clear_load=clear_load,
                         recurse_count=recurse_count+1)
    # if you have a tgt, save that for the UI etc
    if 'tgt' in clear_load:
        if minions is None:
            ckminions = salt.utils.minions.CkMinions(__opts__)
            # Retrieve the minions list
            minions = ckminions.check_minions(
                clear_load['tgt'],
                clear_load.get('tgt_type', 'glob')
            )
        # save the minions to a cache so we can see in the UI
        save_minions(jid, minions)
def save_minions(jid, minions, syndic_id=None):
    '''
    Save/update the serialized list of minions for a given job

    When ``syndic_id`` is given, the list is written to a per-syndic file so
    lists forwarded by several syndic masters do not overwrite each other.
    '''
    log.debug(
        'Adding minions for job %s%s: %s',
        jid,
        ' from syndic master \'{0}\''.format(syndic_id) if syndic_id else '',
        minions
    )
    serial = salt.payload.Serial(__opts__)
    jid_dir = _jid_dir(jid)
    try:
        if not os.path.exists(jid_dir):
            os.makedirs(jid_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # rarely, the directory can be already concurrently created between
            # the os.path.exists and the os.makedirs lines above
            pass
        else:
            raise
    if syndic_id is not None:
        minions_path = os.path.join(
            jid_dir,
            SYNDIC_MINIONS_P.format(syndic_id)
        )
    else:
        minions_path = os.path.join(jid_dir, MINIONS_P)
    try:
        serial.dump(minions, salt.utils.fopen(minions_path, 'w+b'))
    except IOError as exc:
        # Best-effort: a failed minion-list write is logged, not fatal
        log.error(
            'Failed to write minion list {0} to job cache file {1}: {2}'
            .format(minions, minions_path, exc)
        )
def get_load(jid):
    '''
    Return the load data that marks a specified jid

    The returned dict also carries a sorted ``Minions`` list merged from the
    master's own minion file and any syndic-forwarded minion files.
    '''
    jid_dir = _jid_dir(jid)
    load_fn = os.path.join(jid_dir, LOAD_P)
    if not os.path.exists(jid_dir) or not os.path.exists(load_fn):
        return {}
    serial = salt.payload.Serial(__opts__)
    ret = serial.load(salt.utils.fopen(os.path.join(jid_dir, LOAD_P), 'rb'))
    minions_cache = [os.path.join(jid_dir, MINIONS_P)]
    # Also pick up minion lists forwarded by syndic masters
    minions_cache.extend(
        glob.glob(os.path.join(jid_dir, SYNDIC_MINIONS_P.format('*')))
    )
    all_minions = set()
    for minions_path in minions_cache:
        log.debug('Reading minion list from %s', minions_path)
        try:
            all_minions.update(
                serial.load(salt.utils.fopen(minions_path, 'rb'))
            )
        except IOError as exc:
            salt.utils.files.process_read_exception(exc, minions_path)
    if all_minions:
        ret['Minions'] = sorted(all_minions)
    return ret
def get_jid(jid):
    '''
    Return the information returned when the specified job id was executed

    The result maps each minion id to a dict with its ``return`` data and,
    when present, its ``out`` (outputter) data.
    '''
    jid_dir = _jid_dir(jid)
    serial = salt.payload.Serial(__opts__)
    ret = {}
    # Check to see if the jid is real, if not return the empty dict
    if not os.path.isdir(jid_dir):
        return ret
    for fn_ in os.listdir(jid_dir):
        if fn_.startswith('.'):
            continue
        if fn_ not in ret:
            retp = os.path.join(jid_dir, fn_, RETURN_P)
            outp = os.path.join(jid_dir, fn_, OUT_P)
            if not os.path.isfile(retp):
                continue
            # Retry until the return deserializes, to tolerate reading a file
            # that is still being written.
            # NOTE(review): a persistently unreadable file makes this loop
            # spin forever — only 'Permission denied' errors escape it.
            while fn_ not in ret:
                try:
                    ret_data = serial.load(
                        salt.utils.fopen(retp, 'rb'))
                    ret[fn_] = {'return': ret_data}
                    if os.path.isfile(outp):
                        ret[fn_]['out'] = serial.load(
                            salt.utils.fopen(outp, 'rb'))
                except Exception as exc:
                    if 'Permission denied:' in str(exc):
                        raise
    return ret
def get_jids():
    '''
    Return a dict mapping every cached job id to its job information.
    '''
    store_endtime = __opts__.get('job_cache_store_endtime')
    jobs = {}
    for jid, load, _, _ in _walk_through(_job_dir()):
        entry = salt.utils.jid.format_jid_instance(jid, load)
        if store_endtime:
            endtime = get_endtime(jid)
            if endtime:
                entry['EndTime'] = endtime
        jobs[jid] = entry
    return jobs
def get_jids_filter(count, filter_find_job=True):
    '''
    Return a list of all jobs information filtered by the given criteria.
    :param int count: show not more than the count of most recent jobs
    :param bool filter_find_job: filter out 'saltutil.find_job' jobs
    '''
    keys = []
    ret = []
    # ``keys`` is kept sorted by jid (jids sort chronologically), so the
    # oldest retained entry is always at index 0 and can be evicted cheaply.
    for jid, job, _, _ in _walk_through(_job_dir()):
        job = salt.utils.jid.format_jid_instance_ext(jid, job)
        if filter_find_job and job['Function'] == 'saltutil.find_job':
            continue
        i = bisect.bisect(keys, jid)
        # Already holding ``count`` newer jobs; skip jobs older than all kept
        if len(keys) == count and i == 0:
            continue
        keys.insert(i, jid)
        ret.insert(i, job)
        # Evict the oldest entry once the window exceeds ``count``
        if len(keys) > count:
            del keys[0]
            del ret[0]
    return ret
def clean_old_jobs():
    '''
    Clean out the old jobs from the job cache

    Jobs older than ``keep_jobs`` hours are removed; a ``keep_jobs`` of 0
    disables cleaning entirely.
    '''
    if __opts__['keep_jobs'] != 0:
        cur = time.time()
        jid_root = _job_dir()
        if not os.path.exists(jid_root):
            return
        # Keep track of any empty t_path dirs that need to be removed later
        dirs_to_remove = set()
        for top in os.listdir(jid_root):
            t_path = os.path.join(jid_root, top)
            if not os.path.exists(t_path):
                continue
            # Check if there are any stray/empty JID t_path dirs
            t_path_dirs = os.listdir(t_path)
            if not t_path_dirs and t_path not in dirs_to_remove:
                dirs_to_remove.add(t_path)
                continue
            for final in t_path_dirs:
                f_path = os.path.join(t_path, final)
                jid_file = os.path.join(f_path, 'jid')
                if not os.path.isfile(jid_file):
                    # No jid file means corrupted cache entry, scrub it
                    # by removing the entire t_path directory
                    shutil.rmtree(t_path)
                else:
                    # Age is judged from the jid file's change time
                    jid_ctime = os.stat(jid_file).st_ctime
                    hours_difference = (cur - jid_ctime) / 3600.0
                    if hours_difference > __opts__['keep_jobs']:
                        # Remove the entire t_path from the original JID dir
                        shutil.rmtree(t_path)
        # Remove empty JID dirs from job cache, if they're old enough.
        # JID dirs may be empty either from a previous cache-clean with the bug
        # Listed in #29286 still present, or the JID dir was only recently made
        # And the jid file hasn't been created yet.
        if dirs_to_remove:
            for t_path in dirs_to_remove:
                # Checking the time again prevents a possible race condition where
                # t_path JID dirs were created, but not yet populated by a jid file.
                t_path_ctime = os.stat(t_path).st_ctime
                hours_difference = (cur - t_path_ctime) / 3600.0
                if hours_difference > __opts__['keep_jobs']:
                    shutil.rmtree(t_path)
def update_endtime(jid, time):
    '''
    Update (or store) the end time for a given job
    Endtime is stored as a plain text string

    :param jid: job id to record the end time for
    :param time: end time as a plain text string.
        NOTE(review): this parameter shadows the ``time`` module inside this
        function; kept as-is for interface compatibility.
    '''
    jid_dir = _jid_dir(jid)
    try:
        if not os.path.exists(jid_dir):
            os.makedirs(jid_dir)
        with salt.utils.fopen(os.path.join(jid_dir, ENDTIME), 'w') as etfile:
            etfile.write(time)
    except IOError as exc:
        log.warning('Could not write job invocation cache file: {0}'.format(exc))
def get_endtime(jid):
    '''
    Retrieve the stored endtime for a given job
    Returns False if no endtime is present
    '''
    etpath = os.path.join(_jid_dir(jid), ENDTIME)
    if not os.path.exists(etpath):
        return False
    with salt.utils.fopen(etpath, 'r') as etfile:
        return etfile.read().strip('\n')
| 30.966387 | 88 | 0.584464 |
b3cb72ebd5e4d0b7ea4d99cd954815fcaf3edadb | 470 | py | Python | Python code/lbp_function.py | AlessandroFornasier/Finger-veins-based-recognition-system-PYTHON | 6c48c505b17c02fce41095e38b946f29161b132a | [
"MIT"
] | 4 | 2019-07-02T03:57:55.000Z | 2022-03-18T09:06:40.000Z | Python code/lbp_function.py | AlessandroFornasier/Finger-veins-based-recognition-system-PYTHON | 6c48c505b17c02fce41095e38b946f29161b132a | [
"MIT"
] | null | null | null | Python code/lbp_function.py | AlessandroFornasier/Finger-veins-based-recognition-system-PYTHON | 6c48c505b17c02fce41095e38b946f29161b132a | [
"MIT"
] | 3 | 2019-09-09T04:02:29.000Z | 2022-01-12T13:13:44.000Z | import numpy as np
def lbp(I):
    """Compute an LBP-style code map for a binary (0/255) image.

    For each interior pixel, the 3x3 neighbourhood is binarised (``// 255``)
    and the eight surrounding positions are folded into a single byte using
    a fixed weight mask, giving one code per interior pixel.

    :param I: 2-D array whose pixels are 0 or 255.
    :return: ``(r-2, c-2)`` uint32 array of neighbourhood codes.

    Note: the original implementation iterated ``range(2, r-1)`` with a
    window ``I[j-2:j+1]``, which skipped the last valid 3x3 window and wrote
    results shifted by one row/column (leaving the first output row/column
    permanently zero). Fixed here by centring the window on (j, k).
    """
    r, c = I.shape[:2]
    LBPM = np.zeros((r - 2, c - 2), np.uint32)
    # Bit weights for the 8 neighbours; the centre pixel carries weight 0.
    xmask = [[1, 2, 4],
             [128, 0, 8],
             [64, 32, 16]]
    mask = np.array(xmask, np.uint16)
    # Slide a 3x3 window centred on every interior pixel (j, k).
    for j in range(1, r - 1):
        for k in range(1, c - 1):
            window = I[j - 1:j + 2, k - 1:k + 2] // 255
            LBPM[j - 1, k - 1] = np.sum(np.multiply(mask, window))
    return LBPM
fd11c41862d6836fcb2cd266a59b33fcd3185c6c | 14,618 | py | Python | airflow/providers/microsoft/azure/operators/azure_container_instances.py | donnut/airflow | ce66bc944d246aa3b51cce6e2fc13cd25da08d6e | [
"Apache-2.0"
] | 1 | 2020-09-15T02:32:55.000Z | 2020-09-15T02:32:55.000Z | airflow/providers/microsoft/azure/operators/azure_container_instances.py | donnut/airflow | ce66bc944d246aa3b51cce6e2fc13cd25da08d6e | [
"Apache-2.0"
] | 14 | 2019-12-03T02:54:42.000Z | 2020-02-27T16:08:10.000Z | airflow/providers/microsoft/azure/operators/azure_container_instances.py | donnut/airflow | ce66bc944d246aa3b51cce6e2fc13cd25da08d6e | [
"Apache-2.0"
] | 1 | 2020-11-04T03:12:47.000Z | 2020-11-04T03:12:47.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
from collections import namedtuple
from time import sleep
from typing import Any, Dict, List, Optional, Sequence, Union
from azure.mgmt.containerinstance.models import (
Container,
ContainerGroup,
EnvironmentVariable,
ResourceRequests,
ResourceRequirements,
VolumeMount,
)
from msrestazure.azure_exceptions import CloudError
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.azure_container_instance import AzureContainerInstanceHook
from airflow.providers.microsoft.azure.hooks.azure_container_registry import AzureContainerRegistryHook
from airflow.providers.microsoft.azure.hooks.azure_container_volume import AzureContainerVolumeHook
from airflow.utils.decorators import apply_defaults
# Lightweight description of an Azure Fileshare mount requested by the user;
# converted to real Azure volume/volume-mount objects inside ``execute``.
Volume = namedtuple(
    'Volume',
    ['conn_id', 'account_name', 'share_name', 'mount_path', 'read_only'],
)
# Defaults applied in the operator's __init__ when the matching argument
# is omitted or falsy.
DEFAULT_ENVIRONMENT_VARIABLES = {}  # type: Dict[str, str]
DEFAULT_SECURED_VARIABLES = []  # type: Sequence[str]
DEFAULT_VOLUMES = []  # type: Sequence[Volume]
DEFAULT_MEMORY_IN_GB = 2.0
DEFAULT_CPU = 1.0
# pylint: disable=too-many-instance-attributes
class AzureContainerInstancesOperator(BaseOperator):
    """
    Start a container on Azure Container Instances
    :param ci_conn_id: connection id of a service principal which will be used
        to start the container instance
    :type ci_conn_id: str
    :param registry_conn_id: connection id of a user which can login to a
        private docker registry. If None, we assume a public registry
    :type registry_conn_id: Optional[str]
    :param resource_group: name of the resource group wherein this container
        instance should be started
    :type resource_group: str
    :param name: name of this container instance. Please note this name has
        to be unique in order to run containers in parallel.
    :type name: str
    :param image: the docker image to be used
    :type image: str
    :param region: the region wherein this container instance should be started
    :type region: str
    :param environment_variables: key,value pairs containing environment
        variables which will be passed to the running container
    :type environment_variables: Optional[dict]
    :param secured_variables: names of environmental variables that should not
        be exposed outside the container (typically passwords).
    :type secured_variables: Optional[str]
    :param volumes: list of ``Volume`` tuples to be mounted to the container.
        Currently only Azure Fileshares are supported.
    :type volumes: list[<conn_id, account_name, share_name, mount_path, read_only>]
    :param memory_in_gb: the amount of memory to allocate to this container
    :type memory_in_gb: double
    :param cpu: the number of cpus to allocate to this container
    :type cpu: double
    :param gpu: GPU Resource for the container.
    :type gpu: azure.mgmt.containerinstance.models.GpuResource
    :param command: the command to run inside the container
    :type command: Optional[str]
    :param container_timeout: max time allowed for the execution of
        the container instance.
    :type container_timeout: datetime.timedelta
    :param tags: azure tags as dict of str:str
    :type tags: Optional[dict[str, str]]
    **Example**::
        AzureContainerInstancesOperator(
            "azure_service_principal",
            "azure_registry_user",
            "my-resource-group",
            "my-container-name-{{ ds }}",
            "myprivateregistry.azurecr.io/my_container:latest",
            "westeurope",
            {"MODEL_PATH": "my_value",
             "POSTGRES_LOGIN": "{{ macros.connection('postgres_default').login }}",
             "POSTGRES_PASSWORD": "{{ macros.connection('postgres_default').password }}",
             "JOB_GUID": "{{ ti.xcom_pull(task_ids='task1', key='guid') }}" },
            ['POSTGRES_PASSWORD'],
            [("azure_wasb_conn_id",
              "my_storage_container",
              "my_fileshare",
              "/input-data",
              True),],
            memory_in_gb=14.0,
            cpu=4.0,
            gpu=GpuResource(count=1, sku='K80'),
            command=["/bin/echo", "world"],
            container_timeout=timedelta(hours=2),
            task_id="start_container"
        )
    """
    template_fields = ('name', 'image', 'command', 'environment_variables')
    # pylint: disable=too-many-arguments
    @apply_defaults
    def __init__(
        self,
        *,
        ci_conn_id: str,
        registry_conn_id: Optional[str],
        resource_group: str,
        name: str,
        image: str,
        region: str,
        environment_variables: Optional[Dict[Any, Any]] = None,
        secured_variables: Optional[str] = None,
        volumes: Optional[List[Any]] = None,
        memory_in_gb: Optional[Any] = None,
        cpu: Optional[Any] = None,
        gpu: Optional[Any] = None,
        command: Optional[str] = None,
        remove_on_error: bool = True,
        fail_if_exists: bool = True,
        tags: Optional[Dict[str, str]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.ci_conn_id = ci_conn_id
        self.resource_group = resource_group
        # Validate the (possibly templated) name early; re-checked in execute
        # after template rendering.
        self.name = self._check_name(name)
        self.image = image
        self.region = region
        self.registry_conn_id = registry_conn_id
        # Fall back to the module-level defaults for optional settings
        self.environment_variables = environment_variables or DEFAULT_ENVIRONMENT_VARIABLES
        self.secured_variables = secured_variables or DEFAULT_SECURED_VARIABLES
        self.volumes = volumes or DEFAULT_VOLUMES
        self.memory_in_gb = memory_in_gb or DEFAULT_MEMORY_IN_GB
        self.cpu = cpu or DEFAULT_CPU
        self.gpu = gpu
        self.command = command
        self.remove_on_error = remove_on_error
        self.fail_if_exists = fail_if_exists
        self._ci_hook: Any = None
        self.tags = tags
    def execute(self, context: Dict[Any, Any]) -> int:
        # Check name again in case it was templated.
        self._check_name(self.name)
        self._ci_hook = AzureContainerInstanceHook(self.ci_conn_id)
        if self.fail_if_exists:
            self.log.info("Testing if container group already exists")
            if self._ci_hook.exists(self.resource_group, self.name):
                raise AirflowException("Container group exists")
        if self.registry_conn_id:
            registry_hook = AzureContainerRegistryHook(self.registry_conn_id)
            image_registry_credentials: Optional[List[Any]] = [
                registry_hook.connection,
            ]
        else:
            image_registry_credentials = None
        # Secured variables are passed via secure_value so the ACI API does
        # not expose them.
        environment_variables = []
        for key, value in self.environment_variables.items():
            if key in self.secured_variables:
                e = EnvironmentVariable(name=key, secure_value=value)
            else:
                e = EnvironmentVariable(name=key, value=value)
            environment_variables.append(e)
        volumes: List[Union[Volume, Volume]] = []
        volume_mounts: List[Union[VolumeMount, VolumeMount]] = []
        for conn_id, account_name, share_name, mount_path, read_only in self.volumes:
            hook = AzureContainerVolumeHook(conn_id)
            mount_name = "mount-%d" % len(volumes)
            volumes.append(hook.get_file_volume(mount_name, share_name, account_name, read_only))
            volume_mounts.append(VolumeMount(name=mount_name, mount_path=mount_path, read_only=read_only))
        # Assume failure until the container reports a real exit code.
        exit_code = 1
        try:
            self.log.info("Starting container group with %.1f cpu %.1f mem", self.cpu, self.memory_in_gb)
            if self.gpu:
                self.log.info("GPU count: %.1f, GPU SKU: %s", self.gpu.count, self.gpu.sku)
            resources = ResourceRequirements(
                requests=ResourceRequests(memory_in_gb=self.memory_in_gb, cpu=self.cpu, gpu=self.gpu)
            )
            container = Container(
                name=self.name,
                image=self.image,
                resources=resources,
                command=self.command,
                environment_variables=environment_variables,
                volume_mounts=volume_mounts,
            )
            container_group = ContainerGroup(
                location=self.region,
                containers=[
                    container,
                ],
                image_registry_credentials=image_registry_credentials,
                volumes=volumes,
                restart_policy='Never',
                os_type='Linux',
                tags=self.tags,
            )
            self._ci_hook.create_or_update(self.resource_group, self.name, container_group)
            self.log.info("Container group started %s/%s", self.resource_group, self.name)
            exit_code = self._monitor_logging(self.resource_group, self.name)
            self.log.info("Container had exit code: %s", exit_code)
            if exit_code != 0:
                raise AirflowException("Container had a non-zero exit code, %s" % exit_code)
            return exit_code
        except CloudError:
            self.log.exception("Could not start container group")
            raise AirflowException("Could not start container group")
        finally:
            if exit_code == 0 or self.remove_on_error:
                self.on_kill()
    def on_kill(self) -> None:
        if self.remove_on_error:
            self.log.info("Deleting container group")
            try:
                self._ci_hook.delete(self.resource_group, self.name)
            except Exception:  # pylint: disable=broad-except
                self.log.exception("Could not delete container group")
    def _monitor_logging(self, resource_group: str, name: str) -> int:
        """Poll the container group until it terminates, relaying its logs.
        Returns the container's exit code, or 1 on provisioning failure or
        when the group disappears (ResourceNotFound).
        """
        last_state = None
        last_message_logged = None
        last_line_logged = None
        # pylint: disable=too-many-nested-blocks
        while True:
            try:
                cg_state = self._ci_hook.get_state(resource_group, name)
                instance_view = cg_state.containers[0].instance_view
                # If there is no instance view, we show the provisioning state
                if instance_view is not None:
                    c_state = instance_view.current_state
                    state, exit_code, detail_status = (
                        c_state.state,
                        c_state.exit_code,
                        c_state.detail_status,
                    )
                    messages = [event.message for event in instance_view.events]
                    last_message_logged = self._log_last(messages, last_message_logged)
                else:
                    state = cg_state.provisioning_state
                    exit_code = 0
                    detail_status = "Provisioning"
                if state != last_state:
                    self.log.info("Container group state changed to %s", state)
                    last_state = state
                if state in ["Running", "Terminated"]:
                    try:
                        logs = self._ci_hook.get_logs(resource_group, name)
                        last_line_logged = self._log_last(logs, last_line_logged)
                    except CloudError:
                        self.log.exception(
                            "Exception while getting logs from " "container instance, retrying..."
                        )
                if state == "Terminated":
                    self.log.error("Container exited with detail_status %s", detail_status)
                    return exit_code
                if state == "Failed":
                    self.log.error("Azure provision failure")
                    return 1
            except AirflowTaskTimeout:
                # Let Airflow's own task-timeout propagate untouched
                raise
            except CloudError as err:
                if 'ResourceNotFound' in str(err):
                    self.log.warning(
                        "ResourceNotFound, container is probably removed "
                        "by another process "
                        "(make sure that the name is unique)."
                    )
                    return 1
                else:
                    self.log.exception("Exception while getting container groups")
            except Exception:  # pylint: disable=broad-except
                self.log.exception("Exception while getting container groups")
            sleep(1)
    def _log_last(self, logs: Optional[List[Any]], last_line_logged: Any) -> Optional[Any]:
        """Log only the entries of ``logs`` that follow ``last_line_logged``.
        Returns the last entry logged, to be passed back on the next call.
        """
        if logs:
            # determine the last line which was logged before
            last_line_index = 0
            for i in range(len(logs) - 1, -1, -1):
                if logs[i] == last_line_logged:
                    # this line is the same, hence print from i+1
                    last_line_index = i + 1
                    break
            # log all new ones
            for line in logs[last_line_index:]:
                self.log.info(line.rstrip())
            return logs[-1]
        return None
    @staticmethod
    def _check_name(name: str) -> str:
        """Validate ``name`` against Azure Container Instance naming rules."""
        if '{{' in name:
            # Let macros pass as they cannot be checked at construction time
            return name
        regex_check = re.match("[a-z0-9]([-a-z0-9]*[a-z0-9])?", name)
        if regex_check is None or regex_check.group() != name:
            raise AirflowException('ACI name must match regex [a-z0-9]([-a-z0-9]*[a-z0-9])? (like "my-name")')
        if len(name) > 63:
            raise AirflowException('ACI name cannot be longer than 63 characters')
        return name
| 40.832402 | 110 | 0.611643 |
fbef21a2630c3b33988afaf239efb83d5b72b01d | 67,597 | py | Python | sklearn/metrics/_ranking.py | dPys/scikit-learn | e11c4d21a4579f0d49f414a4b76e386f80f0f074 | [
"BSD-3-Clause"
] | null | null | null | sklearn/metrics/_ranking.py | dPys/scikit-learn | e11c4d21a4579f0d49f414a4b76e386f80f0f074 | [
"BSD-3-Clause"
] | null | null | null | sklearn/metrics/_ranking.py | dPys/scikit-learn | e11c4d21a4579f0d49f414a4b76e386f80f0f074 | [
"BSD-3-Clause"
] | null | null | null | """Metrics to assess performance on classification task given scores.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Michal Karbownik <michakarbownik@gmail.com>
# License: BSD 3 clause
import warnings
from functools import partial
import numpy as np
from scipy.sparse import csr_matrix
from scipy.stats import rankdata
from ..utils import assert_all_finite
from ..utils import check_consistent_length
from ..utils.validation import _check_sample_weight
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.extmath import stable_cumsum
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from ..preprocessing import label_binarize
from ..utils._encode import _encode, _unique
from ._base import (
_average_binary_score,
_average_multiclass_ovo_score,
_check_pos_label_consistency,
)
def auc(x, y):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.

    This is a general function, given points on a curve. For computing the
    area under the ROC-curve, see :func:`roc_auc_score`. For an alternative
    way to summarize a precision-recall curve, see
    :func:`average_precision_score`.

    Parameters
    ----------
    x : ndarray of shape (n,)
        x coordinates. These must be either monotonic increasing or monotonic
        decreasing.
    y : ndarray of shape, (n,)
        y coordinates.

    Returns
    -------
    auc : float

    See Also
    --------
    roc_auc_score : Compute the area under the ROC curve.
    average_precision_score : Compute average precision from prediction scores.
    precision_recall_curve : Compute precision-recall pairs for different
        probability thresholds.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)
    if x.shape[0] < 2:
        raise ValueError(
            "At least 2 points are needed to compute area under curve, but x.shape = %s"
            % x.shape
        )
    # Determine the orientation of x; mixed directions are rejected.
    dx = np.diff(x)
    if np.any(dx < 0):
        if not np.all(dx <= 0):
            raise ValueError("x is neither increasing nor decreasing : {}.".format(x))
        direction = -1
    else:
        direction = 1
    area = direction * np.trapz(y, x)
    if isinstance(area, np.memmap):
        # Reductions used inside np.trapz do not collapse numpy.memmap
        # inputs to a scalar the way regular ndarrays do; force one here.
        area = area.dtype.type(area)
    return area
def average_precision_score(
    y_true, y_score, *, average="macro", pos_label=1, sample_weight=None
):
    """Compute average precision (AP) from prediction scores.
    AP summarizes a precision-recall curve as the weighted mean of precisions
    achieved at each threshold, with the increase in recall from the previous
    threshold used as the weight:
    .. math::
        \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
    where :math:`P_n` and :math:`R_n` are the precision and recall at the nth
    threshold [1]_. This implementation is not interpolated and is different
    from computing the area under the precision-recall curve with the
    trapezoidal rule, which uses linear interpolation and can be too
    optimistic.
    Note: this implementation is restricted to the binary classification task
    or multilabel classification task.
    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
    Parameters
    ----------
    y_true : ndarray of shape (n_samples,) or (n_samples, n_classes)
        True binary labels or binary label indicators.
    y_score : ndarray of shape (n_samples,) or (n_samples, n_classes)
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by :term:`decision_function` on some classifiers).
    average : {'micro', 'samples', 'weighted', 'macro'} or None, \
            default='macro'
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:
        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.
        Will be ignored when ``y_true`` is binary.
    pos_label : int or str, default=1
        The label of the positive class. Only applied to binary ``y_true``.
        For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    Returns
    -------
    average_precision : float
    See Also
    --------
    roc_auc_score : Compute the area under the ROC curve.
    precision_recall_curve : Compute precision-recall pairs for different
        probability thresholds.
    Notes
    -----
    .. versionchanged:: 0.19
      Instead of linearly interpolating between operating points, precisions
      are weighted by the change in recall since the last operating point.
    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <https://en.wikipedia.org/w/index.php?title=Information_retrieval&
           oldid=793358396#Average_precision>`_
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)
    0.83...
    """

    # Per-column AP: the step-function integral of the precision-recall curve
    def _binary_uninterpolated_average_precision(
        y_true, y_score, pos_label=1, sample_weight=None
    ):
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
        )
        # Return the step function integral
        # The following works because the last entry of precision is
        # guaranteed to be 1, as returned by precision_recall_curve
        return -np.sum(np.diff(recall) * np.array(precision)[:-1])

    # Validate pos_label against the detected problem type before averaging
    y_type = type_of_target(y_true, input_name="y_true")
    if y_type == "multilabel-indicator" and pos_label != 1:
        raise ValueError(
            "Parameter pos_label is fixed to 1 for "
            "multilabel-indicator y_true. Do not set "
            "pos_label or set pos_label to 1."
        )
    elif y_type == "binary":
        # Convert to Python primitive type to avoid NumPy type / Python str
        # comparison. See https://github.com/numpy/numpy/issues/6784
        present_labels = np.unique(y_true).tolist()
        if len(present_labels) == 2 and pos_label not in present_labels:
            raise ValueError(
                f"pos_label={pos_label} is not a valid label. It should be "
                f"one of {present_labels}"
            )
    average_precision = partial(
        _binary_uninterpolated_average_precision, pos_label=pos_label
    )
    # Delegate micro/macro/weighted/samples averaging to the shared helper
    return _average_binary_score(
        average_precision, y_true, y_score, average, sample_weight=sample_weight
    )
def det_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute error rates for different probability thresholds.

    .. note::
       This metric is used for evaluation of ranking and error tradeoffs of
       a binary classification task.

    Read more in the :ref:`User Guide <det_curve>`.

    .. versionadded:: 0.24

    Parameters
    ----------
    y_true : ndarray of shape (n_samples,)
        True binary labels. If labels are not either {-1, 1} or {0, 1}, then
        pos_label should be explicitly given.

    y_score : ndarray of shape (n_samples,)
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).

    pos_label : int or str, default=None
        The label of the positive class.
        When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
        ``pos_label`` is set to 1, otherwise an error will be raised.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    fpr : ndarray of shape (n_thresholds,)
        False positive rate (FPR) such that element i is the false positive
        rate of predictions with score >= thresholds[i]. This is occasionally
        referred to as false acceptance probability or fall-out.

    fnr : ndarray of shape (n_thresholds,)
        False negative rate (FNR) such that element i is the false negative
        rate of predictions with score >= thresholds[i]. This is occasionally
        referred to as false rejection or miss rate.

    thresholds : ndarray of shape (n_thresholds,)
        Decreasing score values.

    See Also
    --------
    DetCurveDisplay.from_estimator : Plot DET curve given an estimator and
        some data.
    DetCurveDisplay.from_predictions : Plot DET curve given the true and
        predicted labels.
    DetCurveDisplay : DET curve visualization.
    roc_curve : Compute Receiver operating characteristic (ROC) curve.
    precision_recall_curve : Compute precision-recall curve.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import det_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, fnr, thresholds = det_curve(y_true, y_scores)
    >>> fpr
    array([0.5, 0.5, 0. ])
    >>> fnr
    array([0. , 0.5, 0.5])
    >>> thresholds
    array([0.35, 0.4 , 0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
    )

    # The DET curve (a false-positive/false-negative tradeoff) is only
    # defined when both classes are present.
    if len(np.unique(y_true)) != 2:
        raise ValueError(
            "Only one class present in y_true. Detection error "
            "tradeoff curve is not defined in that case."
        )

    fns = tps[-1] - tps
    p_count = tps[-1]
    n_count = fps[-1]

    # Start where false positives first reach zero. Compute the insertion
    # point once instead of calling ``searchsorted`` twice with identical
    # arguments (the original code duplicated this call).
    first_cut = fps.searchsorted(fps[0], side="right")
    first_ind = first_cut - 1 if first_cut > 0 else None
    # Stop where false negatives reach zero.
    last_ind = tps.searchsorted(tps[-1]) + 1
    sl = slice(first_ind, last_ind)

    # Reverse the output such that the list of false positives is decreasing.
    return (fps[sl][::-1] / n_count, fns[sl][::-1] / p_count, thresholds[sl][::-1])
def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):
    """Binary roc auc score."""
    # ROC AUC is undefined when only one class appears in the targets.
    if np.unique(y_true).size != 2:
        raise ValueError(
            "Only one class present in y_true. ROC AUC score "
            "is not defined in that case."
        )

    fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
    if max_fpr is None or max_fpr == 1:
        # Full-range area under the ROC curve.
        return auc(fpr, tpr)
    if max_fpr <= 0 or max_fpr > 1:
        raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr)

    # Insert a single interpolated operating point exactly at max_fpr.
    cut = np.searchsorted(fpr, max_fpr, "right")
    tpr_at_max_fpr = np.interp(
        max_fpr, [fpr[cut - 1], fpr[cut]], [tpr[cut - 1], tpr[cut]]
    )
    partial_auc = auc(
        np.append(fpr[:cut], max_fpr), np.append(tpr[:cut], tpr_at_max_fpr)
    )

    # McClish correction: rescale so that a non-discriminant classifier
    # scores 0.5 and a maximally discriminant one scores 1.
    area_min = 0.5 * max_fpr**2
    area_max = max_fpr
    return 0.5 * (1 + (partial_auc - area_min) / (area_max - area_min))
def roc_auc_score(
    y_true,
    y_score,
    *,
    average="macro",
    sample_weight=None,
    max_fpr=None,
    multi_class="raise",
    labels=None,
):
    """Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC)
    from prediction scores.
    Note: this implementation can be used with binary, multiclass and
    multilabel classification, but some restrictions apply (see Parameters).
    Read more in the :ref:`User Guide <roc_metrics>`.
    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
        True labels or binary label indicators. The binary and multiclass cases
        expect labels with shape (n_samples,) while the multilabel case expects
        binary label indicators with shape (n_samples, n_classes).
    y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
        Target scores.
        * In the binary case, it corresponds to an array of shape
          `(n_samples,)`. Both probability estimates and non-thresholded
          decision values can be provided. The probability estimates correspond
          to the **probability of the class with the greater label**,
          i.e. `estimator.classes_[1]` and thus
          `estimator.predict_proba(X, y)[:, 1]`. The decision values
          corresponds to the output of `estimator.decision_function(X, y)`.
          See more information in the :ref:`User guide <roc_auc_binary>`;
        * In the multiclass case, it corresponds to an array of shape
          `(n_samples, n_classes)` of probability estimates provided by the
          `predict_proba` method. The probability estimates **must**
          sum to 1 across the possible classes. In addition, the order of the
          class scores must correspond to the order of ``labels``,
          if provided, or else to the numerical or lexicographical order of
          the labels in ``y_true``. See more information in the
          :ref:`User guide <roc_auc_multiclass>`;
        * In the multilabel case, it corresponds to an array of shape
          `(n_samples, n_classes)`. Probability estimates are provided by the
          `predict_proba` method and the non-thresholded decision values by
          the `decision_function` method. The probability estimates correspond
          to the **probability of the class with the greater label for each
          output** of the classifier. See more information in the
          :ref:`User guide <roc_auc_multilabel>`.
    average : {'micro', 'macro', 'samples', 'weighted'} or None, \
            default='macro'
        If ``None``, the scores for each class are returned.
        Otherwise, this determines the type of averaging performed on the data.
        Note: multiclass ROC AUC currently only handles the 'macro' and
        'weighted' averages. For multiclass targets, `average=None`
        is only implemented for `multi_class='ovo'`.
        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.
        Will be ignored when ``y_true`` is binary.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    max_fpr : float > 0 and <= 1, default=None
        If not ``None``, the standardized partial AUC [2]_ over the range
        [0, max_fpr] is returned. For the multiclass case, ``max_fpr``,
        should be either equal to ``None`` or ``1.0`` as AUC ROC partial
        computation currently is not supported for multiclass.
    multi_class : {'raise', 'ovr', 'ovo'}, default='raise'
        Only used for multiclass targets. Determines the type of configuration
        to use. The default value raises an error, so either
        ``'ovr'`` or ``'ovo'`` must be passed explicitly.
        ``'ovr'``:
            Stands for One-vs-rest. Computes the AUC of each class
            against the rest [3]_ [4]_. This
            treats the multiclass case in the same way as the multilabel case.
            Sensitive to class imbalance even when ``average == 'macro'``,
            because class imbalance affects the composition of each of the
            'rest' groupings.
        ``'ovo'``:
            Stands for One-vs-one. Computes the average AUC of all
            possible pairwise combinations of classes [5]_.
            Insensitive to class imbalance when
            ``average == 'macro'``.
    labels : array-like of shape (n_classes,), default=None
        Only used for multiclass targets. List of labels that index the
        classes in ``y_score``. If ``None``, the numerical or lexicographical
        order of the labels in ``y_true`` is used.
    Returns
    -------
    auc : float
    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
    .. [2] `Analyzing a portion of the ROC curve. McClish, 1989
            <https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_
    .. [3] Provost, F., Domingos, P. (2000). Well-trained PETs: Improving
           probability estimation trees (Section 6.2), CeDER Working Paper
           #IS-00-04, Stern School of Business, New York University.
    .. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern
            Recognition Letters, 27(8), 861-874.
            <https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_
    .. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area
            Under the ROC Curve for Multiple Class Classification Problems.
            Machine Learning, 45(2), 171-186.
            <http://link.springer.com/article/10.1023/A:1010920819831>`_
    See Also
    --------
    average_precision_score : Area under the precision-recall curve.
    roc_curve : Compute Receiver operating characteristic (ROC) curve.
    RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
        (ROC) curve given an estimator and some data.
    RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
        (ROC) curve given the true and predicted values.
    Examples
    --------
    Binary case:
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.metrics import roc_auc_score
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y)
    >>> roc_auc_score(y, clf.predict_proba(X)[:, 1])
    0.99...
    >>> roc_auc_score(y, clf.decision_function(X))
    0.99...
    Multiclass case:
    >>> from sklearn.datasets import load_iris
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = LogisticRegression(solver="liblinear").fit(X, y)
    >>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr')
    0.99...
    Multilabel case:
    >>> import numpy as np
    >>> from sklearn.datasets import make_multilabel_classification
    >>> from sklearn.multioutput import MultiOutputClassifier
    >>> X, y = make_multilabel_classification(random_state=0)
    >>> clf = MultiOutputClassifier(clf).fit(X, y)
    >>> # get a list of n_output containing probability arrays of shape
    >>> # (n_samples, n_classes)
    >>> y_pred = clf.predict_proba(X)
    >>> # extract the positive columns for each output
    >>> y_pred = np.transpose([pred[:, 1] for pred in y_pred])
    >>> roc_auc_score(y, y_pred, average=None)
    array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...])
    >>> from sklearn.linear_model import RidgeClassifierCV
    >>> clf = RidgeClassifierCV().fit(X, y)
    >>> roc_auc_score(y, clf.decision_function(X), average=None)
    array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...])
    """
    y_type = type_of_target(y_true, input_name="y_true")
    y_true = check_array(y_true, ensure_2d=False, dtype=None)
    y_score = check_array(y_score, ensure_2d=False)
    # Multiclass dispatch. Note it is also taken when y_true looks binary
    # (two observed labels) but y_score carries per-class columns for more
    # than two classes -- the scores, not the observed labels, decide.
    if y_type == "multiclass" or (
        y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2
    ):
        # do not support partial ROC computation for multiclass
        if max_fpr is not None and max_fpr != 1.0:
            raise ValueError(
                "Partial AUC computation not available in "
                "multiclass setting, 'max_fpr' must be"
                " set to `None`, received `max_fpr={0}` "
                "instead".format(max_fpr)
            )
        # multi_class has no sensible default for multiclass input; callers
        # must opt into one of the two pairwise strategies explicitly.
        if multi_class == "raise":
            raise ValueError("multi_class must be in ('ovo', 'ovr')")
        return _multiclass_roc_auc_score(
            y_true, y_score, labels, multi_class, average, sample_weight
        )
    elif y_type == "binary":
        labels = np.unique(y_true)
        # Reduce the binary labels to a single 0/1 indicator column so the
        # generic binary scorer can be applied.
        y_true = label_binarize(y_true, classes=labels)[:, 0]
        return _average_binary_score(
            partial(_binary_roc_auc_score, max_fpr=max_fpr),
            y_true,
            y_score,
            average,
            sample_weight=sample_weight,
        )
    else:  # multilabel-indicator
        # Each label column is scored as an independent binary problem and
        # the per-label AUCs are combined according to `average`.
        return _average_binary_score(
            partial(_binary_roc_auc_score, max_fpr=max_fpr),
            y_true,
            y_score,
            average,
            sample_weight=sample_weight,
        )
def _multiclass_roc_auc_score(
    y_true, y_score, labels, multi_class, average, sample_weight
):
    """Multiclass roc auc score.
    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True multiclass labels.
    y_score : array-like of shape (n_samples, n_classes)
        Target scores corresponding to probability estimates of a sample
        belonging to a particular class
    labels : array-like of shape (n_classes,) or None
        List of labels to index ``y_score`` used for multiclass. If ``None``,
        the lexical order of ``y_true`` is used to index ``y_score``.
    multi_class : {'ovr', 'ovo'}
        Determines the type of multiclass configuration to use.
        ``'ovr'``:
            Calculate metrics for the multiclass case using the one-vs-rest
            approach.
        ``'ovo'``:
            Calculate metrics for the multiclass case using the one-vs-one
            approach.
    average : {'macro', 'weighted'}
        Determines the type of averaging performed on the pairwise binary
        metric scores
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account. Classes
            are assumed to be uniformly distributed.
        ``'weighted'``:
            Calculate metrics for each label, taking into account the
            prevalence of the classes.
    sample_weight : array-like of shape (n_samples,) or None
        Sample weights.
    Returns
    -------
    auc : float
        The averaged one-vs-one or one-vs-rest ROC AUC.
    Raises
    ------
    ValueError
        If ``y_score`` rows are not probabilities, if ``average`` or
        ``multi_class`` is invalid, or if ``labels`` is inconsistent with
        ``y_true``/``y_score``.
    """
    # validation of the input y_score: every row must be a probability
    # distribution over the classes.
    if not np.allclose(1, y_score.sum(axis=1)):
        raise ValueError(
            "Target scores need to be probabilities for multiclass "
            "roc_auc, i.e. they should sum up to 1.0 over classes"
        )
    # validation for multiclass parameter specifications
    average_options = ("macro", "weighted", None)
    if average not in average_options:
        raise ValueError(
            "average must be one of {0} for multiclass problems".format(average_options)
        )
    multiclass_options = ("ovo", "ovr")
    if multi_class not in multiclass_options:
        raise ValueError(
            "multi_class='{0}' is not supported "
            "for multiclass ROC AUC, multi_class must be "
            "in {1}".format(multi_class, multiclass_options)
        )
    if average is None and multi_class == "ovo":
        raise NotImplementedError(
            "average=None is not implemented for multi_class='ovo'."
        )
    if labels is not None:
        # `labels` must be a unique, sorted list matching the y_score columns
        # and covering every label observed in y_true.
        labels = column_or_1d(labels)
        classes = _unique(labels)
        if len(classes) != len(labels):
            raise ValueError("Parameter 'labels' must be unique")
        if not np.array_equal(classes, labels):
            raise ValueError("Parameter 'labels' must be ordered")
        if len(classes) != y_score.shape[1]:
            raise ValueError(
                "Number of given labels, {0}, not equal to the number "
                "of columns in 'y_score', {1}".format(len(classes), y_score.shape[1])
            )
        if len(np.setdiff1d(y_true, classes)):
            raise ValueError("'y_true' contains labels not in parameter 'labels'")
    else:
        # Fall back to the sorted unique labels of y_true as column index.
        classes = _unique(y_true)
        if len(classes) != y_score.shape[1]:
            raise ValueError(
                "Number of classes in y_true not equal to the number of "
                "columns in 'y_score'"
            )
    if multi_class == "ovo":
        if sample_weight is not None:
            raise ValueError(
                "sample_weight is not supported "
                "for multiclass one-vs-one ROC AUC, "
                "'sample_weight' must be None in this case."
            )
        # The pairwise scorer needs integer-encoded class labels.
        y_true_encoded = _encode(y_true, uniques=classes)
        # Hand & Till (2001) implementation (ovo)
        return _average_multiclass_ovo_score(
            _binary_roc_auc_score, y_true_encoded, y_score, average=average
        )
    else:
        # ovr is same as multi-label: binarize and score each column.
        y_true_multilabel = label_binarize(y_true, classes=classes)
        return _average_binary_score(
            _binary_roc_auc_score,
            y_true_multilabel,
            y_score,
            average,
            sample_weight=sample_weight,
        )
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.
    Parameters
    ----------
    y_true : ndarray of shape (n_samples,)
        True targets of binary classification.
    y_score : ndarray of shape (n_samples,)
        Estimated probabilities or output of a decision function.
    pos_label : int or str, default=None
        The label of the positive class.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    Returns
    -------
    fps : ndarray of shape (n_thresholds,)
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).
    tps : ndarray of shape (n_thresholds,)
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).
    thresholds : ndarray of shape (n_thresholds,)
        Decreasing score values.
    """
    # Check to make sure y_true is valid. "multiclass" is accepted only when
    # the caller disambiguates the positive class via pos_label.
    y_type = type_of_target(y_true, input_name="y_true")
    if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)):
        raise ValueError("{0} format is not supported".format(y_type))
    check_consistent_length(y_true, y_score, sample_weight)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    assert_all_finite(y_true)
    assert_all_finite(y_score)
    # Filter out zero-weighted samples, as they should not impact the result
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        sample_weight = _check_sample_weight(sample_weight, y_true)
        nonzero_weight_mask = sample_weight != 0
        y_true = y_true[nonzero_weight_mask]
        y_score = y_score[nonzero_weight_mask]
        sample_weight = sample_weight[nonzero_weight_mask]
    pos_label = _check_pos_label_consistency(pos_label, y_true)
    # make y_true a boolean vector
    y_true = y_true == pos_label
    # sort scores and corresponding truth values (stable sort so ties keep
    # their relative order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        weight = 1.0  # scalar broadcasts in the cumulative sums below
    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    distinct_value_indices = np.where(np.diff(y_score))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # accumulate the true positives with decreasing threshold
    tps = stable_cumsum(y_true * weight)[threshold_idxs]
    if sample_weight is not None:
        # express fps as a cumsum to ensure fps is increasing even in
        # the presence of floating point errors
        fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs]
    else:
        # Unweighted: totals at each threshold are exact integers, so fps
        # can be derived from the running count minus tps.
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, *, pos_label=None, sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds.

    Note: this implementation is restricted to the binary classification task.

    Precision is ``tp / (tp + fp)``, with ``tp`` the number of true positives
    and ``fp`` the number of false positives: intuitively, the classifier's
    ability not to label a negative sample as positive.

    Recall is ``tp / (tp + fn)``, with ``fn`` the number of false negatives:
    intuitively, the classifier's ability to find all the positive samples.

    The last precision and recall values are 1. and 0. respectively and have
    no corresponding threshold; this ensures the graph starts on the y axis.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : ndarray of shape (n_samples,)
        True binary labels. If labels are not either {-1, 1} or {0, 1}, then
        pos_label should be explicitly given.

    probas_pred : ndarray of shape (n_samples,)
        Target scores: either probability estimates of the positive class or
        non-thresholded decisions (as returned by `decision_function` on some
        classifiers).

    pos_label : int or str, default=None
        The label of the positive class.
        When ``pos_label=None``, if y_true is in {-1, 1} or {0, 1},
        ``pos_label`` is set to 1, otherwise an error will be raised.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    precision : ndarray of shape (n_thresholds + 1,)
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.

    recall : ndarray of shape (n_thresholds + 1,)
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.

    thresholds : ndarray of shape (n_thresholds,)
        Increasing thresholds on the decision function used to compute
        precision and recall. n_thresholds <= len(np.unique(probas_pred)).

    See Also
    --------
    PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given
        a binary classifier.
    PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve
        using predictions from a binary classifier.
    average_precision_score : Compute average precision from prediction scores.
    det_curve: Compute error rates for different probability thresholds.
    roc_curve : Compute Receiver operating characteristic (ROC) curve.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision
    array([0.66666667, 0.5       , 1.        , 1.        ])
    >>> recall
    array([1. , 0.5, 0.5, 0. ])
    >>> thresholds
    array([0.35, 0.4 , 0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight
    )

    predicted_positives = tps + fps
    precision = tps / predicted_positives
    # 0/0 at thresholds with no predicted positives -> define precision as 0.
    precision[np.isnan(precision)] = 0
    recall = tps / tps[-1]

    # Truncate once full recall is reached, reverse so recall decreases,
    # then append the conventional (precision=1, recall=0) end point which
    # carries no threshold.
    full_recall_idx = tps.searchsorted(tps[-1])
    reversed_precision = precision[full_recall_idx::-1]
    reversed_recall = recall[full_recall_idx::-1]
    reversed_thresholds = thresholds[full_recall_idx::-1]
    return (
        np.hstack((reversed_precision, 1)),
        np.hstack((reversed_recall, 0)),
        reversed_thresholds,
    )
def roc_curve(
    y_true, y_score, *, pos_label=None, sample_weight=None, drop_intermediate=True
):
    """Compute Receiver operating characteristic (ROC).

    Note: this implementation is restricted to the binary classification task.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : ndarray of shape (n_samples,)
        True binary labels. If labels are not either {-1, 1} or {0, 1}, then
        pos_label should be explicitly given.

    y_score : ndarray of shape (n_samples,)
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decisions (as returned by
        "decision_function" on some classifiers).

    pos_label : int or str, default=None
        The label of the positive class.
        When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
        ``pos_label`` is set to 1, otherwise an error will be raised.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    drop_intermediate : bool, default=True
        Whether to drop some suboptimal thresholds which would not appear
        on a plotted ROC curve. This is useful in order to create lighter
        ROC curves.

        .. versionadded:: 0.17
           parameter *drop_intermediate*.

    Returns
    -------
    fpr : ndarray of shape (>2,)
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= `thresholds[i]`.

    tpr : ndarray of shape (>2,)
        Increasing true positive rates such that element `i` is the true
        positive rate of predictions with score >= `thresholds[i]`.

    thresholds : ndarray of shape = (n_thresholds,)
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.

    See Also
    --------
    RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
        (ROC) curve given an estimator and some data.
    RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
        (ROC) curve given the true and predicted values.
    det_curve: Compute error rates for different probability thresholds.
    roc_auc_score : Compute the area under the ROC curve.

    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
    .. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition
           Letters, 2006, 27(8):861-874.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([0. , 0. , 0.5, 0.5, 1. ])
    >>> tpr
    array([0. , 0.5, 0.5, 1. , 1. ])
    >>> thresholds
    array([1.8 , 0.8 , 0.4 , 0.35, 0.1 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
    )

    # Optionally drop points that sit on a straight segment between their
    # neighbours: they never appear as corners on the plotted curve and do
    # not change the AUC. np.diff(_, 2) acts as a discrete second derivative;
    # a point is kept when either fps or tps bends there. This is
    # conservative (it may keep collinear points in mixed cases such as
    # fps = [1, 3, 7], tps = [1, 2, 4]) but keeping extra thresholds is
    # harmless.
    if drop_intermediate and len(fps) > 2:
        corner_idxs = np.where(
            np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True]
        )[0]
        fps = fps[corner_idxs]
        tps = tps[corner_idxs]
        thresholds = thresholds[corner_idxs]

    # Prepend an artificial threshold above every score so the curve starts
    # at (0, 0).
    fps = np.r_[0, fps]
    tps = np.r_[0, tps]
    thresholds = np.r_[thresholds[0] + 1, thresholds]

    def _rate(counts, message):
        # Normalize cumulative counts to rates; if the corresponding class is
        # absent the rate is undefined, so warn and return NaNs.
        if counts[-1] <= 0:
            warnings.warn(message, UndefinedMetricWarning)
            return np.repeat(np.nan, counts.shape)
        return counts / counts[-1]

    fpr = _rate(
        fps,
        "No negative samples in y_true, false positive value should be meaningless",
    )
    tpr = _rate(
        tps,
        "No positive samples in y_true, true positive value should be meaningless",
    )
    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score, *, sample_weight=None):
    """Compute ranking-based average precision.

    Label ranking average precision (LRAP) averages, over the ground-truth
    labels of each sample, the ratio of relevant vs. total labels ranked at
    or above that label's score.

    This metric is used in multilabel ranking problems, where the goal is to
    rank the labels associated with each sample higher than the others. The
    obtained score is always strictly greater than 0 and the best value is 1.

    Read more in the :ref:`User Guide <label_ranking_average_precision>`.

    Parameters
    ----------
    y_true : {ndarray, sparse matrix} of shape (n_samples, n_labels)
        True binary labels in binary indicator format.

    y_score : ndarray of shape (n_samples, n_labels)
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decisions (as returned by
        "decision_function" on some classifiers).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

        .. versionadded:: 0.20

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score)
    0.416...
    """
    check_consistent_length(y_true, y_score, sample_weight)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Reject badly formatted arrays; a 2-d "binary" target (one label) is the
    # accepted degenerate case.
    y_type = type_of_target(y_true, input_name="y_true")
    if y_type != "multilabel-indicator" and not (
        y_type == "binary" and y_true.ndim == 2
    ):
        raise ValueError("{0} format is not supported".format(y_type))

    y_true = csr_matrix(y_true)
    # Negate so that higher original scores get *smaller* ranks below.
    y_score = -y_score
    n_samples, n_labels = y_true.shape

    total = 0.0
    for sample_idx, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]
        if relevant.size in (0, n_labels):
            # With no relevant labels, or all labels relevant, the ranking is
            # meaningless and the per-sample score is defined as 1.
            sample_score = 1.0
        else:
            scores_row = y_score[sample_idx]
            rank_among_all = rankdata(scores_row, "max")[relevant]
            rank_among_relevant = rankdata(scores_row[relevant], "max")
            sample_score = (rank_among_relevant / rank_among_all).mean()
        if sample_weight is not None:
            sample_score = sample_score * sample_weight[sample_idx]
        total += sample_score

    normalizer = n_samples if sample_weight is None else np.sum(sample_weight)
    return total / normalizer
def coverage_error(y_true, y_score, *, sample_weight=None):
    """Coverage error measure.

    Compute how far down the ranked scores one must go to cover every true
    label. The best value equals the average number of labels in ``y_true``
    per sample.

    Ties in ``y_scores`` are broken by giving the maximal rank that would
    have been assigned to all tied values.

    Note: Our implementation's score is 1 greater than the one given in
    Tsoumakas et al., 2010, extending it to handle the degenerate case of an
    instance with 0 true labels.

    Read more in the :ref:`User Guide <coverage_error>`.

    Parameters
    ----------
    y_true : ndarray of shape (n_samples, n_labels)
        True binary labels in binary indicator format.

    y_score : ndarray of shape (n_samples, n_labels)
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decisions (as returned by
        "decision_function" on some classifiers).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true, input_name="y_true")
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Mask the scores of irrelevant labels: the row-wise minimum of what
    # remains is the score of the worst-ranked relevant label.
    relevant_scores = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    worst_relevant_score = relevant_scores.min(axis=1).reshape((-1, 1))
    # Coverage per sample = number of labels scoring at least that value.
    coverage = (y_score >= worst_relevant_score).sum(axis=1)
    # Rows with no relevant label stay masked; count their coverage as 0.
    coverage = coverage.filled(0)
    return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, *, sample_weight=None):
    """Compute Ranking loss measure.
    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.
    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.
    Read more in the :ref:`User Guide <label_ranking_loss>`.
    .. versionadded:: 0.17
       A function *label_ranking_loss*
    Parameters
    ----------
    y_true : {ndarray, sparse matrix} of shape (n_samples, n_labels)
        True binary labels in binary indicator format.
    y_score : ndarray of shape (n_samples, n_labels)
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    Returns
    -------
    loss : float
    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    # Validate inputs; y_true may be a sparse indicator matrix.
    y_true = check_array(y_true, ensure_2d=False, accept_sparse="csr")
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true, input_name="y_true")
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    n_samples, n_labels = y_true.shape
    # Convert to CSR so each sample's true-label indices are a contiguous
    # slice of y_true.indices (delimited by indptr).
    y_true = csr_matrix(y_true)
    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores.  np.unique returns scores in
        # ascending order, so bin index == reversed rank position.
        unique_scores, unique_inverse = np.unique(y_score[i], return_inverse=True)
        # Count, per score bin, how many true labels land there...
        true_at_reversed_rank = np.bincount(
            unique_inverse[y_true.indices[start:stop]], minlength=len(unique_scores)
        )
        # ...and how many labels in total, giving the false count by difference.
        all_at_reversed_rank = np.bincount(unique_inverse, minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than the
        # accumulated true labels with lower score.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(), false_at_reversed_rank)
    n_positives = count_nonzero(y_true, axis=1)
    # Normalize by the number of (relevant, irrelevant) pairs; suppress the
    # divide-by-zero warnings for the degenerate rows fixed up below.
    with np.errstate(divide="ignore", invalid="ignore"):
        loss /= (n_labels - n_positives) * n_positives
    # When there is no positive or no negative labels, those values should
    # be consider as correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.0
    return np.average(loss, weights=sample_weight)
def _dcg_sample_scores(y_true, y_score, k=None, log_base=2, ignore_ties=False):
"""Compute Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If `None`, use all
outputs.
log_base : float, default=2
Base of the logarithm used for the discount. A low value means a
sharper discount (top results are more important).
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
discounted_cumulative_gain : ndarray of shape (n_samples,)
The DCG score for each sample.
See Also
--------
ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted
Cumulative Gain (the DCG obtained for a perfect ranking), in order to
have a score between 0 and 1.
"""
discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))
if k is not None:
discount[k:] = 0
if ignore_ties:
ranking = np.argsort(y_score)[:, ::-1]
ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]
cumulative_gains = discount.dot(ranked.T)
else:
discount_cumsum = np.cumsum(discount)
cumulative_gains = [
_tie_averaged_dcg(y_t, y_s, discount_cumsum)
for y_t, y_s in zip(y_true, y_score)
]
cumulative_gains = np.asarray(cumulative_gains)
return cumulative_gains
def _tie_averaged_dcg(y_true, y_score, discount_cumsum):
"""
Compute DCG by averaging over possible permutations of ties.
The gain (`y_true`) of an index falling inside a tied group (in the order
induced by `y_score`) is replaced by the average gain within this group.
The discounted gain for a tied group is then the average `y_true` within
this group times the sum of discounts of the corresponding ranks.
This amounts to averaging scores for all possible orderings of the tied
groups.
(note in the case of dcg@k the discount is 0 after index k)
Parameters
----------
y_true : ndarray
The true relevance scores.
y_score : ndarray
Predicted scores.
discount_cumsum : ndarray
Precomputed cumulative sum of the discounts.
Returns
-------
discounted_cumulative_gain : float
The discounted cumulative gain.
References
----------
McSherry, F., & Najork, M. (2008, March). Computing information retrieval
performance measures efficiently in the presence of tied scores. In
European conference on information retrieval (pp. 414-421). Springer,
Berlin, Heidelberg.
"""
_, inv, counts = np.unique(-y_score, return_inverse=True, return_counts=True)
ranked = np.zeros(len(counts))
np.add.at(ranked, inv, y_true)
ranked /= counts
groups = np.cumsum(counts) - 1
discount_sums = np.empty(len(counts))
discount_sums[0] = discount_cumsum[groups[0]]
discount_sums[1:] = np.diff(discount_cumsum[groups])
return (ranked * discount_sums).sum()
def _check_dcg_target_type(y_true):
    """Raise ValueError unless y_true's target type suits DCG/NDCG scoring."""
    supported_fmt = (
        "multilabel-indicator",
        "continuous-multioutput",
        "multiclass-multioutput",
    )
    y_type = type_of_target(y_true, input_name="y_true")
    if y_type in supported_fmt:
        return
    raise ValueError(
        "Only {} formats are supported. Got {} instead".format(supported_fmt, y_type)
    )
def dcg_score(
    y_true, y_score, *, k=None, log_base=2, sample_weight=None, ignore_ties=False
):
    """Compute Discounted Cumulative Gain.

    Sums the true gains in the order induced by the predicted scores, each
    term damped by a logarithmic discount of its rank, then averages over
    samples.  High values mean true labels are ranked high by ``y_score``.
    Usually the normalized variant (NDCG, computed by :func:`ndcg_score`) is
    preferred.

    Parameters
    ----------
    y_true : ndarray of shape (n_samples, n_labels)
        True targets of multilabel classification, or true scores of entities
        to be ranked.
    y_score : ndarray of shape (n_samples, n_labels)
        Target scores (probability estimates, confidence values, or
        non-thresholded decision values).
    k : int, default=None
        Only consider the highest k scores in the ranking. If None, use all
        outputs.
    log_base : float, default=2
        Base of the logarithm used for the discount. A low value means a
        sharper discount (top results are more important).
    sample_weight : ndarray of shape (n_samples,), default=None
        Sample weights. If `None`, all samples are given the same weight.
    ignore_ties : bool, default=False
        Assume that there are no ties in y_score (which is likely to be the
        case if y_score is continuous) for efficiency gains.

    Returns
    -------
    discounted_cumulative_gain : float
        The averaged sample DCG scores.

    See Also
    --------
    ndcg_score : The Discounted Cumulative Gain divided by the Ideal
        Discounted Cumulative Gain (the DCG obtained for a perfect ranking),
        in order to have a score between 0 and 1.

    References
    ----------
    `Wikipedia entry for Discounted Cumulative Gain
    <https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_.
    Jarvelin, K., & Kekalainen, J. (2002).
    Cumulated gain-based evaluation of IR techniques. ACM Transactions on
    Information Systems (TOIS), 20(4), 422-446.
    Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).
    A theoretical analysis of NDCG ranking measures. In Proceedings of the
    26th Annual Conference on Learning Theory (COLT 2013).
    McSherry, F., & Najork, M. (2008, March). Computing information retrieval
    performance measures efficiently in the presence of tied scores. In
    European conference on information retrieval (pp. 414-421). Springer,
    Berlin, Heidelberg.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import dcg_score
    >>> # we have ground-truth relevance of some answers to a query:
    >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
    >>> # we predict scores for the answers
    >>> scores = np.asarray([[.1, .2, .3, 4, 70]])
    >>> dcg_score(true_relevance, scores)
    9.49...
    >>> # we can set k to truncate the sum; only top k answers contribute
    >>> dcg_score(true_relevance, scores, k=2)
    5.63...
    >>> # now we have some ties in our prediction
    >>> scores = np.asarray([[1, 0, 0, 0, 1]])
    >>> # by default ties are averaged, so here we get the average true
    >>> # relevance of our top predictions: (10 + 5) / 2 = 7.5
    >>> dcg_score(true_relevance, scores, k=1)
    7.5
    >>> # we can choose to ignore ties for faster results, but only
    >>> # if we know there aren't ties in our scores, otherwise we get
    >>> # wrong results:
    >>> dcg_score(true_relevance,
    ...           scores, k=1, ignore_ties=True)
    5.0
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    _check_dcg_target_type(y_true)
    per_sample = _dcg_sample_scores(
        y_true, y_score, k=k, log_base=log_base, ignore_ties=ignore_ties
    )
    return np.average(per_sample, weights=sample_weight)
def _ndcg_sample_scores(y_true, y_score, k=None, ignore_ties=False):
    """Per-sample Normalized Discounted Cumulative Gain.

    Divides each sample's DCG by the ideal DCG (the one obtained for a
    perfect ranking), yielding a value in [0., 1.].  Samples whose ideal DCG
    is zero score 0.

    Parameters
    ----------
    y_true : ndarray of shape (n_samples, n_labels)
        True targets of multilabel classification, or true scores of entities
        to be ranked.
    y_score : ndarray of shape (n_samples, n_labels)
        Target scores (probability estimates, confidence values, or
        non-thresholded decision values).
    k : int, default=None
        Only consider the highest k scores in the ranking. If None, use all
        outputs.
    ignore_ties : bool, default=False
        Assume that there are no ties in y_score (which is likely to be the
        case if y_score is continuous) for efficiency gains.

    Returns
    -------
    normalized_discounted_cumulative_gain : ndarray of shape (n_samples,)
        The NDCG score for each sample (float in [0., 1.]).

    See Also
    --------
    dcg_score : Discounted Cumulative Gain (not normalized).
    """
    scores = _dcg_sample_scores(y_true, y_score, k, ignore_ties=ignore_ties)
    # The ideal ranking orders by y_true itself, so ties can safely be
    # ignored here: permuting equal gains leaves the re-ordered y_true (and
    # hence the normalizer) unchanged.
    best = _dcg_sample_scores(y_true, y_true, k, ignore_ties=True)
    no_relevant = best == 0
    scores[no_relevant] = 0
    scores[~no_relevant] /= best[~no_relevant]
    return scores
def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False):
    """Compute Normalized Discounted Cumulative Gain.

    Sums the true gains in the order induced by the predicted scores, each
    term damped by a logarithmic discount of its rank, divides by the best
    possible score (Ideal DCG, obtained for a perfect ranking) to get a
    value between 0 and 1, and averages over samples.  High values mean true
    labels are ranked high by ``y_score``.

    Parameters
    ----------
    y_true : ndarray of shape (n_samples, n_labels)
        True targets of multilabel classification, or true scores of entities
        to be ranked.
    y_score : ndarray of shape (n_samples, n_labels)
        Target scores (probability estimates, confidence values, or
        non-thresholded decision values).
    k : int, default=None
        Only consider the highest k scores in the ranking. If `None`, use all
        outputs.
    sample_weight : ndarray of shape (n_samples,), default=None
        Sample weights. If `None`, all samples are given the same weight.
    ignore_ties : bool, default=False
        Assume that there are no ties in y_score (which is likely to be the
        case if y_score is continuous) for efficiency gains.

    Returns
    -------
    normalized_discounted_cumulative_gain : float in [0., 1.]
        The averaged NDCG scores for all samples.

    See Also
    --------
    dcg_score : Discounted Cumulative Gain (not normalized).

    References
    ----------
    `Wikipedia entry for Discounted Cumulative Gain
    <https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
    Jarvelin, K., & Kekalainen, J. (2002).
    Cumulated gain-based evaluation of IR techniques. ACM Transactions on
    Information Systems (TOIS), 20(4), 422-446.
    Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).
    A theoretical analysis of NDCG ranking measures. In Proceedings of the
    26th Annual Conference on Learning Theory (COLT 2013)
    McSherry, F., & Najork, M. (2008, March). Computing information retrieval
    performance measures efficiently in the presence of tied scores. In
    European conference on information retrieval (pp. 414-421). Springer,
    Berlin, Heidelberg.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import ndcg_score
    >>> # we have ground-truth relevance of some answers to a query:
    >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
    >>> # we predict some scores (relevance) for the answers
    >>> scores = np.asarray([[.1, .2, .3, 4, 70]])
    >>> ndcg_score(true_relevance, scores)
    0.69...
    >>> scores = np.asarray([[.05, 1.1, 1., .5, .0]])
    >>> ndcg_score(true_relevance, scores)
    0.49...
    >>> # we can set k to truncate the sum; only top k answers contribute.
    >>> ndcg_score(true_relevance, scores, k=4)
    0.35...
    >>> # the normalization takes k into account so a perfect answer
    >>> # would still get 1.0
    >>> ndcg_score(true_relevance, true_relevance, k=4)
    1.0
    >>> # now we have some ties in our prediction
    >>> scores = np.asarray([[1, 0, 0, 0, 1]])
    >>> # by default ties are averaged, so here we get the average (normalized)
    >>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75
    >>> ndcg_score(true_relevance, scores, k=1)
    0.75
    >>> # we can choose to ignore ties for faster results, but only
    >>> # if we know there aren't ties in our scores, otherwise we get
    >>> # wrong results:
    >>> ndcg_score(true_relevance,
    ...           scores, k=1, ignore_ties=True)
    0.5
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    _check_dcg_target_type(y_true)
    per_sample = _ndcg_sample_scores(y_true, y_score, k=k, ignore_ties=ignore_ties)
    return np.average(per_sample, weights=sample_weight)
def top_k_accuracy_score(
    y_true, y_score, *, k=2, normalize=True, sample_weight=None, labels=None
):
    """Top-k Accuracy classification score.
    This metric computes the number of times where the correct label is among
    the top `k` labels predicted (ranked by predicted scores). Note that the
    multilabel case isn't covered here.
    Read more in the :ref:`User Guide <top_k_accuracy_score>`
    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True labels.
    y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
        Target scores. These can be either probability estimates or
        non-thresholded decision values (as returned by
        :term:`decision_function` on some classifiers).
        The binary case expects scores with shape (n_samples,) while the
        multiclass case expects scores with shape (n_samples, n_classes).
        In the multiclass case, the order of the class scores must
        correspond to the order of ``labels``, if provided, or else to
        the numerical or lexicographical order of the labels in ``y_true``.
        If ``y_true`` does not contain all the labels, ``labels`` must be
        provided.
    k : int, default=2
        Number of most likely outcomes considered to find the correct label.
    normalize : bool, default=True
        If `True`, return the fraction of correctly classified samples.
        Otherwise, return the number of correctly classified samples.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If `None`, all samples are given the same weight.
    labels : array-like of shape (n_classes,), default=None
        Multiclass only. List of labels that index the classes in ``y_score``.
        If ``None``, the numerical or lexicographical order of the labels in
        ``y_true`` is used. If ``y_true`` does not contain all the labels,
        ``labels`` must be provided.
    Returns
    -------
    score : float
        The top-k accuracy score. The best performance is 1 with
        `normalize == True` and the number of samples with
        `normalize == False`.
    See also
    --------
    accuracy_score
    Notes
    -----
    In cases where two or more labels are assigned equal predicted scores,
    the labels with the highest indices will be chosen first. This might
    impact the result if the correct label falls after the threshold because
    of that.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import top_k_accuracy_score
    >>> y_true = np.array([0, 1, 2, 2])
    >>> y_score = np.array([[0.5, 0.2, 0.2],  # 0 is in top 2
    ...                     [0.3, 0.4, 0.2],  # 1 is in top 2
    ...                     [0.2, 0.4, 0.3],  # 2 is in top 2
    ...                     [0.7, 0.2, 0.1]]) # 2 isn't in top 2
    >>> top_k_accuracy_score(y_true, y_score, k=2)
    0.75
    >>> # Not normalizing gives the number of "correctly" classified samples
    >>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False)
    3
    """
    y_true = check_array(y_true, ensure_2d=False, dtype=None)
    y_true = column_or_1d(y_true)
    y_type = type_of_target(y_true, input_name="y_true")
    # y_true may look binary while the problem is really multiclass (more
    # than 2 entries in `labels`); promote the type in that case.
    if y_type == "binary" and labels is not None and len(labels) > 2:
        y_type = "multiclass"
    y_score = check_array(y_score, ensure_2d=False)
    y_score = column_or_1d(y_score) if y_type == "binary" else y_score
    check_consistent_length(y_true, y_score, sample_weight)
    if y_type not in {"binary", "multiclass"}:
        raise ValueError(
            f"y type must be 'binary' or 'multiclass', got '{y_type}' instead."
        )
    # Binary scores come as a 1d array but still represent two classes.
    y_score_n_classes = y_score.shape[1] if y_score.ndim == 2 else 2
    if labels is None:
        # Infer the class set (and its order) from y_true itself.
        classes = _unique(y_true)
        n_classes = len(classes)
        if n_classes != y_score_n_classes:
            raise ValueError(
                f"Number of classes in 'y_true' ({n_classes}) not equal "
                f"to the number of classes in 'y_score' ({y_score_n_classes})."
                "You can provide a list of all known classes by assigning it "
                "to the `labels` parameter."
            )
    else:
        # User-supplied labels must be unique, sorted, consistent with
        # y_score's width, and cover every label present in y_true.
        labels = column_or_1d(labels)
        classes = _unique(labels)
        n_labels = len(labels)
        n_classes = len(classes)
        if n_classes != n_labels:
            raise ValueError("Parameter 'labels' must be unique.")
        if not np.array_equal(classes, labels):
            raise ValueError("Parameter 'labels' must be ordered.")
        if n_classes != y_score_n_classes:
            raise ValueError(
                f"Number of given labels ({n_classes}) not equal to the "
                f"number of classes in 'y_score' ({y_score_n_classes})."
            )
        if len(np.setdiff1d(y_true, classes)):
            raise ValueError("'y_true' contains labels not in parameter 'labels'.")
    if k >= n_classes:
        # With k >= n_classes every sample trivially hits; warn the caller.
        warnings.warn(
            f"'k' ({k}) greater than or equal to 'n_classes' ({n_classes}) "
            "will result in a perfect score and is therefore meaningless.",
            UndefinedMetricWarning,
        )
    # Map labels to their positional indices in `classes`.
    y_true_encoded = _encode(y_true, uniques=classes)
    if y_type == "binary":
        if k == 1:
            # Heuristic: scores in [0, 1] are presumably probabilities and
            # threshold at 0.5, otherwise decision values thresholded at 0.
            # NOTE(review): this inference can misfire for decision values
            # that happen to all lie in [0, 1] — behavior kept as-is.
            threshold = 0.5 if y_score.min() >= 0 and y_score.max() <= 1 else 0
            y_pred = (y_score > threshold).astype(np.int64)
            hits = y_pred == y_true_encoded
        else:
            # k >= 2 with two classes: every sample is a hit.
            hits = np.ones_like(y_score, dtype=np.bool_)
    elif y_type == "multiclass":
        # Stable sort, then take the k highest-scoring class indices per row.
        sorted_pred = np.argsort(y_score, axis=1, kind="mergesort")[:, ::-1]
        hits = (y_true_encoded == sorted_pred[:, :k].T).any(axis=0)
    if normalize:
        return np.average(hits, weights=sample_weight)
    elif sample_weight is None:
        return np.sum(hits)
    else:
        return np.dot(hits, sample_weight)
| 37.975843 | 88 | 0.653298 |
8ceea913757ee9544a0fd9392dc3a1de45648202 | 66,614 | py | Python | spambayes/Options.py | mpwillson/spambayes3 | b51d7bb9016066234ce88dad65faabed85f63d78 | [
"PSF-2.0"
] | 1 | 2020-03-21T15:17:22.000Z | 2020-03-21T15:17:22.000Z | spambayes/Options.py | mpwillson/spambayes3 | b51d7bb9016066234ce88dad65faabed85f63d78 | [
"PSF-2.0"
] | 1 | 2022-02-22T22:23:55.000Z | 2022-02-22T22:23:55.000Z | spambayes/Options.py | mpwillson/spambayes3 | b51d7bb9016066234ce88dad65faabed85f63d78 | [
"PSF-2.0"
] | null | null | null | """Options
Abstract:
Options.options is a globally shared options object.
This object is initialised when the module is loaded: the envar
BAYESCUSTOMIZE is checked for a list of names, if nothing is found
then the local directory and the home directory are checked for a
file called bayescustomize.ini or .spambayesrc (respectively) and
the initial values are loaded from this.
The Option class is defined in OptionsClass.py - this module
is responsible only for instantiating and loading the globally
shared instance.
To Do:
o Suggestions?
"""
import sys, os
# Provide a no-op translation function when no gettext `_` has been
# installed, so option doc strings can always be wrapped in _().
try:
    _
except NameError:
    _ = lambda arg: arg
__all__ = ['options', '_']
# Grab the stuff from the core options class.
from spambayes.OptionsClass import *
# A little magic. We'd like to use ZODB as the default storage,
# because we've had so many problems with bsddb, and we'd like to swap
# to new ZODB problems <wink>. However, apart from this, we only need
# a standard Python install - if the default was ZODB then we would
# need ZODB to be installed as well (which it will be for binary users,
# but might not be for source users). So what we do is check whether
# ZODB is importable and if it is, default to that, and if not, default
# to dbm. If ZODB is sometimes importable and sometimes not (e.g. you
# muck around with the PYTHONPATH), then this may not work well - the
# best idea would be to explicitly put the type in your configuration
# file.
try:
    import ZODB
except ImportError:
    # No ZODB available: fall back to dbm-backed stores.
    DB_TYPE = "dbm", "hammie.db", "spambayes.messageinfo.db"
else:
    # ZODB was only imported to probe availability; drop the reference.
    del ZODB
    DB_TYPE = "zodb", "hammie.fs", "messageinfo.fs"
# Format:
# defaults is a dictionary, where the keys are the section names
# each key maps to a tuple consisting of:
# option name, display name, default,
# doc string, possible values, restore on restore-to-defaults
# The display name and doc string should be enclosed in _() to allow
# i18n. In a few cases, the possible values should also be enclosed
# in _().
defaults = {
"Tokenizer" : (
("basic_header_tokenize", _("Basic header tokenising"), False,
_("""If true, tokenizer.Tokenizer.tokenize_headers() will tokenize the
contents of each header field just like the text of the message
body, using the name of the header as a tag. Tokens look like
"header:word". The basic approach is simple and effective, but also
very sensitive to biases in the ham and spam collections. For
example, if the ham and spam were collected at different times,
several headers with date/time information will become the best
discriminators. (Not just Date, but Received and X-From_.)"""),
BOOLEAN, RESTORE),
("basic_header_tokenize_only", _("Only basic header tokenising"), False,
_("""If true and basic_header_tokenize is also true, then
basic_header_tokenize is the only action performed."""),
BOOLEAN, RESTORE),
("basic_header_skip", _("Basic headers to skip"), ("received date x-.*",),
_("""If basic_header_tokenize is true, then basic_header_skip is a set
of headers that should be skipped."""),
HEADER_NAME, RESTORE),
("check_octets", _("Check application/octet-stream sections"), False,
_("""If true, the first few characters of application/octet-stream
sections are used, undecoded. What 'few' means is decided by
octet_prefix_size."""),
BOOLEAN, RESTORE),
("octet_prefix_size", _("Number of characters of octet stream to process"), 5,
_("""The number of characters of the application/octet-stream sections
to use, if check_octets is set to true."""),
INTEGER, RESTORE),
("x-short_runs", _("Count runs of short 'words'"), False,
_("""(EXPERIMENTAL) If true, generate tokens based on max number of
short word runs. Short words are anything of length < the
skip_max_word_size option. Normally they are skipped, but one common
spam technique spells words like 'V I A G RA'.
"""),
BOOLEAN, RESTORE),
("x-lookup_ip", _("Generate IP address tokens from hostnames"), False,
_("""(EXPERIMENTAL) Generate IP address tokens from hostnames.
Requires PyDNS (http://pydns.sourceforge.net/)."""),
BOOLEAN, RESTORE),
("lookup_ip_cache", _("x-lookup_ip cache file location"), "",
_("""Tell SpamBayes where to cache IP address lookup information.
Only comes into play if lookup_ip is enabled. The default
(empty string) disables the file cache. When caching is enabled,
the cache file is stored using the same database type as the main
token store (only dbm and zodb supported so far, zodb has problems,
dbm is untested, hence the default)."""),
PATH, RESTORE),
("image_size", _("Generate image size tokens"), False,
_("""If true, generate tokens based on the sizes of
embedded images."""),
BOOLEAN, RESTORE),
("crack_images", _("Look inside images for text"), False,
_("""If true, generate tokens based on the
(hopefully) text content contained in any images in each message.
The current support is minimal, relies on the installation of
an OCR 'engine' (see ocr_engine.)"""),
BOOLEAN, RESTORE),
("ocr_engine", _("OCR engine to use"), "",
_("""The name of the OCR engine to use. If empty, all
supported engines will be checked to see if they are installed.
Engines currently supported include ocrad
(http://www.gnu.org/software/ocrad/ocrad.html) and gocr
(http://jocr.sourceforge.net/download.html) and they require the
appropriate executable be installed in either your PATH, or in the
main spambayes directory."""),
HEADER_VALUE, RESTORE),
("crack_image_cache", _("Cache to speed up ocr."), "",
_("""If non-empty, names a file from which to read cached ocr info
at start and to which to save that info at exit."""),
PATH, RESTORE),
("ocrad_scale", _("Scale factor to use with ocrad."), 2,
_("""Specifies the scale factor to apply when running ocrad. While
you can specify a negative scale it probably won't help. Scaling up
by a factor of 2 or 3 seems to work well for the sort of spam images
encountered by SpamBayes."""),
INTEGER, RESTORE),
("ocrad_charset", _("Charset to apply with ocrad."), "ascii",
_("""Specifies the charset to use when running ocrad. Valid values
are 'ascii', 'iso-8859-9' and 'iso-8859-15'."""),
OCRAD_CHARSET, RESTORE),
("max_image_size", _("Max image size to try OCR-ing"), 100000,
_("""When crack_images is enabled, this specifies the largest
image to try OCR on."""),
INTEGER, RESTORE),
("count_all_header_lines", _("Count all header lines"), False,
_("""Generate tokens just counting the number of instances of each kind
of header line, in a case-sensitive way.
Depending on data collection, some headers are not safe to count.
For example, if ham is collected from a mailing list but spam from
your regular inbox traffic, the presence of a header like List-Info
will be a very strong ham clue, but a bogus one. In that case, set
count_all_header_lines to False, and adjust safe_headers instead."""),
BOOLEAN, RESTORE),
("record_header_absence", _("Record header absence"), False,
_("""When True, generate a "noheader:HEADERNAME" token for each header
in safe_headers (below) that *doesn't* appear in the headers. This
helped in various of Tim's python.org tests, but appeared to hurt a
little in Anthony Baxter's tests."""),
BOOLEAN, RESTORE),
("safe_headers", _("Safe headers"), ("abuse-reports-to", "date", "errors-to",
"from", "importance", "in-reply-to",
"message-id", "mime-version",
"organization", "received",
"reply-to", "return-path", "subject",
"to", "user-agent", "x-abuse-info",
"x-complaints-to", "x-face"),
_("""Like count_all_header_lines, but restricted to headers in this list.
safe_headers is ignored when count_all_header_lines is true, unless
record_header_absence is also true."""),
HEADER_NAME, RESTORE),
("mine_received_headers", _("Mine the received headers"), False,
_("""A lot of clues can be gotten from IP addresses and names in
Received: headers. This can give spectacular results for bogus
reasons if your corpora are from different sources."""),
BOOLEAN, RESTORE),
("x-mine_nntp_headers", _("Mine NNTP-Posting-Host headers"), False,
_("""Usenet is host to a lot of spam. Usenet/Mailing list gateways
can let it leak across. Similar to mining received headers, we pick
apart the IP address or host name in this header for clues."""),
BOOLEAN, RESTORE),
("address_headers", _("Address headers to mine"), ("from", "to", "cc",
"sender", "reply-to"),
_("""Mine the following address headers. If you have mixed source
corpuses (as opposed to a mixed sauce walrus, which is delicious!)
then you probably don't want to use 'to' or 'cc') Address headers will
be decoded, and will generate charset tokens as well as the real
address. Others to consider: errors-to, ..."""),
HEADER_NAME, RESTORE),
("generate_long_skips", _("Generate long skips"), True,
_("""If legitimate mail contains things that look like text to the
tokenizer and turning turning off this option helps (perhaps binary
attachments get 'defanged' by something upstream from this operation
and thus look like text), this may help, and should be an alert that
perhaps the tokenizer is broken."""),
BOOLEAN, RESTORE),
("summarize_email_prefixes", _("Summarise email prefixes"), False,
_("""Try to capitalize on mail sent to multiple similar addresses."""),
BOOLEAN, RESTORE),
("summarize_email_suffixes", _("Summarise email suffixes"), False,
_("""Try to capitalize on mail sent to multiple similar addresses."""),
BOOLEAN, RESTORE),
("skip_max_word_size", _("Long skip trigger length"), 12,
_("""Length of words that triggers 'long skips'. Longer than this
triggers a skip."""),
INTEGER, RESTORE),
("x-pick_apart_urls", _("Extract clues about url structure"), False,
_("""(EXPERIMENTAL) Note whether url contains non-standard port or
user/password elements."""),
BOOLEAN, RESTORE),
("x-fancy_url_recognition", _("Extract URLs without http:// prefix"), False,
_("""(EXPERIMENTAL) Recognize 'www.python.org' or ftp.python.org as URLs
instead of just long words."""),
BOOLEAN, RESTORE),
("replace_nonascii_chars", _("Replace non-ascii characters"), False,
_("""If true, replace high-bit characters (ord(c) >= 128) and control
characters with question marks. This allows non-ASCII character
strings to be identified with little training and small database
burden. It's appropriate only if your ham is plain 7-bit ASCII, or
nearly so, so that the mere presence of non-ASCII character strings is
known in advance to be a strong spam indicator."""),
BOOLEAN, RESTORE),
("x-search_for_habeas_headers", _("Search for Habeas Headers"), False,
_("""(EXPERIMENTAL) If true, search for the habeas headers (see
http://www.habeas.com). If they are present and correct, this should
be a strong ham sign, if they are present and incorrect, this should
be a strong spam sign."""),
BOOLEAN, RESTORE),
("x-reduce_habeas_headers", _("Reduce Habeas Header Tokens to Single"), False,
_("""(EXPERIMENTAL) If SpamBayes is set to search for the Habeas
headers, nine tokens are generated for messages with habeas headers.
This should be fine, since messages with the headers should either be
ham, or result in FN so that we can send them to habeas so they can
be sued. However, to reduce the strength of habeas headers, we offer
the ability to reduce the nine tokens to one. (This option has no
effect if 'Search for Habeas Headers' is False)"""),
BOOLEAN, RESTORE),
),
# These options are all experimental; it seemed better to put them into
    # their own category than have several interdependent experimental options.
# If this capability is removed, the entire section can go.
"URLRetriever" : (
("x-slurp_urls", _("Tokenize text content at the end of URLs"), False,
_("""(EXPERIMENTAL) If this option is enabled, when a message normally
scores in the 'unsure' range, and has fewer tokens than the maximum
looked at, and contains URLs, then the text at those URLs is obtained
and tokenized. If those tokens result in the message moving to a
score outside the 'unsure' range, then they are added to the
tokens for the message. This should be particularly effective
for messages that contain only a single URL and no other text."""),
BOOLEAN, RESTORE),
("x-cache_expiry_days", _("Number of days to store URLs in cache"), 7,
_("""(EXPERIMENTAL) This is the number of days that local cached copies
of the text at the URLs will be stored for."""),
INTEGER, RESTORE),
("x-cache_directory", _("URL Cache Directory"), "url-cache",
_("""(EXPERIMENTAL) So that SpamBayes doesn't need to retrieve the same
URL over and over again, it stores local copies of the text at the
end of the URL. This is the directory that will be used for those
copies."""),
PATH, RESTORE),
("x-only_slurp_base", _("Retrieve base url"), False,
_("""(EXPERIMENTAL) To try and speed things up, and to avoid following
unique URLS, if this option is enabled, SpamBayes will convert the URL
to as basic a form it we can. All directory information is removed
and the domain is reduced to the two (or three for those with a
country TLD) top-most elements. For example,
http://www.massey.ac.nz/~tameyer/index.html?you=me
would become
http://massey.ac.nz
and
http://id.example.com
would become http://example.com
This should have two beneficial effects:
o It's unlikely that any information could be contained in this 'base'
url that could identify the user (unless they have a *lot* of domains).
o Many urls (both spam and ham) will strip down into the same 'base' url.
Since we have a limited form of caching, this means that a lot fewer
urls will have to be retrieved.
However, this does mean that if the 'base' url is hammy and the full is
spammy, or vice-versa, that the slurp will give back the wrong information.
Whether or not this is the case would have to be determined by testing.
"""),
BOOLEAN, RESTORE),
("x-web_prefix", _("Prefix for tokens from web pages"), "",
_("""(EXPERIMENTAL) It may be that what is hammy/spammy for you in email
isn't from webpages. You can then set this option (to "web:", for
example), and effectively create an independent (sub)database for
tokens derived from parsing web pages."""),
r"[\S]+", RESTORE),
),
# These options control how a message is categorized
"Categorization" : (
# spam_cutoff and ham_cutoff are used in Python slice sense:
# A msg is considered ham if its score is in 0:ham_cutoff
# A msg is considered unsure if its score is in ham_cutoff:spam_cutoff
# A msg is considered spam if its score is in spam_cutoff:
#
# So it's unsure iff ham_cutoff <= score < spam_cutoff.
# For a binary classifier, make ham_cutoff == spam_cutoff.
# ham_cutoff > spam_cutoff doesn't make sense.
#
# The defaults here (.2 and .9) may be appropriate for the default chi-
# combining scheme. Cutoffs for chi-combining typically aren't touchy,
# provided you're willing to settle for "really good" instead of "optimal".
# Tim found that .3 and .8 worked very well for well-trained systems on
# his personal email, and his large comp.lang.python test. If just
# beginning training, or extremely fearful of mistakes, 0.05 and 0.95 may
# be more appropriate for you.
#
# Picking good values for gary-combining is much harder, and appears to be
# corpus-dependent, and within a single corpus dependent on how much
# training has been done. Values from 0.50 thru the low 0.60's have been
# reported to work best by various testers on their data.
("ham_cutoff", _("Ham cutoff"), 0.20,
_("""Spambayes gives each email message a spam probability between
0 and 1. Emails below the Ham Cutoff probability are classified
as Ham. Larger values will result in more messages being
classified as ham, but with less certainty that all of them
actually are ham. This value should be between 0 and 1,
and should be smaller than the Spam Cutoff."""),
REAL, RESTORE),
("spam_cutoff", _("Spam cutoff"), 0.90,
_("""Emails with a spam probability above the Spam Cutoff are
classified as Spam - just like the Ham Cutoff but at the other
end of the scale. Messages that fall between the two values
are classified as Unsure."""),
REAL, RESTORE),
),
# These control various displays in class TestDriver.Driver, and
# Tester.Test.
"TestDriver" : (
("nbuckets", _("Number of buckets"), 200,
_("""Number of buckets in histograms."""),
INTEGER, RESTORE),
("show_histograms", _("Show histograms"), True,
_(""""""),
BOOLEAN, RESTORE),
("compute_best_cutoffs_from_histograms", _("Compute best cutoffs from histograms"), True,
_("""After the display of a ham+spam histogram pair, you can get a
listing of all the cutoff values (coinciding with histogram bucket
boundaries) that minimize:
best_cutoff_fp_weight * (# false positives) +
best_cutoff_fn_weight * (# false negatives) +
best_cutoff_unsure_weight * (# unsure msgs)
This displays two cutoffs: hamc and spamc, where
0.0 <= hamc <= spamc <= 1.0
The idea is that if something scores < hamc, it's called ham; if
something scores >= spamc, it's called spam; and everything else is
called 'I am not sure' -- the middle ground.
Note: You may wish to increase nbuckets, to give this scheme more cutoff
values to analyze."""),
BOOLEAN, RESTORE),
("best_cutoff_fp_weight", _("Best cutoff false positive weight"), 10.00,
_(""""""),
REAL, RESTORE),
("best_cutoff_fn_weight", _("Best cutoff false negative weight"), 1.00,
_(""""""),
REAL, RESTORE),
("best_cutoff_unsure_weight", _("Best cutoff unsure weight"), 0.20,
_(""""""),
REAL, RESTORE),
("percentiles", _("Percentiles"), (5, 25, 75, 95),
_("""Histogram analysis also displays percentiles. For each percentile
p in the list, the score S such that p% of all scores are <= S is
given. Note that percentile 50 is the median, and is displayed (along
with the min score and max score) independent of this option."""),
INTEGER, RESTORE),
("show_spam_lo", _(""), 1.0,
_("""Display spam when show_spam_lo <= spamprob <= show_spam_hi and
likewise for ham. The defaults here do not show anything."""),
REAL, RESTORE),
("show_spam_hi", _(""), 0.0,
_("""Display spam when show_spam_lo <= spamprob <= show_spam_hi and
likewise for ham. The defaults here do not show anything."""),
REAL, RESTORE),
("show_ham_lo", _(""), 1.0,
_("""Display spam when show_spam_lo <= spamprob <= show_spam_hi and
likewise for ham. The defaults here do not show anything."""),
REAL, RESTORE),
("show_ham_hi", _(""), 0.0,
_("""Display spam when show_spam_lo <= spamprob <= show_spam_hi and
likewise for ham. The defaults here do not show anything."""),
REAL, RESTORE),
("show_false_positives", _("Show false positives"), True,
_(""""""),
BOOLEAN, RESTORE),
("show_false_negatives", _("Show false negatives"), False,
_(""""""),
BOOLEAN, RESTORE),
("show_unsure", _("Show unsure"), False,
_(""""""),
BOOLEAN, RESTORE),
("show_charlimit", _("Show character limit"), 3000,
_("""The maximum # of characters to display for a msg displayed due to
the show_xyz options above."""),
INTEGER, RESTORE),
("save_trained_pickles", _("Save trained pickles"), False,
_("""If save_trained_pickles is true, Driver.train() saves a binary
pickle of the classifier after training. The file basename is given
by pickle_basename, the extension is .pik, and increasing integers are
appended to pickle_basename. By default (if save_trained_pickles is
true), the filenames are class1.pik, class2.pik, ... If a file of
that name already exists, it is overwritten. pickle_basename is
ignored when save_trained_pickles is false."""),
BOOLEAN, RESTORE),
("pickle_basename", _("Pickle basename"), "class",
_(""""""),
r"[\w]+", RESTORE),
("save_histogram_pickles", _("Save histogram pickles"), False,
_("""If save_histogram_pickles is true, Driver.train() saves a binary
pickle of the spam and ham histogram for "all test runs". The file
basename is given by pickle_basename, the suffix _spamhist.pik
or _hamhist.pik is appended to the basename."""),
BOOLEAN, RESTORE),
("spam_directories", _("Spam directories"), "Data/Spam/Set%d",
_("""default locations for timcv and timtest - these get the set number
interpolated."""),
VARIABLE_PATH, RESTORE),
("ham_directories", _("Ham directories"), "Data/Ham/Set%d",
_("""default locations for timcv and timtest - these get the set number
interpolated."""),
VARIABLE_PATH, RESTORE),
),
"CV Driver": (
("build_each_classifier_from_scratch", _("Build each classifier from scratch"), False,
_("""A cross-validation driver takes N ham+spam sets, and builds N
classifiers, training each on N-1 sets, and the predicting against the
set not trained on. By default, it does this in a clever way,
learning *and* unlearning sets as it goes along, so that it never
needs to train on N-1 sets in one gulp after the first time. Setting
this option true forces ''one gulp from-scratch'' training every time.
There used to be a set of combining schemes that needed this, but now
it is just in case you are paranoid <wink>."""),
BOOLEAN, RESTORE),
),
"Classifier": (
("max_discriminators", _("Maximum number of extreme words"), 150,
_("""The maximum number of extreme words to look at in a message, where
"extreme" means with spam probability farthest away from 0.5. 150
appears to work well across all corpora tested."""),
INTEGER, RESTORE),
("unknown_word_prob", _("Unknown word probability"), 0.5,
_("""These two control the prior assumption about word probabilities.
unknown_word_prob is essentially the probability given to a word that
has never been seen before. Nobody has reported an improvement via
moving it away from 1/2, although Tim has measured a mean spamprob of
a bit over 0.5 (0.51-0.55) in 3 well-trained classifiers."""),
REAL, RESTORE),
("unknown_word_strength", _("Unknown word strength"), 0.45,
_("""This adjusts how much weight to give the prior
assumption relative to the probabilities estimated by counting. At 0,
the counting estimates are believed 100%, even to the extent of
assigning certainty (0 or 1) to a word that has appeared in only ham
or only spam. This is a disaster.
As unknown_word_strength tends toward infinity, all probabilities
tend toward unknown_word_prob. All reports were that a value near 0.4
worked best, so this does not seem to be corpus-dependent."""),
REAL, RESTORE),
("minimum_prob_strength", _("Minimum probability strength"), 0.1,
_("""When scoring a message, ignore all words with
abs(word.spamprob - 0.5) < minimum_prob_strength.
This may be a hack, but it has proved to reduce error rates in many
tests. 0.1 appeared to work well across all corpora."""),
REAL, RESTORE),
("use_chi_squared_combining", _("Use chi-squared combining"), True,
_("""For vectors of random, uniformly distributed probabilities,
-2*sum(ln(p_i)) follows the chi-squared distribution with 2*n degrees
of freedom. This is the "provably most-sensitive" test the original
scheme was monotonic with. Getting closer to the theoretical basis
appears to give an excellent combining method, usually very extreme in
its judgment, yet finding a tiny (in # of msgs, spread across a huge
range of scores) middle ground where lots of the mistakes live. This
is the best method so far. One systematic benefit is is immunity to
"cancellation disease". One systematic drawback is sensitivity to
*any* deviation from a uniform distribution, regardless of whether
actually evidence of ham or spam. Rob Hooft alleviated that by
combining the final S and H measures via (S-H+1)/2 instead of via
S/(S+H)). In practice, it appears that setting ham_cutoff=0.05, and
spam_cutoff=0.95, does well across test sets; while these cutoffs are
rarely optimal, they get close to optimal. With more training data,
Tim has had good luck with ham_cutoff=0.30 and spam_cutoff=0.80 across
three test data sets (original c.l.p data, his own email, and newer
general python.org traffic)."""),
BOOLEAN, RESTORE),
("use_bigrams", _("Use mixed uni/bi-grams scheme"), False,
_("""Generate both unigrams (words) and bigrams (pairs of
words). However, extending an idea originally from Gary Robinson, the
message is 'tiled' into non-overlapping unigrams and bigrams,
approximating the strongest outcome over all possible tilings.
Note that to really test this option you need to retrain with it on,
so that your database includes the bigrams - if you subsequently turn
it off, these tokens will have no effect. This option will at least
double your database size given the same training data, and will
probably at least triple it.
You may also wish to increase the max_discriminators (maximum number
of extreme words) option if you enable this option, perhaps doubling or
quadrupling it. It's not yet clear. Bigrams create many more hapaxes,
and that seems to increase the brittleness of minimalist training
regimes; increasing max_discriminators may help to soften that effect.
OTOH, max_discriminators defaults to 150 in part because that makes it
easy to prove that the chi-squared math is immune from numeric
problems. Increase it too much, and insane results will eventually
result (including fatal floating-point exceptions on some boxes).
This option is experimental, and may be removed in a future release.
We would appreciate feedback about it if you use it - email
spambayes@python.org with your comments and results.
"""),
BOOLEAN, RESTORE),
),
"Hammie": (
("train_on_filter", _("Train when filtering"), False,
_("""Train when filtering? After filtering a message, hammie can then
train itself on the judgement (ham or spam). This can speed things up
with a procmail-based solution. If you do enable this, please make
sure to retrain any mistakes. Otherwise, your word database will
slowly become useless. Note that this option is only used by
sb_filter, and will have no effect on sb_server's POP3 proxy, or
the IMAP filter."""),
BOOLEAN, RESTORE),
),
# These options control where Spambayes data will be stored, and in
# what form. They are used by many Spambayes applications (including
# pop3proxy, smtpproxy, imapfilter and hammie), and mean that data
# (such as the message database) is shared between the applications.
# If this is not the desired behaviour, you must have a different
# value for each of these options in a configuration file that gets
# loaded by the appropriate application only.
"Storage" : (
("persistent_use_database", _("Database backend"), DB_TYPE[0],
_("""SpamBayes can use either a ZODB or dbm database (quick to score
one message) or a pickle (quick to train on huge amounts of messages).
There is also (experimental) ability to use a mySQL or PostgresSQL
database."""),
("zeo", "zodb", "cdb", "mysql", "pgsql", "dbm", "pickle"), RESTORE),
("persistent_storage_file", _("Storage file name"), DB_TYPE[1],
_("""Spambayes builds a database of information that it gathers
from incoming emails and from you, the user, to get better and
better at classifying your email. This option specifies the
name of the database file. If you don't give a full pathname,
the name will be taken to be relative to the location of the
most recent configuration file loaded."""),
FILE_WITH_PATH, DO_NOT_RESTORE),
("messageinfo_storage_file", _("Message information file name"), DB_TYPE[2],
_("""Spambayes builds a database of information about messages
that it has already seen and trained or classified. This
database is used to ensure that these messages are not retrained
or reclassified (unless specifically requested to). This option
specifies the name of the database file. If you don't give a
full pathname, the name will be taken to be relative to the location
of the most recent configuration file loaded."""),
FILE_WITH_PATH, DO_NOT_RESTORE),
("cache_use_gzip", _("Use gzip"), False,
_("""Use gzip to compress the cache."""),
BOOLEAN, RESTORE),
("cache_expiry_days", _("Days before cached messages expire"), 7,
_("""Messages will be expired from the cache after this many days.
After this time, you will no longer be able to train on these messages
(note this does not affect the copy of the message that you have in
your mail client)."""),
INTEGER, RESTORE),
("spam_cache", _("Spam cache directory"), "pop3proxy-spam-cache",
_("""Directory that SpamBayes should cache spam in. If this does
not exist, it will be created."""),
PATH, DO_NOT_RESTORE),
("ham_cache", _("Ham cache directory"), "pop3proxy-ham-cache",
_("""Directory that SpamBayes should cache ham in. If this does
not exist, it will be created."""),
PATH, DO_NOT_RESTORE),
("unknown_cache", _("Unknown cache directory"), "pop3proxy-unknown-cache",
_("""Directory that SpamBayes should cache unclassified messages in.
If this does not exist, it will be created."""),
PATH, DO_NOT_RESTORE),
("core_spam_cache", _("Spam cache directory"), "core-spam-cache",
_("""Directory that SpamBayes should cache spam in. If this does
not exist, it will be created."""),
PATH, DO_NOT_RESTORE),
("core_ham_cache", _("Ham cache directory"), "core-ham-cache",
_("""Directory that SpamBayes should cache ham in. If this does
not exist, it will be created."""),
PATH, DO_NOT_RESTORE),
("core_unknown_cache", _("Unknown cache directory"), "core-unknown-cache",
_("""Directory that SpamBayes should cache unclassified messages in.
If this does not exist, it will be created."""),
PATH, DO_NOT_RESTORE),
("cache_messages", _("Cache messages"), True,
_("""You can disable the pop3proxy caching of messages. This
will make the proxy a bit faster, and make it use less space
on your hard drive. The proxy uses its cache for reviewing
and training of messages, so if you disable caching you won't
be able to do further training unless you re-enable it.
Thus, you should only turn caching off when you are satisfied
with the filtering that Spambayes is doing for you."""),
BOOLEAN, RESTORE),
("no_cache_bulk_ham", _("Suppress caching of bulk ham"), False,
_("""Where message caching is enabled, this option suppresses caching
of messages which are classified as ham and marked as
'Precedence: bulk' or 'Precedence: list'. If you subscribe to a
high-volume mailing list then your 'Review messages' page can be
overwhelmed with list messages, making training a pain. Once you've
trained Spambayes on enough list traffic, you can use this option
to prevent that traffic showing up in 'Review messages'."""),
BOOLEAN, RESTORE),
("no_cache_large_messages", _("Maximum size of cached messages"), 0,
_("""Where message caching is enabled, this option suppresses caching
of messages which are larger than this value (measured in bytes).
If you receive a lot of messages that include large attachments
(and are correctly classified), you may not wish to cache these.
If you set this to zero (0), then this option will have no effect."""),
INTEGER, RESTORE),
),
# These options control the various headers that some Spambayes
# applications add to incoming mail, including imapfilter, pop3proxy,
# and hammie.
"Headers" : (
# The name of the header that hammie, pop3proxy, and any other spambayes
    # software, adds to emails in filter mode. This will definitely contain
# the "classification" of the mail, and may also (i.e. with hammie)
# contain the score
("classification_header_name", _("Classification header name"), "X-Spambayes-Classification",
_("""Spambayes classifies each message by inserting a new header into
the message. This header can then be used by your email client
(provided your client supports filtering) to move spam into a
separate folder (recommended), delete it (not recommended), etc.
This option specifies the name of the header that Spambayes inserts.
The default value should work just fine, but you may change it to
anything that you wish."""),
HEADER_NAME, RESTORE),
# The three disposition names are added to the header as the following
# three words:
("header_spam_string", _("Spam disposition name"), _("spam"),
_("""The header that Spambayes inserts into each email has a name,
(Classification header name, above), and a value. If the classifier
determines that this email is probably spam, it places a header named
as above with a value as specified by this string. The default
value should work just fine, but you may change it to anything
that you wish."""),
HEADER_VALUE, RESTORE),
("header_ham_string", _("Ham disposition name"), _("ham"),
_("""As for Spam Designation, but for emails classified as Ham."""),
HEADER_VALUE, RESTORE),
("header_unsure_string", _("Unsure disposition name"), _("unsure"),
_("""As for Spam/Ham Designation, but for emails which the
classifer wasn't sure about (ie. the spam probability fell between
the Ham and Spam Cutoffs). Emails that have this classification
should always be the subject of training."""),
HEADER_VALUE, RESTORE),
("header_score_digits", _("Accuracy of reported score"), 2,
_("""Accuracy of the score in the header in decimal digits."""),
INTEGER, RESTORE),
("header_score_logarithm", _("Augment score with logarithm"), False,
_("""Set this option to augment scores of 1.00 or 0.00 by a
logarithmic "one-ness" or "zero-ness" score (basically it shows the
"number of zeros" or "number of nines" next to the score value)."""),
BOOLEAN, RESTORE),
("include_score", _("Add probability (score) header"), False,
_("""You can have Spambayes insert a header with the calculated spam
probability into each mail. If you can view headers with your
mailer, then you can see this information, which can be interesting
and even instructive if you're a serious SpamBayes junkie."""),
BOOLEAN, RESTORE),
("score_header_name", _("Probability (score) header name"), "X-Spambayes-Spam-Probability",
_(""""""),
HEADER_NAME, RESTORE),
("include_thermostat", _("Add level header"), False,
_("""You can have spambayes insert a header with the calculated spam
probability, expressed as a number of '*'s, into each mail (the more
'*'s, the higher the probability it is spam). If your mailer
supports it, you can use this information to fine tune your
classification of ham/spam, ignoring the classification given."""),
BOOLEAN, RESTORE),
("thermostat_header_name", _("Level header name"), "X-Spambayes-Level",
_(""""""),
HEADER_NAME, RESTORE),
("include_evidence", _("Add evidence header"), False,
_("""You can have spambayes insert a header into mail, with the
evidence that it used to classify that message (a collection of
words with ham and spam probabilities). If you can view headers
with your mailer, then this may give you some insight as to why
a particular message was scored in a particular way."""),
BOOLEAN, RESTORE),
("evidence_header_name", _("Evidence header name"), "X-Spambayes-Evidence",
_(""""""),
HEADER_NAME, RESTORE),
("mailid_header_name", _("Spambayes id header name"), "X-Spambayes-MailId",
_(""""""),
HEADER_NAME, RESTORE),
("include_trained", _("Add trained header"), True,
_("""sb_mboxtrain.py and sb_filter.py can add a header that details
how a message was trained, which lets you keep track of it, and
appropriately re-train messages. However, if you would rather
mboxtrain/sb_filter didn't rewrite the message files, you can disable
this option."""),
BOOLEAN, RESTORE),
("trained_header_name", _("Trained header name"), "X-Spambayes-Trained",
_("""When training on a message, the name of the header to add with how
it was trained"""),
HEADER_NAME, RESTORE),
("clue_mailheader_cutoff", _("Debug header cutoff"), 0.5,
_("""The range of clues that are added to the "debug" header in the
E-mail. All clues that have their probability smaller than this number,
or larger than one minus this number are added to the header such that
you can see why spambayes thinks this is ham/spam or why it is unsure.
The default is to show all clues, but you can reduce that by setting
showclue to a lower value, such as 0.1"""),
REAL, RESTORE),
("add_unique_id", _("Add unique spambayes id"), True,
_("""If you wish to be able to find a specific message (via the 'find'
box on the home page), or use the SMTP proxy to train using cached
messages, you will need to know the unique id of each message. This
option adds this information to a header added to each message."""),
BOOLEAN, RESTORE),
("notate_to", _("Notate to"), (),
_("""Some email clients (Outlook Express, for example) can only set up
filtering rules on a limited set of headers. These clients cannot
test for the existence/value of an arbitrary header and filter mail
based on that information. To accommodate these kind of mail clients,
you can add "spam", "ham", or "unsure" to the recipient list. A
filter rule can then use this to see if one of these words (followed
by a comma) is in the recipient list, and route the mail to an
appropriate folder, or take whatever other action is supported and
appropriate for the mail classification.
As it interferes with replying, you may only wish to do this for
spam messages; simply tick the boxes of the classifications take
should be identified in this fashion."""),
((), _("ham"), _("spam"), _("unsure")), RESTORE),
("notate_subject", _("Classify in subject: header"), (),
_("""This option will add the same information as 'Notate To',
but to the start of the mail subject line."""),
((), _("ham"), _("spam"), _("unsure")), RESTORE),
),
# pop3proxy settings: The only mandatory option is pop3proxy_servers, eg.
# "pop3.my-isp.com:110", or a comma-separated list of those. The ":110"
# is optional. If you specify more than one server in pop3proxy_servers,
# you must specify the same number of ports in pop3proxy_ports.
"pop3proxy" : (
("remote_servers", _("Remote Servers"), (),
_("""\
The SpamBayes POP3 proxy intercepts incoming email and classifies it
before sending it on to your email client. You need to specify which
POP3 server(s) and port(s) you wish it to connect to - a POP3 server
address typically looks like 'pop3.myisp.net:110' where
'pop3.myisp.net' is the name of the computer where the POP3 server runs
and '110' is the port on which the POP3 server listens. The other port
you might find is '995', which is used for secure POP3. If you use
more than one server, simply separate their names with commas. For
example: 'pop3.myisp.net:110,pop.gmail.com:995'. You can get
these server names and port numbers from your existing email
configuration, or from your ISP or system administrator. If you are
using Web-based email, you can't use the SpamBayes POP3 proxy (sorry!).
In your email client's configuration, where you would normally put your
POP3 server address, you should now put the address of the machine
running SpamBayes.
"""),
SERVER, DO_NOT_RESTORE),
("listen_ports", _("SpamBayes Ports"), (),
_("""\
Each monitored POP3 server must be assigned to a different port in the
SpamBayes POP3 proxy. You need to configure your email client to
connect to this port instead of the actual remote POP3 server. If you
don't know what port to use, try 8110 and go up from there. If you
have two servers, your list of listen ports might then be '8110,8111'.
"""),
SERVER, DO_NOT_RESTORE),
("allow_remote_connections", _("Allowed remote POP3 connections"), "localhost",
_("""Enter a list of trusted IPs, separated by commas. Remote POP
connections from any of them will be allowed. You can trust any
IP using a single '*' as field value. You can also trust ranges of
IPs using the '*' character as a wildcard (for instance 192.168.0.*).
The localhost IP will always be trusted. Type 'localhost' in the
field to trust this only address."""),
IP_LIST, RESTORE),
("retrieval_timeout", _("Retrieval timeout"), 30,
_("""When proxying messages, time out after this length of time if
all the headers have been received. The rest of the mesasge will
proxy straight through. Some clients have a short timeout period,
and will give up on waiting for the message if this is too long.
Note that the shorter this is, the less of long messages will be
used for classifications (i.e. results may be effected)."""),
REAL, RESTORE),
("use_ssl", "Connect via a secure socket layer", False,
"""Use SSL to connect to the server. This allows spambayes to connect
without sending data in plain text.
Note that this does not check the server certificate at this point in
time.""",
(False, True, "automatic"), DO_NOT_RESTORE),
),
"smtpproxy" : (
("remote_servers", _("Remote Servers"), (),
_("""Use of the SMTP proxy is optional - if you would rather just train
via the web interface, or the pop3dnd or mboxtrain scripts, then you
can safely leave this option blank. The Spambayes SMTP proxy
intercepts outgoing email - if you forward mail to one of the
addresses below, it is examined for an id and the message
corresponding to that id is trained as ham/spam. All other mail is
sent along to your outgoing mail server. You need to specify which
SMTP server(s) you wish it to intercept - a SMTP server address
typically looks like "smtp.myisp.net". If you use more than one
server, simply separate their names with commas. You can get these
server names from your existing email configuration, or from your ISP
or system administrator. If you are using Web-based email, you can't
use the Spambayes SMTP proxy (sorry!). In your email client's
configuration, where you would normally put your SMTP server address,
you should now put the address of the machine running SpamBayes."""),
SERVER, DO_NOT_RESTORE),
("listen_ports", _("SpamBayes Ports"), (),
_("""Each SMTP server that is being monitored must be assigned to a
'port' in the Spambayes SMTP proxy. This port must be different for
each monitored server, and there must be a port for
each monitored server. Again, you need to configure your email
client to use this port. If there are multiple servers, you must
specify the same number of ports as servers, separated by commas."""),
SERVER, DO_NOT_RESTORE),
("allow_remote_connections", _("Allowed remote SMTP connections"), "localhost",
_("""Enter a list of trusted IPs, separated by commas. Remote SMTP
connections from any of them will be allowed. You can trust any
IP using a single '*' as field value. You can also trust ranges of
IPs using the '*' character as a wildcard (for instance 192.168.0.*).
The localhost IP will always be trusted. Type 'localhost' in the
field to trust this only address. Note that you can unwittingly
turn a SMTP server into an open proxy if you open this up, as
connections to the server will appear to be from your machine, even
if they are from a remote machine *through* your machine, to the
server. We do not recommend opening this up fully (i.e. using '*').
"""),
IP_LIST, RESTORE),
("ham_address", _("Train as ham address"), "spambayes_ham@localhost",
_("""When a message is received that you wish to train on (for example,
one that was incorrectly classified), you need to forward or bounce
it to one of two special addresses so that the SMTP proxy can identify
it. If you wish to train it as ham, forward or bounce it to this
address. You will want to use an address that is not
a valid email address, like ham@nowhere.nothing."""),
EMAIL_ADDRESS, RESTORE),
("spam_address", _("Train as spam address"), "spambayes_spam@localhost",
_("""As with Ham Address above, but the address that you need to forward
or bounce mail that you wish to train as spam. You will want to use
an address that is not a valid email address, like
spam@nowhere.nothing."""),
EMAIL_ADDRESS, RESTORE),
("use_cached_message", _("Lookup message in cache"), False,
_("""If this option is set, then the smtpproxy will attempt to
look up the messages sent to it (for training) in the POP3 proxy cache
or IMAP filter folders, and use that message as the training data.
This avoids any problems where your mail client might change the
message when forwarding, contaminating your training data. If you can
be sure that this won't occur, then the id-lookup can be avoided.
Note that Outlook Express users cannot use the lookup option (because
of the way messages are forwarded), and so if they wish to use the
SMTP proxy they must enable this option (but as messages are altered,
may not get the best results, and this is not recommended)."""),
BOOLEAN, RESTORE),
),
# imap4proxy settings: The only mandatory option is imap4proxy_servers, eg.
# "imap4.my-isp.com:143", or a comma-separated list of those. The ":143"
# is optional. If you specify more than one server in imap4proxy_servers,
# you must specify the same number of ports in imap4proxy_ports.
"imap4proxy" : (
("remote_servers", _("Remote Servers"), (),
_("""The SpamBayes IMAP4 proxy intercepts incoming email and classifies
it before sending it on to your email client. You need to specify
which IMAP4 server(s) you wish it to intercept - a IMAP4 server
address typically looks like "mail.myisp.net". If you use more than
one server, simply separate their names with commas. You can get
these server names from your existing email configuration, or from
your ISP or system administrator. If you are using Web-based email,
you can't use the SpamBayes IMAP4 proxy (sorry!). In your email
client's configuration, where you would normally put your IMAP4 server
address, you should now put the address of the machine running
SpamBayes."""),
SERVER, DO_NOT_RESTORE),
("listen_ports", _("SpamBayes Ports"), (),
_("""Each IMAP4 server that is being monitored must be assigned to a
'port' in the SpamBayes IMAP4 proxy. This port must be different for
each monitored server, and there must be a port for each monitored
server. Again, you need to configure your email client to use this
port. If there are multiple servers, you must specify the same number
of ports as servers, separated by commas. If you don't know what to
use here, and you only have one server, try 143, or if that doesn't
work, try 8143."""),
SERVER, DO_NOT_RESTORE),
("allow_remote_connections", _("Allowed remote IMAP4 connections"), "localhost",
_("""Enter a list of trusted IPs, separated by commas. Remote IMAP
connections from any of them will be allowed. You can trust any
IP using a single '*' as field value. You can also trust ranges of
IPs using the '*' character as a wildcard (for instance 192.168.0.*).
The localhost IP will always be trusted. Type 'localhost' in the
field to trust this only address."""),
IP_LIST, RESTORE),
("use_ssl", "Connect via a secure socket layer", False,
"""Use SSL to connect to the server. This allows spambayes to connect
without sending data in plain text.
Note that this does not check the server certificate at this point in
time.""",
(False, True, "automatic"), DO_NOT_RESTORE),
),
"html_ui" : (
("port", _("Port"), 8880,
_(""""""),
PORT, RESTORE),
("launch_browser", _("Launch browser"), False,
_("""If this option is set, then whenever sb_server or sb_imapfilter is
started the default web browser will be opened to the main web
interface page. Use of the -b switch when starting from the command
line overrides this option."""),
BOOLEAN, RESTORE),
("allow_remote_connections", _("Allowed remote UI connections"), "localhost",
_("""Enter a list of trusted IPs, separated by commas. Remote
connections from any of them will be allowed. You can trust any
IP using a single '*' as field value. You can also trust ranges of
IPs using the '*' character as a wildcard (for instance 192.168.0.*).
The localhost IP will always be trusted. Type 'localhost' in the
field to trust this only address."""),
IP_LIST, RESTORE),
("display_headers", _("Headers to display in message review"), ("Subject", "From"),
_("""When reviewing messages via the web user interface, you are
presented with various information about the message. By default, you
are shown the subject and who the message is from. You can add other
message headers to display, however, such as the address the message
is to, or the date that the message was sent."""),
HEADER_NAME, RESTORE),
("display_received_time", _("Display date received in message review"), False,
_("""When reviewing messages via the web user interface, you are
presented with various information about the message. If you set
this option, you will be shown the date that the message was received.
"""),
BOOLEAN, RESTORE),
("display_score", _("Display score in message review"), False,
_("""When reviewing messages via the web user interface, you are
presented with various information about the message. If you
set this option, this information will include the score that
the message received when it was classified. You might wish to
see this purely out of curiousity, or you might wish to only
train on messages that score towards the boundaries of the
classification areas. Note that in order to use this option,
you must also enable the option to include the score in the
message headers."""),
BOOLEAN, RESTORE),
("display_adv_find", _("Display the advanced find query"), False,
_("""Present advanced options in the 'Word Query' box on the front page,
including wildcard and regular expression searching."""),
BOOLEAN, RESTORE),
("default_ham_action", _("Default training for ham"), _("discard"),
_("""When presented with the review list in the web interface,
which button would you like checked by default when the message
is classified as ham?"""),
(_("ham"), _("spam"), _("discard"), _("defer")), RESTORE),
("default_spam_action", _("Default training for spam"), _("discard"),
_("""When presented with the review list in the web interface,
which button would you like checked by default when the message
is classified as spam?"""),
(_("ham"), _("spam"), _("discard"), _("defer")), RESTORE),
("default_unsure_action", _("Default training for unsure"), _("defer"),
_("""When presented with the review list in the web interface,
which button would you like checked by default when the message
is classified as unsure?"""),
(_("ham"), _("spam"), _("discard"), _("defer")), RESTORE),
("ham_discard_level", _("Ham Discard Level"), 0.0,
_("""Hams scoring less than this percentage will default to being
discarded in the training interface (they won't be trained). You'll
need to turn off the 'Train when filtering' option, above, for this
to have any effect"""),
REAL, RESTORE),
("spam_discard_level", _("Spam Discard Level"), 100.0,
_("""Spams scoring more than this percentage will default to being
discarded in the training interface (they won't be trained). You'll
need to turn off the 'Train when filtering' option, above, for this
to have any effect"""),
REAL, RESTORE),
("http_authentication", _("HTTP Authentication"), "None",
_("""This option lets you choose the security level of the web interface.
When selecting Basic or Digest, the user will be prompted a login and a
password to access the web interface. The Basic option is faster, but
transmits the password in clear on the network. The Digest option
encrypts the password before transmission."""),
("None", "Basic", "Digest"), RESTORE),
("http_user_name", _("User name"), "admin",
_("""If you activated the HTTP authentication option, you can modify the
authorized user name here."""),
r"[\w]+", RESTORE),
("http_password", _("Password"), "admin",
_("""If you activated the HTTP authentication option, you can modify the
authorized user password here."""),
r"[\w]+", RESTORE),
("rows_per_section", _("Rows per section"), 10000,
_("""Number of rows to display per ham/spam/unsure section."""),
INTEGER, RESTORE),
),
"imap" : (
("server", _("Server"), (),
_("""These are the names and ports of the imap servers that store your
mail, and which the imap filter will connect to - for example:
mail.example.com or imap.example.com:143. The default IMAP port is
143 (or 993 if using SSL); if you connect via one of those ports, you
can leave this blank. If you use more than one server, use a comma
delimited list of the server:port values."""),
SERVER, DO_NOT_RESTORE),
("username", _("Username"), (),
_("""This is the id that you use to log into your imap server. If your
address is funkyguy@example.com, then your username is probably
funkyguy."""),
IMAP_ASTRING, DO_NOT_RESTORE),
("password", _("Password"), (),
_("""That is that password that you use to log into your imap server.
This will be stored in plain text in your configuration file, and if
you have set the web user interface to allow remote connections, then
it will be available for the whole world to see in plain text. If
I've just freaked you out, don't panic <wink>. You can leave this
blank and use the -p command line option to imapfilter.py and you will
be prompted for your password."""),
IMAP_ASTRING, DO_NOT_RESTORE),
("expunge", _("Purge//Expunge"), False,
_("""Permanently remove *all* messages flagged with //Deleted on logout.
If you do not know what this means, then please leave this as
False."""),
BOOLEAN, RESTORE),
("use_ssl", _("Connect via a secure socket layer"), False,
_("""Use SSL to connect to the server. This allows spambayes to connect
without sending the password in plain text.
Note that this does not check the server certificate at this point in
time."""),
BOOLEAN, DO_NOT_RESTORE),
("filter_folders", _("Folders to filter"), ("INBOX",),
_("""Comma delimited list of folders to be filtered"""),
IMAP_FOLDER, DO_NOT_RESTORE),
("unsure_folder", _("Folder for unsure messages"), "",
_(""""""),
IMAP_FOLDER, DO_NOT_RESTORE),
("spam_folder", _("Folder for suspected spam"), "",
_(""""""),
IMAP_FOLDER, DO_NOT_RESTORE),
("ham_folder", _("Folder for ham messages"), "",
_("""If you leave this option blank, messages classified as ham will not
be moved. However, if you wish to have ham messages moved, you can
select a folder here."""),
IMAP_FOLDER, DO_NOT_RESTORE),
("ham_train_folders", _("Folders with mail to be trained as ham"), (),
_("""Comma delimited list of folders that will be examined for messages
to train as ham."""),
IMAP_FOLDER, DO_NOT_RESTORE),
("spam_train_folders", _("Folders with mail to be trained as spam"), (),
_("""Comma delimited list of folders that will be examined for messages
to train as spam."""),
IMAP_FOLDER, DO_NOT_RESTORE),
("move_trained_spam_to_folder", _("Folder to move trained spam to"), "",
_("""When training, all messages in the spam training folder(s) (above)
are examined - if they are new, they are used to train, if not, they
are ignored. This examination does take time, however, so if speed
is an issue for you, you may wish to move messages out of this folder
once they have been trained (either to delete them or to a storage
folder). If a folder name is specified here, this will happen
automatically. Note that the filter is not yet clever enough to
move the mail to different folders depending on which folder it
was originally in - *all* messages will be moved to the same
folder."""),
IMAP_FOLDER, DO_NOT_RESTORE),
("move_trained_ham_to_folder", _("Folder to move trained ham to"), "",
_("""When training, all messages in the ham training folder(s) (above)
are examined - if they are new, they are used to train, if not, they
are ignored. This examination does take time, however, so if speed
is an issue for you, you may wish to move messages out of this folder
once they have been trained (either to delete them or to a storage
folder). If a folder name is specified here, this will happen
automatically. Note that the filter is not yet clever enough to
move the mail to different folders depending on which folder it
was originally in - *all* messages will be moved to the same
folder."""),
IMAP_FOLDER, DO_NOT_RESTORE),
),
"ZODB" : (
("zeo_addr", _(""), "",
_(""""""),
IMAP_ASTRING, DO_NOT_RESTORE),
("event_log_file", _(""), "",
_(""""""),
IMAP_ASTRING, RESTORE),
("folder_dir", _(""), "",
_(""""""),
PATH, DO_NOT_RESTORE),
("ham_folders", _(""), "",
_(""""""),
PATH, DO_NOT_RESTORE),
("spam_folders", _(""), "",
_(""""""),
PATH, DO_NOT_RESTORE),
("event_log_severity", _(""), 0,
_(""""""),
INTEGER, RESTORE),
("cache_size", _(""), 2000,
_(""""""),
INTEGER, RESTORE),
),
"imapserver" : (
("username", _("Username"), "",
_("""The username to use when logging into the SpamBayes IMAP server."""),
IMAP_ASTRING, DO_NOT_RESTORE),
("password", _("Password"), "",
_("""The password to use when logging into the SpamBayes IMAP server."""),
IMAP_ASTRING, DO_NOT_RESTORE),
("port", _("IMAP Listen Port"), 143,
_("""The port to serve the SpamBayes IMAP server on."""),
PORT, RESTORE),
),
"globals" : (
("verbose", _("Verbose"), False,
_(""""""),
BOOLEAN, RESTORE),
("dbm_type", _("Database storage type"), "best",
_("""What DBM storage type should we use? Must be best, db3hash,
dbhash or gdbm. Windows folk should steer clear of dbhash. Default
is "best", which will pick the best DBM type available on your
platform."""),
("best", "db3hash", "dbhash", "gdbm"), RESTORE),
("proxy_username", _("HTTP Proxy Username"), "",
_("""The username to give to the HTTP proxy when required. If a
username is not necessary, simply leave blank."""),
r"[\w]+", DO_NOT_RESTORE),
("proxy_password", _("HTTP Proxy Password"), "",
_("""The password to give to the HTTP proxy when required. This is
stored in clear text in your configuration file, so if that bothers
you then don't do this. You'll need to use a proxy that doesn't need
authentication, or do without any SpamBayes HTTP activity."""),
r"[\w]+", DO_NOT_RESTORE),
("proxy_server", _("HTTP Proxy Server"), "",
_("""If a spambayes application needs to use HTTP, it will try to do so
through this proxy server. The port defaults to 8080, or can be
entered with the server:port form."""),
SERVER, DO_NOT_RESTORE),
("language", _("User Interface Language"), ("en_US",),
_("""If possible, the user interface should use a language from this
list (in order of preference)."""),
r"\w\w(?:_\w\w)?", RESTORE),
),
"Plugin": (
("xmlrpc_path", _("XML-RPC path"), "/sbrpc",
_("""The path to respond to."""),
r"[\w]+", RESTORE),
("xmlrpc_host", _("XML-RPC host"), "localhost",
_("""The host to listen on."""),
SERVER, RESTORE),
("xmlrpc_port", _("XML-RPC port"), 8001,
_("""The port to listen on."""),
r"[\d]+", RESTORE),
),
}
# `optionsPathname` is the pathname of the last ini file in the list.
# This is where the web-based configuration page will write its changes.
# If no ini files are found, it defaults to bayescustomize.ini in the
# current working directory.
# Both names below are (re)bound by load_options(); they start as None so
# a first call can distinguish "never loaded" from "reloading".
optionsPathname = None
# The global options object - created by load_options
options = None
def load_options():
    """Create the global `options` object and merge configuration files.

    Search order: the previously-used `optionsPathname` (reload case), then
    the files named in the BAYESCUSTOMIZE environment variable, then
    `bayescustomize.ini` / `~/.spambayesrc` in the usual places, and finally
    (on Windows, with the win32 extensions available) a per-user
    application-data directory.  Sets the module globals `options` and
    `optionsPathname` as a side effect.
    """
    global optionsPathname, options
    options = OptionsClass()
    options.load_defaults(defaults)
    # Maybe we are reloading.
    if optionsPathname:
        options.merge_file(optionsPathname)
    # BAYESCUSTOMIZE may hold an os.pathsep-separated list of ini files;
    # the *last* one listed becomes the file the web UI writes back to.
    alternate = None
    if hasattr(os, 'getenv'):
        alternate = os.getenv('BAYESCUSTOMIZE')
    if alternate:
        filenames = alternate.split(os.pathsep)
        options.merge_files(filenames)
        optionsPathname = os.path.abspath(filenames[-1])
    else:
        alts = []
        for path in ['bayescustomize.ini', '~/.spambayesrc']:
            epath = os.path.expanduser(path)
            if os.path.exists(epath):
                alts.append(epath)
        if alts:
            options.merge_files(alts)
            optionsPathname = os.path.abspath(alts[-1])
    if not optionsPathname:
        # Nothing found anywhere - fall back to an (as yet nonexistent)
        # ini file in the current directory.
        optionsPathname = os.path.abspath('bayescustomize.ini')
        if sys.platform.startswith("win") and \
           not os.path.isfile(optionsPathname):
            # If we are on Windows and still don't have an INI, default to the
            # 'per-user' directory.
            try:
                from win32com.shell import shell, shellcon
            except ImportError:
                # We are on Windows, with no BAYESCUSTOMIZE set, no ini file
                # in the current directory, and no win32 extensions installed
                # to locate the "user" directory - seeing things are so lamely
                # setup, it is worth printing a warning
                print("NOTE: We can not locate an INI file " \
                      "for SpamBayes, and the Python for Windows extensions " \
                      "are not installed, meaning we can't locate your " \
                      "'user' directory. An empty configuration file at " \
                      "'%s' will be used." % optionsPathname.encode('mbcs'), file=sys.stderr)
            else:
                windowsUserDirectory = os.path.join(
                    shell.SHGetFolderPath(0,shellcon.CSIDL_APPDATA,0,0),
                    "SpamBayes", "Proxy")
                try:
                    if not os.path.isdir(windowsUserDirectory):
                        os.makedirs(windowsUserDirectory)
                except os.error:
                    # unable to make the directory - stick to default.
                    pass
                else:
                    optionsPathname = os.path.join(windowsUserDirectory,
                                                   'bayescustomize.ini')
                    # Not everyone is unicode aware - keep it a string.
                    # NOTE(review): under Python 3 this rebinds
                    # optionsPathname to *bytes*, not str - Python 2
                    # legacy; confirm downstream consumers accept bytes.
                    optionsPathname = optionsPathname.encode("mbcs")
                    # If the file exists, then load it.
                    if os.path.exists(optionsPathname):
                        options.merge_file(optionsPathname)
def get_pathname_option(section, option):
    """Return the value of *option* in *section* resolved to a pathname.

    The value is first ``~``-expanded.  Absolute paths are returned as-is;
    relative paths are resolved against the directory that holds the global
    ``optionsPathname`` ini file.
    """
    candidate = os.path.expanduser(options.get(section, option))
    if not os.path.isabs(candidate):
        candidate = os.path.join(os.path.dirname(optionsPathname), candidate)
    return candidate
# Ideally, we should not create the objects at import time - but we have
# done it this way forever!
# We avoid having the options loading code at the module level, as then
# the only way to re-read is to reload this module, and as at 2.3, that
# doesn't work in a .zip file.
# Import-time side effect: populates the module globals `options` and
# `optionsPathname` defined above.
load_options()
| 47.513552 | 97 | 0.675699 |
6ba32a332e3e24f934d0d4b4fd0199349fc0b763 | 1,579 | py | Python | class/cricket-player-team/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | class/cricket-player-team/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | class/cricket-player-team/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | import random
class Player():
    """A single batsman: a name plus the runs accumulated so far."""

    def __init__(self, name):
        self.name = name
        # Runs start at zero; run() adds to this total.
        self.score = 0

    def run(self):
        """Score a random number of runs between 0 and 6 inclusive."""
        self.score = self.score + random.randint(0, 6)

    def __str__(self):
        template = "{} (score: {})"
        return template.format(self.name, self.score)
class Team():
    """An ordered squad of players taking turns at batting.

    The striker changes after every second delivery (odd-numbered
    ``current_run``), wrapping around to the first player at the end
    of the list.
    """

    def __init__(self, players):
        self.players = players
        # Index into `players` of the batsman currently on strike.
        self.current_batsman = 0
        # Number of deliveries faced so far.
        self.current_run = 0

    def set_next_batsman(self):
        """Advance to the next batsman, wrapping past the end of the list."""
        self.current_batsman = (self.current_batsman + 1) % len(self.players)

    def get_current_batsman(self):
        return self.players[self.current_batsman]

    def run(self):
        """Let the striker score, then rotate strike on odd deliveries."""
        self.get_current_batsman().run()
        if self.current_run % 2 == 1:
            self.set_next_batsman()
        self.current_run += 1

    def __str__(self):
        roster = ", ".join(str(member) for member in self.players)
        return "Player: " + roster

    def total_score(self):
        """Sum of every player's individual score."""
        return sum(member.score for member in self.players)
# Demo: two three-player teams play four rounds; scoring is random
# (Player.run uses random.randint), so output differs between runs.
team1 = Team( [Player("a"), Player("b"), Player("c")] )
team2 = Team( [Player("x"), Player("y"), Player("z")] )
print('Team1:', team1)
print('Team2:', team2)
for number in range(1, 5):
    print('Round:', number)
    # Show who is on strike before each ball, then deliver it.
    print('Team1 current batsman:', team1.get_current_batsman())
    team1.run()
    print('Team2 current batsman:', team2.get_current_batsman())
    team2.run()
    print('Team1:', team1)
    print('Team2:', team2)
# Final tally for both teams.
print('Team1 total score:', team1.total_score())
print('Team2 total score:', team2.total_score())
| 25.063492 | 67 | 0.587714 |
64716f5c2867bb3790f6d8919dd1dc42604d7a30 | 2,973 | py | Python | src/social_auth/backends/asana.py | jianyuan/sentry | ceb8389c54d29f80b27703bb76c3880d923a3a5a | [
"BSD-3-Clause"
] | null | null | null | src/social_auth/backends/asana.py | jianyuan/sentry | ceb8389c54d29f80b27703bb76c3880d923a3a5a | [
"BSD-3-Clause"
] | 5 | 2020-07-17T11:20:41.000Z | 2021-05-09T12:16:53.000Z | src/social_auth/backends/asana.py | zaasmi/codeerrorhelp | 1ab8d3e314386b9b2d58dad9df45355bf6014ac9 | [
"BSD-3-Clause"
] | 1 | 2021-11-18T12:44:04.000Z | 2021-11-18T12:44:04.000Z | """
Obtain
ASANA_CLIENT_ID & ASANA_CLIENT_SECRET
and put into sentry.conf.py
"""
from __future__ import absolute_import
import requests
from social_auth.backends import BaseOAuth2, OAuthBackend
from social_auth.exceptions import AuthCanceled, AuthUnknownError
# Asana OAuth2 endpoints and the authenticated-user profile API URL.
ASANA_TOKEN_EXCHANGE_URL = 'https://app.asana.com/-/oauth_token'
ASANA_AUTHORIZATION_URL = 'https://app.asana.com/-/oauth_authorize'
ASANA_USER_DETAILS_URL = 'https://app.asana.com/api/1.0/users/me'
class AsanaBackend(OAuthBackend):
    """Asana OAuth authentication backend."""
    name = 'asana'
    # (response key, stored name) pairs copied into extra_data on login.
    EXTRA_DATA = [
        ('email', 'email'),
        ('name', 'full_name'),
        ('id', 'id'),
        ('refresh_token', 'refresh_token'),
    ]

    def get_user_details(self, response):
        """Map an Asana user payload onto the common user-details dict."""
        details = {}
        details['email'] = response.get('email')
        details['id'] = response.get('id')
        details['full_name'] = response.get('name')
        return details
class AsanaAuth(BaseOAuth2):
    """Asana OAuth2 authentication mechanism."""
    AUTHORIZATION_URL = ASANA_AUTHORIZATION_URL
    ACCESS_TOKEN_URL = ASANA_TOKEN_EXCHANGE_URL
    AUTH_BACKEND = AsanaBackend
    SETTINGS_KEY_NAME = 'ASANA_CLIENT_ID'
    SETTINGS_SECRET_NAME = 'ASANA_CLIENT_SECRET'
    REDIRECT_STATE = False

    def user_data(self, access_token, *args, **kwargs):
        """Load the authenticated user's profile from the Asana API.

        Returns the ``data`` payload dict, or None if the response body
        is not valid JSON.
        """
        headers = {'Authorization': 'Bearer %s' % access_token}
        try:
            resp = requests.get(ASANA_USER_DETAILS_URL,
                                headers=headers)
            resp.raise_for_status()
            return resp.json()['data']
        except ValueError:
            return None

    def auth_complete(self, *args, **kwargs):
        """Complete the login process; must return a user instance.

        Exchanges the authorization code for an access token, then
        delegates to do_auth(). A 400 from the token endpoint is treated
        as the user cancelling the flow (AuthCanceled); a malformed JSON
        body raises AuthUnknownError.
        """
        self.process_error(self.data)
        params = self.auth_complete_params(self.validate_state())
        try:
            response = requests.post(self.ACCESS_TOKEN_URL, data=params,
                                     headers=self.auth_headers())
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            # BUG FIX: requests' HTTPError carries no `.code` attribute
            # (that was urllib2's API) - the status lives on the attached
            # Response object, so the old `e.code == 400` always raised
            # AttributeError instead of AuthCanceled.
            if e.response is not None and e.response.status_code == 400:
                raise AuthCanceled(self)
            else:
                raise
        else:
            try:
                response = response.json()
            except (ValueError, KeyError):
                raise AuthUnknownError(self)
            # Drop the nested Asana payload; remaining keys are checked
            # for OAuth error fields before completing authentication.
            response.pop('data')
            self.process_error(response)
            return self.do_auth(response['access_token'], response=response,
                                *args, **kwargs)

    @classmethod
    def refresh_token(cls, token):
        """Exchange a refresh token for a fresh access-token response."""
        params = cls.refresh_token_params(token)
        response = requests.post(cls.ACCESS_TOKEN_URL, data=params,
                                 headers=cls.auth_headers())
        response.raise_for_status()
        return response.json()
# Backend definition
# Registry consumed by social_auth: maps the backend name to its auth class.
BACKENDS = {
    'asana': AsanaAuth,
}
| 31.294737 | 72 | 0.615876 |
e1fc232410999af0ad66828f6385f508d6740d63 | 19,963 | py | Python | pymc3_ext/variational/approximations.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | [
"Apache-2.0"
] | null | null | null | pymc3_ext/variational/approximations.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | [
"Apache-2.0"
] | null | null | null | pymc3_ext/variational/approximations.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import theano
from theano import tensor as tt
import pymc3_ext as pm
from pymc3_ext.distributions.dist_math import rho2sigma
from . import opvi
from pymc3_ext.variational.opvi import Group, Approximation, node_property
from pymc3_ext.util import update_start_vals
from pymc3_ext.theanof import change_flags
from pymc3_ext.math import batched_diag
from pymc3_ext.variational import flows
__all__ = [
'MeanField',
'FullRank',
'Empirical',
'NormalizingFlow',
'sample_approx'
]
@Group.register
class MeanFieldGroup(Group):
    R"""Mean Field approximation to the posterior where spherical Gaussian family
    is fitted to minimize KL divergence from True posterior. It is assumed
    that latent space variables are uncorrelated that is the main drawback
    of the method
    """
    # One mean and one rho (pre-softplus scale) per latent dimension `d`.
    __param_spec__ = dict(mu=('d', ), rho=('d', ))
    short_name = 'mean_field'
    alias_names = frozenset(['mf'])

    @node_property
    def mean(self):
        # Posterior mean vector (shared variable 'mu').
        return self.params_dict['mu']

    @node_property
    def rho(self):
        # Unconstrained scale parameter; std = rho2sigma(rho).
        return self.params_dict['rho']

    @node_property
    def cov(self):
        # Diagonal covariance built from the per-dimension variances.
        var = rho2sigma(self.rho)**2
        if self.batched:
            return batched_diag(var)
        else:
            return tt.diag(var)

    @node_property
    def std(self):
        return rho2sigma(self.rho)

    @change_flags(compute_test_value='off')
    def __init_group__(self, group):
        super().__init_group__(group)
        # Only create fresh shared params if the user did not supply any.
        if not self._check_user_params():
            self.shared_params = self.create_shared_params(
                self._kwargs.get('start', None)
            )
        self._finalize_init()

    def create_shared_params(self, start=None):
        """Build the 'mu'/'rho' theano shared variables, seeded at `start`
        (default: the model test point)."""
        if start is None:
            start = self.model.test_point
        else:
            start_ = start.copy()
            update_start_vals(start_, self.model.test_point, self.model)
            start = start_
        if self.batched:
            # Use the first batch entry as the common starting point.
            start = start[self.group[0].name][0]
        else:
            start = self.bij.map(start)
        rho = np.zeros((self.ddim,))
        if self.batched:
            # Replicate the start values across the batch dimension.
            start = np.tile(start, (self.bdim, 1))
            rho = np.tile(rho, (self.bdim, 1))
        return {'mu': theano.shared(
                    pm.floatX(start), 'mu'),
                'rho': theano.shared(
                    pm.floatX(rho), 'rho')}

    @node_property
    def symbolic_random(self):
        # Reparameterization: z = mu + sigma * eps, eps ~ N(0, 1).
        initial = self.symbolic_initial
        sigma = self.std
        mu = self.mean
        return sigma * initial + mu

    @node_property
    def symbolic_logq_not_scaled(self):
        # log q(z) via change of variables from the standard-normal draw z0:
        # logN(z0) minus the log-determinant of the elementwise scaling.
        z0 = self.symbolic_initial
        std = rho2sigma(self.rho)
        logdet = tt.log(std)
        logq = pm.Normal.dist().logp(z0) - logdet
        return logq.sum(range(1, logq.ndim))
@Group.register
class FullRankGroup(Group):
    """Full Rank approximation to the posterior where Multivariate Gaussian family
    is fitted to minimize KL divergence from True posterior. In contrast to
    MeanField approach correlations between variables are taken in account. The
    main drawback of the method is computational cost.
    """
    # Mean vector plus the packed lower-triangular Cholesky factor.
    __param_spec__ = dict(mu=('d',), L_tril=('int(d * (d + 1) / 2)',))
    short_name = 'full_rank'
    alias_names = frozenset(['fr'])

    @change_flags(compute_test_value='off')
    def __init_group__(self, group):
        super().__init_group__(group)
        # Only create fresh shared params if the user did not supply any.
        if not self._check_user_params():
            self.shared_params = self.create_shared_params(
                self._kwargs.get('start', None)
            )
        self._finalize_init()

    def create_shared_params(self, start=None):
        """Build 'mu'/'L_tril' shared variables; the Cholesky factor starts
        as (packed) identity, i.e. unit variance and no correlations."""
        if start is None:
            start = self.model.test_point
        else:
            start_ = start.copy()
            update_start_vals(start_, self.model.test_point, self.model)
            start = start_
        if self.batched:
            start = start[self.group[0].name][0]
        else:
            start = self.bij.map(start)
        n = self.ddim
        # Identity matrix packed into its lower-triangular entries.
        L_tril = (
            np.eye(n)
            [np.tril_indices(n)]
            .astype(theano.config.floatX)
        )
        if self.batched:
            start = np.tile(start, (self.bdim, 1))
            L_tril = np.tile(L_tril, (self.bdim, 1))
        return {'mu': theano.shared(start, 'mu'),
                'L_tril': theano.shared(L_tril, 'L_tril')}

    @node_property
    def L(self):
        # Unpack the stored flat triangle back into a full (batched)
        # lower-triangular matrix.
        if self.batched:
            L = tt.zeros((self.ddim, self.ddim, self.bdim))
            L = tt.set_subtensor(
                L[self.tril_indices],
                self.params_dict['L_tril'].T)
            L = L.dimshuffle(2, 0, 1)
        else:
            L = tt.zeros((self.ddim, self.ddim))
            L = tt.set_subtensor(
                L[self.tril_indices],
                self.params_dict['L_tril'])
        return L

    @node_property
    def mean(self):
        return self.params_dict['mu']

    @node_property
    def cov(self):
        # Covariance from the Cholesky factor: Sigma = L @ L.T.
        L = self.L
        if self.batched:
            return tt.batched_dot(L, L.swapaxes(-1, -2))
        else:
            return L.dot(L.T)

    @node_property
    def std(self):
        # Marginal standard deviations = sqrt(diag(Sigma)).
        if self.batched:
            return tt.sqrt(batched_diag(self.cov))
        else:
            return tt.sqrt(tt.diag(self.cov))

    @property
    def num_tril_entries(self):
        n = self.ddim
        return int(n * (n + 1) / 2)

    @property
    def tril_indices(self):
        return np.tril_indices(self.ddim)

    @node_property
    def symbolic_logq_not_scaled(self):
        z = self.symbolic_random
        if self.batched:
            def logq(z_b, mu_b, L_b):
                return pm.MvNormal.dist(mu=mu_b, chol=L_b).logp(z_b)
            # it's gonna be so slow
            # scan is computed over batch and then summed up
            # output shape is (batch, samples)
            return theano.scan(logq, [z.swapaxes(0, 1), self.mean, self.L])[0].sum(0)
        else:
            return pm.MvNormal.dist(mu=self.mean, chol=self.L).logp(z)

    @node_property
    def symbolic_random(self):
        # Reparameterization: z = mu + eps @ L.T, eps ~ N(0, I).
        initial = self.symbolic_initial
        L = self.L
        mu = self.mean
        if self.batched:
            # initial: bxsxd
            # L: bxdxd
            initial = initial.swapaxes(0, 1)
            return tt.batched_dot(initial, L.swapaxes(1, 2)).swapaxes(0, 1) + mu
        else:
            return initial.dot(L.T) + mu
@Group.register
class EmpiricalGroup(Group):
    """Builds Approximation instance from a given trace,
    it has the same interface as variational approximation
    """
    supports_batched = False
    # Particles are stored directly; there is no tractable density.
    has_logq = False
    # `s` particles, each of dimension `d`.
    __param_spec__ = dict(histogram=('s', 'd'))
    short_name = 'empirical'

    @change_flags(compute_test_value='off')
    def __init_group__(self, group):
        super().__init_group__(group)
        self._check_trace()
        if not self._check_user_params(spec_kw=dict(s=-1)):
            self.shared_params = self.create_shared_params(
                trace=self._kwargs.get('trace', None),
                size=self._kwargs.get('size', None),
                jitter=self._kwargs.get('jitter', 1),
                start=self._kwargs.get('start', None)
            )
        self._finalize_init()

    def create_shared_params(self, trace=None, size=None, jitter=1, start=None):
        """Build the particle histogram either from an MCMC `trace` or as
        `size` jittered copies of `start` (default: model test point)."""
        if trace is None:
            if size is None:
                raise opvi.ParametrizationError('Need `trace` or `size` to initialize')
            else:
                if start is None:
                    start = self.model.test_point
                else:
                    start_ = self.model.test_point.copy()
                    update_start_vals(start_, start, self.model)
                    start = start_
                start = pm.floatX(self.bij.map(start))
                # Initialize particles
                histogram = np.tile(start, (size, 1))
                histogram += pm.floatX(np.random.normal(0, jitter, histogram.shape))
        else:
            # One particle per (chain, draw) pair from the trace.
            histogram = np.empty((len(trace) * len(trace.chains), self.ddim))
            i = 0
            for t in trace.chains:
                for j in range(len(trace)):
                    histogram[i] = self.bij.map(trace.point(j, t))
                    i += 1
        return dict(histogram=theano.shared(pm.floatX(histogram), 'histogram'))

    def _check_trace(self):
        # A user-supplied trace must contain every free RV of this group.
        trace = self._kwargs.get('trace', None)
        if (trace is not None
                and not all([var.name in trace.varnames
                             for var in self.group])):
            raise ValueError('trace has not all FreeRV in the group')

    def randidx(self, size=None):
        """Return `size` random int32 row indices into the histogram."""
        if size is None:
            size = (1,)
        elif isinstance(size, tt.TensorVariable):
            if size.ndim < 1:
                size = size[None]
            elif size.ndim > 1:
                raise ValueError('size ndim should be no more than 1d')
            else:
                pass
        else:
            size = tuple(np.atleast_1d(size))
        # Uniform draw in [0, n_particles); the 1e-16 offset keeps the
        # upper bound exclusive before the int32 truncation.
        return (self._rng
                .uniform(size=size,
                         low=pm.floatX(0),
                         high=pm.floatX(self.histogram.shape[0]) - pm.floatX(1e-16))
                .astype('int32'))

    def _new_initial(self, size, deterministic, more_replacements=None):
        # `deterministic` may be a Python bool or a symbolic flag; in the
        # deterministic case return repeated means, otherwise resample
        # particles from the histogram.
        theano_condition_is_here = isinstance(deterministic, tt.Variable)
        if theano_condition_is_here:
            return tt.switch(
                deterministic,
                tt.repeat(
                    self.mean.dimshuffle('x', 0),
                    size if size is not None else 1, -1),
                self.histogram[self.randidx(size)])
        else:
            if deterministic:
                return tt.repeat(
                    self.mean.dimshuffle('x', 0),
                    size if size is not None else 1, -1)
            else:
                return self.histogram[self.randidx(size)]

    @property
    def symbolic_random(self):
        # Particles are used directly; no transformation is applied.
        return self.symbolic_initial

    @property
    def histogram(self):
        return self.params_dict['histogram']

    @node_property
    def mean(self):
        return self.histogram.mean(0)

    @node_property
    def cov(self):
        # Empirical (biased, 1/n) covariance of the particles.
        x = (self.histogram - self.mean)
        return x.T.dot(x) / pm.floatX(self.histogram.shape[0])

    @node_property
    def std(self):
        return tt.sqrt(tt.diag(self.cov))

    def __str__(self):
        if isinstance(self.histogram, theano.compile.SharedVariable):
            shp = ', '.join(map(str, self.histogram.shape.eval()))
        else:
            shp = 'None, ' + str(self.ddim)
        return '{cls}[{shp}]'.format(shp=shp, cls=self.__class__.__name__)
class NormalizingFlowGroup(Group):
R"""Normalizing flow is a series of invertible transformations on initial distribution.
.. math::
z_K &= f_K \circ \dots \circ f_2 \circ f_1(z_0) \\
& z_0 \sim \mathcal{N}(0, 1)
In that case we can compute tractable density for the flow.
.. math::
\ln q_K(z_K) = \ln q_0(z_0) - \sum_{k=1}^{K}\ln \left|\frac{\partial f_k}{\partial z_{k-1}}\right|
Every :math:`f_k` here is a parametric function with defined determinant.
We can choose every step here. For example the here is a simple flow
is an affine transform:
.. math::
z = loc(scale(z_0)) = \mu + \sigma * z_0
Here we get mean field approximation if :math:`z_0 \sim \mathcal{N}(0, 1)`
**Flow Formulas**
In PyMC3 there is a flexible way to define flows with formulas. We have 5 of them by the moment:
- Loc (:code:`loc`): :math:`z' = z + \mu`
- Scale (:code:`scale`): :math:`z' = \sigma * z`
- Planar (:code:`planar`): :math:`z' = z + u * \tanh(w^T z + b)`
- Radial (:code:`radial`): :math:`z' = z + \beta (\alpha + (z-z_r))^{-1}(z-z_r)`
- Householder (:code:`hh`): :math:`z' = H z`
Formula can be written as a string, e.g. `'scale-loc'`, `'scale-hh*4-loc'`, `'panar*10'`.
Every step is separated with `'-'`, repeated flow is marked with `'*'` producing `'flow*repeats'`.
References
----------
- Danilo Jimenez Rezende, Shakir Mohamed, 2015
Variational Inference with Normalizing Flows
arXiv:1505.05770
- Jakub M. Tomczak, Max Welling, 2016
Improving Variational Auto-Encoders using Householder Flow
arXiv:1611.09630
"""
default_flow = 'scale-loc'
@change_flags(compute_test_value='off')
def __init_group__(self, group):
super().__init_group__(group)
# objects to be resolved
# 1. string formula
# 2. not changed default value
# 3. Formula
formula = self._kwargs.get('flow', self._vfam)
jitter = self._kwargs.get('jitter', 1)
if formula is None or isinstance(formula, str):
# case 1 and 2
has_params = self._check_user_params(f=formula)
elif isinstance(formula, flows.Formula):
# case 3
has_params = self._check_user_params(f=formula.formula)
else:
raise TypeError('Wrong type provided for NormalizingFlow as `flow` argument, '
'expected Formula or string')
if not has_params:
if formula is None:
formula = self.default_flow
else:
formula = '-'.join(
flows.flow_for_params(self.user_params[i]).short_name
for i in range(len(self.user_params))
)
if not isinstance(formula, flows.Formula):
formula = flows.Formula(formula)
if self.local:
bs = -1
elif self.batched:
bs = self.bdim
else:
bs = None
self.flow = formula(
dim=self.ddim,
z0=self.symbolic_initial,
jitter=jitter,
params=self.user_params,
batch_size=bs,
)
self._finalize_init()
def _check_user_params(self, **kwargs):
params = self._user_params = self.user_params
formula = kwargs.pop('f')
if params is None:
return False
if formula is not None:
raise opvi.ParametrizationError('No formula is allowed if user params are provided')
if not isinstance(params, dict):
raise TypeError('params should be a dict')
if not all(isinstance(k, int) for k in params.keys()):
raise TypeError('params should be a dict with `int` keys')
needed = set(range(len(params)))
givens = set(params.keys())
if givens != needed:
raise opvi.ParametrizationError(
'Passed parameters do not have a needed set of keys, '
'they should be equal, needed {needed}, got {givens}'.format(
givens=list(sorted(givens)), needed='[0, 1, ..., %d]' % len(formula.flows)))
for i in needed:
flow = flows.flow_for_params(params[i])
flow_keys = set(flow.__param_spec__)
user_keys = set(params[i].keys())
if flow_keys != user_keys:
raise opvi.ParametrizationError(
'Passed parameters for flow `{i}` ({cls}) do not have a needed set of keys, '
'they should be equal, needed {needed}, got {givens}'.format(
givens=user_keys, needed=flow_keys, i=i, cls=flow.__name__))
return True
@property
def shared_params(self):
if self.user_params is not None:
return None
params = dict()
current = self.flow
i = 0
params[i] = current.shared_params
while not current.isroot:
i += 1
current = current.parent
params[i] = current.shared_params
return params
@shared_params.setter
def shared_params(self, value):
if self.user_params is not None:
raise AttributeError('Cannot set when having user params')
current = self.flow
i = 0
current.shared_params = value[i]
while not current.isroot:
i += 1
current = current.parent
current.shared_params = value[i]
    @property
    def params(self):
        # Every parameter of the whole flow chain (shared and user-provided).
        return self.flow.all_params
    @node_property
    def symbolic_logq_not_scaled(self):
        # Change-of-variables formula: log q(z) is the standard-normal
        # log-density of the initial noise minus the accumulated
        # log-determinants of the flow transforms, before any scaling.
        z0 = self.symbolic_initial
        q0 = pm.Normal.dist().logp(z0).sum(range(1, z0.ndim))
        return q0-self.flow.sum_logdets
    @property
    def symbolic_random(self):
        # Samples are produced by pushing the initial noise through the flow.
        return self.flow.forward
    @node_property
    def bdim(self):
        if not self.local:
            return self.bdim
        else:
            # Local groups infer the batch dimension from the leading axis of
            # the first user parameter.
            return next(iter(self.user_params[0].values())).shape[0]
    @classmethod
    def get_param_spec_for(cls, flow, **kwargs):
        # Delegate to the Formula so each flow reports its own parameter spec.
        return flows.Formula(flow).get_param_spec_for(**kwargs)
def sample_approx(approx, draws=100, include_transformed=True):
    """Draw random samples from a fitted variational posterior.

    Parameters
    ----------
    approx : :class:`Approximation`
        The fitted approximation to draw from.
    draws : `int`
        How many samples to generate (default 100).
    include_transformed : `bool`
        Whether transformed variables are sampled as well. Default is True.

    Returns
    -------
    trace : class:`pymc3_ext.backends.base.MultiTrace`
        Samples drawn from the variational posterior.
    """
    return approx.sample(draws=draws, include_transformed=include_transformed)
# single group shortcuts exported to user
class SingleGroupApproximation(Approximation):
    """Base class for Single Group Approximation"""
    # Subclasses set this to the Group subclass implementing the family
    # (e.g. MeanFieldGroup, FullRankGroup).
    _group_class = None

    def __init__(self, *args, **kwargs):
        local_rv = kwargs.get('local_rv')
        # The main (global) group covers every variable not listed in
        # `local_rv`; passing None as the variables argument means "the rest".
        groups = [self._group_class(None, *args, **kwargs)]
        if local_rv is not None:
            # One extra local group per local random variable.
            groups.extend([Group([v], params=p, local=True, model=kwargs.get('model'))
                           for v, p in local_rv.items()])
        super().__init__(groups, model=kwargs.get('model'))

    def __getattr__(self, item):
        # Fall through to the single global group, so e.g. `approx.mean`
        # behaves like `approx.groups[0].mean`. Only called for attributes
        # not found on the approximation itself.
        return getattr(self.groups[0], item)

    def __dir__(self):
        # Merge our attributes with the group's, for tab completion.
        d = set(super().__dir__())
        d.update(self.groups[0].__dir__())
        return list(sorted(d))
class MeanField(SingleGroupApproximation):
    # Convenience wrapper: whole-model approximation backed by one
    # MeanFieldGroup, whose docstring is appended below.
    __doc__ = """**Single Group Mean Field Approximation**
    """ + str(MeanFieldGroup.__doc__)
    _group_class = MeanFieldGroup
class FullRank(SingleGroupApproximation):
    # Convenience wrapper: whole-model approximation backed by one
    # FullRankGroup, whose docstring is appended below.
    __doc__ = """**Single Group Full Rank Approximation**
    """ + str(FullRankGroup.__doc__)
    _group_class = FullRankGroup
class Empirical(SingleGroupApproximation):
    # FIX: the heading previously said "Full Rank Approximation" -- a
    # copy-paste error from the FullRank class above.
    __doc__ = """**Single Group Empirical Approximation**
    """ + str(EmpiricalGroup.__doc__)
    _group_class = EmpiricalGroup

    def __init__(self, trace=None, size=None, **kwargs):
        # Empirical approximations are built from a (global) trace, so local
        # random variables cannot take part in the group.
        if kwargs.get('local_rv', None) is not None:
            raise opvi.LocalGroupError('Empirical approximation does not support local variables')
        super().__init__(trace=trace, size=size, **kwargs)

    def evaluate_over_trace(self, node):
        R"""
        This allows to statically evaluate any symbolic expression over the trace.

        Parameters
        ----------
        node : Theano Variables (or Theano expressions)

        Returns
        -------
        evaluated node(s) over the posterior trace contained in the empirical approximation
        """
        node = self.to_flat_input(node)

        def sample(post):
            # Re-wire the symbolic input to one posterior sample.
            return theano.clone(node, {self.input: post})

        nodes, _ = theano.scan(sample, self.histogram)
        return nodes
class NormalizingFlow(SingleGroupApproximation):
    __doc__ = """**Single Group Normalizing Flow Approximation**
    """ + str(NormalizingFlowGroup.__doc__)
    _group_class = NormalizingFlowGroup

    def __init__(self, flow=NormalizingFlowGroup.default_flow, *args, **kwargs):
        # Surface `flow` as a convenient positional argument, then forward it
        # to the group through kwargs.
        kwargs['flow'] = flow
        super().__init__(*args, **kwargs)
| 33.106136 | 106 | 0.585734 |
fe8a7c2431d3014a0acfdd0269081081a7e36c19 | 12,928 | py | Python | homeassistant/components/mold_indicator/sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 4 | 2021-07-11T09:11:00.000Z | 2022-02-27T14:43:50.000Z | homeassistant/components/mold_indicator/sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 277 | 2021-10-04T06:39:33.000Z | 2021-12-28T22:04:17.000Z | homeassistant/components/mold_indicator/sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 3 | 2021-11-14T13:29:33.000Z | 2021-12-27T17:05:22.000Z | """Calculates mold growth indication from temperature and humidity."""
from __future__ import annotations
import logging
import math
import voluptuous as vol
from homeassistant import util
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
PERCENTAGE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
ATTR_CRITICAL_TEMP = "estimated_critical_temp"
ATTR_DEWPOINT = "dewpoint"
CONF_CALIBRATION_FACTOR = "calibration_factor"
CONF_INDOOR_HUMIDITY = "indoor_humidity_sensor"
CONF_INDOOR_TEMP = "indoor_temp_sensor"
CONF_OUTDOOR_TEMP = "outdoor_temp_sensor"
DEFAULT_NAME = "Mold Indicator"
MAGNUS_K2 = 17.62
MAGNUS_K3 = 243.12
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_INDOOR_TEMP): cv.entity_id,
vol.Required(CONF_OUTDOOR_TEMP): cv.entity_id,
vol.Required(CONF_INDOOR_HUMIDITY): cv.entity_id,
vol.Optional(CONF_CALIBRATION_FACTOR): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up MoldIndicator sensor from a YAML platform configuration."""
    name = config.get(CONF_NAME, DEFAULT_NAME)
    indoor_temp_sensor = config.get(CONF_INDOOR_TEMP)
    outdoor_temp_sensor = config.get(CONF_OUTDOOR_TEMP)
    indoor_humidity_sensor = config.get(CONF_INDOOR_HUMIDITY)
    calib_factor = config.get(CONF_CALIBRATION_FACTOR)

    # update_before_add=False: the entity reads its source sensors lazily
    # once Home Assistant has started (see async_added_to_hass).
    async_add_entities(
        [
            MoldIndicator(
                name,
                hass.config.units.is_metric,
                indoor_temp_sensor,
                outdoor_temp_sensor,
                indoor_humidity_sensor,
                calib_factor,
            )
        ],
        False,
    )
class MoldIndicator(SensorEntity):
"""Represents a MoldIndication sensor."""
    def __init__(
        self,
        name,
        is_metric,
        indoor_temp_sensor,
        outdoor_temp_sensor,
        indoor_humidity_sensor,
        calib_factor,
    ):
        """Initialize the sensor.

        name: display name of the entity.
        is_metric: True when Home Assistant is configured for metric units.
        *_sensor: entity ids of the source sensors to track.
        calib_factor: wall calibration factor; may be None, which keeps the
        sensor unavailable (see _calc_moldindicator).
        """
        self._state = None
        self._name = name
        self._indoor_temp_sensor = indoor_temp_sensor
        self._indoor_humidity_sensor = indoor_humidity_sensor
        self._outdoor_temp_sensor = outdoor_temp_sensor
        self._calib_factor = calib_factor
        self._is_metric = is_metric
        self._available = False
        # Set of source entity ids, used to register a single state listener.
        self._entities = {
            self._indoor_temp_sensor,
            self._indoor_humidity_sensor,
            self._outdoor_temp_sensor,
        }

        # Cached readings (Celsius / percent); None until the sources report.
        self._dewpoint = None
        self._indoor_temp = None
        self._outdoor_temp = None
        self._indoor_hum = None
        self._crit_temp = None
    async def async_added_to_hass(self):
        """Register callbacks."""

        @callback
        def mold_indicator_sensors_state_listener(event):
            """Handle for state changes for dependent sensors."""
            new_state = event.data.get("new_state")
            old_state = event.data.get("old_state")
            entity = event.data.get("entity_id")
            _LOGGER.debug(
                "Sensor state change for %s that had old state %s and new state %s",
                entity,
                old_state,
                new_state,
            )

            # Only push a recalculation when the change produced usable data.
            if self._update_sensor(entity, old_state, new_state):
                self.async_schedule_update_ha_state(True)

        @callback
        def mold_indicator_startup(event):
            """Add listeners and get 1st state."""
            _LOGGER.debug("Startup for %s", self.entity_id)

            async_track_state_change_event(
                self.hass, list(self._entities), mold_indicator_sensors_state_listener
            )

            # Read initial state
            indoor_temp = self.hass.states.get(self._indoor_temp_sensor)
            outdoor_temp = self.hass.states.get(self._outdoor_temp_sensor)
            indoor_hum = self.hass.states.get(self._indoor_humidity_sensor)

            # Schedule the first update only when *all three* initial reads
            # succeeded: any failed read forces schedule_update to False.
            schedule_update = self._update_sensor(
                self._indoor_temp_sensor, None, indoor_temp
            )

            schedule_update = (
                False
                if not self._update_sensor(
                    self._outdoor_temp_sensor, None, outdoor_temp
                )
                else schedule_update
            )

            schedule_update = (
                False
                if not self._update_sensor(
                    self._indoor_humidity_sensor, None, indoor_hum
                )
                else schedule_update
            )

            if schedule_update:
                self.async_schedule_update_ha_state(True)

        # Defer the initial reads until Home Assistant has fully started, so
        # the source entities are guaranteed to exist.
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, mold_indicator_startup
        )
def _update_sensor(self, entity, old_state, new_state):
"""Update information based on new sensor states."""
_LOGGER.debug("Sensor update for %s", entity)
if new_state is None:
return False
# If old_state is not set and new state is unknown then it means
# that the sensor just started up
if old_state is None and new_state.state == STATE_UNKNOWN:
return False
if entity == self._indoor_temp_sensor:
self._indoor_temp = MoldIndicator._update_temp_sensor(new_state)
elif entity == self._outdoor_temp_sensor:
self._outdoor_temp = MoldIndicator._update_temp_sensor(new_state)
elif entity == self._indoor_humidity_sensor:
self._indoor_hum = MoldIndicator._update_hum_sensor(new_state)
return True
    @staticmethod
    def _update_temp_sensor(state):
        """Parse temperature sensor value.

        Returns the temperature in Celsius, or None when the state is
        unknown, unparseable, or uses an unsupported unit.
        """
        _LOGGER.debug("Updating temp sensor with value %s", state.state)

        # Return an error if the sensor change its state to Unknown.
        if state.state == STATE_UNKNOWN:
            _LOGGER.error(
                "Unable to parse temperature sensor %s with state: %s",
                state.entity_id,
                state.state,
            )
            return None

        unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        temp = util.convert(state.state, float)

        if temp is None:
            _LOGGER.error(
                "Unable to parse temperature sensor %s with state: %s",
                state.entity_id,
                state.state,
            )
            return None

        # convert to celsius if necessary
        if unit == TEMP_FAHRENHEIT:
            return util.temperature.fahrenheit_to_celsius(temp)
        if unit == TEMP_CELSIUS:
            return temp
        _LOGGER.error(
            "Temp sensor %s has unsupported unit: %s (allowed: %s, %s)",
            state.entity_id,
            unit,
            TEMP_CELSIUS,
            TEMP_FAHRENHEIT,
        )

        return None
    @staticmethod
    def _update_hum_sensor(state):
        """Parse humidity sensor value.

        Returns the relative humidity in percent (0-100), or None when the
        state is unknown, unparseable, not a percentage, or out of range.
        """
        _LOGGER.debug("Updating humidity sensor with value %s", state.state)

        # Return an error if the sensor change its state to Unknown.
        if state.state == STATE_UNKNOWN:
            _LOGGER.error(
                "Unable to parse humidity sensor %s, state: %s",
                state.entity_id,
                state.state,
            )
            return None

        if (hum := util.convert(state.state, float)) is None:
            _LOGGER.error(
                "Unable to parse humidity sensor %s, state: %s",
                state.entity_id,
                state.state,
            )
            return None

        if (unit := state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)) != PERCENTAGE:
            _LOGGER.error(
                "Humidity sensor %s has unsupported unit: %s %s",
                state.entity_id,
                unit,
                " (allowed: %)",
            )
            return None

        if hum > 100 or hum < 0:
            _LOGGER.error(
                "Humidity sensor %s is out of range: %s %s",
                state.entity_id,
                hum,
                "(allowed: 0-100%)",
            )
            return None

        return hum
    async def async_update(self):
        """Calculate latest state."""
        _LOGGER.debug("Update state for %s", self.entity_id)
        # check all sensors: any missing reading makes the entity unavailable.
        if None in (self._indoor_temp, self._indoor_hum, self._outdoor_temp):
            self._available = False
            self._dewpoint = None
            self._crit_temp = None
            return

        # re-calculate dewpoint and mold indicator
        self._calc_dewpoint()
        self._calc_moldindicator()
        if self._state is None:
            # _calc_moldindicator clears the state when inputs are invalid.
            self._available = False
            self._dewpoint = None
            self._crit_temp = None
        else:
            self._available = True
    def _calc_dewpoint(self):
        """Calculate the dewpoint for the indoor air."""
        # Use magnus approximation to calculate the dew point
        alpha = MAGNUS_K2 * self._indoor_temp / (MAGNUS_K3 + self._indoor_temp)
        beta = MAGNUS_K2 * MAGNUS_K3 / (MAGNUS_K3 + self._indoor_temp)

        if self._indoor_hum == 0:
            # log(0) is undefined, so substitute an arbitrarily low dewpoint.
            self._dewpoint = -50  # not defined, assume very low value
        else:
            self._dewpoint = (
                MAGNUS_K3
                * (alpha + math.log(self._indoor_hum / 100.0))
                / (beta - math.log(self._indoor_hum / 100.0))
            )
        _LOGGER.debug("Dewpoint: %f %s", self._dewpoint, TEMP_CELSIUS)
    def _calc_moldindicator(self):
        """Calculate the humidity at the (cold) calibration point."""
        if None in (self._dewpoint, self._calib_factor) or self._calib_factor == 0:
            # A zero calibration factor would divide by zero below.
            _LOGGER.debug(
                "Invalid inputs - dewpoint: %s, calibration-factor: %s",
                self._dewpoint,
                self._calib_factor,
            )
            self._state = None
            self._available = False
            self._crit_temp = None
            return

        # first calculate the approximate temperature at the calibration point
        self._crit_temp = (
            self._outdoor_temp
            + (self._indoor_temp - self._outdoor_temp) / self._calib_factor
        )

        _LOGGER.debug(
            "Estimated Critical Temperature: %f %s", self._crit_temp, TEMP_CELSIUS
        )

        # Then calculate the humidity at this point (inverse Magnus formula
        # using the indoor dewpoint).
        alpha = MAGNUS_K2 * self._crit_temp / (MAGNUS_K3 + self._crit_temp)
        beta = MAGNUS_K2 * MAGNUS_K3 / (MAGNUS_K3 + self._crit_temp)

        crit_humidity = (
            math.exp(
                (self._dewpoint * beta - MAGNUS_K3 * alpha)
                / (self._dewpoint + MAGNUS_K3)
            )
            * 100.0
        )

        # check bounds and format: the state is a percentage string in 0-100.
        if crit_humidity > 100:
            self._state = "100"
        elif crit_humidity < 0:
            self._state = "0"
        else:
            self._state = f"{int(crit_humidity):d}"

        _LOGGER.debug("Mold indicator humidity: %s", self._state)
    @property
    def should_poll(self):
        """Return the polling state; push-only, updates come from listeners."""
        return False
    @property
    def name(self):
        """Return the configured display name."""
        return self._name
    @property
    def native_unit_of_measurement(self):
        """Return the unit of measurement (the state is a percentage)."""
        return PERCENTAGE
    @property
    def native_value(self):
        """Return the state of the entity (percentage string, or None)."""
        return self._state
    @property
    def available(self):
        """Return the availability of this sensor."""
        return self._available
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._is_metric:
return {
ATTR_DEWPOINT: round(self._dewpoint, 2),
ATTR_CRITICAL_TEMP: round(self._crit_temp, 2),
}
dewpoint = (
util.temperature.celsius_to_fahrenheit(self._dewpoint)
if self._dewpoint is not None
else None
)
crit_temp = (
util.temperature.celsius_to_fahrenheit(self._crit_temp)
if self._crit_temp is not None
else None
)
return {
ATTR_DEWPOINT: round(dewpoint, 2),
ATTR_CRITICAL_TEMP: round(crit_temp, 2),
}
| 31.764128 | 86 | 0.595142 |
547467f5ece2bab5feb1a793b69c903eedaa8f37 | 9,098 | py | Python | conceptnet5/vectors/formats.py | DbrRoxane/conceptnet5 | 82e9f6456ceadf35ffa31cee5bf500c638f3f449 | [
"Apache-2.0"
] | 2,195 | 2015-01-02T20:07:43.000Z | 2022-03-31T02:25:39.000Z | conceptnet5/vectors/formats.py | DbrRoxane/conceptnet5 | 82e9f6456ceadf35ffa31cee5bf500c638f3f449 | [
"Apache-2.0"
] | 178 | 2015-01-01T18:57:45.000Z | 2022-03-22T06:34:50.000Z | conceptnet5/vectors/formats.py | DbrRoxane/conceptnet5 | 82e9f6456ceadf35ffa31cee5bf500c638f3f449 | [
"Apache-2.0"
] | 333 | 2015-01-08T09:09:40.000Z | 2022-03-19T21:17:43.000Z | import gzip
import pickle
import struct
import numpy as np
import pandas as pd
from ordered_set import OrderedSet
from .transforms import l1_normalize_columns, l2_normalize_rows, standardize_row_labels
def load_hdf(filename):
    """
    Load a semantic vector space from an HDF5 file.

    HDF5 is a complex format that can contain many instances of different kinds
    of data. The convention we use is that the file contains one labeled
    matrix, named "mat".
    """
    # pandas handles the HDF5 details; 'mat' is the fixed key by convention.
    return pd.read_hdf(filename, 'mat', encoding='utf-8')
def save_hdf(table, filename):
    """
    Save a semantic vector space into an HDF5 file, following the convention
    of storing it as a labeled matrix named 'mat'.
    """
    # mode='w' overwrites any existing file at `filename`.
    return table.to_hdf(filename, 'mat', mode='w', encoding='utf-8')
def save_labels(table, vocab_filename):
    # Convenience wrapper: persist only the DataFrame's row labels, one per
    # line, via save_index_as_labels.
    save_index_as_labels(table.index, vocab_filename)
def save_npy(values, matrix_filename):
    """
    Save the matrix of a semantic vector space as a NumPy .npy file.

    The accompanying labels are written separately (see `save_labels`); we
    use this pair of files when exporting the Luminoso background space.
    """
    np.save(matrix_filename, values)
def vec_to_text_line(label, vec):
    """Render *label* followed by its vector components, space separated,
    each component printed with four decimal places (fastText text format)."""
    pieces = [label]
    for component in vec:
        pieces.append('%4.4f' % component)
    return ' '.join(pieces)
def export_text(frame, filename, filter_language=None):
    """
    Save a semantic vector space as a fastText-style text file.

    If `filter_language` is set, it will output only vectors in that language.
    """
    vectors = frame.values
    index = frame.index
    if filter_language is not None:
        # ConceptNet term URIs are sorted, so all '/c/<lang>/...' rows form a
        # contiguous slice. bfill locates the first row at or after the
        # marker; '#' and '0' are presumably chosen to sort just before and
        # just after the language's term block -- confirm against the
        # sort order of term URIs.
        start_idx = index.get_loc('/c/%s/#' % filter_language, method='bfill')
        try:
            end_idx = index.get_loc('/c/%s0' % filter_language, method='bfill')
        except KeyError:
            # Nothing sorts after the block: take everything to the end.
            end_idx = frame.shape[0]
        frame = frame.iloc[start_idx:end_idx]
        vectors = frame.values
        index = frame.index

    with gzip.open(filename, 'wt') as out:
        # fastText header line: "<rows> <columns>".
        dims = "%s %s" % frame.shape
        print(dims, file=out)
        for i in range(frame.shape[0]):
            label = index[i]
            if filter_language is not None:
                # Strip the '/c/<lang>/' prefix, leaving the bare term text.
                label = label.split('/', 3)[-1]
            vec = vectors[i]
            print(vec_to_text_line(label, vec), file=out)
def convert_glove(glove_filename, output_filename, nrows):
    """
    Convert GloVe data from a gzipped text file to an HDF5 dataframe.
    """
    glove_raw = load_glove(glove_filename, nrows)
    # Relabel rows as ConceptNet URIs (forms=False: no lemma merging).
    glove_std = standardize_row_labels(glove_raw, forms=False)
    # `del` frees each large intermediate matrix promptly.
    del glove_raw
    # Column-wise L1 then row-wise L2 normalization, matching the other
    # importers in this module.
    glove_normal = l2_normalize_rows(l1_normalize_columns(glove_std))
    del glove_std
    save_hdf(glove_normal, output_filename)
def convert_fasttext(fasttext_filename, output_filename, nrows, language):
    """
    Convert FastText data from a gzipped text file to an HDF5 dataframe.
    """
    ft_raw = load_fasttext(fasttext_filename, nrows)
    # Relabel rows as ConceptNet URIs in the given language.
    ft_std = standardize_row_labels(ft_raw, forms=False, language=language)
    # `del` frees each large intermediate matrix promptly.
    del ft_raw
    ft_normal = l2_normalize_rows(l1_normalize_columns(ft_std))
    del ft_std
    save_hdf(ft_normal, output_filename)
def convert_word2vec(word2vec_filename, output_filename, nrows, language='en'):
    """
    Convert word2vec data from its gzipped binary format to an HDF5
    dataframe.
    """
    def _is_long_enough(term):
        # Standardized labels look like '/c/<lang>/<text>'; keep terms whose
        # text part is at least three characters.
        term_text = term.split('/')[3]
        return len(term_text) >= 3

    w2v_raw = load_word2vec_bin(word2vec_filename, nrows)
    w2v_std = standardize_row_labels(w2v_raw, forms=False, language=language)
    del w2v_raw
    # word2vec believes stupid things about two-letter combinations, so filter
    # them out
    filtered_labels = [
        term for term in w2v_std.index
        if _is_long_enough(term)
    ]
    w2v_filtered = w2v_std.loc[filtered_labels]
    del w2v_std
    w2v_normal = l2_normalize_rows(l1_normalize_columns(w2v_filtered))
    del w2v_filtered
    save_hdf(w2v_normal, output_filename)
def convert_polyglot(polyglot_filename, output_filename, language):
    """
    Convert Polyglot data from its pickled format to an HDF5 dataframe.
    """
    pg_raw = load_polyglot(polyglot_filename)
    # Relabel rows as ConceptNet URIs; no normalization is applied here,
    # unlike the GloVe/fastText/word2vec importers.
    pg_std = standardize_row_labels(pg_raw, language, forms=False)
    del pg_raw
    save_hdf(pg_std, output_filename)
def load_glove(filename, max_rows=1000000):
    """
    Load a DataFrame from the GloVe text format, which is the same as the
    fastText format except it doesn't tell you up front how many rows and
    columns there are.

    At most `max_rows` vectors are read. The number of columns is inferred
    from the first line; the matrix is pre-allocated for `max_rows` rows and
    trimmed afterwards if the file is shorter.
    """
    arr = None
    label_list = []
    with gzip.open(filename, 'rt') as infile:
        for i, line in enumerate(infile):
            if i >= max_rows:
                break
            items = line.rstrip().split(' ')
            label_list.append(items[0])
            if arr is None:
                # Infer the dimensionality from the first row, allocate once.
                ncols = len(items) - 1
                arr = np.zeros((max_rows, ncols), 'f')
            values = [float(x) for x in items[1:]]
            arr[i] = values
    if arr is None:
        # FIX: an empty input file used to crash here (slicing None);
        # return an empty frame instead.
        arr = np.zeros((0, 0), 'f')
    elif len(label_list) < max_rows:
        arr = arr[: len(label_list)]
    return pd.DataFrame(arr, index=label_list, dtype='f')
def load_fasttext(filename, max_rows=1000000):
    """
    Read a DataFrame from fastText's gzipped text format, whose first line
    gives the row and column counts. At most `max_rows` vectors are kept,
    and the sentence-boundary token '</s>' is skipped.
    """
    with gzip.open(filename, 'rt') as infile:
        header_rows, header_cols = infile.readline().rstrip().split()
        nrows = min(int(header_rows), max_rows)
        ncols = int(header_cols)
        arr = np.zeros((nrows, ncols), dtype='f')
        label_list = []
        for line in infile:
            if len(label_list) >= nrows:
                break
            cells = line.rstrip().split(' ')
            word = cells[0]
            if word == '</s>':
                continue
            arr[len(label_list)] = [float(cell) for cell in cells[1:]]
            label_list.append(word)
    # Trim unused pre-allocated rows when the file was shorter than expected.
    if len(label_list) < max_rows:
        arr = arr[: len(label_list)]
    return pd.DataFrame(arr, index=label_list, dtype='f')
def _read_until_space(file):
chars = []
while True:
newchar = file.read(1)
if newchar == b'' or newchar == b' ':
break
chars.append(newchar[0])
return bytes(chars).decode('utf-8', 'replace')
def _read_vec(file, ndims):
fmt = 'f' * ndims
bytes_in = file.read(4 * ndims)
values = list(struct.unpack(fmt, bytes_in))
return np.array(values)
def load_word2vec_bin(filename, nrows):
    """
    Load a DataFrame from word2vec's binary format. (word2vec's text format
    should be the same as fastText's, but it's less efficient to load the
    word2vec data that way.)
    """
    label_list = []
    arr = None
    with gzip.open(filename, 'rb') as infile:
        # Header line: "<rows> <columns>" in ASCII; binary records follow.
        header = infile.readline().rstrip()
        nrows_str, ncols_str = header.split()
        nrows = min(int(nrows_str), nrows)
        ncols = int(ncols_str)
        arr = np.zeros((nrows, ncols), dtype='f')
        while len(label_list) < nrows:
            # Each record is a space-terminated label followed by ncols
            # 32-bit floats.
            label = _read_until_space(infile)
            vec = _read_vec(infile, ncols)
            if label == '</s>':
                # Skip the word2vec sentence boundary marker, which will not
                # correspond to anything in other data
                continue
            idx = len(label_list)
            arr[idx] = vec
            label_list.append(label)
    # NOTE(review): when '</s>' is skipped, one extra record is read to fill
    # nrows; at EOF struct.unpack would fail. This assumes input files always
    # contain at least nrows real words -- confirm for new data sources.
    return pd.DataFrame(arr, index=label_list, dtype='f')
def load_polyglot(filename):
    """
    Load a pickled matrix from the Polyglot format.

    The pickle is expected to contain a (labels, matrix) pair.
    """
    # SECURITY NOTE: pickle.load executes arbitrary code from the file; only
    # use this on Polyglot distributions obtained from a trusted source.
    # FIX: use a context manager so the file handle is closed promptly (the
    # original left it open to be collected by the GC).
    with open(filename, 'rb') as infile:
        labels, arr = pickle.load(infile, encoding='bytes')
    label_list = list(labels)
    return pd.DataFrame(arr, index=label_list, dtype='f')
def load_labels_and_npy(label_file, npy_file):
    """
    Load a semantic vector space from two files: a NumPy .npy file of the matrix,
    and a text file with one label per line.
    """
    # FIX: use a context manager so the label file is closed promptly (the
    # original left the handle open until garbage collection).
    with open(label_file, encoding='utf-8') as infile:
        label_list = [line.rstrip('\n') for line in infile]
    arr = np.load(npy_file)
    return pd.DataFrame(arr, index=label_list, dtype='f')
def load_labels_as_index(label_filename):
    """
    Load a set of labels (with no attached vectors) from a text file, and
    represent them in a pandas Index.
    """
    # FIX: use a context manager so the file handle is closed promptly (the
    # original opened the file inside the comprehension and never closed it).
    with open(label_filename, encoding='utf-8') as infile:
        labels = [line.rstrip('\n') for line in infile]
    return pd.Index(labels)
def save_index_as_labels(index, label_filename):
    """Write each label of a pandas Index to *label_filename*, one per line,
    UTF-8 encoded."""
    with open(label_filename, 'w', encoding='utf-8') as out:
        out.write(''.join('%s\n' % label for label in index))
def save_ordered_set(oset, filename):
    """Persist the words of an OrderedSet to a text file, one word per line,
    preserving their order."""
    with open(filename, 'w', encoding='utf-8') as out:
        out.writelines('%s\n' % word for word in oset)
def load_ordered_set(filename):
    """
    Load a set of words from a text file, and
    represent them in an OrderedSet object.

    Trailing newlines are stripped; duplicate handling and ordering follow
    OrderedSet semantics (first occurrence wins).
    """
    oset = OrderedSet()
    # FIX: use a context manager so the file handle is closed promptly (the
    # original iterated an open() call and relied on GC to close it).
    with open(filename, encoding='utf-8') as infile:
        for line in infile:
            oset.append(line.rstrip('\n'))
    return oset
| 31.157534 | 87 | 0.640031 |
93455688b2a0e35979a22898fb0cfce42064d42d | 587 | py | Python | setup.py | hasii2011/Chip8Emulator | 96be8c0d01ccae0492ce0f980af905ec5c690f1a | [
"MIT"
] | null | null | null | setup.py | hasii2011/Chip8Emulator | 96be8c0d01ccae0492ce0f980af905ec5c690f1a | [
"MIT"
] | 8 | 2019-08-12T23:33:12.000Z | 2020-12-09T01:31:17.000Z | setup.py | hasii2011/Chip8Emulator | 96be8c0d01ccae0492ce0f980af905ec5c690f1a | [
"MIT"
] | null | null | null | import pathlib
from setuptools import setup
from setuptools import find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
setup(
name="Chip8 Emulator",
version="1.0.0",
description="An OO oriented Python Chip8 Emulator",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/hasii2011/Chip8Emulator",
packages=find_packages(),
include_package_data=True,
install_requires=["pygame", "python3-albow"]
)
| 26.681818 | 55 | 0.739353 |
33e7b129777e248ccef9193466bea05ef4cfe967 | 2,234 | py | Python | pydarknet2/classes.py | dapperfu/pydarknet2 | 0457771152d4fbb8cfb512aada62a1c7e50862af | [
"MIT"
] | 2 | 2018-11-13T04:44:29.000Z | 2019-08-06T08:25:34.000Z | pydarknet2/classes.py | dapperfu/pydarknet2 | 0457771152d4fbb8cfb512aada62a1c7e50862af | [
"MIT"
] | null | null | null | pydarknet2/classes.py | dapperfu/pydarknet2 | 0457771152d4fbb8cfb512aada62a1c7e50862af | [
"MIT"
] | null | null | null | """Custom classes.
Custom classes to abstract away darknet bits.
"""
from cached_property import cached_property
class Detections:
    """Container for all detections darknet returned for one image.

    Attributes
    ----------
    num : int
        Number of detections.
    detections_ptr : ctypes.c_void_p
        Pointer to detection array from darknet.
    """

    def __init__(self, num, detections_ptr):
        r"""Store the detection count and the darknet detection array.

        Parameters
        ----------
        num : int
            Number of detections.
        detections_ptr : ctypes.c_void_p
            Pointer to detection array from darknet.
        """
        self.num = num
        self.detections_ptr = detections_ptr

    def __repr__(self):
        """Short human-readable summary showing the detection count."""
        return f"Detections<{self.num}>"

    def __iter__(self):
        """Reset the cursor and return self so the object can be looped over."""
        self._idx = 0
        return self

    def __next__(self):
        """Return the next detection; raise StopIteration past the end."""
        if self._idx >= self.num:
            raise StopIteration
        current = self.detections_ptr[self._idx]
        self._idx += 1
        return current

    def __getitem__(self, index):
        """Random access into the underlying detection array."""
        return self.detections_ptr[index]
class ClassifiedImage:
    """High level class for a classified image object.

    Attributes
    ----------
    classification : str
        Classification string.
    detection : Detection
        Detection structure from Darknet.
    image : Image
        Image produced by the darknet image's asimage() conversion.
    """

    def __init__(self, classification, detection, image):
        self.classification = classification
        self.detection = detection
        # asimage() converts the darknet image to an object exposing crop();
        # presumably PIL-compatible -- confirm against the Image class.
        self.image = image.asimage()

    @cached_property
    def crop(self):
        """Return the image cropped to the classified object.

        Cached: computed once on first access and reused afterwards.
        """
        return self.image.crop(self.crop_box)

    @property
    def crop_box(self):
        """Shorthand to the pil_crop_box."""
        return self.detection.bbox.pil_crop_box

    def __repr__(self):
        """Return ipython representation."""
        return f"Classified<{self.classification}, {self.crop_box}>"
27d04c90ec0030124f1785ef0a569e8e8e432b21 | 8,751 | py | Python | ProjectFiles/bin/Release/2.80/scripts/startup/bl_operators/uvcalc_follow_active.py | BlazesRus/Bforartists | 126bdd9e47cc984fd97ba5299bfb92ec5278e754 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2019-07-08T15:51:14.000Z | 2019-07-08T15:51:14.000Z | ProjectFiles/bin/Release/2.80/scripts/startup/bl_operators/uvcalc_follow_active.py | BlazesRus/Bforartists | 126bdd9e47cc984fd97ba5299bfb92ec5278e754 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | ProjectFiles/bin/Release/2.80/scripts/startup/bl_operators/uvcalc_follow_active.py | BlazesRus/Bforartists | 126bdd9e47cc984fd97ba5299bfb92ec5278e754 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# for full docs see...
# https://docs.blender.org/manual/en/dev/editors/uv_image/uv/editing/unwrapping/mapping_types.html#follow-active-quads
import bpy
from bpy.types import Operator
STATUS_OK = (1 << 0)
STATUS_ERR_ACTIVE_FACE = (1 << 1)
STATUS_ERR_NOT_SELECTED = (1 << 2)
STATUS_ERR_NOT_QUAD = (1 << 3)
def extend(obj, operator, EXTEND_MODE):
    """Propagate the active quad's UVs along continuous face loops of `obj`.

    EXTEND_MODE controls the spacing of UV edge loops: 'EVEN', 'LENGTH', or
    'LENGTH_AVERAGE'. Returns STATUS_OK on success, 0 when no selected quads
    exist, or one of the STATUS_ERR_* bit flags describing why the active
    face could not be used.
    """
    import bmesh
    me = obj.data
    bm = bmesh.from_edit_mesh(me)

    # Only selected quads can be mapped; other faces act as boundaries.
    faces = [f for f in bm.faces if f.select and len(f.verts) == 4]
    if not faces:
        return 0

    f_act = bm.faces.active

    if f_act is None:
        return STATUS_ERR_ACTIVE_FACE
    if not f_act.select:
        return STATUS_ERR_NOT_SELECTED
    elif len(f_act.verts) != 4:
        return STATUS_ERR_NOT_QUAD

    # Script will fail without UVs.
    if not me.uv_layers:
        me.uv_layers.new()

    uv_act = bm.loops.layers.uv.active

    # our own local walker
    def walk_face_init(faces, f_act):
        # first tag all faces True (so we don't uvmap them)
        for f in bm.faces:
            f.tag = True
        # then tag faces arg False
        for f in faces:
            f.tag = False
        # tag the active face True since we begin there
        f_act.tag = True

    def walk_face(f):
        # Breadth-first walk over untagged neighbor quads, yielding
        # (source_face, shared_loop, neighbor_face) triples.
        # all faces in this list must be tagged
        f.tag = True
        faces_a = [f]
        faces_b = []

        while faces_a:
            for f in faces_a:
                for l in f.loops:
                    l_edge = l.edge
                    # Seams and non-manifold edges stop the walk.
                    if (l_edge.is_manifold is True) and (l_edge.seam is False):
                        l_other = l.link_loop_radial_next
                        f_other = l_other.face
                        if not f_other.tag:
                            yield (f, l, f_other)
                            f_other.tag = True
                            faces_b.append(f_other)
            # swap
            faces_a, faces_b = faces_b, faces_a
            faces_b.clear()

    def walk_edgeloop(l):
        """
        Yield each edge of the edge loop starting at loop `l`.

        Could make this a generic function
        """
        e_first = l.edge
        e = None
        while True:
            e = l.edge
            yield e

            # don't step past non-manifold edges
            if e.is_manifold:
                # welk around the quad and then onto the next face
                l = l.link_loop_radial_next
                if len(l.face.verts) == 4:
                    l = l.link_loop_next.link_loop_next
                    if l.edge is e_first:
                        break
                else:
                    break
            else:
                break

    def extrapolate_uv(fac,
                       l_a_outer, l_a_inner,
                       l_b_outer, l_b_inner):
        # Copy the shared edge UVs and project the far edge by `fac`.
        l_b_inner[:] = l_a_inner
        l_b_outer[:] = l_a_inner + ((l_a_inner - l_a_outer) * fac)

    def apply_uv(f_prev, l_prev, f_next):
        # Map f_next's UVs from its already-mapped neighbor across l_prev.
        l_a = [None, None, None, None]
        l_b = [None, None, None, None]

        l_a[0] = l_prev
        l_a[1] = l_a[0].link_loop_next
        l_a[2] = l_a[1].link_loop_next
        l_a[3] = l_a[2].link_loop_next

        #  l_b
        #  +-----------+
        #  |(3)        |(2)
        #  |           |
        #  |l_next(0)  |(1)
        #  +-----------+
        #        ^
        #  l_a   |
        #  +-----------+
        #  |l_prev(0)  |(1)
        #  |    (f)    |
        #  |(3)        |(2)
        #  +-----------+
        # copy from this face to the one above.

        # get the other loops
        l_next = l_prev.link_loop_radial_next
        if l_next.vert != l_prev.vert:
            l_b[1] = l_next
            l_b[0] = l_b[1].link_loop_next
            l_b[3] = l_b[0].link_loop_next
            l_b[2] = l_b[3].link_loop_next
        else:
            l_b[0] = l_next
            l_b[1] = l_b[0].link_loop_next
            l_b[2] = l_b[1].link_loop_next
            l_b[3] = l_b[2].link_loop_next

        l_a_uv = [l[uv_act].uv for l in l_a]
        l_b_uv = [l[uv_act].uv for l in l_b]

        if EXTEND_MODE == 'LENGTH_AVERAGE':
            # Ratio of the averaged loop lengths computed below.
            fac = edge_lengths[l_b[2].edge.index][0] / edge_lengths[l_a[1].edge.index][0]
        elif EXTEND_MODE == 'LENGTH':
            a0, b0, c0 = l_a[3].vert.co, l_a[0].vert.co, l_b[3].vert.co
            a1, b1, c1 = l_a[2].vert.co, l_a[1].vert.co, l_b[2].vert.co

            d1 = (a0 - b0).length + (a1 - b1).length
            d2 = (b0 - c0).length + (b1 - c1).length
            try:
                fac = d2 / d1
            except ZeroDivisionError:
                # Degenerate (zero-length) source edges: fall back to even.
                fac = 1.0
        else:
            fac = 1.0

        extrapolate_uv(fac,
                       l_a_uv[3], l_a_uv[0],
                       l_b_uv[3], l_b_uv[0])

        extrapolate_uv(fac,
                       l_a_uv[2], l_a_uv[1],
                       l_b_uv[2], l_b_uv[1])

    # -------------------------------------------
    # Calculate average length per loop if needed

    if EXTEND_MODE == 'LENGTH_AVERAGE':
        bm.edges.index_update()
        edge_lengths = [None] * len(bm.edges)

        for f in faces:
            # we know its a quad
            l_quad = f.loops[:]
            l_pair_a = (l_quad[0], l_quad[2])
            l_pair_b = (l_quad[1], l_quad[3])

            for l_pair in (l_pair_a, l_pair_b):
                if edge_lengths[l_pair[0].edge.index] is None:

                    # Every edge of one loop shares this mutable one-element
                    # list, so its average is visible to all of them at once.
                    edge_length_store = [-1.0]
                    edge_length_accum = 0.0
                    edge_length_total = 0

                    for l in l_pair:
                        if edge_lengths[l.edge.index] is None:
                            for e in walk_edgeloop(l):
                                if edge_lengths[e.index] is None:
                                    edge_lengths[e.index] = edge_length_store
                                    edge_length_accum += e.calc_length()
                                    edge_length_total += 1

                    edge_length_store[0] = edge_length_accum / edge_length_total

    # done with average length
    # ------------------------

    walk_face_init(faces, f_act)
    for f_triple in walk_face(f_act):
        apply_uv(*f_triple)

    bmesh.update_edit_mesh(me, False)
    return STATUS_OK
def main(context, operator):
    """Run `extend` on every mesh in edit mode; report one combined error."""
    total = 0
    failed = 0
    status_bits = 0
    for ob in context.objects_in_mode_unique_data:
        total += 1
        result = extend(ob, operator, operator.properties.mode)
        if result != STATUS_OK:
            failed += 1
            status_bits |= result

    # Only complain when every mesh failed; pick the most specific message.
    if failed == total:
        if status_bits & STATUS_ERR_NOT_QUAD:
            operator.report({'ERROR'}, "Active face must be a quad")
        elif status_bits & STATUS_ERR_NOT_SELECTED:
            operator.report({'ERROR'}, "Active face not selected")
        else:
            assert((status_bits & STATUS_ERR_ACTIVE_FACE) != 0)
            operator.report({'ERROR'}, "No active face")
class FollowActiveQuads(Operator):
    """Follow Active Quads\nFollow UVs from active quads along continuous face loops"""
    # NOTE: the docstring above doubles as the operator tooltip in Blender,
    # so its text is kept verbatim.
    bl_idname = "uv.follow_active_quads"
    bl_label = "Follow Active Quads"
    bl_options = {'REGISTER', 'UNDO'}

    # How UV edge loops are spaced along the followed face loops.
    mode: bpy.props.EnumProperty(
        name="Edge Length Mode",
        description="Method to space UV edge loops",
        items=(('EVEN', "Even", "Space all UVs evenly"),
               ('LENGTH', "Length", "Average space UVs edge length of each loop"),
               ('LENGTH_AVERAGE', "Length Average", "Average space UVs edge length of each loop"),
               ),
        default='LENGTH_AVERAGE',
    )

    @classmethod
    def poll(cls, context):
        # Only available when the active object is a mesh.
        obj = context.active_object
        if obj is None:
            return False
        return obj.type == 'MESH'

    def execute(self, context):
        main(context, self)
        return {'FINISHED'}

    def invoke(self, context, event):
        # Show the mode option in a popup before running.
        return context.window_manager.invoke_props_dialog(self)
# Registration list consumed by Blender's register()/unregister() machinery.
classes = (FollowActiveQuads,)
| 31.142349 | 118 | 0.529539 |
3bf8085479c1dac4c262092b3838e3d28cd14ffa | 2,239 | py | Python | pypy/module/cpyext/__init__.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/cpyext/__init__.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/module/cpyext/__init__.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | from pypy.interpreter.mixedmodule import MixedModule
from pypy.module.cpyext.state import State
from pypy.module.cpyext import api
# The cpyext mixed module: exposes loading of CPython C-extension modules.
# (A comment rather than a docstring, since MixedModule subclasses may expose
# __doc__ at application level.)
class Module(MixedModule):
    interpleveldefs = {'load_module': 'api.load_extension_module'}
    appleveldefs = {}

    # Callbacks registered via register_atexit(); capped at 32 entries,
    # matching CPython's Py_AtExit() limit.
    atexit_funcs = []

    def startup(self, space):
        # Initialize the cpyext state object cached on the object space.
        space.fromcache(State).startup(space)

    def register_atexit(self, function):
        # Enforce the fixed 32-slot limit before accepting the callback.
        funcs = self.atexit_funcs
        if len(funcs) >= 32:
            raise ValueError("cannot register more than 32 atexit functions")
        funcs.append(function)

    def shutdown(self, space):
        # Run the registered atexit callbacks in registration order.
        for callback in self.atexit_funcs:
            callback()
# import these modules to register api functions by side-effect
import pypy.module.cpyext.pyobject
import pypy.module.cpyext.boolobject
import pypy.module.cpyext.floatobject
import pypy.module.cpyext.modsupport
import pypy.module.cpyext.pythonrun
import pypy.module.cpyext.pyerrors
import pypy.module.cpyext.typeobject
import pypy.module.cpyext.object
import pypy.module.cpyext.bytesobject
import pypy.module.cpyext.tupleobject
import pypy.module.cpyext.setobject
import pypy.module.cpyext.dictobject
import pypy.module.cpyext.intobject
import pypy.module.cpyext.longobject
import pypy.module.cpyext.listobject
import pypy.module.cpyext.sequence
import pypy.module.cpyext.buffer
import pypy.module.cpyext.bufferobject
import pypy.module.cpyext.eval
import pypy.module.cpyext.import_
import pypy.module.cpyext.mapping
import pypy.module.cpyext.iterator
import pypy.module.cpyext.unicodeobject
import pypy.module.cpyext.sysmodule
import pypy.module.cpyext.number
import pypy.module.cpyext.sliceobject
import pypy.module.cpyext.stubsactive
import pypy.module.cpyext.pystate
import pypy.module.cpyext.cdatetime
import pypy.module.cpyext.complexobject
import pypy.module.cpyext.weakrefobject
import pypy.module.cpyext.funcobject
import pypy.module.cpyext.frameobject
import pypy.module.cpyext.classobject
import pypy.module.cpyext.memoryobject
import pypy.module.cpyext.codecs
import pypy.module.cpyext.pyfile
import pypy.module.cpyext.pystrtod
import pypy.module.cpyext.pytraceback
# now that all rffi_platform.Struct types are registered, configure them
api.configure_types()
| 31.535211 | 77 | 0.800804 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.