id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3269098 | <reponame>uk-gov-mirror/ministryofjustice.cla_backend
import re
from cla_provider.models import Provider
import os
from datetime import timedelta, time, datetime, date
from django import forms
from django.db.transaction import atomic
from django.utils import timezone
from django.contrib.admin import widgets
from legalaid.utils import diversity
from cla_common.constants import EXPRESSIONS_OF_DISSATISFACTION
from cla_eventlog.constants import LOG_TYPES, LOG_LEVELS
from cla_eventlog import event_registry
from complaints.constants import SLA_DAYS
from knowledgebase.models import Article
from reports.widgets import MonthYearWidget
from . import sql
from .utils import get_reports_cursor, set_local_time_for_query
class ConvertDateMixin(object):
    """Mixin converting a bare date into a timezone-aware datetime at midnight."""

    def _convert_date(self, d):
        # Midnight at the start of *d*, localised to the currently active timezone.
        midnight = datetime.combine(d, time(hour=0, minute=0))
        return timezone.make_aware(midnight, timezone.get_current_timezone())
class ReportForm(ConvertDateMixin, forms.Form):
    """Base class for CSV-style reports.

    Iterating the form yields the header row first, then one row per record
    from ``get_queryset()``.  Subclasses implement ``get_headers`` (and
    usually ``get_queryset``).
    """

    def __init__(self, request=None, *args, **kwargs):
        self.request = request
        super(ReportForm, self).__init__(*args, **kwargs)

    def get_headers(self):
        # Subclasses must return the list of column headers.
        raise NotImplementedError

    def get_rows(self):
        for record in self.get_queryset():
            yield record

    def __iter__(self):
        # Header row first, then the data rows.
        yield self.get_headers()
        for record in self.get_rows():
            yield record

    def get_output(self):
        # Materialise the whole report (headers + rows) as a list.
        return [row for row in self]
class DateRangeReportForm(ReportForm):
    """Report parameterised by an inclusive ``date_from``/``date_to`` range."""

    date_from = forms.DateField(widget=widgets.AdminDateWidget)
    date_to = forms.DateField(widget=widgets.AdminDateWidget)

    # Maximum allowed span in days; None disables the validation below.
    max_date_range = None

    def clean(self):
        cleaned_data = super(DateRangeReportForm, self).clean()
        have_both_dates = "date_from" in self.cleaned_data and "date_to" in self.cleaned_data
        if self.max_date_range and have_both_dates:
            start, end = self.date_range
            span = end - start
            # Half a day of slack on top of the configured limit.
            if span > timedelta(days=self.max_date_range, hours=12):
                raise forms.ValidationError(
                    "The date range (%s) should span "
                    "no more than %s working days" % (span, str(self.max_date_range))
                )
        return cleaned_data  # can be removed in django 1.7

    @property
    def date_range(self):
        # The upper bound is pushed forward one day so the whole of
        # ``date_to`` is included (end-exclusive range).
        return (
            self._convert_date(self.cleaned_data["date_from"]),
            self._convert_date(self.cleaned_data["date_to"] + timedelta(days=1)),
        )
def year_range(backward=0, forward=10):
    """Return the years from *backward* years ago up to (but excluding)
    *forward* years after the current year."""
    current = date.today().year
    start, end = current - backward, current + forward
    return range(start, end)
class MonthRangeReportForm(ReportForm):
    """Report parameterised by a single month (month/year picker widget)."""
    date = forms.DateField(widget=MonthYearWidget(years=year_range(backward=4, forward=3)))
    @property
    def month(self):
        # Timezone-aware datetime at midnight on the selected day.
        return self._convert_date(self.cleaned_data["date"])
class SQLFileReportMixin(object):
    """Mixin that loads a raw SQL report query from a file.

    Subclasses set ``QUERY_FILE`` to a filename inside the ``sql`` package
    directory; the query text is read once at construction time and executed
    against the reports database connection.
    """
    def __init__(self, *args, **kwargs):
        super(SQLFileReportMixin, self).__init__(*args, **kwargs)
        # Read the report query from <sql package dir>/<QUERY_FILE>.
        path = os.path.join(sql.__path__[0], self.QUERY_FILE)
        with open(path, "r") as f:
            self.query = f.read()
    def get_sql_params(self):
        # Subclasses return the parameter sequence/mapping for the query.
        raise NotImplementedError()
    def get_queryset(self):
        return self.execute_query(self.query, self.get_sql_params())
    def execute_query(self, query, params):
        # Run inside a transaction; the cursor is always closed, and its
        # description is kept so callers can inspect column metadata.
        with atomic():
            cursor = get_reports_cursor()
            try:
                cursor.execute(set_local_time_for_query(query), params)
                self.description = cursor.description
                return cursor.fetchall()
            finally:
                cursor.close()
class SQLFileDateRangeReport(SQLFileReportMixin, DateRangeReportForm):
    """File-backed SQL report taking (date_from, date_to) as query parameters."""
    def get_sql_params(self):
        return self.date_range
class SQLFileMonthRangeReport(SQLFileReportMixin, MonthRangeReportForm):
    """File-backed SQL report taking the selected month (as a date) as its parameter."""
    def get_sql_params(self):
        return (self.month.date(),)
class MIProviderAllocationExtract(SQLFileDateRangeReport):
    """Provider allocation extract: one column per provider, one row per category."""
    QUERY_FILE = "MIProviderAllocation.sql"
    def get_headers(self):
        return ["category"] + self._get_provider_names()
    def _get_provider_names(self):
        # Strip everything except spaces, digits, letters, '.' and '-' --
        # presumably to keep the names safe inside the quoted SQL column
        # definitions built in get_sql_params (TODO confirm).
        regex = re.compile(r"[^ 0-9A-Za-z.-]+")
        return [re.sub(regex, "", p["name"]) for p in Provider.objects.all().order_by("id").values("name")]
    def get_sql_params(self):
        # Append a '"name" text, ...' column-definition list for the query's
        # crosstab-style output.
        params = super(MIProviderAllocationExtract, self).get_sql_params()
        cols = '"%s" text' % '" text, "'.join(self.get_headers())
        return params + (cols,)
    def get_queryset(self):
        # NOTE(review): parameters are %-interpolated into the SQL text here
        # rather than bound.  Values come from the validated form and the
        # Provider table (sanitised above), but confirm nothing untrusted can
        # reach this path.
        return self.execute_query(self.query % self.get_sql_params(), [])
class MIVoiceReport(SQLFileMonthRangeReport):
    """Monthly MI voice report; column headers for the rows produced by MIVoiceReport.sql."""
    QUERY_FILE = "MIVoiceReport.sql"
    def get_headers(self):
        # Order must line up with the query's SELECT list -- confirm against the SQL file.
        return [
            "id",
            "created",
            "modified",
            "provider_id",
            "created_by_id",
            "LAA_Reference",
            "Client_Ref",
            "Account_Number",
            "First_Name",
            "Surname",
            "DOB",
            "Age_Range",
            "Gender",
            "Ethnicity",
            "Postcode",
            "Eligibility_Code",
            "Matter_Type_1",
            "Matter_Type_2",
            "Stage_Reached",
            "Outcome_Code",
            "Date_Opened",
            "Date_Closed",
            "Time_Spent",
            "Case_Costs",
            "Disability_Code",
            "Disbursements",
            "Travel_Costs",
            "Determination",
            "Suitable_For_Telephone_Advice",
            "Exceptional_Case_ref",
            "Exempted_Reason_Code",
            "Adaptations",
            "Signposting_or_Referral",
            "Media_Code",
            "Telephone_or_Online",
            "month",
            "Provider",
            "has_linked_case_in_system",
            "OS_BillableTime",
            "count_of_timers",
            "count_of_outcomes",
        ]
class MICaseExtract(SQLFileDateRangeReport):
    """MI case extract by outcome, optionally decrypting diversity data.

    When a passphrase is supplied, the encrypted diversity blob is decrypted
    in-database and its fields are spliced into the diversity columns of each
    output row; otherwise those columns are left empty.
    """
    QUERY_FILE = "MIExtractByOutcome.sql"
    passphrase = forms.CharField(
        required=False, help_text="Optional. If not provided, the report will not include diversity data"
    )
    def get_headers(self):
        return [
            "LAA_Reference",
            "Hash_ID",
            "Case_ID",
            "Split_Check",
            "Split_Link_Case",
            "Provider_ID",
            "Category_Name",
            "Date_Case_Created",
            "Last_Modified_Date",
            "Outcome_Code_Child",
            "Billable_Time",
            "Cumulative_Time",
            "Matter_Type_1",
            "Matter_Type_2",
            "User_ID",
            "Scope_Status",
            "Eligibility_Status",
            "Adjustments_BSL",
            "Adjustments_LLI",
            "Adjustments_MIN",
            "Adjustments_TYP",
            "Adjustments_CallbackPreferred",
            "Adjustments_Skype",
            "Gender",
            "Ethnicity",
            "Age(Range)",
            "Religion",
            "Sexual_Orientation",
            "Disability",
            "Time_of_Day",
            "Reject_Reason",
            "Media_Code",
            "Contact_Type",
            "Call_Back_Request_Time",
            "Call_Back_Actioned_Time",
            "Time_to_OS_Access",
            "Time_to_SP_Access",
            "Residency_Test",
            "Repeat_Contact",
            "Referral_Agencies",
            "Complaint_Type",
            "Complaint_Date",
            "Complaint_Owner",
            "Complaint_Target",
            "Complaint_Subject",
            "Complaint_Classification",
            "Complaint_Outcome",
            "Agree_Feedback",
            "Exempt_Client",
            "Welsh",
            "Language",
            "Outcome_Created_At",
            "Username",
            "Has_Third_Party",
            "Time_to_OS_Action",
            "Organisation",
        ]
    def get_rows(self):
        # The query returns the diversity JSON as the last column; pop it off
        # and insert the individual diversity values at the column positions
        # defined by get_headers().
        for row in self.get_queryset():
            full_row = list(row)
            diversity_json = full_row.pop() or {}
            def insert_value(key, val):
                index = self.get_headers().index(key)
                full_row.insert(index, val)
            insert_value("Gender", diversity_json.get("gender"))
            insert_value("Ethnicity", diversity_json.get("ethnicity"))
            insert_value("Religion", diversity_json.get("religion"))
            insert_value("Sexual_Orientation", diversity_json.get("sexual_orientation"))
            insert_value("Disability", diversity_json.get("disability"))
            yield full_row
    def get_queryset(self):
        passphrase = self.cleaned_data.get("passphrase")
        if passphrase:
            # Decrypt the stored diversity blob in the database.  The private
            # key is embedded in the SQL text; the passphrase itself is bound
            # as a query parameter (%s).
            diversity_expression = "pgp_pub_decrypt(pd.diversity, dearmor('{key}'), %s)::json".format(
                key=diversity.get_private_key()
            )
        else:
            # Keep the bound-parameter count identical and yield an empty
            # JSON object in place of decrypted data.
            diversity_expression = "%s as placeholder, '{}'::json"
        sql = self.query.format(diversity_expression=diversity_expression)
        sql_args = [passphrase] + list(self.date_range)
        return self.execute_query(sql, sql_args)
class MIFeedbackExtract(SQLFileDateRangeReport):
    """Feedback extract over a date range; headers for MIExtractByFeedback.sql output."""
    QUERY_FILE = "MIExtractByFeedback.sql"
    def get_headers(self):
        return [
            "LAA_Reference",
            "Date_Feedback_Created",
            "Feedback_Issue",
            "Feedback_Justified",
            "Feedback_Resolved",
            "Text_Output",
            "Category",
            "Provider name",
            "User email",
        ]
class MIDuplicateCaseExtract(SQLFileDateRangeReport):
    """Duplicate-case extract; headers for MIDuplicateCases.sql output."""
    QUERY_FILE = "MIDuplicateCases.sql"
    def get_headers(self):
        return ["LAA_Reference", "Reference", "Category", "Created", "Full_name", "DOB", "Postcode"]
class MIAlternativeHelpExtract(SQLFileDateRangeReport):
    """Alternative-help extract; headers for MIAlternativeHelp.sql output."""
    QUERY_FILE = "MIAlternativeHelp.sql"
    def get_headers(self):
        return ["Id", "Reference", "Laa_reference", "Category", "Created", "Code", "Notes", "F2F", "KB_Id"]
class MIContactsPerCaseByCategoryExtract(SQLFileDateRangeReport):
    """Contacts-per-case-by-category extract over a date range."""
    QUERY_FILE = "MIContactsPerCaseByCategory.sql"
    def get_headers(self):
        return ["Reference", "LAA_Reference", "outcome_count", "category", "created", "outcomes"]
    def get_valid_outcomes(self):
        # Outcome event codes that stop the case timer; passed to the query
        # as an extra parameter after the date range.
        return event_registry.filter(stops_timer=True, type=LOG_TYPES.OUTCOME).keys()
    def get_sql_params(self):
        return self.date_range + (self.get_valid_outcomes(),)
class MISurveyExtract(SQLFileDateRangeReport):
    """Survey-contact extract; headers for MISurveyExtract.sql output."""
    QUERY_FILE = "MISurveyExtract.sql"
    def get_headers(self):
        return [
            "Hash_ID",
            "created",
            "modified",
            "full_name",
            "postcode",
            "street",
            "phone",
            "email",
            "date_of_birth",
            "ni_number",
            "contact_for_research",
            "contact_for_research_via",
            "safe_to_contact",
            "Third Party Contact",
            "Case Refs",
            "Third Party Case Refs",
            "Organisation",
        ]
class MICB1Extract(SQLFileDateRangeReport):
    """CB1 callback SLA extract; range limited to 3 days (see max_date_range)."""
    QUERY_FILE = "MICB1sSLA.sql"
    max_date_range = 3
    def get_now(self):
        # Separate method so tests can override/freeze "now" -- TODO confirm.
        return timezone.now()
    def get_headers(self):
        return [
            "LAA_Reference",
            "Hash_ID_personal_details_captured",
            "Case_ID",
            "Provider_ID_if_allocated",
            "Law_Category_Name",
            "Date_Case_Created",
            "Last_Modified_Date",
            "Outcome_Code_Child",
            "Matter_Type_1",
            "Matter_Type_2",
            "created_by_id",
            "Scope_Status",
            "Eligibility_Status",
            "Outcome_Created_At",
            "Username",
            "operator_first_view_after_cb1__created",
            "operator_first_log_after_cb1__created",
            "Next_Outcome",
            "callback_window_start",
            "callback_window_end",
            "missed_sla_1",
            "missed_sla_2",
            "Source",
            "Code",
            "Organisation",
        ]
    def get_sql_params(self):
        # Named parameters (the query uses %(from_date)s style placeholders).
        from_date, to_date = self.date_range
        return {"from_date": from_date, "to_date": to_date, "now": self.get_now()}
class MICB1ExtractAgilisys(SQLFileDateRangeReport):
    """Agilisys variant of the CB1 SLA extract; range limited to 3 days."""
    QUERY_FILE = "MICB1sSLAAgilisys.sql"
    max_date_range = 3
    def get_headers(self):
        return [
            "LAA_Reference",
            "Hash_ID_personal_details_captured",
            "Case_ID",
            "Provider_ID_if_allocated",
            "Law_Category_Name",
            "Date_Case_Created",
            "Last_Modified_Date",
            "Outcome_Code_Child",
            "Matter_Type_1",
            "Matter_Type_2",
            "created_by_id",
            "Scope_Status",
            "Eligibility_Status",
            "Outcome_Created_At",
            "Username",
            "operator_first_view_after_cb1__created",
            "operator_first_log_after_cb1__created",
            "Next_Outcome",
            "requires_action_at",
            "sla_15",
            "sla_120",
            "sla_480",
            "is_over_sla_15",
            "is_over_sla_120",
            "is_over_sla_480",
            "Source",
            "Code",
            "sla_30",
            "is_over_sla_30",
            "Organisation",
        ]
class MIDigitalCaseTypesExtract(SQLFileDateRangeReport):
    """Digital case-type extract; headers for MIDigitalCaseTypes.sql output."""
    QUERY_FILE = "MIDigitalCaseTypes.sql"
    def get_headers(self):
        return [
            "laa_reference",
            "case_ref",
            "contact_type",
            "case_created_by",
            "means_test_completed_online",
            "call_me_back_only",
            "scope_result",
            "means_test_result",
            "last_code_used",
            "date_case_created",
        ]
class MIEODReport(SQLFileDateRangeReport):
    """Expressions-of-dissatisfaction (EOD) report over a date range.

    Rows without a category and without notes are skipped; category codes
    are translated to their display labels.
    """

    QUERY_FILE = "MIEOD.sql"

    def get_headers(self):
        return [
            "LAA_Reference",
            "Case_Reference",
            "Case_Category",
            # 'EOD_Created',
            "EOD_Updated",
            "EOD_Category",
            "EOD_Notes",
            "Major",
            # 'Is_Escalated',
            # 'Is_Resolved',
            # 'Is_Justified',
            "Organisation",
        ]

    def _get_col_index(self, column_name):
        # Position of *column_name* in the output row; the header list
        # defines the row layout.
        return self.get_headers().index(column_name)

    def get_rows(self):
        eod_choices = EXPRESSIONS_OF_DISSATISFACTION.CHOICES_DICT
        # Column positions are loop-invariant -- look them up once.
        category_col = self._get_col_index("EOD_Category")
        notes_col = self._get_col_index("EOD_Notes")
        for row in self.get_queryset():
            # Skip rows carrying neither a category nor any notes.
            if not row[category_col] and not row[notes_col]:
                continue
            row = list(row)  # row is a tuple
            # Fix: the previous `x and d.get(...) or "Not set"` idiom would
            # silently emit "Not set" if a category's display label were ever
            # falsy; an explicit conditional cannot misfire that way.
            if row[category_col]:
                row[category_col] = eod_choices.get(row[category_col], "Unknown")
            else:
                row[category_col] = "Not set"
            yield row
class ComplaintsReport(SQLFileDateRangeReport):
    """Complaints report over a date range, classified major/minor against the SLA."""
    QUERY_FILE = "Complaints.sql"
    def get_headers(self):
        return [
            "LAA reference",
            "Case reference",
            "Full name",
            "Case category",
            "Created by operator",
            "Operator manager owner",
            "Complaint method",
            "Complaint received",
            "Complaint category",
            "Holding letter sent",
            "Full response sent",
            "Major/minor",
            "Justified?",
            "Complaint closed",
            "Resolved?",
            "Within SLA?",
            "Organisation",
        ]
    def get_sql_params(self):
        # Named parameters; log-level constants distinguish major vs minor
        # complaints, and the SLA window is passed as a Postgres interval
        # string (e.g. "15 days").
        from_date, to_date = self.date_range
        return {
            "from_date": from_date,
            "to_date": to_date,
            "major": LOG_LEVELS.HIGH,
            "minor": LOG_LEVELS.MINOR,
            "sla_days": "%d days" % SLA_DAYS,
        }
class MIOBIEEExportExtract(MonthRangeReportForm):
    """OBIEE export for a single month.

    The diversity passphrase is mandatory: without it (or with a wrong one)
    the encrypted diversity data cannot be decrypted and generation fails.
    """

    passphrase = forms.CharField(
        # Fix: corrected the user-facing typo "passpharse" -> "passphrase".
        help_text="This is required, the diversity passphrase is required to"
        " decrypt the diversity information that people have given "
        "to us. If not provided or wrong then the report will fail "
        "to generate."
    )
class MetricsReport(SQLFileDateRangeReport):
    """Daily metrics report; headers for metrics.sql output."""
    QUERY_FILE = "metrics.sql"
    def get_sql_params(self):
        from_date, to_date = self.date_range
        return {"from_date": from_date, "to_date": to_date}
    def get_headers(self):
        return [
            "Date",
            "Diagnosis_total",
            "Scope_unknown",
            "Outofscope",
            "Scope_contact",
            "Inscope",
            "Eligibility_check_total",
            "Eligibility_check_unknown",
            "Eligibility_check_ineligible",
            "Eligibility_check_eligible",
            "Cases_total",
            "Cases_unknown",
            "Cases_ineligible",
            "Cases_eligible",
            "Time_total",
            "Time_unknown",
            "Time_ineligible",
            "Time_eligible",
            "Time_web_total",
            "Time_web_unknown",
            "Time_web_ineligible",
            "Time_web_eligible",
            "Time_phone_total",
            "Time_phone_unknown",
            "Time_phone_ineligible",
            "Time_phone_eligible",
        ]
class MIExtractCaseViewAuditLog(SQLFileDateRangeReport):
    """Audit log of case views over a date range."""
    QUERY_FILE = "MIExtractCaseAuditLog.sql"
    def get_sql_params(self):
        from_date, to_date = self.date_range
        return {"from_date": from_date, "to_date": to_date}
    def get_headers(self):
        return ["Case", "Action", "Operator", "Organisation", "Date"]
class MIExtractComplaintViewAuditLog(SQLFileDateRangeReport):
    """Audit log of complaint views over a date range."""
    QUERY_FILE = "MIExtractComplaintAuditLog.sql"
    def get_sql_params(self):
        from_date, to_date = self.date_range
        return {"from_date": from_date, "to_date": to_date}
    def get_headers(self):
        return ["Case", "Complaint Id", "Action", "Operator", "Organisation", "Date"]
class AllKnowledgeBaseArticles(ReportForm):
    """Export every knowledge-base article.

    Each row carries the article's own fields followed by up to four phone
    numbers (name + number) and up to six category links (category name +
    preferred-signpost flag); missing slots are emitted as empty strings.
    """

    def get_queryset(self):
        return Article.objects.prefetch_related('articlecategorymatrix_set__article_category', 'telephonenumber_set')

    def get_rows(self):
        for article in self.get_queryset():
            phone_numbers = article.telephonenumber_set.all()
            category_links = article.articlecategorymatrix_set.all()
            row = [
                article.pk,
                article.created,
                article.modified,
                article.resource_type,
                article.service_name,
                article.service_tag,
                article.organisation,
                article.website,
                article.email,
                article.description,
                article.public_description,
                article.how_to_use,
                article.when_to_use,
                article.address,
                article.opening_hours,
                article.keywords,
                article.geographic_coverage,
                article.type_of_service,
                article.accessibility,
            ]
            # Four phone-number slots, then six category slots; absent
            # entries come back as '' from get_from_nth.
            for slot in range(1, 5):
                row.append(get_from_nth(phone_numbers, slot, "name"))
                row.append(get_from_nth(phone_numbers, slot, "number"))
            for slot in range(1, 7):
                row.append(get_from_nth(category_links, slot, "article_category.name"))
                row.append(get_from_nth(category_links, slot, "preferred_signpost"))
            yield row

    def get_headers(self):
        headers = [
            "ID",
            "Created",
            "Modified",
            "Resource type",
            "Service name",
            "Service tag",
            "Organisation",
            "Website",
            "Email",
            "Description",
            "Public description",
            "How to use",
            "When to use",
            "Address",
            "Opening hours",
            "Keywords",
            "Geographic coverage",
            "Type of service",
            "Accessibility",
        ]
        # Header slots mirror the row layout built in get_rows.
        for slot in range(1, 5):
            headers.append("Tel %d name" % slot)
            headers.append("Tel %d number" % slot)
        for slot in range(1, 7):
            headers.append("Category %d" % slot)
            headers.append("Preferred signpost for category %d" % slot)
        return headers
def get_from_nth(items, n, attribute):
    """Return *attribute* (dotted path allowed) of the n-th item (1-based).

    Returns '' when there is no n-th item.
    """
    try:
        target = items[n - 1]
    except IndexError:
        return ''
    return get_recursively(target, attribute)
def get_recursively(item, attribute):
    """Resolve a dotted *attribute* path against *item*.

    A None leaf value is normalised to '' so report cells stay printable.
    """
    head, _, tail = attribute.partition('.')
    value = getattr(item, head)
    if tail:
        return get_recursively(value, tail)
    return '' if value is None else value
| StarcoderdataPython |
139921 | <filename>Scripts/python/scripts mundo 1/Desafios/Desafio012.py<gh_stars>0
# Read a product price and show the 5% discount and the discounted price.
# The \033[..m escape sequences are ANSI terminal colour codes.
p=float(input('\033[32mqual o preço do produto ? R$\033[m'))
d=(p*5)/100  # discount amount: 5% of the price
v=p-d  # final price after the discount
print('\033[34mO desconto em 5 porcento do produto será de \033[31mR${}\033[34m'.format(d))
print('O valor do produto com 5 porcento de desconto é de \033[31mR${}'.format(v))
1713517 | <reponame>ckw017/showdown.py<filename>showdown/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for the showdown client library: re-exports the public API."""
# Package metadata.
__title__ = "showdown"
__author__ = "chriskw"
__license__ = "MIT"
__version__ = "1.0.0"
# Public API surface, re-exported from the submodules.
from .client import Client  # noqa: F401
from .user import User  # noqa: F401
from .server import Server  # noqa: F401
from .message import ChatMessage, PrivateMessage  # noqa: F401
from .room import Room, Battle  # noqa: F401
162119 | <reponame>oat431/HomeworkCollection<gh_stars>1-10
class Queue:
    """Simple FIFO queue with a fixed capacity backed by a list.

    Bug fixes versus the original:
    - ``qu``/``front``/``rear`` were *class* attributes, so every Queue
      instance shared the same underlying list and counters; they are now
      initialised per instance in ``__init__``.
    - the constructor argument (capacity) and the element count were both
      stored in ``size`` and drifted apart on every enqueue, so ``is_full``
      could never become True; capacity and count are now tracked separately,
      and ``get_size`` reports the current number of queued elements.
    """

    def __init__(self, size):
        self.qu = []          # queued elements, oldest first
        self.capacity = size  # maximum number of elements allowed
        self.front = 0        # index of the front element (always 0 here)
        self.rear = 0         # current number of queued elements

    def en_queue(self, data):
        """Append *data* at the back of the queue."""
        self.qu.append(data)
        self.rear = self.rear + 1

    def de_queue(self):
        """Remove and return the front element."""
        temp = self.qu.pop(self.front)
        self.rear = self.rear - 1
        return temp

    def de_que_all(self):
        """Dequeue every element, printing each one in FIFO order."""
        while not self.is_empty():
            print(self.de_queue().__str__())

    def get_front(self):
        """Return (without removing) the front element."""
        return self.qu[self.front]

    def get_rear(self):
        """Return (without removing) the back element."""
        return self.qu[-1]

    def is_full(self):
        return self.rear == self.capacity

    def is_empty(self):
        return self.rear == 0

    def get_size(self):
        """Number of elements currently queued."""
        return self.rear
1688495 | <filename>apps/accounts/migrations/0003_auto_20200106_1846.py<gh_stars>0
# Generated by Django 3.0.1 on 2020-01-06 18:46
from django.db import migrations
class Migration(migrations.Migration):
    """Data migration: backfill empty Account.username values from the email."""
    # Defined in the class body so it can be referenced directly in
    # `operations` below; called by RunPython with (apps, schema_editor).
    def corrigir_username(apps, schema_editor):
        # Use the historical model, not a direct import, per Django convention.
        Account = apps.get_model('accounts', 'Account')
        for account in Account.objects.all():
            if account.username is None or account.username == '':
                account.username = account.email
                account.save()
    dependencies = [
        ('accounts', '0002_account_tp_user_financeiro'),
    ]
    operations = [
        migrations.RunPython(corrigir_username),
    ]
3306313 | """
How Many Vowels?
Create a function that takes a string and returns the number (count) of vowels
contained within it.
Examples:
print(count_vowels("Celebration")) ➞ 5
print(count_vowels("Palm")) ➞ 1
print(count_vowels("Prediction")) ➞ 4
NOTES:
- The following characters are considered "vowels": a, e, i, o, u (not y).
- All test cases are ONE word and only contain letters.
"""
# Objective: Find total sum of all "vowels" in the given input string.
# Constraints:
# - Only the following are "vowels" => a, e, i, o, u (not y).
# - All given inputs will be void of spaces and will only contain letters.
# Plan:
# (1) Create a function that takes in 1 input ('txt' => a String) and returns
# 1 output ('num_of_vowels' => an Integer).
# (2) Create a dictionary, 'vowels_dict', that contains 'key:value' pairs
# where the 'key' is an accepted "vowel" character and the 'value'
# is the 'True' Bool value.
# (3) Create a 'num_of_vowels' variable that will store the running total
# number of vowels.
# (4) Use a 'for' loop to iterate through the given input 'txt'.
# (5) Create a 'lower_char' variable that converts the character at the
# current index to a lower-cased version of itself.
# (6) Use a conditional statement to determine if the value of 'lower_char'
# at the current index, i, exists in the 'vowels_dict' dictionary. If it
# exists, add 1 to the 'num_of_vowels' dictionary. If it does not exist,
# skip to the next iteration.
# (7) Return the value for the 'num_of_vowels' variable.
# Implementation:
def count_vowels(txt):
    """Return the number of vowels (a, e, i, o, u — not y) in *txt*.

    Matching is case-insensitive; the input is assumed to be a single word,
    but any string works (non-vowel characters are simply not counted).
    """
    # A set gives O(1) membership tests and replaces the dict-of-True table.
    vowels = set("aeiou")
    return sum(1 for char in txt.lower() if char in vowels)
# Demo calls exercising the examples from the module docstring.
print(count_vowels('Celebration'))  # Should return 5
print(count_vowels('Palm'))  # Should return 1
print(count_vowels('Prediction'))  # Should return 4
76567 | <filename>l3py/utilities.py
# Copyright (c) 2018 <NAME>
# See LICENSE for copyright/license details.
"""
Auxiliary functions.
"""
import numpy as np
def legendre_functions(nmax, colat):
    """
    Associated fully normalized Legendre functions (1st kind).

    Parameters
    ----------
    nmax : int
        maximum spherical harmonic degree to compute
    colat : float, array_like(m,)
        co-latitude of evaluation points in radians

    Returns
    -------
    Pnm : array_like(m, nmax + 1, nmax + 1)
        Array containing the fully normalized Legendre functions. Pnm[:, n, m] returns the
        Legendre function of degree n and order m for all points, as does Pnm[:, m-1, n] (for m > 0).
    """
    # NOTE(review): assumes nmax >= 1 -- for nmax == 0 the degree-1 seed
    # assignments below would index out of bounds.  TODO confirm callers.
    theta = np.atleast_1d(colat)
    function_array = np.zeros((theta.size, nmax + 1, nmax + 1))
    # Seed values for the recursions: P00, P10, P11.
    function_array[:, 0, 0] = 1.0  # initial values for recursion
    function_array[:, 1, 0] = np.sqrt(3) * np.cos(theta)
    function_array[:, 1, 1] = np.sqrt(3) * np.sin(theta)
    # Diagonal recursion: Pnn from P(n-1)(n-1).
    for n in range(2, nmax + 1):
        function_array[:, n, n] = np.sqrt((2.0 * n + 1.0) / (2.0 * n)) * np.sin(theta) * \
                                  function_array[:, n - 1, n - 1]
    index = np.arange(nmax + 1)
    # First sub-diagonal: P(n)(n-1) from P(n-1)(n-1), vectorised over n.
    function_array[:, index[2:], index[1:-1]] = np.sqrt(2 * index[2:] + 1) * np.cos(theta[:, np.newaxis]) * \
                                                function_array[:, index[1:-1], index[1:-1]]
    # Remaining terms: three-term recursion in degree n for each order m,
    # filled one (sub-)diagonal ("row" offset) at a time.
    for row in range(2, nmax + 1):
        n = index[row:]
        m = index[0:-row]
        function_array[:, n, m] = np.sqrt((2.0 * n - 1.0) / (n - m) * (2.0 * n + 1.0) / (n + m)) * \
            np.cos(theta[:, np.newaxis]) * function_array[:, n - 1, m] - \
            np.sqrt((2.0 * n + 1.0) / (2.0 * n - 3.0) * (n - m - 1.0) / (n - m) *
                    (n + m - 1.0) / (n + m)) * function_array[:, n - 2, m]
    # Mirror so Pnm is also reachable as [:, m-1, n] (documented dual layout).
    for m in range(1, nmax + 1):
        function_array[:, m - 1, m:] = function_array[:, m:, m]
    return function_array
def normal_gravity(r, colat, a=6378137.0, f=298.2572221010 ** -1, convergence_threshold=1e-9):
    """
    Normal gravity on the ellipsoid (GRS80).

    Parameters
    ----------
    r : float, array_like, shape(m, )
        radius of evaluation point(s) in meters
    colat : float, array_like, shape (m,)
        co-latitude of evaluation points in radians
    a : float
        semi-major axis of ellipsoid (Default: GRS80)
    f : float
        flattening of ellipsoid (Default: GRS80)
    convergence_threshold : float
        maximum absolute difference between latitude iterations in radians

    Returns
    -------
    g : float, array_like, shape(m,) (depending on types of r and colat)
        normal gravity at evaluation point(s) in [m/s**2]
    """
    # Constants: presumably GRS80 equatorial/polar normal gravity and the
    # geodetic parameter m -- TODO confirm against the GRS80 definition.
    ga = 9.7803267715
    gb = 9.8321863685
    m = 0.00344978600308
    # Cartesian components of the evaluation point in the meridian plane.
    z = np.cos(colat) * r
    p = np.abs(np.sin(colat) * r)
    b = a * (1 - f)
    # Second eccentricity squared: e'^2 = a^2/b^2 - 1.
    e2 = (a / b - 1) * (a / b + 1)
    # Iteratively convert (p, z) to geodetic latitude and height h.
    latitude = np.arctan2(z * (1 + e2), p)
    # Mask selecting points below 60 deg latitude, where the p-based height
    # formula is better conditioned than the z-based one.
    L = np.abs(latitude) < 60 / 180 * np.pi
    latitude_old = np.full(latitude.shape, np.inf)
    h = np.zeros(latitude.shape)
    while np.max(np.abs(latitude - latitude_old)) > convergence_threshold:
        latitude_old = latitude.copy()
        N = (a / b) * a / np.sqrt(1 + e2 * np.cos(latitude) ** 2)
        h[L] = p[L] / np.cos(latitude[L]) - N[L]
        h[~L] = z[~L] / np.sin(latitude[~L]) - N[~L] / (1 + e2)
        latitude = np.arctan2(z * (1 + e2), p * (1 + e2 * h / (N + h)))
    cos2 = np.cos(latitude) ** 2
    sin2 = np.sin(latitude) ** 2
    # Normal gravity on the ellipsoid (Somigliana-type form) plus a
    # second-order correction in the height h above the ellipsoid.
    gamma0 = (a * ga * cos2 + b * gb * sin2) / np.sqrt(a ** 2 * cos2 + b ** 2 * sin2)
    return gamma0 - 2 * ga / a * (1 + f + m + (-3 * f + 5 * m / 2) * sin2) * h + 3 * ga / a ** 2 * h ** 2
def geocentric_radius(latitude, a=6378137.0, f=298.2572221010 ** -1):
    """
    Geocentric radius of a point on the ellipsoid.

    Parameters
    ----------
    latitude : float, array_like, shape(m, )
        latitude of evaluation point(s) in radians
    a : float
        semi-major axis of ellipsoid (Default: GRS80)
    f : float
        flattening of ellipsoid (Default: GRS80)

    Returns
    -------
    r : float, array_like, shape(m,) (depending on type latitude)
        geocentric radius of evaluation point(s) in [m]
    """
    # Bug fix: the first eccentricity squared is e^2 = 1 - (1-f)^2 = f*(2-f);
    # the previous code used 2*f*(1-f) = 2f - 2f^2, which is off by f^2 and
    # made r(+-pi/2) differ from the semi-minor axis b = a*(1-f).
    e2 = f * (2 - f)
    nu = a / np.sqrt(1 - e2 * np.sin(latitude) ** 2)
    return nu * np.sqrt(np.cos(latitude) ** 2 + (1 - e2) ** 2 * np.sin(latitude) ** 2)
def colatitude(latitude, a=6378137.0, f=298.2572221010 ** -1):
    """
    Co-latitude (geocentric) of a point on the ellipsoid.

    Parameters
    ----------
    latitude : float, array_like, shape(m, )
        latitude of evaluation point(s) in radians
    a : float
        semi-major axis of ellipsoid (Default: GRS80)
    f : float
        flattening of ellipsoid (Default: GRS80)

    Returns
    -------
    psi : float, array_like, shape(m,) (depending on type latitude)
        colatitude of evaluation point(s) in [rad]
    """
    # Bug fix: the first eccentricity squared is e^2 = 1 - (1-f)^2 = f*(2-f);
    # the previous code used 2*f*(1-f), off by f^2.
    e2 = f * (2 - f)
    sin_lat = np.sin(latitude)
    nu = a / np.sqrt(1 - e2 * sin_lat ** 2)
    # Geocentric radius computed inline with the same (corrected) e2, so the
    # arccos argument stays consistent (equivalent to geocentric_radius).
    radius = nu * np.sqrt(np.cos(latitude) ** 2 + (1 - e2) ** 2 * sin_lat ** 2)
    return np.arccos(nu * (1 - e2) * sin_lat / radius)
148968 | # -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
import pickle
import os
import pytest
import numpy as np
from renormalizer.model import MolList, MolList2, ModelTranslator, Mol, Phonon
from renormalizer.mps import Mpo, Mps
from renormalizer.tests.parameter import mol_list, ph_phys_dim, omega_quantities
from renormalizer.mps.tests import cur_dir
from renormalizer.utils import Quantity, Op
@pytest.mark.parametrize("dt, space, shift", ([30, "GS", 0.0], [30, "EX", 0.0]))
def test_exact_propagator(dt, space, shift):
prop_mpo = Mpo.exact_propagator(mol_list, -1.0j * dt, space, shift)
with open(os.path.join(cur_dir, "test_exact_propagator.pickle"), "rb") as fin:
std_dict = pickle.load(fin)
std_mpo = std_dict[space]
assert prop_mpo == std_mpo
@pytest.mark.parametrize("scheme", (1, 2, 3, 4))
def test_offset(scheme):
ph = Phonon.simple_phonon(Quantity(3.33), Quantity(1), 2)
m = Mol(Quantity(0), [ph] * 2)
mlist = MolList([m] * 2, Quantity(17), scheme=scheme)
mpo1 = Mpo(mlist)
assert mpo1.is_hermitian()
f1 = mpo1.full_operator()
evals1, _ = np.linalg.eigh(f1)
offset = Quantity(0.123)
mpo2 = Mpo(mlist, offset=offset)
f2 = mpo2.full_operator()
evals2, _ = np.linalg.eigh(f2)
assert np.allclose(evals1 - offset.as_au(), evals2)
def test_identity():
    """Expectation of the identity MPO equals the norm of a normalised MPS (= 1)."""
    identity = Mpo.identity(mol_list)
    mps = Mps.random(mol_list, nexciton=1, m_max=5)
    assert mps.expectation(identity) == pytest.approx(mps.dmrg_norm) == pytest.approx(1)
def test_scheme4():
    """Scheme-4 and scheme-3 MPOs give the same expectation on equivalent states."""
    ph = Phonon.simple_phonon(Quantity(3.33), Quantity(1), 2)
    m1 = Mol(Quantity(0), [ph])
    m2 = Mol(Quantity(0), [ph]*2)
    mlist1 = MolList([m1, m2], Quantity(17), 4)
    mlist2 = MolList([m1, m2], Quantity(17), 3)
    mpo4 = Mpo(mlist1)
    assert mpo4.is_hermitian()
    # for debugging
    f = mpo4.full_operator()
    mpo3 = Mpo(mlist2)
    assert mpo3.is_hermitian()
    # makeup two states
    # Scheme 4: a single combined electronic site (3-dim middle tensor).
    mps4 = Mps()
    mps4.mol_list = mlist1
    mps4.use_dummy_qn = True
    mps4.append(np.array([1, 0]).reshape((1,2,1)))
    mps4.append(np.array([0, 0, 1]).reshape((1,-1,1)))
    mps4.append(np.array([0.707, 0.707]).reshape((1,2,1)))
    mps4.append(np.array([1, 0]).reshape((1,2,1)))
    mps4.build_empty_qn()
    e4 = mps4.expectation(mpo4)
    # Scheme 3: one 2-dim site per molecule for the same physical state.
    mps3 = Mps()
    mps3.mol_list = mlist2
    mps3.append(np.array([1, 0]).reshape((1,2,1)))
    mps3.append(np.array([1, 0]).reshape((1,2,1)))
    mps3.append(np.array([0, 1]).reshape((1,2,1)))
    mps3.append(np.array([0.707, 0.707]).reshape((1,2,1)))
    mps3.append(np.array([1, 0]).reshape((1,2,1)))
    e3 = mps3.expectation(mpo3)
    assert pytest.approx(e4) == e3
@pytest.mark.parametrize("scheme", (2, 3, 4))
def test_intersite(scheme):
local_mlist = mol_list.switch_scheme(scheme)
mpo1 = Mpo.intersite(local_mlist, {0:r"a^\dagger"}, {}, Quantity(1.0))
mpo2 = Mpo.onsite(local_mlist, r"a^\dagger", mol_idx_set=[0])
assert mpo1.distance(mpo2) == pytest.approx(0, abs=1e-5)
mpo3 = Mpo.intersite(local_mlist, {2:r"a^\dagger a"}, {}, Quantity(1.0))
mpo4 = Mpo.onsite(local_mlist, r"a^\dagger a", mol_idx_set=[2])
assert mpo3.distance(mpo4) == pytest.approx(0, abs=1e-5)
mpo5 = Mpo.intersite(local_mlist, {2:r"a^\dagger a"}, {}, Quantity(0.5))
assert mpo5.add(mpo5).distance(mpo4) == pytest.approx(0, abs=1e-5)
mpo6 = Mpo.intersite(local_mlist, {0:r"a^\dagger",2:"a"}, {}, Quantity(1.0))
mpo7 = Mpo.onsite(local_mlist, "a", mol_idx_set=[2])
assert mpo2.apply(mpo7).distance(mpo6) == pytest.approx(0, abs=1e-5)
# the tests are based on the similarity between scheme 2 and scheme 3
# so scheme 3 and scheme 4 will be skipped
if scheme == 2:
mpo8 = Mpo(local_mlist)
# a dirty hack to switch from scheme 2 to scheme 3
test_mlist = local_mlist.switch_scheme(2)
test_mlist.scheme = 3
mpo9 = Mpo(test_mlist)
mpo10 = Mpo.intersite(test_mlist, {0:r"a^\dagger",2:"a"}, {},
Quantity(local_mlist.j_matrix[0,2]))
mpo11 = Mpo.intersite(test_mlist, {2:r"a^\dagger",0:"a"}, {},
Quantity(local_mlist.j_matrix[0,2]))
assert mpo11.conj_trans().distance(mpo10) == pytest.approx(0, abs=1e-6)
assert mpo8.distance(mpo9 + mpo10 + mpo11) == pytest.approx(0, abs=1e-6)
test_mlist.periodic = True
mpo12 = Mpo(test_mlist)
assert mpo12.distance(mpo9 + mpo10 + mpo11) == pytest.approx(0, abs=1e-6)
ph_mpo1 = Mpo.ph_onsite(local_mlist, "b", 1, 1)
ph_mpo2 = Mpo.intersite(local_mlist, {}, {(1,1):"b"})
assert ph_mpo1.distance(ph_mpo2) == pytest.approx(0, abs=1e-6)
def test_phonon_onsite():
    """Phonon creation operator raises occupation by one; its adjoint lowers it."""
    gs = Mps.gs(mol_list, max_entangled=False)
    assert not gs.ph_occupations.any()
    b2 = Mpo.ph_onsite(mol_list, r"b^\dagger", 0, 0)
    p1 = b2.apply(gs).normalize()
    assert np.allclose(p1.ph_occupations, [1, 0, 0, 0, 0, 0])
    p2 = b2.apply(p1).normalize()
    assert np.allclose(p2.ph_occupations, [2, 0, 0, 0, 0, 0])
    # The conjugate transpose is the annihilation operator.
    b = b2.conj_trans()
    assert b.distance(Mpo.ph_onsite(mol_list, r"b", 0, 0)) == 0
    assert b.apply(p2).normalize().distance(p1) == pytest.approx(0, abs=1e-5)
from renormalizer.tests.parameter_PBI import construct_mol
@pytest.mark.parametrize("mol_list", (mol_list, construct_mol(10,10,0)))
@pytest.mark.parametrize("scheme", (
123,
4,
))
def test_general_mpo_MolList(mol_list, scheme):
if scheme == 4:
mol_list1 = mol_list.switch_scheme(4)
else:
mol_list1 = mol_list
mol_list1.mol_list2_para()
mpo = Mpo.general_mpo(mol_list1,
const=Quantity(-mol_list1[0].gs_zpe*mol_list1.mol_num))
mpo_std = Mpo(mol_list1)
check_result(mpo, mpo_std)
@pytest.mark.parametrize("mol_list", (mol_list, construct_mol(10,10,0)))
@pytest.mark.parametrize("scheme", (123, 4))
@pytest.mark.parametrize("formula", ("vibronic", "general"))
def test_general_mpo_MolList2(mol_list, scheme, formula):
if scheme == 4:
mol_list1 = mol_list.switch_scheme(4)
else:
mol_list1 = mol_list
# scheme123
mol_list2 = MolList2.MolList_to_MolList2(mol_list1, formula=formula)
mpo_std = Mpo(mol_list1)
# classmethod method
mpo = Mpo.general_mpo(mol_list2, const=Quantity(-mol_list[0].gs_zpe*mol_list.mol_num))
check_result(mpo, mpo_std)
# __init__ method, same api
mpo = Mpo(mol_list2, offset=Quantity(mol_list[0].gs_zpe*mol_list.mol_num))
check_result(mpo, mpo_std)
def test_general_mpo_others():
    """Helper constructors (onsite/intersite/ph_onsite) on MolList2 match MolList,
    and each is reproducible through the general_model interface."""
    mol_list2 = MolList2.MolList_to_MolList2(mol_list)
    # onsite
    mpo_std = Mpo.onsite(mol_list, r"a^\dagger", mol_idx_set=[0])
    mpo = Mpo.onsite(mol_list2, r"a^\dagger", mol_idx_set=[0])
    check_result(mpo, mpo_std)
    # general method
    mpo = Mpo.general_mpo(mol_list2, model={("e_0",):[(Op(r"a^\dagger",0),1.0)]},
                          model_translator=ModelTranslator.general_model)
    check_result(mpo, mpo_std)
    # Dipole-weighted onsite operator, then the same via explicit model terms.
    mpo_std = Mpo.onsite(mol_list, r"a^\dagger a", dipole=True)
    mpo = Mpo.onsite(mol_list2, r"a^\dagger a", dipole=True)
    check_result(mpo, mpo_std)
    mpo = Mpo.general_mpo(mol_list2,
                          model={("e_0",):[(Op(r"a^\dagger a",0),mol_list2.dipole[("e_0",)])],
                                 ("e_1",):[(Op(r"a^\dagger a",0),mol_list2.dipole[("e_1",)])],
                                 ("e_2",):[(Op(r"a^\dagger a",0),mol_list2.dipole[("e_2",)])]},
                          model_translator=ModelTranslator.general_model)
    check_result(mpo, mpo_std)
    # intersite
    mpo_std = Mpo.intersite(mol_list, {0:r"a^\dagger",2:"a"},
                            {(0,1):"b^\dagger"}, Quantity(2.0))
    mpo = Mpo.intersite(mol_list2, {0:r"a^\dagger",2:"a"},
                        {(0,1):r"b^\dagger"}, Quantity(2.0))
    check_result(mpo, mpo_std)
    mpo = Mpo.general_mpo(mol_list2,
                          model={("e_0","e_2","v_1"):[(Op(r"a^\dagger",1), Op(r"a",-1),
                                 Op(r"b^\dagger", 0), 2.0)]},
                          model_translator=ModelTranslator.general_model)
    check_result(mpo, mpo_std)
    # phsite
    mpo_std = Mpo.ph_onsite(mol_list, r"b^\dagger", 0, 0)
    mpo = Mpo.ph_onsite(mol_list2, r"b^\dagger", 0, 0)
    check_result(mpo, mpo_std)
    mpo = Mpo.general_mpo(mol_list2,
                          model={(mol_list2.map[(0,0)],):[(Op(r"b^\dagger",0), 1.0)]},
                          model_translator=ModelTranslator.general_model)
    check_result(mpo, mpo_std)
def check_result(mpo, mpo_std):
    """Assert that ``mpo`` is numerically equivalent to the reference ``mpo_std``.

    Prints both operators' bond dimensions and quantum numbers for
    debugging, then checks that the distance between the two MPOs,
    relative to the norm of the reference, is approximately zero.
    """
    print("std mpo bond dims:", mpo_std.bond_dims)
    print("new mpo bond dims:", mpo.bond_dims)
    print("std mpo qn:", mpo_std.qn, mpo_std.qntot)
    # bug fix: this line previously printed mpo_std.qntot, mislabelling
    # the reference value as the new MPO's total quantum number.
    print("new mpo qn:", mpo.qn, mpo.qntot)
    assert mpo_std.distance(mpo) / np.sqrt(mpo_std.dot(mpo_std)) == pytest.approx(0, abs=1e-5)
| StarcoderdataPython |
4842903 | import re
import datetime
import os
# Image File Upload Utilities
def set_filename_format(now, instance, filename):
    """Build a stored filename: ``<user_no>-<YYYY-MM-DD>-<microsecond><ext>``."""
    extension = os.path.splitext(filename)[1]
    return "{user}-{day}-{micro}{ext}".format(
        user=instance.user_no,
        day=now.date(),
        micro=now.microsecond,
        ext=extension,
    )
def user_directory_path(instance, filename):
    """Return the upload path ``images/<year>/<month>/<day>/<user_no>/<generated name>``.

    The final component is produced by :func:`set_filename_format` from the
    current timestamp and the original upload's extension.

    Bug fix: the format string contained a stray ``(unknown)`` literal and
    silently ignored the computed ``filename`` argument; the generated
    filename is now actually used.
    """
    now = datetime.datetime.now()
    path = "images/{year}/{month}/{day}/{username}/{filename}" \
        .format(year=now.year,
                month=now.month,
                day=now.day,
                username=instance.user_no,
                filename=set_filename_format(now, instance, filename))
    return path
# Review Text Utilities
def is_img_tag(html_text):
    """Return True when the first ``<p>`` in *html_text* immediately opens an ``<img`` tag.

    Raises ValueError (from ``str.index``) when no ``<p>`` is present,
    exactly like the original lookup.
    """
    content_start = html_text.index('<p>') + 3
    return html_text[content_start:content_start + 5] == '<img '
def get_first_p_tag_value(html_text: str):
    """Return the inner text of the first ``<p>...</p>`` that is not an image paragraph.

    Falls through (returning None implicitly) when every paragraph wraps
    an image.
    """
    starts = [match.start() for match in re.finditer('<p>', html_text)]
    ends = [match.start() for match in re.finditer('</p>', html_text)]
    for idx in range(len(starts)):
        paragraph = html_text[starts[idx]:ends[idx]]
        if is_img_tag(paragraph):
            continue
        return html_text[starts[idx] + 3:ends[idx]]
| StarcoderdataPython |
22487 | from .client import Client
from .consts import *
class FutureAPI(Client):
    """Futures-trading REST client (OKEx v3 style endpoints).

    Each method wraps a single HTTP endpoint; request signing and the
    actual HTTP transport live in the ``Client`` base class.  ``GET``,
    ``POST`` and the ``FUTURE_*`` endpoint path constants come from
    ``consts``.  Optional request parameters are only included when
    truthy, matching the exchange's expectations.
    """

    def __init__(self, api_key, api_secret_key, passphrase, use_server_time=False, first=False):
        Client.__init__(self, api_key, api_secret_key, passphrase, use_server_time, first)

    # query position
    def get_position(self):
        return self._request_without_params(GET, FUTURE_POSITION)

    # query specific position
    def get_specific_position(self, instrument_id):
        return self._request_without_params(GET, FUTURE_SPECIFIC_POSITION + str(instrument_id) + '/position')

    # query accounts info
    def get_accounts(self):
        return self._request_without_params(GET, FUTURE_ACCOUNTS)

    # query coin account info
    def get_coin_account(self, underlying):
        return self._request_without_params(GET, FUTURE_COIN_ACCOUNT + str(underlying))

    # query leverage
    def get_leverage(self, underlying):
        return self._request_without_params(GET, FUTURE_GET_LEVERAGE + str(underlying) + '/leverage')

    # set leverage; instrument_id/direction are only sent when provided
    def set_leverage(self, underlying, leverage, instrument_id='', direction=''):
        params = {'leverage': leverage}
        if instrument_id:
            params['instrument_id'] = instrument_id
        if direction:
            params['direction'] = direction
        return self._request_with_params(POST, FUTURE_SET_LEVERAGE + str(underlying) + '/leverage', params)

    # query ledger (`type` shadows the builtin; kept for API compatibility)
    def get_ledger(self, underlying, after='', before='', limit='', type=''):
        params = {}
        if after:
            params['after'] = after
        if before:
            params['before'] = before
        if limit:
            params['limit'] = limit
        if type:
            params['type'] = type
        return self._request_with_params(GET, FUTURE_LEDGER + str(underlying) + '/ledger', params, cursor=True)

    # take order
    # def take_order(self, instrument_id, type, price, size, client_oid='', order_type='0', match_price='0'):
    #     params = {'client_oid': client_oid, 'instrument_id': instrument_id, 'type': type, 'order_type': order_type, 'price': price, 'size': size, 'match_price': match_price}
    #     return self._request_with_params(POST, FUTURE_ORDER, params)

    # take order (place a single order)
    def take_order(self, client_oid,instrument_id, otype,price, size, leverage, order_type,match_price):
        params = {'client_oid':client_oid,'instrument_id': instrument_id, 'type': otype, 'price': price, 'size': size, 'leverage': leverage,'order_type':order_type,'match_price':match_price}
        return self._request_with_params(POST, FUTURE_ORDER, params)

    # take orders (batch order placement)
    def take_orders(self, instrument_id, orders_data):
        params = {'instrument_id': instrument_id, 'orders_data': orders_data}
        return self._request_with_params(POST, FUTURE_ORDERS, params)

    # revoke order by exchange order id or client order id
    # (returns None if neither id is supplied)
    def revoke_order(self, instrument_id, order_id='', client_oid=''):
        if order_id:
            return self._request_without_params(POST, FUTURE_REVOKE_ORDER + str(instrument_id) + '/' + str(order_id))
        elif client_oid:
            return self._request_without_params(POST, FUTURE_REVOKE_ORDER + str(instrument_id) + '/' + str(client_oid))

    # revoke orders in batch, by exchange ids or client ids
    def revoke_orders(self, instrument_id, order_ids='', client_oids=''):
        params = {}
        if order_ids:
            params = {'order_ids': order_ids}
        elif client_oids:
            params = {'client_oids': client_oids}
        return self._request_with_params(POST, FUTURE_REVOKE_ORDERS + str(instrument_id), params)

    # query order list filtered by state, with optional pagination
    def get_order_list(self, state, instrument_id,after='', before='', limit=''):
        params = {'state': state}
        if after:
            params['after'] = after
        if before:
            params['before'] = before
        if limit:
            params['limit'] = limit
        return self._request_with_params(GET, FUTURE_ORDERS_LIST + str(instrument_id), params, cursor=True)

    # query order info by exchange order id or client order id
    def get_order_info(self, instrument_id, order_id='', client_oid=''):
        if order_id:
            return self._request_without_params(GET, FUTURE_ORDER_INFO + str(instrument_id) + '/' + str(order_id))
        elif client_oid:
            return self._request_without_params(GET, FUTURE_ORDER_INFO + str(instrument_id) + '/' + str(client_oid))

    # query fills (trade executions), with optional pagination
    def get_fills(self, instrument_id, order_id='', after='', before='', limit=''):
        params = {'instrument_id': instrument_id}
        if order_id:
            params['order_id'] = order_id
        if after:
            params['after'] = after
        if before:
            params['before'] = before
        if limit:
            params['limit'] = limit
        return self._request_with_params(GET, FUTURE_FILLS, params, cursor=True)

    # set margin_mode (crossed / fixed)
    def set_margin_mode(self, underlying, margin_mode):
        params = {'underlying': underlying, 'margin_mode': margin_mode}
        return self._request_with_params(POST, FUTURE_MARGIN_MODE, params)

    # close_position
    def close_position(self, instrument_id, direction):
        params = {'instrument_id': instrument_id, 'direction': direction}
        return self._request_with_params(POST, FUTURE_CLOSE_POSITION, params)

    # cancel_all open orders for an instrument/direction
    def cancel_all(self, instrument_id, direction):
        params = {'instrument_id': instrument_id, 'direction': direction}
        return self._request_with_params(POST, FUTURE_CANCEL_ALL, params)

    # take order_algo: place an algorithmic order; the extra parameters
    # required depend on order_type (see inline notes)
    def take_order_algo(self, instrument_id, type, order_type, size, trigger_price='', algo_price='', callback_rate='', algo_variance='', avg_amount='', price_limit='', sweep_range='', sweep_ratio='', single_limit='', time_interval=''):
        params = {'instrument_id': instrument_id, 'type': type, 'order_type': order_type, 'size': size}
        if order_type == '1':  # stop-profit/stop-loss parameters (at most 10 concurrent orders)
            params['trigger_price'] = trigger_price
            params['algo_price'] = algo_price
        elif order_type == '2':  # trailing-order parameters (at most 10 concurrent orders)
            params['callback_rate'] = callback_rate
            params['trigger_price'] = trigger_price
        elif order_type == '3':  # iceberg-order parameters (at most 6 concurrent orders)
            params['algo_variance'] = algo_variance
            params['avg_amount'] = avg_amount
            params['price_limit'] = price_limit
        elif order_type == '4':  # time-weighted (TWAP) parameters (at most 6 concurrent orders)
            params['sweep_range'] = sweep_range
            params['sweep_ratio'] = sweep_ratio
            params['single_limit'] = single_limit
            params['price_limit'] = price_limit
            params['time_interval'] = time_interval
        return self._request_with_params(POST, FUTURE_ORDER_ALGO, params)

    # cancel_algos: cancel algorithmic orders by id
    def cancel_algos(self, instrument_id, algo_ids, order_type):
        params = {'instrument_id': instrument_id, 'algo_ids': algo_ids, 'order_type': order_type}
        return self._request_with_params(POST, FUTURE_CANCEL_ALGOS, params)

    # get order_algos: list algorithmic orders; filter by status OR algo_id
    def get_order_algos(self, instrument_id, order_type, status='', algo_id='', before='', after='', limit=''):
        params = {'order_type': order_type}
        if status:
            params['status'] = status
        elif algo_id:
            params['algo_id'] = algo_id
        if before:
            params['before'] = before
        if after:
            params['after'] = after
        if limit:
            params['limit'] = limit
        return self._request_with_params(GET, FUTURE_GET_ORDER_ALGOS + str(instrument_id), params)

    # query the account's trade fee rates
    def get_trade_fee(self):
        return self._request_without_params(GET, FUTURE_TRADE_FEE)

    # get products info
    def get_products(self):
        return self._request_without_params(GET, FUTURE_PRODUCTS_INFO)

    # get order-book depth
    def get_depth(self, instrument_id, size='', depth=''):
        params = {'size': size, 'depth': depth}
        return self._request_with_params(GET, FUTURE_DEPTH + str(instrument_id) + '/book', params)

    # get ticker for all instruments
    def get_ticker(self):
        return self._request_without_params(GET, FUTURE_TICKER)

    # get specific ticker
    def get_specific_ticker(self, instrument_id):
        return self._request_without_params(GET, FUTURE_SPECIFIC_TICKER + str(instrument_id) + '/ticker')

    # query public trades, with optional pagination
    def get_trades(self, instrument_id, after='', before='', limit=''):
        params = {}
        if after:
            params['after'] = after
        if before:
            params['before'] = before
        if limit:
            params['limit'] = limit
        return self._request_with_params(GET, FUTURE_TRADES + str(instrument_id) + '/trades', params, cursor=True)

    # query k-line (candlestick) data
    def get_kline(self, instrument_id, granularity='', start='', end=''):
        params = {'granularity': granularity, 'start': start, 'end': end}
        # reverse chronological order: from end time back to start time
        return self._request_with_params(GET, FUTURE_KLINE + str(instrument_id) + '/candles', params)
        # chronological order: from start time to end time
        # data = self._request_with_params(GET, FUTURE_KLINE + str(instrument_id) + '/candles', params)
        # return list(reversed(data))

    # query index price
    def get_index(self, instrument_id):
        return self._request_without_params(GET, FUTURE_INDEX + str(instrument_id) + '/index')

    # query exchange rate
    def get_rate(self):
        return self._request_without_params(GET, FUTURE_RATE)

    # query estimated delivery price
    def get_estimated_price(self, instrument_id):
        return self._request_without_params(GET, FUTURE_ESTIMAT_PRICE + str(instrument_id) + '/estimated_price')

    # query the platform's total open interest
    def get_holds(self, instrument_id):
        return self._request_without_params(GET, FUTURE_HOLDS + str(instrument_id) + '/open_interest')

    # query price limit
    def get_limit(self, instrument_id):
        return self._request_without_params(GET, FUTURE_LIMIT + str(instrument_id) + '/price_limit')

    # query liquidation orders
    def get_liquidation(self, instrument_id, status, limit='', froms='', to=''):
        params = {'status': status}
        if limit:
            params['limit'] = limit
        if froms:
            params['from'] = froms
        if to:
            params['to'] = to
        return self._request_with_params(GET, FUTURE_LIQUIDATION + str(instrument_id) + '/liquidation', params)

    # query holds amount
    def get_holds_amount(self, instrument_id):
        return self._request_without_params(GET, HOLD_AMOUNT + str(instrument_id) + '/holds')

    # query mark price
    def get_mark_price(self, instrument_id):
        return self._request_without_params(GET, FUTURE_MARK + str(instrument_id) + '/mark_price')
| StarcoderdataPython |
1724603 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for chrome stages."""
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../../..' % os.path.dirname(__file__)))
from chromite.cbuildbot import commands
from chromite.cbuildbot import constants
from chromite.cbuildbot.cbuildbot_unittest import BuilderRunMock
from chromite.cbuildbot.stages import chrome_stages
from chromite.cbuildbot.stages import generic_stages_unittest
from chromite.lib import cros_build_lib
from chromite.lib import cros_build_lib_unittest
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import parallel_unittest
# pylint: disable=R0901,W0212
class ChromeSDKStageTest(generic_stages_unittest.AbstractStageTest,
                         cros_test_lib.LoggingTestCase):
  """Verify stage that creates the chrome-sdk and builds chrome with it."""

  BOT_ID = 'link-paladin'
  RELEASE_TAG = ''

  def setUp(self):
    # Mock out the builder run and parallel execution before preparing
    # the stage under test.
    self.StartPatcher(BuilderRunMock())
    self.StartPatcher(parallel_unittest.ParallelMock())
    self._Prepare()

  def _Prepare(self, bot_id=None, **kwargs):
    # Point chrome_root at a non-existent path and seed the toolchain
    # metadata the ChromeSDK stage reads.
    super(ChromeSDKStageTest, self)._Prepare(bot_id, **kwargs)

    self._run.options.chrome_root = '/tmp/non-existent'
    self._run.attrs.metadata.UpdateWithDict({'toolchain-tuple': ['target'],
                                             'toolchain-url' : 'some-url'})

  def ConstructStage(self):
    self._run.GetArchive().SetupArchivePath()
    return chrome_stages.ChromeSDKStage(self._run, self._current_board)

  def testIt(self):
    """A simple run-through test."""
    # Stub out every external command and the stage's verification hooks
    # so RunStage exercises only the stage's own control flow.
    rc_mock = self.StartPatcher(cros_build_lib_unittest.RunCommandMock())
    rc_mock.SetDefaultCmdResult()
    self.PatchObject(chrome_stages.ChromeSDKStage, '_ArchiveChromeEbuildEnv',
                     autospec=True)
    self.PatchObject(chrome_stages.ChromeSDKStage, '_VerifyChromeDeployed',
                     autospec=True)
    self.PatchObject(chrome_stages.ChromeSDKStage, '_VerifySDKEnvironment',
                     autospec=True)
    self.RunStage()

  def testChromeEnvironment(self):
    """Test that the Chrome environment is built."""
    # Create the chrome environment compressed file.
    stage = self.ConstructStage()
    chrome_env_dir = os.path.join(
        stage._pkg_dir, constants.CHROME_CP + '-25.3643.0_rc1')
    env_file = os.path.join(chrome_env_dir, 'environment')
    osutils.Touch(env_file, makedirs=True)
    cros_build_lib.RunCommand(['bzip2', env_file])

    # Run the code.
    stage._ArchiveChromeEbuildEnv()

    # The archived tarball should be queued for upload and exist on disk.
    env_tar_base = stage._upload_queue.get()[0]
    env_tar = os.path.join(stage.archive_path, env_tar_base)
    self.assertTrue(os.path.exists(env_tar))
    cros_test_lib.VerifyTarball(env_tar, ['./', 'environment'])
class PatchChromeStageTest(generic_stages_unittest.AbstractStageTest):
  """Tests for PatchChromeStage."""

  def setUp(self):
    # Prepare a run that requests two Rietveld patches; the actual patch
    # application is mocked out below.
    self._Prepare(cmd_args=[
        '-r', self.build_root,
        '--rietveld-patches=1234',
        '--rietveld-patches=555:adir',
    ])
    self.PatchObject(commands, 'PatchChrome')

  def ConstructStage(self):
    return chrome_stages.PatchChromeStage(self._run)

  def testBasic(self):
    """Verify requested patches are applied."""
    stage = self.ConstructStage()
    stage.PerformStage()
# Allow running this test module directly.
if __name__ == '__main__':
  cros_test_lib.main()
| StarcoderdataPython |
3225208 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import json
import sys
import xml.parsers.expat
import xml.dom.minidom
import colorama
from awscli.table import COLORAMA_KWARGS
from awscli.compat import six
from awscli.customizations.history.commands import HistorySubcommand
from awscli.customizations.history.filters import RegexFilter
class Formatter(object):
    def __init__(self, output=None, include=None, exclude=None):
        """Formats and outputs CLI history events

        :type output: File-like obj
        :param output: The stream to write the formatted event to. By default
            sys.stdout is used.

        :type include: list
        :param include: A filter specifying which event to only be displayed.
            This parameter is mutually exclusive with exclude.

        :type exclude: list
        :param exclude: A filter specifying which events to exclude from being
            displayed. This parameter is mutually exclusive with include.
        """
        self._output = output
        if self._output is None:
            self._output = sys.stdout
        if include and exclude:
            # bug fix: message previously said "input" where it meant
            # "include" (cf. the docstring and ShowCommand._validate_args).
            raise ValueError(
                'Either include or exclude can be provided but not both')
        self._include = include
        self._exclude = exclude

    def display(self, event_record):
        """Displays a formatted version of the event record

        :type event_record: dict
        :param event_record: The event record to format and display.
        """
        if self._should_display(event_record):
            self._display(event_record)

    def _display(self, event_record):
        # Subclasses implement the actual rendering.
        raise NotImplementedError('_display()')

    def _should_display(self, event_record):
        # With neither filter configured, every event is displayed.
        if self._include:
            return event_record['event_type'] in self._include
        elif self._exclude:
            return event_record['event_type'] not in self._exclude
        else:
            return True
class DetailedFormatter(Formatter):
    """Renders each history event as a titled, sectioned, colorized block."""

    # Truncates AWS request signatures in displayed headers to their first
    # four characters so full signature values are not echoed to the user.
    _SIG_FILTER = RegexFilter(
        'Signature=([a-z0-9]{4})[a-z0-9]{60}',
        r'Signature=\1...',
    )

    # Declarative layout: for each event type, an optional section title and
    # the ordered values to print.  Each value entry may name a key inside
    # the event payload ('payload_key'), select a formatting routine
    # ('value_format', dispatched by SectionValuePrettyFormatter), and
    # apply text filters ('filters').
    _SECTIONS = {
        'CLI_VERSION': {
            'title': 'AWS CLI command entered',
            'values': [
                {'description': 'with AWS CLI version'}
            ]
        },
        'CLI_ARGUMENTS': {
            'values': [
                {'description': 'with arguments'}
            ]
        },
        'API_CALL': {
            'title': 'API call made',
            'values': [
                {
                    'description': 'to service',
                    'payload_key': 'service'
                },
                {
                    'description': 'using operation',
                    'payload_key': 'operation'
                },
                {
                    'description': 'with parameters',
                    'payload_key': 'params',
                    'value_format': 'dictionary'
                }
            ]
        },
        'HTTP_REQUEST': {
            'title': 'HTTP request sent',
            'values': [
                {
                    'description': 'to URL',
                    'payload_key': 'url'
                },
                {
                    'description': 'with method',
                    'payload_key': 'method'
                },
                {
                    'description': 'with headers',
                    'payload_key': 'headers',
                    'value_format': 'dictionary',
                    'filters': [_SIG_FILTER]
                },
                {
                    'description': 'with body',
                    'payload_key': 'body',
                    'value_format': 'http_body'
                }
            ]
        },
        'HTTP_RESPONSE': {
            'title': 'HTTP response received',
            'values': [
                {
                    'description': 'with status code',
                    'payload_key': 'status_code'
                },
                {
                    'description': 'with headers',
                    'payload_key': 'headers',
                    'value_format': 'dictionary'
                },
                {
                    'description': 'with body',
                    'payload_key': 'body',
                    'value_format': 'http_body'
                }
            ]
        },
        'PARSED_RESPONSE': {
            'title': 'HTTP response parsed',
            'values': [
                {
                    'description': 'parsed to',
                    'value_format': 'dictionary'
                }
            ]
        },
        'CLI_RC': {
            'title': 'AWS CLI command exited',
            'values': [
                {'description': 'with return code'}
            ]
        },
    }

    # Styling applied to each rendered component when colorize is enabled.
    _COMPONENT_COLORS = {
        'title': colorama.Style.BRIGHT,
        'description': colorama.Fore.CYAN
    }

    def __init__(self, output=None, include=None, exclude=None, colorize=True):
        super(DetailedFormatter, self).__init__(output, include, exclude)
        # Events of the same API call share a request id; map each id to a
        # small ordinal so related sections can be labelled "[N]".
        self._request_id_to_api_num = {}
        self._num_api_calls = 0
        self._colorize = colorize
        self._value_pformatter = SectionValuePrettyFormatter()
        if self._colorize:
            colorama.init(**COLORAMA_KWARGS)

    def _display(self, event_record):
        # Event types without a section definition are silently skipped.
        section_definition = self._SECTIONS.get(event_record['event_type'])
        if section_definition is not None:
            self._display_section(event_record, section_definition)

    def _display_section(self, event_record, section_definition):
        if 'title' in section_definition:
            self._display_title(section_definition['title'], event_record)
        for value_definition in section_definition['values']:
            self._display_value(value_definition, event_record)

    def _display_title(self, title, event_record):
        formatted_title = self._format_section_title(title, event_record)
        self._write_output(formatted_title)

    def _display_value(self, value_definition, event_record):
        # Without a 'payload_key' the whole payload is the value.
        value_description = value_definition['description']
        event_record_payload = event_record['payload']
        value = event_record_payload
        if 'payload_key' in value_definition:
            value = event_record_payload[value_definition['payload_key']]
        formatted_value = self._format_description(value_description)
        formatted_value += self._format_value(
            value, event_record, value_definition.get('value_format')
        )
        if 'filters' in value_definition:
            for text_filter in value_definition['filters']:
                formatted_value = text_filter.filter_text(formatted_value)
        self._write_output(formatted_value)

    def _write_output(self, content):
        # Encode text to UTF-8 bytes for the underlying stream.
        if isinstance(content, six.text_type):
            content = content.encode('utf-8')
        self._output.write(content)

    def _format_section_title(self, title, event_record):
        formatted_title = title
        api_num = self._get_api_num(event_record)
        if api_num is not None:
            formatted_title = ('[%s] ' % api_num) + formatted_title
        formatted_title = self._color_if_configured(formatted_title, 'title')
        formatted_title += '\n'

        formatted_timestamp = self._format_description('at time')
        formatted_timestamp += self._format_value(
            event_record['timestamp'], event_record, value_format='timestamp')

        return '\n' + formatted_title + formatted_timestamp

    def _get_api_num(self, event_record):
        # Returns None for events that carry no request id.
        request_id = event_record['request_id']
        if request_id:
            if request_id not in self._request_id_to_api_num:
                self._request_id_to_api_num[
                    request_id] = self._num_api_calls
                self._num_api_calls += 1
            return self._request_id_to_api_num[request_id]

    def _format_description(self, value_description):
        return self._color_if_configured(
            value_description + ': ', 'description')

    def _format_value(self, value, event_record, value_format=None):
        if value_format:
            formatted_value = self._value_pformatter.pformat(
                value, value_format, event_record)
        else:
            formatted_value = str(value)
        return formatted_value + '\n'

    def _color_if_configured(self, text, component):
        if self._colorize:
            color = self._COMPONENT_COLORS[component]
            return color + text + colorama.Style.RESET_ALL
        return text
class SectionValuePrettyFormatter(object):
    """Pretty-formats individual section values for the detailed formatter."""

    def pformat(self, value, value_format, event_record):
        """Dispatch to ``_pformat_<value_format>`` and return the result."""
        return getattr(self, '_pformat_' + value_format)(value, event_record)

    def _pformat_timestamp(self, event_timestamp, event_record=None):
        # Timestamps are recorded in milliseconds since the epoch; render
        # in local time with millisecond precision.
        return datetime.datetime.fromtimestamp(
            event_timestamp/1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]

    def _pformat_dictionary(self, obj, event_record=None):
        return json.dumps(obj=obj, sort_keys=True, indent=4)

    def _pformat_http_body(self, body, event_record):
        if not body:
            return 'There is no associated body'
        elif event_record['payload'].get('streaming', False):
            return 'The body is a stream and will not be displayed'
        elif self._is_xml(body):
            # TODO: Figure out a way to minimize the number of times we have
            # to parse the XML. Currently at worst, it will take three times.
            # One to determine if it is XML, another to stip whitespace, and
            # a third to convert to make it pretty. This is an issue as it
            # can cause issues when there are large XML payloads such as
            # an s3 ListObjects call.
            return self._get_pretty_xml(body)
        elif self._is_json_structure(body):
            return self._get_pretty_json(body)
        else:
            return body

    def _get_pretty_xml(self, body):
        # The body is parsed and whitespace is stripped because some services
        # like ec2 already return pretty XML and if toprettyxml() was applied
        # to it, it will add even more newlines and spaces on top of it.
        # So this just removes all whitespace from the start to prevent the
        # chance of adding to much newlines and spaces when toprettyxml()
        # is called.
        stripped_body = self._strip_whitespace(body)
        xml_dom = xml.dom.minidom.parseString(stripped_body)
        return xml_dom.toprettyxml(indent=' '*4, newl='\n')

    def _get_pretty_json(self, body):
        # The json body is loaded so it can be dumped in a format that
        # is desired.
        obj = json.loads(body)
        return self._pformat_dictionary(obj)

    def _is_xml(self, body):
        try:
            xml.dom.minidom.parseString(body)
        except xml.parsers.expat.ExpatError:
            return False
        return True

    def _strip_whitespace(self, xml_string):
        xml_dom = xml.dom.minidom.parseString(xml_string)
        return ''.join(
            [line.strip() for line in xml_dom.toxml().splitlines()]
        )

    def _is_json_structure(self, body):
        if body.startswith('{'):
            try:
                json.loads(body)
                return True
            except ValueError:
                # Fix: json.JSONDecodeError subclasses ValueError; catching
                # ValueError also works on runtimes where
                # json.decoder.JSONDecodeError does not exist (the module
                # still supports six/py2-era interpreters).
                return False
        return False
class ShowCommand(HistorySubcommand):
    """``history show`` subcommand: display the events of one CLI command."""
    NAME = 'show'
    DESCRIPTION = (
        'Shows the various events related to running a specific CLI command. '
        'If this command is ran without any positional arguments, it will '
        'display the events for the last CLI command ran.'
    )
    # Maps --format choices to their formatter classes.
    FORMATTERS = {
        'detailed': DetailedFormatter
    }
    ARG_TABLE = [
        {'name': 'command_id', 'nargs': '?', 'default': 'latest',
         'positional_arg': True,
         'help_text': (
             'The ID of the CLI command to show. If this positional argument '
             'is omitted, it will show the last the CLI command ran.')},
        {'name': 'include', 'nargs': '+',
         'help_text': (
             'Specifies which events to **only** include when showing the '
             'CLI command. This argument is mutually exclusive with '
             '``--exclude``.')},
        {'name': 'exclude', 'nargs': '+',
         'help_text': (
             'Specifies which events to exclude when showing the '
             'CLI command. This argument is mutually exclusive with '
             '``--include``.')},
        {'name': 'format', 'choices': FORMATTERS.keys(),
         'default': 'detailed', 'help_text': (
             'Specifies which format to use in showing the events for '
             'the specified CLI command. The following formats are '
             'supported:\n\n'
             '<ul>'
             '<li> detailed - This the default format. It prints out a '
             'detailed overview of the CLI command ran. It displays all '
             'of the key events in the command lifecycle where each '
             'important event has a title and its important values '
             'underneath. The events are ordered by timestamp and events of '
             'the same API call are associated together with the '
             '[``api_id``] notation where events that share the same '
             '``api_id`` belong to the lifecycle of the same API call.'
             '</li>'
             '</ul>'
         )
         }
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # Open the history DB, stream the matching records through the
        # chosen formatter, and always close the DB even on error.
        self._connect_to_history_db()
        try:
            self._validate_args(parsed_args)
            with self._get_output_stream() as output_stream:
                formatter = self._get_formatter(
                    parsed_args, parsed_globals, output_stream)
                for record in self._get_record_iterator(parsed_args):
                    formatter.display(record)
        finally:
            self._close_history_db()
        return 0

    def _validate_args(self, parsed_args):
        if parsed_args.exclude and parsed_args.include:
            raise ValueError(
                'Either --exclude or --include can be provided but not both')

    def _get_formatter(self, parsed_args, parsed_globals, output_stream):
        # Only the detailed formatter understands colorization.
        format_type = parsed_args.format
        formatter_kwargs = {
            'include': parsed_args.include,
            'exclude': parsed_args.exclude,
            'output': output_stream
        }
        if format_type == 'detailed':
            formatter_kwargs['colorize'] = self._should_use_color(
                parsed_globals)
        return self.FORMATTERS[format_type](**formatter_kwargs)

    def _get_record_iterator(self, parsed_args):
        if parsed_args.command_id == 'latest':
            return self._db_reader.iter_latest_records()
        else:
            return self._db_reader.iter_records(parsed_args.command_id)
| StarcoderdataPython |
3240266 | from setuptools import setup
# Packaging metadata for the `embeddingsviz` TensorBoard embedding
# visualization helper.
# Fix: the last line was fused with a dataset-extraction artifact
# ("| StarcoderdataPython |"), which made the file a syntax error.
setup(name='embeddingsviz',
      version='0.1',
      description='Visualize Embeddings of a Vocabulary in TensorBoard, Including the Neighbors',
      classifiers=[
          'Programming Language :: Python :: 3.5',
          'Topic :: Text Processing :: Linguistic',
      ],
      url='http://github.com/harkous/embeddingsviz',
      author='<NAME>',
      license='MIT',
      packages=['embeddingsviz'],
      install_requires=[
          "tensorflow",
          "numpy",
      ],
      zip_safe=False)
4820979 | from .core import AvroModelContainer, avro_schema
# Explicit public API of the package.
__all__ = ["AvroModelContainer", "avro_schema"]
| StarcoderdataPython |
1612049 | <reponame>brouwa/CNNs-on-FPSPs<gh_stars>1-10
#!/usr/bin/env python
from __future__ import print_function
import rospy
from geometry_msgs.msg import Twist
import datetime
import sys
import time
# Duration (seconds) of each motion phase used by the movement helpers below.
FORWARD_TIME = 0.8
RIGHT_FORWARD_TIME = 0.8
RIGHT_TURN_TIME = 0.8
LEFT_FORWARD_TIME = 0.8
LEFT_TURN_TIME = 0.8
msg = """
Reading from stdin, and Publishing to Twist!
---------------------------
Moving around:
1 (left)
2 (forward)
3 (right)
CTRL-C or 'quit' to quit
"""
# Command codes read from stdin (sent by the upstream vision process).
LEFT = 1
FORWARD = 2
RIGHT = 3

# direction -> (x, y, z, th) velocity components for the Twist message
moveBindings = {
    LEFT: (0,0,0,1),
    FORWARD:(1,0,0,0),
    RIGHT: (0,0,0,-1)
}

# direction -> the string prefix expected on stdin for that command
dir_to_str = { LEFT: str(LEFT),
               FORWARD:str(FORWARD),
               RIGHT: str(RIGHT)}
dir_to_readable = { LEFT: 'LEFT ',
FORWARD:'FORWARD',
RIGHT: 'RIGHT '}
# Base linear/angular velocity scales; overridden from ROS params in __main__.
speed = 0.5
turn = 1
def send_stop():
    """Publish an all-zero Twist to halt the robot; returns 0."""
    halt = Twist()
    halt.linear.x = halt.linear.y = halt.linear.z = 0
    halt.angular.x = halt.angular.y = halt.angular.z = 0
    pub.publish(halt)
    return 0
# Fragments composed into the single-line status message printed per command.
msg1 = 'SCAMP5 camera instructing to go '
msg2 = ', passing that instruction to ROS.'
def move(direction):
    """Publish one Twist command for *direction*, scaled by the module-level
    `speed` and `turn` factors."""
    lin_x, lin_y, lin_z, ang_z = moveBindings[direction]
    twist = Twist()
    twist.linear.x = lin_x * speed
    twist.linear.y = lin_y * speed
    twist.linear.z = lin_z * speed
    twist.angular.x = 0
    twist.angular.y = 0
    twist.angular.z = ang_z * turn
    pub.publish(twist)
def go_straight():
    """Drive forward for FORWARD_TIME seconds, then stop and settle."""
    status = '\r{}{}{}'.format(msg1, dir_to_readable[FORWARD], msg2)
    sys.stdout.write(status)
    sys.stdout.flush()
    move(FORWARD)
    time.sleep(FORWARD_TIME)
    send_stop()
    time.sleep(0.5)
def turn_right():
    """Drive forward, then rotate right, with brief stops between phases."""
    status = '\r{}{}{}'.format(msg1, dir_to_readable[RIGHT], msg2)
    sys.stdout.write(status)
    sys.stdout.flush()
    move(FORWARD)
    time.sleep(RIGHT_FORWARD_TIME)
    send_stop()
    time.sleep(0.2)
    move(RIGHT)
    time.sleep(RIGHT_TURN_TIME)
    send_stop()
    time.sleep(0.5)
def turn_left():
    """Drive forward, then rotate left, with brief stops between phases."""
    status = '\r{}{}{}'.format(msg1, dir_to_readable[LEFT], msg2)
    sys.stdout.write(status)
    sys.stdout.flush()
    move(FORWARD)
    time.sleep(LEFT_FORWARD_TIME)
    send_stop()
    time.sleep(0.2)
    move(LEFT)
    time.sleep(LEFT_TURN_TIME)
    send_stop()
    time.sleep(0.5)
if __name__=="__main__":
pub = rospy.Publisher('cmd_vel', Twist, queue_size = 1)
rospy.init_node('teleop_twist_keyboard')
speed = rospy.get_param("~speed", 0.5)
turn = rospy.get_param("~turn", 1.0)
start_time = datetime.datetime.now()
stop_time = datetime.datetime.now()
try:
print(msg)
while True:
line = sys.stdin.readline()
start_time = datetime.datetime.now()
time_diff = (start_time - stop_time).microseconds
#print(time_diff)
if time_diff > 20000:
if line.startswith(dir_to_str[LEFT]):
turn_left()
elif line.startswith(dir_to_str[FORWARD]):
go_straight()
elif line.startswith(dir_to_str[RIGHT]):
turn_right()
if line.startswith('quit'):
break
stop_time = datetime.datetime.now()
except Exception as e:
print(e)
finally:
# Send STOP message
send_stop()
| StarcoderdataPython |
51658 | <gh_stars>0
from IPython.display import display
from dutil.transform import ht
def dht(arr, n: int = 2) -> None:
    """Display first and last (top and bottom) entries"""
    # `ht` builds the head+tail view; `display` renders it in IPython/Jupyter.
    display(ht(arr, n))
| StarcoderdataPython |
1619945 | #desafio 8: conversor de medidas
# Challenge 8: unit-of-measure converter -- read a length in metres and
# print its value in km, hm, dam, dm, cm and mm.
m = float(input('Digite um valor em metros: '))
km = m / 1000
hm = m / 100
dam = m / 10
dm = m * 10
cm = m * 100
mm = m * 1000
print(f'A medida de {m}m corresponde a: \n {km:.5}km \n {hm}hm \n {dam}dam \n {dm}dm \n {cm:.0f}cm \n {mm:.0f}mm')
| StarcoderdataPython |
125891 | <filename>pm08-multimax/multimax.py
def multimax(iterable):
    """Return a list of all occurrences of the maximum value in *iterable*.

    Bug fixes / generalization over the original:
    - empty input returns [] instead of raising IndexError on ``iterable[0]``
      (the None guard suggested empty-safe behavior was intended);
    - any iterable (including generators) is accepted, not just indexable
      sequences.
    """
    if iterable is None:
        return []
    items = list(iterable)  # materialize so single-pass iterables work
    if not items:
        return []
    largest = max(items)
    return [item for item in items if item == largest]
def multimax1(iterable):
    """Counter-based variant of :func:`multimax`.

    The original body built the Counter and then fell off the end, so it
    always returned None; completed here to return every occurrence of
    the maximum value, matching ``multimax``.
    """
    if iterable is None:
        return []
    from collections import Counter
    counts = Counter(iterable)
    if not counts:
        return []
    largest = max(counts)
    return [largest] * counts[largest]
| StarcoderdataPython |
1610645 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from mjt.mjt.report.accounts_receivable_mjt.accounts_receivable_mjt import ReceivablePayableReportMJT
def execute(filters=None):
    """Frappe report entry point for the supplier-side (payable) report.

    Builds the supplier argument set and delegates to the shared
    receivable/payable report implementation.
    """
    report_args = {
        "party_type": "Supplier",
        "naming_by": ["Buying Settings", "supp_master_name"],
    }
    return ReceivablePayableReportMJT(filters).run(report_args)
| StarcoderdataPython |
1711489 | <reponame>rmoskal/e-springpad
import uuid
from google.appengine.api import memcache
class CollectionCache:
    """Caches a collection of items in App Engine memcache.

    Each item is stored under a fresh UUID key; ``commit`` stores the key
    list itself under another UUID so the whole collection can later be
    reloaded by passing that hash to the constructor.
    """

    def __init__(self, timeout=480, hash=None):
        # NOTE(review): if `hash` refers to an expired/missing entry,
        # memcache.get returns None and `contents` becomes None, which
        # breaks add()/commit()/fetchAll() -- confirm callers only pass
        # live commit hashes.
        self.contents = [];
        if hash:
            self.contents = memcache.get(hash)
        self.timeout = timeout

    def add(self, item):
        # Cache the item under a new UUID key, remember and return the key.
        hash = uuid.uuid1().hex
        memcache.add(hash, item, time = self.timeout)
        self.contents.append(hash)
        return hash

    def commit(self):
        # Persist the list of item keys itself; the returned hash is the
        # handle for rebuilding this collection later.
        hash = uuid.uuid1().hex
        memcache.add(hash, self.contents, time = self.timeout)
        return hash

    def fetchAll(self):
        # Return [key, value] pairs; expired items come back with None values.
        if not self.contents:
            return []
        return [[key,memcache.get(key)] for key in self.contents]

    def fetch(self):
        # Generator over (key, item) pairs, skipping expired/falsy items.
        for key in self.contents:
            item = memcache.get(key)
            if item:
                yield key,item
1703726 | #!usr/bin/env python
# -*- coding:utf-8 -*-
import os
import random
import logging
import argparse
import importlib
import platform
from pprint import pformat
import numpy as np
import torch
from agents.utils import *
# cuDNN toggles deliberately left commented out (benchmark mode trades
# reproducibility for speed).
# torch.backends.cudnn.enabled = True
# torch.backends.cudnn.benchmark = True

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') # - %(name)s
logger = logging.getLogger(__file__)

# CUDA is only used off-Windows here -- presumably the Windows dev box had
# no usable GPU setup; confirm before changing.
device = torch.device('cuda' if torch.cuda.is_available() and platform.system() != 'Windows' else 'cpu')
logger.info("Device: {}".format(device))
def setup_seed(seed):
    """Seed every RNG in use (python, numpy, torch CPU and CUDA) and force
    deterministic cuDNN kernels so training runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
# Fix all RNG seeds up front so every run is reproducible.
setup_seed(42)
parser = argparse.ArgumentParser()
# agent
parser.add_argument("--agent", type=str, required=True,
                    help="Agent name")
parser.add_argument("--task", type=str, required=True,
                    help="Agent name")  # NOTE(review): help text duplicated from --agent; should say "Task name"
# data
parser.add_argument("--dataset_path", type=str, default="data/catslu/hyps/map/",
                    help="Path or url of the dataset. If empty download accroding to dataset.")
parser.add_argument("--save_dir", type=str, default="checkpoint/")
parser.add_argument('--save_name', type=str, default="")
# training
parser.add_argument('--epochs', type=int, required=True)
parser.add_argument('--early_stop', default=-1, type=int)  # -1 disables early stopping
parser.add_argument('--mode', type=str, default="train")
parser.add_argument('--lr_reduce_patience', default=-1, type=int)
parser.add_argument('--lr_decay', type=float, default=0.5)
# infer
parser.add_argument('--result_path', type=str, default="")
parser.add_argument('--infer_data', type=str, default="test")
def get_agent_task(opt):
    """Resolve the trainer and dataset helpers for an agent/task pair.

    The agent name maps to agents.<agent>.trainer.Trainer (e.g.
    "agents.bert_agents.sequence_labeling"), and the task name maps to
    tasks.<task>.{get_datasets, build_dataset}.
    """
    trainer_mod = importlib.import_module("agents." + opt.get('agent') + ".trainer")
    task_mod = importlib.import_module("tasks." + opt.get('task'))
    return (
        trainer_mod.Trainer,
        task_mod.get_datasets,
        task_mod.build_dataset,
    )
# Two-stage CLI parsing: first read just --agent/--task to locate the
# trainer class, let that class register its own flags, then parse all args.
parsed = vars(parser.parse_known_args()[0])
# trainer_class, getdata_class = AGENT_CLASSES[parsed.get('agent')]
trainer_class, getdata_class, builddata_class = get_agent_task(parsed)
trainer_class.add_cmdline_args(parser)
opt = parser.parse_args()
def main():
    """Wire everything together: build the trainer, load the datasets, then
    either run inference from the best checkpoint or train with early stop."""
    if not os.path.exists(opt.save_dir):
        os.mkdir(opt.save_dir)
    # Checkpoint path encodes the save name, task and agent.
    opt.best_checkpoint_path = "{}{}_{}_{}_best_model".format(
        opt.save_dir, opt.save_name, parsed.get('task'), parsed.get('agent'))
    logger.info("Arguments: %s", pformat(opt))

    trainer = trainer_class(opt, device)
    is_infer = opt.mode == "infer"
    for split_name, split_data in getdata_class(opt.dataset_path).items():
        trainer.load_data(split_name, split_data, builddata_class, infer=is_infer)
    if opt.mode == "train":
        trainer.set_optim_schedule()

    if is_infer:
        # Prefer the best checkpoint saved by a previous training run.
        if os.path.exists(opt.best_checkpoint_path):
            opt.checkpoint = opt.best_checkpoint_path
        logger.info("load checkpoint from {} ".format(opt.checkpoint))
        trainer.load(opt.checkpoint)
        if opt.infer_data not in trainer.dataset:
            raise Exception("%s does not exists in datasets" % opt.infer_data)
        result = trainer.infer(opt.infer_data)
        if opt.result_path:
            save_json(result, opt.result_path)
    else:
        for epoch in range(opt.epochs):
            trainer.train_epoch(epoch)
            # Early stop is checked both after training and after validation.
            if trainer.patience >= opt.early_stop > 0:
                break
            trainer.evaluate(epoch, "valid")
            if trainer.patience >= opt.early_stop > 0:
                break
        logger.info('Test performance {}'.format(trainer.test_performance))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4826039 | <reponame>ebot1234/the-blue-alliance<gh_stars>0
import json
import logging
from google.appengine.api import taskqueue
from helpers.cache_clearer import CacheClearer
from helpers.manipulator_base import ManipulatorBase
from helpers.notification_helper import NotificationHelper
from helpers.tbans_helper import TBANSHelper
class AwardManipulator(ManipulatorBase):
    """
    Handle Award database writes.
    """
    @classmethod
    def getCacheKeysAndControllers(cls, affected_refs):
        # Delegate cache-invalidation key mapping to the shared CacheClearer.
        return CacheClearer.get_award_cache_keys_and_controllers(affected_refs)
    @classmethod
    def postUpdateHook(cls, awards, updated_attr_list, is_new_list):
        # Note, updated_attr_list will always be empty, for now
        # Still needs to be implemented in updateMerge
        # See helpers.EventManipulator
        # Collect the distinct events the written awards belong to.
        events = []
        for (award, updated_attrs) in zip(awards, updated_attr_list):
            event = award.event
            if event not in events:
                events.append(event)
        # Send award notifications for events that are currently running;
        # each sender is isolated so one failure doesn't block the other.
        for event in events:
            if event.get().within_a_day:
                try:
                    NotificationHelper.send_award_update(event.get())
                except Exception:
                    logging.error("Error sending award update for {}".format(event.id()))
                try:
                    TBANSHelper.awards(event.get())
                except Exception:
                    logging.error("Error sending award update for {}".format(event.id()))
        # Enqueue task to calculate district points
        for event in events:
            taskqueue.add(
                url='/tasks/math/do/district_points_calc/{}'.format(event.id()),
                method='GET')
    @classmethod
    def updateMerge(self, new_award, old_award, auto_union=True):
        """
        Given an "old" and a "new" Award object, replace the fields in the
        "old" award that are present in the "new" award, but keep fields from
        the "old" award that are null in the "new" award.
        """
        # NOTE: despite the name, `self` is the class here (@classmethod).
        immutable_attrs = [
            'event',
            'award_type_enum',
            'year',
        ] # These build key_name, and cannot be changed without deleting the model.
        attrs = [
            'name_str',
        ]
        list_attrs = []
        auto_union_attrs = [
            'team_list',
            'recipient_json_list',
        ]
        json_attrs = {
            'recipient_json_list'
        }
        # if not auto_union, treat auto_union_attrs as list_attrs
        if not auto_union:
            list_attrs += auto_union_attrs
            auto_union_attrs = []
        # Scalar attrs: copy changed non-None values; the literal string
        # "None" is the sentinel that explicitly clears a field.
        for attr in attrs:
            if getattr(new_award, attr) is not None:
                if getattr(new_award, attr) != getattr(old_award, attr):
                    setattr(old_award, attr, getattr(new_award, attr))
                    old_award.dirty = True
            if getattr(new_award, attr) == "None":
                if getattr(old_award, attr, None) is not None:
                    setattr(old_award, attr, None)
                    old_award.dirty = True
        # List attrs: overwrite wholesale when the new value is non-empty
        # (or always, when auto_union is disabled).
        for attr in list_attrs:
            if len(getattr(new_award, attr)) > 0 or not auto_union:
                if getattr(new_award, attr) != getattr(old_award, attr):
                    setattr(old_award, attr, getattr(new_award, attr))
                    old_award.dirty = True
        # Union attrs: append any new item missing from the old list.
        for attr in auto_union_attrs:
            # JSON equaltiy comparison is not deterministic
            if attr in json_attrs:
                old_list = [json.loads(j) for j in getattr(old_award, attr)]
                new_list = [json.loads(j) for j in getattr(new_award, attr)]
            else:
                old_list = getattr(old_award, attr)
                new_list = getattr(new_award, attr)
            for item in new_list:
                if item not in old_list:
                    old_list.append(item)
                    old_award.dirty = True
            # Turn dicts back to JSON
            if attr in json_attrs:
                merged_list = [json.dumps(d) for d in old_list]
            else:
                merged_list = old_list
            setattr(old_award, attr, merged_list)
        return old_award
| StarcoderdataPython |
from selenium import webdriver
import time
import math
try:
    link = "http://suninjuly.github.io/math.html"
    browser = webdriver.Chrome()
    browser.get(link)
    # Compute the value requested on the page `link`: log(|12*sin(x)|)
    def calc(x):
        return str(math.log(abs(12 * math.sin(int(x)))))
    x_element = browser.find_element_by_css_selector("#input_value")
    x = x_element.text
    y = calc(x)
    # Enter the computed value into the answer input field
    input = browser.find_element_by_css_selector("#answer")  # NOTE(review): shadows the builtin `input`
    input.send_keys(y)
    checkbox = browser.find_element_by_css_selector("#robotCheckbox")
    checkbox.click()
    radiobutton = browser.find_element_by_css_selector("#robotsRule")
    radiobutton.click()
    button = browser.find_element_by_css_selector("body > div > form > button")
    button.click()
finally:
    # wait so the result of the script can be inspected visually
    time.sleep(10)
    # close the browser after all interactions
    browser.quit()
| StarcoderdataPython |
120853 | <gh_stars>100-1000
import tensorflow as tf
sess = tf.Session()
from keras import backend as K
K.set_session(sess)
# Classification DNN model implementation ########################
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout
from keras.metrics import categorical_accuracy, categorical_crossentropy
class DNN():
    """Two-hidden-layer softmax classifier: Keras layers stacked on raw
    TensorFlow placeholders (TF1-style graph mode)."""
    def __init__(self, Nin, Nh_l, Nout):
        # Graph inputs: feature batch (Nin wide) and one-hot labels (Nout wide).
        self.X_ph = tf.placeholder(tf.float32, shape=(None, Nin))
        self.L_ph = tf.placeholder(tf.float32, shape=(None, Nout))
        # Modeling
        H = Dense(Nh_l[0], activation='relu')(self.X_ph)
        H = Dropout(0.5)(H)
        H = Dense(Nh_l[1], activation='relu')(H)
        H = Dropout(0.25)(H)
        self.Y_tf = Dense(Nout, activation='softmax')(H)
        # Operation
        self.Loss_tf = tf.reduce_mean(
            categorical_crossentropy(self.L_ph, self.Y_tf))
        self.Train_tf = tf.train.AdamOptimizer().minimize(self.Loss_tf)
        self.Acc_tf = categorical_accuracy(self.L_ph, self.Y_tf)
        # Run this op once before training to initialize all variables.
        self.Init_tf = tf.global_variables_initializer()
# Data preparation ##############################
import numpy as np
from keras import datasets # mnist
from keras.utils import np_utils # to_categorical
def Data_func():
    """Load MNIST, one-hot encode the labels, flatten each image into a
    pixel vector and scale pixel values to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    labels_train = np_utils.to_categorical(y_train)
    labels_test = np_utils.to_categorical(y_test)
    _, width, height = X_train.shape
    pixels = width * height
    X_train = X_train.reshape(-1, pixels) / 255.0
    X_test = X_test.reshape(-1, pixels) / 255.0
    return (X_train, labels_train), (X_test, labels_test)
# Training-result analysis ##############################
from keraspp.skeras import plot_loss, plot_acc
import matplotlib.pyplot as plt
def run(model, data, sess, epochs, batch_size=100):
    """Train `model` with mini-batch SGD and report test loss/accuracy
    after each epoch.

    Bug fix: the original sliced batches with
    X_train[batch_size*(b-1):batch_size*b], so the first batch (b == 0)
    was the empty slice X_train[-batch_size:0] and every other batch was
    shifted by one; batches now cover [b*batch_size, (b+1)*batch_size).
    """
    (X_train, Y_train), (X_test, Y_test) = data
    sess.run(model.Init_tf)
    with sess.as_default():
        n_train = X_train.shape[0]
        for epoch in range(epochs):
            for b in range(n_train // batch_size):
                start = b * batch_size
                end = start + batch_size
                X_batch = X_train[start:end]
                Y_batch = Y_train[start:end]
                # learning_phase 1 enables Dropout during the train step.
                model.Train_tf.run(feed_dict={model.X_ph: X_batch, model.L_ph: Y_batch, K.learning_phase(): 1})
            # Evaluate on the test set with Dropout disabled (phase 0).
            loss = sess.run(model.Loss_tf, feed_dict={model.X_ph: X_test, model.L_ph: Y_test, K.learning_phase(): 0})
            acc = model.Acc_tf.eval(feed_dict={model.X_ph: X_test, model.L_ph: Y_test, K.learning_phase(): 0})
            print("Epoch {0}: loss = {1:.3f}, acc = {2:.3f}".format(epoch, loss, np.mean(acc)))
# Train and test the classification DNN ####################
def main():
    # Network sizes: 784 flattened 28x28 MNIST pixels in, 10 classes out.
    Nin = 784
    Nh_l = [100, 50]  # hidden layer widths
    number_of_class = 10
    Nout = number_of_class
    data = Data_func()
    model = DNN(Nin, Nh_l, Nout)
    run(model, data, sess, 10, 100)  # 10 epochs, batch size 100
if __name__ == '__main__':
    main()
3380795 | <filename>hanse.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 4 22:12:56 2018
@author: <EMAIL>
"""
from datetime import date, timedelta
from hanse.city import get_city_list
# Startup banner.
print("╔══════════════════╗")
print("║ Start Hanse Game ║")
print("╚══════════════════╝")
today=date(1500,1,1)  # in-game calendar starts on 1 January 1500
cities=get_city_list()  # game world: the list of Hanse cities
# MAINLOOP
# City update
# Ship update
# User-Interface
def menu():
    """Return the three-option command menu as a boxed string (no trailing
    newline)."""
    rows = (
        "┌──────────────┬───────────┬─────────────┐",
        "│ Next Day (n) │ Trade (t) │ Give Up (q) │",
        "└──────────────┴───────────┴─────────────┘",
    )
    return "\n".join(rows)
# Main loop: show the world state, read one command, apply it.
while True:
    for city in cities:
        print(city)
    print(menu())
    user_input = input("[%s] What do you want to do today?"%(str(today)))
    if user_input=="n":
        # Advance the simulation by one day: update every city, then the date.
        for city in cities:
            city.update()
        print("Next Day")
        today=today+timedelta(days=1)
    elif user_input=="t":
        #go to trade interface
        print("Trade")  # NOTE(review): trade interface not implemented yet
    elif user_input=="q":
        print("Bye Bye")
        exit()
| StarcoderdataPython |
1647263 | #!/usr/bin/env python
##################################################################################################
## receive.py
##
## Expects post or get with parameters:
##
## imei : string
## momsn : string
## transmit_time : string
## iridium_latitude : string
## iridium_longitude : string
## iridium_cep : string
## data : hex-encoded string, format:
## lat: string as [-]NNNNNNN (not padded; 5 decimal digits only, no '.')
## lon: string as [-]NNNNNNNN (not padded; 5 decimal digits only, no '.')
## speed: string as integer, not padded
## course: string as integer, not padded
##################################################################################################
import datetime
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import config
########################################################################
# parseGeo: adds decimal to unpadded 5 decimal digit number string
#
def parseGeo(g):
    """Insert a decimal point before the last five digits of an unpadded
    coordinate string, e.g. "1234567" -> "12.34567" and
    "-1234567" -> "-12.34567"."""
    return g[:-5] + '.' + g[-5:]
########################################################################
# Main code
# Read the RockBLOCK POST/GET parameters (Python 2 CGI script).
form = cgi.FieldStorage()
imei = form.getvalue("imei")
momsn = form.getvalue("momsn")
transmit_time= form.getvalue("transmit_time")
iridium_latitude= form.getvalue("iridium_latitude")
iridium_longitude= form.getvalue("iridium_longitude")
iridium_cep= form.getvalue("iridium_cep")
data = form.getvalue("data")
text = ''
# Log every raw request, valid or not.
with open(config.log, 'a') as log:
    log.write('%s,%s,%s,%s,%s,%s,%s,%s\n' %
        (datetime.datetime.now(), momsn, imei, transmit_time, iridium_latitude, iridium_longitude, iridium_cep, data ))
# Emit the CGI response header and page opening.
print "Content-type: text/html"
print
print """
<html>
<head><title>RockBlock web service</title></head>
<body>
"""
# Only process the message when every expected parameter is present.
ok = True
for e in [ imei, momsn, transmit_time, iridium_latitude, iridium_longitude, iridium_cep, data ]:
    if e == None:
        ok = False
if ok:
    # Decode the data
    if (data != None):
        text = data.decode('hex')
    # is this a text message or a telemetry message?
    if text[0] == '\x01':
        # Telemetry payload: lat,lon,speed,course,text (comma separated).
        d = text.split(',')
        lat = parseGeo(d[0])
        lon = parseGeo(d[1])
        speed = d[2]
        course = d[3]
        text = d[4]
        # Write status data
        with open(config.db, 'a') as db:
            db.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n' %
                (datetime.datetime.now(), momsn, imei, transmit_time, iridium_latitude, iridium_longitude, iridium_cep, lat, lon, speed, course, text ))
        print "<p>Message submitted. (lat: %s lon: %s speed: %s course: %s, text: \"%s\")</p>"%(lat, lon, speed, course, text)
    else:
        # Plain text message: just echo it back.
        print "<p>Message: <pre>%s</pre>" % text
print """
</body>
</html>
"""
| StarcoderdataPython |
1707732 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from os import environ
from ..util.url import urlsoup, soup_filter
def _urlsoup(url, data=None, **kargs):
    """Fetch `url` via urlsoup with the fetchurl backend disabled.

    Bug fix: os.environ values must be strings — the original assigned the
    ints 1/0, which raises TypeError. The flag is also now restored even
    when urlsoup raises, via try/finally.
    """
    environ['disable_fetchurl'] = '1'
    try:
        return urlsoup(url, data, **kargs)
    finally:
        environ['disable_fetchurl'] = '0'
class Jandan(object):
    """Scraper for the jandan.net/ooxx image board."""
    baseUrl = "http://jandan.net/ooxx"
    # Pre-built soup filter: only parse the <ol class="commentlist"> element.
    _filter = soup_filter('ol', 'commentlist')
    def __init__(self, id='default'):
        # NOTE: `id` shadows the builtin; kept for interface compatibility.
        self.id = id
        # SERVER_SOFTWARE is set by CGI-style servers (e.g. App Engine);
        # use the fetchurl-disabled wrapper there, plain urlsoup otherwise.
        if 'SERVER_SOFTWARE' in environ:
            self._urlsoup = _urlsoup
        else:
            self._urlsoup = urlsoup
    def fetch(self, page=None):
        """Fetch one listing page (latest when page is None) and return a
        list of feed-style entries: id, title, link, description (images
        wrapped in <p><img/></p> HTML)."""
        url = self.baseUrl
        if page:
            url = "%s/page-%d" % (url, page)
        soup = self._urlsoup(url, parse_only=self._filter)
        ret = []
        for item in soup.find_all('li'):
            # Skip structural <li> elements that carry no comment id.
            if not item.get('id'):
                continue
            text = item.find('div', 'text')
            a = text.find('span', 'righttext').a
            imgs = []
            for img in text.find_all('img'):
                # Prefer the original image URL when lazy-loading is used.
                imgs.append('<p><img src="%s"/></p>' % img.get('org_src', img.get('src')))
            entry = {
                'id': item['id'],
                'title': a.text,
                'link': a['href'],
                'description': '\n'.join(imgs),
            }
            ret.append(entry)
        return ret
| StarcoderdataPython |
3244670 | <reponame>Ahleroy/deeplodocus
import weakref
from typing import Union
from typing import List
class Connection(object):
    """A single connection registered with the Thalamus.

    Holds a weak reference to a receiver method together with the list of
    arguments the receiver expects (None means "pass everything").  The
    weak reference keeps the Thalamus from prolonging the life of the
    object that owns the receiver method.

    :author: Alix Leroy
    """

    def __init__(self, receiver: callable, expected_arguments: Union[List, None] = None) -> None:
        """Store a weak reference to `receiver` plus the expected arguments.

        :param receiver(callable): The method to connect
        :param expected_arguments(Union[List, None]): Expected arguments
            (None means all arguments).
        """
        self.receiver = weakref.WeakMethod(receiver)
        self.expected_arguments = expected_arguments

    def get_receiver(self) -> callable:
        """Return the weak reference to the receiver method."""
        return self.receiver

    def get_expected_arguments(self) -> Union[List, None]:
        """Return the expected arguments (None means all)."""
        return self.expected_arguments
| StarcoderdataPython |
src = Split('''
aos/soc_impl.c
hal/uart.c
hal/flash.c
main.c
''')
# Components this board build depends on.
deps = Split('''
kernel/rhino
platform/arch/arm/armv7m
platform/mcu/wm_w600/
kernel/vcall
kernel/init
''')
# Global preprocessor definitions (kv-store partition layout, sysinfo).
global_macro = Split('''
STDIO_UART=0
CONFIG_NO_TCPIP
RHINO_CONFIG_TICK_TASK=0
RHINO_CONFIG_WORKQUEUE=0
CONFIG_AOS_KV_MULTIPTN_MODE
CONFIG_AOS_KV_PTN=5
CONFIG_AOS_KV_SECOND_PTN=6
CONFIG_AOS_KV_PTN_SIZE=4096
CONFIG_AOS_KV_BUFFER_SIZE=8192
SYSINFO_PRODUCT_MODEL=\\"ALI_AOS_WM_W600\\"
SYSINFO_DEVICE_NAME=\\"WM_W600\\"
''')
# Cortex-M3 code-generation flags applied to the whole build.
global_cflags = Split('''
-mcpu=cortex-m3
-mthumb
-mfloat-abi=soft
-march=armv7-m
-mthumb -mthumb-interwork
-mlittle-endian
-w
''')
# Warning configuration applied to this component only.
local_cflags = Split('''
-Wall
-Werror
-Wno-unused-variable
-Wno-unused-parameter
-Wno-implicit-function-declaration
-Wno-type-limits
-Wno-sign-compare
-Wno-pointer-sign
-Wno-uninitialized
-Wno-return-type
-Wno-unused-function
-Wno-unused-but-set-variable
-Wno-unused-value
-Wno-strict-aliasing
''')
global_includes = Split('''
../../arch/arm/armv7m/gcc/m3
''')
# Linker flags: bare-metal Cortex-M3, no default start files or syscalls.
global_ldflags = Split('''
-mcpu=cortex-m3
-mthumb -mthumb-interwork
-mlittle-endian
-nostartfiles
--specs=nosys.specs
''')
# Cross-compiler prefix is only set for gcc toolchains.
prefix = ''
if aos_global_config.compiler == "gcc":
    prefix = 'arm-none-eabi-'
# Register the MCU component and attach the flags/deps defined above.
component = aos_mcu_component('WM_W600', prefix, src)
component.set_global_arch('Cortex-M3')
component.add_comp_deps(*deps)
component.add_global_macros(*global_macro)
component.add_global_cflags(*global_cflags)
component.add_cflags(*local_cflags)
component.add_global_includes(*global_includes)
component.add_global_ldflags(*global_ldflags)
| StarcoderdataPython |
from collections import defaultdict
def constant_factory(value):
    """Return a zero-argument callable that always yields `value` — handy
    as a defaultdict default_factory."""
    def produce():
        return value
    return produce
# Demo: keys absent from the dict fall back to the factory's constant.
d = defaultdict(constant_factory('<missing>'))
d.update(name='John', action='ran')
print('%(name)s %(action)s to %(object)s' % d)  # 'object' is missing -> '<missing>'
| StarcoderdataPython |
1627549 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Dot product related intrinsics."""
from tvm.script import tir as T
from .. import TensorIntrin
# Tensorization "description": scalar reference semantics of a four-element
# int8 dot product accumulated into a single int32 (C[0] += sum_i A[i]*B[i]).
@T.prim_func
def dp4a_desc(
    A: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    B: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    C: T.Buffer((1,), "int32", offset_factor=1, align=4, scope="local"),
) -> None:
    with T.block("root"):
        T.reads(C[0], A[0:4], B[0:4])
        T.writes(C[0])
        for i in range(0, 4):
            with T.block("update"):
                vi = T.axis.remap("R", [i])
                C[0] = C[0] + T.cast(A[vi], "int32") * T.cast(B[vi], "int32")
# Tensorization "implementation": lower the same computation to a single
# CUDA __dp4a intrinsic call on int8x4 vector loads.
@T.prim_func
def dp4a_impl(
    A: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    B: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
    C: T.Buffer((1,), "int32", offset_factor=1, align=4, scope="local"),
) -> None:
    with T.block("root"):
        T.reads(C[0], A[0:4], B[0:4])
        T.writes(C[0])
        C[0] += T.call_pure_extern(
            "__dp4a", A.vload([0], "int8x4"), B.vload([0], "int8x4"), T.int32(0), dtype="int32"
        )
# Register the description/implementation pair under a stable name so
# schedules can request it by DP4A_INTRIN.
DP4A_INTRIN = "dp4a"
TensorIntrin.register(DP4A_INTRIN, dp4a_desc, dp4a_impl)
| StarcoderdataPython |
63557 | <filename>CGI/simple-server-with-different-languages/cgi-bin/download.py
#!/usr/bin/env python
# Python 2 CGI script: serve a fixed PNG as a file download.
import os
import sys
fullpath = 'images/normal.png'
filename = 'hello_world.png'
# headers
print 'Content-Type: application/octet-stream; name="%s"' % filename
print 'Content-Disposition: attachment; filename="%s"' % filename
print "Content-Length: " + str(os.stat(fullpath).st_size)
print # empty line between headers and body
#sys.stdout.flush() # send header faster
try:
    # body
    # NOTE(review): `print` appends '\n', so the body is one byte longer
    # than the Content-Length header and not binary-safe; the whole file
    # is also read into memory. sys.stdout.write(fo.read()) would be safer.
    with open(fullpath, 'rb') as fo:
        print fo.read()
except Exception as e:
    # On failure, fall back to an HTML error page.
    print 'Content-type:text/html'
    print # empty line between headers and body
    print 'Exception:', e
| StarcoderdataPython |
3243176 | # Generated by Django 2.0.3 on 2019-07-10 09:07
import api.helpers
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the api app: creates the
    Comment, Reaction, Restaurant and UserProfile tables, then links
    Comment to Restaurant and enforces one reaction per user/comment."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField(max_length=500)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='')),
                ('rating', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], default=0, verbose_name='rating')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='authors', to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
        ),
        migrations.CreateModel(
            name='Reaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='api.Comment', verbose_name='Comment')),
                ('user_reacted', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='users_reacted', to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
        ),
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=15)),
                ('category', models.CharField(choices=[('Asian', 'Asian'), ('Italian', 'Italian'), ('Swiss', 'Swiss'), ('Greek', 'Greek')], max_length=15)),
                ('country', models.CharField(max_length=30)),
                ('street', models.CharField(max_length=30)),
                ('city', models.CharField(max_length=30)),
                ('zip', models.CharField(blank=True, max_length=30, null=True)),
                ('website', models.CharField(blank=True, max_length=30, null=True)),
                ('phone', models.CharField(blank=True, max_length=30, null=True)),
                ('email', models.CharField(blank=True, max_length=30, null=True)),
                ('opening_hours', models.CharField(blank=True, max_length=30, null=True)),
                ('price_level', models.IntegerField(blank=True, null=True)),
                ('restaurant_pic', models.ImageField(blank=True, max_length=30, null=True, upload_to='')),
                ('created', models.DateTimeField(auto_now=True)),
                ('updated', models.DateTimeField(auto_now_add=True)),
                ('restaurant_owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owned_restaurants', to=settings.AUTH_USER_MODEL, verbose_name='restaurant_owner')),
            ],
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('location', models.CharField(blank=True, max_length=30, null=True)),
                ('phone', models.CharField(blank=True, max_length=30, null=True)),
                ('bio', models.CharField(blank=True, max_length=300)),
                ('interests', models.CharField(blank=True, max_length=30, null=True)),
                ('profile_pic', models.ImageField(blank=True, null=True, upload_to='')),
                ('code', models.CharField(default=api.helpers.generate_code, max_length=255, verbose_name='code')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
        ),
        # Added separately to break the Comment <-> Restaurant creation cycle.
        migrations.AddField(
            model_name='comment',
            name='restaurant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='restaurants', to='api.Restaurant', verbose_name='Restaurant'),
        ),
        migrations.AlterUniqueTogether(
            name='reaction',
            unique_together={('user_reacted', 'comment')},
        ),
    ]
| StarcoderdataPython |
36548 | #!/usr/bin/env python3
"""Initialize.
Turn full names into initials.
Source:
https://edabit.com/challenge/ANsubgd5zPGxov3u8
"""
def __initialize(name: str, period: bool=False) -> str:
    """Collapse one full name into its initials.

    Private helper used by initialize.

    Arguments:
        name {[str]} -- Full name, parts separated by single spaces.

    Keyword Arguments:
        period {bool} -- Separate initials with periods and append one
            (default: {False})

    Returns:
        [str] -- Initials string, e.g. "JS" or "J.S.".
    """
    initials = [part[0] for part in name.split(' ')]
    if period:
        return '.'.join(initials) + '.'
    return ''.join(initials)
def initialize(names: list, **kwargs) ->list:
    """Turn a list of full names into a list of initials.

    Entries shorter than three characters or without a space are skipped.

    Arguments:
        names {list} -- List of full names, with a space between each name.

    Raises:
        TypeError -- If `names` is not a list.

    Returns:
        list -- All names initialized.
    """
    if not isinstance(names, list):
        raise TypeError('Parameter \'names\' is not a list.')
    return [
        __initialize(name.strip(), **kwargs)
        for name in names
        if len(name) > 2 and ' ' in name
    ]
def main():
    """Run sample initialize function."""
    # First call: plain initials; second call: period-separated initials.
    print(initialize(['<NAME>', '<NAME>', '<NAME>']))
    print(initialize(
        ['<NAME>', '<NAME>', '<NAME>'], period=True))
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3295991 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
One-time script used to remove duplicate attachments from the Anthology.
May be useful for salvaging in the future.
python3 fix_attachments ../data/xml/*.xml
"""
import argparse
import os
import shutil
import ssl
import sys
import tempfile
from anthology.utils import indent
import lxml.etree as ET
import urllib.request
def main(args):
    """Drop duplicate <attachment> entries from every <paper> element in
    each given Anthology XML file, re-indent the papers and write the
    files back in place."""
    for xml_file in args.files:
        # Update XML
        tree = ET.parse(xml_file)
        tree.getroot().tail = '\n'
        for paper in tree.getroot().findall('.//paper'):
            tail = paper.tail  # preserve the paper's tail text across re-indent
            seen = []
            for attachment in paper.findall('./attachment'):
                if attachment.text in seen:
                    print(f'Removing: {attachment.text}')
                    paper.remove(attachment)
                seen.append(attachment.text)
            indent(paper, level=2)
            paper.tail = tail
        tree.write(xml_file, encoding="UTF-8", xml_declaration=True)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('files', nargs='+')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
60630 | # Generated by Django 3.2.9 on 2021-12-28 03:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for myapp: creates the Book table
    and the Currency table that references it."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=100)),
                ('pages', models.IntegerField(default=None, null=True)),
                ('rows_per_page', models.IntegerField(default=None, null=True)),
                ('columns_per_row', models.IntegerField(default=None, null=True)),
                ('created_ts', models.DateTimeField(auto_now_add=True)),
                ('updated_ts', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Currency',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('page', models.IntegerField(default=None, null=True)),
                ('row', models.IntegerField(default=None, null=True)),
                ('column', models.IntegerField(default=None, null=True)),
                ('currency', models.CharField(blank=True, default='', max_length=100)),
                ('value', models.DecimalField(decimal_places=2, default=None, max_digits=19, null=True)),
                ('type', models.CharField(choices=[('Bill', 'Bill'), ('Coin', 'Coin')], max_length=4)),
                ('country', models.CharField(blank=True, default='', max_length=2)),
                ('created_ts', models.DateTimeField(auto_now_add=True)),
                ('updated_ts', models.DateTimeField(auto_now=True)),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.book')),
            ],
        ),
    ]
| StarcoderdataPython |
182583 | #!/usr/bin/env python
#-*- encoding=utf-8 -*-
import sys
def readmap(filename):
    """Parse a graph file into an adjacency-list dict.

    File layout: line 1 is 1/0 (directed/undirected), line 2 holds the
    space-separated node ids, and every following line one tab-separated
    edge.  Every line must end with a newline: the final character of each
    line is stripped before parsing.
    """
    adjacency = {}
    with open(filename, 'r') as mapfile:
        directed = int(mapfile.readline()[:-1])
        for node in mapfile.readline()[:-1].split(' '):
            adjacency[int(node)] = []
        for edge_line in mapfile:
            src, dst = [int(v) for v in edge_line[:-1].split('\t')]
            adjacency[src].append(dst)
            if not directed:
                # Undirected graphs store the edge in both directions.
                adjacency[dst].append(src)
    return adjacency
def dfs(map,prevnode,startnode,nexttable,prevtable,visited,record,tag):
    """Recursive DFS of `map` from `startnode`, arriving via `prevnode`.

    Builds the DFS tree in `nexttable` (parent -> children) and collects
    back edges in `prevtable` (node -> non-tree predecessors).  `record`
    remembers which directed edge visits were already seen so a back edge
    is stored only once per undirected edge, and `tag` assigns every node
    its 1-based discovery order.
    """
    if visited[startnode]:
        #prevnode->startnode 1
        record[(prevnode,startnode)]=1
        # Only store the back edge once both directions have been seen.
        if (startnode,prevnode) in record:
            if startnode in prevtable:
                prevtable[startnode].append(prevnode)
            else:
                prevtable[startnode]=[]
                prevtable[startnode].append(prevnode)
        return
    #print startnode,'!'
    tag[startnode]=len(tag)+1
    visited[startnode]=1
    # Record the tree edge prevnode -> startnode (skip the virtual parent -1).
    if prevnode!=-1:
        if prevnode in nexttable:
            nexttable[prevnode].append(startnode)
        else:
            nexttable[prevnode]=[]
            nexttable[prevnode].append(startnode)
    for neibor in map[startnode]:
        dfs(map,startnode,neibor,nexttable,prevtable,visited,record,tag)
def suffix(node,prevtable,nexttable,value,tag):
    """Post-order pass that fills value[node] = (num, low).

    num is the node's DFS discovery number; low is the smallest of num,
    the discovery numbers reachable via the node's back edges, and its
    children's low values — the classic lowpoint used for articulation
    points.  NOTE(review): 100 is used as +infinity here, so the graph is
    assumed to have fewer than 100 nodes.
    """
    # Recurse into children first (post-order).
    if node in nexttable:
        for inode in nexttable[node]:
            suffix(inode,prevtable,nexttable,value,tag)
    numv=tag[node]
    numw,low=100,100
    # Smallest discovery number reachable through a back edge.
    if node in prevtable:
        for prev in prevtable[node]:
            if tag[prev]<numw:
                numw=tag[prev]
    # Smallest low value among the DFS-tree children.
    if node in nexttable:
        for neibor in nexttable[node]:
            if value[neibor][1]<low:
                low=value[neibor][1]
    lowofme=min(numv,numw,low)
    value[node]=(tag[node],lowofme)
def dfs_tree(map):
    """Run DFS from node 0 and return (nexttable, value): the DFS tree
    (parent -> children) and each node's (discovery number, lowpoint)."""
    nexttable={}
    prevtable={}
    record={}
    startnode,prevnode=0,-1  # -1 acts as the root's virtual parent
    visited={}.fromkeys(map.keys(),0)
    tag={}
    dfs(map,prevnode,startnode,nexttable,prevtable,visited,record,tag)
    #print prevtable
    #print nexttable
    #print tag
    value={}.fromkeys(map.keys(),None)
    suffix(startnode,prevtable,nexttable,value,tag)
    return nexttable,value
def check(nexttable, value):
    """Return the articulation (cut) vertices found in the DFS tree.

    The root (node 0) is a cut vertex iff it has more than one DFS child;
    any other node v is appended once per child whose lowpoint is >= v's
    discovery number (so a node may appear multiple times, matching the
    original behaviour).
    """
    cut_vertices = []
    for vertex, children in nexttable.items():
        if vertex == 0:  # root: special case
            if len(children) > 1:
                cut_vertices.append(vertex)
        else:
            for child in children:
                if value[child][1] >= value[vertex][0]:
                    cut_vertices.append(vertex)
    return cut_vertices
# Entry point: read the graph file named on the command line and print
# its articulation points (the print on the next line is Python 2 syntax).
if __name__ == '__main__':
    map=readmap(sys.argv[1])
    nexttable,value=dfs_tree(map)
    cutnodes=check(nexttable,value)
    print cutnodes
1672572 | <reponame>Nailim/shuttler
# for new opencv
#import os,sys
#os.chdir(os.path.expanduser('~/opencv-2.4.6.1/lib'))
#sys.path.append(os.path.expanduser('~/opencv-2.4.6.1/lib/python2.7/dist-packages'))
# before starting
#export PYTHONPATH=~/opencv-2.4.6.1/lib/python2.7/dist-packages
import os
#import cv
import cv2
import string
import argparse
import numpy as np
from pylab import *
import homography
import camera
import sfm
global inputParser # just a reminder, it's used as a global variable
global inputArgs # just a reminder, it's used as a global variable
# Header template for ASCII PLY point-cloud output; %(vert_num)d is filled
# in with the vertex count when the file is written.
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
def parseInput():
    """Build the CLI parser and parse argv into the module globals
    inputParser/inputArgs."""
    global inputParser
    global inputArgs

    parser = argparse.ArgumentParser(
        description='Rectification of stereo images using epipolar geometry.')
    parser.add_argument('-l', '--left', dest='left', action='store',
                        default="", type=str, help='left image')
    parser.add_argument('-r', '--right', dest='right', action='store',
                        default="", type=str, help='right image')
    parser.add_argument('-lc', '--leftCalibration', dest='leftCalibration', action='store',
                        default="", type=str, help='left image parameters')
    parser.add_argument('-rc', '--rightCalibration', dest='rightCalibration', action='store',
                        default="", type=str, help='right image parameters')
    parser.add_argument('-nc', '--nameCalibration', dest='nameCalibration', action='store',
                        default="fm_out", type=str,
                        help='name of file with coresponding points for calibration (less but good)')
    parser.add_argument('-np', '--namePoints', dest='namePoints', action='store',
                        default="fm_out", type=str,
                        help='name of file with coresponding points for reconstruction (more but probably not good)')
    parser.add_argument('-d', '--debug', action='store_true', help='debug output')

    inputParser = parser
    inputArgs = parser.parse_args()
def processInput() :
    """Run the sparse stereo 3D reconstruction from the parsed CLI arguments.

    Loads the image pair, two matched-point files (a small reliable set for
    calibration and a denser set for reconstruction) and the two camera
    intrinsics, estimates the essential matrix, triangulates the points and
    writes a coloured .ply point cloud (plus a debug overlay image).
    """
    print ""
    # both images are mandatory
    if inputArgs.left == "" or inputArgs.right == "":
        print "Missing images!"
        quit()
    # here we go ...
    # load image pair - we might need them for later
    img_l = cv2.imread(inputArgs.left)
    img_r = cv2.imread(inputArgs.right)
    # NOTE(review): comparing ndarrays to None with '==' is deprecated in
    # newer numpy; 'is None' would be the safe spelling.
    if img_l == None or img_r == None:
        print "Missing images!"
        quit()
    ### git them points - calibration
    # file format: one "xl,yl;xr,yr" match per line
    mkp_l = []
    mkp_r = []
    file_kp = open(inputArgs.nameCalibration, 'r')
    if file_kp == None:
        print "Missing matching points file"
        quit()
    for line in file_kp:
        l_r = line.split(';')
        mkp_l.append([float(l_r[0].split(',')[0]), float(l_r[0].split(',')[1])])
        mkp_r.append([float(l_r[1].split(',')[0]), float(l_r[1].split(',')[1])])
    file_kp.close()
    mkp_l = np.float32(mkp_l)
    mkp_r = np.float32(mkp_r)
    ### git them points - reconstruction
    # denser matches used only for triangulating the final cloud
    mkp_l_p = []
    mkp_r_p = []
    file_kp = open(inputArgs.namePoints, 'r')
    if file_kp == None:
        print "Missing matching points file"
        quit()
    for line in file_kp:
        l_r = line.split(';')
        mkp_l_p.append([float(l_r[0].split(',')[0]), float(l_r[0].split(',')[1])])
        mkp_r_p.append([float(l_r[1].split(',')[0]), float(l_r[1].split(',')[1])])
    file_kp.close()
    mkp_l_p = np.float32(mkp_l_p)
    mkp_r_p = np.float32(mkp_r_p)
    ### git them calibrations - left
    # 3x3 intrinsic matrix, one whitespace-separated row per line
    K_l = []
    file_c_l = open(inputArgs.leftCalibration, 'r')
    if file_c_l == None:
        print "Missing left calibration file"
        quit()
    for line in file_c_l:
        c_l = line.split(' ')
        K_l.append([float(c_l[0]), float(c_l[1]), float(c_l[2])])
    file_c_l.close()
    K_l = np.float32(K_l)
    ### git them calibrations - right
    K_r = []
    file_c_r = open(inputArgs.rightCalibration, 'r')
    if file_c_r == None:
        print "Missing right calibration file"
        quit()
    for line in file_c_r:
        c_l = line.split(' ')
        K_r.append([float(c_l[0]), float(c_l[1]), float(c_l[2])])
    file_c_r.close()
    K_r = np.float32(K_r)
    ### ok, now we start work
    # make homogeneous and normalize with inv(K)
    x1 = homography.make_homog(mkp_l_p.T)
    x2 = homography.make_homog(mkp_r_p.T)
    x1n = dot(inv(K_l),x1)
    x2n = dot(inv(K_r),x2)
    # compute E (E = (K_r)T * F * K_l)
    #F, mask = cv2.findFundamentalMat(mkp_l, mkp_r, cv2.FM_8POINT)
    F, mask = cv2.findFundamentalMat(mkp_l, mkp_r, cv2.FM_RANSAC, 1, 0.99)
    # we select only inlier points - most pf the time this makes it worse
    #mkp_l = mkp_l[mask.ravel()==1]
    #mkp_r = mkp_r[mask.ravel()==1]
    E = K_r.transpose()
    E = E.dot(F)
    E = E.dot(K_l)
    # compute camera matrices (P2 will be list of four solutions)
    P1 = array([[1,0,0,0],[0,1,0,0],[0,0,1,0]])
    P2 = sfm.compute_P_from_essential(E)
    # pick the solution with points in front of cameras
    ind = 0
    maxres = 0
    for i in range(4):
        # triangulate inliers and compute depth for each camera
        X = sfm.triangulate(x1n,x2n,P1,P2[i])
        d1 = dot(P1,X)[2]
        d2 = dot(P2[i],X)[2]
        if sum(d1>0)+sum(d2>0) > maxres:
            maxres = sum(d1>0)+sum(d2>0)
            ind = i
            infront = (d1>0) & (d2>0)
    # triangulate inliers and remove points not in front of both cameras
    X = sfm.triangulate(x1n,x2n,P1,P2[ind])
    X = X[:,infront]
    # visualization
    if inputArgs.debug == 1:
        # draw the reconstruction matches on a copy of the left image
        img_tmp = img_l.copy()
        for kp in mkp_l_p:
            x = int(kp[0])
            y = int(kp[1])
            cv2.circle(img_tmp, (x, y), 3, (0, 0, 255))
        cv2.imwrite(inputArgs.namePoints + ".jpg", img_tmp)
    # 3D plot
    # collect (x, y, z) plus the colour sampled from the left image;
    # channel order is flipped BGR -> RGB for the PLY writer
    out_points = []
    out_colors = []
    for i in range(len(X[0])):
        out_points.append([X[0][i], X[1][i], X[2][i]])
        #out_colors.append(img_l[int(x1[1][i])][int(x1[0][i])])
        out_colors.append([img_l[int(x1[1][i])][int(x1[0][i])][2], img_l[int(x1[1][i])][int(x1[0][i])][1], img_l[int(x1[1][i])][int(x1[0][i])][0]])
    out_points = np.float32(out_points)
    out_colors = np.float32(out_colors)
    write_ply(inputArgs.namePoints + ".ply", out_points, out_colors)
def write_ply(fn, verts, colors):
    """Write an ASCII .ply point cloud: one "x y z r g b" record per vertex."""
    points = np.hstack([verts.reshape(-1, 3), colors.reshape(-1, 3)])
    with open(fn, 'w') as out:
        out.write(ply_header % dict(vert_num=len(points)))
        np.savetxt(out, points, '%f %f %f %d %d %d')
if __name__ == "__main__": # script entry point; not imported as a module
    parseInput()   # parse the command-line options into module globals
    processInput() # run the reconstruction pipeline
    print ""       # trailing blank line for tidier console output
| StarcoderdataPython |
181742 | """Dimensionality reduction through dimensionality selection."""
import logging
import random
import functools
import multiprocessing
import numpy as np
import entropix.core.evaluator as evaluator
__all__ = ('sample_seq', 'sample_limit')
logger = logging.getLogger(__name__)
def _init_eval_metric(metric):
if metric not in ['spr', 'rmse', 'pearson', 'both']:
raise Exception('Unsupported metric: {}'.format(metric))
if metric in ['spr', 'pearson']:
return 0
if metric == 'rmse':
return 10**15.
return (0, 10**15)
# pylint: disable=C0103,W0621
def sample_limit(model, train_splits, metric, limit):
    """Sample dimensions in limit mode.

    Greedy forward selection: in each of *limit* rounds, every unselected
    dimension of *model* is tried and the one improving the evaluation the
    most is kept.  If nothing improves, the "least worst" candidate is added
    so the search can continue.  Returns {1: dims} to match sample_seq's
    fold-indexed return format.
    """
    best_metric = _init_eval_metric(metric)
    dims = []
    max_num_dim_best = 0
    alldims = list(range(model.shape[1]))
    for k in range(limit):
        best_dim_idx = -1
        least_worst_dim = -1
        least_worst_metric = _init_eval_metric(metric)
        for dim_idx in alldims:
            if dim_idx in dims:
                continue
            # try this dimension together with the ones already selected
            dims.append(dim_idx)
            eval_metric = evaluator.evaluate(
                model[:, dims], train_splits, metric=metric)
            if evaluator.is_improving(eval_metric,
                                      best_metric,
                                      metric=metric):
                best_metric = eval_metric
                best_dim_idx = dim_idx
            elif evaluator.is_improving(eval_metric,
                                        least_worst_metric,
                                        metric=metric):
                # track a fallback in case no candidate beats the best
                least_worst_metric = eval_metric
                least_worst_dim = dim_idx
            dims.pop()  # undo the trial
        if best_dim_idx == -1:
            logger.info('Could not find a better metric with {} '
                        'dims. Added least worst dim to continue'.format(k+1))
            dims.append(least_worst_dim)
        else:
            dims.append(best_dim_idx)
            max_num_dim_best = len(dims)
        logger.info('Current best {} = {} with dims = {}'
                    .format(metric, best_metric, dims))
    logger.info('Best eval metrix = {} with ndims = {}'
                .format(best_metric, max_num_dim_best))
    return {1: dims} # to remain consistent with sample_seq return
def sample_seq_reduce(splits_dict, dims, step, fold, metric,
                      best_train_eval_metric):
    """Remove dimensions that do not negatively impact scores on train.

    Tries dropping each dimension in *dims* in turn; a drop is kept only if
    the train metric does not degrade.  Recurses (incrementing *step*) until
    a full pass removes nothing.  Reads the module-global ``model`` set by
    sample_seq().  Returns (kept_dims, best_train_eval_metric).
    """
    logger.info('Reducing dimensions while maintaining highest score '
                'on eval metric {}. Step = {}. Best train eval metric = {}'
                .format(metric, step, best_train_eval_metric))
    dims_set = set(dims)
    for dim_idx in dims:
        # tentatively drop this dimension and re-evaluate
        dims_set.remove(dim_idx)
        train_eval_metric = evaluator.evaluate(
            model[:, list(dims_set)], splits_dict[fold]['train'],
            metric=metric)
        if evaluator.is_degrading(train_eval_metric,
                                  best_train_eval_metric,
                                  metric=metric):
            dims_set.add(dim_idx)  # drop hurt the score: keep the dim
            continue
        logger.info('Constant best train {} = {} for fold {} removing '
                    'dim_idx = {}. New ndim = {}'
                    .format(metric, train_eval_metric, fold,
                            dim_idx, len(dims_set)))
        best_train_eval_metric = train_eval_metric
    logger.info('Finished reducing dims')
    # restore the original selection order of the surviving dimensions
    keep = list(sorted(dims_set, key=dims.index))
    if len(keep) != len(dims):
        # something was removed: another pass may enable further removals
        step += 1
        keep, best_train_eval_metric = sample_seq_reduce(
            splits_dict, keep, step, fold, metric,
            best_train_eval_metric)
    return keep, best_train_eval_metric
def sample_seq_add(splits_dict, keep, alldims, metric, fold,
                   best_train_eval_metric):
    """Add dimensions that improve scores on train.

    Greedily appends each dimension of *alldims* not already in *keep* and
    retains it only when the train metric improves.  Reads the module-global
    ``model`` set by sample_seq().  Returns (keep, best_train_eval_metric).
    """
    logger.info('Increasing dimensions to maximize score on eval metric '
                '{}. Best train eval metric = {}'
                .format(metric, best_train_eval_metric))
    dims = [idx for idx in alldims if idx not in keep]
    added_counter = 0
    for idx, dim_idx in enumerate(dims):
        keep.append(dim_idx)
        train_eval_metric = evaluator.evaluate(
            model[:, keep], splits_dict[fold]['train'], metric=metric)
        if evaluator.is_improving(train_eval_metric,
                                  best_train_eval_metric,
                                  metric=metric):
            added_counter += 1
            best_train_eval_metric = train_eval_metric
            logger.info('New best train {} = {} on fold {} with ndim = {} '
                        'at idx = {} and dim_idx = {}'.format(
                            metric, best_train_eval_metric, fold,
                            len(keep), idx, dim_idx))
        else:
            keep.pop()  # no improvement: undo the addition
    return keep, best_train_eval_metric
def _sample_seq(splits_dict, keep, alldims, metric, fold):
    """Per-fold worker: one add pass followed by reduce passes.

    Runs in a multiprocessing pool; reads the module-global ``model``.
    Returns (fold, selected_dims) so results can arrive out of order.
    """
    best_train_eval_metric = evaluator.evaluate(
        model[:, keep], splits_dict[fold]['train'], metric=metric)
    logger.debug('Initial train eval metric = {}'.format(
        best_train_eval_metric))
    keep, best_train_eval_metric = sample_seq_add(
        splits_dict, keep, alldims, metric, fold,
        best_train_eval_metric)
    keep, best_train_eval_metric = sample_seq_reduce(
        splits_dict, keep, 1, fold, metric, best_train_eval_metric)
    return fold, keep
# pylint: disable=W0601
def sample_seq(_model, splits_dict, kfold_size, metric, shuffle,
               max_num_threads):
    """Sample dimensions in sequential mode.

    Runs _sample_seq on every kfold in a process pool and returns a dict
    mapping fold number -> selected dimension indices.
    """
    # ugly hack to reuse same in-memory model during forking: the global is
    # inherited by the forked workers without pickling the matrix
    global model
    model = _model
    alldims = list(range(model.shape[1]))
    if shuffle:
        random.shuffle(alldims)
        # start from two random distinct dimensions
        keep = np.random.choice(list(range(model.shape[1])),
                                size=2, replace=False).tolist()
    else:
        keep = [0, 1] # select the first two
    # sample dimensons multi-threaded on all kfolds
    num_folds = len(splits_dict.keys())
    logger.info('Applying kfolding with k={} folds where '
                'each test fold is of size {} and accounts for '
                '{}% of the data'
                .format(num_folds, len(splits_dict[1]['test']['sim']),
                        kfold_size*100))
    num_threads = num_folds if num_folds <= max_num_threads \
        else max_num_threads
    with multiprocessing.Pool(num_threads) as pool:
        _sample = functools.partial(_sample_seq, splits_dict, keep,
                                    alldims, metric)
        sampled_dims = {}
        for fold, keep in pool.imap_unordered(_sample,
                                              range(1, num_folds+1)):
            sampled_dims[fold] = keep
        return sampled_dims
| StarcoderdataPython |
1671598 | <filename>recipes/Python/576717_PDF_Directory_Images_using/recipe-576717.py
import os
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import cm, mm, inch, pica
def pdfDirectory(imageDirectory, outputPDFName):
    """Render every image under *imageDirectory* onto its own PDF page.

    Walks the directory tree, draws each .jpg/.gif/.png file on a separate
    letter-sized page (anchored one inch from the page edges) and saves the
    result as *outputPDFName*.  Errors are reported instead of silently
    swallowed (the original used a bare ``except`` that hid the cause).
    """
    dirim = str(imageDirectory)
    output = str(outputPDFName)
    c = canvas.Canvas(output, pagesize=letter)
    try:
        for root, dirs, files in os.walk(dirim):
            for name in files:
                lname = name.lower()
                # endswith accepts a tuple: one check for all extensions
                if lname.endswith((".jpg", ".gif", ".png")):
                    filepath = os.path.join(root, name)
                    c.drawImage(filepath, inch, inch * 1)
                    c.showPage()  # one image per page
        c.save()
        print("PDF of Image directory created")
    except Exception as exc:
        # report the actual failure instead of hiding it
        print("Failed creating PDF: %s" % exc)
| StarcoderdataPython |
177669 | from PIL import Image
import json
import os
import re
import sys
# Getting palette
# /absolute/path/to/Pxls
convertpath = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
# /absolute/path/to/Pxls/pxls.conf
configpath = convertpath + '\\pxls.conf'
configfile = open(configpath, 'r+')
config = str(configfile.read())
configfile.close()
hexToRGB = lambda hex : tuple(int(hex.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))
lines = [line.strip() for line in config.splitlines()]
paletteMatch = None
for line in lines:
paletteMatch = paletteMatch or re.search('^palette: (\[.+\])', line)
paletteArr = json.loads(paletteMatch.group(1))
palette = [hexToRGB(hex) for hex in paletteArr]
# Getting paths: input images come from the command line, output file
# names are fixed
imagePath = sys.argv[1]     # board image
placemapPath = sys.argv[2]  # placemap mask image (alpha marks placeable)
outputPath = 'default_board.dat'
pmoutputPath = 'placemap.dat'
img = Image.open(imagePath)
pix = img.load()
pmimg = Image.open(placemapPath)
pmpix = pmimg.load()
width = img.size[0]
height = img.size[1]
print('Width:', width)
print('Height:', height)
# Convertion
def color_to_palette(c):
    """Return the index of the module-global ``palette`` entry closest to
    RGB tuple *c*.

    Distance is the sum of absolute per-channel differences.  An exact
    match has distance 0, so a single argmin scan covers both the exact
    and approximate cases (the original did a redundant separate
    exact-match pass first).  Ties resolve to the lowest palette index,
    exactly as before.
    """
    diffs = [sum(abs(entry[j] - c[j]) for j in range(3)) for entry in palette]
    return min(range(len(diffs)), key=diffs.__getitem__)
# Write the board file: one byte per pixel, palette index for opaque
# pixels, 0xFF for transparent ones.  ``i`` counts the opaque pixels.
i = 0
with open(outputPath, 'wb+') as f:
    f.truncate()
    for y in range(height):
        for x in range(width):
            p = pix[x, y]
            b = 0xFF
            if p[3] == 255:  # fully opaque -> map to nearest palette index
                c = (p[0], p[1], p[2])
                b = color_to_palette(c)
                i += 1
            f.write(bytes([b]))
# Write the placemap: 0x00 where placement is allowed (opaque), 0xFF elsewhere.
with open(pmoutputPath, 'wb+') as f:
    f.truncate()
    for y in range(height):
        for x in range(width):
            p = pmpix[x, y] # (r, g, b, a)
            b = 0xFF
            if p[3] == 255:
                b = 0x00
            f.write(bytes([b]))
print(i)
| StarcoderdataPython |
1632548 | <filename>src/DCGMM/layer/Linear_Classifier_Layer.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import numpy as np
import tensorflow as tf
from . import Layer
from DCGMM.metric import Metric
from DCGMM.utils import log
class LOSS_FUNCTION:
    """Symbolic names for the classifier's supported loss functions."""
    SCE = 'softmax_cross_entropy'  # softmax cross-entropy
    MSE = 'mean_squared_error'     # mean squared error
class Linear_Classifier_Layer(Layer):
    ''' a linear classifier layer

    A single fully-connected layer (logits = W x + b, plus optional extra
    input branches) trained by plain SGD via update_with_grad().  The loss
    is returned negated so the surrounding framework can maximize it.
    '''

    def __init__(self, input=None, **kwargs):
        Layer.__init__(self, input, **kwargs)
        # command-line configurable hyper-parameters
        self.name = self.parser.add_argument('--name', type=str, default=f'{self.prefix}linear', help='name of the gmm layer')
        self.batch_size = self.parser.add_argument('--batch_size', type=int, default=100, help='size of mini-batches we feed from train dataSet.')
        self.num_classes = self.parser.add_argument('--num_classes', type=int, default=10, help='number of output classes')
        self.epsC = self.parser.add_argument('--epsC', type=float, default=0.05, help='learn rate')
        self.epsC = self.parser.add_argument('--regEps', type=float, default=self.epsC, help='learn rate')  # for compatibility of bash files
        self.return_loss = self.parser.add_argument('--return_loss', type=str, default='loss', help='the name of the returned loss tensor')
        self.lambda_W = self.parser.add_argument('--lambda_W', type=float, default=1.0, help='adaption factor for Ws')
        self.lambda_b = self.parser.add_argument('--lambda_b', type=float, default=1.0, help='adaption factor for bs')
        self.loss_function = self.parser.add_argument('--loss_function', type=str, default=LOSS_FUNCTION.SCE, help='the used loss function ["MSE" (Mean Squared Error), "SCE" (Softmax Cross Entropy)]')
        self.sampling_batch_size = self.parser.add_argument('--sampling_batch_size', type=int, default=100, help='sampling batch size')
        self.loss_factor = self.parser.add_argument('--loss_factor', type=float, default=1., help='factor for multiplying resulting layer loss')
        # evaluation metrics
        metrics = ['accuracy_score'] + ['loss']
        self.metrics = self.parser.add_argument('--metrics', type=str, default=metrics, help='the evaluations metrics')
        self.metric = Metric(self)
        # flatten everything after the batch dimension into the input width
        self.input_shape = self.prev.get_shape()
        self.channels_in = np.prod(self.input_shape[1:])
        self.channels_out = self.num_classes
        log.debug(f'{self.name} input shape {self.input_shape}, sampling_bs={self.sampling_batch_size}')

    def get_shape(self):
        """Output shape: (batch, num_classes)."""
        return self.batch_size, self.channels_out

    def get_layer_variables(self, **kwargs):
        """Return all trainable variables keyed by name (incl. extra branches)."""
        tmp = {f'{self.prefix}W': self.W, f'{self.prefix}b': self.b}
        tmp.update({f'extraW_{i}': W for i, (W, _) in enumerate(self.extra_input_variables)})
        tmp.update({f'extrab_{i}': b for i, (_, b) in enumerate(self.extra_input_variables)})
        return tmp

    def is_trainable(self):
        return True

    def compile(self):
        """Create weight/bias variables and the per-variable adaption factors."""
        W_shape = (self.channels_in, self.channels_out)
        b_shape = [self.channels_out]
        init_W = tf.initializers.TruncatedNormal(stddev=1. / math.sqrt(self.channels_in))(W_shape)
        self.W = self.variable(initial_value=init_W, name='weight', shape=W_shape)
        self.b = self.variable(np.zeros(b_shape), name='bias', shape=b_shape)
        self.extra_input_variables = list()
        # constants to change the adaption rate by SGD step (Ws, bs)
        self.lambda_W_factor = self.variable(self.lambda_W, name='lambda_W', shape=[])
        self.lambda_b_factor = self.variable(self.lambda_b, name='lambda_b', shape=[])

    @tf.function(autograph=False)
    def forward(self, input_tensor, extra_inputs=[]):
        """Compute logits = W x + b, adding each extra input branch."""
        tensor_flattened = tf.reshape(input_tensor, (-1, self.channels_in))
        logits = tf.nn.bias_add(tf.matmul(tensor_flattened, self.W), self.b)
        for (extra_W, extra_b), extra_input_tensor in zip(self.extra_input_variables, extra_inputs):
            tensor_flattened = tf.reshape(extra_input_tensor, (self.batch_size, -1))
            logits = logits + tf.nn.bias_add(tf.matmul(tensor_flattened, extra_W), extra_b)
        return logits

    def loss(self, logits, **kwargs):
        ''' classifier operator (supervised) train a linear classifier (output: predicted class label) '''
        labels = kwargs.get('ys')
        return self.graph_loss(logits, labels)

    # NOTE(review): this decorator was corrupted to '<EMAIL>(autograph=False)'
    # in the source; restored to match forward()'s decorator -- confirm.
    @tf.function(autograph=False)
    def graph_loss(self, logits, labels):
        """Negated loss (framework maximizes): -SCE or -sum of squared errors."""
        if self.loss_function == LOSS_FUNCTION.SCE: loss = -tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
        if self.loss_function == LOSS_FUNCTION.MSE: loss = -tf.reduce_sum((logits - labels) ** 2, axis=1)
        return loss

    def evaluate(self, logits, **kwargs):
        ''' evaluation on a mini-batch '''
        y_true_onehot = kwargs.get('ys')
        y_true = tf.argmax(y_true_onehot, axis=1)
        y_pred = tf.argmax(logits, axis=1)  # class value e.g. [0, 1, 3, ...]
        #y_pred_onehot = tf.one_hot(tf.cast(y_pred, dtype=tf.int32), self.num_classes) # class value in one-hot format
        # accuracy is calculated automatically by y_true and y_pred by Metric object
        accuracy = tf.equal(y_true, y_pred)
        accuracy = tf.cast(accuracy, self.dtype_tf_float)
        accuracy = tf.reduce_mean(accuracy)
        result = {
            'accuracy': accuracy,
            'y_true': y_true,
            'y_pred': y_pred,
        }
        return result

    def update_with_grad(self, grads):
        """Apply one SGD step: add epsC * lambda * gradient to each variable."""
        self.W.assign_add(self.lambda_W_factor * self.epsC * grads[f'{self.prefix}W'])
        self.b.assign_add(self.lambda_b_factor * self.epsC * grads[f'{self.prefix}b'])
        for i, (W, b) in enumerate(self.extra_input_variables):
            W.assign_add(self.lambda_W_factor * self.epsC * grads[f'extraW_{i}'])
            b.assign_add(self.lambda_b_factor * self.epsC * grads[f'extrab_{i}'])

    def backwards(self, topdown=None, **kwargs):
        ''' topdown is a 2D tensor_like of shape [sampling_batch_size,num_classes] in in one-hot! '''
        input_shape = self.prev.get_shape()
        input_shape[0] = self.sampling_batch_size
        if topdown is None:
            return tf.ones(self.input_shape)
        # logits are created as: L = WX + b --> so X = WinvL - b. we approximate inv(W) by W.T
        sampling_op = tf.cast(tf.matmul(topdown - tf.expand_dims(self.b, 0), tf.transpose(self.W)), self.dtype_tf_float)
        # shift so the per-sample minimum is zero before reshaping
        sampling_op = tf.reshape(sampling_op - tf.reduce_min(sampling_op, axis=1, keepdims=True), input_shape)
        return sampling_op

    def post_test_step(self, results, xs, ys=None, **kwargs):
        """Aggregate per-batch results into the configured evaluation metrics."""
        y_pred = results.get('y_pred')
        y_true = results.get('y_true')
        loss = results.get('loss')
        metric_values = self.metric.eval(
            dict=True,  # return a dictionary with metric values
            y_true=y_true,
            y_pred=y_pred,
            loss=loss * self.loss_factor,
            special={
                'accuracy_score': dict(normalize=True),
            }
        )
        return metric_values

    def share_variables(self, *args, **kwargs):
        ''' label the previous layer prototyps '''
        data_dict = dict(proto_labels=list())

        def label_mus():
            # only meaningful when the previous layer is an unpatched GMM
            if not self.prev.is_layer_type('GMM'): return
            if self.prev.h_out != 1 or self.prev.w_out != 1: return  # can not feed patches respectively add labels to slices
            n = int(math.sqrt(self.prev.c_in // 1))  # TODO: set to 3 if color image
            mus = self.prev.mus
            reshape_mus = tf.reshape(mus, [self.prev.K, 1, 1, n * n])  # convert mus to images to use as input
            responsibilities = self.prev.forward(reshape_mus)
            logits = self.forward(responsibilities)
            y_pred = tf.argmax(logits, axis=1)  # class value e.g. [0, 1, 3, ...]
            labels = tf.reshape(y_pred, [self.prev.n, self.prev.n])
            data_dict['proto_labels'] = labels.numpy()

        label_mus()
        return data_dict
| StarcoderdataPython |
6241 | <filename>pyConTextNLP/__init__.py<gh_stars>1-10
#Copyright 2010 <NAME>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""This is an alternative implementation of the pyConText package where I make
use of graphs to indicate relationships between targets and modifiers. Nodes of
thegraphs are the targets and modifiers identified in the text; edges of the
graphs are relationships between the targets. This provides for much simpler
code than what exists in the other version of pyConText where each object has a
dictionary of __modifies and __modifiedby that must be kept in sync with each
other.
Also it is hoped that the use of a directional graph could ultimately simplify
our itemData structures as we could chain together items"""
import os
# Read __version__ from the adjacent version.py without importing the
# package (avoids running package __init__ side effects during setup).
version = {}
with open(os.path.join(os.path.dirname(__file__),"version.py")) as f0:
    exec(f0.read(), version)
__version__ = version['__version__']
| StarcoderdataPython |
3226153 | import pandas as pd
from sklearn import model_selection
from sklearn.tree import DecisionTreeClassifier
def predict(home_team, away_team, city, toss_winner, toss_decision):
    """Predict the winning IPL team for a match.

    Trains a decision tree on the cleaned matches CSV each call, predicts a
    winner code for the encoded match attributes, and falls back to the
    scraped-rank score (calculate_ef_score) when the predicted code matches
    neither participating team.
    """
    matches_cleaned_data = pd.read_csv('./Dataset/matches_cleaned.csv')
    matches_df = matches_cleaned_data[['team1', 'team2', 'city', 'toss_winner', 'toss_decision', 'winner']]
    # Split-out validation dataset
    array = matches_df.values
    x = array[:, 0:5]
    y = array[:, 5]
    validation_size = 0.10
    seed = 7
    x_train, x_validation, y_train, y_validation = model_selection.train_test_split(x, y, test_size=validation_size,
                                                                                   random_state=seed)
    # Test options and evaluation metric
    knn = DecisionTreeClassifier()
    knn.fit(x_train, y_train)
    results = convert_to_numerical_field(home_team, away_team, city, toss_winner, toss_decision)
    predictions = knn.predict([results])
    # NOTE(review): predictions are compared as strings here but via int()
    # below -- presumably the 'winner' column holds numeric codes read back
    # as strings; confirm against the CSV.
    team = ''
    if predictions[0] == '6':
        team = 'KKR'
    if predictions[0] == "5":
        team = 'RCB'
    if predictions[0] == "9":
        team = 'CSK'
    if predictions[0] == "10":
        team = 'RR'
    if predictions[0] == "7":
        team = 'DD'
    if predictions[0] == "8":
        team = 'KXIP'
    if predictions[0] == "1":
        team = 'SRH'
    if predictions[0] == "2":
        team = 'MI'
    print("model->" + team)
    # fallback when the model predicts a team that is not actually playing
    if int(predictions) != convert_again(home_team).__int__() and int(predictions) != convert_again(away_team).__int__():
        print("Exception Case")
        winner = convert_to_shortform(calculate_ef_score(home_team, away_team))
        print("EF score data ->" + winner)
        return winner
    else:
        return team.__str__()
def convert_to_shortform(winning_team):
    """Map a home-city name to its IPL team abbreviation (None if unknown)."""
    city_to_team = {
        'Kolkata': 'KKR',
        'Bangalore': 'RCB',
        'Pune': 'CSK',
        'Jaipur': 'RR',
        'Delhi': 'DD',
        'Dharamshala': 'KXIP',
        'Hyderabad': 'SRH',
        'Mumbai': 'MI',
    }
    return city_to_team.get(winning_team)
def convert_again(home_team):
    """Map a home-city name to its numeric team code (None if unknown)."""
    city_to_code = {
        'Kolkata': 6,
        'Bangalore': 5,
        'Pune': 9,
        'Jaipur': 10,
        'Delhi': 7,
        'Dharamshala': 8,
        'Hyderabad': 1,
        'Mumbai': 2,
    }
    return city_to_code.get(home_team)
def convert_to_numerical_field(home_team, away_team, city, toss_winner, toss_decision):
    """Encode match attributes as the numeric feature vector used by the model.

    Appends, in order: home team, away team, city (the ``city`` argument
    carries a ``"City: "`` prefix that is stripped via ``city[6:]``), toss
    winner and toss decision.  Unknown values are skipped, exactly as in the
    original if-chains, so the result may hold fewer than five entries.
    """
    team_codes = {'Kolkata': 6, 'Bangalore': 5, 'Pune': 9, 'Jaipur': 10,
                  'Delhi': 7, 'Dharamshala': 8, 'Hyderabad': 1, 'Mumbai': 2}
    city_codes = {'Kolkata': 7, 'Bangalore': 5, 'Pune': 2, 'Jaipur': 11,
                  'Delhi': 8, 'Dharamshala': 24, 'Hyderabad': 1, 'Mumbai': 6}
    toss_codes = {'KKR': 6, 'RCB': 5, 'CSK': 9, 'RR': 10,
                  'DD': 7, 'KXIP': 8, 'SRH': 1, 'MI': 2}
    decision_codes = {'Bat': 2, 'Field': 1}

    features = []
    for code in (team_codes.get(home_team),
                 team_codes.get(away_team),
                 city_codes.get(city[6:]),
                 toss_codes.get(toss_winner),
                 decision_codes.get(toss_decision)):
        if code is not None:
            features.append(code)
    return features
# prediction from site scrape data
def calculate_ef_score(home, away):
    """Pick the stronger of two teams by their scraped 'sum' rank score.

    Reads ./Dataset/_team_rank.csv and returns the team name whose score
    is higher.
    """
    data = pd.read_csv('./Dataset/_team_rank.csv')
    home_score = list(data.loc[data['Team'] == home]['sum'])
    away_score = list(data.loc[data['Team'] == away]['sum'])
    # NOTE(review): this compares two one-element *lists* lexicographically;
    # an empty list (team missing from the CSV) always loses -- confirm
    # that is the intended behaviour.
    if home_score > away_score:
        return home
    else:
        return away
# predict('Jaipur', 'Hyderabad', 'City: Jaipur', 'RR', 'Bat')
| StarcoderdataPython |
1705055 | <filename>packet.py
from bson import BSON as bson
import cryptoManager
import os
import io
import struct
class Packet:
    """A single LOCO-protocol packet (header + BSON body) with helpers for
    (de)serialising both the plain and the AES-encrypted wire formats."""

    def __init__(self, PacketID=0, StatusCode=0, PacketName="", BodyType=0, Body=b""):
        self.PacketID = PacketID      # uint32 sequence id
        self.StatusCode = StatusCode  # uint16 status code
        self.PacketName = PacketName  # method name, at most 11 bytes, NUL padded
        self.BodyType = BodyType      # int8 body-encoding marker
        self.Body = Body              # raw payload bytes (BSON)

    def toLocoPacket(self):
        """Serialise header + body to the plain wire format (little-endian)."""
        f = io.BytesIO()
        f.write(struct.pack("<I", self.PacketID))
        f.write(struct.pack("<H", self.StatusCode))
        # the name field is a fixed 11-byte slot; longer names cannot fit
        if (11-len(self.PacketName)) < 0:
            raise Exception("invalid packetName")
        f.write(self.PacketName.encode("utf-8"))
        f.write(b"\x00"*(11-len(self.PacketName)))
        f.write(struct.pack("<b", self.BodyType))
        f.write(struct.pack("<i", len(self.Body)))
        f.write(self.Body)
        return f.getvalue()

    def readLocoPacket(self, packet):
        """Populate this instance from plain wire-format bytes."""
        self.PacketID = struct.unpack("<I", packet[:4])[0]
        self.StatusCode = struct.unpack("<H", packet[4:6])[0]
        self.PacketName = packet[6:17].decode().replace("\0", "")
        self.BodyType = struct.unpack("<b", packet[17:18])[0]
        self.BodySize = struct.unpack("<i", packet[18:22])[0]
        self.Body = packet[22:]

    def toEncryptedLocoPacket(self, crypto):
        """Serialise, AES-encrypt with a random IV, and prepend the length."""
        iv = os.urandom(16)
        encrypted_packet = crypto.aesEncrypt(self.toLocoPacket(), iv)
        f = io.BytesIO()
        # length prefix covers IV + ciphertext, written little-endian
        f.write(struct.pack("<I", len(encrypted_packet)+len(iv)))
        f.write(iv)
        f.write(encrypted_packet)
        return f.getvalue()

    def readEncryptedLocoPacket(self, packet, crypto):
        """Decrypt an encrypted wire packet and populate this instance."""
        # NOTE(review): the length is read big-endian (">I") here but written
        # little-endian ("<I") in toEncryptedLocoPacket -- one of the two is
        # likely wrong; confirm against the actual protocol traffic.
        packetLen = struct.unpack(">I", packet[0:4])[0]
        iv = packet[4:20]
        data = packet[20:packetLen-16]
        dec = crypto.aesDecrypt(data, iv)
        try:
            self.readLocoPacket(dec)
        except Exception as e:
            print(str(e))

    def toJsonBody(self):
        """Decode the BSON body into a Python dict."""
        return bson.decode(self.Body)
| StarcoderdataPython |
93538 | <reponame>mchiuminatto/MVA_Crossover
from SignalLib.Signal import Signal
def test_instantiation():
    """Smoke test: a Signal can be constructed with no arguments."""
    _sig = Signal()
| StarcoderdataPython |
1772469 | <gh_stars>10-100
#------------------------------------------------------------------------------
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from .enaml_test_case import EnamlTestCase, required_method
class SelectionTestHelper(object):
    """ Helper mixin for selection model test cases.

    Converts between the toolkit's ModelIndex-based selection ranges and
    plain ((row, col), (row, col)) tuples so tests can compare selections
    with simple integers.
    """
    @property
    def widget(self):
        """ Get the current toolkit "widget", as it may change.
        """
        return self.component.abstract_obj.selection_model

    def index(self, row, column):
        """ Create an appropriate ModelIndex.
        """
        return self.item_model.create_index(row, column, self.item_model)

    def to_enaml_selection(self, pysel):
        """ Convert a selection list given with (row, col) tuples to a
        selection list with ModexIndexes.
        """
        esel = []
        for topleft, botright in pysel:
            esel.append((self.index(*topleft), self.index(*botright)))
        return esel

    def from_enaml_selection(self, esel):
        """ Convert an Enaml selection list with ModelIndexes to one
        given with (row, col) tuples for comparison purposes.
        """
        pysel = []
        for topleft, botright in esel:
            pysel.append(((topleft.row, topleft.column),
                          (botright.row, botright.column)))
        return pysel

    def set_py_selection(self, pysel, command):
        """ Set the selection using (int, int) indices.
        """
        esel = self.to_enaml_selection(pysel)
        self.component.set_selection(esel, command)

    def get_py_selection(self):
        """ Get the selection using (int, int) indices.
        """
        return self.from_enaml_selection(self.component.get_selection())

    #--------------------------------------------------------------------------
    # Abstract methods
    #--------------------------------------------------------------------------
    @required_method
    def get_tk_selection(self, widget):
        """ Return the widget's selection as a list of (topleft, botright)
        ranges with (row, col) indexes.
        """
        pass
class TestBaseSelectionModel(EnamlTestCase, SelectionTestHelper):
    """ Logic for testing selection models.

    Each test exercises one selection command against a 20x10 integer
    table and checks both the toolkit-level selection and the Enaml-level
    selection agree with the expected ranges.
    """

    def setUp(self):
        """ Set up tests for Enaml's BaseSelectionModel.
        """
        enaml_source = """
from enaml.stdlib.table_model import TableModel

import numpy as np

nrows = 20
ncols = 10
table = np.arange(nrows * ncols).reshape((nrows, ncols))
the_item_model = TableModel(table)

enamldef MainView(MainWindow):
    attr events
    TableView:
        name = 'table_view'
        item_model = the_item_model
        BaseSelectionModel:
            name = 'selection_model'
"""
        self.events = []
        self.view = self.parse_and_create(enaml_source, events=self.events)
        self.table_view = self.component_by_name(self.view, 'table_view')
        self.component = self.table_view.selection_model
        self.item_model = self.table_view.item_model

    def test_empty_initial_selection(self):
        """ No selection.
        """
        self.assertEqual(self.get_tk_selection(self.widget), [])
        self.assertEqual(self.get_py_selection(), [])

    def test_set_selection_clear_select(self):
        """ Test the 'clear_select' command.
        """
        pysel = [((1, 2), (3, 4))]
        self.set_py_selection(pysel, 'clear_select')
        self.assertEqual(self.get_tk_selection(self.widget), pysel)
        self.assertEqual(self.get_py_selection(), pysel)
        # a second clear_select replaces the previous selection entirely
        pysel = [((0, 1), (6,7))]
        self.set_py_selection(pysel, 'clear_select')
        self.assertEqual(self.get_tk_selection(self.widget), pysel)
        self.assertEqual(self.get_py_selection(), pysel)
        self.component.clear()
        self.assertEqual(self.get_tk_selection(self.widget), [])
        self.assertEqual(self.get_py_selection(), [])

    def test_set_selection_clear_select_rows(self):
        """ Test the ('clear_select', 'rows') command.
        """
        # selecting any cells in rows 1-3 expands to the full rows
        pysel = [((1, 2), (3, 4))]
        test_sel = [((1, 0), (3, 9))]
        self.set_py_selection(pysel, ('clear_select', 'rows'))
        self.assertEqual(self.get_tk_selection(self.widget), test_sel)
        self.assertEqual(self.get_py_selection(), test_sel)

    def test_set_selection_no_update(self):
        """ Test the 'no_update' command.
        """
        # no_update must never change the current selection
        pysel = [((1, 2), (3, 4))]
        self.set_py_selection(pysel, 'no_update')
        self.assertEqual(self.get_tk_selection(self.widget), [])
        self.assertEqual(self.get_py_selection(), [])
        new = [((0, 1), (6, 7))]
        self.set_py_selection(new, 'clear_select')
        self.assertEqual(self.get_tk_selection(self.widget), new)
        self.assertEqual(self.get_py_selection(), new)
        self.set_py_selection(pysel, 'no_update')
        self.assertEqual(self.get_tk_selection(self.widget), new)
        self.assertEqual(self.get_py_selection(), new)

    def test_set_selection_select(self):
        """ Test the 'select' command.
        """
        # 'select' adds to the existing selection instead of replacing it
        pysel = [((1, 2), (3, 4))]
        self.set_py_selection(pysel, 'select')
        self.assertEqual(self.get_tk_selection(self.widget), pysel)
        self.assertEqual(self.get_py_selection(), pysel)
        new = [((5, 1), (7, 5))]
        self.set_py_selection(new, 'select')
        self.assertEqual(self.get_tk_selection(self.widget), pysel+new)
        self.assertEqual(self.get_py_selection(), pysel+new)

    def test_set_selection_deselect(self):
        """ Test the 'deselect' command.
        """
        pysel = [((1, 2), (3, 4))]
        self.set_py_selection(pysel, 'clear_select')
        self.assertEqual(self.get_tk_selection(self.widget), pysel)
        self.assertEqual(self.get_py_selection(), pysel)
        # deselecting rows 2-4 leaves only row 1 of the original range
        new = [((2, 2), (4, 4))]
        remainder = [((1,2), (1,4))]
        self.set_py_selection(new, 'deselect')
        self.assertEqual(self.get_tk_selection(self.widget), remainder)
        self.assertEqual(self.get_py_selection(), remainder)

    def test_set_selection_toggle(self):
        """ Test the 'toggle' command.
        """
        pysel = [((1, 2), (3, 4))]
        self.set_py_selection(pysel, 'clear_select')
        self.assertEqual(self.get_tk_selection(self.widget), pysel)
        self.assertEqual(self.get_py_selection(), pysel)
        # toggling rows 2-4 deselects the overlap (2-3) and selects row 4
        new = [((2, 2), (4, 4))]
        remainder = [((1, 2), (1, 4)), ((4, 2), (4, 4))]
        self.set_py_selection(new, 'toggle')
        self.assertEqual(self.get_tk_selection(self.widget), remainder)
        self.assertEqual(self.get_py_selection(), remainder)
class TestRowSelectionModel(EnamlTestCase, SelectionTestHelper):
    """ Logic for testing RowSelectionModel
    """
    def setUp(self):
        """ Set up tests for Enaml's RowSelectionModel
        """
        # Enaml source for a 20x10 integer table backed by a TableModel,
        # with a RowSelectionModel attached to its TableView.
        enaml_source = """
from enaml.stdlib.table_model import TableModel
import numpy as np
nrows = 20
ncols = 10
table = np.arange(nrows * ncols).reshape((nrows, ncols))
the_item_model = TableModel(table)
enamldef MainView(MainWindow):
    attr events
    TableView:
        name = 'table_view'
        item_model = the_item_model
        RowSelectionModel:
            name = 'selection_model'
"""
        self.events = []
        self.view = self.parse_and_create(enaml_source, events=self.events)
        self.table_view = self.component_by_name(self.view, 'table_view')
        # The selection model under test and the item model it selects from.
        self.component = self.table_view.selection_model
        self.item_model = self.table_view.item_model

    def test_set_selected_rows(self):
        """ Test the selection of rows through the selected_rows trait.
        """
        self.assertEqual(self.component.selected_rows, [])
        self.component.selected_rows = [2, 3, 5, 6]
        # Each selected row spans all 10 columns (0 through 9).
        pysel = [((2, 0), (2, 9)), ((3, 0), (3, 9)), ((5, 0), (5, 9)), ((6, 0), (6, 9))]
        self.assertEqual(self.get_tk_selection(self.widget), pysel)
        self.assertEqual(self.get_py_selection(), pysel)
        # In-place mutation of the trait list must also update the widget.
        self.component.selected_rows.append(7)
        new = pysel + [((7, 0), (7, 9))]
        self.assertEqual(self.get_tk_selection(self.widget), new)
        self.assertEqual(self.get_py_selection(), new)
        del self.component.selected_rows[1]
        del new[1]
        self.assertEqual(self.get_tk_selection(self.widget), new)
        self.assertEqual(self.get_py_selection(), new)

    def test_get_selected_rows(self):
        """ Test that the selected_rows trait gets updated correctly when the
        selection is set elsewhere.
        """
        pysel = [((2, 0), (2, 9)), ((3, 0), (3, 9)), ((5, 0), (5, 9)), ((6, 0), (6, 9))]
        self.set_py_selection(pysel, ('clear_select', 'rows'))
        self.assertEqual(self.component.selected_rows, [2, 3, 5, 6])
| StarcoderdataPython |
1664074 | <filename>tests/setpoint.py
from Compass import Compass

# Open a session against the local Compass server and write one setpoint.
connection = Compass.connect('localhost', 'admin', 'newpoint')
Compass.setpoint(connection, "demodev1", "fi1", 2)
exit(0)
| StarcoderdataPython |
66938 | import torch
from . import networks
from os.path import join
from util.util import seg_accuracy, print_network
import pdb
class ClassifierModel:
    """ Class for training Model weights

    :args opt: structure containing configuration params
    e.g.,
    --dataset_mode -> classification / segmentation)
    --arch -> network type
    """

    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.is_train = opt.is_train
        # First configured GPU, or CPU when no GPU ids were given.
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        self.save_dir = join(opt.checkpoints_dir, opt.name)
        self.optimizer = None
        self.edge_features = None
        self.labels = None
        self.mesh = None
        self.soft_label = None
        self.loss = None
        #
        self.nclasses = opt.nclasses
        # load/define networks
        self.net = networks.define_classifier(opt.input_nc, opt.ncf, opt.ninput_edges, opt.nclasses, opt,
                                              self.gpu_ids, opt.arch, opt.init_type, opt.init_gain)
        self.net.train(self.is_train)
        self.criterion = networks.define_loss(opt).to(self.device)
        if self.is_train:
            self.optimizer = torch.optim.Adam(self.net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.scheduler = networks.get_scheduler(self.optimizer, opt)
            print_network(self.net)
        if not self.is_train or opt.continue_train:
            self.load_network(opt.which_epoch)

    def set_input(self, data):
        """Unpack one batch dict and stage its tensors on the model device.

        Expects numpy arrays under 'edge_features' and 'label' plus the
        mesh object(s) under 'mesh' ('soft_label' only for segmentation
        evaluation).
        """
        # pdb.set_trace()
        input_edge_features = torch.from_numpy(data['edge_features']).float()
        labels = torch.from_numpy(data['label']).long()
        # set inputs
        # Input features require grad only during training.
        self.edge_features = input_edge_features.to(self.device).requires_grad_(self.is_train)
        self.labels = labels.to(self.device)
        self.mesh = data['mesh']
        if self.opt.dataset_mode == 'segmentation' and not self.is_train:
            self.soft_label = torch.from_numpy(data['soft_label'])

    def forward(self):
        """Run the network on the staged edge features and mesh."""
        out = self.net(self.edge_features, self.mesh)
        return out

    def backward(self, out):
        """Compute the loss against the staged labels and backpropagate."""
        self.loss = self.criterion(out, self.labels)
        self.loss.backward()

    def optimize_parameters(self):
        """One training step: zero grads, forward, backward, optimizer step."""
        self.optimizer.zero_grad()
        out = self.forward()
        self.backward(out)
        self.optimizer.step()

    ##################
    def load_network(self, which_epoch):
        """load model from disk"""
        save_filename = '%s_net.pth' % which_epoch
        load_path = join(self.save_dir, save_filename)
        net = self.net
        # Unwrap DataParallel so the state-dict keys match the bare module.
        if isinstance(net, torch.nn.DataParallel):
            net = net.module
        print('loading the model from %s' % load_path)
        # PyTorch newer than 0.4 (e.g., built from
        # GitHub source), you can remove str() on self.device
        state_dict = torch.load(load_path, map_location=str(self.device))
        if hasattr(state_dict, '_metadata'):
            del state_dict._metadata
        net.load_state_dict(state_dict)

    def save_network(self, which_epoch):
        """save model to disk"""
        save_filename = '%s_net.pth' % (which_epoch)
        save_path = join(self.save_dir, save_filename)
        if len(self.gpu_ids) > 0 and torch.cuda.is_available():
            # Save from CPU for portability, then move the net back to GPU.
            # NOTE(review): assumes self.net is DataParallel-wrapped (uses
            # .module) whenever GPU ids are configured -- confirm.
            torch.save(self.net.module.cpu().state_dict(), save_path)
            self.net.cuda(self.gpu_ids[0])
        else:
            torch.save(self.net.cpu().state_dict(), save_path)

    def update_learning_rate(self):
        """update learning rate (called once every epoch)"""
        self.scheduler.step()
        lr = self.optimizer.param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

    def test(self):
        """tests model
        returns: number correct and total number
        """
        with torch.no_grad():
            out = self.forward()
            # compute number of correct
            pred_class = out.data.max(1)[1]
            label_class = self.labels
            self.export_segmentation(pred_class.cpu())
            correct = self.get_accuracy(pred_class, label_class)
        return correct, len(label_class)

    def get_accuracy(self, pred, labels):
        """computes accuracy for classification / segmentation """
        if self.opt.dataset_mode == 'classification':
            correct = pred.eq(labels).sum()
        elif self.opt.dataset_mode == 'segmentation':
            correct = seg_accuracy(pred, self.soft_label, self.mesh)
        # NOTE(review): any other dataset_mode leaves `correct` unbound and
        # raises UnboundLocalError on return.
        return correct

    def export_segmentation(self, pred_seg):
        """Write per-mesh segment predictions (segmentation mode only)."""
        if self.opt.dataset_mode == 'segmentation':
            for meshi, mesh in enumerate(self.mesh):
                mesh.export_segments(pred_seg[meshi, :])
| StarcoderdataPython |
66175 | <filename>common.py<gh_stars>10-100
from spacy.matcher import Matcher
def create_versioned(name):
    """Return spaCy Matcher patterns for *name*, with optional version.

    Three token patterns are produced: the bare name, the name fused with
    a version number in one token, and the name followed by a separate
    version token.
    """
    bare = [{'LOWER': name}]
    fused = [{'LOWER': {'REGEX': f'({name}\d+\.?\d*.?\d*)'}}]
    split = [{'LOWER': name}, {'TEXT': {'REGEX': '(\d+\.?\d*.?\d*)'}}]
    return [bare, fused, split]
def create_patterns():
    """Build the full list of spaCy Matcher patterns for language names.

    Combines the auto-generated versioned patterns (e.g. "python",
    "python3.8") for common languages with hand-written patterns for
    names needing special tokenization (C#, F#, Objective-C, Go, ...).
    """
    versioned_languages = ['ruby', 'php', 'python', 'perl', 'java', 'haskell',
                           'scala', 'c', 'cpp', 'matlab', 'bash', 'delphi']
    # Flatten the per-language pattern lists into one list of patterns.
    # (Replaces the former lambda-assigned `flatten`; PEP 8 / E731 advises
    # a def or a comprehension over assigning a lambda to a name.)
    versioned_patterns = [pattern
                          for lang in versioned_languages
                          for pattern in create_versioned(lang)]
    lang_patterns = [
        [{'LOWER': 'objective'}, {'IS_PUNCT': True, 'OP': '?'}, {'LOWER': 'c'}],
        [{'LOWER': 'objectivec'}],
        [{'LOWER': 'c'}, {'LOWER': '#'}],
        [{'LOWER': 'c'}, {'LOWER': 'sharp'}],
        [{'LOWER': 'c#'}],
        [{'LOWER': 'f'}, {'LOWER': '#'}],
        [{'LOWER': 'f'}, {'LOWER': 'sharp'}],
        [{'LOWER': 'f#'}],
        [{'LOWER': 'lisp'}],
        [{'LOWER': 'common'}, {'LOWER': 'lisp'}],
        # POS guard avoids matching the verb "go".
        [{'LOWER': 'go', 'POS': {'NOT_IN': ['VERB']}}],
        [{'LOWER': 'golang'}],
        [{'LOWER': 'html'}],
        [{'LOWER': 'css'}],
        [{'LOWER': 'sql'}],
        [{'LOWER': {'IN': ['js', 'javascript']}}],
        [{'LOWER': 'c++'}],
    ]
    return versioned_patterns + lang_patterns
| StarcoderdataPython |
167449 | <reponame>anayakoti/FirstSample<filename>ForLoopPractice2.py
# Print each character of the string on its own line.
text = "Sai Teja"
for ch in text:
    print(ch)
| StarcoderdataPython |
3397049 | import numpy as np
import healpy as hp
from astropy.utils.data import get_pkg_data_filename
try: # PySM >= 3.2.1
import pysm3.units as u
except ImportError:
import pysm.units as u
from .. import PrecomputedAlms
from astropy.tests.helper import assert_quantity_allclose
def test_precomputed_alms():
    """ Check an IQU simulation from precomputed alms against a saved map. """
    alm_file = get_pkg_data_filename(
        "data/Planck_bestfit_alm_seed_583_lmax_95_K_CMB.fits.zip"
    )
    expected_file = get_pkg_data_filename("data/map_nside_32_from_Planck_bestfit_alm_seed_583_K_CMB.fits.zip")
    nside = 32
    # Make an IQU sim
    component = PrecomputedAlms(
        alm_file,
        nside=nside,
        input_units="uK_CMB",
        has_polarization=True,
        #input_reference_frequency=148*u.GHz
    )
    equivalencies = u.cmb_equivalencies(148 * u.GHz)
    simulated_map = component.get_emission(148 * u.GHz).to(u.uK_CMB, equivalencies=equivalencies)
    expected_map = hp.read_map(expected_file, field=(0, 1, 2)) << u.uK_CMB
    assert simulated_map.shape[0] == 3
    assert_quantity_allclose(simulated_map, expected_map)
def test_precomputed_alms_clip():
    """ Same check as test_precomputed_alms, but feeding alms that extend to
    lmax=120: the component must clip them down for the comparison against
    the lmax=95 reference map to succeed.
    """
    # If clipping to lmax of 95 were not applied, this test would fail
    alm_file = get_pkg_data_filename(
        "data/Planck_bestfit_alm_seed_583_lmax_120_K_CMB.fits.zip"
    )
    expected_file = get_pkg_data_filename("data/map_nside_32_from_Planck_bestfit_alm_seed_583_K_CMB.fits.zip")
    nside = 32
    # Make an IQU sim
    component = PrecomputedAlms(
        alm_file,
        nside=nside,
        input_units="uK_CMB",
        has_polarization=True,
        #input_reference_frequency=148*u.GHz
    )
    equivalencies = u.cmb_equivalencies(148 * u.GHz)
    simulated_map = component.get_emission(148 * u.GHz).to(u.uK_CMB, equivalencies=equivalencies)
    expected_map = hp.read_map(expected_file, field=(0, 1, 2)) << u.uK_CMB
    assert simulated_map.shape[0] == 3
    assert_quantity_allclose(simulated_map, expected_map)
| StarcoderdataPython |
1787756 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import rc
from matplotlib import cm
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
class graph_defaults:
    """Plotting helpers for a PDE solution sampled on grids ``x`` and ``t``."""

    def __init__(self, x, t, data):
        # data is indexed as data[time_index, space_index] below, so it is
        # expected to have shape (len(t), len(x)).
        self.x = x
        self.t = t
        self.data = data

    def graph_distance(self, distance, name='stability'):
        """Plot ``distance`` against time and save the figure as ``name``.

        Illustrates continuity with respect to the initial conditions:
        ``distance`` holds the norm between two solutions at each time.
        """
        fig, ax = plt.subplots(figsize=(10, 10))
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
        plt.plot(self.t, distance)
        plt.xlim(self.t[0], self.t[len(self.t) - 1])
        plt.xlabel(r'\textit{time} (t)', fontsize=15)
        plt.ylabel(r'\textbf{$\| \Psi_{t}^{x} - \Psi_{t}^{y} \|_{\mathcal{L}(\mathcal{H}, \mu)^{2}}^{2}$}', fontsize=20)
        plt.title('Continuity with respect to the initial conditions',
                  fontsize=16, color='black')
        ax.grid()
        fig.savefig(name)

    def graph_time(self, times, data_approx, name='Approximation_t='):
        """For each time in ``times``, plot the stored solution against
        ``data_approx`` at that time, saving one figure per time.

        Each element of ``times`` must be a member of ``self.t`` (it is
        located with ``self.t.index``), so ``self.t`` must be a list here.
        """
        for i in times:
            fig, ax = plt.subplots()
            plt.rc('text', usetex=True)
            plt.rc('font', family='serif')
            plt.xlabel(r'\textit{x}', fontsize=15)
            plt.ylabel(r'\textit{U} (x)', fontsize=15)
            plt.title('Approximation with chebycheb polynomials',
                      fontsize=16, color='black')
            ax.grid()
            ax.plot(self.x, self.data[self.t.index(i), :], 'r', label='Approximation')
            ax.plot(self.x, data_approx[self.t.index(i), :], 'b', label=r'\textit{U} (x)')
            ax.legend()
            fig.savefig(name + str(i))

    def graph_3d(self, name='solution'):
        """Render ``self.data`` as a 3D surface over the (x, t) grid and
        save the figure as ``name``.
        """
        X, Y = np.meshgrid(self.x, self.t)
        fig1 = pl.figure(1)
        ax1 = Axes3D(fig1)
        ax1.plot_surface(X, Y, self.data, cmap='coolwarm')
        ax1.set_xlabel('x', fontsize=20)
        ax1.set_ylabel(r'\textit{time} (t)', fontsize=20)
        ax1.set_zlabel(r'\textit{U} (t, x)', fontsize=20)
        fig1.savefig(name)
| StarcoderdataPython |
class Result:
    """The nodes matched for a user query, plus the query keywords."""

    def __init__(self, user_query, nodes, keywords):
        self.user_query = user_query
        self.nodes = nodes
        self.keywords = keywords

    def __len__(self):
        return len(self.nodes)

    def get_node(self, index):
        return self.nodes[index]

    # noinspection PyMethodMayBeStatic
    def _get_nice_content(self, node):
        """Normalize a node's content: collapse internal whitespace and fold
        single-word lines into the preceding line."""
        lines = list(node.get_content())
        pos = 0
        while pos < len(lines):
            stripped = lines[pos].strip()
            lines[pos] = " ".join(lines[pos].split())
            if ' ' not in stripped and pos > 0:
                # Glue a lone word onto the previous line; the index stays
                # put because this entry was just removed.
                lines[pos - 1] += ' ' + stripped
                del lines[pos]
            else:
                pos += 1
        return lines

    def get_context(self, index):
        """Return a short snippet of the node's content centered on the
        first keyword hit, or a fallback line when nothing matches."""
        node = self.nodes[index]
        lines = self._get_nice_content(node)
        content = " ".join(lines)
        if len(content) == 0:
            return ''
        hit = -1
        for word in self.keywords:
            hit = content.lower().find(word.lower())
            if hit >= 0:
                break
        if hit == -1:
            # No keyword found: fall back to the first line after the title.
            return lines[1] if len(lines) > 1 else ''
        window = 40
        start = max(hit - window, 0)
        end = min(len(content), hit + window)
        prefix = "..." if start > 0 else ''
        return prefix + content[start:end]
| StarcoderdataPython |
3351855 | <reponame>tyrylu/feel-the-streets<gh_stars>1-10
import os
import datetime
import logging
from PySide2.QtCore import QThread, Signal
from osm_db import AreaDatabase, CHANGE_REMOVE, CHANGE_REDOWNLOAD_DATABASE
from .semantic_changelog_generator import get_change_description
log = logging.getLogger(__name__)
class ChangesApplier(QThread):
    """Background thread that applies pending OSM changes to an area DB.

    Emits will_process_change(n) before each change and, if anything was
    applied, changes_applied(changelog_path) after the commit.
    NOTE(review): changes_applied is declared Signal(str) but may be
    emitted with None when no changelog was requested -- confirm handlers.
    """
    will_process_change = Signal(int)
    changes_applied = Signal(str)

    def __init__(self, area, retriever, generate_changelog):
        super().__init__()
        self._area = area
        self._retriever = retriever
        self._generate_changelog = generate_changelog

    def run(self):
        """Apply all new changes inside one transaction, optionally writing
        a human-readable changelog alongside."""
        db = AreaDatabase.open_existing(self._area, server_side=False)
        db.begin()
        if self._generate_changelog:
            # Changelog file named by area and timestamp; ":" replaced so
            # the filename is valid on Windows.
            changelog_path = os.path.join(os.path.dirname(AreaDatabase.path_for(12345, server_side=False)), "..", "changelogs", "{0}_{1}.txt".format(self._area, datetime.datetime.now().isoformat().replace(":", "_")))
            os.makedirs(os.path.dirname(changelog_path), exist_ok=True)
            changelog = open(changelog_path, "w", encoding="UTF-8")
        else:
            changelog_path = None
        changes_applyed = False
        for nth, change in enumerate(self._retriever.new_changes_in(self._area)):
            self.will_process_change.emit(nth + 1)
            entity = None
            # We must retrieve the entity before deleting it so we can produce the display representation of it.
            if self._generate_changelog and change.type is CHANGE_REMOVE:
                entity = db.get_entity(change.osm_id)
            db.apply_change(change)
            changes_applyed = True
            if self._generate_changelog:
                if not entity:
                    entity = db.get_entity(change.osm_id)
                if not entity:
                    log.error("Local database is missing entity with osm id %s, not generating changelog description for that one.", change.osm_id)
                    continue
                changelog.write(get_change_description(change, entity))
        db.apply_deferred_relationship_additions()
        db.commit()
        if self._generate_changelog:
            changelog.close()
        # Acknowledge the batch only after a successful commit so changes
        # are not lost if applying them fails.
        self._retriever.acknowledge_changes_for(self._area)
        self._retriever.close()
        if changes_applyed:
            self.changes_applied.emit(changelog_path)
| StarcoderdataPython |
1798340 | # Generated by Django 3.2.12 on 2022-03-01 12:27
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter the default of User.email_preff_time.

    NOTE(review): the default is the literal time frozen at
    makemigrations time (17:57:49.955129), not a per-row current time --
    confirm that is intended.
    """

    dependencies = [
        ('users', '0003_auto_20220301_1750'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='email_preff_time',
            field=models.TimeField(default=datetime.time(17, 57, 49, 955129)),
        ),
    ]
| StarcoderdataPython |
1650855 | <filename>python/name2taxid.py
#!/usr/bin/env python3
from taxadb.names import SciName
import fileinput

# Map each scientific name read from stdin/argv files to its taxid.
names_db = SciName()
for raw_line in fileinput.input():
    print(names_db.taxid(raw_line.rstrip()))
| StarcoderdataPython |
4805494 | <filename>tracker/migrations/0005_device_descriptions.py
# Generated by Django 3.2.7 on 2021-10-05 11:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional free-text ``descriptions`` field to tracker.Device."""

    dependencies = [
        ('tracker', '0004_device_cellnumber'),
    ]

    operations = [
        migrations.AddField(
            model_name='device',
            name='descriptions',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| StarcoderdataPython |
1752406 | import asyncio
import discord
import random
import itertools
from typing import Iterator
from async_timeout import timeout
from discord.ext.commands import Context
from discord import Guild, TextChannel
from app.ext.performance import run_in_threadpool
from .YTDLSource import YTDLSource
from app.controller.logger import Logger
class SongQueue(asyncio.Queue):
    """An asyncio.Queue with list-like indexing, shuffling and removal.

    Operates directly on the queue's internal ``_queue`` storage, so the
    extra operations are synchronous and do not interact with waiters.
    """
    _queue: list

    def __getitem__(self, item) -> list:
        if isinstance(item, slice):
            # Slices are materialized through islice over the backing store.
            return list(itertools.islice(self._queue, item.start, item.stop, item.step))
        return self._queue[item]

    def __iter__(self) -> Iterator:
        return iter(self._queue)

    def __len__(self) -> int:
        return self.qsize()

    def clear(self) -> None:
        self._queue.clear()

    def shuffle(self) -> None:
        random.shuffle(self._queue)

    def remove(self, index: int) -> None:
        del self._queue[index]
class Player:
    """Base class for Music Player

    One Player exists per guild; the background player_loop task pulls
    sources off the queue and plays them through the guild voice client.
    """
    __slots__ = (
        "bot",
        "_guild",
        "_channel",
        "_cog",
        "queue",
        "next",
        "current",
        "np",
        "repeat",
        "_volume",
        "_loop",
        "logger",
        "status"
    )

    def __init__(self, ctx: Context):
        self.logger = Logger.generate_log()
        self.status: dict = {}
        self.bot = ctx.bot
        self._guild: Guild = ctx.guild
        self._channel: TextChannel = ctx.channel
        self._cog = ctx.cog
        # Pending sources; player_loop consumes them one at a time.
        self.queue: SongQueue[list] = SongQueue()
        # Set from the voice client's after-callback when a track finishes.
        self.next = asyncio.Event()
        self._volume = 0.5
        # np holds the "now playing" message; current the active source.
        self.np = None
        self.current = None
        self._loop = False
        ctx.bot.loop.create_task(self.player_loop())

    @property
    def loop(self):
        # Whether the current track is re-queued after it finishes.
        return self._loop

    @loop.setter
    def loop(self, value: bool):
        self._loop = value

    @property
    def volume(self):
        return self._volume

    @volume.setter
    def volume(self, volume: float):
        self._volume = volume

    @property
    def is_playing(self):
        # Truthy only while a now-playing message and a current source exist.
        return self.np and self.current

    @staticmethod
    @Logger.set()
    async def create_embed(source, duration, requester, current, thumbnail):
        """Build the "Now playing" embed for the given source/track info."""
        embed = (
            discord.Embed(
                title="Now playing",
                description=f"```css\n{source.title}\n```",
                color=discord.Color.blurple(),
            )
            .add_field(name="Duration", value=duration)
            .add_field(name="Requested by", value=requester)
            .add_field(
                name="Uploader",
                value=f"[{current.uploader}]({current.uploader_url})",
            )
            .add_field(
                name="URL", value=f"[Click]({current.web_url})"
            )
            .set_thumbnail(url=thumbnail)
        )
        return embed

    async def text_to_speech_loop(self, source):
        """Play a TTS source directly on the voice client, skipping the queue."""
        self._guild.voice_client.play(source)

    @Logger.set()
    async def player_loop(self):
        """Main loop: wait for queued sources, play them, handle repeat.

        Destroys the player after 5 idle minutes with an empty queue.
        """
        await self.bot.wait_until_ready()
        while not self.bot.is_closed():
            self.next.clear()
            try:
                async with timeout(300):  # 5 minutes
                    source = await self.queue.get()
            except asyncio.TimeoutError:
                if self in self._cog.players.values():
                    return self.destroy(self._guild)
                return
            source.volume = self.volume
            self.current = source
            try:
                # play() is started off the event loop; its after-callback
                # releases self.next from the audio thread.
                await run_in_threadpool(lambda: self._guild.voice_client.play(
                    source,
                    after=lambda _: self.bot.loop.call_soon_threadsafe(self.next.set),
                )
                )
            except TypeError as NoneTypeError:
                # NOTE(review): presumably voice_client is None here (e.g.
                # disconnected mid-play) -- confirm; the track is skipped.
                self.logger.info(NoneTypeError)
                pass
            embed = await self.create_embed(
                source=source,
                duration=self.current.duration,
                requester=self.current.requester,
                current=self.current,
                thumbnail=self.current.thumbnail
            )
            self.np = await self._channel.send(embed=embed)
            await self.next.wait()
            if self.loop:
                # Re-fetch the finished track and put it back at the front.
                ctx = await self.bot.get_context(self.np)
                ctx.author = source.requester
                search = source.web_url
                try:
                    source_repeat = await YTDLSource.Search(
                        ctx, search, download=False, msg=False
                    )
                except Exception as e:
                    self.logger.error(f"There was an error processing your song. {e}")
                    await self._channel.send(
                        f"There was an error processing your song.\n ```css\n[{e}]\n```"
                    )
                    continue
                if self.loop:
                    self.queue._queue.appendleft(source_repeat)
                else:
                    await self.queue.put(source_repeat)
            try:
                await self.np.delete()
            except discord.HTTPException as err:
                self.logger.error(err)

    @Logger.set()
    async def stop(self):
        """Clear the queue and disconnect from voice if currently playing."""
        self.queue.clear()
        if self.np:
            await self._guild.voice_client.disconnect()
            self.np = None

    @Logger.set()
    def destroy(self, guild):
        # Disconnect and Cleanup
        return self.bot.loop.create_task(self._cog.cleanup(guild))
| StarcoderdataPython |
13001 | """Wrapper for pygame, which exports the PSP Python API on non-PSP systems."""
__author__ = "<NAME>, <<EMAIL>>"
import pygame
pygame.init()

# Current mixer volumes in the PSP API's 0-255 range; mapped onto pygame's
# 0.0-1.0 scale by the setters and playback classes below.
_vol_music = 255
_vol_sound = 255
def setMusicVolume(vol):
    """Set the music volume (0-255, PSP convention) and apply it to pygame.

    Out-of-range values are silently ignored.
    """
    global _vol_music
    if 0 <= vol <= 255:
        _vol_music = vol
        pygame.mixer.music.set_volume(_vol_music / 255.0)
def setSndFxVolume(vol):
    """Record the sound-effect volume (0-255, PSP convention).

    The value is only stored here; Sound.start() reads it at play time.
    Out-of-range values are silently ignored.
    """
    global _vol_sound
    if 0 <= vol <= 255:
        _vol_sound = vol
class Music:
    """Background music track, wrapping pygame.mixer.music.

    NOTE(review): ``maxchan`` is accepted (presumably for PSP API
    signature compatibility) but is not used by this pygame backend.
    """
    def __init__(self, filename, maxchan=128, loop=False):
        self._loop = loop
        pygame.mixer.music.load(filename)
        # Apply the current module-level music volume at load time.
        pygame.mixer.music.set_volume(_vol_music / 255.0)

    def start(self):
        if self._loop:
            # -1 loops the music indefinitely.
            pygame.mixer.music.play(-1)
        else:
            pygame.mixer.music.play()

    def stop(self):
        pygame.mixer.music.stop()
class Sound:
    """A short sound effect, wrapping pygame.mixer.Sound."""

    def __init__(self, filename):
        self._snd = pygame.mixer.Sound(filename)

    def start(self):
        # Re-read the module volume at every play so later setSndFxVolume
        # calls still take effect on this sound.
        self._snd.set_volume(_vol_sound / 255.0)
        self._snd.play()
| StarcoderdataPython |
110489 | <filename>aiokinesis/utils.py<gh_stars>1-10
import asyncio
from heapq import heappush
from time import time
def rate_limit_per_rolling_second(requests_per_rolling_second):
    """Decorator factory that rate-limits an async *method* per instance.

    Allows at most ``requests_per_rolling_second`` calls within any rolling
    one-second window. Call timestamps are tracked on the bound instance
    (``self._request_times``), so each instance gets its own budget. When
    the window is full, the call sleeps just long enough for the oldest
    request to age out before proceeding.
    """
    def outer_wrapper(f):
        async def inner_wrapper(self, *args, **kwargs):
            # Lazily create the per-instance timestamp store.
            # (The previous ``assert isinstance(self, object)`` guard was a
            # no-op -- every value is an instance of ``object``, and asserts
            # are stripped under ``python -O`` -- so it has been removed.)
            if not hasattr(self, '_request_times'):
                self._request_times = []
            # Remove timestamps older than one second; the list stays
            # sorted because entries are only appended in time order.
            current_time = time()
            self._request_times = [
                t
                for t in self._request_times
                if current_time - t < 1
            ]
            if len(self._request_times) >= requests_per_rolling_second:
                oldest_request_time = self._request_times[0]
                # Sleep exactly until the oldest request leaves the window.
                await asyncio.sleep(1 - current_time + oldest_request_time)
            heappush(self._request_times, time())
            return await f(self, *args, **kwargs)
        return inner_wrapper
    return outer_wrapper
| StarcoderdataPython |
3218697 | <filename>archive/do_InFoV_scan2.py
import numpy as np
import os
from astropy.table import Table
from astropy.io import fits
from numba import jit, njit, prange
from scipy import interpolate
from math import erf
import healpy as hp
import pandas as pd
import argparse
import logging, traceback
from copy import copy, deepcopy
from StructFunc import get_full_struct_manager
from StructClasses import Swift_Structure, Swift_Structure_Manager
from Materials import PB, TI
from Polygons import Polygon2D, Box_Polygon
from flux_models import Plaw_Flux, Cutoff_Plaw_Flux, Band_Flux
from config import solid_angle_dpi_fname, rt_dir, fp_dir, bright_source_table_fname
from logllh_ebins_funcs import log_pois_prob, get_eflux, get_gammaln
from event2dpi_funcs import det2dpis, mask_detxy
from models import Model
from minimizers import NLLH_DualAnnealingMin, NLLH_ScipyMinimize, NLLH_ScipyMinimize_Wjacob
from coord_conv_funcs import convert_radec2imxy, convert_imxy2radec,\
convert_radec2batxyz, convert_radec2thetaphi
from ray_trace_funcs import RayTraces, FootPrints
from hp_funcs import ang_sep
from do_bkg_estimation_wPSs_mp import get_srcs_infov
def cli():
    """Parse command-line arguments for the in-FoV scan.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with defaults applied.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--evfname', type=str,
                        help="Event data file",
                        default=None)
    parser.add_argument('--dmask', type=str,
                        help="Detmask fname",
                        default=None)
    parser.add_argument('--attfname', type=str,
                        help="attitude fname",
                        default=None)
    parser.add_argument('--job_id', type=int,
                        help="ID to tell it what seeds to do",
                        default=-1)
    parser.add_argument('--Njobs', type=int,
                        help="Total number of jobs submitted",
                        default=64)
    parser.add_argument('--work_dir', type=str,
                        help="work directory",
                        default=None)
    parser.add_argument('--log_fname', type=str,
                        help="log file name",
                        default='in_fov_scan')
    parser.add_argument('--Nside', type=int,
                        help="Healpix Nside",
                        default=2**4)
    parser.add_argument('--trig_time', type=float,
                        help="Trigger time",
                        default=None)
    parser.add_argument('--Ntdbls', type=int,
                        help="Number of times to double duration size",
                        default=3)
    # Fixed help strings: --min_dur previously said "Trigger time" and
    # --max_dt repeated --min_dt's text (copy-paste errors).
    parser.add_argument('--min_dur', type=float,
                        help="Minimum (shortest) duration to scan",
                        default=0.256)
    parser.add_argument('--min_dt', type=float,
                        help="Min time offset from trigger time to start at",
                        default=1.25)
    parser.add_argument('--max_dt', type=float,
                        help="Max time offset from trigger time to go up to",
                        default=3.75)
    parser.add_argument('--bkg_dt0', type=float,
                        help="Time offset from trigger time to start bkg at",
                        default=6.0)
    parser.add_argument('--bkg_dur', type=float,
                        help="Duration to use for bkg",
                        default=4.0)
    args = parser.parse_args()
    return args
def detxy2batxy(detx, dety):
    """Convert detector pixel indices to BAT plane coordinates.

    Each cell is 0.42 units wide; the origin is shifted toward the center
    of the detector plane. (Note the offsets use 285/172, one less than
    the 286x173 grid extent, matching the original convention.)
    """
    half_span_x = (285 * .42) / 2
    half_span_y = (172 * .42) / 2
    return 0.42 * detx - half_span_x, 0.42 * dety - half_span_y
def batxy2detxy(batx, baty):
    """Inverse of detxy2batxy: BAT plane coordinates back to pixel indices."""
    offset_x = (285 * .42) / 2
    offset_y = (172 * .42) / 2
    return (batx + offset_x) / 0.42, (baty + offset_y) / 0.42
def bldmask2batxys(bl_dmask):
    """Return BAT (x, y) coordinates of every True entry in a detector mask.

    The mask is indexed (dety, detx), matching np.where's row/column order.
    """
    dety_inds, detx_inds = np.where(bl_dmask)
    return detxy2batxy(detx_inds, dety_inds)
@njit(cache=True)
def shift_pha_bins(spec, pha_bins0, pha_bins1, new_pha_bins0, new_pha_bins1):
    """Rebin a spectrum from edges (pha_bins0, pha_bins1) onto new edges.

    Counts from old bins fully inside a new bin are summed; an old bin
    straddling a new edge contributes the fraction of its width that falls
    inside. Assumes both binnings are sorted -- TODO confirm.
    NOTE(review): the output dtype follows new_pha_bins0 (zeros_like), not
    ``spec`` -- confirm that is intended.
    """
    new_spec = np.zeros_like(new_pha_bins0)
    for i in range(len(new_spec)):
        e0 = new_pha_bins0[i]
        e1 = new_pha_bins1[i]
        # Old bins fully contained in [e0, e1].
        bl = (pha_bins0>=e0)&(pha_bins1<=e1)
        new_spec[i] += np.sum(spec[bl])
        # Old bin straddling the lower edge: add its fractional overlap.
        bl = (pha_bins0<e0)&(pha_bins1>e0)
        if np.sum(bl) > 0:
            ind = np.where(bl)[0][0]
            dE = pha_bins1[ind] - pha_bins0[ind]
            frac_in_bin = (pha_bins1[ind] - e0)/dE
            new_spec[i] += frac_in_bin*spec[ind]
        # Old bin straddling the upper edge: add its fractional overlap.
        bl = (pha_bins0<e1)&(pha_bins1>e1)
        if np.sum(bl) > 0:
            ind = np.where(bl)[0][0]
            dE = pha_bins1[ind] - pha_bins0[ind]
            frac_in_bin = (e1 - pha_bins0[ind])/dE
            new_spec[i] += frac_in_bin*spec[ind]
    return new_spec
@njit(cache=True)
def shift_flor_dpi_pha_bins(flor_dpi, pha_bins0, pha_bins1, new_pha_bins0, new_pha_bins1):
    """Rebin every (detector, photon-energy) spectrum of a fluorescence DPI.

    flor_dpi has shape (Ndets, NphotonEs, Nphabins); each last-axis
    spectrum is rebinned onto the new PHA edges with shift_pha_bins.
    """
    Nphabins_new = new_pha_bins0.size
    Ndets = flor_dpi.shape[0]
    NphotonEs = flor_dpi.shape[1]
    new_shp = (Ndets,NphotonEs,Nphabins_new)
    new_flor_dpi = np.zeros(new_shp)
    for i in range(Ndets):
        for j in range(NphotonEs):
            new_flor_dpi[i,j] += shift_pha_bins(flor_dpi[i,j], pha_bins0, pha_bins1,\
                                                new_pha_bins0, new_pha_bins1)
    return new_flor_dpi
def shift_resp_tab_pha_bins(resp_tab, pha_bins0, pha_bins1, new_pha_bins0, new_pha_bins1):
    """Rebin every response column of an astropy response table onto new
    PHA bin edges, copying the photon-energy grid columns unchanged.

    Columns whose name contains 'ENERG' are copied as-is; every other
    column is treated as an (NphotonEs, Nphabins) response matrix and is
    rebinned row-by-row with shift_pha_bins.
    """
    new_tab = Table()
    new_tab['ENERG_LO'] = np.copy(resp_tab['ENERG_LO'])
    new_tab['ENERG_HI'] = np.copy(resp_tab['ENERG_HI'])
    NphotonEs = len(resp_tab['ENERG_LO'])
    for cname in resp_tab.colnames:
        if 'ENERG' in cname:
            continue
        new_resp = np.zeros((NphotonEs, len(new_pha_bins0)))
        for i in range(NphotonEs):
            # np.float was removed in NumPy 1.24; it was an alias for the
            # builtin float (i.e. float64), so np.float64 is the exact
            # drop-in replacement.
            new_resp[i] += shift_pha_bins(resp_tab[cname][i].astype(np.float64),
                                          pha_bins0.astype(np.float64),
                                          pha_bins1.astype(np.float64),
                                          new_pha_bins0.astype(np.float64),
                                          new_pha_bins1.astype(np.float64))
        new_tab[cname] = new_resp
    return new_tab
def get_dist2(x0, y0, z0, x1, y1, z1):
    """Squared Euclidean distance between (x0, y0, z0) and (x1, y1, z1)."""
    dx = x1 - x0
    dy = y1 - y0
    dz = z1 - z0
    return dx**2 + dy**2 + dz**2
def get_dist(x0, y0, z0, x1, y1, z1):
    """Euclidean distance: square root of get_dist2."""
    squared = get_dist2(x0, y0, z0, x1, y1, z1)
    return np.sqrt(squared)
def get_dist_wts(x0, y0, z0, x1, y1, z1):
    """Inverse-square-distance weights, normalized to sum to one."""
    inv_sq = 1. / get_dist2(x0, y0, z0, x1, y1, z1)
    return inv_sq / np.sum(inv_sq)
def get_sa_divA(x0, y0, z0, x1, y1, z1):
    """Solid angle per unit area: |dz| / r^3 for the separation vector."""
    r_cubed = get_dist2(x0, y0, z0, x1, y1, z1)**1.5
    return np.abs(z1 - z0) / r_cubed
def get_sa_wts(x0, y0, z0, x1, y1, z1):
    """Solid-angle weights over the input points, normalized to sum to one."""
    raw = get_sa_divA(x0, y0, z0, x1, y1, z1)
    return raw / np.sum(raw)
class Comp_Resp_Obj(object):
    """Interpolates structure transmission onto detector positions.

    Transmission is evaluated on a coarse grid of "comparison" points
    supplied by ``struct4comp`` and combined onto each detector using
    solid-angle weights over the comparison points within ``dmax``.
    """

    def __init__(self, batxs, batys, batzs, struct4comp):
        self.ndets = len(batxs)
        self.batxs = batxs
        self.batys = batys
        self.batzs = batzs
        # Number of photon energies handled by the structure object.
        self.Ne = struct4comp.Ne
        self.struct_obj = struct4comp
        self.ncomp_pnts = len(self.struct_obj.batxs)
        self.comp_batxs = self.struct_obj.batxs
        self.comp_batys = self.struct_obj.batys
        self.comp_batzs = self.struct_obj.batzs
        self.calc_inds_wts4comp_dets()

    def calc_inds_wts4comp_dets(self, dmax=16):
        """For each detector, cache the comparison points within ``dmax``
        and their normalized solid-angle weights."""
        self.wts_list = []
        self.inds_list = []
        for i in range(self.ndets):
            dists = get_dist(self.comp_batxs, self.comp_batys, self.comp_batzs,\
                             self.batxs[i], self.batys[i], self.batzs[i])
            bl = (dists<=dmax)
            wts = get_sa_wts(self.comp_batxs[bl], self.comp_batys[bl],\
                             self.comp_batzs[bl], self.batxs[i],\
                             self.batys[i], self.batzs[i])
            inds = np.where(bl)[0]
            self.wts_list.append(wts)
            self.inds_list.append(inds)

    def set_theta_phi(self, theta, phi):
        """Point the structure model at (theta, phi) and refresh transmission."""
        self.struct_obj.set_theta_phi(theta, phi)
        self.struct_obj.calc_tot_rhomu_dist()
        self.calc_trans()

    def calc_trans(self):
        """Compute per-detector transmission as the weighted sum of the
        nearby comparison-point transmissions."""
        self.trans = np.zeros((self.ndets, self.Ne))
        self.comp_trans = np.zeros((self.ncomp_pnts, self.Ne))
        self.comp_trans[:self.ncomp_pnts] += self.struct_obj.get_trans()
        # Python 2 debug prints left in place.
        print np.shape(self.trans[0]), np.shape(self.wts_list[0]),\
            np.shape(self.comp_trans[self.inds_list[0],:])
        print np.shape(np.sum(self.comp_trans[self.inds_list[0],:]*self.wts_list[0][:,np.newaxis],axis=0))
        for i in range(self.ndets):
            self.trans[i] += np.sum(self.comp_trans[self.inds_list[i],:]*self.wts_list[i][:,np.newaxis],axis=0)

    def get_trans(self):
        # Per-detector transmission array of shape (ndets, Ne).
        return self.trans
def get_dual_struct_obj(Ephotons):
    """Build a structure manager sampled at points inside the dual modules.

    Lays out the dual-module centers in two mirrored banks (8 columns of
    a 2x4 sub-grid each), places 8 sample points per module (a 2x2x2 grid
    at half the box half-widths), and attaches those points to the full
    structure manager evaluated at ``Ephotons``.
    """
    dual_xs = []
    dual_ys = []
    # Top bank of modules.
    for bi in range(8):
        x_b = -52.92 + bi*15.12
        y_b = 23.555
        for i in range(2):
            x = x_b - 3.78 + i*7.56
            for j in range(4):
                y = y_b - 18.935 + j*9.24
                dual_xs.append(x)
                dual_ys.append(y)
    # Bottom bank: offsets mirrored in both x and y.
    for bi in range(8):
        x_b = -52.92 + bi*15.12
        y_b = -23.555
        for i in range(2):
            x = x_b -(- 3.78 + i*7.56)
            for j in range(4):
                y = y_b -(- 18.935 + j*9.24)
                dual_xs.append(x)
                dual_ys.append(y)
    dual_xs = np.array(dual_xs)
    dual_ys = np.array(dual_ys)
    print len(dual_xs), len(dual_ys)
    # Dual electronics box geometry in the BAT frame.
    BATZ_offset = 35.799
    dual_elec_x_halfwidth = 3.55
    dual_elec_y_halfwidth = 4.41
    # NOTE(review): dual_elec_z0/z1 are computed but never used below.
    dual_elec_z0 = -3.725 -32.612 + BATZ_offset + 1.15 - 1.06 - 1.865
    dual_elec_z1 = -3.725 -32.612 + BATZ_offset + 1.15 - 1.06 + 1.865
    dual_elec_zmid = -3.725 -32.612 + BATZ_offset + 1.15 - 1.06
    dual_elec_z_halfwidth = 1.865
    # for each dual lets do 8 pnts (+/- x_hw/2, +/- y_hw/2, +/- z_hw/2)
    batxs4duals = []
    batys4duals = []
    batzs4duals = []
    Nduals = len(dual_xs)
    for ii in range(Nduals):
        dualx = dual_xs[ii]
        dualy = dual_ys[ii]
        for i in range(2):
            x = dualx - dual_elec_x_halfwidth/2. + i*dual_elec_x_halfwidth
            for j in range(2):
                y = dualy - dual_elec_y_halfwidth/2. + j*dual_elec_y_halfwidth
                for k in range(2):
                    z = dual_elec_zmid - dual_elec_z_halfwidth/2. + k*dual_elec_z_halfwidth
                    batxs4duals.append(x)
                    batys4duals.append(y)
                    batzs4duals.append(z)
    batxs4duals = np.array(batxs4duals)
    batys4duals = np.array(batys4duals)
    batzs4duals = np.array(batzs4duals)
    print len(batxs4duals)
    dual_struct_obj = get_full_struct_manager(Es=Ephotons)
    dual_struct_obj.set_batxyzs(batxs4duals, batys4duals, batzs4duals)
    return dual_struct_obj
def get_fixture_struct():
    """Assemble the eight titanium fixture plates into a structure manager.

    Each fixture is a titanium Box_Polygon at a fixed position, wrapped in
    a Swift_Structure and collected into one Swift_Structure_Manager so
    their combined attenuation can be evaluated together.
    """
    # Half-dimensions [x, y, z] and centers, grouped by matching pairs.
    fixture_half_dims01 = [11.656,6.944,0.477]
    fixture_pos0 = (0.0, 53.578, 35.799 +98.489-32.612)
    fixture_pos1 = (0.0, -53.578, 35.799 +98.489-32.612)
    fixture_half_dims23 = [5.95, 5.95, 0.477]
    fixture_pos2 = (114.974, 53.822, 35.799 +98.489-32.612)
    fixture_pos3 = (-114.974, 53.822, 35.799 +98.489-32.612)
    fixture_half_dims45 = [6.198, 6.198, 0.477]
    fixture_pos4 = (-59.448, -53.518, 35.799 +98.489-32.612)
    fixture_pos5 = (59.448, -53.518, 35.799 +98.489-32.612)
    fixture_half_dims67 = [6.942, 6.2, 0.477]
    fixture_pos6 = (113.85, 1.984, 35.799 +98.489-32.612)
    fixture_pos7 = (-113.85, 1.984, 35.799 +98.489-32.612)
    fixture_box0 = Box_Polygon(fixture_half_dims01[0], fixture_half_dims01[1],\
                               fixture_half_dims01[2], np.array(fixture_pos0))
    fixture_box1 = Box_Polygon(fixture_half_dims01[0], fixture_half_dims01[1],\
                               fixture_half_dims01[2], np.array(fixture_pos1))
    fixture_box2 = Box_Polygon(fixture_half_dims23[0], fixture_half_dims23[1],\
                               fixture_half_dims23[2], np.array(fixture_pos2))
    fixture_box3 = Box_Polygon(fixture_half_dims23[0], fixture_half_dims23[1],\
                               fixture_half_dims23[2], np.array(fixture_pos3))
    fixture_box4 = Box_Polygon(fixture_half_dims45[0], fixture_half_dims45[1],\
                               fixture_half_dims45[2], np.array(fixture_pos4))
    fixture_box5 = Box_Polygon(fixture_half_dims45[0], fixture_half_dims45[1],\
                               fixture_half_dims45[2], np.array(fixture_pos5))
    fixture_box6 = Box_Polygon(fixture_half_dims67[0], fixture_half_dims67[1],\
                               fixture_half_dims67[2], np.array(fixture_pos6))
    fixture_box7 = Box_Polygon(fixture_half_dims67[0], fixture_half_dims67[1],\
                               fixture_half_dims67[2], np.array(fixture_pos7))
    # Wrap each titanium box in a named structure.
    Fixture0 = Swift_Structure(fixture_box0, TI, Name='Fix0')
    Fixture1 = Swift_Structure(fixture_box1, TI, Name='Fix1')
    Fixture2 = Swift_Structure(fixture_box2, TI, Name='Fix2')
    Fixture3 = Swift_Structure(fixture_box3, TI, Name='Fix3')
    Fixture4 = Swift_Structure(fixture_box4, TI, Name='Fix4')
    Fixture5 = Swift_Structure(fixture_box5, TI, Name='Fix5')
    Fixture6 = Swift_Structure(fixture_box6, TI, Name='Fix6')
    Fixture7 = Swift_Structure(fixture_box7, TI, Name='Fix7')
    Fixtures = [Fixture0, Fixture1, Fixture2,
                Fixture3, Fixture4, Fixture5,
                Fixture6, Fixture7]
    Fixture_Struct = Swift_Structure_Manager()
    for fix in Fixtures:
        Fixture_Struct.add_struct(fix)
    return Fixture_Struct
# Sandwich-module layout of the detector plane: 16 columns of 15 detx cells
# at pitch 18, and 16 rows of 7 dety cells at pitch 11 (with gaps between
# modules).
detxs_by_sand0 = np.arange(0, 286-15, 18)
detxs_by_sand1 = detxs_by_sand0 + 15
print len(detxs_by_sand0)
detys_by_sand0 = np.arange(0, 173-7, 11)
detys_by_sand1 = detys_by_sand0 + 7
print len(detys_by_sand0)
# Interior (non-edge) detector columns/rows inside each sandwich.
detxs_in_cols_not_edges = [np.arange(detxs_by_sand0[i]+1, detxs_by_sand1[i], 1, dtype=np.int)\
                           for i in range(16)]
detys_in_rows_not_edges = [np.arange(detys_by_sand0[i]+1, detys_by_sand1[i], 1, dtype=np.int)\
                           for i in range(16)]
print detxs_in_cols_not_edges
# Full DPI coordinate grids: 173 rows (dety) by 286 columns (detx).
dpi_shape = (173, 286)
detxax = np.arange(286, dtype=np.int)
detyax = np.arange(173, dtype=np.int)
detx_dpi, dety_dpi = np.meshgrid(detxax, detyax)
print np.shape(detx_dpi), np.shape(dety_dpi)
print np.max(detx_dpi), np.max(dety_dpi)
# NOTE(review): np.int is a removed alias in NumPy >= 1.24; acceptable
# only for the Python 2 / old-NumPy environment this script targets.
def get_detxys_from_colrows(col0, col1, row0, row1, orientation='NonEdges'):
    """
    Return DPI (row, col) index arrays for the detectors in sandwich columns
    [col0:col1] and sandwich rows [row0:row1].

    orientation selects which detectors within those sandwiches are meant:
    'NonEdges' (interior detectors) or one of the edge sets
    'left'/'top'/'bot'/'right'.  The result is the tuple from np.where,
    usable to index (173, 286) DPI arrays.
    """
    if orientation == 'NonEdges':
        good_detxs = np.array(detxs_in_cols_not_edges[col0:col1])
        good_detys = np.array(detys_in_rows_not_edges[row0:row1])
    elif orientation == 'left':
        good_detxs = np.array(detxs_by_sand0[col0:col1])
        good_detys = np.array(detys_in_rows_not_edges[row0:row1])
        good_detys = np.append(good_detys, np.array(detys_by_sand1[row0:row1]))
    elif orientation == 'top':
        good_detxs = np.array(detxs_in_cols_not_edges[col0:col1])
        good_detys = np.array(detys_by_sand1[row0:row1])
    elif orientation == 'bot':
        good_detxs = np.array(detxs_in_cols_not_edges[col0:col1])
        good_detxs = np.append(good_detxs, np.array(detxs_by_sand0[col0:col1]))
        good_detys = np.array(detys_by_sand0[row0:row1])
    elif orientation == 'right':
        good_detxs = np.array(detxs_by_sand1[col0:col1])
        good_detys = np.array(detys_in_rows_not_edges[row0:row1])
        # NOTE(review): 'right' appends BOTH dety edge sets while 'left'
        # appends only detys_by_sand1 — asymmetric; confirm it is intentional.
        good_detys = np.append(good_detys, np.array(detys_by_sand1[row0:row1]))
        good_detys = np.append(good_detys, np.array(detys_by_sand0[row0:row1]))
    else:
        # NOTE(review): falls through with good_detxs/good_detys undefined,
        # so an unknown orientation raises NameError just below.
        print "bad orientation"
    blx = np.isin(detx_dpi, good_detxs)
    bly = np.isin(dety_dpi, good_detys)
    bl = blx&bly
    inds = np.where(bl)
    return inds
def rot_col_row_orientation(col0, col1, row0, row1, orientation, phi_rot):
    """
    Map a sandwich (col, row) interval and edge orientation into the frame
    rotated by phi_rot (radians), quantized to 45-degree sectors.

    Negative angles are wrapped by +2*pi.  Angles below pi/4 (or >= 2*pi
    after wrapping) leave everything unchanged.  Returns
    (new_col0, new_col1, new_row0, new_row1, new_orientation).
    """
    ang = phi_rot + 2*np.pi if phi_rot < 0 else phi_rot
    # Intervals reflected about the 16-sandwich grid.
    rcol = (16 - col1, 16 - col0)
    rrow = (16 - row1, 16 - row0)
    if np.pi/4 <= ang < np.pi/2:
        # bot is strong / right is weak
        coords = rrow + rcol
        swap = {'right': 'bot', 'bot': 'right'}
    elif np.pi/2 <= ang < 3*np.pi/4:
        # bot is strong / left is weak
        coords = (row0, row1) + rcol
        swap = {'right': 'bot', 'bot': 'left', 'left': 'right'}
    elif 3*np.pi/4 <= ang < np.pi:
        # left is strong / bot is weak
        coords = rcol + (row0, row1)
        swap = {'right': 'left', 'left': 'right'}
    elif np.pi <= ang < 5*np.pi/4:
        # left is strong / top is weak
        coords = rcol + rrow
        swap = {'right': 'left', 'left': 'right', 'bot': 'top', 'top': 'bot'}
    elif 5*np.pi/4 <= ang < 6*np.pi/4:
        # top is strong / left is weak
        coords = (row0, row1, col0, col1)
        swap = {'right': 'top', 'bot': 'left', 'left': 'right', 'top': 'bot'}
    elif 6*np.pi/4 <= ang < 7*np.pi/4:
        # top is strong / right is weak
        coords = rrow + (col0, col1)
        swap = {'right': 'top', 'bot': 'right', 'top': 'bot'}
    elif 7*np.pi/4 <= ang < 8*np.pi/4:
        # right is strong / top is weak
        coords = (col0, col1) + rrow
        swap = {'bot': 'top', 'top': 'bot'}
    else:
        coords = (col0, col1, row0, row1)
        swap = {}
    new_col0, new_col1, new_row0, new_row1 = coords
    return new_col0, new_col1, new_row0, new_row1, swap.get(orientation, orientation)
def resp_tab2resp_dpis(resp_tab, phi_rot=0.0):
    """
    Expand a response table into full-DPI response cubes.

    Each non-ENERG column name encodes a sandwich col/row range and an edge
    orientation; that column's response is painted onto the matching detector
    pixels, after rotating the ranges by phi_rot.  Returns
    (lines_resp_dpi, comp_resp_dpi), each shaped (173, 286, NphotonEs, Nphabins).
    """
    non_energy = [cn for cn in resp_tab.colnames if 'ENERG' not in cn]
    line_cnames = [cn for cn in non_energy if 'comp' not in cn]
    comp_cnames = [cn for cn in non_energy if 'comp' in cn]
    NphotonEs, Nphabins = resp_tab[line_cnames[0]].shape

    def _paint(cnames, shift):
        # 'comp' column names carry one extra trailing token, hence `shift`.
        dpi = np.zeros((173, 286, NphotonEs, Nphabins))
        for cn in cnames:
            toks = cn.split('_')
            c0, c1 = int(toks[-5 - shift]), int(toks[-4 - shift])
            r0, r1 = int(toks[-2 - shift]), int(toks[-1 - shift])
            nc0, nc1, nr0, nr1, nori = rot_col_row_orientation(
                c0, c1, r0, r1, toks[0], phi_rot)
            inds = get_detxys_from_colrows(nc0, nc1, nr0, nr1, orientation=nori)
            dpi[inds[0], inds[1], :, :] = resp_tab[cn].data.copy()
        return dpi

    return _paint(line_cnames, 0), _paint(comp_cnames, 1)
def get_resp_arr(drm_dir):
    """
    Index the response files found in `drm_dir`.

    Filenames are expected to be underscore-separated with 'drm_' in them,
    theta at token index 2 and phi at token index 4
    (e.g. 'drm_theta_10.0_phi_30.0_...').

    Returns a structured array with fields 'theta', 'phi', 'fname'.
    """
    fnames = np.array([fn for fn in os.listdir(drm_dir) if 'drm_' in fn])
    thetas = np.array([float(fn.split('_')[2]) for fn in fnames])
    phis = np.array([float(fn.split('_')[4]) for fn in fnames])
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float yields the same float64 field dtype.
    dtp = [('theta', float), ('phi', float), ('fname', fnames.dtype)]
    drm_arr = np.empty(len(thetas), dtype=dtp)
    drm_arr['theta'] = thetas
    drm_arr['phi'] = phis
    drm_arr['fname'] = fnames
    return drm_arr
class ResponseDPI(object):
    """
    Response from a single file: reads the response Table, rebins it onto the
    requested PHA bins, and expands it into line (photopeak) and Compton
    response DPIs rotated by phi0.
    """
    def __init__(self, resp_fname, pha_emins, pha_emaxs, phi0, bl_dmask):
        print "initing ResponseDPI, with fname"
        print resp_fname
        self.orig_resp_tab = Table.read(resp_fname)
        self.pha_tab = Table.read(resp_fname, hdu='EBOUNDS')
        self.orig_pha_emins = self.pha_tab['E_MIN']
        self.orig_pha_emaxs = self.pha_tab['E_MAX']
        # Incident-photon energy grid from the response table.
        self.photonEmins = self.orig_resp_tab['ENERG_LO']
        self.photonEmaxs = self.orig_resp_tab['ENERG_HI']
        self.photonEs = (self.photonEmins + self.photonEmaxs)/2.
        self.NphotonEs = len(self.photonEs)
        self.phi0 = phi0 # should be in radians
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.set_pha_bins(pha_emins, pha_emaxs)
        self.mk_resp_dpis()
    def set_pha_bins(self, pha_emins, pha_emaxs):
        # Rebin the original response onto the requested PHA energy bins.
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(self.pha_emins)
        self.resp_tab = shift_resp_tab_pha_bins(self.orig_resp_tab, self.orig_pha_emins,\
                                                self.orig_pha_emaxs, self.pha_emins,\
                                                self.pha_emaxs)
    def set_phi0(self, phi0):
        # Only rebuild the DPIs if phi0 actually moved (> 0.01 rad).
        if np.abs(phi0 - self.phi0) > 1e-2:
            self.phi0 = phi0
            self.mk_resp_dpis()
    def mk_resp_dpis(self):
        # Expand to full DPIs, then keep only the active detectors.
        lines_resp_dpis, comp_resp_dpis = resp_tab2resp_dpis(self.resp_tab, phi_rot=self.phi0)
        self.lines_resp_dpis = lines_resp_dpis[self.bl_dmask]
        self.comp_resp_dpis = comp_resp_dpis[self.bl_dmask]
    def get_lines_resp_dpis(self):
        return self.lines_resp_dpis
    def get_comp_resp_dpis(self):
        return self.comp_resp_dpis
def get_flor_intp_inds_wts(batxs, batys):
    """
    Build bilinear-interpolation indices and weights mapping each detector
    position onto the coarse fluorescence-response grid.

    Parameters
    ----------
    batxs, batys : arrays of detector BAT-frame x/y positions.

    Returns
    -------
    inds : list of 4 flat-index arrays (corners 00, 01, 10, 11) into the
        raveled coarse grid.
    wts : list of 4 matching bilinear weight arrays.
    """
    # Coarse grid axes in det coordinates (step 8), padded past the edges.
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is equivalent here.
    detxax = np.arange(-1, 286+2, 8, dtype=int)
    detyax = np.arange(-2, 173+2, 8, dtype=int)
    batxax, batyax = detxy2batxy(detxax, detyax)
    flor_detx_dpi, flor_dety_dpi = np.meshgrid(detxax, detyax)
    shp = flor_detx_dpi.shape
    # NOTE(review): flor_batxs/flor_batys are never used below; kept in case
    # detxy2batxy has side effects — TODO confirm and drop.
    flor_batxs, flor_batys = detxy2batxy(flor_detx_dpi.ravel(), flor_dety_dpi.ravel())
    # Bracketing grid nodes per detector along each axis.
    x0inds = np.digitize(batxs, batxax) - 1
    x1inds = x0inds + 1
    y0inds = np.digitize(batys, batyax) - 1
    y1inds = y0inds + 1
    # Linear weights along x.
    x0s = batxax[x0inds]
    x1s = batxax[x1inds]
    dxs = x1s - x0s
    x0wts = (x1s - batxs)/dxs
    x1wts = (batxs - x0s)/dxs
    # Linear weights along y.
    y0s = batyax[y0inds]
    y1s = batyax[y1inds]
    dys = y1s - y0s
    y0wts = (y1s - batys)/dys
    y1wts = (batys - y0s)/dys
    # Flatten the four (row, col) corner indices into the raveled coarse grid.
    inds00 = np.ravel_multi_index((y0inds, x0inds), shp)
    inds01 = np.ravel_multi_index((y0inds, x1inds), shp)
    inds10 = np.ravel_multi_index((y1inds, x0inds), shp)
    inds11 = np.ravel_multi_index((y1inds, x1inds), shp)
    inds = [inds00, inds01, inds10, inds11]
    wts = [y0wts*x0wts, y0wts*x1wts, y1wts*x0wts, y1wts*x1wts]
    return inds, wts
@njit(cache=True)
def flor_resp2dpis(flor_resp, flor_inds, flor_wts):
    # Bilinearly interpolate the coarse fluorescence response onto the
    # detectors: sum the four corner responses weighted per detector.
    n_det = len(flor_inds[0])
    n_photonE = flor_resp.shape[1]
    n_pha = flor_resp.shape[2]
    out = np.zeros((n_det, n_photonE, n_pha))
    for corner in range(4):
        corner_inds = flor_inds[corner]
        corner_wts = flor_wts[corner]
        for d in range(n_det):
            out[d] += flor_resp[corner_inds[d]]*(corner_wts[d])
    return out
class FlorResponseDPI(object):
    """
    Fluorescence response DPI interpolated over HEALPix directions.

    NOTE(review): this class is re-defined later in this module under the
    same name (the later version adds Sn/Ta/Pb effective-area ratios), so
    this definition is shadowed and unused at runtime.
    """
    def __init__(self, resp_dname, pha_tab, pha_emins, pha_emaxs, bl_dmask, Nside=2**3, NphotonEs=187):
        self.resp_dname = resp_dname
        self.pha_tab = pha_tab
        # NOTE(review): np.float is removed in NumPy >= 1.24 (builtin float works).
        self.orig_pha_emins = self.pha_tab['E_MIN'].astype(np.float)
        self.orig_pha_emaxs = self.pha_tab['E_MAX'].astype(np.float)
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(pha_emins)
        self.NphotonEs = NphotonEs
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.batxs, self.batys = bldmask2batxys(self.bl_dmask)
        self.flor_inds, self.flor_wts = get_flor_intp_inds_wts(self.batxs, self.batys)
        self.orig_ndets = 851
        self.Nside = Nside
        self.resp_dict = {} # hp inds will be the keys
    def set_theta_phi(self, theta, phi):
        # lat = 90 - theta, passed to healpy with lonlat=True.
        self.phi = phi
        self.theta = theta
        self.lat = 90.0 - self.theta
        self.hp_inds2use, self.hp_wts = hp.get_interp_weights(self.Nside, self.phi, self.lat, lonlat=True)
        self.calc_resp_dpi()
    def open_new_file(self, hp_ind):
        # Load and cache the rebinned response for one HEALPix pixel.
        fname = 'hp_order_3_ind_%d_.npy'%(hp_ind)
        resp_arr = np.load(os.path.join(self.resp_dname,fname))
        self.resp_dict[hp_ind] = shift_flor_dpi_pha_bins(resp_arr, self.orig_pha_emins,\
                                                self.orig_pha_emaxs,\
                                                self.pha_emins, self.pha_emaxs)
    def calc_resp_dpi(self):
        # Weighted sum of the neighboring pixel responses, then bilinear
        # interpolation onto the active detectors.
        resp_dpi0 = np.zeros((self.orig_ndets,self.NphotonEs,self.Nphabins))
        for hp_ind,wt in zip(self.hp_inds2use,self.hp_wts):
            if not hp_ind in self.resp_dict.keys():
                self.open_new_file(hp_ind)
            resp_dpi0 += wt*self.resp_dict[hp_ind]
        self.resp_dpi = flor_resp2dpis(resp_dpi0, self.flor_inds, self.flor_wts)
    def get_resp_dpi(self):
        return self.resp_dpi
class FlorResponseDPI(object):
    """
    Fluorescence response DPI interpolated over HEALPix directions, with
    Sn/Ta/Pb effective-area adjustment ratios applied per fluorescence-line
    PHA channel when each pixel file is loaded.

    NOTE(review): this definition shadows the earlier FlorResponseDPI class
    above; this later one is the version in effect at import time.
    """

    def __init__(self, resp_dname, pha_tab, pha_emins, pha_emaxs, bl_dmask, Nside=2**3, NphotonEs=187):
        self.resp_dname = resp_dname
        self.pha_tab = pha_tab
        # FIX: np.float was removed in NumPy 1.24; builtin float is equivalent.
        self.orig_pha_emins = self.pha_tab['E_MIN'].astype(float)
        self.orig_pha_emaxs = self.pha_tab['E_MAX'].astype(float)
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(pha_emins)
        self.NphotonEs = NphotonEs
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.batxs, self.batys = bldmask2batxys(self.bl_dmask)
        self.flor_inds, self.flor_wts = get_flor_intp_inds_wts(self.batxs, self.batys)
        self.orig_ndets = 851
        # Effective-area correction ratios for the Sn, Ta and Pb lines.
        # NOTE(review): hard-coded absolute path — not portable off the
        # original cluster.
        fname = '/storage/work/jjd330/local/bat_data/OutFoVbursts/GRB131014A/flor_Aeff_adjust.npz'
        ratio_file = np.load(fname)
        self.sn_ratios = ratio_file['sn_ratios']
        self.ta_ratios = ratio_file['ta_ratios']
        self.pb_ratios = ratio_file['pb_ratios']
        self.Nside = Nside
        self.resp_dict = {}  # cache of loaded responses keyed by HEALPix index

    def set_theta_phi(self, theta, phi):
        """Set the source direction (lat = 90 - theta, healpy lonlat=True)
        and rebuild the response DPI."""
        self.phi = phi
        self.theta = theta
        self.lat = 90.0 - self.theta
        self.hp_inds2use, self.hp_wts = hp.get_interp_weights(self.Nside, self.phi, self.lat, lonlat=True)
        self.calc_resp_dpi()

    def open_new_file(self, hp_ind):
        """Load and cache the response for one HEALPix pixel, applying the
        per-line effective-area ratios and rebinning to the requested PHA bins."""
        fname = 'hp_order_3_ind_%d_.npy'%(hp_ind)
        resp_arr = np.load(os.path.join(self.resp_dname, fname))
        # PHA-channel index ranges of the Sn, Ta and Pb fluorescence lines.
        # FIX: np.int removed from NumPy; builtin int is equivalent here.
        sn_inds = np.arange(1, 13, dtype=int)
        ta_inds = np.arange(14, 29, dtype=int)
        pb_inds = np.arange(29, 39, dtype=int)
        for sn_ind in sn_inds:
            resp_arr[:,:,sn_ind] *= self.sn_ratios
        for ta_ind in ta_inds:
            resp_arr[:,:,ta_ind] *= self.ta_ratios
        for pb_ind in pb_inds:
            resp_arr[:,:,pb_ind] *= self.pb_ratios
        self.resp_dict[hp_ind] = shift_flor_dpi_pha_bins(resp_arr, self.orig_pha_emins,
                                                         self.orig_pha_emaxs,
                                                         self.pha_emins, self.pha_emaxs)

    def calc_resp_dpi(self):
        """Weighted sum of the neighboring HEALPix pixel responses, then
        bilinear interpolation onto the active detectors."""
        resp_dpi0 = np.zeros((self.orig_ndets, self.NphotonEs, self.Nphabins))
        for hp_ind, wt in zip(self.hp_inds2use, self.hp_wts):
            if hp_ind not in self.resp_dict:
                self.open_new_file(hp_ind)
            resp_dpi0 += wt*self.resp_dict[hp_ind]
        self.resp_dpi = flor_resp2dpis(resp_dpi0, self.flor_inds, self.flor_wts)

    def get_resp_dpi(self):
        return self.resp_dpi
class ResponseOutFoV(object):
    """
    Full detector response for source directions outside the coded field of
    view: line/Compton responses interpolated in (theta, phi) from the files
    in resp_dname, attenuated by the spacecraft-structure transmission, plus
    a fluorescence component.

    NOTE(review): uses Python 2 `print` statements and relies on Python 2
    integer division in get_intp_theta_phi_wts; `.astype(np.float)` fails on
    NumPy >= 1.24.
    """
    def __init__(self, resp_dname, pha_emins, pha_emaxs, bl_dmask):
        self.resp_dname = resp_dname
        self.resp_arr = get_resp_arr(self.resp_dname)
        self.thetas = np.unique(self.resp_arr['theta'])
        # Photon-energy grid taken from the first response file.
        tab = Table.read(os.path.join(self.resp_dname, self.resp_arr['fname'][0]))
        pha_tab = Table.read(os.path.join(self.resp_dname, self.resp_arr['fname'][0]), hdu=2)
        self.PhotonEmins = tab['ENERG_LO']
        self.PhotonEmaxs = tab['ENERG_HI']
        self.PhotonEs = ((self.PhotonEmins + self.PhotonEmaxs)/2.).astype(np.float)
        self.NphotonEs = len(self.PhotonEs)
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(pha_emins)
        # self.NphotonEs = NphotonEs
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.batxs, self.batys = bldmask2batxys(self.bl_dmask)
        # All detectors sit at the same z (cm) in BAT coordinates.
        self.batzs = 3.087 + np.zeros(self.ndets)
        # self.resp_dpi_shape = (173, 286, self.NphotonEs, self.Nphabins)
        self.resp_dpi_shape = (self.ndets, self.NphotonEs, self.Nphabins)
        self.resp_files = {}
        # Structure models for transmission and Compton scattering.
        self.full_struct = get_full_struct_manager(Es=self.PhotonEs)
        self.full_struct.set_batxyzs(self.batxs, self.batys, self.batzs)
        dual_struct = get_dual_struct_obj(self.PhotonEs)
        self.comp_obj = Comp_Resp_Obj(self.batxs, self.batys, self.batzs, dual_struct)
        self.flor_resp_obj = FlorResponseDPI('/gpfs/scratch/jjd330/bat_data/flor_resps/',\
                            pha_tab, self.pha_emins, self.pha_emaxs,\
                            self.bl_dmask, NphotonEs=self.NphotonEs)
    def set_theta_phi(self, theta, phi):
        # use radians or degs ?
        self.theta = theta
        self.phi = phi
        # Pick the tabulated (theta, phi) grid points and weights to
        # interpolate between, then locate their files.
        self.thetas2use, self.phis2use, self.wts = self.get_intp_theta_phi_wts(self.theta, self.phi)
        self.inds4intp = []
        for i in range(len(self.wts)):
            ind = np.where(np.isclose(self.thetas2use[i],self.resp_arr['theta'])&\
                           np.isclose(self.phis2use[i],self.resp_arr['phi']))[0][0]
            self.inds4intp.append(ind)
        self.full_struct.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        self.lines_trans_dpis = self.full_struct.get_trans()
        # self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        # self.comp_trans_dpis = self.comp_obj.get_trans()
        # The separate Compton transmission is only computed for directions
        # from below (theta > 90 deg); otherwise reuse the line transmission.
        if theta > 90.0:
            self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
            self.comp_trans_dpis = self.comp_obj.get_trans()
        else:
            self.comp_trans_dpis = self.lines_trans_dpis
        self.flor_resp_obj.set_theta_phi(self.theta, self.phi)
        self.calc_resp_dpis()
        self.calc_tot_resp_dpis()
    def update_trans(self, theta, phi):
        # Recompute only the transmission factors (not the interpolated
        # response tables) for a new direction.
        self.full_struct.set_theta_phi(np.radians(theta), np.radians(phi))
        self.lines_trans_dpis = self.full_struct.get_trans()
        # self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        # self.comp_trans_dpis = self.comp_obj.get_trans()
        if theta > 90.0:
            self.comp_obj.set_theta_phi(np.radians(theta), np.radians(phi))
            self.comp_trans_dpis = self.comp_obj.get_trans()
        else:
            self.comp_trans_dpis = self.lines_trans_dpis
        self.calc_tot_resp_dpis()
    def open_resp_file_obj(self, fname):
        # Load (and cache) one tabulated-response file as a ResponseDPI.
        resp_file_obj = ResponseDPI(os.path.join(self.resp_dname,fname),\
                                    self.pha_emins, self.pha_emaxs,\
                                    np.radians(self.phi), self.bl_dmask)
        self.resp_files[fname] = resp_file_obj
    def calc_resp_dpis(self):
        # Weighted sum of the tabulated responses at the chosen grid points.
        self.lines_resp_dpis = np.zeros(self.resp_dpi_shape)
        self.comp_resp_dpis = np.zeros(self.resp_dpi_shape)
        for i in range(len(self.wts)):
            k = self.resp_arr['fname'][self.inds4intp[i]]
            if not k in self.resp_files.keys():
                self.open_resp_file_obj(k)
            self.lines_resp_dpis += self.wts[i]*self.resp_files[k].get_lines_resp_dpis()
            self.comp_resp_dpis += self.wts[i]*self.resp_files[k].get_comp_resp_dpis()
    def calc_tot_resp_dpis(self):
        # Apply the transmission factors and add the fluorescence component.
        lines_dpi = self.lines_resp_dpis*(self.lines_trans_dpis[:,:,np.newaxis])
        comp_dpi = self.comp_resp_dpis*(self.comp_trans_dpis[:,:,np.newaxis])
        self.comp_resp_dpi = comp_dpi
        self.lines_resp_dpi = lines_dpi
        self.non_flor_resp_dpi = lines_dpi + comp_dpi
        self.flor_resp_dpi = self.flor_resp_obj.get_resp_dpi()
        self.tot_resp_dpis = self.non_flor_resp_dpi + self.flor_resp_dpi
    def get_lines_resp_dpis(self):
        return self.lines_resp_dpis
    def get_comp_resp_dpis(self):
        return self.comp_resp_dpis
    def get_flor_resp_dpis(self):
        return self.flor_resp_obj.get_resp_dpi()
    def get_tot_resp_dpis(self):
        return self.tot_resp_dpis
    def get_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        # Fold a photon-flux spectrum through the total response.
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.tot_resp_dpis[:,:,j],axis=1)
        return rate_dpis
    def get_flor_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.flor_resp_dpi[:,:,j],axis=1)
        return rate_dpis
    def get_comp_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.comp_resp_dpi[:,:,j],axis=1)
        return rate_dpis
    def get_photoe_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.lines_resp_dpi[:,:,j],axis=1)
        return rate_dpis
    def get_non_flor_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.non_flor_resp_dpi[:,:,j],axis=1)
        return rate_dpis
    def get_intp_theta_phi_wts(self, theta, phi, eps=0.1):
        # Bilinear interpolation points/weights on the tabulated (theta, phi)
        # grid; phi is folded into [0, 45] deg using the mask's symmetry.
        thetas = np.sort(np.unique(self.resp_arr['theta']))
        phis = np.sort(np.unique(self.resp_arr['phi']))
        th0 = np.digitize(theta, thetas) - 1
        if theta == 180.0:
            th0 -= 1
        theta0 = thetas[th0]
        theta1 = thetas[th0+1]
        print theta0, theta1
        # Snap to a grid theta when within eps of it; otherwise blend the two.
        if np.abs(theta0 - theta) < eps:
            ths = [theta0]
            th_wts = [1.0]
        elif np.abs(theta1 - theta) < eps:
            ths = [theta1]
            th_wts = [1.0]
        else:
            ths = [theta0, theta1]
            dth = theta1 - theta0
            th_wts = [(theta1 - theta)/dth, (theta - theta0)/dth]
        # NOTE(review): (int(phi)/45) relies on Python 2 integer division;
        # under Python 3 this would need int(phi)//45.
        phi_ = phi - (int(phi)/45)*45.0
        print phi_
        if (int(phi)/45)%2 == 1:
            phi_ = 45.0 - phi_
        print phi_
        ph0 = np.digitize(phi_, phis) - 1
        if phi_ == 45.0:
            ph0 -= 1
        phi0 = phis[ph0]
        phi1 = phis[ph0+1]
        if np.abs(phi0 - phi_) < eps:
            phs = [phi0]
            ph_wts = [1.0]
        elif np.abs(phi1 - phi_) < eps:
            phs = [phi1]
            ph_wts = [1.0]
        else:
            phs = [phi0, phi1]
            dph = phi1 - phi0
            ph_wts = [(phi1 - phi_)/dph, (phi_ - phi0)/dph]
        # Combine theta and phi weights; at the poles phi is degenerate.
        ths_ = []
        phs_ = []
        wts = []
        for i in range(len(ths)):
            if ths[i] == 0.0 or ths[i] == 180.0:
                ths_.append(ths[i])
                phs_.append(0.0)
                wts.append(th_wts[i])
                continue
            for j in range(len(phs)):
                ths_.append(ths[i])
                phs_.append(phs[j])
                wts.append(th_wts[i]*ph_wts[j])
        return ths_, phs_, wts
class Swift_Mask_Interactions(object):
    '''
    Transmission of the coded mask for a given source direction.

    Decides which detectors' sight-lines intersect the mask polygon, fetches
    the normalized ray-trace shadow fraction, and attenuates the shadowed
    fraction by the lead-tile thickness (0.1 cm / cos(theta)).  Also applies
    the fixture structures (lead tiles where the struts screw in), which are
    not included in the ray traces:
        trans = fix_trans * (rt + (1 - rt) * exp[-rhomu_pb * 0.1 / cos(theta)])
    '''
    def __init__(self, rt_obj, bl_dmask):
        self.rt_obj = rt_obj
        self.bl_dmask = bl_dmask
        self.ds_base = 0.1          # lead tile thickness (cm) at normal incidence
        self.material = PB
        self.Nmaterials = 1
        self.Name = 'Mask'
        self.norm_vec = np.array([0.0, 0.0, -1.0])
        # Mask outline (hexagon) in BAT coordinates (cm).
        self.verts = np.array([(121.92, 60.95, 103.187), (121.92, -1.41, 103.187),
                               (61.5, -60.95, 103.187), (-61.5, -60.95, 103.187),
                               (-121.92, -1.41, 103.187), (-121.92, 60.95, 103.187)])
        trans_vec = np.zeros(3)
        self.mask_poly = Polygon2D(self.verts, trans_vec)
        self.fix_struct = get_fixture_struct()
        # Ray-trace cache: imx/imy sentinels far outside the FoV force the
        # first get_rt call to compute.
        self._rt_imx = 10.0
        self._rt_imy = 10.0
        self._rt_im_update = 1e-6

    def set_energy_arr(self, energy):
        """Set the photon-energy grid and precompute the Pb attenuation
        coefficients; refresh cached optical depths if distances exist."""
        self.energy = energy
        self.Ne = len(energy)
        self.tot_rho_mus = self.material.get_tot_rhomu(self.energy)
        self.comp_rho_mus = self.material.get_comp_rhomu(self.energy)
        self.photoe_rho_mus = self.material.get_photoe_rhomu(self.energy)
        if hasattr(self, 'dists'):
            self.calc_tot_rhomu_dist()
        self.fix_struct.set_energy_arr(self.energy)

    def set_batxyzs(self, batxs, batys, batzs):
        """Store the detector positions used for the intersection tests."""
        self.batxs = batxs
        self.batys = batys
        self.batzs = batzs
        self.ndets = len(batxs)

    def set_theta_phi(self, theta, phi):
        """Set the source direction (degrees) and update every
        direction-dependent quantity."""
        self.theta = theta
        self.phi = phi
        self.imx, self.imy = theta_phi2imxy(theta, phi)
        # Slant path length through the lead tile.
        self.d = self.ds_base/np.cos(np.radians(theta))
        self.calc_does_int_mask()
        self.calc_dists()
        self.calc_tot_rhomu_dist()
        self.does_int_fix()

    def calc_does_int_mask(self):
        """Boolean per detector: does its sight-line cross the mask polygon?"""
        self.does_int_mask = self.mask_poly.does_intersect(np.radians(self.theta), np.radians(self.phi),
                                                           self.batxs, self.batys,
                                                           self.batzs)

    def calc_dists(self):
        # Lead path per detector (0 where the sight-line misses the mask).
        # FIX: np.float was removed in NumPy 1.24; builtin float is equivalent.
        self.dists = (self.does_int_mask.astype(float))*self.d

    def calc_tot_rhomu_dist(self):
        # Optical depth per detector per energy: dist * (rho*mu)(E).
        self.tot_rhomu_dists = self.dists[:,np.newaxis]*self.tot_rho_mus

    def does_int_fix(self):
        """Transmission through the fixture structures, computed only for
        detectors whose sight-lines cross the mask."""
        self.fix_trans = np.ones((self.ndets,self.Ne))
        self.fix_struct.set_batxyzs(self.batxs[self.does_int_mask],
                                    self.batys[self.does_int_mask],
                                    self.batzs[self.does_int_mask])
        self.fix_struct.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        self.fix_trans[self.does_int_mask] = self.fix_struct.get_trans()

    def get_trans(self):
        """Total mask transmission per detector per energy."""
        self.trans = np.ones((self.ndets,self.Ne))
        rt = self.get_rt(self.imx, self.imy)[:,np.newaxis]
        self.trans[self.does_int_mask] = self.fix_trans[self.does_int_mask]*(rt[self.does_int_mask] +
                                         (1.-rt[self.does_int_mask])*
                                         np.exp(-self.tot_rhomu_dists[self.does_int_mask]))
        return self.trans

    def get_rt(self, imx, imy):
        """Fetch (and cache) the max-normalized ray-trace for (imx, imy)."""
        # FIX: guard with hasattr — a first call whose (imx, imy) happened to
        # match the cache sentinels would previously hit AttributeError on
        # the not-yet-computed self._rt.
        if hasattr(self, '_rt') and np.hypot(imx-self._rt_imx, imy-self._rt_imy) <\
                self._rt_im_update:
            return self._rt
        rt = self.rt_obj.get_intp_rt(imx, imy, get_deriv=False)
        self._rt = np.copy(rt[self.bl_dmask])
        self.max_rt = np.max(self._rt)
        print("max rt: %.4f"%(self.max_rt))
        self._rt /= self.max_rt
        self._shadow = (1. - self._rt)
        self._rt_imx = imx
        self._rt_imy = imy
        return self._rt
class ResponseInFoV(object):
    """
    Full detector response for source directions inside the coded field of
    view.  Same structure as ResponseOutFoV, but the line transmission is
    additionally multiplied by the coded-mask transmission from
    Swift_Mask_Interactions (built from the supplied rt_obj).

    NOTE(review): uses Python 2 `print` statements and relies on Python 2
    integer division in get_intp_theta_phi_wts; `.astype(np.float)` fails on
    NumPy >= 1.24.
    """
    def __init__(self, resp_dname, pha_emins, pha_emaxs, bl_dmask, rt_obj):
        self.resp_dname = resp_dname
        self.resp_arr = get_resp_arr(self.resp_dname)
        self.thetas = np.unique(self.resp_arr['theta'])
        # Photon-energy grid taken from the first response file.
        tab = Table.read(os.path.join(self.resp_dname, self.resp_arr['fname'][0]))
        pha_tab = Table.read(os.path.join(self.resp_dname, self.resp_arr['fname'][0]), hdu=2)
        self.PhotonEmins = tab['ENERG_LO']
        self.PhotonEmaxs = tab['ENERG_HI']
        self.PhotonEs = ((self.PhotonEmins + self.PhotonEmaxs)/2.).astype(np.float)
        self.NphotonEs = len(self.PhotonEs)
        self.pha_emins = pha_emins
        self.pha_emaxs = pha_emaxs
        self.Nphabins = len(pha_emins)
        # self.NphotonEs = NphotonEs
        self.ndets = np.sum(bl_dmask)
        self.bl_dmask = bl_dmask
        self.batxs, self.batys = bldmask2batxys(self.bl_dmask)
        # All detectors sit at the same z (cm) in BAT coordinates.
        self.batzs = 3.087 + np.zeros(self.ndets)
        # self.resp_dpi_shape = (173, 286, self.NphotonEs, self.Nphabins)
        self.resp_dpi_shape = (self.ndets, self.NphotonEs, self.Nphabins)
        self.resp_files = {}
        # Structure models for transmission and Compton scattering.
        self.full_struct = get_full_struct_manager(Es=self.PhotonEs)
        self.full_struct.set_batxyzs(self.batxs, self.batys, self.batzs)
        dual_struct = get_dual_struct_obj(self.PhotonEs)
        self.comp_obj = Comp_Resp_Obj(self.batxs, self.batys, self.batzs, dual_struct)
        self.flor_resp_obj = FlorResponseDPI('/gpfs/scratch/jjd330/bat_data/flor_resps/',\
                            pha_tab, self.pha_emins, self.pha_emaxs,\
                            self.bl_dmask, NphotonEs=self.NphotonEs)
        # Coded-mask transmission model (this is what distinguishes the
        # in-FoV response from ResponseOutFoV).
        self.mask_obj = Swift_Mask_Interactions(rt_obj, self.bl_dmask)
        self.mask_obj.set_energy_arr(self.PhotonEs)
        self.mask_obj.set_batxyzs(self.batxs, self.batys, self.batzs)
    def set_theta_phi(self, theta, phi):
        # use radians or degs ?
        self.theta = theta
        self.phi = phi
        # Pick the tabulated (theta, phi) grid points and weights to
        # interpolate between, then locate their files.
        self.thetas2use, self.phis2use, self.wts = self.get_intp_theta_phi_wts(self.theta, self.phi)
        self.inds4intp = []
        for i in range(len(self.wts)):
            ind = np.where(np.isclose(self.thetas2use[i],self.resp_arr['theta'])&\
                           np.isclose(self.phis2use[i],self.resp_arr['phi']))[0][0]
            self.inds4intp.append(ind)
        self.mask_obj.set_theta_phi(theta, phi)
        self.mask_trans = self.mask_obj.get_trans()
        self.full_struct.set_theta_phi(np.radians(theta), np.radians(phi))
        self._lines_trans_dpis = self.full_struct.get_trans()
        # Line transmission includes the coded-mask transmission.
        self.lines_trans_dpis = self._lines_trans_dpis*self.mask_trans
        # self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        # self.comp_trans_dpis = self.comp_obj.get_trans()
        # The separate Compton transmission is only computed for directions
        # from below (theta > 90 deg); otherwise reuse the line transmission.
        if theta > 90.0:
            self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
            self.comp_trans_dpis = self.comp_obj.get_trans()
        else:
            self.comp_trans_dpis = self.lines_trans_dpis
        self.flor_resp_obj.set_theta_phi(self.theta, self.phi)
        self.calc_resp_dpis()
        self.calc_tot_resp_dpis()
    def update_trans(self, theta, phi):
        # Recompute only the transmission factors (not the interpolated
        # response tables) for a new direction.
        self.mask_obj.set_theta_phi(theta, phi)
        self.mask_trans = self.mask_obj.get_trans()
        self.full_struct.set_theta_phi(np.radians(theta), np.radians(phi))
        self._lines_trans_dpis = self.full_struct.get_trans()
        self.lines_trans_dpis = self._lines_trans_dpis*self.mask_trans
        # self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        # self.comp_trans_dpis = self.comp_obj.get_trans()
        if theta > 90.0:
            self.comp_obj.set_theta_phi(np.radians(theta), np.radians(phi))
            self.comp_trans_dpis = self.comp_obj.get_trans()
        else:
            self.comp_trans_dpis = self.lines_trans_dpis
        self.calc_tot_resp_dpis()
    def open_resp_file_obj(self, fname):
        # Load (and cache) one tabulated-response file as a ResponseDPI.
        resp_file_obj = ResponseDPI(os.path.join(self.resp_dname,fname),\
                                    self.pha_emins, self.pha_emaxs,\
                                    np.radians(self.phi), self.bl_dmask)
        self.resp_files[fname] = resp_file_obj
    def calc_resp_dpis(self):
        # Weighted sum of the tabulated responses at the chosen grid points.
        self.lines_resp_dpis = np.zeros(self.resp_dpi_shape)
        self.comp_resp_dpis = np.zeros(self.resp_dpi_shape)
        for i in range(len(self.wts)):
            k = self.resp_arr['fname'][self.inds4intp[i]]
            if not k in self.resp_files.keys():
                self.open_resp_file_obj(k)
            self.lines_resp_dpis += self.wts[i]*self.resp_files[k].get_lines_resp_dpis()
            self.comp_resp_dpis += self.wts[i]*self.resp_files[k].get_comp_resp_dpis()
    def calc_tot_resp_dpis(self):
        # Apply the transmission factors and add the fluorescence component.
        lines_dpi = self.lines_resp_dpis*(self.lines_trans_dpis[:,:,np.newaxis])
        comp_dpi = self.comp_resp_dpis*(self.comp_trans_dpis[:,:,np.newaxis])
        self.comp_resp_dpi = comp_dpi
        self.lines_resp_dpi = lines_dpi
        self.non_flor_resp_dpi = lines_dpi + comp_dpi
        self.flor_resp_dpi = self.flor_resp_obj.get_resp_dpi()
        self.tot_resp_dpis = self.non_flor_resp_dpi + self.flor_resp_dpi
    def get_lines_resp_dpis(self):
        return self.lines_resp_dpis
    def get_comp_resp_dpis(self):
        return self.comp_resp_dpis
    def get_flor_resp_dpis(self):
        return self.flor_resp_obj.get_resp_dpi()
    def get_tot_resp_dpis(self):
        return self.tot_resp_dpis
    def get_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        # Fold a photon-flux spectrum through the total response.
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.tot_resp_dpis[:,:,j],axis=1)
        return rate_dpis
    def get_flor_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.flor_resp_dpi[:,:,j],axis=1)
        return rate_dpis
    def get_comp_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.comp_resp_dpi[:,:,j],axis=1)
        return rate_dpis
    def get_photoe_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.lines_resp_dpi[:,:,j],axis=1)
        return rate_dpis
    def get_non_flor_rate_dpis_from_photon_fluxes(self, photon_fluxes):
        rate_dpis = np.zeros((self.ndets,self.Nphabins))
        for j in range(self.Nphabins):
            rate_dpis[:,j] += np.sum(photon_fluxes*self.non_flor_resp_dpi[:,:,j],axis=1)
        return rate_dpis
    def get_intp_theta_phi_wts(self, theta, phi, eps=0.1):
        # Bilinear interpolation points/weights on the tabulated (theta, phi)
        # grid; phi is folded into [0, 45] deg using the mask's symmetry.
        thetas = np.sort(np.unique(self.resp_arr['theta']))
        phis = np.sort(np.unique(self.resp_arr['phi']))
        th0 = np.digitize(theta, thetas) - 1
        if theta == 180.0:
            th0 -= 1
        theta0 = thetas[th0]
        theta1 = thetas[th0+1]
        print theta0, theta1
        # Snap to a grid theta when within eps of it; otherwise blend the two.
        if np.abs(theta0 - theta) < eps:
            ths = [theta0]
            th_wts = [1.0]
        elif np.abs(theta1 - theta) < eps:
            ths = [theta1]
            th_wts = [1.0]
        else:
            ths = [theta0, theta1]
            dth = theta1 - theta0
            th_wts = [(theta1 - theta)/dth, (theta - theta0)/dth]
        # NOTE(review): (int(phi)/45) relies on Python 2 integer division;
        # under Python 3 this would need int(phi)//45.
        phi_ = phi - (int(phi)/45)*45.0
        print phi_
        if (int(phi)/45)%2 == 1:
            phi_ = 45.0 - phi_
        print phi_
        ph0 = np.digitize(phi_, phis) - 1
        if phi_ == 45.0:
            ph0 -= 1
        phi0 = phis[ph0]
        phi1 = phis[ph0+1]
        if np.abs(phi0 - phi_) < eps:
            phs = [phi0]
            ph_wts = [1.0]
        elif np.abs(phi1 - phi_) < eps:
            phs = [phi1]
            ph_wts = [1.0]
        else:
            phs = [phi0, phi1]
            dph = phi1 - phi0
            ph_wts = [(phi1 - phi_)/dph, (phi_ - phi0)/dph]
        # Combine theta and phi weights; at the poles phi is degenerate.
        ths_ = []
        phs_ = []
        wts = []
        for i in range(len(ths)):
            if ths[i] == 0.0 or ths[i] == 180.0:
                ths_.append(ths[i])
                phs_.append(0.0)
                wts.append(th_wts[i])
                continue
            for j in range(len(phs)):
                ths_.append(ths[i])
                phs_.append(phs[j])
                wts.append(th_wts[i]*ph_wts[j])
        return ths_, phs_, wts
@njit(cache=True, fastmath=True)
def pois_norm_conv_n0(mu, sig):
    # Closed-form Poisson(mu) x Normal(sig) convolution term for N = 0 counts.
    var = sig**2
    return np.exp(((var - mu)**2 - mu**2)/(2.*var))
@njit(cache=True, fastmath=True)
def pois_norm_conv_n1(mu, sig):
    # Closed-form Poisson x Normal convolution term for N = 1 counts.
    var = sig**2
    return (mu - var)*np.exp((var/2.) - mu)
@njit(cache=True, fastmath=True)
def pois_norm_conv_n2(mu, sig):
    # Closed-form Poisson x Normal convolution term for N = 2 counts.
    var = sig**2
    gfac = np.exp((var/2.) - mu)
    return gfac*(-mu*var + .5*(mu**2 + var**2 + var))
@njit(cache=True, fastmath=True)
def pois_norm_conv_n3(mu, sig):
    # Closed-form Poisson x Normal convolution term for N = 3 counts.
    var = sig**2
    gfac = np.exp((var/2.) - mu)
    return gfac*.5*(((mu - var)**3)/3.0 + var*(mu - var))
@njit(cache=True, fastmath=True)
def pois_norm_conv_n4(mu, sig):
    # Closed-form Poisson x Normal convolution term for N = 4 counts.
    var = sig**2
    gfac = np.exp((var/2.) - mu)
    d = mu - var
    return (gfac/24.0)*(d**4 + 6*(var*d**2) + 3*(var**2))
@njit(cache=True, fastmath=True)
def pois_norm_conv_n5(mu, sig):
    # Closed-form Poisson x Normal convolution term for N = 5 counts.
    var = sig**2
    gfac = np.exp((var/2.) - mu)
    d = mu - var
    return (gfac/(5*24.0))*(d**5 + 5*2*(var*d**3) + 5*3*(var**2)*d)
@njit(cache=True, fastmath=True)
def pois_norm_conv_n6(mu, sig):
    # Closed-form Poisson x Normal convolution term for N = 6 counts.
    var = sig**2
    gfac = np.exp((var/2.) - mu)
    d = mu - var
    return (gfac/(6*5*24.0))*(d**6 + 5*3*(var*d**4) +
                              5*3*3*(var**2)*d**2 + 5*3*(var**3))
@njit(cache=True, fastmath=True)
def pois_norm_conv_n7(mu, sig):
    # Closed-form Poisson x Normal convolution term for N = 7 counts.
    var = sig**2
    gfac = np.exp((var/2.) - mu)
    d = mu - var
    return (gfac/(7*6*5*24.0))*(d**7 + 7*3*(var*d**5) +
                                7*5*3*(var**2)*d**3 + 7*5*3*(var**3)*d)
@njit(cache=True, fastmath=True)
def num_factorial(N):
    # Iterative factorial as a float (numba-friendly; avoids math.gamma).
    out = 1.0
    for k in range(1, N+1):
        out *= k
    return out
@njit(cache=True, fastmath=True)
def pois_norm_num_conv(mu, sig, N):
    # Numerical Poisson(mu) x Normal(sig) convolution for N counts:
    # Riemann sum over a +/- 4-sigma window (clipped at 0), 256 steps.
    acc = 0.0
    n_steps = 256
    step = 8.0*sig/n_steps
    amp = (1./(2.*np.pi*sig**2))**.5
    n_fact = num_factorial(N)
    lo = mu - step*n_steps/2
    if lo < 0:
        lo = 0.0
    for k in range(n_steps):
        m = lo + k*step
        gauss = amp*np.exp(-(m - mu)**2/(2*sig**2))
        pois = ((m**N)/n_fact)*np.exp(-m)
        acc += gauss*pois*step
    return acc
@njit(cache=True, fastmath=True)
def logl_pois_norm_conv(mu, sig, N, size):
    # Sum of per-element log-likelihoods of the Poisson x Normal convolution:
    # closed-form terms for N <= 7, numerical convolution above that.
    total = 0.0
    for idx in range(size):
        n_i = N[idx]
        m = mu[idx]
        s = sig[idx]
        if n_i == 0:
            term = np.log(pois_norm_conv_n0(m, s))
        elif n_i == 1:
            term = np.log(pois_norm_conv_n1(m, s))
        elif n_i == 2:
            term = np.log(pois_norm_conv_n2(m, s))
        elif n_i == 3:
            term = np.log(pois_norm_conv_n3(m, s))
        elif n_i == 4:
            term = np.log(pois_norm_conv_n4(m, s))
        elif n_i == 5:
            term = np.log(pois_norm_conv_n5(m, s))
        elif n_i == 6:
            term = np.log(pois_norm_conv_n6(m, s))
        elif n_i == 7:
            term = np.log(pois_norm_conv_n7(m, s))
        else:
            term = np.log(pois_norm_num_conv(m, s, n_i))
        total += term
    return total
class LLH_webins(object):
    """
    Poisson log-likelihood of binned event data given a rate model.

    Selects events inside the configured time window(s), histograms
    them into (energy bin, detector) count images, and evaluates the
    log-likelihood of a model's predicted rates.  With has_err=True a
    Poisson-Normal convolved likelihood is used so the model rates can
    carry Gaussian uncertainties.
    """

    def __init__(self, event_data, ebins0, ebins1,
                 bl_dmask, t0=None, t1=None,
                 model=None, has_err=False):
        """
        Parameters:
            event_data: event table with (at least) a 'TIME' column
            ebins0, ebins1: lower/upper energy bin edges
            bl_dmask: boolean detector mask
            t0, t1: optional window start/stop time(s), scalar or array
            model: optional model object to attach now
            has_err: if True, use the Poisson-Normal convolved llh
        """
        self._all_data = event_data
        self.ebins0 = ebins0
        self.ebins1 = ebins1
        self.nebins = len(ebins0)
        self.bl_dmask = bl_dmask
        self.t0 = 0.0
        self.t1 = 0.0
        self.ebin = -1  # -1 means "use all energy bins"
        self.set_has_error(has_err)
        if t0 is not None and t1 is not None:
            self.set_time(t0, t1)
        if model is not None:
            self.set_model(model)

    def set_time(self, t0, t1):
        '''
        Sets the analysis time window(s) and re-bins the data.

        Parameters:
            t0: start time(s) in MET seconds (scalar or array)
            t1: stop time(s) in MET seconds (scalar or array)
        '''
        if np.isscalar(t0):
            t0 = np.array([t0])
        if np.isscalar(t1):
            t1 = np.array([t1])
        # nothing to do if the window is unchanged
        if np.all(self.t0 == t0) and np.all(self.t1 == t1):
            return
        self.t0 = t0
        self.dt = 0.0
        self.t1 = t1
        # mask of events inside any [t0_i, t1_i) window
        # (builtin bool: the np.bool alias was removed from NumPy)
        t_bl = np.zeros(len(self._all_data), dtype=bool)
        for i in range(len(self.t0)):
            t_bl = np.logical_or((self._all_data['TIME'] >= self.t0[i]) &
                                 (self._all_data['TIME'] < self.t1[i]), t_bl)
            self.dt += self.t1[i] - self.t0[i]
        self.data = self._all_data[t_bl]
        self.data_dpis = np.array(det2dpis(self.data, self.ebins0,
                                           self.ebins1, bl_dmask=self.bl_dmask))
        self.data_dpis_flat = np.ravel(self.data_dpis)
        # gammaln(N+1) terms pre-computed for the Poisson log-prob
        self.gamma_vals = get_gammaln(self.data_dpis)
        self.data_size = self.data_dpis.size

    def set_model(self, model):
        # Attach the model whose rates will be compared to the data.
        self.model = model
        self.nparams = self.model.nparams

    def set_ebin(self, j):
        # Select a single energy bin j, or all bins if j is 'all'.
        if 'all' in str(j):
            self.ebin = -1
        else:
            self.ebin = j

    def set_has_error(self, has_error):
        # Toggle the Poisson-Normal convolved likelihood.
        self.has_error = has_error

    def get_llh(self, params):
        """Return the log-likelihood of `params` for the current data.

        NOTE(review): the has_error branch always uses all energy bins,
        regardless of self.ebin — confirm that is intended.
        """
        if self.has_error:
            mod_rate, mod_rate_err = self.model.get_rate_dpis_err(params, ret_rate_dpis=True)
            if not np.all(mod_rate > 0):
                return -np.inf
            llh = logl_pois_norm_conv(np.ravel(mod_rate*self.dt),
                                      np.ravel(mod_rate_err*self.dt),
                                      self.data_dpis_flat, self.data_size)
        else:
            if self.ebin < 0:
                mod_cnts = self.model.get_rate_dpis(params)*self.dt
                if np.any(mod_cnts <= 0):
                    return -np.inf
                llh = np.sum(log_pois_prob(mod_cnts, self.data_dpis,
                                           gam_val=self.gamma_vals))
            else:
                mod_cnts = self.model.get_rate_dpi(params, self.ebin)*self.dt
                if np.any(mod_cnts <= 0):
                    return -np.inf
                llh = np.sum(log_pois_prob(mod_cnts, self.data_dpis[self.ebin],
                                           gam_val=self.gamma_vals[self.ebin]))
        return llh

    def get_logprior(self, params):
        # Model log-prior (0.0 when the model defines none).
        lp = 0.0
        if self.model.has_prior:
            if self.ebin < 0:
                j = None
            else:
                j = self.ebin
            lp = self.model.get_log_prior(params, j=j)
        return lp

    def get_logprob(self, params):
        """Log-posterior: log-prior + log-likelihood."""
        logp = self.get_logprior(params)
        llh = self.get_llh(params)
        return logp + llh

    def get_logprob_jacob(self, params):
        """Gradient terms of the negative log-posterior w.r.t. free params."""
        if self.ebin < 0:
            mod_cnts = self.model.get_rate_dpis(params)*self.dt
            if np.any(np.isclose(mod_cnts, 0)):
                # guard against dividing by zero counts below
                mod_cnts = 1e-6*np.ones_like(mod_cnts)
            fact = (1. - (self.data_dpis / mod_cnts))
            dNs_dparam = self.model.get_dr_dps(params)
            # range, not the Python-2-only xrange
            jacob = [np.sum(fact*dNs_dparam[i])*self.dt for i
                     in range(len(dNs_dparam))]
        else:
            mod_cnts = self.model.get_rate_dpi(params, self.ebin)*self.dt
            if np.any(np.isclose(mod_cnts, 0)):
                mod_cnts = 1e-6*np.ones_like(mod_cnts)
            fact = (1. - (self.data_dpis[self.ebin] / mod_cnts))
            dR_dparams = self.model.get_dr_dp(params, self.ebin)
            if self.model.has_prior:
                dNLP_dparams = self.model.get_dnlp_dp(params, self.ebin)
            else:
                dNLP_dparams = np.zeros(len(dR_dparams))
            jacob = [dNLP_dparams[i] + np.sum(fact*dR_dparams[i])*self.dt
                     for i in range(len(dR_dparams))]
        return jacob

    def get_logprob_hess(self, params):
        """Approximate Hessian of the negative log-posterior.

        Only implemented for a single selected energy bin.
        """
        if self.ebin < 0:
            print("Not supported for multiple ebins yet")
            return 0
        else:
            mod_cnts = self.model.get_rate_dpi(params, self.ebin)*self.dt
            if np.any(np.isclose(mod_cnts, 0)):
                mod_cnts = 1e-6*np.ones_like(mod_cnts)
            fact = (self.data_dpis[self.ebin])/np.square(mod_cnts)
            dR_dparams = self.model.get_dr_dp(params, self.ebin)
            Ndim = len(dR_dparams)
            dNLProb_hess = np.zeros((Ndim, Ndim))
            for i in range(Ndim):
                dNLProb_hess[i, i] = np.sum(np.square(dR_dparams[i]*self.dt)*fact)
                # fill the symmetric off-diagonal terms
                for j in range(i+1, Ndim):
                    dNLProb_hess[i, j] = np.sum((dR_dparams[i]*self.dt) *
                                                (dR_dparams[j]*self.dt)*fact)
                    dNLProb_hess[j, i] += dNLProb_hess[i, j]
            if self.model.has_prior:
                dNLProb_hess += self.model.get_hess_nlogprior(params, self.ebin)
            return dNLProb_hess
class Bkg_Model_wFlatA(Model):
    """
    Background model with a per-energy-bin rate split between a
    diffuse component (proportional to each detector's solid angle)
    and a flat component, mixed by the 'flat_j' fraction:

        rate_dpi_j = rate_j*((1-flat_j)*solid_ang/mean_solid_ang + flat_j)
    """

    def __init__(self, bl_dmask, solid_ang_dpi, nebins,
                 use_prior=False, use_deriv=False):
        """
        Parameters:
            bl_dmask: boolean detector mask
            solid_ang_dpi: per-detector solid angle image
            nebins: number of energy bins
            use_prior: enable Gaussian priors on the rates (set later
                via set_prior/set_bkg_row)
            use_deriv: advertise analytic derivatives
        """
        self.sa_dpi = solid_ang_dpi
        self.solid_angs = solid_ang_dpi[bl_dmask]
        self.solid_ang_mean = np.mean(self.solid_angs)
        # range, not the Python-2-only xrange
        self.rate_names = ['bkg_rate_' + str(i) for i in range(nebins)]
        self.flat_names = ['flat_' + str(i) for i in range(nebins)]
        param_names = self.rate_names
        param_names += self.flat_names
        param_dict = {}
        for i, pname in enumerate(param_names):
            pdict = {}
            if 'rate' in pname:
                pdict['bounds'] = (5e-5, 1e2)
                pdict['val'] = 0.05
            else:
                # flat fraction lives in [0, 1]
                pdict['bounds'] = (0.0, 1.0)
                pdict['val'] = 0.25
            pdict['nuis'] = True
            pdict['fixed'] = False
            param_dict[pname] = pdict
        super(Bkg_Model_wFlatA, self).__init__('Background', bl_dmask,
                                               param_names, param_dict,
                                               nebins, has_prior=use_prior)
        self._rate_ones = np.ones(self.ndets)
        self._rate_zeros = np.zeros(self.ndets)
        self.bkg_sigs = np.zeros(self.nebins)
        self.err_factor = 1.0
        if use_deriv:
            self.has_deriv = True

    def set_bkg_row(self, bkg_row, bkg_name='', fix_flats=True, err_factor=2.0):
        """Load fitted background rates/errors/flat fractions from a
        result row; use them as starting values and rate priors.

        bkg_name: optional column-name prefix.
        fix_flats: if True, freeze the flat-fraction parameters.
        """
        self.bkg_row = bkg_row
        bkg_rates = np.array([bkg_row[bkg_name + 'bkg_rate_' + str(j)]
                              for j in range(self.nebins)])
        bkg_rate_errs = np.array([bkg_row['err_' + bkg_name + 'bkg_rate_' + str(j)]
                                  for j in range(self.nebins)])
        bkg_flats = np.array([bkg_row[bkg_name + 'flat_' + str(j)]
                              for j in range(self.nebins)])
        self.flat_vals = bkg_flats
        for j, pname in enumerate(self.flat_names):
            self.param_dict[pname]['val'] = bkg_flats[j]
            self.param_dict[self.rate_names[j]]['val'] = bkg_rates[j]
            if fix_flats:
                self.param_dict[pname]['fixed'] = True
                self.param_dict[pname]['nuis'] = False
        self.set_prior(bkg_rates, bkg_rate_errs, err_factor=err_factor)

    def set_prior(self, exp_rates, bkg_sigs, err_factor=2.0):
        """Set an independent Gaussian prior on each bin's rate,
        with width err_factor*bkg_sigs[j]."""
        self.exp_rates = exp_rates
        self.bkg_sigs = bkg_sigs
        self.err_factor = err_factor
        self.log_prior_funcs = []
        for j in range(self.nebins):
            self.log_prior_funcs.append(Norm_1D(self.exp_rates[j],
                                                np.square(self.err_factor*self.bkg_sigs[j])))

    def get_rate_dpis(self, params):
        # Stack the per-bin rate DPIs into (nebins, ndets).
        rate_dpis = np.zeros((self.nebins, self.ndets))
        for j in range(self.nebins):
            rate_dpis[j] += self.get_rate_dpi(params, j)
        return rate_dpis

    def get_rate_dpi(self, params, j):
        # rate * (diffuse part scaled by relative solid angle + flat part)
        rate = params[self.rate_names[j]]
        flat_A = params[self.flat_names[j]]
        diff_A = 1. - flat_A
        rate_dpi = rate*((diff_A/self.solid_ang_mean)*self.solid_angs + flat_A)
        return rate_dpi

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        """Per-bin rate DPIs and their errors; optionally return both."""
        rate_dpis_err = np.zeros((self.nebins, self.ndets))
        rate_dpis = np.zeros((self.nebins, self.ndets))
        for j in range(self.nebins):
            rate_dpi, rate_dpi_err = self.get_rate_dpi_err(params, j, ret_rate_dpi=True)
            rate_dpis[j] += rate_dpi
            rate_dpis_err[j] += rate_dpi_err
        if ret_rate_dpis:
            return rate_dpis, rate_dpis_err
        return rate_dpis_err

    def get_rate_dpi_err(self, params, j, ret_rate_dpi=False):
        # Flat per-det error: quadrature sum of the scaled prior width
        # and a 4% systematic on the rate itself, so low-solid-angle
        # dets carry a larger fractional error.
        bkg_sig = self.bkg_sigs[j]*self.err_factor
        rate_dpi = self.get_rate_dpi(params, j)
        eff_err = 0.04
        rate_dpi_err = np.sqrt(bkg_sig**2 + (eff_err*rate_dpi)**2)
        return rate_dpi, rate_dpi_err

    def get_dr_dps(self, params):
        """Rate derivatives for all free (rate, flat) params, each
        expanded to a (nebins, ndets) block non-zero only in its bin."""
        dr_dbrs = []
        dr_dlrs = []
        for j in range(self.nebins):
            if self.param_dict[self.rate_names[j]]['fixed'] and self.param_dict[self.flat_names[j]]['fixed']:
                continue
            e_zeros = np.zeros((self.nebins, self.ndets))
            e_zeros[j, :] = 1.0
            # NOTE(review): assumes rate and flat params of a bin are
            # free/fixed together; drdps[1] would IndexError otherwise
            drdps = self.get_dr_dp(params, j)
            dr_dbrs.append(drdps[0]*e_zeros)
            dr_dlrs.append(drdps[1]*e_zeros)
        dr_dps = dr_dbrs
        dr_dps += dr_dlrs
        return dr_dps

    def get_dr_dp(self, params, j):
        """Rate derivatives for bin j's free params (rate then flat)."""
        dr_dps = []
        rate = params[self.rate_names[j]]
        flat_A = params[self.flat_names[j]]
        diff_A = 1. - flat_A
        # d(rate_dpi)/d(rate)
        if not self.param_dict[self.rate_names[j]]['fixed']:
            dr_dps.append(diff_A*self.solid_angs /
                          self.solid_ang_mean + flat_A)
        # d(rate_dpi)/d(flat_A)
        if not self.param_dict[self.flat_names[j]]['fixed']:
            dr_dps.append(rate*(1. - (self.solid_angs/self.solid_ang_mean)))
        return dr_dps

    def get_log_prior(self, params, j=None):
        """Gaussian log-prior on the rate params (set via set_prior).

        j: restrict to energy bin j; None sums over all bins.
        """
        lp = 0.0
        # Count each bin's rate prior exactly once.  The original
        # looped over ALL param names (rates AND flats), double
        # counting each bin, and indexed log_prior_funcs with the
        # argument j (a TypeError when j is None); this matches the
        # single-count convention of get_dnlp_dp/get_hess_nlogprior.
        for j0 in range(self.nebins):
            if j is not None and j != j0:
                continue
            lp += self.log_prior_funcs[j0].logpdf(params[self.rate_names[j0]])
        return lp

    def get_dnlp_dp(self, params, j):
        """Gradient of the negative log-prior for bin j's rate."""
        pname = self.rate_names[j]
        if self.param_dict[pname]['fixed']:
            return []
        dnlp_dps = -1*self.log_prior_funcs[j].jacob_log_pdf(params[pname])
        return list(dnlp_dps)

    def get_hess_nlogprior(self, params, j):
        # Hessian of the negative log-prior for bin j's rate.
        return -1*self.log_prior_funcs[j].hess_log_pdf
class Point_Source_Model_Binned_Rates(Model):
    """
    Point-source model with an independent rate parameter per energy
    bin.  The detector illumination is the interpolated ray trace at
    (imx, imy):  rate_dpi_j = ray_trace * rate_j.
    """

    def __init__(self, imx, imy, dimxy,
                 ebins, rt_obj, bl_dmask,
                 name='Point_Source', err_fact=2.0,
                 use_prior=False, rates=None, errs=None,
                 use_deriv=False):
        """
        Parameters:
            imx, imy: initial image-plane position
            dimxy: side of the square (imx, imy) search box
            ebins: (ebins0, ebins1) energy bin edges
            rt_obj: ray-trace provider with get_intp_rt()
            bl_dmask: boolean detector mask
            rates, errs: optional starting rates and prior widths
        """
        self.dimxy = dimxy
        self.imx = imx
        self.imy = imy
        # position bounds: box of side dimxy centered on (imx, imy)
        self.imx0 = imx - dimxy/2.
        self.imx1 = imx + dimxy/2.
        self.imy0 = imy - dimxy/2.
        self.imy1 = imy + dimxy/2.
        self.ebins = ebins
        self.ebins0 = ebins[0]
        self.ebins1 = ebins[1]
        nebins = len(self.ebins0)
        param_names = ['imx', 'imy']
        self.rate_names = ['rate_' + str(i) for i in range(nebins)]
        param_names += self.rate_names
        param_dict = {}
        for pname in param_names:
            pdict = {}
            if pname == 'imx':
                pdict['bounds'] = (self.imx0, self.imx1)
                pdict['val'] = self.imx
            elif pname == 'imy':
                pdict['bounds'] = (self.imy0, self.imy1)
                pdict['val'] = self.imy
            else:
                if rates is None:
                    pdict['val'] = 1e-1
                else:
                    # int index (was str(pname[-1]), which cannot
                    # index a list/array of rates)
                    j = int(pname[-1])
                    pdict['val'] = rates[j]
                pdict['bounds'] = (5e-8, 1e2)
            pdict['nuis'] = False
            pdict['fixed'] = False
            param_dict[pname] = pdict
        super(Point_Source_Model_Binned_Rates, self).__init__(name, bl_dmask,
                                                              param_names, param_dict,
                                                              nebins, has_prior=use_prior)
        if use_deriv:
            self.has_deriv = True
        self.rt_obj = rt_obj
        # refresh the cached ray trace only when the position moves
        # farther than this
        self._rt_im_update = 1e-7
        # offset forces the first get_rt call to fetch a ray trace
        self._rt_imx = imx - 1
        self._rt_imy = imy - 1
        self._rt = self.get_rt(imx, imy)
        self._rt_imx = imx
        self._rt_imy = imy

    def set_rate_prior(self, rates, errs):
        # Expected rates and Gaussian widths used by get_log_prior.
        self._rates = rates
        self._errs = errs

    def get_rt_wderiv(self, imx, imy):
        """Cached ray trace and its imx/imy derivatives at (imx, imy)."""
        if np.hypot(imx-self._rt_imx, imy-self._rt_imy) >\
                self._rt_im_update:
            rt, drt_dx, drt_dy = self.rt_obj.get_intp_rt(imx, imy, get_deriv=True)
            self._rt = rt[self.bl_dmask]
            self._drt_dx = drt_dx[self.bl_dmask]
            self._drt_dy = drt_dy[self.bl_dmask]
            self._rt_imx = imx
            self._rt_imy = imy
            self._rt_sum = np.sum(self._rt)
        return self._rt, self._drt_dx, self._drt_dy

    def get_rt(self, imx, imy):
        """Cached ray trace at (imx, imy)."""
        if np.hypot(imx-self._rt_imx, imy-self._rt_imy) >\
                self._rt_im_update:
            rt = self.rt_obj.get_intp_rt(imx, imy)
            self._rt = rt[self.bl_dmask]
            self._rt_imx = imx
            self._rt_imy = imy
            self._rt_sum = np.sum(self._rt)
        return self._rt

    def get_rate_dpis(self, params):
        # (nebins, ndets) array: ray trace scaled by each bin's rate.
        imx = params['imx']
        imy = params['imy']
        rt = self.get_rt(imx, imy)
        rate_dpis = np.array([rt*params[pname] for pname in
                              self.rate_names])
        return rate_dpis

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        # 4% systematic error on the model rates.
        imx = params['imx']
        imy = params['imy']
        rt = self.get_rt(imx, imy)
        rate_dpis = np.array([rt*params[pname] for pname in
                              self.rate_names])
        rate_dpis_err = 0.04*rate_dpis
        if ret_rate_dpis:
            return rate_dpis, rate_dpis_err
        return rate_dpis_err

    def get_rate_dpi(self, params, j):
        # Single-bin rate DPI.
        imx = params['imx']
        imy = params['imy']
        rt = self.get_rt(imx, imy)
        rate_dpi = rt*params[self.rate_names[j]]
        return rate_dpi

    def get_log_prior(self, params):
        """Gaussian log-prior on the rate params (set_rate_prior)."""
        lp = 0.0
        # Only the rate params carry a prior.  The original iterated
        # every param (Py2 iteritems), so int('x') on 'imx'/'imy'
        # raised ValueError.
        for pname in self.rate_names:
            j = int(pname[-1])
            lp += stats.norm.logpdf(params[pname], loc=self._rates[j],
                                    scale=self._errs[j])
        return lp

    def get_dr_dps(self, params):
        """Rate derivatives for ([imx, imy,] rate_0..rate_{n-1})."""
        imx = params['imx']
        imy = params['imy']
        rt = self.get_rt(imx, imy)
        # one per-ebin block per rate param; non-zero only in its bin
        dr_dps = []
        for i in range(self.nebins):
            one = np.zeros(self.nebins)
            one[i] = 1.0
            dr_dps.append([rt*one[ii] for ii in range(self.nebins)])
        if self.param_dict['imx']['fixed']:
            return dr_dps
        # Position derivatives need the ray-trace gradient.  The
        # original referenced undefined names here (rate_pdet_ebins,
        # drt_dimx/drt_dimy) and would NameError.
        rt, drt_dimx, drt_dimy = self.get_rt_wderiv(imx, imy)
        rates = np.array([params[pname] for pname in self.rate_names])
        dr_dimx = rates[:, np.newaxis]*drt_dimx
        dr_dimy = rates[:, np.newaxis]*drt_dimy
        dr_dps = [dr_dimx, dr_dimy] + dr_dps
        return dr_dps

    def get_dr_dp(self, params, j):
        """Rate derivative for bin j's rate param (if free)."""
        dr_dps = []
        imx = params['imx']
        imy = params['imy']
        if self.param_dict[self.rate_names[j]]['fixed']:
            return []
        rt = self.get_rt(imx, imy)
        dr_dps = [rt]
        return dr_dps
class CompoundModel(Model):
    """
    Composite model that sums the rate DPIs of several sub-models and
    exposes each sub-model's parameters as '<model name>_<param name>'.
    """

    def __init__(self, model_list, name=None):
        self.model_list = model_list
        self.Nmodels = len(model_list)
        self.model_names = [model.name for model in model_list]
        if name is None:
            # default name: sub-model names joined by '+'
            name = '+'.join(self.model_names)
        param_names = []
        # maps model name -> {sub-model param name: compound param name}
        self.param_name_map = {}
        param_dict = {}
        has_prior = False
        Tdep = False
        self.ntbins = 0
        for model in self.model_list:
            if model.has_prior:
                has_prior = True
            if model.Tdep:
                Tdep = True
                self.ntbins = max(self.ntbins, model.ntbins)
            mname = model.name
            pname_map = {}
            for pname in model.param_names:
                if mname == '':
                    _name = pname
                else:
                    _name = mname + '_' + pname
                param_names.append(_name)
                param_dict[_name] = model.param_dict[pname]
                pname_map[pname] = _name
            self.param_name_map[mname] = pname_map
        bl_dmask = self.model_list[0].bl_dmask
        super(CompoundModel, self).__init__(name, bl_dmask,
                                            param_names, param_dict,
                                            self.model_list[0].nebins,
                                            has_prior=has_prior, Tdep=Tdep)
        # per-ebin cache of the last evaluated (params, rate_dpi)
        self._last_params_ebin = [{} for i in range(self.nebins)]
        self._last_rate_dpi = [np.ones(self.ndets) for i in range(self.nebins)]

    def get_model_params(self, params):
        """Split compound params into one dict per sub-model."""
        param_list = []
        for model in self.model_list:
            param = {}
            pname_map = self.param_name_map[model.name]
            for k in model.param_names:
                param[k] = params[pname_map[k]]
            param_list.append(param)
        return param_list

    def get_rate_dpis(self, params, **kwargs):
        """Sum of all sub-model rate DPIs; time axis added if any
        sub-model is time-dependent."""
        if self.Tdep:
            ntbins = self.ntbins
            rate_dpis = np.zeros((ntbins, self.nebins, self.ndets))
        else:
            rate_dpis = np.zeros((self.nebins, self.ndets))
        for model in self.model_list:
            param = {}
            pname_map = self.param_name_map[model.name]
            for k in model.param_names:
                param[k] = params[pname_map[k]]
            if model.Tdep:
                rate_dpis += model.get_rate_dpis(param)
            else:
                if self.Tdep:
                    # broadcast a static model across the time bins
                    rate_dpi = (model.get_rate_dpis(param)[np.newaxis, :, :])
                    rate_dpis += np.ones_like(rate_dpis)*rate_dpi
                else:
                    rate_dpis += model.get_rate_dpis(param)
        return rate_dpis

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        """Summed rates and quadrature-summed errors over sub-models."""
        rate_dpis = np.zeros((self.nebins, self.ndets))
        err_dpis2 = np.zeros_like(rate_dpis)
        for model in self.model_list:
            param = {}
            pname_map = self.param_name_map[model.name]
            for k in model.param_names:
                param[k] = params[pname_map[k]]
            rate_dpi, err_dpi = model.get_rate_dpis_err(param, ret_rate_dpis=True)
            rate_dpis += rate_dpi
            err_dpis2 += err_dpi**2
        if ret_rate_dpis:
            return rate_dpis, np.sqrt(err_dpis2)
        return np.sqrt(err_dpis2)

    def get_rate_dpi(self, params, j, **kwargs):
        """Summed sub-model rate DPI for energy bin j (cached)."""
        if params == self._last_params_ebin[j]:
            return self._last_rate_dpi[j]
        if self.Tdep:
            ntbins = self.ntbins
            rate_dpi = np.zeros((ntbins, self.ndets))
        else:
            rate_dpi = np.zeros(self.ndets)
        for model in self.model_list:
            param = {}
            pname_map = self.param_name_map[model.name]
            for k in model.param_names:
                param[k] = params[pname_map[k]]
            if model.Tdep:
                rate_dpi += model.get_rate_dpi(param, j)
            else:
                if self.Tdep:
                    rate_dpi_ = model.get_rate_dpi(param, j)[np.newaxis, :]
                    rate_dpi += np.ones_like(rate_dpi)*rate_dpi_
                else:
                    try:
                        rate_dpi += model.get_rate_dpi(param, j)
                    except Exception as E:
                        # fall back for models without a single-bin API
                        print(E)
                        rate_dpi += model.get_rate_dpis(param)[j]
        # store a copy: caching the caller's dict by reference would
        # give stale hits if the caller mutates it in place
        self._last_params_ebin[j] = dict(params)
        self._last_rate_dpi[j] = rate_dpi
        return rate_dpi

    def get_log_prior(self, params, j=None):
        """Sum of sub-model log-priors (bin-restricted where supported)."""
        lp = 0.0
        if self.has_prior:
            param_list = self.get_model_params(params)
            for i, model in enumerate(self.model_list):
                if model.has_prior:
                    try:
                        lp += model.get_log_prior(param_list[i], j=j)
                    except TypeError:
                        # sub-model prior has no per-bin signature
                        # (was a bare except, which hid real errors)
                        lp += model.get_log_prior(param_list[i])
        return lp

    def get_dr_dps(self, params):
        """Concatenated rate derivatives from deriv-capable sub-models."""
        dr_dps = []
        for i, model in enumerate(self.model_list):
            param_list = self.get_model_params(params)
            if model.has_deriv:
                dr_dps += model.get_dr_dps(param_list[i])
        return dr_dps

    def get_dr_dp(self, params, j):
        """Concatenated single-bin rate derivatives from sub-models."""
        dr_dps = []
        for i, model in enumerate(self.model_list):
            param_list = self.get_model_params(params)
            if model.has_deriv:
                dr_dps += model.get_dr_dp(param_list[i], j)
        return dr_dps

    def get_dnlp_dp(self, params, j):
        """Concatenated negative log-prior gradients from sub-models."""
        dNLP_dp = []
        if self.has_prior:
            param_list = self.get_model_params(params)
            for i, model in enumerate(self.model_list):
                if model.has_prior:
                    dNLP_dp += model.get_dnlp_dp(param_list[i], j)
        return dNLP_dp

    def get_hess_nlogprior(self, params, j):
        """Block-diagonal negative log-prior Hessian over sub-models."""
        Ndim = 0
        hess_list = []
        if self.has_prior:
            param_list = self.get_model_params(params)
            for i, model in enumerate(self.model_list):
                if model.has_prior:
                    hess = model.get_hess_nlogprior(param_list[i], j)
                    hess_list.append(hess)
                    Ndim += hess.shape[0]
        hess_nlogprior = np.zeros((Ndim, Ndim))
        i0 = 0
        for hess in hess_list:
            Nd = hess.shape[0]
            i1 = i0 + Nd
            hess_nlogprior[i0:i1, i0:i1] += hess
            i0 = i1
        return hess_nlogprior
class Source_Model_OutFoV(Model):
    """
    Out-of-FoV source model: detector response (fluorescence plus
    non-fluorescence components) at (theta, phi), scaled by a flux
    model normalization 'A'.
    """

    def __init__(self, flux_model,
                 ebins, bl_dmask,
                 name='Signal', use_deriv=False,
                 use_prior=False):
        """
        Parameters:
            flux_model: spectral model providing param_names/bounds
                and get_photon_fluxes()
            ebins: (ebins0, ebins1) energy bin edges
            bl_dmask: boolean detector mask
        """
        self.fmodel = flux_model
        self.ebins = ebins
        self.ebins0 = ebins[0]
        self.ebins1 = ebins[1]
        nebins = len(self.ebins0)
        # directory holding the fluorescence response tables
        self.flor_resp_dname = '/storage/work/jjd330/local/bat_data/resp_tabs/'
        param_names = ['theta', 'phi']
        param_names += self.fmodel.param_names
        param_dict = {}
        for pname in param_names:
            pdict = {}
            if pname == 'theta':
                pdict['bounds'] = (0.0, 180.0)
                pdict['val'] = 90.0
                pdict['nuis'] = False
            elif pname == 'phi':
                pdict['bounds'] = (0.0, 360.0)
                pdict['val'] = 180.0
                pdict['nuis'] = False
            else:
                # spectral params come from the flux model
                pdict['bounds'] = self.fmodel.param_bounds[pname]
                if hasattr(self.fmodel, "param_guess"):
                    pdict['val'] = self.fmodel.param_guess[pname]
                else:
                    pdict['val'] = (pdict['bounds'][1] +
                                    pdict['bounds'][0])/2.
                pdict['nuis'] = False
            pdict['fixed'] = False
            param_dict[pname] = pdict
        super(Source_Model_OutFoV, self).__init__(name, bl_dmask,
                                                  param_names, param_dict, nebins,
                                                  has_prior=use_prior)
        if use_deriv:
            self.has_deriv = True
        self.get_batxys()
        # fractional systematics on the two response components
        self.flor_err = 0.2
        self.non_flor_err = 0.05
        self.ones = np.ones(self.ndets)

    def get_batxys(self):
        # Physical BAT coordinates of the unmasked detectors.
        yinds, xinds = np.where(self.bl_dmask)
        self.batxs, self.batys = detxy2batxy(xinds, yinds)

    def set_theta_phi(self, theta, phi):
        """Build a response object for source direction (theta, phi)."""
        self.resp_obj = ResponseOutFoV(self.flor_resp_dname, self.ebins0, self.ebins1, self.bl_dmask)
        self._theta = theta
        self._phi = phi
        self.resp_obj.set_theta_phi(theta, phi)

    def set_flux_params(self, flux_params):
        """Pre-compute unit-normalization ('A'=1) rate DPIs and errors
        for the given spectral params."""
        # deepcopy so the caller's dict is not mutated when 'A' is
        # forced to 1 below (matches Source_Model_InFoV)
        self.flux_params = deepcopy(flux_params)
        resp_ebins = np.append(self.resp_obj.PhotonEmins, [self.resp_obj.PhotonEmaxs[-1]])
        self.flux_params['A'] = 1.0
        self.normed_photon_fluxes = self.fmodel.get_photon_fluxes(resp_ebins, self.flux_params)
        self.normed_rate_dpis = np.swapaxes(self.resp_obj.get_rate_dpis_from_photon_fluxes(
            self.normed_photon_fluxes), 0, 1)
        # quadrature sum of the fluorescence and non-fluorescence
        # systematic errors
        self.normed_err_rate_dpis = np.swapaxes(np.sqrt((self.flor_err*self.resp_obj.
            get_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes))**2 +
            (self.non_flor_err*self.resp_obj.
             get_non_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes))**2), 0, 1)

    def get_rate_dpis(self, params):
        # Rates scale linearly with the normalization 'A'.
        theta = params['theta']
        phi = params['phi']
        A = params['A']
        return A*self.normed_rate_dpis

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        err_rate_dpis = params['A']*self.normed_err_rate_dpis
        if ret_rate_dpis:
            rate_dpis = self.get_rate_dpis(params)
            return rate_dpis, err_rate_dpis
        return err_rate_dpis

    def get_rate_dpi(self, params, j):
        # was: bare name A (NameError); read the norm from params.
        # NOTE(review): indexes axis 1 with j while get_rate_dpis
        # treats axis 0 as the energy axis — confirm the intended axis.
        return params['A']*self.normed_rate_dpis[:, j]

    def get_log_prior(self, params, j=None):
        # NOTE(review): relies on self.frac_names/self.prior_func,
        # which are not set anywhere in this class — confirm they are
        # attached externally before enabling use_prior.
        lp = 0.0
        for pname in self.frac_names:
            if int(pname[-1]) == j or j is None:
                lp += self.prior_func(params, pname)
        return lp

    def get_dnlp_dp(self, params, j):
        # NOTE(review): same external-attribute caveat as get_log_prior.
        dnlp_dps = []
        for pname in self.frac_names:
            if int(pname[-1]) == j or j is None:
                dnlp_dps.append(self.deriv_prior_func(params, pname))
        return dnlp_dps

    def get_hess_nlogprior(self, params, j):
        # NOTE(review): same external-attribute caveat as get_log_prior.
        return np.array([[self.deriv2_prior_func(params, self.frac_names[j])]])

    def get_dr_dgamma(self, params):
        # NOTE(review): references get_rt/flux2rate/_shadow/_unfp which
        # are not defined in this class — looks inherited from an
        # in-FoV variant; confirm before use.
        rt = self.get_rt(params['imx'], params['imy'])
        drdgs = params['A']*self.flux2rate.get_gamma_deriv(params['gamma'])
        drdgs_trans = params['A']*self.flux2rate_pbtrans.get_gamma_deriv(params['gamma'])
        dr_dgs = np.array([rt*drdg + (self._shadow)*drdgs_trans[i] +
                           self.max_rt*(self._unfp)*drdg*params[self.frac_names[i]]
                           for i, drdg in enumerate(drdgs)])
        return dr_dgs

    def get_dr_dps(self, params):
        """Rate derivatives w.r.t. the free spectral params."""
        dr_dps = []
        for pname in self.param_names:
            if self.param_dict[pname]['fixed']:
                continue
            if pname == 'A':
                # rates are linear in A
                dr_dps.append(self.get_rate_dpis(params)/params['A'])
            elif pname == 'gamma':
                dr_dps.append(self.get_dr_dgamma(params))
        return dr_dps
def theta_phi2imxy(theta, phi):
    """Convert spherical angles theta, phi (degrees) to tangent-plane
    image coordinates (imx, imy)."""
    radial = np.tan(np.radians(theta))
    imx = radial*np.cos(np.radians(phi))
    imy = radial*np.sin(np.radians(-phi))
    return imx, imy
def imxy2theta_phi(imx, imy):
    """Convert tangent-plane image coordinates (imx, imy) to spherical
    angles (theta, phi) in degrees; phi is wrapped into [0, 360)."""
    radial = np.sqrt(imx**2 + imy**2)
    theta = np.rad2deg(np.arctan(radial))
    phi = np.rad2deg(np.arctan2(-imy, imx))
    # arctan2 returns (-180, 180]; shift negatives up by a full turn
    if np.isscalar(phi):
        if phi < 0:
            phi += 360.0
    else:
        neg = (phi < 0)
        if np.any(neg):
            phi[neg] += 360.0
    return theta, phi
class Source_Model_InFoV(Model):
    """
    In-FoV source model combining the fluorescence response with the
    ray-trace-modulated non-fluorescence response at (theta, phi),
    scaled by the flux model normalization 'A'.
    """

    def __init__(self, flux_model,
                 ebins, bl_dmask, rt_obj, fp_obj,
                 name='Signal', use_deriv=False,
                 use_prior=False):
        """
        Parameters:
            flux_model: spectral model providing param_names/bounds
                and get_photon_fluxes()
            ebins: (ebins0, ebins1) energy bin edges
            bl_dmask: boolean detector mask
            rt_obj: ray-trace provider
            fp_obj: footprint provider
        """
        self.fmodel = flux_model
        self.ebins = ebins
        self.ebins0 = ebins[0]
        self.ebins1 = ebins[1]
        nebins = len(self.ebins0)
        # directory holding the fluorescence response tables
        self.flor_resp_dname = '/storage/work/jjd330/local/bat_data/resp_tabs/'
        param_names = ['theta', 'phi']
        param_names += self.fmodel.param_names
        param_dict = {}
        for pname in param_names:
            pdict = {}
            if pname == 'theta':
                pdict['bounds'] = (0.0, 180.0)
                pdict['val'] = 180.0
                pdict['nuis'] = False
            elif pname == 'phi':
                pdict['bounds'] = (0.0, 360.0)
                pdict['val'] = 0.0
                pdict['nuis'] = False
            else:
                # spectral params come from the flux model
                pdict['bounds'] = self.fmodel.param_bounds[pname]
                if hasattr(self.fmodel, "param_guess"):
                    pdict['val'] = self.fmodel.param_guess[pname]
                else:
                    pdict['val'] = (pdict['bounds'][1] +
                                    pdict['bounds'][0])/2.
                pdict['nuis'] = False
            pdict['fixed'] = False
            param_dict[pname] = pdict
        super(Source_Model_InFoV, self).__init__(name, bl_dmask,
                                                 param_names, param_dict, nebins,
                                                 has_prior=use_prior)
        if use_deriv:
            self.has_deriv = True
        self.get_batxys()
        # fractional systematics on the two response components
        self.flor_err = 0.2
        self.non_flor_err = 0.05
        self.rt_obj = rt_obj
        self.fp_obj = fp_obj
        # thresholds controlling when cached quantities are refreshed
        self._rt_im_update = 1e-7
        self._rt_imx = -10.0
        self._rt_imy = -10.0
        self._fp_im_update = 1e-4
        self._fp_imx = -10.0
        self._fp_imy = -10.0
        self._resp_update = 5.0
        self._resp_phi = 0.0
        self._resp_theta = 180.0
        self._trans_update = 2.0
        self._trans_phi = 0.0
        self._trans_theta = 180.0
        self.ones = np.ones(self.ndets)

    def get_fp(self, imx, imy):
        """Cached detector footprint at (imx, imy)."""
        if np.hypot(imx-self._fp_imx, imy-self._fp_imy) <\
                self._fp_im_update:
            return self._fp
        else:
            fp = self.fp_obj.get_fp(imx, imy)
            # builtin int: the np.int alias was removed from NumPy
            self._fp = fp[self.bl_dmask].astype(int)
            # any det with appreciable ray trace counts as in-footprint
            self._fp[(self._rt > 1e-2)] = 1
            self._unfp = 1 - self._fp
            self.uncoded = (self._fp < .1)
            self.coded = ~self.uncoded
            self._fp_imx = imx
            self._fp_imy = imy
            return self._fp

    def get_rt(self, imx, imy):
        """Cached ray trace at (imx, imy); also refreshes the shadow
        map and footprint."""
        if np.hypot(imx-self._rt_imx, imy-self._rt_imy) <\
                self._rt_im_update:
            return self._rt
        else:
            rt = self.rt_obj.get_intp_rt(imx, imy, get_deriv=False)
            self._rt = rt[self.bl_dmask]
            self.max_rt = np.max(self._rt)
            self._shadow = (self.max_rt - self._rt)
            fp = self.get_fp(imx, imy)
            self._shadow[self.uncoded] = 0.0
            self._rt_imx = imx
            self._rt_imy = imy
            return self._rt

    def get_batxys(self):
        # Physical BAT coordinates of the unmasked detectors.
        yinds, xinds = np.where(self.bl_dmask)
        self.batxs, self.batys = detxy2batxy(xinds, yinds)

    def set_theta_phi(self, theta, phi):
        """Point the response at (theta, phi); rebuilds the response
        object for large moves, only updates transmission for small
        ones."""
        if ang_sep(phi, 90.0-theta, self._resp_phi, 90.0-self._resp_theta) > self._resp_update:
            logging.info("Making new response object")
            self.resp_obj = ResponseOutFoV(self.flor_resp_dname, self.ebins0, self.ebins1, self.bl_dmask)
            self._resp_theta = theta
            self._resp_phi = phi
            self._trans_theta = theta
            self._trans_phi = phi
            self.resp_obj.set_theta_phi(theta, phi)
        elif ang_sep(phi, 90.0-theta, self._trans_phi, 90.0-self._trans_theta) > self._trans_update:
            logging.info("Updating tranmission")
            self._trans_theta = theta
            self._trans_phi = phi
            self.resp_obj.update_trans(theta, phi)
        self.theta = theta
        self.phi = phi
        imx, imy = theta_phi2imxy(theta, phi)
        rt = self.get_rt(imx, imy)

    def set_flux_params(self, flux_params):
        """Pre-compute unit-normalization ('A'=1) rate DPIs, split
        into fluorescence/non-fluorescence parts, plus their errors."""
        # deepcopy so the caller's dict is not mutated by 'A' = 1
        self.flux_params = deepcopy(flux_params)
        resp_ebins = np.append(self.resp_obj.PhotonEmins, [self.resp_obj.PhotonEmaxs[-1]])
        self.flux_params['A'] = 1.0
        self.normed_photon_fluxes = self.fmodel.get_photon_fluxes(resp_ebins, self.flux_params)
        self.normed_rate_dpis = np.swapaxes(self.resp_obj.get_rate_dpis_from_photon_fluxes(
            self.normed_photon_fluxes), 0, 1)
        self.normed_err_rate_dpis = np.swapaxes(np.sqrt((self.flor_err*self.resp_obj.
            get_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes))**2 +
            (self.non_flor_err*self.resp_obj.
             get_non_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes))**2), 0, 1)
        self.normed_flor_rate_dpis = np.swapaxes(self.resp_obj.
            get_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes), 0, 1)
        self.normed_non_flor_rate_dpis = np.swapaxes(self.resp_obj.
            get_non_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes), 0, 1)

    def get_rate_dpis(self, params):
        """Total rate DPIs: fluorescence + ray-trace-modulated
        non-fluorescence, scaled by the normalization 'A'."""
        theta = params['theta']
        phi = params['phi']
        A = params['A']
        if ang_sep(phi, 90.0-theta, self._trans_phi, 90.0-self._trans_theta) > self._trans_update:
            self.set_theta_phi(theta, phi)
            self.set_flux_params(params)
        imx, imy = theta_phi2imxy(theta, phi)
        trans_dpi0 = self.resp_obj.lines_trans_dpis[:, 0]
        coded = np.isclose(trans_dpi0, 1.0)
        # copy: masking the cached self._rt in place would corrupt the
        # ray-trace cache for later calls
        rt = self.get_rt(imx, imy).copy()
        rt[~coded] = 1.0
        rate_dpis = A*self.normed_flor_rate_dpis
        for j in range(self.nebins):
            rate_dpis[j] += A*rt*self.normed_non_flor_rate_dpis[j]
        return rate_dpis

    def get_rate_dpis_err(self, params, ret_rate_dpis=False):
        """Rate DPIs and quadrature-summed component errors."""
        theta = params['theta']
        phi = params['phi']
        A = params['A']
        if ang_sep(phi, 90.0-theta, self._trans_phi, 90.0-self._trans_theta) > self._trans_update:
            self.set_theta_phi(theta, phi)
            self.set_flux_params(params)
        imx, imy = theta_phi2imxy(theta, phi)
        # copy: do not mutate the cached ray trace (see get_rate_dpis)
        rt = self.get_rt(imx, imy).copy()
        rt[self.uncoded] = 1.0
        rate_dpis = A*self.normed_flor_rate_dpis
        err_rate_dpis2 = np.square(A*self.normed_flor_rate_dpis*self.flor_err)
        for j in range(self.nebins):
            rate_dpi = A*rt*self.normed_non_flor_rate_dpis[j]
            rate_dpis[j] += rate_dpi
            err_rate_dpis2[j] += np.square(rate_dpi*self.non_flor_err)
        if ret_rate_dpis:
            return rate_dpis, np.sqrt(err_rate_dpis2)
        # was: np.sqrt(err_rate_dpis) — undefined name (NameError)
        return np.sqrt(err_rate_dpis2)

    def get_rate_dpi(self, params, j):
        # Single energy bin slice of the full computation.
        rate_dpis = self.get_rate_dpis(params)
        return rate_dpis[j]

    def get_log_prior(self, params, j=None):
        # NOTE(review): relies on self.frac_names/self.prior_func,
        # which are not set anywhere in this class — confirm they are
        # attached externally before enabling use_prior.
        lp = 0.0
        for pname in self.frac_names:
            if int(pname[-1]) == j or j is None:
                lp += self.prior_func(params, pname)
        return lp

    def get_dnlp_dp(self, params, j):
        # NOTE(review): same external-attribute caveat as get_log_prior.
        dnlp_dps = []
        for pname in self.frac_names:
            if int(pname[-1]) == j or j is None:
                dnlp_dps.append(self.deriv_prior_func(params, pname))
        return dnlp_dps

    def get_hess_nlogprior(self, params, j):
        # NOTE(review): same external-attribute caveat as get_log_prior.
        return np.array([[self.deriv2_prior_func(params, self.frac_names[j])]])

    def get_dr_dgamma(self, params):
        # NOTE(review): references flux2rate/flux2rate_pbtrans and
        # 'imx'/'imy' params that this class does not define — looks
        # carried over from an older model; confirm before use.
        rt = self.get_rt(params['imx'], params['imy'])
        drdgs = params['A']*self.flux2rate.get_gamma_deriv(params['gamma'])
        drdgs_trans = params['A']*self.flux2rate_pbtrans.get_gamma_deriv(params['gamma'])
        dr_dgs = np.array([rt*drdg + (self._shadow)*drdgs_trans[i] +
                           self.max_rt*(self._unfp)*drdg*params[self.frac_names[i]]
                           for i, drdg in enumerate(drdgs)])
        return dr_dgs

    def get_dr_dps(self, params):
        """Rate derivatives w.r.t. the free spectral params."""
        dr_dps = []
        for pname in self.param_names:
            if self.param_dict[pname]['fixed']:
                continue
            if pname == 'A':
                # rates are linear in A
                dr_dps.append(self.get_rate_dpis(params)/params['A'])
            elif pname == 'gamma':
                dr_dps.append(self.get_dr_dgamma(params))
        return dr_dps
class Source_Model_InFoV(Model):
def __init__(self, flux_model,\
ebins, bl_dmask, rt_obj,\
name='Signal', use_deriv=False,\
use_prior=False):
self.fmodel = flux_model
self.ebins = ebins
self.ebins0 = ebins[0]
self.ebins1 = ebins[1]
nebins = len(self.ebins0)
self.flor_resp_dname = '/storage/work/jjd330/local/bat_data/resp_tabs/'
param_names = ['theta', 'phi']
param_names += self.fmodel.param_names
param_dict = {}
for pname in param_names:
pdict = {}
if pname == 'theta':
pdict['bounds'] = (0.0, 180.0)
pdict['val'] = 180.0
pdict['nuis'] = False
elif pname == 'phi':
pdict['bounds'] = (0.0, 360.0)
pdict['val'] = 0.0
pdict['nuis'] = False
# elif pname == 'd':
# pdict['bounds'] = (1e-4, 1.)
# pdict['val'] = 1e-1
# pdict['nuis'] = False
# elif 'uncoded_frac' in pname:
# pdict['bounds'] = (1e-4, .75)
# pdict['val'] = kum_mode(self.prior_kum_a[pname], self.prior_kum_b[pname])
# pdict['nuis'] = True
# # pdict['val'] = 0.1
else:
pdict['bounds'] = self.fmodel.param_bounds[pname]
if hasattr(self.fmodel, "param_guess"):
pdict['val'] = self.fmodel.param_guess[pname]
else:
pdict['val'] = (pdict['bounds'][1] +\
pdict['bounds'][0])/2.
pdict['nuis'] = False
pdict['fixed'] = False
param_dict[pname] = pdict
super(Source_Model_InFoV, self).__init__(name, bl_dmask,\
param_names, param_dict, nebins,\
has_prior=use_prior)
if use_deriv:
self.has_deriv = True
self.get_batxys()
self.flor_err = 0.2
self.non_flor_err = 0.12
self.coded_err = 0.05
self.rt_obj = rt_obj
# self.fp_obj = fp_obj
self._rt_im_update = 1e-7
self._rt_imx = -10.0
self._rt_imy = -10.0
self._fp_im_update = 1e-4
self._fp_imx = -10.0
self._fp_imy = -10.0
self._resp_update = 5.0
self._resp_phi = 0.0
self._resp_theta = 180.0
self._trans_update = 5e-3
self._trans_phi = 0.0
self._trans_theta = 180.0
self.ones = np.ones(self.ndets)
# def get_fp(self, imx, imy):
# if np.hypot(imx-self._fp_imx, imy-self._fp_imy) <\
# self._fp_im_update:
# return self._fp
# else:
# fp = self.fp_obj.get_fp(imx, imy)
# self._fp = fp[self.bl_dmask].astype(np.int)
# self._fp[(self._rt>1e-2)] = 1
# self._unfp = 1 - self._fp
# self.uncoded = (self._fp<.1)
# self.coded = ~self.uncoded
# # self._drt_dx = drt_dx[self.bl_dmask]
# # self._drt_dy = drt_dy[self.bl_dmask]
# self._fp_imx = imx
# self._fp_imy = imy
# return self._fp
# def get_rt(self, imx, imy):
# if np.hypot(imx-self._rt_imx, imy-self._rt_imy) <\
# self._rt_im_update:
# return self._rt
# else:
# rt = self.rt_obj.get_intp_rt(imx, imy, get_deriv=False)
# self._rt = rt[self.bl_dmask]
# self.max_rt = np.max(self._rt)
# print("max rt: %.4f"%(self.max_rt))
# self._rt /= self.max_rt
# self._shadow = (1. - self._rt)
# # self._shadow = (self.max_rt - self._rt)
# fp = self.get_fp(imx, imy)
# self._shadow[self.uncoded] = 0.0
# # self._drt_dx = drt_dx[self.bl_dmask]
# # self._drt_dy = drt_dy[self.bl_dmask]
# self._rt_imx = imx
# self._rt_imy = imy
# return self._rt
def get_batxys(self):
yinds, xinds = np.where(self.bl_dmask)
self.batxs, self.batys = detxy2batxy(xinds, yinds)
    def set_theta_phi(self, theta, phi):
        """Point the model at (theta, phi), reusing cached responses.

        A brand-new response object is only built when the new direction
        is more than _resp_update degrees from the cached one; a cheaper
        transmission-only update is done past _trans_update degrees.
        """
        if ang_sep(phi, 90.0-theta, self._resp_phi, 90.0-self._resp_theta) > self._resp_update:
            logging.info("Making new response object")
            self.resp_obj = ResponseInFoV(self.flor_resp_dname, self.ebins0, self.ebins1,\
                                        self.bl_dmask, self.rt_obj)
            self._resp_theta = theta
            self._resp_phi = phi
            self._trans_theta = theta
            self._trans_phi = phi
            self.resp_obj.set_theta_phi(theta, phi)
        elif ang_sep(phi, 90.0-theta, self._trans_phi, 90.0-self._trans_theta) > self._trans_update:
            # logging.info("Updating transmission")
            self._trans_theta = theta
            self._trans_phi = phi
            self.resp_obj.update_trans(theta, phi)
        self.theta = theta
        self.phi = phi
        # imx, imy = theta_phi2imxy(theta, phi)
        # rt = self.get_rt(imx, imy)
    def set_flux_params(self, flux_params):
        """Pre-compute unit-amplitude rate DPIs for the given flux model.

        Forces 'A'=1 and caches the normed total, fluorescence and
        non-fluorescence rate DPIs (plus quadrature-summed systematic
        errors) so that rate evaluations only need to rescale by A.
        """
        self.flux_params = deepcopy(flux_params)
        resp_ebins = np.append(self.resp_obj.PhotonEmins, [self.resp_obj.PhotonEmaxs[-1]])
        # normalize to unit amplitude; the real A is applied at eval time
        self.flux_params['A'] = 1.0
        self.normed_photon_fluxes = self.fmodel.get_photon_fluxes(resp_ebins, self.flux_params)
        self.normed_rate_dpis = np.swapaxes(self.resp_obj.get_rate_dpis_from_photon_fluxes(\
                                            self.normed_photon_fluxes),0,1)
        self.normed_err_rate_dpis = np.swapaxes(np.sqrt((self.flor_err*self.resp_obj.\
                                    get_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes))**2 +\
                                    (self.non_flor_err*self.resp_obj.\
                                    get_non_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes))**2),0,1)
        self.normed_flor_rate_dpis = np.swapaxes(self.resp_obj.\
                                    get_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes),0,1)
        self.normed_non_flor_rate_dpis = np.swapaxes(self.resp_obj.\
                                    get_non_flor_rate_dpis_from_photon_fluxes(self.normed_photon_fluxes),0,1)
    def get_rate_dpis(self, params):
        """Expected rate DPIs for the current flux model, scaled by A.

        Refreshes the cached response/flux products only if (theta, phi)
        moved more than _trans_update degrees from the cached pointing.
        """
        theta = params['theta']
        phi = params['phi']
        A = params['A']
        if ang_sep(phi, 90.0-theta, self._trans_phi, 90.0-self._trans_theta) > self._trans_update:
            self.set_theta_phi(theta, phi)
            self.set_flux_params(self.flux_params)
        # NOTE(review): imx/imy are computed but unused below -- leftover
        # from the ray-trace path; confirm before removing.
        imx, imy = theta_phi2imxy(theta, phi)
        # trans_dpi0 = self.resp_obj.lines_trans_dpis[:,0]
        # coded = np.isclose(trans_dpi0, 1.0)
        # rt = self.get_rt(imx, imy)
        # rt[~coded] = 1.0
        # rt[self.uncoded] = 1.0
        # rate_dpis = A*self.normed_flor_rate_dpis
        rate_dpis = A*self.normed_rate_dpis
        # for j in range(self.nebins):
        #     rate_dpis[j] += A*rt*self.normed_non_flor_rate_dpis[j]
        return rate_dpis
def get_rate_dpis_err(self, params, ret_rate_dpis=False):
theta = params['theta']
phi = params['phi']
A = params['A']
if ang_sep(phi, 90.0-theta, self._trans_phi, 90.0-self._trans_theta) > self._trans_update:
self.set_theta_phi(theta, phi)
self.set_flux_params(self.flux_params)
# imx, imy = theta_phi2imxy(theta, phi)
# rt = self.get_rt(imx, imy)
# rt[self.uncoded] = 1.0
# trans_dpi0 = self.resp_obj.lines_trans_dpis[:,0]
# coded = np.isclose(trans_dpi0, 1.0)
# rt = self.get_rt(imx, imy)
# rt[~coded] = 1.0
rate_dpis = A*self.normed_flor_rate_dpis
err_rate_dpis2 = np.square(A*self.normed_flor_rate_dpis*self.flor_err)
for j in range(self.nebins):
rate_dpi = A*self.normed_non_flor_rate_dpis[j]
rate_dpis[j] += rate_dpi
err_rate_dpis2[j] += np.square(rate_dpi*self.coded_err)
# err_rate_dpis2[j][~coded] += np.square(rate_dpi[~coded]*self.non_flor_err)
# err_rate_dpis2[j][coded] += np.square(rate_dpi[coded]*self.coded_err)
if ret_rate_dpis:
return rate_dpis, np.sqrt(err_rate_dpis2)
return np.sqrt(err_rate_dpis)
    def get_rate_dpi(self, params, j):
        """Return the expected rate DPI for energy bin ``j`` only."""
        rate_dpis = self.get_rate_dpis(params)
        return rate_dpis[j]
    def get_log_prior(self, params, j=None):
        """Sum the log-prior over the uncoded-fraction parameters.

        NOTE(review): Source_Model_InFoV never sets self.frac_names or
        self.prior_func in __init__; this looks copied from the
        out-of-FoV model and would raise AttributeError if called --
        confirm whether it is dead code.
        """
        lp = 0.0
        for pname in self.frac_names:
            if int(pname[-1]) == j or j is None:
                lp += self.prior_func(params, pname)
                # lp -= np.log((params[pname]*(np.log(\
                #         self.param_dict[pname]['bounds'][1]) -\
                #         np.log(self.param_dict[pname]['bounds'][0]))))
        return lp
    def get_dnlp_dp(self, params, j):
        """Derivatives of the negative log-prior w.r.t. fraction params.

        NOTE(review): same caveat as get_log_prior -- self.frac_names is
        never defined on this class.
        """
        dnlp_dps = []
        for pname in self.frac_names:
            if int(pname[-1]) == j or j is None:
                # dnlp_dps.append( 1./params[pname] )
                dnlp_dps.append(self.deriv_prior_func(params, pname))
        return dnlp_dps
    def get_hess_nlogprior(self, params, j):
        """1x1 Hessian of the negative log-prior for bin ``j``'s fraction."""
        return np.array([[self.deriv2_prior_func(params, self.frac_names[j])]])
    def get_dr_dgamma(self, params):
        """Derivative of the rate DPIs w.r.t. the photon index gamma.

        NOTE(review): references self.get_rt / self.flux2rate /
        self._shadow / self._unfp / self.frac_names, none of which are
        defined on Source_Model_InFoV -- likely copy-paste residue from
        the out-of-FoV model; confirm before relying on it.
        """
        rt = self.get_rt(params['imx'], params['imy'])
        drdgs = params['A']*self.flux2rate.get_gamma_deriv(params['gamma'])
        drdgs_trans = params['A']*self.flux2rate_pbtrans.get_gamma_deriv(params['gamma'])
        dr_dgs = np.array([rt*drdg + (self._shadow)*drdgs_trans[i] +\
                       self.max_rt*(self._unfp)*drdg*params[self.frac_names[i]]\
                       for i, drdg in enumerate(drdgs)])
        return dr_dgs
    def get_dr_dps(self, params):
        """Partial derivatives of the rate DPIs w.r.t. the free params.

        Only 'A' (dr/dA = rate/A) and 'gamma' are handled; fixed
        parameters are skipped.
        """
        # dr_dp = np.zeros((self.nebins,self.ndets))
        # imx = params['imx']
        # imy = params['imy']
        # if self.use_rt_deriv:
        #     rt, drt_dimx, drt_dimy = self.get_rt_wderiv(imx, imy)
        # else:
        #     rt = self.get_rt(imx, imy)
        dr_dps = []
        for pname in self.param_names:
            if self.param_dict[pname]['fixed']:
                continue
            if pname == 'A':
                dr_dps.append( self.get_rate_dpis(params)/params['A'] )
            elif pname == 'gamma':
                dr_dps.append( self.get_dr_dgamma(params) )
        return dr_dps
class ResponseInFoV(object):
def __init__(self, resp_dname, pha_emins, pha_emaxs, bl_dmask, rt_obj):
self.resp_dname = resp_dname
self.resp_arr = get_resp_arr(self.resp_dname)
self.thetas = np.unique(self.resp_arr['theta'])
tab = Table.read(os.path.join(self.resp_dname, self.resp_arr['fname'][0]))
pha_tab = Table.read(os.path.join(self.resp_dname, self.resp_arr['fname'][0]), hdu=2)
self.PhotonEmins = tab['ENERG_LO']
self.PhotonEmaxs = tab['ENERG_HI']
self.PhotonEs = ((self.PhotonEmins + self.PhotonEmaxs)/2.).astype(np.float)
self.NphotonEs = len(self.PhotonEs)
self.pha_emins = pha_emins
self.pha_emaxs = pha_emaxs
self.Nphabins = len(pha_emins)
# self.NphotonEs = NphotonEs
self.ndets = np.sum(bl_dmask)
self.bl_dmask = bl_dmask
self.batxs, self.batys = bldmask2batxys(self.bl_dmask)
self.batzs = 3.087 + np.zeros(self.ndets)
# self.resp_dpi_shape = (173, 286, self.NphotonEs, self.Nphabins)
self.resp_dpi_shape = (self.ndets, self.NphotonEs, self.Nphabins)
self.resp_files = {}
self.full_struct = get_full_struct_manager(Es=self.PhotonEs)
self.full_struct.set_batxyzs(self.batxs, self.batys, self.batzs)
dual_struct = get_dual_struct_obj(self.PhotonEs)
self.comp_obj = Comp_Resp_Obj(self.batxs, self.batys, self.batzs, dual_struct)
self.flor_resp_obj = FlorResponseDPI('/gpfs/scratch/jjd330/bat_data/flor_resps/',\
pha_tab, self.pha_emins, self.pha_emaxs,\
self.bl_dmask, NphotonEs=self.NphotonEs)
self.mask_obj = Swift_Mask_Interactions(rt_obj, self.bl_dmask)
self.mask_obj.set_energy_arr(self.PhotonEs)
self.mask_obj.set_batxyzs(self.batxs, self.batys, self.batzs)
    def set_theta_phi(self, theta, phi):
        """Full re-pointing: recompute interpolation weights, transmissions
        and the response DPIs for direction (theta, phi) in degrees."""
        # use radians or degs ?
        self.theta = theta
        self.phi = phi
        self.thetas2use, self.phis2use, self.wts = self.get_intp_theta_phi_wts(self.theta, self.phi)
        # map each (theta, phi) grid point onto its row in the response table
        self.inds4intp = []
        for i in range(len(self.wts)):
            ind = np.where(np.isclose(self.thetas2use[i],self.resp_arr['theta'])&\
                           np.isclose(self.phis2use[i],self.resp_arr['phi']))[0][0]
            self.inds4intp.append(ind)
        self.mask_obj.set_theta_phi(theta, phi)
        self.mask_trans = self.mask_obj.get_trans()
        self.full_struct.set_theta_phi(np.radians(theta), np.radians(phi))
        self._lines_trans_dpis = self.full_struct.get_trans()
        self.lines_trans_dpis = self._lines_trans_dpis*self.mask_trans
        # self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        # self.comp_trans_dpis = self.comp_obj.get_trans()
        # Compton transmission only differs from the line transmission for
        # directions behind the detector plane (theta > 90 deg)
        if theta > 90.0:
            self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
            self.comp_trans_dpis = self.comp_obj.get_trans()
        else:
            self.comp_trans_dpis = self.lines_trans_dpis
        self.flor_resp_obj.set_theta_phi(self.theta, self.phi)
        self.calc_resp_dpis()
        self.calc_tot_resp_dpis()
    def update_trans(self, theta, phi):
        """Cheap re-pointing: refresh only the transmission factors (the
        interpolated response DPIs are kept) and rebuild the totals."""
        self.mask_obj.set_theta_phi(theta, phi)
        self.mask_trans = self.mask_obj.get_trans()
        self.full_struct.set_theta_phi(np.radians(theta), np.radians(phi))
        self._lines_trans_dpis = self.full_struct.get_trans()
        self.lines_trans_dpis = self._lines_trans_dpis*self.mask_trans
        # self.comp_obj.set_theta_phi(np.radians(self.theta), np.radians(self.phi))
        # self.comp_trans_dpis = self.comp_obj.get_trans()
        if theta > 90.0:
            self.comp_obj.set_theta_phi(np.radians(theta), np.radians(phi))
            self.comp_trans_dpis = self.comp_obj.get_trans()
        else:
            self.comp_trans_dpis = self.lines_trans_dpis
        self.calc_tot_resp_dpis()
    def open_resp_file_obj(self, fname):
        """Lazily open one response file and cache it by file name."""
        resp_file_obj = ResponseDPI(os.path.join(self.resp_dname,fname),\
                                    self.pha_emins, self.pha_emaxs,\
                                    np.radians(self.phi), self.bl_dmask)
        self.resp_files[fname] = resp_file_obj
    def calc_resp_dpis(self):
        """Weighted sum of the grid responses at the interpolation points."""
        self.lines_resp_dpis = np.zeros(self.resp_dpi_shape)
        self.comp_resp_dpis = np.zeros(self.resp_dpi_shape)
        for i in range(len(self.wts)):
            k = self.resp_arr['fname'][self.inds4intp[i]]
            # open and cache the response file on first use
            if not k in self.resp_files.keys():
                self.open_resp_file_obj(k)
            self.lines_resp_dpis += self.wts[i]*self.resp_files[k].get_lines_resp_dpis()
            self.comp_resp_dpis += self.wts[i]*self.resp_files[k].get_comp_resp_dpis()
    def calc_tot_resp_dpis(self):
        """Apply transmissions and combine line, Compton and fluorescence
        responses into the cached totals."""
        lines_dpi = self.lines_resp_dpis*(self.lines_trans_dpis[:,:,np.newaxis])
        comp_dpi = self.comp_resp_dpis*(self.comp_trans_dpis[:,:,np.newaxis])
        self.comp_resp_dpi = comp_dpi
        self.lines_resp_dpi = lines_dpi
        self.non_flor_resp_dpi = lines_dpi + comp_dpi
        self.flor_resp_dpi = self.flor_resp_obj.get_resp_dpi()
        self.tot_resp_dpis = self.non_flor_resp_dpi + self.flor_resp_dpi
    def get_lines_resp_dpis(self):
        # raw (untransmitted) line response
        return self.lines_resp_dpis
    def get_comp_resp_dpis(self):
        # raw (untransmitted) Compton response
        return self.comp_resp_dpis
    def get_flor_resp_dpis(self):
        # fluorescence response from the dedicated fluorescence object
        return self.flor_resp_obj.get_resp_dpi()
    def get_tot_resp_dpis(self):
        # total response = transmitted (lines + Compton) + fluorescence
        return self.tot_resp_dpis
def get_rate_dpis_from_photon_fluxes(self, photon_fluxes):
rate_dpis = np.zeros((self.ndets,self.Nphabins))
for j in range(self.Nphabins):
rate_dpis[:,j] += np.sum(photon_fluxes*self.tot_resp_dpis[:,:,j],axis=1)
return rate_dpis
def get_flor_rate_dpis_from_photon_fluxes(self, photon_fluxes):
rate_dpis = np.zeros((self.ndets,self.Nphabins))
for j in range(self.Nphabins):
rate_dpis[:,j] += np.sum(photon_fluxes*self.flor_resp_dpi[:,:,j],axis=1)
return rate_dpis
def get_comp_rate_dpis_from_photon_fluxes(self, photon_fluxes):
rate_dpis = np.zeros((self.ndets,self.Nphabins))
for j in range(self.Nphabins):
rate_dpis[:,j] += np.sum(photon_fluxes*self.comp_resp_dpi[:,:,j],axis=1)
return rate_dpis
def get_photoe_rate_dpis_from_photon_fluxes(self, photon_fluxes):
rate_dpis = np.zeros((self.ndets,self.Nphabins))
for j in range(self.Nphabins):
rate_dpis[:,j] += np.sum(photon_fluxes*self.lines_resp_dpi[:,:,j],axis=1)
return rate_dpis
def get_non_flor_rate_dpis_from_photon_fluxes(self, photon_fluxes):
rate_dpis = np.zeros((self.ndets,self.Nphabins))
for j in range(self.Nphabins):
rate_dpis[:,j] += np.sum(photon_fluxes*self.non_flor_resp_dpi[:,:,j],axis=1)
return rate_dpis
    def get_intp_theta_phi_wts(self, theta, phi, eps=0.1):
        """Bilinear interpolation points and weights on the response grid.

        Returns parallel lists (thetas, phis, weights). phi is folded into
        the [0, 45] degree wedge the response table was computed on
        (exploiting the mask's 8-fold symmetry), and grid points closer
        than ``eps`` degrees collapse to a single point with weight 1.
        NOTE(review): Python 2 module -- the bare ``print`` statements and
        the integer division ``int(phi)/45`` both rely on Py2 semantics.
        """
        thetas = np.sort(np.unique(self.resp_arr['theta']))
        phis = np.sort(np.unique(self.resp_arr['phi']))
        # bracketing theta grid points
        th0 = np.digitize(theta, thetas) - 1
        if theta == 180.0:
            # keep the upper edge inside the table
            th0 -= 1
        theta0 = thetas[th0]
        theta1 = thetas[th0+1]
        print theta0, theta1
        if np.abs(theta0 - theta) < eps:
            ths = [theta0]
            th_wts = [1.0]
        elif np.abs(theta1 - theta) < eps:
            ths = [theta1]
            th_wts = [1.0]
        else:
            ths = [theta0, theta1]
            dth = theta1 - theta0
            th_wts = [(theta1 - theta)/dth, (theta - theta0)/dth]
        # fold phi into the fundamental [0, 45] degree wedge
        phi_ = phi - (int(phi)/45)*45.0
        print phi_
        if (int(phi)/45)%2 == 1:
            phi_ = 45.0 - phi_
        print phi_
        ph0 = np.digitize(phi_, phis) - 1
        if phi_ == 45.0:
            ph0 -= 1
        phi0 = phis[ph0]
        phi1 = phis[ph0+1]
        if np.abs(phi0 - phi_) < eps:
            phs = [phi0]
            ph_wts = [1.0]
        elif np.abs(phi1 - phi_) < eps:
            phs = [phi1]
            ph_wts = [1.0]
        else:
            phs = [phi0, phi1]
            dph = phi1 - phi0
            ph_wts = [(phi1 - phi_)/dph, (phi_ - phi0)/dph]
        # combine theta and phi weights; at the poles phi is degenerate
        ths_ = []
        phs_ = []
        wts = []
        for i in range(len(ths)):
            if ths[i] == 0.0 or ths[i] == 180.0:
                ths_.append(ths[i])
                phs_.append(0.0)
                wts.append(th_wts[i])
                continue
            for j in range(len(phs)):
                ths_.append(ths[i])
                phs_.append(phs[j])
                wts.append(th_wts[i]*ph_wts[j])
        return ths_, phs_, wts
def min_at_Epeaks_gammas(sig_miner, sig_mod, Epeaks, gammas):
    """Minimize the NLLH at each (Epeak, gamma) spectral grid point.

    For every grid point the flux model is updated in place and the
    minimizer re-run; returns the per-point best-fit NLLHs and norms (A).
    """
    nllhs = []
    norms = []
    flux_params = {'A': 1.0, 'Epeak': 150.0, 'gamma': -0.25}
    for i in range(len(gammas)):
        flux_params['gamma'] = gammas[i]
        flux_params['Epeak'] = Epeaks[i]
        sig_mod.set_flux_params(flux_params)
        pars, nllh, res = sig_miner.minimize()
        nllhs.append(nllh[0])
        norms.append(pars[0][0])
    return nllhs, norms
def analysis_for_imxy_square(imx0, imx1, imy0, imy1, bkg_bf_params,\
bkg_mod, flux_mod, ev_data,\
ebins0, ebins1, tbins0, tbins1):
bl_dmask = bkg_mod.bl_dmask
# dimxy = 0.0025
dimxy = 0.003
imx_ax = np.arange(imx0, imx1+dimxy/2., dimxy)
imy_ax = np.arange(imy0, imy1+dimxy/2., dimxy)
imxg,imyg = np.meshgrid(imx_ax, imy_ax)
imxs = np.ravel(imxg)
imys = np.ravel(imyg)
Npnts = len(imxs)
logging.info("%d imxy points to do" %(Npnts))
thetas, phis = imxy2theta_phi(imxs, imys)
gamma_ax = np.linspace(-0.4, 1.6, 8+1)
gamma_ax = np.linspace(-0.4, 1.6, 4+1)[1:-2]
# gamma_ax = np.linspace(-0.4, 1.6, 3+1)
Epeak_ax = np.logspace(np.log10(45.0), 3, 10+1)
Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)[1:-1]
Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)[3:]
# Epeak_ax = np.logspace(np.log10(25.0), 3, 3+1)
gammas, Epeaks = np.meshgrid(gamma_ax, Epeak_ax)
gammas = gammas.ravel()
Epeaks = Epeaks.ravel()
Nspec_pnts = len(Epeaks)
ntbins = len(tbins0)
rt_obj = RayTraces(rt_dir)
fp_obj = FootPrints(fp_dir)
sig_mod = Source_Model_InFoV(flux_mod, [ebins0,ebins1], bl_dmask,\
rt_obj, use_deriv=True)
sig_mod.set_theta_phi(np.mean(thetas), np.mean(phis))
comp_mod = CompoundModel([bkg_mod, sig_mod])
sig_miner = NLLH_ScipyMinimize_Wjacob('')
sig_llh_obj = LLH_webins(ev_data, ebins0, ebins1, bl_dmask, has_err=True)
sig_llh_obj.set_model(comp_mod)
flux_params = {'A':1.0, 'gamma':0.5, 'Epeak':1e2}
pars_ = {}
pars_['Signal_theta'] = np.mean(thetas)
pars_['Signal_phi'] = np.mean(phis)
for pname,val in bkg_bf_params.iteritems():
# pars_['Background_'+pname] = val
pars_['Background+Cyg X-1_'+pname] = val
for pname,val in flux_params.iteritems():
pars_['Signal_'+pname] = val
sig_miner.set_llh(sig_llh_obj)
fixed_pnames = pars_.keys()
fixed_vals = pars_.values()
trans = [None for i in range(len(fixed_pnames))]
sig_miner.set_trans(fixed_pnames, trans)
sig_miner.set_fixed_params(fixed_pnames, values=fixed_vals)
sig_miner.set_fixed_params(['Signal_A'], fixed=False)
res_dfs_ = []
sig_llh_obj.set_time(tbins0[0], tbins1[0])
# for i in range(ntbins):
#
# t0 = tbins0[i]
# t1 = tbins1[i]
# dt = t1 - t0
# sig_llh_obj.set_time(tbins0[i], tbins1[i])
#
# res_dfs = []
#
for j in range(Nspec_pnts):
res_dfs = []
flux_params['gamma'] = gammas[j]
flux_params['Epeak'] = Epeaks[j]
sig_mod.set_flux_params(flux_params)
# res_dict = {'time':t0, 'dur':dt}
# res_dict['Epeak'] = Epeaks[j]
# res_dict['gamma'] = gammas[j]
#
# nllhs = np.zeros(Npnts)
# As = np.zeros(Npnts)
for ii in range(Npnts):
res_dict = {}
res_dict['Epeak'] = Epeaks[j]
res_dict['gamma'] = gammas[j]
nllhs = np.zeros(ntbins)
As = np.zeros(ntbins)
sig_miner.set_fixed_params(['Signal_theta', 'Signal_phi'],\
values=[thetas[ii],phis[ii]])
for i in range(ntbins):
t0 = tbins0[i]
t1 = tbins1[i]
dt = t1 - t0
sig_llh_obj.set_time(tbins0[i], tbins1[i])
pars, nllh, res = sig_miner.minimize()
if not res[0].success:
logging.warning("Failed min")
logging.warning(res[0])
As[i] = pars[0][0]
nllhs[i] = nllh[0]
res_dict['nllh'] = nllhs
res_dict['A'] = As
res_dict['time'] = tbins0
res_dict['dur'] = tbins1 - tbins0
res_dict['theta'] = thetas[ii]
res_dict['phi'] = phis[ii]
res_dict['imx'] = imxs[ii]
res_dict['imy'] = imys[ii]
res_dfs.append(pd.DataFrame(res_dict))
logging.info("Done with spec %d of %d" %(j+1,Nspec_pnts))
res_df = pd.concat(res_dfs, ignore_index=True)
bkg_nllhs = np.zeros(len(res_df))
for i in range(ntbins):
t0 = tbins0[i]
t1 = tbins1[i]
dt = t1 - t0
sig_llh_obj.set_time(tbins0[i], tbins1[i])
pars_['Signal_A'] = 1e-10
bkg_nllh = -sig_llh_obj.get_logprob(pars_)
bl = np.isclose(res_df['time']-t0,t0-t0)&np.isclose(res_df['dur'],dt)
bkg_nllhs[bl] = bkg_nllh
# pars_['Signal_A'] = 1e-10
# bkg_nllh = -sig_llh_obj.get_logprob(pars_)
res_df['bkg_nllh'] = bkg_nllhs
res_df['TS'] = np.sqrt(2.*(bkg_nllhs - res_df['nllh']))
res_dfs_.append(res_df)
return pd.concat(res_dfs_, ignore_index=True)
def analysis_at_theta_phi(theta, phi, bkg_bf_params, bkg_mod, flux_mod, ev_data, ebins0, ebins1, tbins0, tbins1):
    """Fit the signal amplitude over a spectral grid at one sky direction.

    For each time bin, minimizes the NLLH over amplitude A at every
    (Epeak, gamma) grid point with (theta, phi) and the background fixed
    at its best fit, and converts to a TS against the background-only
    NLLH. Returns one DataFrame row per (time bin, spectral point).
    NOTE(review): Python 2 module (``iteritems``, ``print`` statements).
    """
    bl_dmask = bkg_mod.bl_dmask
    sig_mod = Source_Model_OutFoV(flux_mod, [ebins0,ebins1], bl_dmask, use_deriv=True)
    sig_mod.set_theta_phi(theta, phi)
    print "theta, phi set"
    comp_mod = CompoundModel([bkg_mod, sig_mod])
    sig_miner = NLLH_ScipyMinimize_Wjacob('')
    sig_llh_obj = LLH_webins(ev_data, ebins0, ebins1, bl_dmask, has_err=True)
    sig_llh_obj.set_model(comp_mod)
    flux_params = {'A':1.0, 'gamma':0.5, 'Epeak':1e2}
    # fix the pointing, the background best fit and the flux-shape params;
    # only Signal_A is left free for the minimizer
    pars_ = {}
    pars_['Signal_theta'] = theta
    pars_['Signal_phi'] = phi
    for pname,val in bkg_bf_params.iteritems():
        pars_['Background_'+pname] = val
    for pname,val in flux_params.iteritems():
        pars_['Signal_'+pname] = val
    sig_miner.set_llh(sig_llh_obj)
    fixed_pnames = pars_.keys()
    fixed_vals = pars_.values()
    trans = [None for i in range(len(fixed_pnames))]
    sig_miner.set_trans(fixed_pnames, trans)
    sig_miner.set_fixed_params(fixed_pnames, values=fixed_vals)
    sig_miner.set_fixed_params(['Signal_A'], fixed=False)
    # spectral grid; later assignments deliberately override the earlier,
    # finer axes (kept for reference)
    gamma_ax = np.linspace(-0.4, 1.6, 8+1)
    gamma_ax = np.linspace(-0.4, 1.6, 4+1)
    # gamma_ax = np.linspace(-0.4, 1.6, 3+1)
    Epeak_ax = np.logspace(np.log10(45.0), 3, 10+1)
    Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)
    # Epeak_ax = np.logspace(np.log10(25.0), 3, 3+1)
    gammas, Epeaks = np.meshgrid(gamma_ax, Epeak_ax)
    gammas = gammas.ravel()
    Epeaks = Epeaks.ravel()
    res_dfs = []
    ntbins = len(tbins0)
    for i in range(ntbins):
        t0 = tbins0[i]
        t1 = tbins1[i]
        dt = t1 - t0
        sig_llh_obj.set_time(tbins0[i], tbins1[i])
        res_dict = {'theta':theta, 'phi':phi,
                    'time':t0, 'dur':dt}
        res_dict['Epeak'] = Epeaks
        res_dict['gamma'] = gammas
        nllhs, As = min_at_Epeaks_gammas(sig_miner, sig_mod, Epeaks, gammas)
        # background-only NLLH: signal amplitude pinned to ~zero
        pars_['Signal_A'] = 1e-10
        bkg_nllh = -sig_llh_obj.get_logprob(pars_)
        res_dict['nllh'] = np.array(nllhs)
        res_dict['A'] = np.array(As)
        res_dict['TS'] = np.sqrt(2*(bkg_nllh - res_dict['nllh']))
        res_dict['bkg_nllh'] = bkg_nllh
        res_dfs.append(pd.DataFrame(res_dict))
        print "done with %d of %d tbins"%(i+1,ntbins)
    return pd.concat(res_dfs, ignore_index=True)
def main(args):
fname = os.path.join(args.work_dir,args.log_fname + '_' + str(args.job_id))
logging.basicConfig(filename=fname+'.log', level=logging.DEBUG,\
format='%(asctime)s-' '%(levelname)s- %(message)s')
resp_fname = '/storage/work/jjd330/local/bat_data/resp_tabs/drm_theta_126.0_phi_30.0_.fits'
resp_file = fits.open(resp_fname)
pha_emins, pha_emaxs = resp_file[2].data['E_MIN'].astype(np.float), resp_file[2].data['E_MAX'].astype(np.float)
ebins0 = np.array([15.0, 24.0, 35.0, 48.0, 64.0])
ebins0 = np.append(ebins0, np.logspace(np.log10(84.0), np.log10(500.0), 5+1))[:-1]
ebins0 = np.round(ebins0, decimals=1)[:-1]
ebins1 = np.append(ebins0[1:], [350.0])
nebins = len(ebins0)
ev_data = fits.open(args.evfname)[1].data
if args.trig_time is None:
trigger_time = np.min(ev_data['TIME'])
else:
trigger_time = args.trig_time
enb_tab = Table.read(args.dmask)
enb_ind = np.argmin(np.abs(enb_tab['TIME']-(trigger_time+args.min_dt)))
dmask = enb_tab[enb_ind]['FLAG']
mask_vals = mask_detxy(dmask, ev_data)
bl_dmask = (dmask==0.)
bl_ev = (ev_data['EVENT_FLAGS']<1)&\
(ev_data['ENERGY']<1e3)&(ev_data['ENERGY']>=10.)&\
(mask_vals==0.)
ev_data0 = ev_data[bl_ev]
attfile = Table.read(args.attfname)
att_ind = np.argmin(np.abs(attfile['TIME'] - (trigger_time+args.min_dt)))
att_q = attfile['QPARAM'][att_ind]
solid_angle_dpi = np.load(solid_angle_dpi_fname)
bkg_mod = Bkg_Model_wFlatA(bl_dmask, solid_angle_dpi, nebins, use_deriv=True)
llh_obj = LLH_webins(ev_data0, ebins0, ebins1, bl_dmask, has_err=True)
# bkg_miner = NLLH_ScipyMinimize('')
bkg_miner = NLLH_ScipyMinimize_Wjacob('')
bkg_t0 = trigger_time + args.bkg_dt0 #6.0
bkg_dt = args.bkg_dur #4.0
bkg_t1 = bkg_t0 + bkg_dt
brt_src_tab = get_srcs_infov(attfile, bkg_t0+bkg_dt/2.)
cygx1_row = brt_src_tab[0]
rt_obj = RayTraces(rt_dir)
cyg_mod = Point_Source_Model_Binned_Rates(cygx1_row['imx'], cygx1_row['imy'], 0.1,\
[ebins0,ebins1],\
rt_obj, bl_dmask,\
use_deriv=True, name=cygx1_row['Name'])
bkg_mod = CompoundModel([bkg_mod, cyg_mod])
llh_obj.set_time(bkg_t0, bkg_t1)
llh_obj.set_model(bkg_mod)
bkg_miner.set_llh(llh_obj)
bkg_miner.set_fixed_params(['Cyg X-1_imx', 'Cyg X-1_imy'])
pars, bkg_nllh, res = bkg_miner.minimize()
# bkg_bf_params = {bkg_mod.param_names[i]:pars[0][i] for i in range(len(bkg_mod.param_names))}
i=0
bkg_bf_params = {}
for cname in bkg_mod.param_names:
if cname in bkg_miner.fixed_params:
continue
bkg_bf_params[cname] = pars[0][i]
i += 1
bkg_bf_params['Cyg X-1_imx'] = cygx1_row['imx']
bkg_bf_params['Cyg X-1_imy'] = cygx1_row['imy']
flux_mod = Cutoff_Plaw_Flux(E0=100.0)
dur = args.min_dur
tbins0 = np.arange(args.min_dt, args.max_dt, dur/2.0) + trigger_time
tbins1 = tbins0 + dur
for i in range(args.Ntdbls):
dur *= 2
tbins0_ = np.arange(args.min_dt, args.max_dt, dur/2.0) + trigger_time
tbins1_ = tbins0_ + dur
tbins0 = np.append(tbins0, tbins0_)
tbins1 = np.append(tbins1, tbins1_)
ntbins = len(tbins0)
logging.info("ntbins: %d" %(ntbins))
Njobs = args.Njobs
job_id = args.job_id
dimxy = 0.036
dimxy = 0.024
imxax = np.arange(-1.8, 1.8, dimxy)[12:-12]
imyax = np.arange(-1.0, 1.0, dimxy)[5:-5]
imxax = np.arange(-1.8, 1.8, dimxy)[16:-16]
imyax = np.arange(-1.0, 1.0, dimxy)[7:-7]
imxg, imyg = np.meshgrid(imxax, imyax)
imx0s = np.ravel(imxg)
imy0s = np.ravel(imyg)
Nsquares = len(imx0s)
Npix2do = 1 + Nsquares/Njobs
logging.info("Npix2do: %d" %(Npix2do))
ind0 = job_id*Npix2do
ind1 = min(ind0 + Npix2do, Nsquares)
# logging.info("hp_ind0: %d" %(hp_ind0))
# logging.info("hp_ind1: %d" %(hp_ind1))
for ind in range(ind0,ind1):
imx0 = imx0s[ind]
imx1 = imx0 + dimxy
imy0 = imy0s[ind]
imy1 = imy0 + dimxy
logging.info("Starting ind %d" %(ind))
logging.info("imx0, imx1: %.3f, %.3f" %(imx0,imx1))
logging.info("imy0, imy1: %.3f, %.3f" %(imy0,imy1))
res_df = analysis_for_imxy_square(imx0, imx1, imy0, imy1, bkg_bf_params, bkg_mod, flux_mod,\
ev_data0, ebins0, ebins1, tbins0, tbins1)
res_df['dt'] = res_df['time'] - trigger_time
res_df['square_ind'] = ind
save_fname = os.path.join(args.work_dir, 'square_ind_%d_.csv'%(ind))
res_df.to_csv(save_fname)
logging.info("wrote results to, ")
logging.info(save_fname)
if __name__ == "__main__":
args = cli()
main(args)
| StarcoderdataPython |
1650814 | import json
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
from os import listdir, environ, makedirs, removedirs, getcwd
from os.path import isfile, isdir, join
basedir = getcwd()
def save_file(path, content):
    """Write ``content`` (an iterable of byte values) under ``basedir``.

    Returns the path the data was written to.
    NOTE(review): ``path`` comes straight from the request body; a value
    containing '..' can escape ``basedir`` (path traversal). Confirm the
    caller is trusted or sanitize before exposing this endpoint.
    """
    payload = bytearray(content)
    target = '{}/{}'.format(basedir, path)
    target = target.replace('//', '/')
    with open(target, 'wb') as out_file:
        out_file.write(payload)
    return target
class MediaCaptureHandler(APIHandler):
    """Jupyter server REST handler for the media_capture endpoint."""
    def get(self, path=''):
        """Placeholder GET; reports the endpoint as work-in-progress."""
        self.finish(json.dumps({'wip': True}))
    def post(self, path=''):
        """Persist an uploaded media file.

        Expects a JSON body with 'path' and 'content' (a list of byte
        values); replies with the path the file was saved to.
        """
        body = json.loads(self.request.body)
        saved_path = save_file(body['path'], body['content'])
        self.finish(json.dumps({'saved_path': saved_path}))
def setup_handlers(web_app):
    """Register the media-capture REST endpoint on the Jupyter web app."""
    base_url = web_app.settings["base_url"]
    endpoint = url_path_join(base_url, "jupyterlab_media_capture", "media_capture")
    web_app.add_handlers(".*$", [(endpoint, MediaCaptureHandler)])
| StarcoderdataPython |
4832257 | <reponame>fax001/tink
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exception (non-)propagation in Pybind11PythonFileObjectAdapter."""
import io
from absl.testing import absltest
import tink
from tink import streaming_aead
def setUpModule():
    # Register the streaming AEAD key managers once for the whole module.
    streaming_aead.register()
def get_primitive() -> streaming_aead.StreamingAead:
    """Create a StreamingAead primitive from a freshly generated keyset.

    BUG FIX: the template attribute was an anonymized ``<KEY>``
    placeholder, which is a syntax error. Use a concrete template shipped
    with Tink (AES128-GCM-HKDF with 4 KB segments, convenient for tests).
    """
    key_template = streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB
    keyset_handle = tink.new_keyset_handle(key_template)
    primitive = keyset_handle.primitive(streaming_aead.StreamingAead)
    return primitive
class BytesIOThatThrowsExceptionsOnReadWrite(io.BytesIO):
    """Test double: every read/write raises TinkError; close is a no-op."""
    def write(self, data):
        raise tink.TinkError('Called write!')
    def read(self, num):
        raise tink.TinkError('Called read!')
    def close(self):
        # no-op so the failing stream can still be closed cleanly
        pass
class BytesIOThatThrowsExceptionsOnClose(io.BytesIO):
    """Test double: read/write succeed trivially, but close raises."""
    def write(self, data):
        # pretend the whole buffer was written
        return len(data)
    def read(self, _):
        # always report end-of-stream
        return b''
    def close(self):
        raise tink.TinkError('Called close!')
class Pybind11PythonFileObjectAdaterTest(absltest.TestCase):
    """Exception (non-)propagation through the C++ file-object adapter.

    NOTE(review): class name is missing the 's' in 'Adapter'; kept as-is
    for compatibility with existing test selection.
    """
    # This and the following tests do not use the `with` statement. This is done
    # for two reasons:
    #   1. consistency with the `test_close_throws()`: there, exit from the
    #      context created by the `with` statement causes the `close()` function
    #      to be called after `assertRaises()` verified that it throws -- thus
    #      one more exception is raised, and the test fails.
    #   2. avoiding similar unexpected sideffects in the other tests
    def test_write_throws(self):
        """write() errors are swallowed until close() flushes the stream."""
        streaming_aead_primitive = get_primitive()
        ciphertext_destination = BytesIOThatThrowsExceptionsOnReadWrite()
        enc_stream = streaming_aead_primitive.new_encrypting_stream(
            ciphertext_destination, b'associated_data')
        # The exception is thrown but swallowed on the way.
        _ = enc_stream.write(b'plaintext')
        # The exception is thrown and is not swallowed.
        self.assertRaises(tink.TinkError, enc_stream.close)
    def test_read_throws(self):
        """read() errors on the underlying source propagate immediately."""
        streaming_aead_primitive = get_primitive()
        ciphertext_source = BytesIOThatThrowsExceptionsOnReadWrite()
        dec_stream = streaming_aead_primitive.new_decrypting_stream(
            ciphertext_source, b'associated_data')
        self.assertRaises(tink.TinkError, dec_stream.read)
        dec_stream.close()
    def test_close_throws(self):
        """close() errors on the underlying destination propagate."""
        streaming_aead_primitive = get_primitive()
        ciphertext_destination = BytesIOThatThrowsExceptionsOnClose()
        enc_stream = streaming_aead_primitive.new_encrypting_stream(
            ciphertext_destination, b'associated_data')
        self.assertRaises(tink.TinkError, enc_stream.close)
absltest.main()
| StarcoderdataPython |
3253933 | # (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from traits.observation._generated_parser import (
UnexpectedInput,
Lark_StandAlone,
)
PARSER = Lark_StandAlone()
class TestParsingValidation(unittest.TestCase):
    """ Test parsing text using the standalone parser, for valid and invalid
    text, without further evaluation on the meaning of their content.
    """

    def test_invalid_examples(self):
        """Every malformed expression must raise UnexpectedInput."""
        malformed = (
            "",
            "1name",
            "a.b.c^abc",
            "[a.b]c",
            "a*.c",
            "a:[b,c]:",
            ".a",
            "a()",
            "-a",
        )
        for text in malformed:
            with self.subTest(bad_example=text):
                with self.assertRaises(UnexpectedInput):
                    PARSER.parse(text)

    def test_valid_examples(self):
        """Every well-formed expression must parse without raising."""
        well_formed = (
            "name",
            "name123",
            "name_a",
            "_name",
            "foo.bar",
            "foo . bar",
            "foo:bar",
            "foo : bar",
            "foo,bar",
            "foo , bar",
            "[foo,bar,foo.spam]",
            "[foo, bar].baz",
            "[foo, [bar, baz]]:spam",
            "foo:[bar.spam,baz]",
            "foo.items",
            "items",
            "+metadata_name",
        )
        for text in well_formed:
            with self.subTest(good_example=text):
                try:
                    PARSER.parse(text)
                except Exception:
                    self.fail(
                        "Parsing {!r} is expected to succeed.".format(text)
                    )
27335 | <gh_stars>0
from pyns.protocols import create_basestation, create_node, ProtocolType
from pyns.engine import Simulator, SimArg, TraceFormatter, TransmissionMedium
from pyns.phy import PHYLayer
import logging
import numpy
import sys
import random
import os
import json
class ConstantSimulator(Simulator):
    """Poisson-traffic simulation with a fixed node population.

    Drives one basestation plus ``num_nodes`` nodes over a shared
    transmission medium, generating packet arrivals at a constant
    per-node Poisson rate.
    """
    def __init__(self, total_time, use_seed, num_nodes, protocol_type, log_prefix):
        super().__init__(total_time)
        self.use_seed = use_seed          # deterministic seeds when True
        self.num_nodes = num_nodes
        self.protocol_type = protocol_type
        self.log_prefix = log_prefix      # prefix for per-rate log names
    def _run(self, env, pr):
        """SimPy-style generator process; ``pr`` is the per-node packet rate."""
        if self.use_seed:
            seeds = [i for i in range(self.num_nodes + 1)]
            numpy.random.seed(0)
            random.seed(0)
        else:
            seeds = [random.randint(0, self.num_nodes * 1000) for i in range(self.num_nodes + 1)]
        # NOTE(review): the basestation takes seeds[0] and node 0 also takes
        # seeds[0] below (nodes use seeds[i] for i in 0..num_nodes-1) --
        # possibly seeds[i+1] was intended; confirm.
        special_args = {"seed": seeds[0]}
        name = self.log_prefix + str(pr)
        with open("100.json") as f:
            config = json.load(f)
        layer = PHYLayer(120, 10000, 1) # 10 KHz bandwidth. won't be used in the simulation
        t = TransmissionMedium(env, name, layer=layer)
        t.add_logger(name)
        bs = create_basestation(self.protocol_type, 0, env, config, special_args)
        t.add_device(bs)
        nodes = []
        for i in range(self.num_nodes):
            special_arg = {"total": self.num_nodes, "scheduled_time": i, "seed": seeds[i]}
            n = create_node(self.protocol_type, i, env, config, special_arg)
            nodes.append(n)
            t.add_device(n)
        # aggregate arrival rate over the whole population
        rate = pr * len(nodes)
        dummy_payload = "Test"
        ADJUST_FACTOR = 4 # this is for DQN adjustment
        load = rate if self.protocol_type != 3 else rate / config["N"] * ADJUST_FACTOR
        while True:
            # NOTE(review): a large Poisson draw can exceed len(nodes),
            # which makes random.sample raise ValueError -- confirm the
            # load is always small enough.
            num_of_trans = numpy.random.poisson(load)
            nodes_to_trans = random.sample(nodes, num_of_trans)
            for n in nodes_to_trans:
                n.send(dummy_payload, int(n.MTU / ADJUST_FACTOR))
            sleep_time = numpy.random.uniform(0, 2)
            yield env.timeout(sleep_time)
def main():
    """Entry point: parse CLI args, configure one logger per rate, start the sweep."""
    parser = SimArg("Simulation with various rates and 100 nodes.", remove_num = True)
    args = parser.parse_args()
    # setting up logger
    total_time = args.sim_time
    use_seed = args.use_seed
    num_nodes = 100
    #pr = args.packet_rate
    protocol_type = args.type
    log_prefix = "rate-"
    if args.test:
        # Smoke-test mode: a single low rate with fixed seeds.
        rates = [1 / num_nodes]
        use_seed = True
    else:
        # Sweep 20 per-node rates: 0.05/N, 0.10/N, ..., 1.00/N.
        rates = [0.05 / num_nodes * i for i in range(1, 21)]
    sim = ConstantSimulator(total_time, use_seed, num_nodes, protocol_type, log_prefix)
    for rate in rates:
        # One named logger per rate; ConstantSimulator._run logs to this name.
        name = log_prefix + str(rate)
        logger = logging.getLogger(name)
        if args.stdout or args.test:
            ch = logging.StreamHandler(sys.stdout)
        else:
            ch = logging.FileHandler(os.path.join("100_log", str(protocol_type) + "-" + name))
        ch.setFormatter(TraceFormatter())
        ch.setLevel(logging.INFO)
        logger.addHandler(ch)
    # NOTE(review): start() receives the whole rates list — assumed to fan out one
    # _run() per rate; confirm against Simulator.start.
    sim.start(rates)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1636693 | <gh_stars>1-10
from django.contrib import admin
from .models import Diary
# Register your models here.
class DiaryAdmin(admin.ModelAdmin):
    """Admin configuration for Diary entries."""
    # Columns shown on the Diary change-list page.
    list_display = ('id', 'email', 'title', 'content', 'emotion', 'write_date', 'rewrite_date')

admin.site.register(Diary, DiaryAdmin)
169050 | import numpy as np
def run_optimizer(opt, cost_f, iterations, *args, **kwargs):
    """Run `opt` for `iterations` steps on `cost_f`, recording the trajectory.

    Returns (errors, distance, xs, ys): the cost at the start point and after
    each step, the per-point Euclidean distance to the known optimum, and the
    visited coordinates (starting point included).
    """
    xs = [cost_f.x_start]
    ys = [cost_f.y_start]
    errors = [cost_f.eval(cost_f.x_start, cost_f.y_start)]
    for _ in range(iterations):
        new_x, new_y = opt.step(*args, **kwargs)
        xs.append(new_x)
        ys.append(new_y)
        errors.append(cost_f.eval(new_x, new_y))
    dx = np.array(xs) - cost_f.x_optimum
    dy = np.array(ys) - cost_f.y_optimum
    distance = np.sqrt(dx ** 2 + dy ** 2)
    return errors, distance, xs, ys
class Optimizer:
    """Base class for 2-D optimizers; subclasses must implement step()."""

    def __init__(self, cost_f, lr, x, y, **kwargs):
        """Store the cost function, learning rate, and starting point.

        If x or y is None, the cost function's start point is used for both
        coordinates. Extra keyword arguments become instance attributes.
        """
        self.lr = lr
        self.cost_f = cost_f
        # `is None` instead of `== None`: identity check cannot be hijacked
        # by a custom __eq__ on numeric-like coordinate objects.
        if x is None or y is None:
            self.x = self.cost_f.x_start
            self.y = self.cost_f.y_start
        else:
            self.x = x
            self.y = y
        self.__dict__.update(kwargs)

    def step(self, lr):
        """Advance one optimization iteration; must be overridden by subclasses."""
        raise NotImplementedError()
1785231 | <filename>src/app.py<gh_stars>0
from flask import Flask, render_template
from hackernews_tidal import *
from twitter_tidal import *
import tweepy
app = Flask(__name__)
# sample dashboard constants to get this working before I try using an actual database
dashboard = 'Trendy Software Developer'
widgets = []
hn_user = 'dazebra'
twitter_user = 'kanyewest'
profile_data = find_profile(hn_user)
submission_data = find_submissions(hn_user)
comment_data = find_comments(hn_user)
tweets = get_tweets(twitter_user, 10)
@app.route("/")
def index():
    """Render the dashboard home page from the module-level sample data
    (HN profile, sample tweet, dashboard name)."""
    return render_template('index.html', dashboard_name = profile_data[0][0], karma = profile_data[0][2], date = profile_data[0][1], dashboard_current = dashboard, sample_tweet = tweets[1].text, twitter_name = twitter_user)
# gets desired profile information
@app.route("/hackernews_profile/")
def hackernews_profile():
    """Return the cached Hacker News profile as a plain-text summary."""
    record = profile_data[0]
    return f"username: {record[0]!s}\ndate created: {record[1]!s}\nkarma: {record[2]!s}"
@app.route("/add_widget/")
def add_widget():
    """Placeholder for the widget-creation page.

    The original body returned None, which makes Flask raise a 500 error
    for every request to this route; return an explicit 501 (Not
    Implemented) response until the page exists.
    """
    # TODO: load the real widget-creation page here.
    return "Widget creation is not implemented yet.", 501


if __name__ == "__main__":
    app.run()
# ideas to make this work:
# - accept the fact that routing will make this a multi-page application for simplicity
# - use AJAX requests to load onto the same page, not React
#
#
#
#
| StarcoderdataPython |
1647899 | # -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_hibp"""
from __future__ import print_function
from resilient_circuits.util import *
def codegen_reload_data():
    """Parameters to codegen used to generate the fn_hibp package."""
    return {
        "package": u"fn_hibp",
        "incident_fields": [],
        "action_fields": [],
        "function_params": [u"email_address"],
        "datatables": [],
        "message_destinations": [u"hibp"],
        "functions": [u"have_i_been_pwned_get_breaches", u"have_i_been_pwned_get_pastes"],
        "phases": [],
        "automatic_tasks": [],
        "scripts": [],
        "workflows": [u"have_i_been_pwned_search"],
        "actions": [u"Have I Been Pwned Search"],
        "incident_artifact_types": [],
    }
def customization_data(client=None):
    """Produce any customization definitions (types, fields, message destinations, etc)
    that should be installed by `resilient-circuits customize`
    """
    # This import data contains:
    #   Function inputs:
    #     email_address
    #   Message Destinations:
    #     hibp
    #   Functions:
    #     have_i_been_pwned_get_breaches
    #     have_i_been_pwned_get_pastes
    #   Workflows:
    #     have_i_been_pwned_search
    #   Rules:
    #     Have I Been Pwned Search

    # NOTE(review): the payload below looks truncated/anonymised ("<KEY>"
    # placeholders instead of the real base64 export blob) — restore the
    # original export before running `resilient-circuits customize`.
    yield ImportDefinition(u"""
<KEY>
LTE<KEY>
<KEY>
"""
    )
3278161 | <reponame>BlueWhaleMain/cipher-manager
import pyDes
from Crypto.Cipher import AES
from PyQt5 import QtWidgets, QtGui
from cm.crypto.aes.file import CipherAesFile
from cm.crypto.des.file import CipherDesFile
from cm.crypto.file import SimpleCipherFile, PPCipherFile
from cm.crypto.rsa.file import CipherRSAFile
from cm.file import CipherFile
from gui.designer.attribute_dialog import Ui_AttributeDialog
from gui.widgets.item.readonly import ReadOnlyItem
class AttributeDialog(QtWidgets.QDialog, Ui_AttributeDialog):
    """Read-only modal dialog that presents a cipher file's attributes as a
    two-column (name / value) tree."""

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Two-column tree model: attribute name / attribute value.
        self.model = QtGui.QStandardItemModel()
        self.model.setHorizontalHeaderLabels(['名称', '值'])
        self.attribute_tree_view.setModel(self.model)
        self.attribute_tree_view.header().setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)

    @classmethod
    def counter_row(cls, item: QtGui.QStandardItem, cipher_file):
        """Append a gray "stored record count" row under `item`."""
        color_gray = QtGui.QColor('gray')
        left = ReadOnlyItem('存储的记录数量')
        left.setForeground(color_gray)
        right = ReadOnlyItem(str(len(cipher_file.records)))
        right.setForeground(color_gray)
        item.appendRow((left, right))

    # Human-readable display names for pyDes / PyCryptodome mode constants.
    des_modes = {pyDes.ECB: 'ECB', pyDes.CBC: 'CBC'}
    des_pad_modes = {pyDes.PAD_NORMAL: 'PAD_NORMAL', pyDes.PAD_PKCS5: 'PAD_PKCS5'}
    aes_modes = {AES.MODE_ECB: 'MODE_ECB', AES.MODE_CBC: 'MODE_CBC', AES.MODE_CFB: 'MODE_CFB', AES.MODE_OFB: 'MODE_OFB',
                 AES.MODE_CTR: 'MODE_CTR', AES.MODE_OPENPGP: 'MODE_OPENPGP', AES.MODE_CCM: 'MODE_CCM',
                 AES.MODE_EAX: 'MODE_EAX', AES.MODE_SIV: 'MODE_SIV', AES.MODE_GCM: 'MODE_GCM', AES.MODE_OCB: 'MODE_OCB'}

    def load_file(self, cipher_file: CipherFile):
        """Populate the tree from `cipher_file` and show the dialog modally.

        Rows are chosen by the concrete file type (simple / DES / AES /
        public-private / RSA); unknown types get a red marker row.
        """
        cipher_item = ReadOnlyItem('密钥文件属性')
        cipher_item.appendRow((ReadOnlyItem('文件编码'), ReadOnlyItem(cipher_file.encoding)))
        cipher_item.appendRow((ReadOnlyItem('加密类型'), ReadOnlyItem(cipher_file.encrypt_algorithm)))
        color_red = QtGui.QColor('red')
        if isinstance(cipher_file, SimpleCipherFile):
            # Password-based files: hash algorithm, root-password hash and salt.
            simple_cipher_item = ReadOnlyItem('常规密钥文件附加属性')
            simple_cipher_item.appendRow(
                (ReadOnlyItem('使用的哈希算法'), ReadOnlyItem(cipher_file.hash_algorithm)))
            simple_cipher_item.appendRow((ReadOnlyItem('根密码哈希值'), ReadOnlyItem(cipher_file.rph)))
            simple_cipher_item.appendRow((ReadOnlyItem('根密码盐值'), ReadOnlyItem(cipher_file.salt)))
            self.counter_row(simple_cipher_item, cipher_file)
            if isinstance(cipher_file, CipherDesFile):
                # DES-specific rows: mode, IV, padding byte, padding mode.
                cipher_des_item = ReadOnlyItem('DES文件附加属性')
                cipher_des_item.appendRow((ReadOnlyItem('模式'), ReadOnlyItem(self.des_modes[cipher_file.des_cfg.mode])))
                _IV = cipher_file.des_cfg.IV
                cipher_des_item.appendRow((ReadOnlyItem('向量'), ReadOnlyItem(_IV.hex() if _IV else '空')))
                pad = cipher_file.des_cfg.pad
                cipher_des_item.appendRow((ReadOnlyItem('填充'), ReadOnlyItem(pad.hex() if pad else '空')))
                cipher_des_item.appendRow(
                    (ReadOnlyItem('填充模式'), ReadOnlyItem(self.des_pad_modes[cipher_file.des_cfg.padmode])))
                simple_cipher_item.appendRow(cipher_des_item)
            elif isinstance(cipher_file, CipherAesFile):
                # AES-specific rows: mode and IV.
                cipher_aes_item = ReadOnlyItem('AES文件附加属性')
                cipher_aes_item.appendRow((ReadOnlyItem('模式'), ReadOnlyItem(self.aes_modes[cipher_file.aes_cfg.mode])))
                _IV = cipher_file.aes_cfg.IV
                cipher_aes_item.appendRow((ReadOnlyItem('向量'), ReadOnlyItem(_IV.hex() if _IV else '空')))
                simple_cipher_item.appendRow(cipher_aes_item)
            cipher_item.appendRow(simple_cipher_item)
        elif isinstance(cipher_file, PPCipherFile):
            # Public/private key files: signature hash algorithm and signature.
            pp_cipher_item = ReadOnlyItem('公私钥文件附加属性')
            cipher_item.appendRow(pp_cipher_item)
            pp_cipher_item.appendRow(
                (ReadOnlyItem('签名使用的哈希算法'), ReadOnlyItem(cipher_file.sign_hash_algorithm)))
            pp_cipher_item.appendRow(
                (ReadOnlyItem('签名哈希值'), ReadOnlyItem(cipher_file.hash_algorithm_sign)))
            self.counter_row(pp_cipher_item, cipher_file)
            if isinstance(cipher_file, CipherRSAFile):
                cipher_rsa_item = ReadOnlyItem('RSA文件附加属性')
                pp_cipher_item.appendRow(cipher_rsa_item)
            cipher_item.appendRow(pp_cipher_item)
        else:
            # Unrecognized file type: red warning row.
            unknown_item = ReadOnlyItem('未知文件附加属性')
            unknown_item.setForeground(color_red)
            cipher_item.appendRow(unknown_item)
        self.model.appendRow(cipher_item)
        self.attribute_tree_view.expandAll()
        self.exec_()
| StarcoderdataPython |
70982 | <reponame>mijo2/Eye-In_The_Sky
import math
import numpy as np
import logging
import sys
logging.basicConfig(level=logging.DEBUG,
format=' %(asctime)s - %(levelname)s- %(message)s')
# logging.disable(sys.maxsize)
def rowslice(image, gtimage, ptsz):
    """Cut one horizontal strip into overlapping ptsz x ptsz patches.

    `image` and `gtimage` are strips of height ptsz; patches slide along the
    width with a stride of ptsz // 2. If the width is not a multiple of the
    stride, one extra patch aligned to the right edge is appended.
    Returns (image_patches, gt_patches) stacked along a new leading axis.
    """
    half = ptsz // 2
    width = int(gtimage.shape[1])
    n_windows = math.floor(image.shape[1] / half)
    # Seed each list with a float64 empty so concatenation matches the
    # original np.append dtype promotion.
    x_parts = [np.array([]).reshape((0, ptsz, ptsz, image.shape[2]))]
    y_parts = [np.array([]).reshape((0, ptsz, ptsz, gtimage.shape[2]))]
    for i in range(n_windows - 1):
        x_parts.append(image[np.newaxis, :, half * i:half * (i + 2), :])
        y_parts.append(gtimage[np.newaxis, :, half * i:half * (i + 2), :])
    if image.shape[1] % half != 0:
        # Right-edge patch covering the leftover columns.
        x_parts.append(image[np.newaxis, :, width - ptsz:, :])
        y_parts.append(gtimage[np.newaxis, :, width - ptsz:, :])
    return np.concatenate(x_parts, axis=0), np.concatenate(y_parts, axis=0)
def imgSlice(image, gtimage, ptsz):
    """Slice an image / ground-truth pair into overlapping ptsz x ptsz patches.

    Horizontal strips of height ptsz advance with a stride of ptsz // 2; each
    strip is further cut by rowslice(). If the height is not a multiple of the
    stride, one extra strip aligned to the bottom edge is processed.
    """
    assert image.shape[:2] == gtimage.shape[:2], "Image shapes of the given images do not match"
    half = ptsz // 2
    height = int(gtimage.shape[0])
    n_strips = math.floor(image.shape[0] / half)
    x_parts = [np.array([]).reshape((0, ptsz, ptsz, image.shape[2]))]
    y_parts = [np.array([]).reshape((0, ptsz, ptsz, gtimage.shape[2]))]
    for i in range(n_strips - 1):
        xr, yr = rowslice(image[half * i:half * (i + 2), :, :],
                          gtimage[half * i:half * (i + 2), :, :], ptsz)
        x_parts.append(xr)
        y_parts.append(yr)
    if height % half != 0:
        # Bottom-edge strip covering the leftover rows.
        xr, yr = rowslice(image[height - ptsz:, :, :], gtimage[height - ptsz:, :, :], ptsz)
        x_parts.append(xr)
        y_parts.append(yr)
    return np.concatenate(x_parts, axis=0), np.concatenate(y_parts, axis=0)
| StarcoderdataPython |
3363097 | import datetime
import time
class Timer:
    """Simple wall-clock stopwatch with optional ETA reporting."""

    def __init__(self):
        self.start()

    def start(self, totalCount=None):
        """Reset the clock; optionally remember an expected total count."""
        self.startTime = datetime.datetime.now()
        self.totalCount = totalCount

    def stop(self, text=""):
        """Print the seconds elapsed since start() and restart the clock."""
        elapsed = (datetime.datetime.now() - self.startTime).total_seconds()
        print(text + ", elapsed time:", elapsed)
        self.startTime = datetime.datetime.now()

    def remaining(self, nowCount, totalCount=None):
        """Print a remaining-time estimate based on progress so far.

        Does nothing when nowCount is 0 (no progress to extrapolate from).
        """
        if totalCount:
            self.totalCount = totalCount
        if nowCount != 0:
            elapsed = (datetime.datetime.now() - self.startTime).total_seconds()
            eta = (float(self.totalCount) / float(nowCount)) * elapsed - elapsed
            print(str(nowCount) + "/" + str(self.totalCount) + ": remaining sec %2.2f" % eta)
class Profiling:
    """Accumulates labelled lap times measured with time.time()."""

    def __init__(self, name):
        self.start(name)

    def start(self, rename=None):
        """Clear recorded laps and restart both the lap and total clocks."""
        self.result = []
        if rename:
            self.name = rename
        now = time.time()
        self.initTime = now
        self.startTime = now

    def lap(self, label):
        """Record the time since the previous lap and the total since start()."""
        now = time.time()
        self.result.append({"label": label,
                            "delta": now - self.startTime,
                            "total": now - self.initTime})
        self.startTime = now

    def __repr__(self):
        # NOTE: repr() records a final "end" lap as a side effect, so the
        # printout always closes with the run's total duration.
        self.lap("end")
        s = "Time Profiling -" + self.name + " :"
        for entry in self.result:
            s = s + f" {entry['label']}: {entry['delta']}"
        s = s + f"total: {entry['total']}"
        return s
def str_lim(obj, lim):
    """Return str(obj), truncated to `lim` characters with a "..." suffix
    when the string form is longer than the limit."""
    text = str(obj)
    return text[:lim] + "..." if len(text) > lim else text
1774017 | <gh_stars>1-10
from django.apps import AppConfig
class CitiesLocalConfig(AppConfig):
    """Django application configuration for the cities_local app."""
    name = 'cities_local'
| StarcoderdataPython |
3208722 | <filename>src/envs/starcraft2/maps/mt_maps.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pysc2.maps import lib
from functools import partial
from itertools import combinations_with_replacement, product
class SMACMap(lib.Map):
    """Base pysc2 map definition shared by all SMAC multi-task maps."""
    directory = "SMAC_Maps"
    download = "https://github.com/oxwhirl/smac#smac-maps"
    players = 2
    step_mul = 8
    game_steps_per_episode = 0

# Registry of map names; each gets a SMACMap subclass generated below.
mt_map_param_registry = {
    "empty_passive": {}
}

# Dynamically create one SMACMap subclass per registered map name so pysc2
# can discover the maps by class name.
for name in mt_map_param_registry.keys():
    globals()[name] = type(name, (SMACMap,), dict(filename=name))
def get_all_unique_teams(all_types, min_len, max_len):
    """Enumerate every multiset of unit types with size in [min_len, max_len].

    Returns one list per team, each entry a (count, unit_type) tuple, e.g.
    [(2, 'Marine'), (1, 'Medivac')].
    """
    all_com_teams = []
    for i in range(min_len, max_len + 1):
        all_com_teams += list(combinations_with_replacement(all_types, i))
    all_cnt_teams = []
    for team in all_com_teams:
        # dict.fromkeys keeps first-occurrence order, making the output
        # deterministic; the original list(set(team)) varied with string
        # hash randomization between runs.
        team_types = list(dict.fromkeys(team))
        team_counts = [(team.count(typ), typ) for typ in team_types]
        all_cnt_teams.append(team_counts)
    return all_cnt_teams
type_name2health = {'Baneling': 30.0, 'Colossus': 200.0, 'Hydralisk': 80.0,
'Marauder': 125.0, 'Marine': 45.0, 'Medivac': 150.0,
'Stalker': 80.0, 'Zealot': 100.0, 'Zergling': 35.0}
type_name2shield = {'Baneling': 0.0, 'Colossus': 150.0, 'Hydralisk': 0.0,
'Marauder': 0.0, 'Marine': 0.0, 'Medivac': 0.0,
'Stalker': 80.0, 'Zealot': 50.0, 'Zergling': 0.0}
def fixed_armies(ally_army, enemy_army, ally_centered=False, rotate=False,
                 separation=10, jitter=0, episode_limit=100,
                 map_name="empty_passive", map_type=None):
    """Build the scenario dict for one fixed ally-vs-enemy matchup.

    ally_army / enemy_army are lists of (count, unit_type_name) tuples,
    e.g. [(3, 'Marine'), (1, 'Medivac')]. The reward ceiling is the total
    health plus shield of the enemy army.
    """
    reward_health_shield_max = sum(
        num * (type_name2health[utype_name] + type_name2shield[utype_name])
        for num, utype_name in enemy_army
    )
    return {'scenarios': [(ally_army, enemy_army)],
            'max_types_and_units_scenario': (ally_army, enemy_army),
            'ally_centered': ally_centered,
            'rotate': rotate,
            'separation': separation,
            'jitter': jitter,
            'episode_limit': episode_limit,
            'map_name': map_name,
            'map_type': map_type,
            'reward_health_shield_max': reward_health_shield_max}
def symmetric_armies(army_spec, ally_centered=False, rotate=False, separation=10,
                     jitter=0, episode_limit=100, n_extra_tags=0,
                     map_name="empty_passive", map_type=None):
    """Build scenarios in which both sides field identical (mirrored) armies.

    army_spec is a list of ((unit_type, ...), (min_count, max_count)) groups;
    every cross-product of per-group compositions becomes one mirrored scenario.
    """
    reward_health_shield_max = 0
    unique_sub_teams = []
    for unit_types, n_unit_range in army_spec:
        unique_sub_teams.append(get_all_unique_teams(unit_types, n_unit_range[0], n_unit_range[1]))
        # Reward upper bound: toughest unit type in the group times max group size.
        reward_health_shield_max += max([type_name2health[unit_type] + type_name2shield[unit_type] for unit_type in unit_types]) * n_unit_range[1]
    unique_teams = [sum(prod, []) for prod in product(*unique_sub_teams)]
    scenarios = list(zip(unique_teams, unique_teams))
    # sort by number of types and total number of units
    max_types_and_units_team = sorted(unique_teams, key=lambda x: (len(x), sum(num for num, unit in x)), reverse=True)[0]
    max_types_and_units_scenario = (max_types_and_units_team, max_types_and_units_team)
    # scenario = [([(3, 'Marine')], [(3, 'Marine')]),
    #              ...
    #              ([(6, 'Marauder'), (2, 'Medivac')], [(6, 'Marauder'), (2, 'Medivac')])]
    #
    # max_types_and_units_scenario = ([(1, 'Marauder'), (5, 'Marine'), (2, 'Medivac')],
    #                                 [(1, 'Marauder'), (5, 'Marine'), (2, 'Medivac')])
    scenario_dict = {'scenarios': scenarios,
                     'max_types_and_units_scenario': max_types_and_units_scenario,
                     'ally_centered': ally_centered,
                     'rotate': rotate,
                     'separation': separation,
                     'jitter': jitter,
                     'episode_limit': episode_limit,
                     'n_extra_tags': n_extra_tags,
                     'map_name': map_name,
                     'map_type': map_type,
                     'reward_health_shield_max': reward_health_shield_max}
    return scenario_dict
def asymmetric_armies(army_spec, spec_delta, ally_centered=False, rotate=False, separation=10,
                      jitter=0, episode_limit=100, n_extra_tags=0,
                      map_name="empty_passive", map_type=None):
    """Build scenarios where the agent army is the enemy army offset by spec_delta.

    spec_delta maps unit type -> count offset applied to the agent side
    (clamped at zero), e.g. {'Marine': -1} gives the agents one fewer Marine.
    """
    reward_health_shield_max = 0
    unique_sub_teams = []
    for unit_types, n_unit_range in army_spec:
        unique_sub_teams.append(get_all_unique_teams(unit_types, n_unit_range[0], n_unit_range[1]))
        # Reward upper bound: toughest unit type in the group times max group size.
        reward_health_shield_max += max([type_name2health[unit_type] + type_name2shield[unit_type] for unit_type in unit_types]) * n_unit_range[1]
    enemy_teams = [sum(prod, []) for prod in product(*unique_sub_teams)]
    agent_teams = [[(max(num + spec_delta.get(typ, 0), 0), typ) for num, typ in team] for team in enemy_teams]
    scenarios = list(zip(agent_teams, enemy_teams))
    # sort by number of types and total number of units
    max_types_and_units_ag_team = sorted(agent_teams, key=lambda x: (len(x), sum(num for num, unit in x)), reverse=True)[0]
    max_types_and_units_en_team = sorted(enemy_teams, key=lambda x: (len(x), sum(num for num, unit in x)), reverse=True)[0]
    max_types_and_units_scenario = (max_types_and_units_ag_team, max_types_and_units_en_team)
    scenario_dict = {'scenarios': scenarios,
                     'max_types_and_units_scenario': max_types_and_units_scenario,
                     'ally_centered': ally_centered,
                     'rotate': rotate,
                     'separation': separation,
                     'jitter': jitter,
                     'episode_limit': episode_limit,
                     'n_extra_tags': n_extra_tags,
                     'map_name': map_name,
                     'map_type': map_type,
                     'reward_health_shield_max': reward_health_shield_max}
    return scenario_dict
mt_scenario_registry = {
"3-8m_symmetric": partial(symmetric_armies,
[(('Marine',), (3, 8))],
ally_centered=False,
rotate=True,
separation=14,
jitter=1,
episode_limit=100,
n_extra_tags=0,
map_name="empty_passive",
map_type='marines'),
"6-11m_asymmetric": partial(asymmetric_armies,
[(('Marine',), (6, 11))],
{'Marine': -1},
ally_centered=False,
rotate=True,
separation=14,
jitter=1,
episode_limit=100,
n_extra_tags=0,
map_name="empty_passive",
map_type='marines'),
"3-8sz_symmetric": partial(symmetric_armies,
[(('Stalker', 'Zealot'), (3, 8))],
ally_centered=False,
rotate=True,
separation=14,
jitter=1,
episode_limit=150,
n_extra_tags=0,
map_name="empty_passive",
map_type='stalkers_and_zealots'),
"5-11sz_symmetric": partial(symmetric_armies,
[(('Stalker', 'Zealot'), (5, 11))],
ally_centered=False,
rotate=True,
separation=14,
jitter=1,
episode_limit=150,
n_extra_tags=0,
map_name="empty_passive",
map_type='stalkers_and_zealots'),
"3-8MMM_symmetric": partial(symmetric_armies,
[(('Marine', 'Marauder'), (3, 6)), (('Medivac',), (0, 2))],
ally_centered=False,
rotate=True,
separation=14,
jitter=1,
episode_limit=150,
n_extra_tags=0,
map_name="empty_passive",
map_type='MMM'),
"5-11MMM_symmetric": partial(symmetric_armies,
[(('Marine', 'Marauder'), (5, 8)), (('Medivac',), (0, 3))],
ally_centered=False,
rotate=True,
separation=14,
jitter=1,
episode_limit=150,
n_extra_tags=0,
map_name="empty_passive",
map_type='MMM'),
"3-8csz_symmetric": partial(symmetric_armies,
[(('Stalker', 'Zealot'), (3, 6)), (('Colossus',), (0, 2))],
ally_centered=False,
rotate=True,
separation=14,
jitter=1,
episode_limit=150,
n_extra_tags=0,
map_name="empty_passive",
map_type='colossus_stalkers_zealots'),
"5-11csz_symmetric": partial(symmetric_armies,
[(('Stalker', 'Zealot'), (5, 8)), (('Colossus',), (0, 3))],
ally_centered=False,
rotate=True,
separation=14,
jitter=1,
episode_limit=150,
n_extra_tags=0,
map_name="empty_passive",
map_type='colossus_stalkers_zealots'),
}
def get_mt_scenario_registry():
    """Return the registry mapping scenario names to their builder partials."""
    return mt_scenario_registry
| StarcoderdataPython |
81443 | <gh_stars>10-100
import os
from django.conf import settings
from pyplan.pyplan.common.baseService import BaseService
from pyplan.pyplan.usercompanies.models import UserCompany
from .models import Activity, ActivityType
class ActivityService(BaseService):
    """Tracks the current user/company's recently opened models and dashboards,
    keeping only the five most recent entries of each type."""

    def registerOpenModel(self, file_path):
        """Record that the current user opened the model at `file_path`.

        NOTE(review): every field is passed as a lookup key (no `defaults=`),
        and update_or_create returns an (object, created) tuple — confirm
        callers expect the tuple rather than the Activity instance.
        """
        norm_file_path = os.path.normpath(file_path)
        activity = Activity.objects.update_or_create(
            type=ActivityType.MODEL,
            usercompany_id=self.client_session.userCompanyId,
            model_path=norm_file_path,
            model_name=norm_file_path[norm_file_path.rfind(os.path.sep)+1:],
        )
        # Delete entry 6 and beyond
        objects_to_remove = Activity.objects.filter(
            type=ActivityType.MODEL,
            usercompany_id=self.client_session.userCompanyId,
        ).order_by('-updated_at')[5:]
        Activity.objects.filter(pk__in=objects_to_remove).delete()
        return activity

    def registerOpenDashboard(self, dashboard):
        """Record that the current user opened `dashboard` in the active model."""
        activity = Activity.objects.update_or_create(
            type=ActivityType.DASHBOARD,
            usercompany_id=self.client_session.userCompanyId,
            model_path=self.client_session.modelInfo.uri,
            model_name=self.client_session.modelInfo.name,
            info={'dashboardId': dashboard.pk, 'dashboardName': dashboard.name}
        )
        # Delete entry 6 and beyond
        objects_to_remove = Activity.objects.filter(
            type=ActivityType.DASHBOARD,
            usercompany_id=self.client_session.userCompanyId,
        ).order_by('-updated_at')[5:]
        Activity.objects.filter(pk__in=objects_to_remove).delete()
        return activity

    def lastModels(self):
        """Return up to five recent model activities whose files still exist on disk."""
        model_list = Activity.objects.filter(
            type=ActivityType.MODEL,
            usercompany_id=self.client_session.userCompanyId
        ).order_by('-updated_at')[:5]
        return list(filter(lambda model: os.path.isfile(os.path.join(settings.MEDIA_ROOT, 'models', model.model_path)), model_list))

    def lastDashboards(self):
        """Return the five most recently opened dashboard activities."""
        return Activity.objects.filter(
            type=ActivityType.DASHBOARD,
            usercompany_id=self.client_session.userCompanyId
        ).order_by('-updated_at')[:5]
| StarcoderdataPython |
3344119 | <reponame>SAKERZ/Adafruit_Learning_System_Guides<filename>pi_radio/radio_lorawan.py
"""
Example for using the RFM9x Radio with Raspberry Pi and LoRaWAN
Learn Guide: https://learn.adafruit.com/lora-and-lorawan-for-raspberry-pi
Author: <NAME> for Adafruit Industries
"""
import threading
import time
import subprocess
import busio
from digitalio import DigitalInOut, Direction, Pull
import board
# Import thte SSD1306 module.
import adafruit_ssd1306
# Import Adafruit TinyLoRa
from adafruit_tinylora.adafruit_tinylora import TTN, TinyLoRa
# Button A
btnA = DigitalInOut(board.D5)
btnA.direction = Direction.INPUT
btnA.pull = Pull.UP
# Button B
btnB = DigitalInOut(board.D6)
btnB.direction = Direction.INPUT
btnB.pull = Pull.UP
# Button C
btnC = DigitalInOut(board.D12)
btnC.direction = Direction.INPUT
btnC.pull = Pull.UP
# Create the I2C interface.
i2c = busio.I2C(board.SCL, board.SDA)
# 128x32 OLED Display
display = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c, addr=0x3c)
# Clear the display.
display.fill(0)
display.show()
width = display.width
height = display.height
# TinyLoRa Configuration
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
cs = DigitalInOut(board.CE1)
irq = DigitalInOut(board.D22)
# TTN Device Address, 4 Bytes, MSB
devaddr = bytearray([0x00, 0x00, 0x00, 0x00])
# TTN Network Key, 16 Bytes, MSB
nwkey = bytearray([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
# TTN Application Key, 16 Bytess, MSB
app = bytearray([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
# Initialize ThingsNetwork configuration
ttn_config = TTN(devaddr, nwkey, app, country='US')
# Initialize lora object
lora = TinyLoRa(spi, cs, irq, ttn_config)
# 2b array to store sensor data
data_pkt = bytearray(2)
# time to delay periodic packet sends (in seconds)
data_pkt_delay = 5.0
def send_pi_data_periodic():
    """Transmit the CPU load now and re-arm a timer to repeat every data_pkt_delay seconds.

    NOTE(review): each call spawns a fresh Timer thread; triggering this more
    than once starts parallel send loops — confirm it is entered only once.
    """
    threading.Timer(data_pkt_delay, send_pi_data_periodic).start()
    print("Sending periodic data...")
    send_pi_data(CPU)
    print('CPU:', CPU)
def send_pi_data(data):
    """Send `data` (a float) over LoRaWAN as a 2-byte big-endian fixed-point value
    (two implied decimal places), then flash a confirmation on the OLED."""
    # Encode float as int
    data = int(data * 100)
    # Encode payload as bytes
    data_pkt[0] = (data >> 8) & 0xff
    data_pkt[1] = data & 0xff
    # Send data packet
    lora.send_data(data_pkt, len(data_pkt), lora.frame_counter)
    lora.frame_counter += 1
    display.fill(0)
    display.text('Sent Data to TTN!', 15, 15, 1)
    print('Data sent!')
    display.show()
    time.sleep(0.5)
while True:
packet = None
# draw a box to clear the image
display.fill(0)
display.text('RasPi LoRaWAN', 35, 0, 1)
# read the raspberry pi cpu load
cmd = "top -bn1 | grep load | awk '{printf \"%.1f\", $(NF-2)}'"
CPU = subprocess.check_output(cmd, shell = True )
CPU = float(CPU)
if not btnA.value:
# Send Packet
send_pi_data(CPU)
if not btnB.value:
# Display CPU Load
display.fill(0)
display.text('CPU Load %', 45, 0, 1)
display.text(str(CPU), 60, 15, 1)
display.show()
time.sleep(0.1)
if not btnC.value:
display.fill(0)
display.text('* Periodic Mode *', 15, 0, 1)
display.show()
time.sleep(0.5)
send_pi_data_periodic()
display.show()
time.sleep(.1)
| StarcoderdataPython |
3380942 | <reponame>30ideas-Software-Factory/readIT
#!/usr/bin/python3
"""Create a Super Class called BaseModel with attributes and methods that
other classes will inherit."""
import models
from uuid import uuid4
from sqlalchemy import Column, String
from sqlalchemy.ext.declarative import declarative_base
# import engine
# from engine.dbStorage import DBStorage
Base = declarative_base()
class BaseModel:
    """Class that defines all common attributes/methods
    for other classes will inherit"""

    def __str__(self):
        """String representation of the BaseModel class.

        Reads the per-class id attribute named "Id<ClassName>" and renders
        "[<ClassName>] (<id>) <attribute dict>".
        """
        Id = 'Id' + self.__class__.__name__
        # getattr replaces the original eval('self.{}'.format(Id)): same
        # attribute lookup without arbitrary code execution.
        return "[{:s}] ({:s}) {}".format(self.__class__.__name__,
                                         getattr(self, Id),
                                         self.__dict__)

    def to_dict(self):
        """Returns a dictionary containing all keys/values of the instance,
        plus a "Class" key holding the class name."""
        new_dict = self.__dict__.copy()
        new_dict["Class"] = self.__class__.__name__
        return new_dict
| StarcoderdataPython |
1729937 | # -*- coding: utf-8 -*-
import argparse
import nltk
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from tqdm import tqdm
def lemmatize(words):
    """Lemmatise each word using its NLTK POS tag.

    The first letter of the Penn Treebank tag is mapped to a WordNet POS
    constant; words with unmapped tags pass through unchanged.
    """
    lemmatizer = WordNetLemmatizer()
    maps = {"J": wordnet.ADJ, "N": wordnet.NOUN, "V": wordnet.VERB, "R": wordnet.ADV}
    return [lemmatizer.lemmatize(w, maps[p[0].upper()]) if p[0].upper() in maps else w for w, p in nltk.pos_tag(words)]
def build_roles(spans, length):
    """Render SRL spans as one pipe-separated role string per token.

    Each span is (predicate_index, start, end, label) with 1-based inclusive
    token positions. '[prd]' spans mark the predicate token itself ('0:[prd]');
    other labels produce BIO tags anchored to their predicate. Tokens with no
    role become '_'.
    """
    labels = [''] * length
    for prd, start, end, label in spans:
        if label == 'O':
            continue
        if label == '[prd]':
            labels[prd - 1] = '|'.join((labels[prd - 1], '0:[prd]'))
            continue
        labels[start - 1] = '|'.join((labels[start - 1], f'{prd}:B-{label}'))
        for idx in range(start, end):
            labels[idx] = '|'.join((labels[idx], f'{prd}:I-{label}'))
    # Empty slots become '_'; joined slots drop the leading separator.
    return [(token or '_').lstrip('|') for token in labels]
def prop2conllu(lines):
    """Convert one CoNLL-05-style prop sentence into CoNLL-U lines with SRL roles.

    `lines` holds one whitespace-separated row per token: word, gold predicate
    lemma ('-' for non-predicates), then one argument column per predicate.
    """
    words = [line.split()[0] for line in lines]
    # Fall back to an automatic lemma wherever the gold column holds '-'.
    lemmas, pred_lemmas = [line.split()[1] for line in lines], lemmatize(words)
    lemmas = [i if i != '-' else pred for i, pred in zip(lemmas, pred_lemmas)]
    spans = []
    if len(lines[0].split()) >= 2:
        prds, *args = list(zip(*[line.split()[1:] for line in lines]))
        # 1-based token indices of the predicate tokens.
        prds = [i for i, p in enumerate(prds, 1) if p != '-']
        for i, p in enumerate(prds):
            spans.append((p, 1, len(words), '[prd]'))
            # Bracketed span starts look like "(LABEL*"; span ends contain ")".
            starts, rels = zip(*[(j, a.split('*')[0].split('(')[1]) for j, a in enumerate(args[i], 1) if a.startswith('(')])
            ends = [j for j, a in enumerate(args[i], 1) if a.endswith(')')]
            for s, r, e in zip(starts, rels, ends):
                # 'V' marks the verb itself, already covered by the [prd] span.
                if r == 'V':
                    continue
                spans.append((p, s, e, r))
    roles = build_roles(spans, len(words))
    return ['\t'.join([str(i), word, lemma, '_', '_', '_', '_', '_', role, '_'])
            for i, (word, lemma, role) in enumerate(zip(words, lemmas, roles), 1)]
def process(prop, file):
    """Split the prop file at `prop` on blank lines and write each sentence
    to `file` in CoNLL-U format (sentences separated by blank lines)."""
    with open(prop) as f:
        lines = [line.strip() for line in f]
    i, start, sentences = 0, 0, []
    for line in tqdm(lines):
        if not line:
            # Blank line: everything since `start` forms one sentence.
            sentences.append(prop2conllu(lines[start:i]))
            start = i + 1
        i += 1
    with open(file, 'w') as f:
        for s in sentences:
            f.write('\n'.join(s) + '\n\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Convert the file of prop format to conllu format.'
)
parser.add_argument('--prop', help='path to the prop file')
parser.add_argument('--file', help='path to the converted conllu file')
args = parser.parse_args()
process(args.prop, args.file)
| StarcoderdataPython |
1695970 | # Copyright 2018 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: tomobar_recon
:platform: Unix
:synopsis: A wrapper around TOmographic MOdel-BAsed Reconstruction (ToMoBAR) software \
for advanced iterative image reconstruction
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from savu.plugins.reconstructions.base_recon import BaseRecon
from savu.data.plugin_list import CitationInformation
from savu.plugins.driver.gpu_plugin import GpuPlugin
import numpy as np
from tomobar.methodsIR import RecToolsIR
from savu.plugins.utils import register_plugin
from scipy import ndimage
@register_plugin
class TomobarRecon(BaseRecon, GpuPlugin):
"""
A Plugin to reconstruct full-field tomographic projection data using state-of-the-art regularised iterative algorithms from \
the ToMoBAR package. ToMoBAR includes FISTA and ADMM iterative methods and depends on the ASTRA toolbox and the CCPi RGL toolkit: \
https://github.com/vais-ral/CCPi-Regularisation-Toolkit.
:param output_size: Number of rows and columns in the \
reconstruction. Default: 'auto'.
:param data_fidelity: Data fidelity, chosoe Least Squares only at the moment. Default: 'LS'.
:param data_Huber_thresh: Threshold parameter for __Huber__ data fidelity . Default: None.
:param data_any_rings: a parameter to suppress various artifacts including rings and streaks. Default: None.
:param data_any_rings_winsizes: half window sizes to collect background information [detector, angles, num of projections]. Default: (9,7,0).
:param data_any_rings_power: a power parameter for Huber model. Default: 1.5.
:param data_full_ring_GH: Regularisation variable for full constant ring removal (GH model). Default: None.
:param data_full_ring_accelerator_GH: Acceleration constant for GH ring removal. Default: 10.0.
:param algorithm_iterations: Number of outer iterations for FISTA (default) or ADMM methods. Default: 20.
:param algorithm_verbose: print iterations number and other messages ('off' by default). Default: 'off'.
:param algorithm_ordersubsets: The number of ordered-subsets to accelerate reconstruction. Default: 6.
:param algorithm_nonnegativity: ENABLE or DISABLE nonnegativity constraint. Default: 'ENABLE'.
:param regularisation_method: To regularise choose methods ROF_TV, FGP_TV, PD_TV, SB_TV, LLT_ROF,\
NDF, TGV, NLTV, Diff4th. Default: 'FGP_TV'.
:param regularisation_parameter: Regularisation (smoothing) value, higher \
the value stronger the smoothing effect. Default: 0.00001.
:param regularisation_iterations: The number of regularisation iterations. Default: 80.
:param regularisation_device: The number of regularisation iterations. Default: 'gpu'.
:param regularisation_PD_lip: Primal-dual parameter for convergence. Default: 8.
:param regularisation_methodTV: 0/1 - TV specific isotropic/anisotropic choice. Default: 0.
:param regularisation_timestep: Time marching parameter, relevant for \
(ROF_TV, LLT_ROF, NDF, Diff4th) penalties. Default: 0.003.
:param regularisation_edge_thresh: Edge (noise) related parameter, relevant for NDF and Diff4th. Default: 0.01.
:param regularisation_parameter2: Regularisation (smoothing) value for LLT_ROF method. Default: 0.005.
:param regularisation_NDF_penalty: NDF specific penalty type Huber, Perona, Tukey. Default: 'Huber'.
"""
def __init__(self):
super(TomobarRecon, self).__init__("TomobarRecon")
def _shift(self, sinogram, centre_of_rotation):
centre_of_rotation_shift = (sinogram.shape[0]/2) - centre_of_rotation
result = ndimage.interpolation.shift(sinogram,
(centre_of_rotation_shift, 0))
return result
def pre_process(self):
# extract given parameters into dictionaries suitable for ToMoBAR input
self._data_ = {'OS_number' : self.parameters['algorithm_ordersubsets'],
'huber_threshold' : self.parameters['data_Huber_thresh'],
'ring_weights_threshold' : self.parameters['data_any_rings'],
'ring_tuple_halfsizes' : self.parameters['data_any_rings_winsizes'],
'ring_huber_power' : self.parameters['data_any_rings_power'],
'ringGH_lambda' : self.parameters['data_full_ring_GH'],
'ringGH_accelerate' : self.parameters['data_full_ring_accelerator_GH']}
self._algorithm_ = {'iterations' : self.parameters['algorithm_iterations'],
'nonnegativity' : self.parameters['algorithm_nonnegativity'],
'verbose' : self.parameters['algorithm_verbose']}
self._regularisation_ = {'method' : self.parameters['regularisation_method'],
'regul_param' : self.parameters['regularisation_parameter'],
'iterations' : self.parameters['regularisation_iterations'],
'device_regulariser' : self.parameters['regularisation_device'],
'edge_threhsold' : self.parameters['regularisation_edge_thresh'],
'time_marching_step' : self.parameters['regularisation_timestep'],
'regul_param2' : self.parameters['regularisation_parameter2'],
'PD_LipschitzConstant' : self.parameters['regularisation_PD_lip'],
'NDF_penalty' : self.parameters['regularisation_NDF_penalty'],
'methodTV' : self.parameters['regularisation_methodTV']}
    def process_frames(self, data):
        """Reconstruct one sinogram frame with ToMoBAR's FISTA solver.

        :param data: list of input frames; data[0] is the sinogram
            (assumed angles x detector pixels -- TODO confirm orientation).
        :return: reconstructed 2D slice.
        """
        centre_of_rotations, angles, self.vol_shape, init = self.get_frame_params()
        sinogram = data[0].astype(np.float32)
        anglesTot, self.DetectorsDimH = np.shape(sinogram)
        # ToMoBAR expects projection angles in radians.
        self.anglesRAD = np.deg2rad(angles.astype(np.float32))
        self._data_.update({'projection_norm_data' : sinogram})
        # NOTE(review): the triple-quoted block below is disabled PWLS support;
        # it is a no-op string expression, not executed code.
        """
        # if one selects PWLS model and provides raw input data
        if (self.parameters['data_fidelity'] == 'PWLS'):
            rawdata = data[1].astype(np.float32)
            rawdata /= np.max(rawdata)
            self._data_.update({'projection_raw_data' : rawdata})
        """
        # set parameters and initiate the ToMoBAR class object
        self.Rectools = RecToolsIR(DetectorsDimH = self.DetectorsDimH,  # DetectorsDimH # detector dimension (horizontal)
                    DetectorsDimV = None,  # DetectorsDimV # detector dimension (vertical) for 3D case only
                    CenterRotOffset = None,  # Center of Rotation (CoR) scalar (for 3D case only)
                    AnglesVec = self.anglesRAD,  # array of angles in radians
                    ObjSize = self.vol_shape[0] ,  # a scalar to define the reconstructed object dimensions
                    datafidelity=self.parameters['data_fidelity'],# data fidelity, choose LS, PWLS
                    device_projector='gpu')
        # Run FISTA reconstrucion algorithm here
        recon = self.Rectools.FISTA(self._data_, self._algorithm_, self._regularisation_)
        return recon
def get_max_frames(self):
return 'single'
def get_citation_information(self):
cite_info1 = CitationInformation()
cite_info1.name = 'citation1'
cite_info1.description = \
("First-order optimisation algorithm for linear inverse problems.")
cite_info1.bibtex = \
("@article{beck2009,\n" +
"title={A fast iterative shrinkage-thresholding algorithm for linear inverse problems},\n" +
"author={<NAME> Beck, Mark and Teboulle},\n" +
"journal={SIAM Journal on Imaging Sciences},\n" +
"volume={2},\n" +
"number={1},\n" +
"pages={183--202},\n" +
"year={2009},\n" +
"publisher={SIAM}\n" +
"}")
cite_info1.endnote = \
("%0 Journal Article\n" +
"%T A fast iterative shrinkage-thresholding algorithm for linear inverse problems\n" +
"%A Beck, Amir\n" +
"%A Teboulle, Mark\n" +
"%J SIAM Journal on Imaging Sciences\n" +
"%V 2\n" +
"%N 1\n" +
"%P 183--202\n" +
"%@ --\n" +
"%D 2009\n" +
"%I SIAM\n")
cite_info1.doi = "doi: "
return cite_info1
| StarcoderdataPython |
3306314 | <filename>bowtie/_compat.py
# -*- coding: utf-8 -*-
"""
python 2/3 compatability
"""
import inspect
import sys
from os import makedirs
# True when running under Python 2 (the shims below are only needed there).
IS_PY2 = sys.version_info < (3, 0)

if IS_PY2:
    # pylint: disable=invalid-name
    # Keep a handle on the real os.makedirs before shadowing it.
    makedirs_lib = makedirs

    # pylint: disable=function-redefined,missing-docstring
    def makedirs(name, mode=0o777, exist_ok=False):
        """Python 2 backport of ``os.makedirs(..., exist_ok=...)``."""
        import errno  # local import keeps the shim self-contained
        try:
            makedirs_lib(name, mode=mode)
        except OSError as err:
            # Only an "already exists" error may be suppressed; the previous
            # version swallowed *every* OSError (e.g. EACCES, EROFS) whenever
            # exist_ok was True, hiding real failures.
            if not exist_ok or err.errno != errno.EEXIST:
                raise
def numargs(func):
    """Gets number of arguments in python 3.
    """
    signature = inspect.signature(func)
    return len(signature.parameters)
if IS_PY2:
    # pylint: disable=function-redefined
    def numargs(func):
        """Gets number of arguments in python 2.

        Counts named positional parameters plus one each for ``*args`` and
        ``**kwargs`` when present, mirroring what
        ``len(inspect.signature(func).parameters)`` reports on Python 3.
        """
        # pylint: disable=deprecated-method
        args, varargs, keywords = inspect.getargspec(func)[:3]
        count = len(args)
        # The previous version did `count += len(varargs)` on the *name
        # string* of ``*args`` (e.g. +4 for "args") and ignored ``**kwargs``
        # entirely; each of them is exactly one parameter.
        if varargs is not None:
            count += 1
        if keywords is not None:
            count += 1
        return count
| StarcoderdataPython |
165858 |
import torch
# tempo imports
from . import compute_cell_posterior
from . import utils
from . import cell_posterior
from . import objective_functions
class ClockGenePosterior(torch.nn.Module):
	"""Variational model tying per-gene rhythmicity posteriors to a cell-phase posterior.

	Holds variational parameters for each gene (mesor ``mu``, amplitude ``A``,
	phase ``phi`` on the unit circle, cycler indicator ``Q``) and computes an
	ELBO over clock / cycler genes given cell expression and a prior over
	cell phase.
	"""
	def __init__(self,gene_param_dict,gene_prior_dict,num_grid_points,clock_indices,use_nb=False,log_mean_log_disp_coef=None,min_amp=0,max_amp=2.5,use_clock_output_only=False):
		# gene_param_dict: variational parameters per gene ('mu_loc',
		# 'phi_euclid_loc', ...); gene_prior_dict: matching prior parameters.
		# num_grid_points: resolution of the discretised phase grid.
		# clock_indices: rows of the gene axis that are core clock genes.
		super(ClockGenePosterior, self).__init__()
		self.clock_indices = clock_indices
		self.gene_param_dict = gene_param_dict
		self.gene_prior_dict = gene_prior_dict
		self.num_grid_points = num_grid_points
		# use_nb: presumably switches to a negative-binomial likelihood using
		# log_mean_log_disp_coef -- TODO confirm against compute_cell_posterior.
		self.use_nb = use_nb
		self.log_mean_log_disp_coef = log_mean_log_disp_coef
		self.min_amp = min_amp
		self.max_amp = max_amp
		self.num_genes = self.gene_param_dict['mu_loc'].shape[0]
		# When True, only clock genes contribute to the reconstruction term.
		self.use_clock_output_only = use_clock_output_only
	def compute_cell_phase_posterior_likelihood(self,gene_X,log_L,prior_theta_euclid_dist,num_gene_samples=5):
		"""Monte-Carlo estimate of the cell-phase posterior over the phase grid.

		Samples gene parameters from the variational posterior, then delegates
		to compute_cell_posterior for the grid likelihood.
		"""
		# --- SAMPLE THE GENE PARAMETERS ---
		# ** get distribution dict **
		distrib_dict = utils.init_distributions_from_param_dicts(gene_param_dict = self.gene_param_dict, max_amp = self.max_amp, min_amp = self.min_amp, prep = True)
		# ** sample **
		mu_sampled = distrib_dict['mu'].rsample((num_gene_samples,)) # [num_gene_samples x num_genes]
		A_sampled = distrib_dict['A'].rsample((num_gene_samples,)) # [num_gene_samples x num_genes]
		phi_euclid_sampled = distrib_dict['phi_euclid'].rsample((num_gene_samples,)) # [num_gene_samples x num_genes x 2]
		# Convert the Euclidean (cos, sin) samples back to an angle.
		phi_sampled = torch.atan2(phi_euclid_sampled[:,:,1],phi_euclid_sampled[:,:,0]) # [num_gene_samples x num_genes x 2]
		Q_sampled = utils.get_is_cycler_samples_from_dist(distrib_dict['Q_prob'],num_gene_samples=num_gene_samples,rsample=True)
		# --- COMPUTE CELL POSTERIOR ---
		theta_posterior_likelihood = compute_cell_posterior.compute_cell_posterior(gene_X = gene_X,
			log_L = log_L,
			num_grid_points = self.num_grid_points,
			prior_theta_euclid_dist = prior_theta_euclid_dist, # self.prior_theta_euclid_dist
			mu_sampled = mu_sampled,
			A_sampled = A_sampled,
			phi_sampled = phi_sampled,
			Q_sampled = Q_sampled,
			B_sampled = None,
			use_nb = self.use_nb,
			log_mean_log_disp_coef = self.log_mean_log_disp_coef)
		return theta_posterior_likelihood
	def get_clock_gene_param_dict(self):
		"""Return the variational parameter dict restricted to clock genes."""
		clock_gene_param_dict = {}
		for key in self.gene_param_dict:
			# phi_euclid_loc is [num_genes x 2]; the others are [num_genes].
			if key == 'phi_euclid_loc':
				clock_gene_param_dict[key] = self.gene_param_dict['phi_euclid_loc'][self.clock_indices,:]
			else:
				clock_gene_param_dict[key] = self.gene_param_dict[key][self.clock_indices]
		return clock_gene_param_dict
	def compute_loss(self,gene_X,log_L,prior_theta_euclid_dist,num_cell_samples,num_gene_samples):
		"""Compute the (negative) ELBO and its likelihood / KL components.

		:return: (elbo_loss, ll_loss, kl_loss) where elbo_loss = kl_loss - ll_loss.
		"""
		# --- COMPUTE THE CELL POSTERIOR DISTRIBUTION ---
		theta_posterior_likelihood = self.compute_cell_phase_posterior_likelihood(gene_X,log_L,prior_theta_euclid_dist,num_gene_samples)
		# --- SAMPLE THE CELL PHASE POSTERIOR ---
		theta_dist = cell_posterior.ThetaPosteriorDist(theta_posterior_likelihood)
		theta_sampled = theta_dist.rsample(num_cell_samples)
		# --- GET THE DISTRIB DICT AND CLOCK LOC SCALE DICT ---
		# ** get distribution dict **
		# input gene distrib dict
		input_distrib_dict = utils.init_distributions_from_param_dicts(gene_param_dict = self.gene_param_dict, gene_prior_dict = self.gene_prior_dict, max_amp = self.max_amp, min_amp = self.min_amp)
		# output gene distrib dict
		if self.use_clock_output_only:
			output_distrib_dict = utils.init_distributions_from_param_dicts(gene_param_dict = self.get_clock_gene_param_dict(), gene_prior_dict = self.gene_prior_dict, max_amp = self.max_amp, min_amp = self.min_amp)
		else:
			output_distrib_dict = input_distrib_dict
		# --- COMPUTE THE EXPECTATION LOG LIKELIHOOD OF THE CYCLING GENES ---
		# subset gene_X and distrib_dict to core clock genes only if need to
		if self.use_clock_output_only:
			gene_X = gene_X[:,self.clock_indices]
		# ** compute gene LL in each cell over all samples **
		cycler_log_likelihood_sampled = objective_functions.compute_sample_log_likelihood(gene_X, log_L,
			theta_sampled = theta_sampled,
			mu_dist = output_distrib_dict['mu'], A_dist = output_distrib_dict['A'], phi_euclid_dist = output_distrib_dict['phi_euclid'], Q_prob_dist = output_distrib_dict['Q_prob'],
			num_gene_samples = num_gene_samples, use_flat_model = False,
			use_nb = self.use_nb, log_mean_log_disp_coef = self.log_mean_log_disp_coef, rsample = True,
			use_is_cycler_indicators = output_distrib_dict['Q_prob'] is not None)
		# ** compute the MC expectations **
		# cycler
		cycler_mc_lls = torch.sum(torch.sum(cycler_log_likelihood_sampled,dim=0),dim=0).flatten()
		cycler_gene_expectation_log_likelihood = torch.mean(cycler_mc_lls)
		# clock
		clock_mc_lls = torch.sum(torch.sum(cycler_log_likelihood_sampled[self.clock_indices,:,:,:],dim=0),dim=0).flatten()
		clock_gene_expectation_log_likelihood = torch.mean(clock_mc_lls)
		# --- COMPUTE THE KL OF THE CORE CLOCK GENES AND THE DE NOVO CYCLERS ---
		# ** get variational and prior dist lists **
		variational_dist_list = [input_distrib_dict['mu'],input_distrib_dict['A'],input_distrib_dict['phi_euclid']]
		prior_dist_list = [input_distrib_dict['prior_mu'],input_distrib_dict['prior_A'],input_distrib_dict['prior_phi_euclid']]
		if 'Q_prob' in input_distrib_dict and 'prior_Q_prob' in input_distrib_dict:
			variational_dist_list += [input_distrib_dict['Q_prob']]
			prior_dist_list += [input_distrib_dict['prior_Q_prob']]
		# ** compute the divegence **
		clock_and_de_novo_cycler_kl = objective_functions.compute_divergence(variational_dist_list = variational_dist_list,
			prior_dist_list = prior_dist_list)
		# # --- COMPUTE ELBO ---
		kl_loss = torch.sum(clock_and_de_novo_cycler_kl)
		if self.use_clock_output_only:
			ll_loss = clock_gene_expectation_log_likelihood
		else:
			ll_loss = cycler_gene_expectation_log_likelihood
		# Negative ELBO: minimise KL minus expected log-likelihood.
		elbo_loss = kl_loss - ll_loss
		return elbo_loss, ll_loss, kl_loss
| StarcoderdataPython |
4801683 | <gh_stars>0
import transformice
import discord_bot
import asyncio
# from signal import signal, SIGPIPE, SIG_DFL
# signal(SIGPIPE,SIG_DFL)
if __name__ == '__main__':
    # One shared asyncio event loop drives both clients.
    loop = asyncio.get_event_loop()
    discord = discord_bot.setup(loop)
    mapper = transformice.setup(loop)
    # Cross-link the two clients so each can forward events to the other.
    mapper.discord = discord
    discord.mapper = mapper
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C: shut both clients down cleanly before exiting.
        transformice.stop(loop, mapper)
        discord_bot.stop(loop, discord)
14020 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains NcfModelRunner, which can train and evaluate an NCF model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import time
import tensorflow as tf
from tensorflow.contrib.compiler import xla
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
class NcfModelRunner(object):
  """Creates a graph to train/evaluate an NCF model, and runs it.

  This class builds both a training model and evaluation model in the graph.
  The two models share variables, so that during evaluation, the trained
  variables are used.
  """

  # _TrainModelProperties and _EvalModelProperties store useful properties of
  # the training and evaluation models, respectively.
  # _SHARED_MODEL_PROPERTY_FIELDS is their shared fields.
  _SHARED_MODEL_PROPERTY_FIELDS = (
      # A scalar tf.string placeholder tensor, that will be fed the path to the
      # directory storing the TFRecord files for the input data.
      "record_files_placeholder",
      # The tf.data.Iterator to iterate over the input data.
      "iterator",
      # A scalar float tensor representing the model loss.
      "loss",
      # The batch size, as a Python int.
      "batch_size",
      # The op to run the model. For the training model, this trains the model
      # for one step. For the evaluation model, this computes the metrics and
      # updates the metric variables.
      "run_model_op")
  _TrainModelProperties = namedtuple("_TrainModelProperties",  # pylint: disable=invalid-name
                                     _SHARED_MODEL_PROPERTY_FIELDS)
  _EvalModelProperties = namedtuple(  # pylint: disable=invalid-name
      "_EvalModelProperties", _SHARED_MODEL_PROPERTY_FIELDS + (
          # A dict from metric name to (metric, update_op) tuple.
          "metrics",
          # Initializes the metric variables.
          "metric_initializer",))

  def __init__(self, ncf_dataset, params):
    with tf.Graph().as_default() as self._graph:
      if params["use_xla_for_gpu"]:
        # The XLA functions we use require resource variables.
        tf.enable_resource_variables()
      self._ncf_dataset = ncf_dataset
      self._global_step = tf.train.create_global_step()
      # Training and evaluation models share variables in this graph.
      self._train_model_properties = self._build_model(params, is_training=True)
      self._eval_model_properties = self._build_model(params, is_training=False)

      initializer = tf.global_variables_initializer()
    # Finalize to catch accidental graph modifications during run().
    self._graph.finalize()
    self._session = tf.Session(graph=self._graph)
    self._session.run(initializer)

  def _build_model(self, params, is_training):
    """Builds the NCF model.

    Args:
      params: A dict of hyperparameters.
      is_training: If True, build the training model. If False, build the
        evaluation model.
    Returns:
      A _TrainModelProperties if is_training is True, or an _EvalModelProperties
      otherwise.
    """
    record_files_placeholder = tf.placeholder(tf.string, ())
    input_fn, _, _ = \
      data_preprocessing.make_input_fn(
          ncf_dataset=self._ncf_dataset, is_training=is_training,
          record_files=record_files_placeholder)
    dataset = input_fn(params)
    iterator = dataset.make_initializable_iterator()

    model_fn = neumf_model.neumf_model_fn
    if params["use_xla_for_gpu"]:
      # Wrap the model_fn so its computation is compiled with XLA.
      model_fn = xla.estimator_model_fn(model_fn)

    if is_training:
      features, labels = iterator.get_next()
      estimator_spec = model_fn(
          features, labels, tf.estimator.ModeKeys.TRAIN, params)
      with tf.control_dependencies([estimator_spec.train_op]):
        run_model_op = self._global_step.assign_add(1)
      return self._TrainModelProperties(
          record_files_placeholder, iterator,
          estimator_spec.loss, params["batch_size"], run_model_op)
    else:
      features = iterator.get_next()
      estimator_spec = model_fn(
          features, None, tf.estimator.ModeKeys.EVAL, params)
      run_model_op = tf.group(*(update_op for _, update_op in
                                estimator_spec.eval_metric_ops.values()))
      metric_initializer = tf.variables_initializer(
          tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
      return self._EvalModelProperties(
          record_files_placeholder, iterator, estimator_spec.loss,
          params["eval_batch_size"], run_model_op,
          estimator_spec.eval_metric_ops, metric_initializer)

  def _train_or_eval(self, model_properties, num_steps, is_training):
    """Either trains or evaluates, depending on whether `is_training` is True.

    Args:
      model_properties: _TrainModelProperties or an _EvalModelProperties
        containing the properties of the training or evaluation graph.
      num_steps: The number of steps to train or evaluate for.
      is_training: If True, run the training model. If False, run the evaluation
        model.
    Returns:
      record_dir: The directory of TFRecords where the training/evaluation input
      data was read from.
    """
    if self._ncf_dataset is not None:
      epoch_metadata, record_dir, template = data_preprocessing.get_epoch_info(
          is_training=is_training, ncf_dataset=self._ncf_dataset)
      batch_count = epoch_metadata["batch_count"]
      if batch_count != num_steps:
        raise ValueError(
            "Step counts do not match. ({} vs. {}) The async process is "
            "producing incorrect shards.".format(batch_count, num_steps))
      record_files = os.path.join(record_dir, template.format("*"))
      initializer_feed_dict = {
          model_properties.record_files_placeholder: record_files}
      del batch_count
    else:
      initializer_feed_dict = None
      record_dir = None
    self._session.run(model_properties.iterator.initializer,
                      initializer_feed_dict)
    fetches = (model_properties.loss, model_properties.run_model_op)
    mode = "Train" if is_training else "Eval"
    start = None
    for i in range(num_steps):
      loss, _ = self._session.run(fetches)
      if i % 100 == 0:
        if start is None:
          # Only start the timer after 100 steps so there is a warmup.
          start = time.time()
          start_step = i
        tf.logging.info("{} Loss = {}".format(mode, loss))
    end = time.time()
    if start is not None:
      print("{} peformance: {} examples/sec".format(
          mode, (i - start_step) * model_properties.batch_size / (end - start)))
    return record_dir

  def train(self, num_train_steps):
    """Trains the graph for a single cycle.

    Args:
      num_train_steps: The number of steps per cycle to train for.
    """
    record_dir = self._train_or_eval(self._train_model_properties,
                                     num_train_steps, is_training=True)
    if record_dir:
      # We delete the record_dir because each cycle, new TFRecords is generated
      # by the async process.
      tf.gfile.DeleteRecursively(record_dir)

  def eval(self, num_eval_steps):  # pylint: disable=redefined-builtin
    """Evaluates the graph on the eval data.

    Args:
      num_eval_steps: The number of steps to evaluate for.
    Returns:
      A dict of evaluation results.
    """
    self._session.run(self._eval_model_properties.metric_initializer)
    self._train_or_eval(self._eval_model_properties, num_eval_steps,
                        is_training=False)
    eval_results = {
        'global_step': self._session.run(self._global_step)}
    for key, (val, _) in self._eval_model_properties.metrics.items():
      # Fetch each metric once and reuse it; the previous version ran
      # self._session.run(val) a second time just for the log line.
      val_ = self._session.run(val)
      tf.logging.info("{} = {}".format(key, val_))
      eval_results[key] = val_
    return eval_results
| StarcoderdataPython |
115711 | # -*- coding: utf-8 -*-
# Hikari Examples - A collection of examples for Hikari.
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain worldwide.
# This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along with this software.
# If not, see <https://creativecommons.org/publicdomain/zero/1.0/>.
"""A simple bot to demonstrate how to use rillrate with hikari to make a web dashboard for the bot.
Just visit `http://localhost:6361/ui/` to explore your dashboard!
"""
import logging
import os
import rillrate
from rillrate import prime as rr_prime
import hikari
# Prefix every chat command must start with.
PREFIX = ","
# Name used to group dashboards.
# You could have multiple packages for different applications, such as a package for the bot
# dashboards, and another package for a web server running alongside the bot.
PACKAGE = "Rillrate Example"
# Dashboards are a part inside of package, they can be used to group different types of
# dashboards that you may want to use, like a dashboard for system status, another dashboard
# for cache status, and another one to configure features or trigger actions on the bot.
DASHBOARD = "Control Panel"
# These are menus inside the dashboard, you can use them to group specific sets
# of data inside the same dashboard.
GROUP_CONFIG = "1 - Example"
# All the 3 configurable namespaces are sorted alphabetically.
# Class with all our dashboard logic
class RillRateDashboard:
    """Global data shared across the entire bot, used to store dashboard values."""
    # __slots__ keeps instances small and catches attribute typos.
    __slots__ = ("logger", "value", "selector", "slider")
    def __init__(self) -> None:
        """Start the rillrate service and register the dashboard widgets."""
        self.logger = logging.getLogger("dashboard")
        # Current value shown by both widgets; the single source of truth.
        self.value = 0
        # Install rillrate - Spins up the rillrate service in a separate thread, making it non-blocking :)
        rillrate.install()
        # Register the dashboard objects
        # Selector offers the multiples of 32 in [0, 256].
        dummy_values = [str(i) for i in range(0, 256 + 1, 32)]
        self.selector = rr_prime.Selector(
            f"{PACKAGE}.{DASHBOARD}.{GROUP_CONFIG}.Selector", label="Choose!", options=dummy_values
        )
        self.slider = rr_prime.Slider(
            f"{PACKAGE}.{DASHBOARD}.{GROUP_CONFIG}.Slider", label="More fine grain control", min=0, max=256, step=2
        )
        # Add sync callbacks - This way we tell rillrate what functions to call when a sync event occurs
        self.selector.sync_callback(self._selector_callback)
        self.slider.sync_callback(self._slider_callback)
    def _selector_callback(self, activity: rillrate.Activity, action: rillrate.Action) -> None:
        """Handle a selection in the web UI; mirror it onto the slider."""
        self.logger.info("Selector activity: %s | action = %s", activity, action)
        if action is not None:
            value = int(action.value)
            self.logger.info("Selected: %s", value)
            # Update the slider too, so they show the same value.
            self.slider.apply(value)
            # Overwrite the current stored value on the global data with the new selected value.
            self.value = value
    def _slider_callback(self, activity: rillrate.Activity, action: rillrate.Action) -> None:
        """Handle a slider move in the web UI; mirror it onto the selector."""
        self.logger.info("Slider activity: %s | action = %s", activity, action)
        if action is not None:
            value = int(action.value)
            self.logger.info("Slided to: %s", value)
            # Update the selector too, so they show the same value.
            # It is important to note that since not all values are present in the selector, it might be empty sometimes
            self.selector.apply(str(value))
            # Overwrite the current stored value on the global data with the new selected value.
            self.value = value
# Wire the bot and the dashboard together at import time; constructing the
# dashboard starts the rillrate service in a background thread (non-blocking).
bot = hikari.GatewayBot(token=os.environ["BOT_TOKEN"])
dashboard = RillRateDashboard()
def is_command(cmd_name: str, content: str) -> bool:
    """Check if the message sent is a valid command."""
    expected = f"{PREFIX}{cmd_name}"
    return content == expected
@bot.listen()
async def message(event: hikari.GuildMessageCreateEvent) -> None:
    """Listen for messages being created."""
    # Ignore bots/webhooks and messages with no text content.
    if not event.is_human or not event.content:
        return
    # Command Framework 101 :D
    if event.content.startswith(PREFIX):
        if is_command("ping", event.content):
            await event.message.respond("Pong!")
        elif is_command("value", event.content):
            # Report the value last synced from the rillrate dashboard.
            await event.message.respond(f"Current value: {dashboard.value}")
# Blocks until the bot is shut down.
bot.run()
| StarcoderdataPython |
82371 | <reponame>truthiswill/intellij-community
# NOTE(review): IDE test-fixture data -- the <selection> tags below mark an
# editor selection range and are not valid Python; do not "fix" or run this.
class MyType(type):
    def __instancecheck__(self, instance):
        <selection>return super(MyType, self).__instancecheck__(instance)</selection>
| StarcoderdataPython |
170299 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
def time_in_range(start, end, x):
    """
    Return true if x is in the range [start, end]
    """
    if start > end:
        # The range wraps past midnight (e.g. 22:00--04:00): x is inside
        # when it falls on either side of the wrap point.
        return start <= x or x <= end
    return start <= x <= end
def formated_date(timestamp):
    """Format a unix timestamp as 'YYYY-MM-DD HH:MM:SS' in local time."""
    moment = datetime.datetime.fromtimestamp(int(timestamp))
    return moment.strftime("%Y-%m-%d %H:%M:%S")
def full_formated_date(timestamp):
    """Format a unix timestamp as 'YYYY-MM-DD HH:MM:SS.ffffff' in local time.

    The previous ``int(timestamp)`` truncated the fractional seconds, so the
    ``%f`` field was always ``000000``; ``float()`` preserves microseconds
    while remaining backward-compatible for integer input.
    """
    moment = datetime.datetime.fromtimestamp(float(timestamp))
    return moment.strftime("%Y-%m-%d %H:%M:%S.%f")
def short_formated_date(timestamp):
    """Format a unix timestamp as 'YYYY-MM-DD' in local time."""
    moment = datetime.datetime.fromtimestamp(int(timestamp))
    return moment.strftime("%Y-%m-%d")
def time_now():
    """Return the current unix time as whole seconds.

    The previous ``utcnow().strftime("%s")`` was doubly broken: ``%s`` is a
    non-portable glibc extension (absent on Windows), and it interprets the
    naive UTC datetime as *local* time, skewing the result by the machine's
    UTC offset. ``timestamp()`` on an aware UTC datetime is correct and
    portable.
    """
    return int(datetime.datetime.now(datetime.timezone.utc).timestamp())
def ts_to_date(ts):
    """Convert a unix timestamp (int-convertible) to a naive local datetime."""
    seconds = int(ts)
    return datetime.datetime.fromtimestamp(seconds)
def date_to_ts(dt):
    """Convert a naive local datetime to a unix timestamp (int seconds).

    The previous ``dt.strftime("%s")`` relied on a non-portable glibc
    extension (it fails on Windows); ``datetime.timestamp()`` applies the
    same local-time interpretation to a naive datetime on every platform.
    """
    return int(dt.timestamp())
def ymd_to_date(year, month, day):
    """Build a naive datetime at midnight for the given calendar date."""
    return datetime.datetime(year=year, month=month, day=day)
def strf_to_date(str_formated_date):
    """Parse a 'YYYY-MM-DD HH:MM:SS' string into a datetime."""
    fmt = '%Y-%m-%d %H:%M:%S'
    return datetime.datetime.strptime(str_formated_date, fmt)
def strff_to_date(str_formated_date):
    """Parse a 'YYYY-MM-DD HH:MM:SS.ffffff' string into a datetime."""
    fmt = '%Y-%m-%d %H:%M:%S.%f'
    return datetime.datetime.strptime(str_formated_date, fmt)
def strsf_to_date(str_short_formated_date):
    """Parse a 'YYYY-MM-DD' string into a datetime at midnight."""
    fmt = '%Y-%m-%d'
    return datetime.datetime.strptime(str_short_formated_date, fmt)
def epoch():
    """Return the unix epoch (1970-01-01 00:00) as a naive datetime."""
    return ymd_to_date(1970, 1, 1)


# Module-level constant so callers do not rebuild the epoch datetime each time.
EPOCH = epoch()
| StarcoderdataPython |
3292080 | <filename>arl/skycomponent/operations.py
"""Function to manage skycomponents.
"""
import numpy
from typing import Union, List
import collections
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import skycoord_to_pixel, pixel_to_skycoord
from arl.data.data_models import Image, Skycomponent, assert_same_chan_pol
from arl.data.polarisation import PolarisationFrame
from astropy.convolution import Gaussian2DKernel, Box2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
import astropy.units as u
from photutils import segmentation
import logging
log = logging.getLogger(__name__)
def create_skycomponent(direction: SkyCoord, flux: numpy.array, frequency: numpy.array, shape: str = 'Point',
                        polarisation_frame=PolarisationFrame("stokesIQUV"), param: dict=None, name: str = '')\
        -> Skycomponent:
    """ A single Skycomponent with direction, flux, shape, and params for the shape

    :param param: shape-specific parameters
    :param direction: SkyCoord of the component
    :param flux: flux array
    :param frequency: frequencies corresponding to the flux rows
    :param shape: 'Point' or 'Gaussian'
    :param name: label for the component
    :return: Skycomponent
    """
    return Skycomponent(direction=direction,
                        frequency=frequency,
                        name=name,
                        flux=numpy.array(flux),
                        shape=shape,
                        params=param,
                        polarisation_frame=polarisation_frame)
def find_nearest_component(home, comps) -> Skycomponent:
""" Find nearest component to a given direction
:param home: Home direction
:param comps: list of skycomponents
:return: nearest component
"""
sep = 2 * numpy.pi
best = None
for comp in comps:
thissep = comp.direction.separation(home).rad
if thissep < sep:
sep = thissep
best = comp
return best
def find_skycomponents(im: Image, fwhm=1.0, threshold=10.0, npixels=5) -> List[Skycomponent]:
    """ Find gaussian components in Image above a certain threshold as Skycomponent

    :param fwhm: Full width half maximum of gaussian
    :param threshold: Threshold for component detection. Default: 10 standard deviations over median.
    :param im: Image to be searched
    :param npixels: minimum number of connected pixels for a segment
    :return: list of sky components
    """
    assert type(im) == Image
    log.info("find_skycomponents: Finding components in Image by segmentation")

    # We use photutils segmentation - this first segments the image
    # into pieces that are thought to contain individual sources, then
    # identifies the concrete source properties. Having these two
    # steps makes it straightforward to extract polarisation and
    # spectral information.

    # Make filter kernel
    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=int(1.5*fwhm), y_size=int(1.5*fwhm))
    kernel.normalize()

    # Segment the average over all channels of Stokes I
    image_sum = numpy.sum(im.data, axis=(0))[0,...]/float(im.shape[0])
    segments = segmentation.detect_sources(image_sum, threshold, npixels=npixels, filter_kernel=kernel)
    log.info("find_skycomponents: Identified %d segments" % segments.nlabels)

    # Now get source properties for all polarisations and frequencies
    # (only polarisation 0 is measured; see the inner [0] lists).
    comp_tbl = [ [ segmentation.source_properties(im.data[chan, pol], segments,
                                                  filter_kernel=kernel, wcs=im.wcs)
                   for pol in [0] ]
                 for chan in range(im.nchan) ]
    def comp_prop(comp,prop_name):
        # Gather a named property for one segment across channels/pols.
        return [ [ comp_tbl[chan][pol][comp][prop_name]
                   for pol in [0] ]
                 for chan in range(im.nchan) ]

    # Generate components
    comps = []
    for segment in range(segments.nlabels):

        # Get flux and position. Astropy's quantities make this
        # unnecessarily complicated.
        flux = numpy.array(comp_prop(segment, "max_value"))
        # These values seem inconsistent with the xcentroid, and ycentroid values
        # ras = u.Quantity(list(map(u.Quantity,
        #        comp_prop(segment, "ra_icrs_centroid"))))
        # decs = u.Quantity(list(map(u.Quantity,
        #         comp_prop(segment, "dec_icrs_centroid"))))
        xs = u.Quantity(list(map(u.Quantity,
                comp_prop(segment, "xcentroid"))))
        ys = u.Quantity(list(map(u.Quantity,
                comp_prop(segment, "ycentroid"))))

        sc = pixel_to_skycoord(xs, ys, im.wcs, 1)
        ras = sc.ra
        decs = sc.dec

        # Remove NaNs from RA/DEC (happens if there is no flux in that
        # polarsiation/channel)
        # ras[numpy.isnan(ras)] = 0.0
        # decs[numpy.isnan(decs)] = 0.0

        # Determine "true" position by weighting
        flux_sum = numpy.sum(flux)
        ra = numpy.sum(flux * ras) / flux_sum
        dec = numpy.sum(flux * decs) / flux_sum
        xs = numpy.sum(flux * xs) / flux_sum
        ys = numpy.sum(flux * ys) / flux_sum

        # Peak flux at the (rounded) weighted centroid pixel.
        point_flux = im.data[:,:,numpy.round(ys.value).astype('int'),numpy.round(xs.value).astype('int')]

        # Add component
        comps.append(Skycomponent(
            direction = SkyCoord(ra=ra, dec=dec),
            frequency = im.frequency,
            name = "Segment %d" % segment,
            flux = point_flux,
            shape = 'Point',
            polarisation_frame=im.polarisation_frame,
            params = {'xpixel':xs, 'ypixel':ys, 'sum_flux':flux} # Table has lots of data, could add more in
            # future
            ))

    return comps
def apply_beam_to_skycomponent(sc: Union[Skycomponent, List[Skycomponent]], beam: Image) \
        -> Union[Skycomponent, List[Skycomponent]]:
    """ Apply a beam image to a Skycomponent or list of Skycomponents

    Each component's flux is multiplied (in place) by the beam value at the
    component's pixel position; components outside the beam image or with a
    NaN pixel location are dropped from the result.

    :param beam: Beam Image [nchan, npol, ny, nx]
    :param sc: SkyComponent or list of SkyComponents
    :return: Single Skycomponent or list, matching the input form
    """
    # collections.Iterable was removed in Python 3.10; use collections.abc.
    from collections.abc import Iterable

    assert type(beam) == Image
    single = not isinstance(sc, Iterable)

    if single:
        sc = [sc]

    nchan, npol, ny, nx = beam.shape

    log.debug('apply_beam_to_skycomponent: Processing %d components' % (len(sc)))

    newsc = []
    total_flux = numpy.zeros([nchan, npol])
    for comp in sc:
        assert comp.shape == 'Point', "Cannot handle shape %s" % comp.shape
        assert_same_chan_pol(beam, comp)

        pixloc = skycoord_to_pixel(comp.direction, beam.wcs, 0, 'wcs')
        if not numpy.isnan(pixloc).any():
            x, y = int(round(float(pixloc[0]))), int(round(float(pixloc[1])))
            if 0 <= x < nx and 0 <= y < ny:
                # NOTE: mutates the input component's flux in place.
                comp.flux[:, :] *= beam.data[:, :, y, x]
                total_flux += comp.flux
                newsc.append(Skycomponent(comp.direction, comp.frequency, comp.name, comp.flux,
                                          shape=comp.shape,
                                          polarisation_frame=comp.polarisation_frame))

    log.debug('apply_beam_to_skycomponent: %d components with total flux %s' %
              (len(newsc), total_flux))
    if single:
        return newsc[0]
    else:
        return newsc
def insert_skycomponent(im: Image, sc: Union[Skycomponent, List[Skycomponent]], insert_method='Nearest',
                        bandwidth=1.0, support=8) -> Image:
    """ Insert a Skycomponent into an image (in place)

    :param im: Image to insert into (modified in place)
    :param sc: SkyComponent or list of SkyComponents
    :param insert_method: 'Nearest' | 'Sinc' | 'Lanczos' | 'PSWF'
    :param bandwidth: Fractional of uv plane to optimise over (1.0)
    :param support: Support of kernel (7)
    :return: image
    """
    # collections.Iterable was removed in Python 3.10; use collections.abc.
    from collections.abc import Iterable

    assert type(im) == Image

    # Widen the image-space support to compensate for narrower uv bandwidth.
    support = int(support / bandwidth)

    nchan, npol, ny, nx = im.data.shape

    if not isinstance(sc, Iterable):
        sc = [sc]

    log.debug("insert_skycomponent: Using insert method %s" % insert_method)

    for comp in sc:
        assert comp.shape == 'Point', "Cannot handle shape %s" % comp.shape
        assert_same_chan_pol(im, comp)
        pixloc = skycoord_to_pixel(comp.direction, im.wcs, 1, 'wcs')
        if insert_method == "Lanczos":
            insert_array(im.data, pixloc[0], pixloc[1], comp.flux, bandwidth, support,
                         insert_function=insert_function_L)
        elif insert_method == "Sinc":
            insert_array(im.data, pixloc[0], pixloc[1], comp.flux, bandwidth, support,
                         insert_function=insert_function_sinc)
        elif insert_method == "PSWF":
            insert_array(im.data, pixloc[0], pixloc[1], comp.flux, bandwidth, support,
                         insert_function=insert_function_pswf)
        else:
            # Any unrecognised method falls back to nearest-pixel insertion.
            insert_method = 'Nearest'
            y, x = numpy.round(pixloc[1]).astype('int'), numpy.round(pixloc[0]).astype('int')
            if 0 <= x < nx and 0 <= y < ny:
                im.data[:, :, y, x] += comp.flux

    return im
def insert_function_sinc(x):
    """Normalised sinc kernel sin(pi*x)/(pi*x) with the correct limit 1 at x=0.

    The previous implementation left the x == 0 entries at 0 (it only filled
    the x != 0 positions), so the central sample received zero weight before
    renormalisation. numpy.sinc evaluates the same expression with
    sinc(0) = 1, and also accepts scalars as well as arrays.
    """
    return numpy.sinc(x)
def insert_function_L(x, a=5):
    """Lanczos kernel of order *a*: sinc(x) * sinc(x / a)."""
    window = insert_function_sinc(x / a)
    return insert_function_sinc(x) * window
def insert_function_pswf(x, a=5):
    # Prolate spheroidal wave function kernel, evaluated via the gridding
    # helper; grdsf returns (grid correction, kernel) so take index [1].
    # Local import avoids a circular import with the gridding module.
    from arl.fourier_transforms.convolutional_gridding import grdsf
    return grdsf(abs(x)/a)[1]
def insert_array(im, x, y, flux, bandwidth=1.0, support=7, insert_function=insert_function_L):
    """ Insert point into image using specified function

    :param im: Image data cube [nchan, npol, ny, nx], modified in place
    :param x: x in float pixels
    :param y: y in float pixels
    :param flux: Flux[nchan, npol]
    :param bandwidth: Support of data in uv plane
    :param support: Support of function in image space
    :param insert_function: insert_function_L or insert_function_sinc or insert_function_pswf
    :return: im (the same array, after in-place update)
    """
    nchan, npol, ny, nx = im.shape
    # Split the position into integer pixel and sub-pixel fraction.
    intx = int(numpy.round(x))
    inty = int(numpy.round(y))
    fracx = x - intx
    fracy = y - inty
    gridx = numpy.arange(-support, support)
    gridy = numpy.arange(-support, support)

    # Separable 2D kernel centred on the fractional offset, normalised to
    # unit sum so the total inserted flux equals `flux`.
    insert = numpy.outer(insert_function(bandwidth * (gridy - fracy)),
                         insert_function(bandwidth * (gridx - fracx)))

    insertsum = numpy.sum(insert)
    assert insertsum > 0, "Sum of interpolation coefficients %g" % insertsum
    insert = insert / insertsum

    for chan in range(nchan):
        for pol in range(npol):
            im[chan, pol, inty - support:inty + support, intx - support:intx + support] += flux[chan, pol] * insert

    return im
| StarcoderdataPython |
1638815 | <filename>day2/homework/q3_b.py
# Read two integers and swap them.
a = int(input('Enter value of a: '))
b = int(input('Enter value of b: '))
# Pythonic swap via tuple unpacking (equivalent to the arithmetic trick).
a, b = b, a
print("After swapping the values are: a = {} b = {} ".format(a, b))
| StarcoderdataPython |
1741135 | <reponame>ttppss/simple-faster-rcnn-pytorch<filename>data/coco_generator.py<gh_stars>0
def coco_generator(image_path):
    """Build a COCO-format annotation dict from a list of binary-mask images.

    Each file is treated as a single-object binary mask; the filename stem is
    used as the image id, and one annotation (bbox + polygon segmentation) is
    produced per image.

    :param image_path: iterable of mask-image paths
    :return: dict with COCO ``categories`` / ``images`` / ``annotations``

    NOTE(review): this function uses module-level names that are not visible
    here (``imageio``, ``np``, ``mask`` (pycocotools), ``measure`` (skimage),
    ``os``, ``json`` and the ``annotation`` list it appends to) -- confirm
    they are defined at module scope or this raises NameError at runtime.
    """
    images = []
    for im_path in image_path:
        im = imageio.imread(im_path)
        # image_nbr = re.findall(r"[0-9]+", im_path)
        im_path = str(im_path)
        # Filename stem (between the last '/' and the extension) is the image id.
        image_nbr = im_path[(im_path.rfind('/') + 1): im_path.rfind('.')]
        # Keep the last three path components as the stored file name.
        last_slash_pos = im_path.rfind('/')
        without_last_slash = im_path[:last_slash_pos]
        second_last_slash_pos = without_last_slash.rfind('/')
        without_second_last_slash = im_path[:second_last_slash_pos]
        third_last_slash_pos = without_second_last_slash.rfind('/')
        file_name = im_path[(third_last_slash_pos + 1):]
        image_info = {
            "coco_url": "",
            "date_captured": "",
            "flickr_url": "",
            "license": 0,
            "id": image_nbr,
            "file_name": file_name,
            "height": im.shape[0],
            "width": im.shape[1]
        }
        images.append(image_info)
        ground_truth_binary_mask = np.array(im)
        ground_truth_binary_mask_1 = ground_truth_binary_mask.copy()
        # replace 255 with 1 in the data
        ground_truth_binary_mask_1[ground_truth_binary_mask_1 > 1] = 1
        # RLE-encode the mask (pycocotools expects Fortran order) to get area/bbox.
        fortran_ground_truth_binary_mask = np.asfortranarray(ground_truth_binary_mask_1)
        encoded_ground_truth = mask.encode(fortran_ground_truth_binary_mask)
        ground_truth_area = mask.area(encoded_ground_truth)
        ground_truth_bounding_box = mask.toBbox(encoded_ground_truth)
        # print(image_nbr, ground_truth_binary_mask_1.shape)
        contours = measure.find_contours(ground_truth_binary_mask_1, 0.5)
        cont = []
        for contour in contours:
            # find_contours yields (row, col); flip to (x, y) ordering.
            contour = np.flip(contour, axis=1)
            cont.append(contour)
        # get the largest x and y coordinate, and then create bbox.
        # NOTE(review): bbox is computed from cont[0] only, i.e. the first
        # contour -- presumably each mask has a single object; verify.
        x_list = []
        y_list = []
        for i in range(len(cont[0])):
            x_list.append(cont[0][i][0])
            y_list.append(cont[0][i][1])
        x_min = min(x_list)
        x_max = max(x_list)
        y_min = min(y_list)
        y_max = max(y_list)
        # COCO bbox convention: [x, y, width, height].
        bbox = [x_min, y_min, x_max - x_min, y_max - y_min]
        # put everything to a single json file.
        # parent_dir = os.path.dirname(path)
        file_name_with_extention = os.path.basename(im_path)
        image_nbr_for_anno = file_name_with_extention.split('.')[0]
        anno = {
            "category_id": 1,
            "id": 1,
            "image_id": image_nbr_for_anno,
            "iscrowd": 0,
            "segmentation": [],
            "area": ground_truth_area.tolist(),
            "bbox": bbox,
        }
        # All contours contribute polygon segmentations to the annotation.
        for contour in contours:
            contour = np.flip(contour, axis=1)
            segmentation = contour.ravel().tolist()
            anno["segmentation"].append(segmentation)
        annotation.append(anno)
    coco = {"categories": [
        {
            "id": 1,
            "name": "polyp",
            "supercategory": ""
        },
        {
            "id": 2,
            "name": "instrument",
            "supercategory": ""
        }
    ],
        "images": images,
        "annotations": annotation}
    return coco
    # NOTE(review): unreachable -- this line follows the return statement.
    print(json.dumps(coco, indent=4))
4821139 | <gh_stars>1000+
"""
Mini commands - Provides a template for writing quick
command classes in Python using the subprocess module.
Author: <NAME> <<EMAIL>>
"""
import os
import time
from subprocess import *
class CmdProcessor(object):
    """ Class providing useful functions to execute system
    commands using subprocess module """
    # NOTE(review): Python 2 only code (``except Exception,e`` syntax and
    # print statements). The ``args=[]`` mutable defaults below are shared
    # across calls; harmless here because they are never mutated.
    def execute_command_in_shell(self, command,args=[]):
        """ Execute a shell command
        Parameters:
        command - The command to execute
        args - Command arguments, as a list
        Returns the child's exit status, or -1 if spawning failed.
        """
        # shell=True takes a single command-line string, so join the parts.
        execfn = ' '.join([command] + list(args))
        try:
            p = Popen(execfn, env=os.environ, shell=True)
            p.wait()
            return p.returncode
        except Exception,e:
            print e
            return -1
    def execute_command(self, command, args=[]):
        """ Execute a command
        Parameters:
        command - The command to execute
        args - Command arguments, as a list
        Returns the child's exit status, or -1 if spawning failed.
        """
        execfn = [command] + list(args)
        try:
            p = Popen(execfn, env=os.environ)
            p.wait()
            return p.returncode
        except Exception,e:
            print e
            return -1
    def execute_command_in_pipe(self, command, args=[], estdin=None, estdout=None):
        """ Execute a command by reading/writing input/output from/to optional
        streams like a pipe. After completion, return status

        estdin/estdout may each be a file-like object (anything with a
        read/write method) or a path string to open; when omitted, the
        corresponding child stream is a PIPE. Returns the exit status,
        or -1 on failure.
        """
        execfn = [command] + list(args)
        try:
            in_stream = False
            out_stream = False
            # Check if this is a stream
            if hasattr(estdin, 'read'):
                fpin = estdin
                in_stream = True
            elif type(estdin) in (str, unicode):
                # A string is treated as a filename to read input from.
                fpin = open(estdin, 'r')
                in_stream = True
            if hasattr(estdout, 'write'):
                fpout = estdout
                out_stream = True
            elif type(estdout) in (str, unicode):
                # A string is treated as a filename to write output to.
                fpout = open(estdout, 'w')
                out_stream = True
            # Wire the child's stdin/stdout to the resolved streams,
            # defaulting to PIPE for whichever side was not provided.
            if in_stream and out_stream:
                p = Popen(execfn, stdin=fpin, stdout=fpout, stderr=PIPE)
            elif in_stream and not out_stream:
                p = Popen(execfn, stdin=fpin, stdout=PIPE, stderr=PIPE)
            elif not in_stream and out_stream:
                p = Popen(execfn, stdin=PIPE, stdout=fpout, stderr=PIPE)
            elif not in_stream and not out_stream:
                p = Popen(execfn, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            return p.wait()
        except Exception,e:
            print str(e)
            return -1
class MiniCommand(object):
    """ Base class for mini-commands

    Subclasses set ``command`` (and optionally ``prefix_args``, ``template``
    and ``func``) as class attributes; instances are then callable with the
    remaining arguments. ``func`` names which CmdProcessor method executes
    the command.
    """
    # This is the original command executed by the class
    command = None
    # Any prefix arguments which will be used by all
    # sub-classes of this class
    prefix_args = []
    # A command template string which can be used
    # to define the skeleton of a command.
    template = ''
    # The base function which can be overridden
    func = 'execute_cmd'
    # Shared processor instance used by all mini-commands.
    cmdproc = CmdProcessor()
    def __init__(self, command=None, prefix_args=[], template=''):
        # Constructor arguments override the class-level defaults per instance.
        if command:
            self.command = command
        if prefix_args:
            self.prefix_args = prefix_args
        if template:
            self.template = template
        self.call_func = getattr(self, self.func)
    def __call__(self, *args, **kwargs):
        # Arguments of the form name=value are moved into kwargs.
        # NOTE(review): when ``template`` is set, ``args`` becomes a single
        # formatted string and the loop below iterates its characters; also
        # ``args.remove`` mutates the sequence while it is being iterated,
        # which can skip elements -- TODO confirm intended behaviour.
        args = self.prefix_args + list(args)
        if self.template:
            args = self.template % tuple(args)
            # args = args.split()
        print 'ARGS=>',args
        for item in args:
            if item.find('=') != -1:
                args.remove(item)
                name, value = item.split('=')
                kwargs[name] = value
        return self.call_func(*args, **kwargs)
    def execute_cmd(cls, *args, **kwargs):
        # Run via subprocess without a shell.
        return cls.cmdproc.execute_command(cls.command, args, **kwargs)
    def execute_shell_cmd(cls, *args, **kwargs):
        # Run through the shell (needed for shell builtins like 'dir').
        return cls.cmdproc.execute_command_in_shell(cls.command, args, **kwargs)
    def execute_cmd_in_pipe(cls, *args, **kwargs):
        # Run with optional stdin/stdout redirection (see CmdProcessor).
        return cls.cmdproc.execute_command_in_pipe(cls.command, args, **kwargs)
    # Pre-2.4 style classmethod registration (equivalent to @classmethod).
    execute_cmd = classmethod(execute_cmd)
    execute_shell_cmd = classmethod(execute_shell_cmd)
    execute_cmd_in_pipe = classmethod(execute_cmd_in_pipe)
# Simple example : ls command
class ListDirCmd(MiniCommand):
    """ This is a sample command added to display functionality """
    # Directory listing: 'ls' on POSIX, 'dir' on Windows; executed through
    # the shell because 'dir' is a cmd.exe builtin.
    if os.name == 'posix':
        command = 'ls'
    elif os.name == 'nt':
        command = 'dir'
    func = 'execute_shell_cmd'
class DirTreeCmd(MiniCommand):
    # Windows-only directory tree display; ``command`` stays None elsewhere.
    if os.name == 'nt':
        command = 'tree.com'
class DeltreeCmd(MiniCommand):
    """ Command to remove a directory tree """
    # 'rm -rf' on POSIX; 'rmdir /S /Q' (silent recursive delete) on Windows.
    if os.name == 'posix':
        command = 'rm'
        prefix_args = ['-rf']
    elif os.name == 'nt':
        command = 'rmdir'
        prefix_args = ['/S','/Q']
    func = 'execute_shell_cmd'
class IPConfigCmd(MiniCommand):
    # Network configuration dump (Windows 'ipconfig').
    command = "ipconfig"
class PythonCmd(MiniCommand):
    # Launch the 'python' interpreter found on PATH.
    command = 'python'
# Java key-tool command
class JavaKeytoolCommand(MiniCommand):
    """ Class encapsulating java key-tool command """
    command = 'keytool'
class SampleKeystoreGenCmd(JavaKeytoolCommand):
    """ Generate sample key store using key-tool """
    # Runs with redirected streams; the template placeholders are, in order:
    # keystore path, key alias, and the stdin source (file or stream) that
    # __call__ extracts into kwargs via the 'estdin=' token.
    func = 'execute_cmd_in_pipe'
    template = '-genkey -keystore %s -keyalg RSA -alias %s -trustcacerts estdin=%s'
if __name__ == '__main__':
    # Demo driver exercising the sample mini-commands (Python 2 syntax).
    # example: ls command
    lsinst = ListDirCmd()
    lsinst()
    lsinst('-al')
    cmd = IPConfigCmd()
    cmd("/all")
    # Launches an interactive interpreter; blocks until it exits.
    cmd = PythonCmd()
    cmd()
    # Create scratch directories, ignoring "already exists" errors.
    try:
        os.makedirs("/tmp/abcd")
        os.makedirs("/tmp/abcd2")
    except os.error, e:
        pass
    # Remove them again with the deltree command and print its exit status.
    cmd = DeltreeCmd()
    if os.path.isdir('/tmp/abcd'):
        print cmd('/tmp/abcd')
    if os.path.isdir('/tmp/abcd2'):
        print cmd('/tmp/abcd2')
| StarcoderdataPython |
1629941 | import argparse
import googlemaps
import carpool_data as cd
import numpy as np
if __name__ == '__main__':
    # Build pairwise driving distance/duration matrices for a set of
    # coordinates using the Google Maps Distance Matrix API, and write them
    # to CSV files.
    parser = argparse.ArgumentParser(description="Get Distance Matrix from Coordinates")
    parser.add_argument('--api_key', default='')
    parser.add_argument('--coords_file', default='map_data/carpool_map_coordinates_test.csv')
    parser.add_argument('--mode', default='driving')
    parser.add_argument('--units', default='metric')
    parser.add_argument('--language', default='en')
    args = vars(parser.parse_args())
    # load location coordinates into array
    points = cd.load_coordinates(args['coords_file'])
    points = np.asarray(points)
    num_points = len(points)
    # Destinations are requested in batches of 100; 'limit' is the largest
    # multiple of 100 below num_points and 'remainder' is the final batch
    # size (presumably chosen to respect the API's per-request element
    # limit -- TODO confirm against current API quotas).
    limit = num_points // 100 * 100
    remainder = num_points % 100
    gmaps = googlemaps.Client(key=args['api_key'])
    distances = []
    durations = []
    # One API row per origin point i, batched over destinations j.
    for i in range(num_points):
        distances.append([])
        durations.append([])
        j = 0
        incr = 100
        while j < num_points:
            if j >= limit:
                incr = remainder
            dist_mat_rows = gmaps.distance_matrix(points[i:i + 1, :], points[j:j + incr, :],
                                                  mode=args['mode'],
                                                  units=args['units'],
                                                  language=args['language'])['rows']
            # Single origin per request, so only rows[0] is populated.
            for element in dist_mat_rows[0]['elements']:
                distances[i].append(element['distance']['value'])
                durations[i].append(element['duration']['value'])
            j += incr
    # Values are metres / seconds; store as unsigned integer matrices.
    distance_matrix = np.asarray(distances, dtype=np.uint32)
    duration_matrix = np.asarray(durations, dtype=np.uint32)
    np.savetxt('map_data/distance_matrix_test.csv', distance_matrix, fmt='%d', delimiter=',', newline='\n')
    np.savetxt('map_data/duration_matrix_test.csv', duration_matrix, fmt='%d', delimiter=',', newline='\n')
| StarcoderdataPython |
1708810 | # Custom template context processors
from publisher.config import SITE_CONFIG
def site_config_processor(request):
    """Django template context processor exposing the global SITE_CONFIG.

    Register in TEMPLATES['OPTIONS']['context_processors'] so every template
    rendered with a RequestContext can reference ``SITE_CONFIG`` directly.
    """
    return dict(SITE_CONFIG=SITE_CONFIG)
| StarcoderdataPython |
48588 | import socket
import xmlrpc.client
""" referemce: https://stackoverflow.com/a/14397619 """
class ServerProxy:
    """Context manager around ``xmlrpc.client.ServerProxy`` that applies a
    process-wide default socket timeout for the duration of the ``with``
    block and restores the previous default on exit.
    """

    def __init__(self, url, timeout=10):
        self.__url = url
        self.__timeout = timeout
        self.__prevDefaultTimeout = None

    def __enter__(self):
        try:
            if self.__timeout:
                # Remember the current process-wide default so __exit__ can
                # restore it, then apply the requested timeout.
                self.__prevDefaultTimeout = socket.getdefaulttimeout()
                socket.setdefaulttimeout(self.__timeout)
            proxy = xmlrpc.client.ServerProxy(self.__url, allow_none=True)
        except Exception as ex:
            raise Exception("Unable create XMLRPC-proxy for url '%s': %s" % (self.__url, ex))
        return proxy

    def __exit__(self, type, value, traceback):
        # Bug fix: the original restored the default only when the saved
        # value was None (``if self.__prevDefaultTimeout is None``), so a
        # non-None previous default timeout was never put back. Restore
        # whenever we overrode it in __enter__.
        if self.__timeout:
            socket.setdefaulttimeout(self.__prevDefaultTimeout)
| StarcoderdataPython |
178347 | # Generated by Django 3.0.6 on 2020-05-07 11:56
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the 'whereami' app: Location, Game,
    # Challenge, ChallengeLocation and Guess (one guess per user per
    # challenge-location, enforced by unique_together). Do not edit the
    # operations by hand; generate a new migration instead.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Challenge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.IntegerField()),
                ('pub_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('lat', models.FloatField()),
                ('long', models.FloatField()),
                ('pub_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
            ],
        ),
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, unique=True)),
                ('locations', models.ManyToManyField(to='whereami.Location')),
            ],
        ),
        migrations.CreateModel(
            name='ChallengeLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('challenge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='whereami.Challenge')),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='whereami.Location')),
            ],
        ),
        migrations.AddField(
            model_name='challenge',
            name='game',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='whereami.Game'),
        ),
        migrations.CreateModel(
            name='Guess',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lat', models.FloatField()),
                ('long', models.FloatField()),
                ('score', models.IntegerField()),
                ('distance', models.IntegerField()),
                ('pub_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('challenge_location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='whereami.ChallengeLocation')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('user', 'challenge_location')},
            },
        ),
    ]
| StarcoderdataPython |
1620706 | from ._client import SlackClient
| StarcoderdataPython |
1606513 | from django.urls import path
from movie_api import views
# URL namespace, used for reversing, e.g. reverse('api:movies').
app_name = 'api'
urlpatterns = [
    path('movies/', views.MovieApiView.as_view(), name='movies'),
    # The same view handles detail lookups via the pk kwarg.
    path('movies/<int:pk>/', views.MovieApiView.as_view()),
    path('comments/', views.CommentList.as_view(), name='comments'),
    path('comments/<int:pk>/',
         views.CommentDetail.as_view(),
         name='comment'),
    path('top/', views.Top.as_view(), name='top'),
]
| StarcoderdataPython |
143486 | import random
from random import randint
import networkx as nx
import math
import matplotlib.pyplot as plt
import Evaluation as eval
#This is local search heuristic Simulated Annealing.
def anneal_DS(old_solution, allocated_network_topology):
    """Simulated-annealing local search over data-store (DS) placements.

    Starting from ``old_solution`` (a list of placed (node, attrs) tuples),
    repeatedly proposes a neighbouring placement and accepts it according to
    ``acceptance_probability``, cooling T geometrically by ``alpha``.

    NOTE(review): the iteration counter ``i`` is never reset, so the inner
    ``while i < 300`` loop only runs during the first pass of the outer
    loop; subsequent outer passes just cool T and log. Also, the two
    asserts near the end (``i == i`` and ``cost != 0``, where ``cost`` is
    the function object) are always true and therefore no-ops -- TODO
    confirm intended behaviour.
    """
    #This is algortihm 9 - Optimize DDS placement
    #print('----------SA---------')
    #get the cost from the initialized solution.
    #old_solution = solution(allocated_network_topology)
    old_cost = cost(old_solution, allocated_network_topology)
    T = 1.0
    T_min = 0.000500
    alpha = 0.9
    c = 0.25
    linear_factor = 0.25
    i = 1
    while T > T_min:
        print('/')
        #indicating the max iteration number for the algorithm.
        while i < 300:
            #Logarithmic schedule
            #T = c / (math.log(i) + 1)
            #Linear schedule
            #T -= linear_factor
            #Adaptive
            #if T < 00.9:
            #    T = T * 0.9
            #elif T < 0.9:
            #    T = T * 0.5
            #else:
            #    T = T * 0.1
            new_solution = neighbor(old_solution,allocated_network_topology)
            new_cost = cost(new_solution, allocated_network_topology)
            ap = acceptance_probability(old_cost, new_cost, T)
            # Accept the candidate with probability ap (Metropolis step).
            if ap > random.random():
                #check this.
                old_solution = new_solution
                old_cost = new_cost
            i += 1
            #else:
            #    T = 0
        assert i == i
        # Geometric cooling schedule.
        T = T*alpha
        eval.eval_annealing_DS(T)
    print('DS-annealing finished!')
    print('cost',old_cost)
    #plt.show()
    assert cost != 0
    return old_solution
def solution(allocated_network_topology):
    """Collect the (node, attrs) tuples currently marked as placed.

    Derives the initial placement list from the graph's node attributes so
    later steps can work on a small list instead of re-scanning the whole
    topology.

    :param allocated_network_topology: graph whose nodes carry a
        ``placed`` attribute
    :return: list of (node_id, attribute_dict) tuples with placed == True
    """
    placed_nodes = []
    for node_id, attrs in allocated_network_topology.nodes(data=True):
        if attrs['placed'] == True:
            placed_nodes.append((node_id, attrs))
    return placed_nodes
def neighbor(solution, allocated_network_topology):
    """Propose a neighbouring placement by moving one placed node's storage
    to a randomly chosen graph neighbour.

    NOTE(review): ``new_solution = old_solution`` aliases (does not copy)
    the input list, so the caller's solution is mutated in place. The
    ``placed`` flag is never set to True, so the ``while`` loop can only
    terminate via the inner ``break`` statements -- and the trailing print/
    return appear unreachable from the loop; TODO confirm control flow.
    Membership of a neighbour in the solution is tested by indexing and
    catching IndexError, which presumably assumes node ids are integer-like
    strings indexable into the list -- verify against the graph's node ids.
    """
    #This is algortihm 10 - Neighbour / New solution
    #this function finds the neighbour of the node. - We use a list representing the nodes in the graph.
    #To avoid copying the whole graph - hence, making it more scalable.
    assert len(solution) > 0
    old_solution = solution
    #Copying the old solution to manipulate
    new_solution = old_solution
    placed = False
    while placed == False:
        #picks random in solution
        #random_node = random.choice(solution_nodes)
        random_pick = random.choice(new_solution)
        random_node = random_pick[0]
        print('random node', random_node)
        #getting the random node's neigbours from the graph
        nodes_neighbours = list(allocated_network_topology.neighbors(random_node))
        #Shuffling the nodes-neighbour
        random.shuffle(nodes_neighbours)
        n = len(nodes_neighbours)
        #--------------------------------------------------------------------
        assert len(nodes_neighbours) > 0
        #Checking if the random-nodes-neighbours is already in the list.
        for o, p in enumerate(nodes_neighbours):
            neighbouring_node = nodes_neighbours[o]
            print('neighbouring node',neighbouring_node)
            #Finding the neighbours neighbour in order to check for the connectivity contraint.
            neighbours_neighbours = list(allocated_network_topology.neighbors(neighbouring_node))
            u = len(neighbours_neighbours)
            u -= 1
            #trying to catch the exception - the exception means that the random-neighbour is not in the list.
            try:
                neighbouring_node_int = int(neighbouring_node)
                check = new_solution[neighbouring_node_int][0]
                break
            except IndexError:
                # Candidate must have enough capacity, be currently unused,
                # and have sufficient connectivity (u > 2) before the swap.
                if allocated_network_topology.node[neighbouring_node]['storage_capacity'] >= allocated_network_topology.node[random_node]['storage_capacity'] and allocated_network_topology.node[neighbouring_node]['storage_usage'] == 0 and u > 2:
                    print('loop')
                    #switching the random node with the random node's neighbour. - look at RB_placement for optimized code.
                    #Checking all nodes in the network topology
                    for node in allocated_network_topology.nodes(data=True):
                        #picks the random neighbour in the graph
                        if node[0] == neighbouring_node:
                            print('loop2')
                            #takes the length of the node's attributes and appending it to the solution.
                            #print(neighbouring_node)
                            k = len(node[1])
                            list.append(new_solution,node)
                            print('placed node', node)
                            list.remove(new_solution, random_pick)
                            print('node removed', random_pick)
                            break
                    break
            #list.append(new_solution,nodes)
    print('new sol', new_solution)
    return new_solution
def cost(solution, allocated_network_topology):
    """Sum of pairwise shortest-path lengths between placed nodes that share
    the same cluster_ID -- the objective minimised by the annealing.

    NOTE(review): the ``while True`` wrapper executes exactly once (the
    function returns at the end of the first pass); it adds nothing over a
    straight-line body. Node ids are converted with ``int(...)``, so this
    presumably assumes integer-like ids -- verify.
    """
    #This is algortihm 11 - Cost
    #Here goes the cost solution. This method calculates the cost from the new-solution generated by the previous method.
    assert len(solution) > 0
    latencies = []
    while True:
        #print('--went here--')
        #Iterates the new solution for the source node from which we want to find the latency
        for u, i in enumerate(solution[:-1]):
            current_source = int(solution[u][0])
            next_source = solution[u+1][0]
            #iterates the new solution for the target want to find the latency to.
            for j, L in enumerate(solution):
                target = int(solution[j][0])
                #This picks the current source in the new_solution - this is done because we want to
                #compare the source node with every node in the same cluster/same DS.
                if current_source != next_source and target != current_source:
                    #And marks the latency between
                    assert isinstance(current_source, int)
                    assert isinstance(target, int)
                    #Here we pick the target with the same cluster ID as the current source.
                    if solution[j][1]['cluster_ID'] == solution[u][1]['cluster_ID']:
                        assert solution[j][1]['cluster_ID'] != None
                        assert solution[u][1]['cluster_ID'] != None
                        try:
                            # Hop count between the two placed nodes.
                            latency = nx.shortest_path_length(allocated_network_topology, source=current_source, target=target)
                            latencies.append(latency)
                            assert len(latencies) != 0
                        except nx.NetworkXNoPath:
                            # Disconnected pair contributes nothing to the cost.
                            print('no path between', current_source, 'and', target)
                            pass
                        continue
                    #else if current_cluster-ID does have a match
                    else:
                        assert isinstance(solution[j][1]['cluster_ID'], int)
                        continue
        cost = sum(latencies)
        eval.eval_topology_DS(cost)
        print(cost)
        #could MST been used here? - using the solution as a graph.
        return cost
def acceptance_probability(old_cost, new_cost, T):
    """Metropolis acceptance criterion for simulated annealing.

    An improving move (new_cost < old_cost) is accepted unconditionally
    (probability 1.0). A worsening move is accepted with probability
    exp((old_cost - new_cost) / T), which decays towards 0 as T cools.

    Bug fix: the original computed exp((new_cost - old_cost) / T); for a
    worse solution that exponent is positive, so the probability was always
    >= 1 and every worsening move was accepted -- the search degenerated
    into a random walk. Also, on T == 0 the original returned +inf (always
    accept); a frozen system should never accept a worse move, so 0.0 is
    returned instead.

    :param old_cost: cost of the current solution (> 0)
    :param new_cost: cost of the candidate solution (> 0)
    :param T: current temperature
    :return: acceptance probability in [0.0, 1.0]
    """
    assert old_cost > 0
    assert new_cost > 0
    if new_cost < old_cost:
        acceptance_probability = 1.0
    else:
        try:
            acceptance_probability = math.exp((old_cost - new_cost) / T)
        except (OverflowError, ZeroDivisionError):
            # T == 0 (or numerical trouble): never accept a worse move.
            acceptance_probability = 0.0
    print('acceptance prop', acceptance_probability)
    print('temperature', T)
    return acceptance_probability
def place_optimization(solution, allocated_network_topology):
    """Write the final annealed placement back into the graph.

    First clears ``storage_usage``/``placed`` on every node, then marks each
    node in ``solution`` as placed with its full storage capacity used.

    NOTE(review): the second outer loop over all graph nodes re-runs the
    identical inner placement loop once per graph node, redundantly
    re-applying the same updates |V| times -- presumably the outer loop is
    unnecessary; confirm before simplifying.
    """
    assert len(solution) > 0
    ##This method places the final annealed solution in the actual graph.
    print('final solution',solution)
    #Remove all storage in node. - to make space for a clean solution
    #allocated_network_topology.add_node(random_node, storage_usage=0, placed=False)
    for node in allocated_network_topology.nodes(data=True):
        graph_node = node[0]
        allocated_network_topology.add_node(graph_node, storage_usage=0, placed=False)
    #Place Storage - taking the storage-usage of the randomly picked node and puts it into the randomly picked node's also randomly picked neibour
    for node in allocated_network_topology.nodes(data=True):
        graph_node = node[0]
        for u, i in enumerate(solution):
            solution_node = solution[u][0]
            allocated_network_topology.add_node(solution_node, storage_usage=allocated_network_topology.node[solution_node]['storage_capacity'], placed=True)
    return
###When scaling - be sure- that it compares target to all the nodes in cluster_ID!!! | StarcoderdataPython |
138103 | from stacker.context import Context
from stacker.config import Config
from stacker.variables import Variable
from stacker_blueprints.network import Network
from stacker.blueprints.testutil import BlueprintTestCase
class TestNetwork(BlueprintTestCase):
    """Unit tests for the stacker_blueprints Network blueprint: validates
    mutually exclusive gateway options and the rendered template resources."""
    def setUp(self):
        # Fresh stacker context plus baseline variables shared by all tests.
        self.ctx = Context(config=Config({'namespace': 'test'}))
        self.common_variables = {
            "VpcId": "vpc-abc1234",
            "VpcDefaultSecurityGroup": "sg-01234abc",
            "AvailabilityZone": "us-east-1a",
            "CidrBlock": "10.0.0.0/24",
        }
    def create_blueprint(self, name):
        return Network(name, self.ctx)
    def generate_variables(self, variable_dict=None):
        # Merge per-test overrides into the common variables and wrap them
        # as stacker Variable objects.
        variable_dict = variable_dict or {}
        self.common_variables.update(variable_dict)
        return [Variable(k, v) for k, v in self.common_variables.items()]
    def test_network_fail_internet_nat_gateway(self):
        # Supplying both an internet gateway and a NAT gateway is invalid.
        bp = self.create_blueprint("test_network_fail_internet_nat_gateway")
        variables = {
            "InternetGatewayId": "gw-abc1234z",
            "NatGatewayId": "nat-abc1234z",
        }
        bp.resolve_variables(self.generate_variables(variables))
        with self.assertRaises(ValueError):
            bp.create_template()
    def test_network_fail_nat_gateway_and_create_nat_gateway(self):
        # An existing NAT gateway id conflicts with asking for a new one.
        bp = self.create_blueprint(
            "test_network_fail_nat_gateway_and_create_nat_gateway"
        )
        variables = {
            "NatGatewayId": "nat-abc1234z",
            "CreateNatGateway": True,
        }
        bp.resolve_variables(self.generate_variables(variables))
        with self.assertRaises(ValueError):
            bp.create_template()
    def test_network_with_nat_gateway_id(self):
        # Reusing an existing NAT gateway yields a private network whose
        # default route points at the supplied gateway id.
        bp = self.create_blueprint("test_network_with_nat_gateway_id")
        variables = {
            "NatGatewayId": "nat-abc1234z",
        }
        bp.resolve_variables(self.generate_variables(variables))
        bp.create_template()
        self.assertRenderedBlueprint(bp)
        self.assertNotIn("NatGateway", bp.template.resources)
        self.assertEqual(
            bp.template.resources["DefaultRoute"].NatGatewayId,
            "nat-abc1234z"
        )
        self.assertEqual(bp.network_type, "private")
    def test_network_with_internet_gateway_id_and_create_nat_gateway(self):
        # Public network that also provisions its own NAT gateway resource.
        bp = self.create_blueprint(
            "test_network_with_internet_gateway_id_and_create_nat_gateway"
        )
        variables = {
            "InternetGatewayId": "igw-abc1234z",
            "CreateNatGateway": True,
        }
        bp.resolve_variables(self.generate_variables(variables))
        bp.create_template()
        self.assertRenderedBlueprint(bp)
        self.assertIn("NatGateway", bp.template.resources)
        self.assertEqual(
            bp.template.resources["DefaultRoute"].GatewayId,
            "igw-abc1234z"
        )
        self.assertEqual(bp.network_type, "public")
    def test_network_with_internet_gateway_id_and_no_create_nat_gateway(self):
        # Public network without a NAT gateway resource.
        bp = self.create_blueprint(
            "test_network_with_internet_gateway_id_and_no_create_nat_gateway"
        )
        variables = {
            "InternetGatewayId": "igw-abc1234z",
        }
        bp.resolve_variables(self.generate_variables(variables))
        bp.create_template()
        self.assertRenderedBlueprint(bp)
        self.assertNotIn("NatGateway", bp.template.resources)
        self.assertEqual(
            bp.template.resources["DefaultRoute"].GatewayId,
            "igw-abc1234z"
        )
        self.assertEqual(bp.network_type, "public")
    def test_network_with_extra_tags(self):
        # User-supplied tags must propagate onto created resources.
        bp = self.create_blueprint("test_network_with_extra_tags")
        variables = {
            "NatGatewayId": "nat-abc1234z",
            "Tags": {"A": "apple"},
        }
        bp.resolve_variables(self.generate_variables(variables))
        bp.create_template()
        self.assertRenderedBlueprint(bp)
        route_table = bp.template.resources["RouteTable"]
        found_tag = False
        for tag in route_table.Tags.tags:
            if tag["Key"] == "A" and tag["Value"] == "apple":
                found_tag = True
        self.assertTrue(found_tag)
3288172 | # -*- coding: utf-8 -*-
from datetime import datetime
from functools import wraps
def cache_result():
    """Decorator factory that memoises the wrapped function's first result.

    The first invocation runs the function and stashes the return value on
    the function object itself; every later call -- regardless of the
    arguments passed -- returns that stored value.

    :return: a decorator applying the cache behaviour
    """
    def decorator(func):
        @wraps(func)
        def wrapped_function(*args, **kwargs):
            # EAFP: look the cached value up first, compute only on a miss.
            try:
                return func.__dict__['__result']
            except KeyError:
                outcome = func(*args, **kwargs)
                func.__dict__['__result'] = outcome
                return outcome
        return wrapped_function
    return decorator
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.