text stringlengths 0 1.05M | meta dict |
|---|---|
"""Advices for generating call graph"""
# pylint: disable=W0603
import inspect
import re
import os
# Module-level state and formatting knobs for the rendered call-graph lines.
COUNT = -1  # current call depth; starts at -1 so the first enter lands at depth 0
DEPTH_MARKER = "|"  # one marker is emitted per nesting level
ENTER_MARKER = ">"  # prefix for method-entry lines
EXIT_MARKER = "<"  # prefix for method-exit lines
RJUST_LONG = 50  # column width used for fully-qualified file names
RJUST_SMALL = 25  # column width used for short (basename) file names
def increase_depth(*arg, **kw):  # pylint: disable=W0613
    """Advice: push one level -- bump the global depth counter by one."""
    global COUNT
    COUNT = COUNT + 1
def write(_filename, _long, enter=True):
    """Build an advice that appends one call-graph line to ``_filename``.

    ``_long`` chooses between the fully-qualified file reference and the short
    basename form; ``enter`` selects the enter/exit marker.
    """
    def advice(*arg, **kw):  # pylint: disable=W0613
        """Reference to the advice in order to facilitate argument support."""
        def get_short(_fname):
            """Basename of the file (without .py); for __init__.py include its directory."""
            dir_path, short_fname = os.path.split(_fname)
            short_fname = short_fname.replace(".py", "")
            if short_fname == "__init__":
                short_fname = "%s.%s" % (os.path.basename(dir_path), short_fname)
            return short_fname

        def get_long(_fname):
            """Dotted path of the file starting at 'ansible'; fall back to the short form."""
            try:
                return re.findall(r'(ansible.*)\.py', _fname)[-1].replace(os.sep, ".")
            except IndexError:
                # If ansible is extending some library, ansible won't be present in the path.
                return get_short(_fname)

        # arg[1] is the intercepted bound method (Python 2 attributes).
        meth_code = arg[1].im_func.func_code
        fname = meth_code.co_filename
        lineno = meth_code.co_firstlineno
        _name = meth_code.co_name
        if _long:
            _fname, _rjust = get_long(fname), RJUST_LONG
        else:
            _fname, _rjust = get_short(fname), RJUST_SMALL
        if enter:
            marker = ENTER_MARKER
        else:
            marker = EXIT_MARKER
            # On exit, report the method's last line instead of its first.
            try:
                lineno += len(inspect.getsourcelines(meth_code)[0]) - 1
            except Exception:  # pylint: disable=W0703
                # TODO: Find other way to get ending line number for the method
                # Line number same as start of method.
                pass
        with open(_filename, "a") as fptr:
            fptr.write("%s: %s:%s %s%s\n" % (
                _fname.rjust(_rjust),  # filename
                str(lineno).rjust(4),  # line number
                (" %s" % DEPTH_MARKER) * COUNT,  # Depth
                marker,  # Method enter, exit marker
                _name  # Method name
            ))
    return advice
def decrease_depth(*arg, **kw):  # pylint: disable=W0613
    """Advice: pop one level -- drop the global depth counter by one."""
    global COUNT
    COUNT = COUNT - 1
| {
"repo_name": "host-anshu/simpleInterceptor",
"path": "example/call_graph/advices.py",
"copies": "1",
"size": "2704",
"license": "mit",
"hash": -6268839843266755000,
"line_mean": 35.0533333333,
"line_max": 97,
"alpha_frac": 0.5403106509,
"autogenerated": false,
"ratio": 3.9131693198263386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4953479970726338,
"avg_score": null,
"num_lines": null
} |
"""AdWords reporting tools"""
__author__ = 'thorwhalen'
# This file contains functions to deal with AdWords reporting
import datetime
import functools
import json
import os
import pickle

import numpy as np
import pandas as pd

from adspygoogle import AdWordsClient

import ut as ms
import ut.aw.manip
import ut.daf.ch as daf_ch
import ut.pcoll.order_conserving as order_conserving
# import ut.misc.erenev.data_source as venere_data_source
#from datapath import datapath
def excel_report_file_to_df(excel_file, sheetname=0):
    """Read an AdWords excel report into a DataFrame with processed column names."""
    df = pd.read_excel(excel_file, sheetname=sheetname)
    if df.iloc[0, 1] == 'Unnamed: 1':
        # Banner variant: the real headers sit on the 3rd row, data starts on
        # the 4th, and the final row carries totals -- trim both ends.
        header = np.array(df.iloc[2])
        df = df.iloc[3:-1].copy()
        df.columns = header
    else:
        # Headers on the first row; the last row is a stats/totals row.
        df.columns = df.iloc[0]
        df = df.iloc[1:-1]
    df = df.reset_index(drop=True)
    # Normalize the AdWords column names in place.
    ms.aw.manip.process_aw_column_names(df)
    return df
def get_client(clientCustomerId='7998744469'):
    """Build an AdWordsClient for the given customer id (or account name).

    Credentials come from the VEN_ADWORDS_* environment variables.
    Known ids -- test: 7998744469, other: 5127918221, US 03: 7214411738, AU 01: 3851930085.
    """
    clientCustomerId = get_account_id(clientCustomerId)
    headers = {
        'email': os.getenv('VEN_ADWORDS_EMAIL'),
        'password': os.getenv('VEN_ADWORDS_EMAIL_PASSWORD'),
        'clientCustomerId': clientCustomerId,
        'userAgent': 'Venere',  # formerly 'MethodicSolutions'
        'developerToken': os.getenv('VEN_ADWORDS_TOKEN'),
        'validateOnly': 'n',
        'partialFailure': 'n',
    }
    print("Getting client for clientCustomerId={}".format(clientCustomerId))
    return AdWordsClient(headers=headers)
def get_report_downloader(client='test'):
    """
    gets a ReportDownloader for a given client. Client can be:
    - an actual AdWordsClient
    - an account name or id (uses get_account_id to get an id from a name
    -
    """
    if isinstance(client, AdWordsClient):
        return client.GetReportDownloader(version='v201302')
    print(client)
    print(type(client))
    if isinstance(client, str):
        client = get_client(clientCustomerId=client)
    return client.GetReportDownloader(version='v201302')
def download_report(report_downloader, report_query_str, download_format='df', thousands=None, dtype=None):
    """Run an AWQL query and return the report.

    ``report_downloader`` may be a ReportDownloader, an AdWordsClient, or an
    account name/id string. With download_format='df' (default) the TSV report
    is parsed into a DataFrame; any other format is returned as Google emits it.
    """
    if isinstance(report_downloader, AdWordsClient):
        report_downloader = report_downloader.GetReportDownloader(version='v201302')
    elif isinstance(report_downloader, str):
        report_downloader = get_report_downloader(client=report_downloader)
    if download_format != 'df':
        return report_downloader.DownloadReportWithAwql(report_query_str, download_format)
    google_report = report_downloader.DownloadReportWithAwql(report_query_str, 'TSV')
    return report_to_df(google_report, thousands, dtype)
def mk_report_query_str(
        varList='default',
        source='SEARCH_QUERY_PERFORMANCE_REPORT',
        start_date=1,
        end_date=None,
        where_dict=None
):
    """
    Makes a query string that will be input to DownloadReportWithAwql.

    :param varList: group name, comma-separated string, or list of field names
    :param source: report type (e.g. 'SEARCH_QUERY_PERFORMANCE_REPORT')
    :param start_date: int (days before end_date) or a date spec (see x_to_date)
    :param end_date: date spec; defaults to today, computed at call time
        (BUG FIX: was evaluated once at import time)
    :param where_dict: extra WHERE conditions {field: '=VALUE'}
        (BUG FIX: was a mutable default argument)
    :return: an AWQL query string
    """
    if end_date is None:
        end_date = datetime.date.today()
    if where_dict is None:
        where_dict = {}
    # where_dict POSITIVE filter (oblige these to be present, and specify the
    # default values if not specified).
    # MJM: I am removing 'Status':'=ACTIVE' from KEYWORDS_PERFORMANCE_REPORT, as discussed
    # with TW. He will remove it manually from the scoring code.
    # NOTE(review): the code below still includes 'Status':'=ACTIVE' despite the
    # comment above -- confirm which is intended.
    if source == 'KEYWORDS_PERFORMANCE_REPORT':
        where_dict_filter = {'CampaignStatus': '=ACTIVE', 'AdGroupStatus': '=ENABLED', 'Status': '=ACTIVE', 'IsNegative': '=False'}
    elif source == 'KEYWORDS_PERFORMANCE_REPORT_IGNORE_STATUS':
        where_dict_filter = {'CampaignStatus': '=ACTIVE', 'AdGroupStatus': '=ENABLED', 'IsNegative': '=False'}
    elif source == 'SEARCH_QUERY_PERFORMANCE_REPORT':
        where_dict_filter = {'CampaignStatus': '=ACTIVE', 'AdGroupStatus': '=ENABLED'}
    elif source == 'AD_PERFORMANCE_REPORT':
        where_dict_filter = {'CampaignStatus': '=ACTIVE', 'AdGroupStatus': '=ENABLED'}
    else:
        where_dict_filter = {}
    # Resolve varList: a comma-free string is treated as a group name.
    if isinstance(varList, str):
        if varList.find(',') == -1:
            varList = get_var_list_str(group=varList, source=source)
    else:
        if not isinstance(varList, list):
            raise ValueError("varList must be a comma seperated string or a list of strings")
    # Map varList names to disp names (the ones expected by google's query language).
    varList = x_to_disp_name(varList)
    if isinstance(varList, list):
        varList = ', '.join(varList)
    # Map where_dict keys to disp names, then overlay user conditions on the filter.
    where_dict = {x_to_disp_name(k): v for (k, v) in list(where_dict.items())}
    where_dict = dict(where_dict_filter, **where_dict)
    where_str = ' AND '.join([k + v for (k, v) in list(where_dict.items())])
    date_range = dateRange(start_date, end_date)
    return 'SELECT ' + varList + ' FROM ' + source + ' WHERE ' + where_str + ' DURING ' + date_range
def report_to_df(report, thousands=None, dtype=None):
    """Parse a TSV Google report string into a DataFrame with lu-style column names."""
    import pandas as pd
    import tempfile
    tempf = tempfile.NamedTemporaryFile()
    try:
        # Round-trip the report through a temp file so pandas can parse it;
        # skip the leading title row and the trailing totals row.
        tempf.write(report)
        tempf.seek(0)
        df = pd.io.parsers.read_csv(tempf, skiprows=1, skipfooter=1, header=1, delimiter='\t', thousands=thousands, dtype=dtype)
        return daf_ch.ch_col_names(df, x_to_lu_name(list(df.columns)), list(df.columns))
    finally:
        tempf.close()
def get_df_concatination_of_several_accounts(account_list, varList=None, number_of_days=300):
    """Download the search-query report for each account and concatenate the results.

    :param account_list: iterable of account names/ids accepted by get_report_downloader
    :param varList: fields to request; defaults to a small standard set
    :param number_of_days: report window, counted back from today
    :return: one DataFrame with all accounts stacked (None for an empty account_list)
    """
    if varList is None:
        varList = 'Query, Impressions, AdGroupName, CampaignName, AdGroupStatus'
    report_query_str = mk_report_query_str(varList=varList, source='SEARCH_QUERY_PERFORMANCE_REPORT', start_date=number_of_days)
    df = None
    for a in account_list:
        # BUG FIX: the module does `import datetime`, so the class is
        # datetime.datetime -- the original `datetime.now()` raised AttributeError.
        print("%s: downloading %s" % (datetime.datetime.now(), a))
        report_downloader = get_report_downloader(a)
        print(" %s: concatinating" % datetime.datetime.now())
        df = pd.concat([df, download_report(report_downloader=report_downloader, report_query_str=report_query_str, download_format='df')])
    return df
########################################################################################################################
# UTILS
def x_to_date(x):
    """
    Coerce a flexibly-specified date into a datetime.date.

    Accepts a [year, month, day] list or an existing datetime.date
    (datetime.datetime instances pass through unchanged as well).

    :raises ValueError: for any other input
        (BUG FIX: previously printed "Unknown format" and returned None,
        which made callers fail later with an obscure AttributeError;
        the original TODO asked for an exception.)
    """
    if isinstance(x, list) and len(x) == 3:
        return datetime.date(year=x[0], month=x[1], day=x[2])
    if isinstance(x, datetime.date):
        return x
    raise ValueError("Unknown format")
def dateRange(start_date=1, end_date=None):
    """
    util to get a date range string as a google query expects it
    dateRange(i) (i is an int) returns the range between i days ago to now
    dateRange(x) returns the range from x_to_date(x) to now
    dateRange(x,y) returns the range from x_to_date(x) to x_to_date(y)

    BUG FIX: end_date previously defaulted to datetime.date.today() evaluated
    once at import time, so a long-running process would silently use a stale
    "today"; it is now computed per call.
    """
    if end_date is None:
        end_date = datetime.date.today()
    end_date = x_to_date(end_date)
    if isinstance(start_date, int):
        start_date = end_date - datetime.timedelta(days=start_date)
    else:
        start_date = x_to_date(start_date)
    return '{},{}'.format(start_date.strftime("%Y%m%d"), end_date.strftime("%Y%m%d"))
# MJM - this method lets you set up a decorator (@lu_name_df) above any method that returns a dataframe and should have
# the dataframe column names converted to lu names
def lu_name_df(func):
    """Decorator: rename the returned DataFrame's columns to their lu-name forms."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        result.columns = x_to_lu_name(result.columns)
        return result
    return wrapper
########################################################################################################################
# PARAMETERS
def x_to_lu_name(var):
    """Return a copy of *var* with every recognized variable name in its lu_name form."""
    return _convert_name(var, lu_name_x_dict)
def x_to_xml_name(var):
    """Return a copy of *var* with every recognized variable name in its xml-name form."""
    return _convert_name(var, xml_x_dict)
def x_to_disp_name(var):
    """Return a copy of *var* with every recognized variable name in its display-name form."""
    return _convert_name(var, disp_x_dict)
def _convert_name(list_of_names, mapping_dict):
"""
returns a (copy) of the list var where all recognized variable names are converted to a standardized name specified
by the keys of the mapping_dict
"""
if isinstance(list_of_names,str):
type_of_input_list = 'string'
list_of_names = [x.strip() for x in list_of_names.split(',')]
else:
if not isinstance(list_of_names, list):
list(list_of_names)
type_of_input_list = 'list'
list_of_names = list(list_of_names) # a copy of var
# else:
# raise TypeError("input must be a comma separated string or a list")
for vi, v in enumerate(list_of_names):
for key, value in mapping_dict.items():
if v in value:
list_of_names[vi] = key
continue
if type_of_input_list == 'list':
return list_of_names
else: # assuming type_of_input_list was a string
return ', '.join(list_of_names)
def get_var_list_str(group='default', source='SEARCH_QUERY_PERFORMANCE_REPORT'):
    """Return the field list registered under *group* for the given report *source*.

    :param group: a named field group, or 'groups' to get the whole mapping
    :param source: report type selecting which group table to use
    :return: the group's field spec (string or list), '' for an unknown group,
        and {} / '' for an unknown source
        (BUG FIX: an unknown source previously left ``var_list`` undefined and
        raised NameError below.)
    """
    if source == 'SEARCH_QUERY_PERFORMANCE_REPORT':
        var_list = {
            'default': ('Query, AdGroupId, AdGroupName, AveragePosition, '
                        'CampaignId, CampaignName, Clicks, Cost, Impressions, KeywordId, KeywordTextMatchingQuery, '
                        'MatchType, MatchTypeWithVariant'),
            'q_kmv_picc': ('Query, AveragePosition, KeywordTextMatchingQuery, MatchType, MatchTypeWithVariant, '
                           'AveragePosition, Impressions, Clicks, Cost'),
            'q_km_picc': 'Query, KeywordTextMatchingQuery, MatchType, AveragePosition, Impressions, Clicks, Cost',
            'q_matts_pick': ('Query, Impressions, AdGroupName, CampaignName, '
                             'KeywordTextMatchingQuery, MatchType, '
                             'Cost, AveragePosition, Clicks, AdGroupId, CampaignId, KeywordId'),
            'q_picc': 'Query, AveragePosition, Impressions, Clicks, Cost',
            'q_ipic': 'Query, KeywordId, Impressions, AveragePosition, Clicks',
            'q_iipic': 'Query, AdGroupId, KeywordId, Impressions, AveragePosition, Clicks'
        }
    elif source == 'AD_PERFORMANCE_REPORT':
        var_list = {
            'default':
                ['AdGroupName', 'AdGroupId', 'Headline', 'Description1', 'Description2', 'CreativeDestinationUrl', 'CampaignId'],
            'ad_elements':
                ['AdGroupName', 'AdGroupId', 'Headline', 'Description1', 'Description2', 'CreativeDestinationUrl', 'CampaignId']
        }
    elif source == 'KEYWORDS_PERFORMANCE_REPORT':
        var_list = {
            'default': ('KeywordText, KeywordMatchType, Max. CPC, AdGroupName, CampaignName, '
                        'Clicks, Cost, Impressions, AveragePosition, Id, AdGroupId, CampaignId'),
            'kw_elements': ['KeywordText', 'KeywordMatchType', 'MaxCpc', 'Id', 'AdGroupId', 'CampaignId',
                            'Clicks', 'Cost', 'Impressions', 'AveragePosition', 'DestinationUrl', 'Status'],
            'q_kw_perf': 'KeywordText, KeywordMatchType, AveragePosition, Impressions, Clicks, Cost, AdGroupId',
            'q_01': ('KeywordText, KeywordMatchType, Max. CPC, AdGroupName, CampaignName, '
                     'Clicks, Cost, Impressions, AveragePosition, Id, AdGroupId, CampaignId'),
            'q_static_attributes': ['KeywordText', 'KeywordMatchType', 'AdGroupName', 'CampaignName',
                                    'Id', 'AdGroupId', 'CampaignId'],
            'q_main_attributes': ['KeywordText', 'KeywordMatchType', 'MaxCpc', 'AdGroupName', 'CampaignName',
                                  'Id', 'AdGroupId', 'CampaignId'],
            'q_kmv_picc': ('KeywordText, KeywordMatchType, Max. CPC, '
                           'Clicks, Cost, Impressions, AveragePosition, Status'),
            'q_kms': 'KeywordText, KeywordMatchType, Status'
        }
    else:
        print("sorry, I'm not aware of the source name!")
        var_list = {}  # BUG FIX: keep the lookups below from raising NameError
    if group == 'groups':
        return var_list
    return var_list.get(group, '')  # empty if the group is not found
def print_some_report_sources():
    """Print a selection of available AdWords report source names, one per line.

    BUG FIX: the original list literal had no commas, so Python's implicit
    string concatenation collapsed all ten names into a single string and
    only one (merged) line was printed.
    """
    source_list = [
        'SEARCH_QUERY_PERFORMANCE_REPORT',
        'KEYWORDS_PERFORMANCE_REPORT',
        'AD_PERFORMANCE_REPORT',
        'ADGROUP_PERFORMANCE_REPORT',
        'CAMPAIGN_PERFORMANCE_REPORT',
        'ACCOUNT_PERFORMANCE_REPORT',
        'CLICK_PERFORMANCE_REPORT',
        'DESTINATION_URL_REPORT',
        'GEO_PERFORMANCE_REPORT',
        'URL_PERFORMANCE_REPORT',
    ]
    for source in source_list:
        print(source)
def import_account_str_to_id(source='/D/Dropbox/dev/py/data/aw/account_name_accountid.csv',
                             target='/D/Dropbox/dev/py/data/aw/account_name_accountid.p'):
    """
    This function imports a csv into a pickled dict that maps account names to
    account numbers (customer ids).

    :param source: csv path with an 'Account' column
        (BUG FIX: previously ignored -- a hard-coded path was always read)
    :param target: path the pickled dict is written to
    """
    df = pd.read_csv(source)
    df.index = df['Account']
    del df['Account']
    dfdict = df.to_dict()
    # BUG FIX: close the file handle deterministically instead of leaking it.
    with open(target, "wb") as fptr:
        pickle.dump(dfdict, fptr)
def get_account_id(account='test', account_str_to_id_dict=''):
    """
    Look up an AdWords customer id from an account name, using a mapping dict
    of the shape {'Customer ID': {account_name: customer_id, ...}}.

    Behaviour:
      * get_account_id('AU 01', d)   -> the id registered for that name
      * get_account_id('dict', d)    -> the whole {name: id} mapping
      * an id already in the mapping -> returned unchanged
      * '' (empty account)           -> prints the available names, returns {}
      * unknown name                 -> prints a warning + names, returns {}
    Raises ValueError when account_str_to_id_dict is not a dict.

    NOTE(review): the default mapping used to come from
    ut.misc.erenev.data_source (import commented out at the top of the file);
    callers must now pass the dict explicitly.
    """
    if not isinstance(account_str_to_id_dict, dict):
        raise ValueError("account_str_to_id_dict must be a dict")
    if not account:
        print("AVAILABLE ACCOUNT NAMES:")
        print(list(account_str_to_id_dict['Customer ID'].keys()))
        return dict()  # empty dict
    if account == 'dict':
        return account_str_to_id_dict['Customer ID']
    if account in list(account_str_to_id_dict['Customer ID'].values()):
        # Already a known customer id: pass it through untouched.
        return account
    if account not in account_str_to_id_dict['Customer ID']:
        print("THIS ACCOUNT NAME IS NOT AVAILABLE! AVAILABLE ACCOUNTS:")
        print(list(account_str_to_id_dict['Customer ID'].keys()))
        return dict()  # empty dict
    return account_str_to_id_dict['Customer ID'][account]
# Maps canonical lu-style names to the aliases that appear in AdWords report
# headers: the camelCase API names, the human display names, and (for a few
# entries) Dutch column labels. Consumed by _convert_name via x_to_lu_name.
lu_name_x_dict = {
'a_ce_split': ['aCESplit', 'ACE split'],
'account': ['account', 'Account'],
'account_id': ['accountID', 'Account ID'],
'ad': ['ad', 'Ad'],
'ad_approval_status': ['adApprovalStatus', 'Ad Approval Status'],
'ad_extension_id': ['adExtensionID', 'Ad Extension ID'],
'ad_extension_type': ['adExtensionType', 'Ad Extension Type'],
'ad_group': ['adGroup', 'Ad group', 'advertentiegroep'],
'ad_group_id': ['adGroupID', 'Ad group ID', 'adGroupId'],
'ad_group_state': ['adGroupState', 'Ad group state'],
'ad_id': ['adID', 'Ad ID'],
'ad_state': ['adState', 'Ad state'],
'ad_type': ['adType', 'Ad type'],
'added': ['added', 'Added'],
'approval_status': ['approvalStatus', 'Approval Status'],
'attribute_values': ['attributeValues', 'Attribute Values'],
'audience': ['audience', 'Audience'],
'audience_state': ['audienceState', 'Audience state'],
'avg_cpc': ['avgCPC', 'Avg. CPC', 'Gem. CPC'],
'avg_cpm': ['avgCPM', 'Avg. CPM'],
'avg_cpp': ['avgCPP', 'Avg. CPP'],
'avg_position': ['avgPosition', 'Avg. position', 'Gem. positie'],
'bidding_strategy': ['biddingStrategy', 'Bidding strategy'],
'budget': ['budget', 'Budget'],
'budget_explicitly_shared': ['budgetExplicitlyShared',
'Budget explicitly shared'],
'budget_id': ['budgetID', 'Budget ID'],
'budget_name': ['budgetName', 'Budget Name'],
'budget_period': ['budgetPeriod', 'Budget period'],
'budget_state': ['budgetState', 'Budget state'],
'budget_usage': ['budgetUsage', 'Budget usage'],
'business_phone_number': ['businessPhoneNumber', 'Business phone number'],
'cpc_ace_indicator': ['cPCACEIndicator', 'CPC ACE indicator'],
'cpm_ace_indicator': ['cPMACEIndicator', 'CPM ACE indicator'],
'ctr_ace_indicator': ['cTRACEIndicator', 'CTR ACE indicator'],
'call_fee': ['callFee', 'Call fee'],
'caller_area_code': ['callerAreaCode', 'Caller area code'],
'caller_country_code': ['callerCountryCode', 'Caller country code'],
'campaign': ['campaign', 'Campaign'],
'campaign_id': ['campaignID', 'campaignId', 'Campaign ID', 'Campaign Id'],
'campaign_name': ['campaignName', 'Campaign Name'],
'campaign_state': ['campaignState', 'Campaign state'],
'campaigns': ['campaigns', '# Campaigns'],
'categories': ['categories', 'Categories'],
'city': ['city', 'City'],
'click_id': ['clickId', 'Click Id'],
'click_type': ['clickType', 'Click type'],
'clicks': ['clicks', 'Clicks', 'Aantal klikken'],
'clicks_ace_indicator': ['clicksACEIndicator', 'Clicks ACE indicator'],
'client_name': ['clientName', 'Client name'],
'company_name': ['companyName', 'Company name', 'Campagne'],
'content_impr_share': ['contentImprShare', 'Content Impr. share'],
'content_lost_is_budget': ['contentLostISBudget',
'Content Lost IS (budget)'],
'content_lost_is_rank': ['contentLostISRank', 'Content Lost IS (rank)'],
'conv': ['conv', 'Conv.', 'Conversies'],
'converted_clicks': ['convertedClicks', 'Converted clicks', 'Geconverteerde klikken'],
'conv1_per_click': ['conv1PerClick', 'Conv. (1-per-click)'],
'conv1_per_click_ace_indicator': ['conv1PerClickACEIndicator',
'Conv. (1-per-click) ACE indicator'],
'conv_many_per_click': ['convManyPerClick', 'Conv. (many-per-click)'],
'conv_many_per_click_ace_indicator': ['convManyPerClickACEIndicator',
'Conv. (many-per-click) ACE indicator'],
'conv_rate': ['convRate', 'Conv. rate'],
'conv_rate1_per_click': ['convRate1PerClick', 'Conv. rate (1-per-click)'],
'conv_rate1_per_click_ace_indicator': ['convRate1PerClickACEIndicator',
'Conv. rate (1-per-click) ACE indicator'],
'conv_rate_many_per_click': ['convRateManyPerClick',
'Conv. rate (many-per-click)'],
'conv_rate_many_per_click_ace_indicator': ['convRateManyPerClickACEIndicator',
'Conv. rate (many-per-click) ACE indicator'],
'conversion_action_name': ['conversionActionName',
'Conversion action name'],
'conversion_optimizer_bid_type': ['conversionOptimizerBidType',
'Conversion optimizer bid type'],
'conversion_tracker_id': ['conversionTrackerId', 'Conversion Tracker Id'],
'conversion_tracking_purpose': ['conversionTrackingPurpose',
'Conversion tracking purpose'],
'cost': ['cost', 'Cost', 'kosten'],
'cost_ace_indicator': ['costACEIndicator', 'Cost ACE indicator'],
'cost_conv1_per_click': ['costConv1PerClick',
'Cost / conv. (1-per-click)'],
'cost_conv1_per_click_ace_indicator': ['costConv1PerClickACEIndicator',
'Cost/conv. (1-per-click) ACE indicator'],
'cost_conv_many_per_click': ['costConvManyPerClick',
'Cost / conv. (many-per-click)'],
'cost_conv_many_per_click_ace_indicator': ['costConvManyPerClickACEIndicator',
'Cost/conv. (many-per-click) ACE indicator'],
'country_territory': ['countryTerritory', 'Country/Territory'],
'criteria_display_name': ['criteriaDisplayName', 'Criteria Display Name'],
'criteria_type': ['criteriaType', 'Criteria Type'],
'criterion_id': ['criterionID', 'criterionId', 'Criterion ID'],
'ctr': ['ctr', 'CTR'],
'currency': ['currency', 'Currency'],
'customer_id': ['customerID', 'Customer ID'],
'day': ['day', 'Day'],
'day_of_week': ['dayOfWeek', 'Day of week'],
'default_max_cpc': ['defaultMaxCPC', 'Default max. CPC'],
'delivery_method': ['deliveryMethod', 'Delivery method'],
'description_line1': ['descriptionLine1', 'Description line 1'],
'description_line2': ['descriptionLine2', 'Description line 2'],
'destination_url': ['destinationURL', 'destinationUrl', 'Destination URL', 'Destination Url', 'CreativeDestinationUrl'],
'device': ['device', 'Device'],
'device_preference': ['devicePreference', 'Device preference'],
'display_network_max_cpc': ['displayNetworkMaxCPC',
'Display Network max. CPC'],
'display_url': ['displayURL', 'Display URL'],
'domain': ['domain', 'Domain'],
'duration_seconds': ['durationSeconds', 'Duration (seconds)'],
'dynamic_ad_target': ['dynamicAdTarget', 'Dynamic ad target'],
'dynamically_generated_headline': ['dynamicallyGeneratedHeadline',
'Dynamically generated Headline'],
'end_time': ['endTime', 'End time'],
'enhanced': ['enhanced', 'Enhanced'],
'enhanced_cpc_enabled': ['enhancedCPCEnabled', 'Enhanced CPC enabled'],
'excluded': ['excluded', 'Excluded'],
'exclusion': ['exclusion', 'Exclusion'],
'explicitly_shared': ['explicitlyShared', 'Explicitly shared'],
'feed_id': ['feedID', 'Feed ID'],
'feed_item_id': ['feedItemID', 'Feed item ID'],
'feed_item_status': ['feedItemStatus', 'Feed item status'],
'feed_placeholder_type': ['feedPlaceholderType', 'Feed placeholder type'],
'first_level_sub_categories': ['firstLevelSubCategories',
'First level sub-categories'],
'first_page_cpc': ['firstPageCPC', 'First page CPC'],
'free_click_rate': ['freeClickRate', 'Free click rate'],
'free_click_type': ['freeClickType', 'Free click type'],
'free_clicks': ['freeClicks', 'Free clicks'],
'frequency': ['frequency', 'Frequency'],
'highest_position': ['highestPosition', 'Highest position'],
'hour_of_day': ['hourOfDay', 'Hour of day'],
'image_ad_name': ['imageAdName', 'Image ad name'],
'image_hosting_key': ['imageHostingKey', 'Image hosting key'],
'impressions': ['impressions', 'Impressions', 'Vertoningen'],
'impressions_ace_indicator': ['impressionsACEIndicator',
'Impressions ACE indicator'],
'invalid_click_rate': ['invalidClickRate', 'Invalid click rate'],
'invalid_clicks': ['invalidClicks', 'Invalid clicks'],
'is_negative': ['isNegative', 'Is negative'],
'is_targetable': ['isTargetable', 'Is Targetable'],
'keyword': ['keyword', 'Keyword', 'Zoekterm'],
'keyword_id': ['keywordID', 'Keyword ID'],
'keyword_max_cpc': ['keywordMaxCPC', 'Keyword max CPC'],
'keyword_placement': ['keywordPlacement', 'Keyword / Placement'],
'keyword_placement_destination_url': ['keywordPlacementDestinationURL',
'Keyword/Placement destination URL'],
'keyword_placement_state': ['keywordPlacementState',
'Keyword/Placement state'],
'keyword_state': ['keywordState', 'Keyword state'],
'keyword_text': ['keywordText', 'Keyword text'],
'landing_page_title': ['landingPageTitle', 'Landing Page Title'],
'location': ['location', 'Location'],
'location_extension_source': ['locationExtensionSource',
'Location Extension Source'],
'location_type': ['locationType', 'Location type'],
'login_email': ['loginEmail', 'Login email'],
'lowest_position': ['lowestPosition', 'Lowest position'],
'match_type': ['matchType', 'Match type', 'zoektype'],
'max_cpa': ['maxCPA', 'Max. CPA%'],
'max_cpa1_per_click': ['maxCPA1PerClick', 'Max. CPA (1-per-click)'],
'max_cpc': ['maxCPC', 'Max. CPC', 'maxCpc'],
'max_cpc_source': ['maxCPCSource', 'Max CPC source'],
'max_cpm': ['maxCPM', 'Max. CPM'],
'max_cpm_source': ['maxCPMSource', 'Max CPM Source'],
'max_cpp': ['maxCPP', 'Max. CPP'],
'member_count': ['memberCount', 'Member Count'],
'metro_area': ['metroArea', 'Metro area'],
'month': ['month', 'Month'],
'month_of_year': ['monthOfYear', 'Month of Year'],
'most_specific_location': ['mostSpecificLocation',
'Most specific location'],
'negative_keyword': ['negativeKeyword', 'Negative keyword'],
'network': ['network', 'Network'],
'network_with_search_partners': ['networkWithSearchPartners',
'Network (with search partners)'],
'page': ['page', 'Page'],
'phone_bid_type': ['phoneBidType', 'Phone bid type'],
'phone_calls': ['phoneCalls', 'Phone calls'],
'phone_cost': ['phoneCost', 'Phone cost'],
'phone_impressions': ['phoneImpressions', 'Phone impressions'],
'placement': ['placement', 'Placement'],
'placement_state': ['placementState', 'Placement state'],
'position_ace_indicator': ['positionACEIndicator',
'Position ACE indicator'],
'ptr': ['ptr', 'PTR'],
'quality_score': ['qualityScore', 'Quality score'],
'quarter': ['quarter', 'Quarter'],
'reference_count': ['referenceCount', 'Reference Count'],
'region': ['region', 'Region'],
'relative_ctr': ['relativeCTR', 'Relative CTR'],
'search_exact_match_is': ['searchExactMatchIS', 'Search Exact match IS'],
'search_impr_share': ['searchImprShare', 'Search Impr. share'],
'search_lost_is_budget': ['searchLostISBudget', 'Search Lost IS (budget)'],
'search_lost_is_rank': ['searchLostISRank', 'Search Lost IS (rank)'],
'search_term': ['searchTerm', 'Search term'],
'second_level_sub_categories': ['secondLevelSubCategories',
'Second level sub-categories'],
'shared_set_id': ['sharedSetID', 'Shared Set ID'],
'shared_set_name': ['sharedSetName', 'Shared Set Name'],
'shared_set_type': ['sharedSetType', 'Shared Set Type'],
'start_time': ['startTime', 'Start time'],
'state': ['state', 'State'],
'status': ['status', 'Status'],
'targeting_mode': ['targetingMode', 'Targeting Mode'],
'this_extension_vs_other': ['thisExtensionVsOther',
'This extension vs. Other'],
'time_zone': ['timeZone', 'Time zone'],
'top_level_categories': ['topLevelCategories', 'Top level categories'],
'top_of_page_cpc': ['topOfPageCPC', 'Top of page CPC'],
'top_vs_side': ['topVsSide', 'Top vs. side'],
'topic': ['topic', 'Topic'],
'topic_state': ['topicState', 'Topic state'],
'total_conv_value': ['totalConvValue', 'Total conv. value'],
'total_cost': ['totalCost', 'Total cost'],
'unique_users': ['uniqueUsers', 'Unique Users'],
'url': ['url', 'URL'],
'user_status': ['userStatus'],
'value_conv1_per_click': ['valueConv1PerClick',
'Value / conv. (1-per-click)'],
'value_conv_many_per_click': ['valueConvManyPerClick',
'Value / conv. (many-per-click)'],
'view_through_conv': ['viewThroughConv', 'View-through conv.'],
'view_through_conv_ace_indicator': ['viewThroughConvACEIndicator',
'View-through conv. ACE indicator'],
'week': ['week', 'Week'],
'year': ['year', 'Year']}
xml_x_dict = {
'aCESplit': ['ACE split', 'a_ce_split'],
'account': ['Account', 'account'],
'accountID': ['Account ID', 'account_id'],
'ad': ['Ad', 'ad'],
'adApprovalStatus': ['Ad Approval Status', 'ad_approval_status'],
'adExtensionID': ['Ad Extension ID', 'ad_extension_id'],
'adExtensionType': ['Ad Extension Type', 'ad_extension_type'],
'adGroup': ['Ad group', 'ad_group'],
'adGroupID': ['Ad group ID', 'Ad group Id', 'ad_group_id'],
'adGroupState': ['Ad group state', 'ad_group_state'],
'adID': ['Ad ID', 'ad_id'],
'adState': ['Ad state', 'ad_state'],
'adType': ['Ad type', 'ad_type'],
'added': ['Added', 'added'],
'approvalStatus': ['Approval Status', 'approval_status'],
'attributeValues': ['Attribute Values', 'attribute_values'],
'audience': ['Audience', 'audience'],
'audienceState': ['Audience state', 'audience_state'],
'avgCPC': ['Avg. CPC', 'avg_cpc'],
'avgCPM': ['Avg. CPM', 'avg_cpm'],
'avgCPP': ['Avg. CPP', 'avg_cpp'],
'avgPosition': ['Avg. position', 'avg_position'],
'biddingStrategy': ['Bidding strategy', 'bidding_strategy'],
'budget': ['Budget', 'budget'],
'budgetExplicitlyShared': ['Budget explicitly shared',
'budget_explicitly_shared'],
'budgetID': ['Budget ID', 'budget_id'],
'budgetName': ['Budget Name', 'budget_name'],
'budgetPeriod': ['Budget period', 'budget_period'],
'budgetState': ['Budget state', 'budget_state'],
'budgetUsage': ['Budget usage', 'budget_usage'],
'businessPhoneNumber': ['Business phone number', 'business_phone_number'],
'cPCACEIndicator': ['CPC ACE indicator', 'cpc_ace_indicator'],
'cPMACEIndicator': ['CPM ACE indicator', 'cpm_ace_indicator'],
'cTRACEIndicator': ['CTR ACE indicator', 'ctr_ace_indicator'],
'callFee': ['Call fee', 'call_fee'],
'callerAreaCode': ['Caller area code', 'caller_area_code'],
'callerCountryCode': ['Caller country code', 'caller_country_code'],
'campaign': ['Campaign', 'campaign'],
'campaignID': ['Campaign ID', 'campaign_id'],
'campaignState': ['Campaign state', 'campaign_state'],
'campaigns': ['# Campaigns', 'campaigns'],
'categories': ['Categories', 'categories'],
'city': ['City', 'city'],
'clickId': ['Click Id', 'click_id'],
'clickType': ['Click type', 'click_type'],
'clicks': ['Clicks', 'clicks'],
'clicksACEIndicator': ['Clicks ACE indicator', 'clicks_ace_indicator'],
'clientName': ['Client name', 'client_name'],
'companyName': ['Company name', 'company_name'],
'contentImprShare': ['Content Impr. share', 'content_impr_share'],
'contentLostISBudget': ['Content Lost IS (budget)',
'content_lost_is_budget'],
'contentLostISRank': ['Content Lost IS (rank)', 'content_lost_is_rank'],
'conv': ['Conv.', 'conv'],
'conv1PerClick': ['Conv. (1-per-click)', 'conv1_per_click'],
'conv1PerClickACEIndicator': ['Conv. (1-per-click) ACE indicator',
'conv1_per_click_ace_indicator'],
'convManyPerClick': ['Conv. (many-per-click)', 'conv_many_per_click'],
'convManyPerClickACEIndicator': ['Conv. (many-per-click) ACE indicator',
'conv_many_per_click_ace_indicator'],
'convRate': ['Conv. rate', 'conv_rate'],
'convRate1PerClick': ['Conv. rate (1-per-click)', 'conv_rate1_per_click'],
'convRate1PerClickACEIndicator': ['Conv. rate (1-per-click) ACE indicator',
'conv_rate1_per_click_ace_indicator'],
'convRateManyPerClick': ['Conv. rate (many-per-click)',
'conv_rate_many_per_click'],
'convRateManyPerClickACEIndicator': ['Conv. rate (many-per-click) ACE indicator',
'conv_rate_many_per_click_ace_indicator'],
'conversionActionName': ['Conversion action name',
'conversion_action_name'],
'conversionOptimizerBidType': ['Conversion optimizer bid type',
'conversion_optimizer_bid_type'],
'conversionTrackerId': ['Conversion Tracker Id', 'conversion_tracker_id'],
'conversionTrackingPurpose': ['Conversion tracking purpose',
'conversion_tracking_purpose'],
'cost': ['Cost', 'cost'],
'costACEIndicator': ['Cost ACE indicator', 'cost_ace_indicator'],
'costConv1PerClick': ['Cost / conv. (1-per-click)',
'cost_conv1_per_click'],
'costConv1PerClickACEIndicator': ['Cost/conv. (1-per-click) ACE indicator',
'cost_conv1_per_click_ace_indicator'],
'costConvManyPerClick': ['Cost / conv. (many-per-click)',
'cost_conv_many_per_click'],
'costConvManyPerClickACEIndicator': ['Cost/conv. (many-per-click) ACE indicator',
'cost_conv_many_per_click_ace_indicator'],
'countryTerritory': ['Country/Territory', 'country_territory'],
'criteriaDisplayName': ['Criteria Display Name', 'criteria_display_name'],
'criteriaType': ['Criteria Type', 'criteria_type'],
'criterionID': ['Criterion ID', 'Criterion Id', 'criterion_id'],
'ctr': ['CTR', 'ctr'],
'currency': ['Currency', 'currency'],
'customerID': ['Customer ID', 'customer_id'],
'day': ['Day', 'day'],
'dayOfWeek': ['Day of week', 'day_of_week'],
'defaultMaxCPC': ['Default max. CPC', 'default_max_cpc'],
'deliveryMethod': ['Delivery method', 'delivery_method'],
'descriptionLine1': ['Description line 1', 'description_line1'],
'descriptionLine2': ['Description line 2', 'description_line2'],
'destinationURL': ['Destination URL', 'destination_url'],
'device': ['Device', 'device'],
'devicePreference': ['Device preference', 'device_preference'],
'displayNetworkMaxCPC': ['Display Network max. CPC',
'display_network_max_cpc'],
'displayURL': ['Display URL', 'display_url'],
'domain': ['Domain', 'domain'],
'durationSeconds': ['Duration (seconds)', 'duration_seconds'],
'dynamicAdTarget': ['Dynamic ad target', 'dynamic_ad_target'],
'dynamicallyGeneratedHeadline': ['Dynamically generated Headline',
'dynamically_generated_headline'],
'endTime': ['End time', 'end_time'],
'enhanced': ['Enhanced', 'enhanced'],
'enhancedCPCEnabled': ['Enhanced CPC enabled', 'enhanced_cpc_enabled'],
'excluded': ['Excluded', 'excluded'],
'exclusion': ['Exclusion', 'exclusion'],
'explicitlyShared': ['Explicitly shared', 'explicitly_shared'],
'feedID': ['Feed ID', 'feed_id'],
'feedItemID': ['Feed item ID', 'feed_item_id'],
'feedItemStatus': ['Feed item status', 'feed_item_status'],
'feedPlaceholderType': ['Feed placeholder type', 'feed_placeholder_type'],
'firstLevelSubCategories': ['First level sub-categories',
'first_level_sub_categories'],
'firstPageCPC': ['First page CPC', 'first_page_cpc'],
'freeClickRate': ['Free click rate', 'free_click_rate'],
'freeClickType': ['Free click type', 'free_click_type'],
'freeClicks': ['Free clicks', 'free_clicks'],
'frequency': ['Frequency', 'frequency'],
'highestPosition': ['Highest position', 'highest_position'],
'hourOfDay': ['Hour of day', 'hour_of_day'],
'imageAdName': ['Image ad name', 'image_ad_name'],
'imageHostingKey': ['Image hosting key', 'image_hosting_key'],
'impressions': ['Impressions', 'impressions'],
'impressionsACEIndicator': ['Impressions ACE indicator',
'impressions_ace_indicator'],
'invalidClickRate': ['Invalid click rate', 'invalid_click_rate'],
'invalidClicks': ['Invalid clicks', 'invalid_clicks'],
'isNegative': ['Is negative', 'is_negative'],
'isTargetable': ['Is Targetable', 'is_targetable'],
'keyword': ['Keyword', 'keyword'],
'keywordID': ['Keyword ID', 'keyword_id'],
'keywordMaxCPC': ['Keyword max CPC', 'keyword_max_cpc'],
'keywordPlacement': ['Keyword / Placement', 'keyword_placement'],
'keywordPlacementDestinationURL': ['Keyword/Placement destination URL',
'keyword_placement_destination_url'],
'keywordPlacementState': ['Keyword/Placement state',
'keyword_placement_state'],
'keywordState': ['Keyword state', 'keyword_state'],
'keywordText': ['Keyword text', 'keyword_text'],
'landingPageTitle': ['Landing Page Title', 'landing_page_title'],
'location': ['Location', 'location'],
'locationExtensionSource': ['Location Extension Source',
'location_extension_source'],
'locationType': ['Location type', 'location_type'],
'loginEmail': ['Login email', 'login_email'],
'lowestPosition': ['Lowest position', 'lowest_position'],
'matchType': ['Match type', 'match_type'],
'maxCPA': ['Max. CPA%', 'max_cpa'],
'maxCPA1PerClick': ['Max. CPA (1-per-click)', 'max_cpa1_per_click'],
'maxCPC': ['Max. CPC', 'max_cpc'],
'maxCPCSource': ['Max CPC source', 'max_cpc_source'],
'maxCPM': ['Max. CPM', 'max_cpm'],
'maxCPMSource': ['Max CPM Source', 'max_cpm_source'],
'maxCPP': ['Max. CPP', 'max_cpp'],
'memberCount': ['Member Count', 'member_count'],
'metroArea': ['Metro area', 'metro_area'],
'month': ['Month', 'month'],
'monthOfYear': ['Month of Year', 'month_of_year'],
'mostSpecificLocation': ['Most specific location',
'most_specific_location'],
'negativeKeyword': ['Negative keyword', 'negative_keyword'],
'network': ['Network', 'network'],
'networkWithSearchPartners': ['Network (with search partners)',
'network_with_search_partners'],
'page': ['Page', 'page'],
'phoneBidType': ['Phone bid type', 'phone_bid_type'],
'phoneCalls': ['Phone calls', 'phone_calls'],
'phoneCost': ['Phone cost', 'phone_cost'],
'phoneImpressions': ['Phone impressions', 'phone_impressions'],
'placement': ['Placement', 'placement'],
'placementState': ['Placement state', 'placement_state'],
'positionACEIndicator': ['Position ACE indicator',
'position_ace_indicator'],
'ptr': ['PTR', 'ptr'],
'qualityScore': ['Quality score', 'quality_score'],
'quarter': ['Quarter', 'quarter'],
'referenceCount': ['Reference Count', 'reference_count'],
'region': ['Region', 'region'],
'relativeCTR': ['Relative CTR', 'relative_ctr'],
'searchExactMatchIS': ['Search Exact match IS', 'search_exact_match_is'],
'searchImprShare': ['Search Impr. share', 'search_impr_share'],
'searchLostISBudget': ['Search Lost IS (budget)', 'search_lost_is_budget'],
'searchLostISRank': ['Search Lost IS (rank)', 'search_lost_is_rank'],
'searchTerm': ['Search term', 'search_term'],
'secondLevelSubCategories': ['Second level sub-categories',
'second_level_sub_categories'],
'sharedSetID': ['Shared Set ID', 'shared_set_id'],
'sharedSetName': ['Shared Set Name', 'shared_set_name'],
'sharedSetType': ['Shared Set Type', 'shared_set_type'],
'startTime': ['Start time', 'start_time'],
'state': ['State', 'state'],
'status': ['Status', 'status'],
'targetingMode': ['Targeting Mode', 'targeting_mode'],
'thisExtensionVsOther': ['This extension vs. Other',
'this_extension_vs_other'],
'timeZone': ['Time zone', 'time_zone'],
'topLevelCategories': ['Top level categories', 'top_level_categories'],
'topOfPageCPC': ['Top of page CPC', 'top_of_page_cpc'],
'topVsSide': ['Top vs. side', 'top_vs_side'],
'topic': ['Topic', 'topic'],
'topicState': ['Topic state', 'topic_state'],
'totalConvValue': ['Total conv. value', 'total_conv_value'],
'totalCost': ['Total cost', 'total_cost'],
'uniqueUsers': ['Unique Users', 'unique_users'],
'url': ['URL', 'url'],
'valueConv1PerClick': ['Value / conv. (1-per-click)',
'value_conv1_per_click'],
'valueConvManyPerClick': ['Value / conv. (many-per-click)',
'value_conv_many_per_click'],
'viewThroughConv': ['View-through conv.', 'view_through_conv'],
'viewThroughConvACEIndicator': ['View-through conv. ACE indicator',
'view_through_conv_ace_indicator'],
'week': ['Week', 'week'],
'year': ['Year', 'year']}
# Maps a human-readable AdWords report column header to its identifiers:
#   display name -> [snake_case field name, camelCase API name(s)].
# Entries with three elements carry an alternate spelling seen in reports
# (e.g. 'Criterion Id', 'adGroupId'). This is the display-name-keyed inverse
# of the camelCase-keyed mapping defined above it in this module.
disp_x_dict = {
    '# Campaigns': ['campaigns', 'campaigns'],
    'ACE split': ['a_ce_split', 'aCESplit'],
    'Account': ['account', 'account'],
    'Account ID': ['account_id', 'accountID'],
    'Ad': ['ad', 'ad'],
    'Ad Approval Status': ['ad_approval_status', 'adApprovalStatus'],
    'Ad Extension ID': ['ad_extension_id', 'adExtensionID'],
    'Ad Extension Type': ['ad_extension_type', 'adExtensionType'],
    'Ad ID': ['ad_id', 'adID'],
    'Ad group': ['ad_group', 'adGroup'],
    'Ad group ID': ['ad_group_id', 'adGroupID', 'adGroupId'],
    'Ad group state': ['ad_group_state', 'adGroupState'],
    'Ad state': ['ad_state', 'adState'],
    'Ad type': ['ad_type', 'adType'],
    'Added': ['added', 'added'],
    'Approval Status': ['approval_status', 'approvalStatus'],
    'Attribute Values': ['attribute_values', 'attributeValues'],
    'Audience': ['audience', 'audience'],
    'Audience state': ['audience_state', 'audienceState'],
    'Avg. CPC': ['avg_cpc', 'avgCPC'],
    'Avg. CPM': ['avg_cpm', 'avgCPM'],
    'Avg. CPP': ['avg_cpp', 'avgCPP'],
    'Avg. position': ['avg_position', 'avgPosition'],
    'Bidding strategy': ['bidding_strategy', 'biddingStrategy'],
    'Budget': ['budget', 'budget'],
    'Budget ID': ['budget_id', 'budgetID'],
    'Budget Name': ['budget_name', 'budgetName'],
    'Budget explicitly shared': ['budget_explicitly_shared', 'budgetExplicitlyShared'],
    'Budget period': ['budget_period', 'budgetPeriod'],
    'Budget state': ['budget_state', 'budgetState'],
    'Budget usage': ['budget_usage', 'budgetUsage'],
    'Business phone number': ['business_phone_number', 'businessPhoneNumber'],
    'CPC ACE indicator': ['cpc_ace_indicator', 'cPCACEIndicator'],
    'CPM ACE indicator': ['cpm_ace_indicator', 'cPMACEIndicator'],
    'CTR': ['ctr', 'ctr'],
    'CTR ACE indicator': ['ctr_ace_indicator', 'cTRACEIndicator'],
    'Call fee': ['call_fee', 'callFee'],
    'Caller area code': ['caller_area_code', 'callerAreaCode'],
    'Caller country code': ['caller_country_code', 'callerCountryCode'],
    'Campaign': ['campaign', 'campaign'],
    'Campaign ID': ['campaign_id', 'campaignID', 'campaignId'],
    'Campaign Name': ['campaign_name', 'campaignName'],
    'Campaign state': ['campaign_state', 'campaignState'],
    'Categories': ['categories', 'categories'],
    'City': ['city', 'city'],
    'Click Id': ['click_id', 'clickId'],
    'Click type': ['click_type', 'clickType'],
    'Clicks': ['clicks', 'clicks'],
    'Clicks ACE indicator': ['clicks_ace_indicator', 'clicksACEIndicator'],
    'Client name': ['client_name', 'clientName'],
    'Company name': ['company_name', 'companyName'],
    'Content Impr. share': ['content_impr_share', 'contentImprShare'],
    'Content Lost IS (budget)': ['content_lost_is_budget', 'contentLostISBudget'],
    'Content Lost IS (rank)': ['content_lost_is_rank', 'contentLostISRank'],
    'Conv.': ['conv', 'conv'],
    'Conv. (1-per-click)': ['conv1_per_click', 'conv1PerClick'],
    'Conv. (1-per-click) ACE indicator': ['conv1_per_click_ace_indicator', 'conv1PerClickACEIndicator'],
    'Conv. (many-per-click)': ['conv_many_per_click', 'convManyPerClick'],
    'Conv. (many-per-click) ACE indicator': ['conv_many_per_click_ace_indicator', 'convManyPerClickACEIndicator'],
    'Conv. rate': ['conv_rate', 'convRate'],
    'Conv. rate (1-per-click)': ['conv_rate1_per_click', 'convRate1PerClick'],
    'Conv. rate (1-per-click) ACE indicator': ['conv_rate1_per_click_ace_indicator', 'convRate1PerClickACEIndicator'],
    'Conv. rate (many-per-click)': ['conv_rate_many_per_click', 'convRateManyPerClick'],
    'Conv. rate (many-per-click) ACE indicator': ['conv_rate_many_per_click_ace_indicator', 'convRateManyPerClickACEIndicator'],
    'Conversion Tracker Id': ['conversion_tracker_id', 'conversionTrackerId'],
    'Conversion action name': ['conversion_action_name', 'conversionActionName'],
    'Conversion optimizer bid type': ['conversion_optimizer_bid_type', 'conversionOptimizerBidType'],
    'Conversion tracking purpose': ['conversion_tracking_purpose', 'conversionTrackingPurpose'],
    'Cost': ['cost', 'cost'],
    'Cost / conv. (1-per-click)': ['cost_conv1_per_click', 'costConv1PerClick'],
    'Cost / conv. (many-per-click)': ['cost_conv_many_per_click', 'costConvManyPerClick'],
    'Cost ACE indicator': ['cost_ace_indicator', 'costACEIndicator'],
    'Cost/conv. (1-per-click) ACE indicator': ['cost_conv1_per_click_ace_indicator', 'costConv1PerClickACEIndicator'],
    'Cost/conv. (many-per-click) ACE indicator': ['cost_conv_many_per_click_ace_indicator', 'costConvManyPerClickACEIndicator'],
    'Country/Territory': ['country_territory', 'countryTerritory'],
    'Criteria Display Name': ['criteria_display_name', 'criteriaDisplayName'],
    'Criteria Type': ['criteria_type', 'criteriaType'],
    'Criterion ID': ['criterion_id', 'criterionID', 'Criterion Id'],
    'Currency': ['currency', 'currency'],
    'Customer ID': ['customer_id', 'customerID'],
    'Day': ['day', 'day'],
    'Day of week': ['day_of_week', 'dayOfWeek'],
    'Default max. CPC': ['default_max_cpc', 'defaultMaxCPC'],
    'Delivery method': ['delivery_method', 'deliveryMethod'],
    'Description line 1': ['description_line1', 'descriptionLine1'],
    'Description line 2': ['description_line2', 'descriptionLine2'],
    'Destination URL': ['destination_url', 'destinationURL'],
    'Device': ['device', 'device'],
    'Device preference': ['device_preference', 'devicePreference'],
    'Display Network max. CPC': ['display_network_max_cpc', 'displayNetworkMaxCPC'],
    'Display URL': ['display_url', 'displayURL'],
    'Domain': ['domain', 'domain'],
    'Duration (seconds)': ['duration_seconds', 'durationSeconds'],
    'Dynamic ad target': ['dynamic_ad_target', 'dynamicAdTarget'],
    'Dynamically generated Headline': ['dynamically_generated_headline', 'dynamicallyGeneratedHeadline'],
    'End time': ['end_time', 'endTime'],
    'Enhanced': ['enhanced', 'enhanced'],
    'Enhanced CPC enabled': ['enhanced_cpc_enabled', 'enhancedCPCEnabled'],
    'Excluded': ['excluded', 'excluded'],
    'Exclusion': ['exclusion', 'exclusion'],
    'Explicitly shared': ['explicitly_shared', 'explicitlyShared'],
    'Feed ID': ['feed_id', 'feedID'],
    'Feed item ID': ['feed_item_id', 'feedItemID'],
    'Feed item status': ['feed_item_status', 'feedItemStatus'],
    'Feed placeholder type': ['feed_placeholder_type', 'feedPlaceholderType'],
    'First level sub-categories': ['first_level_sub_categories', 'firstLevelSubCategories'],
    'First page CPC': ['first_page_cpc', 'firstPageCPC'],
    'Free click rate': ['free_click_rate', 'freeClickRate'],
    'Free click type': ['free_click_type', 'freeClickType'],
    'Free clicks': ['free_clicks', 'freeClicks'],
    'Frequency': ['frequency', 'frequency'],
    'Highest position': ['highest_position', 'highestPosition'],
    'Hour of day': ['hour_of_day', 'hourOfDay'],
    'Image ad name': ['image_ad_name', 'imageAdName'],
    'Image hosting key': ['image_hosting_key', 'imageHostingKey'],
    'Impressions': ['impressions', 'impressions'],
    'Impressions ACE indicator': ['impressions_ace_indicator', 'impressionsACEIndicator'],
    'Invalid click rate': ['invalid_click_rate', 'invalidClickRate'],
    'Invalid clicks': ['invalid_clicks', 'invalidClicks'],
    'Is Targetable': ['is_targetable', 'isTargetable'],
    'Is negative': ['is_negative', 'isNegative'],
    'Keyword': ['keyword', 'keyword'],
    'Keyword / Placement': ['keyword_placement', 'keywordPlacement'],
    'Keyword ID': ['keyword_id', 'keywordID'],
    'Keyword max CPC': ['keyword_max_cpc', 'keywordMaxCPC'],
    'Keyword state': ['keyword_state', 'keywordState'],
    'Keyword text': ['keyword_text', 'keywordText'],
    'Keyword/Placement destination URL': ['keyword_placement_destination_url', 'keywordPlacementDestinationURL'],
    'Keyword/Placement state': ['keyword_placement_state', 'keywordPlacementState'],
    'Landing Page Title': ['landing_page_title', 'landingPageTitle'],
    'Location': ['location', 'location'],
    'Location Extension Source': ['location_extension_source', 'locationExtensionSource'],
    'Location type': ['location_type', 'locationType'],
    'Login email': ['login_email', 'loginEmail'],
    'Lowest position': ['lowest_position', 'lowestPosition'],
    'Match type': ['match_type', 'matchType'],
    'Max CPC source': ['max_cpc_source', 'maxCPCSource'],
    'Max CPM Source': ['max_cpm_source', 'maxCPMSource'],
    'Max. CPA (1-per-click)': ['max_cpa1_per_click', 'maxCPA1PerClick'],
    'Max. CPA%': ['max_cpa', 'maxCPA'],
    'Max. CPC': ['max_cpc', 'maxCPC'],
    'Max. CPM': ['max_cpm', 'maxCPM'],
    'Max. CPP': ['max_cpp', 'maxCPP'],
    'Member Count': ['member_count', 'memberCount'],
    'Metro area': ['metro_area', 'metroArea'],
    'Month': ['month', 'month'],
    'Month of Year': ['month_of_year', 'monthOfYear'],
    'Most specific location': ['most_specific_location', 'mostSpecificLocation'],
    'Negative keyword': ['negative_keyword', 'negativeKeyword'],
    'Network': ['network', 'network'],
    'Network (with search partners)': ['network_with_search_partners', 'networkWithSearchPartners'],
    'PTR': ['ptr', 'ptr'],
    'Page': ['page', 'page'],
    'Phone bid type': ['phone_bid_type', 'phoneBidType'],
    'Phone calls': ['phone_calls', 'phoneCalls'],
    'Phone cost': ['phone_cost', 'phoneCost'],
    'Phone impressions': ['phone_impressions', 'phoneImpressions'],
    'Placement': ['placement', 'placement'],
    'Placement state': ['placement_state', 'placementState'],
    'Position ACE indicator': ['position_ace_indicator', 'positionACEIndicator'],
    'Quality score': ['quality_score', 'qualityScore'],
    'Quarter': ['quarter', 'quarter'],
    'Reference Count': ['reference_count', 'referenceCount'],
    'Region': ['region', 'region'],
    'Relative CTR': ['relative_ctr', 'relativeCTR'],
    'Search Exact match IS': ['search_exact_match_is', 'searchExactMatchIS'],
    'Search Impr. share': ['search_impr_share', 'searchImprShare'],
    'Search Lost IS (budget)': ['search_lost_is_budget', 'searchLostISBudget'],
    'Search Lost IS (rank)': ['search_lost_is_rank', 'searchLostISRank'],
    'Search term': ['search_term', 'searchTerm'],
    'Second level sub-categories': ['second_level_sub_categories', 'secondLevelSubCategories'],
    'Shared Set ID': ['shared_set_id', 'sharedSetID'],
    'Shared Set Name': ['shared_set_name', 'sharedSetName'],
    'Shared Set Type': ['shared_set_type', 'sharedSetType'],
    'Start time': ['start_time', 'startTime'],
    'State': ['state', 'state'],
    'Status': ['status', 'status'],
    'Targeting Mode': ['targeting_mode', 'targetingMode'],
    'This extension vs. Other': ['this_extension_vs_other', 'thisExtensionVsOther'],
    'Time zone': ['time_zone', 'timeZone'],
    'Top level categories': ['top_level_categories', 'topLevelCategories'],
    'Top of page CPC': ['top_of_page_cpc', 'topOfPageCPC'],
    'Top vs. side': ['top_vs_side', 'topVsSide'],
    'Topic': ['topic', 'topic'],
    'Topic state': ['topic_state', 'topicState'],
    'Total conv. value': ['total_conv_value', 'totalConvValue'],
    'Total cost': ['total_cost', 'totalCost'],
    'URL': ['url', 'url'],
    'Unique Users': ['unique_users', 'uniqueUsers'],
    'Value / conv. (1-per-click)': ['value_conv1_per_click', 'valueConv1PerClick'],
    'Value / conv. (many-per-click)': ['value_conv_many_per_click', 'valueConvManyPerClick'],
    'View-through conv.': ['view_through_conv', 'viewThroughConv'],
    'View-through conv. ACE indicator': ['view_through_conv_ace_indicator', 'viewThroughConvACEIndicator'],
    'Week': ['week', 'week'],
    'Year': ['year', 'year']}
########################################################################################################################
# Debug aid: uncomment the line below to announce when this module is executed.
#print "you just ran pak/aw/reporting.py"
########################################################################################################################
# testing
# print mk_report_query_str(varList='q_km_picc',start_date=21)
# get_account_id('dict')
# {'AU 01': 3851930085,
# 'DE 01': 1194790660,
# 'DE 01-ALL': 6985472731,
# 'DE 01-INDIE': 5556267758,
# 'DE 01-ROW': 2444288938,
# 'DE 02': 2556715694,
# 'DE 04': 8972217420,
# 'DK 01': 7505070892,
# 'DK 01-INDIE': 4054652176,
# 'DK 02-INDIE': 1846160625,
# 'DK 03-INDIE': 9520958074,
# 'EN 01-ROW': 9930643268,
# 'EN 01-ROW-INDIE': 4232517800,
# 'EN 02-ROW': 1522899549,
# 'EN 10-ROW': 5584281294,
# 'EN 11-ROW': 7057635394,
# 'ES 01': 9908456190,
# 'ES 01-ALL': 8994980430,
# 'ES 01-CONT': 7874475692,
# 'ES 01-INDIE': 7180305048,
# 'ES 01-ROW': 8198397935,
# 'ES 01-ROW-INDIE': 6340692275,
# 'ES 02': 6005714737,
# 'ES 03': 7197651089,
# 'FI 01': 1296579139,
# 'FI 01-INDIE': 5715846021,
# 'FI 02-INDIE': 9571621125,
# 'FI 03-INDIE': 8861581621,
# 'FI 04-INDIE': 2081740278,
# 'FR 01': 5911041630,
# 'FR 01-ALL': 2269774098,
# 'FR 01-INDIE': 5217295598,
# 'FR 02': 4687005389,
# 'FR 02-INDIE': 4925203694,
# 'FR 03': 7466089112,
# 'FR 03-INDIE': 9467453333,
# 'FR 04-INDIE': 5965796572,
# 'GR 01': 7885244288,
# 'IT 01': 2519329330,
# 'IT 01-ALL': 1681185186,
# 'IT 01-CONT': 6177392492,
# 'IT 01-INDIE': 3274557238,
# 'IT 02': 6885141520,
# 'IT 03': 1322961450,
# 'IT 03-INDIE': 8473689494,
# 'IT 04-INDIE': 6181015380,
# 'IT 08': 2054247047,
# 'JP 01': 1672753368,
# 'NL 01': 9274485992,
# 'NL 01-INDIE': 6859081627,
# 'NO 01': 9313644618,
# 'NO 01-INDIE': 5127918221,
# 'NO 02-INDIE': 9376769080,
# 'NO 03-INDIE': 2030487180,
# 'PL 01': 8995156233,
# 'PT 01': 7897882635,
# 'RU 01': 4088933886,
# 'SE 01': 5293372478,
# 'SE 01-INDIE': 6231052325,
# 'SE 02-INDIE': 4074349225,
# 'SE 03-INDIE': 2664341927,
# 'UK 01': 9543142488,
# 'UK 01-INDIE': 8975012908,
# 'UK 03': 5615378768,
# 'US 01-INDIE': 7938359658,
# 'US 03': 7214411738,
# 'US 05': 5158555927,
# 'ZH 01': 5792026168,
# 'ZH 01-INDIE': 5135270078,
# 'test': 7998744469}
if __name__=="__main__":
    # Smoke test: download one search-query performance report and print its size.
    import os
    # Downstream helpers read data locations from the MS_DATA environment variable.
    os.environ['MS_DATA'] = '/D/Dropbox/dev/py/data/'
    # AWQL query for a fixed three-day window (June 7-9, 2013).
    report_query_str = 'SELECT Query, KeywordId, Impressions, AveragePosition, Clicks FROM SEARCH_QUERY_PERFORMANCE_REPORT DURING 20130607,20130609'
    # get_report_downloader / download_report are defined earlier in this module.
    report_downloader = get_report_downloader('AU 01')
    df = download_report(report_downloader=report_downloader,report_query_str=report_query_str,download_format='df')
    print(len(df))
| {
"repo_name": "thorwhalen/ut",
"path": "aw/reporting.py",
"copies": "1",
"size": "58017",
"license": "mit",
"hash": -1168586740445189600,
"line_mean": 48.8,
"line_max": 148,
"alpha_frac": 0.6012030956,
"autogenerated": false,
"ratio": 3.5253691438293737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4626572239429374,
"avg_score": null,
"num_lines": null
} |
from adxl345 import ADXL345
from math import sqrt
import time
import requests
# Endpoint of the monitoring server that collects machine run/idle states.
URL = 'http://admin.kaist.ac.kr:3535/get_data?'
# Identifier of this machine; sent to the server and encoded as the beacon major.
ID = '1'
# Accumulated motion (sum of sample-to-sample deltas) above this counts as "ON".
ON_OFF_STANDARD = 0.12
# Seconds to wait between accelerometer samples.
SLEEP_DELAY = 0.1
# Number of accelerometer samples taken per check_onoff() call.
ACCUMULATED_NUMBER = 10
# Hysteresis threshold: consecutive same-direction checks needed to flip state.
ACCUMULATED_STANDARD = 10
# Use BLE Beacon
BLE_USED = False
# Fixed hex
BLE_HEX_FIXED_FORWARD = 'sudo hcitool -i hci0 cmd 0x08 0x0008 1E 02 01 1A 1A FF 4C 00 02 15 E2 0A 39 F4 73 F5 4B C4 A1 2F 17 D1 AD 07 A9 61 '
# Major (Machine id)
BLE_HEX_MACHINE_ID = '00 0' + ID + ' '
# Minor (State 0: idle, 1: running)
BLE_HEX_STATE_IDLE = '00 00 '
BLE_HEX_STATE_RUN = '00 01 '
BLE_HEX_FIXED_BACK = 'C8 00 '
if BLE_USED:
    # subprocess is only needed when broadcasting over BLE.
    from subprocess import call, Popen
def check_onoff(adxl345):
std = 0
x, y, z = 0, 0, 0
for i in range(ACCUMULATED_NUMBER):
axes = adxl345.getAxes(True)
x_before, y_before, z_before = x, y, z
x, y, z = axes['x'], axes['y'], axes['z']
if i != 0:
std += sqrt((x-x_before)**2 + (y-y_before)**2 + (z-z_before)**2)
time.sleep(SLEEP_DELAY)
if std > ON_OFF_STANDARD:
print "- ON " + str(std)
return True
else:
print "- OFF " + str(std)
return False
def send_state(state):
if state:
print "* Send running_state"
r = requests.get(URL+'id='+ID+'&state=run')
else:
print "* Send idle_state"
r = requests.get(URL+'id='+ID+'&state=idle')
def change_beacon_state(state):
if state:
print "* Beacon_state running"
msg = BLE_HEX_FIXED_FORWARD + BLE_HEX_MACHINE_ID + BLE_HEX_STATE_RUN + BLE_HEX_FIXED_BACK
call(msg, shell=True)
else:
print "* Beacon_state idle"
msg = BLE_HEX_FIXED_FORWARD + BLE_HEX_MACHINE_ID + BLE_HEX_STATE_IDLE + BLE_HEX_FIXED_BACK
call(msg, shell=True)
if __name__ == "__main__":
    # Poll the accelerometer forever; flip the reported state only after the
    # hysteresis counter crosses ACCUMULATED_STANDARD, to debounce brief motion.
    adxl345 = ADXL345()
    is_running = False
    count = 0
    send_state(is_running)
    if BLE_USED:
        Popen(['./scripts/init.sh'], shell=True)
        change_beacon_state(is_running)
    while True:
        if check_onoff(adxl345):
            # Motion detected: grow the counter (capped at 2x the threshold).
            if count < ACCUMULATED_STANDARD*2:
                count += 1
            if not is_running and count > ACCUMULATED_STANDARD:
                is_running = True
                send_state(is_running)
                if BLE_USED:
                    change_beacon_state(is_running)
        else:
            # No motion: shrink the counter toward zero.
            if count > 0:
                count -= 1
            if is_running and count < ACCUMULATED_STANDARD+1:
                is_running = False
                send_state(is_running)
                if BLE_USED:
                    change_beacon_state(is_running)
| {
"repo_name": "ben-jung/iow-rpi",
"path": "sensor.py",
"copies": "1",
"size": "2702",
"license": "bsd-2-clause",
"hash": -1977219367714727700,
"line_mean": 27.4421052632,
"line_max": 141,
"alpha_frac": 0.5662472243,
"autogenerated": false,
"ratio": 3.0257558790593504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.409200310335935,
"avg_score": null,
"num_lines": null
} |
import smbus
#from time import sleep
bus = smbus.SMBus(1)
# ADXL345 constants
EARTH_GRAVITY_MS2 = 9.80665  # standard gravity; converts g to m/s^2
SCALE_MULTIPLIER = 0.004  # scale factor applied to raw axis counts (g per count)
# Register addresses written/read over the I2C bus
DATA_FORMAT = 0x31
BW_RATE = 0x2C
POWER_CTL = 0x2D
# Output data-rate flags written to the BW_RATE register
BW_RATE_1600HZ = 0x0F
BW_RATE_800HZ = 0x0E
BW_RATE_400HZ = 0x0D
BW_RATE_200HZ = 0x0C
BW_RATE_100HZ = 0x0B
BW_RATE_50HZ = 0x0A
BW_RATE_25HZ = 0x09
# Measurement-range flags OR'd into DATA_FORMAT
RANGE_2G = 0x00
RANGE_4G = 0x01
RANGE_8G = 0x02
RANGE_16G = 0x03
MEASURE = 0x08  # written to POWER_CTL to start measuring
AXES_DATA = 0x32  # first of six axis-data registers, read as one block
class ADXL345:
    """Minimal driver for an ADXL345 accelerometer on the module-level I2C bus."""

    address = None  # I2C address of the device

    def __init__(self, address=0x53):
        self.address = address
        self.setBandwidthRate(BW_RATE_100HZ)
        self.setRange(RANGE_2G)
        self.enableMeasurement()

    def enableMeasurement(self):
        """Put the device into measurement mode (POWER_CTL measure bit)."""
        bus.write_byte_data(self.address, POWER_CTL, MEASURE)

    def setBandwidthRate(self, rate_flag):
        """Set the output data rate; pass one of the BW_RATE_* flags."""
        bus.write_byte_data(self.address, BW_RATE, rate_flag)

    def setRange(self, range_flag):
        """Set the measurement range for 10-bit readings.

        Read-modify-writes DATA_FORMAT so unrelated bits are preserved.
        """
        value = bus.read_byte_data(self.address, DATA_FORMAT)
        value &= ~0x0F  # clear the low nibble (range and justify bits)
        value |= range_flag
        value |= 0x08  # keep bit 3 set, as the original driver does -- TODO confirm against datasheet (FULL_RES)
        bus.write_byte_data(self.address, DATA_FORMAT, value)

    def getAxes(self, gforce=False):
        """Return the current reading for each axis as {'x': ..., 'y': ..., 'z': ...}.

        gforce:
            False (default): result is returned in m/s^2
            True           : result is returned in gs
        """
        # Renamed from 'bytes' to avoid shadowing the builtin.
        raw = bus.read_i2c_block_data(self.address, AXES_DATA, 6)

        def _to_signed(lo, hi):
            # Combine a little-endian byte pair and sign-extend 16-bit
            # two's complement. (The original tested '1 << 16 - 1', which
            # parses as 1 << 15 -- the sign bit -- written explicitly here.)
            value = lo | (hi << 8)
            if value & (1 << 15):
                value -= 1 << 16
            return value

        x = _to_signed(raw[0], raw[1]) * SCALE_MULTIPLIER
        y = _to_signed(raw[2], raw[3]) * SCALE_MULTIPLIER
        z = _to_signed(raw[4], raw[5]) * SCALE_MULTIPLIER
        if not gforce:
            x = x * EARTH_GRAVITY_MS2
            y = y * EARTH_GRAVITY_MS2
            z = z * EARTH_GRAVITY_MS2
        return {"x": round(x, 4), "y": round(y, 4), "z": round(z, 4)}
if __name__ == "__main__":
    # if run directly we'll just create an instance of the class and output
    # the current readings
    adxl345 = ADXL345()
    axes = adxl345.getAxes(True)  # readings in g
    print("ADXL345 on address 0x%x:" % (adxl345.address))
    print(" x = %.3fG" % (axes['x']))
    print(" y = %.3fG" % (axes['y']))
    print(" z = %.3fG" % (axes['z']))
| {
"repo_name": "chasecaleb/adxl345-python",
"path": "adxl345.py",
"copies": "1",
"size": "3068",
"license": "bsd-3-clause",
"hash": 1978653184105043500,
"line_mean": 26.3928571429,
"line_max": 76,
"alpha_frac": 0.563559322,
"autogenerated": false,
"ratio": 2.9415148609779482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40050741829779485,
"avg_score": null,
"num_lines": null
} |
import smbus
from time import sleep
#bus = smbus.SMBus(0) #GEN1_I2C
bus = smbus.SMBus(1) #GEN2_I2C
#bus = smbus.SMBus(4) #PWR_I2C
# ADXL345 constants
EARTH_GRAVITY_MS2 = 9.80665  # standard gravity; converts g to m/s^2
SCALE_MULTIPLIER = 0.004  # scale factor applied to raw axis counts (g per count)
# Register addresses written/read over the I2C bus
DATA_FORMAT = 0x31
BW_RATE = 0x2C
POWER_CTL = 0x2D
# Output data-rate flags written to the BW_RATE register
BW_RATE_1600HZ = 0x0F
BW_RATE_800HZ = 0x0E
BW_RATE_400HZ = 0x0D
BW_RATE_200HZ = 0x0C
BW_RATE_100HZ = 0x0B
BW_RATE_50HZ = 0x0A
BW_RATE_25HZ = 0x09
# Measurement-range flags OR'd into DATA_FORMAT
RANGE_2G = 0x00
RANGE_4G = 0x01
RANGE_8G = 0x02
RANGE_16G = 0x03
MEASURE = 0x08  # written to POWER_CTL to start measuring
AXES_DATA = 0x32  # first of six axis-data registers, read as one block
class ADXL345:
    """Driver for an ADXL345 accelerometer using the module-level I2C bus.

    Cleanups over the original: removed stray semicolons, stopped shadowing
    the builtin 'bytes', factored the triplicated sign-extension logic into
    a helper, and replaced the 'gforce == False' comparison with 'not gforce'.
    """

    address = None  # I2C address of the device

    def __init__(self, address = 0x53):
        self.address = address
        self.setBandwidthRate(BW_RATE_100HZ)
        self.setRange(RANGE_2G)
        self.enableMeasurement()

    def enableMeasurement(self):
        """Put the device into measurement mode (POWER_CTL measure bit)."""
        bus.write_byte_data(self.address, POWER_CTL, MEASURE)

    def setBandwidthRate(self, rate_flag):
        """Set the output data rate; pass one of the BW_RATE_* flags."""
        bus.write_byte_data(self.address, BW_RATE, rate_flag)

    # set the measurement range for 10-bit readings
    def setRange(self, range_flag):
        value = bus.read_byte_data(self.address, DATA_FORMAT)
        value &= ~0x0F  # clear the low nibble (range and justify bits)
        value |= range_flag
        value |= 0x08  # keep bit 3 set, as the original driver does -- TODO confirm against datasheet (FULL_RES)
        bus.write_byte_data(self.address, DATA_FORMAT, value)

    # returns the current reading from the sensor for each axis
    #
    # parameter gforce:
    #    False (default): result is returned in m/s^2
    #    True           : result is returned in gs
    def getAxes(self, gforce = False):
        raw = bus.read_i2c_block_data(self.address, AXES_DATA, 6)

        def _to_signed(lo, hi):
            # Little-endian byte pair, sign-extended from 16-bit two's
            # complement. (The original '1 << 16 - 1' parses as 1 << 15.)
            value = lo | (hi << 8)
            if value & (1 << 15):
                value -= 1 << 16
            return value

        x = _to_signed(raw[0], raw[1]) * SCALE_MULTIPLIER
        y = _to_signed(raw[2], raw[3]) * SCALE_MULTIPLIER
        z = _to_signed(raw[4], raw[5]) * SCALE_MULTIPLIER
        if not gforce:
            x = x * EARTH_GRAVITY_MS2
            y = y * EARTH_GRAVITY_MS2
            z = z * EARTH_GRAVITY_MS2
        return {"x": round(x, 4), "y": round(y, 4), "z": round(z, 4)}
if __name__ == "__main__":
    # if run directly we'll just create an instance of the class and output
    # the current readings
    adxl345 = ADXL345()
    axes = adxl345.getAxes(True)  # readings in g
    print ("ADXL345 on address 0x%x:" % (adxl345.address))
    print (" x = %.3fG" % ( axes['x'] ))
    print (" y = %.3fG" % ( axes['y'] ))
print (" z = %.3fG" % ( axes['z'] )) | {
"repo_name": "NeuroRoboticTech/Jetduino",
"path": "Software/Python/grove_accelerometer_16g/adxl345.py",
"copies": "1",
"size": "3057",
"license": "mit",
"hash": -3383403207416500000,
"line_mean": 26.5495495495,
"line_max": 76,
"alpha_frac": 0.551193981,
"autogenerated": false,
"ratio": 2.8866855524079322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3937879533407932,
"avg_score": null,
"num_lines": null
} |
import smbus
from time import sleep
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering for the GPIO output pins below.
GPIO.setmode(GPIO.BCM)
# Eight pins reserved as digital outputs (see the disabled code in getAxes).
GPIO_OUT = [5, 6, 13, 19, 26, 12, 16 ,20]
for i in range(8):
    GPIO.setup(GPIO_OUT[i],GPIO.OUT)
# select the correct i2c bus for this revision of Raspberry Pi
revision = ([l[12:-1] for l in open('/proc/cpuinfo','r').readlines() if l[:8]=="Revision"]+['0000'])[0]
bus = smbus.SMBus(1 if int(revision, 16) >= 4 else 0)
# ADXL345 constants
EARTH_GRAVITY_MS2 = 9.80665  # standard gravity; converts g to m/s^2
SCALE_MULTIPLIER = 0.004  # scale factor applied to raw axis counts (g per count)
# Register addresses written/read over the I2C bus
DATA_FORMAT = 0x31
BW_RATE = 0x2C
POWER_CTL = 0x2D
# Output data-rate flags written to the BW_RATE register
BW_RATE_1600HZ = 0x0F
BW_RATE_800HZ = 0x0E
BW_RATE_400HZ = 0x0D
BW_RATE_200HZ = 0x0C
BW_RATE_100HZ = 0x0B
BW_RATE_50HZ = 0x0A
BW_RATE_25HZ = 0x09
# Measurement-range flags OR'd into DATA_FORMAT
RANGE_2G = 0x00
RANGE_4G = 0x01
RANGE_8G = 0x02
RANGE_16G = 0x03
MEASURE = 0x08  # written to POWER_CTL to start measuring
AXES_DATA = 0x32  # first of six axis-data registers, read as one block
class ADXL345:
    # Driver variant intended to also mirror the X reading onto GPIO pins;
    # the GPIO output itself is currently commented out inside getAxes.
    #LSB to MSB
    address = None  # I2C address of the device
    def __init__(self, address = 0x53):
        self.address = address
        self.setBandwidthRate(BW_RATE_100HZ)
        self.setRange(RANGE_2G)
        self.enableMeasurement()
    def enableMeasurement(self):
        # Put the device into measurement mode (POWER_CTL measure bit).
        bus.write_byte_data(self.address, POWER_CTL, MEASURE)
    def setBandwidthRate(self, rate_flag):
        # Set the output data rate; pass one of the BW_RATE_* flags.
        bus.write_byte_data(self.address, BW_RATE, rate_flag)
    # set the measurement range for 10-bit readings
    def setRange(self, range_flag):
        value = bus.read_byte_data(self.address, DATA_FORMAT)
        value &= ~0x0F;
        value |= range_flag;
        value |= 0x08;
        bus.write_byte_data(self.address, DATA_FORMAT, value)
    # returns the current reading from the sensor for each axis
    #
    # parameter gforce:
    #    False (default): result is returned in m/s^2
    #    True           : result is returned in gs
    def getAxes(self, gforce = False):
        bytes = bus.read_i2c_block_data(self.address, AXES_DATA, 6)
        # Re-assert BCM numbering: GPIO.cleanup() at the end of this method
        # resets the GPIO state on every call.
        GPIO.setmode(GPIO.BCM)
        x = bytes[0] | (bytes[1] << 8)
        if(x & (1 << 16 - 1)):  # note: precedence makes this 1 << 15, the sign bit
            x = x - (1<<16)
        y = bytes[2] | (bytes[3] << 8)
        if(y & (1 << 16 - 1)):
            y = y - (1<<16)
        z = bytes[4] | (bytes[5] << 8)
        if(z & (1 << 16 - 1)):
            z = z - (1<<16)
        # Offset the signed readings to a non-negative range, then drop
        # two LSBs so the value fits in 8 bits for the (disabled) GPIO output.
        x1 = x + 512
        y1 = y + 512
        z1 = z + 512
        #TODO: truncate the bits to 8
        x1 = x1 >> 2
        y1 = y1 >> 2
        z1 = z1 >> 2
        #bits = []
        #suppose we output x axix acceleration via GPIO_OUT0~8
        # for i in range(8):
        #     bits.append((x1 & (0b1 << i))>>i)
        #     GPIO.output(GPIO_OUT[i], bits[i])
        x = x * SCALE_MULTIPLIER
        y = y * SCALE_MULTIPLIER
        z = z * SCALE_MULTIPLIER
        if gforce == False:
            x = x * EARTH_GRAVITY_MS2
            y = y * EARTH_GRAVITY_MS2
            z = z * EARTH_GRAVITY_MS2
        x = round(x, 4)
        y = round(y, 4)
        z = round(z, 4)
        GPIO.cleanup()
        return {"x": x, "y": y, "z": z}
if __name__ == "__main__":
    # if run directly we'll just create an instance of the class and output
    # the current readings
    adxl345 = ADXL345()
    axes = adxl345.getAxes(True)  # readings in g (Python 2 print statements below)
    print "ADXL345 on address 0x%x:" % (adxl345.address)
    print " x = %.3fG" % ( axes['x'] )
    print " y = %.3fG" % ( axes['y'] )
    print " z = %.3fG" % ( axes['z'] )
| {
"repo_name": "Kaiyan2015/MEng",
"path": "adxl345.py",
"copies": "1",
"size": "3472",
"license": "bsd-3-clause",
"hash": 3849775709629254700,
"line_mean": 24.1594202899,
"line_max": 103,
"alpha_frac": 0.5944700461,
"autogenerated": false,
"ratio": 2.556701030927835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8038280694220958,
"avg_score": 0.12257807656137532,
"num_lines": 138
} |
from machine import I2C, Pin
from time import sleep
# select the correct i2c bus for this revision of Raspberry Pi
#revision = ([l[12:-1] for l in open('/proc/cpuinfo','r').readlines() if l[:8]=="Revision"]+['0000'])[0]
#bus = smbus.SMBus(1 if int(revision, 16) >= 4 else 0)
# MicroPython port: single module-level I2C bus on SCL=pin 5, SDA=pin 4
# at 100 kHz, shared by all ADXL345_upy instances.
bus = I2C(scl = Pin(5), sda = Pin(4), freq = 100000)
# ADXL345 constants
EARTH_GRAVITY_MS2 = 9.80665  # standard gravity, m/s^2
SCALE_MULTIPLIER = 0.004  # scale factor applied to raw counts; presumably g/LSB — confirm against datasheet
# Register addresses
DATA_FORMAT = 0x31
BW_RATE = 0x2C
POWER_CTL = 0x2D
# Output-data-rate codes, wrapped in one-element lists so they can be passed
# straight to bytearray() for I2C.writeto_mem().
BW_RATE_1600HZ = [0x0F]
BW_RATE_800HZ = [0x0E]
BW_RATE_400HZ = [0x0D]
BW_RATE_200HZ = [0x0C]
BW_RATE_100HZ = [0x0B]
BW_RATE_50HZ = [0x0A]
BW_RATE_25HZ = [0x09]
# Measurement range codes for DATA_FORMAT's low bits.
RANGE_2G = 0x00
RANGE_4G = 0x01
RANGE_8G = 0x02
RANGE_16G = 0x03
MEASURE = [0x08]  # POWER_CTL payload that starts sampling
AXES_DATA = 0x32  # first of six consecutive axis-data registers
class ADXL345_upy:
    """MicroPython driver for the ADXL345 accelerometer on the module-level I2C bus."""
    # I2C address of the device; replaced per-instance in __init__.
    address = None
    def __init__(self, sensor_id, address = 0x53):
        self.sensor_id = sensor_id
        self.address = address
        self.setBandwidthRate(BW_RATE_100HZ)
        self.setRange(RANGE_2G)
        self.enableMeasurement()
    def enableMeasurement(self):
        """Write the MEASURE payload to POWER_CTL so the device starts sampling."""
        bus.writeto_mem(self.address, POWER_CTL, bytearray(MEASURE))
    def setBandwidthRate(self, rate_flag):
        """Program the output-data-rate register (rate_flag is a 1-byte list)."""
        bus.writeto_mem(self.address, BW_RATE, bytearray(rate_flag))
    # set the measurement range for 10-bit readings
    def setRange(self, range_flag):
        """Read-modify-write DATA_FORMAT: range in the low nibble, bit 0x08 forced on."""
        value = bus.readfrom_mem(self.address, DATA_FORMAT,1)
        val2 = value[0]
        val2 &= ~0x0F;
        val2 |= range_flag;
        val2 |= 0x08;
        buf = [val2]
        bus.writeto_mem(self.address, DATA_FORMAT, bytearray(buf))
    # returns the current reading from the sensor for each axis
    #
    # parameter gforce:
    #    False (default): result is returned in m/s^2
    #    True           : result is returned in gs
    def sample(self, gforce = False):
        """Read all three axes once; returns {'x': .., 'y': .., 'z': ..}."""
        #bytes = bus.read_i2c_block_data(self.address, AXES_DATA, 6)
        bytes = bus.readfrom_mem(self.address, AXES_DATA, 6)
        # Combine low/high bytes little-endian, then sign-extend:
        # `1 << 16 - 1` parses as 1 << 15, the 16-bit sign bit.
        x = bytes[0] | (bytes[1] << 8)
        if(x & (1 << 16 - 1)):
            x = x - (1<<16)
        y = bytes[2] | (bytes[3] << 8)
        if(y & (1 << 16 - 1)):
            y = y - (1<<16)
        z = bytes[4] | (bytes[5] << 8)
        if(z & (1 << 16 - 1)):
            z = z - (1<<16)
        # Convert raw counts to g, then optionally to m/s^2.
        x = x * SCALE_MULTIPLIER
        y = y * SCALE_MULTIPLIER
        z = z * SCALE_MULTIPLIER
        if gforce == False:
            x = x * EARTH_GRAVITY_MS2
            y = y * EARTH_GRAVITY_MS2
            z = z * EARTH_GRAVITY_MS2
        x = round(x, 4)
        y = round(y, 4)
        z = round(z, 4)
        return {"x": x, "y": y, "z": z}
if __name__ == "__main__":
    # if run directly we'll just create an instance of the class and output
    # the current readings
    # BUG FIX: this module defines ADXL345_upy (not ADXL345), and its
    # __init__ requires a sensor_id argument; the original line
    # `adxl345 = ADXL345()` raised NameError when the script was run directly.
    adxl345 = ADXL345_upy("adxl345")
    # True -> readings reported in g rather than m/s^2
    axes = adxl345.sample(True)
    print("ADXL345 on address 0x%x:" % (adxl345.address))
    print("   x = %.3fG" % ( axes['x'] ))
    print("   y = %.3fG" % ( axes['y'] ))
    print("   z = %.3fG" % ( axes['z'] ))
| {
"repo_name": "mpi-sws-rse/antevents-python",
"path": "micropython/sensors/adxl345_upy.py",
"copies": "2",
"size": "3579",
"license": "apache-2.0",
"hash": 2519189862114084000,
"line_mean": 29.0756302521,
"line_max": 104,
"alpha_frac": 0.5613299804,
"autogenerated": false,
"ratio": 2.9168704156479217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9372387803197608,
"avg_score": 0.02116251857006268,
"num_lines": 119
} |
import smbus
from time import sleep
# select the correct i2c bus for this revision of Raspberry Pi
# (Revision codes >= 4 use i2c bus 1, older boards bus 0.)
revision = ([l[12:-1] for l in open('/proc/cpuinfo','r').readlines() if l[:8]=="Revision"]+['0000'])[0]
bus = smbus.SMBus(1 if int(revision, 16) >= 4 else 0)
# ADXL345 constants
EARTH_GRAVITY_MS2 = 9.80665  # standard gravity, m/s^2
SCALE_MULTIPLIER = 0.004  # scale factor applied to raw counts; presumably g/LSB — confirm against datasheet
# Register addresses
DATA_FORMAT = 0x31
BW_RATE = 0x2C
POWER_CTL = 0x2D
# Output-data-rate codes for BW_RATE.
BW_RATE_1600HZ = 0x0F
BW_RATE_800HZ = 0x0E
BW_RATE_400HZ = 0x0D
BW_RATE_200HZ = 0x0C
BW_RATE_100HZ = 0x0B
BW_RATE_50HZ = 0x0A
BW_RATE_25HZ = 0x09
# Measurement range codes for DATA_FORMAT's low bits.
RANGE_2G = 0x00
RANGE_4G = 0x01
RANGE_8G = 0x02
RANGE_16G = 0x03
MEASURE = 0x08  # POWER_CTL value that starts sampling
AXES_DATA = 0x32  # first of six consecutive axis-data registers
class ADXL345:
    """Driver for an ADXL345 accelerometer reached through the module-level smbus bus."""
    # I2C address of the device; replaced per-instance in __init__.
    address = None
    def __init__(self, address = 0x53):
        self.address = address
        self.setBandwidthRate(BW_RATE_1600HZ)
        self.setRange(RANGE_2G)
        self.enableMeasurement()
    def enableMeasurement(self):
        """Write MEASURE to POWER_CTL so the device starts sampling."""
        bus.write_byte_data(self.address, POWER_CTL, MEASURE)
    def setBandwidthRate(self, rate_flag):
        """Program the output-data-rate register."""
        bus.write_byte_data(self.address, BW_RATE, rate_flag)
    # set the measurement range for 10-bit readings
    def setRange(self, range_flag):
        """Read-modify-write DATA_FORMAT: range in the low nibble, bit 0x08 forced on."""
        fmt = bus.read_byte_data(self.address, DATA_FORMAT)
        bus.write_byte_data(self.address, DATA_FORMAT, (fmt & ~0x0F) | range_flag | 0x08)
    # returns the current reading from the sensor for each axis
    #
    # parameter gforce:
    #    False (default): result is returned in m/s^2
    #    True           : result is returned in gs
    def getAxes(self, gforce = False):
        """Read one sample per axis; returns {'x': .., 'y': .., 'z': ..}."""
        raw = bus.read_i2c_block_data(self.address, AXES_DATA, 6)
        def to_signed(lo, hi):
            # combine two bytes little-endian and sign-extend from 16 bits
            word = lo | (hi << 8)
            if word & (1 << 15):
                word = word - (1 << 16)
            return word
        axes = {}
        for name, offset in (("x", 0), ("y", 2), ("z", 4)):
            # raw counts -> g, then optionally -> m/s^2 (same multiply order
            # as the hand-unrolled original, so rounding is bit-identical)
            value = to_signed(raw[offset], raw[offset + 1]) * SCALE_MULTIPLIER
            if gforce == False:
                value = value * EARTH_GRAVITY_MS2
            axes[name] = round(value, 4)
        return axes
if __name__ == "__main__":
    # if run directly we'll just create an instance of the class and output
    # the current readings
    # use class
    # NOTE: Python 2 print-statement syntax; loops forever printing samples
    # until interrupted.
    adxl345 = ADXL345()
    while True:
        axes = adxl345.getAxes(True)
        #print "ADXL345 on address 0x%x:" % (adxl345.address)
        print " x= %.3fG\ty = %.3fG\tz = %.3fG" % (axes['x'], axes['y'], axes['z'])
| {
"repo_name": "jeonghoonkang/BerePi",
"path": "apps/accelerometer/adxl345/adxl345.py",
"copies": "1",
"size": "3237",
"license": "bsd-2-clause",
"hash": -23054883697433036,
"line_mean": 26.9051724138,
"line_max": 103,
"alpha_frac": 0.5607043559,
"autogenerated": false,
"ratio": 2.961573650503202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8918525331223092,
"avg_score": 0.020750535036022043,
"num_lines": 116
} |
import smbus
from time import sleep
# select the correct i2c bus for this revision of Raspberry Pi
# (Revision codes >= 4 use i2c bus 1, older boards bus 0.)
revision = ([l[12:-1] for l in open('/proc/cpuinfo','r').readlines() if l[:8]=="Revision"]+['0000'])[0]
bus = smbus.SMBus(1 if int(revision, 16) >= 4 else 0)
# ADXL345 constants
EARTH_GRAVITY_MS2 = 9.80665  # standard gravity, m/s^2
SCALE_MULTIPLIER = 0.004  # scale factor applied to raw counts; presumably g/LSB — confirm against datasheet
# Register addresses
DATA_FORMAT = 0x31
BW_RATE = 0x2C
POWER_CTL = 0x2D
# Output-data-rate codes for BW_RATE.
BW_RATE_1600HZ = 0x0F
BW_RATE_800HZ = 0x0E
BW_RATE_400HZ = 0x0D
BW_RATE_200HZ = 0x0C
BW_RATE_100HZ = 0x0B
BW_RATE_50HZ = 0x0A
BW_RATE_25HZ = 0x09
# Measurement range codes for DATA_FORMAT's low bits.
RANGE_2G = 0x00
RANGE_4G = 0x01
RANGE_8G = 0x02
RANGE_16G = 0x03
MEASURE = 0x08  # POWER_CTL value that starts sampling
AXES_DATA = 0x32  # first of six consecutive axis-data registers
class ADXL345:
    """Driver for an ADXL345 accelerometer on the module-level smbus bus."""
    # I2C address of the device; replaced per-instance in __init__.
    address = None
    def __init__(self, address = 0x53):
        self.address = address
        # NOTE(review): leftover debug read/print of registers 0x00-0x07
        # at construction time; remove once hardware bring-up is done.
        ytes = bus.read_i2c_block_data(self.address, 0x00, 8)
        print(ytes)
        self.setBandwidthRate(BW_RATE_100HZ)
        self.setRange(RANGE_2G)
        self.enableMeasurement()
    def enableMeasurement(self):
        """Write MEASURE to POWER_CTL so the device starts sampling."""
        bus.write_byte_data(self.address, POWER_CTL, MEASURE)
    def setBandwidthRate(self, rate_flag):
        """Program the output-data-rate register."""
        bus.write_byte_data(self.address, BW_RATE, rate_flag)
    # set the measurement range for 10-bit readings
    def setRange(self, range_flag):
        """Read-modify-write DATA_FORMAT: range in the low nibble, bit 0x08 forced on."""
        value = bus.read_byte_data(self.address, DATA_FORMAT)
        value &= ~0x0F;
        value |= range_flag;
        value |= 0x08;
        bus.write_byte_data(self.address, DATA_FORMAT, value)
    # returns the current reading from the sensor for each axis
    #
    # parameter gforce:
    #    False (default): result is returned in m/s^2
    #    True           : result is returned in gs
    def getAxes(self, gforce = False):
        """Read one sample per axis; returns {'x': .., 'y': .., 'z': ..}."""
        bytes = bus.read_i2c_block_data(self.address, AXES_DATA, 6)
        # Combine low/high bytes little-endian, then sign-extend:
        # `1 << 16 - 1` parses as 1 << 15, the 16-bit sign bit.
        x = bytes[0] | (bytes[1] << 8)
        if(x & (1 << 16 - 1)):
            x = x - (1<<16)
        y = bytes[2] | (bytes[3] << 8)
        if(y & (1 << 16 - 1)):
            y = y - (1<<16)
        z = bytes[4] | (bytes[5] << 8)
        if(z & (1 << 16 - 1)):
            z = z - (1<<16)
        # Convert raw counts to g, then optionally to m/s^2.
        x = x * SCALE_MULTIPLIER
        y = y * SCALE_MULTIPLIER
        z = z * SCALE_MULTIPLIER
        if gforce == False:
            x = x * EARTH_GRAVITY_MS2
            y = y * EARTH_GRAVITY_MS2
            z = z * EARTH_GRAVITY_MS2
        x = round(x, 4)
        y = round(y, 4)
        z = round(z, 4)
        return {"x": x, "y": y, "z": z}
if __name__ == "__main__":
    # if run directly we'll just create an instance of the class and output
    # the current readings
    # NOTE: Python 2 print-statement syntax; will not run under Python 3.
    adxl345 = ADXL345()
    # True -> readings reported in g rather than m/s^2
    axes = adxl345.getAxes(True)
    print "ADXL345 on address 0x%x:" % (adxl345.address)
    print "   x = %.3fG" % ( axes['x'] )
    print "   y = %.3fG" % ( axes['y'] )
    print "   z = %.3fG" % ( axes['z'] )
| {
"repo_name": "mimilei/almd",
"path": "adxl345.py",
"copies": "1",
"size": "3204",
"license": "bsd-3-clause",
"hash": 6480738634941766000,
"line_mean": 27.3539823009,
"line_max": 103,
"alpha_frac": 0.5602372035,
"autogenerated": false,
"ratio": 2.891696750902527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39519339544025267,
"avg_score": null,
"num_lines": null
} |
# A Dynamic Programming based Python Program for the Egg Dropping Puzzle
INT_MAX = 32767
# Function to get minimum number of trials needed in worst
# case with n eggs and k floors
def egg_drop(n, k):
# A 2D table where entery eggFloor[i][j] will represent minimum
# number of trials needed for i eggs and j floors.
egg_floor = [[0 for x in range(k+1)] for x in range(n+1)]
# We need one trial for one floor and0 trials for 0 floors
for i in range(1, n+1):
egg_floor[i][1] = 1
egg_floor[i][0] = 0
# We always need j trials for one egg and j floors.
for j in range(1, k+1):
egg_floor[1][j] = j
# Fill rest of the entries in table using optimal substructure
# property
for i in range(2, n+1):
for j in range(2, k+1):
egg_floor[i][j] = INT_MAX
for x in range(1, j+1):
res = 1 + max(egg_floor[i-1][x-1], egg_floor[i][j-x])
if res < egg_floor[i][j]:
egg_floor[i][j] = res
# eggFloor[n][k] holds the result
return egg_floor[n][k]
| {
"repo_name": "amaozhao/algorithms",
"path": "algorithms/dp/egg_drop.py",
"copies": "1",
"size": "1089",
"license": "mit",
"hash": 2316435244537988600,
"line_mean": 34.1290322581,
"line_max": 72,
"alpha_frac": 0.5821854913,
"autogenerated": false,
"ratio": 3.0762711864406778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4158456677740678,
"avg_score": null,
"num_lines": null
} |
# AEBN
import re, os, platform, urllib, cgi
PLUGIN_LOG_TITLE='AEBN' # Log Title
VERSION_NO = '2017.07.26.0'
# Delay used when requesting HTML, may be good to have to prevent being
# banned from the site
REQUEST_DELAY = 0
# URLS
BASE_URL='http://gay.theater.aebn.net'
BASE_VIDEO_DETAILS_URL=BASE_URL + '%s'
BASE_SEARCH_URL='http://gay.theater.aebn.net/dispatcher/fts?userQuery=%s&searchType=movie&imageType=Small'
# File names to match for this agent
# NOTE(review): Prefs (like HTTP, HTML, Agent, Locale used below) is a global
# injected by the Plex plug-in framework; it is not defined in this file.
file_name_pattern = re.compile(Prefs['regex'])
def Start():
    """Plex plug-in entry point: configure the framework HTTP client
    (one-week cache, desktop-browser User-Agent)."""
    HTTP.CacheTime = CACHE_1WEEK
    HTTP.Headers['User-agent'] = 'Mozilla/4.0 (compatible; MSIE 8.0; ' \
        'Windows NT 6.2; Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' \
        '.NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)'
class AEBN(Agent.Movies):
    """Plex metadata agent that scrapes gay.theater.aebn.net.

    NOTE(review): Agent, Locale, Prefs, Log, HTML, Proxy, HTTP, Datetime and
    MetadataSearchResult are globals injected by the Plex plug-in framework.
    """
    name = 'AEBN'
    languages = [Locale.Language.NoLanguage, Locale.Language.English]
    fallback_agent = False
    primary_provider = False
    contributes_to = ['com.plexapp.agents.cockporn']
    def Log(self, message, *args):
        """Forward to the framework Log(), but only when the 'debug' pref is on."""
        if Prefs['debug']:
            Log(PLUGIN_LOG_TITLE + ' - ' + message, *args)
    def search(self, results, media, lang, manual):
        """Find the AEBN page matching the media file name and append a
        MetadataSearchResult whose id is the page URL."""
        self.Log('-----------------------------------------------------------------------')
        self.Log('SEARCH CALLED v.%s', VERSION_NO)
        self.Log('SEARCH - Platform: %s %s', platform.system(), platform.release())
        self.Log('SEARCH - media.title - %s', media.title)
        self.Log('SEARCH - media.items[0].parts[0].file - %s', media.items[0].parts[0].file)
        self.Log('SEARCH - media.primary_metadata.title - %s', media.primary_metadata.title)
        self.Log('SEARCH - media.items - %s', media.items)
        self.Log('SEARCH - media.filename - %s', media.filename)
        self.Log('SEARCH - lang - %s', lang)
        self.Log('SEARCH - manual - %s', manual)
        self.Log('SEARCH - Prefs->cover - %s', Prefs['cover'])
        self.Log('SEARCH - Prefs->folders - %s', Prefs['folders'])
        self.Log('SEARCH - Prefs->regex - %s', Prefs['regex'])
        if not media.items[0].parts[0].file:
            return
        path_and_file = media.items[0].parts[0].file.lower()
        self.Log('SEARCH - File Path: %s', path_and_file)
        path_and_file = os.path.splitext(path_and_file)[0]
        (file_dir, basename) = os.path.split(os.path.splitext(path_and_file)[0])
        final_dir = os.path.split(file_dir)[1]
        file_name = basename.lower() #Sets string to lower.
        self.Log('SEARCH - File Name: %s', basename)
        self.Log('SEARCH - Enclosing Folder: %s', final_dir)
        # Restrict matching to the folders named in prefs ('*' means all).
        if Prefs['folders'] != "*":
            folder_list = re.split(',\s*', Prefs['folders'].lower())
            if final_dir not in folder_list:
                self.Log('SEARCH - Skipping %s because the folder %s is not in the acceptable folders list: %s', file_name, final_dir, ','.join(folder_list))
                return
        # The configurable regex must yield named groups 'studio' and 'clip_name'.
        m = file_name_pattern.search(file_name)
        if not m:
            self.Log('SEARCH - Skipping %s because the file name is not in the expected format.', file_name)
            return
        groups = m.groupdict()
        search_query_raw = list()
        file_studio = groups['studio']
        self.Log('SEARCH - Studio: %s', file_studio)
        # "scene" in the clip name means we search for the parent movie title.
        if groups['clip_name'].find("scene") > 0:
            self.Log('SEARCH - This is a scene: True')
            scene = groups['clip_name'].split("scene",1)[1].lstrip(' ')
            file_name = file_name.split("scene",1)[0].rstrip(' ')
            self.Log('SEARCH - Movie: %s', file_name)
            self.Log('SEARCH - Scene: %s', scene)
            for piece in file_name.split(' '):
                search_query_raw.append(cgi.escape(piece))
        else:
            self.Log('SEARCH - This is a scene: False')
            file_name = groups['clip_name']
            file_name = file_name.lstrip(' ') #Removes white spaces on the left end.
            file_name = file_name.lstrip('- ') #Removes white spaces on the left end.
            file_name = file_name.rstrip(' ') #Removes white spaces on the right end.
            self.Log('SEARCH - Split File Name: %s', file_name.split(' '))
            for piece in file_name.split(' '):
                search_query_raw.append(cgi.escape(piece))
        search_query="+".join(search_query_raw)
        self.Log('SEARCH - Search Query: %s', search_query)
        html=HTML.ElementFromURL(BASE_SEARCH_URL % search_query, sleep=REQUEST_DELAY)
        score=10
        search_results=html.xpath('//div[@class="component main100 exactMatch"]/div[2]/div/div/div[2]')
        # Enumerate the search results looking for an exact match. The hope is that by eliminating special character words from the title and searching the remainder that we will get the expected video in the results.
        if len(search_results) > 0:
            self.Log('SEARCH - results size exact match: %s', len(search_results))
            for result in search_results:
                if len(file_studio) > 0:
                    # The studio link sits at a different index depending on
                    # how many detail rows the result has.
                    try:
                        if len(result.findall('div[@class="movieDetails"]/div')) == 4:
                            studios = result.findall('div[@class="movieDetails"]/div[3]/div[2]/a')
                            self.Log('SEARCH - studios: %s', len(studios))
                        elif len(result.findall('div[@class="movieDetails"]/div')) == 3:
                            studios = result.findall('div[@class="movieDetails"]/div[2]/div[2]/a')
                            self.Log('SEARCH - studios: %s', len(studios))
                    except:
                        studios = 'empty'
                        self.Log('SEARCH - studios: Empty')
                        pass
                    for studio in studios:
                        video_title = result.findall('div[@class="movie"]/div/a')[0].get("title")
                        video_title = video_title.lstrip(' ') #Removes white spaces on the left end.
                        video_title = video_title.rstrip(' ') #Removes white spaces on the right end.
                        video_title = video_title.replace(':', '')
                        if studio.text.lower() == file_studio.lower() and video_title.lower() == file_name.lower():
                            self.Log('SEARCH - video title: %s', video_title)
                            video_url = result.findall('div[@class="movie"]/div/a')[0].get('href')
                            if BASE_URL not in video_url:
                                video_url = BASE_URL + video_url
                            self.Log('SEARCH - video url: %s', video_url)
                            image_url = result.findall('div[@class="movie"]/div/a/img')[0].get("src")
                            if image_url[:2] == "//":
                                image_url = 'http:' + image_url
                            self.Log('SEARCH - image url: %s', image_url)
                            self.Log('SEARCH - Exact Match "' + file_name.lower() + '" == "%s"' % video_title.lower())
                            self.Log('SEARCH - Studio Match "' + studio.text.lower() + '" == "%s"' % file_studio.lower())
                            results.Append(MetadataSearchResult(id = video_url, name = video_title, score = 100, lang = lang))
                            return
                else:
                    video_title = result.findall('div[@class="movie"]/div/a')[0].get("title")
                    video_title = video_title.lstrip(' ') #Removes white spaces on the left end.
                    video_title = video_title.rstrip(' ') #Removes white spaces on the right end.
                    video_title = video_title.replace(':', '')
                    if video_title.lower() == file_name.lower():
                        self.Log('SEARCH - video title: %s', video_title)
                        video_url = result.findall('div[@class="movie"]/div/a')[0].get('href')
                        if BASE_URL not in video_url:
                            video_url = BASE_URL + video_url
                        self.Log('SEARCH - video url: %s', video_url)
                        image_url = result.findall('div[@class="movie"]/div/a/img')[0].get("src")
                        if image_url[:2] == "//":
                            image_url = 'http:' + image_url
                        self.Log('SEARCH - image url: %s', image_url)
                        self.Log('SEARCH - Exact Match "' + file_name.lower() + '" == "%s"' % video_title.lower())
                        results.Append(MetadataSearchResult(id = video_url, name = video_title, score = 100, lang = lang))
                        return
        else:
            # No "exact match" section: scan the plain result list instead.
            search_results=html.xpath('//*[@class="movie"]')
            self.Log('SEARCH - results size: %s', len(search_results))
            for result in search_results:
                #result=result.find('')
                video_title=result.findall("div/a")[0].get("title")
                video_title = video_title.lstrip(' ') #Removes white spaces on the left end.
                video_title = video_title.rstrip(' ') #Removes white spaces on the right end.
                video_title = video_title.replace(':', '')
                self.Log('SEARCH - video title: %s', video_title)
                # Check the alt tag which includes the full title with special characters against the video title. If we match we nominate the result as the proper metadata. If we don't match we reply with a low score.
                if video_title.lower() == file_name.lower():
                    video_url = result.findall("div/a")[0].get('href')
                    if BASE_URL not in video_url:
                        video_url = BASE_URL + video_url
                    self.Log('SEARCH - video url: %s', video_url)
                    image_url = result.findall("div/a/img")[0].get("src")
                    if image_url[:2] == "//":
                        image_url = 'http:' + image_url
                    self.Log('SEARCH - image url: %s', image_url)
                    self.Log('SEARCH - Exact Match "' + file_name.lower() + '" == "%s"' % video_title.lower())
                    results.Append(MetadataSearchResult(id = video_url, name = video_title, score = 98, lang = lang))
                    return
                else:
                    self.Log('SEARCH - Title not found "' + file_name.lower() + '" != "%s"' % video_title.lower())
                    score=score-1
        # Nothing matched: report a low-scoring placeholder result.
        results.Append(MetadataSearchResult(id = '', name = media.filename, score = score, lang = lang))
    def update(self, metadata, media, lang, force=False):
        """Populate metadata (title, poster, summary, release date, genres,
        director, cast, studio) by scraping the AEBN page stored in metadata.id."""
        self.Log('UPDATE CALLED')
        enclosing_directory, file_name = os.path.split(os.path.splitext(media.items[0].parts[0].file)[0])
        file_name = file_name.lower()
        # NOTE(review): this guard runs after the line above has already read
        # media.items[0].parts[0].file; consider moving it first.
        if not media.items[0].parts[0].file:
            return
        file_path = media.items[0].parts[0].file
        self.Log('UPDATE - File Path: %s', file_path)
        self.Log('UPDATE - Video URL: %s', metadata.id)
        url = metadata.id
        # Fetch HTML.
        html = HTML.ElementFromURL(url, sleep=REQUEST_DELAY)
        # Set tagline to URL.
        metadata.tagline = url
        # Set video title.
        # Nested helper (called explicitly as title(self, ...) below): returns
        # [title, scene_index]; scene_index is only meaningful for scene files.
        def title(self, html, file_name):
            video_title = [0, 1]
            if file_name.find("scene") > 0:
                video_titles = html.xpath('//div[@class="movieDetailsSceneResults"]/div/div[1]/div[@class="title"]/text()')
                # NOTE(review): under Python 2 a list compared to an int is
                # always "greater", so this branch is always taken;
                # len(video_titles) > 0 was probably intended.
                if video_titles > 0:
                    i = 0
                    for temp in video_titles:
                        video_titles[i] = temp.rstrip().replace(":","")
                        i += 1
                    video_titles = filter(None, video_titles)
                    self.Log('UPDATE - Number of Scenes: "%s"' % len(video_titles))
                    i = 0
                    for temp in video_titles:
                        i += 1
                        if temp.lower() == file_name.lower():
                            video_title[0] = temp
                            video_title[1] = i
                            self.Log('UPDATE - Scene found in list: "%s"' %temp.lower() + ' == "%s"' %file_name.lower())
                            return video_title;
                        else:
                            video_title[0] = html.xpath('//div[@class="componentHeader"]/h1/text()')[0]
                            self.Log('UPDATE - Scene not found in list: "%s"' %temp.lower() + ' != "%s"' %file_name.lower())
                else:
                    video_title[0] = html.xpath('//div[@class="componentHeader"]/h1/text()')[0]
                    self.Log('UPDATE - Scene not found in list')
            else:
                video_title[0] = html.xpath('//div[@class="componentHeader"]/h1/text()')[0]
            return video_title;
        video_title = title(self, html, file_name)
        self.Log('UPDATE - video_title: "%s"' % video_title[0])
        # Try to get and process the director posters.
        valid_image_names = list()
        i = 0
        image = html.xpath('//div[@id="md-boxCover"]/a/img')[0]
        try:
            thumb_url = image.get('src')
            if thumb_url[:2] == "//":
                thumb_url = 'http:' + thumb_url
            self.Log('UPDATE - thumb_url: "%s"' % thumb_url)
            # '160w' -> 'xlf' swaps the thumbnail for the full-size box cover.
            poster_url = thumb_url.replace('160w', 'xlf')
            self.Log('UPDATE - poster_url: "%s"' % poster_url)
            valid_image_names.append(poster_url)
            if poster_url not in metadata.posters:
                try:
                    i += 1
                    metadata.posters[poster_url]=Proxy.Preview(HTTP.Request(thumb_url), sort_order = i)
                except: pass
        except: pass
        # Try to get description text.
        try:
            raw_about_text=html.xpath('//span[@itemprop="about"]')
            self.Log('UPDATE - About Text - RAW %s', raw_about_text)
            about_text=' '.join(str(x.text_content().strip()) for x in raw_about_text)
            metadata.summary=about_text
        except Exception as e:
            self.Log('UPDATE - Error getting description text: %s', e)
            pass
        # Try to get and process the release date.
        try:
            rd=html.xpath('//span[@itemprop="datePublished"]/text()')[0]
            self.Log('UPDATE - Release Date: %s', rd)
            metadata.originally_available_at = Datetime.ParseDate(rd).date()
            metadata.year = metadata.originally_available_at.year
        except Exception as e:
            self.Log('UPDATE - Error getting release date: %s', e)
            pass
        # Try to get and process the video genres.
        try:
            metadata.genres.clear()
            # For scene files, pull genres from that scene's detail block.
            if file_name.find("scene") > 0 and file_name.lower() == video_title[0].lower():
                path = '//div[@class="movieDetailsSceneResults"]/div['+str(video_title[1])+']/div[2]/div[5]/div/div/div[2]/span[2]/a/text()'
                genres = html.xpath(path)
                self.Log('UPDATE - video_genres count from scene: "%s"' % len(genres))
                self.Log('UPDATE - video_genres from scene: "%s"' % genres)
                for genre in genres:
                    genre = genre.strip()
                    if (len(genre) > 0):
                        metadata.genres.add(genre)
            else:
                genres = html.xpath('//div[@class="md-detailsCategories"]/span[2]/a/text()')
                self.Log('UPDATE - video_genres count from movie: "%s"' % len(genres))
                self.Log('UPDATE - video_genres from movie: "%s"' % genres)
                for genre in genres:
                    genre = genre.strip()
                    if (len(genre) > 0):
                        metadata.genres.add(genre)
        except Exception as e:
            self.Log('UPDATE - Error getting video genres: %s', e)
            pass
        # Crew.
        # Try to get and process the director.
        try:
            metadata.directors.clear()
            director = html.xpath('//div[@class="md-detailsDirector"]/span[2]/a/text()')[0]
            self.Log('UPDATE - director: "%s"', director)
            metadata.directors.add(director)
        except Exception as e:
            self.Log('UPDATE - Error getting director: %s', e)
            pass
        # Try to get and process the video cast.
        try:
            metadata.roles.clear()
            # For scene files, read the cast from that scene's detail block;
            # otherwise fall back to the movie-level star lists.
            if file_name.find("scene") > 0 and file_name.lower() == video_title[0].lower():
                path = '//div[@class="movieDetailsSceneResults"]/div['+str(video_title[1])+']/div[2]/div[5]/div/div/div[1]/span[2]/a/span/text()'
                htmlcast = html.xpath(path)
                self.Log('UPDATE - cast scene count: "%s"' % len(htmlcast))
                if len(htmlcast) > 0:
                    self.Log('UPDATE - cast: "%s"' % htmlcast)
                    for cast in htmlcast:
                        cname = cast.strip()
                        if (len(cname) > 0):
                            role = metadata.roles.new()
                            role.name = cname
                else:
                    htmlcast = html.xpath('//div[@class="md-detailsStars"]/div/div[1]/a/span/text()')
                    htmlcast1 = html.xpath('//div[@class="md-detailsStars"]/div/div[2]/a/span/text()')
                    if len(htmlcast1) > 0:
                        self.Log('UPDATE - cast: "%s"' % htmlcast1)
                        for cast in htmlcast1:
                            cname = cast.strip()
                            if (len(cname) > 0):
                                role = metadata.roles.new()
                                role.name = cname
                    else:
                        self.Log('UPDATE - cast: "%s"' % htmlcast)
                        for cast in htmlcast:
                            cname = cast.strip()
                            if (len(cname) > 0):
                                role = metadata.roles.new()
                                role.name = cname
            else:
                htmlcast = html.xpath('//div[@class="md-detailsStars"]/div/div[1]/a/span/text()')
                htmlcast1 = html.xpath('//div[@class="md-detailsStars"]/div/div[2]/a/span/text()')
                if len(htmlcast1) > 0:
                    self.Log('UPDATE - cast: "%s"' % htmlcast1)
                    for cast in htmlcast1:
                        cname = cast.strip()
                        if (len(cname) > 0):
                            role = metadata.roles.new()
                            role.name = cname
                else:
                    self.Log('UPDATE - cast: "%s"' % htmlcast)
                    for cast in htmlcast:
                        cname = cast.strip()
                        if (len(cname) > 0):
                            role = metadata.roles.new()
                            role.name = cname
        except Exception as e:
            self.Log('UPDATE - Error getting cast: %s', e)
            pass
        # Try to get and process the studio name.
        try:
            studio = html.xpath('//div[@class="md-detailsStudio"]/span[2]/a/text()')[0]
            self.Log('UPDATE - studio: "%s"', studio)
            metadata.studio=studio
        except Exception as e:
            self.Log('UPDATE - Error getting studio name: %s', e)
            pass
        metadata.content_rating = 'X'
        metadata.posters.validate_keys(valid_image_names)
        metadata.title = video_title[0]
| {
"repo_name": "iklier/plex-gay-metadata-agent",
"path": "AEBN.bundle/Contents/Code/__init__.py",
"copies": "2",
"size": "16090",
"license": "mit",
"hash": 578482785861798900,
"line_mean": 40.795212766,
"line_max": 211,
"alpha_frac": 0.6121814792,
"autogenerated": false,
"ratio": 3.044465468306528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46566469475065286,
"avg_score": null,
"num_lines": null
} |
"""aebovl -- overlay bracketed exposures to create a composite image
Usage:
    aebovl -l<val> [-d<val> -a<val> -f<val> -b -m -h] <normal> <under> <over>
Where:
-l <val> light limit (0-255). Pixels lighter than this value are considered
to be too light. Default: 127
-d <val> dark limit (0-255). Pixels darker than this values are considered
to be too dark. Optional. If omitted, the dark limit is set to
255 - light limit.
-a <val> alpha. Adjusts level of blending between images. Optional, range
1 - 255, default is 1. 1 means use all of the light or dark image, 255
means use all of the normal image. Values in between give a blend.
-f <val> apply a filter. Optional. Default: no filter. Filters are:
BLUR, CONTOUR, DETAIL, EDGE_ENHANCE, EDGE_ENHANCE_MORE, EMBOSS,
FIND_EDGES, SMOOTH, SMOOTH_MORE, and SHARPEN.
-b Blur masks. Blur the light and dark masks to soften the overlays and help
smooth transitions.
-m Save mask image. A composite of the masks is saved for reference as
overlay-cm.jpg. Optional. The composite mask shows light areas
as white, normal areas as gray and dark areas as black.
-h Help
<normal> Normally exposed image. Required.
<under> Under-exposed (dark) image. Required.
<over> Over-exposed (light) image. Required.
Output:
Results are saved as overlay*.JPG in the current directory
Notes:
Generally it is easiest to focus on getting the largest uniform section of the
image right (e.g. the sky), then fine tuning from there. Try using -l120 -m
    initially, and reduce -l by 10 until the sky is uniformly darkened. Then set
    -d to 255-l, and continue to adjust -l independently of -d until you are happy
with the rest of the image. Try high and low values of -l. Finally add -b and
see if this improves the effect.
Example work flow:
Start:
aebovl -l120 -m normal.jpg under.jpg over.jpg
Adjust in increments of 10 until sky looks good, perhaps something like:
aebovl -l70 -m normal.jpg under.jpg over.jpg
Fix -d at 255-70:
aebovl -l80 -d185 -m normal.jpg under.jpg over.jpg
Continue to adjust -l until main subject is how you want it:
aebovl -l35 -d185 -m normal.jpg under.jpg over.jpg
Now try -b, and see if it helps:
aebovl -l35 -d185 -b normal.jpg under.jpg over.jpg
"""
import sys
import Image, ImageFilter
import getopt
from getopt import GetoptError
# Filter names accepted by -f; each matches an ImageFilter attribute.
VALID_FILTERS = ["BLUR", "CONTOUR", "DETAIL", "EDGE_ENHANCE",
                 "EDGE_ENHANCE_MORE", "EMBOSS", "FIND_EDGES", "SMOOTH",
                 "SMOOTH_MORE", "SHARPEN"]
def abort(code, msg):
    """Write msg (plus a help hint) to stderr and exit with the given status code."""
    sys.stderr.write(msg + '\n')
    # BUG FIX: the hint named a non-existent program "expovl"; this tool is
    # invoked as "aebovl" (see the module docstring).
    sys.stderr.write('Run aebovl -h for help\n')
    sys.exit(code)
def main():
    """Parse options, blend the three bracketed exposures, and save overlay*.JPG.

    NOTE: Python 2 script (print statements) and depends on PIL's Image /
    ImageFilter modules imported at module level.
    """
    # Get command line parameters
    getoptstring = 'bhml:d:a:f:'
    try:
        opts, args = getopt.getopt(sys.argv[1:], getoptstring)
    except GetoptError:
        abort(2, "Invalid or missing command line option")
    # Defaults (see the module docstring for each flag's meaning).
    llimit = 127
    dlimit = None
    alpha = 1
    savemasks = False
    filter = None      # NOTE(review): shadows the builtin `filter`
    blurmasks = False
    help = False       # NOTE(review): shadows the builtin `help`
    outfilename = "overlay.jpg"  # NOTE(review): unused; output name is hard-coded below
    for opt, val in opts:
        if opt == "-l": llimit = int(val)
        if opt == "-d": dlimit = int(val)
        if opt == "-m": savemasks = True
        if opt == "-h": help = True
        if opt == "-a": alpha = int(val)
        if opt == "-b": blurmasks = True
        if opt == "-f": filter = val.upper()
    if help:
        print __doc__
        sys.exit(0)
    # Dark limit defaults to the mirror of the light limit.
    if dlimit is None: dlimit = 255 - llimit
    if len(args) != 3:
        abort(1, "Three input image files required.")
    if filter is not None and filter not in VALID_FILTERS:
        abort(3, "Invalid filter specified")
    midimgfile, darkimgfile, lightimgfile = args
    print "Loading images ..."
    midimg = Image.open(midimgfile).convert("RGB")
    darkimg = Image.open(darkimgfile).convert("RGB")
    lightimg = Image.open(lightimgfile).convert("RGB")
    # Grayscale copy of the normal exposure drives the masks.
    gsimg = midimg.convert("L")
    print "Processing images ..."
    # Mask value alpha (nearly transparent) where the pixel exceeds the limit,
    # 255 (opaque -> keep the normal image) elsewhere.
    lightmask = gsimg.point(lambda i: ((i < llimit) * alpha) or 255)
    darkmask = gsimg.point(lambda i: ((i > dlimit) * alpha) or 255)
    if blurmasks:
        lightmask = lightmask.filter(ImageFilter.BLUR)
        darkmask = darkmask.filter(ImageFilter.BLUR)
    # Composite in the over-exposed image where it's too light, then the
    # under-exposed image where it's too dark.
    resimg = Image.composite(midimg, lightimg, lightmask)
    resimg = Image.composite(resimg, darkimg, darkmask)
    if filter is not None:
        resimg = resimg.filter(getattr(ImageFilter, filter))
    print "Saving image file(s) ..."
    resimg.save("overlay.JPG")
    if savemasks:
        # Recreate the masks without using the alpha factor, so the composite
        # mask is composed of white, mid-gray and black, not shades in between
        lightmask = gsimg.point(lambda i: (i < llimit) or 255)
        darkmask = gsimg.point(lambda i: (i > dlimit) or 255)
        if blurmasks:
            lightmask = lightmask.filter(ImageFilter.BLUR)
            darkmask = darkmask.filter(ImageFilter.BLUR)
        gimg = Image.new("L", midimg.size, 128)
        wimg = Image.new("L", midimg.size, 0)
        bimg = Image.new("L", midimg.size, 255)
        compmask = Image.composite(gimg, wimg, lightmask)
        compmask = Image.composite(compmask, bimg, darkmask)
        compmask.save("overlay-cm.JPG")
    print "Results saved as overlay*.JPG"
# Run unconditionally on import (no __main__ guard in the original).
main()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/519636_aebovl__Automatic_Exposure_Bracket_Image/recipe-519636.py",
"copies": "1",
"size": "5329",
"license": "mit",
"hash": -6093576822878314000,
"line_mean": 39.679389313,
"line_max": 80,
"alpha_frac": 0.6543441546,
"autogenerated": false,
"ratio": 3.269325153374233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9337342388834354,
"avg_score": 0.01726538382797576,
"num_lines": 131
} |
""" aecodecs -- Convert from common Python types to Apple Event Manager types and vice-versa. """
import datetime, struct, sys
from Foundation import NSAppleEventDescriptor, NSURL
from . import kae
__all__ = ['Codecs', 'AEType', 'AEEnum']
######################################################################
def fourcharcode(code):
    """ Convert four-char code for use in NSAppleEventDescriptor methods.
    code : bytes -- four-char code, e.g. b'utxt'
    Result : int -- OSType, e.g. 1970567284
    """
    # Interpret the four bytes as one big-endian unsigned 32-bit integer.
    (ostype,) = struct.unpack('>I', code)
    return ostype
#######
class Codecs:
    """ Implements mappings for common Python types with direct AppleScript equivalents. Used by AppleScript class. """
    # Apple's LongDateTime epoch (seconds are counted from 1904-01-01).
    kMacEpoch = datetime.datetime(1904, 1, 1)
    kUSRF = fourcharcode(kae.keyASUserRecordFields)
    def __init__(self):
        # Clients may add/remove/replace encoder and decoder items:
        # encoders: Python type/class -> method that packs a value of that
        # type into an NSAppleEventDescriptor.
        self.encoders = {
                NSAppleEventDescriptor.class__(): self.packdesc,
                type(None): self.packnone,
                bool: self.packbool,
                int: self.packint,
                float: self.packfloat,
                bytes: self.packbytes,
                str: self.packstr,
                list: self.packlist,
                tuple: self.packlist,
                dict: self.packdict,
                datetime.datetime: self.packdatetime,
                AEType: self.packtype,
                AEEnum: self.packenum,
        }
        if sys.version_info.major < 3: # 2.7 compatibility
            self.encoders[unicode] = self.packstr
        # decoders: OSType (int) -> method that unpacks a descriptor of that
        # type back into a Python value. Keys are converted from the 4-byte
        # codes in kae via fourcharcode().
        # NOTE(review): several referenced unpack*/pack* methods are defined
        # later in this class, beyond this section.
        self.decoders = {fourcharcode(k): v for k, v in {
                kae.typeNull: self.unpacknull,
                kae.typeBoolean: self.unpackboolean,
                kae.typeFalse: self.unpackboolean,
                kae.typeTrue: self.unpackboolean,
                kae.typeSInt32: self.unpacksint32,
                kae.typeIEEE64BitFloatingPoint: self.unpackfloat64,
                kae.typeUTF8Text: self.unpackunicodetext,
                kae.typeUTF16ExternalRepresentation: self.unpackunicodetext,
                kae.typeUnicodeText: self.unpackunicodetext,
                kae.typeLongDateTime: self.unpacklongdatetime,
                kae.typeAEList: self.unpackaelist,
                kae.typeAERecord: self.unpackaerecord,
                kae.typeAlias: self.unpackfile,
                kae.typeFSS: self.unpackfile,
                kae.typeFSRef: self.unpackfile,
                kae.typeFileURL: self.unpackfile,
                kae.typeType: self.unpacktype,
                kae.typeEnumeration: self.unpackenumeration,
        }.items()}
def pack(self, data):
"""Pack Python data.
data : anything -- a Python value
Result : NSAppleEventDescriptor -- an AE descriptor, or error if no encoder exists for this type of data
"""
try:
return self.encoders[data.__class__](data) # quick lookup by type/class
except (KeyError, AttributeError) as e:
for type, encoder in self.encoders.items(): # slower but more thorough lookup that can handle subtypes/subclasses
if isinstance(data, type):
return encoder(data)
raise TypeError("Can't pack data into an AEDesc (unsupported type): {!r}".format(data))
def unpack(self, desc):
"""Unpack an Apple event descriptor.
desc : NSAppleEventDescriptor
Result : anything -- a Python value, or the original NSAppleEventDescriptor if no decoder is found
"""
decoder = self.decoders.get(desc.descriptorType())
# unpack known type
if decoder:
return decoder(desc)
# if it's a record-like desc, unpack as dict with an extra AEType(b'pcls') key containing the desc type
rec = desc.coerceToDescriptorType_(fourcharcode(kae.typeAERecord))
if rec:
rec = self.unpackaerecord(rec)
rec[AEType(kae.pClass)] = AEType(struct.pack('>I', desc.descriptorType()))
return rec
# return as-is
return desc
##
def _packbytes(self, desctype, data):
return NSAppleEventDescriptor.descriptorWithDescriptorType_bytes_length_(
fourcharcode(desctype), data, len(data))
def packdesc(self, val):
return val
def packnone(self, val):
return NSAppleEventDescriptor.nullDescriptor()
def packbool(self, val):
return NSAppleEventDescriptor.descriptorWithBoolean_(int(val))
def packint(self, val):
if (-2**31) <= val < (2**31):
return NSAppleEventDescriptor.descriptorWithInt32_(val)
else:
return self.pack(float(val))
def packfloat(self, val):
return self._packbytes(kae.typeFloat, struct.pack('d', val))
def packbytes(self, val):
return self._packbytes(kae.typeData, val)
def packstr(self, val):
return NSAppleEventDescriptor.descriptorWithString_(val)
def packdatetime(self, val):
delta = val - self.kMacEpoch
sec = delta.days * 3600 * 24 + delta.seconds
return self._packbytes(kae.typeLongDateTime, struct.pack('q', sec))
def packlist(self, val):
lst = NSAppleEventDescriptor.listDescriptor()
for item in val:
lst.insertDescriptor_atIndex_(self.pack(item), 0)
return lst
def packdict(self, val):
record = NSAppleEventDescriptor.recordDescriptor()
usrf = desctype = None
for key, value in val.items():
if isinstance(key, AEType):
if key.code == kae.pClass and isinstance(value, AEType): # AS packs records that contain a 'class' property by coercing the packed record to the descriptor type specified by the property's value (assuming it's an AEType)
desctype = value
else:
record.setDescriptor_forKeyword_(self.pack(value), fourcharcode(key.code))
else:
if not usrf:
usrf = NSAppleEventDescriptor.listDescriptor()
usrf.insertDescriptor_atIndex_(self.pack(key), 0)
usrf.insertDescriptor_atIndex_(self.pack(value), 0)
if usrf:
record.setDescriptor_forKeyword_(usrf, self.kUSRF)
if desctype:
newrecord = record.coerceToDescriptorType_(fourcharcode(desctype.code))
if newrecord:
record = newrecord
else: # coercion failed for some reason, so pack as normal key-value pair
record.setDescriptor_forKeyword_(self.pack(desctype), fourcharcode(key.code))
return record
def packtype(self, val):
return NSAppleEventDescriptor.descriptorWithTypeCode_(fourcharcode(val.code))
def packenum(self, val):
return NSAppleEventDescriptor.descriptorWithEnumCode_(fourcharcode(val.code))
#######
def unpacknull(self, desc):
return None
def unpackboolean(self, desc):
return desc.booleanValue()
def unpacksint32(self, desc):
return desc.int32Value()
def unpackfloat64(self, desc):
return struct.unpack('d', bytes(desc.data()))[0]
def unpackunicodetext(self, desc):
return desc.stringValue()
def unpacklongdatetime(self, desc):
return self.kMacEpoch + datetime.timedelta(seconds=struct.unpack('q', bytes(desc.data()))[0])
def unpackaelist(self, desc):
return [self.unpack(desc.descriptorAtIndex_(i + 1)) for i in range(desc.numberOfItems())]
def unpackaerecord(self, desc):
dct = {}
for i in range(desc.numberOfItems()):
key = desc.keywordForDescriptorAtIndex_(i + 1)
value = desc.descriptorForKeyword_(key)
if key == self.kUSRF:
lst = self.unpackaelist(value)
for i in range(0, len(lst), 2):
dct[lst[i]] = lst[i+1]
else:
dct[AEType(struct.pack('>I', key))] = self.unpack(value)
return dct
def unpacktype(self, desc):
return AEType(struct.pack('>I', desc.typeCodeValue()))
def unpackenumeration(self, desc):
return AEEnum(struct.pack('>I', desc.enumCodeValue()))
def unpackfile(self, desc):
url = bytes(desc.coerceToDescriptorType_(fourcharcode(kae.typeFileURL)).data()).decode('utf8')
return NSURL.URLWithString_(url).path()
#######
class AETypeBase:
    """Base class for AEType and AEEnum.
    Notes:
    - Hashable and comparable, so may be used as keys in dictionaries that map to AE records.
    """

    def __init__(self, code):
        """
        code : bytes -- four-char code, e.g. b'utxt'
        """
        # Guard clauses: the code must be exactly four bytes.
        if not isinstance(code, bytes):
            raise TypeError('invalid code (not a bytes object): {!r}'.format(code))
        if len(code) != 4:
            raise ValueError('invalid code (not four bytes long): {!r}'.format(code))
        self._code = code

    @property
    def code(self):
        """bytes -- four-char code, e.g. b'utxt'"""
        return self._code

    def __hash__(self):
        return hash(self._code)

    def __eq__(self, val):
        # Equal only when both the concrete class and the code match.
        return self.__class__ == val.__class__ and self._code == val.code

    def __ne__(self, val):
        return not self.__eq__(val)

    def __repr__(self):
        return "{0}({1!r})".format(type(self).__name__, self._code)
##
# Distinct subclass so type codes never compare equal to enum codes even when
# the four-char codes match (AETypeBase.__eq__ also compares the class).
class AEType(AETypeBase):
    """An AE type. Maps to an AppleScript type class, e.g. AEType(b'utxt') <=> 'unicode text'."""
# Distinct subclass so enum codes never compare equal to type codes even when
# the four-char codes match (AETypeBase.__eq__ also compares the class).
class AEEnum(AETypeBase):
    """An AE enumeration. Maps to an AppleScript constant, e.g. AEEnum(b'yes ') <=> 'yes'."""
| {
"repo_name": "encima/NeuroSocket",
"path": "libs/py-applescript-1.0.0/applescript/aecodecs.py",
"copies": "1",
"size": "8309",
"license": "mit",
"hash": 2743566242988660000,
"line_mean": 29.8884758364,
"line_max": 224,
"alpha_frac": 0.6962330004,
"autogenerated": false,
"ratio": 3.1859662576687118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8529579955613252,
"avg_score": 0.17052386049109194,
"num_lines": 269
} |
"""A eden.motif.SequenceMotif wrapper for comparison against Meme."""
from utilities import MotifWrapper, MuscleAlignWrapper
from eden.motif import SequenceMotif
class EdenWrapper(MotifWrapper):
    """Wrapper for EDeN Sequence Motif.

    Adapts eden.motif.SequenceMotif to the MotifWrapper interface so it can
    be compared against Meme and other motif finders.
    """

    def __init__(self,
                 alphabet='dna',
                 gap_in_alphabet=True,
                 scoring_criteria='pwm',    # ["pwm","hmm"]
                 pseudocounts=0,    # integer or dictionary {'A':0, 'C': 0, 'G': 0, 'T': 0}
                 threshold=None,
                 k=1,    # top-k scores returned for hmm score
                 min_subarray_size=7,
                 max_subarray_size=10,
                 min_motif_count=1,
                 min_cluster_size=1,
                 training_size=None,
                 negative_ratio=1,
                 shuffle_order=2,
                 n_iter_search=1,
                 complexity=4,
                 # radius=None,
                 # distance=None,
                 nbits=20,
                 clustering_algorithm=None,
                 n_jobs=4,
                 n_blocks=8,
                 block_size=None,
                 pre_processor_n_jobs=4,
                 pre_processor_n_blocks=8,
                 pre_processor_block_size=None,
                 random_state=1,
                 muscle_obj=None,
                 weblogo_obj=None
                 ):
        """Initialize an EdenWrapper object."""
        self.sm = SequenceMotif(min_subarray_size=min_subarray_size,
                                max_subarray_size=max_subarray_size,
                                min_motif_count=min_motif_count,
                                min_cluster_size=min_cluster_size,
                                training_size=training_size,
                                negative_ratio=negative_ratio,
                                shuffle_order=shuffle_order,
                                n_iter_search=n_iter_search,
                                complexity=complexity,
                                # radius=radius,
                                # distance=distance,
                                nbits=nbits,
                                clustering_algorithm=clustering_algorithm,
                                n_jobs=n_jobs,
                                n_blocks=n_blocks,
                                block_size=block_size,
                                pre_processor_n_jobs=pre_processor_n_jobs,
                                pre_processor_n_blocks=pre_processor_n_blocks,
                                pre_processor_block_size=pre_processor_block_size,
                                random_state=random_state,
                                )
        self.alphabet = alphabet
        self.gap_in_alphabet = gap_in_alphabet
        self.scoring_criteria = scoring_criteria
        # Default threshold depends on the scoring scheme.
        if threshold is None:
            if scoring_criteria == 'pwm':
                self.threshold = 1.0e-9
            else:
                self.threshold = 0.8
        else:
            self.threshold = threshold
        self.k = k
        self.muscle_obj = muscle_obj
        self.weblogo_obj = weblogo_obj
        # Number of motives found
        self.nmotifs = 0
        # over-rides same attribute of MotifWrapper class
        self.pseudocounts = pseudocounts
        # list-of-strings representation of motifs
        self.original_motives_list = list()
        # aligned list-of-strings of motifs;
        # also created by display_logo method
        self.aligned_motives_list = list()
        # adapted motives with no gaps
        self.motives_list = list()
        # list of sequence logos created with WebLogo
        self.logos = list()

    def _get_motives_list(self, db):
        # Idiom fix: iterate values directly instead of indexing by key.
        return list(db.values())

    def _get_aligned_motives_list(self, motives):
        # Align each motif's member sequences with MUSCLE.
        ma = MuscleAlignWrapper()
        return [ma.transform(seqs=motif_seqs) for motif_seqs in motives]

    def fit(self, seqs, neg_seqs=None):
        """Find motives with EDeN.SequenceMotif."""
        self.sm.fit(seqs=seqs, neg_seqs=neg_seqs)
        # len() of the mapping itself, not of its key view.
        self.nmotifs = len(self.sm.motives_db)
        self.original_motives_list = self._get_motives_list(
            self.sm.motives_db)[:]
        self.aligned_motives_list = self._get_aligned_motives_list(
            self.original_motives_list)[:]
        self.motives_list = self.adapt_motives(
            self.aligned_motives_list)[:]
        # create PWMs
        aligned_motives_list = self.aligned_motives_list[:]
        super(EdenWrapper, self).fit(motives=aligned_motives_list)

    def fit_predict(self,
                    seqs,
                    neg_seqs=None,
                    return_list=False,
                    ):
        """Find motives and return motif occurence list."""
        self.fit(seqs=seqs, neg_seqs=neg_seqs)
        return self.predict(input_seqs=seqs, return_list=return_list)

    def fit_transform(self,
                      seqs,
                      neg_seqs=None,
                      return_match=False,
                      ):
        """Find motives and return motif match list."""
        self.fit(seqs=seqs, neg_seqs=neg_seqs)
        return self.transform(input_seqs=seqs, return_match=return_match)
| {
"repo_name": "fabriziocosta/pyMotif",
"path": "eden_wrapper.py",
"copies": "1",
"size": "5407",
"license": "mit",
"hash": 7395897665126314000,
"line_mean": 38.1811594203,
"line_max": 91,
"alpha_frac": 0.5010171999,
"autogenerated": false,
"ratio": 4.211059190031152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001660707100614177,
"num_lines": 138
} |
"""A eden.sequence_motif_decomposer.SequenceMotifDecomposer wrapper for comparison against Meme."""
from utilities import MotifWrapper, MuscleAlignWrapper
from eden.sequence_motif_decomposer import SequenceMotifDecomposer as SMoD
from sklearn.linear_model import SGDClassifier
from sklearn.cluster import MiniBatchKMeans
class SMoDWrapper(MotifWrapper):
    """Wrapper for EDeN SequenceMotifDecomposer.

    Adapts eden.sequence_motif_decomposer.SequenceMotifDecomposer to the
    MotifWrapper interface for comparison against Meme.
    """

    def __init__(self,
                 alphabet='dna',
                 gap_in_alphabet=True,
                 scoring_criteria='pwm',    # ["pwm","hmm"]
                 pseudocounts=0,
                 threshold=None,    # scoring threshold
                 k=1,
                 complexity=5,
                 n_clusters=10,
                 min_subarray_size=4,
                 max_subarray_size=10,
                 estimator=None,
                 clusterer=None,
                 pos_block_size=300,
                 neg_block_size=300,
                 n_jobs=-1,
                 p_value=0.05,
                 similarity_th=0.5,
                 min_score=4,
                 min_freq=0.5,
                 min_cluster_size=10,
                 regex_th=0.3,
                 sample_size=200,
                 freq_th=None,
                 std_th=None,
                 muscle_obj=None,
                 weblogo_obj=None):
        """Initialize a SMoDWrapper object."""
        # Bug fix: the original used `estimator=SGDClassifier(warm_start=True)`
        # and `clusterer=MiniBatchKMeans()` as default arguments; those
        # instances are created once at definition time and shared (including
        # any fitted state) across every wrapper using the defaults. Create a
        # fresh instance per call instead.
        if estimator is None:
            estimator = SGDClassifier(warm_start=True)
        if clusterer is None:
            clusterer = MiniBatchKMeans()
        self.smd = SMoD(complexity=complexity,
                        n_clusters=n_clusters,
                        min_subarray_size=min_subarray_size,
                        max_subarray_size=max_subarray_size,
                        estimator=estimator,
                        clusterer=clusterer,
                        pos_block_size=pos_block_size,
                        neg_block_size=neg_block_size,
                        n_jobs=n_jobs)
        self.p_value = p_value
        self.similarity_th = similarity_th
        self.min_score = min_score
        self.min_freq = min_freq
        self.min_cluster_size = min_cluster_size
        self.regex_th = regex_th
        self.sample_size = sample_size
        self.freq_th = freq_th
        self.std_th = std_th
        self.alphabet = alphabet
        self.gap_in_alphabet = gap_in_alphabet
        self.scoring_criteria = scoring_criteria
        # Default threshold depends on the scoring scheme.
        if threshold is None:
            if scoring_criteria == 'pwm':
                self.threshold = 1.0e-9
            else:
                self.threshold = 0.8
        else:
            self.threshold = threshold
        self.k = k
        self.muscle_obj = muscle_obj
        self.weblogo_obj = weblogo_obj
        # Number of motives found
        self.nmotifs = 0
        # over-rides same attribute of MotifWrapper class
        self.pseudocounts = pseudocounts
        # list-of-strings representation of motifs
        self.original_motives_list = list()
        # aligned list-of-strings of motifs;
        # also created by display_logo method
        self.aligned_motives_list = list()
        # adapted motives with no gaps
        self.motives_list = list()
        # list of sequence logos created by WebLogo
        self.logos = list()

    def _get_motives_list(self, db):
        # Each db entry is a dict; its 'seqs' member holds the motif sequences.
        return [entry['seqs'] for entry in db.values()]

    def _get_aligned_motives_list(self, motives):
        # Align each motif's member sequences with MUSCLE.
        ma = MuscleAlignWrapper()
        return [ma.transform(seqs=motif_seqs) for motif_seqs in motives]

    def fit(self, seqs, neg_seqs=None):
        """Find motives with SequenceMotifDecomposer."""
        if neg_seqs is None:
            # Default negatives: order-2 shuffles of the positive sequences.
            from eden.modifier.seq import seq_to_seq, shuffle_modifier
            neg_seqs = seq_to_seq(seqs, modifier=shuffle_modifier, times=1, order=2)
            neg_seqs = list(neg_seqs)
        self.smd = self.smd.fit(pos_seqs=seqs, neg_seqs=neg_seqs)
        try:
            motives = self.smd.select_motives(seqs=seqs,
                                              p_value=self.p_value,
                                              similarity_th=self.similarity_th,
                                              min_score=self.min_score,
                                              min_freq=self.min_freq,
                                              min_cluster_size=self.min_cluster_size,
                                              regex_th=self.regex_th,
                                              sample_size=self.sample_size,
                                              freq_th=self.freq_th,
                                              std_th=self.std_th)
        except AttributeError:
            raise AttributeError('No motives found.')
        # len() of the mapping itself, not of its key view.
        self.nmotifs = len(motives)
        self.original_motives_list = self._get_motives_list(motives)[:]
        self.aligned_motives_list = self._get_aligned_motives_list(
            self.original_motives_list)[:]
        self.motives_list = self.adapt_motives(
            self.aligned_motives_list)[:]
        # create PWMs
        super(SMoDWrapper, self).fit(motives=self.aligned_motives_list)

    def fit_predict(self,
                    seqs,
                    neg_seqs=None,
                    return_list=False,
                    ):
        """Find motives and return motif occurence list."""
        self.fit(seqs=seqs, neg_seqs=neg_seqs)
        return self.predict(input_seqs=seqs, return_list=return_list)

    def fit_transform(self,
                      seqs,
                      neg_seqs=None,
                      return_match=False,
                      ):
        """Find motives and return motif match list."""
        self.fit(seqs=seqs, neg_seqs=neg_seqs)
        return self.transform(input_seqs=seqs, return_match=return_match)
| {
"repo_name": "fabriziocosta/pyMotif",
"path": "smod_wrapper.py",
"copies": "1",
"size": "5881",
"license": "mit",
"hash": -3949292761035741000,
"line_mean": 37.9470198675,
"line_max": 99,
"alpha_frac": 0.5189593607,
"autogenerated": false,
"ratio": 4.0670816044260025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5086040965126003,
"avg_score": null,
"num_lines": null
} |
"""A entity class for mobile_app."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ICON, CONF_NAME, CONF_UNIQUE_ID, CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS,
ATTR_SENSOR_ICON,
ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE,
ATTR_SENSOR_UNIQUE_ID,
SIGNAL_SENSOR_UPDATE,
)
from .helpers import device_info
def unique_id(webhook_id, sensor_unique_id):
    """Return a unique sensor ID."""
    # Webhook id scoped with the sensor's own id, underscore-joined.
    return "{}_{}".format(webhook_id, sensor_unique_id)
class MobileAppEntity(RestoreEntity):
    """Representation of an mobile app entity.

    Holds the webhook-pushed sensor config dict and refreshes itself
    whenever a SIGNAL_SENSOR_UPDATE dispatch arrives with a matching
    unique id. State is restored across restarts via RestoreEntity.
    """

    def __init__(self, config: dict, device: DeviceEntry, entry: ConfigEntry):
        """Initialize the entity."""
        self._config = config  # sensor payload as registered via the webhook
        self._device = device
        self._entry = entry
        self._registration = entry.data
        self._unique_id = config[CONF_UNIQUE_ID]
        self._entity_type = config[ATTR_SENSOR_TYPE]
        self._name = config[CONF_NAME]

    async def async_added_to_hass(self):
        """Register callbacks."""
        # async_on_remove disconnects the dispatcher automatically on removal.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, SIGNAL_SENSOR_UPDATE, self._handle_update
            )
        )
        state = await self.async_get_last_state()
        if state is None:
            # Nothing persisted from a previous run; keep the webhook config.
            return
        self.async_restore_last_state(state)

    @callback
    def async_restore_last_state(self, last_state):
        """Restore previous state."""
        self._config[ATTR_SENSOR_STATE] = last_state.state
        # Merge order: current config attributes win over restored ones.
        self._config[ATTR_SENSOR_ATTRIBUTES] = {
            **last_state.attributes,
            **self._config[ATTR_SENSOR_ATTRIBUTES],
        }
        if ATTR_ICON in last_state.attributes:
            self._config[ATTR_SENSOR_ICON] = last_state.attributes[ATTR_ICON]

    @property
    def should_poll(self) -> bool:
        """Declare that this entity pushes its state to HA."""
        return False

    @property
    def name(self):
        """Return the name of the mobile app sensor."""
        return self._name

    @property
    def device_class(self):
        """Return the device class."""
        return self._config.get(ATTR_SENSOR_DEVICE_CLASS)

    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        return self._config[ATTR_SENSOR_ATTRIBUTES]

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._config[ATTR_SENSOR_ICON]

    @property
    def unique_id(self):
        """Return the unique ID of this sensor."""
        return self._unique_id

    @property
    def device_info(self):
        """Return device registry information for this entity."""
        return device_info(self._registration)

    @callback
    def _handle_update(self, data):
        """Handle async event updates."""
        incoming_id = unique_id(data[CONF_WEBHOOK_ID], data[ATTR_SENSOR_UNIQUE_ID])
        if incoming_id != self._unique_id:
            # Dispatch is for a different sensor; ignore.
            return
        # Merge so keys omitted from this push keep their previous values.
        self._config = {**self._config, **data}
        self.async_write_ha_state()
| {
"repo_name": "adrienbrault/home-assistant",
"path": "homeassistant/components/mobile_app/entity.py",
"copies": "2",
"size": "3357",
"license": "mit",
"hash": -4849492624514028000,
"line_mean": 30.3738317757,
"line_max": 85,
"alpha_frac": 0.63598451,
"autogenerated": false,
"ratio": 4.108935128518972,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021993148487388869,
"num_lines": 107
} |
"""A entity class for mobile_app."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import (ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS, ATTR_SENSOR_ICON,
ATTR_SENSOR_NAME, ATTR_SENSOR_TYPE, ATTR_SENSOR_UNIQUE_ID,
DOMAIN, SIGNAL_SENSOR_UPDATE)
from .helpers import device_info
def sensor_id(webhook_id, unique_id):
    """Return a unique sensor ID."""
    # Webhook id scoped with the sensor's own id, underscore-joined.
    return f"{webhook_id}_{unique_id}"
class MobileAppEntity(Entity):
    """Representation of an mobile app entity.

    Holds the webhook-pushed sensor config dict and refreshes itself
    whenever a SIGNAL_SENSOR_UPDATE dispatch arrives with a matching id.
    """

    def __init__(self, config: dict, device: DeviceEntry, entry: ConfigEntry):
        """Initialize the sensor."""
        self._config = config  # sensor payload as registered via the webhook
        self._device = device
        self._entry = entry
        self._registration = entry.data
        self._sensor_id = sensor_id(self._registration[CONF_WEBHOOK_ID],
                                    config[ATTR_SENSOR_UNIQUE_ID])
        self._entity_type = config[ATTR_SENSOR_TYPE]
        self.unsub_dispatcher = None  # set when added to hass

    async def async_added_to_hass(self):
        """Register callbacks."""
        self.unsub_dispatcher = async_dispatcher_connect(self.hass,
                                                         SIGNAL_SENSOR_UPDATE,
                                                         self._handle_update)

    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        if self.unsub_dispatcher is not None:
            self.unsub_dispatcher()

    @property
    def should_poll(self) -> bool:
        """Declare that this entity pushes its state to HA."""
        return False

    @property
    def name(self):
        """Return the name of the mobile app sensor."""
        return self._config[ATTR_SENSOR_NAME]

    @property
    def device_class(self):
        """Return the device class."""
        return self._config.get(ATTR_SENSOR_DEVICE_CLASS)

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._config[ATTR_SENSOR_ATTRIBUTES]

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._config[ATTR_SENSOR_ICON]

    @property
    def unique_id(self):
        """Return the unique ID of this sensor."""
        return self._sensor_id

    @property
    def device_info(self):
        """Return device registry information for this entity."""
        return device_info(self._registration)

    async def async_update(self):
        """Get the latest state of the sensor."""
        # Fallback refresh from hass.data; KeyError means the entry for this
        # sensor hasn't been pushed yet, so the current config is kept.
        data = self.hass.data[DOMAIN]
        try:
            self._config = data[self._entity_type][self._sensor_id]
        except KeyError:
            return

    @callback
    def _handle_update(self, data):
        """Handle async event updates."""
        incoming_id = sensor_id(data[CONF_WEBHOOK_ID],
                                data[ATTR_SENSOR_UNIQUE_ID])
        if incoming_id != self._sensor_id:
            # Dispatch is for a different sensor; ignore.
            return
        self._config = data
        self.async_schedule_update_ha_state()
| {
"repo_name": "jabesq/home-assistant",
"path": "homeassistant/components/mobile_app/entity.py",
"copies": "2",
"size": "3386",
"license": "apache-2.0",
"hash": -4612995344400242000,
"line_mean": 33.5510204082,
"line_max": 78,
"alpha_frac": 0.6086828116,
"autogenerated": false,
"ratio": 4.437745740498034,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6046428552098034,
"avg_score": null,
"num_lines": null
} |
"""aenumerate - enumerate for async for"""
import asyncio
from collections import abc
class aenumerate(abc.AsyncIterator):
    """enumerate for async for.

    Wraps an async iterable and yields (index, value) pairs, with the
    count starting at `start` (like the builtin enumerate).
    """

    def __init__(self, aiterable, start=0):
        self._aiterable = aiterable
        self._ait = None  # underlying async iterator, resolved lazily
        self._i = start - 1  # pre-incremented in __anext__

    def __aiter__(self):
        # Bug fix: __aiter__ must be a plain (non-coroutine) method returning
        # an async iterator. The original `async def __aiter__` that awaited
        # `self._aiterable.__aiter__()` was only accepted by early Python 3.5;
        # on modern interpreters `async for` rejects a coroutine __aiter__,
        # and __aiter__() itself returns a plain iterator, not an awaitable.
        self._ait = self._aiterable.__aiter__()
        return self

    async def __anext__(self):
        if self._ait is None:
            # Support calling __anext__ without an explicit __aiter__ first.
            self._ait = self._aiterable.__aiter__()
        # self._ait will raise the appropriate StopAsyncIteration
        val = await self._ait.__anext__()
        self._i += 1
        return self._i, val
# Example usage
async def iter_lines(host, port):
    """Iterator over lines from host:port, print them with line number"""
    # wtr (StreamWriter) is unused: this coroutine only reads.
    rdr, wtr = await asyncio.open_connection(host, port)
    # StreamReader iteration yields bytes lines, counted from 1 here.
    async for lnum, line in aenumerate(rdr, 1):
        line = line.decode().rstrip()
        print('[{}:{}] {:02d} {}'.format(host, port, lnum, line))
if __name__ == '__main__':
    from argparse import ArgumentParser

    parser = ArgumentParser(description='enumerate lines from TCP server')
    parser.add_argument('host', help='host to connect to')
    parser.add_argument('port', help='port to connect to', type=int)
    args = parser.parse_args()

    # NOTE(review): get_event_loop()/run_until_complete is the pre-3.7 idiom;
    # asyncio.run(iter_lines(args.host, args.port)) is the modern equivalent.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(iter_lines(args.host, args.port))
    loop.close()

# Run server: nc -lc -p 7654 < some-file
# (or on osx: nc -l 7654 < some-file)
| {
"repo_name": "tebeka/pythonwise",
"path": "aenumerate.py",
"copies": "1",
"size": "1396",
"license": "bsd-3-clause",
"hash": -8611413296447194000,
"line_mean": 30.0222222222,
"line_max": 74,
"alpha_frac": 0.6303724928,
"autogenerated": false,
"ratio": 3.664041994750656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4794414487550656,
"avg_score": null,
"num_lines": null
} |
import os
from math import pi
import numpy as np
import math
import matplotlib.pyplot as plt
from commonse.utilities import check_gradient_unit_test
from rotorse.rotoraero import Coefficients, SetupRunVarSpeed, \
RegulatedPowerCurve, AEP
from rotorse.rotoraerodefaults import RotorAeroVSVPWithCCBlade,GeometrySpline, \
CCBladeGeometry, CCBlade, CSMDrivetrain, WeibullCDF, \
WeibullWithMeanCDF, RayleighCDF
from rotorse.rotor import RotorSE
from rotorse.precomp import Profile, Orthotropic2DMaterial, CompositeSection
from drivese.hub import HubSE
from drivese.drive import Drive4pt
from WindDistribution import CalculateAEPConstantWind
from WindDistribution import CalculateAEPWeibull
################################################################################
### 1. Aerodynamic and structural performance using RotorSE
def EvaluateAEP(Diameter, HubHeight, RPM_Max):
    """Evaluate annual energy production for a rotor configuration.

    Builds a RotorSE variable-speed/variable-pitch rotor model from a set of
    fixed NREL 5MW-style blade/airfoil parameters, runs it, and integrates
    the resulting power curve against a Weibull wind distribution.

    Diameter : float, m -- rotor diameter (blade length is Diameter/2)
    HubHeight : float, m -- hub height above ground
    RPM_Max : float, rpm -- maximum allowed rotor rotation speed

    Returns the AEP value computed by CalculateAEPWeibull (units per that
    helper; the print statement says "MHW" -- presumably MWh, TODO confirm).
    """
    # Basic Rotor Model
    cdf_type = 'weibull'
    rotor = RotorAeroVSVPWithCCBlade(cdf_type)
    rotor.configure()
    # Define blade and chord length
    rotor.B = 3  # Number of blades (Do not change)
    rotor.r_max_chord = 0.23577  # (Float): location of second control point (generally also max chord)
    rotor.chord_sub = [3.2612, 4.5709, 3.3178, 1.4621]  # (Array, m): chord at control points
    rotor.theta_sub = [13.2783, 7.46036, 2.89317, -0.0878099]  # (Array, deg): twist at control points
    rotor.Rhub = 1.5  # (Float, m): hub radius
    rotor.precone = 2.5  # (Float, deg): precone angle
    rotor.tilt = -5.0  # (Float, deg): shaft tilt
    rotor.yaw = 0.0  # (Float, deg): yaw error
    # Hub height
    rotor.hubHt = HubHeight  # (Float, m)
    # Blade length (if not precurved or swept) otherwise length of blade before curvature
    rotor.bladeLength = Diameter/2  # (Float, m):
    # Radius to tip
    rotor.Rtip = rotor.bladeLength + rotor.Rhub  # (Float, m): tip radius (blade radius)
    # Rotor blade aerodynamic profiles... leave the same for now...
    basepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), \
        '5MW_AFFiles')
    # load all airfoils
    airfoil_types = [0]*8
    airfoil_types[0] = basepath + os.path.sep + 'Cylinder1.dat'
    airfoil_types[1] = basepath + os.path.sep + 'Cylinder2.dat'
    airfoil_types[2] = basepath + os.path.sep + 'DU40_A17.dat'
    airfoil_types[3] = basepath + os.path.sep + 'DU35_A17.dat'
    airfoil_types[4] = basepath + os.path.sep + 'DU30_A17.dat'
    airfoil_types[5] = basepath + os.path.sep + 'DU25_A17.dat'
    airfoil_types[6] = basepath + os.path.sep + 'DU21_A17.dat'
    airfoil_types[7] = basepath + os.path.sep + 'NACA64_A17.dat'
    # place at appropriate radial stations
    af_idx = [0, 0, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 7, 7, 7, 7]
    n = len(af_idx)
    af = [0]*n
    for i in range(n):
        af[i] = airfoil_types[af_idx[i]]
    rotor.airfoil_files = af  # (List): paths to AeroDyn-style airfoil files
    # (Array, m): locations where airfoils are defined on unit radius
    rotor.r_af = np.array([0.02222276, 0.06666667, 0.11111057, 0.16666667, \
        0.23333333, 0.3, 0.36666667, 0.43333333, 0.5, 0.56666667, 0.63333333, 0.7, \
        0.76666667, 0.83333333, 0.88888943, 0.93333333, 0.97777724])
    rotor.idx_cylinder = 3  # (Int): index in r_af where cylinder section ends
    # Wind Parameters are specified here !!!!!!!!!!!!!
    rotor.rho = 1.225  # (Float, kg/m**3): density of air
    rotor.mu = 1.81206e-5  # (Float, kg/m/s): dynamic viscosity of air
    # Shear Exponent
    rotor.shearExp = 0.143  # 0.2 # (Float): shear exponent!!!!!!!!!!!!!!!!!
    rotor.cdf_mean_wind_speed = 6.0  # (Float, m/s): mean wind speed of site cumulative distribution function
    rotor.weibull_shape_factor = 2.0  # (Float): shape factor of weibull distribution
    # Rotor fixed design parameters !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    rotor.control.Vin = 3.0  # (Float, m/s): cut-in wind speed in
    rotor.control.Vout = 30.0  # 25.0 # (Float, m/s): cut-out wind speed !!!!!!!!!!!!!!
    # Rated power should be
    rotor.control.ratedPower = 1.5e6  # (Float, W): rated power !!!!!!!!!!!!!!
    rotor.control.pitch = 0.0  # (Float, deg): pitch angle in region 2 (and region 3 for fixed pitch machines)
    rotor.control.minOmega = 0.0  # (Float, rpm): minimum allowed rotor rotation speed
    rotor.control.maxOmega = RPM_Max  # 12.0 # (Float, rpm): maximum allowed rotor rotation speed!!!!!!!!!!!!!!!!
    rotor.control.tsr = 7  # **dv** (Float): tip-speed ratio in Region 2 (should be !!!!!!!!!!!!!!
    rotor.nSector = 4  # (Int): number of sectors to divide rotor face into in computing thrust and power
    # Calculation
    rotor.npts_coarse_power_curve = 20  # (Int): number of points to evaluate aero analysis at
    rotor.npts_spline_power_curve = 200  # (Int): number of points to use in fitting spline to power curve
    rotor.AEP_loss_factor = 1.0  # (Float): availability and other losses (soiling, array, etc.)
    # Energy loss estimates
    rotor.tiploss = False  # (Bool): include Prandtl tip loss model
    rotor.hubloss = True  # (Bool): include Prandtl hub loss model
    rotor.wakerotation = True  # (Bool): include effect of wake rotation (i.e., tangential induction factor is nonzero)
    rotor.usecd = True  # (Bool): use drag coefficient in computing induction factors
    # No effect on AEP
    rotor.VfactorPC = 0.7  # (Float): fraction of rated speed at which the deflection is assumed to representative throughout the power curve calculation
    # Run to calculate rotor parameters
    rotor.run()
    # Get Power Curve (rotor.P in W, scaled here to MW)
    PowerCurve = rotor.P/1e6
    PowerCurveVelocity = rotor.V
    # RPM Curve
    #AEP = CalculateAEPConstantWind(PowerCurve, PowerCurveVelocity, 7.5)
    # Weibull Wind Parameters (reference site conditions for shear scaling)
    WindReferenceHeight = 30
    WindReferenceMeanVelocity = 6
    ShearFactor = 0.2
    AEP = CalculateAEPWeibull(PowerCurve, PowerCurveVelocity, HubHeight, \
        rotor.weibull_shape_factor, WindReferenceHeight, \
        WindReferenceMeanVelocity, ShearFactor)
    print "Diameter %d m and hub height of %d m yields AEP is %f MHW " %(Diameter,HubHeight,AEP)
    return AEP
| {
"repo_name": "lewisli/wind-turbine-mdo",
"path": "AEPEvaluator.py",
"copies": "1",
"size": "6121",
"license": "mit",
"hash": 9125375081895629000,
"line_mean": 38.7467532468,
"line_max": 150,
"alpha_frac": 0.6969449436,
"autogenerated": false,
"ratio": 2.803939532753092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4000884476353092,
"avg_score": null,
"num_lines": null
} |
"""Aerial position model."""
import logging
from auvsi_suas.models import distance
from auvsi_suas.models.gps_position import GpsPositionMixin
from django.contrib import admin
from django.core import validators
from django.db import models
logger = logging.getLogger(__name__)
# Sanity bounds (feet MSL) applied to stored altitudes via Django validators.
ALTITUDE_MSL_FT_MIN = -2000  # Lowest point on earth with buffer.
ALTITUDE_MSL_FT_MAX = 396000  # Edge of atmosphere.
ALTITUDE_VALIDATORS = [
    validators.MinValueValidator(ALTITUDE_MSL_FT_MIN),
    validators.MaxValueValidator(ALTITUDE_MSL_FT_MAX),
]
class AerialPositionMixin(GpsPositionMixin):
    """Aerial position mixin for adding a GPS position and altitude.

    Extends GpsPositionMixin (which supplies latitude/longitude) with an
    MSL altitude field and 3D distance/equality helpers.
    """

    # Altitude (MSL) in feet.
    altitude_msl = models.FloatField(validators=ALTITUDE_VALIDATORS)

    class Meta:
        abstract = True

    def distance_to(self, other):
        """Computes distance to another position.

        Args:
            other: The other position.
        Returns:
            Distance in feet.
        """
        # Delegates the 3D distance math to the distance module.
        return distance.distance_to(self.latitude, self.longitude,
                                    self.altitude_msl, other.latitude,
                                    other.longitude, other.altitude_msl)

    def duplicate(self, other):
        """Determines whether this AerialPosition is equivalent to another.

        This differs from the Django __eq__() method which simply compares
        primary keys. This method compares the field values.

        Args:
            other: The other position for comparison.
        Returns:
            True if they are equal.
        """
        # Field-wise equality: GPS fields via the parent mixin, plus altitude.
        return (super(AerialPositionMixin, self).duplicate(other)
                and self.altitude_msl == other.altitude_msl)
class AerialPosition(AerialPositionMixin):
    """Aerial position object.

    Concrete model: inherits latitude/longitude/altitude_msl fields and
    helpers from the abstract mixin without adding anything.
    """
    pass
@admin.register(AerialPosition)
class AerialPositionModelAdmin(admin.ModelAdmin):
    """Django admin configuration for AerialPosition."""

    # Skip the full COUNT(*) query on filtered changelist pages.
    show_full_result_count = False
    list_display = ('pk', 'latitude', 'longitude', 'altitude_msl')
| {
"repo_name": "auvsi-suas/interop",
"path": "server/auvsi_suas/models/aerial_position.py",
"copies": "1",
"size": "1980",
"license": "apache-2.0",
"hash": 7287422018853348000,
"line_mean": 29.9375,
"line_max": 75,
"alpha_frac": 0.6671717172,
"autogenerated": false,
"ratio": 4.008097165991903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 64
} |
"""
This module contains a library of classes devoted to modeling aircraft parts.
The main purpose of this library is to model various types of aircraft parts.
Currently only wing objects are suported, however in the future it is possible
that fuselages as well as other parts will be added.
:SUMARRY OF THE METHODS:
- `K`: The kernel function used in the doublet-lattice method to relate
downwashes to panel pressures.
- `calcAIC`: Provided several vectors of numbers as well as a reduced frequency
and mach number, this method calculates a matrix of AIC's using doublet-
lattice method elementary solutions. This method is used by the FEM class
flutterAnalysis method.
:SUMARRY OF THE CLASSES:
- `Airfoil`: Primarily used for the generation of structural cross-sectional
meshes, this class represent an airfoil. This class could be expanded in
future to use simple 2D panel methods for an airfoil of arbitrary shape.
- `CQUADA`: This class creates quadrilateral panels intended to be used for
potential flow panel methods. Currently it is used for the unsteady
doublet-lattice panels.
- `CAERO1`: This class is used to generate a lattice of CQUADA panels.
"""
__docformat__ = 'restructuredtext'
# =============================================================================
# IMPORT ANACONDA ASSOCIATED MODULES
# =============================================================================
import numpy as np
#import mayavi.mlab as mlab
from numba import jit
# =============================================================================
# IMPORT ADDITIONAL MODULES
# =============================================================================
from tabulate import tabulate
# =============================================================================
# DEFINE AeroComBAT AERODYNAMIC CLASSES
# =============================================================================
# Constants for the later summation/integration: coefficients of the
# exponential-series approximation used by the kernel integrals I0/J0 below.
# `an` are the series coefficients, `c` the exponent scale, and `n` the term
# indices 1..11 (presumably a Desmarais-type fit -- TODO confirm source).
an = np.array([.24186198,-2.7918027,24.991079,-111.59196,271.43549,-305.75288,\
    -41.18363,545.98537,-644.78155,328.72755,-64.279511])
c = 0.372
n = np.array(range(1,12))
# Define Kernel Function Here
@jit(nopython=True)
def K(Xr,Xs,gamma_r,gamma_s,M,br,kr,r1):
    """Evaluates the doublet-lattice kernel function.
    Provided several geometric parameters about the sending and recieving
    panels, this method evaluates the kernel function which relates the
    pressure on one panel to the downwash induced at another panel.
    :Args:
    - `Xr (1x3 np.array[float])`: The location of the recieving point.
    - `Xs (1x3 np.array[float])`: The location of the sending point.
    - `gamma_r (1x3 np.array[float])`: The dihedral of the panel corresponding
    to the recieving point.
    - `gamma_s (1x3 np.array[float])`: The dihedral of the panel corresponding
    to the sending point.
    - `M (float)`: The mach number
    - `br (float)`: The reference semi-chord
    - `kr (float)`: The reduced frequency
    - `r1 (float)`: The scalar distance between the sending point and the
    recieving point.
    :Returns:
    - `Kbar (complex128)`: The evaluation of the unsteady kernel function which
    is complex in nature.
    """
    # Vector pointing from sending node to recieving node
    x0 = Xr[0]-Xs[0]
    y0 = Xr[1]-Xs[1]
    z0 = Xr[2]-Xs[2]
    # Check if r1 is very small (points nearly on the same doublet line):
    # the kernel then reduces to its limit value, nonzero only downstream
    # (x0 > 0) of the sending point.
    if abs(r1)<br/10000.:
        if x0>0:
            return 2*np.exp(-1j*x0*kr/br)
        else:
            return 0.
    # Prandtl-Glauert Compressability Correction Factor
    beta = (1-M**2)**(0.5)
    # Reduced Frequency (based on the separation r1 instead of br)
    k1 = r1*kr/br
    # R: distance in the Prandtl-Glauert scaled space
    R = np.sqrt(x0**2+beta**2*r1**2)
    # If r1 is not very small: u1 is the lower limit of the kernel integrals
    u1 = (M*R-x0)/(beta**2*r1)
    # T1/T2: direction-cosine factors coupling the two panels' dihedrals
    T1 = np.cos(gamma_r-gamma_s)
    T2 = (z0*np.cos(gamma_r)-y0*np.sin(gamma_r))\
        *(z0*np.cos(gamma_s)-y0*np.sin(gamma_s))/r1**2
    # K1: planar part of the kernel; skipped when its weight T1 vanishes
    if abs(T1)<1e-6:
        K1=0.
    else:
        # For negative u1 the integral is rebuilt from its values at 0 and
        # at -u1 (symmetry of the integrand about u1 = 0).
        if u1>=0:
            I1_val = I1(u1,k1)
        else:
            I1_val = 2*I1(0,k1).real\
                -I1(-u1,k1).real+1j*I1(-u1,k1).imag
        K1 = I1_val+M*r1*np.exp(-1j*k1*u1)/(R*np.sqrt(1+u1**2))
    # K2: non-planar part of the kernel; skipped when its weight T2 vanishes
    if abs(T2)<1e-6:
        K2 = 0.
    else:
        if u1>=0:
            I2_3_val = I2_3(u1,k1)
        else:
            I2_3_val = 2*I2_3(0,k1).real\
                -I2_3(-u1,k1).real+1j*I2_3(-u1,k1).imag
        K2 = -I2_3_val\
            -1j*k1*M**2*r1**2/R**2*np.exp(-1j*k1*u1)/(1+u1**2)**(0.5)\
            -M*r1/R*((1+u1**2)*beta**2*r1**2/R**2+2+M*r1*u1/R)\
            *np.exp(-1j*k1*u1)/(1+u1**2)**(1.5)
    return np.exp(-1j*kr*x0/br)*(K1*T1+K2*T2)#-T1*(1.+x0/R)-T2*(-2-x0/R*(2+beta**2*r1**2/R**2))
# Definition of I0 integral
@jit(nopython=True)
def I0(u1,k1):
    """Exponential-series approximation of the I0 kernel integral."""
    # Explicit accumulation loop (rather than np.dot) keeps this compatible
    # with numba's nopython mode.
    total = 0.
    for idx in range(len(n)):
        total += an[idx]*np.exp(-n[idx]*c*u1)/(n[idx]**2*c**2+k1**2)*(n[idx]*c-1j*k1)
    return total
# Definition of I1 integral
@jit(nopython=True)
def I1(u1,k1):
    """I1 kernel integral, built on the I0 series approximation."""
    phase = np.exp(-1j*k1*u1)
    bracket = 1-u1/np.sqrt(1+u1**2)-1j*k1*I0(u1,k1)
    return phase*bracket
# Definition of J0 integral
@jit(nopython=True)
def J0(u1,k1):
    """Exponential-series approximation of the J0 kernel integral."""
    # Explicit accumulation loop (rather than np.dot) keeps this compatible
    # with numba's nopython mode.
    total = 0.
    for idx in range(len(n)):
        total += an[idx]*np.exp(-n[idx]*c*u1)/(n[idx]**2*c**2+k1**2)**2\
            *(n[idx]**2*c**2-k1**2+n[idx]*c*u1*(n[idx]**2*c**2+k1**2)\
            -1j*k1*(2*n[idx]*c+u1*(n[idx]**2*c**2+k1**2)))
    return total
# Definition of 3*I2 integral
@jit(nopython=True)
def I2_3(u1,k1):
    """Three times the I2 kernel integral, built on I0 and J0."""
    bracket = (2+1j*k1*u1)*(1-u1/(1+u1**2)**(0.5))
    bracket = bracket-u1/(1+u1**2)**(1.5)
    bracket = bracket-1j*k1*I0(u1,k1)
    bracket = bracket+k1**2*J0(u1,k1)
    return np.exp(-1j*k1*u1)*bracket
# Functions for JIT calcAIC Method
@jit(nopython=True)
def eta(yr,ys,zr,zs,gamma_s):
    """Spanwise offset of the recieving point in the sending panel's frame."""
    dy = yr-ys
    dz = zr-zs
    return dy*np.cos(gamma_s)+dz*np.sin(gamma_s)
@jit(nopython=True)
def zeta(yr,ys,zr,zs,gamma_s):
    """Normal offset of the recieving point in the sending panel's frame."""
    dy = yr-ys
    dz = zr-zs
    return dz*np.cos(gamma_s)-dy*np.sin(gamma_s)
@jit(nopython=True)
def I_plan(A,B,C,e,eta_0):
    """Planar-case spanwise integral of the parabolic kernel fit A,B,C."""
    quad = eta_0**2*A+eta_0*B+C
    frac = 1./(eta_0-e)-1./(eta_0+e)
    lin = B/2+eta_0*A
    logterm = np.log(((eta_0-e)/(eta_0+e))**2)
    return quad*frac+lin*logterm
@jit(nopython=True)
def I_nonplan(A,B,C,e,eta_0,zeta_0,r1):
    """Non-planar-case spanwise integral of the parabolic kernel fit A,B,C."""
    quad = ((eta_0**2-zeta_0**2)*A+eta_0*B+C)*zeta_0**(-1)
    atanterm = np.arctan(2*e*abs(zeta_0)/(r1**2-e**2))
    lin = B/2+eta_0*A
    logterm = np.log((r1**2-2*eta_0*e+e**2)/\
        (r1**2+2*eta_0*e+e**2))
    return quad*atanterm+lin*logterm+2*e*A
@jit(nopython=True)
def calcAIC(M,kr,br,delta_x_vec,sweep_vec,l_vec,dihedral_vec,Xr_vec,Xi_vec,Xc_vec,\
        Xo_vec,symxz=False):
    """Calculate the doublet-lattice AIC's.

    Provided the geometry of all of the doublet-lattice panels, this method
    calculates the AIC matrix.

    :Args:
    - `M (float)`: The mach number.
    - `kr (float)`: The reduced frequency.
    - `br (float)`: The reference semi-chord.
    - `delta_x_vec (1xN array[float])`: An array of chord lengths of the panels.
    - `sweep_vec (1xN array[float])`: An array of sweep angles of the panels.
    - `l_vec (1xN array[float])`: An array of average doublet line lengths of
      the panels.
    - `dihedral_vec (1xN array[float])`: An array of dihedral angles of the
      panels.
    - `Xr_vec (Nx3 np.array[float])`: A matrix of recieving points; a row
      holds the 3D coordinates of a point.
    - `Xi_vec (Nx3 np.array[float])`: A matrix of inboard sending points.
    - `Xc_vec (Nx3 np.array[float])`: A matrix of center sending points.
    - `Xo_vec (Nx3 np.array[float])`: A matrix of outboard sending points.
    - `symxz (bool)`: Whether a reflection of the panels over the xz-plane
      should also be included (symmetric half-model).

    :Returns:
    - `D (NPANxNPAN np.array[complex128])`: The matrix which relates pressures
      over panels to induced velocities over those panels. In more simple
      terms, this is the inverse of the desired AIC matrix.

    .. Note:: BUG FIX -- the `symxz` branch previously negated the y entries
       of `Xi_vec`/`Xc_vec`/`Xo_vec` IN PLACE (through row views), which both
       corrupted the caller's arrays and left every receiving-panel iteration
       after the first one computing the direct kernel with already-reflected
       sending points. The reflection is now done on local copies.
    """
    # Initialize the number of panels
    numPan = len(Xr_vec)
    # Initialize the complex [D] matrix
    D = np.zeros((numPan,numPan),dtype=np.complex128)
    # For all the recieving panels
    for i in range(0,numPan):
        Xr = Xr_vec[i,:]
        gamma_r = dihedral_vec[i]
        # For all the sending panels
        for j in range(0,numPan):
            # Average chord of sending box
            delta_x_j = delta_x_vec[j]
            # Sweep of sending box
            lambda_j = sweep_vec[j]
            # Length of the doublet line on sending box
            l_j = l_vec[j]
            Xi = Xi_vec[j,:]
            Xc = Xc_vec[j,:]
            Xo = Xo_vec[j,:]
            # Half-length of the doublet line projected normal to the sweep
            e = 0.5*l_j*np.cos(lambda_j)
            gamma_s = dihedral_vec[j]
            # Local (spanwise, normal) offsets of the recieving point
            eta_0 = eta(Xr[1],Xc[1],Xr[2],Xc[2],gamma_s)
            zeta_0 = zeta(Xr[1],Xc[1],Xr[2],Xc[2],gamma_s)
            r1 = np.sqrt(eta_0**2+zeta_0**2)
            # Kernel function at the inboard, middle, and outboard locations
            Ki = K(Xr,Xi,gamma_r,gamma_s,M,br,kr,r1)
            Kc = K(Xr,Xc,gamma_r,gamma_s,M,br,kr,r1)
            Ko = K(Xr,Xo,gamma_r,gamma_s,M,br,kr,r1)
            # Parabolic fit K(eta) ~ A*eta^2 + B*eta + C along the line
            A = (Ki-2*Kc+Ko)/(2*e**2)
            B = (Ko-Ki)/(2*e)
            C = Kc
            # Determine if planar or non-planar I_ij definition should be used
            if abs(zeta_0)<1e-6:
                I_ij = I_plan(A,B,C,e,eta_0)
            else:
                I_ij = I_nonplan(A,B,C,e,eta_0,zeta_0,r1)
            D[i,j]=delta_x_j*np.cos(lambda_j)/(8.*np.pi)*I_ij
            if symxz:
                # Mirror the sending box over the xz-plane using LOCAL
                # copies; do not mutate the caller's input arrays.
                gamma_m = -gamma_s
                Xi_m = Xi.copy()
                Xi_m[1] = -Xi_m[1]
                Xc_m = Xc.copy()
                Xc_m[1] = -Xc_m[1]
                Xo_m = Xo.copy()
                Xo_m[1] = -Xo_m[1]
                eta_0 = eta(Xr[1],Xc_m[1],Xr[2],Xc_m[2],gamma_m)
                zeta_0 = zeta(Xr[1],Xc_m[1],Xr[2],Xc_m[2],gamma_m)
                r1 = np.sqrt(eta_0**2+zeta_0**2)
                Ki = K(Xr,Xi_m,gamma_r,gamma_m,M,br,kr,r1)
                Kc = K(Xr,Xc_m,gamma_r,gamma_m,M,br,kr,r1)
                Ko = K(Xr,Xo_m,gamma_r,gamma_m,M,br,kr,r1)
                A = (Ki-2*Kc+Ko)/(2*e**2)
                B = (Ko-Ki)/(2*e)
                C = Kc
                # Planar or non-planar I_ij definition for the mirror image
                if abs(zeta_0)<1e-6:
                    I_ij = I_plan(A,B,C,e,eta_0)
                else:
                    I_ij = I_nonplan(A,B,C,e,eta_0,zeta_0,r1)
                # cos(-lambda) == cos(lambda), so lambda_j is used directly
                D[i,j]+=delta_x_j*np.cos(lambda_j)/(8.*np.pi)*I_ij
    return D
class Airfoil:
    """An airfoil outer mold line.

    Instances of this class are primarily consumed by the cross-sectional
    mesh generators. Supported geometries are the NACA 4-series airfoils
    (e.g. 'NACA2412') and a plain rectangular 'box' section.

    :Attributes:
    - `c (float)`: The chord length of the airfoil.
    - `t (float)`: The max percent thickness (NACA airfoils only).
    - `p (float)`: The chordwise station of max camber in tenths of chord
      (NACA airfoils only).
    - `m (float)`: The max camber as a fraction of chord (NACA airfoils only).

    :Methods:
    - `points`: Generates the x and y upper and lower coordinates of the
      airfoil.
    - `printSummary`: Prints the airfoil name and chord length.
    """
    def __init__(self,c,**kwargs):
        """Initialize the airfoil.

        :Args:
        - `c (float)`: The chord length of the airfoil.
        - `name (str)`: Either 'NACAXXXX' (a 4-series code) or 'box' for a
          rectangular OML. Defaults to 'NACA0012'.

        :Returns:
        - None
        """
        self.c = c
        name = kwargs.pop('name','NACA0012')
        if name!='box':
            # Decode the 4-series digits: thickness, camber station, camber.
            self.t = float(name[-2:])/100
            self.p = float(name[-3])/10
            self.m = float(name[-4])/100
        self.name = name
    def points(self,x):
        """Generate the upper and lower airfoil curves.

        :Args:
        - `x (1xN np.array[float])`: Non-dimensional chordwise stations
          (0 to 1) at which the curves are evaluated.

        :Returns:
        - `xu (1xN np.array[float])`: The upper x-coordinates of the curve.
        - `yu (1xN np.array[float])`: The upper y-coordinates of the curve.
        - `xl (1xN np.array[float])`: The lower x-coordinates of the curve.
        - `yl (1xN np.array[float])`: The lower y-coordinates of the curve.
        """
        # Scale the non-dimensional stations by the chord
        x = x*self.c
        if self.name=='box':
            # A box section is just two horizontal lines at +/- c/2
            half = self.c*np.ones(len(x))/2
            return x,half,x,-half
        # NACA 4-series closed form;
        # see: https://en.wikipedia.org/wiki/NACA_airfoil
        c = self.c
        t = self.t
        m = self.m
        p = self.p
        # Thickness distribution about the camber line
        yt = 5*t*c*(0.2969*np.sqrt(x/c)-.126*(x/c)-.3516*(x/c)**2+.2843*(x/c)**3-.1015*(x/c)**4)
        # Camber line: one polynomial fore of max camber, another aft
        fore = x[x<c*p]
        aft = x[x>=c*p]
        if len(fore)>0:
            yc_fore = (m*fore/p**2)*(2*p-fore/c)
            slope_fore = (2*m/p**2)*(p-fore/c)
        else:
            yc_fore = []
            slope_fore = []
        if len(aft)>0:
            yc_aft = m*((c-aft)/(1-p)**2)*(1+aft/c-2*p)
            slope_aft = (2*m/(1-p)**2)*(p-aft/c)
        else:
            yc_aft = []
            slope_aft = []
        yc = np.append(yc_fore,yc_aft)
        # Offset the camber line by the thickness, normal to the camber line
        th = np.arctan(np.append(slope_fore,slope_aft))
        xu = x-yt*np.sin(th)
        yu = yc+yt*np.cos(th)
        xl = x+yt*np.sin(th)
        yl = yc-yt*np.cos(th)
        return xu,yu,xl,yl
    def printSummary(self,x):
        """Print the airfoil name and chord length.

        :Args:
        - `x`: Unused; retained for backwards compatibility.

        :Returns:
        - None
        """
        print('Airfoil name: %s' %(self.name))
        print('Airfoil Chord length: %4.4f' %(self.c))
class CQUADA:
    """Represents a CQUADA aerodynamic panel.

    This CQUADA panel object is used for the unsteady aerodynamic doublet-
    lattice method currently, although it could likely easily be extended to
    support the vortex lattice method as well.

    :Attributes:
    - `type (str)`: The type of object.
    - `PANID (int)`: The integer ID linked with the panel.
    - `xs, ys, zs (1x4 list[float])`: The x, y and z coordinates of the four
      corner points of the panel.
    - `DOF (dict[NID,factor])`: Connects the movement of the panel to the
      movement of an associated structure: node IDs map to finite-element
      interpolation factors.
    - `Area (float)`: The area of the panel.
    - `sweep (float)`: The average sweep of the panel's doublet line.
    - `delta_x (float)`: The average chord line of the panel.
    - `l (float)`: The length of the panel's doublet line.
    - `dihedral (float)`: The dihedral of the panel.
    - `Xr (1x3 np.array[float])`: The panel's recieving (collocation) point
      at mid-span, xi = +.5.
    - `Xi (1x3 np.array[float])`: The inboard sending point (xi = -.5).
    - `Xc (1x3 np.array[float])`: The center sending point (xi = -.5).
    - `Xo (1x3 np.array[float])`: The outboard sending point (xi = -.5).

    :Methods:
    - `x`, `y`, `z`: Map the master coordinates (eta, xi), each in [-1, 1],
      to physical coordinates.
    - `J`: Returns the jacobian matrix at a master-coordinate point; used to
      integrate the panel area.
    - `printSummary`: Prints a summary of the panel.

    .. Note:: The corner arrays follow a finite element convention; matching
       CAERO1's usage the corners are ordered root leading edge, root
       trailing edge, tip trailing edge, tip leading edge.
    """
    def __init__(self,PANID,xs):
        """Initializes the panel.

        Generates the geometric properties required by the doublet-lattice
        method (area, sweep, chord, doublet-line length, dihedral and the
        sending/recieving points).

        :Args:
        - `PANID (int)`: The integer ID associated with the panel.
        - `xs (1x4 array[1x3 np.array[float]])`: The coordinates of the four
          corner points of the elements.

        :Returns:
        - None

        :Raises:
        - `TypeError`: If PANID is not an int.
        - `ValueError`: If xs does not hold exactly 4 corner points.
        """
        # Initialize type
        self.type = 'CQUADA'
        # Error checking on EID input
        if type(PANID) is int:
            self.PANID = PANID
        else:
            raise TypeError('The element ID given was not an integer')
        if not len(xs) == 4:
            # BUG FIX: the original applied '%' only to the second string
            # literal (which has no placeholder), so this raised TypeError
            # instead of the intended ValueError. It also said "CQUAD4".
            raise ValueError('A CQUADA element requires 4 coordinates, %d '
                             'were supplied in the nodes array' % (len(xs)))
        # Split the corner points into per-axis coordinate lists
        self.xs = []
        self.ys = []
        self.zs = []
        for corner in xs:
            self.xs+=[corner[0]]
            self.ys+=[corner[1]]
            self.zs+=[corner[2]]
        self.DOF = {}
        # Integrate the panel area with 2x2 Gauss quadrature
        self.Area = 0
        etas = np.array([-1,1])*np.sqrt(3)/3
        xis = np.array([-1,1])*np.sqrt(3)/3
        for k in range(0,np.size(xis)):
            for l in range(0,np.size(etas)):
                Jmat = self.J(etas[l],xis[k])
                # Determinant of the Jacobian gives the local area scale
                Jdet = abs(np.linalg.det(Jmat))
                self.Area += Jdet
        # Average sweep angle taken along the leading edge (xi = -1)
        xtmp = self.x(1,-1)-self.x(-1,-1)
        ytmp = self.y(1,-1)-self.y(-1,-1)
        # NOTE(review): arctan(x/y) fails if the edge has no spanwise extent
        # (ytmp == 0); confirm panels are always span-oriented.
        sweep = np.arctan(xtmp/ytmp)
        if abs(sweep)<1e-3:
            sweep = 0.
        self.sweep = sweep
        # Average chord length (trailing edge minus leading edge at mid-span)
        self.delta_x = self.x(0,1)-self.x(0,-1)
        # Length of the doublet line
        # NOTE(review): the length is sampled at xi = +.5 while the sending
        # points below sit at xi = -.5; confirm this is intended for
        # tapered panels.
        xtmp = self.x(1,.5)-self.x(-1,.5)
        ytmp = self.y(1,.5)-self.y(-1,.5)
        ztmp = self.z(1,.5)-self.z(-1,.5)
        self.l = np.linalg.norm([xtmp,ytmp,ztmp])
        # Panel dihedral angle
        dihedral = np.arctan(ztmp/ytmp)
        if abs(dihedral)<1e-3:
            dihedral = 0.
        self.dihedral = dihedral
        # Recieving point (mid-span, xi = +.5) and sending points (xi = -.5)
        self.Xr = np.array([self.x(0.,.5),self.y(0.,.5),\
            self.z(0.,.5)])
        # BUG FIX: the y-coordinate of Xi previously used xi = +.5 while its
        # x and z used xi = -.5; all three now consistently use xi = -.5,
        # matching Xc and Xo.
        self.Xi = np.array([self.x(-1,-.5),self.y(-1,-.5),\
            self.z(-1,-.5)])
        self.Xc = np.array([self.x(0.,-.5),self.y(0.,-.5),\
            self.z(0.,-.5)])
        self.Xo = np.array([self.x(1.,-.5),self.y(1.,-.5),\
            self.z(1.,-.5)])
    def x(self,eta,xi):
        """Return the x-coordinate at master coordinates (eta, xi).

        Both eta and xi vary over [-1, 1].
        """
        xs = self.xs
        return .25*(xs[0]*(1.-xi)*(1.-eta)+xs[1]*(1.+xi)*(1.-eta)+\
            xs[2]*(1.+xi)*(1.+eta)+xs[3]*(1.-xi)*(1.+eta))
    def y(self,eta,xi):
        """Return the y-coordinate at master coordinates (eta, xi).

        Both eta and xi vary over [-1, 1].
        """
        ys = self.ys
        return .25*(ys[0]*(1.-xi)*(1.-eta)+ys[1]*(1.+xi)*(1.-eta)+\
            ys[2]*(1.+xi)*(1.+eta)+ys[3]*(1.-xi)*(1.+eta))
    def z(self,eta,xi):
        """Return the z-coordinate at master coordinates (eta, xi).

        Both eta and xi vary over [-1, 1].
        """
        zs = self.zs
        return .25*(zs[0]*(1.-xi)*(1.-eta)+zs[1]*(1.+xi)*(1.-eta)+\
            zs[2]*(1.+xi)*(1.+eta)+zs[3]*(1.-xi)*(1.+eta))
    def J(self,eta,xi):
        """Return the 3x3 jacobian of the bilinear map at (eta, xi).

        The last row is the (assumed) unit normal of the panel; used only to
        integrate the panel area.
        """
        #TODO: Add support for panels not in x-y plane
        xs = self.xs
        ys = self.ys
        J11 = 0.25*(-xs[0]*(1-eta)+xs[1]*(1-eta)+xs[2]*(1+eta)-xs[3]*(1+eta))
        J12 = 0.25*(-ys[0]*(1-eta)+ys[1]*(1-eta)+ys[2]*(1+eta)-ys[3]*(1+eta))
        J21 = 0.25*(-xs[0]*(1-xi)-xs[1]*(1+xi)+xs[2]*(1+xi)+xs[3]*(1-xi))
        J22 = 0.25*(-ys[0]*(1-xi)-ys[1]*(1+xi)+ys[2]*(1+xi)+ys[3]*(1-xi))
        # z-derivatives omitted: the panel is treated as lying in the
        # x-y plane, so the last row is the z unit normal.
        Jmat = np.array([[J11,J12,0],[J21,J22,0],[0,0,1]])
        return Jmat
    def printSummary(self):
        """Print the panel ID and its four corner coordinates.

        Requires the third-party `tabulate` package imported at module level.

        :Returns:
        - None
        """
        print('CQUADA Summary:')
        print('PANID: %d' %(self.PANID))
        headers = ('Coordinates','x','y','z')
        tabmat = np.zeros((4,4),dtype=object)
        tabmat[:,0] = np.array(['Point 1','Point 2','Point 3','Point 4'])
        for i in range(len(self.xs)):
            tabmat[i,1:] = np.array([self.xs[i],self.ys[i],self.zs[i]])
        print(tabulate(tabmat,headers,tablefmt="fancy_grid"))
class CAERO1:
    """Represents an aerodynamic lifting surface.

    This CAERO1 object represents an aerodynamic lifting surface to be
    modeled using the doublet-lattice method; it discretizes the surface
    into a lattice of CQUADA panels.

    :Attributes:
    - `type (str)`: The type of object.
    - `SID (int)`: The integer ID linked with the surface.
    - `xs, ys, zs (1x4 list[float])`: The corner coordinates of the surface.
    - `mesh (nchord x nspan np.array[int])`: The panel IDs in the relative
      positions of their corresponding panels.
    - `xmesh, ymesh, zmesh ((nspan+1)x(nchord+1) np.array[float])`: The
      coordinates of the lifting surface nodes.
    - `CQUADAs (dict[PANID, CQUADA])`: Maps panel IDs to CQUADA objects.

    :Methods:
    - `x`, `y`, `z`: Map master coordinates (eta, xi), each in [-1, 1], to
      physical coordinates.
    - `plotLiftingSurface`: Plots the lifting surface in 3D space; useful
      for debugging purposes.
    - `printSummary`: Prints a summary of the surface.

    .. Note:: Corners are ordered root leading edge, root trailing edge,
       tip trailing edge, tip leading edge.
    """
    def __init__(self,SID,x1,x2,x3,x4,nspan,nchord,**kwargs):
        """Constructor for the CAERO1 lifting surface object.

        Provided several geometric parameters, this method initializes and
        discretizes a lifting surface using CQUADA panel objects.

        :Args:
        - `SID (int)`: The integer ID for the surface.
        - `x1 (1x3 np.array[float])`: The coordinate of the root leading edge.
        - `x2 (1x3 np.array[float])`: The coordinate of the root trailing edge.
        - `x3 (1x3 np.array[float])`: The coordinate of the tip trailing edge.
        - `x4 (1x3 np.array[float])`: The coordinate of the tip leading edge.
        - `nspan (int)`: The number of panels to run in the spanwise direction.
        - `nchord (int)`: The number of panels to run in the chordwise
          direction.
        - `SPANID (int)`: Optional starting panel ID (defaults to 0).

        :Returns:
        - None

        :Raises:
        - `TypeError`: If SID is not an int.
        """
        # Initialize type
        self.type = 'CAERO1'
        # Error checking on SID input
        if type(SID) is int:
            self.SID = SID
        else:
            raise TypeError('The element ID given was not an integer')
        #TODO: Throw in a check to make sure x1 and x2 share at least one value
        #TODO: Throw in a check to make sure x4 and x3 share at least one value
        # Starting aero box ID
        SPANID = kwargs.pop('SPANID',0)
        # Populate the per-axis corner coordinate lists
        self.xs = [x1[0],x2[0],x3[0],x4[0]]
        self.ys = [x1[1],x2[1],x3[1],x4[1]]
        self.zs = [x1[2],x2[2],x3[2],x4[2]]
        # Generate the node grid in superelement (eta, xi) space
        xis = np.linspace(-1,1,nchord+1)
        etas = np.linspace(-1,1,nspan+1)
        xis, etas = np.meshgrid(xis,etas)
        self.xmesh = self.x(etas,xis)
        self.ymesh = self.y(etas,xis)
        self.zmesh = self.z(etas,xis)
        self.mesh = np.zeros((nchord,nspan),dtype=int)
        # Seed the dict with a sentinel so max(keys)+1 starts at SPANID
        self.CQUADAs = {SPANID-1:None}
        for i in range(0,nspan):
            for j in range(0,nchord):
                newPANID = max(self.CQUADAs.keys())+1
                self.mesh[j,i] = newPANID
                # Corner points of panel (i, j), in CQUADA corner order
                p1 = [self.xmesh[i,j],self.ymesh[i,j],self.zmesh[i,j]]
                p2 = [self.xmesh[i,j+1],self.ymesh[i,j+1],self.zmesh[i,j+1]]
                p3 = [self.xmesh[i+1,j+1],self.ymesh[i+1,j+1],self.zmesh[i+1,j+1]]
                p4 = [self.xmesh[i+1,j],self.ymesh[i+1,j],self.zmesh[i+1,j]]
                self.CQUADAs[newPANID] = CQUADA(newPANID,[p1,p2,p3,p4])
        # Drop the sentinel entry.
        # BUG FIX: previously `del self.CQUADAs[-1]`, which raised KeyError
        # whenever a nonzero SPANID was supplied.
        del self.CQUADAs[SPANID-1]
    def x(self,eta,xi):
        """Return the x-coordinate at master coordinates (eta, xi).

        Both eta and xi vary over [-1, 1].
        """
        xs = self.xs
        return .25*(xs[0]*(1.-xi)*(1.-eta)+xs[1]*(1.+xi)*(1.-eta)+\
            xs[2]*(1.+xi)*(1.+eta)+xs[3]*(1.-xi)*(1.+eta))
    def y(self,eta,xi):
        """Return the y-coordinate at master coordinates (eta, xi).

        Both eta and xi vary over [-1, 1].
        """
        ys = self.ys
        return .25*(ys[0]*(1.-xi)*(1.-eta)+ys[1]*(1.+xi)*(1.-eta)+\
            ys[2]*(1.+xi)*(1.+eta)+ys[3]*(1.-xi)*(1.+eta))
    def z(self,eta,xi):
        """Return the z-coordinate at master coordinates (eta, xi).

        Both eta and xi vary over [-1, 1].
        """
        zs = self.zs
        return .25*(zs[0]*(1.-xi)*(1.-eta)+zs[1]*(1.+xi)*(1.-eta)+\
            zs[2]*(1.+xi)*(1.+eta)+zs[3]*(1.-xi)*(1.+eta))
    def plotLiftingSurface(self,**kwargs):
        """Plot the lifting surface using the MayaVi environment.

        :Args:
        - `figName (str)`: The name of the figure.

        :Returns:
        - `(figure)`: MayaVi figure of the surface.

        .. Note:: Requires `mayavi.mlab`; its import is commented out at the
           top of this module, so calling this will raise NameError until it
           is restored.
        """
        figName = kwargs.pop('figName','Figure'+str(int(np.random.rand()*100)))
        mlab.figure(figure=figName)
        mlab.mesh(self.xmesh,self.ymesh,self.zmesh,representation='wireframe',color=(0,0,0))
        mlab.mesh(self.xmesh,self.ymesh,self.zmesh)
    def printSummary(self):
        """Print the surface ID and panel counts.

        :Returns:
        - None
        """
        # BUG FIX: the header previously said 'CQUADA Summary:', the SID line
        # used 'SID' % self.SID (raising TypeError), and the chordwise /
        # spanwise counts read the wrong axes of the (nchord, nspan) mesh.
        print('CAERO1 Summary:')
        print('SID: %d' %(self.SID))
        print('Number of chordwise panels: %d' %(np.size(self.mesh,axis=0)))
        print('Number of spanwise panels: %d' %(np.size(self.mesh,axis=1)))
"repo_name": "bennames/AeroComBAT-Project",
"path": "AeroComBAT/Aerodynamics.py",
"copies": "1",
"size": "33707",
"license": "mit",
"hash": -6898875933253331000,
"line_mean": 38.149825784,
"line_max": 100,
"alpha_frac": 0.5486397484,
"autogenerated": false,
"ratio": 3.252629547428351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9140506129329384,
"avg_score": 0.03215263329979336,
"num_lines": 861
} |
'Aerosol.py tests'
import time
import numpy as np
import aerosol
# Module-level fixture: every test below exercises this AeroData instance,
# built from a sample image expected to ship alongside the repo.
aero = aerosol.AeroData("./images/img6.jpg")
def test_all():
    '''
    Purpose: runs all test functions
    Inputs: None
    Outputs: Prints the number of tests that were passed
    Returns: None
    Assumptions: N/A
    '''
    testspassed = 0
    numtests = 0
    # Run each test once; a return value of 1 signals success. The loop
    # replaces the original duplicated if/else blocks and produces the same
    # output. print(...) with a single argument behaves identically under
    # Python 2 and 3.
    for name, func in (("test_analyzeWavelength", test_analyzeWavelength),
                       ("test_aerolyzeImage", test_aerolyzeImage)):
        numtests += 1
        if func() == 1:
            print("%s() passed\n" % name)
            testspassed += 1
        else:
            print("%s() failed\n" % name)
    print("Number of Tests passed " + str(testspassed) + "/" + str(numtests) + "\n")
def test_analyzeWavelength():
    '''
    Purpose: Calls analyzeWavelength with a random wavelength
    Inputs: None
    Outputs: Prints the result of analyzeWavelength
    Returns: 1 if passed
    Assumptions: N/A
    '''
    # Random wavelength in [300, 1300) nm
    rand = np.random.random() * 1000.0
    wavelength = 300.0 + rand
    # print(...) form keeps this runnable under both Python 2 and 3
    print(aero.analyzeWavelength(wavelength))
    return 1
def test_aerolyzeImage():
    '''
    Purpose: Calls aerolyzeImage
    Inputs: None
    Outputs: Prints the result of aerolyzeImage, Prints the runtime of aerolyzeImage
    Returns: 1 if passed
    Assumptions: N/A
    '''
    t0 = time.time()
    print(aero.aerolyzeImage())
    t1 = time.time()
    total_n = t1 - t0
    # BUG FIX: the message previously said "readHazeLayer" although the call
    # being timed is aerolyzeImage.
    print("aerolyzeImage runtime: " + str(total_n))
    return 1
# Run the whole suite whenever this file is executed (or imported).
test_all()
| {
"repo_name": "Aerolyzer/Aerolyzer",
"path": "aerolyzer/test_aerosol.py",
"copies": "1",
"size": "1657",
"license": "apache-2.0",
"hash": -5645738348085358000,
"line_mean": 26.1639344262,
"line_max": 91,
"alpha_frac": 0.5878092939,
"autogenerated": false,
"ratio": 3.503171247357294,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9570698806479836,
"avg_score": 0.004056346955491646,
"num_lines": 61
} |
a = <error descr="Python does not support a trailing 'u'">12u</error>
b = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support a trailing 'l'.">12l</warning>
c = <error descr="Python does not support a trailing 'll'">12ll</error>
d = <error descr="Python does not support a trailing 'U'">12U</error>
e = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support a trailing 'L'.">12L</warning>
f = <error descr="Python does not support a trailing 'LL'">12LL</error>
g = <error descr="Python does not support a trailing 'ul'">0x12ful</error>
h = <error descr="Python does not support a trailing 'uL'">0X12fuL</error>
i = <error descr="Python does not support a trailing 'Ul'">12Ul</error>
j = <error descr="Python does not support a trailing 'UL'">12UL</error>
k = <error descr="Python does not support a trailing 'ull'">0o12ull</error>
l = <error descr="Python does not support a trailing 'uLL'">0O12uLL</error>
m = <error descr="Python does not support a trailing 'Ull'">0b1Ull</error>
n = <error descr="Python does not support a trailing 'ULL'">0B1ULL</error>
o = <error descr="Python does not support a trailing 'lu'">12lu</error>
p = <error descr="Python does not support a trailing 'lU'">12lU</error>
q = <error descr="Python does not support a trailing 'Lu'">12Lu</error>
r = <error descr="Python does not support a trailing 'LU'">12LU</error>
s = <error descr="Python does not support a trailing 'llu'">12llu</error>
t = <error descr="Python does not support a trailing 'llU'">12llU</error>
u = <error descr="Python does not support a trailing 'LLu'">12LLu</error>
v = <error descr="Python does not support a trailing 'LLU'">12LLU</error>
w = <warning descr="Python version 3.4, 3.5, 3.6, 3.7, 3.8 do not support this syntax. It requires '0o' prefix for octal literals">04</warning><error descr="End of statement expected">8</error> | {
"repo_name": "leafclick/intellij-community",
"path": "python/testData/inspections/PyCompatibilityInspection/numericLiteralExpression.py",
"copies": "1",
"size": "1867",
"license": "apache-2.0",
"hash": -7222352221494946000,
"line_mean": 80.2173913043,
"line_max": 193,
"alpha_frac": 0.7064809855,
"autogenerated": false,
"ratio": 3.0506535947712417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4257134580271241,
"avg_score": null,
"num_lines": null
} |
a = <error descr="Python does not support a trailing 'u'">12u</error>
b = <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a trailing 'l'">12l</warning>
c = <error descr="Python does not support a trailing 'll'">12ll</error>
d = <error descr="Python does not support a trailing 'U'">12U</error>
e = <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support a trailing 'L'">12L</warning>
f = <error descr="Python does not support a trailing 'LL'">12LL</error>
g = <error descr="Python does not support a trailing 'ul'">0x12ful</error>
h = <error descr="Python does not support a trailing 'uL'">0X12fuL</error>
i = <error descr="Python does not support a trailing 'Ul'">12Ul</error>
j = <error descr="Python does not support a trailing 'UL'">12UL</error>
k = <error descr="Python does not support a trailing 'ull'">0o12ull</error>
l = <error descr="Python does not support a trailing 'uLL'">0O12uLL</error>
m = <error descr="Python does not support a trailing 'Ull'">0b1Ull</error>
n = <error descr="Python does not support a trailing 'ULL'">0B1ULL</error>
o = <error descr="Python does not support a trailing 'lu'">12lu</error>
p = <error descr="Python does not support a trailing 'lU'">12lU</error>
q = <error descr="Python does not support a trailing 'Lu'">12Lu</error>
r = <error descr="Python does not support a trailing 'LU'">12LU</error>
s = <error descr="Python does not support a trailing 'llu'">12llu</error>
t = <error descr="Python does not support a trailing 'llU'">12llU</error>
u = <error descr="Python does not support a trailing 'LLu'">12LLu</error>
v = <error descr="Python does not support a trailing 'LLU'">12LLU</error>
w = <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not support this syntax. It requires '0o' prefix for octal literals">04</warning><error descr="End of statement expected">8</error> | {
"repo_name": "JetBrains/intellij-community",
"path": "python/testData/inspections/PyCompatibilityInspection/numericLiteralExpression.py",
"copies": "10",
"size": "1886",
"license": "apache-2.0",
"hash": -8854427697581713000,
"line_mean": 81.0434782609,
"line_max": 200,
"alpha_frac": 0.7057264051,
"autogenerated": false,
"ratio": 3.0224358974358974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8728162302535898,
"avg_score": null,
"num_lines": null
} |
""" AES-128 """
from __future__ import division, absolute_import
import pyrtl
from pyrtl.rtllib import libutils
# TODO:
# 2) All ROMs should be synchronous. This should be easy once (3) is completed
# 3) Right now decryption generates one GIANT combinatorial block. Instead
# it should generate one of 2 options -- Either an iterative design or a
# pipelined design. Both will add registers between each round of AES
# 4) aes_encryption should be added to this file as well so that an
# aes encrypter similar to (3) above is generated
# 5) a single "aes-unit" combining encryption and decryption (without making
# full independent hardware units) would be a plus as well
class AES(object):
def __init__(self):
self.memories_built = False
    def _g(self, word, key_expand_round):
        """
        One-byte left circular rotation, substitution of each byte

        Key-schedule "g" function: rotate, SubBytes via the S-box ROM, then
        XOR the round constant into one byte.
        """
        self.build_memories_if_not_exists()
        a = libutils.partition_wire(word, 8)
        # Index order (3, 0, 1, 2) performs the one-byte rotation while each
        # byte is looked up in the S-box ROM.
        sub = [self.sbox[a[index]] for index in (3, 0, 1, 2)]
        # Fold in the round constant (rcon table is 1-indexed per round).
        sub[3] = sub[3] ^ self.rcon[key_expand_round + 1]
        return pyrtl.concat_list(sub)
    def key_expansion(self, old_key, key_expand_round):
        """Derive the next 128-bit round key from `old_key` (AES-128 schedule).

        Builds combinational hardware; word ordering follows the order that
        libutils.partition_wire produces -- assumed consistent with _g.
        """
        self.build_memories_if_not_exists()
        w = libutils.partition_wire(old_key, 32)
        # First new word mixes g(w[0]) into w[3]; each subsequent word XORs
        # the previously computed word with the matching old word.
        x = [w[3] ^ self._g(w[0], key_expand_round)]
        x.insert(0, x[0] ^ w[2])
        x.insert(0, x[0] ^ w[1])
        x.insert(0, x[0] ^ w[0])
        return pyrtl.concat_list(x)
    def inv_sub_bytes(self, in_vector):
        """AES InvSubBytes: run every byte of the state through the inverse
        S-box ROM."""
        self.build_memories_if_not_exists()
        subbed = [self.inv_sbox[byte] for byte in libutils.partition_wire(in_vector, 8)]
        return pyrtl.concat_list(subbed)
    @staticmethod
    def inv_shift_rows(in_vector):
        """AES InvShiftRows as a fixed byte permutation.

        Generates no logic -- only reorders the byte wires before re-concatenating.
        """
        a = libutils.partition_wire(in_vector, 8)
        return pyrtl.concat_list((a[12], a[9], a[6], a[3],
                                  a[0], a[13], a[10], a[7],
                                  a[4], a[1], a[14], a[11],
                                  a[8], a[5], a[2], a[15]))
    def inv_galois_mult(self, c, d):
        # GF(2^8) multiply byte `c` by constant `d` (one of 9/11/13/14) via
        # the matching precomputed ROM table.
        return self._inv_gal_mult_dict[d][c]
@staticmethod
def _mod_add(base, add, mod):
base_mod_floor = base // mod
return (base + add) % mod + base_mod_floor * mod
_igm_divisor = [14, 9, 13, 11]
    def inv_mix_columns(self, in_vector):
        """AES InvMixColumns over the 16-byte state using the GF-mult ROMs."""
        def _inv_mix_single(index):
            # XOR the four Galois products for one output byte; _mod_add keeps
            # the byte index inside its own 4-byte column while rotating.
            mult_items = [self.inv_galois_mult(a[self._mod_add(index, loc, 4)], mult_table)
                          for loc, mult_table in enumerate(self._igm_divisor)]
            return mult_items[0] ^ mult_items[1] ^ mult_items[2] ^ mult_items[3]
        self.build_memories_if_not_exists()
        a = libutils.partition_wire(in_vector, 8)
        inverted = [_inv_mix_single(index) for index in range(len(a))]
        return pyrtl.concat_list(inverted)
    @staticmethod
    def add_round_key(t, key):
        """AES AddRoundKey: XOR the state with the round key."""
        return t ^ key
    def decryption_statem(self, ciphertext_in, key_in, reset):
        """
        return ready, decryption_result: ready is a one bit signal showing
        that the answer decryption result has been calculated.

        Iterative design: one inverse AES round per clock cycle, with the
        round keys expanded in hardware from key_in.
        """
        if len(key_in) != len(ciphertext_in):
            raise pyrtl.PyrtlError("AES key and ciphertext should be the same length")
        cipher_text, key = (pyrtl.Register(len(ciphertext_in)) for i in range(2))
        key_exp_in, add_round_in = (pyrtl.WireVector(len(ciphertext_in)) for i in range(2))
        # this is not part of the state machine as we need the keys in
        # reverse order...
        reversed_key_list = reversed(self.decryption_key_gen(key_exp_in))
        counter = pyrtl.Register(4, 'counter')
        round = pyrtl.WireVector(4)
        counter.next <<= round
        # Combinational datapath for one round; the mux selects this round's key.
        inv_shift = self.inv_shift_rows(cipher_text)
        inv_sub = self.inv_sub_bytes(inv_shift)
        key_out = pyrtl.mux(round, *reversed_key_list, default=0)
        add_round_out = self.add_round_key(add_round_in, key_out)
        inv_mix_out = self.inv_mix_columns(add_round_out)
        with pyrtl.conditional_assignment:
            with reset == 1:
                # Cycle 0: latch inputs and do the initial AddRoundKey.
                round |= 0
                key.next |= key_in
                key_exp_in |= key_in  # to lower the number of cycles needed
                cipher_text.next |= add_round_out
                add_round_in |= ciphertext_in
            with counter == 10:  # keep everything the same
                round |= counter
                cipher_text.next |= cipher_text
            with pyrtl.otherwise:  # running through AES
                round |= counter + 1
                key.next |= key
                key_exp_in |= key
                add_round_in |= inv_sub
                with counter == 9:
                    # Final round skips InvMixColumns, per the AES spec.
                    cipher_text.next |= add_round_out
                with pyrtl.otherwise:
                    cipher_text.next |= inv_mix_out
        ready = (counter == 10)
        return ready, cipher_text
    def decryption_statem_with_rom_in(self, ciphertext_in, key_ROM, reset):
        """Variant of decryption_statem that reads the precomputed round keys
        from `key_ROM` (addressed 0..10) instead of expanding them in hardware.

        Returns (ready, result) like decryption_statem.
        """
        cipher_text = pyrtl.Register(len(ciphertext_in))
        add_round_in = pyrtl.WireVector(len(ciphertext_in))
        counter = pyrtl.Register(4, 'counter')
        round = pyrtl.WireVector(4)
        counter.next <<= round
        inv_shift = self.inv_shift_rows(cipher_text)
        inv_sub = self.inv_sub_bytes(inv_shift)
        # Round keys are consumed in reverse order for decryption.
        key_out = key_ROM[(10 - round)[0:4]]
        # NOTE(review): unlike decryption_statem, AddRoundKey here always takes
        # inv_sub, and `add_round_in` is written below but never read -- this
        # looks inconsistent with the sibling state machine; confirm before use.
        add_round_out = self.add_round_key(inv_sub, key_out)
        inv_mix_out = self.inv_mix_columns(add_round_out)
        with pyrtl.conditional_assignment:
            with reset == 1:
                round |= 0
                cipher_text.next |= add_round_out
                add_round_in |= ciphertext_in
            with counter == 10:  # keep everything the same
                round |= counter
                cipher_text.next |= cipher_text
            with pyrtl.otherwise:  # running through AES
                round |= counter + 1
                add_round_in |= key_out
                with counter == 9:
                    # Final round skips InvMixColumns, per the AES spec.
                    cipher_text.next |= add_round_out
                with pyrtl.otherwise:
                    cipher_text.next |= inv_mix_out
        ready = (counter == 10)
        return ready, cipher_text
    def decryption_key_gen(self, key):
        """Return the 11 round keys [key, round1, ..., round10] as wires."""
        keys = [key]
        for enc_round in range(10):
            key = self.key_expansion(key, enc_round)
            keys.append(key)
        return keys
    def decryption(self, ciphertext, key):
        """Fully combinational AES-128 decryption (one giant logic block --
        see the TODO at the top of this file)."""
        key_list = self.decryption_key_gen(key)
        # Initial AddRoundKey uses the last round key, then 10 inverse rounds.
        t = self.add_round_key(ciphertext, key_list[10])
        for round in range(1, 11):
            t = self.inv_shift_rows(t)
            t = self.inv_sub_bytes(t)
            t = self.add_round_key(t, key_list[10 - round])
            if round != 10:
                # InvMixColumns is skipped on the last round, per the AES spec.
                t = self.inv_mix_columns(t)
        return t
def build_memories_if_not_exists(self):
if not self.memories_built:
self.build_memories()
    def build_memories(self):
        """Instantiate the S-box, inverse S-box, rcon and Galois-mult ROMs."""
        def build_mem(data):
            # Asynchronous for now; the file-header TODO says these should
            # eventually become synchronous ROMs.
            return pyrtl.RomBlock(bitwidth=8, addrwidth=8, romdata=data, asynchronous=True)
        self.sbox = build_mem(self.sbox_data)
        self.inv_sbox = build_mem(self.inv_sbox_data)
        self.rcon = build_mem(self.rcon_data)
        self.GM9 = build_mem(self.GM9_data)
        self.GM11 = build_mem(self.GM11_data)
        self.GM13 = build_mem(self.GM13_data)
        self.GM14 = build_mem(self.GM14_data)
        # Map each InvMixColumns constant to its lookup table.
        self._inv_gal_mult_dict = {9: self.GM9, 11: self.GM11, 13: self.GM13, 14: self.GM14}
        self.memories_built = True
sbox_data = libutils.str_to_int_array('''
63 7c 77 7b f2 6b 6f c5 30 01 67 2b fe d7 ab 76 ca 82 c9 7d fa 59 47 f0
ad d4 a2 af 9c a4 72 c0 b7 fd 93 26 36 3f f7 cc 34 a5 e5 f1 71 d8 31 15
04 c7 23 c3 18 96 05 9a 07 12 80 e2 eb 27 b2 75 09 83 2c 1a 1b 6e 5a a0
52 3b d6 b3 29 e3 2f 84 53 d1 00 ed 20 fc b1 5b 6a cb be 39 4a 4c 58 cf
d0 ef aa fb 43 4d 33 85 45 f9 02 7f 50 3c 9f a8 51 a3 40 8f 92 9d 38 f5
bc b6 da 21 10 ff f3 d2 cd 0c 13 ec 5f 97 44 17 c4 a7 7e 3d 64 5d 19 73
60 81 4f dc 22 2a 90 88 46 ee b8 14 de 5e 0b db e0 32 3a 0a 49 06 24 5c
c2 d3 ac 62 91 95 e4 79 e7 c8 37 6d 8d d5 4e a9 6c 56 f4 ea 65 7a ae 08
ba 78 25 2e 1c a6 b4 c6 e8 dd 74 1f 4b bd 8b 8a 70 3e b5 66 48 03 f6 0e
61 35 57 b9 86 c1 1d 9e e1 f8 98 11 69 d9 8e 94 9b 1e 87 e9 ce 55 28 df
8c a1 89 0d bf e6 42 68 41 99 2d 0f b0 54 bb 16
''')
inv_sbox_data = libutils.str_to_int_array('''
52 09 6a d5 30 36 a5 38 bf 40 a3 9e 81 f3 d7 fb 7c e3 39 82 9b 2f ff 87
34 8e 43 44 c4 de e9 cb 54 7b 94 32 a6 c2 23 3d ee 4c 95 0b 42 fa c3 4e
08 2e a1 66 28 d9 24 b2 76 5b a2 49 6d 8b d1 25 72 f8 f6 64 86 68 98 16
d4 a4 5c cc 5d 65 b6 92 6c 70 48 50 fd ed b9 da 5e 15 46 57 a7 8d 9d 84
90 d8 ab 00 8c bc d3 0a f7 e4 58 05 b8 b3 45 06 d0 2c 1e 8f ca 3f 0f 02
c1 af bd 03 01 13 8a 6b 3a 91 11 41 4f 67 dc ea 97 f2 cf ce f0 b4 e6 73
96 ac 74 22 e7 ad 35 85 e2 f9 37 e8 1c 75 df 6e 47 f1 1a 71 1d 29 c5 89
6f b7 62 0e aa 18 be 1b fc 56 3e 4b c6 d2 79 20 9a db c0 fe 78 cd 5a f4
1f dd a8 33 88 07 c7 31 b1 12 10 59 27 80 ec 5f 60 51 7f a9 19 b5 4a 0d
2d e5 7a 9f 93 c9 9c ef a0 e0 3b 4d ae 2a f5 b0 c8 eb bb 3c 83 53 99 61
17 2b 04 7e ba 77 d6 26 e1 69 14 63 55 21 0c 7d
''')
rcon_data = libutils.str_to_int_array('''
8d 01 02 04 08 10 20 40 80 1b 36 6c d8 ab 4d 9a 2f 5e bc 63 c6 97 35 6a
d4 b3 7d fa ef c5 91 39 72 e4 d3 bd 61 c2 9f 25 4a 94 33 66 cc 83 1d 3a
74 e8 cb 8d 01 02 04 08 10 20 40 80 1b 36 6c d8 ab 4d 9a 2f 5e bc 63 c6
97 35 6a d4 b3 7d fa ef c5 91 39 72 e4 d3 bd 61 c2 9f 25 4a 94 33 66 cc
83 1d 3a 74 e8 cb 8d 01 02 04 08 10 20 40 80 1b 36 6c d8 ab 4d 9a 2f 5e
bc 63 c6 97 35 6a d4 b3 7d fa ef c5 91 39 72 e4 d3 bd 61 c2 9f 25 4a 94
33 66 cc 83 1d 3a 74 e8 cb 8d 01 02 04 08 10 20 40 80 1b 36 6c d8 ab 4d
9a 2f 5e bc 63 c6 97 35 6a d4 b3 7d fa ef c5 91 39 72 e4 d3 bd 61 c2 9f
25 4a 94 33 66 cc 83 1d 3a 74 e8 cb 8d 01 02 04 08 10 20 40 80 1b 36 6c
d8 ab 4d 9a 2f 5e bc 63 c6 97 35 6a d4 b3 7d fa ef c5 91 39 72 e4 d3 bd
61 c2 9f 25 4a 94 33 66 cc 83 1d 3a 74 e8 cb 8d
''')
# Galois Multiplication tables for 9, 11, 13, and 14.
GM9_data = libutils.str_to_int_array('''
00 09 12 1b 24 2d 36 3f 48 41 5a 53 6c 65 7e 77 90 99 82 8b b4 bd a6 af
d8 d1 ca c3 fc f5 ee e7 3b 32 29 20 1f 16 0d 04 73 7a 61 68 57 5e 45 4c
ab a2 b9 b0 8f 86 9d 94 e3 ea f1 f8 c7 ce d5 dc 76 7f 64 6d 52 5b 40 49
3e 37 2c 25 1a 13 08 01 e6 ef f4 fd c2 cb d0 d9 ae a7 bc b5 8a 83 98 91
4d 44 5f 56 69 60 7b 72 05 0c 17 1e 21 28 33 3a dd d4 cf c6 f9 f0 eb e2
95 9c 87 8e b1 b8 a3 aa ec e5 fe f7 c8 c1 da d3 a4 ad b6 bf 80 89 92 9b
7c 75 6e 67 58 51 4a 43 34 3d 26 2f 10 19 02 0b d7 de c5 cc f3 fa e1 e8
9f 96 8d 84 bb b2 a9 a0 47 4e 55 5c 63 6a 71 78 0f 06 1d 14 2b 22 39 30
9a 93 88 81 be b7 ac a5 d2 db c0 c9 f6 ff e4 ed 0a 03 18 11 2e 27 3c 35
42 4b 50 59 66 6f 74 7d a1 a8 b3 ba 85 8c 97 9e e9 e0 fb f2 cd c4 df d6
31 38 23 2a 15 1c 07 0e 79 70 6b 62 5d 54 4f 46
''')
GM11_data = libutils.str_to_int_array('''
00 0b 16 1d 2c 27 3a 31 58 53 4e 45 74 7f 62 69 b0 bb a6 ad 9c 97 8a 81
e8 e3 fe f5 c4 cf d2 d9 7b 70 6d 66 57 5c 41 4a 23 28 35 3e 0f 04 19 12
cb c0 dd d6 e7 ec f1 fa 93 98 85 8e bf b4 a9 a2 f6 fd e0 eb da d1 cc c7
ae a5 b8 b3 82 89 94 9f 46 4d 50 5b 6a 61 7c 77 1e 15 08 03 32 39 24 2f
8d 86 9b 90 a1 aa b7 bc d5 de c3 c8 f9 f2 ef e4 3d 36 2b 20 11 1a 07 0c
65 6e 73 78 49 42 5f 54 f7 fc e1 ea db d0 cd c6 af a4 b9 b2 83 88 95 9e
47 4c 51 5a 6b 60 7d 76 1f 14 09 02 33 38 25 2e 8c 87 9a 91 a0 ab b6 bd
d4 df c2 c9 f8 f3 ee e5 3c 37 2a 21 10 1b 06 0d 64 6f 72 79 48 43 5e 55
01 0a 17 1c 2d 26 3b 30 59 52 4f 44 75 7e 63 68 b1 ba a7 ac 9d 96 8b 80
e9 e2 ff f4 c5 ce d3 d8 7a 71 6c 67 56 5d 40 4b 22 29 34 3f 0e 05 18 13
ca c1 dc d7 e6 ed f0 fb 92 99 84 8f be b5 a8 a3
''')
GM13_data = libutils.str_to_int_array('''
00 0d 1a 17 34 39 2e 23 68 65 72 7f 5c 51 46 4b d0 dd ca c7 e4 e9 fe f3
b8 b5 a2 af 8c 81 96 9b bb b6 a1 ac 8f 82 95 98 d3 de c9 c4 e7 ea fd f0
6b 66 71 7c 5f 52 45 48 03 0e 19 14 37 3a 2d 20 6d 60 77 7a 59 54 43 4e
05 08 1f 12 31 3c 2b 26 bd b0 a7 aa 89 84 93 9e d5 d8 cf c2 e1 ec fb f6
d6 db cc c1 e2 ef f8 f5 be b3 a4 a9 8a 87 90 9d 06 0b 1c 11 32 3f 28 25
6e 63 74 79 5a 57 40 4d da d7 c0 cd ee e3 f4 f9 b2 bf a8 a5 86 8b 9c 91
0a 07 10 1d 3e 33 24 29 62 6f 78 75 56 5b 4c 41 61 6c 7b 76 55 58 4f 42
09 04 13 1e 3d 30 27 2a b1 bc ab a6 85 88 9f 92 d9 d4 c3 ce ed e0 f7 fa
b7 ba ad a0 83 8e 99 94 df d2 c5 c8 eb e6 f1 fc 67 6a 7d 70 53 5e 49 44
0f 02 15 18 3b 36 21 2c 0c 01 16 1b 38 35 22 2f 64 69 7e 73 50 5d 4a 47
dc d1 c6 cb e8 e5 f2 ff b4 b9 ae a3 80 8d 9a 97
''')
GM14_data = libutils.str_to_int_array('''
00 0e 1c 12 38 36 24 2a 70 7e 6c 62 48 46 54 5a e0 ee fc f2 d8 d6 c4 ca
90 9e 8c 82 a8 a6 b4 ba db d5 c7 c9 e3 ed ff f1 ab a5 b7 b9 93 9d 8f 81
3b 35 27 29 03 0d 1f 11 4b 45 57 59 73 7d 6f 61 ad a3 b1 bf 95 9b 89 87
dd d3 c1 cf e5 eb f9 f7 4d 43 51 5f 75 7b 69 67 3d 33 21 2f 05 0b 19 17
76 78 6a 64 4e 40 52 5c 06 08 1a 14 3e 30 22 2c 96 98 8a 84 ae a0 b2 bc
e6 e8 fa f4 de d0 c2 cc 41 4f 5d 53 79 77 65 6b 31 3f 2d 23 09 07 15 1b
a1 af bd b3 99 97 85 8b d1 df cd c3 e9 e7 f5 fb 9a 94 86 88 a2 ac be b0
ea e4 f6 f8 d2 dc ce c0 7a 74 66 68 42 4c 5e 50 0a 04 16 18 32 3c 2e 20
ec e2 f0 fe d4 da c8 c6 9c 92 80 8e a4 aa b8 b6 0c 02 10 1e 34 3a 28 26
7c 72 60 6e 44 4a 58 56 37 39 2b 25 0f 01 13 1d 47 49 5b 55 7f 71 63 6d
d7 d9 cb c5 ef e1 f3 fd a7 a9 bb b5 9f 91 83 8d
''')
| {
"repo_name": "deekshadangwal/PyRTL",
"path": "pyrtl/rtllib/aes.py",
"copies": "1",
"size": "14048",
"license": "bsd-3-clause",
"hash": -1779640991857497900,
"line_mean": 45.5165562914,
"line_max": 92,
"alpha_frac": 0.5793707289,
"autogenerated": false,
"ratio": 2.7686243594797006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8846494552076429,
"avg_score": 0.0003001072606540567,
"num_lines": 302
} |
# AES 192/256 bit encryption (Internet draft)
# http://tools.ietf.org/html/draft-blumenthal-aes-usm-04
from pysnmp.proto.secmod.rfc3826.priv import aes
from pysnmp.proto.secmod.rfc3414.auth import hmacmd5, hmacsha
from pysnmp.proto.secmod.rfc3414 import localkey
from pysnmp.proto import error
from math import ceil
try:
from hashlib import md5, sha1
except ImportError:
import md5, sha
md5 = md5.new
sha1 = sha.new
class AbstractAes(aes.Aes):
    """Base class for AES-192/256 USM privacy (draft-blumenthal-aes-usm-04).

    Subclasses set serviceID and keySize; localizeKey extends RFC 3414 key
    localization so the localized key reaches keySize octets.
    """
    serviceID = ()
    keySize = 0

    # 3.1.2.1
    def localizeKey(self, authProtocol, privKey, snmpEngineID):
        """Localize privKey for snmpEngineID and stretch it to keySize octets
        by repeatedly appending a hash of the key material (the key-extension
        procedure of the Blumenthal AES draft).

        Raises error.ProtocolError for an unrecognized auth protocol.
        """
        if authProtocol == hmacmd5.HmacMd5.serviceID:
            localPrivKey = localkey.localizeKeyMD5(privKey, snmpEngineID)
            # Extend until we have at least keySize octets.  (The previous
            # `while ceil(self.keySize//len(localPrivKey))` condition looped
            # one extra time when the lengths were equal; the final
            # truncation made the output identical, so this is a pure
            # clarity/efficiency fix.)
            while len(localPrivKey) < self.keySize:
                localPrivKey = localPrivKey + md5(localPrivKey).digest()
        elif authProtocol == hmacsha.HmacSha.serviceID:
            localPrivKey = localkey.localizeKeySHA(privKey, snmpEngineID)
            while len(localPrivKey) < self.keySize:
                localPrivKey = localPrivKey + sha1(localPrivKey).digest()
        else:
            raise error.ProtocolError(
                'Unknown auth protocol %s' % (authProtocol,)
            )
        # Truncate to exactly keySize octets.
        return localPrivKey[:self.keySize]
| {
"repo_name": "imron/scalyr-agent-2",
"path": "scalyr_agent/third_party/pysnmp/proto/secmod/eso/priv/aesbase.py",
"copies": "2",
"size": "1273",
"license": "apache-2.0",
"hash": 2771728608659005000,
"line_mean": 37.5757575758,
"line_max": 73,
"alpha_frac": 0.671641791,
"autogenerated": false,
"ratio": 3.3237597911227152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9952111538832672,
"avg_score": 0.008658008658008658,
"num_lines": 33
} |
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.backends import default_backend
class AESCipher:
    """AES-CBC wrapper around the `cryptography` package, with PKCS#7 padding
    applied on encrypt."""
    # EFFECTS: Constructor that sets the IV and builds one CBC cipher whose
    # streaming encryptor/decryptor contexts are reused across calls.
    def __init__(self, iv, key):
        self.iv = iv
        backend = default_backend()
        self.cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
        self.encryptor = self.cipher.encryptor()
        self.decryptor = self.cipher.decryptor()
    # EFFECTS: Encrypts the given plaintext in AES CBC mode. Returns a ciphertext.
    def AES_cbc_encrypt(self, plaintext):
        # Pad first so the input is a whole number of 16-byte blocks.
        plaintext = self.padPKCS7(plaintext)
        ciphertext = self.encryptor.update(plaintext)
        return ciphertext
    # EFFECTS: Decrypts the given ciphertext in AES CBC mode. Returns a plaintext.
    # NOTE(review): the PKCS#7 padding added on encrypt is NOT stripped here,
    # so callers receive the still-padded plaintext -- confirm that is intended.
    def AES_cbc_decrypt(self, ciphertext):
        plaintext = self.decryptor.update(ciphertext)
        return plaintext
    # EFFECTS: Applies PKCS#7 padding (128-bit block size) to newPlainText.
    def padPKCS7(self, newPlainText):
        padder = padding.PKCS7(128).padder()
        padded_data = padder.update(newPlainText)
        padded_data += padder.finalize()
        return padded_data
| {
"repo_name": "hologram-io/hologram-python",
"path": "Hologram/Authentication/AES/AESCipher.py",
"copies": "1",
"size": "1421",
"license": "mit",
"hash": 8564344184000206000,
"line_mean": 33.6585365854,
"line_max": 83,
"alpha_frac": 0.6988036594,
"autogenerated": false,
"ratio": 3.7394736842105263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49382773436105265,
"avg_score": null,
"num_lines": null
} |
KLEN_OPTIONS = {
16: 10,
24: 12,
32: 14}
RCON = [ # http://en.wikipedia.org/wiki/Rijndael_key_schedule
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a,
0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25,
0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08,
0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6,
0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61,
0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01,
0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e,
0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4,
0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8,
0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d,
0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91,
0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d,
0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c,
0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa,
0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66,
0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d]
SBOX = [ # http://en.wikipedia.org/wiki/Rijndael_S-box
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]
INVSBOX = [
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d]
def xor_words(a, b):
    """Byte-wise XOR of two equal-length byte sequences; returns a list.

    Wrapped in list() so the result supports the slicing/index assignment the
    other state functions perform.  On Python 2 this is identical to the bare
    map(); on Python 3 bare map() would return a one-shot iterator and break
    downstream slicing.
    """
    return list(map(lambda x: x[0] ^ x[1], zip(a, b)))
def expand_key(zkey):
    """Expand an AES key (list of 16/24/32 byte values) into the round-key
    schedule.

    Returns (ekey, nr): ekey is a flat list of 16 * (nr + 1) bytes and nr is
    the round count (10/12/14).  Raises KeyError for other key lengths.
    xrange -> range: identical iteration on Python 2, also runs on Python 3.
    """
    klen = len(zkey)
    nr = KLEN_OPTIONS[klen]
    ekdlen = 16 * (nr + 1)  # 16 = 4 * Nb, Nb = 4
    ekey = zkey[:]          # work on a copy; the caller's key is untouched
    eklen = klen
    rcon_iter = 0
    while eklen < ekdlen:
        temp = ekey[-4:]    # previous 4-byte word
        if eklen % klen == 0:
            # rotate word left by one byte
            temp = temp[1:] + temp[:1]
            # substitute each byte through the S-box
            for i in range(4):
                temp[i] = SBOX[temp[i]]
            # xor w rcon
            rcon_iter += 1  # incremet first, RCON starts from 1
            temp[0] ^= RCON[rcon_iter]
        if klen == 32 and eklen % 32 == 16:
            # AES-256 extra SubWord step halfway through each key-length stride
            for i in range(4):
                temp[i] = SBOX[temp[i]]
        for t in temp:
            # ekey[-klen] is the word one key-length back, byte by byte.
            ekey.append(ekey[-klen] ^ t)
            eklen += 1
    return ekey, nr
def gm(a, b):  # Galois multiplication of 8 bit characters a and b.
    """Multiply bytes a and b in GF(2^8) with the AES reduction polynomial
    x^8 + x^4 + x^3 + x + 1 (0x11b).  xrange -> range for py2/py3."""
    p = 0
    for _ in range(8):
        if b & 1:
            p ^= a
        a <<= 1
        if a & 0x100:
            a ^= 0x1b  # reduce modulo the AES polynomial
        b >>= 1
    return p & 0xff
def mix_col(col, mul):
    """Multiply one 4-byte column by the circulant matrix given by `mul`.

    mul is (2, 1, 1, 3) for MixColumns and (14, 9, 13, 11) for the inverse.
    xrange -> range for py2/py3 compatibility; logic unchanged.
    """
    r = []
    for i in range(4):
        t = 0
        for j in range(4):
            # (i + 4 - j) % 4 rotates the column so output row i sees the
            # coefficients in standard MixColumns position.
            t ^= gm(col[(i + 4 - j) % 4], mul[j])
        r.append(t)
    return r
def mix_cols(st, mul):
    """Apply mix_col to each 4-byte column of the 16-byte state, in place.
    Returns the mutated state.  xrange -> range for py2/py3."""
    for s in range(0, 16, 4):
        p = s + 4
        st[s:p] = mix_col(st[s:p], mul)
    return st
def sub_bytes(st, sbox):
    """Substitute each of the 16 state bytes through the given S-box, in
    place, and return the state.  xrange -> range for py2/py3."""
    for i in range(16):
        st[i] = sbox[st[i]]
    return st
def shift_rows(st):
    """AES ShiftRows on a column-major 16-byte state: row r rotates left by r.
    Mutates and returns st.  xrange -> range for py2/py3."""
    for r in range(1, 4):
        s = r * 5  # s = r + r * 4: index of row r in column r
        st[r:16:4] = st[s:16:4] + st[r:s:4]
    return st
def inv_shift_rows(st):
    """Inverse of shift_rows: row r rotates right by r (column-major state).
    Mutates and returns st.  xrange -> range for py2/py3."""
    for r in range(1, 4):
        s = 16 - 3 * r  # r + 16 - 4 * r
        st[r:16:4] = st[s:16:4] + st[r:s:4]
    return st
def encryption_loop(etext, ekey, nr):
    """Run all AES encryption rounds over block `etext` with the expanded key
    `ekey` (nr rounds).  xrange -> range for py2/py3; logic unchanged."""
    nr16 = nr * 16
    state = xor_words(etext, ekey[:16])  # add round key
    for eks in range(16, nr16, 16):
        state = sub_bytes(state, SBOX)
        state = shift_rows(state)
        state = mix_cols(state, (2, 1, 1, 3))
        state = xor_words(state, ekey[eks:eks + 16])  # add round key
    # Final round omits MixColumns, per the AES specification.
    state = sub_bytes(state, SBOX)
    state = shift_rows(state)
    state = xor_words(state, ekey[nr16:nr16 + 16])  # add round key
    return state
def decryption_loop(dcryp, ekey, nr):
    """Run all AES decryption rounds over block `dcryp` with the expanded key
    `ekey` (nr rounds).  xrange -> range for py2/py3; logic unchanged."""
    nr16 = nr * 16
    state = xor_words(dcryp, ekey[nr16:nr16 + 16])  # add round key
    # Walk the round keys backwards; the stop value 31 makes the last eks 32,
    # so the last in-loop round key is ekey[16:32].
    for eks in range(nr16, 31, -16):
        state = inv_shift_rows(state)
        state = sub_bytes(state, INVSBOX)
        state = xor_words(state, ekey[eks - 16:eks])  # add round key
        state = mix_cols(state, (14, 9, 13, 11))
    # Final round omits InvMixColumns, per the AES specification.
    state = inv_shift_rows(state)
    state = sub_bytes(state, INVSBOX)
    state = xor_words(state, ekey[:16])  # add round key
    return state
def encrypt(etext, zkey):
    """Encrypt one 16-byte block `etext` (list of ints) with key `zkey`."""
    ekey, nr = expand_key(zkey)
    return encryption_loop(etext, ekey, nr)
def decrypt(ecryp, zkey):
    """Decrypt one 16-byte block `ecryp` (list of ints) with key `zkey`."""
    ekey, nr = expand_key(zkey)
    return decryption_loop(ecryp, ekey, nr)
def str_to_vec(x):
    """Convert a character string to the list of its integer byte values."""
    return [ord(ch) for ch in x]
def vec_to_str(x):
    """Convert a list of integer byte values back into a string."""
    return ''.join(chr(v) for v in x)
class Aes:
    """Convenience wrapper: expand the key once, then encrypt/decrypt single
    16-character blocks given and returned as strings (no padding/chaining)."""
    def __init__(self, key):
        # key is a 16/24/32-character string; expansion is done once up front.
        self.ekey, self.nr = expand_key(str_to_vec(key))
    def enc(self, text):
        # text must be exactly one 16-character block.
        return vec_to_str(encryption_loop(str_to_vec(text), self.ekey, self.nr))
    def dec(self, cryp):
        return vec_to_str(decryption_loop(str_to_vec(cryp), self.ekey, self.nr))
| {
"repo_name": "inuitwallet/NuBippy",
"path": "encrypt/aes.py",
"copies": "1",
"size": "10543",
"license": "mit",
"hash": 8971340324680281000,
"line_mean": 37.9040590406,
"line_max": 94,
"alpha_frac": 0.5859812198,
"autogenerated": false,
"ratio": 2.1455026455026456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8228134863752417,
"avg_score": 0.0006698003100458267,
"num_lines": 271
} |
# AES trial decryption FeatherModule by Daniel Crowley
#
# Try various common AES keys and check for proper padding
from Crypto.Cipher import AES
import feathermodules
# Our main function
def aes_key_brute(samples):
# Check that the samples are the correct size to match AES
if not all([len(sample) % 16 == 0 for sample in samples]):
return False
default_keylist = [
'0'*32,
'f'*32,
'30'*16, #'0'*16 hex encoded
'66'*16, #'f'*16 hex encoded
'31323334353637383930313233343536',
'30313233343536373839303132333435',
'70617373776f726470617373776f7264', #'passwordpassword'.encode('hex')
'5f4dcc3b5aa765d61d8327deb882cf99' # md5('password')
]
def decrypt_and_check(cipher, ciphertext):
'''Decrypt under constructed cipher and return True or False indicating correct pkcs7 padding'''
pt = cipher.decrypt(ciphertext)
last_byte = ord(pt[-1])
if last_byte > 16:
return False
elif pt[-last_byte:] == chr(last_byte)*last_byte:
return True
else:
return False
# Load the current set of options from FD, using dict() so we get a copy
# rather than manipulating the original dict
options = dict(feathermodules.current_options)
results = []
if options['keyfile'] != '':
try:
keys = open(options['keyfile'],'r').readlines()
except:
print '[*] Key file is not a set of hex encoded 16 byte values. Using default key list.'
else:
keys = default_keylist
# filter samples into one-block samples and multi-block samples
one_block_samples = filter(lambda x: len(x)==16, samples)
multi_block_samples = filter(lambda x: len(x) > 16, samples)
if len(multi_block_samples) == 1:
print '[*] One a single multi-block sample exists. This has a 1 in 256 chance of false positives with the CBC test.'
if len(one_block_samples) == 1:
print '[*] One a single one-block sample exists. This has a 1 in 256 chance of false positives with the ECB, CBC key-as-IV, and CBC known IV tests.'
for key in keys:
try:
key = key.decode('hex')
except:
print '[*] Bad key provided, bailing out.'
return False
# set all bad_decryption flags to False
ecb_bad_decrypt = cbc_key_as_iv_bad_decrypt = cbc_bad_decrypt = cbc_known_iv_bad_decrypt = False
# ECB
for sample in samples:
cipher = AES.new(key, AES.MODE_ECB)
# If any decryption fails to produce valid padding, flag bad ECB decryption and break
if decrypt_and_check(cipher, sample[-16:]) == False:
ecb_bad_decrypt = True
break
# CBC last block with second to last block as IV
if len(multi_block_samples) != 0:
for sample in multi_block_samples:
cipher = AES.new(key, AES.MODE_CBC, sample[-32:-16])
# If any decryption fails to produce valid padding, flag bad CBC decryption and break
if decrypt_and_check(cipher, sample[-16:]) == False:
cbc_bad_decrypt = True
break
else:
cbc_bad_decrypt = True
if len(one_block_samples) != 0:
if options['known_iv'] != '':
cbc_key_as_iv_bad_decrypt = True
# CBC with entered IV
for sample in one_block_samples:
cipher = AES.new(key, AES.MODE_CBC, options['known_iv'].decode('hex'))
# If any decryption fails to produce valid padding, flag bad CBC decryption and break
if decrypt_and_check(cipher, sample) == False:
cbc_known_iv_bad_decrypt = True
break
else:
cbc_known_iv_bad_decrypt = True
# CBC with key as IV
for sample in one_block_samples:
cipher = AES.new(key, AES.MODE_CBC, key)
# If any decryption fails to produce valid padding, flag bad CBC_key_as_IV decryption and break
if decrypt_and_check(cipher, sample) == False:
cbc_key_as_iv_bad_decrypt = True
break
else:
cbc_known_iv_bad_decrypt = cbc_key_as_iv_bad_decrypt = True
if not ecb_bad_decrypt:
results.append(key.encode('hex') + ' may be the correct key in ECB mode or CBC mode with static all-NUL IV.')
if not cbc_bad_decrypt:
results.append(key.encode('hex') + ' may be the correct key in CBC mode, IV unknown.')
if not cbc_key_as_iv_bad_decrypt:
results.append(key.encode('hex') + ' may be the correct key and static IV in CBC mode.')
if not cbc_known_iv_bad_decrypt:
results.append(key.encode('hex') + ' may be the correct key in CBC mode using the provided IV.')
print 'Potentially correct AES keys:'
print '-' * 80
print '\n'.join(results)
return results
# Register this attack with the FeatherDuster module registry.
feathermodules.module_list['aes_key_brute'] = {
    'attack_function': aes_key_brute,
    'type':'brute',
    'keywords':['block'],
    'description':'Try a list of potential AES keys (or user-provided list of hex-encoded keys) against a list of AES ciphertexts.',
    'options': {
        'known_iv': '',  # hex-encoded IV to try against one-block CBC samples
        'keyfile': ''    # path to a file of hex-encoded candidate keys
    }
}
| {
"repo_name": "nccgroup/featherduster",
"path": "feathermodules/block/aes_key_brute.py",
"copies": "1",
"size": "5220",
"license": "bsd-3-clause",
"hash": -7432172728107185000,
"line_mean": 38.2481203008,
"line_max": 154,
"alpha_frac": 0.6111111111,
"autogenerated": false,
"ratio": 3.670886075949367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4781997187049367,
"avg_score": null,
"num_lines": null
} |
""" Aether library - routines for a modelling the lung
The Aether library is an advanced modelling library for models of the lung.
"""
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: Developers
Intended Audience :: Education
Intended Audience :: Science/Research
License :: OSI Approved :: Apache Software License
Programming Language :: Python
Programming Language :: Python :: @PYTHONLIBS_MAJOR_VERSION@.@PYTHONLIBS_MINOR_VERSION@
@SETUP_PY_OPERATING_SYSTEM_CLASSIFIER@
Topic :: Scientific/Engineering :: Medical Science Apps.
Topic :: Software Development :: Libraries :: Python Modules
"""
import sys
from setuptools import setup
from setuptools.dist import Distribution
class BinaryDistribution(Distribution):
    """Distribution that always reports binary (platform-specific) content,
    so setuptools builds a platform wheel instead of a pure-Python one."""
    def is_pure(self):
        # Not pure Python: the package ships compiled shared libraries.
        return False
    def has_ext_modules(self):
        return True
# Use the module docstring verbatim as the long description.
doclines = __doc__#.split("\n")
# Compiled extension artifacts to ship inside the package.
PLATFORM_PACKAGE_DATA = ["*.so", "*.pyd", ]
if sys.platform.startswith('win32'):
    # On Windows the runtime DLLs must be bundled alongside the bindings.
    PLATFORM_PACKAGE_DATA.extend(["aether.dll", "aether_c.dll"])
# Template file: the @...@ placeholders are substituted by the build system.
setup(
    name='lungnoodle.aether',
    version='@Aether_VERSION@',
    author='Lung Group, Auckland Bioengineering Institute.',
    author_email='h.sorby@auckland.ac.nz',
    packages=['aether'],
    package_data={'aether': PLATFORM_PACKAGE_DATA},
    url='https://lung.bioeng.auckland.ac.nz/',
    license='http://www.apache.org/licenses/LICENSE-2.0',
    description='Aether library of routines for modelling the lung.',
    # list(...) so this also works on Python 3, where filter() returns a lazy
    # iterator rather than the list setuptools expects for classifiers.
    classifiers=list(filter(None, classifiers.split("\n"))),
    long_description=doclines,
    distclass=BinaryDistribution,
    include_package_data=True,
)
| {
"repo_name": "LungNoodle/lungsim",
"path": "src/bindings/python/setup.in.py",
"copies": "1",
"size": "1601",
"license": "apache-2.0",
"hash": 914464364239322200,
"line_mean": 29.7884615385,
"line_max": 87,
"alpha_frac": 0.7170518426,
"autogenerated": false,
"ratio": 3.697459584295612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9889633210636564,
"avg_score": 0.004975643251809645,
"num_lines": 52
} |
"""aetypes - Python objects representing various AE types."""
from Carbon.AppleEvents import *
import struct
from types import *
import string
#
# convoluted, since there are cyclic dependencies between this file and
# aetools_convert.
#
def pack(*args, **kwargs):
    # Thin late-import wrapper: aepack imports this module, so importing at
    # call time avoids the circular dependency noted above.
    from aepack import pack
    return pack( *args, **kwargs)
def nice(s):
    """'nice' representation of an object"""
    # Python 2 only: StringType comes from the `types` star-import above.
    if type(s) is StringType: return repr(s)
    else: return str(s)
class Unknown:
    """An uninterpreted AE object: raw payload tagged with its type code."""

    def __init__(self, type, data):
        self.type = type
        self.data = data

    def __repr__(self):
        return "Unknown({!r}, {!r})".format(self.type, self.data)

    def __aepack__(self):
        # Re-pack the raw payload under its original type code.
        return pack(self.data, self.type)
class Enum:
    """An AE enumeration value"""
    def __init__(self, enum):
        # Pad/truncate to exactly 4 characters, as AE codes require.
        self.enum = "%-4.4s" % str(enum)
    def __repr__(self):
        return "Enum(%r)" % (self.enum,)
    def __str__(self):
        # Python 2 string-module function; strips the padding spaces.
        return string.strip(self.enum)
    def __aepack__(self):
        return pack(self.enum, typeEnumeration)
def IsEnum(x):
    # True if x is an Enum (or subclass) instance.
    return isinstance(x, Enum)
def mkenum(enum):
    """Coerce `enum` to an Enum, passing Enum instances through unchanged."""
    return enum if IsEnum(enum) else Enum(enum)
# Jack changed the way this is done
class InsertionLoc:
    """An AE insertion location: a position relative to a container object."""

    def __init__(self, of, pos):
        self.of = of
        self.pos = pos

    def __repr__(self):
        return "InsertionLoc({!r}, {!r})".format(self.of, self.pos)

    def __aepack__(self):
        # Pack as an 'insl' record: container ('kobj') plus position ('kpos').
        payload = {'kobj': self.of, 'kpos': self.pos}
        return pack(payload, forcetype='insl')
# Convenience functions for dsp:
def beginning(of):
    # Insertion point before the first element of `of`.
    return InsertionLoc(of, Enum('bgng'))
def end(of):
    # Insertion point after the last element of `of` (note padded 4-char code).
    return InsertionLoc(of, Enum('end '))
class Boolean:
    """An AE boolean value"""

    def __init__(self, bool):
        # Normalize any truthy/falsy value to a real True/False.
        self.bool = (not not bool)

    def __repr__(self):
        return "Boolean(%r)" % (self.bool,)

    def __str__(self):
        return "True" if self.bool else "False"

    def __aepack__(self):
        # AE booleans travel as a single signed byte.
        return pack(struct.pack('b', self.bool), 'bool')
def IsBoolean(x):
    # True if x is a Boolean (or subclass) instance.
    return isinstance(x, Boolean)
def mkboolean(bool):
    """Coerce to Boolean, passing Boolean instances through unchanged."""
    return bool if IsBoolean(bool) else Boolean(bool)
class Type:
    """An AE 4-char typename object"""
    def __init__(self, type):
        # Pad/truncate to the 4-character OSType form.
        self.type = "%-4.4s" % str(type)
    def __repr__(self):
        return "Type(%r)" % (self.type,)
    def __str__(self):
        # Python 2 string-module function; strips padding spaces.
        return string.strip(self.type)
    def __aepack__(self):
        return pack(self.type, typeType)
def IsType(x):
    """Return true if x is a Type instance."""
    return isinstance(x, Type)
def mktype(type):
    """Coerce a 4-char typename (or existing Type) into a Type."""
    if IsType(type): return type
    return Type(type)
class Keyword:
    """An AE 4-char keyword object"""
    def __init__(self, keyword):
        # Pad/truncate to exactly 4 characters, as AE codes require.
        self.keyword = "%-4.4s" % str(keyword)
    def __repr__(self):
        # Bug fix: the original combined backquotes with %r, applying
        # repr() twice and producing e.g. Keyword("'abc '") instead of
        # Keyword('abc ').  Use the same tuple-based %r formatting as
        # the sibling classes (Enum, Type).
        return "Keyword(%r)" % (self.keyword,)
    def __str__(self):
        # Strip the padding added in __init__.
        return string.strip(self.keyword)
    def __aepack__(self):
        return pack(self.keyword, typeKeyword)
def IsKeyword(x):
    """Return true if x is a Keyword instance."""
    return isinstance(x, Keyword)
class Range:
    """An AE range object"""
    def __init__(self, start, stop):
        self.start = start
        self.stop = stop
    def __repr__(self):
        return "Range(%r, %r)" % (self.start, self.stop)
    def __str__(self):
        return "%s thru %s" % (nice(self.start), nice(self.stop))
    def __aepack__(self):
        # 'star'/'stop' are the AE record keys for range start/stop.
        return pack({'star': self.start, 'stop': self.stop}, 'rang')
def IsRange(x):
    """Return true if x is a Range instance."""
    return isinstance(x, Range)
class Comparison:
    """An AE Comparison"""
    def __init__(self, obj1, relo, obj2):
        self.obj1 = obj1
        # 4-char comparison-operator code, padded to exactly 4 characters.
        self.relo = "%-4.4s" % str(relo)
        self.obj2 = obj2
    def __repr__(self):
        return "Comparison(%r, %r, %r)" % (self.obj1, self.relo, self.obj2)
    def __str__(self):
        # Infix rendering: "obj1 relo obj2".
        return "%s %s %s" % (nice(self.obj1), string.strip(self.relo), nice(self.obj2))
    def __aepack__(self):
        return pack({'obj1': self.obj1,
                'relo': mkenum(self.relo),
                'obj2': self.obj2},
                'cmpd')
def IsComparison(x):
    """Return true if x is a Comparison instance."""
    return isinstance(x, Comparison)
class NComparison(Comparison):
    # The class attribute 'relo' must be set in a subclass
    def __init__(self, obj1, obj2):
        # Bug fix: 'self' was previously omitted from this unbound
        # base-class call, so obj1 was bound as self and the call
        # raised a TypeError (wrong argument count).
        Comparison.__init__(self, obj1, self.relo, obj2)
class Ordinal:
    """An AE Ordinal"""
    def __init__(self, abso):
        # self.obj1 = obj1
        # 4-char absolute-ordinal code, padded to exactly 4 characters.
        self.abso = "%-4.4s" % str(abso)
    def __repr__(self):
        return "Ordinal(%r)" % (self.abso,)
    def __str__(self):
        return "%s" % (string.strip(self.abso))
    def __aepack__(self):
        return pack(self.abso, 'abso')
def IsOrdinal(x):
    """Return true if x is an Ordinal instance."""
    return isinstance(x, Ordinal)
class NOrdinal(Ordinal):
    """Ordinal whose code comes from the subclass attribute 'abso'."""
    # The class attribute 'abso' must be set in a subclass
    def __init__(self):
        Ordinal.__init__(self, self.abso)
class Logical:
    """An AE logical expression object"""
    def __init__(self, logc, term):
        # 4-char logical-operator code, padded to exactly 4 characters.
        self.logc = "%-4.4s" % str(logc)
        self.term = term
    def __repr__(self):
        return "Logical(%r, %r)" % (self.logc, self.term)
    def __str__(self):
        # Two-element list terms render infix; everything else as op(args).
        if type(self.term) == ListType and len(self.term) == 2:
            return "%s %s %s" % (nice(self.term[0]),
                string.strip(self.logc),
                nice(self.term[1]))
        else:
            return "%s(%s)" % (string.strip(self.logc), nice(self.term))
    def __aepack__(self):
        return pack({'logc': mkenum(self.logc), 'term': self.term}, 'logi')
def IsLogical(x):
    """Return true if x is a Logical instance."""
    return isinstance(x, Logical)
class StyledText:
    """An AE object representing text rendered in a particular style."""
    def __init__(self, style, text):
        self.style = style
        self.text = text
    def __repr__(self):
        return "StyledText(" + repr(self.style) + ", " + repr(self.text) + ")"
    def __str__(self):
        # Only the raw text; the style is dropped.
        return self.text
    def __aepack__(self):
        return pack({'ksty': self.style, 'ktxt': self.text}, 'STXT')
def IsStyledText(x):
    """Return true if x is a StyledText instance."""
    return isinstance(x, StyledText)
class AEText:
    """An AE text object with style, script and language specified"""
    def __init__(self, script, style, text):
        self.script = script
        self.style = style
        self.text = text
    def __repr__(self):
        return "AEText(%r, %r, %r)" % (self.script, self.style, self.text)
    def __str__(self):
        # Only the raw text, without script/style information.
        return self.text
    def __aepack__(self):
        return pack({keyAEScriptTag: self.script, keyAEStyles: self.style,
                keyAEText: self.text}, typeAEText)
def IsAEText(x):
    """Return true if x is an AEText instance."""
    return isinstance(x, AEText)
class IntlText:
    """A text object with script and language specified"""
    def __init__(self, script, language, text):
        self.script = script
        self.language = language
        self.text = text
    def __repr__(self):
        return "IntlText(%r, %r, %r)" % (self.script, self.language, self.text)
    def __str__(self):
        # Only the raw text.
        return self.text
    def __aepack__(self):
        # Script and language are packed as two shorts preceding the text.
        return pack(struct.pack('hh', self.script, self.language)+self.text,
                typeIntlText)
def IsIntlText(x):
    """Return true if x is an IntlText instance."""
    return isinstance(x, IntlText)
class IntlWritingCode:
    """An object representing script and language"""
    def __init__(self, script, language):
        self.script = script
        self.language = language
    def __repr__(self):
        return "IntlWritingCode(%r, %r)" % (self.script, self.language)
    def __str__(self):
        return "script system %d, language %d"%(self.script, self.language)
    def __aepack__(self):
        # Packed as two shorts: script system id, then language id.
        return pack(struct.pack('hh', self.script, self.language),
                typeIntlWritingCode)
def IsIntlWritingCode(x):
    """Return true if x is an IntlWritingCode instance."""
    return isinstance(x, IntlWritingCode)
class QDPoint:
    """A QuickDraw point: vertical coordinate first, then horizontal."""
    def __init__(self, v, h):
        self.v = v
        self.h = h
    def __repr__(self):
        return "QDPoint(%r, %r)" % (self.v, self.h)
    def __str__(self):
        return "(%d, %d)" % (self.v, self.h)
    def __aepack__(self):
        return pack(struct.pack('hh', self.v, self.h), typeQDPoint)
def IsQDPoint(x):
    """Return true if x is a QDPoint instance."""
    return isinstance(x, QDPoint)
class QDRectangle:
    """A rectangle"""
    def __init__(self, v0, h0, v1, h1):
        # Two corners: (v0, h0) and (v1, h1).
        self.v0 = v0
        self.h0 = h0
        self.v1 = v1
        self.h1 = h1
    def __repr__(self):
        return "QDRectangle(%r, %r, %r, %r)" % (self.v0, self.h0, self.v1, self.h1)
    def __str__(self):
        return "(%d, %d)-(%d, %d)"%(self.v0, self.h0, self.v1, self.h1)
    def __aepack__(self):
        return pack(struct.pack('hhhh', self.v0, self.h0, self.v1, self.h1),
                typeQDRectangle)
def IsQDRectangle(x):
    """Return true if x is a QDRectangle instance."""
    return isinstance(x, QDRectangle)
class RGBColor:
    """An RGB color; the three components are packed as shorts."""
    def __init__(self, r, g, b):
        self.r = r
        self.g = g
        self.b = b
    def __repr__(self):
        return "RGBColor(%r, %r, %r)" % (self.r, self.g, self.b)
    def __str__(self):
        return "0x%x red, 0x%x green, 0x%x blue" % (self.r, self.g, self.b)
    def __aepack__(self):
        return pack(struct.pack('hhh', self.r, self.g, self.b), typeRGBColor)
def IsRGBColor(x):
    """Return true if x is an RGBColor instance."""
    return isinstance(x, RGBColor)
class ObjectSpecifier:
    """Construct and manipulate AE object specifiers in Python.

    An object specifier is a record with four fields:

    key    type    description
    ---    ----    -----------
    'want' type    4-char class code of the thing we want,
                   e.g. word, paragraph or property
    'form' enum    how we specify which 'want' thing(s) we want,
                   e.g. by index, by range, by name, or by property specifier
    'seld' any     which thing(s) we want,
                   e.g. its index, its name, or its property specifier
    'from' object  the object in which it is contained,
                   or null, meaning look for it in the application

    This class is deliberately not called plain "Object", since that
    name is likely to be used by the application.
    """
    def __init__(self, want, form, seld, fr = None):
        self.want = want
        self.form = form
        self.seld = seld
        self.fr = fr
    def __repr__(self):
        head = "ObjectSpecifier(%r, %r, %r" % (self.want, self.form, self.seld)
        if self.fr:
            return head + ", %r)" % (self.fr,)
        return head + ")"
    def __aepack__(self):
        return pack({'want': mktype(self.want),
                     'form': mkenum(self.form),
                     'seld': self.seld,
                     'from': self.fr},
                    'obj ')
def IsObjectSpecifier(x):
    """Return true if x is an ObjectSpecifier instance."""
    return isinstance(x, ObjectSpecifier)
# Backwards compatibility, sigh...
class Property(ObjectSpecifier):
    # A property of an object: an ObjectSpecifier with form 'prop' whose
    # seld is the 4-char property code (wrapped in a Type).
    def __init__(self, which, fr = None, want='prop'):
        ObjectSpecifier.__init__(self, want, 'prop', mktype(which), fr)
    def __repr__(self):
        if self.fr:
            return "Property(%r, %r)" % (self.seld.type, self.fr)
        else:
            return "Property(%r)" % (self.seld.type,)
    def __str__(self):
        if self.fr:
            return "Property %s of %s" % (str(self.seld), str(self.fr))
        else:
            return "Property %s" % str(self.seld)
class NProperty(ObjectSpecifier):
    # Subclasses *must* set these baseclass attributes:
    #   want  -- the type of this property
    #   which -- the property name code of this property
    def __init__(self, fr = None):
        #try:
        #    dummy = self.want
        #except:
        #    self.want = 'prop'
        # NOTE(review): this unconditionally overrides any subclass 'want'
        # (see commented-out code above), so the want != 'prop' branch in
        # __repr__ below can never trigger -- confirm against upstream.
        self.want = 'prop'
        ObjectSpecifier.__init__(self, self.want, 'prop',
                mktype(self.which), fr)
    def __repr__(self):
        rv = "Property(%r" % (self.seld.type,)
        if self.fr:
            rv = rv + ", fr=%r" % (self.fr,)
        if self.want != 'prop':
            rv = rv + ", want=%r" % (self.want,)
        return rv + ")"
    def __str__(self):
        if self.fr:
            return "Property %s of %s" % (str(self.seld), str(self.fr))
        else:
            return "Property %s" % str(self.seld)
class SelectableItem(ObjectSpecifier):
    # Infers the AE 'form' from the Python type of the selector 'seld':
    # string -> by name, Range -> by range, Comparison/Logical -> by test,
    # tuple -> explicit (form, seld) pair, anything else -> by index.
    def __init__(self, want, seld, fr = None):
        t = type(seld)
        if t == StringType:
            form = 'name'
        elif IsRange(seld):
            form = 'rang'
        elif IsComparison(seld) or IsLogical(seld):
            form = 'test'
        elif t == TupleType:
            # Breakout: specify both form and seld in a tuple
            # (if you want ID or rele or somesuch)
            form, seld = seld
        else:
            form = 'indx'
        ObjectSpecifier.__init__(self, want, form, seld, fr)
class ComponentItem(SelectableItem):
    # Derived classes *must* set the *class attribute* 'want' to some constant
    # Also, dictionaries _propdict and _elemdict must be set to map property
    # and element names to the correct classes
    _propdict = {}
    _elemdict = {}
    def __init__(self, which, fr = None):
        SelectableItem.__init__(self, self.want, which, fr)
    def __repr__(self):
        if not self.fr:
            return "%s(%r)" % (self.__class__.__name__, self.seld)
        return "%s(%r, %r)" % (self.__class__.__name__, self.seld, self.fr)
    def __str__(self):
        # Human-readable rendering; a Range whose endpoints are instances
        # of this same class is collapsed to "start thru stop".
        seld = self.seld
        if type(seld) == StringType:
            ss = repr(seld)
        elif IsRange(seld):
            start, stop = seld.start, seld.stop
            if type(start) == InstanceType == type(stop) and \
                    start.__class__ == self.__class__ == stop.__class__:
                ss = str(start.seld) + " thru " + str(stop.seld)
            else:
                ss = str(seld)
        else:
            ss = str(seld)
        s = "%s %s" % (self.__class__.__name__, ss)
        if self.fr: s = s + " of %s" % str(self.fr)
        return s
    def __getattr__(self, name):
        # Element names yield a DelayedComponentItem (a selector factory);
        # property names yield the property instance directly.
        if self._elemdict.has_key(name):
            cls = self._elemdict[name]
            return DelayedComponentItem(cls, self)
        if self._propdict.has_key(name):
            cls = self._propdict[name]
            return cls(self)
        raise AttributeError, name
class DelayedComponentItem:
    """Curried element selector: remembers an element class and its
    container; calling it with a selector builds the element instance."""
    def __init__(self, compclass, fr):
        self.compclass = compclass
        self.fr = fr
    def __call__(self, which):
        return self.compclass(which, self.fr)
    def __repr__(self):
        return "%s(???, %r)" % (self.__class__.__name__, self.fr)
    def __str__(self):
        return "selector for element %s of %s" % (self.__class__.__name__,
                                                  str(self.fr))
# Generate trivial ComponentItem subclasses for the standard text/UI
# element classes; each one differs only in its 4-char 'want' code.
template = """
class %s(ComponentItem): want = '%s'
"""
exec template % ("Text", 'text')
exec template % ("Character", 'cha ')
exec template % ("Word", 'cwor')
exec template % ("Line", 'clin')
# NOTE(review): 'paragraph' is lower-case, unlike the other generated
# names -- presumably kept for backward compatibility; confirm upstream.
exec template % ("paragraph", 'cpar')
exec template % ("Window", 'cwin')
exec template % ("Document", 'docu')
exec template % ("File", 'file')
exec template % ("InsertionPoint", 'cins')
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/plat-mac/aetypes.py",
"copies": "1",
"size": "14915",
"license": "mit",
"hash": 1464577773710701800,
"line_mean": 25.2588028169,
"line_max": 87,
"alpha_frac": 0.5521957761,
"autogenerated": false,
"ratio": 3.3668171557562077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9386054427594084,
"avg_score": 0.006591700852424843,
"num_lines": 568
} |
"""aetypes - Python objects representing various AE types."""
from warnings import warnpy3k
warnpy3k("In 3.x, the aetypes module is removed.", stacklevel=2)
from Carbon.AppleEvents import *
import struct
from types import *
import string
#
# convoluted, since there are cyclic dependencies between this file and
# aetools_convert.
#
def pack(*args, **kwargs):
from aepack import pack
return pack( *args, **kwargs)
def nice(s):
"""'nice' representation of an object"""
if type(s) is StringType: return repr(s)
else: return str(s)
class Unknown:
"""An uninterpreted AE object"""
def __init__(self, type, data):
self.type = type
self.data = data
def __repr__(self):
return "Unknown(%r, %r)" % (self.type, self.data)
def __aepack__(self):
return pack(self.data, self.type)
class Enum:
"""An AE enumeration value"""
def __init__(self, enum):
self.enum = "%-4.4s" % str(enum)
def __repr__(self):
return "Enum(%r)" % (self.enum,)
def __str__(self):
return string.strip(self.enum)
def __aepack__(self):
return pack(self.enum, typeEnumeration)
def IsEnum(x):
return isinstance(x, Enum)
def mkenum(enum):
if IsEnum(enum): return enum
return Enum(enum)
# Jack changed the way this is done
class InsertionLoc:
def __init__(self, of, pos):
self.of = of
self.pos = pos
def __repr__(self):
return "InsertionLoc(%r, %r)" % (self.of, self.pos)
def __aepack__(self):
rec = {'kobj': self.of, 'kpos': self.pos}
return pack(rec, forcetype='insl')
# Convenience functions for dsp:
def beginning(of):
return InsertionLoc(of, Enum('bgng'))
def end(of):
return InsertionLoc(of, Enum('end '))
class Boolean:
"""An AE boolean value"""
def __init__(self, bool):
self.bool = (not not bool)
def __repr__(self):
return "Boolean(%r)" % (self.bool,)
def __str__(self):
if self.bool:
return "True"
else:
return "False"
def __aepack__(self):
return pack(struct.pack('b', self.bool), 'bool')
def IsBoolean(x):
return isinstance(x, Boolean)
def mkboolean(bool):
if IsBoolean(bool): return bool
return Boolean(bool)
class Type:
"""An AE 4-char typename object"""
def __init__(self, type):
self.type = "%-4.4s" % str(type)
def __repr__(self):
return "Type(%r)" % (self.type,)
def __str__(self):
return string.strip(self.type)
def __aepack__(self):
return pack(self.type, typeType)
def IsType(x):
return isinstance(x, Type)
def mktype(type):
if IsType(type): return type
return Type(type)
class Keyword:
    """An AE 4-char keyword object"""
    def __init__(self, keyword):
        # Pad/truncate to exactly 4 characters, as AE codes require.
        self.keyword = "%-4.4s" % str(keyword)
    def __repr__(self):
        # Bug fix: the original combined backquotes with %r, applying
        # repr() twice and producing e.g. Keyword("'abc '") instead of
        # Keyword('abc ').  Use the same tuple-based %r formatting as
        # the sibling classes (Enum, Type).
        return "Keyword(%r)" % (self.keyword,)
    def __str__(self):
        # Strip the padding added in __init__.
        return string.strip(self.keyword)
    def __aepack__(self):
        return pack(self.keyword, typeKeyword)
def IsKeyword(x):
return isinstance(x, Keyword)
class Range:
"""An AE range object"""
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __repr__(self):
return "Range(%r, %r)" % (self.start, self.stop)
def __str__(self):
return "%s thru %s" % (nice(self.start), nice(self.stop))
def __aepack__(self):
return pack({'star': self.start, 'stop': self.stop}, 'rang')
def IsRange(x):
return isinstance(x, Range)
class Comparison:
"""An AE Comparison"""
def __init__(self, obj1, relo, obj2):
self.obj1 = obj1
self.relo = "%-4.4s" % str(relo)
self.obj2 = obj2
def __repr__(self):
return "Comparison(%r, %r, %r)" % (self.obj1, self.relo, self.obj2)
def __str__(self):
return "%s %s %s" % (nice(self.obj1), string.strip(self.relo), nice(self.obj2))
def __aepack__(self):
return pack({'obj1': self.obj1,
'relo': mkenum(self.relo),
'obj2': self.obj2},
'cmpd')
def IsComparison(x):
return isinstance(x, Comparison)
class NComparison(Comparison):
    # The class attribute 'relo' must be set in a subclass
    def __init__(self, obj1, obj2):
        # Bug fix: 'self' was previously omitted from this unbound
        # base-class call, so obj1 was bound as self and the call
        # raised a TypeError (wrong argument count).
        Comparison.__init__(self, obj1, self.relo, obj2)
class Ordinal:
"""An AE Ordinal"""
def __init__(self, abso):
# self.obj1 = obj1
self.abso = "%-4.4s" % str(abso)
def __repr__(self):
return "Ordinal(%r)" % (self.abso,)
def __str__(self):
return "%s" % (string.strip(self.abso))
def __aepack__(self):
return pack(self.abso, 'abso')
def IsOrdinal(x):
return isinstance(x, Ordinal)
class NOrdinal(Ordinal):
# The class attribute 'abso' must be set in a subclass
def __init__(self):
Ordinal.__init__(self, self.abso)
class Logical:
"""An AE logical expression object"""
def __init__(self, logc, term):
self.logc = "%-4.4s" % str(logc)
self.term = term
def __repr__(self):
return "Logical(%r, %r)" % (self.logc, self.term)
def __str__(self):
if type(self.term) == ListType and len(self.term) == 2:
return "%s %s %s" % (nice(self.term[0]),
string.strip(self.logc),
nice(self.term[1]))
else:
return "%s(%s)" % (string.strip(self.logc), nice(self.term))
def __aepack__(self):
return pack({'logc': mkenum(self.logc), 'term': self.term}, 'logi')
def IsLogical(x):
return isinstance(x, Logical)
class StyledText:
"""An AE object respresenting text in a certain style"""
def __init__(self, style, text):
self.style = style
self.text = text
def __repr__(self):
return "StyledText(%r, %r)" % (self.style, self.text)
def __str__(self):
return self.text
def __aepack__(self):
return pack({'ksty': self.style, 'ktxt': self.text}, 'STXT')
def IsStyledText(x):
return isinstance(x, StyledText)
class AEText:
"""An AE text object with style, script and language specified"""
def __init__(self, script, style, text):
self.script = script
self.style = style
self.text = text
def __repr__(self):
return "AEText(%r, %r, %r)" % (self.script, self.style, self.text)
def __str__(self):
return self.text
def __aepack__(self):
return pack({keyAEScriptTag: self.script, keyAEStyles: self.style,
keyAEText: self.text}, typeAEText)
def IsAEText(x):
return isinstance(x, AEText)
class IntlText:
"""A text object with script and language specified"""
def __init__(self, script, language, text):
self.script = script
self.language = language
self.text = text
def __repr__(self):
return "IntlText(%r, %r, %r)" % (self.script, self.language, self.text)
def __str__(self):
return self.text
def __aepack__(self):
return pack(struct.pack('hh', self.script, self.language)+self.text,
typeIntlText)
def IsIntlText(x):
return isinstance(x, IntlText)
class IntlWritingCode:
"""An object representing script and language"""
def __init__(self, script, language):
self.script = script
self.language = language
def __repr__(self):
return "IntlWritingCode(%r, %r)" % (self.script, self.language)
def __str__(self):
return "script system %d, language %d"%(self.script, self.language)
def __aepack__(self):
return pack(struct.pack('hh', self.script, self.language),
typeIntlWritingCode)
def IsIntlWritingCode(x):
return isinstance(x, IntlWritingCode)
class QDPoint:
"""A point"""
def __init__(self, v, h):
self.v = v
self.h = h
def __repr__(self):
return "QDPoint(%r, %r)" % (self.v, self.h)
def __str__(self):
return "(%d, %d)"%(self.v, self.h)
def __aepack__(self):
return pack(struct.pack('hh', self.v, self.h),
typeQDPoint)
def IsQDPoint(x):
return isinstance(x, QDPoint)
class QDRectangle:
"""A rectangle"""
def __init__(self, v0, h0, v1, h1):
self.v0 = v0
self.h0 = h0
self.v1 = v1
self.h1 = h1
def __repr__(self):
return "QDRectangle(%r, %r, %r, %r)" % (self.v0, self.h0, self.v1, self.h1)
def __str__(self):
return "(%d, %d)-(%d, %d)"%(self.v0, self.h0, self.v1, self.h1)
def __aepack__(self):
return pack(struct.pack('hhhh', self.v0, self.h0, self.v1, self.h1),
typeQDRectangle)
def IsQDRectangle(x):
return isinstance(x, QDRectangle)
class RGBColor:
"""An RGB color"""
def __init__(self, r, g, b):
self.r = r
self.g = g
self.b = b
def __repr__(self):
return "RGBColor(%r, %r, %r)" % (self.r, self.g, self.b)
def __str__(self):
return "0x%x red, 0x%x green, 0x%x blue"% (self.r, self.g, self.b)
def __aepack__(self):
return pack(struct.pack('hhh', self.r, self.g, self.b),
typeRGBColor)
def IsRGBColor(x):
return isinstance(x, RGBColor)
class ObjectSpecifier:
"""A class for constructing and manipulation AE object specifiers in python.
An object specifier is actually a record with four fields:
key type description
--- ---- -----------
'want' type 4-char class code of thing we want,
e.g. word, paragraph or property
'form' enum how we specify which 'want' thing(s) we want,
e.g. by index, by range, by name, or by property specifier
'seld' any which thing(s) we want,
e.g. its index, its name, or its property specifier
'from' object the object in which it is contained,
or null, meaning look for it in the application
Note that we don't call this class plain "Object", since that name
is likely to be used by the application.
"""
def __init__(self, want, form, seld, fr = None):
self.want = want
self.form = form
self.seld = seld
self.fr = fr
def __repr__(self):
s = "ObjectSpecifier(%r, %r, %r" % (self.want, self.form, self.seld)
if self.fr:
s = s + ", %r)" % (self.fr,)
else:
s = s + ")"
return s
def __aepack__(self):
return pack({'want': mktype(self.want),
'form': mkenum(self.form),
'seld': self.seld,
'from': self.fr},
'obj ')
def IsObjectSpecifier(x):
return isinstance(x, ObjectSpecifier)
# Backwards compatibility, sigh...
class Property(ObjectSpecifier):
def __init__(self, which, fr = None, want='prop'):
ObjectSpecifier.__init__(self, want, 'prop', mktype(which), fr)
def __repr__(self):
if self.fr:
return "Property(%r, %r)" % (self.seld.type, self.fr)
else:
return "Property(%r)" % (self.seld.type,)
def __str__(self):
if self.fr:
return "Property %s of %s" % (str(self.seld), str(self.fr))
else:
return "Property %s" % str(self.seld)
class NProperty(ObjectSpecifier):
    # Subclasses *must* set these baseclass attributes:
    #   want  -- the type of this property
    #   which -- the property name code of this property
    def __init__(self, fr = None):
        #try:
        #    dummy = self.want
        #except:
        #    self.want = 'prop'
        # NOTE(review): this unconditionally overrides any subclass 'want'
        # (see commented-out code above), so the want != 'prop' branch in
        # __repr__ below can never trigger -- confirm against upstream.
        self.want = 'prop'
        ObjectSpecifier.__init__(self, self.want, 'prop',
                mktype(self.which), fr)
    def __repr__(self):
        rv = "Property(%r" % (self.seld.type,)
        if self.fr:
            rv = rv + ", fr=%r" % (self.fr,)
        if self.want != 'prop':
            rv = rv + ", want=%r" % (self.want,)
        return rv + ")"
    def __str__(self):
        if self.fr:
            return "Property %s of %s" % (str(self.seld), str(self.fr))
        else:
            return "Property %s" % str(self.seld)
class SelectableItem(ObjectSpecifier):
def __init__(self, want, seld, fr = None):
t = type(seld)
if t == StringType:
form = 'name'
elif IsRange(seld):
form = 'rang'
elif IsComparison(seld) or IsLogical(seld):
form = 'test'
elif t == TupleType:
# Breakout: specify both form and seld in a tuple
# (if you want ID or rele or somesuch)
form, seld = seld
else:
form = 'indx'
ObjectSpecifier.__init__(self, want, form, seld, fr)
class ComponentItem(SelectableItem):
# Derived classes *must* set the *class attribute* 'want' to some constant
# Also, dictionaries _propdict and _elemdict must be set to map property
# and element names to the correct classes
_propdict = {}
_elemdict = {}
def __init__(self, which, fr = None):
SelectableItem.__init__(self, self.want, which, fr)
def __repr__(self):
if not self.fr:
return "%s(%r)" % (self.__class__.__name__, self.seld)
return "%s(%r, %r)" % (self.__class__.__name__, self.seld, self.fr)
def __str__(self):
seld = self.seld
if type(seld) == StringType:
ss = repr(seld)
elif IsRange(seld):
start, stop = seld.start, seld.stop
if type(start) == InstanceType == type(stop) and \
start.__class__ == self.__class__ == stop.__class__:
ss = str(start.seld) + " thru " + str(stop.seld)
else:
ss = str(seld)
else:
ss = str(seld)
s = "%s %s" % (self.__class__.__name__, ss)
if self.fr: s = s + " of %s" % str(self.fr)
return s
def __getattr__(self, name):
if self._elemdict.has_key(name):
cls = self._elemdict[name]
return DelayedComponentItem(cls, self)
if self._propdict.has_key(name):
cls = self._propdict[name]
return cls(self)
raise AttributeError, name
class DelayedComponentItem:
def __init__(self, compclass, fr):
self.compclass = compclass
self.fr = fr
def __call__(self, which):
return self.compclass(which, self.fr)
def __repr__(self):
return "%s(???, %r)" % (self.__class__.__name__, self.fr)
def __str__(self):
return "selector for element %s of %s"%(self.__class__.__name__, str(self.fr))
template = """
class %s(ComponentItem): want = '%s'
"""
exec template % ("Text", 'text')
exec template % ("Character", 'cha ')
exec template % ("Word", 'cwor')
exec template % ("Line", 'clin')
exec template % ("paragraph", 'cpar')
exec template % ("Window", 'cwin')
exec template % ("Document", 'docu')
exec template % ("File", 'file')
exec template % ("InsertionPoint", 'cins')
| {
"repo_name": "wskplho/sl4a",
"path": "python/src/Lib/plat-mac/aetypes.py",
"copies": "33",
"size": "15011",
"license": "apache-2.0",
"hash": -1968584301339179800,
"line_mean": 25.288966725,
"line_max": 87,
"alpha_frac": 0.553594031,
"autogenerated": false,
"ratio": 3.366449876653958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A eventlet based handler."""
from __future__ import absolute_import
import contextlib
import logging
import eventlet
from eventlet.green import select as green_select
from eventlet.green import socket as green_socket
from eventlet.green import time as green_time
from eventlet.green import threading as green_threading
from eventlet import queue as green_queue
from kazoo.handlers import utils
import kazoo.python2atexit as python2atexit
LOG = logging.getLogger(__name__)
# sentinel objects
_STOP = object()
@contextlib.contextmanager
def _yield_before_after():
    """Cooperatively yield to other greenthreads before and after the body."""
    # Yield to any other co-routines...
    #
    # See: http://eventlet.net/doc/modules/greenthread.html
    # for how this zero sleep is really a cooperative yield to other potential
    # co-routines...
    eventlet.sleep(0)
    try:
        yield
    finally:
        # Yield again even if the body raised.
        eventlet.sleep(0)
class TimeoutError(Exception):
    """Raised when a handler operation exceeds its allotted time."""
    pass
class AsyncResult(utils.AsyncResult):
    """A one-time event that stores a value or an exception"""
    def __init__(self, handler):
        # Use eventlet's green Condition so waiters block cooperatively,
        # and this module's TimeoutError for wait timeouts.
        super(AsyncResult, self).__init__(handler,
                                          green_threading.Condition,
                                          TimeoutError)
class SequentialEventletHandler(object):
    """Eventlet handler for sequentially executing callbacks.
    This handler executes callbacks in a sequential manner. A queue is
    created for each of the callback events, so that each type of event
    has its callback type run sequentially. These are split into two
    queues, one for watch events and one for async result completion
    callbacks.
    Each queue type has a greenthread worker that pulls the callback event
    off the queue and runs it in the order the client sees it.
    This split helps ensure that watch callbacks won't block session
    re-establishment should the connection be lost during a Zookeeper
    client call.
    Watch and completion callbacks should avoid blocking behavior as
    the next callback of that type won't be run until it completes. If
    you need to block, spawn a new greenthread and return immediately so
    callbacks can proceed.
    .. note::
        Completion callbacks can block to wait on Zookeeper calls, but
        no other completion callbacks will execute until the callback
        returns.
    """
    name = "sequential_eventlet_handler"
    # Queue implementation used for both work queues (overridable).
    queue_impl = green_queue.LightQueue
    queue_empty = green_queue.Empty
    def __init__(self):
        """Create a :class:`SequentialEventletHandler` instance"""
        self.callback_queue = self.queue_impl()
        self.completion_queue = self.queue_impl()
        self._workers = []
        self._started = False
    @staticmethod
    def sleep_func(wait):
        green_time.sleep(wait)
    @property
    def running(self):
        return self._started
    # Exception type raised by this handler's timed waits.
    timeout_exception = TimeoutError
    def _process_completion_queue(self):
        # Worker loop: run each completion callback in arrival order
        # until the _STOP sentinel is dequeued.
        while True:
            cb = self.completion_queue.get()
            if cb is _STOP:
                break
            try:
                with _yield_before_after():
                    cb()
            except Exception:
                LOG.warning("Exception in worker completion queue greenlet",
                            exc_info=True)
            finally:
                del cb  # release before possible idle
    def _process_callback_queue(self):
        # Worker loop for watch callbacks; mirrors the completion worker.
        while True:
            cb = self.callback_queue.get()
            if cb is _STOP:
                break
            try:
                with _yield_before_after():
                    cb()
            except Exception:
                LOG.warning("Exception in worker callback queue greenlet",
                            exc_info=True)
            finally:
                del cb  # release before possible idle
    def start(self):
        if not self._started:
            # Spawn our worker threads, we have
            # - A callback worker for watch events to be called
            # - A completion worker for completion events to be called
            w = eventlet.spawn(self._process_completion_queue)
            self._workers.append((w, self.completion_queue))
            w = eventlet.spawn(self._process_callback_queue)
            self._workers.append((w, self.callback_queue))
            self._started = True
            python2atexit.register(self.stop)
    def stop(self):
        # Push the sentinel into each queue and wait for its worker to exit.
        while self._workers:
            w, q = self._workers.pop()
            q.put(_STOP)
            w.wait()
        self._started = False
        python2atexit.unregister(self.stop)
    def socket(self, *args, **kwargs):
        return utils.create_tcp_socket(green_socket)
    def create_socket_pair(self):
        return utils.create_socket_pair(green_socket)
    def event_object(self):
        return green_threading.Event()
    def lock_object(self):
        return green_threading.Lock()
    def rlock_object(self):
        return green_threading.RLock()
    def create_connection(self, *args, **kwargs):
        return utils.create_tcp_connection(green_socket, *args, **kwargs)
    def select(self, *args, **kwargs):
        with _yield_before_after():
            return green_select.select(*args, **kwargs)
    def async_result(self):
        return AsyncResult(self)
    def spawn(self, func, *args, **kwargs):
        # Green daemon thread; returned so callers may join it.
        t = green_threading.Thread(target=func, args=args, kwargs=kwargs)
        t.daemon = True
        t.start()
        return t
    def dispatch_callback(self, callback):
        self.callback_queue.put(lambda: callback.func(*callback.args))
| {
"repo_name": "kawamon/hue",
"path": "desktop/core/ext-py/kazoo-2.8.0/kazoo/handlers/eventlet.py",
"copies": "3",
"size": "5579",
"license": "apache-2.0",
"hash": -7586523435406491000,
"line_mean": 30.1675977654,
"line_max": 78,
"alpha_frac": 0.625201649,
"autogenerated": false,
"ratio": 4.466773418734988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6591975067734988,
"avg_score": null,
"num_lines": null
} |
"""A eventlet based handler."""
from __future__ import absolute_import
import contextlib
import logging
import kazoo.python2atexit as python2atexit
import eventlet
from eventlet.green import select as green_select
from eventlet.green import socket as green_socket
from eventlet.green import time as green_time
from eventlet.green import threading as green_threading
from eventlet import queue as green_queue
from kazoo.handlers import utils
LOG = logging.getLogger(__name__)
# sentinel objects
_STOP = object()
@contextlib.contextmanager
def _yield_before_after():
# Yield to any other co-routines...
#
# See: http://eventlet.net/doc/modules/greenthread.html
# for how this zero sleep is really a cooperative yield to other potential
# co-routines...
eventlet.sleep(0)
try:
yield
finally:
eventlet.sleep(0)
class TimeoutError(Exception):
pass
class AsyncResult(utils.AsyncResult):
"""A one-time event that stores a value or an exception"""
def __init__(self, handler):
super(AsyncResult, self).__init__(handler,
green_threading.Condition,
TimeoutError)
class SequentialEventletHandler(object):
"""Eventlet handler for sequentially executing callbacks.
This handler executes callbacks in a sequential manner. A queue is
created for each of the callback events, so that each type of event
has its callback type run sequentially. These are split into two
queues, one for watch events and one for async result completion
callbacks.
Each queue type has a greenthread worker that pulls the callback event
off the queue and runs it in the order the client sees it.
This split helps ensure that watch callbacks won't block session
re-establishment should the connection be lost during a Zookeeper
client call.
Watch and completion callbacks should avoid blocking behavior as
the next callback of that type won't be run until it completes. If
you need to block, spawn a new greenthread and return immediately so
callbacks can proceed.
.. note::
Completion callbacks can block to wait on Zookeeper calls, but
no other completion callbacks will execute until the callback
returns.
"""
name = "sequential_eventlet_handler"
def __init__(self):
"""Create a :class:`SequentialEventletHandler` instance"""
self.callback_queue = green_queue.LightQueue()
self.completion_queue = green_queue.LightQueue()
self._workers = []
self._started = False
@staticmethod
def sleep_func(wait):
green_time.sleep(wait)
@property
def running(self):
return self._started
timeout_exception = TimeoutError
def _process_completion_queue(self):
while True:
cb = self.completion_queue.get()
if cb is _STOP:
break
try:
with _yield_before_after():
cb()
except Exception:
LOG.warning("Exception in worker completion queue greenlet",
exc_info=True)
def _process_callback_queue(self):
while True:
cb = self.callback_queue.get()
if cb is _STOP:
break
try:
with _yield_before_after():
cb()
except Exception:
LOG.warning("Exception in worker callback queue greenlet",
exc_info=True)
def start(self):
if not self._started:
# Spawn our worker threads, we have
# - A callback worker for watch events to be called
# - A completion worker for completion events to be called
w = eventlet.spawn(self._process_completion_queue)
self._workers.append((w, self.completion_queue))
w = eventlet.spawn(self._process_callback_queue)
self._workers.append((w, self.callback_queue))
self._started = True
python2atexit.register(self.stop)
def stop(self):
while self._workers:
w, q = self._workers.pop()
q.put(_STOP)
w.wait()
self._started = False
python2atexit.unregister(self.stop)
    def socket(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted for interface
        # compatibility but ignored; a plain green TCP socket is built.
        return utils.create_tcp_socket(green_socket)
    def create_socket_pair(self):
        # Connected pair of green (non-blocking) sockets.
        return utils.create_socket_pair(green_socket)
    def event_object(self):
        # Greenlet-safe Event.
        return green_threading.Event()
    def lock_object(self):
        # Greenlet-safe, non-reentrant lock.
        return green_threading.Lock()
    def rlock_object(self):
        # Greenlet-safe re-entrant lock.
        return green_threading.RLock()
    def create_connection(self, *args, **kwargs):
        # Green TCP connection; positional/keyword args are forwarded.
        return utils.create_tcp_connection(green_socket, *args, **kwargs)
    def select(self, *args, **kwargs):
        # Yield to other greenlets before and after the (potentially
        # blocking) select call.
        with _yield_before_after():
            return green_select.select(*args, **kwargs)
    def async_result(self):
        # AsyncResult bound to this handler instance.
        return AsyncResult(self)
def spawn(self, func, *args, **kwargs):
t = green_threading.Thread(target=func, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
    def dispatch_callback(self, callback):
        # Defer execution: the wrapped call runs later on the callback
        # worker greenlet, preserving submission order.
        self.callback_queue.put(lambda: callback.func(*callback.args))
| {
"repo_name": "kormat/kazoo",
"path": "kazoo/handlers/eventlet.py",
"copies": "13",
"size": "5365",
"license": "apache-2.0",
"hash": 1308649559128967400,
"line_mean": 30.0115606936,
"line_max": 78,
"alpha_frac": 0.6288909599,
"autogenerated": false,
"ratio": 4.444904722452361,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 173
} |
"""ae_web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
from publications import views as publication_views
from accounts import views as account_views
from logs import views as log_views
# import actstream
# router = routers.DefaultRouter()
# router.register(r'users', views.UserListView)
# router.register(r'groups', views.GroupViewSet)
# router.register(r'publications', publication_views.PublicationList.as_view(), base_name='publication')
# router.register(r'experiments', publication_views.ExperimentList.as_view(), base_name='experiment')
# router.register(r'associations', publication_views.AssociationList.as_view(), base_name='association')
# router.register(r'accounts', account_views.UserListView.as_view(), base_name='a7a')
# router.register(r'logs', log_views.LogListView.as_view(), base_name='log')
from accounts import urls as accounts_urls
from rest_api import urls as rest_api_urls
# ``django.conf.urls.patterns`` is deprecated since Django 1.8 and removed
# in 1.10; a plain list of ``url()`` instances is the supported, equivalent
# form (the original used an empty '' prefix, so nothing else changes).
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='index.html')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/', include(accounts_urls)),
    url(r'^api/', include(rest_api_urls)),
    # url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^publications/$', publication_views.PublicationList.as_view(), name='publication-list'),
    url(r'^publications/(?P<pk>[0-9]+)/$', publication_views.PublicationDetail.as_view(), name='publication-detail'),
]
# urlpatterns = [
#
# url(r'^', include(router.urls)),
# url(r'^admin/', include(admin.site.urls)),
# # url(r'^activity/', include('pinax.eventlog.')),
# url(r'^', include(router.urls)),
# url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
#
# url(r'^publications/$', publication_views.PublicationList.as_view(), name='publication-list'),
# url(r'^experiments/$', publication_views.ExperimentList.as_view(), name='experiment-list'),
# url(r'^associations/$', publication_views.AssociationList.as_view(), name='association-list'),
# # url(r'^logs/$', log_views.LogListView.as_view(), name='log-list'),
#
#
# url(r'^publications/(?P<pk>[0-9]+)/$', publication_views.PublicationDetail.as_view(), name='publication-detail'),
# url(r'^experiments/(?P<pk>[0-9]+)/$', publication_views.ExperimentDetail.as_view(), name='experiment-detail'),
# url(r'^associations/(?P<pk>[0-9]+)/$', publication_views.AssociationDetail.as_view(), name='association-detail'),
# # url(r'^logs/(?P<pk>[0-9]+)/$', log_views.LogDetail.as_view(), name='log-detail'),
# # url(r'^activities/(?P<pk>[0-9]+)/$', log_views.LogViewSet, name='log-detail')
# url(r'^accounts/', include(accounts_urls)),
# url(r'^api/', include(rest_api_urls)),
#
#
# ]
# urlpatterns = format_suffix_patterns(urlpatterns)
| {
"repo_name": "arrayexpress/ae_auto",
"path": "ae_web/ae_web/urls.py",
"copies": "1",
"size": "3592",
"license": "apache-2.0",
"hash": 746222809652551800,
"line_mean": 46.8933333333,
"line_max": 119,
"alpha_frac": 0.6968262806,
"autogenerated": false,
"ratio": 3.407969639468691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46047959200686905,
"avg_score": null,
"num_lines": null
} |
"""A experimental script for setting up DVS recording with Tracking dataset.
This script uses Sacred from IDSIA lab to setup the experiment.
This allows me to configure experiment by JSON file.
Author: Yuhuang Hu
Email : duugyue100@gmail.com
"""
from sacred import Experiment
import os
import cPickle as pickle
import numpy as np
import cv2
from spikefuel import tools, gui, helpers
exp = Experiment("DVS Recording - TrackingDataset")
# Default (empty) configuration; concrete values are supplied through
# Sacred's JSON config file or command-line overrides at run time.
exp.add_config({
    "tracking_dir": "",           # root folder of the Tracking dataset
    "tracking_stats_path": "",    # pickled dataset stats file
    "recording_save_path": "",    # where jAER recordings are logged
    "viewer_id": 1,               # jAER viewer ID: 1=Linux, 2=Mac OS X
    "screen_height": 0,           # display height in pixels
    "screen_width": 0,            # display width in pixels
    "work_win_scale": 0.9,        # working-window scale factor
    "bg_color": [255, 0, 0],      # background color (BGR list)
    "fps": 0                      # playback frame rate
})
@exp.automain
def dvs_vot_exp(tracking_dir,
                tracking_stats_path,
                recording_save_path,
                viewer_id,
                screen_height,
                screen_width,
                work_win_scale,
                bg_color,
                fps):
    """Setup an experiment for the Tracking dataset.

    NOTE(review): the function name and the original summary said "VOT";
    the body drives the *Tracking* dataset — apparent copy/paste from
    dvs_vot_exp.py.  Behavior is unchanged here, only documentation.

    Parameters
    ----------
    tracking_dir : string
        absolute path of Tracking dataset
        e.g. /home/user/vot2015
    tracking_stats_path : string
        path to tracking dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        the ID of jAER viewer, for Linux is 1, Mac OS X is 2
    screen_height : int
        height of the screen in pixel
    screen_width : int
        width of the screen in pixel
    work_win_scale : float
        the scaling factor that calculates working window size
    bg_color : list
        background color definition
    fps : int
        frame per second while displaying the video,
        will round to closest number
    """
    if not os.path.exists(str(recording_save_path)):
        os.mkdir(str(recording_save_path))
    # Load pickled Tracking dataset stats
    f = file(tracking_stats_path, mode="r")
    tracking_stats = pickle.load(f)
    f.close()
    # primary list
    pl = tracking_stats["primary_list"]
    # secondary list
    sl = tracking_stats["secondary_list"]
    # Create full background
    background = (np.ones((screen_height,
                           screen_width, 3))*bg_color).astype(np.uint8)
    # Setup OpenCV display window
    window_title = "DVS-TRACKING-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)
    # Experiment setup calibration
    # Not without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)
    # Init a general UDP socket
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for pcg in pl:
        # remove sequence Kalal until I got more memory
        if pcg != "Kalal":
            for scg in sl[pcg]:
                print "[MESSAGE] Display video sequence "+scg
                seq_base_path = os.path.join(tracking_dir, pcg, scg)
                frames = []
                for fn in tracking_stats[scg]:
                    frames.append(cv2.imread(os.path.join(seq_base_path, fn)))
                frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                    bg_color)
                frames = gui.create_border_sequence(frames, screen_height,
                                                    screen_width, bg_color)
                cv2.imshow(window_title, frames[0])
                print "[MESSAGE] Adapting video sequence "+scg
                cv2.waitKey(delay=2000)
                tools.start_log_dvs(s, recording_save_path, scg, viewer_id)
                for frame in frames:
                    cv2.imshow(window_title, frame)
                    # ESC (27) aborts the whole experiment immediately.
                    key = cv2.waitKey(delay=int(1000/fps)) & 0xFF
                    if key == 27:
                        cv2.destroyAllWindows()
                        quit()
                cv2.imshow(window_title, frames[-1])
                tools.stop_log_dvs(s, viewer_id)
                print "[MESSAGE] Releasing video sequence "+scg
                cv2.waitKey(delay=2000)
                cv2.imshow(window_title, background)
                cv2.waitKey(delay=1000)
                tools.reset_dvs_time(s)
                print "[MESSAGE] Video sequence "+scg+" is logged."
    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
| {
"repo_name": "duguyue100/spikefuel",
"path": "scripts/dvs_tracking_exp.py",
"copies": "1",
"size": "4540",
"license": "mit",
"hash": 3993334908465649000,
"line_mean": 32.8805970149,
"line_max": 78,
"alpha_frac": 0.5552863436,
"autogenerated": false,
"ratio": 3.9894551845342705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5044741528134271,
"avg_score": null,
"num_lines": null
} |
"""A experimental script for setting up DVS recording with VOT dataset.
This script uses Sacred from IDSIA lab to setup the experiment.
This allows me to configure experiment by JSON file.
Author: Yuhuang Hu
Email : duugyue100@gmail.com
"""
from sacred import Experiment
import os
import cPickle as pickle
import numpy as np
import cv2
from spikefuel import tools, gui, helpers
exp = Experiment("DVS Recording - VOT")
# Default (empty) configuration; concrete values are supplied through
# Sacred's JSON config file or command-line overrides at run time.
exp.add_config({
    "vot_dir": "",                # root folder of the VOT dataset
    "vot_stats_path": "",         # pickled dataset stats file
    "recording_save_path": "",    # where jAER recordings are logged
    "viewer_id": 1,               # jAER viewer ID: 1=Linux, 2=Mac OS X
    "screen_height": 0,           # display height in pixels
    "screen_width": 0,            # display width in pixels
    "work_win_scale": 0.9,        # working-window scale factor
    "bg_color": [255, 0, 0],      # background color (BGR list)
    "fps": 0                      # playback frame rate
})
@exp.automain
def dvs_vot_exp(vot_dir,
                vot_stats_path,
                recording_save_path,
                viewer_id,
                screen_height,
                screen_width,
                work_win_scale,
                bg_color,
                fps):
    """Setup an experiment for VOT dataset.

    Parameters
    ----------
    vot_dir : string
        absolute path of VOT dataset
        e.g. /home/user/vot2015
    vot_stats_path : string
        path to vot dataset stats
    recording_save_path : string
        path to logged recording data
    viewer_id : int
        the ID of jAER viewer, for Linux is 1, Mac OS X is 2
    screen_height : int
        height of the screen in pixel
    screen_width : int
        width of the screen in pixel
    work_win_scale : float
        the scaling factor that calculates working window size
    bg_color : list
        background color definition
    fps : int
        frame per second while displaying the video,
        will round to closest number
    """
    if not os.path.exists(str(recording_save_path)):
        os.mkdir(str(recording_save_path))
    # Load pickled VOT stats
    f = file(vot_stats_path, mode="r")
    vot_stats = pickle.load(f)
    f.close()
    vot_list = vot_stats['vot_list']
    num_frames = vot_stats['num_frames']
    # Load groundtruth and image lists
    print "[MESSAGE] Loading image lists."
    lists = []
    for i in xrange(len(num_frames)):
        list_path = os.path.join(vot_dir, vot_list[i])
        temp_list = tools.create_vot_image_list(list_path, num_frames[i])
        lists.append(temp_list)
    print "[MESSAGE] Ground truths and image lists are loaded."
    # Create full background
    background = (np.ones((screen_height,
                           screen_width, 3))*bg_color).astype(np.uint8)
    # Setup OpenCV display window
    window_title = "DVS-VOT-EXP"
    cv2.namedWindow(window_title, cv2.WND_PROP_FULLSCREEN)
    # Experiment setup calibration
    # Not without tuning images
    swin_h, swin_w = helpers.calibration(win_h=screen_height,
                                         win_w=screen_width,
                                         scale=work_win_scale,
                                         window_title=window_title,
                                         bg_color=bg_color)
    # Init a general UDP socket
    s = tools.init_dvs()
    tools.reset_dvs_time(s)
    for k in xrange(len(num_frames)):
        print "[MESSAGE] Display video sequence %i" % (k+1)
        frames = []
        for i in xrange(num_frames[k]):
            frames.append(cv2.imread(lists[k][i]))
        new_frames = gui.rescale_image_sequence(frames, swin_h, swin_w,
                                                bg_color)
        new_frames = gui.create_border_sequence(new_frames,
                                                screen_height, screen_width,
                                                bg_color)
        cv2.imshow(window_title, new_frames[0])
        print "[MESSAGE] Adapting video sequence %i" % (k+1)
        cv2.waitKey(delay=2000)
        tools.start_log_dvs(s, recording_save_path, vot_list[k], viewer_id)
        for i in xrange(num_frames[k]):
            cv2.imshow(window_title, new_frames[i])
            # ESC (27) aborts the whole experiment immediately.
            key = cv2.waitKey(delay=int(1000/fps)) & 0xFF
            if key == 27:
                cv2.destroyAllWindows()
                quit()
        cv2.imshow(window_title, new_frames[-1])
        tools.stop_log_dvs(s, viewer_id)
        print "[MESSAGE] Releasing video sequence %i" % (k+1)
        cv2.waitKey(delay=2000)
        cv2.imshow(window_title, background)
        cv2.waitKey(delay=1000)
        tools.reset_dvs_time(s)
        print "[MESSAGE] Video sequence %i is logged." % (k+1)
    # Destroy both the socket and the OpenCV window
    tools.destroy_dvs(s)
    cv2.destroyAllWindows()
| {
"repo_name": "duguyue100/spikefuel",
"path": "scripts/dvs_vot_exp.py",
"copies": "1",
"size": "4517",
"license": "mit",
"hash": 1409361087306467600,
"line_mean": 31.731884058,
"line_max": 76,
"alpha_frac": 0.571175559,
"autogenerated": false,
"ratio": 3.6339501206757845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9705125679675785,
"avg_score": 0,
"num_lines": 138
} |
a = f"{0[ ]:X>+10d}"
a = f"{0[ ]!s:X>+10d}"
a = f"{0[ ]:Xd>+10d}" #invalid
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
f : meta.fstring.python, source.python, storage.type.string.python, string.interpolated.python, string.quoted.single.python
" : meta.fstring.python, punctuation.definition.string.begin.python, source.python, string.interpolated.python, string.quoted.single.python
{ : constant.character.format.placeholder.other.python, meta.fstring.python, source.python
0 : constant.numeric.dec.python, meta.fstring.python, source.python
[ : meta.fstring.python, punctuation.definition.list.begin.python, source.python
: meta.fstring.python, source.python
] : meta.fstring.python, punctuation.definition.list.end.python, source.python
:X>+10d : meta.fstring.python, source.python, storage.type.format.python
} : constant.character.format.placeholder.other.python, meta.fstring.python, source.python
" : meta.fstring.python, punctuation.definition.string.end.python, source.python, string.interpolated.python, string.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
f : meta.fstring.python, source.python, storage.type.string.python, string.interpolated.python, string.quoted.single.python
" : meta.fstring.python, punctuation.definition.string.begin.python, source.python, string.interpolated.python, string.quoted.single.python
{ : constant.character.format.placeholder.other.python, meta.fstring.python, source.python
0 : constant.numeric.dec.python, meta.fstring.python, source.python
[ : meta.fstring.python, punctuation.definition.list.begin.python, source.python
: meta.fstring.python, source.python
] : meta.fstring.python, punctuation.definition.list.end.python, source.python
!s : meta.fstring.python, source.python, storage.type.format.python
:X>+10d : meta.fstring.python, source.python, storage.type.format.python
} : constant.character.format.placeholder.other.python, meta.fstring.python, source.python
" : meta.fstring.python, punctuation.definition.string.end.python, source.python, string.interpolated.python, string.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
f : meta.fstring.python, source.python, storage.type.string.python, string.interpolated.python, string.quoted.single.python
" : meta.fstring.python, punctuation.definition.string.begin.python, source.python, string.interpolated.python, string.quoted.single.python
{ : constant.character.format.placeholder.other.python, meta.fstring.python, source.python
0 : constant.numeric.dec.python, meta.fstring.python, source.python
[ : meta.fstring.python, punctuation.definition.list.begin.python, source.python
: meta.fstring.python, source.python
] : meta.fstring.python, punctuation.definition.list.end.python, source.python
: : meta.fstring.python, punctuation.separator.colon.python, source.python
Xd : meta.fstring.python, source.python
> : keyword.operator.comparison.python, meta.fstring.python, source.python
+ : keyword.operator.arithmetic.python, meta.fstring.python, source.python
10d : invalid.illegal.name.python, meta.fstring.python, source.python
} : constant.character.format.placeholder.other.python, meta.fstring.python, source.python
" : meta.fstring.python, punctuation.definition.string.end.python, source.python, string.interpolated.python, string.quoted.single.python
: source.python
# : comment.line.number-sign.python, punctuation.definition.comment.python, source.python
invalid : comment.line.number-sign.python, source.python
| {
"repo_name": "MagicStack/MagicPython",
"path": "test/fstrings/simple1.py",
"copies": "1",
"size": "4248",
"license": "mit",
"hash": -6016978812674962000,
"line_mean": 73.5263157895,
"line_max": 151,
"alpha_frac": 0.6819679849,
"autogenerated": false,
"ratio": 3.789473684210526,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9820471612757562,
"avg_score": 0.03019401127059263,
"num_lines": 57
} |
# A fab file for executing commands on remote Linux hosts
from fabric.api import env
from fabric.api import run
import paramiko
import socket
# Fabric defaults: honour entries in ~/.ssh/config and log in as 'vikash'.
env.use_ssh_config = True
env.user = 'vikash'
def is_host_up(host, port):
    """Return True if an SSH transport can be opened to host:port.

    A short (3 second) socket timeout makes unreachable hosts fail fast.
    The process-wide default timeout is always restored, and the probe
    transport is closed rather than leaked.
    """
    original_timeout = socket.getdefaulttimeout()
    new_timeout = 3
    socket.setdefaulttimeout(new_timeout)
    host_status = False
    transport = None
    try:
        # paramiko.Transport opens the TCP connection in its constructor.
        transport = paramiko.Transport((host, port))
        host_status = True
    except Exception:
        # Narrowed from a bare ``except`` (which also caught
        # KeyboardInterrupt/SystemExit); paramiko raises SSHException and
        # plain socket errors here.
        print("***Warning*** Host {host} on port {port} is down.\n".format(
            host=host, port=port)
        )
    finally:
        # Close the probe connection and restore the global timeout even
        # if the connection attempt raised.
        if transport is not None:
            transport.close()
        socket.setdefaulttimeout(original_timeout)
    return host_status
def execute_commands(command='ls -l'):
    """Run *command* on the current Fabric host if it is reachable."""
    # ``is_host_up`` already returns a bool — no ``is True`` comparison.
    if is_host_up(env.host, int(env.port)):
        print("Executing on %s as %s. Command: %s" % (env.host, env.user, command))
        run(command)
def shutdown():
    """Power off the remote host immediately (needs sudo on the target)."""
    execute_commands("sudo -S shutdown -h now")
def uname():
    """Print the remote host's kernel information."""
    execute_commands("sudo -S uname -a")
| {
"repo_name": "vikash-india/UnixNotes2Myself",
"path": "linux/scripts/archive/remote_executer_fabfile.py",
"copies": "1",
"size": "1030",
"license": "mit",
"hash": -8708543737761951000,
"line_mean": 25.4102564103,
"line_max": 83,
"alpha_frac": 0.6524271845,
"autogenerated": false,
"ratio": 3.5395189003436425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46919460848436423,
"avg_score": null,
"num_lines": null
} |
'''A facade to all the modules for mp3scrub functionality. Can also be used
as a CLI.
'''
import re, os
from shutil import move
from mp3scrub import globalz
from mp3scrub.util import strtool, mylog, fileio
from mp3scrub.util.musicTypes import Artist, Album, Track, MP3File
from mp3scrub.netquery import puidquery, tagGuessCache
def ExportWork(mp3_list, file_name):
    '''Wrapper for persisting the mp3 tags to an XML file.
    mp3_list -> list of MP3File objects (supposedly populated via main.FindMusic)
    file_name -> name of the xml file to output
    returns -> None
    '''
    # Thin wrapper; all serialization logic lives in fileio.exportMP3s.
    fileio.exportMP3s(mp3_list, file_name)
def ImportWork(mp3_list, file_name):
    '''load up a previously exported list
    file_name -> name of the xml file of exported list
    mp3_list -> empty list to populate with MP3File's
    returns -> None
    '''
    # Thin wrapper; mp3_list is populated in place by fileio.importMP3s.
    fileio.importMP3s(mp3_list, file_name)
def MakeDirTree(mp3_list, dir_path):
    '''Given a list of MP3File, will move the *.mp3 files they represent to a new
    directory in dir_path. Will make a pretty tree with a new folder per artist.
    mp3_list -> list of MP3File objects (supposedly populated via main.FindMusic)
    dir_path -> directory to create the tree
    returns -> None
    '''
    if not os.path.exists(dir_path):
        # NOTE(review): only logs; processing continues and each per-file
        # mkdir/move below fails and is logged individually (preserved
        # best-effort behavior).
        mylog.ERR('no existe \'%s\'' % dir_path)
    for mp3 in mp3_list:
        if mp3.is_dup:
            mylog.INFO('skipping dup track \'%s\'...' % (mp3.orig_track.path))
            continue
        # Prefer the cleaned artist tag; fall back to the original tag,
        # then to a literal UNKNOWN folder for blank/whitespace artists.
        sub_dir = strtool.safeUni(mp3.clean_track.artist)
        if not sub_dir or re.search(r'^\s+$', sub_dir):
            sub_dir = strtool.safeUni(mp3.orig_track.artist)
        if not sub_dir or re.search(r'^\s+$', sub_dir):
            sub_dir = 'UNKNOWN'
        new_dir = os.path.join(dir_path, sub_dir)
        orig_path = mp3.orig_track.path
        if not os.path.exists(new_dir):
            try:
                os.mkdir(new_dir)
            except OSError:
                # Narrowed from a bare ``except``: only filesystem errors
                # are expected, and a bare except also swallowed
                # KeyboardInterrupt/SystemExit.
                mylog.ERR('error creating \'%s\'...' % (orig_path))
                continue
        mylog.DBG('moving \'%s\' to \'%s\'...' % (orig_path, new_dir))
        try:
            move(orig_path, new_dir)
        except Exception:
            # shutil.move can raise shutil.Error as well as OS errors.
            mylog.ERR('error moving \'%s\' to \'%s\'...' % (orig_path, new_dir))
            continue
def FindMusic(my_dir, callback, only_artist_str=''):
    '''Given a directory, will populate a list with all the *.mp3 files found in
    that directory (recursively) and read their current tag info. Usually the
    first step in any use case.
    my_dir -> directory to search for *.mp3
    callback -> a function pointer funct(str) used to return status info
    only_artist_str -> optional tag to only find artists matching this string
    returns -> mp3_list, a list of MP3File objects
    '''
    mp3_list = []
    fileio.MP3Finder(my_dir, mp3_list, callback)
    new_mp3_list = []
    for mp3_file in mp3_list:
        fn = mp3_file.orig_track.path
        is_dup = mp3_file.is_dup
        callback('processing: %s' % fn)
        # load up the id3 tags from the mp3s in our xml list
        try:
            id3_reader = fileio.Id3tool(fn)
            orig_mp3 = Track(_artist=id3_reader.readTag('artist'),
                             _name=id3_reader.readTag('title'),
                             _album=id3_reader.readTag('album'),
                             _track_num=id3_reader.readTag('tracknumber'),
                             _path=fn)
            if only_artist_str:
                srch = strtool.sanitizeTrackStr(only_artist_str)
                # BUG FIX: this previously referenced the undefined name
                # ``mp3_obj``, raising a NameError that the except below
                # silently turned into an "ID3 read failed" entry whenever
                # an artist filter was supplied.
                artist = strtool.sanitizeTrackStr(orig_mp3.artist)
                if not re.search(srch, artist, re.IGNORECASE):
                    mylog.DBG1(6,'skipping artist %s for match %s' % (srch, artist))
                    continue
            new_mp3_list.append(MP3File(orig_track=orig_mp3.copy(),
                                        my_path=fn, is_dup_flag=is_dup))
        except Exception:
            # Narrowed from a bare ``except``; a failed read still keeps a
            # tagless placeholder entry for the file.
            mylog.ERR('ID3 read failed on \'%s\'\n' % fn)
            new_mp3_list.append(MP3File(my_path=fn, is_dup_flag=is_dup))
    del mp3_list
    return new_mp3_list
def WriteMusic(mp3_list, callback):
    '''Given a list of MP3File, will write to the *.mp3 files the new tag info stored
    in the MP3File.
    mp3_list -> list of MP3File objects (supposedly populated via main.FindMusic)
    callback -> a function pointer funct(str) used to return status info
    returns -> None
    '''
    # Unused enumerate index removed; iterate the tracks directly.
    for mp3 in mp3_list:
        callback('updating tags for %s...' % mp3.orig_track.path)
        if not os.path.exists(mp3.orig_track.path):
            mylog.ERR('no existe \'%s\'' % mp3.orig_track.path)
            continue
        # Only write tags for tracks the cleanup pass actually changed.
        if mp3.result != MP3File.QRY_RESULT.FIELDS_CHANGED:
            mylog.ERR('failed cleanup for \'%s\', skipping' % mp3.orig_track.path)
            continue
        try:
            mylog.INFO('writing tags to fn: %s' % mp3.orig_track.path)
            id3_writer = fileio.Id3tool(mp3.orig_track.path)
            id3_writer.writeTag('artist', mp3.clean_track.artist)
            id3_writer.writeTag('title', mp3.clean_track.name)
            id3_writer.writeTag('album', mp3.clean_track.album)
            id3_writer.writeTag('tracknumber', mp3.clean_track.track_num)
            id3_writer.save()
        except Exception:
            # Narrowed from a bare ``except``; keeps Ctrl-C working.
            mylog.ERR('ID3 write failed on \'%s\'\n' % mp3.orig_track.path)
            continue
def IdentifyMusic(mp3_list, callback=None):
    '''The meat of the entire program. Loops through a list of MP3File objs, and will
    attempt to find better tag matches for the artist, album, track, and tracknum.
    mp3_list -> list of MP3File objects (supposedly populated via main.FindMusic)
                (note that these objects specify both old and refined tags. entering
                this function, the refined tags will be empty. exiting this function,
                they will be populated)
    callback -> a function pointer funct(str) used to return status info
    returns -> None
    '''
    if globalz.PERSIST_CACHE_ON:
        # Reload previously persisted guesses so reruns skip network work.
        tagGuessCache.undump()
    try:
        # PASS 1: use google to refine the artist name
        for mp3_count, mp3_obj in enumerate(mp3_list):
            if callback:
                callback('pass1: %s - %s' % (mp3_obj.orig_track.artist, mp3_obj.orig_track.name))
            # use google instead of last.fm to correct the artist name
            (net_error, web_guess) = tagGuessCache.queryGoogCache(mp3_obj.orig_track.artist)
            if not net_error:
                if web_guess:
                    # run some heuristics to make sure the artist makes sense
                    if strtool.artistCompare(mp3_obj.orig_track.artist, web_guess):
                        mp3_obj.clean_track.artist = web_guess
                        # now look up the last.fm track list for the top 10 albums of artist
                        is_track_found = tagGuessCache.updateGuessCache(mp3_obj.orig_track.path,
                                                                        mp3_obj.orig_track.name,
                                                                        mp3_obj.clean_track.artist)
                        if not is_track_found:
                            mp3_obj.result = MP3File.QRY_RESULT.TRACK_NOT_FOUND
                            mp3_obj.clean_track.artist = mp3_obj.orig_track.artist
                        else:
                            mp3_obj.result = MP3File.QRY_RESULT.OK
                    else:
                        mp3_obj.result = MP3File.QRY_RESULT.ARTIST_BAD_MATCH
                        mp3_obj.clean_track.artist = mp3_obj.orig_track.artist
                else:
                    mp3_obj.result = MP3File.QRY_RESULT.ARTIST_NOT_FOUND
                    mp3_obj.clean_track.artist = mp3_obj.orig_track.artist
            else:
                # Network failure: keep the original artist and mark the
                # track so later passes skip it.
                mp3_obj.result = MP3File.QRY_RESULT.NET_ERROR
                mp3_obj.clean_track.artist = mp3_obj.orig_track.artist
            if (mp3_count % 100) == 0:
                mylog.INFO('processed %d files' % (mp3_count))
        if globalz.CACHE_DEBUG_ON:
            with open('pprint.txt','w') as f:
                f.write('BEFORE:\n')
                tagGuessCache.dbgPrint(f)
        mylog.INFO("refining track info...")
        # PASS 2: use lastfm and musicbrainz for better track/album info
        tagGuessCache.refineGuessCache()
        # see if we found a guess in last.fm
        for mp3_obj in mp3_list:
            if mp3_obj.result == MP3File.QRY_RESULT.OK:
                if callback:
                    callback('pass2: lastfm: %s - %s' % (mp3_obj.orig_track.artist,
                                                         mp3_obj.orig_track.name))
                guess_track_obj = tagGuessCache.searchGuessCache(mp3_obj.clean_track.artist,
                                                                 mp3_obj.orig_track.path)
                if not guess_track_obj:
                    mp3_obj.result = MP3File.QRY_RESULT.TRACK_NOT_FOUND
                else:
                    mp3_obj.clean_track.name = guess_track_obj.name
                    mp3_obj.clean_track.album = guess_track_obj.album
                    mp3_obj.clean_track.track_num = guess_track_obj.track_num
                    mp3_obj.method1 = MP3File.METHOD.ID3ID
        # now use musicbrainz for what lastfm couldn't find
        # (skip NET_ERROR tracks too...want to be clear in the gui that
        # these tracks failed due to network problems, not algorithm failure
        for mp3_obj in mp3_list:
            if mp3_obj.result != MP3File.QRY_RESULT.OK and \
               mp3_obj.result != MP3File.QRY_RESULT.NET_ERROR:
                if callback:
                    callback('pass2: hashing: %s - %s' % (mp3_obj.orig_track.artist,
                                                          mp3_obj.orig_track.name))
                mylog.INFO('using hashing for unknown file %s' % (mp3_obj.orig_track.path))
                puid_qry_obj = puidquery.PUIDQuery()
                (mp3_obj.clean_track.artist,
                 mp3_obj.clean_track.album,
                 mp3_obj.clean_track.name,
                 mp3_obj.clean_track.track_num) = puid_qry_obj.lookupTrack(mp3_obj.orig_track.path)
                if mp3_obj.clean_track.artist:
                    mp3_obj.method1 = MP3File.METHOD.HASHED
                else:
                    mp3_obj.method1 = MP3File.METHOD.FAILEDHASH
        # PASS 3: retry album name guessing. now that the data has been partially cleaned, we'll have
        # better luck guessing the correct album name
        tagGuessCache.clearCache()
        for mp3_obj in mp3_list:
            if mp3_obj.result == MP3File.QRY_RESULT.NET_ERROR: continue
            mylog.INFO('pass3: on file \'%s\' track \'%s\'' %
                       (mp3_obj.orig_track.path, mp3_obj.clean_track.name))
            if callback:
                callback('pass3: on file \'%s\' track \'%s\'' %
                         (mp3_obj.orig_track.path, mp3_obj.clean_track.name))
            if mp3_obj.clean_track.artist:
                if mp3_obj.clean_track.name:
                    tagGuessCache.updateGuessCache(mp3_obj.orig_track.path,
                                                   mp3_obj.clean_track.name,
                                                   mp3_obj.clean_track.artist)
                else:
                    tagGuessCache.updateGuessCache(mp3_obj.orig_track.path,
                                                   mp3_obj.orig_track.name,
                                                   mp3_obj.clean_track.artist)
        tagGuessCache.refineGuessCache()
        for mp3_obj in mp3_list:
            if mp3_obj.result == MP3File.QRY_RESULT.NET_ERROR: continue
            guess_track_obj = tagGuessCache.searchGuessCache(mp3_obj.clean_track.artist,
                                                             mp3_obj.orig_track.path)
            if guess_track_obj:
                mylog.INFO('pass3_result: found guess on file \'%s\' track \'%s\'' %
                           (mp3_obj.orig_track.path, mp3_obj.clean_track.name))
                mp3_obj.clean_track.name = guess_track_obj.name
                mp3_obj.clean_track.album = guess_track_obj.album
                mp3_obj.clean_track.track_num = guess_track_obj.track_num
                mp3_obj.updateResults()
                mp3_obj.method2 = MP3File.METHOD.ID3ID
                mp3_obj.result = MP3File.QRY_RESULT.FIELDS_CHANGED
            else:
                mylog.INFO('pass3_result: no guess found on file \'%s\' track \'%s\'' %
                           (mp3_obj.orig_track.path, mp3_obj.clean_track.name))
                mp3_obj.method2 = MP3File.METHOD.SECONDPASSFAIL
                # make a final call on whether we got good results or not
                if mp3_obj.method1 == MP3File.METHOD.FAILEDHASH or \
                   mp3_obj.method1 == MP3File.METHOD.UNKNOWN:
                    mp3_obj.result = MP3File.QRY_RESULT.NO_GUESS
                else:
                    mp3_obj.result = MP3File.QRY_RESULT.FIELDS_CHANGED
        if globalz.CACHE_DEBUG_ON:
            for x in mp3_list: print unicode(x).encode('utf-8')
            with open('pprint.txt','w') as f:
                f.write('AFTER:\n')
                tagGuessCache.dbgPrint(f)
    finally:
        # Persist whatever we learned even if a pass above raised.
        if globalz.PERSIST_CACHE_ON:
            mylog.INFO('persisting track guesses')
            tagGuessCache.dump()
    return mp3_list
| {
"repo_name": "sgoranson/mp3scrub",
"path": "mp3scrub/scrubCmds.py",
"copies": "1",
"size": "13683",
"license": "mit",
"hash": 3825504415991876000,
"line_mean": 35.2944297082,
"line_max": 102,
"alpha_frac": 0.5486369948,
"autogenerated": false,
"ratio": 3.700108166576528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9703448371795951,
"avg_score": 0.009059357916115443,
"num_lines": 377
} |
""" A factory for building individual commands based on the full list
of commands and inputs.
author: Brian Schrader
since: 2016-01-12
"""
from .tokens import Input, Output, PathToken, CommentToken
from .command import Command
from .command_template import CommandTemplate
from .grammar import OR_TOKEN, AND_TOKEN
def get_command_templates(command_tokens, file_tokens=None, path_tokens=None,
        job_options=None):
    """ Given a list of tokens from the grammar, return a
    list of command templates with preliminary dependencies attached.

    :param command_tokens: parsed command tokens (consumed)
    :param file_tokens: optional parsed file declarations (consumed)
    :param path_tokens: optional parsed path declarations (consumed)
    :param job_options: optional raw job option strings
    """
    # ``None`` defaults replace the original mutable-default lists
    # (shared-[] pitfall); behaviour for all existing callers is the same.
    file_tokens = [] if file_tokens is None else file_tokens
    path_tokens = [] if path_tokens is None else path_tokens
    job_options = [] if job_options is None else job_options

    files = get_files(file_tokens)
    paths = get_paths(path_tokens)
    job_options = get_options(job_options)

    templates = _get_command_templates(command_tokens, files, paths,
            job_options)

    # Attach a preliminary dependency graph; callers re-verify before
    # actually running each command.
    for command_template in templates:
        command_template._dependencies = _get_prelim_dependencies(
            command_template, templates)
    return templates
def get_files(file_tokens, cwd=None):
    """ Given a list of parser file tokens, return a list of Input objects
    for them.

    Consumes (pops from) ``file_tokens``, matching the original recursive
    implementation, and returns the inputs in reverse token order.

    BUG FIX: ``cwd`` is now applied to every token; the recursive version
    dropped it after the first file (the recursive call omitted ``cwd``).
    """
    inputs = []
    while file_tokens:
        token = file_tokens.pop()
        # Tokens without a declared filename fall back to ''.
        filename = getattr(token, 'filename', '')
        if cwd:
            inputs.append(Input(token.alias, filename, cwd=cwd))
        else:
            inputs.append(Input(token.alias, filename))
    return inputs
def get_paths(path_tokens):
    """ Given a list of parser path tokens, return a list of PathToken
    objects for them.

    Consumes (pops from) ``path_tokens``, like the original recursive
    form; results come back in reverse token order.
    """
    results = []
    while path_tokens:
        token = path_tokens.pop()
        results.append(PathToken(token.alias, token.path))
    return results
def get_options(options):
    """ Given a list of options, tokenize them. """
    # Options become CommentToken objects so they can be spliced into a
    # command's parts list alongside real comments.
    return _get_comments(options)
# Internal Implementation
def _get_command_templates(command_tokens, files=None, paths=None,
        job_options=None, count=1):
    """ Recursively create command templates, numbering them from
    ``count``. (Docstring typo "Reversivly" fixed.)
    """
    if not command_tokens:
        return []
    # ``None`` defaults replace the original mutable-default lists.
    files = [] if files is None else files
    paths = [] if paths is None else paths
    job_options = [] if job_options is None else job_options

    comment_tokens, command_token = command_tokens.pop()

    parts = []
    parts += job_options + _get_comments(comment_tokens)

    for part in command_token[0]:
        # Check for file
        try:
            parts.append(_get_file_by_alias(part, files))
            continue
        except (AttributeError, ValueError):
            pass
        # Check for path/string
        for cut in part.split():
            try:
                parts.append(_get_path_by_name(cut, paths))
                continue
            except ValueError:
                pass
            parts.append(cut)

    command_template = CommandTemplate(alias=str(count), parts=parts)
    # Plain loop instead of the original side-effect-only list
    # comprehension around ``setattr``.
    for output_part in command_template.output_parts:
        output_part.alias = command_template.alias

    return [command_template] + _get_command_templates(command_tokens,
            files, paths, job_options, count+1)
def _get_prelim_dependencies(command_template, all_templates):
    """ Given a command_template determine which other templates it
    depends on. This should not be used as the be-all end-all of
    dependencies and before calling each command, ensure that its
    requirements are met.
    """
    deps = []
    for input_part in command_template.input_parts:
        # Only dotted aliases refer to another command's output.
        if '.' not in input_part.alias:
            continue
        for template in all_templates:
            if any(input_part.fuzzy_match(output)
                   for output in template.output_parts):
                deps.append(template)
    return list(set(deps))
def _get_file_by_alias(part, files):
    """ Given a command part, find the file it represents. If not found,
    then returns a new token representing that file.

    :param part: a parsed command part -- presumably a pyparsing result,
        since it must support pop(), .magic_or and .asList(); an
        AttributeError raised for a plain-string part is caught by the
        caller alongside ValueError.  TODO confirm exact parser type.
    :param files: known file tokens whose alias may override the filename
        of a matching input.
    :returns: an Output token, or a list of non-empty Input groups (one
        sub-list per OR-separated alternative).
    :throws ValueError: if the value is not a command file alias.
    """
    # Make Output
    if _is_output(part):
        return Output.from_string(part.pop())
    # Search/Make Input
    else:
        # Each sub-list of `inputs` is one OR-alternative group.
        inputs = [[]]
        # All inputs in this part share the same and/or semantics.
        if part.magic_or:
            and_or = 'or'
        else:
            and_or = 'and'
        for cut in part.asList():
            if cut == OR_TOKEN:
                # Start a new alternative group.
                inputs.append([])
                continue
            if cut == AND_TOKEN:
                continue
            input = Input(cut, filename=cut, and_or=and_or)
            for file in files:
                if file.alias == cut:
                    # Override the filename
                    input.filename = file.filename
                    inputs[-1].append(input)
                    break
            else:
                # for/else: no known file matched -- keep the fresh input.
                inputs[-1].append(input)
        # Drop empty groups (e.g. a trailing OR with nothing after it).
        return [input for input in inputs if input]
def _get_path_by_name(part, paths):
""" Given a command part, find the path it represents.
:throws ValueError: if no valid file is found.
"""
for path in paths:
if path.alias == part:
return path
raise ValueError
def _get_comments(parts):
""" Given a list of parts representing a list of comments, return the list
of comment tokens
"""
return [CommentToken(part) for part in parts]
def _is_output(part):
""" Returns whether the given part represents an output variable. """
if part[0].lower() == 'o':
return True
elif part[0][:2].lower() == 'o:':
return True
elif part[0][:2].lower() == 'o.':
return True
else:
return False
| {
"repo_name": "Sonictherocketman/metapipe",
"path": "metapipe/models/command_template_factory.py",
"copies": "2",
"size": "5303",
"license": "mit",
"hash": -3543494928946701000,
"line_mean": 26.6197916667,
"line_max": 78,
"alpha_frac": 0.5977748444,
"autogenerated": false,
"ratio": 4.14296875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.57407435944,
"avg_score": null,
"num_lines": null
} |
# A factory for Indexer class instances. This will return an indexer
# suitable for using in the context defined in the input.
import logging
import os
import time
from xia2.DriverExceptions.NotAvailableError import NotAvailableError
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.PipelineSelection import get_preferences
from xia2.Modules.Indexer.DialsIndexer import DialsIndexer
from xia2.Modules.Indexer.XDSIndexer import XDSIndexer
from xia2.Modules.Indexer.XDSIndexerII import XDSIndexerII
from xia2.Modules.Indexer.XDSIndexerInteractive import XDSIndexerInteractive
logger = logging.getLogger("xia2.Modules.Indexer.IndexerFactory")
def IndexerForXSweep(xsweep, json_file=None):
    """Provide an indexer to work with XSweep instance xsweep.

    :param xsweep: the XSweep to index; must be an XSweep instance.
    :param json_file: optional path to a serialised indexer; if given the
        indexer is restored from it instead of configured from the sweep.
    :raises RuntimeError: if xsweep is missing or not an XSweep.
    """
    # check what is going on
    if xsweep is None:
        raise RuntimeError("XSweep instance needed")
    if not xsweep.__class__.__name__ == "XSweep":
        raise RuntimeError("XSweep instance needed")
    crystal_lattice = xsweep.get_crystal_lattice()
    multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
    # FIXME SCI-599 decide from the width of the sweep and the preference
    # which indexer to return...
    sweep_images = xsweep.get_image_range()
    imageset = xsweep.get_imageset()
    scan = imageset.get_scan()
    oscillation = scan.get_oscillation()
    # total rotation width of the sweep in degrees
    sweep_width = oscillation[1] * (sweep_images[1] - sweep_images[0] + 1)
    # hack now - if XDS integration switch to XDS indexer if (i) labelit and
    # (ii) sweep < 10 degrees
    if multi_sweep_indexing and len(xsweep.sample.get_sweeps()) > 1:
        # share one indexer across all sweeps of this sample
        indexer = xsweep.sample.multi_indexer or Indexer()
        xsweep.sample.multi_indexer = indexer
    elif (
        sweep_width < 10.0
        and not get_preferences().get("indexer")
        and get_preferences().get("integrater")
        and "xds" in get_preferences().get("integrater")
    ):
        logger.debug("Overriding indexer as XDSII")
        indexer = Indexer(preselection="xdsii")
    else:
        indexer = Indexer()
    if json_file is not None:
        # restore a previously serialised indexer of the same class
        assert os.path.isfile(json_file)
        logger.debug("Loading indexer from json: %s", json_file)
        t0 = time.time()
        indexer = indexer.__class__.from_json(filename=json_file)
        t1 = time.time()
        logger.debug("Loaded indexer in %.2f seconds", t1 - t0)
    else:
        # configure the indexer
        indexer.add_indexer_imageset(xsweep.get_imageset())
        if crystal_lattice:
            # this is e.g. ('aP', (1.0, 2.0, 3.0, 90.0, 98.0, 88.0))
            indexer.set_indexer_input_lattice(crystal_lattice[0])
            indexer.set_indexer_input_cell(crystal_lattice[1])
    # FIXME - it is assumed that all programs which implement the Indexer
    # interface will also implement FrameProcessor, which this uses.
    # verify this, or assert it in some way...
    # if xsweep.get_beam_centre():
    #     indexer.set_beam_centre(xsweep.get_beam_centre())
    ## N.B. This does not need to be done for the integrater, since
    ## that gets it's numbers from the indexer it uses.
    # if xsweep.get_distance():
    #     logger.debug('Indexer factory: Setting distance: %.2f' % \
    #                  xsweep.get_distance())
    #     indexer.set_distance(xsweep.get_distance())
    # FIXME more - need to check if we should be indexing in a specific
    # lattice - check xsweep.get_crystal_lattice()
    # need to do the same for wavelength now as that could be wrong in
    # the image header...
    # if xsweep.get_wavelength_value():
    #     logger.debug('Indexer factory: Setting wavelength: %.6f' % \
    #                  xsweep.get_wavelength_value())
    #     indexer.set_wavelength(xsweep.get_wavelength_value())
    indexer.set_indexer_sweep(xsweep)
    if xsweep.sample.multi_indexer:
        # in multi-sweep mode all sweeps must share the same indexer
        assert xsweep.sample.multi_indexer is indexer, (
            xsweep.sample.multi_indexer,
            indexer,
        )
        if len(indexer._indxr_imagesets) == 1:
            # first sweep registered: pull the remaining sweeps in so their
            # imagesets are added to the shared indexer too
            for xsweep_other in xsweep.sample.get_sweeps()[1:]:
                xsweep_other._get_indexer()
    return indexer
# FIXME need to provide framework for input passing
def Indexer(preselection=None):
    """Create an instance of Indexer for use with a dataset."""
    # FIXME need to check that these implement indexer
    if not preselection:
        preselection = get_preferences().get("indexer")
    candidates = [
        (DialsIndexer, "dials", "DialsIndexer"),
        (XDSIndexer, "xds", "XDS Indexer"),
    ]
    if PhilIndex.params.xia2.settings.interactive:
        candidates.append((XDSIndexerInteractive, "xdsii", "XDS Interactive Indexer"))
    else:
        candidates.append((XDSIndexerII, "xdsii", "XDS II Indexer"))
    for factory, name, displayname in candidates:
        # Honour an explicit preselection; otherwise take the first that
        # constructs successfully.
        if preselection and preselection != name:
            continue
        try:
            instance = factory()
        except NotAvailableError:
            if preselection:
                raise RuntimeError("preselected indexer %s not available" % name)
            continue
        logger.debug("Using %s", displayname)
        return instance
    raise RuntimeError("no indexer implementations found")
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Indexer/IndexerFactory.py",
"copies": "1",
"size": "5257",
"license": "bsd-3-clause",
"hash": 1093246189227757700,
"line_mean": 33.1363636364,
"line_max": 88,
"alpha_frac": 0.6686322998,
"autogenerated": false,
"ratio": 3.709950599858857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9876791905612431,
"avg_score": 0.00035819880928506605,
"num_lines": 154
} |
# A factory for Integrater implementations. At the moment this will
# support only XDS and the null integrater implementation.
import logging
import os
from xia2.DriverExceptions.NotAvailableError import NotAvailableError
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.PipelineSelection import get_preferences
from xia2.Modules.Integrater.DialsIntegrater import DialsIntegrater
from xia2.Modules.Integrater.XDSIntegrater import XDSIntegrater
logger = logging.getLogger("xia2.Modules.Integrater.IntegraterFactory")
def IntegraterForXSweep(xsweep, json_file=None):
    """Create an Integrater implementation to work with the provided
    XSweep.

    :param xsweep: the XSweep to integrate; must be an XSweep instance.
    :param json_file: optional path to a serialised integrater to restore
        instead of configuring a fresh one from the sweep.
    :raises RuntimeError: if xsweep is missing or not an XSweep.
    """
    # FIXME this needs properly implementing...
    if xsweep is None:
        raise RuntimeError("XSweep instance needed")
    if not xsweep.__class__.__name__ == "XSweep":
        raise RuntimeError("XSweep instance needed")
    integrater = Integrater()
    if json_file is not None:
        # restore a previously serialised integrater of the same class
        assert os.path.isfile(json_file)
        # lazy %-style logger args (consistent with IndexerForXSweep)
        logger.debug("Loading integrater from json: %s", json_file)
        import time

        t0 = time.time()
        integrater = integrater.__class__.from_json(filename=json_file)
        t1 = time.time()
        logger.debug("Loaded integrater in %.2f seconds", t1 - t0)
    else:
        integrater.setup_from_imageset(xsweep.get_imageset())
    integrater.set_integrater_sweep_name(xsweep.get_name())
    # copy across resolution limits
    if xsweep.get_resolution_high() or xsweep.get_resolution_low():
        d_min = PhilIndex.params.xia2.settings.resolution.d_min
        d_max = PhilIndex.params.xia2.settings.resolution.d_max
        # override with sweep versions if set - xia2#146
        if xsweep.get_resolution_high():
            d_min = xsweep.get_resolution_high()
        if xsweep.get_resolution_low():
            d_max = xsweep.get_resolution_low()
        if d_min is not None and d_min != integrater.get_integrater_high_resolution():
            logger.debug("Assigning resolution limits from XINFO input:")
            logger.debug("d_min: %.3f", d_min)
            integrater.set_integrater_high_resolution(d_min, user=True)
        if d_max is not None and d_max != integrater.get_integrater_low_resolution():
            logger.debug("Assigning resolution limits from XINFO input:")
            logger.debug("d_max: %.3f", d_max)
            integrater.set_integrater_low_resolution(d_max, user=True)
    # check the epoch and perhaps pass this in for future reference
    # (in the scaling)
    if xsweep._epoch > 0:
        integrater.set_integrater_epoch(xsweep._epoch)
    # need to do the same for wavelength now as that could be wrong in
    # the image header...
    if xsweep.get_wavelength_value():
        logger.debug(
            "Integrater factory: Setting wavelength: %.6f",
            xsweep.get_wavelength_value(),
        )
        integrater.set_wavelength(xsweep.get_wavelength_value())
    # likewise the distance...
    if xsweep.get_distance():
        logger.debug(
            "Integrater factory: Setting distance: %.2f", xsweep.get_distance()
        )
        integrater.set_distance(xsweep.get_distance())
    integrater.set_integrater_sweep(xsweep, reset=False)
    return integrater
def Integrater():
    """Return an Integrater implementation.

    Tries DIALS first, then XDS, honouring the "integrater" preference
    when one is set, and applies any user-specified resolution limits.

    :raises RuntimeError: if a preselected integrater is unavailable or
        no implementation can be constructed.
    """
    # FIXME this should take an indexer as an argument...
    integrater = None
    preselection = get_preferences().get("integrater")
    if not integrater and (not preselection or preselection == "dials"):
        try:
            integrater = DialsIntegrater()
            logger.debug("Using Dials Integrater")
            if PhilIndex.params.xia2.settings.scaler == "dials":
                integrater.set_output_format("pickle")
        except NotAvailableError:
            if preselection == "dials":
                raise RuntimeError(
                    "preselected integrater dials not available: "
                    "dials not installed?"
                )
    if not integrater and (not preselection or preselection == "xdsr"):
        try:
            integrater = XDSIntegrater()
            logger.debug("Using XDS Integrater in new resolution mode")
        except NotAvailableError:
            if preselection == "xdsr":
                raise RuntimeError(
                    "preselected integrater xdsr not available: xds not installed?"
                )
    if not integrater:
        raise RuntimeError("no integrater implementations found")
    # check to see if resolution limits were passed in through the
    # command line...
    dmin = PhilIndex.params.xia2.settings.resolution.d_min
    dmax = PhilIndex.params.xia2.settings.resolution.d_max
    if dmin:
        logger.debug("Adding user-assigned resolution limits:")
        if dmax:
            # lazy %-args render the same text the old f-string produced
            logger.debug("dmin: %.3f dmax: %.2f", dmin, dmax)
            integrater.set_integrater_resolution(dmin, dmax, user=True)
        else:
            logger.debug("dmin: %.3f", dmin)
            integrater.set_integrater_high_resolution(dmin, user=True)
    return integrater
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Integrater/IntegraterFactory.py",
"copies": "1",
"size": "5115",
"license": "bsd-3-clause",
"hash": 1356374139325041400,
"line_mean": 33.7959183673,
"line_max": 88,
"alpha_frac": 0.6490713587,
"autogenerated": false,
"ratio": 3.825729244577412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4974800603277412,
"avg_score": null,
"num_lines": null
} |
"""A factory for unique sentinel values.
`Sentinel` objects are unique in the sense that they are equal only to
themselves. `Sentinel` objects can not be pickled.
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import inspect
def create(name, description=''):
    """Creates a new sentinel

    :param name: name used for the sentinel's class and instance.
    :param description: text returned by repr(); defaults to
        "Sentinel '<name>'".
    :returns: a unique object equal only to itself; copy/deepcopy return
        the same instance.  The class object is deleted after the single
        instance is made, which is why sentinels can not be pickled.
    """
    if description == '':
        description = "Sentinel '%s'" % name

    class Sentinel(object):  # pylint: disable=too-few-public-methods
        """Sentinel class
        """
        def __init__(self):
            """Initializer
            """
            self.name = str(name)
            self.__name__ = self.name
            self.__class__.__name__ = self.name
            # Allow no instances
            # NOTE(review): assigning __slots__ on the instance here has no
            # effect -- __slots__ only restricts attributes when defined in
            # the class body before instances exist.  Confirm intent.
            self.__slots__ = ()
            # Make Sentinel belong to the module where it is created
            # (stack index 2 from inside __init__ is the caller of create()).
            self.__class__.__module__ = inspect.stack(
            )[2][0].f_globals['__name__']

        def __repr__(self):
            """Represent the sentinel"""
            return description

        def __copy__(self):
            """Copy the sentinel returns itself
            """
            return self

        def __deepcopy__(self, _):
            """Copy the sentinel returns itself
            """
            return self

    # Create an instance, then make sure no one else can instantiate
    sentinel = Sentinel()
    del Sentinel
    return sentinel
| {
"repo_name": "duerrp/pyexperiment",
"path": "pyexperiment/utils/sentinel.py",
"copies": "4",
"size": "1515",
"license": "mit",
"hash": -2257626471346534000,
"line_mean": 25.5789473684,
"line_max": 70,
"alpha_frac": 0.5570957096,
"autogenerated": false,
"ratio": 4.6189024390243905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 57
} |
''' A failsafe wrapper for python3 unicode text file handling and also a
working example of using UltiSnips'''
import tempfile
import struct
import logging
logger = logging.getLogger(__name__)
def mixed_text_decoder(datablock):
    """Decode possibly mixed-encoding bytes to text, byte by byte.

    Each byte is decoded as utf-8 if possible (i.e. it is ASCII) and falls
    back to latin-1 otherwise.  A str argument is returned unchanged.

    :param datablock: bytes (or str) to decode.
    :returns: the decoded str.
    """
    if isinstance(datablock, str):
        return datablock
    decoded_chars = []
    for offset in range(len(datablock)):
        # Slicing (rather than indexing) yields a 1-byte bytes object;
        # this replaces the old per-byte struct unpacking.
        single = datablock[offset:offset + 1]
        try:
            decoded_chars.append(single.decode('utf-8'))
        except UnicodeDecodeError:
            decoded_chars.append(single.decode('latin-1'))
    # join once instead of quadratic str += in the loop
    return ''.join(decoded_chars)
def UnicodeSpooledTemporaryFile(orig_fh, *args, **kwargs):  # noqa=N802
    '''UnicodeSpooledTemporary
    Wraps tempfile.SpooledTemporaryFile functionality to safely ensure that the
    passed orig_fh is cleanly converted from a malformed mixed encoding to a
    pure unicode (utf-8) encoded file.

    :param orig_fh: an open file object whose .name is re-opened in binary
        mode and re-encoded.
    :returns: a SpooledTemporaryFile positioned at offset 0 containing the
        utf-8 re-encoding of the original file's contents.
    '''
    if 'max_size' not in kwargs:
        # Default max_size unless it was supplied positionally.
        # (Fixed: the isinstance arguments were reversed, which raised
        # TypeError whenever a positional int was passed.)
        if not args or not isinstance(args[0], int):
            kwargs['max_size'] = 1024**2 * 30
    buffer_size = 4096
    stf = tempfile.SpooledTemporaryFile(*args, **kwargs)
    with open(orig_fh.name, 'r+b') as ofnfh:
        # Read every chunk from the freshly opened binary handle.
        # (Fixed: the loop previously read subsequent chunks from orig_fh,
        # mixing two file positions/modes.)
        datablock = ofnfh.read(buffer_size)
        while datablock:
            stf.write(mixed_text_decoder(datablock).encode('UTF-8'))
            datablock = ofnfh.read(buffer_size)
    stf.seek(0)
    return stf
| {
"repo_name": "SkyLeach/poweruser_tools",
"path": "skypy/clean_unicode_spooler.py",
"copies": "1",
"size": "1374",
"license": "unlicense",
"hash": 4179673942756320000,
"line_mean": 34.2307692308,
"line_max": 79,
"alpha_frac": 0.6608442504,
"autogenerated": false,
"ratio": 3.6157894736842104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47766337240842105,
"avg_score": null,
"num_lines": null
} |
"""A fake AirPlay device."""
import binascii
from collections import namedtuple
import logging
import plistlib
from typing import Optional
from aiohttp import web
from pyatv.support.net import unused_port
from tests.utils import simple_get
_LOGGER = logging.getLogger(__name__)
# --- START AUTHENTICATION DATA VALID SESSION (FROM DEVICE) ---
DEVICE_IDENTIFIER = "75FBEEC773CFC563"
DEVICE_AUTH_KEY = "8F06696F2542D70DF59286C761695C485F815BE3D152849E1361282D46AB1493"
DEVICE_PIN = 2271
DEVICE_CREDENTIALS = DEVICE_IDENTIFIER + ":" + DEVICE_AUTH_KEY
# pylint: disable=E501
# For authentication
_DEVICE_AUTH_STEP1 = b"62706c6973743030d201020304566d6574686f6454757365725370696e5f101037354642454543373733434643353633080d14191d0000000000000101000000000000000500000000000000000000000000000030" # noqa
_DEVICE_AUTH_STEP1_RESP = b"62706c6973743030d20102030452706b5473616c744f1101008817e16146c7d12b45e810b0bf190a4ccb25d9a20a8d0504d874daa8db5574c51c8b33703a95c00bdbe99c8c3745d1ef1b38e538edfd98e09ec029effe6f28b3b54a1bd41c28d8f33da6f5ac9327bfce9a66869dae645b5cbd2c6b8fbe14a30ad4f8598154f2ef7f4f52cee3e3042a69780463c26bbb764870eb1995b26a2a4ade05564836d788baf07469a143c410ea9d07a068eb790b2b0aa5b86c990636814e3fa1a899ceba1af45b211ca4bd3b5b66ffaf16051a4f851e120476054258f257b8521a068907ad5e9c7220d5cef9aa072dec9edb7ebf633cad4d52d105cf58440f17e236332b0b26539851a879e9ac8d3c2da4c590785468e590296d39d7374f1010fca6dcb6b83a7c716a692f806e9159540008000d001000150119000000000000020100000000000000050000000000000000000000000000012c" # noqa
_DEVICE_AUTH_STEP2 = b"62706c6973743030d20102030452706b5570726f6f664f1101000819b6ba7feead4753809314e2b4c5db9109f737a0fc70b758342b6bbf536fae4e40cf94607588abb17c2076030cc00c2c1fa5fc3b3dfe8aa1ec2f23f74d917c0792fbf02f131377dfb8ae2a1656ceaa0a36bb3ab752586e1af17e1d5ef24ce083f3f9298d0be761f26c0d48af86510bf9aac7940cf90bff6bd214cf34b5536856c80f076cfbe06fd69af9d6a07a6d3ac580dfffc8a40b9730575a16c5046cd73321a944880dcf9fac952afc7ffd2d135e57ec208b11cef22b734f331ad4d8c9a737b588f7b30bd5210c65cae2ba0226f69ce7b505771faa63af89ed2f9e8325d7d5f3a2da7412f9d837860632d7f81b7fa5e09dd85e1539184070c0fa8433c24f1014fc6286910833d3e7ae0631d47ddbb0f492ef85b80008000d00100016011a0000000000000201000000000000000500000000000000000000000000000131" # noqa
_DEVICE_AUTH_STEP2_RESP = b"62706c6973743030d101025570726f6f664f101484a88548b12bce122ad1cea6caff312630edcf27080b110000000000000101000000000000000300000000000000000000000000000028" # noqa
_DEVICE_AUTH_STEP3 = b"62706c6973743030d20102030457617574685461675365706b4f101052a92f8712c6ea417f3adb3d03d8e5634f1020ff07fc8520d10728e6f2ab0a0245dfa20709b5d1ae5f9a19328b0663ba9414f2080d15192c000000000000010100000000000000050000000000000000000000000000004f" # noqa
_DEVICE_AUTH_STEP3_RESP = b"62706c6973743030d2010203045365706b57617574685461674f10206285b20afad4cefe1fce40cee685ab072c75240cb47fb71bc3b3d03dca52dc5d4f1010893eb8e5ae418b245e9b1bf7cba9116b080d11193c000000000000010100000000000000050000000000000000000000000000004f" # noqa
# For verification
_DEVICE_VERIFY_STEP1 = b"01000000891bae9f581f68f9c9933c4f713fbb5b9de639ec7df5d0a4fd4f342f1c21aa6a5e9d1e843302d6265b8c48dd169e273460e567916b0b36280ac071001118f6b2" # noqa
_DEVICE_VERIFY_STEP1_RESP = b"3221371da9f00d035955caa912455fd2acee68117b557f25e39168746af4b631cfab7b2c6d0b58e96cc10af884f5a4cdef8063858a9d9c04e866743cf4b77b4be50de1352ab4ff2691a1a7afd8c1341475b4170ac50455973b7fcf3c24324fa9" # noqa
_DEVICE_VERIFY_STEP2 = b"00000000a1f91acf64aacb185684080b817103b423816ad63b7f5e001f62337b4cc4b3b92c1474959930b7c2a59d0004814300580459d06fc6cc6441bd82bac72a5c5cc7" # noqa
_DEVICE_VERIFY_STEP2_RESP = b"" # Value not used by pyatv
# pylint: enable=E501
# --- END AUTHENTICATION DATA ---
AirPlayPlaybackResponse = namedtuple("AirPlayPlaybackResponse", "code content")
class FakeAirPlayState:
    """Mutable state shared between the fake AirPlay service and its
    use-case helpers."""

    def __init__(self):
        """Set up the default (authenticated, idle) state."""
        # Counters for play requests and injected transient failures
        self.play_count = 0
        self.injected_play_fails = 0
        # Authentication behaviour toggles
        self.has_authenticated = True
        self.always_auth_fail = False
        # Queued canned responses for /playback-info
        self.airplay_responses = []
        # Details captured from the most recent /play request
        self.last_airplay_url = None
        self.last_airplay_start = None
        self.last_airplay_uuid = None
        self.last_airplay_content: Optional[bytes] = None
class FakeAirPlayService:
    """aiohttp-based fake of an AirPlay receiver.

    Registers HTTP routes matching the real device API and answers with
    canned responses driven by the shared FakeAirPlayState.
    """

    def __init__(self, state, app, loop):
        # Shared mutable state (also manipulated by FakeAirPlayUseCases)
        self.state = state
        self.port = None
        self.app = app
        self.runner = None
        # Routes mirroring the real AirPlay device API
        self.app.router.add_post("/play", self.handle_airplay_play)
        self.app.router.add_get("/playback-info", self.handle_airplay_playback_info)
        self.app.router.add_post("/pair-pin-start", self.handle_pair_pin_start)
        self.app.router.add_post("/pair-setup-pin", self.handle_pair_setup_pin)
        self.app.router.add_post("/pair-verify", self.handle_airplay_pair_verify)

    async def start(self, start_web_server):
        """Start serving on an unused port when requested."""
        if start_web_server:
            self.port = unused_port()
            self.runner = web.AppRunner(self.app)
            await self.runner.setup()
            site = web.TCPSite(self.runner, "0.0.0.0", self.port)
            await site.start()

    async def cleanup(self):
        """Tear down the web server if it was started."""
        if self.runner:
            await self.runner.cleanup()

    async def handle_airplay_play(self, request):
        """Handle AirPlay play requests."""
        self.state.play_count += 1
        # 503 mimics a device that still requires authentication
        if self.state.always_auth_fail or not self.state.has_authenticated:
            return web.Response(status=503)
        # Tests may inject a number of transient 500 failures
        if self.state.injected_play_fails > 0:
            self.state.injected_play_fails -= 1
            return web.Response(status=500)
        headers = request.headers
        # Verify headers first
        assert headers["User-Agent"] == "MediaControl/1.0"
        assert headers["Content-Type"] == "application/x-apple-binary-plist"
        body = await request.read()
        parsed = plistlib.loads(body)
        # Record request details so tests can assert on them afterwards
        self.state.last_airplay_url = parsed["Content-Location"]
        self.state.last_airplay_start = parsed["Start-Position"]
        self.state.last_airplay_uuid = parsed["X-Apple-Session-ID"]
        # Simulate that fake device streams if URL is localhost
        if self.state.last_airplay_url.startswith("http://127.0.0.1"):
            _LOGGER.debug("Retrieving file from %s", self.state.last_airplay_url)
            self.state.last_airplay_content, _ = await simple_get(
                self.state.last_airplay_url
            )
        return web.Response(status=200)

    async def handle_airplay_playback_info(self, request):
        """Handle AirPlay playback-info requests."""
        # Pop an injected canned response if one is queued, otherwise
        # answer with a default "not playing" plist.
        if self.state.airplay_responses:
            response = self.state.airplay_responses.pop()
        else:
            plist = dict(readyToPlay=False, uuid=123)
            # plistlib.dumps already returns bytes; the previous
            # .encode("utf-8") on its result raised AttributeError.
            response = AirPlayPlaybackResponse(200, plistlib.dumps(plist))
        return web.Response(
            body=response.content,
            status=response.code,
            content_type="text/x-apple-plist+xml",
        )

    # TODO: Extract device auth code to separate module and make it more
    # general. This is a dumb implementation that verifies hard coded values,
    # which is fine for regression but an implementation with better validation
    # would be better.
    async def handle_pair_pin_start(self, request):
        """Handle start of AirPlay device authentication."""
        return web.Response(status=200)  # Normally never fails

    async def handle_pair_setup_pin(self, request):
        """Handle AirPlay device authentication requests."""
        content = await request.content.read()
        hexlified = binascii.hexlify(content)
        # Compare against hex dumps of each auth step captured from a real
        # device; reply with the matching canned response.
        if hexlified == _DEVICE_AUTH_STEP1:
            return web.Response(
                body=binascii.unhexlify(_DEVICE_AUTH_STEP1_RESP), status=200
            )
        elif hexlified == _DEVICE_AUTH_STEP2:
            return web.Response(
                body=binascii.unhexlify(_DEVICE_AUTH_STEP2_RESP), status=200
            )
        elif hexlified == _DEVICE_AUTH_STEP3:
            return web.Response(
                body=binascii.unhexlify(_DEVICE_AUTH_STEP3_RESP), status=200
            )
        return web.Response(status=403)

    async def handle_airplay_pair_verify(self, request):
        """Handle verification of AirPlay device authentication."""
        content = await request.content.read()
        hexlified = binascii.hexlify(content)
        if hexlified == _DEVICE_VERIFY_STEP1:
            return web.Response(
                body=binascii.unhexlify(_DEVICE_VERIFY_STEP1_RESP), status=200
            )
        elif hexlified == _DEVICE_VERIFY_STEP2:
            # A successful verify marks the session as authenticated
            self.state.has_authenticated = True
            return web.Response(body=_DEVICE_VERIFY_STEP2_RESP, status=200)
        return web.Response(body=b"", status=403)
class FakeAirPlayUseCases:
    """Wrapper for altering behavior of a FakeAirPlayDevice instance."""

    def __init__(self, state):
        """Initialize a new AirPlayUseCases."""
        self.state = state

    def airplay_play_failure(self, count):
        """Make play command fail a number of times."""
        self.state.injected_play_fails = count

    def airplay_playback_idle(self):
        """Make playback-info return idle info."""
        payload = plistlib.dumps({"readyToPlay": False, "uuid": 123})
        self.state.airplay_responses.insert(
            0, AirPlayPlaybackResponse(200, payload)
        )

    def airplay_playback_playing(self):
        """Make playback-info return that something is playing."""
        # Deliberately minimal payload; only duration is needed for now
        payload = plistlib.dumps({"duration": 0.8})
        self.state.airplay_responses.insert(
            0, AirPlayPlaybackResponse(200, payload)
        )

    def airplay_require_authentication(self):
        """Require device authentication for AirPlay."""
        self.state.has_authenticated = False

    def airplay_always_fail_authentication(self):
        """Always fail authentication for AirPlay."""
        self.state.always_auth_fail = True

    def airplay_playback_playing_no_permission(self):
        """Make playback-info return forbidden."""
        self.state.airplay_responses.insert(0, AirPlayPlaybackResponse(403, None))
| {
"repo_name": "postlund/pyatv",
"path": "tests/fake_device/airplay.py",
"copies": "1",
"size": "10163",
"license": "mit",
"hash": 5192651712560401000,
"line_mean": 47.1658767773,
"line_max": 726,
"alpha_frac": 0.7340352258,
"autogenerated": false,
"ratio": 3.0528687293481527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9285482916085634,
"avg_score": 0.00028420781250345614,
"num_lines": 211
} |
"""A fake application's model objects"""
from datetime import datetime
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.orm import scoped_session, sessionmaker, relation, backref, \
synonym
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import String, Unicode, UnicodeText, Integer, DateTime, \
Boolean, Float
# Global session manager. DBSession() returns the session object
# appropriate for the current web request.
maker = sessionmaker(autoflush=True, autocommit=False,
                     extension=ZopeTransactionExtension())
# Thread-local session registry; safe to import and use as a module global.
DBSession = scoped_session(maker)
# By default, the data model is defined with SQLAlchemy's declarative
# extension, but if you need more control, you can switch to the traditional
# method.
DeclarativeBase = declarative_base()
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
def init_model(engine):
    """Call me before using any of the tables or classes in the model.

    :param engine: SQLAlchemy engine to bind the scoped session to.
    """
    DBSession.configure(bind=engine)
class Group(DeclarativeBase):
    """An ultra-simple group definition."""

    __tablename__ = 'tg_group'

    # Surrogate primary key
    group_id = Column(Integer, autoincrement=True, primary_key=True)
    # Short unique identifier and human-readable display name
    group_name = Column(Unicode(16), unique=True)
    display_name = Column(Unicode(255))
    # Creation timestamp, evaluated at insert time
    created = Column(DateTime, default=datetime.now)

    def __repr__(self):
        return '<Group: name={}>'.format(self.group_name)
| {
"repo_name": "lucius-feng/tg2",
"path": "tests/fixtures/model.py",
"copies": "3",
"size": "1600",
"license": "mit",
"hash": 2295129228073881000,
"line_mean": 32.3333333333,
"line_max": 79,
"alpha_frac": 0.706875,
"autogenerated": false,
"ratio": 4.3478260869565215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6554701086956521,
"avg_score": null,
"num_lines": null
} |
# Visited-house grid: a[row][col] becomes True once a Santa delivers there.
a = [[False for x in range(1000)] for x in range(1000)]


def sumfunction(counter):
    """Mark the house at counter == [row, col] as visited.

    :param counter: two-element [row, col] position on the grid.
    :returns: 1 if this is the first visit to that house, otherwise 0.
    """
    row, col = counter
    if a[row][col]:
        return 0
    a[row][col] = True
    return 1
def main():
    """Advent of Code 2015 day 3 part 2: Santa and Robo-Santa alternate
    instructions from input3.txt; print the number of houses that receive
    at least one present.

    Note: the starting house itself is never counted (behaviour preserved
    from the original implementation).
    """
    # Map each instruction to its (row, col) delta.
    moves = {'^': (1, 0), 'v': (-1, 0), '>': (0, 1), '<': (0, -1)}
    with open("input3.txt") as infile:
        data = infile.read()
    # positions[0] is Santa, positions[1] is Robo-Santa; both start centred.
    positions = [[500, 500], [500, 500]]
    answer = 0
    for index, letter in enumerate(data):
        delta = moves.get(letter)
        if delta is None:
            # Non-move characters still consumed a turn in the original
            # (track incremented every char); parity based on the input
            # index reproduces that alternation exactly.
            continue
        mover = positions[index % 2]
        mover[0] += delta[0]
        mover[1] += delta[1]
        answer += sumfunction(mover)
    # print() works identically under Python 2 and 3 for a single value.
    print(answer)
main() | {
"repo_name": "abdulfaizp/adventofcode",
"path": "xmas3_1.py",
"copies": "1",
"size": "1089",
"license": "cc0-1.0",
"hash": -2497184055739047000,
"line_mean": 18.8181818182,
"line_max": 52,
"alpha_frac": 0.6189164371,
"autogenerated": false,
"ratio": 2.675675675675676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8338003818382391,
"avg_score": 0.09131765887865681,
"num_lines": 55
} |
"""A fancy pants plot of a given VTEC headline."""
# local
from datetime import timezone
# third party
import pandas as pd
from geopandas import read_postgis
from pyiem.nws import vtec
from pyiem.plot.geoplot import MapPlot
from pyiem.util import get_autoplot_context, get_dbconn, utc
from pyiem.exceptions import NoDataFound
from pyiem.reference import Z_OVERLAY2, Z_OVERLAY2_LABEL
import cartopy.crs as ccrs
import pytz
TFORMAT = "%b %-d %Y %-I:%M %p %Z"
PDICT = {
"single": "Plot just this single VTEC Event",
"expand": "Plot same VTEC Phenom/Sig from any WFO coincident with event",
"etn": "Plot same VTEC Phenom/Sig + Event ID from any WFO",
}
PDICT3 = {
"on": "Overlay NEXRAD Mosaic",
"auto": "Let autoplot decide when to include NEXRAD overlay",
"off": "No NEXRAD Mosaic Please",
}
def get_description():
    """Return a dict describing how to call this plotter"""
    now = utc()
    desc = {
        "cache": 300,
        "data": True,
        "description": """This application generates a map showing the coverage of a given
    VTEC alert. The tricky part here is how time is handled
    for events whereby zones/counties can be added / removed from the alert.
    If you specific an exact time, you should get the proper extent of the
    alert at that time. If you do not specify the time, you should get the
    total inclusion of any zones/counties that were added to the alert.
    <p>This plot can be run in three special modes, those are:
    <ul>
      <li><strong>Single Event</strong>: Plots for a given WFO and event id.
      </li>
      <li><strong>Same Phen/Sig over multiple WFOs</strong>: The given single
      event is expanded in space to cover any coincident events for the given
      single event.</li>
      <li><strong>Same Phen/Sig over multiple WFOs</strong>: In this case, the
      event id is used to expand the plot.  This makes most sense for SVR + TOR
      watches along with Tropical Alerts as the same event id is used over
      multiple WFOs.</li>
    </ul>
    </p>
    """,
    }
    desc["arguments"] = [
        {
            "optional": True,
            "type": "datetime",
            "name": "valid",
            "default": now.strftime("%Y/%m/%d %H%M"),
            "label": "UTC Timestamp (inclusive) to plot the given alert at:",
            "min": "1986/01/01 0000",
        },
        {
            "type": "networkselect",
            "name": "wfo",
            "network": "WFO",
            "default": "DMX",
            "label": "Select WFO:",
        },
        {"type": "year", "min": 1986, "default": 2019, "name": "year", "label": "Year"},
        {
            "type": "vtec_ps",
            "name": "v",
            "default": "SV.W",
            "label": "VTEC Phenomena and Significance",
        },
        {
            "type": "int",
            "default": 1,
            "label": "VTEC Event Identifier / Sequence Number",
            "name": "etn",
        },
        {
            "type": "select",
            "default": "single",
            "name": "opt",
            "options": PDICT,
            "label": "Special Plot Options / Modes",
        },
        {
            "type": "select",
            "options": PDICT3,
            "default": "auto",
            "name": "n",
            "label": "Should a NEXRAD Mosaic be overlain:",
        },
    ]
    return desc
def plotter(fdict):
    """Go"""
    pgconn = get_dbconn("postgis")
    ctx = get_autoplot_context(fdict, get_description())
    # Optional map-valid timestamp; if omitted, defaults below to the
    # event's most recent issuance time.
    utcvalid = ctx.get("valid")
    wfo = ctx["wfo"]
    tzname = ctx["_nt"].sts[wfo]["tzname"]
    p1 = ctx["phenomenav"][:2]  # two-char VTEC phenomena code
    s1 = ctx["significancev"][:1]  # one-char VTEC significance code
    etn = int(ctx["etn"])
    year = int(ctx["year"])
    # County/zone (UGC) geometries for this exact VTEC event. The {year}
    # interpolation only routes to the per-year table (int-sanitized above);
    # all user-supplied values go through bound params.
    df = read_postgis(
        f"""
        SELECT w.ugc, simple_geom, u.name,
        issue at time zone 'UTC' as issue,
        expire at time zone 'UTC' as expire,
        init_expire at time zone 'UTC' as init_expire,
        1 as val,
        status, is_emergency, is_pds, w.wfo
        from warnings_{year} w JOIN ugcs u on (w.gid = u.gid)
        WHERE w.wfo = %s and eventid = %s and significance = %s and
        phenomena = %s ORDER by issue ASC
        """,
        pgconn,
        params=(wfo[-3:], etn, s1, p1),
        index_col="ugc",
        geom_col="simple_geom",
    )
    if df.empty:
        raise NoDataFound("VTEC Event was not found, sorry.")
    if ctx["opt"] == "expand":
        # Get all phenomena coincident with the above alert
        df = read_postgis(
            f"""
            SELECT w.ugc, simple_geom, u.name,
            issue at time zone 'UTC' as issue,
            expire at time zone 'UTC' as expire,
            init_expire at time zone 'UTC' as init_expire,
            1 as val,
            status, is_emergency, is_pds, w.wfo
            from warnings_{year} w JOIN ugcs u on (w.gid = u.gid)
            WHERE significance = %s and
            phenomena = %s and issue < %s and expire > %s
            ORDER by issue ASC
            """,
            pgconn,
            params=(s1, p1, df["expire"].min(), df["issue"].min()),
            index_col="ugc",
            geom_col="simple_geom",
        )
    elif ctx["opt"] == "etn":
        # Get all phenomena coincident with the above alert
        df = read_postgis(
            f"""
            SELECT w.ugc, simple_geom, u.name,
            issue at time zone 'UTC' as issue,
            expire at time zone 'UTC' as expire,
            init_expire at time zone 'UTC' as init_expire,
            1 as val,
            status, is_emergency, is_pds, w.wfo
            from warnings_{year} w JOIN ugcs u on (w.gid = u.gid)
            WHERE significance = %s and
            phenomena = %s and eventid = %s
            ORDER by issue ASC
            """,
            pgconn,
            params=(s1, p1, etn),
            index_col="ugc",
            geom_col="simple_geom",
        )
    # Storm-based warning (polygon) geometries for the event.
    sbwdf = read_postgis(
        f"""
        SELECT status, geom, wfo,
        polygon_begin at time zone 'UTC' as polygon_begin,
        polygon_end at time zone 'UTC' as polygon_end from sbw_{year}
        WHERE wfo = %s and eventid = %s and significance = %s and
        phenomena = %s ORDER by polygon_begin ASC
        """,
        pgconn,
        params=(wfo[-3:], etn, s1, p1),
        geom_col="geom",
    )
    if not sbwdf.empty and ctx["opt"] == "expand":
        # Get all phenomena coincident with the above alert
        sbwdf = read_postgis(
            f"""
            SELECT status, geom, wfo,
            polygon_begin at time zone 'UTC' as polygon_begin,
            polygon_end at time zone 'UTC' as polygon_end from sbw_{year}
            WHERE status = 'NEW' and significance = %s and
            phenomena = %s and issue < %s and expire > %s
            ORDER by polygon_begin ASC
            """,
            pgconn,
            params=(s1, p1, df["expire"].min(), df["issue"].min()),
            geom_col="geom",
        )
    elif not sbwdf.empty and ctx["opt"] == "etn":
        # Get all phenomena coincident with the above alert
        sbwdf = read_postgis(
            f"""
            SELECT status, geom, wfo,
            polygon_begin at time zone 'UTC' as polygon_begin,
            polygon_end at time zone 'UTC' as polygon_end from sbw_{year}
            WHERE status = 'NEW' and significance = %s and
            phenomena = %s and eventid = %s
            ORDER by polygon_begin ASC
            """,
            pgconn,
            params=(s1, p1, etn),
            geom_col="geom",
        )
    if utcvalid is None:
        utcvalid = df["issue"].max()
    else:
        # hack for an assumption below
        utcvalid = pd.Timestamp(utcvalid.replace(tzinfo=None))

    def m(valid):
        """Convert to our local timestamp."""
        return (
            valid.tz_localize(pytz.UTC)
            .astimezone(pytz.timezone(tzname))
            .strftime(TFORMAT)
        )

    df["color"] = vtec.NWS_COLORS.get("%s.%s" % (p1, s1), "#FF0000")
    if not sbwdf.empty:
        # When polygons exist, they carry the warning color; shade the
        # counties/zones neutrally instead.
        df["color"] = "tan"
    if len(df["wfo"].unique()) == 1:
        bounds = df["simple_geom"].total_bounds
    else:
        # Multi-WFO events: drop Alaska/Pacific offices so the computed
        # bounding box is not blown out beyond CONUS.
        df2 = df[~df["wfo"].isin(["AJK", "AFC", "AFG", "HFO", "JSJ"])]
        bounds = df2["simple_geom"].total_bounds
    buffer = 0.4  # degrees of padding around the event's bounding box
    title = "%s %s %s%s %s (%s.%s) #%s" % (
        year,
        wfo,
        vtec.VTEC_PHENOMENA.get(p1, p1),
        " (PDS) " if True in df["is_pds"].values else "",
        (
            "Emergency"
            if True in df["is_emergency"].values
            else vtec.VTEC_SIGNIFICANCE.get(s1, s1)
        ),
        p1,
        s1,
        etn,
    )
    if ctx["opt"] in ["expand", "etn"]:
        # The expanded modes cover multiple WFOs, so use a generic title.
        title = (
            f"{year} NWS {vtec.VTEC_PHENOMENA.get(p1, p1)} "
            f"{vtec.VTEC_SIGNIFICANCE.get(s1, s1)} ({p1}.{s1})"
        )
        if ctx["opt"] == "etn":
            title += f" #{etn}"
    mp = MapPlot(
        subtitle="Map Valid: %s, Event: %s to %s"
        % (m(utcvalid), m(df["issue"].min()), m(df["expire"].max())),
        title=title,
        sector="custom",
        west=bounds[0] - buffer,
        south=bounds[1] - buffer,
        east=bounds[2] + buffer,
        north=bounds[3] + buffer,
        nocaption=True,
        twitter=True,
    )
    if len(df["wfo"].unique()) == 1 and wfo not in ["PHEB", "PAAQ"]:
        # Single-office event: snap to the standard CWA sector view.
        mp.sector = "cwa"
        mp.cwa = wfo[-3:]
    # CAN statements come here with time == expire :/
    if ctx["opt"] == "single":
        df2 = df[(df["issue"] <= utcvalid) & (df["expire"] > utcvalid)]
    else:
        df2 = df
    if df2.empty:
        mp.ax.text(
            0.5,
            0.5,
            "Event No Longer Active",
            zorder=1000,
            transform=mp.ax.transAxes,
            fontsize=24,
            bbox=dict(color="white"),
            ha="center",
        )
    else:
        mp.fill_ugcs(
            df2["val"].to_dict(),
            color=df2["color"].to_dict(),
            nocbar=True,
            labels=df2["name"].to_dict(),
            missingval="",
            ilabel=(len(df2.index) <= 10),
            labelbuffer=5,
            is_firewx=(p1 == "FW"),
        )
    if not sbwdf.empty:
        color = vtec.NWS_COLORS.get("%s.%s" % (p1, s1), "#FF0000")
        # Default to the initial polygon; replaced below by the polygon
        # valid at the map time, when one exists.
        poly = sbwdf.iloc[0]["geom"]
        df2 = sbwdf[
            (sbwdf["polygon_begin"] <= utcvalid)
            & (sbwdf["polygon_end"] > utcvalid)
        ]
        if not df2.empty:
            # draw new
            mp.ax.add_geometries(
                [poly],
                ccrs.PlateCarree(),
                facecolor="None",
                edgecolor="k",
                zorder=Z_OVERLAY2,
            )
            poly = df2.iloc[0]["geom"]
        mp.ax.add_geometries(
            [poly],
            ccrs.PlateCarree(),
            facecolor=color,
            alpha=0.5,
            edgecolor="k",
            zorder=Z_OVERLAY2,
        )
    mp.drawcities(textsize=12, color="#fff", outlinecolor="#000")
    mp.drawcounties()
    if ctx["n"] != "off":
        # NEXRAD mosaic: forced "on", or automatic for convective warnings
        # (SV/TO/FF/MA warnings).
        if (p1 in ["SV", "TO", "FF", "MA"] and s1 == "W") or ctx["n"] == "on":
            radval = mp.overlay_nexrad(
                utcvalid.to_pydatetime().replace(tzinfo=timezone.utc),
                caxpos=(0.02, 0.07, 0.3, 0.005),
            )
            if radval is not None:
                tstamp = radval.astimezone(pytz.timezone(tzname)).strftime(
                    "%-I:%M %p"
                )
                mp.ax.text(
                    0.01,
                    0.99,
                    f"NEXRAD: {tstamp}",
                    transform=mp.ax.transAxes,
                    bbox=dict(color="white"),
                    va="top",
                    zorder=Z_OVERLAY2_LABEL + 100,
                )
    return mp.fig, df.drop("simple_geom", axis=1)
if __name__ == "__main__":
    # Ad-hoc smoke test: render the 2021 MOB Flash Flood Watch #5.
    test_params = {
        "phenomenav": "FF",
        "significancev": "A",
        "wfo": "MOB",
        "year": 2021,
        "etn": 5,
        "valid": "2021-04-17 0000",
    }
    plotter(test_params)
| {
"repo_name": "akrherz/iem",
"path": "htdocs/plotting/auto/scripts200/p208.py",
"copies": "1",
"size": "11996",
"license": "mit",
"hash": 5809693833892676000,
"line_mean": 31.8657534247,
"line_max": 79,
"alpha_frac": 0.5063354451,
"autogenerated": false,
"ratio": 3.5127379209370426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45190733660370425,
"avg_score": null,
"num_lines": null
} |
""" A fantastic python code to determine the quenched SFH parameters of galaxies using emcee (http://dan.iel.fm/emcee/current/). This file contains all the functions needed to determine the mean SFH parameters of a population.
N.B. The data files .ised_ASCII contain the extracted bc03 models and have a 0 in the origin at [0,0]. The first row contains the model ages (from the second column) - data[0,1:]. The first column contains the model lambda values (from the second row) - data[1:,0]. The remaining data[1:,1:] are the flux values at each of the ages (columns, x) and lambda (rows, y) values
"""
import numpy as N
import scipy as S
import pylab as P
import pyfits as F
from scipy.io.idl import readsav
import pyfits as F
import emcee
import triangle
import time
import os
import matplotlib.image as mpimg
from astropy.cosmology import FlatLambdaCDM
from scipy.stats import kde
from scipy.interpolate import LinearNDInterpolator
from scipy.interpolate import interp2d
from itertools import product
import sys
# Fiducial flat LambdaCDM cosmology used throughout.
cosmo = FlatLambdaCDM(H0 = 71.0, Om0 = 0.26)

# Global matplotlib styling for every figure produced by this module.
font = {'family':'serif', 'size':16}
P.rc('font', **font)
P.rc('xtick', labelsize='medium')
P.rc('ytick', labelsize='medium')
P.rc('axes', labelsize='medium')

# Interactive (Python 2) configuration: choose between a pre-computed colour
# look-up table and direct SPS model prediction. Whichever branch runs
# defines the module-level lnlike_one used by lnprob below.
method = raw_input('Do you wish to use a look-up table? (yes/no) :')
if method == 'yes' or method =='y':
    prov = raw_input('Do you wish to use the provided u-r and NUV-u look up tables? (yes/no) :')
    if prov == 'yes' or prov =='y':
        # Default grid matching the bundled look-up tables:
        # 50 ages x 100 tau x 100 tq values.
        print 'gridding...'
        tq = N.linspace(0.003, 13.8, 100)
        tau = N.linspace(0.003, 4, 100)
        ages = N.linspace(10.88861228, 13.67023409, 50)
        grid = N.array(list(product(ages, tau, tq)))
        print 'loading...'
        nuv_pred = N.load('nuv_look_up_ssfr.npy')
        ur_pred = N.load('ur_look_up_ssfr.npy')
        # lu columns: [NUV-u, u-r] predictions for each grid point.
        lu = N.append(nuv_pred.reshape(-1,1), ur_pred.reshape(-1,1), axis=1)
    elif prov=='no' or prov=='n':
        # User-supplied look-up tables: ask for file locations and the
        # three grid axes (ages, tau, tq) that the tables were built on.
        col1 = str(raw_input('Location of your NUV-u colour look up table :'))
        col2 = str(raw_input('Location of your u-r colour look up table :'))
        one = N.array(input('Define first axis values (ages) of look up table start, stop, len(axis1); e.g. 10, 13.8, 50 :'))
        ages = N.linspace(float(one[0]), float(one[1]), float(one[2]))
        two = N.array(input('Define second axis values (tau) of look up table start, stop, len(axis1); e.g. 0, 4, 100 : '))
        tau = N.linspace(float(two[0]), float(two[1]), float(two[2]))
        three = N.array(input('Define third axis values (tq) of look up table start, stop, len(axis1); e.g. 0, 13.8, 100 : '))
        tq = N.linspace(float(three[0]), float(three[1]), float(three[2]))
        grid = N.array(list(product(ages, tau, tq)))
        print 'loading...'
        nuv_pred = N.load(col1)
        ur_pred = N.load(col2)
        lu = N.append(nuv_pred.reshape(-1,1), ur_pred.reshape(-1,1), axis=1)
    else:
        sys.exit("You didn't give a valid answer (yes/no). Try running again.")

    def lnlike_one(theta, ur, sigma_ur, nuvu, sigma_nuvu, age):
        """ Function for determining the likelihood of ONE quenching model described by theta = [tq, tau] for all the galaxies in the sample. Simple chi squared likelihood between predicted and observed colours of the galaxies.

        :theta:
            An array of size (1,2) containing the values [tq, tau] in Gyr.
        :tq:
            The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
        :tau:
            The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
        :ur:
            Observed u-r colour of a galaxy; k-corrected.
        :sigma_ur:
            Error on the observed u-r colour of a galaxy
        :nuvu:
            Observed nuv-u colour of a galaxy; k-corrected.
        :sigma_nuvu:
            Error on the observed nuv-u colour of a galaxy
        :age:
            Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.

        RETURNS:
            Array of same shape as :age: containing the likelihood for each galaxy at the given :theta:
        """
        tq, tau = theta
        pred_nuvu, pred_ur = lookup_col_one(theta, age)
        # NOTE(review): the u-r normalisation term uses N.log while the
        # nuv-u term uses N.log10 -- looks inconsistent for a Gaussian
        # log-likelihood; confirm which is intended.
        return -0.5*N.log(2*N.pi*sigma_ur**2)-0.5*((ur-pred_ur)**2/sigma_ur**2)-0.5*N.log10(2*N.pi*sigma_nuvu**2)-0.5*((nuvu-pred_nuvu)**2/sigma_nuvu**2)
elif method == 'no' or method =='n':
    """We first define the directory in which we will find the BC03 model, extracted from the original files downloaded from the BC03 website into a usable format. Here we implement a solar metallicity model with a Chabrier IMF."""
    model = str(raw_input('Location of the extracted (.ised_ASCII) SPS model to use to predict the u-r and NUV-u colours, e.g. ~/extracted_bc2003_lr_m62_chab_ssp.ised_ASCII :'))
    data = N.loadtxt(model)
    # fluxes is only needed (and therefore only imported) for direct SPS
    # prediction via predict_c_one.
    import fluxes

    def lnlike_one(theta, ur, sigma_ur, nuvu, sigma_nuvu, age):
        """ Function for determining the likelihood of ONE quenching model described by theta = [tq, tau] for all the galaxies in the sample. Simple chi squared likelihood between predicted and observed colours of the galaxies.

        :theta:
            An array of size (1,2) containing the values [tq, tau] in Gyr.
        :tq:
            The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
        :tau:
            The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
        :ur:
            Observed u-r colour of a galaxy; k-corrected.
        :sigma_ur:
            Error on the observed u-r colour of a galaxy
        :nuvu:
            Observed nuv-u colour of a galaxy; k-corrected.
        :sigma_nuvu:
            Error on the observed nuv-u colour of a galaxy
        :age:
            Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.

        RETURNS:
            Array of same shape as :age: containing the likelihood for each galaxy at the given :theta:
        """
        tq, tau = theta
        pred_nuvu, pred_ur = predict_c_one(theta, age)
        # NOTE(review): same N.log vs N.log10 inconsistency as the look-up
        # branch above; confirm which is intended.
        return -0.5*N.log(2*N.pi*sigma_ur**2)-0.5*((ur-pred_ur)**2/sigma_ur**2)-0.5*N.log10(2*N.pi*sigma_nuvu**2)-0.5*((nuvu-pred_nuvu)**2/sigma_nuvu**2)
else:
    sys.exit("You didn't give a valid answer (yes/no). Try running again.")
n=0  # global call counter used by lnprob for crude progress reporting
def expsfh(tq, tau, time):
    """ Star formation rate at every timestep for one (tq, tau) model.

    The sSFR track of Peng et al. (2010) is evaluated at all times and
    held flat before t = 3 Gyr. The SFR is constant at its t = tq value
    until the onset of quenching, after which it declines exponentially
    with timescale tau.

    :tq:
        Onset of quenching in Gyr; anywhere within known cosmic time.
    :tau:
        Exponential decay timescale in Gyr (0 < tau < 5 elsewhere).
    :time:
        Array of times (Gyr) at which to evaluate the SFR.

    RETURNS:
        :sfr:
            Array matching :time: with the SFR at each timestep.
    """
    # Peng et al. (2010) sSFR, flattened before t = 3 Gyr.
    ssfr = 2.5 * (((10 ** 10.27) / 1E10) ** (-0.1)) * (time / 3.5) ** (-2.2)
    flat_until = time.searchsorted(3.0)
    ssfr[:flat_until] = N.interp(3.0, time, ssfr)
    # SFR at the quenching time, converted from per-10^10 M_solar per Gyr
    # to M_solar per year.
    sfr_at_quench = N.interp(tq, time, ssfr) * (1E10) / (1E9)
    quench_idx = time.searchsorted(tq)
    sfr = N.empty(len(time))
    sfr[:quench_idx] = sfr_at_quench
    sfr[quench_idx:] = sfr_at_quench * N.exp(-(time[quench_idx:] - tq) / tau)
    return sfr
def expsfh_mass(ur, Mr, age, tq, tau, time):
    """Calculate exponential decline star formation rates at each time step input by matching to the mass of the observed galaxy at the observed time. This is calculated from the mass-to-light ratio that is a function of one color band u-r as in Bladry et al. (2006; see Figure 5) who fit to data from Glazebrrok et al (2004) and Kauffmann et al (2003).

    INPUT:
    :ur:
        u-r optical colour, needed to calculate the mass of the observed galaxy
    :Mr:
        Absolute r-band magnitude, needed to calculate the mass of the observed galaxy
    :age:
        Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.
    :tq:
        The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
    :tau:
        The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
    :time:
        An array of time values at which the SFR is calcualted at each step.

    RETURNS:
    :sfr:
        Array of the same dimensions of time containing the sfr at each timestep.
    """
    t_end = age # time at which to integrate under the exponential curve until to gain the final mass
    # Piecewise linear mass-to-light ratio vs u-r colour, with a break at
    # u-r = 2.1 (Baldry et al. 2006, Fig. 5).
    if ur <=2.1:
        log_m_l = -0.95 + 0.56 * ur
    else:
        log_m_l = -0.16 + 0.18 * ur
    # Stellar mass from absolute r magnitude and M/L; 4.62 is presumably
    # the solar absolute r-band magnitude used for the luminosity -- confirm.
    m_msun = 10**(((4.62 - Mr)/2.5) + log_m_l)
    print 'Mass [M_solar]', m_msun
    # Normalisation so the constant-then-exponential SFH integrates to
    # m_msun by t_end; the /1E9 converts to M_solar per year.
    c_sfr = (m_msun/(tq + tau*(1 - N.exp((tq - t_end)/tau)))) / 1E9
    a = time.searchsorted(tq)
    # Constant SFR before tq, exponential decline with timescale tau after.
    sfr = N.ones(len(time))*c_sfr
    sfr[a:] = c_sfr*N.exp(-(time[a:]-tq)/tau)
    return sfr
def predict_c_one(theta, age):
    """ Predict the u-r and NUV-u colours of one (tq, tau) SFH at an
    observation time, using the BC03 model loaded into ``data``.

    Colours are computed at every timestep (the cumulative mass across the
    full SFH sets how much each stellar population contributes to the flux)
    and then interpolated to the requested observation age(s).

    :theta:
        Sequence (tq, tau) in Gyr: quenching onset time and exponential
        decay timescale.
    :age:
        Observation age(s) of the galaxy in Gyr (scalar or array).

    RETURNS:
        :nuv_u_age:
            NUV-u colour(s) at :age: for this :theta:.
        :u_r_age:
            u-r colour(s) at :age: for this :theta:.
    """
    tq, tau = theta
    # Dense sampling of the first ~10 Myr stitched onto a coarser grid
    # running out to 14 Gyr.
    t = N.append(N.arange(0, 0.01, 0.003), N.linspace(0, 14.0, 100)[1:])
    sfr = expsfh(tq, tau, t)
    # Total flux at each time for this SFH (see fluxes module); times in years.
    t_yr = t * 1E9
    total_flux = fluxes.assign_total_flux(
        data[0, 1:], data[1:, 0], data[1:, 1:], t_yr, sfr)
    # Colours at all times, then interpolate to the observed age(s).
    nuv_u, u_r = get_colours(t_yr, total_flux, data)
    return N.interp(age, t, nuv_u), N.interp(age, t, u_r)
def get_colours(time, flux, data):
    """ Compute NUV-u and u-r colours of an SFH's SED across time.

    AB magnitudes are evaluated in the NUV, u and r bandpasses (module-level
    filter curves loaded from ugriz.sav) and differenced.

    :time:
        Times at which the fluxes are tabulated (years here).
    :flux:
        SED fluxes for the SFH, as returned by fluxes.assign_total_flux.
    :data:
        BC03 model array; column 0 (from row 1) is the wavelength grid.

    RETURNS:
        :nuv_u: :u_r:
            Arrays the same shape as :time: with the predicted colours.
    """
    wavelengths = data[1:, 0]
    band_mags = [
        fluxes.calculate_AB_mag(time, wavelengths, flux, bandwave, bandtrans)
        for bandwave, bandtrans in (
            (nuvwave, nuvtrans),
            (uwave, utrans),
            (rwave, rtrans),
        )
    ]
    nuvmag, umag, rmag = band_mags
    return nuvmag - umag, umag - rmag
def lookup_col_one(theta, age):
    """Predict (NUV-u, u-r) from the interp2d look-ups built in sample().

    The global interpolators u (u-r) and v (NUV-u) are already sliced at
    the relevant age, so :age: is unused here; it is kept for signature
    parity with predict_c_one.
    """
    tq, tau = theta
    return v(tq, tau), u(tq, tau)
# Flat (top-hat) prior over the allowed (tq, tau) box
def lnprior(theta):
    """ Log-prior for theta = (tq, tau).

    Flat inside 0.003 <= tq <= 13.807108309208775 Gyr and
    0.003 <= tau <= 4.0 Gyr; -inf outside, which lets lnprob skip the
    likelihood evaluation entirely for out-of-range walkers.

    :theta:
        Sequence (tq, tau) in Gyr.

    RETURNS:
        0.0 inside the allowed ranges, -inf otherwise.
    """
    tq, tau = theta
    tq_in_range = 0.003 <= tq <= 13.807108309208775
    tau_in_range = 0.003 <= tau <= 4.0
    return 0.0 if tq_in_range and tau_in_range else -N.inf
# Overall likelihood function combining prior and model
def lnprob(theta, ur, sigma_ur, nuvu, sigma_nuvu, age):
    """Overall posterior function combiningin the prior and calculating the likelihood. Also prints out the progress through the code with the use of n.

    :theta:
        An array of size (1,4) containing the values [tq, tau] for both smooth and disc galaxies in Gyr.
    :tq:
        The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time. Can be either for smooth or disc galaxies.
    :tau:
        The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5. Can be either for smooth or disc galaxies.
    :ur:
        Observed u-r colour of a galaxy; k-corrected. An array of shape (N,1) or (N,).
    :sigma_ur:
        Error on the observed u-r colour of a galaxy. An array of shape (N,1) or (N,).
    :nuvu:
        Observed nuv-u colour of a galaxy; k-corrected. An array of shape (N,1) or (N,).
    :sigma_nuvu:
        Error on the observed nuv-u colour of a galaxy. An array of shape (N,1) or (N,).
    :age:
        Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr. An array of shape (N,1) or (N,).

    RETURNS:
        Value of the posterior function for the given :theta: value.
    """
    # Crude progress report: emcee calls this once per walker per step,
    # so print every 100th evaluation.
    global n
    n+=1
    if n %100 == 0:
        print 'step number', n/100
    lp = lnprior(theta)
    if not N.isfinite(lp):
        # Walker is outside the prior box -- reject without evaluating
        # the (expensive) likelihood.
        return -N.inf
    return lp + lnlike_one(theta, ur, sigma_ur, nuvu, sigma_nuvu, age)
def sample(ndim, nwalkers, nsteps, burnin, start, ur, sigma_ur, nuvu, sigma_nuvu, age, id, ra, dec):
    """ Function to implement the emcee EnsembleSampler function for the sample of galaxies input. Burn in is run and calcualted fir the length specified before the sampler is reset and then run for the length of steps specified.

    :ndim:
        The number of parameters in the model that emcee must find. In this case it always 2 with tq, tau.
    :nwalkers:
        The number of walkers that step around the parameter space. Must be an even integer number larger than ndim.
    :nsteps:
        The number of steps to take in the final run of the MCMC sampler. Integer.
    :burnin:
        The number of steps to take in the inital burn-in run of the MCMC sampler. Integer.
    :start:
        The positions in the tq and tau parameter space to start for both disc and smooth parameters. An array of shape (1,4).
    :ur:
        Observed u-r colour of a galaxy; k-corrected. An array of shape (N,1) or (N,).
    :sigma_ur:
        Error on the observed u-r colour of a galaxy. An array of shape (N,1) or (N,).
    :nuvu:
        Observed nuv-u colour of a galaxy; k-corrected. An array of shape (N,1) or (N,).
    :sigma_nuvu:
        Error on the observed nuv-u colour of a galaxy. An array of shape (N,1) or (N,).
    :age:
        Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr. An array of shape (N,1) or (N,).
    :id:
        ID number to specify which galaxy this run is for.
    :ra:
        right ascension of source, used for identification purposes
    :dec:
        declination of source, used for identification purposes

    RETURNS:
    :samples:
        Array of shape (nsteps*nwalkers, 4) containing the positions of the walkers at all steps for all 4 parameters.
    :samples_save:
        Location at which the :samples: array was saved to.
    """
    if method == 'yes' or method=='y':
        # Look-up-table mode: build 2-D interpolators over (tq, tau) for
        # this galaxy's age by slicing the 3-D grid at the two bracketing
        # grid ages. These become the globals u (u-r) and v (NUV-u) used
        # by lookup_col_one.
        global u
        global v
        a = N.searchsorted(ages, age)
        b = N.array([a-1, a])
        print 'interpolating function, bear with...'
        # Grid points and predicted colours at the two bracketing ages only.
        g = grid[N.where(N.logical_or(grid[:,0]==ages[b[0]], grid[:,0]==ages[b[1]]))]
        values = lu[N.where(N.logical_or(grid[:,0]==ages[b[0]], grid[:,0]==ages[b[1]]))]
        f = LinearNDInterpolator(g, values, fill_value=(-N.inf))
        # Evaluate at the exact age over one full (tau, tq) plane
        # (first 10000 grid rows = one age slice of the 100x100 grid).
        look = f(age, grid[:10000, 1], grid[:10000, 2])
        lunuv = look[:,0].reshape(100,100)
        v = interp2d(tq, tau, lunuv)
        luur = look[:,1].reshape(100,100)
        u = interp2d(tq, tau, luur)
    else:
        # Direct SPS prediction mode: nothing to precompute here.
        pass
    print 'emcee running...'
    # Walkers start in a tight Gaussian ball around the supplied start point.
    p0 = [start + 1e-4*N.random.randn(ndim) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=2, args=(ur, sigma_ur, nuvu, sigma_nuvu, age))
    """ Burn in run here..."""
    pos, prob, state = sampler.run_mcmc(p0, burnin)
    lnp = sampler.flatlnprobability
    # Persist burn-in probabilities and chain, tagged by galaxy id,
    # position and wall-clock time.
    N.save('lnprob_burnin_'+str(int(id))+'_'+str(ra)+'_'+str(dec)+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.npy', lnp)
    samples = sampler.chain[:,:,:].reshape((-1,ndim))
    samples_save = 'samples_burn_in_'+str(int(id))+'_'+str(ra)+'_'+str(dec)+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.npy'
    N.save(samples_save, samples)
    # Discard the burn-in chain, but continue from the walkers' last positions.
    sampler.reset()
    print 'Burn in complete...'
    """ Main sampler run here..."""
    sampler.run_mcmc(pos, nsteps)
    lnpr = sampler.flatlnprobability
    N.save('lnprob_run_'+str(int(id))+'_'+str(ra)+'_'+str(dec)+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.npy', lnpr)
    samples = sampler.chain[:,:,:].reshape((-1,ndim))
    samples_save = 'samples_'+str(int(id))+'_'+str(ra)+'_'+str(dec)+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.npy'
    N.save(samples_save, samples)
    print 'Main emcee run completed.'
    return samples, samples_save
# Trace plot: walker positions against step number for both parameters
def walker_plot(samples, nwalkers, limit, id):
    """ Plot every walker's chain for tq (top panel) and tau (bottom panel).

    :samples:
        Flattened chain of shape (nsteps*nwalkers, 2) from sample().
    :nwalkers:
        Number of walkers used to produce :samples:. Must be an even
        integer larger than ndim.
    :limit:
        Plot only the first :limit: steps of each walker.
    :id:
        Galaxy identifier used in the saved file name.

    RETURNS:
    :fig:
        The figure object (also saved to a timestamped PDF).
    """
    chains = samples.reshape(nwalkers, -1, 2)[:, :limit, :]
    fig = P.figure(figsize=(8,5))
    ax_tq = P.subplot(2,1,1)
    ax_tau = P.subplot(2,1,2)
    for walker in chains:
        ax_tq.plot(walker[:, 0], 'k')
        ax_tau.plot(walker[:, 1], 'k')
    # Only the bottom panel keeps its x tick labels.
    ax_tq.tick_params(axis='x', labelbottom='off')
    ax_tau.set_xlabel(r'step number')
    ax_tq.set_ylabel(r'$t_{quench}$')
    ax_tau.set_ylabel(r'$\tau$')
    P.subplots_adjust(hspace=0.1)
    save_fig = 'walkers_steps_'+str(int(id))+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.pdf'
    fig.savefig(save_fig)
    return fig
def corner_plot(s, labels, extents, bf, id):
    """ Plotting function to visualise the gaussian peaks found by the sampler function. 2D contour plots of tq against tau are plotted along with kernelly smooth histograms for each parameter.

    :s:
        Array of shape (#, 2) for either the smooth or disc produced by the emcee EnsembleSampler in the sample function of length determined by the number of walkers which resulted at the specified peak.
    :labels:
        List of x and y axes labels i.e. disc or smooth parameters
    :extents:
        Range over which to plot the samples, list shape [[xmin, xmax], [ymin, ymax]]
    :bf:
        Best fit values for the distribution peaks in both tq and tau found from mapping the samples. List shape [(tq, poserrtq, negerrtq), (tau, poserrtau, negerrtau)]
    :id:
        ID number to specify which galaxy this plot is for.

    RETURNS:
    :fig:
        The figure object
    """
    x, y = s[:,0], s[:,1]
    fig = P.figure(figsize=(6.25,6.25))
    # Central panel: 2-D histogram of (tq, tau) with contours, plus
    # cross-hairs at the best-fit values.
    ax2 = P.subplot2grid((3,3), (1,0), colspan=2, rowspan=2)
    ax2.set_xlabel(labels[0])
    ax2.set_ylabel(labels[1])
    triangle.hist2d(x, y, ax=ax2, bins=100, extent=extents, plot_contours=True)
    ax2.axvline(x=bf[0][0], linewidth=1)
    ax2.axhline(y=bf[1][0], linewidth=1)
    [l.set_rotation(45) for l in ax2.get_xticklabels()]
    [j.set_rotation(45) for j in ax2.get_yticklabels()]
    ax2.tick_params(axis='x', labeltop='off')
    # Top panel: KDE-smoothed marginal for tq, with best fit (solid) and
    # +/- errors (dashed).
    ax1 = P.subplot2grid((3,3), (0,0),colspan=2)
    den = kde.gaussian_kde(x[N.logical_and(x>=extents[0][0], x<=extents[0][1])])
    pos = N.linspace(extents[0][0], extents[0][1], 750)
    ax1.plot(pos, den(pos), 'k-', linewidth=1)
    ax1.axvline(x=bf[0][0], linewidth=1)
    ax1.axvline(x=bf[0][0]+bf[0][1], c='b', linestyle='--')
    ax1.axvline(x=bf[0][0]-bf[0][2], c='b', linestyle='--')
    ax1.set_xlim(extents[0][0], extents[0][1])
    # Twin x-axis along the top labelling cosmic time in redshift;
    # tick positions are the ages (Gyr) corresponding to the z labels.
    ax12 = ax1.twiny()
    ax12.set_xlim(extents[0][0], extents[0][1])
    ax12.set_xticks(N.array([1.87, 3.40, 6.03, 8.77, 10.9, 12.5]))
    ax12.set_xticklabels(N.array([3.5, 2.0 , 1.0, 0.5, 0.25, 0.1]))
    [l.set_rotation(45) for l in ax12.get_xticklabels()]
    ax12.tick_params(axis='x', labelbottom='off')
    ax12.set_xlabel(r'$z$')
    ax1.tick_params(axis='x', labelbottom='off', labeltop='off')
    ax1.tick_params(axis='y', labelleft='off')
    # Right panel: KDE-smoothed marginal for tau, rotated to share the
    # central panel's y axis.
    ax3 = P.subplot2grid((3,3), (1,2), rowspan=2)
    ax3.tick_params(axis='x', labelbottom='off')
    ax3.tick_params(axis='y', labelleft='off')
    den = kde.gaussian_kde(y[N.logical_and(y>=extents[1][0], y<=extents[1][1])])
    pos = N.linspace(extents[1][0], extents[1][1], 750)
    ax3.plot(den(pos), pos, 'k-', linewidth=1)
    ax3.axhline(y=bf[1][0], linewidth=1)
    ax3.axhline(y=bf[1][0]+bf[1][1], c='b', linestyle='--')
    ax3.axhline(y=bf[1][0]-bf[1][2], c='b', linestyle='--')
    ax3.set_ylim(extents[1][0], extents[1][1])
    # Corner inset: show the galaxy's JPEG thumbnail when one exists on disk.
    if os.path.exists(str(int(id))+'.jpeg') == True:
        ax4 = P.subplot2grid((3,3), (0,2), rowspan=1, colspan=1)
        img = mpimg.imread(str(int(id))+'.jpeg')
        ax4.imshow(img)
        ax4.tick_params(axis='x', labelbottom='off', labeltop='off')
        ax4.tick_params(axis='y', labelleft='off', labelright='off')
    P.tight_layout()
    P.subplots_adjust(wspace=0.0)
    P.subplots_adjust(hspace=0.0)
    return fig
""" Load the magnitude bandpass filters using idl save """
filters = readsav('ugriz.sav')
fuvwave= filters.ugriz.fuvwave[0]
fuvtrans = filters.ugriz.fuvtrans[0]
nuvwave= filters.ugriz.nuvwave[0]
nuvtrans = filters.ugriz.nuvtrans[0]
uwave= filters.ugriz.uwave[0]
utrans = filters.ugriz.utrans[0]
gwave= filters.ugriz.gwave[0]
gtrans = filters.ugriz.gtrans[0]
rwave= filters.ugriz.rwave[0]
rtrans = filters.ugriz.rtrans[0]
iwave= filters.ugriz.iwave[0]
itrans = filters.ugriz.itrans[0]
zwave= filters.ugriz.zwave[0]
ztrans = filters.ugriz.ztrans[0]
vwave= filters.ugriz.vwave[0]
vtrans = filters.ugriz.vtrans[0]
jwave= filters.ugriz.jwave[0]
jtrans = filters.ugriz.jtrans[0]
hwave= filters.ugriz.hwave[0]
htrans = filters.ugriz.htrans[0]
kwave= filters.ugriz.kwave[0]
ktrans = filters.ugriz.ktrans[0]
| {
"repo_name": "zooniverse/starpy",
"path": "posterior.py",
"copies": "2",
"size": "25744",
"license": "apache-2.0",
"hash": -7317574959128430000,
"line_mean": 45.3855855856,
"line_max": 411,
"alpha_frac": 0.6239123679,
"autogenerated": false,
"ratio": 3.218402300287536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9723668135992038,
"avg_score": 0.023729306439099693,
"num_lines": 555
} |
"""A fastener class for bolted joint calculations."""
import math
import numpy as np
class Fastener:
    """Fastener class containing an ID, location in space, and load vector."""

    def __init__(self,
                 ID,
                 xyz,
                 diameter,
                 E,
                 G,
                 length,
                 wt=1,
                 axis=(0, 0, 0)):
        """Initialize the instance.

        Parameters
        ----------
        ID : hashable
            Identifier for this fastener.
        xyz : sequence of float
            Location of the fastener in space.
        diameter : float
            Shank diameter of the fastener.
        E : float
            Elastic (Young's) modulus of the fastener material.
        G : float
            Shear modulus of the fastener material.
        length : float
            Grip length of the fastener.
        wt : float, optional
            Weighting factor (presumably used when distributing joint
            loads between fasteners -- confirm against callers). Default 1.
        axis : sequence of float, optional
            Direction of the fastener shank axis; stored as a unit vector.
            The default zero vector means "no axis defined" and is stored
            unchanged.
        """
        self.ID = ID
        self.xyz = xyz
        self.diameter = diameter
        self.E = E
        self.G = G
        self.length = length
        self.force = [0, 0, 0]  # load vector, populated by joint analysis
        self.wt = wt
        # Bug fix: the original code unconditionally divided by the norm,
        # so the default zero axis triggered a divide-by-zero warning and
        # stored an array of NaNs. A zero vector is now stored as-is;
        # any other vector is normalized to a unit vector. The default is
        # also an immutable tuple to avoid the shared-mutable-default trap.
        axis = np.asarray(axis, dtype=float)
        norm = np.linalg.norm(axis)
        self.axis = axis / norm if norm else axis

    def __repr__(self):
        """Return the "official" Fastener string representation."""
        return "Fastener ID: %s\nLocation: %s\nForce: %s\nDiameter: %s" \
            % (self.ID,
               self.xyz,
               self.force,
               self.diameter)

    @property
    def area(self):
        """Cross-sectional area of the fastener (pi * d**2 / 4)."""
        return (self.diameter ** 2) * math.pi / 4

    @area.setter
    def area(self, a):
        """Back-solve and store the diameter when the area is set manually."""
        self.diameter = math.sqrt(4 * a / math.pi)

    @property
    def stiffness(self):
        """Return stiffness of the fastener in each direction.

        The stiffness is calculated based on the diameter and material
        moduli of the fastener. The x-direction is the shaft axis
        (axial, E*A/L); y and z are the shear directions (G*A/L).

        Note: this does not account for total behavior of the joints.
        """
        return {'x': self.E * self.area / self.length,
                'y': self.G * self.area / self.length,
                'z': self.G * self.area / self.length}

    @property
    def compliance(self):
        """Return the compliance of the fastener in each direction.

        The compliance in each direction is the inverse of the stiffness.
        """
        return {'x': self.length / (self.E * self.area),
                'y': self.length / (self.G * self.area),
                'z': self.length / (self.G * self.area)}
| {
"repo_name": "sharkweek/mechpy",
"path": "mechpy/bolted_joints/fastener.py",
"copies": "1",
"size": "2150",
"license": "mit",
"hash": 6927011280653962000,
"line_mean": 27.6666666667,
"line_max": 78,
"alpha_frac": 0.5237209302,
"autogenerated": false,
"ratio": 3.866906474820144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9890627405020144,
"avg_score": 0,
"num_lines": 75
} |
'''A faster implementation than DendroPy.
These methods do not deal with missing or uncertain sequence data.
It is up to the user to filter out uncertain base calls, otherwise methods
will revert to Dendropy functions (which are not guaranteed to be correct for
uncertain base calls).
'''
import numpy as np
import math
from collections import Counter
import operator as op
from functools import reduce
def n_choose_k(n, k):
    "Binomial Coefficient."
    # Exploit the symmetry C(n, k) == C(n, n - k) to keep both products short.
    k = min(k, n - k)
    if k < 0:
        # Out-of-range k (k < 0 or k > n): no ways to choose.
        return 0
    numerator = reduce(op.mul, range(n - k + 1, n + 1), 1)
    denominator = reduce(op.mul, range(2, k + 1), 1)
    return numerator // denominator
# Scipy may not be installed. If not, use our own combinatorial function.
# Scipy implementation is slightly faster.
try:
    from scipy.special import comb
except ImportError:
    # Pure-Python fallback defined above; same (n, k) call signature.
    comb = n_choose_k
def count_differences(seqs):
    '''Calculate the average number of pairwise differences.

    Rather than iterate over all possible pairs of sequences, counts the
    occurrences of each letter at every alignment column. The number of
    differences at a column is the total number of possible pairwise
    comparisons minus the number of comparisons where a letter is
    compared to the same letter.

    Args:
        seqs (list): A list of equal-length nucleotide sequences.
    Returns:
        float, int: Average number of pairwise difference, number of
        segregating sites.
    '''
    depth = len(seqs)
    # Total pairwise comparisons per column is the same for every column,
    # so hoist it out of the loop instead of recomputing comb(depth, 2)
    # at each position.
    total_pairs = comb(depth, 2)
    total_sum = 0
    num_seg_sites = 0
    for pos in range(len(seqs[0])):
        counts = Counter(seq[pos] for seq in seqs)
        # Differing pairs = all pairs - same-letter pairs.
        total_sum += total_pairs - np.sum([comb(c, 2) for c in counts.values()])
        # A column with more than one distinct letter is a segregating site.
        if len(counts) > 1:
            num_seg_sites += 1
    avg_pairwise_diff = total_sum / total_pairs
    return avg_pairwise_diff, num_seg_sites
def _tajimas_d(num_seqs, avg_pairwise_diffs, num_seg_sites):
    '''Tajima's D formula.

    Computes the standard constants a1, a2, b1, b2, c1, c2, e1, e2 of
    the Tajima's D test statistic and combines them with the observed
    pairwise diversity and segregating-site count.

    Args:
        num_seqs (int): The number of sequences.
        avg_pairwise_diffs (float): The average number of pairwise differences.
        num_seg_sites (int): The number of segregating sites.

    Returns:
        float: Tajima's D value.

    Raises:
        ZeroDivisionError: If num_seg_sites is 0, the denominator
            sqrt(e1*S + e2*S*(S-1)) evaluates to 0.
    '''
    # a1 = sum_{i=1}^{n-1} 1/i, a2 = sum_{i=1}^{n-1} 1/i^2
    a1 = np.sum(np.reciprocal(np.arange(1, num_seqs, dtype='float64')))
    a2 = np.sum(np.reciprocal(np.square(np.arange(1, num_seqs, dtype='float64'))))
    b1 = float(num_seqs + 1) / (3 * (num_seqs - 1))
    b2 = float(2 * ((num_seqs**2) + num_seqs + 3 )) / (9*num_seqs*(num_seqs-1))
    c1 = b1 - 1.0 / a1
    c2 = b2 - float(num_seqs+2)/(a1 * num_seqs) + float(a2)/(a1 ** 2)
    # e1, e2 scale the variance estimate in the denominator.
    e1 = float(c1) / a1
    e2 = float(c2) / ( (a1**2) + a2 )
    # D = (pi - S/a1) / sqrt(e1*S + e2*S*(S-1))
    d = (
        float(avg_pairwise_diffs - (float(num_seg_sites)/a1))
        / math.sqrt(
            (e1 * num_seg_sites )
            + ((e2 * num_seg_sites) * (num_seg_sites - 1) ))
        )
    return d
def calculate_nucleotide_diversity(seqs):
    """Return per-site nucleotide diversity (average pairwise
    differences divided by alignment length).

    Args:
        seqs (list): Aligned nucleotide sequences.

    Returns:
        float: Nucleotide diversity.
    """
    pairwise_diff, _seg_sites = count_differences(seqs)
    return pairwise_diff / len(seqs[0])
def calculate_tajimas_d(seqs):
    """Return Tajima's D for a list of aligned nucleotide sequences.

    Args:
        seqs (list): Aligned nucleotide sequences.

    Returns:
        float: Tajima's D value.
    """
    avg_diff, seg_sites = count_differences(seqs)
    return _tajimas_d(len(seqs), avg_diff, seg_sites)
def calculate_wattersons_theta(seqs):
    """Return Watterson's theta estimator for an alignment.

    Args:
        seqs (list): Aligned nucleotide sequences.

    Returns:
        float: Number of segregating sites divided by the harmonic
            number a1 = sum_{i=1}^{n-1} 1/i.
    """
    _diff, seg_sites = count_differences(seqs)
    # Harmonic number over 1..n-1 where n is the number of sequences.
    harmonic = np.sum(np.reciprocal(np.arange(1, len(seqs), dtype='float64')))
    return float(seg_sites) / harmonic
| {
"repo_name": "andrewguy/biostructmap",
"path": "biostructmap/population_stats.py",
"copies": "1",
"size": "3497",
"license": "mit",
"hash": 1240266993093476400,
"line_mean": 33.2843137255,
"line_max": 82,
"alpha_frac": 0.6354017729,
"autogenerated": false,
"ratio": 3.2590866728797763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4394488445779776,
"avg_score": null,
"num_lines": null
} |
""" A fast implementation of the Voigt function.
"""
# p2.6+ compatibility
from __future__ import division, print_function, unicode_literals
import numpy as np
from math import sqrt, pi
# Precomputed constant sqrt(pi), used by the Voigt evaluation below.
sqrtpi = sqrt(pi)
# u values corresponding to H table values below
# (2000 samples: 0.00, 0.01, ..., 19.99)
U = np.arange(0, 20, 0.01)
# Table of h1 and h3 for use in Voigt function
H1 = np.array(
[-1.128379167096,-1.128153506307,-1.127476704444,-1.126349302806,
-1.124772202831,-1.122746665023,-1.120274307436,-1.117357103734,
-1.113997380833,-1.110197816115,-1.105961434223,-1.101291603457,
-1.096192031753,-1.090666762269,-1.084720168573,-1.078356949458,
-1.071582123367,-1.064401022458,-1.056819286313,-1.048842855295,
-1.040477963566,-1.031731131783,-1.022609159473,-1.013119117112,
-1.003268337903,-0.993064409287,-0.982515164176,-0.971628671947,
-0.960413229188,-0.948877350223,-0.937029757433,-0.924879371370,
-0.912435300708,-0.899706832010,-0.886703419362,-0.873434673861,
-0.859910352995,-0.846140349907,-0.832134682585,-0.817903482971,
-0.803456986018,-0.788805518705,-0.773959489031,-0.758929374994,
-0.743725713581,-0.728359089780,-0.712840125622,-0.697179469285,
-0.681387784257,-0.665475738580,-0.649453994194,-0.633333196387,
-0.617123963365,-0.600836875965,-0.584482467511,-0.568071213835,
-0.551613523468,-0.535119728024,-0.518600072770,-0.502064707417,
-0.485523677115,-0.468986913693,-0.452464227121,-0.435965297228,
-0.419499665677,-0.403076728196,-0.386705727090,-0.370395744018,
-0.354155693073,-0.337994314132,-0.321920166521,-0.305941622964,
-0.290066863852,-0.274303871809,-0.258660426580,-0.243144100224,
-0.227762252632,-0.212522027360,-0.197430347784,-0.182493913569,
-0.167719197466,-0.153112442425,-0.138679659026,-0.124426623230,
-0.110358874442,-0.096481713891,-0.082800203315,-0.069319163960,
-0.056043175874,-0.042976577504,-0.030123465589,-0.017487695339,
-0.005072880900, 0.007117603905, 0.019080624559, 0.030813284570,
0.042312923934, 0.053577117337, 0.064603672120, 0.075390626005,
0.085936244587, 0.096239018606, 0.106297661007, 0.116111103790,
0.125678494668, 0.134999193535, 0.144072768747, 0.152898993242,
0.161477840493, 0.169809480309, 0.177894274488, 0.185732772342,
0.193325706088, 0.200673986130, 0.207778696230, 0.214641088574,
0.221262578761, 0.227644740700, 0.233789301442, 0.239698135944,
0.245373261784, 0.250816833826, 0.256031138848, 0.261018590143,
0.265781722099, 0.270323184764, 0.274645738407, 0.278752248083,
0.282645678207, 0.286329087142, 0.289805621818, 0.293078512380,
0.296151066866, 0.299026665943, 0.301708757680, 0.304200852389,
0.306506517515, 0.308629372606, 0.310573084348, 0.312341361679,
0.313937950984, 0.315366631379, 0.316631210086, 0.317735517896,
0.318683404738, 0.319478735348, 0.320125385037, 0.320627235572,
0.320988171168, 0.321212074587, 0.321302823355, 0.321264286099,
0.321100318992, 0.320814762332, 0.320411437226, 0.319894142361,
0.319266651125, 0.318532708393, 0.317696027886, 0.316760289435,
0.315729136408, 0.314606173263, 0.313394963225, 0.312099026077,
0.310721836083, 0.309266820022, 0.307737355347, 0.306136768458,
0.304468333094, 0.302735268836, 0.300940739729, 0.299087853007,
0.297179657938, 0.295219144761, 0.293209243745, 0.291152824341,
0.289052694434, 0.286911599696, 0.284732223035, 0.282517184136,
0.280269039094, 0.277990280130, 0.275683335398, 0.273350568872,
0.270994280310, 0.268616705302, 0.266220015385, 0.263806318229,
0.261377657901, 0.258936015180, 0.256483307947, 0.254021391629,
0.251552059701, 0.249077044244, 0.246598016547, 0.244116587773,
0.241634309652, 0.239152675232, 0.236673119663, 0.234197021025,
0.231725701178, 0.229260426666, 0.226802409630, 0.224352808763,
0.221912730283, 0.219483228934, 0.217065309004, 0.214659925362,
0.212267984517, 0.209890345679, 0.207527821848, 0.205181180899,
0.202851146686, 0.200538400145, 0.198243580407, 0.195967285910,
0.193710075514, 0.191472469618, 0.189254951272, 0.187057967287,
0.184881929338, 0.182727215069, 0.180594169179, 0.178483104495,
0.176394303104, 0.174328017332, 0.172284470848, 0.170263859698,
0.168266353322, 0.166292095577, 0.164341205726, 0.162413779421,
0.160509889676, 0.158629587805, 0.156772904362, 0.154939850049,
0.153130416616, 0.151344577731, 0.149582289845, 0.147843493024,
0.146128111769, 0.144436055819, 0.142767220922, 0.141121489599,
0.139498731881, 0.137898806029, 0.136321559226, 0.134766828262,
0.133234440187, 0.131724212947, 0.130235956007, 0.128769470940,
0.127324552012, 0.125900986736, 0.124498556410, 0.123117036639,
0.121756197832, 0.120415805687, 0.119095621649, 0.117795403363,
0.116514905091, 0.115253878132, 0.114012071207, 0.112789230837,
0.111585101702, 0.110399426986, 0.109231948698, 0.108082407992,
0.106950545455, 0.105836101395, 0.104738816107, 0.103658430123,
0.102594684457, 0.101547320830, 0.100516081880, 0.099500711373,
0.098500954380, 0.097516557468, 0.096547268856, 0.095592838578,
0.094653018623, 0.093727563075, 0.092816228235, 0.091918772736,
0.091034957656, 0.090164546608, 0.089307305837, 0.088463004296,
0.087631413725, 0.086812308713, 0.086005466764, 0.085210668341,
0.084427696923, 0.083656339036, 0.082896384293, 0.082147625423,
0.081409858291, 0.080682881921, 0.079966498505, 0.079260513420,
0.078564735226, 0.077878975668, 0.077203049679, 0.076536775369,
0.075879974015, 0.075232470052, 0.074594091054, 0.073964667719,
0.073344033845, 0.072732026308, 0.072128485038, 0.071533252989,
0.070946176112, 0.070367103323, 0.069795886469, 0.069232380298,
0.068676442412, 0.068127933258, 0.067586716054, 0.067052656776,
0.066525624108, 0.066005489409, 0.065492126662, 0.064985412445,
0.064485225879, 0.063991448591, 0.063503964673, 0.063022660634,
0.062547425362, 0.062078150081, 0.061614728306, 0.061157055799,
0.060705030534, 0.060258552644, 0.059817524389, 0.059381850104,
0.058951436167, 0.058526190951, 0.058106024786, 0.057690849916,
0.057280580461, 0.056875132379, 0.056474423420, 0.056078373094,
0.055686902597, 0.055299934908, 0.054917394549, 0.054539207682,
0.054165302043, 0.053795606904, 0.053430053038, 0.053068572685,
0.052711099521, 0.052357568621, 0.052007916428, 0.051662080722,
0.051320000589, 0.050981616389, 0.050646869729, 0.050315703428,
0.049988061495, 0.049663889096, 0.049343132530, 0.049025739199,
0.048711657583, 0.048400837216, 0.048093228658, 0.047788783472,
0.047487454201, 0.047189194340, 0.046893958321, 0.046601701483,
0.046312380055, 0.046025951131, 0.045742372654, 0.045461603391,
0.045183602918, 0.044908331595, 0.044635750552, 0.044365821669,
0.044098507559, 0.043833771549, 0.043571577663, 0.043311890609,
0.043054675757, 0.042799899130, 0.042547527383, 0.042297527792,
0.042049868238, 0.041804517192, 0.041561443703, 0.041320617383,
0.041082008396, 0.040845587444, 0.040611325754, 0.040379195067,
0.040149167627, 0.039921216168, 0.039695313903, 0.039471434514,
0.039249552141, 0.039029641375, 0.038811677240, 0.038595635191,
0.038381491103, 0.038169221257, 0.037958802339, 0.037750211423,
0.037543425969, 0.037338423811, 0.037135183150, 0.036933682546,
0.036733900911, 0.036535817501, 0.036339411909, 0.036144664057,
0.035951554189, 0.035760062868, 0.035570170964, 0.035381859650,
0.035195110398, 0.035009904968, 0.034826225408, 0.034644054043,
0.034463373472, 0.034284166563, 0.034106416445, 0.033930106506,
0.033755220385, 0.033581741970, 0.033409655381, 0.033238945006,
0.033069595437, 0.032901591502, 0.032734918257, 0.032569560976,
0.032405505149, 0.032242736480, 0.032081240877, 0.031921004457,
0.031762013533, 0.031604254614, 0.031447714408, 0.031292379807,
0.031138237889, 0.030985275916, 0.030833481330, 0.030682841747,
0.030533344956, 0.030384978917, 0.030237731757, 0.030091591765,
0.029946547392, 0.029802587246, 0.029659700092, 0.029517874848,
0.029377100578, 0.029237366499, 0.029098661968, 0.028960976487,
0.028824299698, 0.028688621380, 0.028553931447, 0.028420219946,
0.028287477146, 0.028155693172, 0.028024858549, 0.027894963834,
0.027765999707, 0.027637956969, 0.027510826538, 0.027384599450,
0.027259266855, 0.027134820015, 0.027011250304, 0.026888549204,
0.026766708306, 0.026645719305, 0.026525574002, 0.026406264299,
0.026287782199, 0.026170119804, 0.026053269315, 0.025937223027,
0.025821973333, 0.025707512715, 0.025593833750, 0.025480929104,
0.025368791533, 0.025257413880, 0.025146789074, 0.025036910129,
0.024927770145, 0.024819362301, 0.024711679860, 0.024604716163,
0.024498464633, 0.024392918767, 0.024288072140, 0.024183918405,
0.024080451285, 0.023977664578, 0.023875552157, 0.023774107962,
0.023673326005, 0.023573200367, 0.023473725197, 0.023374894712,
0.023276703194, 0.023179144990, 0.023082214514, 0.022985906240,
0.022890214705, 0.022795134511, 0.022700660318, 0.022606786845,
0.022513508873, 0.022420821238, 0.022328718838, 0.022237196622,
0.022146249601, 0.022055872835, 0.021966061444, 0.021876810597,
0.021788115520, 0.021699971488, 0.021612373829, 0.021525317922,
0.021438799195, 0.021352813126, 0.021267355243, 0.021182421120,
0.021098006380, 0.021014106692, 0.020930717773, 0.020847835383,
0.020765455329, 0.020683573461, 0.020602185675, 0.020521287909,
0.020440876143, 0.020360946400, 0.020281494745, 0.020202517283,
0.020124010162, 0.020045969566, 0.019968391723, 0.019891272897,
0.019814609391, 0.019738397547, 0.019662633744, 0.019587314399,
0.019512435964, 0.019437994927, 0.019363987815, 0.019290411187,
0.019217261637, 0.019144535794, 0.019072230323, 0.019000341920,
0.018928867313, 0.018857803268, 0.018787146573, 0.018716894064,
0.018647042595, 0.018577589056, 0.018508530367, 0.018439863479,
0.018371585372, 0.018303693057, 0.018236183573, 0.018169053990,
0.018102301405, 0.018035922942, 0.017969915757, 0.017904277029,
0.017839003967, 0.017774093806, 0.017709543809, 0.017645351263,
0.017581513483, 0.017518027809, 0.017454891607, 0.017392102266,
0.017329657201, 0.017267553854, 0.017205789687, 0.017144362190,
0.017083268872, 0.017022507270, 0.016962074942, 0.016901969469,
0.016842188454, 0.016782729524, 0.016723590326, 0.016664768531,
0.016606261831, 0.016548067937, 0.016490184585, 0.016432609529,
0.016375340544, 0.016318375428, 0.016261711995, 0.016205348082,
0.016149281544, 0.016093510257, 0.016038032116, 0.015982845035,
0.015927946945, 0.015873335798, 0.015819009564, 0.015764966230,
0.015711203802, 0.015657720305, 0.015604513780, 0.015551582286,
0.015498923899, 0.015446536712, 0.015394418835, 0.015342568396,
0.015290983537, 0.015239662419, 0.015188603218, 0.015137804125,
0.015087263349, 0.015036979113, 0.014986949655, 0.014937173231,
0.014887648110, 0.014838372576, 0.014789344929, 0.014740563483,
0.014692026566, 0.014643732522, 0.014595679707, 0.014547866494,
0.014500291267, 0.014452952426, 0.014405848383, 0.014358977564,
0.014312338410, 0.014265929373, 0.014219748919, 0.014173795527,
0.014128067689, 0.014082563909, 0.014037282704, 0.013992222604,
0.013947382150, 0.013902759897, 0.013858354410, 0.013814164268,
0.013770188060, 0.013726424388, 0.013682871864, 0.013639529114,
0.013596394772, 0.013553467487, 0.013510745917, 0.013468228730,
0.013425914607, 0.013383802238, 0.013341890327, 0.013300177583,
0.013258662731, 0.013217344504, 0.013176221644, 0.013135292905,
0.013094557051, 0.013054012855, 0.013013659100, 0.012973494580,
0.012933518098, 0.012893728465, 0.012854124505, 0.012814705048,
0.012775468934, 0.012736415014, 0.012697542147, 0.012658849200,
0.012620335051, 0.012581998585, 0.012543838698, 0.012505854291,
0.012468044278, 0.012430407578, 0.012392943121, 0.012355649844,
0.012318526692, 0.012281572619, 0.012244786587, 0.012208167567,
0.012171714535, 0.012135426478, 0.012099302390, 0.012063341272,
0.012027542133, 0.011991903989, 0.011956425866, 0.011921106795,
0.011885945815, 0.011850941973, 0.011816094322, 0.011781401924,
0.011746863846, 0.011712479164, 0.011678246960, 0.011644166323,
0.011610236415, 0.011576456207, 0.011542824873, 0.011509341531,
0.011476005304, 0.011442815321, 0.011409770717, 0.011376870636,
0.011344114226, 0.011311500642, 0.011279029046, 0.011246698605,
0.011214508494, 0.011182457891, 0.011150545983, 0.011118771961,
0.011087135025, 0.011055634376, 0.011024269226, 0.010993038789,
0.010961942287, 0.010930978946, 0.010900147999, 0.010869448684,
0.010838880244, 0.010808441929, 0.010778132993, 0.010747952695,
0.010717900302, 0.010687975084, 0.010658176315, 0.010628503279,
0.010598955260, 0.010569531549, 0.010540231444, 0.010511054245,
0.010481999259, 0.010453065798, 0.010424253175, 0.010395560715,
0.010366987742, 0.010338533587, 0.010310197585, 0.010281979076,
0.010253877405, 0.010225891921, 0.010198021978, 0.010170266935,
0.010142626154, 0.010115099002, 0.010087684852, 0.010060383080,
0.010033193065, 0.010006114194, 0.009979145854, 0.009952287440,
0.009925538348, 0.009898897981, 0.009872365744, 0.009845941047,
0.009819623305, 0.009793411934, 0.009767306357, 0.009741306000,
0.009715410293, 0.009689618670, 0.009663930567, 0.009638345428,
0.009612862696, 0.009587481821, 0.009562202255, 0.009537023456,
0.009511944883, 0.009486966000, 0.009462086274, 0.009437305177,
0.009412622182, 0.009388036769, 0.009363548418, 0.009339156614,
0.009314860847, 0.009290660607, 0.009266555391, 0.009242544697,
0.009218628026, 0.009194804885, 0.009171074781, 0.009147437227,
0.009123891737, 0.009100437830, 0.009077075027, 0.009053802853,
0.009030620835, 0.009007528505, 0.008984525396, 0.008961611045,
0.008938784992, 0.008916046779, 0.008893395954, 0.008870832064,
0.008848354662, 0.008825963303, 0.008803657543, 0.008781436944,
0.008759301068, 0.008737249482, 0.008715281756, 0.008693397459,
0.008671596168, 0.008649877459, 0.008628240913, 0.008606686111,
0.008585212639, 0.008563820085, 0.008542508040, 0.008521276096,
0.008500123850, 0.008479050900, 0.008458056847, 0.008437141293,
0.008416303846, 0.008395544113, 0.008374861705, 0.008354256236,
0.008333727321, 0.008313274579, 0.008292897630, 0.008272596097,
0.008252369605, 0.008232217783, 0.008212140259, 0.008192136667,
0.008172206641, 0.008152349817, 0.008132565835, 0.008112854337,
0.008093214965, 0.008073647366, 0.008054151187, 0.008034726079,
0.008015371695, 0.007996087687, 0.007976873714, 0.007957729433,
0.007938654505, 0.007919648594, 0.007900711364, 0.007881842482,
0.007863041617, 0.007844308441, 0.007825642625, 0.007807043845,
0.007788511779, 0.007770046104, 0.007751646503, 0.007733312657,
0.007715044251, 0.007696840973, 0.007678702509, 0.007660628552,
0.007642618793, 0.007624672926, 0.007606790647, 0.007588971653,
0.007571215645, 0.007553522324, 0.007535891393, 0.007518322556,
0.007500815521, 0.007483369996, 0.007465985690, 0.007448662316,
0.007431399588, 0.007414197219, 0.007397054929, 0.007379972434,
0.007362949455, 0.007345985715, 0.007329080936, 0.007312234844,
0.007295447166, 0.007278717629, 0.007262045965, 0.007245431904,
0.007228875179, 0.007212375525, 0.007195932679, 0.007179546378,
0.007163216361, 0.007146942369, 0.007130724144, 0.007114561431,
0.007098453973, 0.007082401519, 0.007066403816, 0.007050460614,
0.007034571663, 0.007018736717, 0.007002955529, 0.006987227854,
0.006971553450, 0.006955932074, 0.006940363485, 0.006924847446,
0.006909383717, 0.006893972062, 0.006878612247, 0.006863304038,
0.006848047201, 0.006832841507, 0.006817686725, 0.006802582626,
0.006787528984, 0.006772525572, 0.006757572167, 0.006742668544,
0.006727814481, 0.006713009757, 0.006698254153, 0.006683547451,
0.006668889432, 0.006654279882, 0.006639718585, 0.006625205327,
0.006610739897, 0.006596322083, 0.006581951674, 0.006567628463,
0.006553352241, 0.006539122801, 0.006524939939, 0.006510803450,
0.006496713130, 0.006482668778, 0.006468670193, 0.006454717175,
0.006440809524, 0.006426947044, 0.006413129538, 0.006399356810,
0.006385628666, 0.006371944913, 0.006358305357, 0.006344709808,
0.006331158076, 0.006317649971, 0.006304185306, 0.006290763892,
0.006277385544, 0.006264050077, 0.006250757306, 0.006237507049,
0.006224299123, 0.006211133347, 0.006198009541, 0.006184927525,
0.006171887122, 0.006158888154, 0.006145930444, 0.006133013818,
0.006120138100, 0.006107303117, 0.006094508697, 0.006081754667,
0.006069040857, 0.006056367097, 0.006043733219, 0.006031139053,
0.006018584432, 0.006006069192, 0.005993593165, 0.005981156187,
0.005968758095, 0.005956398726, 0.005944077917, 0.005931795508,
0.005919551338, 0.005907345249, 0.005895177080, 0.005883046675,
0.005870953876, 0.005858898528, 0.005846880474, 0.005834899562,
0.005822955635, 0.005811048543, 0.005799178132, 0.005787344252,
0.005775546751, 0.005763785481, 0.005752060291, 0.005740371034,
0.005728717562, 0.005717099730, 0.005705517389, 0.005693970397,
0.005682458607, 0.005670981876, 0.005659540062, 0.005648133022,
0.005636760614, 0.005625422698, 0.005614119134, 0.005602849782,
0.005591614503, 0.005580413160, 0.005569245616, 0.005558111734,
0.005547011377, 0.005535944412, 0.005524910702, 0.005513910116,
0.005502942519, 0.005492007778, 0.005481105763, 0.005470236342,
0.005459399385, 0.005448594761, 0.005437822342, 0.005427081999,
0.005416373605, 0.005405697031, 0.005395052152, 0.005384438841,
0.005373856973, 0.005363306424, 0.005352787069, 0.005342298785,
0.005331841449, 0.005321414939, 0.005311019132, 0.005300653909,
0.005290319149, 0.005280014731, 0.005269740537, 0.005259496447,
0.005249282345, 0.005239098112, 0.005228943631, 0.005218818786,
0.005208723461, 0.005198657585, 0.005188620956, 0.005178613502,
0.005168635112, 0.005158685671, 0.005148765068, 0.005138873190,
0.005129009927, 0.005119175167, 0.005109368800, 0.005099590716,
0.005089840807, 0.005080118963, 0.005070425077, 0.005060759041,
0.005051120748, 0.005041510090, 0.005031926964, 0.005022371261,
0.005012842879, 0.005003341711, 0.004993867654, 0.004984420605,
0.004975000461, 0.004965607118, 0.004956240476, 0.004946900431,
0.004937586884, 0.004928299734, 0.004919038880, 0.004909804223,
0.004900595664, 0.004891413104, 0.004882256445, 0.004873125588,
0.004864020438, 0.004854940896, 0.004845886867, 0.004836858255,
0.004827854964, 0.004818876899, 0.004809923965, 0.004800996069,
0.004792093117, 0.004783215015, 0.004774361670, 0.004765532991,
0.004756728886, 0.004747949262, 0.004739194029, 0.004730463096,
0.004721756373, 0.004713073770, 0.004704415197, 0.004695780566,
0.004687169789, 0.004678582776, 0.004670019440, 0.004661479694,
0.004652963451, 0.004644470625, 0.004636001128, 0.004627554876,
0.004619131783, 0.004610731765, 0.004602354735, 0.004594000612,
0.004585669309, 0.004577360745, 0.004569074837, 0.004560811501,
0.004552570655, 0.004544352217, 0.004536156107, 0.004527982242,
0.004519830542, 0.004511700927, 0.004503593316, 0.004495507630,
0.004487443790, 0.004479401716, 0.004471381331, 0.004463382555,
0.004455405311, 0.004447449521, 0.004439515108, 0.004431601996,
0.004423710108, 0.004415839367, 0.004407989699, 0.004400161027,
0.004392353276, 0.004384566371, 0.004376800239, 0.004369054805,
0.004361329995, 0.004353625735, 0.004345941954, 0.004338278577,
0.004330635532, 0.004323012748, 0.004315410152, 0.004307827672,
0.004300265239, 0.004292722780, 0.004285200226, 0.004277697505,
0.004270214548, 0.004262751286, 0.004255307649, 0.004247883568,
0.004240478974, 0.004233093799, 0.004225727975, 0.004218381434,
0.004211054109, 0.004203745931, 0.004196456836, 0.004189186754,
0.004181935622, 0.004174703371, 0.004167489937, 0.004160295255,
0.004153119258, 0.004145961882, 0.004138823063, 0.004131702735,
0.004124600836, 0.004117517301, 0.004110452067, 0.004103405070,
0.004096376248, 0.004089365537, 0.004082372876, 0.004075398203,
0.004068441455, 0.004061502571, 0.004054581489, 0.004047678149,
0.004040792489, 0.004033924449, 0.004027073970, 0.004020240990,
0.004013425450, 0.004006627290, 0.003999846452, 0.003993082875,
0.003986336503, 0.003979607275, 0.003972895133, 0.003966200021,
0.003959521879, 0.003952860651, 0.003946216278, 0.003939588705,
0.003932977874, 0.003926383729, 0.003919806213, 0.003913245271,
0.003906700846, 0.003900172883, 0.003893661326, 0.003887166121,
0.003880687213, 0.003874224546, 0.003867778066, 0.003861347720,
0.003854933453, 0.003848535212, 0.003842152942, 0.003835786592,
0.003829436106, 0.003823101434, 0.003816782521, 0.003810479316,
0.003804191766, 0.003797919820, 0.003791663426, 0.003785422532,
0.003779197086, 0.003772987038, 0.003766792337, 0.003760612932,
0.003754448773, 0.003748299808, 0.003742165989, 0.003736047266,
0.003729943588, 0.003723854906, 0.003717781171, 0.003711722334,
0.003705678346, 0.003699649159, 0.003693634723, 0.003687634991,
0.003681649915, 0.003675679447, 0.003669723539, 0.003663782143,
0.003657855213, 0.003651942701, 0.003646044561, 0.003640160745,
0.003634291208, 0.003628435902, 0.003622594783, 0.003616767803,
0.003610954917, 0.003605156080, 0.003599371246, 0.003593600370,
0.003587843407, 0.003582100311, 0.003576371040, 0.003570655547,
0.003564953789, 0.003559265722, 0.003553591301, 0.003547930483,
0.003542283225, 0.003536649482, 0.003531029212, 0.003525422371,
0.003519828917, 0.003514248807, 0.003508681999, 0.003503128449,
0.003497588116, 0.003492060959, 0.003486546934, 0.003481046000,
0.003475558116, 0.003470083240, 0.003464621331, 0.003459172348,
0.003453736250, 0.003448312997, 0.003442902548, 0.003437504862,
0.003432119899, 0.003426747620, 0.003421387983, 0.003416040950,
0.003410706481, 0.003405384537, 0.003400075077, 0.003394778063,
0.003389493457, 0.003384221218, 0.003378961309, 0.003373713691,
0.003368478325, 0.003363255173, 0.003358044198, 0.003352845361,
0.003347658624, 0.003342483951, 0.003337321302, 0.003332170642,
0.003327031932, 0.003321905136, 0.003316790217, 0.003311687138,
0.003306595863, 0.003301516354, 0.003296448576, 0.003291392492,
0.003286348067, 0.003281315264, 0.003276294048, 0.003271284383,
0.003266286233, 0.003261299564, 0.003256324339, 0.003251360524,
0.003246408084, 0.003241466984, 0.003236537189, 0.003231618665,
0.003226711377, 0.003221815291, 0.003216930373, 0.003212056589,
0.003207193904, 0.003202342286, 0.003197501700, 0.003192672113,
0.003187853491, 0.003183045802, 0.003178249012, 0.003173463088,
0.003168687997, 0.003163923706, 0.003159170184, 0.003154427396,
0.003149695312, 0.003144973898, 0.003140263123, 0.003135562954,
0.003130873359, 0.003126194308, 0.003121525767, 0.003116867706,
0.003112220093, 0.003107582897, 0.003102956086, 0.003098339630,
0.003093733497, 0.003089137657, 0.003084552079, 0.003079976733,
0.003075411587, 0.003070856612, 0.003066311777, 0.003061777051,
0.003057252406, 0.003052737811, 0.003048233237, 0.003043738652,
0.003039254029, 0.003034779337, 0.003030314547, 0.003025859629,
0.003021414556, 0.003016979297, 0.003012553823, 0.003008138106,
0.003003732117, 0.002999335828, 0.002994949209, 0.002990572233,
0.002986204871, 0.002981847094, 0.002977498876, 0.002973160187,
0.002968831000, 0.002964511287, 0.002960201021, 0.002955900173,
0.002951608717, 0.002947326624, 0.002943053868, 0.002938790421,
0.002934536257, 0.002930291348, 0.002926055667, 0.002921829188,
0.002917611884, 0.002913403728, 0.002909204693, 0.002905014755,
0.002900833885, 0.002896662058, 0.002892499247, 0.002888345427,
0.002884200572, 0.002880064656, 0.002875937653, 0.002871819538,
0.002867710284, 0.002863609866, 0.002859518260, 0.002855435439,
0.002851361379, 0.002847296054, 0.002843239440, 0.002839191511,
0.002835152243, 0.002831121611, 0.002827099590, 0.002823086155,
0.002819081283, 0.002815084948, 0.002811097127, 0.002807117795,
0.002803146928, 0.002799184502, 0.002795230493, 0.002791284877,
0.002787347631, 0.002783418730, 0.002779498151, 0.002775585871,
0.002771681866, 0.002767786112, 0.002763898587, 0.002760019267,
0.002756148128, 0.002752285149, 0.002748430306, 0.002744583575,
0.002740744935, 0.002736914363, 0.002733091835, 0.002729277330,
0.002725470825, 0.002721672297, 0.002717881724, 0.002714099084,
0.002710324354, 0.002706557514, 0.002702798539, 0.002699047410,
0.002695304103, 0.002691568597, 0.002687840870, 0.002684120901,
0.002680408668, 0.002676704149, 0.002673007324, 0.002669318170,
0.002665636667, 0.002661962793, 0.002658296527, 0.002654637849,
0.002650986737, 0.002647343170, 0.002643707128, 0.002640078589,
0.002636457533, 0.002632843940, 0.002629237789, 0.002625639059,
0.002622047730, 0.002618463782, 0.002614887194, 0.002611317947,
0.002607756019, 0.002604201392, 0.002600654045, 0.002597113958,
0.002593581111, 0.002590055485, 0.002586537060, 0.002583025816,
0.002579521733, 0.002576024793, 0.002572534976, 0.002569052261,
0.002565576631, 0.002562108065, 0.002558646545, 0.002555192052,
0.002551744566, 0.002548304068, 0.002544870540, 0.002541443962,
0.002538024316, 0.002534611584, 0.002531205746, 0.002527806784,
0.002524414679, 0.002521029413, 0.002517650967, 0.002514279324,
0.002510914464, 0.002507556370, 0.002504205023, 0.002500860405,
0.002497522499, 0.002494191286, 0.002490866749, 0.002487548869,
0.002484237628, 0.002480933010, 0.002477634995, 0.002474343568,
0.002471058709, 0.002467780402, 0.002464508629, 0.002461243373,
0.002457984616, 0.002454732341, 0.002451486532, 0.002448247169,
0.002445014238, 0.002441787720, 0.002438567598, 0.002435353856,
0.002432146477, 0.002428945444, 0.002425750740, 0.002422562349,
0.002419380253, 0.002416204437, 0.002413034883, 0.002409871575,
0.002406714497, 0.002403563633, 0.002400418965, 0.002397280478,
0.002394148183, 0.002391022009, 0.002387901967, 0.002384788041,
0.002381680216, 0.002378578474, 0.002375482801, 0.002372393180,
0.002369309596, 0.002366232033, 0.002363160475, 0.002360094907,
0.002357035312, 0.002353981676, 0.002350933983, 0.002347892217,
0.002344856364, 0.002341826407, 0.002338802331, 0.002335784122,
0.002332771764, 0.002329765241, 0.002326764540, 0.002323769644,
0.002320780538, 0.002317797209, 0.002314819640, 0.002311847818,
0.002308881726, 0.002305921351, 0.002302966678, 0.002300017692,
0.002297074378, 0.002294136722, 0.002291204709, 0.002288278326,
0.002285357556, 0.002282442387, 0.002279532804, 0.002276628792,
0.002273730337, 0.002270837425, 0.002267950042, 0.002265068174,
0.002262191807, 0.002259320926, 0.002256455517, 0.002253595568,
0.002250741063, 0.002247891990, 0.002245048333, 0.002242210080,
0.002239377217, 0.002236549730, 0.002233727606, 0.002230910830,
0.002228099390, 0.002225293271, 0.002222492461, 0.002219696946,
0.002216906713, 0.002214121748, 0.002211342038, 0.002208567569,
0.002205798330, 0.002203034306, 0.002200275484, 0.002197521851,
0.002194773395, 0.002192030102, 0.002189291959, 0.002186558953,
0.002183831072, 0.002181108303, 0.002178390633, 0.002175678048,
0.002172970537, 0.002170268087, 0.002167570685, 0.002164878319,
0.002162190975, 0.002159508643, 0.002156831308, 0.002154158958,
0.002151491582, 0.002148829167, 0.002146171700, 0.002143519169,
0.002140871562, 0.002138228868, 0.002135591072, 0.002132958164,
0.002130330132, 0.002127706963, 0.002125088645, 0.002122475167,
0.002119866516, 0.002117262680, 0.002114663648, 0.002112069408,
0.002109479948, 0.002106895257, 0.002104315321, 0.002101740131,
0.002099169674, 0.002096603938, 0.002094042912, 0.002091486585,
0.002088934945, 0.002086387980, 0.002083845679, 0.002081308031,
0.002078775024, 0.002076246648, 0.002073722889, 0.002071203738,
0.002068689184, 0.002066179214, 0.002063673818, 0.002061172985,
0.002058676703, 0.002056184961, 0.002053697750, 0.002051215056,
0.002048736871, 0.002046263181, 0.002043793978, 0.002041329250,
0.002038868985, 0.002036413174, 0.002033961805, 0.002031514868,
0.002029072352, 0.002026634246, 0.002024200540, 0.002021771223,
0.002019346285, 0.002016925715, 0.002014509503, 0.002012097637,
0.002009690108, 0.002007286906, 0.002004888019, 0.002002493438,
0.002000103152, 0.001997717151, 0.001995335424, 0.001992957962,
0.001990584754, 0.001988215791, 0.001985851061, 0.001983490555,
0.001981134263, 0.001978782174, 0.001976434279, 0.001974090568,
0.001971751031, 0.001969415658, 0.001967084438, 0.001964757363,
0.001962434422, 0.001960115605, 0.001957800903, 0.001955490306,
0.001953183805, 0.001950881388, 0.001948583048, 0.001946288774,
0.001943998556, 0.001941712386, 0.001939430253, 0.001937152148,
0.001934878062, 0.001932607985, 0.001930341907, 0.001928079819,
0.001925821713, 0.001923567577, 0.001921317404, 0.001919071184,
0.001916828908, 0.001914590565, 0.001912356148, 0.001910125647,
0.001907899053, 0.001905676356, 0.001903457548, 0.001901242619,
0.001899031560, 0.001896824363, 0.001894621019, 0.001892421517,
0.001890225851, 0.001888034009, 0.001885845985, 0.001883661768,
0.001881481350, 0.001879304722, 0.001877131876, 0.001874962802,
0.001872797491, 0.001870635936, 0.001868478127, 0.001866324056,
0.001864173714, 0.001862027093, 0.001859884183, 0.001857744977,
0.001855609465, 0.001853477639, 0.001851349492, 0.001849225013,
0.001847104196, 0.001844987031, 0.001842873509, 0.001840763624,
0.001838657366, 0.001836554726, 0.001834455698, 0.001832360271,
0.001830268439, 0.001828180193, 0.001826095525, 0.001824014426,
0.001821936888, 0.001819862904, 0.001817792465, 0.001815725562,
0.001813662189, 0.001811602337, 0.001809545997, 0.001807493163,
0.001805443825, 0.001803397977, 0.001801355609, 0.001799316715,
0.001797281286, 0.001795249314, 0.001793220792, 0.001791195711,
0.001789174065, 0.001787155845, 0.001785141043, 0.001783129652,
0.001781121664, 0.001779117071, 0.001777115866, 0.001775118040,
0.001773123588, 0.001771132500, 0.001769144769, 0.001767160388,
0.001765179349, 0.001763201645, 0.001761227267, 0.001759256210,
0.001757288464, 0.001755324024, 0.001753362880, 0.001751405027,
0.001749450457, 0.001747499161, 0.001745551134, 0.001743606367,
0.001741664853, 0.001739726586, 0.001737791557, 0.001735859760,
0.001733931188, 0.001732005833, 0.001730083687, 0.001728164745,
0.001726248999, 0.001724336441, 0.001722427065, 0.001720520863,
0.001718617829, 0.001716717955, 0.001714821235, 0.001712927662,
0.001711037227, 0.001709149926, 0.001707265750, 0.001705384693,
0.001703506748, 0.001701631907, 0.001699760165, 0.001697891514,
0.001696025948, 0.001694163459, 0.001692304041, 0.001690447687,
0.001688594391, 0.001686744146, 0.001684896944, 0.001683052780,
0.001681211646, 0.001679373537, 0.001677538444, 0.001675706363,
0.001673877285, 0.001672051206, 0.001670228117, 0.001668408013,
0.001666590887, 0.001664776732, 0.001662965542, 0.001661157311,
0.001659352031, 0.001657549698, 0.001655750303, 0.001653953842,
0.001652160307, 0.001650369692, 0.001648581990, 0.001646797197,
0.001645015304, 0.001643236306, 0.001641460196, 0.001639686969,
0.001637916618, 0.001636149137, 0.001634384519, 0.001632622759,
0.001630863850, 0.001629107786, 0.001627354561, 0.001625604168,
0.001623856603, 0.001622111858, 0.001620369927, 0.001618630805,
0.001616894486, 0.001615160962, 0.001613430230, 0.001611702281,
0.001609977111, 0.001608254714, 0.001606535083, 0.001604818212,
0.001603104096, 0.001601392729, 0.001599684105, 0.001597978218,
0.001596275062, 0.001594574631, 0.001592876920, 0.001591181923,
0.001589489633, 0.001587800046, 0.001586113155, 0.001584428955,
0.001582747439, 0.001581068603, 0.001579392440, 0.001577718945,
0.001576048112, 0.001574379936, 0.001572714411, 0.001571051531,
0.001569391290, 0.001567733684, 0.001566078706, 0.001564426351,
0.001562776613, 0.001561129487, 0.001559484967, 0.001557843049,
0.001556203725, 0.001554566992, 0.001552932843, 0.001551301272,
0.001549672276, 0.001548045847, 0.001546421981, 0.001544800672,
0.001543181915, 0.001541565705, 0.001539952036, 0.001538340903,
0.001536732301, 0.001535126223, 0.001533522666, 0.001531921623,
0.001530323090, 0.001528727061, 0.001527133531, 0.001525542495,
0.001523953947, 0.001522367883, 0.001520784296, 0.001519203183,
0.001517624538, 0.001516048355, 0.001514474629, 0.001512903357,
0.001511334531, 0.001509768148, 0.001508204202, 0.001506642688,
0.001505083601, 0.001503526936, 0.001501972689, 0.001500420853,
0.001498871424, 0.001497324398, 0.001495779768, 0.001494237530,
0.001492697680, 0.001491160212, 0.001489625121, 0.001488092403,
0.001486562052, 0.001485034064, 0.001483508433, 0.001481985156,
0.001480464226, 0.001478945640, 0.001477429393, 0.001475915478,
0.001474403893, 0.001472894632, 0.001471387690, 0.001469883063,
0.001468380745, 0.001466880732, 0.001465383020, 0.001463887603,
0.001462394478, 0.001460903638, 0.001459415080, 0.001457928799,
0.001456444791, 0.001454963050, 0.001453483572, 0.001452006352,
0.001450531386, 0.001449058669, 0.001447588197, 0.001446119965,
0.001444653969, 0.001443190203, 0.001441728664, 0.001440269347,
0.001438812248, 0.001437357361, 0.001435904683, 0.001434454208,
0.001433005933, 0.001431559854, 0.001430115964, 0.001428674261,
0.001427234740, 0.001425797396, 0.001424362225, 0.001422929223,
0.001421498385, 0.001420069706, 0.001418643183, 0.001417218812])
H3 = np.array(
[-0.752252778064,-0.751951907041,-0.751049654958,-0.749547104192,
-0.747446056783,-0.744749031550,-0.741459260066,-0.737580681497,
-0.733117936310,-0.728076358876,-0.722461968963,-0.716281462157,
-0.709542199216,-0.702252194382,-0.694420102672,-0.686055206185,
-0.677167399429,-0.667767173726,-0.657865600697,-0.647474314879,
-0.636605495506,-0.625271847477,-0.613486581562,-0.601263393883,
-0.588616444696,-0.575560336534,-0.562110091745,-0.548281129459,
-0.534089242044,-0.519550571089,-0.504681582955,-0.489499043946,
-0.474019995148,-0.458261726974,-0.442241753478,-0.425977786464,
-0.409487709464,-0.392789551607,-0.375901461443,-0.358841680766,
-0.341628518477,-0.324280324544,-0.306815464089,-0.289252291671,
-0.271609125783,-0.253904223627,-0.236155756202,-0.218381783743,
-0.200600231563,-0.182828866326,-0.165085272797,-0.147386831102,
-0.129750694537,-0.112193767960,-0.094732686795,-0.077383796679,
-0.060163133797,-0.043086405902,-0.026168974085,-0.009425835284,
0.007128394424, 0.023479495729, 0.039613661181, 0.055517509450,
0.071178098729, 0.086582939277, 0.101720005089, 0.116577744673,
0.131145090944, 0.145411470205, 0.159366810241, 0.173001547492,
0.186306633327, 0.199273539414, 0.211894262182, 0.224161326392,
0.236067787813, 0.247607235019, 0.258773790309, 0.269562109768,
0.279967382485, 0.289985328924, 0.299612198492, 0.308844766297,
0.317680329127, 0.326116700665, 0.334152205965, 0.341785675206,
0.349016436756, 0.355844309555, 0.362269594861, 0.368293067366,
0.373915965728, 0.379139982525, 0.383967253684, 0.388400347386,
0.392442252501, 0.396096366567, 0.399366483349, 0.402256780005,
0.404771803894, 0.406916459051, 0.408695992364, 0.410115979481,
0.411182310478, 0.411901175319, 0.412279049136, 0.412322677365,
0.412039060758, 0.411435440304, 0.410519282100, 0.409298262172,
0.407780251309, 0.405973299902, 0.403885622848, 0.401525584513,
0.398901683806, 0.396022539369, 0.392896874922, 0.389533504769,
0.385941319503, 0.382129271921, 0.378106363172, 0.373881629157,
0.369464127198, 0.364862923000, 0.360087077909, 0.355145636493,
0.350047614456, 0.344801986899, 0.339417676935, 0.333903544682,
0.328268376626, 0.322520875384, 0.316669649852, 0.310723205768,
0.304689936683, 0.298578115342, 0.292395885492, 0.286151254111,
0.279852084063, 0.273506087181, 0.267120817773, 0.260703666564,
0.254261855060, 0.247802430334, 0.241332260239, 0.234858029039,
0.228386233449, 0.221923179092, 0.215474977354, 0.209047542641,
0.202646590024, 0.196277633269, 0.189945983241, 0.183656746711,
0.177414825371, 0.171224915479, 0.165091507575, 0.159018886653,
0.153011132637, 0.147072121151, 0.141205524598, 0.135414813519,
0.129703258229, 0.124073930714, 0.118529706783, 0.113073268455,
0.107707106577, 0.102433523653, 0.097254636883, 0.092172381387,
0.087188513608, 0.082304614890, 0.077522095202, 0.072842197009,
0.068265999280, 0.063794421603, 0.059428228425, 0.055168033374,
0.051014303682, 0.046967364672, 0.043027404315, 0.039194477841,
0.035468512397, 0.031849311736, 0.028336560933, 0.024929831117,
0.021628584210, 0.018432177663, 0.015339869187, 0.012350821460,
0.009464106811, 0.006678711873, 0.003993542194, 0.001407426800,
-0.001080877289,-0.003472680608,-0.005769356857,-0.007972338583,
-0.010083112932,-0.012103217485,-0.014034236175,-0.015877795293,
-0.017635559582,-0.019309228431,-0.020900532156,-0.022411228396,
-0.023843098593,-0.025197944596,-0.026477585353,-0.027683853723,
-0.028818593393,-0.029883655903,-0.030880897783,-0.031812177804,
-0.032679354333,-0.033484282807,-0.034228813314,-0.034914788284,
-0.035544040295,-0.036118389978,-0.036639644043,-0.037109593368,
-0.037530011368,-0.037902652131,-0.038229248990,-0.038511513028,
-0.038751131720,-0.038949767669,-0.039109057435,-0.039230610461,
-0.039316008087,-0.039366802653,-0.039384516689,-0.039370642184,
-0.039326639941,-0.039253939009,-0.039153936188,-0.039027995612,
-0.038877448393,-0.038703592350,-0.038507691783,-0.038290977323,
-0.038054645839,-0.037799860397,-0.037527750284,-0.037239411070,
-0.036935904732,-0.036618259817,-0.036287471656,-0.035944502607,
-0.035590282354,-0.035225708231,-0.034851645582,-0.034468928160,
-0.034078358546,-0.033680708602,-0.033276719950,-0.032867104470,
-0.032452544821,-0.032033694986,-0.031611180823,-0.031185600648,
-0.030757525815,-0.030327501320,-0.029896046409,-0.029463655198,
-0.029030797297,-0.028597918444,-0.028165441141,-0.027733765290,
-0.027303268837,-0.026874308413,-0.026447219972,-0.026022319432,
-0.025599903313,-0.025180249365,-0.024763617198,-0.024350248907,
-0.023940369683,-0.023534188425,-0.023131898342,-0.022733677544,
-0.022339689625,-0.021950084244,-0.021564997680,-0.021184553396,
-0.020808862576,-0.020438024658,-0.020072127859,-0.019711249681,
-0.019355457408,-0.019004808592,-0.018659351527,-0.018319125708,
-0.017984162280,-0.017654484470,-0.017330108015,-0.017011041567,
-0.016697287096,-0.016388840267,-0.016085690819,-0.015787822923,
-0.015495215525,-0.015207842688,-0.014925673910,-0.014648674439,
-0.014376805566,-0.014110024919,-0.013848286738,-0.013591542134,
-0.013339739353,-0.013092824008,-0.012850739321,-0.012613426340,
-0.012380824123,-0.012152870052,-0.011929499863,-0.011710647936,
-0.011496247440,-0.011286230502,-0.011080528362,-0.010879071520,
-0.010681789878,-0.010488612873,-0.010299469600,-0.010114288929,
-0.009932999618,-0.009755530417,-0.009581810161,-0.009411767864,
-0.009245332805,-0.009082434602,-0.008923003292,-0.008766969393,
-0.008614263971,-0.008464818695,-0.008318565892,-0.008175438598,
-0.008035370597,-0.007898296464,-0.007764151606,-0.007632872286,
-0.007504395421,-0.007378659557,-0.007255603473,-0.007135167138,
-0.007017291500,-0.006901918498,-0.006788991072,-0.006678453177,
-0.006570249787,-0.006464326900,-0.006360631544,-0.006259111777,
-0.006159716688,-0.006062396393,-0.005967102034,-0.005873785773,
-0.005782400788,-0.005692901265,-0.005605242388,-0.005519380334,
-0.005435272260,-0.005352876292,-0.005272151518,-0.005193057969,
-0.005115556612,-0.005039609333,-0.004965178925,-0.004892229074,
-0.004820724345,-0.004750630162,-0.004681912801,-0.004614539369,
-0.004548477789,-0.004483696787,-0.004420165872,-0.004357855325,
-0.004296736179,-0.004236780206,-0.004177959899,-0.004120248458,
-0.004063619772,-0.004008048406,-0.003953509583,-0.003899979171,
-0.003847433665,-0.003795850176,-0.003745206410,-0.003695480658,
-0.003646651783,-0.003598699199,-0.003551602863,-0.003505343259,
-0.003459901384,-0.003415258734,-0.003371397294,-0.003328299521,
-0.003285948336,-0.003244327106,-0.003203419637,-0.003163210160,
-0.003123683318,-0.003084824158,-0.003046618118,-0.003009051013,
-0.002972109033,-0.002935778724,-0.002900046982,-0.002864901043,
-0.002830328472,-0.002796317156,-0.002762855295,-0.002729931387,
-0.002697534230,-0.002665652905,-0.002634276769,-0.002603395454,
-0.002572998849,-0.002543077100,-0.002513620601,-0.002484619985,
-0.002456066121,-0.002427950100,-0.002400263239,-0.002372997065,
-0.002346143316,-0.002319693929,-0.002293640920,-0.002267976851,
-0.002242694116,-0.002217785407,-0.002193243589,-0.002169061700,
-0.002145232941,-0.002121750674,-0.002098608417,-0.002075799841,
-0.002053318764,-0.002031159126,-0.002009315067,-0.001987780805,
-0.001966550708,-0.001945619272,-0.001924981119,-0.001904630990,
-0.001884563747,-0.001864774365,-0.001845257931,-0.001826009640,
-0.001807024795,-0.001788298800,-0.001769827160,-0.001751605476,
-0.001733629448,-0.001715894863,-0.001698397604,-0.001681133636,
-0.001664099014,-0.001647289874,-0.001630702434,-0.001614332989,
-0.001598179129,-0.001582234872,-0.001566497956,-0.001550964973,
-0.001535632585,-0.001520497524,-0.001505556585,-0.001490806630,
-0.001476244583,-0.001461867430,-0.001447672214,-0.001433656041,
-0.001419816070,-0.001406149518,-0.001392653656,-0.001379325806,
-0.001366163345,-0.001353163699,-0.001340324341,-0.001327642797,
-0.001315116635,-0.001302743474,-0.001290520974,-0.001278446841,
-0.001266518822,-0.001254734709,-0.001243092331,-0.001231589562,
-0.001220224309,-0.001208994523,-0.001197898190,-0.001186933332,
-0.001176098007,-0.001165390310,-0.001154808368,-0.001144350341,
-0.001134014426,-0.001123798847,-0.001113701861,-0.001103721759,
-0.001093856857,-0.001084105504,-0.001074466076,-0.001064936978,
-0.001055516643,-0.001046203530,-0.001036996125,-0.001027892940,
-0.001018892513,-0.001009993405,-0.001001194204,-0.000992493520,
-0.000983889986,-0.000975382261,-0.000966969022,-0.000958648972,
-0.000950420834,-0.000942283351,-0.000934235288,-0.000926275431,
-0.000918402585,-0.000910615574,-0.000902913243,-0.000895294453,
-0.000887758087,-0.000880303042,-0.000872928236,-0.000865632604,
-0.000858415095,-0.000851274680,-0.000844210340,-0.000837221079,
-0.000830305911,-0.000823463869,-0.000816693999,-0.000809995364,
-0.000803367040,-0.000796808118,-0.000790317704,-0.000783894917,
-0.000777538888,-0.000771248764,-0.000765023705,-0.000758862881,
-0.000752765478,-0.000746730693,-0.000740757735,-0.000734845825,
-0.000728994196,-0.000723202092,-0.000717468770,-0.000711793497,
-0.000706175549,-0.000700614217,-0.000695108799,-0.000689658606,
-0.000684262956,-0.000678921179,-0.000673632543,-0.000668396541,
-0.000663212461,-0.000658079671,-0.000652997548,-0.000647965479,
-0.000642982859,-0.000638049094,-0.000633163594,-0.000628325781,
-0.000623535085,-0.000618790943,-0.000614092801,-0.000609440112,
-0.000604832337,-0.000600268946,-0.000595749415,-0.000591273227,
-0.000586839873,-0.000582448852,-0.000578099668,-0.000573791833,
-0.000569524867,-0.000565298294,-0.000561111648,-0.000556964465,
-0.000552856292,-0.000548786679,-0.000544755183,-0.000540761368,
-0.000536804803,-0.000532885063,-0.000529001730,-0.000525154389,
-0.000521342633,-0.000517566059,-0.000513824271,-0.000510116877,
-0.000506443491,-0.000502803731,-0.000499197222,-0.000495623592,
-0.000492082475,-0.000488573510,-0.000485096341,-0.000481650614,
-0.000478235985,-0.000474852108,-0.000471498648,-0.000468175269,
-0.000464881625,-0.000461617425,-0.000458382332,-0.000455176027,
-0.000451998200,-0.000448848541,-0.000445726746,-0.000442632514,
-0.000439565547,-0.000436525555,-0.000433512245,-0.000430525334,
-0.000427564539,-0.000424629581,-0.000421720186,-0.000418836083,
-0.000415977003,-0.000413142681,-0.000410332856,-0.000407547271,
-0.000404785670,-0.000402047802,-0.000399333418,-0.000396642273,
-0.000393974124,-0.000391328732,-0.000388705860,-0.000386105276,
-0.000383526747,-0.000380970048,-0.000378434951,-0.000375921236,
-0.000373428682,-0.000370957073,-0.000368506194,-0.000366075834,
-0.000363665783,-0.000361275835,-0.000358905786,-0.000356555434,
-0.000354224580,-0.000351913028,-0.000349620581,-0.000347347050,
-0.000345092242,-0.000342855972,-0.000340638053,-0.000338438303,
-0.000336256539,-0.000334092584,-0.000331946261,-0.000329817395,
-0.000327705812,-0.000325611343,-0.000323533819,-0.000321473073,
-0.000319428941,-0.000317401259,-0.000315389867,-0.000313394605,
-0.000311415316,-0.000309451846,-0.000307504039,-0.000305571745,
-0.000303654813,-0.000301753094,-0.000299866443,-0.000297994713,
-0.000296137762,-0.000294295448,-0.000292467630,-0.000290654171,
-0.000288854932,-0.000287069779,-0.000285298577,-0.000283541195,
-0.000281797501,-0.000280067365,-0.000278350660,-0.000276647259,
-0.000274957037,-0.000273279870,-0.000271615635,-0.000269964212,
-0.000268325481,-0.000266699323,-0.000265085621,-0.000263484260,
-0.000261895124,-0.000260318101,-0.000258753078,-0.000257199946,
-0.000255658593,-0.000254128911,-0.000252610794,-0.000251104135,
-0.000249610990,-0.000248126920,-0.000246653996,-0.000245192119,
-0.000243741185,-0.000242301097,-0.000240871755,-0.000239453063,
-0.000238044925,-0.000236647244,-0.000235259927,-0.000233882881,
-0.000232516014,-0.000231159234,-0.000229812450,-0.000228475574,
-0.000227148518,-0.000225831193,-0.000224523513,-0.000223225393,
-0.000221936748,-0.000220657494,-0.000219387548,-0.000218126828,
-0.000216875252,-0.000215632741,-0.000214399215,-0.000213174594,
-0.000211958802,-0.000210751761,-0.000209553394,-0.000208363626,
-0.000207182383,-0.000206009589,-0.000204845173,-0.000203689061,
-0.000202541182,-0.000201401464,-0.000200269790,-0.000199146185,
-0.000198030532,-0.000196922764,-0.000195822813,-0.000194730613,
-0.000193646097,-0.000192569199,-0.000191499856,-0.000190438001,
-0.000189383574,-0.000188336509,-0.000187296746,-0.000186264221,
-0.000185238875,-0.000184220647,-0.000183209477,-0.000182205306,
-0.000181208074,-0.000180217725,-0.000179234200,-0.000178257443,
-0.000177287397,-0.000176324006,-0.000175367216,-0.000174416971,
-0.000173473217,-0.000172535901,-0.000171604968,-0.000170680368,
-0.000169762047,-0.000168849954,-0.000167944038,-0.000167044248,
-0.000166150534,-0.000165262847,-0.000164381136,-0.000163505354,
-0.000162635453,-0.000161771383,-0.000160913100,-0.000160060554,
-0.000159213700,-0.000158372493,-0.000157536885,-0.000156706833,
-0.000155882291,-0.000155063216,-0.000154249563,-0.000153441289,
-0.000152638351,-0.000151840707,-0.000151048314,-0.000150261130,
-0.000149479114,-0.000148702224,-0.000147930421,-0.000147163665,
-0.000146401914,-0.000145645130,-0.000144893273,-0.000144146305,
-0.000143404187,-0.000142666880,-0.000141934348,-0.000141206553,
-0.000140483459,-0.000139765027,-0.000139051222,-0.000138342008,
-0.000137637349,-0.000136937210,-0.000136241554,-0.000135550350,
-0.000134863561,-0.000134181153,-0.000133503093,-0.000132829347,
-0.000132159882,-0.000131494665,-0.000130833663,-0.000130176845,
-0.000129524178,-0.000128875630,-0.000128231170,-0.000127590768,
-0.000126954392,-0.000126322010,-0.000125693594,-0.000125069114,
-0.000124448538,-0.000123831839,-0.000123218986,-0.000122609950,
-0.000122004703,-0.000121403217,-0.000120805462,-0.000120211398,
-0.000119621024,-0.000119034299,-0.000118451195,-0.000117871686,
-0.000117295744,-0.000116723344,-0.000116154459,-0.000115589063,
-0.000115027129,-0.000114468633,-0.000113913550,-0.000113361853,
-0.000112813518,-0.000112268520,-0.000111726834,-0.000111188437,
-0.000110653305,-0.000110121412,-0.000109592736,-0.000109067253,
-0.000108544940,-0.000108025773,-0.000107509731,-0.000106996789,
-0.000106486925,-0.000105980118,-0.000105476346,-0.000104975585,
-0.000104477815,-0.000103983013,-0.000103491159,-0.000103002232,
-0.000102516209,-0.000102033071,-0.000101552797,-0.000101075366,
-0.000100600758,-0.000100128953,-0.000099659931,-0.000099193670,
-0.000098730154,-0.000098269362,-0.000097811273,-0.000097355870,
-0.000096903134,-0.000096453045,-0.000096005584,-0.000095560734,
-0.000095118477,-0.000094678791,-0.000094241663,-0.000093807071,
-0.000093375000,-0.000092945431,-0.000092518346,-0.000092093728,
-0.000091671561,-0.000091251828,-0.000090834509,-0.000090419590,
-0.000090007054,-0.000089596884,-0.000089189066,-0.000088783579,
-0.000088380409,-0.000087979542,-0.000087580961,-0.000087184648,
-0.000086790590,-0.000086398772,-0.000086009176,-0.000085621788,
-0.000085236594,-0.000084853579,-0.000084472726,-0.000084094021,
-0.000083717451,-0.000083343000,-0.000082970655,-0.000082600399,
-0.000082232220,-0.000081866104,-0.000081502037,-0.000081140003,
-0.000080779991,-0.000080421987,-0.000080065976,-0.000079711946,
-0.000079359883,-0.000079009773,-0.000078661606,-0.000078315366,
-0.000077971040,-0.000077628617,-0.000077288085,-0.000076949428,
-0.000076612636,-0.000076277697,-0.000075944598,-0.000075613325,
-0.000075283869,-0.000074956216,-0.000074630355,-0.000074306274,
-0.000073983961,-0.000073663404,-0.000073344592,-0.000073027514,
-0.000072712158,-0.000072398512,-0.000072086567,-0.000071776309,
-0.000071467731,-0.000071160818,-0.000070855559,-0.000070551947,
-0.000070249969,-0.000069949614,-0.000069650873,-0.000069353734,
-0.000069058188,-0.000068764223,-0.000068471830,-0.000068181000,
-0.000067891719,-0.000067603982,-0.000067317776,-0.000067033091,
-0.000066749920,-0.000066468249,-0.000066188072,-0.000065909379,
-0.000065632160,-0.000065356404,-0.000065082104,-0.000064809249,
-0.000064537830,-0.000064267841,-0.000063999270,-0.000063732106,
-0.000063466345,-0.000063201975,-0.000062938987,-0.000062677374,
-0.000062417127,-0.000062158236,-0.000061900694,-0.000061644492,
-0.000061389621,-0.000061136073,-0.000060883842,-0.000060632915,
-0.000060383289,-0.000060134953,-0.000059887864,-0.000059642083,
-0.000059397570,-0.000059154315,-0.000058912309,-0.000058671548,
-0.000058432022,-0.000058193723,-0.000057956645,-0.000057720779,
-0.000057486117,-0.000057252655,-0.000057020383,-0.000056789292,
-0.000056559380,-0.000056330634,-0.000056103051,-0.000055876620,
-0.000055651339,-0.000055427196,-0.000055204187,-0.000054982304,
-0.000054761541,-0.000054541891,-0.000054323347,-0.000054105902,
-0.000053889548,-0.000053674282,-0.000053460095,-0.000053246980,
-0.000053034932,-0.000052823945,-0.000052614010,-0.000052405123,
-0.000052197277,-0.000051990464,-0.000051784681,-0.000051579921,
-0.000051376175,-0.000051173441,-0.000050971711,-0.000050770978,
-0.000050571237,-0.000050372484,-0.000050174711,-0.000049977910,
-0.000049782080,-0.000049587212,-0.000049393303,-0.000049200344,
-0.000049008332,-0.000048820422,-0.000048630261,-0.000048441030,
-0.000048252722,-0.000048065332,-0.000047878856,-0.000047693287,
-0.000047508621,-0.000047324852,-0.000047141973,-0.000046959983,
-0.000046778874,-0.000046598641,-0.000046419280,-0.000046240785,
-0.000046063150,-0.000045886371,-0.000045710445,-0.000045535364,
-0.000045361124,-0.000045187721,-0.000045015150,-0.000044843405,
-0.000044672483,-0.000044502378,-0.000044333084,-0.000044164600,
-0.000043996919,-0.000043830035,-0.000043663946,-0.000043498647,
-0.000043334133,-0.000043170399,-0.000043007443,-0.000042845255,
-0.000042683836,-0.000042523182,-0.000042363284,-0.000042204141,
-0.000042045748,-0.000041888100,-0.000041731194,-0.000041575025,
-0.000041419591,-0.000041264882,-0.000041110901,-0.000040957641,
-0.000040805096,-0.000040653264,-0.000040502141,-0.000040351722,
-0.000040202005,-0.000040052984,-0.000039904655,-0.000039757016,
-0.000039610063,-0.000039463789,-0.000039318195,-0.000039173273,
-0.000039029021,-0.000038885436,-0.000038742515,-0.000038600250,
-0.000038458642,-0.000038317685,-0.000038177376,-0.000038037711,
-0.000037898687,-0.000037760301,-0.000037622548,-0.000037485426,
-0.000037348930,-0.000037213058,-0.000037077806,-0.000036943170,
-0.000036809146,-0.000036675734,-0.000036542929,-0.000036410724,
-0.000036279120,-0.000036148115,-0.000036017700,-0.000035887877,
-0.000035758640,-0.000035629986,-0.000035501914,-0.000035374420,
-0.000035247496,-0.000035121149,-0.000034995368,-0.000034870150,
-0.000034745493,-0.000034621399,-0.000034497857,-0.000034374867,
-0.000034252431,-0.000034130540,-0.000034009195,-0.000033888389,
-0.000033768121,-0.000033648386,-0.000033529187,-0.000033410517,
-0.000033292372,-0.000033174751,-0.000033057655,-0.000032941073,
-0.000032825009,-0.000032709445,-0.000032594403,-0.000032479866,
-0.000032365835,-0.000032252309,-0.000032139280,-0.000032026745,
-0.000031914707,-0.000031803162,-0.000031692103,-0.000031581532,
-0.000031471444,-0.000031361837,-0.000031252709,-0.000031144055,
-0.000031035877,-0.000030928170,-0.000030820929,-0.000030714158,
-0.000030607851,-0.000030502003,-0.000030396614,-0.000030291683,
-0.000030187205,-0.000030083179,-0.000029979603,-0.000029876472,
-0.000029773788,-0.000029671547,-0.000029569744,-0.000029468381,
-0.000029367450,-0.000029266957,-0.000029166892,-0.000029067256,
-0.000028968050,-0.000028869268,-0.000028770907,-0.000028672964,
-0.000028575442,-0.000028478335,-0.000028381639,-0.000028285359,
-0.000028189486,-0.000028094022,-0.000027998959,-0.000027904304,
-0.000027810049,-0.000027716193,-0.000027622734,-0.000027529671,
-0.000027436999,-0.000027344717,-0.000027252827,-0.000027161322,
-0.000027070206,-0.000026979468,-0.000026889117,-0.000026799141,
-0.000026709543,-0.000026620320,-0.000026531473,-0.000026442997,
-0.000026354890,-0.000026267149,-0.000026179778,-0.000026092771,
-0.000026006124,-0.000025919839,-0.000025833911,-0.000025748342,
-0.000025663130,-0.000025578271,-0.000025493760,-0.000025409601,
-0.000025325792,-0.000025242328,-0.000025159208,-0.000025076433,
-0.000024993995,-0.000024911902,-0.000024830144,-0.000024748726,
-0.000024667636,-0.000024586884,-0.000024506464,-0.000024426371,
-0.000024346609,-0.000024267170,-0.000024188056,-0.000024109265,
-0.000024030799,-0.000023952651,-0.000023874822,-0.000023797310,
-0.000023720113,-0.000023643230,-0.000023566661,-0.000023490400,
-0.000023414450,-0.000023338806,-0.000023263470,-0.000023188440,
-0.000023113711,-0.000023039284,-0.000022965158,-0.000022891329,
-0.000022817800,-0.000022744566,-0.000022671628,-0.000022598981,
-0.000022526626,-0.000022454564,-0.000022382789,-0.000022311301,
-0.000022240098,-0.000022169181,-0.000022098547,-0.000022028196,
-0.000021958126,-0.000021888335,-0.000021818823,-0.000021749584,
-0.000021680622,-0.000021611936,-0.000021543520,-0.000021475377,
-0.000021407504,-0.000021339898,-0.000021272563,-0.000021205493,
-0.000021138686,-0.000021072146,-0.000021005867,-0.000020939846,
-0.000020874089,-0.000020808589,-0.000020743350,-0.000020678365,
-0.000020613631,-0.000020549154,-0.000020484934,-0.000020420957,
-0.000020357237,-0.000020293765,-0.000020230538,-0.000020167562,
-0.000020104832,-0.000020042343,-0.000019980097,-0.000019918094,
-0.000019856335,-0.000019794814,-0.000019733531,-0.000019672485,
-0.000019611678,-0.000019551105,-0.000019490768,-0.000019430661,
-0.000019370790,-0.000019311147,-0.000019251735,-0.000019192557,
-0.000019133600,-0.000019074873,-0.000019016371,-0.000018958095,
-0.000018900039,-0.000018842210,-0.000018784603,-0.000018727213,
-0.000018670047,-0.000018613097,-0.000018556365,-0.000018499848,
-0.000018443548,-0.000018387465,-0.000018331591,-0.000018275933,
-0.000018220484,-0.000018165250,-0.000018110224,-0.000018055409,
-0.000018000797,-0.000017946368,-0.000017892173,-0.000017838178,
-0.000017784394,-0.000017730808,-0.000017677427,-0.000017624243,
-0.000017571262,-0.000017518482,-0.000017465898,-0.000017413516,
-0.000017361325,-0.000017309333,-0.000017257536,-0.000017205934,
-0.000017154522,-0.000017103307,-0.000017052279,-0.000017001446,
-0.000016950802,-0.000016900344,-0.000016850076,-0.000016799996,
-0.000016750100,-0.000016700391,-0.000016650869,-0.000016601528,
-0.000016552372,-0.000016503396,-0.000016454605,-0.000016405991,
-0.000016357560,-0.000016309309,-0.000016261231,-0.000016213333,
-0.000016165615,-0.000016118065,-0.000016070700,-0.000016023506,
-0.000015976480,-0.000015929631,-0.000015882957,-0.000015836450,
-0.000015790114,-0.000015743950,-0.000015697953,-0.000015652126,
-0.000015606467,-0.000015560973,-0.000015515645,-0.000015470483,
-0.000015425484,-0.000015380653,-0.000015335981,-0.000015291478,
-0.000015247131,-0.000015202943,-0.000015158921,-0.000015115055,
-0.000015071350,-0.000015027801,-0.000014984411,-0.000014941180,
-0.000014898105,-0.000014855180,-0.000014812414,-0.000014769805,
-0.000014727347,-0.000014685040,-0.000014642884,-0.000014600881,
-0.000014559031,-0.000014517328,-0.000014475782,-0.000014434379,
-0.000014393121,-0.000014352019,-0.000014311058,-0.000014270245,
-0.000014229578,-0.000014189055,-0.000014148680,-0.000014108446,
-0.000014068353,-0.000014028407,-0.000013988601,-0.000013948938,
-0.000013909417,-0.000013870032,-0.000013830791,-0.000013791682,
-0.000013752719,-0.000013713888,-0.000013675201,-0.000013636646,
-0.000013598224,-0.000013559943,-0.000013521794,-0.000013483787,
-0.000013445904,-0.000013408159,-0.000013370545,-0.000013333065,
-0.000013295714,-0.000013258497,-0.000013221409,-0.000013184449,
-0.000013147625,-0.000013110920,-0.000013074350,-0.000013037908,
-0.000013001586,-0.000012965401,-0.000012929339,-0.000012893399,
-0.000012857583,-0.000012821894,-0.000012786331,-0.000012750890,
-0.000012715572,-0.000012680376,-0.000012645302,-0.000012610349,
-0.000012575520,-0.000012540810,-0.000012506216,-0.000012471746,
-0.000012437394,-0.000012403161,-0.000012369047,-0.000012335050,
-0.000012301167,-0.000012267404,-0.000012233750,-0.000012200220,
-0.000012166803,-0.000012133497,-0.000012100313,-0.000012067236,
-0.000012034272,-0.000012001421,-0.000011968683,-0.000011936060,
-0.000011903547,-0.000011871144,-0.000011838848,-0.000011806664,
-0.000011774589,-0.000011742629,-0.000011710771,-0.000011679025,
-0.000011647383,-0.000011615852,-0.000011584427,-0.000011553112,
-0.000011521895,-0.000011490790,-0.000011459786,-0.000011428886,
-0.000011398096,-0.000011367410,-0.000011336821,-0.000011306340,
-0.000011275960,-0.000011245680,-0.000011215503,-0.000011185429,
-0.000011155458,-0.000011125582,-0.000011095810,-0.000011066134,
-0.000011036565,-0.000011007086,-0.000010977713,-0.000010948432,
-0.000010919250,-0.000010890170,-0.000010861182,-0.000010832294,
-0.000010803504,-0.000010774803,-0.000010746203,-0.000010717697,
-0.000010689281,-0.000010660963,-0.000010632742,-0.000010604605,
-0.000010576575,-0.000010548631,-0.000010520774,-0.000010493012,
-0.000010465346,-0.000010437768,-0.000010410283,-0.000010382885,
-0.000010355578,-0.000010328369,-0.000010301239,-0.000010274208,
-0.000010251560,-0.000010224675,-0.000010197872,-0.000010171157,
-0.000010144533,-0.000010117994,-0.000010091545,-0.000010065176,
-0.000010038899,-0.000010012705,-0.000009986596,-0.000009960573,
-0.000009934633,-0.000009908783,-0.000009883016,-0.000009857331,
-0.000009831723,-0.000009806208,-0.000009780772,-0.000009755416,
-0.000009730149,-0.000009704965,-0.000009679854,-0.000009654825,
-0.000009629881,-0.000009605015,-0.000009580232,-0.000009555527,
-0.000009530906,-0.000009506359,-0.000009481894,-0.000009457507,
-0.000009433201,-0.000009408968,-0.000009384813,-0.000009360741,
-0.000009336743,-0.000009312822,-0.000009288983,-0.000009265219,
-0.000009241523,-0.000009217903,-0.000009194367,-0.000009170904,
-0.000009147513,-0.000009124185,-0.000009100951,-0.000009077784,
-0.000009054691,-0.000009031668,-0.000009008727,-0.000008985847,
-0.000008963044,-0.000008940323,-0.000008917665,-0.000008895083,
-0.000008872572,-0.000008850132,-0.000008827756,-0.000008805460,
-0.000008783231,-0.000008761077,-0.000008738982,-0.000008716967,
-0.000008695016,-0.000008673142,-0.000008651317,-0.000008629577,
-0.000008607915,-0.000008586302,-0.000008564769,-0.000008543299,
-0.000008521889,-0.000008500552,-0.000008479283,-0.000008458087,
-0.000008436951,-0.000008415879,-0.000008394877,-0.000008373949,
-0.000008353056,-0.000008332256,-0.000008311510,-0.000008290834,
-0.000008270228,-0.000008249672,-0.000008229185,-0.000008208760,
-0.000008188409,-0.000008168114,-0.000008147887,-0.000008127708,
-0.000008107605,-0.000008087561,-0.000008067576,-0.000008047655,
-0.000008027797,-0.000008007992,-0.000007988257,-0.000007968579,
-0.000007948961,-0.000007929423,-0.000007909923,-0.000007890483,
-0.000007871118,-0.000007851794,-0.000007832545,-0.000007813350,
-0.000007794215,-0.000007775134,-0.000007756107,-0.000007737143,
-0.000007718239,-0.000007699398,-0.000007680602,-0.000007661878,
-0.000007643192,-0.000007624581,-0.000007606023,-0.000007587516,
-0.000007569072,-0.000007550684,-0.000007532342,-0.000007514064,
-0.000007495842,-0.000007477669,-0.000007459564,-0.000007441513,
-0.000007423500,-0.000007405565,-0.000007387653,-0.000007369819,
-0.000007352033,-0.000007334302,-0.000007316624,-0.000007298991,
-0.000007281419,-0.000007263898,-0.000007246430,-0.000007229017,
-0.000007211665,-0.000007194341,-0.000007177091,-0.000007159891,
-0.000007142723,-0.000007125624,-0.000007108576,-0.000007091568,
-0.000007074623,-0.000007057725,-0.000007040876,-0.000007024086,
-0.000007007336,-0.000006990642,-0.000006973998,-0.000006957391,
-0.000006940839,-0.000006924342,-0.000006907899,-0.000006891498,
-0.000006875140,-0.000006858837,-0.000006842582,-0.000006826375,
-0.000006810219,-0.000006794105,-0.000006778029,-0.000006762023,
-0.000006746059,-0.000006730128,-0.000006714271,-0.000006698432,
-0.000006682660,-0.000006666918,-0.000006651234,-0.000006635596,
-0.000006620005,-0.000006604454,-0.000006588941,-0.000006573490,
-0.000006558070,-0.000006542702,-0.000006527380,-0.000006512109,
-0.000006496882,-0.000006481687,-0.000006466546,-0.000006451435,
-0.000006436391,-0.000006421375,-0.000006406417,-0.000006391483,
-0.000006376616,-0.000006361774,-0.000006346978,-0.000006332234,
-0.000006317521,-0.000006302863,-0.000006288236,-0.000006273658,
-0.000006259130,-0.000006244639,-0.000006230172,-0.000006215769,
-0.000006201406,-0.000006187056,-0.000006172780,-0.000006158538,
-0.000006144333,-0.000006130175,-0.000006116049,-0.000006101973,
-0.000006087918,-0.000006073934,-0.000006059967,-0.000006046060,
-0.000006032157,-0.000006018329,-0.000006004539,-0.000005990767,
-0.000005977053,-0.000005963368,-0.000005949736,-0.000005936125,
-0.000005922564,-0.000005909033,-0.000005895557,-0.000005882108,
-0.000005868694,-0.000005855330,-0.000005841986,-0.000005828695,
-0.000005815440,-0.000005802219,-0.000005789042,-0.000005775894,
-0.000005762793,-0.000005749713,-0.000005736681,-0.000005723693,
-0.000005710736,-0.000005697801,-0.000005684917,-0.000005672083,
-0.000005659257,-0.000005646489,-0.000005633742,-0.000005621052,
-0.000005608376,-0.000005595751,-0.000005583147,-0.000005570596,
-0.000005558069,-0.000005545574,-0.000005533131,-0.000005520686,
-0.000005508306,-0.000005495958,-0.000005483642,-0.000005471368,
-0.000005459117,-0.000005446911,-0.000005434723,-0.000005422582,
-0.000005410465,-0.000005398405,-0.000005386318,-0.000005374325,
-0.000005362350,-0.000005350395,-0.000005338497,-0.000005326630,
-0.000005314785,-0.000005302970,-0.000005291186,-0.000005279453,
-0.000005267752,-0.000005256061,-0.000005244408,-0.000005232808,
-0.000005221217,-0.000005209672,-0.000005198143,-0.000005186671,
-0.000005175216,-0.000005163788,-0.000005152404,-0.000005141034,
-0.000005129712,-0.000005118411,-0.000005107148,-0.000005095903,
-0.000005084703,-0.000005073544,-0.000005062386,-0.000005051294,
-0.000005040207,-0.000005029153,-0.000005018130,-0.000005007138,
-0.000004996188,-0.000004985255,-0.000004974357,-0.000004963482,
-0.000004952658,-0.000004941822,-0.000004931049,-0.000004920300,
-0.000004909585,-0.000004898883,-0.000004888218,-0.000004877591,
-0.000004866978,-0.000004856408,-0.000004845851,-0.000004835339,
-0.000004824820,-0.000004814384,-0.000004803951,-0.000004793527,
-0.000004783167,-0.000004772816,-0.000004762493,-0.000004752199,
-0.000004741928,-0.000004731692,-0.000004721489,-0.000004711313,
-0.000004701153,-0.000004691028,-0.000004680906,-0.000004670841,
-0.000004660800,-0.000004650784,-0.000004640794,-0.000004630823,
-0.000004620897,-0.000004610976,-0.000004601088,-0.000004591223,
-0.000004581397,-0.000004571577,-0.000004561807,-0.000004552078,
-0.000004542317,-0.000004532634,-0.000004522956,-0.000004513308,
-0.000004503683,-0.000004494084,-0.000004484512,-0.000004474978,
-0.000004465445,-0.000004455950,-0.000004446495,-0.000004437029,
-0.000004427618,-0.000004418215,-0.000004408848,-0.000004399505,
-0.000004390179,-0.000004380881,-0.000004371601,-0.000004362365,
-0.000004353139,-0.000004343950,-0.000004334749,-0.000004325623,
-0.000004316509,-0.000004307391,-0.000004298319,-0.000004289266,
-0.000004280235,-0.000004271233,-0.000004262246,-0.000004253291,
-0.000004244373,-0.000004235455,-0.000004226566,-0.000004217706,
-0.000004208841,-0.000004200037,-0.000004191235,-0.000004182468,
-0.000004173720,-0.000004164993,-0.000004156290,-0.000004147604,
-0.000004138957,-0.000004130317,-0.000004121718,-0.000004113097,
-0.000004104542,-0.000004096010,-0.000004087470,-0.000004078981,
-0.000004070497,-0.000004062048,-0.000004053610,-0.000004045201,
-0.000004036807,-0.000004028443,-0.000004020089,-0.000004011762,
-0.000004003482,-0.000003995175,-0.000003986913,-0.000003978681,
-0.000003970453,-0.000003962262,-0.000003954084,-0.000003945935,
-0.000003937788,-0.000003929692,-0.000003921595,-0.000003913524,
-0.000003905457,-0.000003897431,-0.000003889449,-0.000003881430,
-0.000003873473,-0.000003865523,-0.000003857602,-0.000003849695,
-0.000003841812,-0.000003833951,-0.000003826106,-0.000003818275,
-0.000003810468,-0.000003802692,-0.000003794902,-0.000003787173,
-0.000003779443,-0.000003771744,-0.000003764045,-0.000003756378,
-0.000003748734,-0.000003741099,-0.000003733489,-0.000003725902,
-0.000003718344,-0.000003710764,-0.000003703250,-0.000003695750,
-0.000003688226,-0.000003680780,-0.000003673310,-0.000003665875,
-0.000003658465,-0.000003651067,-0.000003643680,-0.000003636336,
-0.000003628987,-0.000003621648,-0.000003614371,-0.000003607042,
-0.000003599790,-0.000003592538,-0.000003585317,-0.000003578103])
def voigt_wofz(a, u):
    """ Evaluate the Voigt function H(a, u) via Scipy's wofz().

    Parameters
    ----------
    a: float
        Ratio of Lorentzian to Gaussian linewidths.
    u: array of floats
        The frequency or velocity offsets from the line centre, in units
        of the Gaussian broadening linewidth.
        See the notes for `voigt` for more details.

    Returns the real part of the Faddeeva function w(u + i*a). If scipy
    is not installed, a warning is printed and nothing is returned.
    """
    try:
        from scipy.special import wofz
    except ImportError:
        print("Can't find scipy.special.wofz(), can only calculate Voigt "
              " function for 0 < a < 0.1 (a=%g)" % a)
    else:
        return wofz(u + 1j * a).real
def voigt_slow(a, u):
    """ Calculate the voigt function to very high accuracy.

    Uses numerical integration, so is slow. Answer is correct to 20
    significant figures.

    Note this needs `mpmath` or `sympy` to be installed.
    """
    try:
        import mpmath as mp
    except ImportError:
        # fall back on the copy of mpmath bundled with sympy
        from sympy import mpmath as mp
    with mp.workdps(20):
        z = mp.mpc(u, a)
        w = mp.exp(-z * z) * mp.erfc(-1j * z)
    return w.real
def voigt(a, u):
    """ Compute the Voigt function using a fast approximation.

    Parameters
    ----------
    a : float
        Ratio of Lorentzian to Gaussian linewidths (see below).
    u : array of floats, shape (N,)
        The frequency or velocity offsets from the line centre, in units
        of the FWHM of the Gaussian broadening (see below).

    Returns
    -------
    H : array of floats, shape (N,)
        The Voigt function.

    Notes
    -----
    The Voigt function is useful for calculating the optical depth as
    function of frequency for an absorption transition associated with
    an atom or ion.

    The Voigt function H(a, u) is related to the Voigt profile
    V(x, sigma, gamma)::

        V(x, sigma, gamma) = H(a, u) / (sqrt(2*pi) * sigma)

    where::

        a = gamma / (sqrt(2) * sigma)
        u = x / (sqrt(2) * sigma)

    The Voigt profile is convolution of a Gaussian profile::

        G(x, sigma) = exp(-0.5 * (x / sigma)^2) / (sigma * sqrt(2*pi))

    and a Lorentzian profile::

        L(x, gamma) = gamma / ((x^2 + gamma^2) * pi)

    It is normalised; the integral of V over all x is 1.

    This function uses a Taylor approximation to the Voigt function
    for 0 < a < 0.1. (Harris 1948, ApJ, 108, 112). Relative error
    with respect to `voigt_wofz` is < 10^-4.9 for a < 0.1. For larger
    `a` the exact calculation is done in `voigt_wofz`.
    """
    a = float(a)
    if a > 0.1:
        # Taylor expansion is only accurate for small a: use the exact result.
        return voigt_wofz(a, u)
    elif a < 0:
        raise ValueError('a must be > 0 (%f)' % a)
    # H(a, u) is even in u, so work with |u|.  Coerce to a float array:
    # without this, an integer input makes empty_like() allocate an int
    # `out`, silently truncating every fractional result below.
    u = np.abs(np.asarray(u, dtype=float))
    out = np.empty_like(u)
    u2 = u * u
    cond = u > 19.99
    if cond.any():
        # Use asymptotic approximation for large offsets (|u| > 19.99),
        # a series in inverse powers of u^2 (the interpolation tables
        # below only cover |u| <= 19.99).
        iu2c = 1. / u2[cond]
        iu2c2 = iu2c * iu2c
        iu2c3 = iu2c2 * iu2c
        iu2c4 = iu2c3 * iu2c
        a2 = a**2
        k2 = 1.5 + a2
        k3 = 3.75 + 5 * a2
        k4 = 26.25 * a2
        out[cond] = a / sqrtpi * (iu2c + k2 * iu2c2 + k3 * iu2c3 + k4 * iu2c4)
    # for u values with abs(u) <= 19.99 use lookup tables
    notcond = ~cond
    u = u[notcond]
    u2 = u2[notcond]
    expmu2 = np.exp(-u2)
    # Harris (1948) expansion in powers of a: the even-order terms have
    # closed forms (exp(-u^2), (1-2u^2)exp(-u^2), ...) while the odd-order
    # terms H1 and H3 are interpolated from the module-level tables U/H1/H3.
    out[notcond] = expmu2 + a*(np.interp(u, U, H1) + a*(
        (1. - 2.*u2)*expmu2 + a*(np.interp(u, U, H3) + a*(
        0.5 - 2.*u2 + 2./3.*u2*u2)*expmu2)))
    return out
| {
"repo_name": "nhmc/LAE",
"path": "python_modules/barak/voigt.py",
"copies": "1",
"size": "73286",
"license": "mit",
"hash": 6486640700283049000,
"line_mean": 63.1172353456,
"line_max": 78,
"alpha_frac": 0.7392953634,
"autogenerated": false,
"ratio": 2.181585449349567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3420880812749567,
"avg_score": null,
"num_lines": null
} |
"""A fast, lightweight, and secure session WSGI middleware for use with GAE."""
from Cookie import CookieError, SimpleCookie
from base64 import b64decode, b64encode
import datetime
import hashlib
import hmac
import logging
import pickle
import os
import threading
import time
from google.appengine.api import memcache
from google.appengine.ext import db
# Configurable cookie options
COOKIE_NAME_PREFIX = "DgU" # identifies a cookie as being one used by gae-sessions (so you can set cookies too)
COOKIE_PATH = "/"
DEFAULT_COOKIE_ONLY_THRESH = 10240 # 10KB: GAE only allows ~16000B in HTTP header - leave ~6KB for other info
DEFAULT_LIFETIME = datetime.timedelta(days=7)
# constants
# A session ID is: 10-digit expiration timestamp + 1 separator character
# ('_' normally, 'S' for SSL-only sessions) + 32-char md5 hexdigest.
SID_LEN = 43 # timestamp (10 chars) + underscore + md5 (32 hex chars)
SIG_LEN = 44 # base 64 encoded HMAC-SHA256
MAX_COOKIE_LEN = 4096
# Set-Cookie fragment with a date in the past => deletes the cookie client-side.
EXPIRE_COOKIE_FMT = ' %s=; expires=Wed, 01-Jan-1970 00:00:00 GMT; Path=' + COOKIE_PATH
# Cookie name carries a 2-digit index because large session payloads are
# split across multiple cookies; %s slots are (index, value, expires-fragment).
COOKIE_FMT = ' ' + COOKIE_NAME_PREFIX + '%02d="%s"; %sPath=' + COOKIE_PATH + '; HttpOnly'
COOKIE_FMT_SECURE = COOKIE_FMT + '; Secure'
COOKIE_DATE_FMT = '%a, %d-%b-%Y %H:%M:%S GMT'
COOKIE_OVERHEAD = len(COOKIE_FMT % (0, '', '')) + len('expires=Xxx, xx XXX XXXX XX:XX:XX GMT; ') + 150 # 150=safety margin (e.g., in case browser uses 4000 instead of 4096)
MAX_DATA_PER_COOKIE = MAX_COOKIE_LEN - COOKIE_OVERHEAD
# Thread-local storage: each request-handling thread sees only its own session.
_tls = threading.local()
def get_current_session():
    """Return the Session bound to the thread serving the current request."""
    return getattr(_tls, "current_session")
def set_current_session(session):
    """Bind ``session`` to the thread serving the current request."""
    setattr(_tls, "current_session", session)
def is_gaesessions_key(k):
    """Return True if cookie name ``k`` belongs to gae-sessions."""
    prefix = COOKIE_NAME_PREFIX
    return k[:len(prefix)] == prefix
class SessionModel(db.Model):
    """Contains session data. key_name is the session ID and pdump contains a
    pickled dictionary which maps session variables to their values."""
    # Pickled session payload stored as an opaque blob.  The entity's key_name
    # is the session ID, whose first 10 characters are the expiration
    # timestamp - delete_expired_sessions() relies on that key ordering.
    pdump = db.BlobProperty()
class Session(object):
    """Manages loading, reading/writing key-value pairs, and saving of a session.

    ``sid`` - if set, then the session for that sid (if any) is loaded. Otherwise,
    sid will be loaded from the HTTP_COOKIE (if any).
    """
    # Sentinel value for self.dirty: data has changed, but only memcache (not
    # the datastore) needs updating when the session is saved.
    DIRTY_BUT_DONT_PERSIST_TO_DB = 1

    def __init__(self, sid=None, lifetime=DEFAULT_LIFETIME, no_datastore=False,
                 cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH, cookie_key=None):
        self._accessed = False  # has any session value been read or written?
        self.sid = None
        self.cookie_keys = []
        self.cookie_data = None
        self.data = {}
        self.dirty = False  # has the session been changed?
        self.lifetime = lifetime
        self.no_datastore = no_datastore
        self.cookie_only_thresh = cookie_only_threshold
        self.base_key = cookie_key  # secret used to HMAC-sign cookie payloads
        if sid:
            self.__set_sid(sid, False)
            self.data = None  # load from memcache/datastore on first access
        else:
            self.__read_cookie()

    @staticmethod
    def __compute_hmac(base_key, sid, text):
        """Computes the signature for text given base_key and sid."""
        key = base_key + sid
        return b64encode(hmac.new(key, text, hashlib.sha256).digest())

    def __read_cookie(self):
        """Reads the HTTP Cookie and loads the sid and data from it (if any)."""
        try:
            # check the cookie to see if a session has been started
            cookie = SimpleCookie(os.environ['HTTP_COOKIE'])
            self.cookie_keys = filter(is_gaesessions_key, cookie.keys())
            if not self.cookie_keys:
                return  # no session yet
            self.cookie_keys.sort()
            data = ''.join(cookie[k].value for k in self.cookie_keys)
            # concatenated cookie payload layout: signature | sid | base64(pickle)
            i = SIG_LEN + SID_LEN
            sig, sid, b64pdump = data[:SIG_LEN], data[SIG_LEN:i], data[i:]
            pdump = b64decode(b64pdump)
            actual_sig = Session.__compute_hmac(self.base_key, sid, pdump)
            if sig == actual_sig:
                self.__set_sid(sid, False)
                # check for expiration and terminate the session if it has expired
                if self.get_expiration() != 0 and time.time() > self.get_expiration():
                    return self.terminate()
                if pdump:
                    self.data = self.__decode_data(pdump)
                else:
                    self.data = None  # data is in memcache/db: load it on-demand
            else:
                logging.warning('cookie with invalid sig received from %s: %s' % (os.environ.get('REMOTE_ADDR'), b64pdump))
        except (CookieError, KeyError, IndexError, TypeError):
            # there is no cookie (i.e., no session) or the cookie is invalid
            self.terminate(False)

    def make_cookie_headers(self):
        """Returns a list of cookie headers to send (if any)."""
        # expire all cookies if the session has ended
        if not self.sid:
            return [EXPIRE_COOKIE_FMT % k for k in self.cookie_keys]
        if self.cookie_data is None:
            return []  # no cookie headers need to be sent
        # build the cookie header(s): includes sig, sid, and cookie_data
        if self.is_ssl_only():
            m = MAX_DATA_PER_COOKIE - 8  # leave room for the "; Secure" suffix
            fmt = COOKIE_FMT_SECURE
        else:
            m = MAX_DATA_PER_COOKIE
            fmt = COOKIE_FMT
        sig = Session.__compute_hmac(self.base_key, self.sid, self.cookie_data)
        cv = sig + self.sid + b64encode(self.cookie_data)
        num_cookies = 1 + (len(cv) - 1) / m  # ceiling division (py2 int `/`)
        if self.get_expiration() > 0:
            ed = "expires=%s; " % datetime.datetime.fromtimestamp(self.get_expiration()).strftime(COOKIE_DATE_FMT)
        else:
            ed = ''
        cookies = [fmt % (i, cv[i * m:i * m + m], ed) for i in xrange(num_cookies)]
        # expire old cookies which aren't needed anymore
        old_cookies = xrange(num_cookies, len(self.cookie_keys))
        key = COOKIE_NAME_PREFIX + '%02d'
        cookies_to_ax = [EXPIRE_COOKIE_FMT % (key % i) for i in old_cookies]
        return cookies + cookies_to_ax

    def is_active(self):
        """Returns True if this session is active (i.e., it has been assigned a
        session ID and will be or has been persisted)."""
        return self.sid is not None

    def is_ssl_only(self):
        """Returns True if cookies set by this session will include the "Secure"
        attribute so that the client will only send them over a secure channel
        like SSL)."""
        # sid[-33] is the separator between timestamp and md5: 'S' marks SSL-only
        return self.sid is not None and self.sid[-33] == 'S'

    def is_accessed(self):
        """Returns True if any value of this session has been accessed."""
        return self._accessed

    def ensure_data_loaded(self):
        """Fetch the session data if it hasn't been retrieved it yet."""
        self._accessed = True
        if self.data is None and self.sid:
            self.__retrieve_data()

    def get_expiration(self):
        """Returns the timestamp at which this session will expire (0=unknown)."""
        try:
            # the first 10 characters of the sid are the expiration timestamp
            return int(self.sid[:-33])
        except (TypeError, ValueError):
            # no sid yet (None) or a malformed sid => treat as "no expiration"
            return 0

    def __make_sid(self, expire_ts=None, ssl_only=False):
        """Returns a new session ID."""
        # make a random ID (random.randrange() is 10x faster but less secure?)
        if expire_ts is None:
            expire_dt = datetime.datetime.now() + self.lifetime
            expire_ts = int(time.mktime((expire_dt).timetuple()))
        else:
            expire_ts = int(expire_ts)
        if ssl_only:
            sep = 'S'
        else:
            sep = '_'
        return ('%010d' % expire_ts) + sep + hashlib.md5(os.urandom(16)).hexdigest()

    @staticmethod
    def __encode_data(d):
        """Returns a "pickled+" encoding of d. d values of type db.Model are
        protobuf encoded before pickling to minimize CPU usage & data size."""
        # separate protobufs so we'll know how to decode (they are just strings)
        eP = {}  # for models encoded as protobufs
        eO = {}  # for everything else
        for k, v in d.iteritems():
            if isinstance(v, db.Model):
                eP[k] = db.model_to_protobuf(v)
            else:
                eO[k] = v
        return pickle.dumps((eP, eO), 2)

    @staticmethod
    def __decode_data(pdump):
        """Returns a data dictionary after decoding it from "pickled+" form."""
        try:
            eP, eO = pickle.loads(pdump)
            for k, v in eP.iteritems():
                eO[k] = db.model_from_protobuf(v)
        except Exception as e:
            logging.warning("failed to decode session data: %s" % e)
            eO = {}
        return eO

    def regenerate_id(self, expiration_ts=None):
        """Assigns the session a new session ID (data carries over). This
        should be called whenever a user authenticates to prevent session
        fixation attacks.

        ``expiration_ts`` - The UNIX timestamp the session will expire at. If
        omitted, the session expiration time will not be changed.
        """
        if self.sid or expiration_ts is not None:
            self.ensure_data_loaded()  # ensure we have the data before we delete it
            if expiration_ts is None:
                expiration_ts = self.get_expiration()
            self.__set_sid(self.__make_sid(expiration_ts, self.is_ssl_only()))
            self.dirty = True  # ensure the data is written to the new session

    def start(self, expiration_ts=None, ssl_only=False):
        """Starts a new session. expiration specifies when it will expire. If
        expiration is not specified, then self.lifetime will used to
        determine the expiration date.

        Normally this method does not need to be called directly - a session is
        automatically started when the first value is added to the session.

        ``expiration_ts`` - The UNIX timestamp the session will expire at. If
        omitted, the session will expire after the default ``lifetime`` has past
        (as specified in ``SessionMiddleware``).

        ``ssl_only`` - Whether to specify the "Secure" attribute on the cookie
        so that the client will ONLY transfer the cookie over a secure channel.
        """
        self.dirty = True
        self.data = {}
        self.__set_sid(self.__make_sid(expiration_ts, ssl_only), True)

    def terminate(self, clear_data=True):
        """Deletes the session and its data, and expires the user's cookie."""
        if clear_data:
            self.__clear_data()
        self.sid = None
        self.data = {}
        self.dirty = False
        if self.cookie_keys:
            self.cookie_data = ''  # trigger the cookies to expire
        else:
            self.cookie_data = None

    def __set_sid(self, sid, make_cookie=True):
        """Sets the session ID, deleting the old session if one existed. The
        session's data will remain intact (only the session ID changes)."""
        if self.sid:
            self.__clear_data()
        self.sid = sid
        self.db_key = db.Key.from_path(SessionModel.kind(), sid, namespace='')
        # set the cookie if requested
        if make_cookie:
            self.cookie_data = ''  # trigger the cookie to be sent

    def __clear_data(self):
        """Deletes this session from memcache and the datastore."""
        if self.sid:
            memcache.delete(self.sid, namespace='')  # not really needed; it'll go away on its own
            try:
                db.delete(self.db_key)
            except Exception:
                # either it wasn't in the db (maybe cookie/memcache-only) or db
                # is down => cron will expire it
                pass

    def __retrieve_data(self):
        """Sets the data associated with this session after retrieving it from
        memcache or the datastore. Assumes self.sid is set. Checks for session
        expiration after getting the data."""
        pdump = memcache.get(self.sid, namespace='')
        if pdump is None:
            # memcache lost it, go to the datastore
            if self.no_datastore:
                logging.info("can't find session data in memcache for sid=%s (using memcache only sessions)" % self.sid)
                self.terminate(False)  # we lost it; just kill the session
                return
            session_model_instance = db.get(self.db_key)
            if session_model_instance:
                pdump = session_model_instance.pdump
            else:
                logging.error("can't find session data in the datastore for sid=%s" % self.sid)
                self.terminate(False)  # we lost it; just kill the session
                return
        self.data = self.__decode_data(pdump)

    def save(self, persist_even_if_using_cookie=False):
        """Saves the data associated with this session IF any changes have been
        made (specifically, if any mutator methods like __setitem__ or the like
        is called).

        If the data is small enough it will be sent back to the user in a cookie
        instead of using memcache and the datastore. If `persist_even_if_using_cookie`
        evaluates to True, memcache and the datastore will also be used. If the
        no_datastore option is set, then the datastore will never be used.

        Normally this method does not need to be called directly - a session is
        automatically saved at the end of the request if any changes were made.
        """
        if not self.sid:
            return  # no session is active
        if not self.dirty:
            return  # nothing has changed
        dirty = self.dirty
        self.dirty = False  # saving, so it won't be dirty anymore
        # do the pickling ourselves b/c we need it for the datastore anyway
        pdump = self.__encode_data(self.data)
        # persist via cookies if it is reasonably small
        if len(pdump) * 4 / 3 <= self.cookie_only_thresh:  # 4/3 b/c base64 is ~33% bigger
            self.cookie_data = pdump
            if not persist_even_if_using_cookie:
                return
        elif self.cookie_keys:
            # latest data will only be in the backend, so expire data cookies we set
            self.cookie_data = ''
        memcache.set(self.sid, pdump, namespace='', time=self.get_expiration())  # may fail if memcache is down
        # persist the session to the datastore
        if dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB or self.no_datastore:
            return
        try:
            SessionModel(key_name=self.sid, pdump=pdump).put()
        except Exception as e:
            logging.warning("unable to persist session to datastore for sid=%s (%s)" % (self.sid, e))

    # Users may interact with the session through a dictionary-like interface.
    def clear(self):
        """Removes all data from the session (but does not terminate it)."""
        if self.sid:
            self.data = {}
            self.dirty = True

    def get(self, key, default=None):
        """Retrieves a value from the session."""
        self.ensure_data_loaded()
        return self.data.get(key, default)

    def has_key(self, key):
        """Returns True if key is set."""
        self.ensure_data_loaded()
        return key in self.data

    def pop(self, key, default=None):
        """Removes key and returns its value, or default if key is not present."""
        self.ensure_data_loaded()
        self.dirty = True
        return self.data.pop(key, default)

    def pop_quick(self, key, default=None):
        """Removes key and returns its value, or default if key is not present.
        The change will only be persisted to memcache until another change
        necessitates a write to the datastore."""
        self.ensure_data_loaded()
        if self.dirty is False:
            self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB
        return self.data.pop(key, default)

    def set_quick(self, key, value):
        """Set a value named key on this session. The change will only be
        persisted to memcache until another change necessitates a write to the
        datastore. This will start a session if one is not already active."""
        dirty = self.dirty
        self[key] = value  # sets dirty=True; downgrade back to "quick" below
        if dirty is False or dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB:
            self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB

    def __getitem__(self, key):
        """Returns the value associated with key on this session."""
        self.ensure_data_loaded()
        return self.data.__getitem__(key)

    def __setitem__(self, key, value):
        """Set a value named key on this session. This will start a session if
        one is not already active."""
        self.ensure_data_loaded()
        if not self.sid:
            self.start()
        self.data.__setitem__(key, value)
        self.dirty = True

    def __delitem__(self, key):
        """Deletes the value associated with key on this session."""
        self.ensure_data_loaded()
        self.data.__delitem__(key)
        self.dirty = True

    def __iter__(self):
        """Returns an iterator over the keys (names) of the stored values."""
        self.ensure_data_loaded()
        return self.data.iterkeys()

    def __contains__(self, key):
        """Returns True if key is present on this session."""
        self.ensure_data_loaded()
        return self.data.__contains__(key)

    def __str__(self):
        """Returns a string representation of the session."""
        if self.sid:
            self.ensure_data_loaded()
            return "SID=%s %s" % (self.sid, self.data)
        else:
            return "uninitialized session"
class SessionMiddleware(object):
    """WSGI middleware that adds session support.

    ``cookie_key`` - Secret used to sign cookies so clients cannot tamper with
    their contents. It should be at least 32 bytes long (RFC2104). Generate it
    OFFLINE (e.g., with ``os.urandom(64)``) and paste the result in as a string
    literal; generating a fresh key at startup would invalidate every existing
    session each time the app starts.
    ``lifetime`` - ``datetime.timedelta`` giving how long a session may last.
    Defaults to 7 days.
    ``no_datastore`` - By default writes also go to the datastore in case
    memcache is lost. Set to True to never use the datastore; this speeds up
    writes but sessions may occassionally be lost.
    ``cookie_only_threshold`` - Sessions whose data is no bigger than this many
    bytes are kept only in a secure cookie, avoiding memcache/datastore latency.
    Larger sessions use memcache+datastore. Defaults to 10KB.
    """
    def __init__(self, app, cookie_key, lifetime=DEFAULT_LIFETIME, no_datastore=False, cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH):
        self.app = app
        self.lifetime = lifetime
        self.no_datastore = no_datastore
        self.cookie_only_thresh = cookie_only_threshold
        self.cookie_key = cookie_key
        if not cookie_key:
            raise ValueError("cookie_key MUST be specified")
        if len(cookie_key) < 32:
            raise ValueError("RFC2104 recommends you use at least a 32 character key. Try os.urandom(64) to make a key.")

    def __call__(self, environ, start_response):
        # give the thread handling this request its own fresh session
        _tls.current_session = Session(lifetime=self.lifetime,
                                       no_datastore=self.no_datastore,
                                       cookie_only_threshold=self.cookie_only_thresh,
                                       cookie_key=self.cookie_key)

        def wrapped_start_response(status, headers, exc_info=None):
            # persist the session (if changed) and attach its cookie headers
            # just before the response headers go out
            _tls.current_session.save()
            for header_value in _tls.current_session.make_cookie_headers():
                headers.append(('Set-Cookie', header_value))
            return start_response(status, headers, exc_info)

        # let the app do its thing
        return self.app(environ, wrapped_start_response)
class DjangoSessionMiddleware(object):
    """Django middleware that adds session support.

    Django instantiates middleware without arguments, so the session
    configuration is hard-coded: edit the ``SessionMiddleware`` call in
    ``__init__`` below to supply your own ``cookie_key`` and options.
    """
    def __init__(self):
        def fake_app(environ, start_response):
            return start_response
        self.wrapped_wsgi_middleware = SessionMiddleware(fake_app, cookie_key='you MUST change this')
        self.response_handler = None

    def process_request(self, request):
        # invoking the wrapped WSGI middleware with the fake app hands back the
        # header-producing callable; keep it around for process_response()
        self.response_handler = self.wrapped_wsgi_middleware(None, lambda status, headers, exc_info: headers)
        request.session = get_current_session()  # for convenience

    def process_response(self, request, response):
        if self.response_handler:
            session_headers = self.response_handler(None, [], None)
            for name, value in session_headers:
                response[name] = value
            self.response_handler = None
        if hasattr(request, 'session') and request.session.is_accessed():
            # the response depends on the session cookie, so tell caches to
            # vary on it
            from django.utils.cache import patch_vary_headers
            logging.info("Varying")
            patch_vary_headers(response, ('Cookie',))
        return response
def delete_expired_sessions():
    """Deletes expired sessions from the datastore.

    Session IDs start with a 10-digit expiration timestamp, so every session
    whose key sorts before "<now><U+FFFD>" has already expired.

    If there are more than 500 expired sessions, only 500 will be removed.
    Returns True if all expired sessions have been removed.
    """
    now_str = unicode(int(time.time()))
    q = db.Query(SessionModel, keys_only=True, namespace='')
    # Use SessionModel.kind() - as the rest of this module does - instead of a
    # hard-coded kind string, so a future kind rename cannot silently break
    # this cleanup query.
    key = db.Key.from_path(SessionModel.kind(), now_str + u'\ufffd', namespace='')
    q.filter('__key__ < ', key)
    results = q.fetch(500)
    db.delete(results)
    logging.info('gae-sessions: deleted %d expired sessions from the datastore' % len(results))
    return len(results) < 500
| {
"repo_name": "seanpont/widowmaker-invitational",
"path": "lib/gaesessions/__init__.py",
"copies": "14",
"size": "21839",
"license": "mit",
"hash": 3650760255328601000,
"line_mean": 41.4058252427,
"line_max": 173,
"alpha_frac": 0.6205870232,
"autogenerated": false,
"ratio": 4.051762523191095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001130640344968598,
"num_lines": 515
} |
"""A fast, lightweight, and secure session WSGI middleware for use with GAE."""
import datetime
import hashlib
import hmac
import logging
import os
import pickle
import threading
import time
from Cookie import CookieError, SimpleCookie
from base64 import b64decode, b64encode
from google.appengine.api import memcache
from google.appengine.ext import db
# Configurable cookie options
COOKIE_NAME_PREFIX = "DgU" # identifies a cookie as being one used by gae-sessions (so you can set cookies too)
COOKIE_PATH = "/"
DEFAULT_COOKIE_ONLY_THRESH = 10240 # 10KB: GAE only allows ~16000B in HTTP header - leave ~6KB for other info
DEFAULT_LIFETIME = datetime.timedelta(days=7)
# constants
# Session ID layout: 10-digit expiration timestamp, then one separator
# character ('_' normally, 'S' for SSL-only sessions), then a 32-char md5.
SID_LEN = 43 # timestamp (10 chars) + underscore + md5 (32 hex chars)
SIG_LEN = 44 # base 64 encoded HMAC-SHA256
MAX_COOKIE_LEN = 4096
# Set-Cookie fragment with a date in the past => deletes the cookie client-side.
EXPIRE_COOKIE_FMT = ' %s=; expires=Wed, 01-Jan-1970 00:00:00 GMT; Path=' + COOKIE_PATH
# Cookie names carry a 2-digit index because big payloads are split across
# several cookies; the %s slots are (index, value, expires-fragment).
COOKIE_FMT = ' ' + COOKIE_NAME_PREFIX + '%02d="%s"; %sPath=' + COOKIE_PATH + '; HttpOnly'
COOKIE_FMT_SECURE = COOKIE_FMT + '; Secure'
COOKIE_DATE_FMT = '%a, %d-%b-%Y %H:%M:%S GMT'
COOKIE_OVERHEAD = len(COOKIE_FMT % (0, '', '')) + len(
    'expires=Xxx, xx XXX XXXX XX:XX:XX GMT; ') + 150 # 150=safety margin (e.g., in case browser uses 4000 instead of 4096)
MAX_DATA_PER_COOKIE = MAX_COOKIE_LEN - COOKIE_OVERHEAD
# Thread-local storage: each request-handling thread sees only its own session.
_tls = threading.local()
def get_current_session():
    """Return the Session bound to the thread serving the current request."""
    return getattr(_tls, "current_session")
def set_current_session(session):
    """Bind ``session`` to the thread serving the current request."""
    setattr(_tls, "current_session", session)
def is_gaesessions_key(k):
    """Return True if cookie name ``k`` belongs to gae-sessions."""
    prefix = COOKIE_NAME_PREFIX
    return k[:len(prefix)] == prefix
class SessionModel(db.Model):
    """Contains session data. key_name is the session ID and pdump contains a
    pickled dictionary which maps session variables to their values."""
    # Pickled session payload stored as an opaque blob.  The entity's key_name
    # is the session ID; its first 10 characters encode the expiration
    # timestamp, which the expired-session cleanup query relies on.
    pdump = db.BlobProperty()
class Session(object):
"""Manages loading, reading/writing key-value pairs, and saving of a session.
``sid`` - if set, then the session for that sid (if any) is loaded. Otherwise,
sid will be loaded from the HTTP_COOKIE (if any).
"""
DIRTY_BUT_DONT_PERSIST_TO_DB = 1
def __init__(self, sid=None, lifetime=DEFAULT_LIFETIME, no_datastore=False,
cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH, cookie_key=None):
self._accessed = False
self.sid = None
self.cookie_keys = []
self.cookie_data = None
self.data = {}
self.dirty = False # has the session been changed?
self.lifetime = lifetime
self.no_datastore = no_datastore
self.cookie_only_thresh = cookie_only_threshold
self.base_key = cookie_key
if sid:
self.__set_sid(sid, False)
self.data = None
else:
self.__read_cookie()
@staticmethod
def __compute_hmac(base_key, sid, text):
"""Computes the signature for text given base_key and sid."""
key = base_key + sid
return b64encode(hmac.new(key, text, hashlib.sha256).digest())
def __read_cookie(self):
"""Reads the HTTP Cookie and loads the sid and data from it (if any)."""
try:
# check the cookie to see if a session has been started
cookie = SimpleCookie(os.environ['HTTP_COOKIE'])
self.cookie_keys = filter(is_gaesessions_key, cookie.keys())
if not self.cookie_keys:
return # no session yet
self.cookie_keys.sort()
data = ''.join(cookie[k].value for k in self.cookie_keys)
i = SIG_LEN + SID_LEN
sig, sid, b64pdump = data[:SIG_LEN], data[SIG_LEN:i], data[i:]
pdump = b64decode(b64pdump)
actual_sig = Session.__compute_hmac(self.base_key, sid, pdump)
if sig == actual_sig:
self.__set_sid(sid, False)
# check for expiration and terminate the session if it has expired
if self.get_expiration() != 0 and time.time() > self.get_expiration():
return self.terminate()
if pdump:
self.data = self.__decode_data(pdump)
else:
self.data = None # data is in memcache/db: load it on-demand
else:
logging.warn('cookie with invalid sig received from %s: %s' % (os.environ.get('REMOTE_ADDR'), b64pdump))
except (CookieError, KeyError, IndexError, TypeError):
# there is no cookie (i.e., no session) or the cookie is invalid
self.terminate(False)
def make_cookie_headers(self):
"""Returns a list of cookie headers to send (if any)."""
# expire all cookies if the session has ended
if not self.sid:
return [EXPIRE_COOKIE_FMT % k for k in self.cookie_keys]
if self.cookie_data is None:
return [] # no cookie headers need to be sent
# build the cookie header(s): includes sig, sid, and cookie_data
if self.is_ssl_only():
m = MAX_DATA_PER_COOKIE - 8
fmt = COOKIE_FMT_SECURE
else:
m = MAX_DATA_PER_COOKIE
fmt = COOKIE_FMT
sig = Session.__compute_hmac(self.base_key, self.sid, self.cookie_data)
cv = sig + self.sid + b64encode(self.cookie_data)
num_cookies = 1 + (len(cv) - 1) / m
if self.get_expiration() > 0:
ed = "expires=%s; " % datetime.datetime.fromtimestamp(self.get_expiration()).strftime(COOKIE_DATE_FMT)
else:
ed = ''
cookies = [fmt % (i, cv[i * m:i * m + m], ed) for i in xrange(num_cookies)]
# expire old cookies which aren't needed anymore
old_cookies = xrange(num_cookies, len(self.cookie_keys))
key = COOKIE_NAME_PREFIX + '%02d'
cookies_to_ax = [EXPIRE_COOKIE_FMT % (key % i) for i in old_cookies]
return cookies + cookies_to_ax
def is_active(self):
"""Returns True if this session is active (i.e., it has been assigned a
session ID and will be or has been persisted)."""
return self.sid is not None
def is_ssl_only(self):
"""Returns True if cookies set by this session will include the "Secure"
attribute so that the client will only send them over a secure channel
like SSL)."""
return self.sid is not None and self.sid[-33] == 'S'
def is_accessed(self):
"""Returns True if any value of this session has been accessed."""
return self._accessed
def ensure_data_loaded(self):
"""Fetch the session data if it hasn't been retrieved it yet."""
self._accessed = True
if self.data is None and self.sid:
self.__retrieve_data()
def get_expiration(self):
"""Returns the timestamp at which this session will expire."""
try:
return int(self.sid[:-33])
except:
return 0
def __make_sid(self, expire_ts=None, ssl_only=False):
"""Returns a new session ID."""
# make a random ID (random.randrange() is 10x faster but less secure?)
if expire_ts is None:
expire_dt = datetime.datetime.now() + self.lifetime
expire_ts = int(time.mktime((expire_dt).timetuple()))
else:
expire_ts = int(expire_ts)
if ssl_only:
sep = 'S'
else:
sep = '_'
return ('%010d' % expire_ts) + sep + hashlib.md5(os.urandom(16)).hexdigest()
@staticmethod
def __encode_data(d):
"""Returns a "pickled+" encoding of d. d values of type db.Model are
protobuf encoded before pickling to minimize CPU usage & data size."""
# separate protobufs so we'll know how to decode (they are just strings)
eP = {} # for models encoded as protobufs
eO = {} # for everything else
for k, v in d.iteritems():
if isinstance(v, db.Model):
eP[k] = db.model_to_protobuf(v)
else:
eO[k] = v
return pickle.dumps((eP, eO), 2)
@staticmethod
def __decode_data(pdump):
"""Returns a data dictionary after decoding it from "pickled+" form."""
try:
eP, eO = pickle.loads(pdump)
for k, v in eP.iteritems():
eO[k] = db.model_from_protobuf(v)
except Exception, e:
logging.warn("failed to decode session data: %s" % e)
eO = {}
return eO
def regenerate_id(self, expiration_ts=None):
"""Assigns the session a new session ID (data carries over). This
should be called whenever a user authenticates to prevent session
fixation attacks.
``expiration_ts`` - The UNIX timestamp the session will expire at. If
omitted, the session expiration time will not be changed.
"""
if self.sid or expiration_ts is not None:
self.ensure_data_loaded() # ensure we have the data before we delete it
if expiration_ts is None:
expiration_ts = self.get_expiration()
self.__set_sid(self.__make_sid(expiration_ts, self.is_ssl_only()))
self.dirty = True # ensure the data is written to the new session
def start(self, expiration_ts=None, ssl_only=False):
"""Starts a new session. expiration specifies when it will expire. If
expiration is not specified, then self.lifetime will used to
determine the expiration date.
Normally this method does not need to be called directly - a session is
automatically started when the first value is added to the session.
``expiration_ts`` - The UNIX timestamp the session will expire at. If
omitted, the session will expire after the default ``lifetime`` has past
(as specified in ``SessionMiddleware``).
``ssl_only`` - Whether to specify the "Secure" attribute on the cookie
so that the client will ONLY transfer the cookie over a secure channel.
"""
self.dirty = True
self.data = {}
self.__set_sid(self.__make_sid(expiration_ts, ssl_only), True)
def terminate(self, clear_data=True):
"""Deletes the session and its data, and expires the user's cookie."""
if clear_data:
self.__clear_data()
self.sid = None
self.data = {}
self.dirty = False
if self.cookie_keys:
self.cookie_data = '' # trigger the cookies to expire
else:
self.cookie_data = None
def __set_sid(self, sid, make_cookie=True):
"""Sets the session ID, deleting the old session if one existed. The
session's data will remain intact (only the session ID changes)."""
if self.sid:
self.__clear_data()
self.sid = sid
self.db_key = db.Key.from_path(SessionModel.kind(), sid, namespace='')
# set the cookie if requested
if make_cookie:
self.cookie_data = '' # trigger the cookie to be sent
def __clear_data(self):
"""Deletes this session from memcache and the datastore."""
if self.sid:
memcache.delete(self.sid, namespace='') # not really needed; it'll go away on its own
try:
db.delete(self.db_key)
except:
pass # either it wasn't in the db (maybe cookie/memcache-only) or db is down => cron will expire it
def __retrieve_data(self):
"""Sets the data associated with this session after retrieving it from
memcache or the datastore. Assumes self.sid is set. Checks for session
expiration after getting the data."""
pdump = memcache.get(self.sid, namespace='')
if pdump is None:
# memcache lost it, go to the datastore
if self.no_datastore:
logging.info("can't find session data in memcache for sid=%s (using memcache only sessions)" % self.sid)
self.terminate(False) # we lost it; just kill the session
return
session_model_instance = db.get(self.db_key)
if session_model_instance:
pdump = session_model_instance.pdump
else:
logging.error("can't find session data in the datastore for sid=%s" % self.sid)
self.terminate(False) # we lost it; just kill the session
return
self.data = self.__decode_data(pdump)
def save(self, persist_even_if_using_cookie=False):
"""Saves the data associated with this session IF any changes have been
made (specifically, if any mutator methods like __setitem__ or the like
is called).
If the data is small enough it will be sent back to the user in a cookie
instead of using memcache and the datastore. If `persist_even_if_using_cookie`
evaluates to True, memcache and the datastore will also be used. If the
no_datastore option is set, then the datastore will never be used.
Normally this method does not need to be called directly - a session is
automatically saved at the end of the request if any changes were made.
"""
if not self.sid:
return # no session is active
if not self.dirty:
return # nothing has changed
dirty = self.dirty
self.dirty = False # saving, so it won't be dirty anymore
# do the pickling ourselves b/c we need it for the datastore anyway
pdump = self.__encode_data(self.data)
# persist via cookies if it is reasonably small
if len(pdump) * 4 / 3 <= self.cookie_only_thresh: # 4/3 b/c base64 is ~33% bigger
self.cookie_data = pdump
if not persist_even_if_using_cookie:
return
elif self.cookie_keys:
# latest data will only be in the backend, so expire data cookies we set
self.cookie_data = ''
memcache.set(self.sid, pdump, namespace='', time=self.get_expiration()) # may fail if memcache is down
# persist the session to the datastore
if dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB or self.no_datastore:
return
try:
SessionModel(key_name=self.sid, pdump=pdump).put()
except Exception, e:
logging.warning("unable to persist session to datastore for sid=%s (%s)" % (self.sid, e))
# Users may interact with the session through a dictionary-like interface.
def clear(self):
"""Removes all data from the session (but does not terminate it)."""
if self.sid:
self.data = {}
self.dirty = True
def get(self, key, default=None):
"""Retrieves a value from the session."""
self.ensure_data_loaded()
return self.data.get(key, default)
def has_key(self, key):
"""Returns True if key is set."""
self.ensure_data_loaded()
return key in self.data
def pop(self, key, default=None):
"""Removes key and returns its value, or default if key is not present."""
self.ensure_data_loaded()
self.dirty = True
return self.data.pop(key, default)
def pop_quick(self, key, default=None):
"""Removes key and returns its value, or default if key is not present.
The change will only be persisted to memcache until another change
necessitates a write to the datastore."""
self.ensure_data_loaded()
if self.dirty is False:
self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB
return self.data.pop(key, default)
def set_quick(self, key, value):
"""Set a value named key on this session. The change will only be
persisted to memcache until another change necessitates a write to the
datastore. This will start a session if one is not already active."""
dirty = self.dirty
self[key] = value
if dirty is False or dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB:
self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB
def __getitem__(self, key):
"""Returns the value associated with key on this session."""
self.ensure_data_loaded()
return self.data.__getitem__(key)
def __setitem__(self, key, value):
"""Set a value named key on this session. This will start a session if
one is not already active."""
self.ensure_data_loaded()
if not self.sid:
self.start()
self.data.__setitem__(key, value)
self.dirty = True
def __delitem__(self, key):
"""Deletes the value associated with key on this session."""
self.ensure_data_loaded()
self.data.__delitem__(key)
self.dirty = True
def __iter__(self):
"""Returns an iterator over the keys (names) of the stored values."""
self.ensure_data_loaded()
return self.data.iterkeys()
def __contains__(self, key):
"""Returns True if key is present on this session."""
self.ensure_data_loaded()
return self.data.__contains__(key)
def __str__(self):
"""Returns a string representation of the session."""
if self.sid:
self.ensure_data_loaded()
return "SID=%s %s" % (self.sid, self.data)
else:
return "uninitialized session"
class SessionMiddleware(object):
"""WSGI middleware that adds session support.
``cookie_key`` - A key used to secure cookies so users cannot modify their
content. Keys should be at least 32 bytes (RFC2104). Tip: generate your
key using ``os.urandom(64)`` but do this OFFLINE and copy/paste the output
into a string which you pass in as ``cookie_key``. If you use ``os.urandom()``
to dynamically generate your key at runtime then any existing sessions will
become junk every time your app starts up!
``lifetime`` - ``datetime.timedelta`` that specifies how long a session may last. Defaults to 7 days.
``no_datastore`` - By default all writes also go to the datastore in case
memcache is lost. Set to True to never use the datastore. This improves
write performance but sessions may be occassionally lost.
``cookie_only_threshold`` - A size in bytes. If session data is less than this
threshold, then session data is kept only in a secure cookie. This avoids
memcache/datastore latency which is critical for small sessions. Larger
sessions are kept in memcache+datastore instead. Defaults to 10KB.
"""
def __init__(self, app, cookie_key, lifetime=DEFAULT_LIFETIME, no_datastore=False,
cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH):
self.app = app
self.lifetime = lifetime
self.no_datastore = no_datastore
self.cookie_only_thresh = cookie_only_threshold
self.cookie_key = cookie_key
if not self.cookie_key:
raise ValueError("cookie_key MUST be specified")
if len(self.cookie_key) < 32:
raise ValueError(
"RFC2104 recommends you use at least a 32 character key. Try os.urandom(64) to make a key.")
def __call__(self, environ, start_response):
# initialize a session for the current user
_tls.current_session = Session(lifetime=self.lifetime, no_datastore=self.no_datastore,
cookie_only_threshold=self.cookie_only_thresh, cookie_key=self.cookie_key)
# create a hook for us to insert a cookie into the response headers
def my_start_response(status, headers, exc_info=None):
_tls.current_session.save() # store the session if it was changed
for ch in _tls.current_session.make_cookie_headers():
headers.append(('Set-Cookie', ch))
return start_response(status, headers, exc_info)
# let the app do its thing
return self.app(environ, my_start_response)
class DjangoSessionMiddleware(object):
"""Django middleware that adds session support. You must specify the
session configuration parameters by modifying the call to ``SessionMiddleware``
in ``DjangoSessionMiddleware.__init__()`` since Django cannot call an
initialization method with parameters.
"""
def __init__(self):
fake_app = lambda environ, start_response: start_response
self.wrapped_wsgi_middleware = SessionMiddleware(fake_app, cookie_key='you MUST change this')
self.response_handler = None
def process_request(self, request):
self.response_handler = self.wrapped_wsgi_middleware(None, lambda status, headers, exc_info: headers)
request.session = get_current_session() # for convenience
def process_response(self, request, response):
if self.response_handler:
session_headers = self.response_handler(None, [], None)
for k, v in session_headers:
response[k] = v
self.response_handler = None
if hasattr(request, 'session') and request.session.is_accessed():
from django.utils.cache import patch_vary_headers
logging.info("Varying")
patch_vary_headers(response, ('Cookie',))
return response
def delete_expired_sessions():
"""Deletes expired sessions from the datastore.
If there are more than 500 expired sessions, only 500 will be removed.
Returns True if all expired sessions have been removed.
"""
now_str = unicode(int(time.time()))
q = db.Query(SessionModel, keys_only=True, namespace='')
key = db.Key.from_path('SessionModel', now_str + u'\ufffd', namespace='')
q.filter('__key__ < ', key)
results = q.fetch(500)
db.delete(results)
logging.info('gae-sessions: deleted %d expired sessions from the datastore' % len(results))
return len(results) < 500
| {
"repo_name": "rhefner1/ghidonations",
"path": "gaesessions/__init__.py",
"copies": "1",
"size": "21922",
"license": "apache-2.0",
"hash": -3185394978976203300,
"line_mean": 40.8358778626,
"line_max": 123,
"alpha_frac": 0.6182373871,
"autogenerated": false,
"ratio": 4.061133753241942,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5179371140341942,
"avg_score": null,
"num_lines": null
} |
"""A fast, lightweight, and secure session WSGI middleware for use with GAE."""
from Cookie import CookieError, SimpleCookie
from base64 import b64decode, b64encode
import datetime
import hashlib
import hmac
import logging
import pickle
import os
import threading
import time
from google.appengine.api import memcache
from google.appengine.ext import db
# Configurable cookie options
COOKIE_NAME_PREFIX = "DgU" # identifies a cookie as being one used by gae-sessions (so you can set cookies too)
COOKIE_PATH = "/"
DEFAULT_COOKIE_ONLY_THRESH = 10240 # 10KB: GAE only allows ~16000B in HTTP header - leave ~6KB for other info
DEFAULT_LIFETIME = datetime.timedelta(days=7)
# constants
SID_LEN = 43 # timestamp (10 chars) + underscore + md5 (32 hex chars)
SIG_LEN = 44 # base 64 encoded HMAC-SHA256
MAX_COOKIE_LEN = 4096
EXPIRE_COOKIE_FMT = ' %s=; expires=Wed, 01-Jan-1970 00:00:00 GMT; Path=' + COOKIE_PATH
COOKIE_FMT = ' ' + COOKIE_NAME_PREFIX + '%02d="%s"; %sPath=' + COOKIE_PATH + '; HttpOnly'
COOKIE_FMT_SECURE = COOKIE_FMT + '; Secure'
COOKIE_DATE_FMT = '%a, %d-%b-%Y %H:%M:%S GMT'
COOKIE_OVERHEAD = len(COOKIE_FMT % (0, '', '')) + len('expires=Xxx, xx XXX XXXX XX:XX:XX GMT; ') + 150
# 150=safety margin (e.g., in case browser uses 4000 instead of 4096)
MAX_DATA_PER_COOKIE = MAX_COOKIE_LEN - COOKIE_OVERHEAD
_tls = threading.local()
def get_current_session():
"""Returns the session associated with the current request."""
return _tls.current_session
def set_current_session(session):
"""Sets the session associated with the current request."""
_tls.current_session = session
def is_gaesessions_key(k):
return k.startswith(COOKIE_NAME_PREFIX)
class SessionModel(db.Model):
"""Contains session data. key_name is the session ID and pdump contains a
pickled dictionary which maps session variables to their values."""
pdump = db.BlobProperty()
class Session(object):
"""Manages loading, reading/writing key-value pairs, and saving of a session.
``sid`` - if set, then the session for that sid (if any) is loaded. Otherwise,
sid will be loaded from the HTTP_COOKIE (if any).
"""
DIRTY_BUT_DONT_PERSIST_TO_DB = 1
def __init__(self, sid=None, lifetime=DEFAULT_LIFETIME, no_datastore=False,
cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH, cookie_key=None):
self._accessed = False
self.sid = None
self.cookie_keys = []
self.cookie_data = None
self.data = {}
self.dirty = False # has the session been changed?
self.lifetime = lifetime
self.no_datastore = no_datastore
self.cookie_only_thresh = cookie_only_threshold
self.base_key = cookie_key
if sid:
self.__set_sid(sid, False)
self.data = None
else:
self.__read_cookie()
@staticmethod
def __compute_hmac(base_key, sid, text):
"""Computes the signature for text given base_key and sid."""
key = base_key + sid
return b64encode(hmac.new(key, text, hashlib.sha256).digest())
def __read_cookie(self):
"""Reads the HTTP Cookie and loads the sid and data from it (if any)."""
try:
# check the cookie to see if a session has been started
cookie = SimpleCookie(os.environ['HTTP_COOKIE'])
self.cookie_keys = filter(is_gaesessions_key, cookie.keys())
if not self.cookie_keys:
return # no session yet
self.cookie_keys.sort()
data = ''.join(cookie[k].value for k in self.cookie_keys)
i = SIG_LEN + SID_LEN
sig, sid, b64pdump = data[:SIG_LEN], data[SIG_LEN:i], data[i:]
pdump = b64decode(b64pdump)
actual_sig = Session.__compute_hmac(self.base_key, sid, pdump)
if sig == actual_sig:
self.__set_sid(sid, False)
# check for expiration and terminate the session if it has expired
if self.get_expiration() != 0 and time.time() > self.get_expiration():
return self.terminate()
if pdump:
self.data = self.__decode_data(pdump)
else:
self.data = None # data is in memcache/db: load it on-demand
else:
logging.warn('cookie with invalid sig received from %s: %s' % (os.environ.get('REMOTE_ADDR'), b64pdump))
except (CookieError, KeyError, IndexError, TypeError):
# there is no cookie (i.e., no session) or the cookie is invalid
self.terminate(False)
def make_cookie_headers(self):
"""Returns a list of cookie headers to send (if any)."""
# expire all cookies if the session has ended
if not self.sid:
return [EXPIRE_COOKIE_FMT % k for k in self.cookie_keys]
if self.cookie_data is None:
return [] # no cookie headers need to be sent
# build the cookie header(s): includes sig, sid, and cookie_data
if self.is_ssl_only():
m = MAX_DATA_PER_COOKIE - 8
fmt = COOKIE_FMT_SECURE
else:
m = MAX_DATA_PER_COOKIE
fmt = COOKIE_FMT
sig = Session.__compute_hmac(self.base_key, self.sid, self.cookie_data)
cv = sig + self.sid + b64encode(self.cookie_data)
num_cookies = 1 + (len(cv) - 1) / m
if self.get_expiration() > 0:
ed = "expires=%s; " % datetime.datetime.fromtimestamp(self.get_expiration()).strftime(COOKIE_DATE_FMT)
else:
ed = ''
cookies = [fmt % (i, cv[i * m:i * m + m], ed) for i in xrange(num_cookies)]
# expire old cookies which aren't needed anymore
old_cookies = xrange(num_cookies, len(self.cookie_keys))
key = COOKIE_NAME_PREFIX + '%02d'
cookies_to_ax = [EXPIRE_COOKIE_FMT % (key % i) for i in old_cookies]
return cookies + cookies_to_ax
def is_active(self):
"""Returns True if this session is active (i.e., it has been assigned a
session ID and will be or has been persisted)."""
return self.sid is not None
def is_ssl_only(self):
"""Returns True if cookies set by this session will include the "Secure"
attribute so that the client will only send them over a secure channel
like SSL)."""
return self.sid is not None and self.sid[-33] == 'S'
def is_accessed(self):
"""Returns True if any value of this session has been accessed."""
return self._accessed
def ensure_data_loaded(self):
"""Fetch the session data if it hasn't been retrieved it yet."""
self._accessed = True
if self.data is None and self.sid:
self.__retrieve_data()
def get_expiration(self):
"""Returns the timestamp at which this session will expire."""
try:
return int(self.sid[:-33])
except:
return 0
def __make_sid(self, expire_ts=None, ssl_only=False):
"""Returns a new session ID."""
# make a random ID (random.randrange() is 10x faster but less secure?)
if expire_ts is None:
expire_dt = datetime.datetime.now() + self.lifetime
expire_ts = int(time.mktime(expire_dt.timetuple()))
else:
expire_ts = int(expire_ts)
if ssl_only:
sep = 'S'
else:
sep = '_'
return ('%010d' % expire_ts) + sep + hashlib.md5(os.urandom(16)).hexdigest()
@staticmethod
def __encode_data(d):
"""Returns a "pickled+" encoding of d. d values of type db.Model are
protobuf encoded before pickling to minimize CPU usage & data size."""
# separate protobufs so we'll know how to decode (they are just strings)
eP = {} # for models encoded as protobufs
eO = {} # for everything else
for k, v in d.iteritems():
if isinstance(v, db.Model):
eP[k] = db.model_to_protobuf(v)
else:
eO[k] = v
return pickle.dumps((eP, eO), 2)
@staticmethod
def __decode_data(pdump):
"""Returns a data dictionary after decoding it from "pickled+" form."""
try:
eP, eO = pickle.loads(pdump)
for k, v in eP.iteritems():
eO[k] = db.model_from_protobuf(v)
except Exception, e:
logging.warn("failed to decode session data: %s" % e)
eO = {}
return eO
def regenerate_id(self, expiration_ts=None):
"""Assigns the session a new session ID (data carries over). This
should be called whenever a user authenticates to prevent session
fixation attacks.
``expiration_ts`` - The UNIX timestamp the session will expire at. If
omitted, the session expiration time will not be changed.
"""
if self.sid or expiration_ts is not None:
self.ensure_data_loaded() # ensure we have the data before we delete it
if expiration_ts is None:
expiration_ts = self.get_expiration()
self.__set_sid(self.__make_sid(expiration_ts, self.is_ssl_only()))
self.dirty = True # ensure the data is written to the new session
def start(self, expiration_ts=None, ssl_only=False):
"""Starts a new session. expiration specifies when it will expire. If
expiration is not specified, then self.lifetime will used to
determine the expiration date.
Normally this method does not need to be called directly - a session is
automatically started when the first value is added to the session.
``expiration_ts`` - The UNIX timestamp the session will expire at. If
omitted, the session will expire after the default ``lifetime`` has past
(as specified in ``SessionMiddleware``).
``ssl_only`` - Whether to specify the "Secure" attribute on the cookie
so that the client will ONLY transfer the cookie over a secure channel.
"""
self.dirty = True
self.data = {}
self.__set_sid(self.__make_sid(expiration_ts, ssl_only), True)
def terminate(self, clear_data=True):
"""Deletes the session and its data, and expires the user's cookie."""
if clear_data:
self.__clear_data()
self.sid = None
self.data = {}
self.dirty = False
if self.cookie_keys:
self.cookie_data = '' # trigger the cookies to expire
else:
self.cookie_data = None
def __set_sid(self, sid, make_cookie=True):
"""Sets the session ID, deleting the old session if one existed. The
session's data will remain intact (only the session ID changes)."""
if self.sid:
self.__clear_data()
self.sid = sid
self.db_key = db.Key.from_path(SessionModel.kind(), sid, namespace='')
# set the cookie if requested
if make_cookie:
self.cookie_data = '' # trigger the cookie to be sent
def __clear_data(self):
"""Deletes this session from memcache and the datastore."""
if self.sid:
memcache.delete(self.sid, namespace='') # not really needed; it'll go away on its own
try:
db.delete(self.db_key)
except:
pass # either it wasn't in the db (maybe cookie/memcache-only) or db is down => cron will expire it
def __retrieve_data(self):
"""Sets the data associated with this session after retrieving it from
memcache or the datastore. Assumes self.sid is set. Checks for session
expiration after getting the data."""
pdump = memcache.get(self.sid, namespace='')
if pdump is None:
# memcache lost it, go to the datastore
if self.no_datastore:
logging.info("can't find session data in memcache for sid=%s (using memcache only sessions)" % self.sid)
self.terminate(False) # we lost it; just kill the session
return
session_model_instance = db.get(self.db_key)
if session_model_instance:
pdump = session_model_instance.pdump
else:
logging.error("can't find session data in the datastore for sid=%s" % self.sid)
self.terminate(False) # we lost it; just kill the session
return
self.data = self.__decode_data(pdump)
def save(self, persist_even_if_using_cookie=False):
"""Saves the data associated with this session IF any changes have been
made (specifically, if any mutator methods like __setitem__ or the like
is called).
If the data is small enough it will be sent back to the user in a cookie
instead of using memcache and the datastore. If `persist_even_if_using_cookie`
evaluates to True, memcache and the datastore will also be used. If the
no_datastore option is set, then the datastore will never be used.
Normally this method does not need to be called directly - a session is
automatically saved at the end of the request if any changes were made.
"""
if not self.sid:
return # no session is active
if not self.dirty:
return # nothing has changed
dirty = self.dirty
self.dirty = False # saving, so it won't be dirty anymore
# do the pickling ourselves b/c we need it for the datastore anyway
pdump = self.__encode_data(self.data)
# persist via cookies if it is reasonably small
if len(pdump) * 4 / 3 <= self.cookie_only_thresh: # 4/3 b/c base64 is ~33% bigger
self.cookie_data = pdump
if not persist_even_if_using_cookie:
return
elif self.cookie_keys:
# latest data will only be in the backend, so expire data cookies we set
self.cookie_data = ''
memcache.set(self.sid, pdump, namespace='', time=self.get_expiration()) # may fail if memcache is down
# persist the session to the datastore
if dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB or self.no_datastore:
return
try:
SessionModel(key_name=self.sid, pdump=pdump).put()
except Exception, e:
logging.warning("unable to persist session to datastore for sid=%s (%s)" % (self.sid, e))
# Users may interact with the session through a dictionary-like interface.
def clear(self):
"""Removes all data from the session (but does not terminate it)."""
if self.sid:
self.data = {}
self.dirty = True
def get(self, key, default=None):
"""Retrieves a value from the session."""
self.ensure_data_loaded()
return self.data.get(key, default)
def has_key(self, key):
"""Returns True if key is set."""
self.ensure_data_loaded()
return key in self.data
def pop(self, key, default=None):
"""Removes key and returns its value, or default if key is not present."""
self.ensure_data_loaded()
self.dirty = True
return self.data.pop(key, default)
def pop_quick(self, key, default=None):
"""Removes key and returns its value, or default if key is not present.
The change will only be persisted to memcache until another change
necessitates a write to the datastore."""
self.ensure_data_loaded()
if self.dirty is False:
self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB
return self.data.pop(key, default)
def set_quick(self, key, value):
"""Set a value named key on this session. The change will only be
persisted to memcache until another change necessitates a write to the
datastore. This will start a session if one is not already active."""
dirty = self.dirty
self[key] = value
if dirty is False or dirty is Session.DIRTY_BUT_DONT_PERSIST_TO_DB:
self.dirty = Session.DIRTY_BUT_DONT_PERSIST_TO_DB
def __getitem__(self, key):
"""Returns the value associated with key on this session."""
self.ensure_data_loaded()
return self.data.__getitem__(key)
def __setitem__(self, key, value):
"""Set a value named key on this session. This will start a session if
one is not already active."""
self.ensure_data_loaded()
if not self.sid:
self.start()
self.data.__setitem__(key, value)
self.dirty = True
def __delitem__(self, key):
"""Deletes the value associated with key on this session."""
self.ensure_data_loaded()
self.data.__delitem__(key)
self.dirty = True
def __iter__(self):
"""Returns an iterator over the keys (names) of the stored values."""
self.ensure_data_loaded()
return self.data.iterkeys()
def __contains__(self, key):
"""Returns True if key is present on this session."""
self.ensure_data_loaded()
return self.data.__contains__(key)
def __str__(self):
"""Returns a string representation of the session."""
if self.sid:
self.ensure_data_loaded()
return "SID=%s %s" % (self.sid, self.data)
else:
return "uninitialized session"
class SessionMiddleware(object):
"""WSGI middleware that adds session support.
``cookie_key`` - A key used to secure cookies so users cannot modify their
content. Keys should be at least 32 bytes (RFC2104). Tip: generate your
key using ``os.urandom(64)`` but do this OFFLINE and copy/paste the output
into a string which you pass in as ``cookie_key``. If you use ``os.urandom()``
to dynamically generate your key at runtime then any existing sessions will
become junk every time your app starts up!
``lifetime`` - ``datetime.timedelta`` that specifies how long a session may last. Defaults to 7 days.
``no_datastore`` - By default all writes also go to the datastore in case
memcache is lost. Set to True to never use the datastore. This improves
write performance but sessions may be occassionally lost.
``cookie_only_threshold`` - A size in bytes. If session data is less than this
threshold, then session data is kept only in a secure cookie. This avoids
memcache/datastore latency which is critical for small sessions. Larger
sessions are kept in memcache+datastore instead. Defaults to 10KB.
"""
def __init__(self, app, cookie_key, lifetime=DEFAULT_LIFETIME, no_datastore=False,
cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH):
self.app = app
self.lifetime = lifetime
self.no_datastore = no_datastore
self.cookie_only_thresh = cookie_only_threshold
self.cookie_key = cookie_key
if not self.cookie_key:
raise ValueError("cookie_key MUST be specified")
if len(self.cookie_key) < 32:
raise ValueError("RFC2104 recommends you use at least a 32 character key. "
"Try os.urandom(64) to make a key.")
def __call__(self, environ, start_response):
# initialize a session for the current user
_tls.current_session = Session(lifetime=self.lifetime, no_datastore=self.no_datastore,
cookie_only_threshold=self.cookie_only_thresh, cookie_key=self.cookie_key)
# create a hook for us to insert a cookie into the response headers
def my_start_response(status, headers, exc_info=None):
_tls.current_session.save() # store the session if it was changed
for ch in _tls.current_session.make_cookie_headers():
headers.append(('Set-Cookie', ch))
return start_response(status, headers, exc_info)
# let the app do its thing
return self.app(environ, my_start_response)
class DjangoSessionMiddleware(object):
"""Django middleware that adds session support. You must specify the
session configuration parameters by modifying the call to ``SessionMiddleware``
in ``DjangoSessionMiddleware.__init__()`` since Django cannot call an
initialization method with parameters.
"""
def __init__(self):
fake_app = lambda environ, start_response: start_response
self.wrapped_wsgi_middleware = SessionMiddleware(fake_app, cookie_key='you MUST change this')
self.response_handler = None
def process_request(self, request):
self.response_handler = self.wrapped_wsgi_middleware(None, lambda status, headers, exc_info: headers)
request.session = get_current_session() # for convenience
def process_response(self, request, response):
if self.response_handler:
session_headers = self.response_handler(None, [], None)
for k, v in session_headers:
response[k] = v
self.response_handler = None
if hasattr(request, 'session') and request.session.is_accessed():
from django.utils.cache import patch_vary_headers
logging.info("Varying")
patch_vary_headers(response, ('Cookie',))
return response
def delete_expired_sessions():
"""Deletes expired sessions from the datastore.
If there are more than 500 expired sessions, only 500 will be removed.
Returns True if all expired sessions have been removed.
"""
now_str = unicode(int(time.time()))
q = db.Query(SessionModel, keys_only=True, namespace='')
key = db.Key.from_path('SessionModel', now_str + u'\ufffd', namespace='')
q.filter('__key__ < ', key)
results = q.fetch(500)
db.delete(results)
logging.info('gae-sessions: deleted %d expired sessions from the datastore' % len(results))
return len(results) < 500
| {
"repo_name": "rice-apps/petition-app",
"path": "authentication/gaesessions.py",
"copies": "1",
"size": "21927",
"license": "mit",
"hash": 4062767842706723000,
"line_mean": 41.0057471264,
"line_max": 120,
"alpha_frac": 0.6180964108,
"autogenerated": false,
"ratio": 4.062812673707615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5180909084507616,
"avg_score": null,
"num_lines": null
} |
""" AFDKOPython copyCFFCharstrings.py -src <file1> -dst <file2>[,file3,...filen_n]
Copies the CFF charstrings and subrs from src to dst fonts.
"""
__help__ = """AFDKOPython copyCFFCharstrings.py -src <file1> -dst <file2>
Copies the CFF charstrings and subrs from src to dst.
v1.002 Aug 27 1014
"""
import os
import sys
import re
import struct
import subprocess
from fontTools.ttLib import TTFont, getTableModule, TTLibError
import traceback
class LocalError(TypeError):
pass
def getOpts():
srcPath = None
dstPath = None
i = 0
args = sys.argv[1:]
while i < len(args):
arg = args[i]
i += 1
if arg =="-dst":
dstPath = args[i]
i += 1
elif arg == "-src":
srcPath = args[i]
i += 1
else:
print "Did not recognize argument: ", arg
print __help__
raise LocalError()
if not (srcPath and dstPath):
print "You must supply source and destination font paths."
print __help__
raise LocalError()
if not os.path.exists(srcPath):
print "Source path does not exist:", srcPath
raise LocalError()
dstPathList = dstPath.split(",")
for dstPath in dstPathList:
if not os.path.exists(dstPath):
print "Destination path does not exist:", dstPath
raise LocalError()
return srcPath, dstPathList
def makeTempOTF(srcPath):
ff = file(srcPath, "rb")
data = ff.read()
ff.close()
try:
ttFont = TTFont()
cffModule = getTableModule('CFF ')
cffTable = cffModule.table_C_F_F_('CFF ')
ttFont['CFF '] = cffTable
cffTable.decompile(data, ttFont)
except:
print "\t%s" %(traceback.format_exception_only(sys.exc_type, sys.exc_value)[-1])
print "Attempted to read font %s as CFF." % filePath
raise LocalError("Error parsing font file <%s>." % filePath)
return ttFont
def getTTFontCFF(filePath):
isOTF = True
try:
ttFont = TTFont(filePath)
except (IOError, OSError):
raise LocalError("Error opening or reading from font file <%s>." % filePath)
except TTLibError:
# Maybe it is a CFF. Make a dummy TTF font for fontTools to work with.
ttFont = makeTempOTF(filePath)
isOTF = False
try:
cffTable = ttFont['CFF ']
except KeyError:
raise LocalError("Error: font is not a CFF font <%s>." % filePath)
return ttFont, cffTable.cff, isOTF
def validatePD(srcPD, dstPD):
# raise LocalError if the hints differ.
for key in ["BlueScale", "BlueShift", "BlueFuzz", "BlueValues", "OtherBlues", "FamilyBlues", "FamilyOtherBlues", "StemSnapH", "StemSnapV", "StdHW", "StdVW", "ForceBold", "LanguageGroup"]:
err = 0
if dstPD.rawDict.has_key(key):
if not srcPD.rawDict.has_key(key):
err = 1
else:
srcVal = eval("srcPD.%s" % (key))
dstVal = eval("dstPD.%s" % (key))
if (srcVal != dstVal):
err = 1
elif srcPD.rawDict.has_key(key):
err = 1
if err:
break
if err:
print "Quitting. FDArray Private hint info does not match for FD[%s]." % (i)
raise LocalError()
return
def copyData(srcPath, dstPath):
srcTTFont, srcCFFTable, srcIsOTF = getTTFontCFF(srcPath)
srcTopDict = srcCFFTable.topDictIndex[0]
dstTTFont, dstCFFTable, dstIsOTF = getTTFontCFF(dstPath)
dstTopDict = dstCFFTable.topDictIndex[0]
# Check that ROS, charset, and hinting parameters all match.
if srcTopDict.ROS != dstTopDict.ROS:
print "Quitting. ROS does not match. src: %s dst: %s." % (srcTopDict.ROS, dstTopDict.ROS)
return
if srcTopDict.CIDCount != dstTopDict.CIDCount:
print "Quitting. CIDCount does not match. src: %s dst: %s." % (srcTopDict.CIDCount, dstTopDict.CIDCount)
return
if srcTopDict.charset != dstTopDict.charset:
print "Quitting. charset does not match.."
return
numFD = len(srcTopDict.FDArray)
if numFD != len(dstTopDict.FDArray):
print "Quitting. FDArray count does not match. src: %s dst: %s." % (srcTopDict.FDArray.count, dstTopDict.FDArray.count)
return
for i in range(numFD):
srcFD = srcTopDict.FDArray[i]
# srcFD.FontName
srcPD = srcFD.Private
dstFD = dstTopDict.FDArray[i]
dstPD = dstFD.Private
validatePD(srcPD, dstPD) # raises LocalError if the hints differ.
# All is OK. Update the font names.
for i in range(numFD):
srcFD = srcTopDict.FDArray[i]
dstFD = dstTopDict.FDArray[i]
srcFD.FontName = dstFD.FontName
# Update the CID name.
if dstTopDict.rawDict.has_key("version"):
srcTopDict.version = dstTopDict.version
if dstTopDict.rawDict.has_key("Notice"):
srcTopDict.Notice = dstTopDict.Notice
if dstTopDict.rawDict.has_key("Copyright"):
srcTopDict.Copyright = dstTopDict.Copyright
if dstTopDict.rawDict.has_key("FullName"):
srcTopDict.FullName = dstTopDict.FullName
if dstTopDict.rawDict.has_key("FamilyName"):
srcTopDict.FamilyName = dstTopDict.FamilyName
if dstTopDict.rawDict.has_key("Weight"):
srcTopDict.Weight = dstTopDict.Weight
if dstTopDict.rawDict.has_key("UniqueID"):
srcTopDict.UniqueID = dstTopDict.UniqueID
if dstTopDict.rawDict.has_key("XUID"):
srcTopDict.XUID = dstTopDict.XUID
if dstTopDict.rawDict.has_key("CIDFontVersion"):
srcTopDict.CIDFontVersion = dstTopDict.CIDFontVersion
if dstTopDict.rawDict.has_key("CIDFontRevision"):
srcTopDict.CIDFontRevision = dstTopDict.CIDFontRevision
for i in range(len(srcCFFTable.fontNames)):
srcCFFTable.fontNames[i] = dstCFFTable.fontNames[i]
cffTable = srcTTFont['CFF ']
outputFile = dstPath + ".new"
if dstIsOTF:
dstTTFont['CFF '] = cffTable
dstTTFont.save(outputFile)
print "Wrote new OTF file:", outputFile
else:
data = cffTable.compile(dstTTFont)
tf = file(outputFile, "wb")
tf.write(data)
tf.close()
print "Wrote new CFF file:", outputFile
srcTTFont.close()
dstTTFont.close()
def run():
    """Copy CFF charstring data from the source font into every destination font."""
    source_path, destination_paths = getOpts()
    for destination in destination_paths:
        copyData(source_path, destination)
# Script entry point.  The LocalError guard around run() was commented out
# at some point (presumably during debugging), so errors currently propagate.
if __name__ == "__main__":
    #try:
    run()
    #except LocalError:
    # pass
| {
"repo_name": "shannpersand/cooper-type",
"path": "_resources/FDK Adobe/Tools/SharedData/FDKScripts/copyCFFCharstrings.py",
"copies": "1",
"size": "5754",
"license": "cc0-1.0",
"hash": -1566179258001377800,
"line_mean": 25.8878504673,
"line_max": 188,
"alpha_frac": 0.7036843935,
"autogenerated": false,
"ratio": 2.74653937947494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.395022377297494,
"avg_score": null,
"num_lines": null
} |
#! AFDKOPython
import sys, pickle, os
from mutatorMath.ufo.document import DesignSpaceDocumentWriter
def main():
    """Read a pickled family description from stdin and write font.designspace.

    NOTE(review): pickle.load on stdin executes arbitrary code if the input
    is untrusted -- acceptable only because the producing process is trusted.
    The #! suggests the AFDKO Python 2 interpreter; under Python 3 this would
    need sys.stdin.buffer -- confirm which interpreter runs this.
    """
    family = pickle.load(sys.stdin)
    generate_designspace(family, 'font.designspace')
def generate_designspace(family, path):
    """Write a MutatorMath designspace document describing `family`.

    Parameters
    ----------
    family : dict
        Must provide 'working_directory', 'masters', 'styles',
        'has_kerning', and 'output_name' (these are the keys read below).
    path : str
        Designspace file name, resolved relative to the working directory.
    """
    def normalize_path(p):
        # Resolve every path relative to the family's working directory.
        # (Renamed the parameter so it no longer shadows the outer `path`.)
        return os.path.join(family['working_directory'], p)
    doc = DesignSpaceDocumentWriter(normalize_path(path))
    for i, master in enumerate(family['masters']):
        # lib/groups/info are copied from the first master only.
        is_base_master = i == 0
        doc.addSource(
            path = normalize_path(master['path']),
            name = 'master-' + master['name'],
            location = {'weight': master['interpolation_value']},
            copyLib = is_base_master,
            copyGroups = is_base_master,
            copyInfo = is_base_master,
            # muteInfo = False,
            # muteKerning = False,
            # mutedGlyphNames = None,
        )
    for style in family['styles']:
        doc.startInstance(
            name = 'instance-' + style['name'],
            location = {'weight': style['interpolation_value']},
            familyName = family['output_name'],
            styleName = style['name'],
            fileName = normalize_path(style['path']),
            postScriptFontName = style['output_full_name_postscript'],
            # styleMapFamilyName = None,
            # styleMapStyleName = None,
        )
        doc.writeInfo()
        if family['has_kerning']:
            doc.writeKerning()
        doc.endInstance()
    doc.save()
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "mooniak/hindkit",
"path": "hindkit/AFDKOPython/generate_designspace.py",
"copies": "1",
"size": "1566",
"license": "mit",
"hash": -7516400354499871000,
"line_mean": 26,
"line_max": 70,
"alpha_frac": 0.5683269476,
"autogenerated": false,
"ratio": 4.088772845953002,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5157099793553003,
"avg_score": null,
"num_lines": null
} |
# A feature decorator
import functools
FEATURE_CONFIG = {}
class FeatureSetupError(Exception): pass
def feature_setup(config, update=False):
    '''Load the global feature configuration.

    `config` is either a plain dictionary mapping feature names to
    settings dicts, e.g.::

        {'DEFAULT': {'active': True, 'doc': "The default active feature"}}

    or the name of a file whose contents are the same mapping in JSON.

    With ``update=True`` the new entries are merged into the existing
    FEATURE_CONFIG; otherwise the configuration is replaced wholesale.
    '''
    global FEATURE_CONFIG
    if isinstance(config, dict):
        parsed = config
    else:
        import json
        with open(config) as handle:
            parsed = json.load(handle)
    if update:
        FEATURE_CONFIG.update(parsed)
    else:
        FEATURE_CONFIG = parsed
def is_feature_active(feat):
    """Return True when `feat` has a dict entry whose 'active' value is truthy.

    Parameters
    ----------
    feat : hashable
        Feature name to look up in FEATURE_CONFIG.

    Returns
    -------
    bool
        False for unknown features, non-dict config entries, and features
        whose 'active' value is falsy.
    """
    # Single lookup instead of the original double FEATURE_CONFIG.get(feat),
    # and the verbose ``True if ... else False`` collapsed to a boolean
    # expression with identical results.
    entry = FEATURE_CONFIG.get(feat)
    return isinstance(entry, dict) and bool(entry.get('active'))
def is_feature_deactive(*args, **kwargs):
    """Logical inverse of is_feature_active (same arguments)."""
    active = is_feature_active(*args, **kwargs)
    return not active
def feature_with(feat):
    """Decorator factory gating a function behind feature flag `feat`.

    The decorated function runs only when `feat` is active in
    FEATURE_CONFIG; otherwise the call is skipped and None is returned.

    Raises
    ------
    FeatureSetupError
        If the decorated function is called before feature_setup() has
        populated FEATURE_CONFIG.
    """
    def feature_wrapper(func):
        @functools.wraps(func)
        def feature(*args, **kwargs):
            if not FEATURE_CONFIG:
                raise FeatureSetupError
            if is_feature_active(feat):
                return func(*args, **kwargs)
            return None
        return feature
    return feature_wrapper
"repo_name": "cyrilthomas/feature",
"path": "feature.py",
"copies": "1",
"size": "1627",
"license": "mit",
"hash": 3117333696473817000,
"line_mean": 24.8412698413,
"line_max": 103,
"alpha_frac": 0.5617701291,
"autogenerated": false,
"ratio": 4.150510204081633,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5212280333181634,
"avg_score": null,
"num_lines": null
} |
# A feed-forward DNN with 5 hidden layers using sigmoid activations.
import os
import time
import tensorflow as tf
#import ffn
import argparse
from ffn26752l6 import *
device_str = ''
def set_parameters(epochs, minibatch, iterations, device_id):
    """Configure the global TF device string and total minibatch count.

    `iterations` is the number of iterations in each epoch; a negative
    `device_id` selects the CPU, otherwise the given GPU.
    """
    global device_str, numMinibatches
    device_index = int(device_id)
    if device_index >= 0:
        device_str = '/gpu:%d' % device_index
    else:
        # cpus
        device_str = '/cpu:0'
    numMinibatches = epochs * iterations
# Benchmark driver: builds the feed-forward net from ffn26752l6 (imported
# with * above -- FLAGS, featureDim, labelDim, getFakeMinibatch,
# getLossAndAccuracyForSubBatch, printTrainingStats presumably come from
# there; confirm against that module) and times SGD steps on fake data.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--epochs", help="the number of epochs", type=int, default=4)
    parser.add_argument("-b", "--minibatch", help="minibatch size", type=int, default=128)
    parser.add_argument("-i", "--iterations", help="iterations", type=int, default=2)
    parser.add_argument("-d", "--deviceid", help="specified device id", type=int, default=0)
    args = parser.parse_args()
    epochs = args.epochs
    minibatch = args.minibatch
    iterations = args.iterations
    device_id = args.deviceid
    minibatchSize = args.minibatch
    # Sets the module globals `device_str` and `numMinibatches` used below.
    set_parameters(epochs, minibatch, iterations, device_id)
    program_start_time = time.time()
    # Create the model
    if (FLAGS.noInputFeed):
        features, labels = getFakeMinibatch(minibatchSize)
    else:
        features = tf.placeholder("float", [None, featureDim])
        labels = tf.placeholder("float", [None, labelDim])
    config = tf.ConfigProto(allow_soft_placement=True)
    if device_str.find('cpu') >= 0: # cpu version
        # Honor OMP_NUM_THREADS for intra-op parallelism on CPU runs.
        num_threads = os.getenv('OMP_NUM_THREADS', 1)
        print 'num_threads: ', num_threads
        config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
    # Pin the graph to the device chosen by set_parameters().
    with tf.device(device_str):
        crossEntropy, accuracy = getLossAndAccuracyForSubBatch(features, labels)
        trainStep = tf.train.GradientDescentOptimizer(0.01).minimize(crossEntropy)
    # Train
    #sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.logDevicePlacement, allow_soft_placement=True))
    sess = tf.Session(config=config)
    init = tf.initialize_all_variables()
    sess.run(init)
    # Time each training step individually; only the sess.run is timed,
    # minibatch generation is excluded.
    perMinibatchTime = []
    for i in range(numMinibatches):
        if (FLAGS.noInputFeed == False):
            minibatchFeatures, minibatchLabels = getFakeMinibatch(minibatchSize)
        startTime = time.time()
        if (FLAGS.noInputFeed):
            sess.run([trainStep, accuracy])
        else:
            sess.run([trainStep, accuracy], feed_dict={features: minibatchFeatures, labels: minibatchLabels})
        currMinibatchDuration = time.time() - startTime
        perMinibatchTime.append(currMinibatchDuration)
    printTrainingStats(1, minibatchSize, perMinibatchTime)
    program_end_time = time.time()
    #print('Program finished, Total seconds: %s' % (program_end_time - program_start_time))
| {
"repo_name": "hclhkbu/dlbench",
"path": "synthetic/experiments/tensorflow/fc/ffn26752l6bm.py",
"copies": "2",
"size": "3127",
"license": "mit",
"hash": 8270168528972925000,
"line_mean": 35.3604651163,
"line_max": 123,
"alpha_frac": 0.6514230892,
"autogenerated": false,
"ratio": 3.855733662145499,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5507156751345499,
"avg_score": null,
"num_lines": null
} |
# A feed-forward DNN with 5 hidden layers using sigmoid activations.
import time
import tensorflow as tf
import ffn
import argparse
from ffn import *
device_str = ''
def set_parameters(epochs, minibatch, iterations, device_id):
    """Set the global device string and the total number of minibatches.

    `iterations` means the number of iterations in each epoch.  A
    device_id below zero selects the CPU; otherwise '/gpu:<id>'.
    """
    global device_str, numMinibatches
    gpu_index = int(device_id)
    device_str = '/gpu:%d' % gpu_index if gpu_index >= 0 else '/cpu:0'
    numMinibatches = iterations * epochs
# Benchmark driver: builds the feed-forward net from ffn (imported with *
# above) and times SGD steps on fake data.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--epochs", help="the number of epochs", type=int, default=4)
    parser.add_argument("-b", "--minibatch", help="minibatch size", type=int, default=1024)
    parser.add_argument("-i", "--iterations", help="iterations", type=int, default=2)
    parser.add_argument("-d", "--deviceid", help="specified device id", type=int, default=0)
    args = parser.parse_args()
    epochs = args.epochs
    minibatch = args.minibatch
    iterations = args.iterations
    device_id = args.deviceid
    minibatchSize = args.minibatch
    # Sets the module globals `device_str` and `numMinibatches` used below.
    set_parameters(epochs, minibatch, iterations, device_id)
    program_start_time = time.time()
    # Create the model
    if (FLAGS.noInputFeed):
        features, labels = getFakeMinibatch(minibatchSize)
    else:
        features = tf.placeholder("float", [None, featureDim])
        labels = tf.placeholder("float", [None, labelDim])
    # BUGFIX: honor the -d/--deviceid flag.  The device was hard-coded to
    # '/gpu:0', which silently ignored the `device_str` computed by
    # set_parameters() above (the companion ffn26752l6bm.py script already
    # uses `device_str` here).
    with tf.device(device_str):
        crossEntropy, accuracy = getLossAndAccuracyForSubBatch(features, labels)
        trainStep = tf.train.GradientDescentOptimizer(0.01).minimize(crossEntropy)
    # Train
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.logDevicePlacement, allow_soft_placement=True))
    init = tf.initialize_all_variables()
    sess.run(init)
    # Time each training step; minibatch generation is excluded from timing.
    perMinibatchTime = []
    for i in range(numMinibatches):
        if (FLAGS.noInputFeed == False):
            minibatchFeatures, minibatchLabels = getFakeMinibatch(minibatchSize)
        startTime = time.time()
        if (FLAGS.noInputFeed):
            sess.run([trainStep, accuracy])
        else:
            sess.run([trainStep, accuracy], feed_dict={features: minibatchFeatures, labels: minibatchLabels})
        currMinibatchDuration = time.time() - startTime
        perMinibatchTime.append(currMinibatchDuration)
    printTrainingStats(1, minibatchSize, perMinibatchTime)
    program_end_time = time.time()
    #print('Program finished, Total seconds: %s' % (program_end_time - program_start_time))
| {
"repo_name": "linmajia/dlbench",
"path": "synthetic/experiments/tensorflow/fc/ffnbm.py",
"copies": "2",
"size": "2696",
"license": "mit",
"hash": 949856742463735900,
"line_mean": 33.5641025641,
"line_max": 122,
"alpha_frac": 0.6461424332,
"autogenerated": false,
"ratio": 3.982274741506647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5628417174706647,
"avg_score": null,
"num_lines": null
} |
# A feed-forward DNN with 5 hidden layers using sigmoid activations.
import time
import tensorflow as tf
import ffn
from ffn import *
# Benchmark driver with a fixed 1024 minibatch size and no CLI options.
# FLAGS, featureDim, labelDim, getFakeMinibatch, getLossAndAccuracyForSubBatch,
# printTrainingStats and numMinibatches are presumably supplied by
# `from ffn import *` above -- confirm against that module (numMinibatches in
# particular is never defined in this file).
if __name__ == '__main__':
    minibatchSize = 1024
    program_start_time = time.time()
    # Create the model
    if (FLAGS.noInputFeed):
        features, labels = getFakeMinibatch(minibatchSize)
    else:
        features = tf.placeholder("float", [None, featureDim])
        labels = tf.placeholder("float", [None, labelDim])
    crossEntropy, accuracy = getLossAndAccuracyForSubBatch(features, labels)
    trainStep = tf.train.GradientDescentOptimizer(0.01).minimize(crossEntropy)
    # Train
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.logDevicePlacement))
    init = tf.initialize_all_variables()
    sess.run(init)
    # Time each training step; minibatch generation is excluded from timing.
    perMinibatchTime = []
    for i in range(numMinibatches):
        if (FLAGS.noInputFeed == False):
            minibatchFeatures, minibatchLabels = getFakeMinibatch(minibatchSize)
        startTime = time.time()
        if (FLAGS.noInputFeed):
            sess.run([trainStep, accuracy])
        else:
            sess.run([trainStep, accuracy], feed_dict={features: minibatchFeatures, labels: minibatchLabels})
        currMinibatchDuration = time.time() - startTime
        perMinibatchTime.append(currMinibatchDuration)
    printTrainingStats(1, minibatchSize, perMinibatchTime)
    program_end_time = time.time()
    print('Program finished, Total seconds: %s' % (program_end_time - program_start_time))
| {
"repo_name": "hclhkbu/dlbench",
"path": "synthetic/experiments/tensorflow/fc/ffn_exp.py",
"copies": "2",
"size": "1516",
"license": "mit",
"hash": -7083709089150383000,
"line_mean": 31.9565217391,
"line_max": 105,
"alpha_frac": 0.6833773087,
"autogenerated": false,
"ratio": 3.9072164948453607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5590593803545361,
"avg_score": null,
"num_lines": null
} |
# A feed-forward DNN with 5 hidden layers using sigmoid activations.
import time
import tensorflow as tf
#from ffn import *
from ffn26752 import *
# Flat benchmark script (no __main__ guard): runs at import time.
# FLAGS, featureDim, labelDim, getFakeMinibatch, getLossAndAccuracyForSubBatch,
# printTrainingStats and numMinibatches presumably come from
# `from ffn26752 import *` above -- confirm against that module.
minibatchSize = 1024
program_start_time = time.time()
# Create the model
if (FLAGS.noInputFeed):
    features, labels = getFakeMinibatch(minibatchSize)
else:
    features = tf.placeholder("float", [None, featureDim])
    labels = tf.placeholder("float", [None, labelDim])
# Device is hard-coded; this variant always targets the first GPU.
with tf.device('/gpu:0'):
    crossEntropy, accuracy = getLossAndAccuracyForSubBatch(features, labels)
    trainStep = tf.train.GradientDescentOptimizer(0.01).minimize(crossEntropy)
#config = tf.ConfigProto(allow_soft_placement=True)
# Train
sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.logDevicePlacement, allow_soft_placement=True))
init = tf.initialize_all_variables()
sess.run(init)
# Time each training step; minibatch generation is excluded from timing.
perMinibatchTime = []
for i in range(numMinibatches):
    if (FLAGS.noInputFeed == False):
        minibatchFeatures, minibatchLabels = getFakeMinibatch(minibatchSize)
    startTime = time.time()
    if (FLAGS.noInputFeed):
        sess.run([trainStep, accuracy])
    else:
        sess.run([trainStep, accuracy], feed_dict={features: minibatchFeatures, labels: minibatchLabels})
    currMinibatchDuration = time.time() - startTime
    perMinibatchTime.append(currMinibatchDuration)
printTrainingStats(1, minibatchSize, perMinibatchTime)
program_end_time = time.time()
print('Program finished, Total seconds: %s' % (program_end_time - program_start_time))
| {
"repo_name": "hclhkbu/dlbench",
"path": "synthetic/experiments/tensorflow/fc/ffn1080.py",
"copies": "2",
"size": "1501",
"license": "mit",
"hash": -2773150321940359000,
"line_mean": 30.9361702128,
"line_max": 116,
"alpha_frac": 0.7421718854,
"autogenerated": false,
"ratio": 3.5400943396226414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03871983653402012,
"num_lines": 47
} |
"""A few convenience functions to setup the Heisenberg model.
.. math::
H=\sum_{i}\vec{S}_{i}\cdot\vec{S}_{i+1}=
\sum_{i}\left[S^{z}_{i}S^{z}_{i+1}+
\frac{1}{2}\left(S^{\dagger}_{i}S^{-}_{i+1}+
S^{-}_{i}S^{\dagger}_{i+1}\right)\right]
"""
class HeisenbergModel(object):
    """Convenience helpers for building the AF Heisenberg Hamiltonian.

    Populates a `System` with the nearest-neighbor couplings
    S^z S^z + (S^+ S^- + S^- S^+)/2 expressed through the single-site
    operators 's_z', 's_p' and 's_m'.
    """
    def __init__(self):
        super(HeisenbergModel, self).__init__()

    def set_hamiltonian(self, system):
        """Populate `system`'s Hamiltonian with the AF Heisenberg terms.

        Existing terms are cleared first; a pre-existing block
        Hamiltonian ('bh') on either block is carried over.  Use this
        only on newly created `System` objects.

        Parameters
        ----------
        system : a System.
            The System you want to set the Hamiltonian for.
        """
        system.clear_hamiltonian()
        if 'bh' in system.left_block.operators.keys():
            system.add_to_hamiltonian(left_block_op='bh')
        if 'bh' in system.right_block.operators.keys():
            system.add_to_hamiltonian(right_block_op='bh')
        # One bond per neighboring pair of the 4-site superblock,
        # enumerated right to left; a term without an explicit
        # coefficient uses the System default.
        for bond in (2, 1, 0):
            for term in (('s_z', 's_z'), ('s_p', 's_m', .5), ('s_m', 's_p', .5)):
                ops = ['id', 'id', 'id', 'id']
                ops[bond] = term[0]
                ops[bond + 1] = term[1]
                system.add_to_hamiltonian(*(ops + list(term[2:])))

    def set_block_hamiltonian(self, tmp_matrix_for_bh, system):
        """Accumulate the AF Heisenberg block Hamiltonian.

        Parameters
        ----------
        tmp_matrix_for_bh : a numpy array of ndim = 2.
            An auxiliary matrix to keep track of the result.
        system : a System.
            The System you want to set the Hamiltonian for.
        """
        # Carry over a block Hamiltonian if the growing block has one.
        if 'bh' in system.growing_block.operators.keys():
            system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')
        for term in (('s_z', 's_z'), ('s_p', 's_m', .5), ('s_m', 's_p', .5)):
            system.add_to_block_hamiltonian(*((tmp_matrix_for_bh,) + term))

    def set_operators_to_update(self, system):
        """Register the single-site operators to keep updated.

        The block Hamiltonian, although it also needs updating, is
        handled separately by the `System` class itself.
        """
        for op in ('s_z', 's_p', 's_m'):
            system.add_to_operators_to_update(op, site_op=op)
| {
"repo_name": "iglpdc/dmrg101",
"path": "dmrg101/utils/models/heisenberg_model.py",
"copies": "2",
"size": "3156",
"license": "mit",
"hash": 6006273256473236000,
"line_mean": 39.987012987,
"line_max": 77,
"alpha_frac": 0.5773130545,
"autogenerated": false,
"ratio": 2.9801699716713883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9344773743002519,
"avg_score": 0.04254185663377369,
"num_lines": 77
} |
"""A few convenience functions to setup the Hubbard model.
.. math::
H=\sum_{i}\vec{S}_{i}\cdot\vec{S}_{i+1}=
\sum_{i}\left[S^{z}_{i}S^{z}_{i+1}+
\frac{1}{2}\left(S^{\dagger}_{i}S^{-}_{i+1}+
S^{-}_{i}S^{\dagger}_{i+1}\right)\right]
"""
class HubbardModel(object):
    """Implements a few convenience functions for the Hubbard model.

    Terms are expressed through the transformed single-site operators
    'dimer' and 'rprm_{up,down}_{minus,plus}[_dag]'; all coefficients
    are functions of the interaction strength `self.U`.
    """
    def __init__(self):
        super(HubbardModel, self).__init__()
        # Interaction strength U; enters every coefficient below.
        self.U = 0.
    def set_hamiltonian(self, system):
        """Sets a system Hamiltonian to the Hubbard Hamiltonian.

        Does exactly this. If the system hamiltonian has some other terms on
        it, there are not touched. So be sure to use this function only in
        newly created `System` objects.

        Parameters
        ----------
        system : a System.
            The System you want to set the Hamiltonian for.
        """
        system.clear_hamiltonian()
        # Carry over a pre-existing block Hamiltonian from either block.
        if 'bh' in system.left_block.operators.keys():
            system.add_to_hamiltonian(left_block_op='bh')
        if 'bh' in system.right_block.operators.keys():
            system.add_to_hamiltonian(right_block_op='bh')
        # On-site 'dimer' term, coefficient -(1 - U), on each of the four
        # superblock sites.
        system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U))
        system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U))
        system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U))
        system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U))
        # system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U)
        # system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U)
        # system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U)
        # system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U)
        # Bond terms in the 'rprm' basis with coefficients +/-(1 + U)/2,
        # one bond at a time: sites (0,1), then (1,2), then (2,3).
        system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.)
        system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. + self.U)/2.)
        system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.)
        system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.)
        system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.)
        system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.)
        system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.)
        system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.)
        system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)
        system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)
        system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)
        system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
    def set_block_hamiltonian(self, tmp_matrix_for_bh, system):
        """Sets the block Hamiltonian to the Hubbard model block Hamiltonian.

        Parameters
        ----------
        tmp_matrix_for_bh : a numpy array of ndim = 2.
            An auxiliary matrix to keep track of the result.
        system : a System.
            The System you want to set the Hamiltonian for.
        """
        # If you have a block hamiltonian in your block, add it
        if 'bh' in system.growing_block.operators.keys():
            system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')
        # Same on-site and bond terms as in set_hamiltonian, restricted to
        # the two sites of the growing block.
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U))
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. - self.U))
        # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U)
        # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U)
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
        # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'u', self.U)
        # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'u', 'id', self.U)
    def set_operators_to_update(self, system):
        """Sets the operators to update to the ones for the Hubbard model.

        Parameters
        ----------
        system : a System.
            The System you want to set the Hamiltonian for.

        Notes
        -----
        The block Hamiltonian, although it needs to be updated, is treated
        separately by the very functions in the `System` class.
        """
        system.add_to_operators_to_update('rprm_up_plus_dag', site_op='rprm_up_plus_dag')
        system.add_to_operators_to_update('rprm_down_plus_dag', site_op='rprm_down_plus_dag')
        system.add_to_operators_to_update('rprm_up_minus_dag', site_op='rprm_up_minus_dag')
        system.add_to_operators_to_update('rprm_down_minus_dag', site_op='rprm_down_minus_dag')
        system.add_to_operators_to_update('rprm_up_plus', site_op='rprm_up_plus')
        system.add_to_operators_to_update('rprm_down_plus', site_op='rprm_down_plus')
        system.add_to_operators_to_update('rprm_up_minus', site_op='rprm_up_minus')
        system.add_to_operators_to_update('rprm_down_minus', site_op='rprm_down_minus')
        system.add_to_operators_to_update('dimer', site_op='dimer')
        #system.add_to_operators_to_update('u', site_op='u')
| {
"repo_name": "chanul13/hubbard_dimer",
"path": "dmrg101/utils/models/hubbard_model.py",
"copies": "1",
"size": "5799",
"license": "mit",
"hash": 4855553149288449000,
"line_mean": 53.1962616822,
"line_max": 116,
"alpha_frac": 0.5987239179,
"autogenerated": false,
"ratio": 2.6884561891515992,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37871801070515987,
"avg_score": null,
"num_lines": null
} |
"""A few convenience functions to setup the Ising model in a TF.
TFIM stands for Ising model in a transverse field, i.e.:
.. math::
H=\sum_{i}\left[S^{z}_{i}S^{z}_{i+1} + h S^{x}_{i}\right)\right]
"""
class TranverseFieldIsingModel(object):
    """Convenience helpers for building the transverse-field Ising (TFIM)
    Hamiltonian.

    Couples neighboring 's_z' operators with coefficient -1 and applies
    a transverse field of strength `H` through 's_x' on every site.
    """
    def __init__(self, H = 0):
        super(TranverseFieldIsingModel, self).__init__()
        self.H = H

    def set_hamiltonian(self, system):
        """Populate `system`'s Hamiltonian with the TFIM terms.

        Existing terms are cleared first; a pre-existing block
        Hamiltonian ('bh') on either block is carried over.  Use this
        only on newly created `System` objects.

        Parameters
        ----------
        system : a System.
            The System you want to set the Hamiltonian for.
        """
        system.clear_hamiltonian()
        if 'bh' in system.left_block.operators.keys():
            system.add_to_hamiltonian(left_block_op='bh')
        if 'bh' in system.right_block.operators.keys():
            system.add_to_hamiltonian(right_block_op='bh')
        # -S^z S^z bond on each neighboring pair, right to left.
        for bond in (2, 1, 0):
            ops = ['id', 'id', 'id', 'id']
            ops[bond] = ops[bond + 1] = 's_z'
            system.add_to_hamiltonian(ops[0], ops[1], ops[2], ops[3], -1.)
        # Transverse field H * S^x on every site, right to left.
        for site in (3, 2, 1, 0):
            ops = ['id', 'id', 'id', 'id']
            ops[site] = 's_x'
            system.add_to_hamiltonian(ops[0], ops[1], ops[2], ops[3], self.H)

    def set_block_hamiltonian(self, tmp_matrix_for_bh, system):
        """Accumulate the TFIM block Hamiltonian.

        Parameters
        ----------
        tmp_matrix_for_bh : a numpy array of ndim = 2.
            An auxiliary matrix to keep track of the result.
        system : a System.
            The System you want to set the Hamiltonian for.
        """
        # Carry over a block Hamiltonian if the growing block has one.
        if 'bh' in system.growing_block.operators.keys():
            system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z', -1.)
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 's_x', self.H)
        system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_x', 'id', self.H)

    def set_operators_to_update(self, system):
        """Register 's_z' and 's_x' to be kept updated.

        The block Hamiltonian, although it also needs updating, is
        handled separately by the `System` class itself.
        """
        for name in ('s_z', 's_x'):
            system.add_to_operators_to_update(name, site_op=name)
| {
"repo_name": "iglpdc/dmrg101",
"path": "dmrg101/utils/models/tfi_model.py",
"copies": "2",
"size": "2957",
"license": "mit",
"hash": -5009058996563521000,
"line_mean": 39.5068493151,
"line_max": 79,
"alpha_frac": 0.5894487656,
"autogenerated": false,
"ratio": 3.1357370095440085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47251857751440085,
"avg_score": null,
"num_lines": null
} |
# A few convenient math functions for the bicorr project
import matplotlib
#matplotlib.use('agg') # for flux
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='ticks')
import sys
import os
import os.path
import scipy.io as sio
from scipy.optimize import curve_fit
import time
import numpy as np
np.set_printoptions(threshold=np.nan) # print entire matrices
import pandas as pd
from tqdm import *
# Don't import any bicorr modules here
# Other modules will import bicorr_math, but not the other way around
def prop_err_division(num, num_err, denom, denom_err):
    """Divide two uncertain quantities, adding relative errors in quadrature.

    Returns the quotient num/denom and its propagated uncertainty.
    """
    quotient = num / denom
    rel_num = num_err / num
    rel_denom = denom_err / denom
    quotient_err = quotient * np.sqrt(rel_num ** 2 + rel_denom ** 2)
    return quotient, quotient_err
def calc_centers(edges):
"""
Simple method for returning centers from an array of bin edges. Calculates center between each point as difference between containing edges.
Example, plt.plot(bicorr.centers(edges),counts,'.k')
Serves as a shortcode to first producing array of bin centers.
Parameters
----------
edges : ndarray
Array of bin edges
Returns
-------
centers : ndarray
Array of bin edges
"""
return (edges[:-1]+edges[1:])/2
def calc_histogram_mean(bin_edges, counts, print_flag = False, bin_centers_flag = False):
    """Compute the count-weighted mean of a distribution, counts vs. x.

    Uncertainties assume counting statistics, i.e. an error of sqrt(C)
    on each bin's counts.

    Parameters
    ----------
    bin_edges : ndarray
        Bin edges for x (or bin centers when `bin_centers_flag` is set,
        which is handy for 2d histograms).
    counts : ndarray
        Bin counts.
    print_flag : bool
        Print the intermediate sums while computing.
    bin_centers_flag : bool
        Treat `bin_edges` as bin centers instead of edges.

    Returns
    -------
    x_mean : float
    x_mean_err : float
    """
    bin_centers = bin_edges if bin_centers_flag == True else calc_centers(bin_edges)
    weighted_sum = np.sum(np.multiply(bin_centers, counts))
    weighted_sum_err = np.sqrt(np.sum(np.multiply(bin_centers ** 2, counts)))
    total = np.sum(counts)
    total_err = np.sqrt(total)
    if print_flag:
        print('num: ',weighted_sum)
        print('num_err: ',weighted_sum_err)
        print('denom: ',total)
        print('denom_err: ',total_err)
    x_mean = weighted_sum / total
    x_mean_err = x_mean * np.sqrt((weighted_sum_err / weighted_sum) ** 2 + (total_err / total) ** 2)
    if print_flag:
        print('x_mean: ',x_mean)
        print('x_mean_err:',x_mean_err)
    return x_mean, x_mean_err
def convert_energy_to_time(energy, distance = 1.05522):
    '''Convert a neutron energy in MeV to a time of flight in ns.

    Classical kinematics: v = c*sqrt(2E/m), t = d/v.  The default
    distance of 1.05522 m is the mean flight path (changed 6/5/18).

    Parameters
    ----------
    energy : float
        Neutron energy in MeV
    distance : float, optional
        Neutron flight distance in meters

    Returns
    -------
    time : float
        Time of flight of the neutron in ns
    '''
    neutron_mass = 939.565  # MeV/c2
    light_speed = 2.99e8    # m/s
    speed = light_speed * np.sqrt(2 * energy / neutron_mass)
    return np.divide(distance / speed, 1e-9)
def convert_time_to_energy(time, distance = 1.05522):
    '''Convert a neutron time of flight in ns to an energy in MeV.

    Inverse of `convert_energy_to_time`: v = d/t, E = (m/2)(v/c)^2.
    The default distance of 1.05522 m is the mean flight path
    (changed 6/5/18).  For an array of times use, e.g.,
    ``np.asarray([convert_time_to_energy(t) for t in dt_bin_edges[1:]])``.

    Parameters
    ----------
    time : float
        Time of flight of the neutron in ns
    distance : float, optional
        Neutron flight distance in meters

    Returns
    -------
    energy : float
        Neutron energy in MeV
    '''
    neutron_mass = 939.565  # MeV/c2
    light_speed = 2.99e8    # m/s
    speed = distance * 1e9 / time  # ns -> s
    return (neutron_mass / 2) * (speed / light_speed) ** 2
def f_line(x, m, b):
    """Evaluate the straight line y = m*x + b.

    Parameters
    ----------
    x : array
        x values
    m : float
        slope
    b : float
        y-intercept

    Returns
    -------
    y : array
        y values
    """
    return m * x + b
def fit_f_line(x, y, y_err=None, p0=None, bounds=(-np.inf,np.inf)):
    """Fit a straight line y = m*x + b to the data.

    Parameters
    ----------
    x : ndarray
    y : ndarray
    y_err : ndarray, optional
        Per-point uncertainties; defaults to ones.
    p0 : ndarray, optional
        Initial guess for the coefficients.
    bounds : ndarray
        Search bounds for the coefficients.

    Returns
    -------
    m, m_err : float
    b, b_err : float
    """
    if y_err is None:
        y_err = np.ones(x.size)
    # Only data points with a non-zero uncertainty enter the fit.
    keep = np.where(y_err != 0)
    popt, pcov = curve_fit(f_line, x[keep], y[keep], sigma=y_err[keep],
                           p0=p0, absolute_sigma=True, bounds=bounds)
    param_errs = np.sqrt(np.diag(pcov))
    return popt[0], param_errs[0], popt[1], param_errs[1]
| {
"repo_name": "pfschus/fission_bicorrelation",
"path": "scripts/bicorr_math.py",
"copies": "1",
"size": "5188",
"license": "mit",
"hash": -5487174353924035000,
"line_mean": 23.3615023474,
"line_max": 145,
"alpha_frac": 0.5836545875,
"autogenerated": false,
"ratio": 3.560741249142073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9416911394892007,
"avg_score": 0.045496888350013316,
"num_lines": 213
} |
'''A few functions to check whether the data in the estimator file is fine.
'''
from dmrg_helpers.core.dmrg_exceptions import DMRGException
def process_estimator_name(estimator_name):
    '''Split an estimator name into its single-site operators and sites.

    Parameters
    ----------
    name: a string
        One of the entries in the first column of the estimators file
        produced by the DMRG code, e.g. ``"s_z_1*s_z_4"``.

    Returns
    -------
    A 2-tuple ``(operator_names, sites)``: the single-site operator
    names that form the estimator and the sites (as strings) at which
    each of them acts, in order.
    '''
    pairs = [split_into_name_and_site(op) for op in estimator_name.split('*')]
    operator_names = [name for name, _ in pairs]
    sites = [site for _, site in pairs]
    return (operator_names, sites)
def split_into_name_and_site(operator):
    """Separate a single-site operator label into a name and a site.

    A label has the format ``chars_ints``: everything before the last
    underscore is the operator name, everything after it is the site,
    e.g. ``'s_z_3'`` -> ``('s_z', '3')``.

    Parameters
    ----------
    operator: a string.
        The name of a single site operator.

    Returns
    -------
    name: a string
        The name of the single-site operator.
    site: a string
        The site where this operator acts.

    Raises
    ------
    DMRGException if `operator` contains no underscore.
    """
    name, separator, site = operator.rpartition('_')
    if not separator:
        raise DMRGException('Bad operator name')
    return name, site
| {
"repo_name": "iglpdc/dmrg_helpers",
"path": "dmrg_helpers/extract/process_estimator_name.py",
"copies": "1",
"size": "1893",
"license": "mit",
"hash": -7094766706356059000,
"line_mean": 29.0476190476,
"line_max": 79,
"alpha_frac": 0.6455361859,
"autogenerated": false,
"ratio": 4.27313769751693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.541867388341693,
"avg_score": null,
"num_lines": null
} |
"""A few helper functions for serverv-py.
Surprisingly, there aren't common functions to convert
from QBasic times to Python datetimes.
"""
from datetime import datetime, date
from collections import namedtuple
def qb_time_to_datetime(year, yday, hour, minute, doublesecond):
    """Convert a QB-format time (year, day-of-year, h, m, s) to a datetime."""
    # Resolve the 1-based day-of-year into a calendar date.
    calendar_day = date.fromordinal(date(year, 1, 1).toordinal() + yday - 1)
    whole_seconds = int(doublesecond)
    # Carry any fractional second into the microsecond field.
    micros = int(doublesecond % 1 * 1000000)
    # No tzinfo because this datetime is really beta-reality time.
    return datetime(year, calendar_day.month, calendar_day.day,
                    hour, minute, whole_seconds, micros)
RGB = namedtuple('RGB', ['r', 'g', 'b'])


def colour_code_to_rgb(code):
    """Convert a QB colour code (0-15) to an RGB tuple."""
    # These specific RGB values are actually xterm defaults for RGB mappings
    # from ANSI colour codes. They should be pretty close.
    palette = {
        0: RGB(0, 0, 0),            # Black (foreground colours: 0-7)
        1: RGB(0, 0, 0xff),         # Blue
        2: RGB(0, 0xff, 0),         # Green
        3: RGB(0, 0xff, 0xff),      # Cyan
        4: RGB(0xff, 0, 0),         # Red
        5: RGB(0xff, 0, 0xff),      # Purple
        6: RGB(0xff, 0xff, 0),      # Brown/Orange
        7: RGB(0xff, 0xff, 0xff),   # Light Gray (White)
        8: RGB(0x4d, 0x4d, 0x4d),   # Dark Gray (background colours: 8-15)
        9: RGB(0, 0, 0xcd),         # Light Blue
        10: RGB(0, 0xcd, 0),        # Light Green
        11: RGB(0, 0xcd, 0xcd),     # Light Cyan
        12: RGB(0xcd, 0, 0),        # Light Red
        13: RGB(0xcd, 0, 0xcd),     # Light Purple
        14: RGB(0xcd, 0xcd, 0),     # Yellow (Light Orange)
        15: RGB(0xe5, 0xe5, 0xe5),  # White (Light White)
    }
    try:
        return palette[code]
    except (KeyError, TypeError):
        raise ValueError('Read invalid QB colour code from STARSr file!')
| {
"repo_name": "OCESS/serverv-py",
"path": "serverv-py/utility.py",
"copies": "1",
"size": "2216",
"license": "mit",
"hash": 3056297919810087000,
"line_mean": 32.0746268657,
"line_max": 76,
"alpha_frac": 0.5992779783,
"autogenerated": false,
"ratio": 3.3223388305847075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9421616808884707,
"avg_score": 0,
"num_lines": 67
} |
'''A few helper functions for the Thread study.'''
def init_sqlite_file(c):
    '''Set up the tables for the thread study database.

    c is an open sqlite3 cursor.  Creates event_kinds, processes, threads
    and events, and seeds event_kinds with the 'start'/'stop' rows.
    Each CREATE TABLE statement is echoed to stdout.
    '''
    tables = []
    tables.append(('event_kinds',
                   ['id INTEGER PRIMARY KEY', 'name TEXT']))
    tables.append(('processes',
                   ['id INTEGER PRIMARY KEY', 'name TEXT', 'friendly_name TEXT']))
    tables.append(('threads',
                   ['id INTEGER PRIMARY KEY', 'name TEXT', 'process INTEGER',
                    'FOREIGN KEY(process) REFERENCES processes(id)']))
    evs_table = [
        'id INTEGER PRIMARY KEY',
        'thread INTEGER',
        'core INTEGER',
        'kind INTEGER',
        'timestamp INTEGER',
        'FOREIGN KEY(thread) REFERENCES threads(id)',
        'FOREIGN KEY(kind) REFERENCES event_kinds(id)',
    ]
    tables.append(('events', evs_table))
    for (name, columns) in tables:
        # join() replaces the manual first-comma loop; print() works on
        # both Python 2 and 3 (the old `print cmd` statement was py2-only).
        cmd = 'CREATE TABLE ' + name + '\n    (' + ', '.join(columns) + ')'
        print(cmd)
        c.execute(cmd)
    c.execute("INSERT INTO event_kinds VALUES (1,'start')")
    c.execute("INSERT INTO event_kinds VALUES (2,'stop')")
def ensure_thread_in_db(tid, proc_name, friendly_name,
                        threads, processes, c, process_id):
    '''Register thread tid (and its owning process, if new) in the db.

    threads maps tid -> proc_name, processes maps proc_name -> process row
    id; both are updated in place.  c is a sqlite3 cursor.  process_id is
    the next unused process row id; the (possibly incremented) value is
    returned.
    '''
    if tid not in threads:
        if proc_name not in processes:
            # Parameterized query: the old %-interpolation broke on names
            # containing quotes and was injectable.
            c.execute("INSERT INTO processes VALUES (?, ?, ?)",
                      (process_id, proc_name, friendly_name))
            processes[proc_name] = process_id
            process_id += 1
        to_task_id = processes[proc_name]
        # The thread's display name is its numeric id stored as text,
        # matching the original "'%d'" formatting.
        c.execute("INSERT INTO threads VALUES (?, ?, ?)",
                  (tid, str(tid), to_task_id))
        threads[tid] = proc_name
    return process_id
| {
"repo_name": "benjaminy/ThreadMeasurement",
"path": "TraceConverters/thread_study_utils.py",
"copies": "1",
"size": "1881",
"license": "mit",
"hash": 7403315063189546000,
"line_mean": 35.8823529412,
"line_max": 71,
"alpha_frac": 0.5715045189,
"autogenerated": false,
"ratio": 3.8466257668711656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49181302857711656,
"avg_score": null,
"num_lines": null
} |
# a few helpful filter functions
from spitfire.runtime.udn import UndefinedPlaceholder
# decorate a function object so the default filter will not be applied to the
# value of a placeholder. this is handy when building functions that will
# create data that could be double-escaped and you don't want to constantly
# inform spitfire to use raw mode.
def skip_filter(function):
    """Mark *function* so the default output filter skips its return value."""
    # The engine checks for this attribute before applying the default filter.
    function.skip_filter = True
    return function
def passthrough_filter(value):
    """Identity filter: return *value* unchanged."""
    return value
@skip_filter
def escape_html(value, quote=True):
    """Replace special characters '&', '<' and '>' by SGML entities.

    If quote is true, double quotes are escaped as well.  Non-string
    values are first normalized by safe_values() and returned untouched.
    """
    value = safe_values(value)
    if isinstance(value, basestring):
        # BUGFIX: the replacement strings had been corrupted to the literal
        # characters themselves (a no-op, and broken quoting on the '"' line);
        # restore the actual SGML entities.
        value = value.replace("&", "&amp;")  # Must be done first!
        value = value.replace("<", "&lt;")
        value = value.replace(">", "&gt;")
        if quote:
            value = value.replace('"', "&quot;")
    return value
def safe_values(value):
    # Pass primitive values (and UndefinedPlaceholder) through untouched;
    # anything else collapses to the empty string.
    permitted = (str, unicode, int, long, float, UndefinedPlaceholder)
    if isinstance(value, permitted):
        return value
    return ''
# test function for function registry - don't use
@skip_filter
def escape_html_function(value):
    """Function-registry wrapper around escape_html (for testing only)."""
    return escape_html(value)
| {
"repo_name": "eklitzke/spitfire",
"path": "spitfire/runtime/filters.py",
"copies": "1",
"size": "1139",
"license": "bsd-3-clause",
"hash": -609531609762857300,
"line_mean": 29.7837837838,
"line_max": 79,
"alpha_frac": 0.7050043898,
"autogenerated": false,
"ratio": 3.847972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9948088778056996,
"avg_score": 0.02097771694319538,
"num_lines": 37
} |
'''A few methods for dealing with gene annotation from snpEff.'''
__author__ = "dpark@broadinstitute.org"
__version__ = "PLACEHOLDER"
__date__ = "PLACEHOLDER"
import sqlite3, itertools, urllib, logging, re, os
import util.file, util.misc
log = logging.getLogger(__name__)
class SnpAnnotater(object):
    ''' Add annotations to snps based on a snpEff-annotated VCF file.

    Builds a throwaway sqlite database keyed on (chromosome, position) from
    the VCF, then decorates snp row dicts with effect/impact/gene data.
    Can also wrap an iterator of snp rows (snpIterator) and annotate lazily.
    '''

    def __init__(self, snpEffVcf=None, snpIterator=None):
        # snpIterator: optional iterable of snp row dicts, used by __iter__.
        self.snpIterator = snpIterator
        # Per-instance scratch sqlite db file; deleted again in close().
        self.dbFile = util.file.mkstempfname(prefix='SnpAnnotater-', suffix='.db')
        self.conn = sqlite3.connect(self.dbFile, isolation_level='DEFERRED')
        self.cur = self.conn.cursor()
        self.cur.execute("""create table annot (
            chr not null,
            pos integer not null,
            allele_ref not null,
            allele_alt not null,
            effect not null,
            impact not null,
            gene_id,
            gene_name,
            protein_pos integer,
            residue_ref,
            residue_alt
            )""")
        # Lookups in annotate() are always by (chr, pos).
        self.cur.execute("create index idx_annot on annot(chr,pos)")
        if snpEffVcf:
            self.loadVcf(snpEffVcf)

    def loadVcf(self, snpEffVcf):
        '''Bulk-load the annot table from a snpEff-annotated VCF file.'''
        #log.info("reading in snpEff VCF file: %s" % snpEffVcf)
        with util.file.open_or_gzopen(snpEffVcf, 'rt') as inf:
            ffp = util.file.FlatFileParser(inf)
            try:
                imap = hasattr(itertools, 'imap') and itertools.imap or map #py2 & py3 compatibility
                ifilter = hasattr(itertools, 'ifilter') and itertools.ifilter or filter #py2 & py3 compatibility
                # Stream rows straight into sqlite, skipping rows with no ALT allele.
                self.cur.executemany("""insert into annot (chr,pos,allele_ref,allele_alt,
                    effect,impact,gene_id,gene_name,protein_pos,residue_ref,residue_alt)
                    values (?,?,?,?,?,?,?,?,?,?,?)""",
                    imap(lambda row:
                        [row['CHROM'], int(row['POS']), row['REF'], row['ALT']] +
                        parse_eff(row['CHROM'], row['POS'], row['INFO']),
                        ifilter(lambda r: r['ALT'] != '.', ffp)))
            except Exception:
                log.exception("exception processing file %s line %s", snpEffVcf, ffp.line_num)
                raise
        # Positions annotated more than once are ambiguous: drop them entirely.
        self.cur.execute("select chr,pos from annot group by chr,pos having count(*)>1")
        dupes = [(c,p) for c,p in self.cur]
        if dupes:
            log.info("deleting annotation for %d duplicate positions: %s", len(dupes), ', '.join(['%s:%s'%(c,p) for c,p in dupes]))
            self.cur.executemany("delete from annot where chr=? and pos=?", dupes)
        self.conn.commit()

    def __iter__(self):
        # Requires snpIterator to have been supplied at construction time.
        assert self.snpIterator
        for snp in self.snpIterator:
            yield self.annotate(snp)

    def annotate(self, row):
        '''Decorate one snp row dict (must have 'chr' and 'pos' keys) in place
        with annotation columns, and return it.  Positions without annotation
        get effect/impact set to 'UNKNOWN'.'''
        self.cur.execute("""select effect,impact,gene_id,gene_name,protein_pos,
            allele_ref,allele_alt,residue_ref,residue_alt
            from annot where chr=? and pos=?""", [row['chr'], int(row['pos'])])
        x = self.cur.fetchone()
        if x is not None:
            row['effect'],row['impact'],row['gene_id'],row['gene_name'],row['protein_pos'],\
                row['allele_ref'],row['allele_alt'],row['residue_ref'],row['residue_alt'] = x
            # Convenience composite fields: "ref/alt" strings.
            row['alleles'] = '/'.join((row['allele_ref'],row['allele_alt']))
            if row['residue_alt']:
                row['residues'] = '/'.join((row['residue_ref'],row['residue_alt']))
            else:
                row['residues'] = row['residue_ref']
        else:
            row['effect'] = 'UNKNOWN'
            row['impact'] = 'UNKNOWN'
        return row

    def new_fields(self):
        '''Names of the keys that annotate() may add to a row.'''
        return ('effect', 'impact', 'gene_id', 'gene_name', 'protein_pos', 'alleles', 'residues')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return 0

    def close(self):
        # Release sqlite handles and delete the scratch db file.
        self.cur.close()
        self.conn.close()
        os.unlink(self.dbFile)
def parse_eff(chrom, pos, info, required=True):
    '''Parse the snpEff EFF= entry out of a VCF INFO field.

    Returns a 7-element list: [effect, impact, gene_id, gene_name, codon,
    residue_ref, residue_alt].  When several effects are present, only the
    highest-impact one is kept; ties at the same impact level are merged
    with ';'.  chrom/pos are used for error messages only.  If required is
    False, an INFO field without an EFF= entry yields empty strings.
    NOTE(review): assumes snpEff's classic EFF= annotation format and
    gene_ids of the form rna_<id>-1 — confirm against the snpEff version
    used to produce the VCF.
    '''
    try:
        # Lower rank = more severe; used to sort competing effects.
        impact_rank = {'HIGH':0,'MODERATE':1,'LOW':2,'MODIFIER':3}
        infos = [x for x in info.split(';') if x.startswith('EFF=')]
        assert len(infos)<=1
        if not infos:
            assert not required
            return ['', '', '', '', '', '', '']
        info = infos[0]
        # EFF=effect1(field|field|...),effect2(...)  -- strip the "EFF=" prefix.
        effs = info[4:].split(',')
        out = []
        for eff in effs:
            assert eff.endswith(')')
            effect, other = eff[:-1].split('(')
            other = other.split('|')
            assert len(other)>=10
            impact = other[0]
            gene_id = other[8]
            # Gene ids look like rna_<gene>-1; strip the wrapper.
            assert not gene_id or (gene_id.endswith('-1') and gene_id.startswith('rna_'))
            if gene_id:
                gene_id = gene_id[4:-2]
            if gene_id=='PF14_0620' or gene_id=='PF3D7_1465300':
                # Special-case name for this P. falciparum gene.
                gene_name = 'tRNA 3-trailer sequence RNase, putative'
            else:
                try:
                    gene_name = urllib.unquote_plus(other[5]).encode('ascii')
                except UnicodeDecodeError:
                    log.error("error at %s:%s decoding the string '%s'" % (chrom, pos, other[5]))
                    raise
            # Amino-acid change field, e.g. "A123T"; format depends on effect type.
            aa_chg = other[3]
            if aa_chg:
                if effect.startswith('SYNON'):
                    # Synonymous: single residue + codon number, no alt residue.
                    aas = (aa_chg[0], '')
                    codon = int(aa_chg[1:])
                elif effect.startswith('NON_SYNON') or effect.startswith('START_') or effect.startswith('STOP_') or effect.startswith('CODON_'):
                    # ref-residue(s), codon number, alt-residue(s)
                    mo = re.match(r'^(\D+)(\d+)(\D+)$', aa_chg)
                    assert mo, "unrecognized coding change format for %s (%s)" % (effect, aa_chg)
                    aas = (mo.group(1), mo.group(3))
                    codon = int(mo.group(2))
                elif effect=='FRAME_SHIFT':
                    # Frame shifts keep only the codon number.
                    mo = re.match(r'^(\D*)(\d+)(\D*)$', aa_chg)
                    assert mo, "unrecognized coding change format for %s (%s)" % (effect, aa_chg)
                    aas = ('','')
                    codon = int(mo.group(2))
                else:
                    assert 0, "unrecognized effect type (%s) for variant with coding change (%s)" % (effect, aa_chg)
            else:
                aas = ('','')
                codon = ''
            out.append([impact_rank[impact], effect, impact, gene_id, gene_name,
                codon, aas[0], aas[1]])
        if len(out)>1:
            # Keep only the most severe effect; merge exact-rank ties field-wise.
            out.sort()
            if out[0][0] == out[1][0]:
                #log.debug("SNP found with multiple effects of the same impact level: %s:%s - %s" % (chrom, pos, info))
                #assert out[0][2] in ('MODIFIER','LOW'), "Error: SNP found with multiple effects of the same impact level"
                out = [[';'.join(util.misc.unique([str(o[i]) for o in out])) for i in range(len(out[0]))]]
        # Drop the leading sort-rank element before returning.
        eff = out[0][1:]
        return eff
    except Exception:
        log.exception("exception parsing snpEff on row %s:%s - %s" % (chrom, pos, info))
        raise
#except Error:
#    log.error("error parsing snpEff on row %s:%s - %s" % (chrom, pos, info))
#    raise
| {
"repo_name": "broadinstitute/cms",
"path": "cms/util/old/annot.py",
"copies": "1",
"size": "7188",
"license": "bsd-2-clause",
"hash": -2021669786897336800,
"line_mean": 43.3703703704,
"line_max": 144,
"alpha_frac": 0.5139120757,
"autogenerated": false,
"ratio": 3.6505840528186897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4664496128518689,
"avg_score": null,
"num_lines": null
} |
'''A few miscellaneous functions and objects used by the datastream and datastreamdiff modules.
'''
import re
import sys
import time
import json
import numpy as np
import datetime as dt
# regex for checking netcdf file names:
#   <site(3 chars)><instrument><facility>.<data level>.<yyyymmdd>.<hhmmss>.(cdf|nc)
# BUGFIX: the '.' after the data-level group was unescaped and matched any
# character; also use raw strings so the \d escapes are not fragile.
ncfname_re = \
    re.compile(r'^([a-z]{3})([a-z0-9]*)([A-Z]\d+)\.([a-z]\d)\.'
               r'(\d{4})(\d\d)(\d\d)\.(\d\d)(\d\d)(\d\d)\.(cdf|nc)$')
def file_time(fname):
    '''Return time in netcdf file name as a datetime object, or None if the
    name does not look like a datastream file.'''
    match = ncfname_re.match(fname)
    if match is None:
        return None
    # Groups 4-9 are yyyy, mm, dd, HH, MM, SS.
    return dt.datetime(*map(int, match.groups()[4:10]))
def file_datastream(fname):
    '''Return the datastream substring (site+instrument+facility+level)
    from a filename.'''
    match = ncfname_re.match(fname)
    parts = match.groups()[:4]
    return ''.join(parts)
def store_difference(func):
    '''Decorator that causes difference() methods to compute once, then
    store and reuse their result on the instance.'''
    def difference(self):
        try:
            # Fast path: previously computed and cached.
            return self._difference
        except AttributeError:
            self._difference = func(self)
            return self._difference
    return difference
def json_section(self, contents):
    '''Return a json "section" dict with the given name and contents,
    attaching a difference flag when the object provides one.'''
    section = {
        'type': 'section',
        'name': self.name,
        'contents': contents,
    }
    if hasattr(self, 'difference'):
        # Callable difference() wins over a cached _difference value.
        section['difference'] = self.difference()
    elif hasattr(self, '_difference'):
        section['difference'] = self._difference
    return section
def JEncoder(obj):
    ''' Defines a few default behaviours when the json encoder doesn't know
    what to do: NaN becomes None (JSON null), integral numbers become int,
    other numerics become float, and anything non-numeric falls back to
    str(obj).  Raises TypeError only if even str() fails.
    '''
    try:
        if np.isnan(obj):
            # JSON has no NaN; emit null instead.
            return None
        elif obj // 1 == obj:
            return int(obj)
        else:
            return float(obj)
    except Exception:
        # Not numeric (np.isnan / // raised).  Narrowed from a bare
        # `except:` so KeyboardInterrupt/SystemExit still propagate.
        try:
            return str(obj)
        except Exception:
            raise TypeError('Object of type {0} with value of {1} is not JSON serializable' \
                .format(type(obj), repr(obj)))
def shared_times(old_ftimes, new_ftimes):
    '''Yields time intervals shared by both the old and new files, in order.

    Parameters:
        old_ftimes   list of old file times as TimeInterval objects
        new_ftimes   list of new file times as TimeInterval objects

    Yields tuples of:
        beg     beginning of the shared time interval
        end     end of the shared time interval
        old_i   index of the interval in old_ftimes overlapping this span
        new_i   index of the interval in new_ftimes overlapping this span
    '''
    olds = iter(enumerate(old_ftimes))
    news = iter(enumerate(new_ftimes))
    o_idx, o_ival = next(olds, (None, None))
    n_idx, n_ival = next(news, (None, None))
    while o_ival and n_ival:
        start = max(o_ival.beg, n_ival.beg)
        stop = min(o_ival.end, n_ival.end)
        if start < stop:
            yield start, stop, o_idx, n_idx
        # Advance whichever interval(s) finish first; both on a tie.
        # (Flags are computed before advancing so a tie advances both.)
        advance_old = o_ival.end <= n_ival.end
        advance_new = n_ival.end <= o_ival.end
        if advance_old:
            o_idx, o_ival = next(olds, (None, None))
        if advance_new:
            n_idx, n_ival = next(news, (None, None))
| {
"repo_name": "CesiumLifeJacket/overwatch",
"path": "dummy_project/ncr/subdir/utils.py",
"copies": "2",
"size": "3169",
"license": "mit",
"hash": 2777706655101118000,
"line_mean": 31.0101010101,
"line_max": 95,
"alpha_frac": 0.5900915115,
"autogenerated": false,
"ratio": 3.5487122060470324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007441661941490733,
"num_lines": 99
} |
'''A few miscellaneous tools. '''
from __future__ import print_function, division # Division of integers with / should never round!
import collections
import itertools
import logging
import os
import re
import subprocess
import multiprocessing
import sys
import util.file
log = logging.getLogger(__name__)
__author__ = "dpark@broadinstitute.org"
def unique(items):
    '''Yield each distinct item once, preserving first-seen order.'''
    observed = set()
    for item in items:
        if item in observed:
            continue
        observed.add(item)
        yield item
def histogram(items):
    '''Count occurrences of each item and return a dict of counts.'''
    counts = {}
    for item in items:
        counts[item] = counts.get(item, 0) + 1
    return counts
def freqs(items, zero_checks=None):
    '''Given a list of comparable, non-unique items, produce an iterator of
    (item, count, freq) tuples, where:
        item  is a unique instance of one of the input items
        count is a positive integer: the number of times item was observed
        freq  is count / sum(all counts)
    If zero_checks is given, items in it that never appear in the input are
    additionally emitted with a zero count and freq.
    See histogram(items).
    '''
    zero_checks = zero_checks or set()
    counts = {}
    total = 0
    for item in items:
        counts[item] = counts.get(item, 0) + 1
        total += 1
    for key, count in counts.items():
        yield (key, count, float(count) / total)
    for item in zero_checks:
        if item not in counts:
            yield (item, 0, 0.0)
def intervals(i, n, l):
    '''Divide something of length l into n equally sized parts and return the
    start-stop values of the i'th part.  Values are 1-based and inclusive;
    parts are adjacent and non-overlapping, and i must be between 1 and n.
    '''
    assert 1 <= i <= n and l >= n
    size = l // n
    start = (i - 1) * size + 1
    # The final part extends to the end to absorb any remainder of l.
    stop = l if i == n else i * size
    return (start, stop)
# from http://stackoverflow.com/a/312467
def pairwise(iterable):
    """ from itertools recipes
    s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    first, second = itertools.tee(iterable)
    next(second, None)  # advance the clone by one element
    # Py2's itertools.izip when present, otherwise Py3's builtin zip.
    zipper = getattr(itertools, 'izip', zip)
    return zipper(first, second)
def batch_iterator(iterator, batch_size):
    """Yield successive lists of up to batch_size entries from iterator.

    Works on any iterable -- e.g. SeqRecord objects from Bio.SeqIO.parse,
    Alignment objects from Bio.AlignIO.parse, or lines from a file handle.
    Every yielded list has batch_size entries except possibly the last,
    which may be shorter.
    """
    source = iter(iterator)
    while True:
        batch = list(itertools.islice(source, batch_size))
        if not batch:
            return
        yield batch
def list_contains(sublist, list_):
    """Tests whether sublist occurs as a contiguous run inside list_."""
    window = len(sublist)
    return any(list_[i:i + window] == sublist
               for i in range(len(list_) - window + 1))
try:
    # Python 3.5+: use the real subprocess.run.
    from subprocess import run
except ImportError:
    # Result type mirroring the real subprocess.CompletedProcess.
    CompletedProcess = collections.namedtuple(
        'CompletedProcess', ['args', 'returncode', 'stdout', 'stderr'])

    def run(args, stdin=None, stdout=None, stderr=None, shell=False,
            env=None, cwd=None, timeout=None, check=False):
        '''A poor man's substitute of python 3.5's subprocess.run().
        Should only be used for capturing stdout. If stdout is unneeded, a
        simple subprocess.call should suffice.

        NOTE(review): the timeout argument is accepted for signature
        compatibility but never used in this fallback.
        '''
        assert stdout is not None, (
            'Why are you using this util function if not capturing stdout?')
        stdout_pipe = stdout == subprocess.PIPE
        stderr_pipe = stderr == subprocess.PIPE
        # A little optimization when we don't need temporary files.
        if stdout_pipe and (
                stderr == subprocess.STDOUT or stderr is None):
            try:
                output = subprocess.check_output(
                    args, stdin=stdin, stderr=stderr, shell=shell,
                    env=env, cwd=cwd)
                return CompletedProcess(args, 0, output, b'')
            # Py3.4 doesn't have stderr attribute
            except subprocess.CalledProcessError as e:
                if check:
                    raise
                returncode = e.returncode
                stderr_text = getattr(e, 'stderr', b'')
                return CompletedProcess(args, e.returncode, e.output, stderr_text)
        # Otherwise use temporary files as buffers, since subprocess.call
        # cannot use PIPE.
        if stdout_pipe:
            stdout_fn = util.file.mkstempfname('.stdout')
            stdout = open(stdout_fn, 'wb')
        if stderr_pipe:
            stderr_fn = util.file.mkstempfname('.stderr')
            stderr = open(stderr_fn, 'wb')
        try:
            returncode = subprocess.call(
                args, stdin=stdin, stdout=stdout,
                stderr=stderr, shell=shell, env=env, cwd=cwd)
            # Read back whatever the child wrote into the buffer files.
            if stdout_pipe:
                stdout.close()
                with open(stdout_fn, 'rb') as f:
                    output = f.read()
            else:
                output = ''
            if stderr_pipe:
                stderr.close()
                with open(stderr_fn, 'rb') as f:
                    error = f.read()
            else:
                error = ''
            if check and returncode != 0:
                # Echo captured output before raising, so test harnesses see it.
                print(output.decode("utf-8"))
                print(error.decode("utf-8"))
                try:
                    raise subprocess.CalledProcessError(
                        returncode, args, output, error) #pylint: disable-msg=E1121
                except TypeError: # py2 CalledProcessError does not accept error
                    raise subprocess.CalledProcessError(
                        returncode, args, output)
            return CompletedProcess(args, returncode, output, error)
        finally:
            # Always remove the temporary buffer files (close is idempotent).
            if stdout_pipe:
                stdout.close()
                os.remove(stdout_fn)
            if stderr_pipe:
                stderr.close()
                os.remove(stderr_fn)
def run_and_print(args, stdout=None, stderr=None,
                  stdin=None, shell=False, env=None, cwd=None,
                  timeout=None, silent=False, buffered=False, check=False,
                  loglevel=None):
    '''Capture stdout+stderr and print.
    This is useful for nose, which has difficulty capturing stdout of
    subprocess invocations.

    silent: suppress echoing the child's output.
    buffered: stream output as it is produced instead of waiting for exit.
    check: raise CalledProcessError on a nonzero exit code.
    loglevel: route output to the module logger instead of stdout
        (forces silent off).
    Returns a CompletedProcess(-like) result.
    '''
    if loglevel:
        silent = False
    if not buffered:
        if check and not silent:
            try:
                result = run(
                    args,
                    stdin=stdin,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    env=env,
                    cwd=cwd,
                    timeout=timeout,
                    check=check
                )
                print(result.stdout.decode('utf-8'))
                try:
                    print(result.stderr.decode('utf-8'))
                except AttributeError:
                    pass
            except subprocess.CalledProcessError as e:
                if loglevel:
                    try:
                        # NOTE: if run() itself raised, `result` was never
                        # bound -- the NameError handler below covers that.
                        log.log(loglevel, result.stdout.decode('utf-8'))
                    except NameError:
                        # in some situations, result does not get assigned anything
                        pass
                    except AttributeError:
                        log.log(loglevel, result.output.decode('utf-8'))
                else:
                    print(e.output.decode('utf-8'))
                    try:
                        print(e.stderr.decode('utf-8'))
                    except AttributeError:
                        pass
                sys.stdout.flush()
                raise(e)
        else:
            result = run(args, stdin=stdin, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, env=env, cwd=cwd,
                         timeout=timeout, check=check)
            if not silent and not loglevel:
                print(result.stdout.decode('utf-8'))
                sys.stdout.flush()
            elif loglevel:
                log.log(loglevel, result.stdout.decode('utf-8'))
    else:
        # Buffered mode: poll the child and echo bytes as they arrive.
        CompletedProcess = collections.namedtuple(
            'CompletedProcess', ['args', 'returncode', 'stdout', 'stderr'])
        process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, env=env,
                                   cwd=cwd)
        output = []
        while process.poll() is None:
            for c in iter(process.stdout.read, b""):
                output.append(c)
                if not silent:
                    print(c.decode('utf-8'), end="") # print for py3 / p2 from __future__
                    sys.stdout.flush() # flush buffer to stdout
        # in case there are still chars in the pipe buffer
        for c in iter(lambda: process.stdout.read(1), b""):
            if not silent:
                # NOTE(review): c is bytes here and is printed undecoded --
                # on py3 this would print b'x' reprs; confirm intended.
                print(c, end="")
                sys.stdout.flush() # flush buffer to stdout
        if check and process.returncode != 0:
            raise subprocess.CalledProcessError(process.returncode, args,
                                                b''.join(output))
        result = CompletedProcess(
            args, process.returncode, b''.join(output), b'')
    return result
def run_and_save(args, stdout=None, stdin=None,
                 outf=None, stderr=None, preexec_fn=None,
                 close_fds=False, shell=False, cwd=None, env=None, check=True):
    '''Run a subprocess with stdout redirected into the open file *outf*.

    Captured stderr is echoed to sys.stdout (so test harnesses like nose
    can see it).  Returns the Popen object.  Raises CalledProcessError on
    a nonzero exit code when check is true.  The stdout/stderr parameters
    are accepted for signature compatibility but not used.
    '''
    assert outf is not None
    child = subprocess.Popen(args,
                             stdin=stdin,
                             stdout=outf,
                             stderr=subprocess.PIPE,
                             preexec_fn=preexec_fn,
                             close_fds=close_fds,
                             shell=shell,
                             cwd=cwd,
                             env=env)
    _, err_bytes = child.communicate()
    # redirect stderror to stdout so it can be captured by nose
    if err_bytes:
        sys.stdout.write(err_bytes.decode("UTF-8"))
    if check and child.returncode != 0:
        raise subprocess.CalledProcessError(child.returncode, str(args[0]))
    return child
class FeatureSorter(object):
    ''' This class helps sort genomic features. It's not terribly optimized
    for speed or anything. Slightly inspired by calhoun's MultiSequenceRangeMap.
    '''

    def __init__(self, collection=None):
        # seqids: chromosome names in order of first appearance.
        self.seqids = []
        # seq_to_features: chrom -> list of (start, stop, strand, other).
        self.seq_to_features = {}
        # seq_to_breakpoints: chrom -> set of interval boundary positions.
        self.seq_to_breakpoints = {}
        # dirty: True when feature lists need re-sorting before reads.
        self.dirty = False
        if collection is not None:
            for args in collection:
                self.add(*args)

    def add(self, c, start, stop, strand='+', other=None):
        ''' Add a "feature", which is a chrom,start,stop,strand tuple (with
        optional other info attached)
        '''
        if c not in self.seq_to_features:
            self.seqids.append(c)
            self.seq_to_features[c] = []
            self.seq_to_breakpoints[c] = set()
            #self.seq_to_breakpoints[c].add(1) # do we want the empty interval in front?
        self.seq_to_features[c].append((int(start), int(stop), strand, other))
        # Breakpoints fall at the feature start and one past its (inclusive) stop.
        self.seq_to_breakpoints[c].add(start)
        self.seq_to_breakpoints[c].add(stop+1)
        self.dirty = True

    def _cleanup(self):
        # Lazily re-sort feature lists only when new features were added.
        if self.dirty:
            self.dirty = False
            for c in self.seqids:
                self.seq_to_features[c].sort()

    def get_seqids(self):
        '''Chromosome ids, in order of first appearance.'''
        return tuple(self.seqids)

    def get_features(self, c=None, left=0, right=float('inf')):
        ''' Get all features on all chromosomes in sorted order. Chromosomes
        are emitted in order of first appearance (via add). Features on
        each chromosome are emitted in start, then stop order. If
        boundaries are specified, we restrict to features that overlap
        the specified interval.
        '''
        self._cleanup()
        if c is not None:
            seqlist = [c]
        else:
            seqlist = self.seqids
        for c in seqlist:
            for start, stop, strand, other in self.seq_to_features[c]:
                # Overlap test with inclusive coordinates.
                if stop>=left and start<=right:
                    yield (c, start, stop, strand, other)

    def get_intervals(self, c=None):
        ''' Get all intervals on the reference where the overlapping feature
        set remains the same. Output will be sorted, adjacent intervals
        and will describe how many and which features overlap it.
        '''
        self._cleanup()
        if c is not None:
            seqlist = [c]
        else:
            seqlist = self.seqids
        for c in seqlist:
            for left, right in pairwise(sorted(self.seq_to_breakpoints[c])):
                # Breakpoints are half-open; emit an inclusive stop coordinate.
                right = right - 1
                features = list(self.get_features(c, left, right))
                yield (c, left, right, len(features), features)
def available_cpu_count():
    """
    Return the number of available virtual or physical CPUs on this system.
    The number of available CPUs can be smaller than the total number of CPUs
    when the cpuset(7) mechanism is in use, as is the case on some cluster
    systems.
    Adapted from http://stackoverflow.com/a/1006301/715090
    """
    try:
        with open('/proc/self/status') as status_file:
            proc_status = status_file.read()
    except IOError:
        # Not on Linux (or /proc unavailable): fall back to the total count.
        return multiprocessing.cpu_count()
    match = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', proc_status)
    if match:
        # Cpus_allowed is a comma-separated hex bitmask; count the set bits.
        mask = int(match.group(1).replace(',', ''), 16)
        allowed = bin(mask).count('1')
        if allowed > 0:
            return min(allowed, multiprocessing.cpu_count())
    return multiprocessing.cpu_count()
def which(application_binary_name):
    """
    Similar to the *nix "which" command: find the first executable binary
    on the system PATH with the given name, resolving symlinks.
    Returns the resolved absolute path, or None if not found.
    """
    # BUGFIX: the loop variable used to shadow the PATH string it iterated
    # over, and an unset PATH caused an AttributeError on None.
    search_path = os.getenv('PATH') or ''
    for directory in search_path.split(os.path.pathsep):
        candidate = os.path.join(directory, application_binary_name)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return os.path.realpath(candidate)
    return None
"repo_name": "broadinstitute/cms",
"path": "cms/util/misc.py",
"copies": "1",
"size": "14792",
"license": "bsd-2-clause",
"hash": -4408463980622262300,
"line_mean": 34.818401937,
"line_max": 98,
"alpha_frac": 0.5513791239,
"autogenerated": false,
"ratio": 4.295005807200929,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5346384931100929,
"avg_score": null,
"num_lines": null
} |
'''A few pure-python statistical tools to avoid the need to install scipy. '''
from __future__ import division # Division of integers with / should never round!
from math import exp, log, sqrt, gamma, lgamma, erf
import itertools
__author__ = "dpark@broadinstitute.org, irwin@broadinstitute.org"
try:
    # Python 3.4+: use the stdlib implementations.
    from statistics import mean, median
except ImportError:
    # Python <3.4, avoid numpy if these two methods are all we really need

    def mean(l):
        '''Arithmetic mean of l; raises on an empty sequence.'''
        if len(l) > 0:
            return float(sum(l)) / len(l)
        else:
            raise Exception("empty list for mean")

    def median(l):
        '''Median of l; raises on an empty sequence.

        BUGFIX: sort a copy instead of calling l.sort() -- the fallback
        used to mutate the caller's list, unlike statistics.median.
        '''
        if len(l) > 0:
            ordered = sorted(l)
            half = len(ordered) // 2
            if len(ordered) % 2 == 0:
                return (ordered[half - 1] + ordered[half]) / 2.0
            else:
                return ordered[half]
        else:
            raise Exception("empty list for median")
def product(iter_obj) :
    """Return the product of all elements of iter_obj (1 for an empty input)."""
    result = 1
    for value in iter_obj :
        result *= value
    return result
def chi2_contingency(contingencyTable, correction = True) :
    """ contingencyTable is a sequence of m sequences, each of length n.
    Return an estimate using the chi-square distribution of the two-tailed
    p-value for an m x n contingency table against the null hypothesis
    that the row and column criteria are independent. This is not as
    accurate as fisher_exact, but is faster (and is implemented for all
    m and n).
    If correction is True and there is 1 degree of freedom, apply Yates's
    correction for continuity, i.e., adjust each observed value
    by 0.5 towards the corresponding expected value, which brings
    the result closer to the Fisher exact test result.
    Not recommended if any of the counts or expected counts are
    less than 5.
    """
    # scipy equivalent: scipy.stats.chi2_contingency(contingencyTable)[1]
    if len(contingencyTable) == 0 :
        return 1.0
    if len(set(map(len, contingencyTable))) != 1 :
        raise ValueError('Not all rows have the same length')
    # Eliminate rows and columns with 0 sum
    colSums = [sum(row[col] for row in contingencyTable)
               for col in range(len(contingencyTable[0]))]
    table = [[x for x, colSum in zip(row, colSums) if colSum != 0]
             for row in contingencyTable if sum(row) != 0]
    if len(table) < 2 or len(table[0]) < 2 :
        # Degenerate table after pruning: no association to test.
        return 1.0
    m = len(table)
    n = len(table[0])
    rowSums = [sum(row) for row in table]
    colSums = [sum(row[col] for row in table) for col in range(n)]
    N = sum(rowSums)
    # Expected counts under the independence null hypothesis.
    expect = [[rowSums[i] * colSums[j] / N for j in range(n)] for i in range(m)]
    if correction and m == n == 2 :
        # Yates's continuity correction: move each observed count 0.5
        # towards its expected value, without crossing it.
        def corr(i, j) :
            if expect[i][j] > table[i][j] :
                return min(table[i][j] + 0.5, expect[i][j])
            else :
                return max(table[i][j] - 0.5, expect[i][j])
        table = [[corr(i, j) for j in range(n)] for i in range(m)]
    chisq = sum((table[i][j] - expect[i][j]) ** 2 / expect[i][j]
                for j in range(n)
                for i in range(m))
    # Two-tailed p-value from the chi-squared CDF with (m-1)(n-1) dof.
    pval = 1 - pchisq(chisq, (m - 1) * (n - 1))
    return pval
def fisher_exact(contingencyTable) :
    """ Fisher exact test for the 2 x n case.
    contingencyTable is a sequence of 2 length-n sequences of integers.
    Return the two-tailed p-value against the null hypothesis that the row
    and column criteria are independent, using Fisher's exact test.
    For n larger than 2, this is very slow, O(S^(n-1)), where S is the
    smaller of the two row sums. Better to use chi2_contingency unless
    one of the row sums is small.
    Handles m x n contingencyTable with m > 2 if it can be reduced to the
    2 x n case by transposing or by removing rows that are all 0s. Also
    handles degenerate cases of 0 or 1 row by returning 1.0.
    """
    if len(contingencyTable) == 0 :
        return 1.0
    if len(set(map(len, contingencyTable))) != 1 :
        raise ValueError('Not all rows have the same length')
    if any(x != int(x) for row in contingencyTable for x in row) :
        raise ValueError('Some table entry is not an integer')
    if any(x < 0 for row in contingencyTable for x in row) :
        raise ValueError('Some table entry is negative')
    # Eliminate rows and columns with 0 sum
    colSums = [sum(row[col] for row in contingencyTable)
               for col in range(len(contingencyTable[0]))]
    table = [[x for x, colSum in zip(row, colSums) if colSum != 0]
             for row in contingencyTable if sum(row) != 0]
    if len(table) < 2 or len(table[0]) < 2 :
        # Degenerate table after pruning: no association to test.
        return 1.0
    if len(table) > len(table[0]) :
        table = list(zip(*table)) # Transpose
    m = len(table)
    n = len(table[0])
    if m != 2 :
        raise NotImplementedError('More than 2 non-zero rows and columns.')
    # Put row with smaller sum first. Makes the loop iterations simpler.
    table.sort(key = sum)
    # Put column with largest sum last. Makes loop quick rejection faster.
    table = list(zip(*table)) # Transpose
    table.sort(key = sum)
    table = list(zip(*table)) # Transpose back
    # There are many optimizations possible for the following code, but it would
    # still be O(S^(n-1)) so it would still be too slow for anything
    # sizeable, and it's usable as it for small things.
    rowSums = [sum(row) for row in table]
    colSums = [sum(row[col] for row in table) for col in range(n)]
    logChooseNrowSum = log_choose(sum(rowSums), rowSums[0])
    def prob_of_table(firstRow) :
        # Hypergeometric probability of this first row given fixed margins.
        return exp(sum(log_choose(cs, a) for cs, a in zip(colSums, firstRow)) -
                   logChooseNrowSum)
    p0 = prob_of_table(table[0])
    result = 0
    # Enumerate every first row consistent with the margins and accumulate
    # the probability of all tables at most as likely as the observed one.
    for firstRowM1 in itertools.product(*[range(min(rowSums[0], colSums[i]) + 1)
                                          for i in range(n - 1)]) :
        lastElmt = rowSums[0] - sum(firstRowM1)
        if lastElmt < 0 or lastElmt > colSums[-1] :
            continue
        prob = prob_of_table(firstRowM1 + (lastElmt,))
        if prob <= p0 + 1e-9 : # (1e-9 handles floating point round off)
            result += prob
    return result
def log_choose(n, k):
    """Return log(n choose k), computed via lgamma(x + 1) == log(x!)."""
    if not (0 <= k <= n):
        raise ValueError('%d is negative or more than %d' % (k, n))
    # log C(n, k) = log n! - log k! - log (n-k)!
    return lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1)
def gammainc_halfint(s, x) :
    """Regularized lower incomplete gamma function P(s, x).

    Equals (integral from 0 to x of t**(s-1) * exp(-t) dt) / gamma(s),
    i.e. the fraction of the full gamma integral accumulated by x.
    Implemented here only for s a positive multiple of 0.5.
    (scipy equivalent: scipy.special.gammainc(s, x))
    """
    if s <= 0 :
        raise ValueError('%s is not positive' % s)
    if x < 0 :
        raise ValueError('%s < 0' % x)
    if s * 2 != int(s * 2) :
        raise NotImplementedError('%s is not a multiple of 0.5' % s)
    if s == int(s) :
        # Integer s, closed form: P(s, x) = 1 - exp(-x) * sum_{k<s} x**k / k!
        term = 1
        partial = 1
        for k in range(1, int(s)) :
            term *= x / k
            partial += term
        return 1 - exp(-x) * partial
    # Half-integer s: walk s down to 0.5 using the recursion
    # P(s, x) = P(s - 1, x) - x**(s-1) * exp(-x) / gamma(s)
    acc = 0.0
    while s > 1 :
        acc -= x ** (s - 1) * exp(-x) / gamma(s)
        s = s - 1
    # Base case: P(0.5, x) = erf(sqrt(x))
    return acc + erf(sqrt(x))
def pchisq(x, k) :
    "Cumulative distribution function of chi squared with k degrees of freedom."
    if k != int(k) or k < 1 :
        raise ValueError('%s is not a positive integer' % k)
    if x < 0 :
        raise ValueError('%s < 0' % x)
    # Chi-squared(k) is Gamma(shape=k/2, scale=2), so its CDF reduces to the
    # regularized lower incomplete gamma function.
    return gammainc_halfint(k / 2, x / 2)
| {
"repo_name": "broadinstitute/cms",
"path": "cms/util/stats.py",
"copies": "1",
"size": "7924",
"license": "bsd-2-clause",
"hash": 8284199714528943000,
"line_mean": 38.2277227723,
"line_max": 81,
"alpha_frac": 0.5884654215,
"autogenerated": false,
"ratio": 3.4723926380368098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.456085805953681,
"avg_score": null,
"num_lines": null
} |
'''A few simple examples of recursion.'''
def sum1(xs):
    '''We can recursively sum a list of numbers.'''
    if not xs:
        return 0
    head, tail = xs[0], xs[1:]
    return head + sum1(tail)
def sum2(xs):
    '''Or do the same thing iteratively.'''
    total = 0
    for value in xs:
        total = total + value
    return total
def product1(xs):
    '''Similarly we can recursively take the product of a list of numbers.

    The empty product is 1 (the multiplicative identity).
    '''
    if len(xs) == 0:
        return 1
    else:
        # Multiply the head into the product of the tail. (The original
        # mistakenly summed via sum1 here, a copy-paste error.)
        return xs[0] * product1(xs[1:])
def concat1(xs):
    '''Concatenate a list of strings.'''
    if not xs:
        return ''
    return xs[0] + concat1(xs[1:])
def reverse1(xs):
    '''Or reverse a list.

    The recursive call must be to reverse1 itself; the original referenced
    an undefined name ``reverse``, raising NameError on any non-empty input.
    '''
    if len(xs) == 0:
        return xs
    else:
        return reverse1(xs[1:]) + [xs[0]]
# At this point we realise all of these examples are practically
# identical (i.e. the recursion structure is the same), we can
# abstract them into two recursive functions.
def foldl(xs, op, id):
    '''Folds a list xs from the left with binary operation op,
    and identity id:

        foldl([a, b, c], op, e) == op(op(op(e, a), b), c)

    (The original recursed as op(foldl(rest), head), which consumes the list
    right-to-left and gives the wrong answer for non-commutative op.)
    '''
    if len(xs) == 0:
        return id
    else:
        # Accumulate the head into the identity, then fold the tail.
        return foldl(xs[1:], op, op(id, xs[0]))
def foldr(xs, op, id):
    '''Folds a list xs from the right with binary operation op,
    and identity id:

        foldr([a, b, c], op, e) == op(a, op(b, op(c, e)))
    '''
    if not xs:
        return id
    head, rest = xs[0], xs[1:]
    return op(head, foldr(rest, op, id))
| {
"repo_name": "twright/Python-Examples",
"path": "recursion_examples.py",
"copies": "1",
"size": "1365",
"license": "unlicense",
"hash": 2449193552415364600,
"line_mean": 23.375,
"line_max": 77,
"alpha_frac": 0.5626373626,
"autogenerated": false,
"ratio": 3.3292682926829267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4391905655282926,
"avg_score": null,
"num_lines": null
} |
"""A few subroutines written to interface with the SMS API.
Currently not in use and so un-maintained,
but I think it should all basically still work.
"""
import simplejson as json
import os
import sys
import getpass
import warnings
import time
import glob
import httplib
import urllib
import pysovo.utils as utils
default_sms_config_file = "".join((os.environ['HOME'], "/.pysovo/sms_acc"))
class SMSConfigKeys():
    """Names of the keys stored in the JSON SMS account-settings file."""
    # key under which the account username is stored
    user = 'username'
    # key under which the API password is stored
    pw = 'api_password'
# Singleton used throughout this module for config-key lookups.
keys = SMSConfigKeys()
# SMS support is optional: if textmagic is missing, flag it rather than
# failing at import time; callers check sms_available before using it.
try:
    import textmagic
    import textmagic.client
    sms_available = True
except ImportError:
    warnings.warn("NB textmagic not found, SMS alerts not available.", ImportWarning)
    sms_available = False
# Single-character delivery codes returned by the TextMagic API, see
# http://api.textmagic.com/https-api/sms-delivery-notification-codes
delivery_status_key = {
    'q': 'Queued',
    'u': 'Unknown',
    'r': 'Sent',
    'd': 'Delivered',
    'e': 'Send error',
    'f': 'Delivery error' #Different to send error?
}
def prompt_for_config(config_filename=default_sms_config_file):
    """Interactively prompt for SMS account credentials, store them as JSON
    in config_filename, chmod the file to owner-only, and return its path."""
    utils.ensure_dir(config_filename)
    outputfile = open(config_filename, 'w')
    account = {}
    print "Please enter the sms username:"
    account[keys.user] = raw_input(">")
    print "Now please enter your API password:"
    account[keys.pw] = raw_input(">")
    print "You entered:"
    print "User", account[keys.user]
    print "API Pass", account[keys.pw]
    outputfile.write(json.dumps(account))
    outputfile.close()
    print ""
    print "Account settings saved to:", config_filename
    # Credentials on disk: strip group/other permissions.
    chmod_command = "chmod go-rwx {file}".format(file=config_filename)
    os.system(chmod_command)
    return config_filename
def load_account_settings_from_file(config_filename=default_sms_config_file):
if sms_available:
try:
with open(config_filename, 'r') as config_file:
account = json.loads(config_file.read())
except Exception:
print "Error: Could not load email account from " + config_filename
raise
return account
else:
return None
def send_sms(account,
recipients,
body_text,
debug=False
):
if debug:
print "Loaded account, starting SMS session"
if len(body_text) > 155:
print "Warning: Body text will be truncated"
body_text = body_text[:160]
client = textmagic.client.TextMagicClient(
account[keys.sms_account.username],
account[keys.sms_account.api_password])
result = client.send(body_text, recipients)
message_ids = result['message_id'].keys()
return message_ids
def check_sms_statuses(account, message_ids):
    """Query TextMagic for delivery status of each message id.

    Returns (message_id, status_code, human_readable_status) triples.
    """
    client = textmagic.client.TextMagicClient(
        account[keys.user],
        account[keys.pw])
    responses = client.message_status(message_ids)
    delivery_status_codes = [responses[id]['status'] for id in message_ids]
    # Translate codes we know via delivery_status_key; pass unknowns through.
    delivery_statuses = [delivery_status_key.get(code, code)
                         for code in delivery_status_codes]
    return zip(message_ids, delivery_status_codes, delivery_statuses)
def check_sms_balance(account, debug=False):
client = textmagic.client.TextMagicClient(
account[keys.user],
account[keys.pw])
balance = client.account()['balance']
if debug:
print "Balance is:", balance
return balance
| {
"repo_name": "timstaley/pysovo",
"path": "pysovo/comms/sms.py",
"copies": "1",
"size": "3668",
"license": "bsd-2-clause",
"hash": -4573503130731165700,
"line_mean": 25.9705882353,
"line_max": 85,
"alpha_frac": 0.6175027263,
"autogenerated": false,
"ratio": 4.048565121412803,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01032272461245812,
"num_lines": 136
} |
""" A few tests for BorderSwipingNetworks
>>> from pybrain import MotherConnection
>>> from scipy import ones, array
We will use a simple 3-dimensional network:
>>> dim = 3
>>> size = 3
>>> hsize = 1
It is possible to define some weights before construction:
>>> predefined = {'outconn': MotherConnection(1)}
>>> predefined['outconn']._setParameters([0.5])
Building it with the helper function below:
>>> net = buildSimpleBorderSwipingNet(size, dim, hsize, predefined)
>>> net.name
'BorderSwipingNetwork-...
>>> net.paramdim
7
>>> net.dims
(3, 3, 3)
Did the weight get set correctly?
>>> net.params[0]
0.5
Now we'll set all weights to a sequence of values:
>>> net._setParameters(array(range(net.paramdim))/10.+.1)
>>> nearlyEqual(list(net.params), [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
True
Now we want to use the same weights to build a bigger network
>>> size2 = size + 2
>>> net2 = buildSimpleBorderSwipingNet(size2, dim, hsize, net.predefined)
It has a few more parameters:
>>> net2.paramdim
12
But the values are the same than before except numerical differences.
>>> nearlyEqual(list(net2.params), [0.1, 0.2, 0.3, 0.3, 0.4, 0.5, 0.4333, 0.40, 0.46666, 0.4142857, 0.6, 0.7])
True
Let's attempt a couple of activations:
>>> res = net.activate(array(range(net.indim))/10.)
>>> res2 = net2.activate(array(range(net2.indim))/10.)
>>> min(res), min(res2)
(0.625..., 0.631...)
>>> max(res), max(res2)
(0.797..., 0.7999...)
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.tests import runModuleTestSuite
from pybrain.structure.networks import BorderSwipingNetwork
from pybrain import ModuleMesh, LinearLayer, TanhLayer
def nearlyEqual(lst1, lst2, tolerance=0.001):
    """Tell whether the itemwise differences of the two lists is never bigger
    than tolerance."""
    for first, second in zip(lst1, lst2):
        if abs(first - second) > tolerance:
            return False
    return True
def buildSimpleBorderSwipingNet(size = 3, dim = 3, hsize = 1, predefined = None):
    """ build a simple swiping network, of given size and dimension, using linear inputs and output

    :param size: edge length, identical in every dimension
    :param dim: number of mesh dimensions
    :param hsize: size of each hidden (tanh) layer
    :param predefined: optional dict of predefined connections, passed through
        to BorderSwipingNetwork. Defaults to a fresh empty dict per call --
        the original used a mutable default argument `{}`, so all calls
        without the argument shared (and could leak state through) one dict.
    """
    if predefined is None:
        predefined = {}
    # assuming identical size in all dimensions
    dims = tuple([size]*dim)
    # also includes one dimension for the swipes (2**dim swipe directions)
    hdims = tuple(list(dims)+[2**dim])
    inmod = LinearLayer(size**dim, name = 'input')
    inmesh = ModuleMesh.viewOnFlatLayer(inmod, dims, 'inmesh')
    outmod = LinearLayer(size**dim, name = 'output')
    outmesh = ModuleMesh.viewOnFlatLayer(outmod, dims, 'outmesh')
    hiddenmesh = ModuleMesh.constructWithLayers(TanhLayer, hsize, hdims, 'hidden')
    return BorderSwipingNetwork(inmesh, hiddenmesh, outmesh, predefined = predefined)
if __name__ == '__main__':
    # Run the doctests in this module's docstring via pybrain's test harness.
    runModuleTestSuite(__import__('__main__'))
| {
"repo_name": "zygmuntz/pybrain",
"path": "pybrain/tests/unittests/structure/networks/test_borderswipingnetwork.py",
"copies": "31",
"size": "2820",
"license": "bsd-3-clause",
"hash": 3172192282942634500,
"line_mean": 28.375,
"line_max": 114,
"alpha_frac": 0.659929078,
"autogenerated": false,
"ratio": 3.2451093210586883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0025248231539059473,
"num_lines": 96
} |
""" A few tests for BorderSwipingNetworks
>>> from pybrain import MotherConnection
>>> from scipy import ones, array
We will use a simple 3-dimensional network:
>>> dim = 3
>>> size = 3
>>> hsize = 1
It is possible to define some weights before construction:
>>> predefined = {'outconn': MotherConnection(1)}
>>> predefined['outconn']._setParameters([0.5])
Building it with the helper function below:
>>> net = buildSimpleBorderSwipingNet(size, dim, hsize, predefined)
>>> net.name
'BorderSwipingNetwork-...
>>> net.paramdim
7
>>> net.dims
(3, 3, 3)
Did the weight get set correctly?
>>> net.params[0]
0.5
Now we'll set all weights to a sequence of values:
>>> net._setParameters(array(range(net.paramdim))/10.+.1)
>>> nearlyEqual(list(net.params), [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
True
Now we want to use the same weights to build a bigger network
>>> size2 = size + 2
>>> net2 = buildSimpleBorderSwipingNet(size2, dim, hsize, net.predefined)
It has a few more parameters:
>>> net2.paramdim
12
But the values are the same than before except numerical differences.
>>> nearlyEqual(list(net2.params), [0.1, 0.2, 0.3, 0.3, 0.4, 0.5, 0.4333, 0.40, 0.46666, 0.4142857, 0.6, 0.7])
True
Let's attempt a couple of activations:
>>> res = net.activate(array(range(net.indim))/10.)
>>> res2 = net2.activate(array(range(net2.indim))/10.)
>>> min(res), min(res2)
(0.625..., 0.631...)
>>> max(res), max(res2)
(0.797..., 0.7999...)
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.tests import runModuleTestSuite
from pybrain.structure.networks import BorderSwipingNetwork
from pybrain import ModuleMesh, LinearLayer, TanhLayer
def nearlyEqual(lst1, lst2, tolerance=0.001):
    """Tell whether the itemwise differences of the two lists is never bigger
    than tolerance."""
    return not any(abs(i - j) > tolerance for i, j in zip(lst1, lst2))
def buildSimpleBorderSwipingNet(size = 3, dim = 3, hsize = 1, predefined = None):
    """ build a simple swiping network, of given size and dimension, using linear inputs and output

    :param size: edge length, identical in every dimension
    :param dim: number of mesh dimensions
    :param hsize: size of each hidden (tanh) layer
    :param predefined: optional dict of predefined connections, passed through
        to BorderSwipingNetwork. Defaults to a fresh empty dict per call --
        the original used a mutable default argument `{}`, shared across all
        calls that omit the argument.
    """
    if predefined is None:
        predefined = {}
    # assuming identical size in all dimensions
    dims = tuple([size]*dim)
    # also includes one dimension for the swipes (2**dim swipe directions)
    hdims = tuple(list(dims)+[2**dim])
    inmod = LinearLayer(size**dim, name = 'input')
    inmesh = ModuleMesh.viewOnFlatLayer(inmod, dims, 'inmesh')
    outmod = LinearLayer(size**dim, name = 'output')
    outmesh = ModuleMesh.viewOnFlatLayer(outmod, dims, 'outmesh')
    hiddenmesh = ModuleMesh.constructWithLayers(TanhLayer, hsize, hdims, 'hidden')
    return BorderSwipingNetwork(inmesh, hiddenmesh, outmesh, predefined = predefined)
if __name__ == '__main__':
    # Run the doctests in this module's docstring via pybrain's test harness.
    runModuleTestSuite(__import__('__main__'))
| {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/tests/unittests/test_borderswipingnetwork.py",
"copies": "1",
"size": "2937",
"license": "bsd-3-clause",
"hash": 7808063341258796000,
"line_mean": 29.59375,
"line_max": 114,
"alpha_frac": 0.633299285,
"autogenerated": false,
"ratio": 3.3412969283276452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9201673079446597,
"avg_score": 0.054584626776209566,
"num_lines": 96
} |
"""A few tests for classes and functions from kernel/modelingtools
"""
#!/usr/bin/env python
import numpy as np
import siconos.kernel as K
t0 = 0       # initial simulation time
T = 10       # final simulation time
r = 0.1      # ball radius
g = 9.81     # gravity
m = 1        # ball mass
e = 0.9      # restitution coefficient (see test_NewtonImpactNSL)
theta = 0.5  # theta-scheme integration parameter
h = 0.005    # time step
q = np.array([1, 0, 0])   # initial coordinates
v = np.array([0, 0, 0])   # initial velocity
mass = np.eye(3)
# rotational-inertia entry -- NOTE(review): 3/5*r**2 matches neither the solid
# (2/5*m*r**2) nor hollow sphere; presumably intentional here, worth confirming
mass[2, 2] = 3. / 5 * r * r
weight = np.array([-m * g, 0, 0])  # constant external force (gravity)
tol = np.finfo(np.double).eps      # comparison tolerance for allclose
def test_LagrangianLinearTIDS():
    """A ball built as LagrangianLinearTIDS echoes back its state, mass and fExt."""
    ball = K.LagrangianLinearTIDS(q, v, mass)
    for expected, actual in ((q, ball.q()),
                             (v, ball.velocity()),
                             (mass, ball.mass())):
        assert np.allclose(actual, expected, rtol=tol, atol=tol)
    ball.setFExtPtr(weight)
    assert np.allclose(ball.fExt(), weight, rtol=tol, atol=tol)
def test_NewtonImpactNSL():
    """The restitution coefficient round-trips through the nonsmooth law."""
    assert K.NewtonImpactNSL(e).e() == e
def test_LagrangianLinearTIR():
    """jachq of a linear TI relation is the H matrix it was built with."""
    jacobian = np.array([[1, 0, 0]])
    offset = np.zeros(1)
    relation = K.LagrangianLinearTIR(jacobian, offset)
    assert np.allclose(relation.jachq(), jacobian, rtol=tol, atol=tol)
def test_Model():
    """A Model built over [t0, T] reports t0 as its initial time."""
    assert K.Model(t0, T).t0() == t0
| {
"repo_name": "siconos/siconos-deb",
"path": "kernel/swig/tests/test_modelingTools.py",
"copies": "1",
"size": "1095",
"license": "apache-2.0",
"hash": 3254588241091078700,
"line_mean": 20.0576923077,
"line_max": 66,
"alpha_frac": 0.6310502283,
"autogenerated": false,
"ratio": 2.4829931972789114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36140434255789117,
"avg_score": null,
"num_lines": null
} |
"""A few tests using a server (and --fullstate so that know will 'see' uniform numbers)"""
from __future__ import print_function
import os
import subprocess
import sys
import time
import hfo
hfo_env = hfo.HFOEnvironment()
def try_step(): # if a game ends within ~5 frames, something is wrong...
    """Step the environment once; the episode must still be IN_GAME."""
    status = hfo_env.step()
    assert status == hfo.IN_GAME, (
        "Status is {!s} ({!r}), not IN_GAME".format(
            hfo_env.statusToString(status), status))
    return hfo_env.getState()
def test_with_server():
    """End-to-end check against a live HFO server (--fullstate, 1 learning
    agent, 2 offense npcs, 2 defense npcs): verifies state size, uniform
    numbers, teammate/opponent counts, basic acting, and clean shutdown."""
    test_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
    binary_dir = os.path.normpath(test_dir + "/../bin")
    conf_dir = os.path.join(binary_dir, 'teams/base/config/formations-dt')
    bin_HFO = os.path.join(binary_dir, "HFO")
    popen_list = [sys.executable, "-x", bin_HFO,
                  "--offense-agents=1", "--defense-npcs=2",
                  "--offense-npcs=2", "--trials=1", "--headless",
                  "--fullstate"]
    HFO_process = subprocess.Popen(popen_list)
    time.sleep(0.2)
    # An immediate exit means the launch itself failed.
    assert (HFO_process.poll() is
            None), "Failed to start HFO with command '{}'".format(" ".join(popen_list))
    time.sleep(3)
    try:
        hfo_env.connectToServer(config_dir=conf_dir) # using defaults otherwise
        # 58 base features plus 9 per other player (4 of them here).
        min_state_size = 58+(9*4)
        state_size = hfo_env.getStateSize()
        assert (state_size >=
                min_state_size), "State size is {!s}, not {!s}+".format(state_size,min_state_size)
        print("State size is {!s}".format(state_size))
        my_unum = hfo_env.getUnum()
        assert ((my_unum > 0) and (my_unum <= 11)), "Wrong self uniform number ({!r})".format(my_unum)
        print("My unum is {!s}".format(my_unum))
        num_teammates = hfo_env.getNumTeammates()
        assert (num_teammates == 2), "Wrong num teammates ({!r})".format(num_teammates)
        num_opponents = hfo_env.getNumOpponents()
        assert (num_opponents == 2), "Wrong num opponents ({!r})".format(num_opponents)
        had_ok_unum = False
        had_ok_unum_set_my_side = set()
        had_ok_unum_set_their_side = set();
        hfo_env.act(hfo.NOOP)
        state = try_step()
        for x in range(0,5):
            # Feature 12 is the kickable flag -- dribble when we can kick.
            if int(state[12]) == 1: # can kick the ball
                hfo_env.act(hfo.DRIBBLE)
            else:
                hfo_env.act(hfo.MOVE)
            state = try_step()
            # The last 4 features are other players' uniform numbers
            # (scaled by 1/100, hence the <= 0.11 check below).
            for n in range((state_size-4), state_size):
                their_unum = state[n]
                if ((their_unum > 0) and (their_unum <= 0.11)):
                    print("{!s}: OK uniform number ({!r}) for {!s}".format(x,their_unum,n))
                    had_ok_unum = True
                    if n > (state_size-3):
                        had_ok_unum_set_their_side.add(their_unum)
                    else:
                        had_ok_unum_set_my_side.add(their_unum)
                elif x > 3:
                    print("{!s}: Wrong other uniform number ({!r}) for {!s}".format(x,their_unum,n))
            # Stop early once >1 distinct unum was seen on each side.
            if (len(had_ok_unum_set_my_side) > 1) and (len(had_ok_unum_set_their_side) > 1):
                break
        assert had_ok_unum, "Never saw OK other uniform number"
        # MOVE_TO requires arguments, so a bare act() must raise.
        try:
            hfo_env.act(hfo.MOVE_TO)
        except AssertionError:
            pass
        else:
            raise AssertionError("Should have got AssertionError")
        HFO_process.terminate()
        hfo_env.act(hfo.QUIT)
        time.sleep(1.2)
        status = hfo_env.step()
        assert (status ==
                hfo.SERVER_DOWN), ("Status is {!s} ({!r}), not SERVER_DOWN".
                                   format(hfo_env.statusToString(status), status))
    finally:
        # Clean up the server even if an assertion above fired.
        if HFO_process.poll() is None:
            HFO_process.terminate()
        os.system("killall -9 rcssserver")
if __name__ == '__main__':
    # Allow running this test directly, outside of a test harness.
    test_with_server()
| {
"repo_name": "LARG/HFO",
"path": "tests/test_with_server_fullstate.py",
"copies": "2",
"size": "3979",
"license": "mit",
"hash": -7458352879266243000,
"line_mean": 32.4369747899,
"line_max": 102,
"alpha_frac": 0.528524755,
"autogenerated": false,
"ratio": 3.3493265993265995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9786949209641345,
"avg_score": 0.018180428937050807,
"num_lines": 119
} |
"""A few tests using a server"""
from __future__ import print_function
import os
import subprocess
import sys
import time
import hfo
hfo_env = hfo.HFOEnvironment()
def try_step(): # if a game ends within ~20 frames, something is wrong...
    """Step the environment once; the episode must still be IN_GAME."""
    status = hfo_env.step()
    assert status == hfo.IN_GAME, (
        "Status is {!s} ({!r}), not IN_GAME".format(
            hfo_env.statusToString(status), status))
    return hfo_env.getState()
def test_with_server():
    """End-to-end check against a live HFO server (no --fullstate; 1 learning
    agent, 2 offense npcs, 2 defense npcs): verifies state size, uniform
    numbers, teammate/opponent counts, acting incl. REORIENT, and shutdown."""
    test_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
    binary_dir = os.path.normpath(test_dir + "/../bin")
    conf_dir = os.path.join(binary_dir, 'teams/base/config/formations-dt')
    bin_HFO = os.path.join(binary_dir, "HFO")
    popen_list = [sys.executable, "-x", bin_HFO,
                  "--offense-agents=1", "--defense-npcs=2",
                  "--offense-npcs=2", "--trials=1", "--headless"]
    HFO_process = subprocess.Popen(popen_list)
    time.sleep(0.2)
    # An immediate exit means the launch itself failed.
    assert (HFO_process.poll() is
            None), "Failed to start HFO with command '{}'".format(" ".join(popen_list))
    time.sleep(3)
    try:
        hfo_env.connectToServer(config_dir=conf_dir) # using defaults otherwise
        # 58 base features plus 9 per other player (4 of them here).
        min_state_size = 58+(9*4)
        state_size = hfo_env.getStateSize()
        assert (state_size >=
                min_state_size), "State size is {!s}, not {!s}+".format(state_size,min_state_size)
        print("State size is {!s}".format(state_size))
        my_unum = hfo_env.getUnum()
        assert ((my_unum > 0) and (my_unum <= 11)), "Wrong self uniform number ({!r})".format(my_unum)
        print("My unum is {!s}".format(my_unum))
        num_teammates = hfo_env.getNumTeammates()
        assert (num_teammates == 2), "Wrong num teammates ({!r})".format(num_teammates)
        num_opponents = hfo_env.getNumOpponents()
        assert (num_opponents == 2), "Wrong num opponents ({!r})".format(num_opponents)
        had_ok_unum = False
        had_ok_unum_set_my_side = set()
        had_ok_unum_set_their_side = set();
        hfo_env.act(hfo.NOOP)
        state = try_step()
        for x in range(0,25):
            # Feature 12 is the kickable flag -- dribble when we can kick;
            # negative validity features mean we are disoriented, so REORIENT.
            if int(state[12]) == 1: # can kick the ball
                hfo_env.act(hfo.DRIBBLE)
            elif (state[50] < 0) or (state[0] < 0) or (state[1] < 0) or (state[54] < 0):
                hfo_env.act(hfo.REORIENT)
            else:
                hfo_env.act(hfo.MOVE)
            state = try_step()
            # The last 4 features are other players' uniform numbers
            # (scaled by 1/100, hence the <= 0.11 check below).
            for n in range((state_size-4), state_size):
                their_unum = state[n]
                if ((their_unum > 0) and (their_unum <= 0.11)):
                    print("{!s}: OK uniform number ({!r}) for {!s}".format(x,their_unum,n))
                    had_ok_unum = True
                    if n > (state_size-3):
                        had_ok_unum_set_their_side.add(their_unum)
                    else:
                        had_ok_unum_set_my_side.add(their_unum)
                elif x > 3:
                    print("{!s}: Wrong other uniform number ({!r}) for {!s}".format(x,their_unum,n))
            # Stop early once >1 distinct unum was seen on each side.
            if (len(had_ok_unum_set_my_side) > 1) and (len(had_ok_unum_set_their_side) > 1):
                break
        assert had_ok_unum, "Never saw OK other uniform number"
        # MOVE_TO requires arguments, so a bare act() must raise.
        try:
            hfo_env.act(hfo.MOVE_TO)
        except AssertionError:
            pass
        else:
            raise AssertionError("Should have got AssertionError")
        HFO_process.terminate()
        hfo_env.act(hfo.QUIT)
        time.sleep(1.2)
        status = hfo_env.step()
        assert (status ==
                hfo.SERVER_DOWN), ("Status is {!s} ({!r}), not SERVER_DOWN".
                                   format(hfo_env.statusToString(status), status))
    finally:
        # Clean up the server even if an assertion above fired.
        if HFO_process.poll() is None:
            HFO_process.terminate()
        os.system("killall -9 rcssserver")
if __name__ == '__main__':
    # Allow running this test directly, outside of a test harness.
    test_with_server()
| {
"repo_name": "mhauskn/HFO",
"path": "tests/test_with_server.py",
"copies": "2",
"size": "4021",
"license": "mit",
"hash": -1793857208741640400,
"line_mean": 32.5083333333,
"line_max": 102,
"alpha_frac": 0.5254911714,
"autogenerated": false,
"ratio": 3.293202293202293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4818693464602293,
"avg_score": null,
"num_lines": null
} |
"""A few things that didn't seem to fit anywhere else."""
import os, os.path
import pwd
import tempfile
import fcntl
import errno
import threading
import subprocess
import shutil
import sys
import logger
PID_FILE = '/var/run/nodemanager.pid'
####################
def get_default_if():
    """Name of the node's primary network interface, falling back to eth0."""
    interface = get_if_from_hwaddr(get_hwaddr_from_plnode())
    return interface or "eth0"
def get_hwaddr_from_plnode():
    """Return the MAC address recorded in /usr/boot/plnode.txt, or None.

    Best-effort: any failure (missing file, malformed line) yields None.
    """
    try:
        for line in open("/usr/boot/plnode.txt", 'r').readlines():
            if not line.startswith("NET_DEVICE"):
                continue
            return line.split("=")[1].strip().strip('"')
    except:
        pass
    return None
def get_if_from_hwaddr(hwaddr):
    """Return the network device whose hardware address is hwaddr, or None."""
    import sioc
    for dev in sioc.gifconf():
        if sioc.gifhwaddr(dev) == hwaddr:
            return dev
    return None
####################
# daemonizing
def as_daemon_thread(run):
    """Call function <run> with no arguments in its own daemon thread."""
    worker = threading.Thread(target=run)
    worker.daemon = True
    worker.start()
def close_nonstandard_fds():
    """Close all open file descriptors other than 0, 1, and 2."""
    _SC_OPEN_MAX = 4
    for fd in range(3, os.sysconf(_SC_OPEN_MAX)):
        try:
            os.close(fd)
        except OSError:
            # most likely an fd that isn't open
            pass
# after http://www.erlenstar.demon.co.uk/unix/faq_2.html
def daemon():
    """Daemonize the current process (classic double-fork + setsid)."""
    # First fork: the parent returns to the shell, the child continues.
    if os.fork() != 0: os._exit(0)
    # New session: detach from the controlling terminal.
    os.setsid()
    # Second fork: the session leader exits so we can never reacquire a tty.
    if os.fork() != 0: os._exit(0)
    os.chdir('/')
    os.umask(0022)
    # Redirect stdin to /dev/null; stdout/stderr go to a crash log.
    devnull = os.open(os.devnull, os.O_RDWR)
    os.dup2(devnull, 0)
    # xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
    crashlog = os.open('/var/log/nodemanager.daemon', os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
    os.dup2(crashlog, 1)
    os.dup2(crashlog, 2)
def fork_as(su, function, *args):
    """fork(), cd / to avoid keeping unused directories open, close all nonstandard file descriptors (to avoid capturing open sockets), fork() again (to avoid zombies) and call <function> with arguments <args> in the grandchild process. If <su> is not None, set our group and user ids appropriately in the child process."""
    child_pid = os.fork()
    if child_pid == 0:
        try:
            os.chdir('/')
            close_nonstandard_fds()
            # Optionally drop privileges to user <su> before the second fork.
            if su:
                pw_ent = pwd.getpwnam(su)
                os.setegid(pw_ent[3])
                os.seteuid(pw_ent[2])
            # Second fork: the grandchild runs <function>, the child exits
            # immediately so the parent's waitpid below reaps it (no zombies).
            child_pid = os.fork()
            if child_pid == 0: function(*args)
        except:
            os.seteuid(os.getuid()) # undo su so we can write the log file
            os.setegid(os.getgid())
            logger.log_exc("tools: fork_as")
        os._exit(0)
    else: os.waitpid(child_pid, 0)
####################
# manage files
def pid_file():
    """We use a pid file to ensure that only one copy of NM is running at a given time.
    If successful, this function will write a pid file containing the pid of the current process.
    The return value is the pid of the other running process, or None otherwise."""
    other_pid = None
    if os.access(PID_FILE, os.F_OK):  # check for a pid file
        handle = open(PID_FILE)  # pid file exists, read it
        other_pid = int(handle.read())
        handle.close()
        # check for a process with that pid by sending signal 0
        try: os.kill(other_pid, 0)
        except OSError, e:
            if e.errno == errno.ESRCH: other_pid = None  # doesn't exist
            else: raise  # who knows
    if other_pid == None:
        # no live owner: claim the pid file for ourselves (atomically,
        # via write_file's tempfile-and-rename)
        write_file(PID_FILE, lambda f: f.write(str(os.getpid())))
    return other_pid
def write_file(filename, do_write, **kw_args):
    """Write file <filename> atomically: <do_write> fills a temporary file,
    which is then renamed over <filename>. kw_args (mode, uidgid) are passed
    through to write_temp_file."""
    tmp_name = write_temp_file(do_write, **kw_args)
    shutil.move(tmp_name, filename)
def write_temp_file(do_write, mode=None, uidgid=None):
    """Create a temporary file, optionally chmod/chown it, fill it via
    do_write(file_object), and return its path."""
    fd, temporary_filename = tempfile.mkstemp()
    if mode:
        os.chmod(temporary_filename, mode)
    if uidgid:
        os.chown(temporary_filename, *uidgid)
    with os.fdopen(fd, 'w') as handle:
        do_write(handle)
    return temporary_filename
# replace a target file with a new contents - checks for changes
# can handle chmod if requested
# can also remove resulting file if contents are void, if requested
# performs atomically:
# writes in a tmp file, which is then renamed (from sliverauth originally)
# returns True if a change occurred, or the file is deleted
def replace_file_with_string (target, new_contents, chmod=None, remove_if_empty=False):
    """Replace <target>'s contents with <new_contents>, atomically (write a
    temp file in the same directory, then rename). Optionally chmod the
    result, and optionally delete the file when the contents are empty.
    Returns True if a change occurred (including deletion), False otherwise."""
    try:
        current=file(target).read()
    except:
        # missing/unreadable target counts as empty current contents
        current=""
    if current==new_contents:
        # if turns out to be an empty string, and remove_if_empty is set,
        # then make sure to trash the file if it exists
        if remove_if_empty and not new_contents and os.path.isfile(target):
            logger.verbose("tools.replace_file_with_string: removing file %s"%target)
            # NOTE(review): `finally: return True` reports success even if
            # unlink raises -- presumably deliberate best-effort; confirm.
            try: os.unlink(target)
            finally: return True
        return False
    # overwrite target file: create a temp in the same directory
    # (same filesystem, so the final move is an atomic rename)
    path=os.path.dirname(target) or '.'
    fd, name = tempfile.mkstemp('','repl',path)
    os.write(fd,new_contents)
    os.close(fd)
    if os.path.exists(target):
        os.unlink(target)
    shutil.move(name,target)
    if chmod: os.chmod(target,chmod)
    return True
####################
# utilities functions to get (cached) information from the node
# get node_id from /etc/planetlab/node_id and cache it
# cached node id, lazily read from /etc/planetlab/node_id
_node_id=None
def node_id():
    """Return (and cache) this node's id from /etc/planetlab/node_id.

    Falls back to "" when the file is missing or unparsable."""
    global _node_id
    if _node_id is None:
        try:
            _node_id=int(file("/etc/planetlab/node_id").read())
        except:
            _node_id=""
    return _node_id
# cached output of `uname -i`, computed on first use
_root_context_arch=None
def root_context_arch():
    """Return (and cache) the hardware platform reported by `uname -i`."""
    global _root_context_arch
    if not _root_context_arch:
        uname = subprocess.Popen(["uname", "-i"], stdout=subprocess.PIPE)
        output, _ = uname.communicate()
        _root_context_arch = output.strip()
    return _root_context_arch
####################
class NMLock:
    """File-based lock using fcntl.lockf; the fd is marked close-on-exec so
    child processes do not inherit (and hold) the lock."""
    def __init__(self, file):
        logger.log("tools: Lock %s initialized." % file, 2)
        self.fd = os.open(file, os.O_RDWR|os.O_CREAT, 0600)
        flags = fcntl.fcntl(self.fd, fcntl.F_GETFD)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(self.fd, fcntl.F_SETFD, flags)
    def __del__(self):
        # release the fd when the lock object is garbage-collected
        os.close(self.fd)
    def acquire(self):
        # NOTE(review): LOCK_SH takes a *shared* lock -- confirm exclusive
        # (LOCK_EX) was not intended here.
        logger.log("tools: Lock acquired.", 2)
        fcntl.lockf(self.fd, fcntl.LOCK_SH)
    def release(self):
        logger.log("tools: Lock released.", 2)
        fcntl.lockf(self.fd, fcntl.LOCK_UN)
####################
# Utilities for getting the IP address of a LXC/Openvswitch slice. Do this by
# running ifconfig inside of the slice's context.
def get_sliver_process(slice_name, process_cmdline):
    """ Utility function to find a process inside of an LXC sliver. Returns
        (cgroup_fn, pid). cgroup_fn is the filename of the cgroup file for
        the process, for example /proc/2592/cgroup. Pid is the process id of
        the process. If the process is not found then (None, None) is returned.
    """
    try:
        # scan every process's cgroup file; the freezer hierarchy entry
        # ends with the slice name for processes inside that slice
        cmd = 'grep %s /proc/*/cgroup | grep freezer'%slice_name
        output = os.popen(cmd).readlines()
    except:
        # the slice couldn't be found
        logger.log("get_sliver_process: couldn't find slice %s" % slice_name)
        return (None, None)
    cgroup_fn = None
    pid = None
    for e in output:
        try:
            # each line looks like "/proc/<pid>/cgroup:<n>:freezer:/<...>/<slice>"
            l = e.rstrip()
            path = l.split(':')[0]
            comp = l.rsplit(':')[-1]
            slice_name_check = comp.rsplit('/')[-1]
            if (slice_name_check == slice_name):
                slice_path = path
                pid = slice_path.split('/')[2]
                # match the exact command line of the wanted process
                cmdline = open('/proc/%s/cmdline'%pid).read().rstrip('\n\x00')
                if (cmdline == process_cmdline):
                    cgroup_fn = slice_path
                    break
        except:
            break
    if (not cgroup_fn) or (not pid):
        logger.log("get_sliver_process: process %s not running in slice %s" % (process_cmdline, slice_name))
        return (None, None)
    return (cgroup_fn, pid)
def get_sliver_ifconfig(slice_name, device="eth0"):
    """ return the output of "ifconfig" run from inside the sliver.

        side effects: adds "/usr/sbin" to sys.path
    """
    # See if setns is installed. If it's not then we're probably not running
    # LXC.
    if not os.path.exists("/usr/sbin/setns.so"):
        return None
    # setns is part of lxcsu and is installed to /usr/sbin
    if not "/usr/sbin" in sys.path:
        sys.path.append("/usr/sbin")
    import setns
    # find the sliver's init process so we can enter its network namespace
    (cgroup_fn, pid) = get_sliver_process(slice_name, "/sbin/init")
    if (not cgroup_fn) or (not pid):
        return None
    path = '/proc/%s/ns/net'%pid
    result = None
    try:
        # switch into the sliver's network namespace, run ifconfig there
        setns.chcontext(path)
        args = ["/sbin/ifconfig", device]
        sub = subprocess.Popen(args, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
        sub.wait()
        if (sub.returncode != 0):
            logger.log("get_slice_ifconfig: error in ifconfig: %s" % sub.stderr.read())
        result = sub.stdout.read()
    finally:
        # always switch back to the root (pid 1) network namespace
        setns.chcontext("/proc/1/ns/net")
    return result
def get_sliver_ip(slice_name):
    """IPv4 address of the slice's eth0, parsed from ifconfig output, or None."""
    ifconfig = get_sliver_ifconfig(slice_name)
    if not ifconfig:
        return None
    for line in ifconfig.split("\n"):
        if "inet addr:" not in line:
            continue
        # example: '  inet addr:192.168.122.189  Bcast:192.168.122.255  Mask:255.255.255.0'
        fields = line.strip().split()
        if len(fields) >= 2 and fields[1].startswith("addr:"):
            return fields[1].split(":")[1]
    return None
### this returns the kind of virtualization on the node
# either 'vs' or 'lxc'
# also caches it in /etc/planetlab/virt for next calls
# could be promoted to core nm if need be
# cache file for the detected virtualization kind ('vs' or 'lxc')
virt_stamp="/etc/planetlab/virt"
def get_node_virt ():
    """Return 'vs' or 'lxc' depending on the node's virtualization,
    caching the answer in /etc/planetlab/virt for subsequent calls."""
    try:
        return file(virt_stamp).read().strip()
    except:
        pass
    logger.log("Computing virt..")
    # heuristic: a working `vserver` binary means vserver virtualization
    try:
        if subprocess.call ([ 'vserver', '--help' ]) ==0: virt='vs'
        else: virt='lxc'
    except:
        virt='lxc'
    with file(virt_stamp,"w") as f:
        f.write(virt)
    return virt
# how to run a command in a slice
# now this is a painful matter
# the problem is with capsh that forces a bash command to be injected in its exec'ed command
# so because lxcsu uses capsh, you cannot exec anything else than bash
# bottom line is, what actually needs to be called is
# vs: vserver exec slicename command and its arguments
# lxc: lxcsu slicename "command and its arguments"
# which, OK, is no big deal as long as the command is simple enough,
# but do not stretch it with arguments that have spaces or need quoting as that will become a nightmare
def command_in_slice (slicename, argv):
    """Wrap argv so that it executes inside the given slice.

    vs:  vserver <slice> exec <argv...>
    lxc: lxcsu <slice> "<argv joined into one string>" -- capsh forces a
         single bash command string, so arguments that need quoting or
         contain spaces will not survive intact.
    Falls back to argv unchanged (with a warning) if virt is unknown.
    """
    virt = get_node_virt()
    if virt == 'vs':
        return ['vserver', slicename, 'exec'] + argv
    if virt == 'lxc':
        # wrap up argv in a single string for -c
        return ['lxcsu', slicename, " ".join(argv)]
    logger.log("command_in_slice: WARNING: could not find a valid virt")
    return argv
| {
"repo_name": "wangyang2013/NodeManager",
"path": "tools.py",
"copies": "2",
"size": "11449",
"license": "apache-2.0",
"hash": -6975115171925987000,
"line_mean": 33.6939393939,
"line_max": 324,
"alpha_frac": 0.6115817975,
"autogenerated": false,
"ratio": 3.5227692307692307,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013919543932113627,
"num_lines": 330
} |
# a few utilities common to sfi and sfaadmin
def optparse_listvalue_callback(option, opt, value, parser):
    """optparse callback accumulating comma-separated values into a list option.

    The magic value 'none' (any case) leaves the list unchanged, so that e.g.
    `sfi update -t slice -x the.slice.hrn -r none` works -- much easier to
    pass through ssh than -r ''.
    """
    accumulated = getattr(parser.values, option.dest) or []
    if value.lower() == 'none':
        updated = accumulated
    else:
        updated = accumulated + value.split(',')
    setattr(parser.values, option.dest, updated)
def optparse_dictvalue_callback (option, option_string, value, parser):
    """optparse callback storing KEY=VALUE arguments into a dict option."""
    try:
        key, val = value.split('=', 1)
        getattr(parser.values, option.dest)[key] = val
    except:
        # malformed argument (no '=') or unusable destination: show usage, quit
        parser.print_help()
        sys.exit(1)
# a code fragment that could be helpful for argparse which unfortunately is
# available with 2.7 only, so this feels like too strong a requirement for the client side
#class ExtraArgAction (argparse.Action):
# def __call__ (self, parser, namespace, values, option_string=None):
# would need a try/except of course
# (k,v)=values.split('=')
# d=getattr(namespace,self.dest)
# d[k]=v
#####
#parser.add_argument ("-X","--extra",dest='extras', default={}, action=ExtraArgAction,
# help="set extra flags, testbed dependent, e.g. --extra enabled=true")
##############################
# these are not needed from the outside
def terminal_render_plural (how_many, name, names=None):
    """Render a count with its noun: 'No key', '1 key', '3 keys'.

    names: optional irregular plural; defaults to name + 's'.
    """
    if not names:
        names = "%ss" % name
    if how_many <= 0:
        return "No %s" % name
    if how_many == 1:
        return "1 %s" % name
    return "%d %s" % (how_many, names)
def terminal_render_default (record,options):
    """Fallback renderer: one line with hrn and record type (Python 2 print)."""
    print "%s (%s)" % (record['hrn'], record['type'])
def terminal_render_user (record, options):
    """Render a user record; trailing commas keep fragments on one line (Python 2)."""
    print "%s (User)"%record['hrn'],
    if record.get('reg-pi-authorities',None): print " [PI at %s]"%(" and ".join(record['reg-pi-authorities'])),
    if record.get('reg-slices',None): print " [IN slices %s]"%(" and ".join(record['reg-slices'])),
    user_keys=record.get('reg-keys',[])
    if not options.verbose:
        # terse mode: only the key count
        print " [has %s]"%(terminal_render_plural(len(user_keys),"key"))
    else:
        # verbose mode: dump each key, indented
        print ""
        for key in user_keys: print 8*' ',key.strip("\n")
def terminal_render_slice (record, options):
    """Render a slice record with its researcher list (Python 2 print)."""
    print "%s (Slice)"%record['hrn'],
    if record.get('reg-researchers',None): print " [USERS %s]"%(" and ".join(record['reg-researchers'])),
#    print record.keys()
    print ""
def terminal_render_authority (record, options):
    """Render an authority record with its PI list (Python 2 print)."""
    print "%s (Authority)"%record['hrn'],
    if record.get('reg-pis',None): print " [PIS %s]"%(" and ".join(record['reg-pis'])),
    print ""
def terminal_render_node (record, options):
    """Render a node record (Python 2 print)."""
    print "%s (Node)"%record['hrn']
### used in sfi list
def terminal_render (records,options):
    """Group records by type and render each group with its per-type renderer.

    Python 2 code: keys() returns a list (so .sort() works) and per-type
    renderers are located with eval('terminal_render_' + type), falling back
    to terminal_render_default for unknown types.
    """
    # sort records by type
    grouped_by_type={}
    for record in records:
        type=record['type']
        if type not in grouped_by_type: grouped_by_type[type]=[]
        grouped_by_type[type].append(record)
    group_types=grouped_by_type.keys()
    group_types.sort()
    for type in group_types:
        group=grouped_by_type[type]
#        print 20 * '-', type
        # dispatch by naming convention; eval is safe here only because
        # 'type' comes from registry records, not arbitrary user input
        try: renderer=eval('terminal_render_'+type)
        except: renderer=terminal_render_default
        for record in group: renderer(record,options)
####################
def filter_records(type, records):
    """Return the records whose 'type' field matches; type == 'all' keeps everything."""
    return [record for record in records
            if type == "all" or record['type'] == type]
| {
"repo_name": "yippeecw/sfa",
"path": "sfa/client/common.py",
"copies": "2",
"size": "3567",
"license": "mit",
"hash": 5748487680873300000,
"line_mean": 36.5473684211,
"line_max": 111,
"alpha_frac": 0.6212503504,
"autogenerated": false,
"ratio": 3.463106796116505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9912361952743662,
"avg_score": 0.03439903875456837,
"num_lines": 95
} |
# A few utilities for dealing with certainty judgments
def cert_or(a, b):
    """Combine two certainty factors disjunctively (MYCIN-style).

    Reinforcing evidence of the same sign moves the result toward +/-1;
    conflicting evidence is averaged, scaled by how little they disagree.
    """
    if a > 0 and b > 0:
        return a + b - a * b
    if a < 0 and b < 0:
        return a + b + a * b
    # mixed (or zero) signs
    return (a + b) / (1 - min(abs(a), abs(b)))
def cert_and(a, b):
    """Conjunction of two certainty factors: the weaker one wins."""
    return a if a < b else b
def is_cert(x):
    """True when x lies in the legal certainty range [Cert.false, Cert.true]."""
    return Cert.false <= x and x <= Cert.true
def cert_true(x):
    """A valid certainty counts as 'true' once it clears the positive cutoff."""
    return bool(is_cert(x) and x > Cert.cutoff)
def cert_false(x):
    """A valid certainty counts as 'false' below (cutoff - 1), i.e. clearly negative."""
    return bool(is_cert(x) and x < (Cert.cutoff - 1))
class Cert(object):
    """Certainty-factor constants; values range over [-1, 1]."""
    true = 1.0       # complete confidence
    false = -1.0     # complete disbelief
    unknown = 0.0    # no evidence either way
    cutoff = 0.2     # threshold above which a certainty counts as 'true'
# Context (the things we can reason about)
class Ctx(object):
    """A context: a kind of thing the rule system can reason about.

    initial: params to ask about up front; goals: params to derive.
    """

    def __init__(self, name, initial=None, goals=None):
        self.name = name
        self.count = 0
        self.initial = initial or []
        self.goals = goals or []

    def build(self):
        """Mint a fresh instance id (name, serial) for this context."""
        instance = (self.name, self.count)
        self.count += 1
        return instance
# Parameters (the qualities of the context that we're interested in)
class Param(object):
    """A parameter: a named quality of a context instance.

    Legal values are described either by a type (cls) or by an enumeration
    (enum). ask_first marks params the user should be asked about before
    trying rules.
    """

    def __init__(self, name, ctx=None, enum=None, cls=None, ask_first=False):
        self.name = name
        self.ctx = ctx
        self.enum = enum
        self.cls = cls
        self.ask_first = ask_first

    def type_string(self):
        """Human-readable description of the legal values."""
        if self.cls:
            return self.cls.__name__
        return '(%s)' % ', '.join(list(self.enum))

    def from_string(self, val):
        """Parse a user-supplied string into a legal value; raise ValueError otherwise."""
        if self.cls:
            return self.cls(val)
        if self.enum and val in self.enum:
            return val
        raise ValueError('val must be one of %s for the param %s' %
                         (', '.join(list(self.enum)), self.name))
def eval_condition(condition, values, discover=None):
    """Sum the certainty of every known value satisfying the condition.

    condition: (param, inst, op, val); values: {known_value: certainty}.
    If discover is given, it is invoked first so the param can be derived
    or asked for on demand.
    """
    param, inst, op, val = condition
    if discover:
        discover(param, inst)
    matching = [cert for known, cert in values.items() if op(known, val)]
    return sum(matching)
def print_condition(condition):
    """Format a condition as 'param instname opname value' for display."""
    param, inst, op, val = condition
    # instance ids are (name, serial) tuples; bare strings are shown as-is
    display = inst[0] if not isinstance(inst, str) else inst
    return '%s %s %s %s' % (param, display, op.__name__, val)
def get_vals(values, param, inst):
    """Fetch (creating if absent) the value->certainty dict for (param, inst)."""
    key = (param, inst)
    if key not in values:
        values[key] = {}
    return values[key]
def get_cert(values, param, inst, val):
    """Certainty that (param, inst) has value val; unseen values default to unknown."""
    return get_vals(values, param, inst).setdefault(val, Cert.unknown)
def update_cert(values, param, inst, val, cert):
    """Fold new evidence for (param, inst) == val into the store via cert_or."""
    merged = cert_or(get_cert(values, param, inst, val), cert)
    get_vals(values, param, inst)[val] = merged
# Rules (how we reason about the context)
class Rule(object):
    """A production rule: IF premises THEN conclusions, with a certainty factor.

    Conditions are stored "raw" against context names and bound to concrete
    instances at evaluation time via premises()/conclusions().
    """
    def __init__(self, num, premises, conclusions, cert):
        self.num = num
        self.cert = cert
        self.raw_premises = premises
        self.raw_conclusions = conclusions
    def __str__(self):
        prems = map(print_condition, self.raw_premises)
        concls = map(print_condition, self.raw_conclusions)
        templ = 'RULE %d\nIF\n\t%s\nTHEN %f\n\t%s'
        return templ % (self.num, '\n\t'.join(prems), self.cert, '\n\t'.join(concls))
    def clone(self):
        # shallow copy with fresh condition lists, so callers may replace them
        return Rule(self.num, list(self.raw_premises),
                    list(self.raw_conclusions), self.cert)
    def _bind_cond(self, cond, instances):
        # replace the context name in a raw condition with its current instance
        param, ctx, op, val = cond
        return param, instances[ctx], op, val
    def premises(self, instances):
        return [self._bind_cond(premise, instances) for premise in self.raw_premises]
    def conclusions(self, instances):
        return [self._bind_cond(concl, instances) for concl in self.raw_conclusions]
    def applicable(self, values, instances, discover=None):
        """Certainty that all premises hold, or Cert.false if any clearly fails.

        Two passes: the first rejects on already-known evidence WITHOUT
        triggering discovery; only then does the second pass (with discover)
        ask for / derive missing values.
        """
        for premise in self.premises(instances):
            param, inst, op, val = premise
            vals = get_vals(values, param, inst)
            cert = eval_condition(premise, vals)
            if cert_false(cert):
                return Cert.false
        total_cert = Cert.true
        for premise in self.premises(instances):
            param, inst, op, val = premise
            vals = get_vals(values, param, inst)
            cert = eval_condition(premise, vals, discover)
            total_cert = cert_and(total_cert, cert)
        if not cert_true(total_cert):
            return Cert.false
        return total_cert
    def apply(self, values, instances, discover=None, track=None):
        """Fire the rule if applicable, updating conclusion certainties.

        Returns True when the rule fired. track (if given) is called with
        the rule before evaluation, for tracing/why-explanations.
        """
        if track:
            track(self)
        cert = self.cert * self.applicable(values, instances, discover)
        if not cert_true(cert):
            return False
        for conclusion in self.conclusions(instances):
            param, inst, op, val = conclusion
            update_cert(values, param, inst, val, cert)
        return True
def use_rules(values, instances, rules, discover=None, track_rules=None):
    """Apply EVERY rule (no short-circuit) and report whether any one fired.

    All rules are applied for their side effects on the certainty store,
    so the results are collected in a list before any() is taken.
    """
    outcomes = []
    for rule in rules:
        outcomes.append(rule.apply(values, instances, discover, track_rules))
    return any(outcomes)
def write(line): print line  # default output sink: echo to stdout (Python 2 print)
# The Expert Shell (how we interact with the rule system)
class Shell(object):
    """Interactive expert-system shell tying contexts, params and rules together.

    Python 2 code: the default reader is raw_input. Backward chaining happens
    in discover(); ask_values() handles the interactive question loop.
    """
    def __init__(self, read=raw_input, write=write):
        self.read = read                # callable(prompt) -> user reply
        self.write = write              # callable(line) -> output sink
        self.rules = {}                 # param name -> rules concluding about it
        self.ctxs = {}                  # context name -> Ctx
        self.params = {}                # param name -> Param
        self.given = set()              # (param, inst) pairs already established
        self.asked = set()              # (param, inst) pairs already asked about
        self.given_values = {}          # (param, inst) -> {value: certainty}
        self.current_inst = None
        self.instances = {}             # context name -> current instance id
        self.current_rule = None        # rule under evaluation, or 'initial'/'goal'
    def clear(self):
        """Reset all per-run state (definitions are kept)."""
        self.given.clear()
        self.asked.clear()
        self.given_values.clear()
        self.current_inst = None
        self.current_rule = None
        self.instances.clear()
    def define_rule(self, rule):
        # index the rule under every param it can conclude about
        for param, ctx, op, val in rule.raw_conclusions:
            self.rules.setdefault(param, []).append(rule)
    def define_ctx(self, ctx):
        self.ctxs[ctx.name] = ctx
    def define_param(self, param):
        self.params[param.name] = param
    def get_rules(self, param):
        return self.rules.setdefault(param, [])
    def build(self, ctx_name):
        """Create a fresh instance of a context and make it current."""
        inst = self.ctxs[ctx_name].build()
        self.current_inst = inst
        self.instances[ctx_name] = inst
        return inst
    def get_param(self, name):
        # unknown params get a permissive default Param on the fly
        return self.params.setdefault(name, Param(name))
    HELP = """Type one of the following:
?       - to see possible answers for this param
rule    - to show the current rule
why     - to see why this question is asked
help    - to show this message
unknown - if the answer to this question is not given
<val>   - a single definite answer to the question
<val1> <cert1> [, <val2> <cert2>, ...]
        - if there are multiple answers with associated certainty factors."""
    def ask_values(self, param, inst):
        """Interactively ask the user for a param's value(s).

        Returns True when values were recorded, False for 'unknown', and
        None when this (param, inst) was already asked.
        """
        if (param, inst) in self.asked:
            return
        self.asked.add((param, inst))
        while True:
            resp = self.read('%s? ' % (param))
            if not resp:
                continue
            if resp == 'unknown':
                return False
            elif resp == 'help':
                self.write(Shell.HELP)
            elif resp == 'why':
                self.print_why(param)
            elif resp == 'rule':
                self.write(self.current_rule)
            elif resp == '?':
                self.write('%s must be of type %s' %
                           (param, self.get_param(param).type_string()))
            else:
                try:
                    for val, cert in parse_reply(self.get_param(param), resp):
                        update_cert(self.given_values, param, inst, val, cert)
                    return True
                except:
                    # unparsable reply: explain and re-prompt
                    self.write('Invalid response. Type ? to see legal ones.')
    def print_why(self, param):
        """Explain why a question is asked: which premises are known vs pending."""
        self.write('Why is the value of %s being asked for?' % param)
        if self.current_rule in ('initial', 'goal'):
            self.write('%s is one of the %s params.' % (param, self.current_rule))
            return
        given, unknown = [], []
        for premise in self.current_rule.premises(self.instances):
            vals = get_vals(self.given_values, premise[0], premise[1])
            if cert_true(eval_condition(premise, vals)):
                given.append(premise)
            else:
                unknown.append(premise)
        if given:
            self.write('It is given that:')
            for condition in given:
                self.write(print_condition(condition))
            self.write('Therefore,')
            # show the rule with only the still-unknown premises
            rule = self.current_rule.clone()
            rule.raw_premises = unknown
            self.write(rule)
    def _set_current_rule(self, rule):
        self.current_rule = rule
    def discover(self, param, inst=None):
        """Establish a param's value by rules and/or by asking (backward chaining)."""
        inst = inst or self.current_inst
        if (param, inst) in self.given:
            return True
        def rules():
            return use_rules(self.given_values, self.instances,
                             self.get_rules(param), self.discover,
                             self._set_current_rule)
        # ask_first params go to the user before trying inference
        if self.get_param(param).ask_first:
            success = self.ask_values(param, inst) or rules()
        else:
            success = rules() or self.ask_values(param, inst)
        if success:
            self.given.add((param, inst))
        return success
    def execute(self, ctx_names):
        """Run a consultation over the named contexts; return goal certainties."""
        self.write('CS 251 - Final Project. Jack Henahan. For help answering questions, type "help".')
        self.clear()
        results = {}
        for name in ctx_names:
            ctx = self.ctxs[name]
            self.build(name)
            self._set_current_rule('initial')
            for param in ctx.initial:
                self.discover(param)
            self._set_current_rule('goal')
            for param in ctx.goals:
                self.discover(param)
            if ctx.goals:
                result = {}
                for param in ctx.goals:
                    result[param] = get_vals(self.given_values, param, self.current_inst)
                results[self.current_inst] = result
        return results
def parse_reply(param, reply):
if reply.find(',') >= 0:
vals = []
for pair in reply.split(','):
val, cert = pair.strip().split(' ')
vals.append((param.from_string(val), float(cert)))
return vals
return [(param.from_string(reply), Cert.true)]
| {
"repo_name": "jhenahan/pycin",
"path": "shell.py",
"copies": "1",
"size": "10424",
"license": "mit",
"hash": -7320549787899603000,
"line_mean": 30.877675841,
"line_max": 102,
"alpha_frac": 0.5510360706,
"autogenerated": false,
"ratio": 3.8085495067592254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9745301713687239,
"avg_score": 0.02285677273439739,
"num_lines": 327
} |
""" A few utilities for Indian Buffet Processes. """
__author__ = 'Tom Schaul, tom@idsia.ch'
from scipy import zeros, rand, array, sqrt
from numpy.random import beta
def leftordered(M):
    """Return the given matrix in left-ordered form.

    Columns are sorted lexicographically (as tuples), largest first.
    """
    columns = sorted(M.T, key=tuple, reverse=True)
    return array(columns).T
def generateIBP(customers, alpha=10, reducedprop=1.):
    """Simple implementation of the Indian Buffet Process.

    Generates a binary matrix with `customers` rows and an expected number of
    columns of alpha * sum(1, 1/2, ..., 1/customers), using a stick-breaking
    construction. `reducedprop` scales down the expected number of times a
    dish is tried (alpha is scaled up to compensate).
    """
    # max number of dishes is distributed according to Poisson(alpha*sum(1/i))
    _lambda = alpha * sum(1. / array(list(range(1, customers + 1))))
    alpha /= reducedprop
    # allow two standard deviations above the mean as a cutoff
    maxdishes = int(_lambda + sqrt(_lambda) * 2) + 1
    res = zeros((customers, maxdishes), dtype=bool)
    stickprops = beta(alpha, 1, maxdishes)  # nu_i
    currentstick = 1.
    dishesskipped = 0
    for column, nu in enumerate(stickprops):
        currentstick *= nu
        taken = rand(customers) < currentstick * reducedprop
        if sum(taken) > 0:
            res[:, column - dishesskipped] = taken
        else:
            # dish never taken: drop its column and shift the rest left
            dishesskipped += 1
    return res[:, :maxdishes - dishesskipped]
def testIBP():
    """Plot matrices generated by an IBP, for a few different settings."""
    from pybrain.tools.plotting.colormaps import ColorMap
    import pylab
    # always 50 customers
    n = 50
    # parameter settings: (alpha,) or (alpha, reducedprop)
    ps = [(10, 0.1),
          (10,), (50,),
          (50, 0.5),
          ]
    # generate one left-ordered matrix per parameter setting
    matrices = []
    for p in ps:
        matrices.append(leftordered(generateIBP(n, *p)))
    # plot the matrices
    for m in matrices:
        ColorMap(m, pixelspervalue=3)
    pylab.show()
if __name__ == '__main__':
    # run the demo plots when executed as a script
    testIBP()
| {
"repo_name": "sepehr125/pybrain",
"path": "pybrain/tools/ibp.py",
"copies": "25",
"size": "2150",
"license": "bsd-3-clause",
"hash": -6824234915666958000,
"line_mean": 26.5641025641,
"line_max": 94,
"alpha_frac": 0.616744186,
"autogenerated": false,
"ratio": 3.5073409461663947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014204364712197841,
"num_lines": 78
} |
"""A few utility functions concerning indexable things"""
from hawkweed.functional.primitives import curry
from hawkweed.functional.mathematical import inc, dec
def first(element):
    """Return the initial item of an indexable collection.

    params:
        element: an element that implements __getitem__
    """
    return element[0]
def rest(element):
    """Return everything but the initial item, via slicing.

    params:
        element: an element that implements __getitem__
    """
    return element[1:]
def second(element):
    """Return the item at index 1.

    params:
        element: an element that implements __getitem__
    """
    return element[1]
def last(element):
    """Return the final item, via negative indexing.

    params:
        element: an element that implements __getitem__
    """
    return element[-1]
@curry
def get(index, element):
    """Curried lookup: return element[index].

    params:
        index: the index at which we should get
        element: an element that implements __getitem__
    """
    return element[index]
@curry
def remove_from(index, element):
    """Destructively delete the item at index and hand the element back.

    params:
        index: the index at which we should delete
        element: an element that implements __delitem__
    """
    del element[index]
    return element
@curry
def remove_from_keep(index, element):
    """Build a copy of element with the item at index dropped (non-destructive).

    Complexity: O(n)
    params:
        index: the index at which we should delete
        element: an element that implements __getitem__ slicing
    returns: the new list
    """
    return element[:index] + element[inc(index):]
@curry
def aperture(n, l):
    """Yield every length-n window of consecutive items from l.

    Produces nothing when n exceeds the length of the list.
    Complexity: O(slice_size*n)
    params:
        n: the slice size
        l: the list to window over
    returns: the generator
    """
    start = 0
    last_start = len(l) - dec(n)
    while start < last_start:
        yield l[start:start + n]
        start += 1
@curry
def get_attr(attr, element):
    """Curried getattr: like get, but for attributes.

    Complexity: O(1)
    params:
        attr: the attribute to fetch
        element: the object to read it from
    returns: the attribute
    """
    return getattr(element, attr)
@curry
def take(n, l):
    """Lazily yield the first n items of l by index.

    Complexity: O(n)
    params:
        n: the number of elements to take
        l: the indexable to take from
    returns: a generator object
    """
    i = 0
    while i < n:
        yield l[i]
        i += 1
| {
"repo_name": "hellerve/hawkweed",
"path": "hawkweed/functional/list_prims.py",
"copies": "1",
"size": "2639",
"license": "mit",
"hash": 8964633804039210000,
"line_mean": 20.4552845528,
"line_max": 58,
"alpha_frac": 0.6131110269,
"autogenerated": false,
"ratio": 3.986404833836858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5099515860736857,
"avg_score": null,
"num_lines": null
} |
# A few utility functions
import itertools
import numpy as np
###############################################
# Generally useful functions #
###############################################
# useful with reshape
# useful with reshape
def linearize_indices(indices, dims):
    """Convert flat indices into per-dimension index tuples (for reshape-style use).

    NOTE(review): uses `tf` (TensorFlow) which is never imported in this file —
    calling this as-is raises NameError; confirm the intended import.
    Python 2 code: `remain / dims[...]` relies on integer division.
    """
    res = []
    remain = indices
    for i, _ in enumerate(dims):
        # peel off the index for the last remaining dimension each pass
        res = [remain % dims[-i - 1]] + res
        remain = remain / dims[-i - 1]
    linearized = tf.transpose(tf.pack(res))
    return linearized
###############################################
# Data reading functions #
###############################################
class Config:
    """Hyper-parameter bundle for the sequence tagging model.

    Python 2 code: integer division (`pred_window / 2`) is relied on below.
    NOTE(review): other functions in this file read `config.pot_window`,
    which this class never sets (only `pot_size`) — confirm which is intended.
    """
    def __init__(self, batch_size=20, num_steps=32, learning_rate=1e-2,
                 l1_reg=2e-3, l1_list=[],
                 l2_reg=2e-3, l2_list=[],
                 features_dim=50, init_words=False, input_features=[],
                 use_rnn=False, rnn_hidden_units=100, rnn_output_size=50,
                 use_convo=False, conv_window=5, conv_dim=50,
                 pot_size=1,
                 pred_window=1, tag_list=[],
                 verbose=False, num_epochs=10, num_predict=5):
        # NOTE(review): mutable default arguments ([] above) are shared across
        # calls if ever mutated — they are only assigned here, so it is benign.
        # optimization parameters
        self.batch_size = batch_size
        self.num_steps = num_steps
        self.learning_rate = learning_rate
        # regularization parameters
        self.l1_reg = l1_reg
        self.l1_list = l1_list
        self.l2_reg = l2_reg
        self.l2_list = l2_list
        # input layer
        self.features_dim = features_dim
        self.init_words = init_words
        self.input_features = input_features
        # recurrent layer
        self.use_rnn = use_rnn
        self.rnn_hidden_units = rnn_hidden_units
        self.rnn_output_size = rnn_output_size
        # convolutional layer
        self.use_convo = use_convo
        self.conv_window = conv_window
        self.conv_dim = conv_dim
        # CRF parameters:
        self.pot_size = pot_size
        self.n_tags = len(tag_list)
        # output layer
        self.pred_window = pred_window
        self.tag_list = tag_list
        # label_dict maps a joined tag window ('B_I_O') to
        # (outcome index or -1 for padding-centred windows, centre tag index)
        self.label_dict = {}
        tags_ct = 0
        for element in itertools.product(tag_list, repeat=pred_window):
            tag_st = '_'.join(element)
            mid = element[pred_window / 2]  # centre tag (py2 integer division)
            if mid == '<P>':
                self.label_dict[tag_st] = (-1, tag_list.index(mid))
            else:
                self.label_dict[tag_st] = (tags_ct, tag_list.index(mid))
                tags_ct += 1
        self.n_outcomes = tags_ct
        # misc parameters
        self.verbose = verbose
        self.num_epochs = num_epochs
        self.num_predict = num_predict
    def make_mappings(self, data):
        """Build per-feature string->id lookup tables from the training data.

        Id 0 is reserved for the unknown token '_unk_' in every feature.
        """
        self.feature_maps = dict([(feat, {'lookup': {'_unk_': 0},
                                          'reverse': ['_unk_']})
                                  for feat in data[0][0]])
        for sentence in data:
            for token in sentence:
                for feat in data[0][0]:
                    ft = token[feat]
                    if ft not in self.feature_maps[feat]['lookup']:
                        self.feature_maps[feat]['lookup'][ft] = \
                                        len(self.feature_maps[feat]['reverse'])
                        self.feature_maps[feat]['reverse'] += [ft]
    def to_string(self):
        """Dump all scalar settings as 'key --- value' lines (maps omitted)."""
        st = ''
        for k, v in self.__dict__.items():
            if k not in ['feature_maps', 'label_dict']:
                st += k + ' --- ' + str(v) + ' \n'
        return st
class Batch:
    """One training batch in the several index encodings the model consumes.

    Python 2 code throughout: integer division and list-returning range()
    (e.g. `range(-mid, 0) + range(1, mid + 1)`) are relied on.
    """
    def __init__(self):
        # features: {'word': 'have', 'pos': 'VB', ...} ->
        #           [1345, 12 * num_features + 1,...]
        self.features = []
        # tags: 'B' -> 1
        self.tags = []
        # tags_one_hot: 'B' -> [0, 1, 0, 0, 0, 0]
        self.tags_one_hot = []
        # tag_windows: '<P>_B_O' -> [0, 1, 3]
        self.tag_windows = []
        # tag_windows_lin: '<P>_B_O' -> num_values * token_id + 0 * config.n_tags **2 + 1 * config.n_tags + 3
        self.tag_windows_lin = []
        # tag_windows_one_hot: '<P>_B_O' -> [0, ..., 0, 1, 0, ..., 0]
        self.tag_windows_one_hot = []
        # tag_neighbours: '<P>_B_O' -> [0, 3]
        self.tag_neighbours = []
        # tag_neighbours_linearized: '<P>_B_O' -> num_values * token_id + 0 * config.n_tags + 3
        self.tag_neighbours_lin = []
        # mask: <P> -> 0, everything else -> 1
    def read(self, data, start, config, fill=False):
        """Fill all batch fields from data[start : start + batch_size].

        fill=True pads every sentence to the batch's max length (centred).
        NOTE(review): reads config.pot_window, which Config.__init__ does not
        set (it sets pot_size) — verify against the Config actually used.
        """
        num_features = len(config.input_features)
        batch_data = data[start:start + config.batch_size]
        batch_features = [[[config.feature_maps[feat]['lookup'][token[feat]]
                            for feat in config.input_features]
                           for token in sentence]
                          for sentence in batch_data]
        batch_labels = [[config.label_dict[token['label']]
                         for token in sentence]
                        for sentence in batch_data]
        # multiply feature indices for use in tf.nn.embedding_lookup
        self.features = [[[num_features * ft + i for i, ft in enumerate(word)]
                          for word in sentence] for sentence in batch_features]
        self.tags = [[label[1] for label in sentence]
                     for sentence in batch_labels]
        self.tags_one_hot = [[[int(x == label[1] and x > 0)  # TODO: count padding tokens?
                               for x in range(config.n_tags)]
                              for label in sentence]
                             for sentence in batch_labels]
        self.tag_windows_one_hot = [[[int(x == label[0])
                                      for x in range(config.n_outcomes)]
                                     for label in sentence]
                                    for sentence in batch_labels]
        if fill:
            # centre each sentence inside the padded window (py2 int division)
            max_len = max(config.conv_window,
                          max([len(sentence) for sentence in batch_data]) + 2)
            for i in range(config.batch_size):
                current_len = len(batch_data[i])
                pre_len = (max_len - current_len) / 2
                post_len = max_len - pre_len - current_len
                self.features[i] = [range(num_features)] * pre_len + \
                                   self.features[i] + \
                                   [range(num_features)] * post_len
                self.tags[i] = [0] * pre_len + self.tags[i] + [0] * post_len
                self.tags_one_hot[i] = [[0] * config.n_outcomes] * pre_len + \
                                       self.tags_one_hot[i] + \
                                       [[0] * config.n_outcomes] * post_len
                self.tag_windows_one_hot[i] = [[0] * config.n_outcomes] * pre_len + \
                                              self.tag_windows_one_hot[i] + \
                                              [[0] * config.n_outcomes] * post_len
        mid = config.pot_window / 2
        padded_tags = [[0] * mid + sentence + [0] * mid
                       for sentence in self.tags]
        # get linearized window indices
        self.tag_windows = [[sent[i + j] for j in range(-mid, mid + 1)]
                            for sent in padded_tags
                            for i in range(mid, len(sent) - mid)]
        n_indices = config.n_tags ** config.pot_window
        self.tag_windows_lin = [sum([t * (config.n_tags ** (config.pot_window - 1 - i))
                                     for i, t in enumerate(window)]) + i * n_indices
                                for i, window in enumerate(self.tag_windows)]
        # get linearized potential indices
        self.tag_neighbours = [[sent[i + j]
                                for j in range(-mid, 0) + range(1, mid + 1)]
                               for sent in padded_tags
                               for i in range(mid, len(sent) - mid)]
        max_pow = config.pot_window - 1
        n_indices = config.n_tags ** max_pow
        self.tag_neighbours_lin = [sum([idx * (config.n_tags) ** (max_pow - j - 1)
                                        for j, idx in enumerate(token)]) + i * n_indices
                                   for i, token in enumerate(self.tag_neighbours)]
        # make mask:
        self.mask = [[int(tag > 0) for tag in sent] for sent in self.tags]
def aggregate_labels(sentence, config):
    """Replace each token's label in place with the joined window of labels
    around it ('<P>_B_O'), padding the sentence edges with '<P>'.

    Python 2 code: `config.pred_window / 2` is integer division.
    """
    pre_tags = ['<P>'] * (config.pred_window / 2)
    sentence_ext = pre_tags + [token['label']
                               for token in sentence] + pre_tags
    for i, token in enumerate(sentence):
        current = token['label']
        sentence[i]['label'] = '_'.join([sentence_ext[i+j]
                                         for j in range(config.pred_window)])
def read_data(file_name, features, config):
    """Read a tab-separated token file into sentences and aggregate labels.

    Blank lines separate sentences; each token line is zipped with *features*
    into a dict. Python 2 print statement reports progress every 100k lines.
    """
    sentences = []
    sentence = []
    f = open(file_name)
    c = 0
    for line in f:
        c += 1
        if c % 100000 == 0:
            print c, 'lines read'
        if len(line.strip()) == 0 and len(sentence) > 0:
            # blank line: close the current sentence
            sentences += [sentence[:]]
            sentence = []
        else:
            sentence += [dict(zip(features, line.strip().split('\t')))]
    if len(sentence) > 0:
        # flush a trailing sentence with no final blank line
        sentences += [sentence[:]]
    f.close()
    # in-place label windowing; the list is only for its side effects
    foo = [aggregate_labels(sentence, config) for sentence in sentences]
    return sentences
def show(sentence):
    """Render a sentence as space-separated word/label tokens."""
    rendered = [token['word'] + '/' + token['label'] for token in sentence]
    return ' '.join(rendered)
# read pre_trained word vectors
# read pre_trained word vectors
def read_vectors(file_name, vocab):
    """Load word2vec-style text vectors and arrange them in vocab order.

    The first file line's second field gives the dimension. Words missing
    from the file get a zero vector. Returns a (len(vocab), dim) array.
    """
    vectors = {}
    f = open(file_name)
    dim = int(f.readline().strip().split()[1])
    for line in f:
        w = line.split()[0]
        vec = [float(x) for x in line.strip().split()[1:]]
        vectors[w] = np.array(vec)
    f.close()
    res = np.zeros((len(vocab), dim))
    for i, w in enumerate(vocab):
        res[i] = vectors.get(w, np.zeros(dim))
    return res
# extract windows from data to fit into unrolled RNN. Independent sentences
# extract windows from data to fit into unrolled RNN. Independent sentences
def cut_and_pad(data, config):
    """Cut sentences into num_steps-sized windows, padding short ones.

    Short sentences are centred with pad tokens; long ones are chopped with a
    1/3 overlap between consecutive windows. Python 2 integer division.
    NOTE(review): reads config.pot_window (see Config note) and compares
    sen[0]['label'] to '<P>' although pad labels are joined windows — this
    only matches when pred_window == 1; verify.
    """
    pad_token = dict([(feat, '_unk_') for feat in data[0][0]])
    pad_token['label'] = '_'.join(['<P>'] * config.pred_window)
    num_steps = config.num_steps
    res = []
    seen = 0
    pad_len = max(config.pred_window, config.pot_window) / 2
    sen = [pad_token] * pad_len + data[0] + [pad_token] * pad_len
    while seen < len(data):
        if len(sen) < num_steps:
            if sen[0]['label'] == '<P>':
                new_sen = ((num_steps - len(sen)) / 2) * [pad_token] + sen
            else:
                new_sen = sen
            new_sen = new_sen + (num_steps - len(new_sen)) * [pad_token]
            res += [new_sen[:]]
            seen += 1
            if seen < len(data):
                sen = [pad_token] * pad_len + data[seen] + [pad_token] * pad_len
        else:
            # long sentence: emit a full window, keep the last third as overlap
            res += [sen[:num_steps]]
            sen = sen[(2 * num_steps) / 3:]
    return res
# extract windows from data to fit into unrolled RNN. Continuous model
# extract windows from data to fit into unrolled RNN. Continuous model
def cut_batches(data, config):
    """Flatten all sentences into one padded stream and cut half-overlapping
    num_steps windows from it; the last window is right-padded.

    Python 2 code: both divisions are integer divisions.
    """
    pad_token = dict([(feat, '_unk_') for feat in data[0][0]])
    pad_token['label'] = '_'.join(['<P>'] * config.pred_window)
    padding = [pad_token] * config.pred_window
    new_data = padding + [tok for sentence in data
                          for tok in sentence + padding]
    step_size = (config.num_steps / 2)
    num_cuts = len(new_data) / step_size
    res = [new_data[i * step_size: i * step_size + config.num_steps]
           for i in range(num_cuts)]
    res[-1] = res[-1] + [pad_token] * (config.num_steps - len(res[-1]))
    return res
###############################################
# NN evaluation functions #
###############################################
def treat_spans(spans_file):
    """Read a span file into a list of span-lists.

    Each non-blank line is 'start end label'; blank lines close a group.
    NOTE(review): a trailing group not followed by a blank line is dropped,
    matching the original behavior.
    """
    span_lists = []
    current = []
    f = open(spans_file)
    for line in f:
        stripped = line.strip()
        if stripped == '':
            span_lists += [current[:]]
            current = []
        else:
            parts = stripped.split()
            current = current + [(int(parts[0]), int(parts[1]), parts[2])]
    f.close()
    return span_lists
def find_gold(sentence):
    """Collect gold mention spans (tuples of token indices) from BIO-style labels."""
    gold = []
    span = []
    for idx, token in enumerate(sentence):
        label = token['label']
        # 'B' and 'O' both close any span in progress
        if label == 'B' or label == 'O':
            if span:
                gold += [tuple(span)]
                span = []
        # 'B' starts a new span; any label containing 'I' continues one
        if 'I' in label or label == 'B':
            span += [idx]
    if span:
        gold += [tuple(span)]
    return gold
def make_scores(token, thr):
    """Keep only the BIO-tag scores in a token dict that beat the threshold."""
    tags = ('O', 'OD', 'I', 'ID', 'B')
    return {key: score for key, score in token.items()
            if key in tags and score > thr}
def find_mentions(sentence, thr=0.02):
    """Beam-search mention spans from per-token tag scores.

    Each working hypothesis is [token_indices, log_score, discontinuous_flag].
    A hypothesis is emitted (with its geometric-mean probability) when the
    next token can start a new mention ('B') or be outside one ('O').
    The beam is pruned to the 1000 best hypotheses. Returns (span, score)
    pairs sorted by descending score.
    """
    scores = [make_scores(token, thr) for token in sentence]
    found = []
    working = []
    for i, score in enumerate(scores):
        if 'B' in score or 'O' in score:
            # close hypotheses that ended exactly at the previous token
            for work in working:
                if work[0][-1] == i-1:
                    sc = work[1] + np.log(score.get('B', 0) +
                                          score.get('O', 0))
                    # normalize by span length (+1 for the closing token)
                    sc /= (work[0][-1] + 2 - work[0][0])
                    found += [(tuple(work[0]), np.exp(sc))]
        if len(score) == 1 and 'O' in score:
            # definitely outside any mention: drop all hypotheses
            working = []
        else:
            new_working = []
            if 'B' in score:
                new_working = [[[i], np.log(score['B']), False]]
            for work in working:
                for tg, sc in score.items():
                    if tg == 'OD':
                        # gap token inside a discontinuous mention
                        new_working += [[work[0], work[1] + np.log(sc), True]]
                    elif tg == 'ID' and work[2]:
                        new_working += [[work[0] + [i], work[1] + np.log(sc),
                                         True]]
                    elif tg == 'I' and not work[2]:
                        new_working += [[work[0] + [i], work[1] + np.log(sc),
                                         False]]
            working = new_working[:]
            if len(working) > 1000:
                # beam pruning: keep the 1000 best-scoring hypotheses
                working = sorted(working, key=lambda x: x[1],
                                 reverse=True)[:1000]
    return sorted(found, key=lambda x: x[1], reverse=True)
def read_sentence(sentence):
    """Bundle a sentence with its gold mentions and its predicted mentions."""
    gold = find_gold(sentence)
    predicted = find_mentions(sentence)
    return (sentence, gold, predicted)
def merge(sentences, spans):
    """Pair parallel sentences with their spans, merging duplicates.

    Consecutive entries whose span lists share the same first span are treated
    as the same sentence: their gold mention sets are unioned while the last
    entry's predictions are kept. Returns a list of ((sentence, gold,
    predictions), span_list) pairs.
    """
    res = []
    sent = read_sentence(sentences[0])
    span = spans[0]
    for i, sp in enumerate(spans):
        if i == 0:
            continue
        if sp[0] == span[0]:
            # same underlying sentence: merge gold spans, keep new predictions
            sen = read_sentence(sentences[i])
            gold = sorted(list(set(sen[1] + sent[1])))
            sent = (sen[0], gold, sen[2])
        else:
            res += [(sent, span)]
            sent = read_sentence(sentences[i])
            span = spans[i]
    # flush the final group
    res += [(sent, span)]
    return res
def evaluate(merged_sentences, threshold):
    """Score predicted mentions against gold at a confidence threshold.

    Counts a true positive for each above-threshold prediction found in the
    gold spans; prints precision / recall / F1 (Python 2 print statement).
    """
    TP = 0
    FP = 0
    FN = 0
    for sentence in merged_sentences:
        true_mentions = sentence[0][1]
        tp = 0
        for pred in sentence[0][2]:
            if pred[1] >= threshold:
                if pred[0] in true_mentions:
                    tp += 1
                else:
                    FP += 1
        TP += tp
        # every gold mention not matched above is a miss
        FN += len(true_mentions) - tp
    if (TP + FP) == 0:
        prec = 0
        recall = 0
    else:
        prec = float(TP) / (TP + FP)
        recall = float(TP) / (TP + FN)
    if prec == 0 or recall == 0:
        f1 = 0
    else:
        f1 = 2 * (prec * recall) / (prec + recall)
    print 'TH:', threshold, '\t', 'P:', prec, '\t', 'R:', recall, '\t', 'F:', f1
| {
"repo_name": "yjernite/DeepCRF",
"path": "utils.py",
"copies": "1",
"size": "15564",
"license": "mit",
"hash": 5032182127618894000,
"line_mean": 38.2040302267,
"line_max": 109,
"alpha_frac": 0.4693523516,
"autogenerated": false,
"ratio": 3.7189964157706092,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9685411523228653,
"avg_score": 0.0005874488283912005,
"num_lines": 397
} |
# aff/dic output
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import unicodedata
def NFD(unistr):
    """Decompose a unicode string to NFD (base characters + combining jamo/marks)."""
    return unicodedata.normalize('NFD', unistr)
def warn(s):
    """Emit a warning line on stderr; returns whatever write() returns."""
    return sys.stderr.write('%s\n' % s)
def progress(s):
    """Report a progress stage on stderr as 'Progress: <stage>...'."""
    return sys.stderr.write('Progress: %s...\n' % s)
import config
import suffix
import josa
from flags import *
class Word:
    """One dictionary entry: surface form, part of speech, properties, flags."""
    def __init__(self):
        self.word = ''            # surface form
        self.pos = ''             # part of speech
        self.props = []           # extra properties (strings)
        self.stem = ''            # stem, if derived from another word
        self.flags = []           # hunspell flag numbers
        self.flags_alias = -1     # AF alias index (-1 = none assigned)
        self.morph_alias = -1     # AM alias index (-1 = none assigned)
    def __hash__(self):
        # identity is (word, pos); props do not take part in hashing
        return (self.word + self.pos).__hash__()
    # to make it orderable
    def __lt__(self, other):
        r = self.word < other.word
        if r:
            return True
        r = self.pos < other.pos
        if r:
            return True
        # FIXME: done this way the ordering will differ between runs —
        # there is no telling which element comes out of the set first...
        for prop in other.props:
            if not prop in self.props:
                return True
        return False
    def __repr__(self):
        return 'Word %s pos:%s' % (self.word, self.pos)
def attach_flags(word):
    """Compute and sort the hunspell flags for a word.

    Flags come from three sources: defaults keyed by part of speech, the
    suffix/josa modules (conjugation and particle attachment), and defaults
    keyed by individual properties. Flag constants come from `flags` (star
    import at the top of the file).
    """
    # default flags per part of speech (keys are Korean POS names)
    pos_default_flags = {
        '명사': [ ],
        '대명사': [ ],
        '특수:복수접미사': [ plural_suffix_flag ],
        '특수:알파벳': [ alpha_flag ],
        '특수:숫자': [ digit_flag ],
        '특수:수:1': [ number_1_flag ],
        '특수:수:10': [ number_10_flag ],
        '특수:수:100': [ number_100_flag ],
        '특수:수:1000': [ number_1000_flag ],
        '특수:수:10000': [ number_10000_flag ],
        '특수:고유수:1': [ knumber_1_flag ],
        '특수:고유수:10': [ knumber_10_flag ],
        '특수:금지어': [ forbidden_flag ],
        '내부:활용:-어': [ conjugation_eo_flag ],
        '내부:활용:-은': [ conjugation_eun_flag ],
        '내부:활용:-을': [ conjugation_eul_flag ],
    }
    try:
        word.flags = pos_default_flags[word.pos]
    except KeyError:
        pass
    # verbs ('동사') and adjectives ('형용사') additionally get suffix flags
    if word.pos == '동사' or word.pos == '형용사':
        word.flags += suffix.find_flags(word.word, word.pos, word.props)
    word.flags += josa.find_flags(word.word, word.pos, word.props)
    # default flags per property
    prop_default_flags = {
        '단위명사': [ counter_flag ],
        '보조용언:-어': [ auxiliary_eo_flag ],
        '보조용언:-은': [ auxiliary_eun_flag ],
        '보조용언:-을': [ auxiliary_eul_flag ],
    }
    for prop in word.props:
        try:
            word.flags += prop_default_flags[prop]
        except KeyError:
            pass
    word.flags.sort()
class Dictionary:
    def __init__(self):
        self.words = set()        # all Word entries (duplicates collapse via __hash__)
        self.flag_aliases = []    # AF alias table: list of flag lists
        self.morph_aliases = []   # AM alias table: list of morph strings
def add(self, word):
self.words.add(word)
def remove(self, word):
self.words.remove(word)
def append(self, words):
for w in words:
self.words.add(w)
def load_xml(self, infile):
from lxml import etree
doc = etree.parse(infile)
root = doc.getroot()
for item in root:
w = Word()
for field in item:
if field.tag == 'word':
w.word = field.text
elif field.tag == 'pos':
w.pos = field.text
elif field.tag == 'props' and field.text:
w.props = field.text.split(',')
w.props.sort()
elif field.tag == 'stem' and field.text:
w.stem = field.text
dic.add(w)
    def process(self):
        """Run the full pipeline: expand plurals, expand auxiliaries, compute flags.

        When expand_auxiliary_attached is set, flags are computed BEFORE the
        auxiliary expansion (the expansion then sees flagged verbs); otherwise
        flags are computed after. Progress messages are Korean runtime strings.
        """
        progress('복수형 확장')
        self.expand_plurals()
        if config.expand_auxiliary_attached:
            progress('플래그 계산')
            self.attach_flags()
            progress('보조용언 확장')
            self.expand_auxiliary()
        else:
            progress('보조용언 확장')
            self.expand_auxiliary()
            progress('플래그 계산')
            self.attach_flags()
        # disabled morph-alias pass:
        #progress('속성 계산')
        #self.attach_morph()
def output(self, afffile, dicfile):
progress('dic 출력')
self.output_dic(dicfile)
progress('aff 출력')
self.output_aff(afffile)
######################################################################
    def output_dic(self, outfile):
        """Write the .dic file: entry count, then one NFD-normalized line per word.

        Lines look like 'word/FLAGALIAS MORPHALIAS' with the alias parts
        present only when assigned (> 0).
        """
        outfile.write('%d\n' % len(self.words))
        for word in sorted(list(self.words)):
            line = '%s' % word.word
            if word.flags_alias > 0:
                line += ('/%d' % word.flags_alias)
            if word.morph_alias > 0:
                line += (' %d' % word.morph_alias)
            line += '\n'
            # hunspell-ko expects decomposed jamo, hence NFD
            outfile.write(NFD(line))
    def output_aff(self, outfile):
        """Render template.aff with the generated affix/alias sections."""
        from string import Template
        import aff
        template = Template(open('template.aff').read())
        # NOTE: these mutate the flag aliases, so they must come before get_AF()
        suffix_str = aff.get_suffix_defines(self.flag_aliases)
        josa_str = aff.get_josa_defines(self.flag_aliases)
        af_str = self.get_AF()
        d = {'version': config.version,
             'required_hunspell': '%d.%d.%d' % config.minimum_hunspell_version,
             'CONV': aff.CONV_DEFINES,
             'AF': af_str,
             'forbidden_flag': str(forbidden_flag),
             'trychars': aff.TRYCHARS,
             'MAP': aff.MAP_DEFINES,
             'REP': aff.REP_DEFINES,
             'COMPOUNDRULE': aff.COMPOUNDRULE_DEFINES,
             'JOSA': josa_str,
             'SUFFIX': suffix_str,
             }
        outfile.write(template.substitute(d))
def get_AF(self):
aliases = self.flag_aliases
result = 'AF %d\n' % len(aliases)
for flags in aliases:
result += 'AF %s\n' % ','.join('%d' % f for f in flags)
return result
def get_AM(self):
aliases = self.morph_aliases
result = 'AM %d\n' % len(aliases)
for morph in aliases:
result += 'AM %s\n' % morph
return result
######################################################################
def attach_flags(self):
aliases = []
for word in self.words:
word.attach_flags()
if word.flags:
if not word.flags in aliases:
aliases.append(word.flags)
word.flags_alias = aliases.index(word.flags) + 1
self.flag_aliases = aliases
def attach_morph(self):
aliases = []
for word in self.words:
morph = ''
if word.stem:
morph += 'st:%s' % word.stem
if morph:
if not morph in aliases:
aliases.append(morph)
word.morph_alias = aliases.index(morph) + 1
self.morph_aliases = aliases
def expand_plurals(self):
new_words = []
for word in [w for w in self.words if '가산명사' in w.props]:
new_word = Word()
new_word.word = word.word + '들'
new_word.pos = word.pos
new_word.props = [p for p in word.props if p != '가산명사']
new_word.stem = word.word
new_words.append(new_word)
self.append(new_words)
def expand_auxiliary(self):
new_words = []
forms = ['-어', '-은', '-을']
verbs = [w for w in self.words if w.pos in ['동사', '형용사']]
for form in forms:
auxiliaries = [w for w in verbs if ('보조용언:' + form) in w.props]
for verb in verbs:
# 본용언이 용언+용언 합성용언이면 붙여 쓸 수 없다
if '용언합성' in verb.props:
continue
prefixes = suffix.make_conjugations(verb.word,
verb.pos, verb.props, form)
if config.expand_auxiliary_attached:
for auxiliary in auxiliaries:
# 본용언이 해당 보조용언으로 끝나는 합성어인 경우 생략
# 예: 다가오다 + 오다 => 다가와오다 (x)
if (verb.word != auxiliary.word and
verb.word.endswith(auxiliary.word)):
continue
new_props = [p for p in auxiliary.props if not p.startswith('보조용언:')]
for prefix in prefixes:
new_word = Word()
new_word.word = prefix + auxiliary.word
new_word.pos = auxiliary.pos
new_word.stem = verb.word
new_word.props = new_props
new_word.flags = auxiliary.flags
new_word.flags_alias = auxiliary.flags_alias
new_words.append(new_word)
else:
for prefix in prefixes:
new_word = Word()
new_word.word = prefix
new_word.pos = '내부:활용:' + form
new_words.append(new_word)
self.append(new_words)
# Command line: make-aff-dic.py OUTPUT.aff OUTPUT.dic INPUT.xml...
if __name__ == '__main__':
    afffilename, dicfilename = sys.argv[1], sys.argv[2]
    # NOTE: the name `dic` must stay -- load_xml historically referenced it.
    dic = Dictionary()
    for filename in sys.argv[3:]:
        dic.load_xml(open(filename))
    dic.process()
    dic.output(open(afffilename, 'w'), open(dicfilename, 'w'))
| {
"repo_name": "changwoo/hunspell-dict-ko",
"path": "make-aff-dic.py",
"copies": "1",
"size": "9883",
"license": "mpl-2.0",
"hash": -7829373850463896000,
"line_mean": 32.6308243728,
"line_max": 93,
"alpha_frac": 0.4775658105,
"autogenerated": false,
"ratio": 3.1914965986394557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9132835459949128,
"avg_score": 0.007245389838065592,
"num_lines": 279
} |
# AFF file utility
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import config
from flags import *
from jamo import *
import suffix
import josa
import unicodedata
def NFD(unistr):
    """Return *unistr* decomposed into Unicode NFD form (syllables -> jamo)."""
    return unicodedata.normalize('NFD', unistr)
def NFC(unistr):
    """Return *unistr* recomposed into Unicode NFC form (jamo -> syllables)."""
    return unicodedata.normalize('NFC', unistr)
# 빈도가 높은 글자를 앞에 쓸 수록 처리 속도 향상
#
# NOTE: 단, 모음이 틀리는 경우가 보통 더 많으므로 더 나은 단어가 앞에
# 추천 단어의 앞에 나오도록 모음을 먼저 쓴다.
#
# 2008년 12월 현재 한국어 위키백과의 빈도:
#
# U+110b: 12945437 (HANGUL CHOSEONG IEUNG)
# U+1161: 11184877 (HANGUL JUNGSEONG A)
# U+11ab: 9201655 (HANGUL JONGSEONG NIEUN)
# U+1175: 8976003 (HANGUL JUNGSEONG I)
# U+1100: 6748571 (HANGUL CHOSEONG KIYEOK)
# U+1173: 6501009 (HANGUL JUNGSEONG EU)
# U+1169: 6130514 (HANGUL JUNGSEONG O)
# U+1109: 5708785 (HANGUL CHOSEONG SIOS)
# U+1165: 5264197 (HANGUL JUNGSEONG EO)
# U+11bc: 5092793 (HANGUL JONGSEONG IEUNG)
# U+110c: 4962840 (HANGUL CHOSEONG CIEUC)
# U+116e: 4835854 (HANGUL JUNGSEONG U)
# U+1105: 4667272 (HANGUL CHOSEONG RIEUL)
# U+1103: 4663742 (HANGUL CHOSEONG TIKEUT)
# U+11af: 4478045 (HANGUL JONGSEONG RIEUL)
# U+1112: 3953535 (HANGUL CHOSEONG HIEUH)
# U+1167: 3444653 (HANGUL JUNGSEONG YEO)
# U+11a8: 3353338 (HANGUL JONGSEONG KIYEOK)
# U+1102: 3270768 (HANGUL CHOSEONG NIEUN)
# U+1107: 3037763 (HANGUL CHOSEONG PIEUP)
# U+1166: 2881967 (HANGUL JUNGSEONG E)
# U+1106: 2635707 (HANGUL CHOSEONG MIEUM)
# U+1162: 2533498 (HANGUL JUNGSEONG AE)
# U+11b7: 1718630 (HANGUL JONGSEONG MIEUM)
# U+110e: 1507312 (HANGUL CHOSEONG CHIEUCH)
# U+1110: 1505921 (HANGUL CHOSEONG THIEUTH)
# U+1174: 1230310 (HANGUL JUNGSEONG YI)
# U+116a: 1195625 (HANGUL JUNGSEONG WA)
# U+1111: 1174547 (HANGUL CHOSEONG PHIEUPH)
# U+110f: 912332 (HANGUL CHOSEONG KHIEUKH)
# U+11bb: 875292 (HANGUL JONGSEONG SSANGSIOS)
# U+116d: 848305 (HANGUL JUNGSEONG YO)
# U+11b8: 806419 (HANGUL JONGSEONG PIEUP)
# U+1172: 798379 (HANGUL JUNGSEONG YU)
# U+116f: 717457 (HANGUL JUNGSEONG WEO)
# U+116c: 711301 (HANGUL JUNGSEONG OE)
# U+11ba: 389100 (HANGUL JONGSEONG SIOS)
# U+1171: 375026 (HANGUL JUNGSEONG WI)
# U+1163: 359881 (HANGUL JUNGSEONG YA)
# U+1168: 288224 (HANGUL JUNGSEONG YE)
# U+1104: 233944 (HANGUL CHOSEONG SSANGTIKEUT)
# U+1101: 189755 (HANGUL CHOSEONG SSANGKIYEOK)
# U+11c0: 119337 (HANGUL JONGSEONG THIEUTH)
# U+110a: 113001 (HANGUL CHOSEONG SSANGSIOS)
# U+11ad: 97304 (HANGUL JONGSEONG NIEUN-HIEUH)
# U+110d: 84806 (HANGUL CHOSEONG SSANGCIEUC)
# U+11ae: 70808 (HANGUL JONGSEONG TIKEUT)
# U+11b9: 61476 (HANGUL JONGSEONG PIEUP-SIOS)
# U+1170: 52914 (HANGUL JUNGSEONG WE)
# U+11be: 48191 (HANGUL JONGSEONG CHIEUCH)
# U+11c1: 44326 (HANGUL JONGSEONG PHIEUPH)
# U+11bd: 42718 (HANGUL JONGSEONG CIEUC)
# U+11c2: 40882 (HANGUL JONGSEONG HIEUH)
# U+1108: 36758 (HANGUL CHOSEONG SSANGPIEUP)
# U+11b0: 22199 (HANGUL JONGSEONG RIEUL-KIYEOK)
# U+11b1: 21644 (HANGUL JONGSEONG RIEUL-MIEUM)
# U+116b: 20701 (HANGUL JUNGSEONG WAE)
# U+11a9: 16775 (HANGUL JONGSEONG SSANGKIYEOK)
# U+11b2: 7965 (HANGUL JONGSEONG RIEUL-PIEUP)
# U+11b6: 6840 (HANGUL JONGSEONG RIEUL-HIEUH)
# U+1164: 1946 (HANGUL JUNGSEONG YAE)
# U+11ac: 1710 (HANGUL JONGSEONG NIEUN-CIEUC)
# U+11aa: 1600 (HANGUL JONGSEONG KIYEOK-SIOS)
# U+11bf: 794 (HANGUL JONGSEONG KHIEUKH)
# U+11b4: 544 (HANGUL JONGSEONG RIEUL-THIEUTH)
# U+11b3: 523 (HANGUL JONGSEONG RIEUL-SIOS)
# U+11b5: 495 (HANGUL JONGSEONG RIEUL-PHIEUPH)
# TRY characters for hunspell's suggestion engine, most frequent first
# (see the frequency table above). All vowels come before consonants so
# that vowel-corrected candidates are suggested first.
TRYCHARS = ('\u1161\u1175\u1173\u1169\u1165\u116e\u1167\u1166\u1162\u1174' +
            '\u116a\u116d\u1172\u116f\u116c\u1171\u1163\u1168\u1170\u116b' +
            '\u1164' +
            '\u110b\u11ab\u1100\u1109\u11bc\u110c\u1105\u1103\u11af\u1112' +
            '\u11a8\u1102\u1107\u1106\u11b7\u110e\u1110\u1111\u110f\u11bb' +
            '\u11b8\u11ba\u1104\u1101\u11c0\u110a\u11ad\u110d\u11ae\u11b9' +
            '\u11be\u11c1\u11bd\u11c2\u1108\u11b0\u11b1\u11a9\u11b2\u11b6' +
            '\u11ac\u11aa\u11bf\u11b4\u11b3\u11b5')
# ICONV/OCONV tables: map every precomposed Hangul syllable
# (U+AC00..U+D7A3, 11172 of them) to its NFD jamo sequence on input,
# and back to the precomposed form on output.
_syllables = [chr(code) for code in range(0xac00, 0xd7a3 + 1)]
_conv_strings = ['ICONV 11172']
_conv_strings.extend('ICONV %s %s' % (s, NFD(s)) for s in _syllables)
_conv_strings.append('OCONV 11172')
_conv_strings.extend('OCONV %s %s' % (NFD(s), s) for s in _syllables)
CONV_DEFINES = '\n'.join(_conv_strings)
# MAP: groups of jamo that are easily confused with one another
# - initial consonants grouped with their tense/aspirated counterparts
# - vowels grouped by similar pronunciation
# - final consonants grouped by identical final sound
map_list = [
    L_KIYEOK + L_SSANGKIYEOK + L_KHIEUKH,
    L_TIKEUT + L_SSANGTIKEUT + L_THIEUTH,
    L_PIEUP + L_SSANGPIEUP + L_PHIEUPH,
    L_SIOS + L_SSANGSIOS,
    L_CIEUC + L_SSANGCIEUC + L_CHIEUCH,
    V_AE + V_E + V_YAE + V_YE,
    V_WAE + V_OE + V_WE,
    T_KIYEOK + T_SSANGKIYEOK + T_KIYEOK_SIOS + T_KHIEUKH,
    T_NIEUN + T_NIEUN_CIEUC + T_NIEUN_HIEUH,
    (T_TIKEUT + T_SIOS + T_SSANGSIOS + T_CIEUC + T_CHIEUCH +
     T_THIEUTH + T_HIEUH),
    (T_RIEUL + T_RIEUL_KIYEOK + T_RIEUL_MIEUM + T_RIEUL_PIEUP +
     T_RIEUL_SIOS + T_RIEUL_THIEUTH + T_RIEUL_PHIEUPH + T_RIEUL_HIEUH),
    T_PIEUP + T_PIEUP_SIOS + T_PHIEUPH,
    ]
MAP_DEFINES = 'MAP %d\n' % len(map_list)
MAP_DEFINES += ''.join('MAP %s\n' % group for group in map_list)
######################################################################
## REP: replacement table for frequently-made mistakes
rep_list = [
    # missing space before a dependent noun
    ('것', '_것'),
    ## jamo typed in the wrong order on a Dubeolsik (2-set) keyboard
    # Listing every possible case is impossible; only common slips appear.
    ('ㅇ벗', '없'), # e.g. ㅇ벗어 => 없어
    ('빈', T_PIEUP + '니'), # e.g. 하빈다 => 합니다
    ('낟', T_NIEUN + '다'), # e.g. 하낟 => 한다
    ('싿', T_SSANGSIOS + '다'), # e.g. 이싿 => 있다
    (V_O + '나', V_WA + T_NIEUN), # e.g. 오나전 => 완전
    (T_IEUNG + '미', '임'), # e.g. 뭥미 => 뭐임
    (T_PIEUP + '라', '발'), # e.g. 젭라 => 제발
    ## Replacements for misconjugated irregular verbs/adjectives. Simple
    ## drops or one-or-two-jamo changes are already handled by hunspell's
    ## default replacement rules, so only cases those cannot handle are here.
    # ㅂ-irregular
    (T_PIEUP + '아', '와'),
    (T_PIEUP + '어', '워'),
    (T_PIEUP + '으', '우'),
    # 르-irregular
    ('르어', T_RIEUL + '러'),
    ('르어', T_RIEUL + '라'),
    # 으-irregular
    (V_EU + '어', V_EO),
    (V_EU + '어', V_A),
    ## verb/adjective conjugation
    # '-ㄹ런지' => '-ㄹ는지'
    (T_RIEUL + '런지', T_RIEUL + '는지'),
    # '-스런' => '-스러운' (incorrect contraction)
    ('스런', '스러운'),
    # '-고픈' => '-고 싶은' (incorrect contraction)
    ('고픈', '고_싶은'),
    # '-다더니' => '-다 하더니' (incorrect contraction)
    ('다더니', '다_하더니'),
    ## contracted verb + vowel ending -> full-form verb with that ending;
    # shapes vary, so case by case: 갖다, 머물다, 서툴다, 딛다
    (T_CIEUC + '어', '져'),
    (T_CIEUC + '아', '져'),
    (T_CIEUC + '으', '지'),
    (T_RIEUL + '어', T_RIEUL + '러'),
    (T_RIEUL + '으', '르'),
    (T_TIKEUT + '어', '뎌'),
    (T_TIKEUT + '으', '디'),
    ## typos from confusing linked vs. separated spelling of syllables
    # final consonant + ㅇ initial (e.g. 일찍이/일찌기)
    (T_KIYEOK + L_IEUNG, L_KIYEOK),
    (L_KIYEOK, T_KIYEOK + L_IEUNG),
    (T_NIEUN + L_IEUNG, L_NIEUN),
    (L_NIEUN, T_NIEUN + L_IEUNG),
    (T_RIEUL + L_IEUNG, L_RIEUL),
    (L_RIEUL, T_RIEUL + L_IEUNG),
    (T_MIEUM + L_IEUNG, L_MIEUM),
    (L_MIEUM, T_MIEUM + L_IEUNG),
    (T_PHIEUPH + L_IEUNG, L_PHIEUPH),
    (L_PHIEUPH, T_PHIEUPH + L_IEUNG),
    (T_SIOS + L_IEUNG, L_SIOS),
    (L_SIOS, T_SIOS + L_IEUNG),
    (T_CIEUC + L_IEUNG, L_CIEUC),
    (L_CIEUC, T_CIEUC + L_IEUNG),
    (T_CHIEUCH + L_IEUNG, L_CHIEUCH),
    (L_CHIEUCH, T_CHIEUCH + L_IEUNG),
    (T_RIEUL_KIYEOK + L_IEUNG, T_RIEUL + L_KIYEOK),
    (T_RIEUL + L_KIYEOK, T_RIEUL_KIYEOK + L_IEUNG),
    # ㅅㅎ -> ㅌ (internet slang)
    (T_SIOS + L_HIEUH, L_THIEUTH),
    ## prefixes, suffixes, compounds
    # sai-siot (linking ㅅ)
    (T_SIOS + L_KIYEOK, L_KHIEUKH), # e.g. 숫개 -> 수캐
    (T_SIOS + L_TIKEUT, L_THIEUTH), # e.g. 숫돼지 -> 수퇘지
    ## initial-sound rule (dueum beopchik)
    # Writing ㄹ where ㅇ belongs is rare; the reverse is common.
    ('야', '랴'),
    ('여', '려'),
    ('요', '료'),
    ('유', '류'),
    ('이', '리'),
    ('녀', '여'),
    ('뇨', '요'),
    # Writing ㄹ where ㄴ belongs is rare; the reverse is common.
    ('나', '라'),
    ('노', '로'),
    ('뇌', '뢰'),
    ('누', '루'),
    ('느', '르'),
    ]
# Emit the REP section; each pair is NFD-normalized like the .dic entries.
REP_DEFINES = 'REP %d\n' % len(rep_list)
REP_DEFINES += ''.join(NFD('REP %s %s\n' % (wrong, right))
                       for (wrong, right) in rep_list)
# COMPOUNDRULE patterns; the %d values are flag numbers from flags.py.
compound_rules = [
    # Arabic numerals
    '(%d)*(%d)' % (digit_flag, digit_flag),
    # Arabic numerals + counter unit
    '(%d)*(%d)(%d)' % (digit_flag, digit_flag, counter_flag),
    # Arabic numerals + myriad units (e.g. "300만", "50억")
    '(%d)*(%d)(%d)' % (digit_flag, digit_flag, number_10000_flag),
    # The tokenizer may not split off Latin-alphabet runs, so accept any
    # all-alphabet word and treat it as a noun.
    '(%d)*(%d)?' % (alpha_flag, plural_suffix_flag),
]
# Native-number words spaced at each myriad boundary.
if config.minimum_hunspell_version >= (1, 2, 14):
    compound_rules.append('(%d)?(%d)?(%d)?(%d)?(%d)?' % (number_1000_flag,
                                                         number_100_flag,
                                                         number_10_flag,
                                                         number_1_flag,
                                                         number_10000_flag))
    compound_rules.append('(%d)?(%d)?(%d)?(%d)?' % (number_1000_flag,
                                                    number_100_flag,
                                                    knumber_10_flag,
                                                    knumber_1_flag))
else:
    # NOTE: hunspell 1.2.8 SEGVs once the hundreds place or above is used.
    compound_rules.append('(%d)?(%d)?(%d)?' % (number_10_flag,
                                               number_1_flag,
                                               number_10000_flag))
    compound_rules.append('(%d)?(%d)?' % (knumber_10_flag,
                                          knumber_1_flag))
# Auxiliary verbs written attached: only needed when they are not expanded
# into separate dictionary entries.
if not config.expand_auxiliary_attached:
    compound_rules.append('(%d)(%d)' % (conjugation_eo_flag, auxiliary_eo_flag))
    compound_rules.append('(%d)(%d)' % (conjugation_eun_flag, auxiliary_eun_flag))
    compound_rules.append('(%d)(%d)' % (conjugation_eul_flag, auxiliary_eul_flag))
COMPOUNDRULE_DEFINES = 'COMPOUNDRULE %d\n' % len(compound_rules)
COMPOUNDRULE_DEFINES += ''.join('COMPOUNDRULE %s\n' % rule
                                for rule in compound_rules)
## verb/adjective ending rules
def get_suffix_defines(flagaliases):
    """Return the suffix (verb-ending) rule section as a string.

    NOTE(review): callers invoke this before get_AF(), which suggests it
    extends *flagaliases* as a side effect -- confirm in suffix module.
    """
    rules = suffix.get_rules_string(flagaliases)
    return rules
# josa (particle) rules
def get_josa_defines(flagaliases):
    """Return the josa (particle) rule section as a string."""
    output = josa.get_output(flagaliases)
    return output
| {
"repo_name": "changwoo/hunspell-dict-ko",
"path": "aff.py",
"copies": "1",
"size": "11154",
"license": "mpl-2.0",
"hash": -4456701804743460400,
"line_mean": 32.3767123288,
"line_max": 77,
"alpha_frac": 0.5762364047,
"autogenerated": false,
"ratio": 1.626773493573694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.2703009898273694,
"avg_score": null,
"num_lines": null
} |
# affiliates.py
from django import template
from django.utils import html
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
import urllib;
# Fall back to the default affiliate IDs unless they were already defined
# before this module ran. (dict.has_key() was removed in Python 3; the
# `in` operator is the portable, idiomatic membership test.)
if 'ITUNES_AFFILIATE_ID' not in vars():
    ITUNES_AFFILIATE_ID = 'FU9TTuAveps'
if 'AMAZON_AFFILIATE_ID' not in vars():
    AMAZON_AFFILIATE_ID = '6tringle-20'
# Template-tag registry for this module's filters.
register = template.Library()
def doubleurlescape(some_string):
    """URL-quote *some_string* twice, with no characters treated as safe."""
    escaped_once = urllib.quote(some_string, '')
    return urllib.quote(escaped_once, '')
@register.filter()
@stringfilter
def itunes(value, arg=None, autoescape=None):
    """Turn an iTunes link (http://phobos.apple.com/Web... or
    http://itunes.apple.com/Web...) into a LinkShare affiliate link.

    With an argument, the argument becomes the link text; without one the
    standard iTunes badge image is used.
    Usage Example:
    {{"http://itunes.apple.com/WebObjects/MZStore.woa/wa/viewSoftware?id=288545208&mt=8"|itunes:"Instapaper Pro" }}
    {{"http://itunes.apple.com/WebObjects/MZStore.woa/wa/viewSoftware?id=288545208&mt=8"|itunes }}
    """
    known_prefixes = ('http://phobos.apple.com/WebObjects',
                      'http://itunes.apple.com/WebObjects')
    if value[0:34] not in known_prefixes:
        # Not an iTunes store link; pass it through untouched.
        return value
    # The target URL is double-escaped because it rides inside the
    # linksynergy redirect's RD_PARM1 parameter.
    target = doubleurlescape(value + '&partnerId=30')
    if arg:
        label = arg
    else:
        label = '<img height="15" width="61" alt="Buy app through iTunes" src="http://ax.phobos.apple.com.edgesuite.net/images/badgeitunes61x15dark.gif" />'
    link = ('<a href="http://click.linksynergy.com/fs-bin/stat?id=' +
            ITUNES_AFFILIATE_ID +
            '&offerid=146261&type=3&subid=0&tmpid=1826&RD_PARM1=' +
            target + '">' + label + '</a>')
    return mark_safe(link)
itunes.needs_autoescape = True
@register.filter()
@stringfilter
def amazon_link(value, arg=None, autoescape=None):
    """Affiliate any Amazon link. The argument becomes the anchor text;
    without one, the link itself is placed in the anchor tags.
    Usage Example:
    {{"http://www.amazon.com/Kindle-Amazons-Wireless-Reading-Generation/dp/B00154JDAI/"|amazon_link:"Kindle2"}}
    {{"http://www.amazon.com/dp/B00154JDAI/"|amazon_link}}
    """
    label = arg if arg else value
    anchor = ('<a href="http://www.amazon.com/gp/redirect.html?ie=UTF8&location=' +
              urllib.quote(value, '') +
              '&tag=' +
              AMAZON_AFFILIATE_ID +
              '&linkCode=ur2' +
              '">' + label + '</a>')
    # Invisible tracking pixel required by the Amazon Associates program.
    pixel = ('<img src="http://www.assoc-amazon.com/e/ir?t=' +
             AMAZON_AFFILIATE_ID +
             '&l=ur2&o=1" width="1" height="1" border="0" alt="" style="border:none !important; margin:0px !important;" />')
    return mark_safe(anchor + pixel)
amazon_link.needs_autoescape = True
@register.filter()
@stringfilter
def amazon_asin(value, arg=None, autoescape=None):
    """Affiliate any Amazon ASIN. The argument becomes the anchor text;
    without one a generic "Amazon Product Link: <ASIN>" label is used.
    Usage Example:
    {{"B00154JDAI"|amazon_asin:"Kindle2"}}
    {{"B00154JDAI"|amazon_asin}}
    """
    if arg:
        label = arg
    else:
        label = "Amazon Product Link: %s" % value
    anchor = ('<a href="http://www.amazon.com/gp/product/' +
              value +
              '?ie=UTF8&tag=' +
              AMAZON_AFFILIATE_ID +
              '&linkCode=as2&creativeASIN=' +
              value +
              '">' + label + '</a>')
    # Invisible tracking pixel required by the Amazon Associates program.
    pixel = ('<img src="http://www.assoc-amazon.com/e/ir?t=' +
             AMAZON_AFFILIATE_ID +
             '&l=as2&o=1&a=' +
             value +
             '" width="1" height="1" border="0" alt="" style="border:none !important; margin:0px !important;" />')
    return mark_safe(anchor + pixel)
amazon_asin.needs_autoescape = True
| {
"repo_name": "winks/hyde",
"path": "hydeengine/templatetags/affliiates.py",
"copies": "62",
"size": "4305",
"license": "mit",
"hash": 5220269701794594000,
"line_mean": 36.7631578947,
"line_max": 165,
"alpha_frac": 0.6680603949,
"autogenerated": false,
"ratio": 2.9833679833679834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# Affine Cipher Hacker
# http://inventwithpython.com/hacking (BSD Licensed)
# Affine Hacker copied from AL Sweigart
# Implement this into Main
import pyperclip, affineCipher, detectEnglish, cryptomath
SILENT_MODE = False
def main():
    """Prompt for a ciphertext, brute-force it, and copy any confirmed
    plaintext to the clipboard."""
    # input() already returns a str; the old str(...) wrapper around the
    # prompt literal was redundant and has been removed.
    myMessage = input("Input here: ")
    hackedMessage = hackAffine(myMessage)

    # `is not None` is the idiomatic None test (PEP 8), not `!= None`.
    if hackedMessage is not None:
        # The plaintext is displayed on the screen. For the convenience of
        # the user, we copy the text of the code to the clipboard.
        print('Copying hacked message to clipboard:')
        print(hackedMessage)
        pyperclip.copy(hackedMessage)
    else:
        print('Failed to hack encryption.')
def hackAffine(message):
    """Brute-force every affine key against *message*; return the decryption
    the user confirms as English, or None if none is accepted."""
    print('Hacking...')
    # Python programs can be stopped at any time by pressing Ctrl-C (on
    # Windows) or Ctrl-D (on Mac and Linux)
    print('(Press Ctrl-C or Ctrl-D to quit at any time.)')

    symbol_count = len(affineCipher.SYMBOLS)
    for key in range(symbol_count ** 2):
        keyA = affineCipher.getKeyParts(key)[0]
        # Key A must be coprime with the symbol-set size to be invertible.
        if cryptomath.gcd(keyA, symbol_count) != 1:
            continue

        decryptedText = affineCipher.decryptMessage(key, message)
        if not SILENT_MODE:
            print('Tried Key %s... (%s)' % (key, decryptedText[:40]))

        if detectEnglish.isEnglish(decryptedText):
            # Check with the user if the decrypted key has been found.
            print()
            print('Possible encryption hack:')
            print('Key: %s' % (key))
            print('Decrypted message: ' + decryptedText[:200])
            print()
            print('Enter D for done, or just press Enter to continue hacking:')
            response = input('> ')
            if response.strip().upper().startswith('D'):
                return decryptedText
    return None
# Run main() only when this file is executed as a script, not when it is
# imported as a module.
if __name__ == '__main__':
    main()
| {
"repo_name": "brandonskerritt51/Ciphey",
"path": "source/AffineHacker.py",
"copies": "1",
"size": "2082",
"license": "mit",
"hash": -7774710397419433000,
"line_mean": 33.131147541,
"line_max": 79,
"alpha_frac": 0.6340057637,
"autogenerated": false,
"ratio": 3.834254143646409,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4968259907346409,
"avg_score": null,
"num_lines": null
} |
# Affine Cipher Hacker
# http://inventwithpython.com/hacking (BSD Licensed)
import pyperclip, affineCipher, detectEnglish, cryptomath
SILENT_MODE = False
def main():
    """Hack the hard-coded sample ciphertext and copy any confirmed
    plaintext to the clipboard."""
    # You might want to copy & paste this text from the source code at
    # http://invpy.com/affineHacker.py
    myMessage = """U&'<3dJ^Gjx'-3^MS'Sj0jxuj'G3'%j'<mMMjS'g{GjMMg9j{G'g"'gG'<3^MS'Sj<jguj'm'P^dm{'g{G3'%jMgjug{9'GPmG'gG'-m0'P^dm{LU'5&Mm{'_^xg{9"""
    hackedMessage = hackAffine(myMessage)

    # `is not None` is the idiomatic None test (PEP 8), not `!= None`.
    if hackedMessage is not None:
        # The plaintext is displayed on the screen. For the convenience of
        # the user, we copy the text of the code to the clipboard.
        print('Copying hacked message to clipboard:')
        print(hackedMessage)
        pyperclip.copy(hackedMessage)
    else:
        print('Failed to hack encryption.')
def hackAffine(message):
    """Try every affine key against *message*, asking the user to confirm
    any candidate that looks like English. Returns the confirmed
    plaintext, or None."""
    print('Hacking...')
    # Python programs can be stopped at any time by pressing Ctrl-C (on
    # Windows) or Ctrl-D (on Mac and Linux)
    print('(Press Ctrl-C or Ctrl-D to quit at any time.)')

    num_symbols = len(affineCipher.SYMBOLS)
    for key in range(num_symbols ** 2):
        keyA = affineCipher.getKeyParts(key)[0]
        if cryptomath.gcd(keyA, num_symbols) != 1:
            continue  # keyA not coprime with the symbol set: skip

        decryptedText = affineCipher.decryptMessage(key, message)
        if not SILENT_MODE:
            print('Tried Key %s... (%s)' % (key, decryptedText[:40]))

        if detectEnglish.isEnglish(decryptedText):
            # Check with the user if the decrypted key has been found.
            print()
            print('Possible encryption hack:')
            print('Key: %s' % (key))
            print('Decrypted message: ' + decryptedText[:200])
            print()
            print('Enter D for done, or just press Enter to continue hacking:')
            response = input('> ')
            if response.strip().upper().startswith('D'):
                return decryptedText
    return None
# Run main() only when this file is executed as a script, not when it is
# imported as a module.
if __name__ == '__main__':
main() | {
"repo_name": "coolhacks/python-hacks",
"path": "examples/codebreaker/affineHacker.py",
"copies": "1",
"size": "2121",
"license": "mit",
"hash": -1399614942504336100,
"line_mean": 34.3666666667,
"line_max": 148,
"alpha_frac": 0.6265912306,
"autogenerated": false,
"ratio": 3.4154589371980677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45420501677980674,
"avg_score": null,
"num_lines": null
} |
"""Affine Cipher Hacker
Implements a function that can hack an affine cipher encrypted message.
Attributes:
SILENT_MODE (bool): Specifies whether to print all key attempts.
Note:
* https://www.nostarch.com/crackingcodes/ (BSD Licensed)
"""
from pythontutorials.books.CrackingCodes.Ch14.affineCipher import decryptMessage, SYMBOLS, getKeyParts
from pythontutorials.books.CrackingCodes.Ch13.cryptomath import gcd
from pythontutorials.books.CrackingCodes.Ch11.detectEnglish import isEnglish
SILENT_MODE = True
def main():
    """Demo entry point: hack the sample ciphertext and copy the confirmed
    plaintext to the clipboard."""
    from pyperclip import copy

    # You might want to copy & paste this text from the source code at
    # https://www.nostarch.com/crackingcodes/.
    myMessage = """5QG9ol3La6QI93!xQxaia6faQL9QdaQG1!!axQARLa!!AuaRLQ
ADQALQG93!xQxaGaAfaQ1QX3o1RQARL9Qda!AafARuQLX1LQALQI1iQX3o1RN"Q-5!1RQP36ARu"""

    hackedMessage = hackAffine(myMessage)
    if hackedMessage is None:
        print('Failed to hack encryption.')
    else:
        # Show the plaintext and, for convenience, copy it to the clipboard:
        print('Copying hacked message to clipboard:')
        print(hackedMessage)
        copy(hackedMessage)
def hackAffine(message: str):
    """Brute-force an affine-cipher *message*.

    Tries every valid key, shows candidates that look like English, and
    asks the user to confirm each one.

    Args:
        message: String with the ciphertext to brute-force.

    Returns:
        The confirmed plaintext string, or None if nothing was accepted.
    """
    print('Hacking...')
    # Python programs can be stopped at any time by pressing Ctrl-C (on
    # Windows) or Ctrl-D (on macOS and Linux):
    print('(Press Ctrl-C or Ctrl-D to quit at any time.)')

    num_symbols = len(SYMBOLS)
    for key in range(num_symbols ** 2):
        keyA = getKeyParts(key)[0]
        if gcd(keyA, num_symbols) != 1:
            continue  # keyA not coprime with the symbol set: invalid key

        decryptedText = decryptMessage(key, message)
        if not SILENT_MODE:
            print('Tried Key %s... (%s)' % (key, decryptedText[:40]))

        if isEnglish(decryptedText):
            # Check with the user if the decrypted key has been found:
            print()
            print('Possible encryption hack:')
            print('Key: %s' % key)
            print('Decrypted message: ' + decryptedText[:200])
            print()
            print('Enter D for done, or just press Enter to continue hacking:')
            response = input('> ')
            if response.strip().upper().startswith('D'):
                return decryptedText
    return None
# Run main() only when this file is executed as a script, not when it is
# imported as a module:
if __name__ == '__main__':
    main()
| {
"repo_name": "JoseALermaIII/python-tutorials",
"path": "pythontutorials/books/CrackingCodes/Ch15/affineHacker.py",
"copies": "1",
"size": "2921",
"license": "mit",
"hash": 7821984454907578000,
"line_mean": 32.1931818182,
"line_max": 114,
"alpha_frac": 0.6699760356,
"autogenerated": false,
"ratio": 3.633084577114428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4803060612714428,
"avg_score": null,
"num_lines": null
} |
# Affine Cipher Hacker
# http://inventwithpython.com/hacking (BSD Licensed)
import pyperclip, affineCipher, detectEnglish, cryptomath
SILENT_MODE = False
def main():
    """Hack the hard-coded sample ciphertext and copy any confirmed
    plaintext to the clipboard."""
    # You might want to copy & paste this text from the source code at
    # http://invpy.com/affineHacker.py
    myMessage = """U&'<3dJ^Gjx'-3^MS'Sj0jxuj'G3'%j'<mMMjS'g{GjMMg9j{G'g"'gG'<3^MS'Sj<jguj'm'P^dm{'g{G3'%jMgjug{9'GPmG'gG'-m0'P^dm{LU'5&Mm{'_^xg{9"""
    hackedMessage = hackAffine(myMessage)

    # `is not None` is the idiomatic None test (PEP 8), not `!= None`.
    if hackedMessage is not None:
        # The plaintext is displayed on the screen. For the convenience of
        # the user, we copy the text of the code to the clipboard.
        print('Copying hacked message to clipboard:')
        print(hackedMessage)
        pyperclip.copy(hackedMessage)
    else:
        print('Failed to hack encryption.')
def hackAffine(message):
    """Exhaustively try affine keys on *message*; return the decryption
    the user confirms, or None when every candidate is rejected."""
    print('Hacking...')
    # Python programs can be stopped at any time by pressing Ctrl-C (on
    # Windows) or Ctrl-D (on Mac and Linux)
    print('(Press Ctrl-C or Ctrl-D to quit at any time.)')

    symbol_count = len(affineCipher.SYMBOLS)
    for key in range(symbol_count ** 2):
        keyA = affineCipher.getKeyParts(key)[0]
        # Only keys whose A part is coprime with the symbol count decrypt.
        if cryptomath.gcd(keyA, symbol_count) != 1:
            continue

        decryptedText = affineCipher.decryptMessage(key, message)
        if not SILENT_MODE:
            print('Tried Key %s... (%s)' % (key, decryptedText[:40]))

        if detectEnglish.isEnglish(decryptedText):
            # Check with the user if the decrypted key has been found.
            print()
            print('Possible encryption hack:')
            print('Key: %s' % (key))
            print('Decrypted message: ' + decryptedText[:200])
            print()
            print('Enter D for done, or just press Enter to continue hacking:')
            response = input('> ')
            if response.strip().upper().startswith('D'):
                return decryptedText
    return None
# Run main() only when this file is executed as a script, not when it is
# imported as a module.
if __name__ == '__main__':
main() | {
"repo_name": "SafeW3rd/Ciphers",
"path": "affineHacker.py",
"copies": "1",
"size": "2180",
"license": "mit",
"hash": -2016764720417632800,
"line_mean": 34.3666666667,
"line_max": 148,
"alpha_frac": 0.6096330275,
"autogenerated": false,
"ratio": 3.4935897435897436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9583823024369056,
"avg_score": 0.0038799493441374253,
"num_lines": 60
} |
# Affine Cipher
# http://inventwithpython.com/hacking (BSD Licensed)
import sys, pyperclip, cryptomath, random
# Every character the cipher can handle; len(SYMBOLS) is the modulus of
# the affine arithmetic below.
SYMBOLS = """ !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~""" # note the space at the front
def main():
    """Demo: transform the sample quote with a fixed key, print the result
    and copy it to the clipboard."""
    message = """"A computer would deserve to be called intelligent if it could deceive a human into believing that it was human." -Alan Turing"""
    key = 2023
    mode = 'encrypt' # set to 'encrypt' or 'decrypt'

    if mode == 'encrypt':
        translated = encryptMessage(key, message)
    elif mode == 'decrypt':
        translated = decryptMessage(key, message)

    print('Key: %s' % (key))
    print('%sed text:' % (mode.title()))
    print(translated)
    pyperclip.copy(translated)
    print('Full %sed text copied to clipboard.' % (mode))
def getKeyParts(key):
    """Split *key* into (keyA, keyB): quotient and remainder of dividing
    by the symbol-set size."""
    keyA, keyB = divmod(key, len(SYMBOLS))
    return (keyA, keyB)
def checkKeys(keyA, keyB, mode):
    """Validate the key pair for *mode*; exit the program with a message
    on any problem (weak key when encrypting, out-of-range parts, or a
    keyA that is not coprime with the symbol-set size)."""
    symbol_count = len(SYMBOLS)
    if mode == 'encrypt':
        if keyA == 1:
            sys.exit('The affine cipher becomes incredibly weak when key A is set to 1. Choose a different key.')
        if keyB == 0:
            sys.exit('The affine cipher becomes incredibly weak when key B is set to 0. Choose a different key.')
    if keyA < 0 or keyB < 0 or keyB > symbol_count - 1:
        sys.exit('Key A must be greater than 0 and Key B must be between 0 and %s.' % (symbol_count - 1))
    if cryptomath.gcd(keyA, symbol_count) != 1:
        sys.exit('Key A (%s) and the symbol set size (%s) are not relatively prime. Choose a different key.' % (keyA, symbol_count))
def encryptMessage(key, message):
    """Affine-encrypt *message* with *key*; characters outside SYMBOLS
    pass through unchanged."""
    keyA, keyB = getKeyParts(key)
    checkKeys(keyA, keyB, 'encrypt')
    symbol_count = len(SYMBOLS)
    pieces = []
    for symbol in message:
        if symbol in SYMBOLS:
            # E(x) = (keyA * x + keyB) mod symbol_count
            index = SYMBOLS.find(symbol)
            pieces.append(SYMBOLS[(index * keyA + keyB) % symbol_count])
        else:
            pieces.append(symbol)  # not in the symbol set: keep as-is
    return ''.join(pieces)
def decryptMessage(key, message):
    """Invert encryptMessage: D(y) = (y - keyB) * keyA^-1 mod symbol count;
    characters outside SYMBOLS pass through unchanged."""
    keyA, keyB = getKeyParts(key)
    checkKeys(keyA, keyB, 'decrypt')
    symbol_count = len(SYMBOLS)
    inverseA = cryptomath.findModInverse(keyA, symbol_count)
    pieces = []
    for symbol in message:
        if symbol in SYMBOLS:
            index = SYMBOLS.find(symbol)
            pieces.append(SYMBOLS[(index - keyB) * inverseA % symbol_count])
        else:
            pieces.append(symbol)  # not in the symbol set: keep as-is
    return ''.join(pieces)
def getRandomKey():
    """Return a random valid combined key (keyA coprime with the
    symbol-set size)."""
    symbol_count = len(SYMBOLS)
    while True:
        keyA = random.randint(2, symbol_count)
        keyB = random.randint(2, symbol_count)
        if cryptomath.gcd(keyA, symbol_count) == 1:
            return keyA * symbol_count + keyB
# Run main() only when this file is executed as a script, not when it is
# imported as a module.
if __name__ == '__main__':
main() | {
"repo_name": "coolhacks/python-hacks",
"path": "examples/codebreaker/affineCipher.py",
"copies": "1",
"size": "2955",
"license": "mit",
"hash": -131558046693880850,
"line_mean": 35.0487804878,
"line_max": 148,
"alpha_frac": 0.6345177665,
"autogenerated": false,
"ratio": 3.5262529832935563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46607707497935563,
"avg_score": null,
"num_lines": null
} |
"""Affine Cipher
Provides functions that implement affine cipher encryption and decryption.
Attributes:
SYMBOLS (str): String containing all symbols that can be encrypted/decrypted.
Example:
>>> import pythontutorials.books.CrackingCodes.Ch14.affineCipher as affineCipher
>>> someString = 'Enthusiasm is contagious. Not having enthusiasm is also contagious.'
>>> key = affineCipher.getRandomKey() # key = 921, in this example
>>> affineCipher.encryptMessage(key, someString)
'xq3eBprFpdLrpLf4q3FRr4BpyLi43LeFOrqRL6q3eBprFpdLrpLFQp4Lf4q3FRr4Bpy'
Note:
* https://www.nostarch.com/crackingcodes/ (BSD Licensed)
* There must be a "dictionary.txt" file in this directory with all
English words in it, one word per line. You can download this from
https://www.nostarch.com/crackingcodes/.
"""
import sys
import random
from pythontutorials.books.CrackingCodes.Ch13.cryptomath import gcd, findModInverse
# Every character the cipher can handle; len(SYMBOLS) is the modulus of
# the affine arithmetic below.
SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890 !?.'
def main():
    """Demo: encrypt (or decrypt) the sample quote, print and copy the
    result, then round-trip it back to plaintext for verification."""
    from pyperclip import copy

    message = """"A computer would deserve to be called intelligent if it
could deceive a human into believing that it was human." -Alan Turing"""
    key = 2894
    mode = 'encrypt'  # Set to either 'encrypt' or 'decrypt'.

    if mode == 'encrypt':
        translated = encryptMessage(key, message)
    elif mode == 'decrypt':
        translated = decryptMessage(key, message)

    print('Key: %s' % key)
    print('%sed text:' % (mode.title()))
    print(translated)
    copy(translated)
    print('Full %sed text copied to clipboard.' % mode)

    # Round-trip check: decrypt what was just encrypted.
    if mode == 'encrypt':
        print("Decrypted text:")
        print(decryptMessage(key, translated))
    return None
def getKeyParts(key: int) -> (int, int):
    """Split *key* into its two affine components.

    keyA is the quotient and keyB the remainder of dividing *key* by the
    length of SYMBOLS.

    Args:
        key: Integer key used to encrypt message.

    Returns:
        Tuple (keyA, keyB).
    """
    keyA, keyB = divmod(key, len(SYMBOLS))
    return keyA, keyB
def checkKeys(keyA: int, keyB: int, mode: str) -> None:
    """Validate the affine sub-keys, exiting the program on any violation.

    Rejects the degenerate encryption keys (key A of 1, key B of 0),
    out-of-range values, and any key A that is not relatively prime with
    the symbol-set size (such a key A has no modular inverse, so the
    cipher could not be decrypted).

    Args:
        keyA: Integer integral of the original key after floor division by length of SYMBOLS.
        keyB: Integer remainder of the original key after modulus by length of SYMBOLS.
        mode: String specifying whether to 'encrypt' or 'decrypt'.

    Returns:
        None if successful, exits program with error message otherwise.
    """
    encrypting = mode == 'encrypt'
    if encrypting and keyA == 1:
        sys.exit('Cipher is weak if key A is 1. Choose a different key.')
    if encrypting and keyB == 0:
        sys.exit('Cipher is weak if key B is 0. Choose a different key.')
    if keyA < 0 or not 0 <= keyB <= len(SYMBOLS) - 1:
        sys.exit('Key A must be greater than 0 and Key B must be between 0 and %s ' % (len(SYMBOLS) - 1))
    if gcd(keyA, len(SYMBOLS)) != 1:
        sys.exit('Key A (%s) and the symbol set size (%s) are not relatively prime. Choose a different key.'
                 % (keyA, len(SYMBOLS)))
def encryptMessage(key: int, message: str) -> str:
    """Affine cipher encryption

    Encrypts given message with given key using the affine cipher.
    Each symbol in SYMBOLS at index i maps to SYMBOLS[(i*keyA + keyB) % N];
    characters outside the symbol set are copied through unchanged.

    Args:
        key: Integer encryption key to encrypt with affine cipher.
        message: Message string to encrypt.

    Returns:
        Encrypted message string.
    """
    keyA, keyB = getKeyParts(key)
    checkKeys(keyA, keyB, 'encrypt')
    n = len(SYMBOLS)
    pieces = []
    for symbol in message:
        index = SYMBOLS.find(symbol)
        if index == -1:
            pieces.append(symbol)  # Not in the symbol set: pass through as-is.
        else:
            pieces.append(SYMBOLS[(index * keyA + keyB) % n])
    return ''.join(pieces)
def decryptMessage(key: int, message: str) -> str:
    """Affine cipher decryption

    Decrypts given affine cipher encrypted message with given key.
    Inverts encryption by mapping index i to (i - keyB) * keyA^-1 mod N,
    where keyA^-1 is keyA's modular inverse; characters outside the
    symbol set are copied through unchanged.

    Args:
        key: Integer decryption key to decrypt affine cipher.
        message: Message string to decrypt.

    Returns:
        Decrypted message string.
    """
    keyA, keyB = getKeyParts(key)
    checkKeys(keyA, keyB, 'decrypt')
    n = len(SYMBOLS)
    inverseOfA = findModInverse(keyA, n)
    pieces = []
    for symbol in message:
        index = SYMBOLS.find(symbol)
        if index == -1:
            pieces.append(symbol)  # Not in the symbol set: pass through as-is.
        else:
            pieces.append(SYMBOLS[(index - keyB) * inverseOfA % n])
    return ''.join(pieces)
def getRandomKey() -> int:
    """Affine cipher key generator

    Generates a random key that can be used with the affine cipher.
    Key A is redrawn until it is relatively prime with the symbol-set size.

    Returns:
        Random, valid integer key

    Note:
        Bug fix: the upper ``randint`` bound is ``len(SYMBOLS) - 1`` rather
        than ``len(SYMBOLS)``. A key B equal to ``len(SYMBOLS)`` would
        collapse (via ``getKeyParts``) to an effective key B of 0 and key A
        of keyA + 1, which ``checkKeys`` rejects for encryption.
    """
    while True:
        keyA = random.randint(2, len(SYMBOLS) - 1)
        keyB = random.randint(2, len(SYMBOLS) - 1)
        if gcd(keyA, len(SYMBOLS)) == 1:
            return keyA * len(SYMBOLS) + keyB
# If affineCipher.py is run as a script (instead of imported as a module),
# run the demo in main():
if __name__ == '__main__':
    main()
| {
"repo_name": "JoseALermaIII/python-tutorials",
"path": "pythontutorials/books/CrackingCodes/Ch14/affineCipher.py",
"copies": "1",
"size": "5506",
"license": "mit",
"hash": -2644642832449897500,
"line_mean": 31.9700598802,
"line_max": 108,
"alpha_frac": 0.6600072648,
"autogenerated": false,
"ratio": 3.776406035665295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4936413300465295,
"avg_score": null,
"num_lines": null
} |
# Affine Cipher
# http://inventwithpython.com/hacking (BSD Licensed)
import sys, pyperclip, cryptomath, random
# Symbol set: the printable ASCII characters from space through '~'.
SYMBOLS = """ !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~""" # note the space at the front
def main():
    """Demo: encrypt or decrypt the hard-coded message, print it, and copy
    the result to the clipboard."""
    myMessage = """"A computer would deserve to be called intelligent if it could deceive a human into believing that it was human." -Alan Turing"""
    myKey = 2023
    myMode = 'encrypt'  # set to 'encrypt' or 'decrypt'

    if myMode == 'encrypt':
        translated = encryptMessage(myKey, myMessage)
    elif myMode == 'decrypt':
        translated = decryptMessage(myKey, myMessage)
    else:
        # Bug fix: an unrecognized mode previously left `translated` unbound
        # and the print below raised NameError. Fail fast instead.
        sys.exit("Unknown mode %r: expected 'encrypt' or 'decrypt'." % myMode)

    print('Key: %s' % (myKey))
    print('%sed text:' % (myMode.title()))
    print(translated)
    pyperclip.copy(translated)
    print('Full %sed text copied to clipboard.' % (myMode))
def getKeyParts(key):
    """Split the combined key into (keyA, keyB) by the symbol-set size."""
    keyA, keyB = divmod(key, len(SYMBOLS))
    return (keyA, keyB)
def checkKeys(keyA, keyB, mode):
    """Validate the affine sub-keys; exits the program with a message if invalid.

    When encrypting, key A must not be 1 and key B must not be 0 (both make
    the cipher trivially weak). Keys must be non-negative, key B must be less
    than the symbol-set size, and key A must be relatively prime with the
    symbol-set size so that a modular inverse exists for decryption.
    """
    if keyA == 1 and mode == 'encrypt':
        sys.exit('The affine cipher becomes incredibly weak when key A is set to 1. Choose a different key.')
    if keyB == 0 and mode == 'encrypt':
        sys.exit('The affine cipher becomes incredibly weak when key B is set to 0. Choose a different key.')
    if keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1:
        sys.exit('Key A must be greater than 0 and Key B must be between 0 and %s.' % (len(SYMBOLS) - 1))
    if cryptomath.gcd(keyA, len(SYMBOLS)) != 1:
        sys.exit('Key A (%s) and the symbol set size (%s) are not relatively prime. Choose a different key.' % (keyA, len(SYMBOLS)))
def encryptMessage(key, message):
    """Encrypt `message` with the affine cipher using the combined `key`.

    A symbol at index i in SYMBOLS maps to SYMBOLS[(i*keyA + keyB) % N];
    characters outside the symbol set are appended unchanged.
    """
    keyA, keyB = getKeyParts(key)
    checkKeys(keyA, keyB, 'encrypt')
    ciphertext = ''
    for symbol in message:
        if symbol in SYMBOLS:
            # encrypt this symbol
            symIndex = SYMBOLS.find(symbol)
            ciphertext += SYMBOLS[(symIndex * keyA + keyB) % len(SYMBOLS)]
        else:
            ciphertext += symbol  # just append this symbol unencrypted
    return ciphertext
def decryptMessage(key, message):
    """Decrypt an affine-cipher `message` with the combined `key`.

    Inverts encryption: index i maps to (i - keyB) * keyA^-1 mod N, where
    keyA^-1 is the modular inverse of keyA; characters outside the symbol
    set are appended unchanged.
    """
    keyA, keyB = getKeyParts(key)
    checkKeys(keyA, keyB, 'decrypt')
    plaintext = ''
    modInverseOfKeyA = cryptomath.findModInverse(keyA, len(SYMBOLS))
    for symbol in message:
        if symbol in SYMBOLS:
            # decrypt this symbol
            symIndex = SYMBOLS.find(symbol)
            plaintext += SYMBOLS[(symIndex - keyB) * modInverseOfKeyA % len(SYMBOLS)]
        else:
            plaintext += symbol  # just append this symbol undecrypted
    return plaintext
def getRandomKey():
    """Return a random valid affine cipher key.

    Bug fix: the upper ``randint`` bound is now ``len(SYMBOLS) - 1`` instead
    of ``len(SYMBOLS)``. A key B equal to ``len(SYMBOLS)`` would collapse
    (via ``getKeyParts``) to an effective key B of 0 and key A of keyA + 1,
    which ``checkKeys`` rejects for encryption.
    """
    while True:
        keyA = random.randint(2, len(SYMBOLS) - 1)
        keyB = random.randint(2, len(SYMBOLS) - 1)
        if cryptomath.gcd(keyA, len(SYMBOLS)) == 1:
            return keyA * len(SYMBOLS) + keyB
# If affineCipher.py is run as a script (instead of imported as a module),
# run the demo in main().
if __name__ == '__main__':
    main()
| {
"repo_name": "brandonskerritt51/Ciphey",
"path": "source/affineCipher.py",
"copies": "1",
"size": "3038",
"license": "mit",
"hash": 164405921672283870,
"line_mean": 35.0487804878,
"line_max": 148,
"alpha_frac": 0.6171823568,
"autogenerated": false,
"ratio": 3.603795966785291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9712255592351222,
"avg_score": 0.001744546246813781,
"num_lines": 82
} |
""" Affine image registration module consisting of the following classes:
AffineMap: encapsulates the necessary information to perform affine
transforms between two domains, defined by a `static` and a `moving`
image. The `domain` of the transform is the set of points in the
`static` image's grid, and the `codomain` is the set of points in
the `moving` image. When we call the `transform` method, `AffineMap`
maps each point `x` of the domain (`static` grid) to the codomain
(`moving` grid) and interpolates the `moving` image at that point
to obtain the intensity value to be placed at `x` in the resulting
grid. The `transform_inverse` method performs the opposite operation
mapping points in the codomain to points in the domain.
ParzenJointHistogram: computes the marginal and joint distributions of
intensities of a pair of images, using Parzen windows [Parzen62]
with a cubic spline kernel, as proposed by Mattes et al. [Mattes03].
It also computes the gradient of the joint histogram w.r.t. the
parameters of a given transform.
MutualInformationMetric: computes the value and gradient of the mutual
information metric the way `Optimizer` needs them. That is, given
a set of transform parameters, it will use `ParzenJointHistogram`
to compute the value and gradient of the joint intensity histogram
evaluated at the given parameters, and evaluate the value and
gradient of the histogram's mutual information.
AffineRegistration: it runs the multi-resolution registration, putting
all the pieces together. It needs to create the scale space of the
images and run the multi-resolution registration by using the Metric
and the Optimizer at each level of the Gaussian pyramid. At each
level, it will setup the metric to compute value and gradient of the
metric with the input images with different levels of smoothing.
References
----------
[Parzen62] E. Parzen. On the estimation of a probability density
function and the mode. Annals of Mathematical Statistics,
33(3), 1065-1076, 1962.
[Mattes03] Mattes, D., Haynor, D. R., Vesselle, H., Lewellen, T. K.,
& Eubank, W. PET-CT image registration in the chest using
free-form deformations. IEEE Transactions on Medical
Imaging, 22(1), 120-8, 2003.
"""
import numpy as np
import numpy.linalg as npl
import scipy.ndimage as ndimage
from dipy.core.optimize import Optimizer
from dipy.core.optimize import SCIPY_LESS_0_12
from dipy.align import vector_fields as vf
from dipy.align import VerbosityLevels
from dipy.align.parzenhist import (ParzenJointHistogram,
sample_domain_regular,
compute_parzen_mi)
from dipy.align.imwarp import (get_direction_and_spacings, ScaleSpace)
from dipy.align.scalespace import IsotropicScaleSpace
from warnings import warn
# Interpolation modes accepted by AffineMap.transform / transform_inverse.
_interp_options = ['nearest', 'linear']

# Dispatch table mapping (image dimension, interpolation mode) to the
# corresponding low-level resampling routine in dipy.align.vector_fields.
_transform_method = {}
_transform_method[(2, 'nearest')] = vf.transform_2d_affine_nn
_transform_method[(3, 'nearest')] = vf.transform_3d_affine_nn
_transform_method[(2, 'linear')] = vf.transform_2d_affine
_transform_method[(3, 'linear')] = vf.transform_3d_affine

# An affine matrix must be a 2-dimensional (square) array.
_number_dim_affine_matrix = 2
class AffineInversionError(Exception):
    """Raised when an affine matrix is malformed or cannot be inverted."""
    pass
class AffineInvalidValuesError(Exception):
    """Raised when an affine matrix contains non-finite values or an
    invalid last row (not ``[0, ..., 0, 1]``)."""
    pass
class AffineMap(object):
    """Affine transform between a `static` (domain) and a `moving`
    (codomain) image grid, with methods to resample images both ways."""

    def __init__(self, affine, domain_grid_shape=None, domain_grid2world=None,
                 codomain_grid_shape=None, codomain_grid2world=None):
        """ AffineMap

        Implements an affine transformation whose domain is given by
        `domain_grid` and `domain_grid2world`, and whose co-domain is
        given by `codomain_grid` and `codomain_grid2world`.

        The actual transform is represented by the `affine` matrix, which
        operate in world coordinates. Therefore, to transform a moving image
        towards a static image, we first map each voxel (i,j,k) of the static
        image to world coordinates (x,y,z) by applying `domain_grid2world`.
        Then we apply the `affine` transform to (x,y,z) obtaining (x', y', z')
        in moving image's world coordinates. Finally, (x', y', z') is mapped
        to voxel coordinates (i', j', k') in the moving image by multiplying
        (x', y', z') by the inverse of `codomain_grid2world`. The
        `codomain_grid_shape` is used analogously to transform the static
        image towards the moving image when calling `transform_inverse`.

        If the domain/co-domain information is not provided (None) then the
        sampling information needs to be specified each time the `transform`
        or `transform_inverse` is called to transform images. Note that such
        sampling information is not necessary to transform points defined in
        physical space, such as stream lines.

        Parameters
        ----------
        affine : array, shape (dim + 1, dim + 1)
            the matrix defining the affine transform, where `dim` is the
            dimension of the space this map operates in (2 for 2D images,
            3 for 3D images). If None, then `self` represents the identity
            transformation.
        domain_grid_shape : sequence, shape (dim,), optional
            the shape of the default domain sampling grid. When `transform`
            is called to transform an image, the resulting image will have
            this shape, unless a different sampling information is provided.
            If None, then the sampling grid shape must be specified each time
            the `transform` method is called.
        domain_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the domain grid.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        codomain_grid_shape : sequence of integers, shape (dim,)
            the shape of the default co-domain sampling grid. When
            `transform_inverse` is called to transform an image, the resulting
            image will have this shape, unless a different sampling
            information is provided. If None (the default), then the sampling
            grid shape must be specified each time the `transform_inverse`
            method is called.
        codomain_grid2world : array, shape (dim + 1, dim + 1)
            the grid-to-world transform associated with the co-domain grid.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        """
        self.set_affine(affine)
        self.domain_shape = domain_grid_shape
        self.domain_grid2world = domain_grid2world
        self.codomain_shape = codomain_grid_shape
        self.codomain_grid2world = codomain_grid2world

    def get_affine(self):
        """
        Returns the value of the transformation, not a reference!

        Returns
        -------
        affine : ndarray
            Copy of the transform, not a reference.
        """
        # returning a copy to insulate it from changes outside object
        return self.affine.copy()

    def set_affine(self, affine):
        """ Sets the affine transform (operating in physical space)

        Also sets `self.affine_inv` - the inverse of `affine`, or None if
        there is no inverse.

        Parameters
        ----------
        affine : array, shape (dim + 1, dim + 1)
            the matrix representing the affine transform operating in
            physical space. The domain and co-domain information
            remains unchanged. If None, then `self` represents the identity
            transformation.

        Raises
        ------
        TypeError
            if `affine` cannot be converted to an ndarray.
        AffineInversionError
            if `affine` is not a square 2D matrix or is singular.
        AffineInvalidValuesError
            if `affine` contains non-finite values or a bad last row.
        """
        if affine is None:
            self.affine = None
            self.affine_inv = None
            return
        try:
            affine = np.array(affine)
        except Exception as e:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; the original error is chained for debugging.
            raise TypeError('Input must be type ndarray, or be convertible'
                            ' to one.') from e
        if len(affine.shape) != _number_dim_affine_matrix:
            raise AffineInversionError('Affine transform must be 2D')
        if not affine.shape[0] == affine.shape[1]:
            raise AffineInversionError('Affine transform must be a square '
                                       'matrix')
        if not np.all(np.isfinite(affine)):
            raise AffineInvalidValuesError('Affine transform contains invalid'
                                           ' elements')
        # checking on proper augmentation:
        # the first n-1 entries of the last row must all be zero
        if not np.all(affine[-1, :-1] == 0.0):
            raise AffineInvalidValuesError('First {n_1} columns in last row'
                                           ' in matrix contain non-zeros!'
                                           .format(n_1=affine.shape[0] - 1))
        # Last row, last column in matrix must be 1.0!
        if affine[-1, -1] != 1.0:
            raise AffineInvalidValuesError('Last row, last column in matrix'
                                           ' is not 1.0!')
        # making a copy to insulate it from changes outside object
        self.affine = affine.copy()
        try:
            self.affine_inv = npl.inv(affine)
        except npl.LinAlgError as e:
            raise AffineInversionError('Affine cannot be inverted') from e

    def __str__(self):
        """Printable format - relies on ndarray's implementation."""
        return str(self.affine)

    def __repr__(self):
        """Relodable representation - also relies on ndarray's
        implementation."""
        return self.affine.__repr__()

    def __format__(self, format_spec):
        """Implementation various formatting options

        Supported specs: ''/' '/'f'/'full' (whole matrix), 'r'/'rotation'
        (upper-left dim x dim block), 't'/'translation' (last column as a
        column vector).
        """
        if format_spec is None or self.affine is None:
            return str(self.affine)
        elif isinstance(format_spec, str):
            format_spec = format_spec.lower()
            if format_spec in ['', ' ', 'f', 'full']:
                return str(self.affine)
            # rotation part only (initial 3x3)
            elif format_spec in ['r', 'rotation']:
                return str(self.affine[:-1, :-1])
            # translation part only (4th col)
            elif format_spec in ['t', 'translation']:
                # notice unusual indexing to make it a column vector
                # i.e. rows from 0 to n-1, cols from n to n
                return str(self.affine[:-1, -1:])
            else:
                allowed_formats_print_map = ['full', 'f',
                                             'rotation', 'r',
                                             'translation', 't']
                raise NotImplementedError('Format {} not recognized or'
                                          ' implemented.\nTry one of {}'
                                          .format(format_spec,
                                                  allowed_formats_print_map))

    def _apply_transform(self, image, interp='linear', image_grid2world=None,
                         sampling_grid_shape=None, sampling_grid2world=None,
                         resample_only=False, apply_inverse=False):
        """ Transforms the input image applying this affine transform

        This is a generic function to transform images using either this
        (direct) transform or its inverse.

        If applying the direct transform (`apply_inverse=False`):
            by default, the transformed image is sampled at a grid defined by
            `self.domain_shape` and `self.domain_grid2world`.
        If applying the inverse transform (`apply_inverse=True`):
            by default, the transformed image is sampled at a grid defined by
            `self.codomain_shape` and `self.codomain_grid2world`.

        If the sampling information was not provided at initialization of this
        transform then `sampling_grid_shape` is mandatory.

        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.domain_shape` is used instead
            (which must have been set at initialization, otherwise an
            exception will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.domain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.
        apply_inverse : Boolean, optional
            If False (the default) the image is transformed from the codomain
            of this transform to its domain using the (direct) affine
            transform. Otherwise, the image is transformed from the domain
            of this transform to its codomain using the (inverse) affine
            transform.

        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or
            `self.domain_shape`
            the transformed image, sampled at the requested grid
        """
        # Verify valid interpolation requested
        if interp not in _interp_options:
            raise ValueError('Unknown interpolation method: %s' % (interp,))
        # Obtain sampling grid
        if sampling_grid_shape is None:
            if apply_inverse:
                sampling_grid_shape = self.codomain_shape
            else:
                sampling_grid_shape = self.domain_shape
        if sampling_grid_shape is None:
            msg = 'Unknown sampling info. Provide a valid sampling_grid_shape'
            raise ValueError(msg)
        dim = len(sampling_grid_shape)
        shape = np.array(sampling_grid_shape, dtype=np.int32)
        # Verify valid image dimension
        img_dim = len(image.shape)
        if img_dim < 2 or img_dim > 3:
            raise ValueError('Undefined transform for dim: %d' % (img_dim,))
        # Obtain grid-to-world transform for sampling grid
        if sampling_grid2world is None:
            if apply_inverse:
                sampling_grid2world = self.codomain_grid2world
            else:
                sampling_grid2world = self.domain_grid2world
        if sampling_grid2world is None:
            sampling_grid2world = np.eye(dim + 1)
        # Obtain world-to-grid transform for input image
        if image_grid2world is None:
            if apply_inverse:
                image_grid2world = self.domain_grid2world
            else:
                image_grid2world = self.codomain_grid2world
        if image_grid2world is None:
            image_grid2world = np.eye(dim + 1)
        image_world2grid = npl.inv(image_grid2world)
        # Compute the transform from sampling grid to input image grid
        if apply_inverse:
            aff = self.affine_inv
        else:
            aff = self.affine
        if (aff is None) or resample_only:
            comp = image_world2grid.dot(sampling_grid2world)
        else:
            comp = image_world2grid.dot(aff.dot(sampling_grid2world))
        # Transform the input image
        if interp == 'linear':
            image = image.astype(np.float64)
        transformed = _transform_method[(dim, interp)](image, shape, comp)
        return transformed

    def transform(self, image, interp='linear', image_grid2world=None,
                  sampling_grid_shape=None, sampling_grid2world=None,
                  resample_only=False):
        """ Transforms the input image from co-domain to domain space

        By default, the transformed image is sampled at a grid defined by
        `self.domain_shape` and `self.domain_grid2world`. If such
        information was not provided then `sampling_grid_shape` is mandatory.

        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.codomain_shape` is used instead
            (which must have been set at initialization, otherwise an
            exception will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.codomain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.

        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or
            `self.codomain_shape`
            the transformed image, sampled at the requested grid
        """
        transformed = self._apply_transform(image, interp, image_grid2world,
                                            sampling_grid_shape,
                                            sampling_grid2world,
                                            resample_only,
                                            apply_inverse=False)
        return np.array(transformed)

    def transform_inverse(self, image, interp='linear', image_grid2world=None,
                          sampling_grid_shape=None, sampling_grid2world=None,
                          resample_only=False):
        """ Transforms the input image from domain to co-domain space

        By default, the transformed image is sampled at a grid defined by
        `self.codomain_shape` and `self.codomain_grid2world`. If such
        information was not provided then `sampling_grid_shape` is mandatory.

        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.codomain_shape` is used instead
            (which must have been set at initialization, otherwise an
            exception will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.codomain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.

        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or
            `self.codomain_shape`
            the transformed image, sampled at the requested grid
        """
        transformed = self._apply_transform(image, interp, image_grid2world,
                                            sampling_grid_shape,
                                            sampling_grid2world,
                                            resample_only,
                                            apply_inverse=True)
        return np.array(transformed)
class MutualInformationMetric(object):
def __init__(self, nbins=32, sampling_proportion=None):
r""" Initializes an instance of the Mutual Information metric
This class implements the methods required by Optimizer to drive the
registration process.
Parameters
----------
nbins : int, optional
the number of bins to be used for computing the intensity
histograms. The default is 32.
sampling_proportion : None or float in interval (0, 1], optional
There are two types of sampling: dense and sparse. Dense sampling
uses all voxels for estimating the (joint and marginal) intensity
histograms, while sparse sampling uses a subset of them. If
`sampling_proportion` is None, then dense sampling is
used. If `sampling_proportion` is a floating point value in (0,1]
then sparse sampling is used, where `sampling_proportion`
specifies the proportion of voxels to be used. The default is
None.
Notes
-----
Since we use linear interpolation, images are not, in general,
differentiable at exact voxel coordinates, but they are differentiable
between voxel coordinates. When using sparse sampling, selected voxels
are slightly moved by adding a small random displacement within one
voxel to prevent sampling points from being located exactly at voxel
coordinates. When using dense sampling, this random displacement is
not applied.
"""
self.histogram = ParzenJointHistogram(nbins)
self.sampling_proportion = sampling_proportion
self.metric_val = None
self.metric_grad = None
    def setup(self, transform, static, moving, static_grid2world=None,
              moving_grid2world=None, starting_affine=None):
        r""" Prepares the metric to compute intensity densities and gradients

        The histograms will be setup to compute probability densities of
        intensities within the minimum and maximum values of `static` and
        `moving`

        Parameters
        ----------
        transform: instance of Transform
            the transformation with respect to whose parameters the gradient
            must be computed
        static : array, shape (S, R, C) or (R, C)
            static image
        moving : array, shape (S', R', C') or (R', C')
            moving image. The dimensions of the static (S, R, C) and moving
            (S', R', C') images do not need to be the same.
        static_grid2world : array (dim+1, dim+1), optional
            the grid-to-space transform of the static image. The default is
            None, implying the transform is the identity.
        moving_grid2world : array (dim+1, dim+1)
            the grid-to-space transform of the moving image. The default is
            None, implying the spacing along all axes is 1.
        starting_affine : array, shape (dim+1, dim+1), optional
            the pre-aligning matrix (an affine transform) that roughly aligns
            the moving image towards the static image. If None, no
            pre-alignment is performed. If a pre-alignment matrix is
            available, it is recommended to provide this matrix as
            `starting_affine` instead of manually transforming the moving
            image to reduce interpolation artifacts. The default is None,
            implying no pre-alignment is performed.
        """
        n = transform.get_number_of_parameters()
        # Pre-allocated buffer the MI gradient is written into.
        self.metric_grad = np.zeros(n, dtype=np.float64)
        self.dim = len(static.shape)
        # Missing grid-to-world transforms default to the identity.
        if moving_grid2world is None:
            moving_grid2world = np.eye(self.dim + 1)
        if static_grid2world is None:
            static_grid2world = np.eye(self.dim + 1)
        self.transform = transform
        self.static = np.array(static).astype(np.float64)
        self.moving = np.array(moving).astype(np.float64)
        # Cache both directions of each grid/world transform; the inverses
        # are needed when mapping world-space points back onto the grids.
        self.static_grid2world = static_grid2world
        self.static_world2grid = npl.inv(static_grid2world)
        self.moving_grid2world = moving_grid2world
        self.moving_world2grid = npl.inv(moving_grid2world)
        self.static_direction, self.static_spacing = \
            get_direction_and_spacings(static_grid2world, self.dim)
        self.moving_direction, self.moving_spacing = \
            get_direction_and_spacings(moving_grid2world, self.dim)
        self.starting_affine = starting_affine
        # Initial affine map: the pre-alignment if given, else identity.
        P = np.eye(self.dim + 1)
        if self.starting_affine is not None:
            P = self.starting_affine
        self.affine_map = AffineMap(P, static.shape, static_grid2world,
                                    moving.shape, moving_grid2world)
        # Pick the scalar interpolator matching the image dimensionality.
        if self.dim == 2:
            self.interp_method = vf.interpolate_scalar_2d
        else:
            self.interp_method = vf.interpolate_scalar_3d
        if self.sampling_proportion is None:
            # Dense sampling: all voxels are used, no sample list needed.
            self.samples = None
            self.ns = 0
        else:
            # Sparse sampling: take roughly `sampling_proportion` of the
            # voxels on a regular sub-grid (every k-th voxel).
            k = int(np.ceil(1.0 / self.sampling_proportion))
            shape = np.array(static.shape, dtype=np.int32)
            self.samples = sample_domain_regular(k, shape, static_grid2world)
            self.samples = np.array(self.samples)
            self.ns = self.samples.shape[0]
            # Add a column of ones (homogeneous coordinates)
            self.samples = np.hstack((self.samples,
                                      np.ones(self.ns)[:, None]))
            if self.starting_affine is None:
                self.samples_prealigned = self.samples
            else:
                # Pre-aligned sample positions (used for Jacobian evaluation).
                self.samples_prealigned = \
                    self.starting_affine.dot(self.samples.T).T
            # Sample the static image once; its values never change during
            # the optimization so they can be cached here.
            static_p = self.static_world2grid.dot(self.samples.T).T
            static_p = static_p[..., :self.dim]
            self.static_vals, inside = self.interp_method(static, static_p)
            self.static_vals = np.array(self.static_vals, dtype=np.float64)
        self.histogram.setup(self.static, self.moving)
    def _update_histogram(self):
        r""" Updates the histogram according to the current affine transform

        The current affine transform is given by `self.affine_map`, which
        must be set before calling this method.

        Returns
        -------
        static_values: array, shape(n,) if sparse sampling is being used,
            array, shape(S, R, C) or (R, C) if dense sampling
            the intensity values corresponding to the static image used to
            update the histogram. If sparse sampling is being used, then
            it is simply a sequence of scalars, obtained by sampling the
            static image at the `n` sampling points. If dense sampling is
            being used, then the intensities are given directly by the static
            image, whose shape is (S, R, C) in the 3D case or (R, C) in the
            2D case.
        moving_values: array, shape(n,) if sparse sampling is being used,
            array, shape(S, R, C) or (R, C) if dense sampling
            the intensity values corresponding to the moving image used to
            update the histogram. If sparse sampling is being used, then
            it is simply a sequence of scalars, obtained by sampling the
            moving image at the `n` sampling points (mapped to the moving
            space by the current affine transform). If dense sampling is
            being used, then the intensities are given by the moving imaged
            linearly transformed towards the static image by the current
            affine, which results in an image of the same shape as the
            static image.
        """
        static_values = None
        moving_values = None
        if self.sampling_proportion is None:  # Dense case
            # Static intensities are used as-is; the moving image is warped
            # onto the static grid by the current affine map.
            static_values = self.static
            moving_values = self.affine_map.transform(self.moving)
            self.histogram.update_pdfs_dense(static_values, moving_values)
        else:  # Sparse case
            # Map the cached world-space sample points directly onto the
            # moving grid: world -> moving world (affine) -> moving grid.
            sp_to_moving = self.moving_world2grid.dot(self.affine_map.affine)
            pts = sp_to_moving.dot(self.samples.T).T  # Points on moving grid
            pts = pts[..., :self.dim]
            self.moving_vals, inside = self.interp_method(self.moving, pts)
            self.moving_vals = np.array(self.moving_vals)
            static_values = self.static_vals
            moving_values = self.moving_vals
            self.histogram.update_pdfs_sparse(static_values, moving_values)
        return static_values, moving_values
    def _update_mutual_information(self, params, update_gradient=True):
        r""" Updates marginal and joint distributions and the joint gradient

        The distributions are updated according to the static and transformed
        images. The transformed image is precisely the moving image after
        transforming it by the transform defined by the `params` parameters.

        The gradient of the joint PDF is computed only if update_gradient
        is True.

        Parameters
        ----------
        params : array, shape (n,)
            the parameter vector of the transform currently used by the
            metric (the transform name is provided when self.setup is
            called), n is the number of parameters of the transform
        update_gradient : Boolean, optional
            if True, the gradient of the joint PDF will also be computed,
            otherwise, only the marginal and joint PDFs will be computed.
            The default is True.
        """
        # Get the matrix associated with the `params` parameter vector
        current_affine = self.transform.param_to_matrix(params)
        # Get the static-to-prealigned matrix (only needed for the MI
        # gradient)
        static2prealigned = self.static_grid2world
        if self.starting_affine is not None:
            # Compose the pre-alignment into the current transform.
            current_affine = current_affine.dot(self.starting_affine)
            static2prealigned = self.starting_affine.dot(static2prealigned)
        self.affine_map.set_affine(current_affine)
        # Update the histogram with the current joint intensities
        static_values, moving_values = self._update_histogram()
        H = self.histogram  # Shortcut to `self.histogram`
        grad = None  # Buffer to write the MI gradient into (if needed)
        if update_gradient:
            grad = self.metric_grad
            # Compute the gradient of the joint PDF w.r.t. parameters
            if self.sampling_proportion is None:  # Dense case
                # Compute the gradient of moving img. at physical points
                # associated with the >>static image's grid<< cells
                # The image gradient must be eval. at current moved points
                grid_to_world = current_affine.dot(self.static_grid2world)
                mgrad, inside = vf.gradient(self.moving,
                                            self.moving_world2grid,
                                            self.moving_spacing,
                                            self.static.shape,
                                            grid_to_world)
                # The Jacobian must be evaluated at the pre-aligned points
                H.update_gradient_dense(
                    params,
                    self.transform,
                    static_values,
                    moving_values,
                    static2prealigned,
                    mgrad)
            else:  # Sparse case
                # Compute the gradient of moving at the sampling points
                # which are already given in physical space coordinates
                pts = current_affine.dot(self.samples.T).T  # Moved points
                mgrad, inside = vf.sparse_gradient(self.moving,
                                                   self.moving_world2grid,
                                                   self.moving_spacing,
                                                   pts)
                # The Jacobian must be evaluated at the pre-aligned points
                pts = self.samples_prealigned[..., :self.dim]
                H.update_gradient_sparse(params, self.transform,
                                         static_values, moving_values,
                                         pts, mgrad)
        # Call the cythonized MI computation with self.histogram fields
        self.metric_val = compute_parzen_mi(H.joint, H.joint_grad,
                                            H.smarginal, H.mmarginal,
                                            grad)
def distance(self, params):
r""" Numeric value of the negative Mutual Information
We need to change the sign so we can use standard minimization
algorithms.
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
neg_mi : float
the negative mutual information of the input images after
transforming the moving image by the currently set transform
with `params` parameters
"""
try:
self._update_mutual_information(params, False)
except (AffineInversionError, AffineInvalidValuesError):
return np.inf
return -1 * self.metric_val
def gradient(self, params):
r""" Numeric value of the metric's gradient at the given parameters
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
grad : array, shape (n,)
the gradient of the negative Mutual Information
"""
try:
self._update_mutual_information(params, True)
except (AffineInversionError, AffineInvalidValuesError):
return 0 * self.metric_grad
return -1 * self.metric_grad
def distance_and_gradient(self, params):
r""" Numeric value of the metric and its gradient at given parameters
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
neg_mi : float
the negative mutual information of the input images after
transforming the moving image by the currently set transform
with `params` parameters
neg_mi_grad : array, shape (n,)
the gradient of the negative Mutual Information
"""
try:
self._update_mutual_information(params, True)
except (AffineInversionError, AffineInvalidValuesError):
return np.inf, 0 * self.metric_grad
return -1 * self.metric_val, -1 * self.metric_grad
class AffineRegistration(object):
    """Multi-resolution, gradient-based affine image registration driver.

    Builds a scale space for the static and moving images and, iterating
    from the coarsest to the finest resolution, minimizes the configured
    similarity metric with a SciPy optimizer, composing the per-level
    solutions into a single affine transform.
    """
    def __init__(self,
                 metric=None,
                 level_iters=None,
                 sigmas=None,
                 factors=None,
                 method='L-BFGS-B',
                 ss_sigma_factor=None,
                 options=None,
                 verbosity=VerbosityLevels.STATUS):
        """ Initializes an instance of the AffineRegistration class
        Parameters
        ----------
        metric : None or object, optional
            an instance of a metric. The default is None, implying
            the Mutual Information metric with default settings.
        level_iters : sequence, optional
            the number of iterations at each scale of the scale space.
            `level_iters[0]` corresponds to the coarsest scale,
            `level_iters[-1]` the finest, where n is the length of the
            sequence. By default, a 3-level scale space with iterations
            sequence equal to [10000, 1000, 100] will be used.
        sigmas : sequence of floats, optional
            custom smoothing parameter to build the scale space (one parameter
            for each scale). By default, the sequence of sigmas will be
            [3, 1, 0].
        factors : sequence of floats, optional
            custom scale factors to build the scale space (one factor for each
            scale). By default, the sequence of factors will be [4, 2, 1].
        method : string, optional
            optimization method to be used. If Scipy version < 0.12, then
            only L-BFGS-B is available. Otherwise, `method` can be any
            gradient-based method available in `dipy.core.Optimize`: CG, BFGS,
            Newton-CG, dogleg or trust-ncg.
            The default is 'L-BFGS-B'.
        ss_sigma_factor : float, optional
            If None, this parameter is not used and an isotropic scale
            space with the given `factors` and `sigmas` will be built.
            If not None, an anisotropic scale space will be used by
            automatically selecting the smoothing sigmas along each axis
            according to the voxel dimensions of the given image.
            The `ss_sigma_factor` is used to scale the automatically computed
            sigmas. For example, in the isotropic case, the sigma of the
            kernel will be $factor * (2 ^ i)$ where
            $i = 1, 2, ..., n_scales - 1$ is the scale (the finest resolution
            image $i=0$ is never smoothed). The default is None.
        options : dict, optional
            extra optimization options. The default is None, implying
            no extra options are passed to the optimizer.
        """
        self.metric = metric
        if self.metric is None:
            # Default to Mutual Information with its default settings.
            self.metric = MutualInformationMetric()
        if level_iters is None:
            level_iters = [10000, 1000, 100]
        self.level_iters = level_iters
        self.levels = len(level_iters)
        if self.levels == 0:
            raise ValueError('The iterations sequence cannot be empty')
        self.options = options
        self.method = method
        if ss_sigma_factor is not None:
            # Anisotropic scale space: sigmas are derived per-axis from the
            # image spacing in _init_optimizer; `factors`/`sigmas` are not
            # stored in this mode.
            self.use_isotropic = False
            self.ss_sigma_factor = ss_sigma_factor
        else:
            # Isotropic scale space built from explicit factors and sigmas.
            self.use_isotropic = True
            if factors is None:
                factors = [4, 2, 1]
            if sigmas is None:
                sigmas = [3, 1, 0]
            self.factors = factors
            self.sigmas = sigmas
        self.verbosity = verbosity
    # Separately add a string that tells about the verbosity kwarg. This needs
    # to be separate, because it is set as a module-wide option in __init__:
    docstring_addendum = \
        """verbosity: int (one of {0, 1, 2, 3}), optional
            Set the verbosity level of the algorithm:
            0 : do not print anything
            1 : print information about the current status of the algorithm
            2 : print high level information of the components involved in
                the registration that can be used to detect a failing
                component.
            3 : print as much information as possible to isolate the cause
                of a bug.
            Default: % s
        """ % VerbosityLevels.STATUS
    # Append the verbosity documentation to __init__'s docstring at class
    # creation time so the rendered docs show the complete parameter list.
    __init__.__doc__ = __init__.__doc__ + docstring_addendum
    def _init_optimizer(self, static, moving, transform, params0,
                        static_grid2world, moving_grid2world,
                        starting_affine):
        r"""Initializes the registration optimizer
        Initializes the optimizer by computing the scale space of the input
        images
        Parameters
        ----------
        static : array, shape (S, R, C) or (R, C)
            the image to be used as reference during optimization.
        moving : array, shape (S', R', C') or (R', C')
            the image to be used as "moving" during optimization. The
            dimensions of the static (S, R, C) and moving (S', R', C') images
            do not need to be the same.
        transform : instance of Transform
            the transformation with respect to whose parameters the gradient
            must be computed
        params0 : array, shape (n,)
            parameters from which to start the optimization. If None, the
            optimization will start at the identity transform. n is the
            number of parameters of the specified transformation.
        static_grid2world : array, shape (dim+1, dim+1)
            the voxel-to-space transformation associated with the static image
        moving_grid2world : array, shape (dim+1, dim+1)
            the voxel-to-space transformation associated with the moving image
        starting_affine : string, or matrix, or None
            If string:
                'mass': align centers of gravity
                'voxel-origin': align physical coordinates of voxel (0,0,0)
                'centers': align physical coordinates of central voxels
            If matrix:
                array, shape (dim+1, dim+1)
            If None:
                Start from identity
        """
        self.dim = len(static.shape)
        self.transform = transform
        n = transform.get_number_of_parameters()
        self.nparams = n
        if params0 is None:
            params0 = self.transform.get_identity_parameters()
        self.params0 = params0
        if starting_affine is None:
            self.starting_affine = np.eye(self.dim + 1)
        elif isinstance(starting_affine, str):
            # Named strategies compute a translation-only pre-alignment.
            if starting_affine == 'mass':
                affine_map = transform_centers_of_mass(static,
                                                       static_grid2world,
                                                       moving,
                                                       moving_grid2world)
                self.starting_affine = affine_map.affine
            elif starting_affine == 'voxel-origin':
                affine_map = transform_origins(static, static_grid2world,
                                               moving, moving_grid2world)
                self.starting_affine = affine_map.affine
            elif starting_affine == 'centers':
                affine_map = transform_geometric_centers(static,
                                                         static_grid2world,
                                                         moving,
                                                         moving_grid2world)
                self.starting_affine = affine_map.affine
            else:
                raise ValueError('Invalid starting_affine strategy')
        elif (isinstance(starting_affine, np.ndarray) and
              starting_affine.shape >= (self.dim, self.dim + 1)):
            # NOTE(review): tuple comparison is lexicographic, so this
            # accepts any shape not lexicographically smaller than
            # (dim, dim + 1) -- verify this is the intended shape check.
            self.starting_affine = starting_affine
        else:
            raise ValueError('Invalid starting_affine matrix')
        # Extract information from affine matrices to create the scale space
        static_direction, static_spacing = \
            get_direction_and_spacings(static_grid2world, self.dim)
        moving_direction, moving_spacing = \
            get_direction_and_spacings(moving_grid2world, self.dim)
        # Min-max normalize both images to [0, 1]. Assumes non-constant
        # images; a constant image would divide by zero -- TODO confirm
        # upstream validation.
        static = ((static.astype(np.float64) - static.min()) /
                  (static.max() - static.min()))
        moving = ((moving.astype(np.float64) - moving.min()) /
                  (moving.max() - moving.min()))
        # Build the scale space of the input images
        if self.use_isotropic:
            self.moving_ss = IsotropicScaleSpace(moving, self.factors,
                                                 self.sigmas,
                                                 moving_grid2world,
                                                 moving_spacing, False)
            self.static_ss = IsotropicScaleSpace(static, self.factors,
                                                 self.sigmas,
                                                 static_grid2world,
                                                 static_spacing, False)
        else:
            self.moving_ss = ScaleSpace(moving, self.levels, moving_grid2world,
                                        moving_spacing, self.ss_sigma_factor,
                                        False)
            self.static_ss = ScaleSpace(static, self.levels, static_grid2world,
                                        static_spacing, self.ss_sigma_factor,
                                        False)
    def optimize(self, static, moving, transform, params0,
                 static_grid2world=None, moving_grid2world=None,
                 starting_affine=None):
        r''' Starts the optimization process
        Parameters
        ----------
        static : array, shape (S, R, C) or (R, C)
            the image to be used as reference during optimization.
        moving : array, shape (S', R', C') or (R', C')
            the image to be used as "moving" during optimization. It is
            necessary to pre-align the moving image to ensure its domain
            lies inside the domain of the deformation fields. This is assumed
            to be accomplished by "pre-aligning" the moving image towards the
            static using an affine transformation given by the
            'starting_affine' matrix
        transform : instance of Transform
            the transformation with respect to whose parameters the gradient
            must be computed
        params0 : array, shape (n,)
            parameters from which to start the optimization. If None, the
            optimization will start at the identity transform. n is the
            number of parameters of the specified transformation.
        static_grid2world : array, shape (dim+1, dim+1), optional
            the voxel-to-space transformation associated with the static
            image. The default is None, implying the transform is the
            identity.
        moving_grid2world : array, shape (dim+1, dim+1), optional
            the voxel-to-space transformation associated with the moving
            image. The default is None, implying the transform is the
            identity.
        starting_affine : string, or matrix, or None, optional
            If string:
                'mass': align centers of gravity
                'voxel-origin': align physical coordinates of voxel (0,0,0)
                'centers': align physical coordinates of central voxels
            If matrix:
                array, shape (dim+1, dim+1).
            If None:
                Start from identity.
            The default is None.
        Returns
        -------
        affine_map : instance of AffineMap
            the affine resulting affine transformation
        '''
        self._init_optimizer(static, moving, transform, params0,
                             static_grid2world, moving_grid2world,
                             starting_affine)
        del starting_affine  # Now we must refer to self.starting_affine
        # Multi-resolution iterations
        original_static_shape = self.static_ss.get_image(0).shape
        original_static_grid2world = self.static_ss.get_affine(0)
        original_moving_shape = self.moving_ss.get_image(0).shape
        original_moving_grid2world = self.moving_ss.get_affine(0)
        affine_map = AffineMap(None,
                               original_static_shape,
                               original_static_grid2world,
                               original_moving_shape,
                               original_moving_grid2world)
        # Iterate from the coarsest level (levels - 1) down to the finest (0)
        for level in range(self.levels - 1, -1, -1):
            self.current_level = level
            # level_iters[0] belongs to the coarsest level, hence the
            # reversed indexing.
            max_iter = self.level_iters[-1 - level]
            if self.verbosity >= VerbosityLevels.STATUS:
                print('Optimizing level %d [max iter: %d]' % (level, max_iter))
            # Resample the smooth static image to the shape of this level
            smooth_static = self.static_ss.get_image(level)
            current_static_shape = self.static_ss.get_domain_shape(level)
            current_static_grid2world = self.static_ss.get_affine(level)
            current_affine_map = AffineMap(None,
                                           current_static_shape,
                                           current_static_grid2world,
                                           original_static_shape,
                                           original_static_grid2world)
            current_static = current_affine_map.transform(smooth_static)
            # The moving image is full resolution
            current_moving_grid2world = original_moving_grid2world
            current_moving = self.moving_ss.get_image(level)
            # Prepare the metric for iterations at this resolution
            self.metric.setup(transform, current_static, current_moving,
                              current_static_grid2world,
                              current_moving_grid2world, self.starting_affine)
            # Optimize this level
            if self.options is None:
                # NOTE(review): this lazily-created dict is stored on self
                # and mutated below with the per-level iteration cap, so it
                # persists across levels and across optimize() calls.
                self.options = {'gtol': 1e-4,
                                'disp': False}
            if self.method == 'L-BFGS-B':
                self.options['maxfun'] = max_iter
            else:
                self.options['maxiter'] = max_iter
            if SCIPY_LESS_0_12:
                # Older versions don't expect value and gradient from
                # the same function
                opt = Optimizer(self.metric.distance, self.params0,
                                method=self.method, jac=self.metric.gradient,
                                options=self.options)
            else:
                opt = Optimizer(self.metric.distance_and_gradient,
                                self.params0,
                                method=self.method, jac=True,
                                options=self.options)
            params = opt.xopt
            # Update starting_affine matrix with optimal parameters
            T = self.transform.param_to_matrix(params)
            self.starting_affine = T.dot(self.starting_affine)
            # Start next iteration at identity
            self.params0 = self.transform.get_identity_parameters()
        affine_map.set_affine(self.starting_affine)
        return affine_map
def align_centers_of_mass(static, static_grid2world,
                          moving, moving_grid2world):
    """Deprecated alias; use ``transform_centers_of_mass`` instead."""
    warn("This function is deprecated please use"
         " dipy.align.imaffine.transform_centers_of_mass instead.")
    return transform_centers_of_mass(static, static_grid2world,
                                     moving, moving_grid2world)
def align_geometric_centers(static, static_grid2world,
                            moving, moving_grid2world):
    """Deprecated alias; use ``transform_geometric_centers`` instead."""
    warn("This function is deprecated please use"
         " dipy.align.imaffine.transform_geometric_centers instead.")
    return transform_geometric_centers(static, static_grid2world,
                                       moving, moving_grid2world)
def align_origins(static, static_grid2world,
                  moving, moving_grid2world):
    """Deprecated alias; use ``transform_origins`` instead."""
    warn("This function is deprecated please use"
         " dipy.align.imaffine.transform_origins instead.")
    return transform_origins(static, static_grid2world,
                             moving, moving_grid2world)
def transform_centers_of_mass(static, static_grid2world,
                              moving, moving_grid2world):
    r""" Transformation to align the center of mass of the input images

    Parameters
    ----------
    static : array, shape (S, R, C)
        static image
    static_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the static image. If None, the
        identity is used.
    moving : array, shape (S, R, C)
        moving image
    moving_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the moving image. If None, the
        identity is used.

    Returns
    -------
    affine_map : instance of AffineMap
        the affine transformation (translation only, in this case) aligning
        the center of mass of the moving image towards the one of the static
        image
    """
    dim = len(static.shape)
    if static_grid2world is None:
        static_grid2world = np.eye(dim + 1)
    if moving_grid2world is None:
        moving_grid2world = np.eye(dim + 1)
    # BUGFIX: call `center_of_mass` from the top-level `scipy.ndimage`
    # namespace -- the `scipy.ndimage.measurements` sub-namespace was
    # deprecated (SciPy 1.8) and later removed, so the old spelling fails
    # on modern SciPy. The function itself is unchanged.
    # Centers of mass, mapped to homogeneous world coordinates.
    c_static = ndimage.center_of_mass(np.array(static))
    c_static = static_grid2world.dot(c_static + (1,))
    c_moving = ndimage.center_of_mass(np.array(moving))
    c_moving = moving_grid2world.dot(c_moving + (1,))
    # Translation-only affine carrying the moving center onto the static one.
    transform = np.eye(dim + 1)
    transform[:dim, dim] = (c_moving - c_static)[:dim]
    affine_map = AffineMap(transform,
                           static.shape, static_grid2world,
                           moving.shape, moving_grid2world)
    return affine_map
def transform_geometric_centers(static, static_grid2world,
                                moving, moving_grid2world):
    r""" Transformation to align the geometric center of the input images

    With "geometric center" of a volume we mean the physical coordinates of
    its central voxel

    Parameters
    ----------
    static : array, shape (S, R, C)
        static image
    static_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the static image
    moving : array, shape (S, R, C)
        moving image
    moving_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the moving image

    Returns
    -------
    affine_map : instance of AffineMap
        the affine transformation (translation only, in this case) aligning
        the geometric center of the moving image towards the one of the static
        image
    """
    dim = len(static.shape)
    # Missing grid-to-world matrices default to the identity.
    if static_grid2world is None:
        static_grid2world = np.eye(dim + 1)
    if moving_grid2world is None:
        moving_grid2world = np.eye(dim + 1)
    # Central voxel of each grid, mapped to homogeneous world coordinates.
    mid_static = tuple(np.array(static.shape, dtype=np.float64) * 0.5)
    mid_static = static_grid2world.dot(mid_static + (1,))
    mid_moving = tuple(np.array(moving.shape, dtype=np.float64) * 0.5)
    mid_moving = moving_grid2world.dot(mid_moving + (1,))
    # Translation-only affine carrying the moving center onto the static one.
    transform = np.eye(dim + 1)
    transform[:dim, dim] = (mid_moving - mid_static)[:dim]
    return AffineMap(transform,
                     static.shape, static_grid2world,
                     moving.shape, moving_grid2world)
def transform_origins(static, static_grid2world,
                      moving, moving_grid2world):
    r""" Transformation to align the origins of the input images

    With "origin" of a volume we mean the physical coordinates of
    voxel (0,0,0)

    Parameters
    ----------
    static : array, shape (S, R, C)
        static image
    static_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the static image
    moving : array, shape (S, R, C)
        moving image
    moving_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the moving image

    Returns
    -------
    affine_map : instance of AffineMap
        the affine transformation (translation only, in this case) aligning
        the origin of the moving image towards the one of the static
        image
    """
    dim = len(static.shape)
    # Missing grid-to-world matrices default to the identity.
    if static_grid2world is None:
        static_grid2world = np.eye(dim + 1)
    if moving_grid2world is None:
        moving_grid2world = np.eye(dim + 1)
    # The last column of a grid-to-world matrix holds the world coordinates
    # of voxel (0, 0, 0).
    origin_static = static_grid2world[:dim, dim]
    origin_moving = moving_grid2world[:dim, dim]
    # Translation-only affine carrying the moving origin onto the static one.
    transform = np.eye(dim + 1)
    transform[:dim, dim] = (origin_moving - origin_static)[:dim]
    return AffineMap(transform,
                     static.shape, static_grid2world,
                     moving.shape, moving_grid2world)
| {
"repo_name": "nilgoyyou/dipy",
"path": "dipy/align/imaffine.py",
"copies": "2",
"size": "57192",
"license": "bsd-3-clause",
"hash": -2217354619437348600,
"line_mean": 45.6112469438,
"line_max": 105,
"alpha_frac": 0.5965169954,
"autogenerated": false,
"ratio": 4.5136137637124145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002056667124843746,
"num_lines": 1227
} |
"""Affine measure"""
import numpy as np
from py_stringmatching import utils
from six.moves import xrange
from py_stringmatching.similarity_measure.sequence_similarity_measure import \
SequenceSimilarityMeasure
def sim_ident(char1, char2):
    """Identity similarity: 1 when the two characters match, else 0."""
    return 1 if char1 == char2 else 0
class Affine(SequenceSimilarityMeasure):
    """Computes the affine gap score between two strings.

    The affine gap measure is an extension of the Needleman-Wunsch measure
    that handles the longer gaps more gracefully. For more information refer
    to the string matching chapter in the DI book ("Principles of Data
    Integration").

    Parameters:
        gap_start (float): Cost for the gap at the start (defaults to 1)
        gap_continuation (float): Cost for the gap continuation
                                  (defaults to 0.5)
        sim_func (function): Function computing similarity score between two
                             chars, represented as strings
                             (defaults to identity).
    """

    def __init__(self, gap_start=1, gap_continuation=0.5, sim_func=sim_ident):
        self.gap_start = gap_start
        self.gap_continuation = gap_continuation
        self.sim_func = sim_func
        super(Affine, self).__init__()

    def get_raw_score(self, string1, string2):
        """Computes the affine gap score between the two input strings.

        Args:
            string1,string2 (str) : Input strings

        Returns:
            Affine gap score (float)

        Raises:
            TypeError : If the inputs are not strings or if one of the inputs
                is None.

        Examples:
            >>> aff = Affine()
            >>> aff.get_raw_score('dva', 'deeva')
            1.5
            >>> aff = Affine(gap_start=2, gap_continuation=0.5)
            >>> aff.get_raw_score('dva', 'deeve')
            -0.5
            >>> aff = Affine(gap_continuation=0.2, sim_func=lambda s1, s2: (int(1 if s1 == s2 else 0)))
            >>> aff.get_raw_score('AAAGAATTCA', 'AAATCA')
            4.4
        """
        # input validations
        utils.sim_check_for_none(string1, string2)
        utils.tok_check_for_string_input(string1, string2)

        # if one of the strings is empty return 0
        if utils.sim_check_for_empty(string1, string2):
            return 0

        # Gaps subtract from the score, so negate the configured costs.
        gap_start = -self.gap_start
        gap_continuation = -self.gap_continuation

        # BUGFIX: use the builtin ``float`` dtype. ``np.float`` was only an
        # alias for the builtin; it was deprecated in NumPy 1.20 and removed
        # in NumPy 1.24, so the old spelling raises AttributeError on modern
        # NumPy installations. (``range`` replaces ``six.moves.xrange`` for
        # the same Python-3-only reason; both are behaviorally identical.)
        m = np.zeros((len(string1) + 1, len(string2) + 1), dtype=float)
        x = np.zeros((len(string1) + 1, len(string2) + 1), dtype=float)
        y = np.zeros((len(string1) + 1, len(string2) + 1), dtype=float)

        # DP initialization: first column (string2 prefix is empty)
        for i in range(1, len(string1) + 1):
            m[i][0] = -float("inf")
            x[i][0] = gap_start + (i - 1) * gap_continuation
            y[i][0] = -float("inf")

        # DP initialization: first row (string1 prefix is empty)
        for j in range(1, len(string2) + 1):
            m[0][j] = -float("inf")
            x[0][j] = -float("inf")
            y[0][j] = gap_start + (j - 1) * gap_continuation

        # affine gap calculation using DP
        for i in range(1, len(string1) + 1):
            for j in range(1, len(string2) + 1):
                # best score between x_1....x_i and y_1....y_j
                # given that x_i is aligned to y_j
                m[i][j] = (self.sim_func(string1[i - 1], string2[j - 1]) +
                           max(m[i - 1][j - 1], x[i - 1][j - 1],
                               y[i - 1][j - 1]))
                # the best score given that x_i is aligned to a gap
                x[i][j] = max(gap_start + m[i - 1][j],
                              gap_continuation + x[i - 1][j])
                # the best score given that y_j is aligned to a gap
                y[i][j] = max(gap_start + m[i][j - 1],
                              gap_continuation + y[i][j - 1])

        # Best of the three alignment states at the full-length corner.
        return max(m[len(string1)][len(string2)], x[len(string1)][len(string2)],
                   y[len(string1)][len(string2)])

    def get_gap_start(self):
        """
        Get gap start cost

        Returns:
            gap start cost (float)
        """
        return self.gap_start

    def get_gap_continuation(self):
        """
        Get gap continuation cost

        Returns:
            gap continuation cost (float)
        """
        return self.gap_continuation

    def get_sim_func(self):
        """
        Get similarity function

        Returns:
            similarity function (function)
        """
        return self.sim_func

    def set_gap_start(self, gap_start):
        """
        Set gap start cost

        Args:
            gap_start (float): Cost for the gap at the start
        """
        self.gap_start = gap_start
        return True

    def set_gap_continuation(self, gap_continuation):
        """
        Set gap continuation cost

        Args:
            gap_continuation (float): Cost for the gap continuation
        """
        self.gap_continuation = gap_continuation
        return True

    def set_sim_func(self, sim_func):
        """
        Set similarity function

        Args:
            sim_func (function): Function computing similarity score between two chars, represented as strings.
        """
        self.sim_func = sim_func
        return True
| {
"repo_name": "Anson-Doan/py_stringmatching",
"path": "py_stringmatching/similarity_measure/affine.py",
"copies": "1",
"size": "5192",
"license": "bsd-3-clause",
"hash": -6131511115504364000,
"line_mean": 31.6540880503,
"line_max": 187,
"alpha_frac": 0.5381355932,
"autogenerated": false,
"ratio": 3.845925925925926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4884061519125926,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.