text stringlengths 0 1.05M | meta dict |
|---|---|
"""A library that provides a Python interface to the Futbol24 API"""
import json
import re
# static type-checking
from typing import Dict, List, Optional
from urllib.parse import urlparse, urlunparse, urlencode

import requests
from lxml import html

from futbol24.models import Country, Competition, League, Team, Match, Matches
from futbol24.error import Futbol24Error
class Api(object):
    """A python interface into the Futbol24 API.

    All public methods perform an HTTP request against the Futbol24 API (or,
    for :meth:`get_team_info`, scrape the public website) and convert the JSON
    payload into model objects from ``futbol24.models``.
    """

    _API_REALM = 'Futbol24 API'

    def __init__(self,
                 input_encoding: str = 'utf-8',
                 request_headers: Optional[Dict[str, str]] = None,
                 base_url: Optional[str] = None,
                 user_agent: Optional[str] = None,
                 add_compat_f24_headers: bool = False,
                 debug_http: bool = False,
                 timeout: Optional[int] = None,
                 language: Optional[str] = None):
        """Create a new API client.

        Args:
            input_encoding: Encoding used to decode HTTP response bodies.
            request_headers: Optional extra HTTP headers sent with every request.
            base_url: API endpoint; defaults to the public Futbol24 endpoint.
            user_agent: Custom ``User-Agent`` value; a compatible default is used
                when omitted.
            add_compat_f24_headers: Also send the ``F24-*`` compatibility headers
                required for server-side authorization.
            debug_http: Enable verbose HTTP wire logging via http.client/logging.
            timeout: Socket timeout in seconds passed to ``requests``.
            language: Two-letter language code; defaults to ``'en'``.
        """
        self._input_encoding: str = input_encoding
        self._debug_http: bool = debug_http
        self._timeout: Optional[int] = timeout
        self._language: Optional[str] = language
        self._cookies: Dict[str, str] = {}

        self._initialize_default_parameters()
        self._initialize_request_headers(request_headers)
        self._initialize_user_agent(user_agent)
        self._initialize_f24_headers(add_compat_f24_headers)

        if base_url is None:
            self.base_url = 'http://api.futbol24.gluak.com'
        else:
            self.base_url = base_url

        if debug_http:
            import logging
            import http.client

            http.client.HTTPConnection.debuglevel = 1
            # Logging must be initialized, otherwise nothing from requests is shown.
            logging.basicConfig()
            logging.getLogger().setLevel(logging.DEBUG)
            requests_log = logging.getLogger("requests.packages.urllib3")
            requests_log.setLevel(logging.DEBUG)
            requests_log.propagate = True

    def _initialize_default_parameters(self):
        """Reset the default request parameters (currently none)."""
        self._default_params = {}

    def _initialize_request_headers(self, request_headers: Optional[Dict[str, str]]):
        """Install caller-supplied headers (if any) and force JSON responses."""
        self._request_headers = request_headers if request_headers else {}
        self._request_headers['Accept'] = 'application/json'

    def _initialize_user_agent(self, user_agent: Optional[str] = None):
        """Set the User-Agent header, falling back to a known-compatible value."""
        if user_agent is None:
            user_agent = 'Futbol24 2.30/61 (compatible)'
        self.set_user_agent(user_agent)

    def _initialize_f24_headers(self, add_compatibility_headers: bool = False):
        """Set the F24 HTTP headers that will be sent to the server.

        They are required to allow authorization on server side.
        """
        if not self._language:
            self._language = 'en'

        # NOTE(review): the cookie value repeats the 'f24-asi=' prefix inside the
        # value itself — this mirrors what the mobile client sends; confirm before
        # changing.
        self._cookies['f24-asi'] = 'f24-asi=8926cbf3b3a61be64614147d136d389f3b05a64391e2046ae2491ad' \
                                   '152ebd2be2409e754b9aea32dc43e954a008a2fe16d4dee014a9782cc0b542edc6bc55cdd'

        if add_compatibility_headers:
            self._request_headers['F24-App-Version'] = '2.30'
            self._request_headers['F24-Device-Platform'] = 'android'
            self._request_headers['F24-App-Id'] = '1'
            self._request_headers['F24-Device-Language'] = 'slk'
            self._request_headers['F24-Session-Auth'] = self._cookies['f24-asi']

    def set_user_agent(self, user_agent: str):
        """Override the default user agent.

        Args:
            user_agent:
                A string that should be sent to the server as the user-agent.
        """
        self._request_headers['User-Agent'] = user_agent

    @staticmethod
    def _result_list(data: dict, section: str) -> list:
        """Return the ``result.<section>.list`` array of a decoded payload ([] if absent)."""
        return data.get('result', {}).get(section, {}).get('list', [])

    def get_countries(self) -> 'List[Country]':
        """Return all countries known to the API."""
        url = '%s/v2/countries' % self.base_url
        resp = self._request_url(url, 'GET', data={})
        data = self._parse_and_check_http_response(resp, self._input_encoding)
        # The payload also carries 'time' (unix epoch of the data); it is ignored here.
        return [Country.new_from_json_dict(item) for item in self._result_list(data, 'countries')]

    def get_competitions(self) -> 'List[Competition]':
        """Return all competitions, each with its country reference resolved."""
        url = '%s/v2/competitions' % self.base_url
        resp = self._request_url(url, 'GET', data={})
        data = self._parse_and_check_http_response(resp, self._input_encoding)
        countries = [Country.new_from_json_dict(item) for item in self._result_list(data, 'countries')]
        return [self._map_competitions(item, countries) for item in self._result_list(data, 'competitions')]

    def get_teams(self) -> 'List[Team]':
        """Return all teams, each with its country reference resolved."""
        url = '%s/v2/teams' % self.base_url
        resp = self._request_url(url, 'GET', data={})
        data = self._parse_and_check_http_response(resp, self._input_encoding)
        countries = [Country.new_from_json_dict(item) for item in self._result_list(data, 'countries')]
        return [self._map_teams(item, countries) for item in self._result_list(data, 'teams')]

    # noinspection PyUnresolvedReferences
    def get_team_matches(self, team: 'Team') -> 'Matches':
        """Return all matches of *team*, with nested references resolved."""
        url = '{base_url}/v2/team/{team_id}/matches'.format(base_url=self.base_url, team_id=team.id)
        resp = self._request_url(url, 'GET', data={})
        data = self._parse_and_check_http_response(resp, self._input_encoding)
        return self._build_matches(data)

    # noinspection PyUnresolvedReferences
    def get_team_info(self, team: 'Team') -> Dict[str, tuple]:
        """Scrape goal statistics for *team* from the public website.

        There is no API endpoint for this data, so the HTML team page is
        fetched and parsed. Returns a mapping of minute range ->
        (percentage string, goal count).

        NOTE: the original annotation claimed ``-> str``; the method has always
        returned the dict produced by ``_parse_team_info_http_response``.
        """
        url = '{base_url}/team/{country}/{team_name}/'.format(base_url='https://www.futbol24.com',
                                                              country=self._replace_characters(team.country.name),
                                                              team_name=self._replace_characters(team.name))
        resp = self._request_url(url, 'GET', data={})
        return self._parse_team_info_http_response(resp, self._input_encoding)

    def get_daily_matches(self) -> 'Matches':
        """Return all matches scheduled for the current day."""
        url = '%s/v2/matches/day' % self.base_url
        resp = self._request_url(url, 'GET', data={})
        data = self._parse_and_check_http_response(resp, self._input_encoding)
        # The payload also carries 'status' (db update info) and 'range'
        # (date range of the matches); both are ignored here.
        return self._build_matches(data)

    def _build_matches(self, data: dict) -> 'Matches':
        """Assemble a Matches container from a decoded payload.

        Resolves the country -> competition -> league -> team reference chain
        shared by get_team_matches and get_daily_matches.
        """
        countries = [Country.new_from_json_dict(item) for item in self._result_list(data, 'countries')]
        competitions = [self._map_competitions(item, countries) for item in self._result_list(data, 'competitions')]
        leagues = [self._map_leagues(item, competitions) for item in self._result_list(data, 'leagues')]
        teams = [self._map_teams(item, countries) for item in self._result_list(data, 'teams')]
        matches = [self._map_matches(item, leagues, teams) for item in self._result_list(data, 'matches')]
        return Matches(matches)

    def _request_url(self, url: str, method: str, data: Optional[Dict[str, str]] = None,
                     json_data: Optional[str] = None) -> 'requests.Response':
        """Request a url.

        Args:
            url:
                The web location we want to retrieve.
            method:
                Either 'POST' or 'GET'.
            data:
                A dict of (str, unicode) key/value pairs; query parameters for
                GET, form body for POST.
            json_data:
                JSON body for a POST request (mutually exclusive with data).

        Returns:
            A requests.Response object.

        Raises:
            Futbol24Error: for an unsupported method or a POST without a body.
                (The original code returned the int 0 in these cases, which
                crashed later with AttributeError when '.ok' was accessed.)
        """
        if method == 'POST':
            if data:
                return requests.post(url, headers=self._request_headers, cookies=self._cookies,
                                     data=data, timeout=self._timeout)
            if json_data:
                self._request_headers['Content-Type'] = 'application/json'
                return requests.post(url, headers=self._request_headers, cookies=self._cookies,
                                     json=json_data, timeout=self._timeout)
            raise Futbol24Error({'message': 'POST request requires either data or json_data'})
        if method == 'GET':
            url = self._build_url(url, extra_params=data)
            return requests.get(url, headers=self._request_headers, cookies=self._cookies, timeout=self._timeout)
        raise Futbol24Error({'message': "Unsupported HTTP method '{0}'".format(method)})

    def _build_url(self, url: str, path_elements: Optional[List[str]] = None,
                   extra_params: Optional[Dict[str, str]] = None) -> str:
        """Rebuild *url*, appending optional path elements and query parameters."""
        # Break url into constituent parts
        (scheme, netloc, path, params, query, fragment) = urlparse(url)

        # Add any additional path elements to the path
        if path_elements:
            # Filter out the path elements that have a value of None/empty
            filtered = [element for element in path_elements if element]
            if not path.endswith('/'):
                path += '/'
            path += '/'.join(filtered)

        # Add any additional query parameters to the query string
        if extra_params:
            extra_query = self._encode_parameters(extra_params)
            query = query + '&' + extra_query if query else extra_query

        # Return the rebuilt URL
        return urlunparse((scheme, netloc, path, params, query, fragment))

    @staticmethod
    def _parse_and_check_http_response(response: 'requests.Response', encoding: str = 'utf-8') -> dict:
        """Check an HTTP response from Futbol24 and decode its body as JSON.

        Raises:
            Futbol24Error: if the response status is an error or the body is
                not valid JSON.
        """
        if not response.ok:
            raise Futbol24Error({'message': "Error {0} {1}".format(response.status_code, response.reason)})

        json_data = response.content.decode(encoding)
        try:
            data = json.loads(json_data)
        # BUG FIX: the original 'except TypeError or json.JSONDecodeError'
        # evaluated to 'except TypeError' only; a tuple is required to catch both.
        except (TypeError, json.JSONDecodeError):
            raise Futbol24Error({'message': "Invalid JSON content"})
        return data

    @staticmethod
    def _encode_parameters(parameters: Optional[Dict[str, str]]) -> Optional[str]:
        """Return a string in key=value&key=value form.

        Values of None are not included in the output string.

        Args:
            parameters (dict): dictionary of query parameters to be converted
                into a URL-encoded string.

        Returns:
            A URL-encoded string in "key=value&key=value" form, or None when
            *parameters* is None.
        """
        if parameters is None:
            return None
        if not isinstance(parameters, dict):
            # Consistent error payload shape with the rest of the class.
            raise Futbol24Error({'message': "`parameters` must be a dict."})
        return urlencode({k: v for k, v in parameters.items() if v is not None})

    # noinspection PyUnresolvedReferences
    @staticmethod
    def _map_competitions(competition: Dict[str, str], countries: 'List[Country]') -> 'Competition':
        """Convert a raw competition dict, replacing country_id with its Country object."""
        competition: Competition = Competition.new_from_json_dict(competition)
        # Raises IndexError if the referenced country is absent from the payload.
        competition_country = [c for c in countries if c.id == competition.country_id][0]
        delattr(competition, 'country_id')
        setattr(competition, 'country', competition_country)
        return competition

    # noinspection PyUnresolvedReferences
    @staticmethod
    def _map_leagues(league: Dict[str, str], competitions: 'List[Competition]') -> 'League':
        """Convert a raw league dict, replacing competition_id with its Competition object."""
        league: League = League.new_from_json_dict(league)
        league_competition = [c for c in competitions if c.id == league.competition_id][0]
        delattr(league, 'competition_id')
        setattr(league, 'competition', league_competition)
        return league

    # noinspection PyUnresolvedReferences
    @staticmethod
    def _map_teams(team: Dict[str, str], countries: 'List[Country]') -> 'Team':
        """Convert a raw team dict, replacing country_id with its Country object."""
        team: Team = Team.new_from_json_dict(team)
        team_country = [c for c in countries if c.id == team.country_id][0]
        delattr(team, 'country_id')
        setattr(team, 'country', team_country)
        return team

    # noinspection PyUnresolvedReferences
    @staticmethod
    def _map_matches(match: Dict[str, str], leagues: 'List[League]', teams: 'List[Team]') -> 'Match':
        """Convert a raw match dict, resolving its league and both team references."""
        match: Match = Match.new_from_json_dict(match)
        match_league = [lg for lg in leagues if lg.id == match.league_id][0]
        delattr(match, 'league_id')
        setattr(match, 'league', match_league)

        home_team = [t for t in teams if t.id == match.home.get('team_id', -1)][0]
        del match.home['team_id']
        match.home['team'] = home_team

        guest_team = [t for t in teams if t.id == match.guest.get('team_id', -1)][0]
        del match.guest['team_id']
        match.guest['team'] = guest_team
        return match

    @staticmethod
    def _parse_team_info_http_response(response: 'requests.Response', encoding: str = 'utf-8') -> Dict[str, tuple]:
        """Parse the team info page returned by futbol24.com.

        The page is HTML; the 'Goals in minutes' table is extracted and
        converted into a mapping of minute range -> (percentage, goal count).
        """
        if not response.ok:
            raise Futbol24Error({'message': "Error {0} {1}".format(response.status_code, response.reason)})

        html_data = response.content.decode(encoding)
        goal_stats_html = re.search(r'Goals in minutes.+(?P<table><table.+</table>)',
                                    html_data, flags=re.DOTALL | re.MULTILINE)
        goals_in_minutes = {}
        if goal_stats_html:
            goal_table = html.fromstring(goal_stats_html.group('table'))
            for row in goal_table.xpath('.//tr'):
                min_range = row.xpath('./td[contains(@class, "under")]/text()')
                percent = row.xpath('./td[contains(@class, "percent")]/text()')
                goals = row.xpath('./td[contains(@class, "bold")]/text()')
                goals_in_minutes[str(min_range[0])] = (str(percent[0]), int(goals[0]))
        return goals_in_minutes

    @staticmethod
    def _replace_characters(text: str) -> str:
        """Slugify *text* for use in futbol24.com URLs.

        Spaces, slashes and dashes become '-', punctuation is dropped and
        accented letters are folded to ASCII.
        """
        translation_table = {
            ' ': '-',
            '/': '-',
            '\\': '-',
            '–': '-',
            '(': '',
            ')': '',
            '.': '',
            '\'': '',
            'º': '',
            '°': '',
            '‘': '',
            '’': '',
            '&': '',
            'á': 'a',
            'à': 'a',
            'ä': 'a',
            'â': 'a',
            'ã': 'a',
            'å': 'a',
            'ă': 'a',
            'ą': 'a',
            'Å': 'A',
            'Á': 'A',
            'Ä': 'A',
            'æ': 'ae',
            'ć': 'c',
            'č': 'c',
            'ç': 'c',
            'Ç': 'C',
            'Č': 'C',
            'đ': 'd',
            'ď': 'd',
            'ð': 'd',
            'Ď': 'D',
            'ë': 'e',
            'è': 'e',
            'é': 'e',
            'ê': 'e',
            'ě': 'e',
            'ė': 'e',
            'ę': 'e',
            'ə': '',
            # NOTE(review): 'É' maps to lowercase 'e' unlike the other capitals —
            # kept as-is to preserve existing URL behaviour; confirm intended.
            'É': 'e',
            'ğ': 'g',
            'ħ': 'h',
            'í': 'i',
            'Í': 'I',
            'ī': 'i',
            'ı': 'i',
            'î': 'i',
            'ï': 'i',
            'ì': 'i',
            'İ': 'I',
            'Î': 'I',
            'ĺ': 'l',
            'ľ': 'l',
            'ł': 'l',
            'Ł': 'L',
            'ň': 'n',
            'ń': 'n',
            'ñ': 'n',
            'ņ': 'n',
            'Ñ': 'N',
            'ø': 'o',
            'ö': 'o',
            'Ö': 'O',
            'ó': 'o',
            'õ': 'o',
            'ô': 'o',
            'ő': 'o',
            'Ø': 'O',
            'Ó': 'O',
            'œ': 'oe',
            'ŕ': 'r',
            'ř': 'r',
            'Ř': 'R',
            'ś': 's',
            'š': 's',
            'ş': 's',
            'ș': 's',
            'Ș': 'S',
            'Ş': 'S',
            'Ś': 'S',
            'Š': 'S',
            'ß': 'ss',
            'ť': 't',
            'ţ': 't',
            'ț': 't',
            'ü': 'u',
            'ú': 'u',
            'ů': 'u',
            'Ü': 'U',
            'ū': 'u',
            'Ú': 'U',
            'ų': 'u',
            'ý': 'y',
            'ž': 'z',
            'ż': 'z',
            'ź': 'z',
            'Ž': 'Z',
            'Ż': 'Z'
        }
        # str.translate performs all single-character replacements in one pass
        # instead of 100+ chained .replace() scans.
        return text.translate(str.maketrans(translation_table))
| {
"repo_name": "vladimir-zahradnik/futbol24-python",
"path": "futbol24/api.py",
"copies": "1",
"size": "22892",
"license": "apache-2.0",
"hash": 538868867375233500,
"line_mean": 36.8043117745,
"line_max": 119,
"alpha_frac": 0.5291717845,
"autogenerated": false,
"ratio": 3.673811442385173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4702983226885173,
"avg_score": null,
"num_lines": null
} |
"""A library that provides a Python interface to the GetResponse API"""
import requests
from collections import defaultdict
import json
from json.decoder import JSONDecodeError
class GetresponseClient:
    """
    Base class which performs the raw HTTP calls against the GetResponse API v3.
    http://apidocs.getresponse.com/v3
    """

    # Class-level defaults; real values are assigned per instance in __init__.
    API_ENDPOINT = 'https://api.getresponse.com/v3'
    API_KEY = None
    X_DOMAIN = None
    X_TIME_ZONE = None
    HEADERS = None

    def __init__(self, api_endpoint: str, api_key: str, x_domain: str = None, x_time_zone: str = None):
        """
        Initiation of Client object

        :param api_endpoint: API Endpoint - http://apidocs.getresponse.com/v3
               Usually either https://api.getresponse.com/v3 for normal GetResponse account or
               https://api3.getresponse360.[pl|com]/v3 for Getresponse 360
        :param api_key: API key, should be generated here - https://app.getresponse.com/manage_api.html
        :param x_domain: http://apidocs.getresponse.com/v3/configuration
               Account url for GetResponse 360 without http, https and www
        :param x_time_zone: TZ column from https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
               The default timezone in response data is UTC
        """
        self.API_ENDPOINT = api_endpoint
        self.API_KEY = api_key
        self.X_DOMAIN = x_domain
        self.X_TIME_ZONE = x_time_zone
        # Build the header dict once; X-DOMAIN is only sent for GetResponse 360.
        self.HEADERS = {'X-Auth-Token': 'api-key ' + self.API_KEY,
                        'Content-Type': 'application/json'}
        if x_domain:
            self.HEADERS['X-DOMAIN'] = self.X_DOMAIN

    def get(self, url: str):
        """Perform GET on *url* (relative to the endpoint) and return decoded JSON."""
        r = requests.get(self.API_ENDPOINT + url, headers=self.HEADERS)
        return r.json()

    def post(self, url: str, data: str):
        """Perform POST with a JSON-encoded body string.

        NOTE: the original annotation 'data: json' used the json *module* as a
        type; the argument is a JSON string produced by json.dumps.

        Returns decoded JSON when possible, otherwise the raw response text.
        """
        r = requests.post(self.API_ENDPOINT + url, data=data, headers=self.HEADERS)
        try:
            result = r.json()
        except JSONDecodeError:
            result = r.text
        return result

    def delete(self, url: str, data: str = None):
        """Perform DELETE (optionally with a body) and return the response text."""
        # requests treats data=None exactly like omitting the argument, so the
        # original two-branch version collapses to a single call.
        r = requests.delete(self.API_ENDPOINT + url, data=data, headers=self.HEADERS)
        return r.text
class Campaigns:
"""
Class represents campaigns section of API
http://apidocs.getresponse.com/v3/resources/campaigns
"""
def __init__(self, api_endpoint: str, api_key: str, x_domain: str = None, x_time_zone: str = None):
self._getresponse_client = GetresponseClient(api_endpoint=api_endpoint, api_key=api_key, x_domain=x_domain,
x_time_zone=x_time_zone)
def get_campaigns(self, query: list = None, sort: list = None, **kwargs):
"""
Get all campaigns within account
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.get.all
:param query: Used to search only resources that meets criteria. Can be:
- name
Should be passed like this: query = ['name=searched query', ..]
Examples:
query = ['name=VIP']
:param sort: Enable sorting using specified field (set as a key) and order (set as a value).
multiple fields to sort by can be used. Can be:
- name: asc or desc
- createdOn: asc or desc
Should be passed like this: sort = ['name=asc', ..]
Examples:
sort = ['name=asc','createdOn=desc']
query = ['name=asc']
:param kwargs:
- fields: List of fields that should be returned. Id is always returned. Fields should be separated by comma
- page: Specify which page of results return. :type: int
- perPage: Specify how many results per page should be returned :type: int
:return: JSON response
"""
url = str('/campaigns?')
if query:
for item in query:
query_data = str(item).split('=')
url = url + 'query[' + query_data[0] + ']=' + query_data[1] + '&'
if sort:
for item in sort:
sort_data = str(item).split('=')
url = url + 'sort[' + sort_data[0] + ']=' + sort_data[1] + '&'
if kwargs:
for key, value in kwargs.items():
url = url + str(key) + '=' + str(value) + '&'
url = url[:-1] # get rid of last &
r = self._getresponse_client.get(url)
return r
def get_campaign(self, campaign_id: str):
"""
Get campaign details by id
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.get
:param campaign_id: Id of campaign
:return: JSON response
"""
r = self._getresponse_client.get('/campaigns/' + campaign_id)
return r
@staticmethod
def _get_confirmation(from_field: dict, reply_to: dict, redirect_type: str, redirect_url: str = None):
"""
Subscription confirmation email settings
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.create
:param from_field: dict {"fromFieldId": 'xxx'} FromFieldId from from-fields resources
:param reply_to: dict {"fromFieldId": 'xxx'} FromFieldId from from-fields resources
:param redirect_type: What will happen after confirmation of email. Possible values: hosted (subscriber will stay on GetResponse website), customUrl (subscriber will be redirected to provided url)
:param redirect_url: Url where subscriber will be redirected if redirectType is set to customUrl
:return: dict
"""
if redirect_url:
response = {"fromField": from_field, "replyTo": reply_to, "redirectType": redirect_type,
"redirectUrl": redirect_url}
else:
response = {"fromField": from_field, "replyTo": reply_to, "redirectType": redirect_type}
return response
@staticmethod
def _get_profile(industry_tag_id: int, description: str, logo: str, logo_link_url: str, title: str):
"""
How campaign will be visible for subscribers
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.create
:param industry_tag_id: Category of content of campaign
:param description: Short description of campaign content, length 2-255
:param logo: Url of image displayed as campaign logo
:param logo_link_url: Url of link in campaign logo
:param title: Title of campaign, length 2-64
:return: dict
"""
response = {"industryTagId": industry_tag_id, "description": description, "logo": logo,
"logoLinkUrl": logo_link_url, "title": title}
return response
@staticmethod
def _get_postal(add_postal_to_messages: str, city: str, company_name: str, design: str, state: str, street: str,
zipcode: str):
"""
Postal address of your company
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.create
:param add_postal_to_messages: Should postal address be sent with all messages from campaign. (For US and Canada it's mandatory)
:param city: City
:param company_name: Company name
:param design: How postal address would be designed in emails. Avaiable fields definitions: [[name]], [[address]], [[city]], [[state]] [[zip]], [[country]]
:param state: State
:param street: Street
:param zipcode: Zip code
:return: dict
"""
response = {"addPostalToMessages": add_postal_to_messages, "city": city, "companyName": company_name,
"design": design, "state": state, "street": street, "zipCode": zipcode}
return response
@staticmethod
def _get_option_types(email: str, import_type: str, webform: str):
"""
How subscribers will be added to list - with double (with confirmation) or single optin
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.create
:param email: Optin type for subscriptions via email. Possible values: single, double
:param import_type: Optin type for subscriptions via import. Possible values: single, double
:param webform: Optin type for subscriptions via webforms and landing pages. Possible values: single, double
:return: dict
"""
response = {"email": email, "import": import_type, "webform": webform}
return response
@staticmethod
def _get_subscription_notifications(status: str, recipients: list):
"""
Notifications for each subscribed email to Your list
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.create
:param status: Are notifications enabled. Possible values: enabled, disabled
:param recipients: Emails where to send notifications. They have to be defined in account from fields
:return: dict
"""
response = {"status": status, "recipients": recipients}
return response
def post_campaign(self, name: str, **kwargs):
"""
Create new campaign
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.create
:param name: Campaign name which has to be unique in whole GetResponse platform
:param kwargs:
-languageCode: Campaign language code (2 letters format)
-isDefault: Possible values: true, false. Is campaign default for account. You cannot remove default flag, only reassign it to other campaign.
-confirmation: Subscription confirmation email settings. Dict from _get_confirmation method
-profile: How campaign will be visible for subscribers. Dict from _get_profile
-postal: Postal address of your company. Dict from _get_postal
-optinTypes: How subscribers will be added to list - with double (with confirmation) or single optin. Dict from _get_option_types
-subscriptionNotifications: Notifications for each subscribed email to Your list. Dict from _get_subscription_notifications
:return: JSON response
"""
data = defaultdict()
data['name'] = name
for key, value in kwargs.items():
data[key] = value
r = self._getresponse_client.post('/campaigns', data=json.dumps(data))
return r
def update_campaign(self, campaign_id: str, **kwargs):
"""
Allows to update campaign prefenrences. Send only those fields that need to be changed.
The rest of properties will stay the same.
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.update
:param campaign_id: Id of campaign to update
:param kwargs:
-languageCode: Campaign language code (2 letters format)
-isDefault: Possible values: true, false. Is campaign default for account. You cannot remove default flag, only reassign it to other campaign.
-confirmation: Subscription confirmation email settings. Dict from _get_confirmation method
-profile: How campaign will be visible for subscribers. Dict from _get_profile
-postal: Postal address of your company. Dict from _get_postal
-optinTypes: How subscribers will be added to list - with double (with confirmation) or single optin. Dict from _get_option_types
-subscriptionNotifications: Notifications for each subscribed email to Your list. Dict from _get_subscription_notifications
:return: JSON response
"""
data = defaultdict()
for key, value in kwargs.items():
data[key] = value
r = self._getresponse_client.post('/campaigns/' + campaign_id, data=json.dumps(data))
return r
def get_campaign_contacts(self, campaign_id: str, query: list = None, sort: list = None, **kwargs):
"""
Allows to retrieve all contacts from given campaigns. Standard sorting and filtering apply.
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.contacts.get
:param campaign_id: Id of given campaign
:param query: Used to search only resources that meets criteria. Can be:
- email
- name
- createdOn][from]
- createdOn][to]
Should be passed like this: query = ['email=searched query', ..]
Examples:
query = ['email=@gmail.com','createdOn][from]=2017-03-10']
query = ['createdOn][from]=2017-03-10']
:param sort: Enable sorting using specified field (set as a key) and order (set as a value).
multiple fields to sort by can be used. Can be:
- email: asc or desc
- name: asc or desc
- createdOn: asc or desc
Should be passed like this: sort = ['email=asc', ..]
Examples:
sort = ['email=asc','createdOn=desc']
query = ['name=asc']
:param kwargs:
- fields: List of fields that should be returned. Id is always returned. Fields should be separated by comma
- page: Specify which page of results return. :type: int
- perPage: Specify how many results per page should be returned :type: int
:return: JSON response
"""
url = str('/campaigns/' + campaign_id + '/contacts?')
if query:
for item in query:
query_data = str(item).split('=')
url = url + 'query[' + query_data[0] + ']=' + query_data[1] + '&'
if sort:
for item in sort:
sort_data = str(item).split('=')
url = url + 'sort[' + sort_data[0] + ']=' + sort_data[1] + '&'
if kwargs:
for key, value in kwargs.items():
url = url + str(key) + '=' + str(value) + '&'
url = url[:-1] # get rid of last &
r = self._getresponse_client.get(url)
return r
def get_campaign_blacklist(self, campaign_id: str, mask: str):
"""
This request allows to fetch blacklist for given campaign.
Blacklist is simple plain collection of email addresses or partial masks (like @gmail.com)
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.blacklists.get
:param campaign_id: Id of campaign
:param mask: Blacklist mask to search for
:return: JSON response
"""
r = self._getresponse_client.get('/campaigns/' + campaign_id + '/blacklists?query[mask]=' + mask)
return r
def post_campaign_blacklist(self, campaign_id: str, mask: list):
"""
This request allows to update blacklist. Full list is expected.
This list will replace the present list
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.blacklists.update
:param campaign_id: Id of campaign
:param mask: Blacklist mask
:return: JSON response
"""
data = {'masks': mask}
r = self._getresponse_client.post('/campaigns/' + campaign_id + '/blacklists', data=json.dumps(data))
return r
@staticmethod
def _prepare_url_from_query(url: str, query: list, campaign_id: str):
"""
Method to populate url with query and campaign id
:param url: str
:param query: list like this ['createdOn][from]=2017-03-10', 'groupBy=hour' ]
:param campaign_id: Id of campaign.
:return:
"""
url = url + 'query[campaignId]=' + campaign_id + '&'
for item in query:
query_data = str(item).split('=')
url = url + 'query[' + query_data[0] + ']=' + query_data[1] + '&'
return url
def get_campaigns_statistics_list_size(self, query: list, campaign_id: str, fields: str = None):
"""
Get list size for found campaigns
http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.statistics.list-size
:param query: Used to search only resources that meets criteria.
If multiple parameters are specified then it uses AND logic.
Can be:
- groupBy String. Can be:
- hour
- day
- month
- total
- createdOn][from] Date in YYYY-mm-dd
- createdOn][to] Date in YYYY-mm-dd
Should be passed like this: query = ['email=searched query', ..]
Examples:
query = ['createdOn][from]=2017-03-10', 'groupBy=hour' ]
query = ['createdOn][from]=2017-03-10']
:param fields: List of fields that should be returned. Id is always returned. Fields should be separated by comma
:param campaign_id: Id of campaign. For multiple campaigns can be separated by comma like O,323fD,ddeE
:return: JSON response
"""
url = str('/campaigns/statistics/list-size?')
url = Campaigns._prepare_url_from_query(url, query, campaign_id)
if fields:
url += 'fields=' + fields
else:
url = url[:-1] # get rid of last &
r = self._getresponse_client.get(url)
return r
def get_campaigns_statistics_locations(self, query: list, campaign_id: str, fields: str = None):
    """
    Get location statistics for the given campaign(s).
    http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.statistics.locations
    :param query: Search criteria combined with AND logic, passed as 'key=value' strings.
        Supported keys:
            - groupBy: hour, day, month or total
            - createdOn][from], createdOn][to]: dates as YYYY-mm-dd
        Examples: ['createdOn][from]=2017-03-10', 'groupBy=hour']
    :param campaign_id: Id of campaign; multiple ids may be comma separated, like O,323fD,ddeE
    :param fields: Comma separated list of fields to return; id is always returned
    :return: JSON response
    """
    endpoint = Campaigns._prepare_url_from_query('/campaigns/statistics/locations?', query, campaign_id)
    if fields:
        endpoint += 'fields=' + fields
    else:
        endpoint = endpoint[:-1]  # drop the trailing '&'
    return self._getresponse_client.get(endpoint)
def get_campaigns_statistics_origins(self, query: list, campaign_id: str, fields: str = None):
    """
    Get origin statistics for the given campaign(s).
    http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.statistics.origins
    (The previous docstring pointed at the *locations* anchor — copy-paste slip.)
    :param query: Search criteria combined with AND logic, passed as 'key=value' strings.
        Supported keys:
            - groupBy: hour, day, month or total
            - createdOn][from], createdOn][to]: dates as YYYY-mm-dd
        Examples: ['createdOn][from]=2017-03-10', 'groupBy=hour']
    :param campaign_id: Id of campaign; multiple ids may be comma separated, like O,323fD,ddeE
    :param fields: Comma separated list of fields to return; id is always returned
    :return: JSON response
    """
    endpoint = Campaigns._prepare_url_from_query('/campaigns/statistics/origins?', query, campaign_id)
    if fields:
        endpoint += 'fields=' + fields
    else:
        endpoint = endpoint[:-1]  # drop the trailing '&'
    return self._getresponse_client.get(endpoint)
def get_campaigns_statistics_removals(self, query: list, campaign_id: str, fields: str = None):
    """
    Get removal statistics for the given campaign(s).
    http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.statistics.removals
    :param query: Search criteria combined with AND logic, passed as 'key=value' strings.
        Supported keys:
            - groupBy: hour, day, month or total
            - createdOn][from], createdOn][to]: dates as YYYY-mm-dd
        Examples: ['createdOn][from]=2017-03-10', 'groupBy=hour']
    :param campaign_id: Id of campaign; multiple ids may be comma separated, like O,323fD,ddeE
    :param fields: Comma separated list of fields to return; id is always returned
    :return: JSON response
    """
    endpoint = Campaigns._prepare_url_from_query('/campaigns/statistics/removals?', query, campaign_id)
    if fields:
        endpoint += 'fields=' + fields
    else:
        endpoint = endpoint[:-1]  # drop the trailing '&'
    return self._getresponse_client.get(endpoint)
def get_campaigns_statistics_subscriptions(self, query: list, campaign_id: str, fields: str = None):
    """
    Get subscription statistics for the given campaign(s).
    (The previous docstring said "Get removals" — copy-paste slip.)
    http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.statistics.subscriptions
    :param query: Search criteria combined with AND logic, passed as 'key=value' strings.
        Supported keys:
            - groupBy: hour, day, month or total
            - createdOn][from], createdOn][to]: dates as YYYY-mm-dd
        Examples: ['createdOn][from]=2017-03-10', 'groupBy=hour']
    :param campaign_id: Id of campaign; multiple ids may be comma separated, like O,323fD,ddeE
    :param fields: Comma separated list of fields to return; id is always returned
    :return: JSON response
    """
    endpoint = Campaigns._prepare_url_from_query('/campaigns/statistics/subscriptions?', query, campaign_id)
    if fields:
        endpoint += 'fields=' + fields
    else:
        endpoint = endpoint[:-1]  # drop the trailing '&'
    return self._getresponse_client.get(endpoint)
def get_campaigns_statistics_balance(self, query: list, campaign_id: str, fields: str = None):
    """
    Get balance statistics (i.e. subscriptions and removals) for the given campaign(s).
    http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.get.balance
    :param query: Search criteria combined with AND logic, passed as 'key=value' strings.
        Supported keys:
            - groupBy: hour, day, month or total
            - createdOn][from], createdOn][to]: dates as YYYY-mm-dd
        Examples: ['createdOn][from]=2017-03-10', 'groupBy=hour']
    :param campaign_id: Id of campaign; multiple ids may be comma separated, like O,323fD,ddeE
    :param fields: Comma separated list of fields to return; id is always returned
    :return: JSON response
    """
    endpoint = Campaigns._prepare_url_from_query('/campaigns/statistics/balance?', query, campaign_id)
    if fields:
        endpoint += 'fields=' + fields
    else:
        endpoint = endpoint[:-1]  # drop the trailing '&'
    return self._getresponse_client.get(endpoint)
def get_campaigns_statistics_summary(self, campaign_id_list: str, fields: str = None):
    """
    Get summary statistics (i.e. subscriptions and removals) for the given campaigns.
    http://apidocs.getresponse.com/v3/resources/campaigns#campaigns.get.summary
    :param campaign_id_list: Comma separated list of campaign ids
    :param fields: Comma separated list of fields to return; id is always returned
    :return: JSON response
    """
    # No extra query criteria here — only the campaign-id filter is sent.
    endpoint = Campaigns._prepare_url_from_query('/campaigns/statistics/summary?', [], campaign_id_list)
    if fields:
        endpoint += 'fields=' + fields
    else:
        endpoint = endpoint[:-1]  # drop the trailing '&'
    return self._getresponse_client.get(endpoint)
class FromFields(object):
    """
    From fields section of the API.
    http://apidocs.getresponse.com/v3/resources/fromfields
    """

    def __init__(self, api_endpoint: str, api_key: str, x_domain: str = None, x_time_zone: str = None):
        self._getresponse_client = GetresponseClient(api_endpoint=api_endpoint, api_key=api_key,
                                                     x_domain=x_domain, x_time_zone=x_time_zone)

    def get_from_fields(self, query: list = None, **kwargs):
        """
        Get all from fields within the account.
        http://apidocs.getresponse.com/v3/resources/fromfields#fromfields.get.all
        :param query: Search criteria combined with AND logic, passed as 'key=value' strings.
            Supported keys:
                - name
                - email (strict equality, so pass the full address)
            Examples: ['name=Test', 'email=info@test.com']
        :param kwargs:
            - fields: comma separated list of fields to return :type: str
            - sort: sort order by createdOn, 'asc' or 'desc' :type: str
            - perPage: number of results per page :type: int
            - page: page number :type: int
        :return: JSON response
        """
        url = '/from-fields?'
        for condition in (query or []):
            parts = str(condition).split('=')
            url += 'query[' + parts[0] + ']=' + parts[1] + '&'
        for name, value in kwargs.items():
            if name == 'sort':
                url += name + '[createdOn]=' + value + '&'
            else:
                url += name + '=' + value + '&'
        # Strip the trailing '&' (or the '?' when nothing was appended).
        return self._getresponse_client.get(url[:-1])

    def get_from_field(self, field_id: str, fields: str = None):
        """
        Return a single from field by its id.
        http://apidocs.getresponse.com/v3/resources/fromfields#fromfields.get
        :param field_id: Id of the field to return
        :param fields: Comma separated list of fields to return
        :return: JSON response
        """
        url = '/from-fields/' + field_id
        if fields:
            url = url + '?fields=' + fields
        return self._getresponse_client.get(url)

    def post_from_field(self, name: str, email: str):
        """
        Create a new from field.
        http://apidocs.getresponse.com/v3/resources/fromfields#fromfields.create
        :param name: Name connected to the email address
        :param email: Email address
        :return: JSON response
        """
        payload = json.dumps({'name': name, 'email': email})
        return self._getresponse_client.post('/from-fields', data=payload)

    def delete_or_replace_from_field(self, from_field_id: str, replace_id: str = None):
        """
        Remove a from field, optionally substituting another one for it.
        http://apidocs.getresponse.com/v3/resources/fromfields#fromfields.delete
        :param from_field_id: Id of the from field to remove
        :param replace_id: Id of the replacement from field, if any
        :return: Empty response or error response
        """
        url = '/from-fields/' + from_field_id
        if replace_id:
            payload = json.dumps({'fromFieldIdToReplaceWith': replace_id})
            return self._getresponse_client.delete(url, data=payload)
        return self._getresponse_client.delete(url)

    def make_default(self, from_field_id: str):
        """
        Make a from field the default one.
        http://apidocs.getresponse.com/v3/resources/fromfields#fromfields.default
        :param from_field_id: Id of the from field; it must be active
            (its 'isActive' property set to 'true')
        :return: JSON response
        """
        return self._getresponse_client.post('/from-fields/' + from_field_id + '/default', data=None)
class CustomFields:
    """
    Custom fields section of the API.
    http://apidocs.getresponse.com/v3/resources/customfields
    """

    def __init__(self, api_endpoint: str, api_key: str, x_domain: str = None, x_time_zone: str = None):
        self._getresponse_client = GetresponseClient(api_endpoint=api_endpoint, api_key=api_key,
                                                     x_domain=x_domain, x_time_zone=x_time_zone)

    def get_custom_fields(self, **kwargs):
        """
        Get all custom fields.
        http://apidocs.getresponse.com/v3/resources/customfields#customfields.get.all
        :param kwargs:
            - fields: comma separated list of fields to return :type: str
            - sort: sort order by name, 'asc' or 'desc' :type: str
            - perPage: number of results per page :type: int
            - page: page number :type: int
        :return: JSON response
        """
        url = '/custom-fields?'
        for name, value in kwargs.items():
            if name == 'sort':
                url += name + '[name]=' + value + '&'
            else:
                url += name + '=' + value + '&'
        # Strip the trailing '&' (or the '?' when nothing was appended).
        return self._getresponse_client.get(url[:-1])

    def get_custom_field(self, field_id: str, fields: str = None):
        """
        Get a custom field by id.
        http://apidocs.getresponse.com/v3/resources/customfields#customfields.get
        :param field_id: Id of the custom field
        :param fields: Comma separated list of fields to return; id is always returned
        :return: JSON response
        """
        url = '/custom-fields/' + field_id
        if fields:
            url = url + '?fields=' + fields
        return self._getresponse_client.get(url)

    def post_custom_field(self, name: str, custom_type: str, hidden: bool, values: list):
        """
        Create a custom field.
        http://apidocs.getresponse.com/v3/resources/customfields#customfields.create
        :param name: Name of the custom field. Must be 1-32 characters long, unique,
            use only lowercase letters, underscores and digits, and must not equal
            one of the merge words used in messages (name, email, campaign, twitter,
            facebook, buzz, myspace, linkedin, digg, googleplus, pinterest,
            responder, change)
        :param custom_type: Type of the custom field value, e.g. 'text'
        :param hidden: Whether the custom field is hidden from the contact
        :param values: List of assigned values (one or more, depending on the type)
        :return: JSON response
        """
        payload = {'name': name, 'type': custom_type, 'hidden': hidden, 'values': values}
        return self._getresponse_client.post('/custom-fields', data=json.dumps(payload))

    def delete_custom_field(self, field_id: str):
        """
        Delete a custom field by id.
        http://apidocs.getresponse.com/v3/resources/customfields#customfields.delete
        :param field_id: Id of the custom field
        :return: Empty response or error response
        """
        return self._getresponse_client.delete('/custom-fields/' + field_id)

    def update_custom_field(self, field_id: str, hidden: bool, values: list = None):
        """
        Update a custom field.
        http://apidocs.getresponse.com/v3/resources/customfields#customfields.update
        :param field_id: Id of the custom field to update
        :param hidden: Whether the custom field is hidden from the contact
        :param values: List of assigned values (one or more, depending on the type)
        :return: JSON response
        """
        payload = {'hidden': hidden, 'values': values} if values else {'hidden': hidden}
        return self._getresponse_client.post('/custom-fields/' + field_id, data=json.dumps(payload))
class Newsletters:
    """
    Newsletters section of the API.
    http://apidocs.getresponse.com/v3/resources/newsletters
    """

    def __init__(self, api_endpoint: str, api_key: str, x_domain: str = None, x_time_zone: str = None):
        self._getresponse_client = GetresponseClient(api_endpoint=api_endpoint, api_key=api_key,
                                                     x_domain=x_domain, x_time_zone=x_time_zone)

    def get_newsletters(self, query: list = None, **kwargs):
        """
        Get all newsletters within the account.
        http://apidocs.getresponse.com/v3/resources/newsletters#newsletters.get.all
        :param query: Search criteria combined with AND logic, passed as 'key=value' strings.
            Supported keys:
                - subject
                - status: scheduled, in_progress or finished
                - createdOn][from], createdOn][to]
                - type: draft, broadcast, splittest or automation
                - campaignId: id of a campaign; multiple ids separated by comma allowed
            Examples: ['subject=Test', 'type=broadcast']
        :param kwargs:
            - fields: comma separated list of fields to return :type: str
            - sort: sort order by createdOn, 'asc' or 'desc' :type: str
            - perPage: number of results per page :type: int
            - page: page number :type: int
        :return: JSON response
        """
        url = '/newsletters?'
        for item in query or []:
            # partition on the first '=' keeps values containing '=' intact
            # (split('=') truncated them).
            key, _, value = str(item).partition('=')
            url += 'query[' + key + ']=' + value + '&'
        for key, value in kwargs.items():
            if key == 'sort':
                url += key + '[createdOn]=' + value + '&'
            else:
                url += key + '=' + value + '&'
        # Strip the trailing '&' (or the '?' when nothing was appended).
        return self._getresponse_client.get(url[:-1])

    def get_newsletter(self, newsletter_id: str, fields: str = None):
        """
        Return a single newsletter by its id.
        http://apidocs.getresponse.com/v3/resources/newsletters#newsletters.get
        :param newsletter_id: Id of the newsletter to return
        :param fields: Comma separated list of fields to return
        :return: JSON response
        """
        url = '/newsletters/' + newsletter_id
        if fields:
            url += '?fields=' + fields
        return self._getresponse_client.get(url)

    def get_newsletters_statistics(self, query: list, **kwargs):
        """
        Get statistics for newsletters. (The old docstring said "Get all newsletters";
        this endpoint actually returns statistics.)
        http://apidocs.getresponse.com/v3/resources/newsletters#newsletters.statistics.get.all
        :param query: Search criteria combined with AND logic, passed as 'key=value' strings.
            Supported keys:
                - groupBy: total, hour, day or month
                - newsletterId: newsletter ids separated by ','
                - campaignId: campaign ids separated by ','
                - createdOn][from], createdOn][to]: dates as YYYY-mm-dd
        :param kwargs:
            - fields: comma separated list of fields to return :type: str
            - perPage: number of results per page :type: int
            - page: page number :type: int
        :return: JSON response
        """
        url = '/newsletters/statistics?'
        for item in query:
            key, _, value = str(item).partition('=')
            url += 'query[' + key + ']=' + value + '&'
        for key, value in kwargs.items():
            url += key + '=' + value + '&'
        return self._getresponse_client.get(url[:-1])  # strip trailing '&' or '?'

    @staticmethod
    def prepare_content(html: str, plain: str):
        """
        Prepare the content payload for post_newsletters.
        :param html: Html content of the email
        :param plain: Plain-text content of the email
        :return: dict
        """
        return {'html': html, 'plain': plain}

    @staticmethod
    def prepare_attachment(file_name: str, content: str, mime_type: str):
        """
        Prepare one attachment payload; the total size of all attachments
        must not exceed 400kb.
        :param file_name: File name
        :param content: Base64 encoded file content
        :param mime_type: File mime type
        :return: dict
        """
        return {'fileName': file_name, 'content': content, 'mimeType': mime_type}

    @staticmethod
    def prepare_send_settings(selected_campaigns: list = None, selected_segments: list = None,
                              selected_suppressions: list = None, excluded_campaigns: list = None,
                              excluded_segments: list = None, selected_contacts: list = None,
                              time_travel: str = 'false',
                              perfect_timing: str = 'false'):
        """
        Prepare the sendSettings payload for post_newsletters.
        :param selected_campaigns: List of selected campaign ids
        :param selected_segments: List of selected segment ids
        :param selected_suppressions: List of selected suppression ids
        :param excluded_campaigns: List of excluded campaign ids
        :param excluded_segments: List of excluded segment ids
        :param selected_contacts: List of selected contact ids
        :param time_travel: 'true'/'false' — send according to each recipient's time zone
        :param perfect_timing: 'true'/'false' — use perfect timing functionality
        :return: dict
        """
        return {'selectedCampaigns': selected_campaigns or [],
                'selectedSegments': selected_segments or [],
                'selectedSuppressions': selected_suppressions or [],
                'excludedCampaigns': excluded_campaigns or [],
                'excludedSegments': excluded_segments or [],
                'selectedContacts': selected_contacts or [],
                'timeTravel': time_travel,
                'perfectTiming': perfect_timing}

    def post_newsletters(self, name: str, subject: str, from_field_id: dict, campaign_id: dict, content: dict,
                         send_settings: dict, newsletter_type: str = 'broadcast', editor: str = 'custom',
                         reply_to: str = None, flags: list = None, attachments: list = None):
        """
        Create and queue sending a new newsletter.
        This method has a limit of 256 calls per day.
        http://apidocs.getresponse.com/v3/resources/newsletters#newsletters.create
        :param name: Name of the newsletter
        :param subject: Subject of the newsletter
        :param from_field_id: Email from-field id
        :param campaign_id: Id of the campaign the newsletter belongs to
        :param content: Result of prepare_content
        :param send_settings: Result of prepare_send_settings
        :param newsletter_type: Type of newsletter; 'draft' creates a standard draft
        :param editor: How the content was created: 'getresponse' (web editor only),
            'plain' (text only) or 'custom' (html edited outside the web editor)
        :param reply_to: From-field id replies should go to
        :param flags: Message flags; possible values: openrate and clicktrack
        :param attachments: List of prepare_attachment results
        :return: JSON response
        """
        # Both tracking flags are enabled by default, matching the original behavior.
        data = {'name': name, 'type': newsletter_type, 'subject': subject,
                'flags': flags or ['openrate', 'clicktrack'], 'editor': editor,
                'campaign': campaign_id, 'content': content, 'fromField': from_field_id,
                'replyTo': reply_to, 'attachments': attachments, 'sendSettings': send_settings}
        return self._getresponse_client.post('/newsletters', data=json.dumps(data))
class Contacts:
    """
    Contacts section of the API.
    http://apidocs.getresponse.com/v3/resources/contacts
    """

    def __init__(self, api_endpoint: str, api_key: str, x_domain: str = None, x_time_zone: str = None):
        self._getresponse_client = GetresponseClient(api_endpoint=api_endpoint, api_key=api_key,
                                                     x_domain=x_domain, x_time_zone=x_time_zone)

    def get_contacts(self, query: list = None, sort: list = None, **kwargs):
        """
        Retrieve contacts. Standard sorting and filtering apply.
        http://apidocs.getresponse.com/v3/resources/contacts#contacts.get.all
        :param query: Search criteria, passed as 'key=value' strings. Supported keys:
            email, name, createdOn][from], createdOn][to], changedOn][from],
            changedOn][to], campaignId, origin.
            Examples: ['email=@gmail.com', 'createdOn][from]=2017-03-10']
        :param sort: Sort specification, passed as 'field=order' strings where the
            field is email, name or createdOn and the order is asc or desc.
            Example: ['email=asc', 'createdOn=desc']
        :param kwargs:
            - fields: comma separated list of fields to return; id is always returned
            - page: which page of results to return :type: int
            - perPage: how many results per page :type: int
            - additionalFlags: 'exactMatch' searches email/name by strict equality
              instead of the (potentially slow) 'like' comparison
        :return: JSON response
        """
        url = '/contacts?'
        for item in query or []:
            # partition on the first '=' keeps values containing '=' intact
            # (split('=') truncated them).
            key, _, value = str(item).partition('=')
            url += 'query[' + key + ']=' + value + '&'
        for item in sort or []:
            key, _, value = str(item).partition('=')
            url += 'sort[' + key + ']=' + value + '&'
        for key, value in kwargs.items():
            url += str(key) + '=' + str(value) + '&'
        # Strip the trailing '&' (or the '?' when nothing was appended).
        return self._getresponse_client.get(url[:-1])

    def post_contacts(self, email: str, campaign_id: str, **kwargs):
        """
        Create a new contact.
        http://apidocs.getresponse.com/v3/resources/contacts#contacts.create
        :param email: Email of the new contact
        :param campaign_id: Id of the campaign the contact is added to
        :param kwargs:
            - name: name of contact
            - dayOfCycle: day of autoresponder cycle
            - customFieldValues: customFieldValues to assign to the contact
            - ipAddress: IP address of the contact
        :return: JSON response
        """
        # Plain dicts suffice here: the original used defaultdict() without a
        # factory, which behaves exactly like dict but required an extra import.
        data = {'email': email, 'campaign': {'campaignId': campaign_id}}
        data.update(kwargs)
        return self._getresponse_client.post('/contacts', data=json.dumps(data))

    def update_contact_customs(self, contact_id: str, custom_fields: dict):
        """
        Add and update a contact's custom field values; existing custom fields
        are not removed.
        http://apidocs.getresponse.com/v3/resources/contacts#contacts.upsert.custom-fields
        :param contact_id: Contact id
        :param custom_fields: Custom fields to update
        :return: JSON response
        """
        return self._getresponse_client.post('/contacts/' + contact_id + '/custom-fields',
                                             data=json.dumps(custom_fields))

    def get_contact(self, contact_id: str):
        """
        Get a contact by id.
        http://apidocs.getresponse.com/v3/resources/contacts#contacts.get
        :param contact_id: Id of the contact
        :return: JSON response
        """
        return self._getresponse_client.get('/contacts/' + contact_id)
class SearchContacts:
    """
    Search-contacts (segments) section of the API.
    http://apidocs.getresponse.com/v3/resources/search-contacts
    """

    def __init__(self, api_endpoint: str, api_key: str, x_domain: str = None, x_time_zone: str = None):
        self._getresponse_client = GetresponseClient(api_endpoint=api_endpoint, api_key=api_key,
                                                     x_domain=x_domain, x_time_zone=x_time_zone)

    def get_contacts(self, search_contact_id: str):
        """
        Get all contacts belonging to a search-contact (segment).
        http://apidocs.getresponse.com/v3/resources/search-contacts#search-contacts.contacts.get.all
        :param search_contact_id: Id of the segment
        :return: JSON response
        """
        return self._getresponse_client.get('/search-contacts/' + search_contact_id + '/contacts')

    def get_segments(self):
        """
        Get the list of segments.
        http://apidocs.getresponse.com/v3/resources/search-contacts
        :return: JSON response
        """
        return self._getresponse_client.get('/search-contacts/')
class Imports:
    """
    Imports section of the API.
    http://apidocs.getresponse.com/v3/resources/imports
    """

    def __init__(self, api_endpoint: str, api_key: str, x_domain: str = None, x_time_zone: str = None):
        self._getresponse_client = GetresponseClient(api_endpoint=api_endpoint, api_key=api_key,
                                                     x_domain=x_domain, x_time_zone=x_time_zone)

    def get_imports(self, query: str = None):
        """
        Get imports, optionally filtered by a raw query string.
        http://apidocs.getresponse.com/v3/resources/imports#imports.get.all
        :param query: Pre-built query string appended verbatim after '?'.
            Example: 'query[campaignId]=0&fields=importId,status,createdOn'
        :return: JSON response
        """
        endpoint = '/imports?' + query if query else '/imports'
        return self._getresponse_client.get(endpoint)

    def get_import(self, import_id: str):
        """
        Get a single import by id.
        :param import_id: Id of the import
        :return: JSON response
        """
        return self._getresponse_client.get('/imports/' + import_id)
if __name__ == '__main__':
    # Run the module's doctests when this file is executed directly.
    import doctest
    doctest.testmod()
| {
"repo_name": "pavelkalin/getresponse-python",
"path": "getresponse/getresponsev3.py",
"copies": "1",
"size": "53453",
"license": "mit",
"hash": -7133962155115208000,
"line_mean": 46.7258928571,
"line_max": 204,
"alpha_frac": 0.5584158045,
"autogenerated": false,
"ratio": 4.432255389718076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5490671194218076,
"avg_score": null,
"num_lines": null
} |
""" A library that provides a Python interface to the ScreenConnect API """
import re
from json import dumps
import requests
from screenconnect.session import Session
from screenconnect.session_group import SessionGroup
from screenconnect.error import ScreenConnectError
class ScreenConnect:
""" A python interface into the ScreenConnect API """
def __init__(self, url, auth=None):
""" Instantiate a new ScreenConnect object
Arguments:
url -- publicly accessible url for the ScreenConnect web server
auth -- (user, pwd)
"""
# Need to do some basic sanitation to remove unnecessary trailing slash
self.url = url
self.user, self.__pwd = auth
def __repr__(self):
return "{0}(url: {1}, user: {2})".format(
self.__class__.__name__, self.url, self.user
)
@property
def server_version(self):
raw_server = self.make_request("HEAD", return_json=False).headers.get("Server")
try:
return re.search("ScreenConnect/([0-9][0-9.]*[0-9])*", raw_server).group(1)
except AttributeError:
raise ScreenConnectError("Unable to determine server version")
def reset_auth_credentials(self, auth=(None, None)):
""" Resets the designated account for authorization
Argument:
auth -- supplied credentials in (user, pwd); if no credentials
are provided, they will default to none to revoke access
"""
user, pwd = auth
if self.user == user and self.__pwd == pwd:
return None
self.user, self.__pwd = auth
def make_request(self, verb, path="", data=None, params=None, return_json=True):
""" Performs request with optional payload to a specified path
The purpose of
Arguments:
verb -- HTTP verb to use when making the request
path -- relative path to append to the object's url
data -- optional payload to send with the request
"""
url = self.url + path
response = requests.request(
verb, url, auth=(self.user, self.__pwd), data=data, params=params
)
status_code = response.status_code
if status_code == 200:
return response.json() if return_json else response
elif status_code == 403:
raise ScreenConnectError("Bad or missing credentials provided")
elif status_code == 404:
raise ScreenConnectError("Invalid URL provided")
else:
raise ScreenConnectError("Unknown error")
# ------------ SESSION METHODS ------------
def create_session(self, session_type, name, is_public, code, custom_properties):
""" Creates a new ScreenConnect session
ScreenConnect API -- ~/Services/PageService.ashx/CreateSession
Arguments:
session_type -- type of ScreenConnect session
name -- identifying name visible to users
is_public -- boolean value on whether the session can be connected
to form the Guest page
code -- code that can be used to join from the Guest Page if applicable
custom_properties -- list of 8 properties that can be used to define
groups and filters for sessions
"""
# TODO: propagate missing values to Session object
path = "/Services/PageService.ashx/CreateSession"
payload = [session_type.value, name, is_public, code, custom_properties]
result = self.make_request("POST", path, data=dumps(payload))
return Session(self, result, name)
def get_guest_session_info(self, session_codes=[], session_ids=[], version=0):
""" Retrieves information about a session from the Guest perspective
ScreenConnect API -- ~/Services/PageService.ashx/GetGuestSessionInfo
Arguments:
"""
path = "/Services/PageService.ashx/GetGuestSessionInfo"
payload = [session_codes, session_ids, version]
response = self.make_request("GET", path, data=dumps(payload))
return [
Session(self, _["SessionID"], _["Name"], **_)
for _ in response.get("Sessions", [])
]
def get_host_session_info(
self,
session_type=0,
session_group_path=[],
session_filter=None,
find_session_id=None,
version=0,
):
""" Retrieves information about a session from the Host perspective
ScreenConnect API -- ~/Services/PageService.ashx/GetHostSessionInfo
Arguments:
"""
path = "/Services/PageService.ashx/GetHostSessionInfo"
payload = [
session_type,
session_group_path,
session_filter,
find_session_id,
version,
]
response = self.make_request("GET", path, data=dumps(payload))
return [
Session(self, _["SessionID"], _["Name"], **_)
for _ in response.get("Sessions", [])
]
def update_sessions(
self,
session_group_name,
session_ids,
names,
is_publics,
codes,
custom_property_values,
):
""" Updates one or more ScreenConnect sessions within the same session group;
all lists should be saved in the same respective order
ScreenConnect API -- ~/Services/PageService.ashx/UpdateSessions
Arguments:
session_group_name -- name of the session group to which sessions belong
session_ids -- list of session ids for the sessions to update
names -- list of names
is_publics -- list of boolean is_public statuses
codes -- list of join code strings
custom_property_values -- list of custom property value lists
"""
path = "/Services/PageService.ashx/UpdateSessions"
self.make_request("POST", path)
pass
def transfer_sessions(self, session_group_name, session_ids, to_host):
""" Updates the "ownership" quality of one or more ScreenConnect sessions
within the same session group
ScreenConnect API -- ~/Services/PageService.ashx/TransferSessions
Arguments:
session_group_name --
"""
path = "/Services/PageService.ashx/TransferSessions"
payload = [session_group_name, session_ids, to_host]
self.make_request("POST", path, data=dumps(payload))
# ------------ SESSION GROUP METHODS ------------
def get_session_groups(self):
    """ Retrieves all session groups as SessionGroup objects """
    endpoint = "/Services/SessionGroupService.ashx/GetSessionGroups"
    rows = self.make_request("GET", endpoint)
    return [SessionGroup(self, **row) for row in rows]
def save_session_groups(self, session_groups):
    """ Saves all session groups
    Arguments:
    session_groups -- iterable of session group objects exposing to_dict()
    """
    path = "/Services/SessionGroupService.ashx/SaveSessionGroups"
    # The old list([...]) wrapper around the comprehension was a no-op --
    # it added no extra nesting despite the comment claiming otherwise.
    # The service simply receives a JSON array of session-group dicts.
    payload = [group.to_dict() for group in session_groups]
    self.make_request("POST", path, data=dumps(payload))
# ------------ MISC METHODS ------------
def get_session_report(
    self,
    report_type=None,
    select_fields=None,
    group_fields=None,
    report_filter=None,
    aggregate_filter=None,
    item_limit=None,
    transform=True,
):
    """ Get a report based upon session criteria
    Arguments:
    report_type -- report type selector understood by /Report.json
    select_fields -- fields to select
    group_fields -- fields to group by
    report_filter -- row filter expression
    aggregate_filter -- aggregate (post-grouping) filter expression
    item_limit -- maximum number of items to return
    transform -- when True (default), zip each row with the returned
        FieldNames so the result is a list of dicts instead of the raw
        {"FieldNames": [...], "Items": [...]} payload
    """
    path = "/Report.json"
    params = {
        "ReportType": report_type,
        "SelectFields": select_fields,
        "GroupFields": group_fields,
        "Filter": report_filter,
        "AggregateFilter": aggregate_filter,
        "ItemLimit": item_limit,
    }
    # Bug fix: filtering with `if v` silently dropped legitimate falsy
    # values such as item_limit=0.  Only parameters that were not supplied
    # at all (None) should be omitted from the query.
    response = self.make_request(
        "GET", path, params={k: v for k, v in params.items() if v is not None}
    )
    if transform:
        response = [dict(zip(response["FieldNames"], x)) for x in response["Items"]]
    return response
# ------------ MISC METHODS ------------
def send_email(self, to, subject=None, body=None, is_html=False):
    """ Sends an email through the ScreenConnect mail service
    Arguments:
    to -- recipient address
    subject -- optional subject line
    body -- optional message body
    is_html -- whether the body should be treated as HTML (default False)
    """
    message = [to, subject, body, is_html]
    self.make_request(
        "POST",
        "/Services/MailService.ashx/SendEmail",
        data=dumps(message),
    )
def get_eligible_hosts(self):
    """ Retrieves list of all accounts with login in past 24 hours """
    return self.make_request(
        "GET", "/Services/PageService.ashx/GetEligibleHosts"
    )
def get_toolbox(self):
    """ Retrieves toolbox items """
    return self.make_request(
        "GET", "/Services/PageService.ashx/GetToolbox"
    )
| {
"repo_name": "jacobeturpin/python-screenconnect",
"path": "screenconnect/api.py",
"copies": "1",
"size": "8800",
"license": "mit",
"hash": 2255075523229696000,
"line_mean": 32.7164750958,
"line_max": 88,
"alpha_frac": 0.5965909091,
"autogenerated": false,
"ratio": 4.4110275689223055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5507618478022306,
"avg_score": null,
"num_lines": null
} |
'''A library to allow navigating rest apis easy.'''
from __future__ import print_function
from __future__ import unicode_literals
__version__ = '1.0'
from weakref import WeakValueDictionary
try:
from http import client as http_client
except ImportError:
import httplib as http_client
import json
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
import webbrowser
import requests
import six
import uritemplate
from restnavigator import exc, utils
# Default headers sent with every request: prefer HAL, fall back to plain
# JSON, and identify this client library (and its version) to the server.
DEFAULT_HEADERS = {
    'Accept': 'application/hal+json,application/json',
    'User-Agent': 'HALNavigator/{0}'.format(__version__)
}
# Constants used with requests library
GET = 'GET'
POST = 'POST'
DELETE = 'DELETE'
PATCH = 'PATCH'
PUT = 'PUT'
class APICore(object):
    '''Shared data between navigators from a single api.
    This should contain all state that is generally maintained from
    one navigator to the next.
    '''

    def __init__(self,
                 root,
                 nav_class,
                 apiname=None,
                 default_curie=None,
                 session=None,
                 id_map=None,
                 ):
        self.root = root
        self.nav_class = nav_class
        # Derive a human-friendly api name from the root when none given.
        self.apiname = apiname if apiname is not None else utils.namify(root)
        self.default_curie = default_curie
        self.session = session if session else requests.Session()
        # Weak identity map so unreferenced navigators can be collected.
        self.id_map = WeakValueDictionary() if id_map is None else id_map

    def cache(self, link, nav):
        '''Store *nav* in the identity map for this api.
        Accepts either a Link object or a bare uri string; navigators
        without a Link (link is None) are never cached.
        '''
        if link is None:
            return
        key = link.uri if hasattr(link, 'uri') else link
        self.id_map[key] = nav

    def get_cached(self, link, default=None):
        '''Retrieve a cached navigator, or *default* when absent.
        Either a Link object or a bare uri string may be passed in.
        '''
        key = link.uri if hasattr(link, 'uri') else link
        return self.id_map.get(key, default)

    def is_cached(self, link):
        '''Whether a navigator for *link* is cached. Intended to be
        overwritten and customized by subclasses.
        '''
        if link is None:
            return False
        key = link.uri if hasattr(link, 'uri') else link
        return key in self.id_map

    def authenticate(self, auth):
        '''Set the authentication used for all future requests.'''
        self.session.auth = auth
class Link(object):
    '''A single HAL link. The link relation name is kept by the caller,
    not by this object.'''

    def __init__(self, uri, properties=None):
        self.uri = uri
        # Empty/absent properties collapse to a fresh dict.
        self.props = properties if properties else {}

    def relative_uri(self, root):
        '''Return this link's uri with the api *root* collapsed to "/".'''
        collapsed = self.uri.replace(root, '/')
        return collapsed
class PartialNavigator(object):
    '''A lazy representation of a navigator. Expands to a full
    navigator when template arguments are given by calling it.
    '''

    def __init__(self, link, core=None):
        self.link = link
        self._core = core

    def __repr__(self):  # pragma: nocover
        rel = self.link.relative_uri(self._core.root)
        return "{cls}({name}{path})".format(
            cls=type(self).__name__,
            name=self._core.apiname,
            path=utils.objectify_uri(rel),
        )

    @property
    def template_uri(self):
        '''The raw (unexpanded) uri template.'''
        return self.link.uri

    @property
    def variables(self):
        '''The set of template variable names in this templated link.'''
        return uritemplate.variables(self.link.uri)

    def expand_uri(self, **kwargs):
        '''Return the uri template expanded with the given arguments.'''
        # uritemplate renders integer 0 as an empty string, so coerce
        # any value equal to 0 to the string '0' first.
        safe_args = {k: (v if v != 0 else '0') for k, v in kwargs.items()}
        return uritemplate.expand(self.link.uri, safe_args)

    def expand_link(self, **kwargs):
        '''Expand with the given arguments into a new untemplated Link.'''
        props = self.link.props.copy()
        del props['templated']
        return Link(uri=self.expand_uri(**kwargs), properties=props)

    def __call__(self, **kwargs):
        '''Expand this PartialNavigator into a full navigator, supplying
        the keyword arguments to the uri template.
        '''
        return HALNavigator(core=self._core, link=self.expand_link(**kwargs))
class Navigator(object):
    '''A factory for other navigators. Makes creating them more
    convenient
    '''

    @staticmethod
    def hal(root,
            apiname=None,
            default_curie=None,
            auth=None,
            headers=None,
            session=None,
            ):
        '''Create a HALNavigator rooted at *root*.
        Optional *auth* and *headers* are applied to the shared session;
        *headers* values override the library defaults.
        '''
        root = utils.fix_scheme(root)
        core = APICore(
            root=root,
            nav_class=HALNavigator,
            apiname=apiname,
            default_curie=default_curie,
            session=session,
        )
        nav = HALNavigator(link=Link(uri=root), core=core)
        if auth:
            nav.authenticate(auth)
        nav.headers.update(DEFAULT_HEADERS)
        if headers is not None:
            nav.headers.update(headers)
        return nav
class HALNavigatorBase(object):
    '''Base class for navigation objects'''

    # Content type assumed when the session carries no Accept header.
    DEFAULT_CONTENT_TYPE = 'application/hal+json'

    def __new__(cls, link, core, *args, **kwargs):
        '''New decides whether we need a new instance or whether it's
        already in the id_map of the core'''
        # Identity map: at most one navigator object per uri within an api.
        if core.is_cached(link):
            return core.get_cached(link.uri)
        else:
            return super(HALNavigatorBase, cls).__new__(cls)

    def __init__(self, link, core,
                 response=None,
                 state=None,
                 curies=None,
                 _links=None,
                 _embedded=None,
                 ):
        '''Internal constructor. If you want to create a new
        HALNavigator, use the factory `Navigator.hal`
        '''
        if core.is_cached(link):
            # Don't want to overwrite a cached navigator
            return
        else:
            self.self = link
            self.response = response
            self.state = state
            self.fetched = response is not None
            self.curies = curies
            self._core = core
            self._links = _links or utils.CurieDict(core.default_curie, {})
            self._embedded = _embedded or utils.CurieDict(
                core.default_curie, {})
            core.cache(link, self)

    @property
    def uri(self):
        # Returns None when this navigator has no self link (orphans).
        if self.self is not None:
            return self.self.uri

    @property
    def apiname(self):
        return self._core.apiname

    @property
    def title(self):
        if self.self is not None:
            return self.self.props.get('title')

    @property
    def profile(self):
        if self.self is not None:
            return self.self.props.get('profile')

    @property
    def type(self):
        if self.self is not None:
            return self.self.props.get('type')

    @property
    def headers(self):
        # Headers are shared session-wide, not per navigator.
        return self._core.session.headers

    @property
    def resolved(self):
        # Resolved means we either fetched it or it came pre-populated
        # (e.g. from an embedded document).
        return self.fetched or self.state is not None

    def __repr__(self):  # pragma: nocover
        relative_uri = self.self.relative_uri(self._core.root)
        objectified_uri = utils.objectify_uri(relative_uri)
        return "{cls}({name}{path})".format(
            cls=type(self).__name__, name=self.apiname, path=objectified_uri)

    def authenticate(self, auth):
        '''Authenticate with the api'''
        self._core.authenticate(auth)

    def links(self):
        '''Returns a dictionary of navigators from the current
        resource. Fetches the resource if necessary.
        '''
        if not self.resolved:
            self.fetch()
        return self._links

    def embedded(self):
        '''Returns a dictionary of navigators representing embedded
        documents in the current resource. If the navigators have self
        links they can be fetched as well.
        '''
        if not self.resolved:
            self.fetch()
        return self._embedded

    @property
    def status(self):
        # (status_code, reason) tuple, or None before any response.
        if self.response is not None:
            return self.response.status_code, self.response.reason

    def __eq__(self, other):
        '''Equality'''
        try:
            return self.uri == other.uri and self.apiname == other.apiname
        except Exception:
            return False

    def __ne__(self, other):
        '''Inequality'''
        return not self == other

    def __iter__(self):
        '''Part of iteration protocol'''
        # Walks 'next' links until next() raises StopIteration.
        yield self
        last = self
        while True:
            current = last.next()
            current()  # fetch if necessary
            yield current
            last = current

    def __nonzero__(self):
        '''Whether this navigator was successful.'''
        # NOTE(review): __nonzero__ is Python 2 spelling; under Python 3
        # truthiness would need __bool__ -- confirm intended support matrix.
        if not self.resolved:
            raise exc.NoResponseError(
                'this navigator has not been fetched '
                'yet, so we cannot determine if it succeeded')
        return bool(self.response)

    def __contains__(self, value):
        if not self.resolved:
            raise exc.NoResponseError(
                'this navigator has not been fetched '
                'yet, so we cannot determine if it contains a link '
                'relation')
        return value in self._links or value in self._embedded

    def next(self):
        # A missing 'next' rel surfaces as OffTheRailsException wrapping a
        # KeyError, which we translate into StopIteration for __iter__.
        try:
            return self['next']
        except exc.OffTheRailsException as otre:
            if isinstance(otre.exception, KeyError):
                raise StopIteration()
            else:
                raise

    def __getitem__(self, getitem_args):
        r'''Rel selector and traversor for navigators'''
        traversal = utils.normalize_getitem_args(getitem_args)
        intermediates = [self]
        val = self
        for i, arg in enumerate(traversal):
            try:
                if isinstance(arg, six.string_types):
                    val()  # fetch the resource if necessary
                    # Embedded documents take precedence over links.
                    if val._embedded and arg in val._embedded:
                        val = val._embedded[arg]
                    else:
                        # We're hoping it's in links, otherwise we're
                        # off the tracks
                        val = val.links()[arg]
                elif isinstance(arg, tuple):
                    val = val.get_by(*arg, raise_exc=True)
                elif isinstance(arg, int) and isinstance(val, list):
                    val = val[arg]
                else:
                    raise TypeError("{0!r} doesn't accept a traversor of {1!r}"
                                    .format(val, arg))
            except Exception as e:
                # Preserve the partial traversal for debugging.
                raise exc.OffTheRailsException(
                    traversal, i, intermediates, e)
            intermediates.append(val)
        return val

    def docsfor(self, rel):  # pragma: nocover
        '''Obtains the documentation for a link relation. Opens in a webbrowser
        window'''
        prefix, _rel = rel.split(':')
        if prefix in self.curies:
            doc_url = uritemplate.expand(self.curies[prefix], {'rel': _rel})
        else:
            doc_url = rel
        print('opening', doc_url)
        webbrowser.open(doc_url)

    def _make_links_from(self, body):
        '''Creates linked navigators from a HAL response body'''
        ld = utils.CurieDict(self._core.default_curie, {})
        for rel, link in body.get('_links', {}).items():
            if rel != 'curies':
                if isinstance(link, list):
                    ld[rel] = utils.LinkList(
                        (self._navigator_or_thunk(lnk), lnk) for lnk in link)
                else:
                    ld[rel] = self._navigator_or_thunk(link)
        return ld

    def _make_embedded_from(self, doc):
        '''Creates embedded navigators from a HAL response doc'''
        ld = utils.CurieDict(self._core.default_curie, {})
        # NOTE(review): the loop variable shadows the `doc` parameter;
        # harmless here because the iterable is captured first, but fragile.
        for rel, doc in doc.get('_embedded', {}).items():
            if isinstance(doc, list):
                ld[rel] = [self._recursively_embed(d) for d in doc]
            else:
                ld[rel] = self._recursively_embed(doc)
        return ld

    def _recursively_embed(self, doc, update_state=True):
        '''Crafts a navigator from a hal-json embedded document'''
        self_link = None
        self_uri = utils.getpath(doc, '_links.self.href')
        if self_uri is not None:
            # Embedded self links may be relative; resolve against our uri.
            uri = urlparse.urljoin(self.uri, self_uri)
            self_link = Link(
                uri=uri,
                properties=utils.getpath(doc, '_links.self')
            )
        curies = utils.getpath(doc, '_links.curies')
        state = utils.getstate(doc)
        # Without a self link the embedded doc becomes an orphan navigator.
        if self_link is None:
            nav = OrphanHALNavigator(
                link=None,
                response=None,
                parent=self,
                core=self._core,
                curies=curies,
                state=state,
            )
        else:
            nav = HALNavigator(
                link=self_link,
                response=None,
                core=self._core,
                curies=curies,
                state=state,
            )
        if update_state:
            nav.state = state
        links = self._make_links_from(doc)
        if links is not None:
            nav._links = links
        embedded = self._make_embedded_from(doc)
        if embedded is not None:
            nav._embedded = embedded
        return nav

    def _navigator_or_thunk(self, link):
        '''Crafts a navigator or from a hal-json link dict.
        If the link is relative, the returned navigator will have a
        uri that relative to this navigator's uri.
        If the link passed in is templated, a PartialNavigator will be
        returned instead.
        '''
        # resolve relative uris against the current uri
        uri = urlparse.urljoin(self.uri, link['href'])
        link_obj = Link(uri=uri, properties=link)
        if link.get('templated'):
            # Can expand into a real HALNavigator
            return PartialNavigator(link_obj, core=self._core)
        else:
            return HALNavigator(link_obj, core=self._core)

    def _can_parse(self, content_type):
        '''Whether this navigator can parse the given content-type.
        Checks that the content_type matches one of the types specified
        in the 'Accept' header of the request, if supplied.
        If not supplied, matches against the default'''
        content_type, content_subtype, content_param = utils.parse_media_type(content_type)
        for accepted in self.headers.get('Accept', self.DEFAULT_CONTENT_TYPE).split(','):
            # NOTE(review): `type` shadows the builtin within this loop.
            type, subtype, param = utils.parse_media_type(accepted)
            # if either accepted_type or content_type do not
            # contain a parameter section, then it will be
            # optimistically ignored
            matched = (type == content_type) \
                and (subtype == content_subtype) \
                and (param == content_param or not (param and content_param))
            if matched:
                return True
        return False

    def _parse_content(self, text):
        '''Parses the content of a response doc into the correct
        format for .state.
        '''
        try:
            return json.loads(text)
        except ValueError:
            raise exc.UnexpectedlyNotJSON(
                "The resource at {.uri} wasn't valid JSON", self)

    def _update_self_link(self, link, headers):
        '''Update the self link of this navigator'''
        self.self.props.update(link)
        # Set the self.type to the content_type of the returned document
        self.self.props['type'] = headers.get(
            'Content-Type', self.DEFAULT_CONTENT_TYPE)
        # NOTE(review): the bare expression below is a no-op -- presumably
        # leftover from a refactor; confirm before removing.
        self.self.props

    def _ingest_response(self, response):
        '''Takes a response object and ingests state, links, embedded
        documents and updates the self link of this navigator to
        correspond. This will only work if the response is valid
        JSON
        '''
        self.response = response
        if self._can_parse(response.headers['Content-Type']):
            hal_json = self._parse_content(response.text)
        else:
            raise exc.HALNavigatorError(
                message="Unexpected content type! Wanted {0}, got {1}"
                .format(self.headers.get('Accept', self.DEFAULT_CONTENT_TYPE),
                        self.response.headers['content-type']),
                nav=self,
                status=self.response.status_code,
                response=self.response,
            )
        self._links = self._make_links_from(hal_json)
        self._embedded = self._make_embedded_from(hal_json)
        # Set properties from new document's self link
        self._update_self_link(
            hal_json.get('_links', {}).get('self', {}),
            response.headers,
        )
        # Set curies if available
        self.curies = dict(
            (curie['name'], curie['href'])
            for curie in
            hal_json.get('_links', {}).get('curies', []))
        # Set state by removing HAL attributes
        self.state = utils.getstate(hal_json)
class HALNavigator(HALNavigatorBase):
    '''The main navigation entity'''

    def __call__(self, raise_exc=True):
        # Fetch on first use; afterwards return a copy of the cached state
        # so callers cannot mutate the internal document.
        if not self.resolved:
            return self.fetch(raise_exc=raise_exc)
        else:
            return self.state.copy()

    def _create_navigator(self, response, raise_exc=True):
        '''Create the appropriate navigator from an api response'''
        method = response.request.method
        # TODO: refactor once hooks in place
        # A mutating request that redirects via Location points at a real
        # resource: return a fresh, unfetched navigator for that uri.
        if method in (POST, PUT, PATCH, DELETE) \
                and response.status_code in (
                    http_client.CREATED,
                    http_client.FOUND,
                    http_client.SEE_OTHER,
                    http_client.NO_CONTENT) \
                and 'Location' in response.headers:
            uri = urlparse.urljoin(self._core.root, response.headers['Location'])
            nav = HALNavigator(
                link=Link(uri=uri),
                core=self._core
            )
            # We don't ingest the response because we haven't fetched
            # the newly created resource yet
        elif method in (POST, PUT, PATCH, DELETE):
            # No Location header: wrap whatever came back in an orphan.
            nav = OrphanHALNavigator(
                link=None,
                core=self._core,
                response=response,
                parent=self,
            )
            nav._ingest_response(response)
        elif method == GET:
            nav = self
            nav._ingest_response(response)
        else:  # pragma: nocover
            assert False, "This shouldn't happen"
        return nav

    def _request(self, method, body=None, raise_exc=True, headers=None, files=None):
        '''Fetches HTTP response using the passed http method. Raises
        HALNavigatorError if response is in the 400-500 range.'''
        headers = headers or {}
        if body and 'Content-Type' not in headers:
            headers.update({'Content-Type': 'application/json'})
        # dict bodies go through requests' json= path; anything else is
        # sent raw via data=.
        response = self._core.session.request(
            method,
            self.uri,
            data=body if not isinstance(body, dict) else None,
            json=body if isinstance(body, dict) else None,
            files=files,
            headers=headers,
            allow_redirects=False,
        )
        nav = self._create_navigator(response, raise_exc=raise_exc)
        if raise_exc and not response:
            raise exc.HALNavigatorError(
                message=response.text,
                status=response.status_code,
                nav=nav,  # may be self
                response=response,
            )
        else:
            return nav

    def fetch(self, raise_exc=True):
        '''Performs a GET request to the uri of this navigator'''
        self._request(GET, raise_exc=raise_exc)  # ingests response
        self.fetched = True
        return self.state.copy()

    def create(self, body=None, raise_exc=True, headers=None, **kwargs):
        '''Performs an HTTP POST to the server, to create a
        subordinate resource. Returns a new HALNavigator representing
        that resource.
        `body` may either be a string or a dictionary representing json
        `headers` are additional headers to send in the request
        '''
        return self._request(POST, body, raise_exc, headers, **kwargs)

    def delete(self, raise_exc=True, headers=None, files=None):
        '''Performs an HTTP DELETE to the server, to delete resource(s).
        `headers` are additional headers to send in the request'''
        return self._request(DELETE, None, raise_exc, headers, files)

    def upsert(self, body, raise_exc=True, headers=False, files=None):
        '''Performs an HTTP PUT to the server. This is an idempotent
        call that will create the resource this navigator is pointing
        to, or will update it if it already exists.
        `body` may either be a string or a dictionary representing json
        `headers` are additional headers to send in the request
        '''
        # NOTE(review): default headers=False (not None) looks unintended,
        # but `False or {}` in _request still yields {}, so behavior matches
        # delete/create -- confirm before normalizing.
        return self._request(PUT, body, raise_exc, headers, files)

    def patch(self, body, raise_exc=True, headers=False, files=None):
        '''Performs an HTTP PATCH to the server. This is a
        non-idempotent call that may update all or a portion of the
        resource this navigator is pointing to. The format of the
        patch body is up to implementations.
        `body` may either be a string or a dictionary representing json
        `headers` are additional headers to send in the request
        '''
        return self._request(PATCH, body, raise_exc, headers, files)
class OrphanHALNavigator(HALNavigatorBase):
    '''A Special navigator that is the result of a non-GET
    This navigator cannot be fetched or created, but has a special
    property called `.parent` that refers to the navigator this one
    was created from. If the result is a HAL document, it will be
    populated properly
    '''

    def __init__(self, link, core,
                 response=None,
                 state=None,
                 curies=None,
                 _links=None,
                 parent=None,
                 ):
        super(OrphanHALNavigator, self).__init__(
            link, core, response, state, curies, _links)
        self.parent = parent

    def __call__(self, *args, **kwargs):
        # Orphans cannot be (re)fetched; hand back a copy of the state.
        return self.state.copy()

    def __repr__(self):  # pragma: nocover
        rel = self.parent.self.relative_uri(self._core.root)
        return "{cls}({name}{path})".format(
            cls=type(self).__name__,
            name=self.apiname,
            path=utils.objectify_uri(rel),
        )

    def _can_parse(self, content_type):
        '''Anything goes: an unparseable body becomes an empty document.'''
        return True

    def _parse_content(self, text):
        '''Try to parse as HAL, but on failure use an empty dict'''
        try:
            return super(OrphanHALNavigator, self)._parse_content(text)
        except exc.UnexpectedlyNotJSON:
            return {}

    def _update_self_link(self, link, headers):
        '''OrphanHALNavigator has no link object'''
        pass

    def _navigator_or_thunk(self, link):
        '''We need to resolve relative links against the parent uri'''
        return HALNavigatorBase._navigator_or_thunk(self.parent, link)
| {
"repo_name": "deontologician/restnavigator",
"path": "restnavigator/halnav.py",
"copies": "1",
"size": "23952",
"license": "mit",
"hash": 3088228122453363700,
"line_mean": 33.4633093525,
"line_max": 91,
"alpha_frac": 0.5708917836,
"autogenerated": false,
"ratio": 4.274089935760172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5344981719360171,
"avg_score": null,
"num_lines": null
} |
'''A library to allow navigating rest apis easy.'''
from __future__ import print_function
__version__ = '0.2'
import copy
from weakref import WeakValueDictionary
import functools
import httplib
import re
import json
import urlparse
import webbrowser
import urllib
import requests
import unidecode
import uritemplate
from restnavigator import exc, utils
def autofetch(fn):
    '''A decorator used by Navigators that fetches the resource if necessary
    prior to calling the function '''
    @functools.wraps(fn)
    def fetch_first(self, *args, **qargs):
        # Only trigger a GET when no response has been received yet.
        if self.response is None:
            self._GET()
        return fn(self, *args, **qargs)
    return fetch_first
def default_headers():
    '''Default headers for HALNavigator'''
    # Prefer HAL, fall back to plain JSON; advertise the library version.
    agent = 'HALNavigator/{}'.format(__version__)
    return {
        'Accept': 'application/hal+json,application/json',
        'User-Agent': agent,
    }
class HALNavigator(object):
    '''The main navigation entity'''
    # NOTE(review): this implementation is Python 2 only -- it relies on
    # dict.iteritems, httplib, urllib.unquote and urlparse as a module.

    def __init__(
            self, root, apiname=None, auth=None, headers=None, session=None):
        self.root = utils.fix_scheme(root)
        self.apiname = utils.namify(root) if apiname is None else apiname
        self.uri = self.root
        self.profile = None
        self.title = None
        self.type = 'application/hal+json'
        self.curies = None
        self.session = session or requests.Session()
        self.session.auth = auth
        self.session.headers.update(default_headers())
        if headers:
            self.session.headers.update(headers)
        self.response = None
        self.state = None
        self.template_uri = None
        self.template_args = None
        self.parameters = None
        self.templated = False
        self._links = None
        # This is the identity map shared by all descendents of this
        # HALNavigator
        self._id_map = WeakValueDictionary({self.root: self})

    def __repr__(self):
        # Render the uri as a dotted attribute path for readability.
        def path_clean(chunk):
            if not chunk:
                return chunk
            if re.match(r'\d+$', chunk):
                return '[{}]'.format(chunk)
            else:
                return '.' + chunk
        byte_arr = self.relative_uri.encode('utf-8')
        unquoted = urllib.unquote(byte_arr).decode('utf-8')
        nice_uri = unidecode.unidecode(unquoted)
        path = ''.join(path_clean(c) for c in nice_uri.split('/'))
        return "HALNavigator({name}{path})".format(
            name=self.apiname, path=path)

    def authenticate(self, auth):
        '''Allows setting authentication for future requests to the api'''
        self.session.auth = auth

    @property
    def relative_uri(self):
        '''Returns the link of the current uri compared against the api root.
        This is a good candidate for overriding in a subclass if the api you
        are interacting with uses an unconventional uri layout.'''
        if self.uri is None:
            return self.template_uri.replace(self.root, '/')
        else:
            return self.uri.replace(self.root, '/')

    @property
    @autofetch
    def links(self):
        r'''Returns dictionary of navigators from the current resource.'''
        return dict(self._links)

    @property
    def status(self):
        # (status_code, reason) tuple, or None before any response.
        if self.response is not None:
            return (self.response.status_code, self.response.reason)

    def _GET(self, raise_exc=True):
        r'''Handles GET requests for a resource'''
        if self.templated:
            raise exc.AmbiguousNavigationError(
                'This is a templated Navigator. You must provide values for '
                'the template parameters before fetching the resource or else '
                'explicitly null them out with the syntax: N[:]')
        self.response = self.session.get(self.uri)
        try:
            body = self.response.json()
        except ValueError as e:
            if raise_exc:
                raise UnexpectedlyNotJSON(
                    "The resource at {.uri} wasn't valid JSON", self.response)
            else:
                return

        def make_nav(link):
            '''Crafts the Navigators for each link'''
            # Lists of links become a LinkList of (navigator, raw) pairs.
            if isinstance(link, list):
                return utils.LinkList((make_nav(l), l) for l in link)
            templated = link.get('templated', False)
            if not templated:
                uri = urlparse.urljoin(self.uri, link['href'])
                template_uri = None
            else:
                uri = None
                template_uri = urlparse.urljoin(self.uri, link['href'])
            cp = self._copy(uri=uri,
                            template_uri=template_uri,
                            templated=templated,
                            title=link.get('title'),
                            type=link.get('type'),
                            profile=link.get('profile'),
                            )
            if templated:
                cp.uri = None
                cp.parameters = uritemplate.variables(cp.template_uri)
            else:
                cp.template_uri = None
            return cp
        self._links = {}
        for rel, links in body.get('_links', {}).iteritems():
            if rel not in ('self', 'curies'):
                self._links[rel] = make_nav(links)
        self.title = body.get('_links', {}).get('self', {}).get(
            'title', self.title)
        if 'curies' in body.get('_links', {}):
            curies = body['_links']['curies']
            self.curies = {curie['name']: curie['href'] for curie in curies}
        # State is the document minus the HAL bookkeeping keys.
        self.state = {k: v for k, v in self.response.json().iteritems()
                      if k not in ('_links', '_embedded')}
        self.state.pop('_links', None)
        self.state.pop('_embedded', None)
        if raise_exc and not self.response:
            raise HALNavigatorError(self.response.text,
                                    status=self.status,
                                    nav=self,
                                    response=self.response,
                                    )

    def _copy(self, **kwargs):
        '''Creates a shallow copy of the HALNavigator that extra attributes can
        be set on.
        If the object is already in the identity map, that object is returned
        instead.
        If the object is templated, it doesn't go into the id_map
        '''
        if 'uri' in kwargs and kwargs['uri'] in self._id_map:
            return self._id_map[kwargs['uri']]
        cp = copy.copy(self)
        cp._links = None
        cp.response = None
        cp.state = None
        cp.fetched = False
        # Only non-None kwargs are applied to the copy.
        for attr, val in kwargs.iteritems():
            if val is not None:
                setattr(cp, attr, val)
        if not cp.templated:
            self._id_map[cp.uri] = cp
        return cp

    def __eq__(self, other):
        try:
            return self.uri == other.uri and self.apiname == other.apiname
        except Exception:
            return False

    def __ne__(self, other):
        return not self == other

    def __call__(self, raise_exc=True):
        # Fetch lazily on first call; subsequent calls reuse the response.
        if self.response is None:
            self._GET(raise_exc=raise_exc)
        return self.state.copy()

    def fetch(self, raise_exc=True):
        '''Like __call__, but doesn't cache, always makes the request'''
        self._GET(raise_exc=raise_exc)
        return self.state.copy()

    def create(self,
               body,
               raise_exc=True,
               content_type='application/json',
               json_cls=None,
               headers=None,
               ):
        '''Performs an HTTP POST to the server, to create a subordinate
        resource. Returns a new HALNavigator representing that resource.
        `body` may either be a string or a dictionary which will be serialized
        as json
        `content_type` may be modified if necessary
        `json_cls` is a JSONEncoder to use rather than the standard
        `headers` are additional headers to send in the request'''
        if isinstance(body, dict):
            body = json.dumps(body, cls=json_cls, separators=(',', ':'))
        headers = {} if headers is None else headers
        headers['Content-Type'] = content_type
        response = self.session.post(
            self.uri, data=body, headers=headers, allow_redirects=False)
        if raise_exc and not response:
            raise HALNavigatorError(
                message=response.text,
                status=response.status_code,
                nav=self,
                response=response,
            )
        # A Location header means a concrete resource was created/redirected.
        if response.status_code in (httplib.CREATED,
                                    httplib.ACCEPTED,
                                    httplib.FOUND,
                                    httplib.SEE_OTHER,
                                    ) and 'Location' in response.headers:
            return self._copy(uri=response.headers['Location'])
        else:
            return (response.status_code, response)

    def __iter__(self):
        '''Part of iteration protocol'''
        # Follows 'next' rels until next() raises StopIteration.
        last = self
        while True:
            current = last.next()
            yield current
            last = current

    def __nonzero__(self):
        # we override normal exception throwing since the user seems interested
        # in the boolean value
        if self.response is None:
            self._GET(raise_exc=False)
        return bool(self.response)

    def next(self):
        try:
            return self['next']
        except KeyError:
            raise StopIteration()

    def expand(self, _keep_templated=False, **kwargs):
        '''Expand template args in a templated Navigator.
        if :_keep_templated: is True, the resulting Navigator can be further
        expanded. A Navigator created this way is not part of the id map.
        '''
        if not self.templated:
            raise TypeError(
                "This Navigator isn't templated! You can't expand it.")
        for k, v in kwargs.iteritems():
            if v == 0:
                kwargs[k] = '0'  # uritemplate expands 0's to empty string
        if self.template_args is not None:
            kwargs.update(self.template_args)
        cp = self._copy(uri=uritemplate.expand(self.template_uri, kwargs),
                        templated=_keep_templated,
                        )
        if not _keep_templated:
            cp.template_uri = None
            cp.template_args = None
        else:
            cp.template_args = kwargs
        return cp

    def __getitem__(self, getitem_args):
        r'''Subselector for a HALNavigator'''
        @autofetch
        def dereference(n, rels):
            '''Helper to recursively dereference'''
            if len(rels) == 1:
                ret = n._links[rels[0]]
                if isinstance(ret, list):
                    if len(ret) == 1:
                        return ret[0]
                    else:
                        return [r._copy() if r.templated else r for r in ret]
                else:
                    return ret._copy() if ret.templated else ret
            else:
                return dereference(n[rels[0]], rels[1:])
        rels, qargs, slug, ellipsis = utils.normalize_getitem_args(
            getitem_args)
        if slug and ellipsis:
            raise SyntaxError("':' and '...' syntax cannot be combined!")
        if rels:
            n = dereference(self, rels)
        else:
            n = self
        if qargs or slug:
            n = n.expand(_keep_templated=ellipsis, **qargs)
        return n

    @autofetch
    def docsfor(self, rel):
        '''Obtains the documentation for a link relation. Opens in a webbrowser
        window'''
        prefix, _rel = rel.split(':')
        if prefix in self.curies:
            doc_url = uritemplate.expand(self.curies[prefix], {'rel': _rel})
        else:
            doc_url = rel
        print('opening', doc_url)
        webbrowser.open(doc_url)
class HALNavigatorError(Exception):
    '''Raised when a response is an error
    Has all of the attributes of a normal HALNavigator. The error body can be
    returned by examining response.body '''

    def __init__(self, message, nav=None, status=None, response=None):
        super(HALNavigatorError, self).__init__(message)
        self.message = message
        self.nav = nav
        self.response = response
        self.status = status
class UnexpectedlyNotJSON(TypeError):
    '''Raised when a non-json parseable resource is gotten'''

    def __init__(self, msg, response):
        self.msg = msg
        self.response = response

    def __repr__(self):  # pragma: nocover
        # Bug fix: the original used '{.msg}:...{.response}'.format(self) --
        # two auto-numbered replacement fields with a single argument, which
        # raises IndexError at format time. Number the fields explicitly.
        return '{0.msg}:\n\n\n{0.response}'.format(self)
| {
"repo_name": "EricSchles/rest_navigator",
"path": "restnavigator/halnav.py",
"copies": "1",
"size": "12660",
"license": "mit",
"hash": -1084705765944690700,
"line_mean": 33.9723756906,
"line_max": 79,
"alpha_frac": 0.5488151659,
"autogenerated": false,
"ratio": 4.349020954998283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000039463299131807416,
"num_lines": 362
} |
"""A library to control a RobertSonics WavTrigger through a serial port
"""
from __future__ import absolute_import, division, print_function
from os import errno
import serial
import struct
__version__ = '0.1.2'
__author__ = 'Eberhard Fahle'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Eberhard Fahle'
#Constants for the commands a wavtrigger understands
# NOTE(review): each frame appears to follow the layout
#   0xF0 0xAA <frame-length> <command> [args...] 0x55
# -- confirm against the RobertSonics WavTrigger serial protocol notes.
# Reading data back from a WavTrigger
# Firmware version
_WT_GET_VERSION = bytearray([0xF0,0xAA,0x05,0x01,0x55])
# Number of polyphonic voices and number of tracks on sd-card
_WT_GET_SYS_INFO = bytearray([0xF0,0xAA,0x05,0x02,0x55])
# List of currently playing tracks
_WT_GET_STATUS = bytearray([0xF0,0xAA,0x05,0x07,0x55])
# Timeout when waiting for the data from the Get-Status command
_WT_GET_STATUS_TIMEOUT = 0.25
# Playing individual tracks
# The two zero bytes in each track command are placeholders that get
# filled with the 16-bit track number before sending.
_WT_TRACK_SOLO = bytearray([0xF0,0xAA,0x08,0x03,0x00,0x00,0x00,0x55])
_WT_TRACK_PLAY = bytearray([0xF0,0xAA,0x08,0x03,0x01,0x00,0x00,0x55])
_WT_TRACK_PAUSE = bytearray([0xF0,0xAA,0x08,0x03,0x02,0x00,0x00,0x55])
_WT_TRACK_RESUME = bytearray([0xF0,0xAA,0x08,0x03,0x03,0x00,0x00,0x55])
_WT_TRACK_STOP = bytearray([0xF0,0xAA,0x08,0x03,0x04,0x00,0x00,0x55])
_WT_TRACK_LOOP_ON = bytearray([0xF0,0xAA,0x08,0x03,0x05,0x00,0x00,0x55])
_WT_TRACK_LOOP_OFF = bytearray([0xF0,0xAA,0x08,0x03,0x06,0x00,0x00,0x55])
_WT_TRACK_LOAD = bytearray([0xF0,0xAA,0x08,0x03,0x07,0x00,0x00,0x55])
# Stopping and resuming several tracks at once
_WT_STOP_ALL = bytearray([0xF0,0xAA,0x05,0x04,0x55])
_WT_RESUME_ALL = bytearray([0xF0,0xAA,0x05,0x0B,0x55])
# Mixer settings and fader
_WT_VOLUME = bytearray([0xF0,0xAA,0x07,0x05,0x00,0x00,0x55])
_WT_TRACK_VOLUME = bytearray([0xF0,0xAA,0x09,0x08,0x00,0x00,0x00,0x00,0x55])
_WT_FADE = bytearray([0xF0,0xAA,0x0C,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x55])
# Pitch bending
_WT_SAMPLERATE = bytearray([0xF0,0xAA,0x07,0x0C,0x00,0x00,0x55])
# Switching the Power amp on or off (not implemented!)
_WT_AMP_POWER = bytearray([0xF0,0xAA,0x06,0x09,0x00,0x55])
class WavTrigger(object):
    """A controller for a RobertSonics WavTrigger.

    Command packets are built from the module-level ``_WT_*`` templates.
    Unlike the original implementation, every command method patches a
    *copy* of the template before writing it, so the shared templates are
    never mutated and instances cannot interfere with each other.
    """

    def __init__(self, device, baud=57600, timeout=5.0):
        """Open a serial port to the device and read the
        hardware version and info from the WavTrigger.

        :param device: The serial port where the WavTrigger is listening.
        :type device: str
        :param baud: The baudrate to be used on the port. The value must match
            the baudrate set in the init file of the WavTrigger. The default
            value (57600) seems to be fast enough for all purposes.
        :type baud: int
        :param timeout: A timeout for reading and writing on the port.
            The default (5.0 seconds) is plenty. If this limit is reached
            you can be quite sure to have lost the connection.
        :type timeout: float
        """
        self._wt = serial.Serial(port=device, baudrate=baud)
        self._wt.timeout = timeout
        if self._wt.isOpen():
            self._version = self._getVersion()
            self._voices, self._tracks = self._getSysInfo()

    def close(self):
        """Closes the port to the WavTrigger. Does not stop playing tracks."""
        self._wt.close()

    def isOpen(self):
        """Test if a serial connection to the WavTrigger is established.

        :returns: bool -- True if the device is open, False otherwise
        """
        return self._wt.isOpen()

    @property
    def version(self):
        """str -- The firmware version string reported by the WavTrigger."""
        return self._version

    @property
    def voices(self):
        """int -- Number of polyphonic voices that can be played simultaneously."""
        return self._voices

    @property
    def tracks(self):
        """int -- Total number of tracks the WavTrigger found on the SD-Card."""
        return self._tracks

    def play(self, track):
        """Play a track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_PLAY, track)

    def solo(self, track):
        """Play a track solo. Stops all currently playing tracks
        and starts the solo track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_SOLO, track)

    def stop(self, track):
        """Stop a playing track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_STOP, track)

    def pause(self, track):
        """Pause a track. Stops a playing track until
        :meth:`resume` is called for the track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_PAUSE, track)

    def resume(self, track):
        """Resume playing a track that has been paused previously.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_RESUME, track)

    def load(self, track):
        """Load a track into the memory of the WavTrigger and pause it.
        The track can then be played using the :meth:`resume` or
        :meth:`resumeAll` commands.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_LOAD, track)

    def loop(self, track):
        """Set the loop flag for a track. When the track is started it is
        played in a loop until it is stopped, and stopping does not clear
        the flag; use :meth:`unLoop` to clear it.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_LOOP_ON, track)

    def unLoop(self, track):
        """Clear the loop flag for a track. See :meth:`loop`.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_LOOP_OFF, track)

    def stopAll(self):
        """Stop all playing tracks."""
        self._wt.write(_WT_STOP_ALL)

    def resumeAll(self):
        """Restart all resumed tracks."""
        self._wt.write(_WT_RESUME_ALL)

    def masterGain(self, gain):
        """Set the gain for the WavTrigger output.

        :param gain: Gain for the WavTrigger output, valid range -70..+10.
        :type gain: int
        :raises ValueError: if *gain* is outside the valid range.
        """
        if gain < -70 or gain > 10:
            raise ValueError('Gain argument range is from -70 to +10')
        cmd = bytearray(_WT_VOLUME)
        cmd[4], cmd[5] = self._intToLsb(gain)
        self._wt.write(cmd)

    def trackGain(self, track, gain):
        """Set the gain for a specific track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        :param gain: Gain for the track, valid range -70..+10.
        :type gain: int
        :raises ValueError: if *gain* is outside the valid range.
        """
        if gain < -70 or gain > 10:
            raise ValueError('Gain argument range is from -70 to +10')
        cmd = bytearray(_WT_TRACK_VOLUME)
        cmd[4], cmd[5] = self._intToLsb(track)
        cmd[6], cmd[7] = self._intToLsb(gain)
        self._wt.write(cmd)

    def masterVolume(self, volume):
        """Set the volume for the WavTrigger output. This method never
        amplifies the signal as :meth:`masterGain` does when called with
        gain values > 0, which prevents distortion in the output signal.

        :param volume: Volume for the WavTrigger output, valid range 0..100.
        :type volume: int
        :raises ValueError: if *volume* is outside the valid range.
        """
        cmd = bytearray(_WT_VOLUME)
        cmd[4], cmd[5] = self._intToLsb(self._volumeToDb(volume))
        self._wt.write(cmd)

    def trackVolume(self, track, volume):
        """Set the volume for a track. This method never amplifies the
        track signal as :meth:`trackGain` does when called with gain
        values > 0, which prevents distortion in the output signal.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        :param volume: Volume for the track, valid range 0..100.
        :type volume: int
        :raises ValueError: if *volume* is outside the valid range.
        """
        cmd = bytearray(_WT_TRACK_VOLUME)
        cmd[4], cmd[5] = self._intToLsb(track)
        cmd[6], cmd[7] = self._intToLsb(self._volumeToDb(volume))
        self._wt.write(cmd)

    def pitch(self, offset):
        """Set an offset for the samplerate that the WavTrigger uses.
        A negative offset lowers the tone, a positive offset raises it.

        :param offset: Offset to the samplerate, valid range -32767..+32767
            (values outside the range are clamped).
        :type offset: int
        """
        # BUG FIX: the original lower clamp assigned to a misspelled
        # variable ('ofset'), so out-of-range negative offsets were sent
        # to the device unclamped.
        if offset > 32767:
            offset = 32767
        elif offset < -32767:
            offset = -32767
        cmd = bytearray(_WT_SAMPLERATE)
        cmd[4], cmd[5] = self._intToLsb(offset)
        self._wt.write(cmd)

    def fade(self, track, volume, time):
        """Fade the track volume from the current volume level to
        a lower or higher volume.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        :param volume: The target volume for the track, valid range 0..100.
        :type volume: int
        :param time: The time in milliseconds for the fade from the current
            to the target level.
        :type time: int
        """
        cmd = bytearray(_WT_FADE)
        cmd[4], cmd[5] = self._intToLsb(track)
        cmd[6], cmd[7] = self._intToLsb(self._volumeToDb(volume))
        cmd[8], cmd[9] = self._intToLsb(time)
        cmd[10] = 0x00  # 0x00: keep playing after the fade completes
        self._wt.write(cmd)

    def fadeOut(self, track, time):
        """Fade the track volume from the current volume level to zero,
        then stop the track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        :param time: The time in milliseconds for the fade out from the
            current level to silence.
        :type time: int
        """
        cmd = bytearray(_WT_FADE)
        cmd[4], cmd[5] = self._intToLsb(track)
        cmd[6], cmd[7] = self._intToLsb(self._volumeToDb(0))
        cmd[8], cmd[9] = self._intToLsb(time)
        cmd[10] = 0x01  # 0x01: stop the track when the fade completes
        self._wt.write(cmd)

    def playing(self):
        """Get a list of the currently playing tracks on the WavTrigger.

        :returns: list -- the track numbers currently playing (empty when
            idle), or None if the reply could not be parsed.
        """
        self._wt.write(_WT_GET_STATUS)
        header = self._readFromPort(4)
        # Replies are framed 0xF0 0xAA <length> <type> ... 0x55;
        # type 0x83 is the track-status report.
        if header[:2] != b'\xF0\xAA' or header[3:4] != b'\x83':
            # Unexpected reply: drop whatever else is buffered.
            self._wt.flushInput()
            return None
        payloadLen = ord(header[2:3]) - 4
        payload = self._readFromPort(payloadLen)
        if payload[-1:] != b'\x55':
            return None
        payload = payload[:-1]
        # Each playing track is reported as a 16 bit little-endian number.
        return [self._lsbToInt(payload[i:i + 2])
                for i in range(0, len(payload), 2)]

    def amplifierOn(self):
        """Switch the on-board amplifier on."""
        self._setAmplifier(0x01)

    def amplifierOff(self):
        """Switch the on-board amplifier off."""
        self._setAmplifier(0x00)

    def _setAmplifier(self, state):
        """Send the amplifier power command with the given state byte."""
        cmd = bytearray(_WT_AMP_POWER)
        cmd[4] = state
        self._wt.write(cmd)

    def _sendTrackCommand(self, template, track):
        """Validate *track*, patch it into a copy of *template* and send it.
        Invalid track numbers are silently ignored (original behaviour).
        """
        if self._isValidTrackNumber(track):
            self._wt.write(self._setTrackForCommand(template, track))

    def _isValidTrackNumber(self, track):
        """Simple test for valid track numbers.

        NOTE: only the lower bound is checked; the documented upper limit
        of 999 is not enforced (matches the original behaviour).
        """
        return track > 0

    def _lsbToInt(self, lsbValue):
        """Convert a track number from 2 bytes in lsb order to an int."""
        return struct.unpack('<h', lsbValue)[0]

    def _intToLsb(self, value):
        """Convert an int value to a 2 byte tuple in lsb order."""
        return (value & 0xFF, (value >> 8) & 0xFF)

    def _setTrackForCommand(self, cmd, track):
        """Return a copy of *cmd* with the track number patched into
        bytes 5 and 6 (little endian). Copying keeps the module-level
        command templates pristine (the original mutated them in place).
        """
        data = bytearray(cmd)
        data[5], data[6] = self._intToLsb(track)
        return data

    def _volumeToDb(self, vol):
        """Map a volume level of 0..100 to the gain level of -70..0 dB
        which is used by the WavTrigger.

        :raises ValueError: if *vol* is outside 0..100.
        """
        if vol < 0 or vol > 100:
            raise ValueError('Volume argument range is from 0 to 100')
        return -70 + int(vol / 1.428)

    def _getVersion(self):
        """Read the firmware version string from the device.
        Returns '' when the reply is short or malformed.
        """
        if self._wt.write(_WT_GET_VERSION) != len(_WT_GET_VERSION):
            return ''
        v = self._readFromPort(25)
        # 0x81 is the version-report message type.
        if v[:4] != b'\xF0\xAA\x19\x81' or v[-1:] != b'\x55':
            return ''
        return v[4:-1].decode('utf8').strip()

    def _getSysInfo(self):
        """Read system info from the device: the number of polyphonic
        voices and the number of tracks found on the SD-card.
        Returns (0, 0) when the reply is short or malformed.
        """
        if self._wt.write(_WT_GET_SYS_INFO) != len(_WT_GET_SYS_INFO):
            return (0, 0)
        v = self._readFromPort(8)
        # 0x82 is the sysinfo-report message type.
        if v[:4] != b'\xF0\xAA\x08\x82' or v[-1:] != b'\x55':
            return (0, 0)
        return (ord(v[4:5]), self._lsbToInt(v[5:7]))

    def _readFromPort(self, size):
        """Read exactly *size* bytes from the serial port.

        :raises OSError: with errno.ETIMEDOUT when the read returns fewer
            bytes than requested (i.e. the port timeout was hit).
        """
        result = self._wt.read(size)
        if len(result) != size:
            raise OSError(errno.ETIMEDOUT, "Connection timed out")
        return result

    def __del__(self):
        """Close the serial port when the instance is garbage collected.

        BUG FIX: the original named this ``__delete__``, which is part of
        the descriptor protocol and is never invoked on object
        destruction; ``__del__`` is the finalizer that was intended.
        """
        # Guard against a partially constructed instance (failed __init__).
        if hasattr(self, '_wt'):
            self.close()

    # Backwards-compatible alias for callers that invoked the old
    # (misnamed) method explicitly.
    __delete__ = __del__
| {
"repo_name": "wayoda/rswt",
"path": "rswt.py",
"copies": "1",
"size": "15701",
"license": "mit",
"hash": -2205861493957191200,
"line_mean": 32.1244725738,
"line_max": 91,
"alpha_frac": 0.5974778677,
"autogenerated": false,
"ratio": 3.5442437923250565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46417216600250566,
"avg_score": null,
"num_lines": null
} |
"""A library to evaluate MDM on a single GPU.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
from pathlib import Path
import data_provider
import math
import menpo
import matplotlib
import mdm_model
import mdm_train
import numpy as np
import os.path
import tensorflow as tf
import time
import utils
import slim
import menpo.io as mio
# Do not use a gui toolkit for matlotlib.
# (The Agg backend must be selected before pyplot is imported so the CED
# plot can be rendered on headless evaluation machines.)
matplotlib.use('Agg')
# Command-line flags (TensorFlow 0.x flag API).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', 'ckpt/eval',
                           """Directory where to write event logs.""")
tf.app.flags.DEFINE_string('checkpoint_dir', 'ckpt/train/',
                           """Directory where to read model checkpoints.""")
# Flags governing the frequency of the eval.
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
                            """How often to run the eval.""")
tf.app.flags.DEFINE_boolean('run_once', False,
                            """Whether to run eval only once.""")
# Flags governing the data used for the eval.
tf.app.flags.DEFINE_integer('num_examples', 224,
                            """Number of examples to run.""")
tf.app.flags.DEFINE_string('dataset_path', 'lfpw/testset/*.png',
                           """The dataset path to evaluate.""")
tf.app.flags.DEFINE_string('device', '/cpu:0', 'the device to eval on.')
def plot_ced(errors, method_names=None):
  """Render a cumulative error distribution (CED) plot to an RGB array.

  Args:
    errors: A list of per-method error lists (one list per legend entry).
    method_names: Legend entries, one per method. Defaults to ['MDM'].
  Returns:
    An (height, width, 3) uint8 numpy array with the rendered figure.
  """
  from matplotlib import pyplot as plt
  from menpofit.visualize import plot_cumulative_error_distribution
  import numpy as np
  # BUG FIX (idiom): the original used a mutable default argument
  # (method_names=['MDM']), which is shared across calls; substitute the
  # default inside the function instead.
  if method_names is None:
    method_names = ['MDM']
  # plot the ced and store it at the root.
  fig = plt.figure()
  fig.add_subplot(111)
  plot_cumulative_error_distribution(errors, legend_entries=method_names,
                                     error_range=(0, 0.09, 0.005))
  # shift the main graph to make room for the legend
  ax = plt.gca()
  box = ax.get_position()
  ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
  fig.canvas.draw()
  # np.fromstring on binary data is deprecated; frombuffer is the
  # documented replacement.  The .copy() keeps the array writable, as the
  # fromstring result was.
  data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
  data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
  plt.clf()
  return data
def _eval_once(saver, summary_writer, rmse_op, summary_op):
  """Runs Eval once.

  Restores the most recent checkpoint from FLAGS.checkpoint_dir, runs
  `rmse_op` over FLAGS.num_examples inputs via the queue runners, and
  writes the resulting statistics (mean RMSE, the fraction of examples
  with error below 0.05 / 0.08, and a rendered CED plot) as summaries.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    rmse_op: rmse_op.
    summary_op: Summary op.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      if os.path.isabs(ckpt.model_checkpoint_path):
        # Restores from checkpoint with absolute path.
        saver.restore(sess, ckpt.model_checkpoint_path)
      else:
        # Restores from checkpoint with relative path.
        saver.restore(sess, os.path.join(FLAGS.checkpoint_dir,
                                         ckpt.model_checkpoint_path))
      # Assuming model_checkpoint_path looks something like:
      # /my-favorite-path/imagenet_train/model.ckpt-0,
      # extract global_step from it.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      print('Succesfully loaded model from %s at step=%s.' %
            (ckpt.model_checkpoint_path, global_step))
    else:
      print('No checkpoint file found')
      return
    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))
      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
      # Counts the number of correct predictions.
      errors = []
      total_sample_count = num_iter * FLAGS.batch_size
      step = 0
      print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.dataset_path))
      start_time = time.time()
      while step < num_iter and not coord.should_stop():
        rmse = sess.run(rmse_op)
        errors.append(rmse)
        step += 1
        # Report throughput every 20 batches.
        # NOTE(review): the format string is missing a space between
        # '%.3f' and 'sec/batch' in the printed output.
        if step % 20 == 0:
          duration = time.time() - start_time
          sec_per_batch = duration / 20.0
          examples_per_sec = FLAGS.batch_size / sec_per_batch
          print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f'
                'sec/batch)' % (datetime.now(), step, num_iter,
                                examples_per_sec, sec_per_batch))
          start_time = time.time()
      # Flatten the per-batch RMSE arrays into one vector of per-example
      # errors before computing summary statistics.
      errors = np.vstack(errors).ravel()
      mean_rmse = errors.mean()
      # "AUC" here is the fraction of examples with error below the
      # threshold.
      auc_at_08 = (errors < .08).mean()
      auc_at_05 = (errors < .05).mean()
      ced_image = plot_ced([errors.tolist()])
      ced_plot = sess.run(tf.merge_summary([tf.image_summary('ced_plot', ced_image[None, ...])]))
      print('Errors', errors.shape)
      print('%s: mean_rmse = %.4f, auc @ 0.05 = %.4f, auc @ 0.08 = %.4f [%d examples]' %
            (datetime.now(), errors.mean(), auc_at_05, auc_at_08, total_sample_count))
      # Merge the scalar metrics into the regular summary and tag both
      # with the checkpoint's global step.
      summary = tf.Summary()
      summary.ParseFromString(sess.run(summary_op))
      summary.value.add(tag='AUC @ 0.08', simple_value=float(auc_at_08))
      summary.value.add(tag='AUC @ 0.05', simple_value=float(auc_at_05))
      summary.value.add(tag='Mean RMSE', simple_value=float(mean_rmse))
      summary_writer.add_summary(ced_plot, global_step)
      summary_writer.add_summary(summary, global_step)
    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
def flip_predictions(predictions, shapes):
  """Mirror landmark predictions horizontally, given the image shapes.

  Wraps each prediction in a menpo PointCloud, applies the 68-landmark
  mirroring helper, and returns the resulting points as a float32 array.
  """
  mirrored = [
      utils.mirror_landmarks_68(menpo.shape.PointCloud(points), shape).points
      for points, shape in zip(predictions, shapes)
  ]
  return np.array(mirrored, np.float32)
def evaluate(dataset_path):
  """Evaluate model on Dataset for a number of steps.

  Builds the evaluation graph — including a second, horizontally mirrored
  input pipeline whose predictions are flipped back and averaged with the
  normal ones — then repeatedly calls `_eval_once` every
  FLAGS.eval_interval_secs seconds (or once, when FLAGS.run_once is set).
  """
  with tf.Graph().as_default(), tf.device('/cpu:0'):
    train_dir = Path(FLAGS.checkpoint_dir)
    reference_shape = mio.import_pickle(train_dir / 'reference_shape.pkl')
    images, gt_truth, inits, _ = data_provider.batch_inputs(
        [dataset_path], reference_shape,
        batch_size=FLAGS.batch_size, is_training=False)
    # Second pipeline over the same data with mirror_image=True; its
    # predictions are un-mirrored by flip_predictions below.
    mirrored_images, _, mirrored_inits, shapes = data_provider.batch_inputs(
        [dataset_path], reference_shape,
        batch_size=FLAGS.batch_size, is_training=False, mirror_image=True)
    print('Loading model...')
    # Build a Graph that computes the logits predictions from the
    # inference model.
    with tf.device(FLAGS.device):
      patch_shape = (FLAGS.patch_size, FLAGS.patch_size)
      pred, _, _ = mdm_model.model(images, inits, patch_shape=patch_shape)
      # Share the model weights between the two pipelines.
      tf.get_variable_scope().reuse_variables()
      pred_mirrored, _, _ = mdm_model.model(
          mirrored_images, mirrored_inits, patch_shape=patch_shape)
    pred_images, = tf.py_func(utils.batch_draw_landmarks,
                              [images, pred], [tf.float32])
    gt_images, = tf.py_func(utils.batch_draw_landmarks,
                            [images, gt_truth], [tf.float32])
    summaries = []
    summaries.append(tf.image_summary('images',
                                      tf.concat(2, [gt_images, pred_images]), max_images=5))
    # Average the direct prediction with the un-mirrored prediction from
    # the flipped pipeline.
    avg_pred = pred + tf.py_func(flip_predictions, (pred_mirrored, shapes), (tf.float32, ))[0]
    avg_pred /= 2.
    # Calculate predictions.
    norm_error = mdm_model.normalized_rmse(avg_pred, gt_truth)
    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        mdm_train.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_summary(summaries)
    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)
    while True:
      _eval_once(saver, summary_writer, norm_error, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
if __name__ == '__main__':
  # Run evaluation on the flag-configured dataset (loops until
  # interrupted unless --run_once is set).
  evaluate(FLAGS.dataset_path)
| {
"repo_name": "trigeorgis/mdm",
"path": "mdm_eval.py",
"copies": "1",
"size": "8331",
"license": "bsd-3-clause",
"hash": 5910222370436099000,
"line_mean": 35.8628318584,
"line_max": 97,
"alpha_frac": 0.6272956428,
"autogenerated": false,
"ratio": 3.5196451204055768,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4646940763205577,
"avg_score": null,
"num_lines": null
} |
""" A library to export some stats that we can use for monitoring.
"""
def export(values):
  """Render *values* as newline-separated 'key value' monitoring lines,
  sorted by key. Values are converted via build_value, keys via escape.
  """
  return '\n'.join(
      '%s %s' % (escape(key), build_value(val))
      for key, val in sorted(values.items()))
class ExportedMap(object):
  """Renders a labelled dict as 'map:<label> k:v k:v ...' (keys sorted)."""
  def __init__(self, label, value):
    self.label = label
    self.value = value
  def __str__(self):
    pairs = ['%s:%s' % (escape(key), escape(val))
             for key, val in sorted(self.value.items())]
    return ' '.join(['map:%s' % escape(self.label)] + pairs)
class ExportedList(object):
  """Renders a sequence as '/'-separated escaped items."""
  def __init__(self, value):
    self.value = value
  def __str__(self):
    return '/'.join(map(escape, self.value))
class ExportedCallable(object):
  """Defers evaluation: the wrapped callable is invoked at render time."""
  def __init__(self, callable):
    self.callable = callable
  def __str__(self):
    # BUG FIX: the original called the undefined name `make_value`, which
    # raised NameError whenever this object was rendered; `build_value`
    # is the helper that exists in this module.  str() is needed because
    # build_value may return one of the Exported* wrappers rather than a
    # plain string, and __str__ must return str.
    return str(build_value(self.callable()))
def build_value(value):
  """Attempt to do some inference of types.

  (label, dict) tuples become ExportedMap, other tuples/lists become
  ExportedList, callables become ExportedCallable, and anything else is
  escaped directly.
  """
  # a dict, with label
  # BUG FIX: the original used exact type() comparisons (so tuple/list
  # subclasses fell through) and indexed value[1] without a length check,
  # raising IndexError for 1-element tuples.
  if isinstance(value, tuple) and len(value) > 1 and isinstance(value[1], dict):
    return ExportedMap(label=value[0], value=value[1])
  elif isinstance(value, (tuple, list)):
    return ExportedList(value)
  elif callable(value):
    return ExportedCallable(value)
  else:
    return escape(value)
def escape(value):
  """Make a value safe for the wire format: backslashes are doubled,
  colons are backslash-escaped and spaces become dashes.
  """
  # A single translate() pass is equivalent to the original chained
  # replace() calls (each source character maps independently).
  table = str.maketrans({'\\': '\\\\', ':': '\\:', ' ': '-'})
  return str(value).translate(table)
| {
"repo_name": "AloneRoad/Inforlearn",
"path": "common/monitor.py",
"copies": "1",
"size": "1324",
"license": "apache-2.0",
"hash": -8141803161859887000,
"line_mean": 26.5833333333,
"line_max": 79,
"alpha_frac": 0.6178247734,
"autogenerated": false,
"ratio": 3.3266331658291457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9341393531825761,
"avg_score": 0.020612881480676897,
"num_lines": 48
} |
# A library to handle dealing with ATCA MoniCA points.
from requests import Session
import json
import cabb_scheduler.errors
class monicaPoint:
    """A single MoniCA monitoring point: a named value with a description,
    last-update time and error state.

    All setters ignore None arguments and return self for chaining.
    """
    def __init__(self, info=None):
        """Create a point, optionally initialised from an *info* dict with
        any of the keys 'value', 'description', 'pointName', 'updateTime'
        and 'errorState'.
        """
        # BUG FIX (idiom): the original used a mutable default argument
        # (info={}); substitute the default inside the method instead.
        if info is None:
            info = {}
        self.value = None
        self.description = None
        self.pointName = None
        self.updateTime = None
        self.errorState = None
        if "value" in info:
            self.setValue(info['value'])
        if "description" in info:
            self.setDescription(info['description'])
        if "pointName" in info:
            self.pointName = info['pointName']
        if "updateTime" in info:
            self.setUpdateTime(info['updateTime'])
        if "errorState" in info:
            self.setErrorState(info['errorState'])
    def getPointName(self):
        """Return the fully-qualified point name (or None)."""
        return self.pointName
    def setValue(self, value=None):
        """Set the current value; ignored when None. Returns self."""
        if value is not None:
            self.value = value
        return self
    def getValue(self):
        """Return the most recently set value (or None)."""
        return self.value
    def setDescription(self, description=None):
        """Set the point description; ignored when None. Returns self."""
        if description is not None:
            self.description = description
        return self
    def getDescription(self):
        """Return the point description (or None)."""
        return self.description
    def setUpdateTime(self, updateTime=None):
        """Set the last-update time; ignored when None. Returns self."""
        if updateTime is not None:
            self.updateTime = updateTime
        return self
    def getUpdateTime(self):
        """Return the last-update time (or None)."""
        return self.updateTime
    def setErrorState(self, errorState=None):
        """Set the error state; ignored when None. Returns self."""
        if errorState is not None:
            self.errorState = errorState
        return self
    def getErrorState(self):
        """Return the error state (or None)."""
        return self.errorState
class monicaServer:
    """A client for the web MoniCA JSON gateway that tracks and refreshes
    a set of monitoring points.
    """
    def __init__(self, info=None):
        """Create a server description, optionally overriding any of
        'serverName', 'protocol', 'webserverName' and 'webserverPath'
        via the *info* dict.
        """
        # BUG FIX (idiom): avoid the mutable default argument (info={})
        # of the original.
        if info is None:
            info = {}
        self.serverName = "monhost-nar"
        self.protocol = "https"
        self.webserverName = "www.narrabri.atnf.csiro.au"
        self.webserverPath = "cgi-bin/obstools/web_monica/monicainterface_json.pl"
        self.points = []
        if "serverName" in info:
            self.serverName = info['serverName']
        if "protocol" in info:
            self.protocol = info['protocol']
        if "webserverName" in info:
            self.webserverName = info['webserverName']
        if "webserverPath" in info:
            self.webserverPath = info['webserverPath']
    def addPoint(self, pointName=None):
        """Register a point name to be tracked. Returns self."""
        if pointName is not None:
            self.points.append(monicaPoint({ 'pointName': pointName }))
        return self
    def addPoints(self, points=None):
        """Register several point names at once. Returns self."""
        if points:
            for name in points:
                self.addPoint(name)
        return self
    def getPointByName(self, pointName=None):
        """Return the tracked monicaPoint called *pointName*, or None."""
        if pointName is not None:
            for point in self.points:
                if point.getPointName() == pointName:
                    return point
        return None
    def __comms(self, data=None):
        """POST *data* to the MoniCA gateway and return the decoded JSON
        response, or None when there is nothing to send or the response
        body is not valid JSON.
        """
        if data is None:
            return None
        session = Session()
        url = self.protocol + "://" + self.webserverName + "/" + self.webserverPath
        postResponse = session.post(url=url, data=data)
        # BUG FIX (efficiency): the original decoded the response body
        # twice; decode once and return the result.
        try:
            return json.loads(postResponse.text)
        except ValueError:
            # Not JSON (e.g. an HTML error page from the gateway).
            return None
    def updatePoints(self):
        """Fetch fresh values for all registered points in one request.
        Returns True on success, False otherwise.
        """
        allPointNames = [ p.getPointName() for p in self.points ]
        data = { 'action': "points", 'server': self.serverName,
                 'points': ";".join(allPointNames) }
        response = self.__comms(data)
        if response is not None and "pointData" in response:
            for pd in response['pointData']:
                if pd['pointName'] is not None:
                    point = self.getPointByName(pd['pointName'])
                    point.setValue(pd['value'])
                    point.setUpdateTime(pd['time'])
                    # Note the negation: the stored flag is the logical
                    # inverse of the gateway's errorState field.
                    point.setErrorState(not bool(pd['errorState']))
            return True
        return False
# Module-level singleton so the convenience functions below share one
# monicaServer (and therefore one set of registered points).
serverInstance = None
def initialiseServerInstance(*args):
    """Return the shared monicaServer instance, creating it on first call.

    Extra positional arguments are accepted and ignored.
    """
    global serverInstance
    if serverInstance is None:
        serverInstance = monicaServer()
    return serverInstance
def getArray(*args):
    """Return the value of the 'site.misc.array' MoniCA point.

    Performs a network request through the shared server instance; extra
    positional arguments are accepted and ignored.
    """
    server = initialiseServerInstance()
    server.addPoint("site.misc.array").updatePoints()
    return server.getPointByName("site.misc.array").getValue()
def getFrequencies(*args):
    """Return the two current observing frequencies as a list of floats.

    Reads the 'site.misc.obs.freq1'/'freq2' MoniCA points via the shared
    server instance (network request); extra positional arguments are
    accepted and ignored.
    """
    server = initialiseServerInstance()
    server.addPoints([ "site.misc.obs.freq1", "site.misc.obs.freq2" ]).updatePoints()
    freqs = [ float(server.getPointByName("site.misc.obs.freq1").getValue()), float(server.getPointByName("site.misc.obs.freq2").getValue()) ]
    return freqs
| {
"repo_name": "ste616/cabb-schedule-api",
"path": "python/cabb_scheduler/monica_information.py",
"copies": "1",
"size": "4820",
"license": "mit",
"hash": 4166115675816516000,
"line_mean": 33.4285714286,
"line_max": 142,
"alpha_frac": 0.603526971,
"autogenerated": false,
"ratio": 4.130248500428449,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5233775471428449,
"avg_score": null,
"num_lines": null
} |
# A library to handle dealing with v3 of the ATCA calibrator database.
from requests import Session
from xml.dom import minidom
import json
import numpy as np
import cabb_scheduler.errors
# Map from a specific ATCA array configuration (e.g. '6A' or 'H214') to
# the generic array family name ('6km', '375m', ...) used to index the
# calibrator database quality tables.
arrayNames = { '6A': "6km", '6B': "6km", '6C': "6km", '6D': "6km",
               '1.5A': "1.5km", '1.5B': "1.5km", '1.5C': "1.5km", '1.5D': "1.5km",
               '750A': "750m", '750B': "750m", '750C': "750m", '750D': "750m",
               'EW367': "375m", 'EW352': "375m",
               'H214': "375m", 'H168': "375m", 'H75': "375m" }
class calibrator:
    """A single calibrator source from v3 of the ATCA calibrator database.

    Holds the catalogued name/position and per-frequency flux densities,
    plus (after fetchDetails/fetchQualities) the raw measurements and the
    per-array, per-band quality flags.
    """
    def __init__(self, details=None):
        """Create a calibrator, optionally initialised from a *details*
        dict with keys 'name', 'rightAscension', 'declination' and
        'fluxDensities' (a list of {'frequency', 'fluxDensity'} dicts).
        """
        # This is a single calibrator from the database.
        self.__calibratorDetails = {
            'name': "",
            'rightAscension': "",
            'declination': "",
            'fluxDensities': [],
            'measurements': None,
            'qualities': None
        }
        if details is not None:
            if 'name' in details:
                self.setName(details['name'])
            if 'rightAscension' in details:
                self.setRightAscension(details['rightAscension'])
            if 'declination' in details:
                self.setDeclination(details['declination'])
            if 'fluxDensities' in details:
                for fd in details['fluxDensities']:
                    self.addFluxDensity(fd['frequency'], fd['fluxDensity'])
    def setName(self, name=None):
        """Set the source name; ignored when None. Returns self."""
        if name is not None:
            self.__calibratorDetails['name'] = name
        return self
    def getName(self):
        """Return the source name."""
        return self.__calibratorDetails['name']
    def setRightAscension(self, ra=None):
        """Set the right ascension; ignored when None. Returns self."""
        if ra is not None:
            self.__calibratorDetails['rightAscension'] = ra
        return self
    def getRightAscension(self):
        """Return the right ascension."""
        return self.__calibratorDetails['rightAscension']
    def setDeclination(self, dec=None):
        """Set the declination; ignored when None. Returns self."""
        if dec is not None:
            self.__calibratorDetails['declination'] = dec
        return self
    def getDeclination(self):
        """Return the declination."""
        return self.__calibratorDetails['declination']
    def addFluxDensity(self, frequency=None, fluxDensity=None):
        """Add (or overwrite) the flux density measured at *frequency*
        (coerced to int MHz; the flux density is coerced to float).
        Returns self.
        """
        if frequency is not None and fluxDensity is not None:
            frequency = int(frequency)
            fluxDensity = float(fluxDensity)
            for entry in self.__calibratorDetails['fluxDensities']:
                if entry['frequency'] == frequency:
                    # Overwrite the existing measurement at this frequency.
                    entry['fluxDensity'] = fluxDensity
                    return self
            self.__calibratorDetails['fluxDensities'].append(
                { 'frequency': frequency, 'fluxDensity': fluxDensity })
        return self
    def getFluxDensities(self, frequency=None):
        """Return the list of flux density entries, or a single-entry
        list for *frequency* (None when that frequency is not present).
        """
        if frequency is not None:
            for entry in self.__calibratorDetails['fluxDensities']:
                if entry['frequency'] == int(frequency):
                    return [ entry ]
            return None
        return self.__calibratorDetails['fluxDensities']
    def fetchDetails(self):
        """Get all the measurements from the database (cached after the
        first call). Returns self.
        """
        if self.__calibratorDetails['measurements'] is not None:
            # We already have the measurements.
            return self
        data = { 'action': "source_all_details", 'source': self.__calibratorDetails['name'] }
        # BUG FIX: inside a class body every ``__name`` identifier is
        # name-mangled (here to ``_calibrator__communications``), so a
        # direct call to the module-level ``__communications`` helper
        # raises NameError at runtime.  Look it up via globals() instead.
        response = globals()['__communications'](data, "json")
        if response is not None and response['source_name'] == self.__calibratorDetails['name']:
            self.__calibratorDetails['measurements'] = response['measurements']
        return self
    def fetchQualities(self):
        """Get the quality metrics for this calibrator (cached after the
        first call). Returns self.
        """
        if self.__calibratorDetails['qualities'] is not None:
            # We already have the qualities.
            return self
        data = { 'action': "source_quality", 'source': self.__calibratorDetails['name'] }
        # globals() lookup defeats class-body name mangling of the
        # module-level ``__communications`` helper (see fetchDetails).
        response = globals()['__communications'](data, "json")
        if response is not None and self.__calibratorDetails['name'] in response:
            self.__calibratorDetails['qualities'] = response[self.__calibratorDetails['name']]
            # Coerce the quality flags to ints in place.
            for a in self.__calibratorDetails['qualities']:
                for b in self.__calibratorDetails['qualities'][a]:
                    if self.__calibratorDetails['qualities'][a][b] is not None:
                        self.__calibratorDetails['qualities'][a][b] = int(self.__calibratorDetails['qualities'][a][b])
        return self
    def getQuality(self, array=None, band=None):
        """Return the quality flag(s), drilling down as far as the
        *array*/*band* arguments are recognised; falls back to the next
        coarser level when a key is unknown.
        """
        if array is None:
            return self.__calibratorDetails['qualities']
        elif band is None:
            if array in self.__calibratorDetails['qualities']:
                return self.__calibratorDetails['qualities'][array]
            return self.__calibratorDetails['qualities']
        if array in self.__calibratorDetails['qualities']:
            if band in self.__calibratorDetails['qualities'][array]:
                return self.__calibratorDetails['qualities'][array][band]
            return self.__calibratorDetails['qualities'][array]
        return self.__calibratorDetails['qualities']
    def collateDetails(self):
        """Derive per-array, per-band statistics from the fetched
        measurements: median closure phase, median defect, median/stddev
        flux density and a 1..4 quality flag. Returns self.

        NOTE(review): the collated ``arraySpecs`` structure is a local
        variable and is never stored on the instance — it looks like it
        was meant to be saved for callers to use; confirm intent.
        """
        if self.__calibratorDetails['measurements'] is None:
            # No measurements.
            return self
        # Go through all of the measurements.
        bandNames = [ "16cm", "4cm", "15mm", "7mm", "3mm" ]
        # Frequency (MHz) at which each band's flux model is evaluated.
        bandEvals = { "16cm": 2100, "4cm": 5500, "15mm": 17000, "7mm": 33000, "3mm": 93000 }
        arraySpecs = {}
        for a in arrayNames:
            if arrayNames[a] not in arraySpecs:
                arraySpecs[arrayNames[a]] = {}
                for bn in bandNames:
                    arraySpecs[arrayNames[a]][bn] = {
                        'closurePhases': [], 'defects': [],
                        'fluxDensities': [],
                        'closurePhaseMedian': None, 'defectMedian': None,
                        'fluxDensityMedian': None, 'fluxDensityStdDev': None,
                        'qualityFlag': None
                    }
        for r in self.__calibratorDetails['measurements']:
            # Work out closure phase and defect as a function of band and array.
            a = r['array'].split()[0]
            b = r['frequency_band']
            if a in arrayNames:
                arr = arrayNames[a]
                for r2 in r['frequencies']:
                    for r3 in r2['closure_phases']:
                        arraySpecs[arr][b]['closurePhases'].append(float(r3['closure_phase_average']))
                for r2 in r['fluxdensities']:
                    # Defect: (scalar average / vector average) - 1.
                    arraySpecs[arr][b]['defects'].append((float(r2['fluxdensity_scalar_averaged']) /
                                                          float(r2['fluxdensity_vector_averaged'])) - 1)
                    # BUG FIX: ``__model2FluxDensity`` is a module-level
                    # helper; a direct reference here would be mangled to
                    # ``_calibrator__model2FluxDensity`` and raise
                    # NameError, so fetch it from globals().
                    arraySpecs[arr][b]['fluxDensities'].append(
                        globals()['__model2FluxDensity'](r2['fluxdensity_fit_coeff'],
                                                         bandEvals[b]))
        # The collation is done, we make some evaluations.
        for a in arraySpecs:
            for b in bandNames:
                r = arraySpecs[a][b]
                if len(r['closurePhases']) > 0:
                    r['closurePhaseMedian'] = np.median(r['closurePhases'])
                if len(r['defects']) > 0:
                    r['defectMedian'] = np.median(r['defects'])
                if len(r['fluxDensities']) > 0:
                    r['fluxDensityMedian'] = np.median(r['fluxDensities'])
                    r['fluxDensityStdDev'] = np.std(r['fluxDensities'])
                if (r['closurePhaseMedian'] is not None and r['defectMedian'] is not None and
                        r['fluxDensityMedian'] is not None and r['fluxDensityStdDev'] is not None):
                    # Start from the best score and dock a point per
                    # failed criterion.
                    r['qualityFlag'] = 4  # The maximum value.
                    if r['closurePhaseMedian'] > 3:
                        r['qualityFlag'] -= 1
                    if r['closurePhaseMedian'] > 10:
                        r['qualityFlag'] -= 1
                    if r['defectMedian'] > 1.05:
                        r['qualityFlag'] -= 1
                    if r['fluxDensityStdDev'] > (r['fluxDensityMedian'] / 2):
                        r['qualityFlag'] -= 1
        return self
class calibratorSearchResponse:
    """An ordered list of calibrator search results, with helpers to pick
    the best candidate for a given array configuration.
    """
    def __init__(self):
        # A list of sources returned from a calibrator search, plus the
        # index of the best selection once selectBest has run.
        self.__calibrators = { 'list': [], 'bestIndex': None }
    def addCalibrator(self, calibratorDetails=None, distance=None):
        """Append a new calibrator (with its distance from the search
        position) and return it, or None when either argument is missing.
        """
        if (calibratorDetails is not None and distance is not None):
            nc = calibrator(calibratorDetails)
            self.__calibrators['list'].append({ 'calibrator': nc, 'distance': float(distance) })
            return nc
        return None
    def getCalibrator(self, index=None):
        """Return the {'calibrator', 'distance'} entry at *index*, or
        None when the index is missing or out of range.
        """
        if index is not None and index >= 0 and index < len(self.__calibrators['list']):
            return self.__calibrators['list'][index]
        return None
    def numCalibrators(self):
        """Return the number of calibrators in the response."""
        return len(self.__calibrators['list'])
    def getAllCalibrators(self):
        """Return the full result list."""
        return self.__calibrators['list']
    def selectBest(self, array=None):
        """Choose the best calibrator from this list.

        We look for the nearest calibrator with quality 4 in the band
        being used, dropping the required quality one step at a time, and
        accept a calibrator further away (distance < 10) when it is at
        least twice as bright. Returns self.
        """
        # We need to know the array family name (e.g. "6km").
        if array is None:
            array = "6km"
        elif array in arrayNames:
            array = arrayNames[array]
        else:
            # The array name might have a version letter at the end.
            tarray = array[:-1]
            if tarray in arrayNames:
                array = arrayNames[tarray]
        # Work out the band first.
        firstFrequency = self.__calibrators['list'][0]['calibrator'].getFluxDensities()[0]['frequency']
        # BUG FIX: ``__frequency2BandName`` is a module-level helper; a
        # direct reference inside this class body would be name-mangled
        # to ``_calibratorSearchResponse__frequency2BandName`` and raise
        # NameError, so fetch it from globals().
        bandName = globals()['__frequency2BandName'](firstFrequency)
        desiredScore = 4
        calFound = False
        calFd = None
        while (calFound == False and desiredScore > 1):
            for i in range(0, len(self.__calibrators['list'])):
                tcal = self.__calibrators['list'][i]['calibrator']
                tFd = tcal.getFluxDensities(firstFrequency)[0]['fluxDensity']
                if tcal is not None and (calFound == False or
                                         (calFound == True and tFd >= 2 * calFd and
                                          self.__calibrators['list'][i]['distance'] < 10)):
                    tcal.fetchQualities()
                    tqual = tcal.getQuality(array, bandName)
                    if (tqual == desiredScore):
                        if self.__calibrators['bestIndex'] is None:
                            self.__calibrators['bestIndex'] = i
                            calFd = tFd
                            calFound = True
                        elif (tFd >= (2 * calFd) and self.__calibrators['list'][i]['distance'] < 10):
                            # We will accept a calibrator further away, if it is much brighter.
                            self.__calibrators['bestIndex'] = i
                            calFd = tFd
            desiredScore -= 1
        return self
    def getBestCalibrator(self, array=None):
        """Return the best {'calibrator', 'distance'} entry, running the
        selection on first call; None when no selection could be made.
        """
        if self.__calibrators['bestIndex'] is None:
            self.selectBest(array)
        if self.__calibrators['bestIndex'] is not None:
            return self.getCalibrator(self.__calibrators['bestIndex'])
        return None
def __frequency2BandName(frequency=None):
    """Map a frequency in MHz to the receiver band name containing it.

    Returns None when no frequency is supplied or the frequency lies above
    the highest band edge.
    """
    if frequency is None:
        return None
    # Upper band edges in MHz, in ascending order.
    bandEdges = ((3100, "16cm"),
                 (12000, "4cm"),
                 (27000, "15mm"),
                 (52000, "7mm"),
                 (106000, "3mm"))
    for edge, bandName in bandEdges:
        if frequency < edge:
            return bandName
    return None
def _calibratorSearchResponse__frequency2BandName(frequency=None):
    # Name-mangled alias: references to __frequency2BandName inside methods
    # of calibratorSearchResponse are mangled to this name, so it has to
    # exist at module level.
    return __frequency2BandName(frequency)
def __model2FluxDensity(model=None, frequency=None):
    """Evaluate a log-polynomial flux-density model at a frequency.

    The frequency should be given in MHz; the returned flux density is
    10 ** (sum of model terms in log10(frequency in GHz)).
    Returns None when either argument is missing.
    """
    if model is None or frequency is None:
        return None
    logF = np.log10(float(frequency) / 1000)
    logS = float(model[0])
    # NOTE(review): the final element of ``model`` is deliberately excluded
    # (the range stops at len(model) - 1); presumably it is not a polynomial
    # coefficient -- confirm against the caldb model definition.
    for power in range(1, len(model) - 1):
        logS += float(model[power]) * logF ** power
    return 10 ** logS
def _calibrator__model2FluxDensity(model=None, frequency=None):
    # Name-mangled alias so methods of the calibrator class can call the
    # module-level __model2FluxDensity helper.
    return __model2FluxDensity(model, frequency)
def __getValue(xmlNode=None, tagName=None):
    """Return the text of the first <tagName> element under xmlNode.

    Returns None when either argument is missing.  Raises IndexError when
    the tag is absent -- coneSearch relies on that to detect the end of the
    ffreqN/ffluxN series.
    """
    if xmlNode is None or tagName is None:
        return None
    return xmlNode.getElementsByTagName(tagName)[0].childNodes[0].data
def __communications(data=None, parseType=None):
    """POST *data* to the ATNF calibrator database CGI and parse the reply.

    parseType selects the parser: "json" (also the default when None) or
    "xml".  Returns None when no data is supplied, and an empty dict for an
    unrecognised parseType.
    """
    if data is None:
        return None
    host = "www.narrabri.atnf.csiro.au"
    scheme = "https://"
    script = "/cgi-bin/Calibrators/new/caldb_v3.pl"
    session = Session()
    reply = session.post(
        url=scheme + host + script,
        data=data
    )
    if parseType == "xml":
        return minidom.parseString(reply.text)
    if parseType is None or parseType == "json":
        return json.loads(reply.text)
    return {}
def _calibrator__communications(data=None, parseType=None):
    # Name-mangled alias so methods of the calibrator class can call the
    # module-level __communications helper.
    return __communications(data, parseType)
# A routine to search for a calibrator, given an RA and Dec and a search radius.
def coneSearch(ra=None, dec=None, radius=None, fluxLimit=None, frequencies=None):
    """Search the calibrator database for sources near a position.

    :param ra: right ascension (string, e.g. "12:34:56")
    :param dec: declination (string)
    :param radius: search radius passed as the 'theta' query parameter
    :param fluxLimit: minimum flux density to return (default 0.2)
    :param frequencies: list of frequencies in MHz (default [5500, 9000])
    :return: a calibratorSearchResponse holding the matching sources
    """
    # Form the request to the calibrator database server.
    if fluxLimit is None:
        fluxLimit = 0.2
    if frequencies is None:
        frequencies = [ 5500, 9000 ]
    # Bug fix: stringify into a new list instead of converting the caller's
    # list elements in place (the original mutated the argument).
    frequencyStrings = [str(f) for f in frequencies]
    data = { 'mode': "cals" }
    if (ra is not None and dec is not None and radius is not None):
        data['radec'] = ra + "," + dec
        data['theta'] = radius
        data['frequencies'] = ",".join(frequencyStrings)
        data['flimit'] = fluxLimit
    xmlresponse = __communications(data, "xml")
    sourceList = xmlresponse.getElementsByTagName('source')
    calList = calibratorSearchResponse()
    for source in sourceList:
        distance = __getValue(source, 'distance')
        # Collect the numbered ffreqN/ffluxN pairs until one is missing
        # (__getValue raises IndexError for an absent tag).
        fluxDensities = []
        j = 1
        while True:
            try:
                freq = __getValue(source, 'ffreq' + str(j))
            except IndexError:
                break
            flux = __getValue(source, 'fflux' + str(j))
            fluxDensities.append({ 'frequency': freq, 'fluxDensity': flux })
            j += 1
        calDetails = { 'name': __getValue(source, 'name'),
                       'rightAscension': __getValue(source, 'rightascension'),
                       'declination': __getValue(source, 'declination'),
                       'fluxDensities': fluxDensities
        }
        calList.addCalibrator(calDetails, distance)
    return calList
| {
"repo_name": "ste616/cabb-schedule-api",
"path": "python/cabb_scheduler/calibrator_database.py",
"copies": "1",
"size": "15941",
"license": "mit",
"hash": -6466371795305313000,
"line_mean": 43.0359116022,
"line_max": 118,
"alpha_frac": 0.5554231228,
"autogenerated": false,
"ratio": 4.186186974789916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008287692869724817,
"num_lines": 362
} |
"""'A library to help presenters demonstrate terminal sessions hands-free."""
import locale
import logging
import random
import subprocess
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from . import keys as keyboard
from .version import get_version
__version__ = get_version()
logger = logging.getLogger(__name__)
# check for minimum tmux version
VALID_VERSIONS = ['1.7', '1.8', '1.9']
WRONG_VERSION_MESSAGE = ('tmux {ver} or greater not found. '
                         'Oraide requires tmux>={ver}'
                         ).format(ver=VALID_VERSIONS[0])
try:
    output = subprocess.check_output(['tmux', '-V'], stderr=subprocess.STDOUT)
    output = output.decode(locale.getdefaultlocale()[1])
    if not any(ver in output for ver in VALID_VERSIONS):
        warnings.warn(WRONG_VERSION_MESSAGE, Warning)
except Exception:
    # tmux may be missing, exit non-zero, or its output may fail to decode;
    # none of these should prevent the package from importing, so warn only.
    # (Was ``except Exception as exc`` with the binding never used.)
    warnings.warn(WRONG_VERSION_MESSAGE, Warning)
class TmuxError(subprocess.CalledProcessError):
    """An unrecognized tmux failure: the command sent to tmux returned a
    non-zero exit status.

    Inherits ``returncode``, ``cmd``, and ``output`` attributes from
    :exc:`subprocess.CalledProcessError`.
    """
class ConnectionFailedError(TmuxError):
    """Raised when connecting to the tmux server fails, most commonly
    because no server was running when the command was sent.
    """
    def __str__(self):
        return 'Connection to tmux server failed.'
class SessionNotFoundError(TmuxError):
    """Raised when the target tmux session does not exist even though the
    server connection itself succeeded.

    Adds a ``session`` attribute naming the missing session, for your
    debugging convenience.
    """
    def __init__(self, *args, **kwargs):
        # ``session`` travels as a keyword argument; everything else goes to
        # CalledProcessError unchanged.
        self.session = kwargs.pop('session', None)
        super(SessionNotFoundError, self).__init__(*args, **kwargs)
    def __str__(self):
        return 'tmux session {!r} not found.'.format(self.session)
def send_keys(session, keys, literal=True):
    """Send keys to a tmux session.

    A thin wrapper around tmux's ``send-keys`` command.  With ``literal``
    set to ``False``, tmux will attempt to convert keynames such as
    ``Escape`` or ``Space`` to their single-key equivalents.

    :param session: name of a tmux session
    :param keys: keystrokes to send to the tmux session
    :param literal: whether to prevent tmux from looking up keynames
    :raises SessionNotFoundError: when the session does not exist
    :raises ConnectionFailedError: when no tmux server is reachable
    """
    args = ["tmux", "send-keys"]
    if literal:
        args.append('-l')
    args.extend(["-t{}".format(session), keys])
    cmd = ' '.join(args)
    logger.debug('Sending keys with command: %s', cmd)
    try:
        subprocess.check_output(args, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        # tmux reports its failure mode on stderr; translate the two
        # recognised messages into typed exceptions.
        output = exc.output.decode(locale.getdefaultlocale()[1])
        if 'session not found' in output:
            raise SessionNotFoundError(exc.returncode, cmd, exc.output,
                                       session=session)
        if 'failed to connect to server' in output:
            raise ConnectionFailedError(exc.returncode, cmd, exc.output)
        raise
def prompt(func, input_func=None):
    """Handle prompting for advancement on `Session` methods."""
    if input_func is None:
        # Python 2 exposes raw_input; on Python 3 fall back to input().
        try:
            input_func = raw_input
        except NameError:
            input_func = input
    @wraps(func)
    def wrapper(*args, **kwargs):
        self = args[0]
        keys = args[1] if len(args) > 1 else None
        # Only block for confirmation when auto-advance is off.
        if not self.auto_advancing:
            if keys is None:
                msg = "[{session}] Press enter to continue".format(
                    session=self.session
                )
            else:
                msg = "[{session}] Press enter to send {keys}".format(
                    keys=repr(keys),
                    session=self.session,
                )
            input_func(msg)
        return func(*args, **kwargs)
    return wrapper
class Session(object):
    """A session to which to send keys. This class allows for the
    deduplication of session names when repeatedly sending keystrokes to the
    same session.

    :param session: the name of a tmux session
    :param enable_auto_advance: whether to send keystrokes to the session
        immediately, or wait for confirmation, on certain methods
    :param int teletype_delay: the delay between keystrokes for the
        :meth:`teletype` method (for overriding the default of 90 milliseconds)
    """
    def __init__(self, session, enable_auto_advance=False,
                 teletype_delay=None):
        self.session = session
        self.auto_advancing = enable_auto_advance
        self.teletype_delay = teletype_delay
    def send_keys(self, keys, literal=True):
        """Send each literal character in ``keys`` to the session.

        :param keys: literal keystrokes to send to the session
        :param literal: whether to prevent tmux from looking up keynames

        .. seealso:: :func:`send_keys`
        """
        send_keys(self.session, keys, literal=literal)
    @prompt
    def teletype(self, keys, delay=None):
        """teletype(keys, delay=90)

        Type ``keys`` character-by-character, as if you were actually typing
        them by hand.

        The ``delay`` parameter adds time between each keystroke for
        verisimilitude. The actual time between keystrokes varies up to ten
        percent more or less than the nominal value. The default, 90
        milliseconds, approximates a fast typist.

        .. note:: |auto-advancing|

        :param keys: the literal keys to be typed
        :param int delay: the nominal time between keystrokes in milliseconds.
        """
        if delay is None:
            delay = (self.teletype_delay if self.teletype_delay is not None
                     else 90)
        # Bug fix: integer division here.  ``delay / 10`` is a float on
        # Python 3 and random.randint() rejects non-integer bounds on
        # modern interpreters.
        delay_variation = delay // 10
        with self.auto_advance():
            logger.info('[%s] Sending %s', self.session, repr(keys))
            for key in keys:
                self.send_keys(key)
                current_delay = random.randint(delay - delay_variation,
                                               delay + delay_variation)
                time.sleep(current_delay / 1000.0)
    @prompt
    def enter(self, keys=None, teletype=True, after=keyboard.enter):
        """enter(keys=None, teletype=True, after='Enter')

        Type ``keys``, then press :kbd:`Enter`.

        By default, typing character-by-character is enabled with the
        ``teletype`` parameter.

        .. note:: |auto-advancing|

        :param keys: the keystrokes to be sent to the session. These keys
            may only be literal keystrokes, not keynames to be looked up by
            tmux.
        :param teletype: whether to enable simulated typing
        :param after: additional keystrokes to send to the session with
            ``literal`` set to ``False`` (typically for appending special
            keys from :mod:`oraide.keys`, like the default, :kbd:`Enter`)
        """
        if keys:
            if teletype:
                with self.auto_advance():
                    self.teletype(keys)
            else:
                self.send_keys(keys)
        if after:
            with self.auto_advance():
                self.send_keys(after, literal=False)
    @contextmanager
    def auto_advance(self):
        """auto_advance()

        Return a context manager that disables prompts before sending
        keystrokes to the session. For example:

        .. code-block:: python

            session.enter('vim some_file.txt')  # prompt first
            with session.auto_advance():        # disables prompts
                session.teletype('jjji')
                session.enter('Hello, World!', after=keys.escape)
            session.enter(':x')                 # prompt first
        """
        initial_auto_state = self.auto_advancing
        self.auto_advancing = True
        # Bug fix: restore the previous state even when the managed block
        # raises, so one failure cannot leave the session permanently stuck
        # in auto-advance mode.
        try:
            yield
        finally:
            self.auto_advancing = initial_auto_state
# Names exported by ``from oraide import *``.
__all__ = ['send_keys', 'Session']
| {
"repo_name": "ddbeck/oraide",
"path": "oraide/__init__.py",
"copies": "1",
"size": "8158",
"license": "bsd-3-clause",
"hash": 8550628255468664000,
"line_mean": 32.9916666667,
"line_max": 79,
"alpha_frac": 0.6144888453,
"autogenerated": false,
"ratio": 4.271204188481676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5385693033781676,
"avg_score": null,
"num_lines": null
} |
"""A library to plot zipcodes on a map of America"""
from __future__ import print_function
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import SafeConfigParser
from datetime import datetime
import json
import random
import sys
import time
import paho.mqtt.client as mqtt
import pygame
import pyproj
from animated_value import AnimatedAverage, AnimatedValue
from Box2D import b2PolygonShape, b2World
from uszipcode import ZipcodeSearchEngine
class Ping(object):
    """A ping on the map: a short-lived coloured square with a text label,
    backed by a Box2D body so overlapping pings push each other apart."""
    _text_color = (0x55, 0x55, 0x55)
    colors = [
        (0xFF, 0x8D, 0x00),
        (0x1E, 0xBB, 0xF3),
        (0x71, 0xCB, 0x3A)]
    def __init__(self, world, x_loc, y_loc, text):
        self.created_time = time.time()
        # Lifetime of the ping in seconds.
        self.life_time = 3
        self.color = random.choice(Ping.colors)
        self.size = 40
        self._text = text
        # Render surfaces are created lazily on the first draw() call.
        self._text_surface = None
        self._text_surface2 = None
        self._rect_surface = None
        self._body = world.CreateDynamicBody(position=(x_loc, y_loc), fixedRotation=True)
        self._box = self._body.CreatePolygonFixture(box=(self.size, self.size), density=1, friction=0.0)
    def is_alive(self):
        """Returns true if we are within lifetime, false otherwise"""
        return (time.time() - self.created_time) < self.life_time
    def life_factor(self):
        """Gets a scaling factor (0..1) based on life remaining"""
        return (time.time() - self.created_time) / self.life_time
    def draw(self, win, font):
        """Renders a ping to a display window"""
        pos = self._body.position
        sq_size = self.size
        center_square = (pos[0] - sq_size/2,
                         pos[1] - sq_size/2)
        alpha = int((1.0 - self.life_factor()) * 255)
        if not self._rect_surface:
            self._rect_surface = pygame.surface.Surface((sq_size, sq_size))
            self._rect_surface.fill(self.color)
            # NOTE(review): alpha is applied only when the surface is first
            # created, so the square never fades afterwards (the label does,
            # via ``fade`` below) -- confirm whether that is intentional.
            self._rect_surface.set_alpha(alpha)
        win.blit(self._rect_surface, center_square)
        if not self._text_surface:
            self._text_surface = font.render(self._text, True, self._text_color)
            rect = self._text_surface.get_rect()
            self._text_surface.convert_alpha()
            self._text_surface2 = pygame.surface.Surface(rect.size, pygame.SRCALPHA, 32)
            self._text_width = rect.width
        # Fade the label out as the ping ages.
        fade = int(255 * (1 - self.life_factor()))
        self._text_surface2.fill((255, 255, 255, fade))
        self._text_surface2.blit(self._text_surface, (0, 0), special_flags=pygame.BLEND_RGBA_MULT)
        text_pos = (pos[0] - self._text_width/2, pos[1] + 25)
        win.blit(self._text_surface2, text_pos)
    def destroy(self, world):
        """Remove this ping's physics body from the Box2D world."""
        if self._body:
            world.DestroyBody(self._body)
    def __repr__(self):
        # Bug fix: the original formatted ``*self.coordinate``, an attribute
        # that is never assigned, so repr() always raised AttributeError.
        # Report the physics-body position instead.
        pos = self._body.position
        return "<Ping {}: {:.3f}, {:.3f}>".format(self.created_time,
                                                  pos[0], pos[1])
class Map(object):
    """A class to render the map and pings"""
    _text_color = (0x55, 0x55, 0x55)
    background_color = (0xe9, 0xe9, 0xe9)
    def __init__(self, config):
        """Set up pygame, the physics world, the MQTT client, and the map
        projection from a config dict (see read_config())."""
        pygame.display.init()
        pygame.font.init()
        # Zero-gravity world: bodies interact only through collisions.
        self._world = b2World(gravity=(0, 0))
        screen_info = pygame.display.Info()
        pygame.mouse.set_visible(False)
        self.pings = []
        self.config = config
        # Rolling average over the last 500 orders.
        self._avg_spend = AnimatedAverage(count=500)
        self._order_count = 0
        self._cum_order_spend = 0
        self._cum_order_spend_anim = AnimatedValue()
        self._day_start = datetime.now()
        self._last_frame = 0
        self.client = mqtt.Client()
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.connect(self.config["host"],
                            int(self.config["port"]),
                            int(self.config["keepalive"]))
        self._font = pygame.font.SysFont('Source Sans Pro Semibold', 25)
        self._font_avg_spend = pygame.font.SysFont('Source Sans Pro', 30, bold=True)
        self.background = pygame.image.load(config['map_image'])
        # Projection pair: lat/long (WGS84) in, the map image's projection out.
        self.proj_in = pyproj.Proj(proj='latlong', datum='WGS84')
        self.proj_map = pyproj.Proj(init=config['map_projection'])
        MANUAL_SCALE_FACTOR = float(self.config['scale_factor'])
        self.x_scale = self.background.get_height()/MANUAL_SCALE_FACTOR
        self.y_scale = self.x_scale
        self.x_shift = self.background.get_width()/2
        self.y_shift = self.background.get_height()/2
        # Zipcode search engine is created lazily on the first message.
        self.zips = None
        if config["fullscreen"].lower() != 'true':
            self.win = pygame.display.set_mode(
                [
                    self.background.get_width(),
                    self.background.get_height()
                ],
                pygame.NOFRAME | pygame.HWSURFACE | pygame.DOUBLEBUF)
            self.x_offset = self.y_offset = 0
        else:
            self.win = pygame.display.set_mode(
                [
                    screen_info.current_w,
                    screen_info.current_h
                ],
                pygame.FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF)
            # Centre the background image on the fullscreen display.
            self.x_offset = (screen_info.current_w - self.background.get_width()) / 2
            self.y_offset = (screen_info.current_h - self.background.get_height()) / 2
        self.background = self.background.convert()
        print("{} {}".format(self.x_offset, self.y_offset))
        # Run the MQTT network loop in a background thread.
        self.client.loop_start()
    def test(self):
        """Sanity-check the projection by dropping pings on known places."""
        print("Window size: {}, {}".format(self.background.get_width(), self.background.get_height()))
        print("scale: {}, {}\nshift: {}, {}".format(self.x_scale, self.y_scale, self.x_shift, self.y_shift))
        seattle = [-122.4821474, 47.6129432]
        la = [-118.6919199, 34.0201613]
        bar_harbor = [-68.4103749, 44.3583123]
        miami = [-80.369544, 25.7823404]
        left_coast = [-124.411326, 40.438851]
        cape_flattery = [-124.723378, 48.384951]
        west_quoddy = [-66.952785, 44.816219]
        p_town = [-70.2490474, 42.0622933]
        print("Seattle: {} -> {}".format(seattle, self.project(*seattle)))
        print("LA: {} -> {}".format(la, self.project(*la)))
        print("Bar Harbor: {} -> {}".format(bar_harbor, self.project(*bar_harbor)))
        print("Miami: {} -> {}".format(miami, self.project(*miami)))
        places = [seattle, la, bar_harbor, miami, left_coast, cape_flattery, west_quoddy, p_town]
        for place in places:
            (x_coord, y_coord) = self.project(*place)
            self.pings.append(Ping(self._world, x_coord + self.x_offset, y_coord + self.y_offset, ''))
    def on_connect(self, client, _flags, _userdata, response_code):
        """MQTT Connection callback"""
        # NOTE(review): paho-mqtt passes (client, userdata, flags, rc); the
        # middle two parameter names here look swapped, but both are unused,
        # so behaviour is unaffected -- confirm against the paho docs.
        print("Connected with result code {}".format(response_code))
        print()
        client.subscribe(self.config["topic"])
    def on_message(self, _client, _userdata, message):
        """MQTT Message received callback"""
        payload = json.loads(message.payload.decode('utf-8'))
        # Ignore orders without a postal code; nothing to plot.
        if payload["postal_code"] is None or payload["postal_code"] == "":
            return
        if self.zips is None:
            self.zips = ZipcodeSearchEngine()
        zcode = self.zips.by_zipcode(payload["postal_code"])
        if zcode["Longitude"] is None or zcode["Latitude"] is None:
            return
        merchant_name = payload.get('merchant_name', '')
        (x_coord, y_coord) = self.project(zcode["Longitude"], zcode["Latitude"])
        self.pings.append(Ping(self._world, x_coord + self.x_offset, y_coord + self.y_offset, merchant_name))
        # Spend amounts are divided by 100 for display (see draw()), i.e.
        # they arrive in cents.
        spend = int(payload['spend_amount'])
        if spend:
            self._avg_spend.add(spend)
            self._maybe_reset_daily_totals()
            self._order_count += 1
            self._cum_order_spend += spend
            self._cum_order_spend_anim.set(self._cum_order_spend)
    def _draw_text_stat(self, text, value, index):
        # Render one stat line near the bottom-left; index selects the row.
        self.win.blit(self._font_avg_spend.render(text.format(value), True, self._text_color), (100, (self.win.get_height() - 180) + index * 40))
    def draw(self):
        """Render the map and it's pings"""
        self._avg_spend.tick()
        self._cum_order_spend_anim.tick()
        frame_time = pygame.time.get_ticks() / 1000
        if self._last_frame:
            frame_delay = frame_time - self._last_frame
        else:
            # First frame: assume a 60 fps step.
            frame_delay = 1.0/60
        # Advance the physics simulation by the elapsed wall-clock time.
        self._world.Step(frame_delay, 6, 2)
        self._last_frame = frame_time
        self.win.fill(Map.background_color)
        self.win.blit(self.background, (self.x_offset, self.y_offset))
        # Iterate over a copy so expired pings can be removed while looping.
        for ping in self.pings[:]:
            if ping.is_alive():
                ping.draw(self.win, self._font)
            else:
                ping.destroy(self._world)
                self.pings.remove(ping)
        self._draw_text_stat("Average Order Price: ${:0.02f}", self._avg_spend.get()/100.0, 0)
        self._draw_text_stat("Orders Today Total: ${:0,.02f}", self._cum_order_spend_anim.get()/100.0, 1)
        self._draw_text_stat("Orders Today: {:,}", self._order_count, 2)
    def project(self, lon, lat):
        """Convert lat/long to pixel x/y"""
        (x_coord_m, y_coord_m) = pyproj.transform(self.proj_in, self.proj_map, lon, lat)
        x_coord = (self.x_scale * x_coord_m) + self.x_shift
        # Pixel y grows downwards, so the projected y axis is flipped.
        y_coord = -(self.y_scale * y_coord_m) + self.y_shift
        return (int(x_coord), int(y_coord))
    def quit(self):
        """Cleanup"""
        self.client.loop_stop()
    def _maybe_reset_daily_totals(self):
        # Reset the running totals when the calendar day changes.
        now = datetime.now()
        if self._day_start.day != now.day:
            self._cum_order_spend = 0
            self._order_count = 0
            self._day_start = now
def read_config(config_file):
    """Global function to read external config file"""
    parser = SafeConfigParser()
    # parser.read() returns the list of files successfully parsed; an empty
    # list means the file was missing or unreadable.
    if not parser.read(config_file):
        print("Could not read config file {}".format(config_file))
        sys.exit(1)
    return dict(parser.items('map'))
def main():
    """Script Entry Point"""
    if len(sys.argv) != 2:
        print("Usage: {} CONFIG_FILE".format(sys.argv[0]))
        print()
        sys.exit(1)
    world_map = Map(read_config(sys.argv[1]))
    clock = pygame.time.Clock()
    running = True
    # Main render loop: cap at 60 fps, exit on window close or Escape.
    while running:
        clock.tick(60)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                running = False
        world_map.draw()
        pygame.display.flip()
    world_map.quit()
    pygame.quit()
def main_profiled():
    """Run main() under cProfile and print cumulative-time statistics."""
    import cProfile
    import pstats
    # Bug fix: ``import StringIO`` is Python-2 only and raised ImportError
    # under Python 3; fall back to io.StringIO.
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO  # Python 3
    pr = cProfile.Profile()
    pr.enable()
    main()
    pr.disable()
    stream = StringIO()
    stats = pstats.Stats(pr, stream=stream).sort_stats('cumulative')
    stats.print_stats()
    print(stream.getvalue())
# Script entry point: run the map client directly.
if __name__ == '__main__':
    main()
| {
"repo_name": "Plasmarobo/LevelUpMap",
"path": "mqtt_locator.py",
"copies": "1",
"size": "11028",
"license": "mit",
"hash": -9190755774495834000,
"line_mean": 36.3830508475,
"line_max": 145,
"alpha_frac": 0.5797968807,
"autogenerated": false,
"ratio": 3.445173383317713,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45249702640177136,
"avg_score": null,
"num_lines": null
} |
# A library to read Starbound's SBAsset format.
# I know it's terrible
import struct
import logging
# Make sure the file is a valid pak, then get various information from it.
def get_pak_info(pak):
    """Validate an SBAsset6 pak and locate its metadata and file index.

    :param pak: a binary file-like object opened on the pak
    :return: (metadata dict, number of files, offset of the file index)
    :raises AssertionError: when the magic header or index marker is wrong
    """
    # Check that the pak is valid
    header = pak.read(8)
    assert header == b'SBAsset6', 'Unrecognized format!'
    # Find the metadata and file index
    index_offset = struct.unpack('>q', pak.read(8))[0]
    logging.debug(index_offset)
    pak.seek(index_offset)
    index_header = pak.read(5)
    assert index_header == b'INDEX', 'Index offset incorrect!'
    # Get metadata information
    meta_count = read_varint(pak)
    metadata = {}
    for _ in range(meta_count):
        key_len = read_varint(pak)
        key = str(struct.unpack(str(key_len) + 's', pak.read(key_len))[0], "utf-8")
        # Bug fix: these were bare print() calls -- debug output leaking to
        # stdout from library code.  Route them through logging instead.
        logging.debug('metadata key %s (stream offset %d)', key, pak.tell())
        metadata[key] = read_dynamic(pak)
    # Locate the beginning of the file index
    file_count = read_varint(pak)
    file_offset = pak.tell()
    return metadata, file_count, file_offset
# Given an index, file path, and pak, returns a file from the pak
def get_file(pak, file_offset, file_length):
    """Return ``file_length`` bytes starting at ``file_offset`` in the pak."""
    pak.seek(file_offset)
    return pak.read(file_length)
# Given a pak, the pak's file offset, and the number of files in the pak, creates an index
def create_file_index(pak, index_offset, file_count):
    """Build a {path: [offset, length]} index for every file in the pak.

    Each index record is: a 1-byte path length, the UTF-8 path, then two
    big-endian int64s (the file's offset and length).

    Bug fix: the original read the first record's path length before the
    loop and iterated ``range(1, file_count)``, so it only indexed
    ``file_count - 1`` files and always dropped the final entry.  It also
    built an unused ``file_info`` list.
    """
    pak.seek(index_offset)
    index = {}
    for _ in range(file_count):
        path_len = struct.unpack('>B', pak.read(1))[0]
        path = str(struct.unpack(str(path_len) + 's', pak.read(path_len))[0], "utf-8")
        file_offset, file_length = struct.unpack('>qq', pak.read(16))
        index[path] = [file_offset, file_length]
    return index
# Blatantly stolen from py-starbound, thanks blixt
def read_bytes(stream):
    """Read a varint length prefix, then that many raw bytes."""
    return stream.read(read_varint(stream))
def read_varint(stream):
    """Read while the most significant bit is set, then put the 7 least
    significant bits of all read bytes together to create a number.
    """
    value = 0
    while True:
        byte = ord(stream.read(1))
        # Accumulate the low 7 bits; a clear high bit marks the final byte.
        value = (value << 7) | (byte & 0x7f)
        if byte & 0x80 == 0:
            return value
def read_varint_signed(stream):
    """Read a signed varint: the least significant bit is a sign flag."""
    value = read_varint(stream)
    magnitude = value >> 1
    return -magnitude if value & 1 else magnitude
def read_dynamic(stream):
    """Read one dynamically-typed value; the first byte selects the type.

    Type tags: 1=None, 2=float64, 3=bool, 4=signed varint, 5=string,
    6=list, 7=map.
    """
    type_id = ord(stream.read(1))
    # Lambdas defer name resolution, so readers for the compound types only
    # touch their helpers when that tag is actually seen.
    readers = {
        1: lambda s: None,
        2: lambda s: struct.unpack('>d', s.read(8))[0],
        3: lambda s: s.read(1) != b'\0',
        4: lambda s: read_varint_signed(s),
        5: lambda s: read_string(s),
        6: lambda s: read_list(s),
        7: lambda s: read_map(s),
    }
    if type_id not in readers:
        raise ValueError('Unknown dynamic type 0x%02X' % type_id)
    return readers[type_id](stream)
def read_list(stream):
    """Read a varint count, then that many dynamic values."""
    count = read_varint(stream)
    items = []
    for _ in range(count):
        items.append(read_dynamic(stream))
    return items
def read_map(stream):
    """Read a varint count, then that many (string key, dynamic value) pairs.

    The key must be read from the stream before its value, so this stays an
    explicit loop: key/value evaluation order in a dict comprehension is not
    the same across Python versions.
    """
    result = {}
    for _ in range(read_varint(stream)):
        key = read_string(stream)
        result[key] = read_dynamic(stream)
    return result
def read_string(stream):
    """Read length-prefixed bytes and decode them as UTF-8."""
    raw = read_bytes(stream)
    return raw.decode('utf-8')
"repo_name": "R2pChyou/starcheat",
"path": "starcheat/assets/sb_asset.py",
"copies": "1",
"size": "3535",
"license": "mit",
"hash": 3110389603335012400,
"line_mean": 28.7142857143,
"line_max": 90,
"alpha_frac": 0.6263083451,
"autogenerated": false,
"ratio": 3.3006535947712417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9421645361118753,
"avg_score": 0.0010633157504977603,
"num_lines": 119
} |
"""A library to support the mojo command line client"""
import os
import sys
import yaml
from pymojo.mojo import Mojo
def dict_merge(src, dest):
    """Recursively merge *src* into *dest*, mutating and returning *dest*.

    Nested dicts are merged key by key; for any other value type the entry
    from *src* overwrites the one in *dest*.
    """
    for key in src:
        value = src[key]
        if not isinstance(value, dict):
            # Scalars and lists: source wins outright.
            dest[key] = value
        else:
            # Dictionaries: descend, creating the destination subtree on demand.
            dict_merge(value, dest.setdefault(key, {}))
    return dest
def complete_environment(environment, args, default_opts):
    """Helper function for overriding the environment with CLI settings and
    filling incomplete instances with default options"""
    # CLI flags win over config-file values; only flags the user actually
    # supplied (non-None) are applied.  The option keys match the argparse
    # attribute names one-for-one.
    for option in ("endpoint", "port", "use_ssl", "verify", "user", "password"):
        override = getattr(args, option)
        if override is not None:
            environment[option] = override
    # Fill any remaining gaps from a copy of the defaults; dict_merge
    # returns the merged copy, which becomes the completed environment.
    return dict_merge(environment, default_opts.copy())
def cli(args):
    """Run the command line client"""
    # Defaults
    config_files = ["/etc/mojo.yml", os.path.expanduser("~") + "/.mojo.yml"]
    config = {"environments": {}, "default_environment": None}
    opts = {}
    default_opts = {
        "endpoint": "localhost",
        "port": 3000,
        "use_ssl": False,
        "verify": True,
        "user": None,
        "password": None
    }
    # User supplied additional config file?
    if args.config is not None:
        config_files.append(os.path.expanduser(args.config))
    # Merge the config dictionaries
    # Later files win, because each file is merged over the accumulated
    # config; unreadable files are skipped silently.
    for config_file in config_files:
        try:
            # NOTE(review): yaml.load() without an explicit Loader is unsafe
            # for untrusted files and warns on modern PyYAML -- consider
            # yaml.safe_load().
            config = dict_merge(yaml.load(open(config_file, 'r')), config)
        except IOError:
            pass
    # Some logic to determine if we have enough information to run
    # and also to load any preconfigured connection options
    # User supplied an environment name or a group...
    if args.env is not None or args.group is not None:
        # ...but it doesn't exist: error/exit.
        if args.env is not None and args.env not in config["environments"]:
            print("The specified environment is not defined.")
            sys.exit(1)
        # ...and it is defined: "load" those settings.
        else:
            if args.group is not None and args.group:
                if args.group not in config["groups"]:
                    print("The specified group is not defined.")
                    # NOTE(review): execution falls through here with
                    # user_environments unbound, so the split() below raises
                    # NameError instead of exiting -- confirm intended.
                else:
                    # User supplied a valid group
                    # Make a list for convenient use later
                    user_environments = \
                        ",".join(config["groups"][args.group])
            else:
                # No group passed so get the environment
                user_environments = args.env
            # We now have a list of user environments that contains one or more
            # environments
            for environment in user_environments.split(','):
                # Ensure we have a real environment
                if environment in config["environments"]:
                    # Complete the environment and add it to the opts dict
                    opts[environment] = complete_environment(
                        config["environments"][environment], args, default_opts
                    )
                else:
                    print(
                        "The group contains an invalid environment:",
                        environment
                    )
                    sys.exit(1)
    # User did not supply an environment name...
    else:
        # ...but they have a default_environment...
        if config["default_environment"] is not None:
            # ...and that environment is defined: "load" those settings.
            if config["default_environment"] in config["environments"]:
                # complete the environment and add it to the opts dict
                opts[config["default_environment"]] = complete_environment(
                    config["environments"][config["default_environment"]],
                    args,
                    default_opts
                )
            # ...but that env doesn't exist: error/exit.
            else:
                print("The default environment is not defined.")
                sys.exit(1)
    # Route that action!
    # Run the requested action once per selected environment.
    for environment_name in opts:
        environment = opts[environment_name]
        if args.action == "list":
            environment["boolean"] = args.boolean
            environment["tags"] = args.tags
            list_scripts(environment)
        elif args.action == "show":
            show(environment, args)
        elif args.action == "run":
            run(environment, args)
        elif args.action == "reload":
            reload_jojo(environment)
    sys.exit(0)
def print_script(script):
    """Print the details of the given script to the console.

    'name', 'description', 'filename' and 'lock' are always printed; the
    other sections appear only when present and non-empty in *script*.
    """
    print("Name: {}".format(script["name"]))
    print("Description: {}".format(script["description"]))
    print("Filename: {}".format(script["filename"]))
    if "http_method" in script:
        print("HTTP Method: {}".format(script["http_method"]))
    if "output" in script:
        print("Output Type: {}".format(script["output"]))
    if "params" in script and len(script["params"]) > 0:
        print("Parameters:")
        # Bug fix: the original did sorted(script["params"]) over a list of
        # dicts, which raises TypeError on Python 3; sort by parameter name.
        for param in sorted(script["params"], key=lambda p: p["name"]):
            print(" {}: {}".format(param["name"], param["description"]))
    if "filtered_params" in script and len(script["filtered_params"]) > 0:
        print("Filtered parameters:")
        for param in script["filtered_params"]:
            print(" {}".format(param))
    if "tags" in script and len(script["tags"]) > 0:
        print("Tags:")
        for tag in sorted(script["tags"]):
            print(" {}".format(tag))
    print("Lock: {}".format(script["lock"]))
def list_scripts(opts):
    """List available scripts"""
    mojo = Mojo(**opts)
    if mojo.unauthorized:
        print("Authentication failed")
        return
    # Without both a boolean operator and tags, just list the script names.
    if opts["boolean"] is None or opts["tags"] is None:
        for script in sorted(mojo.scripts):
            print(script)
        return
    # Map the boolean operator onto the matching Jojo query parameter.
    if opts["boolean"] == "and":
        param = "tags"
    elif opts["boolean"] == "or":
        param = "any_tags"
    elif opts["boolean"] == "not":
        param = "not_tags"
    # NOTE: any other operator value leaves ``param`` unset and raises
    # NameError below, matching the original control flow.
    scripts = mojo.get_scripts(param, opts["tags"])
    for script in sorted(scripts):
        print_script(mojo.get_script(script))
        print("")
def show(opts, args):
    """Show script details"""
    mojo = Mojo(**opts)
    # NOTE: the lookup happens before the auth check, mirroring the original
    # call order.
    script = mojo.get_script(args.script)
    if mojo.unauthorized:
        print("Authentication failed")
        return
    print_script(script)
def run(opts, args):
    """Run a script.

    Parses ``key=value`` parameters from the CLI, invokes the script through
    Mojo, and prints the HTTP response details, the script's output streams,
    and any return values.
    """
    mojo = Mojo(**opts)
    # Bug fix: the original tested ``type(x) is unicode``, and ``unicode``
    # does not exist on Python 3 (NameError).  Resolve the text type once,
    # in a way that works on both interpreters.
    try:
        text_type = unicode  # Python 2: JSON strings are unicode
    except NameError:
        text_type = str  # Python 3
    # Parse CLI-given parameters.  Split on the first '=' only, so parameter
    # values may themselves contain '=' (the original kept only the segment
    # between the first two '=' characters).
    params = {}
    for param in args.params:
        name, _, value = param.partition("=")
        params[name] = value
    resp = mojo.run(args.script, params)
    if mojo.auth and mojo.unauthorized:
        print("Authentication failed")
        return
    print("Status Code: {}".format(resp.status_code))
    print("Headers:")
    for header in resp.headers:
        print(" {}: {}".format(header, resp.headers[header]))
    j = resp.json()
    print("Script return code: {}".format(j['retcode']))
    # Strings are printed whole; lists are printed line by line.
    for label, key in (("Stderr:", "stderr"), ("Stdout:", "stdout")):
        if key in j:
            print(label)
            if isinstance(j[key], text_type):
                print(j[key])
            else:
                for line in j[key]:
                    print(" {}".format(line))
    if "return_values" in j and len(j["return_values"]) > 0:
        print("Return Values:")
        for key in sorted(j["return_values"]):
            print(" {}: {}".format(key, j["return_values"][key]))
def reload_jojo(opts):
    """Ask the Jojo to reload its script registry and report the outcome."""
    mojo = Mojo(**opts)
    result = mojo.reload()
    # mojo.reload() returns True on success, False on an authentication
    # failure, or the unexpected HTTP status code as an int. The identity
    # checks must run first because bools are ints in Python.
    if result is True:
        print("Reload successful!")
    elif result is False:
        print("Authentication failed")
    elif isinstance(result, int):
        print(
            "The Jojo responded with an unexpected status code: {}".
            format(result)
        )
| {
"repo_name": "GradysGhost/pymojo",
"path": "pymojo/cli.py",
"copies": "1",
"size": "8820",
"license": "apache-2.0",
"hash": 968245283014085900,
"line_mean": 33.8616600791,
"line_max": 76,
"alpha_frac": 0.5583900227,
"autogenerated": false,
"ratio": 4.4590495449949445,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5517439567694945,
"avg_score": null,
"num_lines": null
} |
"""A library with a base class that stores the assigned name of an object.
>>> import name
>>> a = name.AutoName()
>>> a.__assigned_name__
'a'
"""
from __future__ import annotations
import inspect
from types import FrameType
from typing import Generator, Iterator, Dict, Any, Optional
__all__ = ["AutoName"]

# Type alias: a generator that yields the local-variable mapping
# (f_locals) of each enclosing (calling) frame, innermost first.
_FrameGenerator = Generator[Dict[str, Any], None, None]
def _get_outer_locals(frame: FrameType) -> _FrameGenerator:
while frame:
yield frame.f_locals
frame = frame.f_back
class AutoName:
    """ Creates an object with the '__assigned_name__'
    attribute that stores the name of such object.

    >>> import name
    >>> a = name.AutoName()
    >>> a.__assigned_name__
    'a'
    """
    def __init__(self, count: int = 0) -> None:
        # 'count' is how many fresh instances __iter__ produces for the
        # unpack-assignment syntax (a, b, c = AutoName(3)).
        assert isinstance(count, int), \
            "Expected 'int' object, got '%s'" % count.__class__.__name__
        assert count >= 0, "Expected positive 'int' number, got '%r'" % count
        self.__count = count
        # Cached name; computed lazily by __assigned_name__.
        self.__name: Optional[str] = None

    # I define the '__iter__' method to give compatibility
    # with the unpack sequence assignment syntax.
    def __iter__(self) -> Iterator[AutoName]:
        # NOTE: I call 'type(self)' to warranty that this
        # method works even in a subclass of this.
        return (type(self)() for _ in range(self.__count))

    # Find the assigned name of the current object.
    def _find_name(self, frame: FrameType) -> str:
        # Walk the calling frames, innermost first.
        scope = _get_outer_locals(frame)

        # NOTE: The same object could have many names in different scopes.
        # So, all names are stored in the 'scopes' var. The valid name is
        # the one found in the last (outermost) scope.
        scopes = []
        for variables in scope:
            # NOTE: An object could have several names in the *same* scope
            # when the user assigns it with the multiple-assignment syntax
            # (x = y = obj); those are collected in 'names'.
            names = []
            for name, value in variables.items():
                if value is self:
                    names.append(name)
            if names:
                scopes.append(names)
        if scopes:
            # Remember: the valid name is one that is in the last scope.
            names = scopes[-1]
            if len(names) > 1:  # Check for multiple assignment.
                raise NameError(
                    "Can not assign a unique name to multiple variables.")
            else:
                return names[0]
        raise NameError("Can not find the name of this object.")

    @property
    def __assigned_name__(self) -> str:
        """Find the name of the instance of the current class."""
        if self.__name is None:
            frame: Optional[FrameType] = inspect.currentframe()
            if frame is None:
                # Some Python implementations provide no frame objects.
                raise NameError("Can not find the name of this object.")
            else:
                # Skip this property's own frame; search from the caller.
                self.__name = self._find_name(frame.f_back)
        return self.__name
| {
"repo_name": "AlanCristhian/named",
"path": "name.py",
"copies": "2",
"size": "3116",
"license": "mit",
"hash": -2817110240312406000,
"line_mean": 34.0112359551,
"line_max": 78,
"alpha_frac": 0.5847240051,
"autogenerated": false,
"ratio": 4.274348422496571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5859072427596571,
"avg_score": null,
"num_lines": null
} |
# Basic dictionary: two key/value pairs.
alien_0 = {'color': 'green', 'points': 5}
print(alien_0)
print(alien_0['color'])
print(alien_0['points'])

# Reading a value out of the dictionary.
new_points = alien_0['points']
print("you just earned " + str(new_points) + " points!")

# Adding new key/value pairs.
alien_0['x_position'] = 0
alien_0['y_position'] = 25
print(alien_0)

# Modifying an existing value.
print("The alien is " + alien_0['color'] + ".")
alien_0['color'] = 'yellow'
print("The alien is now " + alien_0['color'] + ".")

# Move the alien right by an amount that depends on its speed.
alien_0['speed'] = 'medium'
print("Original x-position: " + str(alien_0['x_position']))
if alien_0['speed'] == 'slow':
    x_increment = 1
elif alien_0['speed'] == 'medium':
    x_increment = 2
else:
    # Anything else counts as a fast alien.
    x_increment = 3
alien_0['x_position'] = alien_0['x_position'] + x_increment
print("New x-position: " + str(alien_0['x_position']))

# Removing a key/value pair.
del alien_0['points']
print(alien_0)

# A dictionary of similar objects (person -> favorite language).
favorite_languages = {
    'jen': 'python',
    'sarah': 'c',
    'edward': 'ruby',
    'phil': 'python',
    }
print("Sarah's favorite language is " + favorite_languages['sarah'].title() +
      ".")

# Looping through all key/value pairs.
user_0 = {
    'username': 'efermi',
    'first': 'enrico',
    'last': 'fermi',
    }
for k, v in user_0.items():
    print("\nKey: " + k)
    print("Value: " + v)

# Looping through keys; greet only the people in 'friends'.
friends = ['phil', 'sarah']
for name in favorite_languages.keys():
    print(name.title())
    if name in friends:
        print("  Hi " + name.title() + ", I see your favorite language is " +
              favorite_languages[name].title() + "!")

# Looping through the keys in sorted order.
for name in sorted(favorite_languages.keys()):
    print(name.title() + ", thank you for taking the poll.")

# Looping through the values.
print("\nThe following languages have been mentioned:")
for language in favorite_languages.values():
    print(language.title())

# A list of dictionaries.
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
    print(alien)

# Generating many identical dictionaries programmatically.
aliens = []
for alien_number in range(30):
    new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}
    aliens.append(new_alien)
# Show only the first five.
for alien in aliens[:5]:
    print(alien)
print("...")
print("Total number of aliens: " + str(len(aliens)))

# A list inside a dictionary (several favorites per person).
favorite_languages = {
    'jen': ['python', 'ruby'],
    'sarah': ['c'],
    'edward': ['ruby', 'go'],
    'phil': ['python', 'haskell'],
    }
for name, languages in favorite_languages.items():
    print("\n" + name.title() + "'s favorite languages are:")
    for language in languages:
        print("\t" + language.title())

# A dictionary inside a dictionary (username -> user info dict).
users = {
    'aeinstein': {
        'first': 'albert',
        'last': 'einstein',
        'location': 'princeton',
        },
    'mcurie': {
        'first': 'marie',
        'last': 'curie',
        'location': 'paris',
        }
    }
for username, user_info in users.items():
    print("\nUsername: " + username)
    full_name = user_info['first'] + " " + user_info['last']
    location = user_info['location']
    print("\tFull name: " + full_name.title())
    print("\tLocation: " + location.title())
"repo_name": "timtian090/Playground",
"path": "Python/dictionary.py",
"copies": "1",
"size": "2901",
"license": "mit",
"hash": -1065786397649997600,
"line_mean": 22.593495935,
"line_max": 77,
"alpha_frac": 0.5798000689,
"autogenerated": false,
"ratio": 2.858128078817734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8937004274842641,
"avg_score": 0.00018477457501847746,
"num_lines": 123
} |
alien_0 = {
    "cor": "verde",
    "pontos": 10
}
# print() and .items() render the same dict differently
print(alien_0)
print(alien_0.items())
print("----")
# Print the value of a specific key
print(alien_0["cor"])
print(alien_0["pontos"])
print("----")
linguagem_favorita = {}
linguagem_favorita.__setitem__("paulo", "python")
linguagem_favorita.__setitem__("sara", "c")
linguagem_favorita.__setitem__("marcio", "ruby")
linguagem_favorita.__setitem__("vinicius", "go")
linguagem_favorita.__setitem__("joao", "mql5")
linguagem_favorita.__setitem__("edvaldo", "mql5")
linguagem_favorita.__setitem__("kenia", "c")
linguagem_favorita.__setitem__("mario", "python")
linguagem_favorita.__setitem__("junior", "python")
print(linguagem_favorita.items())
# Iterating over key/value pairs (Oracle-cursor-like); requires .items()
for nome, linguagem in linguagem_favorita.items():
    print("a linguagem do " + str(nome) + " e " + str(linguagem))
print("-----")
# Iteration over the keys only
for nome in linguagem_favorita.keys():
    print(nome)
print("-----")
# Iteration over the keys only (a dict iterates its keys by default)
for nome in linguagem_favorita:
    print(nome)
print("----")
# Sort the items by their respective keys
print(sorted(linguagem_favorita.items()))
print("-----")
# Sort the keys
print(sorted(linguagem_favorita.keys()))
print("-----")
# Iterate over the unique values
for linguagem in set(linguagem_favorita.values()):
    print(linguagem)
# Print only the keys
print(alien_0.keys())
# Remove the key (pop returns the removed value)
print(alien_0.pop("cor"))
print(alien_0)
# Add the key back, then update it
alien_0.__setitem__("cor", "verde")
print(alien_0)
alien_0.__setitem__("cor", "azul")
print(alien_0)
# List of dictionaries
aliens = []
for alien in range(0, 30):
    new_alien = {}
    new_alien.__setitem__("color", "green")
    new_alien.__setitem__("points", "5")
    new_alien.__setitem__("speed", "slow")
    aliens.append(new_alien)
# Promote the color of the first few aliens over two passes.
for x in range(0, 2):
    for alien in aliens[int(x):3 + int(x)]:
        if alien["color"] == "yellow":
            alien.__setitem__("color", "red")
            alien.__setitem__("points", "15")
            alien.__setitem__("speed", "fast")
        elif(alien["color"]) == "green":
            alien.__setitem__("color", "yellow")
            alien.__setitem__("points", "10")
            alien.__setitem__("speed", "medium")
for alien in aliens[:10]:
    print(alien)
# A list inside a dictionary
pizza = {
    'crust': 'thick',
    'toppings': ['mushrooms', 'extra cheese'],
}
print("You ordered a " + pizza['crust'] + "-crust pizza " +
      "with the following toppings:")
for topping in pizza['toppings']:
    print("\t" + topping)
# Nested dictionary
users = {'aeinstein': {'first': 'albert',
                       'last': 'einstein',
                       'location': 'princeton'},
         'mcurie': {'first': 'marie',
                    'last': 'curie',
                    'location': 'paris'},
         }
# The for loop yields pairs where each element is a 2-tuple:
# the first element of the pair is a string (the username),
# the second element is a dictionary (the user's info)
for username, user_info in users.items():
    print("\nUsername: " + username)
    full_name = user_info['first'] + " " + user_info['last']
    location = user_info['location']
    print("\tFull name: " + full_name.title())
    print("\tLocation: " + location.title())
| {
"repo_name": "romeubertho/USP-IntroPython",
"path": "06-Dicionarios/dicionario.py",
"copies": "1",
"size": "3496",
"license": "mit",
"hash": 6869659061885434000,
"line_mean": 27.3781512605,
"line_max": 66,
"alpha_frac": 0.5875286041,
"autogenerated": false,
"ratio": 2.9133333333333336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9000861937433333,
"avg_score": 0,
"num_lines": 119
} |
# Aliex Cardona and Josep Casanovas
# Augmented reality - assignment 1 ("Realitat aumentada practica 1")
import cv2
import numpy as np
from matplotlib import pyplot as plt

from PyFiles.convolutionKernel import getMatchingMap

IMAGES_PATH = "../Images/"

# Interactive prompts are disabled; image paths are hard-coded below.
#imageName = IMAGES_PATH + input("Source image: ")
#targetName = IMAGES_PATH + input("Target to search: ")
#detectionThreshold = input("Detection threshold: ")
imageName = IMAGES_PATH+'img1.png'
targetName = IMAGES_PATH+'t1-img1.png'

# Load both the scene image and the template in grayscale.
img = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
template = cv2.imread(targetName, cv2.IMREAD_GRAYSCALE)

# OpenCV's own matcher (method 0), kept for visual comparison with the
# custom convolution-based matching map.
res = cv2.matchTemplate(img,template,0)
matching_map = getMatchingMap(img, template)

# Brute-force scan of the matching map for the minimum (best match).
# NOTE(review): assumes map values never exceed 255, since min_value
# starts at 255 — confirm against getMatchingMap's output range.
min_value_X = 0
min_value_Y = 0
min_value = 255
for i in range(matching_map.shape[0]):
    for j in range(matching_map.shape[1]):
        if matching_map[i][j] < min_value:
            min_value = matching_map[i][j]
            min_value_X = j
            min_value_Y = i

# Mark the best-match location with a small square (color 0, width 2).
cv2.rectangle(img,(min_value_X - 6, min_value_Y - 6), (min_value_X + 6, min_value_Y + 6), 0, 2)

# NOTE(review): Python 2 print statements — this file will not parse
# under Python 3 as written.
print img.shape
print template.shape
print res.shape
print matching_map.shape

# Show OpenCV's result, the custom matching map, and the annotated image.
plt.subplot(1,3,1), plt.imshow(res, cmap = 'gray')
plt.title('Matching map'), plt.xticks([]), plt.yticks([])
plt.subplot(1,3,2), plt.imshow(matching_map, cmap = 'gray')
plt.title('Matching map'), plt.xticks([]), plt.yticks([])
plt.subplot(1,3,3), plt.imshow(img, cmap = 'gray')
plt.title('Matching map'), plt.xticks([]), plt.yticks([])
plt.show()
"repo_name": "UndistinguishedFellows/RealitatAumentadaPractiques",
"path": "Practica_1/MatchingImages.py",
"copies": "1",
"size": "1447",
"license": "mit",
"hash": -6834179536693130000,
"line_mean": 26.8461538462,
"line_max": 95,
"alpha_frac": 0.6862474084,
"autogenerated": false,
"ratio": 2.8767395626242545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9011755865374447,
"avg_score": 0.01024622112996142,
"num_lines": 52
} |
"""A light and fast template engine."""
import re
import sys
# True when running under Python 3; switches the string handling in
# escape_html()/to_unicode() below.
PY3 = False
if sys.version_info > (3, 0):
    PY3 = True
class Template(object):
    """A compiled template.

    Syntax: Python code in ``<% ... %>`` blocks, line statements prefixed
    with ``%``, and expressions in ``{{ ... }}``. The generated code is
    cached class-wide per template string.
    """

    COMPILED_TEMPLATES = {} # {template string: code object, }

    # Regex for stripping all leading, trailing and interleaving whitespace.
    RE_STRIP = re.compile("(^[ \t]+|[ \t]+$|(?<=[ \t])[ \t]+|\A[\r\n]+|[ \t\r\n]+\Z)", re.M)

    def __init__(self, template, strip=True):
        """Initialize class"""
        super(Template, self).__init__()
        self.template = template
        # 'strip' controls whitespace stripping of the expanded output;
        # templates may flip options at runtime via the 'setopt' builtin.
        self.options = {"strip": strip}
        self.builtins = {"escape": lambda s: escape_html(s),
                         "setopt": lambda k, v: self.options.update({k: v}), }
        # Compile each distinct template string only once.
        if template in Template.COMPILED_TEMPLATES:
            self.code = Template.COMPILED_TEMPLATES[template]
        else:
            self.code = self._process(self._preprocess(self.template))
            Template.COMPILED_TEMPLATES[template] = self.code

    def expand(self, namespace={}, **kw):
        """Return the expanded template string"""
        # NOTE(review): 'namespace' has a mutable default and is mutated
        # below, so entries persist across calls that rely on the default.
        output = []
        namespace.update(kw, **self.builtins)
        # 'echo' collects output chunks; 'isdef' tests name existence.
        namespace["echo"] = lambda s: output.append(s)
        namespace["isdef"] = lambda v: v in namespace
        eval(compile(self.code, "<string>", "exec"), namespace)
        return self._postprocess("".join(map(to_unicode, output)))

    def stream(self, buffer, namespace={}, encoding="utf-8", **kw):
        """Expand the template and stream it to a file-like buffer."""
        def write_buffer(s, flush=False, cache = [""]):
            # Cache output as a single string and write to buffer.
            cache[0] += to_unicode(s)
            if flush and cache[0] or len(cache[0]) > 65536:
                buffer.write(postprocess(cache[0]))
                cache[0] = ""

        namespace.update(kw, **self.builtins)
        namespace["echo"] = write_buffer
        namespace["isdef"] = lambda v: v in namespace
        # Each flushed chunk is encoded (and optionally stripped).
        postprocess = lambda s: s.encode(encoding)
        if self.options["strip"]:
            postprocess = lambda s: Template.RE_STRIP.sub("", s).encode(encoding)
        eval(compile(self.code, "<string>", "exec"), namespace)
        write_buffer("", flush=True) # Flush any last cached bytes

    def _preprocess(self, template):
        """Modify template string before code conversion"""
        # Replace inline ('%') blocks for easier parsing: openers become
        # <%...%>, closers/continuations become <%:...%>.
        o = re.compile("(?m)^[ \t]*%((if|for|while|try).+:)")
        c = re.compile("(?m)^[ \t]*%(((else|elif|except|finally).*:)|(end\w+))")
        template = c.sub(r"<%:\g<1>%>", o.sub(r"<%\g<1>%>", template))

        # Replace ({{x}}) variables with '<%echo(x)%>'
        v = re.compile("\{\{(.*?)\}\}")
        template = v.sub(r"<%echo(\g<1>)%>\n", template)
        return template

    def _process(self, template):
        """Return the code generated from the template string"""
        code_blk = re.compile(r"<%(.*?)%>\n?", re.DOTALL)
        indent = 0
        code = []
        # re.split alternates literal text (even indices) and code blocks
        # (odd indices).
        for n, blk in enumerate(code_blk.split(template)):
            # Replace '<\%' and '%\>' escapes
            blk = re.sub(r"<\\%", "<%", re.sub(r"%\\>", "%>", blk))
            # Unescape '%{}' characters
            blk = re.sub(r"\\(%|{|})", "\g<1>", blk)

            if not (n % 2):
                # Literal text: emit as an echo of a triple-quoted string.
                # Escape backslash characters
                blk = re.sub(r'\\', r'\\\\', blk)
                # Escape double-quote characters
                blk = re.sub(r'"', r'\\"', blk)
                blk = (" " * (indent*4)) + 'echo("""{0}""")'.format(blk)
            else:
                blk = blk.rstrip()
                if blk.lstrip().startswith(":"):
                    # Continuation or terminator (else/elif/end...): dedent
                    # before emitting it.
                    if not indent:
                        err = "unexpected block ending"
                        raise SyntaxError("Line {0}: {1}".format(n, err))
                    indent -= 1
                    if blk.startswith(":end"):
                        continue
                    blk = blk.lstrip()[1:]

                blk = re.sub("(?m)^", " " * (indent * 4), blk)
                if blk.endswith(":"):
                    # Opening a new block: indent what follows.
                    indent += 1

            code.append(blk)

        if indent:
            err = "Reached EOF before closing block"
            raise EOFError("Line {0}: {1}".format(n, err))
        return "\n".join(code)

    def _postprocess(self, output):
        """Modify output string after variables and code evaluation"""
        if self.options["strip"]:
            output = Template.RE_STRIP.sub("", output)
        return output
def escape_html(x):
    """Escape the HTML special characters ``&<>`` and the quotes ``"'``.

    Non-string input is converted with ``str()`` first. Returns the
    escaped string.
    """
    CHARS = "&<>\"'"
    # Entity replacements, aligned with CHARS. '&' is first so that the
    # '&' introduced by later replacements is never double-escaped.
    ENTITIES = ["&amp;", "&lt;", "&gt;", "&quot;", "&#39;"]
    try:
        # Python 2: accept both str and unicode.
        string_types = basestring
    except NameError:
        # Python 3: 'basestring' no longer exists.
        string_types = str
    string = x if isinstance(x, string_types) else str(x)
    for c, e in zip(CHARS, ENTITIES):
        string = string.replace(c, e)
    return string
def to_unicode(x, encoding="utf-8"):
    """Convert anything to Unicode.

    On Python 3 everything is converted with ``str``; on Python 2, byte
    strings are decoded with *encoding* (undecodable bytes replaced).
    """
    if PY3:
        return str(x)
    if not isinstance(x, unicode):
        x = unicode(str(x), encoding, errors="replace")
    return x
| {
"repo_name": "dotpy/step",
"path": "step/step.py",
"copies": "1",
"size": "5051",
"license": "bsd-3-clause",
"hash": 1515811274266750700,
"line_mean": 36.4148148148,
"line_max": 92,
"alpha_frac": 0.5181152247,
"autogenerated": false,
"ratio": 3.8498475609756095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4867962785675609,
"avg_score": null,
"num_lines": null
} |
"""A light system for platforms which batches all updates."""
import abc
import asyncio
from typing import Tuple, Set, List
from sortedcontainers import SortedSet, SortedList
from mpf.platforms.interfaces.light_platform_interface import LightPlatformInterface
from mpf.core.utility_functions import Util
class PlatformBatchLight(LightPlatformInterface, abc.ABC):
    """Light which can be batched."""

    __slots__ = ["light_system", "_current_fade", "_last_brightness"]

    def __init__(self, number, light_system: "PlatformBatchLightSystem"):
        """Initialise light."""
        super().__init__(number)
        self.light_system = light_system
        # (start_brightness, start_time, target_brightness, target_time);
        # the -1 times mark "no fade configured yet".
        self._current_fade = (0, -1, 0, -1)
        # Final brightness once a fade completed; None while one is pending.
        self._last_brightness = None

    @abc.abstractmethod
    def get_max_fade_ms(self):
        """Return max fade ms."""

    def set_fade(self, start_brightness, start_time, target_brightness, target_time):
        """Mark dirty and remember fade."""
        self.light_system.mark_dirty(self)
        self._current_fade = (start_brightness, start_time, target_brightness, target_time)
        self._last_brightness = None

    def get_fade_and_brightness(self, current_time):
        """Return fade + brightness and mark as clean if this is it."""
        # Fast path: the fade already finished earlier; repeat the final
        # brightness with no fade time.
        if self._last_brightness:
            return self._last_brightness, 0, True
        max_fade_ms = self.get_max_fade_ms()
        start_brightness, start_time, target_brightness, target_time = self._current_fade
        # Remaining fade duration from "now" to the fade target.
        fade_ms = int(round((target_time - current_time) * 1000.0))
        if fade_ms > max_fade_ms >= 0:
            # Remaining fade exceeds what the hardware supports in one
            # step: emit a max_fade_ms step and interpolate the brightness
            # this light should reach when that step finishes.
            fade_ms = max_fade_ms
            ratio = ((current_time + (fade_ms / 1000.0) - start_time) /
                     (target_time - start_time))
            brightness = start_brightness + (target_brightness - start_brightness) * ratio
            done = False
        else:
            # Fade fits into one step (or already elapsed): go to target.
            if fade_ms < 0:
                fade_ms = 0
            brightness = target_brightness
            self._last_brightness = brightness
            done = True

        return brightness, fade_ms, done
class PlatformBatchLightSystem:
    """Batch light system for platforms.

    Collects dirty lights, groups hardware-sequential lights into batches
    and hands them to ``update_callback``. Multi-step fades are
    re-scheduled via ``dirty_schedule``.
    """

    __slots__ = ["dirty_lights", "dirty_schedule", "clock", "update_task", "update_callback",
                 "update_hz", "max_batch_size", "scheduler_task", "schedule_changed", "dirty_lights_changed",
                 "last_state"]

    # pylint: disable-msg=too-many-arguments
    def __init__(self, clock, update_callback, update_hz, max_batch_size):
        """Initialise light system."""
        # Lights with pending changes, kept sorted so that sequential
        # (adjacent) lights end up next to each other for batching.
        self.dirty_lights = SortedSet()    # type: Set[PlatformBatchLight]
        self.dirty_lights_changed = asyncio.Event()
        # Sorted (due_time, light) pairs for future fade steps.
        self.dirty_schedule = SortedList()
        self.schedule_changed = asyncio.Event()
        self.update_task = None
        self.scheduler_task = None
        self.clock = clock
        self.update_callback = update_callback
        self.update_hz = update_hz
        self.max_batch_size = max_batch_size
        # Last (brightness, time) sent per light; lets us skip no-op sends.
        self.last_state = {}

    def start(self):
        """Start light system."""
        self.update_task = self.clock.loop.create_task(self._send_updates())
        self.update_task.add_done_callback(Util.raise_exceptions)
        self.scheduler_task = self.clock.loop.create_task(self._schedule_updates())
        self.scheduler_task.add_done_callback(Util.raise_exceptions)

    def stop(self):
        """Stop light system."""
        if self.scheduler_task:
            self.scheduler_task.cancel()
            self.scheduler_task = None
        if self.update_task:
            self.update_task.cancel()
            self.update_task = None

    async def _schedule_updates(self):
        """Promote scheduled fade steps to the dirty set when they are due."""
        while True:
            run_time = self.clock.get_time()
            self.schedule_changed.clear()
            # Move every due entry onto the dirty set.
            while self.dirty_schedule and self.dirty_schedule[0][0] <= run_time:
                self.dirty_lights.add(self.dirty_schedule[0][1])
                del self.dirty_schedule[0]
                self.dirty_lights_changed.set()

            if self.dirty_schedule:
                # Sleep until the earliest entry is due, but wake early if
                # the schedule changes in the meantime.
                await asyncio.wait([self.schedule_changed.wait()],
                                   timeout=self.dirty_schedule[0][0] - run_time, return_when=asyncio.FIRST_COMPLETED)
            else:
                await self.schedule_changed.wait()

    async def _send_updates(self):
        """Cut the dirty set into runs of sequential lights and send them."""
        poll_sleep_time = 1 / self.update_hz
        # Fades within one poll interval of each other may share a batch.
        max_fade_tolerance = int(poll_sleep_time * 1000)
        while True:
            await self.dirty_lights_changed.wait()
            self.dirty_lights_changed.clear()
            sequential_lights = []
            for light in list(self.dirty_lights):
                if not sequential_lights:
                    # first light
                    sequential_lights = [light]
                elif light.is_successor_of(sequential_lights[-1]):
                    # lights are sequential
                    sequential_lights.append(light)
                else:
                    # sequence ended
                    await self._send_update_batch(sequential_lights, max_fade_tolerance)
                    # this light is a new sequence
                    sequential_lights = [light]

            if sequential_lights:
                await self._send_update_batch(sequential_lights, max_fade_tolerance)

            self.dirty_lights.clear()

            await asyncio.sleep(poll_sleep_time)

    async def _send_update_batch(self, sequential_lights: List[PlatformBatchLight], max_fade_tolerance):
        """Send one run of sequential lights, flushing on fade mismatch or batch size."""
        sequential_brightness_list = []   # type: List[Tuple[LightPlatformInterface, float, int]]
        common_fade_ms = None
        current_time = self.clock.get_time()
        for light in sequential_lights:
            brightness, fade_ms, done = light.get_fade_and_brightness(current_time)
            schedule_time = current_time + (fade_ms / 1000)
            if not done:
                # Fade continues: schedule the light's next step.
                if not self.dirty_schedule or self.dirty_schedule[0][0] > schedule_time:
                    self.schedule_changed.set()
                self.dirty_schedule.add((schedule_time, light))
            else:
                # check if we realized this brightness earlier
                last_state = self.last_state.get(light, None)
                if last_state and last_state[0] == brightness and last_state[1] < schedule_time and \
                        not sequential_brightness_list:
                    # we already set the light to that color earlier. skip it
                    # we only skip this light if we are in the beginning of the list for now
                    # the reason for that is that we do not want to break fade chains when one color channel
                    # of an RGB light did not change
                    # this could become an option in the future
                    continue
            self.last_state[light] = (brightness, schedule_time)

            if common_fade_ms is None:
                common_fade_ms = fade_ms
            if -max_fade_tolerance < common_fade_ms - fade_ms < max_fade_tolerance and \
                    len(sequential_brightness_list) < self.max_batch_size:
                sequential_brightness_list.append((light, brightness, common_fade_ms))
            else:
                # Fade time diverged or batch is full: flush and restart.
                await self.update_callback(sequential_brightness_list)
                # start new list
                current_time = self.clock.get_time()
                common_fade_ms = fade_ms
                sequential_brightness_list = [(light, brightness, common_fade_ms)]

        if sequential_brightness_list:
            await self.update_callback(sequential_brightness_list)

    def mark_dirty(self, light: "PlatformBatchLight"):
        """Mark as dirty."""
        self.dirty_lights.add(light)
        self.dirty_lights_changed.set()
        # Drop pending scheduled steps for this light; the new fade
        # supersedes them.
        self.dirty_schedule = SortedList([x for x in self.dirty_schedule if x[1] != light])
| {
"repo_name": "missionpinball/mpf",
"path": "mpf/core/platform_batch_light_system.py",
"copies": "1",
"size": "7875",
"license": "mit",
"hash": -6938963416869747000,
"line_mean": 42.0327868852,
"line_max": 117,
"alpha_frac": 0.5912380952,
"autogenerated": false,
"ratio": 4.125196437925616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013653584663220362,
"num_lines": 183
} |
"""A lightweight library for common vector and matrix operations."""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, Python File Format Interface
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Python File Format Interface
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import logging
import operator
def float_to_int(value):
    """Convert float to integer, rounding and handling nan and inf
    gracefully.

    >>> float_to_int(0.4)
    0
    >>> float_to_int(-0.4)
    0
    >>> float_to_int(0.6)
    1
    >>> float_to_int(-0.6)
    -1
    >>> float_to_int(float('inf'))
    pyffi.utils.mathutils:WARNING:float_to_int converted +inf to +2147483648.
    2147483648
    >>> float_to_int(-float('inf'))
    pyffi.utils.mathutils:WARNING:float_to_int converted -inf to -2147483648.
    -2147483648
    >>> float_to_int(float('nan'))
    pyffi.utils.mathutils:WARNING:float_to_int converted nan to 0.
    0
    """
    logger = logging.getLogger("pyffi.utils.mathutils")
    try:
        # Round half away from zero.
        return int(value + 0.5 if value > 0 else value - 0.5)
    except ValueError:
        # int(nan) raises ValueError.
        # logger.warning replaces the deprecated logger.warn alias.
        logger.warning("float_to_int converted nan to 0.")
        return 0
    except OverflowError:
        # int(inf) raises OverflowError; clamp to +/- 2**31.
        if value > 0:
            logger.warning("float_to_int converted +inf to +2147483648.")
            return 2147483648
        else:
            logger.warning("float_to_int converted -inf to -2147483648.")
            return -2147483648
def getBoundingBox(veclist):
    """Calculate bounding box (pair of vectors with minimum and maximum
    coordinates).

    >>> getBoundingBox([(0,0,0), (1,1,2), (0.5,0.5,0.5)])
    ((0, 0, 0), (1, 1, 2))"""
    if not veclist:
        # Assume 3 dimensions when the list is empty.
        return (0, 0, 0), (0, 0, 0)
    # Dimensionality is taken from the first vector.
    dim = len(veclist[0])
    low = tuple(min(vec[axis] for vec in veclist) for axis in range(dim))
    high = tuple(max(vec[axis] for vec in veclist) for axis in range(dim))
    return low, high
def getCenterRadius(veclist):
    """Calculate center and radius of given list of vectors.

    >>> getCenterRadius([(0,0,0), (1,1,2), (0.5,0.5,0.5)]) # doctest: +ELLIPSIS
    ((0.5, 0.5, 1.0), 1.2247...)
    """
    if not veclist:
        # Assume 3 dimensions when the list is empty.
        return (0, 0, 0), 0
    low, high = getBoundingBox(veclist)
    # The center sits midway between the bounding-box corners.
    center = tuple((lo + hi) * 0.5 for lo, hi in zip(low, high))
    # The radius is the largest distance from the center to any vector.
    max_sq = 0.0
    for vec in veclist:
        offset = vecSub(center, vec)
        max_sq = max(max_sq, vecDotProduct(offset, offset))
    return center, max_sq ** 0.5
def vecSub(vec1, vec2):
    """Componentwise vector subtraction (any dimension)."""
    return tuple(map(operator.sub, vec1, vec2))
def vecAdd(vec1, vec2):
    """Componentwise vector addition (any dimension)."""
    return tuple(map(operator.add, vec1, vec2))
def vecscalarMul(vec, scalar):
    """Multiply every component of *vec* by *scalar*."""
    return tuple(map(lambda component: component * scalar, vec))
def vecDotProduct(vec1, vec2):
    """The vector dot product (any dimension).

    >>> vecDotProduct((1,2,3),(4,-5,6))
    12"""
    return sum(map(operator.mul, vec1, vec2))
def vecDistance(vec1, vec2):
    """Return distance between two vectors (any dimension).

    >>> vecDistance((1,2,3),(4,-5,6)) # doctest: +ELLIPSIS
    8.185...
    """
    difference = vecSub(vec1, vec2)
    return vecNorm(difference)
def vecNormal(vec1, vec2, vec3):
    """Return a vector orthogonal to the triangle (vec1, vec2, vec3)."""
    edge_a = vecSub(vec2, vec1)
    edge_b = vecSub(vec3, vec1)
    return vecCrossProduct(edge_a, edge_b)
def vecDistanceAxis(axis, vec):
    """Return distance between the axis spanned by axis[0] and axis[1] and the
    vector v, in 3 dimensions. Raises ZeroDivisionError if the axis points
    coincide.

    >>> vecDistanceAxis([(0,0,0), (0,0,1)], (0,3.5,0))
    3.5
    >>> vecDistanceAxis([(0,0,0), (1,1,1)], (0,1,0.5)) # doctest: +ELLIPSIS
    0.70710678...
    """
    # Parallelogram area (|axis edge x point edge|) divided by the base
    # length yields the height, i.e. the point-to-line distance.
    parallelogram_area = vecNorm(vecNormal(axis[0], axis[1], vec))
    return parallelogram_area / vecDistance(*axis)
def vecDistanceTriangle(triangle, vert):
    """Return (signed) distance between the plane spanned by triangle[0],
    triangle[1], and triange[2], and the vector v, in 3 dimensions.

    >>> vecDistanceTriangle([(0,0,0),(1,0,0),(0,1,0)], (0,0,1))
    1.0
    >>> vecDistanceTriangle([(0,0,0),(0,1,0),(1,0,0)], (0,0,1))
    -1.0
    """
    normal = vecNormal(*triangle)
    # Project the offset from a triangle corner onto the unit normal;
    # the sign follows the triangle's winding order.
    offset = vecSub(vert, triangle[0])
    return vecDotProduct(normal, offset) / vecNorm(normal)
def vecNorm(vec):
    """Norm of a vector (any dimension).

    >>> vecNorm((2,3,4)) # doctest: +ELLIPSIS
    5.3851648...
    """
    squared_length = vecDotProduct(vec, vec)
    return squared_length ** 0.5
def vecNormalized(vec):
    """Normalized version of a vector (any dimension).

    >>> vecNormalized((2,3,4)) # doctest: +ELLIPSIS
    (0.371..., 0.557..., 0.742...)
    """
    scale = 1.0 / vecNorm(vec)
    return vecscalarMul(vec, scale)
def vecCrossProduct(vec1, vec2):
    """The vector cross product (in 3d).

    >>> vecCrossProduct((1,0,0),(0,1,0))
    (0, 0, 1)
    >>> vecCrossProduct((1,2,3),(4,5,6))
    (-3, 6, -3)
    """
    x1, y1, z1 = vec1
    x2, y2, z2 = vec2
    return (y1 * z2 - z1 * y2,
            z1 * x2 - x1 * z2,
            x1 * y2 - y1 * x2)
def matTransposed(mat):
    """Return the transposed of a nxn matrix.

    >>> matTransposed(((1, 2), (3, 4)))
    ((1, 3), (2, 4))"""
    size = len(mat)
    # Column 'col' of the input becomes row 'col' of the result.
    return tuple(tuple(row[col] for row in mat) for col in range(size))
def matscalarMul(mat, scalar):
    """Return matrix * scalar."""
    size = len(mat)
    return tuple(tuple(value * scalar for value in row[:size])
                 for row in mat[:size])
def matvecMul(mat, vec):
    """Return matrix * vector."""
    size = len(mat)
    # Each output component is the dot product of one row with the vector.
    return tuple(sum(coeff * vec[col] for col, coeff in enumerate(row[:size]))
                 for row in mat[:size])
def matMul(mat1, mat2):
    """Return matrix * matrix."""
    size = len(mat1)
    return tuple(
        tuple(sum(mat1[row][k] * mat2[k][col] for k in range(size))
              for col in range(size))
        for row in range(size))
def matAdd(mat1, mat2):
    """Return matrix + matrix."""
    size = len(mat1)
    return tuple(tuple(a + b for a, b in zip(row1[:size], row2[:size]))
                 for row1, row2 in zip(mat1[:size], mat2[:size]))
def matSub(mat1, mat2):
    """Return matrix - matrix."""
    size = len(mat1)
    return tuple(tuple(a - b for a, b in zip(row1[:size], row2[:size]))
                 for row1, row2 in zip(mat1[:size], mat2[:size]))
def matCofactor(mat, i, j):
    """Determinant of the minor of *mat* with row *i* and column *j* removed."""
    size = len(mat)
    minor = tuple(tuple(mat[row][col]
                        for col in range(size)
                        if col != j)
                  for row in range(size)
                  if row != i)
    return matDeterminant(minor)
def matDeterminant(mat):
    """Calculate determinant.

    >>> matDeterminant( ((1,2,3), (4,5,6), (7,8,9)) )
    0
    >>> matDeterminant( ((1,2,4), (3,0,2), (-3,6,2)) )
    36
    """
    size = len(mat)
    # Base cases up to 2x2; larger matrices expand along the first column.
    if size == 0:
        return 0
    if size == 1:
        return mat[0][0]
    if size == 2:
        return mat[0][0] * mat[1][1] - mat[1][0] * mat[0][1]
    return sum((-1 if row & 1 else 1) * mat[row][0] * matCofactor(mat, row, 0)
               for row in range(size))
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| {
"repo_name": "griest024/PokyrimTools",
"path": "pyffi-develop/pyffi/utils/mathutils.py",
"copies": "1",
"size": "8964",
"license": "mit",
"hash": -117752123602741570,
"line_mean": 31.3610108303,
"line_max": 79,
"alpha_frac": 0.5893574297,
"autogenerated": false,
"ratio": 3.35604642456009,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9411778715894328,
"avg_score": 0.006725027673152351,
"num_lines": 277
} |
"""A lightweight mutable container factory.
This is a lightweight struct that can be used when working with very
large numbers of objects. thinrecord instances do not carry around a
dict, and cannot define their own attributes. You can, however, add
methods to the class object and, with caveats, subclass the class to
add additional methods: http://stackoverflow.com/a/1816648/3109503
tl;dr subclasses must use __slots__, don't repeat variable names.
Based on collections.namedtuple, with inspiration and tests from:
https://bitbucket.org/ericvsmith/recordtype
"""
# Public API of this module.
__all__ = ['thinrecord']
__author__ = "Isaac Levy <ilevy@chromium.org>"
__version__ = "0.0.1"
import six as _six
import sys as _sys
from keyword import iskeyword as _iskeyword
# Sentinel meaning "no default supplied"; a unique object so that None
# remains usable as an explicit field default.
NO_DEFAULT = object()
def _check_name(name):
    """Validate a type or field name, raising ValueError when invalid.

    A valid name is a non-empty string of alphanumerics/underscores that
    is not a Python keyword and does not start with a digit.
    """
    label = 'Type names and field names'
    if not isinstance(name, _six.string_types):
        raise ValueError('{} must be a string (type: {!r}): '
                         '{!r}'.format(label, type(name), name))
    if not name:
        raise ValueError('{} cannot be empty.'.format(label))
    if any(not (c.isalnum() or c == '_') for c in name):
        raise ValueError('{} can only contain alphanumerics and underscores: '
                         '{!r}'.format(label, name))
    if _iskeyword(name):
        raise ValueError('{} cannot be a keyword: {!r}'.format(label, name))
    if name[0].isdigit():
        raise ValueError('{} cannot start with a number: '
                         '{!r}'.format(label, name))
def thinrecord(typename, fields, default=NO_DEFAULT,
               ignore_extra_kwargs=True):
    """Build and return a mutable, __slots__-based record class.

    *fields* must be a string (comma/space separated names) or an
    iterable whose items are fieldname strings or 2-tuples of the form
    (fieldname, default).  *default*, when given, applies to every field
    without its own default.  When *ignore_extra_kwargs* is true, the
    generated __init__ silently drops unknown keyword arguments.
    """
    _check_name(typename)
    if isinstance(fields, _six.string_types):
        fields = fields.replace(',', ' ').split()
    field_defaults, field_names, fields_seen = {}, [], set()
    for field in fields:
        if isinstance(field, _six.string_types):
            field_name = field
            cur_default = default
        else:
            try:
                field_name, cur_default = field
            except TypeError:
                raise ValueError('Field must be string or iterable: {!r}'.format(field))
        _check_name(field_name)
        if field_name in ('_fields', '_items', '_update'):
            raise ValueError('field name conflicts with helper method: '
                             '{!r}'.format(field_name))
        if field_name in fields_seen:
            raise ValueError('Duplicate field name: {}'.format(field_name))
        fields_seen.add(field_name)
        field_names.append(field_name)
        if cur_default is not NO_DEFAULT:
            field_defaults[field_name] = cur_default
    # Create and fill-in the class template.
    default_name_prefix = '_default_val_for_'
    argtxt = ', '.join(field_names)  # "x, y, ..."
    quoted_argtxt = ', '.join("'{}'".format(f) for f in field_names)
    if len(field_names) == 1:
        # A one-element tuple literal needs a trailing comma.
        quoted_argtxt += ','
    initargs = []
    for f_name in field_names:
        if f_name in field_defaults:
            initargs.append('{}={}'.format(f_name, default_name_prefix + f_name))
        else:
            initargs.append(f_name)
    if ignore_extra_kwargs:
        initargs.append('**_unused_kwargs')
    initargs = ', '.join(initargs)  # "x, y=_default_val_for_y, **_unused_kwargs"
    if field_names:
        initbody = '\n        '.join('self.{0} = {0}'.format(f) for f in field_names)
    else:
        initbody = 'pass'
    reprtxt = ', '.join('{}={{!r}}'.format(f) for f in field_names)
    template = '''
try:
    from collections import OrderedDict as _MaybeOrderedDict
except ImportError:
    _MaybeOrderedDict = dict
try:
    from __builtins__ import property as _property, list as _list, tuple as _tuple
except ImportError:
    _property, _tuple, _list = property, tuple, list
class {typename}(object):
    '{typename}({argtxt})'
    __slots__ = ({quoted_argtxt})
    _fields = __slots__
    def __init__(self, {initargs}):
        {initbody}
    def __len__(self):
        return {num_fields}
    def __iter__(self):
        """Iterate through values."""
        for var in self._fields:
            yield getattr(self, var)
    def _items(self):
        """A fresh list of pairs (key, val)."""
        return _list(zip(self._fields, self))
    def _update(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)
    @_property
    def __dict__(self):
        return _MaybeOrderedDict(self._items())
    def __repr__(self):
        return '{typename}(' + '{reprtxt}'.format(*self) + ')'
    def __eq__(self, other):
        return isinstance(other, self.__class__) and _tuple(self) == _tuple(other)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __lt__(self, other):
        if isinstance(other, self.__class__):
            return _tuple(self) < _tuple(other)
        raise TypeError('Unorderable types ({typename}, {{!s}})'.format(
            other.__class__.__name__))
    def __ge__(self, other):
        return not self.__lt__(other)
    def __le__(self, other):
        if isinstance(other, self.__class__):
            return _tuple(self) <= _tuple(other)
        raise TypeError('Unorderable types ({typename}, {{!s}})'.format(
            other.__class__.__name__))
    def __gt__(self, other):
        return not self.__le__(other)
    def __hash__(self):
        raise TypeError('Unhashable type: {typename}')
    def __getstate__(self):
        return _tuple(self)
    def __setstate__(self, state):
        self.__init__(*state)
    def __getitem__(self, idx):
        return _tuple(self)[idx]
    def __setitem__(self, idx, value):
        if isinstance(idx, slice):
            raise TypeError('{typename} does not support assignment by slice.')
        else:
            setattr(self, self._fields[idx], value)
'''.format(
        typename=typename,
        argtxt=argtxt,
        quoted_argtxt=quoted_argtxt,
        initargs=initargs,
        initbody=initbody,
        reprtxt=reprtxt,
        num_fields=len(field_names))
    # BUG FIX (in the template above): _update used to do
    # `for k, v in kwargs:`, iterating keys and failing to unpack;
    # it now iterates kwargs.items().  _items also wraps zip() in _list
    # so it really returns "a fresh list" on Python 3, as documented.
    # Execute the template string in a temporary namespace.
    namespace = {'__name__': 'thinrecord_' + typename}
    for name, field_default in _six.iteritems(field_defaults):
        namespace[default_name_prefix + name] = field_default
    _six.exec_(template, namespace)
    cls = namespace[typename]
    # Keep the generated source around for debugging/introspection.
    cls._source = template
    # For pickling to work, the __module__ variable needs to be set to
    # the frame where the named tuple is created. Bypass this step in
    # environments where sys._getframe is not defined (Jython for
    # example).
    if hasattr(_sys, '_getframe') and _sys.platform != 'cli':
        cls.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    return cls
| {
"repo_name": "isaacl/thinrecord",
"path": "thinrecord.py",
"copies": "1",
"size": "6444",
"license": "mit",
"hash": 5427799041935288000,
"line_mean": 30.900990099,
"line_max": 80,
"alpha_frac": 0.6315952824,
"autogenerated": false,
"ratio": 3.5899721448467967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9677686521569854,
"avg_score": 0.008776181135388623,
"num_lines": 202
} |
"""A lightweight Python wrapper of espeak and mbrola"""
import fnmatch
import io
import logging
import os
import re
import wave
from pathlib import Path
from shlex import quote
from shutil import which
from struct import pack
from subprocess import PIPE, run
from sys import platform
from typing import List, Dict
from typing import Union
from .phonemes import BritishEnglishPhonemes, GermanPhonemes, FrenchPhonemes, \
SpanishPhonemes, ItalianPhonemes, PhonemeList
class AudioPlayer:
    """Thin PyAudio-based player for wave files."""
    chunk = 1024

    def __init__(self):
        """Set up the PyAudio backend; the stream is built lazily by set_file."""
        self.wf = None
        self.stream = None
        import pyaudio
        self.p = pyaudio.PyAudio()

    def set_file(self, file):
        """Open *file* as a wave and (re)build the output stream for it."""
        if self.stream is not None:
            self.stream.close()
        self.wf = wave.open(file, 'rb')
        wf = self.wf
        self.stream = self.p.open(
            format=self.p.get_format_from_width(wf.getsampwidth()),
            channels=wf.getnchannels(),
            rate=wf.getframerate(),
            output=True
        )

    def play(self):
        """Play the whole currently-loaded file."""
        # readframes returns b'' at end-of-file, which stops the iterator.
        for frames in iter(lambda: self.wf.readframes(self.chunk), b''):
            self.stream.write(frames)

    def close(self):
        """Stop the stream and release PyAudio resources."""
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
# Maps ISO 639-1 language codes to their phoneme inventory classes.
lg_code_to_phoneme = {"fr": FrenchPhonemes,
                      "en": BritishEnglishPhonemes,
                      "es": SpanishPhonemes,
                      "de": GermanPhonemes,
                      "it": ItalianPhonemes}
class Voice:
class InvalidVoiceParameters(Exception):
pass
if platform in ('linux', 'darwin'):
espeak_binary = 'espeak'
mbrola_binary = 'mbrola'
mbrola_voices_folder = "/usr/share/mbrola"
elif platform == 'win32':
# If the path has spaces it needs to be enclosed in double quotes.
espeak_binary = '"C:\\Program Files (x86)\\eSpeak\\command_line\\espeak"'
mbrola_binary = '"C:\\Program Files (x86)\\Mbrola Tools\\mbrola"'
mbrola_voices_folder = os.path.expanduser('~\\.mbrola\\')
if not os.path.exists(mbrola_voices_folder):
os.makedirs(mbrola_voices_folder)
# TODO: raise error if no binary is installed
else:
raise ValueError('Unsupported system.')
volumes_presets = {'fr1': 1.17138, 'fr2': 1.60851, 'fr3': 1.01283,
'fr4': 1.0964, 'fr5': 2.64384, 'fr6': 1.35412,
'fr7': 1.96092, 'us1': 1.658, 'us2': 1.7486,
'us3': 3.48104, 'es1': 3.26885, 'es2': 1.84053}
def __init__(self, speed: int = 160, pitch: int = 50, lang: str = "fr",
voice_id: int = None, volume: float = None):
"""All parameters are optional, but it's still advised that you pick
a language, else it **will** default to French, which is a
default to the most beautiful language on earth.
Any invalid parameter will raise an `InvalidVoiceParameter` exception."""
self.speed = speed
if 99 >= pitch >= 0:
self.pitch = pitch
else:
raise self.InvalidVoiceParameters(
"Pitch adjustment has to be an integer between 0 and 99")
# if no voice ID is specified, just defaults to one it can find
voice_id = (voice_id if voice_id is not None
else self._find_existing_voiceid(lang))
voice_name = lang + str(voice_id)
if (Path(self.mbrola_voices_folder)
/ Path(voice_name)
/ Path(voice_name)).is_file():
self.lang = lang
self.voice_id = voice_id
else:
raise self.InvalidVoiceParameters(
"Voice %s not found. Check language and voice id, or install "
"by running 'sudo apt install mbrola-%s'. On Windows download "
"voices from https://github.com/numediart/MBROLA-voices"
% (voice_name, voice_name))
self.volume = volume or self.volumes_presets.get(voice_name, 1)
if lang != 'fr':
self.sex = self.voice_id
else:
self.sex = 4 if self.voice_id in (2, 4) else 1
try:
self.phonemes = lg_code_to_phoneme[lang]
except KeyError:
self.phonemes = None
self._player = None
def _find_existing_voiceid(self, lang: str):
"""Finds any possible voice id for a given language"""
for file in os.listdir(self.mbrola_voices_folder):
if fnmatch.fnmatch(file, lang + "[0-9]"):
return int(file.strip(lang))
# default to 1 if no voice are found (although it'll probably fail then)
return 1
def _mbrola_exists(self):
return which(self.mbrola_binary) is not None
@property
def player(self):
if self._player is None:
self._player = AudioPlayer()
return self._player
def _wav_format(self, wav: bytes):
"""Reformats the wav returned by mbrola, which doesn't have the
right size headers, since mbrola doesn't know in advance
the size of the wav file."""
# the five terms of this bytes concatenation are the following:
# ["RIFF"] + [CHUNCK_SIZE] + [VARIOUS_HEADERS] + [SUBCHUNK_SIZE] + [ACTUAL_AUDIO_DATA]
# http://soundfile.sapp.org/doc/WaveFormat/ to get more details
return wav[:4] + pack('<I', len(wav) - 8) + wav[8:40] + pack('<I', len(
wav) - 44) + wav[44:]
def _str_to_phonemes(self, text: str) -> PhonemeList:
espeak_voice_name_template = ('mb/mb-%s%d'
if platform in ('linux', 'darwin')
else 'mb-%s%d')
voice_filename = espeak_voice_name_template % (self.lang, self.sex)
# Detailed explanation of options:
# http://espeak.sourceforge.net/commands.html
phoneme_synth_args = [
self.espeak_binary,
'-s', str(self.speed),
'-p', str(self.pitch),
'--pho', # outputs mbrola phoneme data
'-q', # quiet mode
'-v', voice_filename,
'%s' % text]
# Linux-specific memory management setting
# Tells Clib to ignore allocations problems (which happen but doesn't
# compromise espeak's outputs)
if platform in ('linux', 'darwin'):
phoneme_synth_args.insert(0, 'MALLOC_CHECK_=0')
logging.debug("Running espeak command %s"
% " ".join(phoneme_synth_args))
# Since MALLOC_CHECK_ has to be used before anything else,
# we need to compile the full command as a single
# string and we need to use `shell=True`.
return PhonemeList.from_pho_str(
run(' '.join(phoneme_synth_args), shell=True, stdout=PIPE,
stderr=PIPE)
.stdout
.decode("utf-8")
.strip())
def _phonemes_to_audio(self, phonemes: PhonemeList) -> bytes:
voice_path_template = ('%s/%s%d/%s%d'
if platform in ("linux", "darwin")
else '%s\\%s%d\\%s%d')
voice_phonemic_db = (voice_path_template
% (self.mbrola_voices_folder, self.lang,
self.voice_id, self.lang, self.voice_id))
audio_synth_string = [
self.mbrola_binary,
'-v', str(self.volume),
'-e', # ignores fatal errors on unknown diphone
voice_phonemic_db,
'-', # command or .pho file; `-` instead of a file means stdin
'-.wav' # output file; `-` instead of a file means stdout
]
if platform in ('linux', 'darwin'):
audio_synth_string.insert(0, 'MALLOC_CHECK_=0')
logging.debug(
"Running mbrola command %s" % " ".join(audio_synth_string))
return self._wav_format(
run(" ".join(audio_synth_string), shell=True, stdout=PIPE,
stderr=PIPE, input=str(phonemes).encode("utf-8")).stdout)
def _str_to_audio(self, text: str) -> bytes:
phonemes_list = self._str_to_phonemes(text)
audio = self._phonemes_to_audio(phonemes_list)
return audio
def to_phonemes(self, text: str) -> PhonemeList:
"""Renders a str to a ```PhonemeList`` object."""
return self._str_to_phonemes(quote(text))
def to_audio(self, speech: Union[PhonemeList, str], filename=None) -> bytes:
"""Renders a str or a ``PhonemeList`` to a wave byte object.
If a filename is specified, it saves the audio file to wave as well
Throws a `InvalidVoiceParameters` if the voice isn't found"""
if not self._mbrola_exists():
raise RuntimeError("Can't synthesize sound: mbrola executable is "
"not present. "
"Install using apt get install mbrola or from"
"the official mbrola repository on github")
if isinstance(speech, str):
wav = self._str_to_audio(quote(speech))
elif isinstance(speech, PhonemeList):
wav = self._phonemes_to_audio(speech)
if filename is not None:
with open(filename, "wb") as wavfile:
wavfile.write(wav)
return wav
def say(self, speech: Union[PhonemeList, str]):
"""Renders a string or a ``PhonemeList`` object to audio,
then plays it using the PyAudio lib"""
wav = self.to_audio(speech)
try:
self.player.set_file(io.BytesIO(wav))
except ImportError:
raise ImportError(
"You must install the pyaudio pip package to be able to "
"use the say() method")
else:
self.player.play()
self.player.close()
@classmethod
def list_voice_ids(cls) -> Dict[str, List]:
"""Returns a dictionary listing available voice id's for each language"""
langs: Dict[str, List] = {}
for file in os.listdir(cls.mbrola_voices_folder):
match = re.match(r"([a-z]{2})([0-9])", file)
if match is not None:
lang, voice_id = match.groups()
if lang not in langs:
langs[lang] = []
langs[lang].append(voice_id)
return langs
@classmethod
def get_voices_for_lang(cls, lang: str) -> List['Voice']:
"""Get instances of all the available voices for a particular language"""
voice_ids = cls.list_voice_ids()
return [Voice(voice_id=voice_id, lang=lang)
for voice_id in voice_ids[lang]]
| {
"repo_name": "hadware/voxpopuli",
"path": "voxpopuli/main.py",
"copies": "1",
"size": "10858",
"license": "mit",
"hash": -3271826230098135000,
"line_mean": 36.8327526132,
"line_max": 94,
"alpha_frac": 0.5614293608,
"autogenerated": false,
"ratio": 3.8178621659634318,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4879291526763432,
"avg_score": null,
"num_lines": null
} |
"""A lightweight Python wrapper of SoX's effects."""
import shlex
from io import BufferedReader, BufferedWriter
from subprocess import PIPE, Popen
import numpy as np
from .sndfiles import (
FileBufferInput,
FileBufferOutput,
FilePathInput,
FilePathOutput,
NumpyArrayInput,
NumpyArrayOutput,
logger,
)
def mutually_exclusive(*args):
    """Return True when at most one of *args* is not None."""
    provided = [arg for arg in args if arg is not None]
    return len(provided) < 2
class AudioEffectsChain:
def __init__(self):
self.command = []
def equalizer(self, frequency, q=1.0, db=-3.0):
"""equalizer takes three parameters: filter center frequency in Hz, "q"
or band-width (default=1.0), and a signed number for gain or
attenuation in dB.
Beware of clipping when using positive gain.
"""
self.command.append('equalizer')
self.command.append(frequency)
self.command.append(str(q) + 'q')
self.command.append(db)
return self
def bandpass(self, frequency, q=1.0):
"""bandpass takes 2 parameters: filter center frequency in Hz and "q"
or band-width (default=1.0).
It gradually removes frequencies outside the band specified.
"""
self.command.append('bandpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def bandreject(self, frequency, q=1.0):
"""bandreject takes 2 parameters: filter center frequency in Hz and "q"
or band-width (default=1.0).
It gradually removes frequencies within the band specified.
"""
self.command.append('bandreject')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def lowshelf(self, gain=-20.0, frequency=100, slope=0.5):
"""lowshelf takes 3 parameters: a signed number for gain or attenuation
in dB, filter frequency in Hz and slope (default=0.5, maximum=1.0).
Beware of Clipping when using positive gain.
"""
self.command.append('bass')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self
def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):
"""highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain.
"""
self.command.append('treble')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self
def highpass(self, frequency, q=0.707):
"""highpass takes 2 parameters: filter frequency in Hz below which
frequencies will be attenuated and q (default=0.707).
Beware of clipping when using high q values.
"""
self.command.append('highpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def lowpass(self, frequency, q=0.707):
"""lowpass takes 2 parameters: filter frequency in Hz above which
frequencies will be attenuated and q (default=0.707).
Beware of clipping when using high q values.
"""
self.command.append('lowpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def limiter(self, gain=3.0):
"""limiter takes one parameter: gain in dB.
Beware of adding too much gain, as it can cause audible
distortion. See the compand effect for a more capable limiter.
"""
self.command.append('gain')
self.command.append('-l')
self.command.append(gain)
return self
def normalize(self):
"""normalize has no parameters.
It boosts level so that the loudest part of your file reaches
maximum, without clipping.
"""
self.command.append('gain')
self.command.append('-n')
return self
def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20, db_from=-20.0, db_to=-20.0):
"""compand takes 6 parameters:
attack (seconds), decay (seconds), soft_knee (ex. 6 results
in 6:1 compression ratio), threshold (a negative value
in dB), the level below which the signal will NOT be companded
(a negative value in dB), the level above which the signal will
NOT be companded (a negative value in dB). This effect
manipulates dynamic range of the input file.
"""
self.command.append('compand')
self.command.append(str(attack) + ',' + str(decay))
self.command.append(str(soft_knee) + ':' + str(threshold) + ',' + str(db_from) + ',' + str(db_to))
return self
def sinc(self,
high_pass_frequency=None,
low_pass_frequency=None,
left_t=None,
left_n=None,
right_t=None,
right_n=None,
attenuation=None,
beta=None,
phase=None,
M=None,
I=None,
L=None):
"""sinc takes 12 parameters:
high_pass_frequency in Hz,
low_pass_frequency in Hz,
left_t,
left_n,
right_t,
right_n,
attenuation in dB,
beta,
phase,
M,
I,
L
This effect creates a steep bandpass or
bandreject filter. You may specify as few as the first two
parameters. Setting the high-pass parameter to a lower value
than the low-pass creates a band-reject filter.
"""
self.command.append("sinc")
if not mutually_exclusive(attenuation, beta):
raise ValueError("Attenuation (-a) and beta (-b) are mutually exclusive arguments.")
if attenuation is not None and beta is None:
self.command.append('-a')
self.command.append(str(attenuation))
elif attenuation is None and beta is not None:
self.command.append('-b')
self.command.append(str(beta))
if not mutually_exclusive(phase, M, I, L):
raise ValueError("Phase (-p), -M, L, and -I are mutually exclusive arguments.")
if phase is not None:
self.command.append('-p')
self.command.append(str(phase))
elif M is not None:
self.command.append('-M')
elif I is not None:
self.command.append('-I')
elif L is not None:
self.command.append('-L')
if not mutually_exclusive(left_t, left_t):
raise ValueError("Transition bands options (-t or -n) are mutually exclusive.")
if left_t is not None:
self.command.append('-t')
self.command.append(str(left_t))
if left_n is not None:
self.command.append('-n')
self.command.append(str(left_n))
if high_pass_frequency is not None and low_pass_frequency is None:
self.command.append(str(high_pass_frequency))
elif high_pass_frequency is not None and low_pass_frequency is not None:
self.command.append(str(high_pass_frequency) + '-' + str(low_pass_frequency))
elif high_pass_frequency is None and low_pass_frequency is not None:
self.command.append(str(low_pass_frequency))
if not mutually_exclusive(right_t, right_t):
raise ValueError("Transition bands options (-t or -n) are mutually exclusive.")
if right_t is not None:
self.command.append('-t')
self.command.append(str(right_t))
if right_n is not None:
self.command.append('-n')
self.command.append(str(right_n))
return self
def bend(self, bends, frame_rate=None, over_sample=None):
"""TODO Add docstring."""
self.command.append("bend")
if frame_rate is not None and isinstance(frame_rate, int):
self.command.append('-f %s' % frame_rate)
if over_sample is not None and isinstance(over_sample, int):
self.command.append('-o %s' % over_sample)
for bend in bends:
self.command.append(','.join(bend))
return self
def chorus(self, gain_in, gain_out, decays):
"""TODO Add docstring."""
self.command.append("chorus")
self.command.append(gain_in)
self.command.append(gain_out)
for decay in decays:
modulation = decay.pop()
numerical = decay
self.command.append(' '.join(map(str, numerical)) + ' -' + modulation)
return self
def delay(self,
gain_in=0.8,
gain_out=0.5,
delays=None,
decays=None,
parallel=False):
"""delay takes 4 parameters: input gain (max 1), output gain
and then two lists, delays and decays.
Each list is a pair of comma seperated values within
parenthesis.
"""
if delays is None:
delays = list((1000, 1800))
if decays is None:
decays = list((0.3, 0.25))
self.command.append('echo' + ('s' if parallel else ''))
self.command.append(gain_in)
self.command.append(gain_out)
self.command.extend(list(sum(zip(delays, decays), ())))
return self
def echo(self, **kwargs):
"""TODO Add docstring."""
return self.delay(**kwargs)
def fade(self):
"""TODO Add docstring."""
raise NotImplementedError()
def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5, shape='sine', phase=25, interp='linear'):
"""TODO Add docstring."""
raise NotImplementedError()
def gain(self, db):
"""gain takes one paramter: gain in dB."""
self.command.append('gain')
self.command.append(db)
return self
def mcompand(self):
"""TODO Add docstring."""
raise NotImplementedError()
def noise_reduction(self, amount=0.5):
"""TODO Add docstring."""
# TODO Run sox once with noiseprof on silent portions to generate a noise profile.
raise NotImplementedError()
def oops(self):
"""TODO Add docstring."""
raise NotImplementedError()
def overdrive(self, gain=20, colour=20):
"""overdrive takes 2 parameters: gain in dB and colour which effects
the character of the distortion effet.
Both have a default value of 20. TODO - changing color does not seem to have an audible effect
"""
self.command.append('overdrive')
self.command.append(gain)
self.command.append(colour)
return self
def phaser(self,
gain_in=0.9,
gain_out=0.8,
delay=1,
decay=0.25,
speed=2,
triangular=False):
"""phaser takes 6 parameters: input gain (max 1.0), output gain (max
1.0), delay, decay, speed and LFO shape=trianglar (which must be set to
True or False)"""
self.command.append("phaser")
self.command.append(gain_in)
self.command.append(gain_out)
self.command.append(delay)
self.command.append(decay)
self.command.append(speed)
if triangular:
self.command.append('-t')
else:
self.command.append('-s')
return self
def pitch(self, shift,
use_tree=False,
segment=82,
search=14.68,
overlap=12):
"""pitch takes 4 parameters: user_tree (True or False), segment, search
and overlap."""
self.command.append("pitch")
if use_tree:
self.command.append('-q')
self.command.append(shift)
self.command.append(segment)
self.command.append(search)
self.command.append(overlap)
return self
def loop(self):
"""TODO Add docstring."""
self.command.append('repeat')
self.command.append('-')
return self
def reverb(self,
reverberance=50,
hf_damping=50,
room_scale=100,
stereo_depth=100,
pre_delay=20,
wet_gain=0,
wet_only=False):
"""reverb takes 7 parameters: reverberance, high-freqnency damping,
room scale, stereo depth, pre-delay, wet gain and wet only (True or
False)"""
self.command.append('reverb')
if wet_only:
self.command.append('-w')
self.command.append(reverberance)
self.command.append(hf_damping)
self.command.append(room_scale)
self.command.append(stereo_depth)
self.command.append(pre_delay)
self.command.append(wet_gain)
return self
def reverse(self):
"""reverse takes no parameters.
It plays the input sound backwards.
"""
self.command.append("reverse")
return self
def speed(self, factor, use_semitones=False):
"""speed takes 2 parameters: factor and use-semitones (True or False).
When use-semitones = False, a factor of 2 doubles the speed and raises the pitch an octave. The same result is achieved with factor = 1200 and use semitones = True.
"""
self.command.append("speed")
self.command.append(factor if not use_semitones else str(factor) + "c")
return self
def synth(self):
raise NotImplementedError()
def tempo(self,
factor,
use_tree=False,
opt_flag=None,
segment=82,
search=14.68,
overlap=12):
"""tempo takes 6 parameters: factor, use tree (True or False), option
flag, segment, search and overlap).
This effect changes the duration of the sound without modifying
pitch.
"""
self.command.append("tempo")
if use_tree:
self.command.append('-q')
if opt_flag in ('l', 'm', 's'):
self.command.append('-%s' % opt_flag)
self.command.append(factor)
self.command.append(segment)
self.command.append(search)
self.command.append(overlap)
return self
def tremolo(self, freq, depth=40):
"""tremolo takes two parameters: frequency and depth (max 100)"""
self.command.append("tremolo")
self.command.append(freq)
self.command.append(depth)
return self
def trim(self, positions):
"""TODO Add docstring."""
self.command.append("trim")
for position in positions:
# TODO: check if the position means something
self.command.append(position)
return self
def upsample(self, factor):
"""TODO Add docstring."""
self.command.append("upsample")
self.command.append(factor)
return self
def vad(self):
raise NotImplementedError()
def vol(self, gain, type="amplitude", limiter_gain=None):
"""vol takes three parameters: gain, gain-type (amplitude, power or dB)
and limiter gain."""
self.command.append("vol")
if type in ["amplitude", "power", "dB"]:
self.command.append(type)
else:
raise ValueError("Type has to be dB, amplitude or power.")
if limiter_gain is not None:
self.command.append(str(limiter_gain))
print(self.command)
return self
def custom(self, command):
"""Run arbitrary SoX effect commands.
Examples:
custom('echo 0.8 0.9 1000 0.3') for an echo effect.
References:
- https://linux.die.net/man/1/soxexam
- http://sox.sourceforge.net/sox.html
- http://tldp.org/LDP/LG/issue73/chung.html
- http://dsl.org/cookbook/cookbook_29.html
"""
self.command.append(command)
return self
def __call__(
self,
src,
dst=np.ndarray,
sample_in=44100, # used only for arrays
sample_out=None,
encoding_out=None,
channels_out=None,
allow_clipping=True):
# depending on the input, using the right object to set up the input data arguments
stdin = None
if isinstance(src, str):
infile = FilePathInput(src)
stdin = src
elif isinstance(src, np.ndarray):
infile = NumpyArrayInput(src, sample_in)
stdin = src
elif isinstance(src, BufferedReader):
infile = FileBufferInput(src)
stdin = infile.data # retrieving the data from the file reader (np array)
else:
infile = None
# finding out which output encoding to use in case the output is ndarray
if encoding_out is None and dst is np.ndarray:
if isinstance(stdin, np.ndarray):
encoding_out = stdin.dtype.type
elif isinstance(stdin, str):
encoding_out = np.float32
# finding out which channel count to use (defaults to the input file's channel count)
if channels_out is None:
if infile is None:
channels_out = 1
else:
channels_out = infile.channels
if sample_out is None: # if the output samplerate isn't specified, default to input's
sample_out = sample_in
# same as for the input data, but for the destination
if isinstance(dst, str):
outfile = FilePathOutput(dst, sample_out, channels_out)
elif dst is np.ndarray:
outfile = NumpyArrayOutput(encoding_out, sample_out, channels_out)
elif isinstance(dst, BufferedWriter):
outfile = FileBufferOutput(dst, sample_out, channels_out)
else:
outfile = None
cmd = shlex.split(
' '.join([
'sox',
'-N',
'-V1' if allow_clipping else '-V2',
infile.cmd_prefix if infile is not None else '-d',
outfile.cmd_suffix if outfile is not None else '-d',
] + list(map(str, self.command))),
posix=False,
)
logger.debug("Running command : %s" % cmd)
if isinstance(stdin, np.ndarray):
stdout, stderr = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate(stdin.tobytes(order='F'))
else:
stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
if stderr:
raise RuntimeError(stderr.decode())
elif stdout:
outsound = np.frombuffer(stdout, dtype=encoding_out)
if channels_out > 1:
outsound = outsound.reshape((channels_out, int(len(outsound) / channels_out)), order='F')
if isinstance(outfile, FileBufferOutput):
outfile.write(outsound)
return outsound
| {
"repo_name": "carlthome/python-audio-effects",
"path": "pysndfx/dsp.py",
"copies": "1",
"size": "19031",
"license": "mit",
"hash": 992653292582544500,
"line_mean": 33.7281021898,
"line_max": 172,
"alpha_frac": 0.5801586885,
"autogenerated": false,
"ratio": 4.076033411865496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5156192100365496,
"avg_score": null,
"num_lines": null
} |
"""A lightweight test suite for games.py"""
# You can run this test suite by doing: py.test tests/test_games.py
# Of course you need to have py.test installed to do this.
import pytest
from games import * # noqa
# Creating the game instances shared by all tests below.
f52 = Fig52Game()
ttt = TicTacToe()
def gen_state(to_move='X', x_positions=None, o_positions=None, h=3, v=3, k=3):
    """Given whose turn it is to move, the positions of X's on the board, the
    positions of O's on the board, and, (optionally) number of rows, columns
    and how many consecutive X's or O's required to win, return the corresponding
    game state.

    ``k`` is accepted for API symmetry but unused here; the TicTacToe
    instance carries its own ``k``.  Position-list defaults are None
    (rather than mutable ``[]`` literals) and mean "no pieces".
    """
    x_positions = [] if x_positions is None else x_positions
    o_positions = [] if o_positions is None else o_positions
    occupied = set(x_positions) | set(o_positions)
    moves = [(x, y) for x in range(1, h + 1) for y in range(1, v + 1)
             if (x, y) not in occupied]
    board = {pos: 'X' for pos in x_positions}
    board.update({pos: 'O' for pos in o_positions})
    return GameState(to_move=to_move, utility=0, board=board, moves=moves)
def test_minimax_decision():
    # Optimal moves for each root state of the Fig. 5.2 game tree.
    expected_moves = {'A': 'a1', 'B': 'b1', 'C': 'c1', 'D': 'd3'}
    for state, best_move in expected_moves.items():
        assert minimax_decision(state, f52) == best_move
def test_alphabeta_full_search():
    # Same Fig. 5.2 expectations as the minimax test.
    for state, best_move in (('A', 'a1'), ('B', 'b1'), ('C', 'c1'), ('D', 'd3')):
        assert alphabeta_full_search(state, f52) == best_move
    # TicTacToe positions where the search must pick a specific square.
    state = gen_state(to_move='X', x_positions=[(1, 1), (3, 3)],
                      o_positions=[(1, 2), (3, 2)])
    assert alphabeta_full_search(state, ttt) == (2, 2)
    state = gen_state(to_move='O', x_positions=[(1, 1), (3, 1), (3, 3)],
                      o_positions=[(1, 2), (3, 2)])
    assert alphabeta_full_search(state, ttt) == (2, 2)
    state = gen_state(to_move='O', x_positions=[(1, 1)],
                      o_positions=[])
    assert alphabeta_full_search(state, ttt) == (2, 2)
    # NOTE(review): (3, 1) appears in both x_positions and o_positions
    # below — looks like a data typo in the original; kept as-is so the
    # expected move stays valid.
    state = gen_state(to_move='X', x_positions=[(1, 1), (3, 1)],
                      o_positions=[(2, 2), (3, 1)])
    assert alphabeta_full_search(state, ttt) == (1, 3)
def test_random_tests():
    # Fig. 5.2 game: optimal play by both sides yields a utility of 3.
    assert Fig52Game().play_game(alphabeta_player, alphabeta_player) == 3
    # The first player ('X') in TicTacToe never loses with alpha-beta
    # search, whatever the opponent plays.
    for opponent in (alphabeta_player, random_player):
        assert ttt.play_game(alphabeta_player, opponent) >= 0
if __name__ == '__main__':
    # Allow running this test module directly, outside a py.test invocation.
    pytest.main()
| {
"repo_name": "sofmonk/aima-python",
"path": "tests/test_games.py",
"copies": "1",
"size": "2510",
"license": "mit",
"hash": -6377196400236806000,
"line_mean": 33.3835616438,
"line_max": 81,
"alpha_frac": 0.6003984064,
"autogenerated": false,
"ratio": 2.9460093896713615,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4046407796071361,
"avg_score": null,
"num_lines": null
} |
"""A lightweight wrapper around MySQL Connector/Python."""
# -*- coding: utf-8 -*-
import logging
from collections import namedtuple
import time
import mysql.connector
__version__ = '0.0.1'
class JeekdbError(Exception):
    """Base exception raised for all jeekdb failures."""
# Result of Jeekdb.execute(): rows is None for statements without a result set.
ExecuteResult = namedtuple('ExecuteResult', ['last_row_id', 'row_count', 'rows'])
class Jeekdb(object):
    """Thin wrapper around ``mysql.connector.MySQLConnection``.

    Reconnects automatically after ``max_idle_time`` seconds of inactivity
    and offers dict-based helpers (iter/query/get_one/execute/insert/delete).
    """

    def __init__(self, host, port, user, password, database, max_idle_time=3 * 60, **kwargs):
        """
        init a connection, a connection will be reconnect after max_idle_time

        :param kwargs: arguments for mysql.connector
            https://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html
        """
        self.max_idle_time = float(max_idle_time)
        self._last_use_time = time.time()
        self._db = None
        self._db_args = dict(
            host=host, port=port, user=user, password=password, database=database, autocommit=True,
            raise_on_warnings=True, **kwargs)
        try:
            self.reconnect()
        except Exception as e:
            logging.error('failed to connect %s:%d %s %s', host, port, database, e, exc_info=True)
            raise JeekdbError('connection failed: {}'.format(e))

    def __del__(self):
        # close() is safe even when __init__ failed part-way (see close()).
        self.close()

    def _exceed_max_idle_time(self):
        # True when the connection has been unused long enough that the
        # server (or a proxy in between) may already have dropped it.
        return time.time() - self._last_use_time > self.max_idle_time

    def close(self):
        """close connection if connected"""
        # BUG FIX: use getattr() so close() (also called from __del__) does
        # not raise AttributeError when __init__ raised before _db was set.
        if getattr(self, "_db", None) is not None:
            self._db.close()
        self._db = None

    def reconnect(self):
        """
        close the existing connection and re-open it
        """
        logging.debug('Jeekdb reconnect')
        self.close()
        self._db = mysql.connector.connect(**self._db_args)

    def _ensure_connected(self):
        """MySQL or other MySQL proxy will close connections that are idle for some time, but the
        client library will not report this fact until the next query try when it fails."""
        if self._db is None or self._exceed_max_idle_time():
            self.reconnect()
        self._last_use_time = time.time()

    def _cursor(self):
        # dictionary=True makes fetch* return dicts keyed by column name.
        self._ensure_connected()
        return self._db.cursor(dictionary=True)

    @staticmethod
    def _execute(cursor, sql, parameter_dict=None):
        """
        wrap cursor.execute(). parameter_dict is a dict.
        :param cursor:
        :param sql: SQL, e.g. INSERT INTO my_table (name, field1) VALUES (%(name)s, %(company)s)
        :param parameter_dict: dict of parameter, e.g. {'name':'John', 'company':'Baidu'}
        :type parameter_dict: dict
        :return:
        """
        if parameter_dict is None:
            parameter_dict = {}
        try:
            return cursor.execute(sql, parameter_dict)
        except mysql.connector.Error as e:
            logging.error("_execute failed, query=%s, parameter_dict=%s, error=%s",
                          sql, parameter_dict, e, exc_info=True)
            raise JeekdbError('_execute failed: {}'.format(e))

    def iter(self, sql, parameter_dict=None, size=20):
        """
        returns an iterator for the given query and parameters
        :param sql: SQL, e.g. INSERT INTO my_table (name, field1) VALUES (%(name)s, %(company)s)
        :param parameter_dict: dict of parameter, e.g. {'name':'John', 'company':'Baidu'}
        :param size: size for fetchmany
        :type sql: str
        :type parameter_dict: dict
        :return: iterator
        """
        if parameter_dict is None:
            parameter_dict = {}
        cursor = self._cursor()
        try:
            self._execute(cursor, sql, parameter_dict)
            while True:
                rows = cursor.fetchmany(size=size)
                if not rows:
                    break
                for row in rows:
                    yield row
        finally:
            cursor.close()

    def query(self, sql, parameter_dict=None):
        """
        return a list of dict for the given sql and parameter_dict
        :param sql: SQL, e.g. INSERT INTO my_table (name, field1) VALUES (%(name)s, %(company)s)
        :param parameter_dict: dict of parameter, e.g. {'name':'John', 'company':'Baidu'}
        :type sql: str
        :type parameter_dict: dict
        :return: list
        """
        if parameter_dict is None:
            parameter_dict = {}
        cursor = self._cursor()
        try:
            self._execute(cursor, sql, parameter_dict)
            rows = cursor.fetchall()
            return rows
        finally:
            cursor.close()

    def get_one(self, sql, parameter_dict=None):
        """
        return a singular row, if it has more than one result, raise an exception
        :param sql: SQL, e.g. INSERT INTO my_table (name, field1) VALUES (%(name)s, %(company)s)
        :param parameter_dict: dict of parameter, e.g. {'name':'John', 'company':'Baidu'}
        :type sql: str
        :type parameter_dict: dict
        :return: dict
        """
        if parameter_dict is None:
            parameter_dict = {}
        rows = self.query(sql, parameter_dict)
        if not rows:
            return None
        if len(rows) > 1:
            raise JeekdbError("multiple rows returned for get_one()")
        return rows[0]

    def execute(self, sql, parameter_dict=None):
        """
        execute the given sql, return ExecuteResult
        :param sql: SQL, e.g. INSERT INTO my_table (name, field1) VALUES (%(name)s, %(company)s)
        :param parameter_dict: dict of parameter, e.g. {'name':'John', 'company':'Baidu'}
        :type sql: str
        :type parameter_dict: dict
        :return: ExecuteResult
        """
        if parameter_dict is None:
            parameter_dict = {}
        cursor = self._cursor()
        try:
            self._execute(cursor, sql, parameter_dict)
            last_row_id = cursor.lastrowid
            row_count = cursor.rowcount
            rows = None
            # with_rows is True only for statements that produce a result set.
            if cursor.with_rows:
                rows = cursor.fetchall()
            return ExecuteResult(last_row_id=last_row_id, row_count=row_count, rows=rows)
        finally:
            cursor.close()

    def insert(self, table, data):
        """
        insert a record
        :param table: table name
        :param data: dict, e.g. {'name': 'tom', 'age': 14}
        :return: ExecuteResult
        """
        sql = "INSERT INTO `%s` (%s) VALUES (%s)"
        sql_cols, sql_vals = [], []
        for name, val in data.items():
            sql_cols.append('`%s`' % name)
            # Values go through the driver as named placeholders, not inline.
            sql_vals.append('%%(%s)s' % name)
        sql_cols = ', '.join(sql_cols)
        sql_vals = ', '.join(sql_vals)
        sql = sql % (table, sql_cols, sql_vals)
        return self.execute(sql, data)

    def delete(self, table, conditions):
        """
        delete records. Example:
            table = 'mytable'
            conditions = {'name': 'tom'}
        :param table:
        :param conditions: dict of WHERE condition, can not be {} to prevent mistaken deleting
        :return: ExecuteResult
        """
        if not conditions:
            raise JeekdbError('conditions can not be None or {}')
        sql_where = []
        sql_start = "DELETE FROM `%s` WHERE " % table
        for name, val in conditions.items():
            sql_where.append("`{0}` = %({1})s".format(name, name))
        sql_where = ' AND '.join(sql_where)
        sql = sql_start + sql_where
        return self.execute(sql, conditions)

    # update shares execute()'s signature and semantics.
    update = execute
| {
"repo_name": "gaopenghigh/jeekdb",
"path": "jeekdb.py",
"copies": "1",
"size": "7547",
"license": "apache-2.0",
"hash": -8487911619596600000,
"line_mean": 34.5990566038,
"line_max": 99,
"alpha_frac": 0.560620114,
"autogenerated": false,
"ratio": 3.9679284963196637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9999849380724396,
"avg_score": 0.005739845919053365,
"num_lines": 212
} |
"""A lightweight wrapper around _mysql."""
from MySQLdb import _mysql
import time
import operator
try:
from _thread import get_ident as _get_ident
except ImportError:
from thread import get_ident as _get_ident
from memsql.common.conversions import CONVERSIONS
MySQLError = _mysql.MySQLError
OperationalError = _mysql.OperationalError
DatabaseError = _mysql.DatabaseError
def connect(*args, **kwargs):
    """Factory shortcut: build and return a :class:`Connection`."""
    return Connection(*args, **kwargs)
class Connection(object):
    """A lightweight wrapper around _mysql DB-API connections.

    The main value we provide is wrapping rows in a dict/object so that
    columns can be accessed by name. Typical usage::

        db = database.Connection("localhost", "mydatabase")
        for article in db.query("SELECT * FROM articles"):
            print(article.title)

    Cursors are hidden by the implementation, but other than that, the methods
    are very similar to the DB-API.

    We explicitly set the timezone to UTC and the character encoding to
    UTF-8 on all connections to avoid time zone and encoding errors.
    """
    def __init__(self, host, port=3306, database="information_schema", user=None, password=None,
                 max_idle_time=7 * 3600, _version=0, options=None):
        self.max_idle_time = max_idle_time
        args = {
            "db": database,
            "conv": CONVERSIONS
        }
        if user is not None:
            args["user"] = user
        if password is not None:
            args["passwd"] = password
        args["host"] = host
        args["port"] = int(port)
        if options is not None:
            assert isinstance(options, dict), "Options to database.Connection must be an dictionary of { str: value } pairs."
            args.update(options)
        self._db = None
        self._db_args = args
        self._last_use_time = time.time()
        self.reconnect()
        self._db.set_character_set("utf8")
        self._version = _version
    def __del__(self):
        self.close()
    def __enter__(self):
        # Context-manager support: `with connect(...) as db: ...`
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def close(self):
        """Closes this database connection."""
        # getattr() guards against __del__ running before __init__ set _db.
        if getattr(self, "_db", None) is not None:
            self._db.close()
            self._db = None
    def connected(self):
        """Return True if a ping succeeds on the current connection."""
        if self._db is not None:
            try:
                self.ping()
                return True
            except _mysql.InterfaceError:
                return False
        return False
    def reconnect(self):
        """Closes the existing database connection and re-opens it."""
        # Connect first: if this raises, the old connection is left intact.
        conn = _mysql.connect(**self._db_args)
        if conn is not None:
            self.close()
            self._db = conn
    def select_db(self, database):
        # Switch schema and remember it so future reconnects keep using it.
        self._db.select_db(database)
        self._db_args['db'] = database
    def ping(self):
        """ Ping the server """
        return self._db.ping()
    def thread_id(self):
        """ Retrieve the thread id for the current connection """
        return self._db.thread_id()
    def debug_query(self, query, *parameters, **kwparameters):
        # Same as query(), but prints the fully-escaped SQL before running it.
        return self._query(query, parameters, kwparameters, debug=True)
    def query(self, query, *parameters, **kwparameters):
        """
        Query the connection and return the rows (or affected rows if not a
        select query). Mysql errors will be propogated as exceptions.
        """
        return self._query(query, parameters, kwparameters)
    def get(self, query, *parameters, **kwparameters):
        """Returns the first row returned for the given query."""
        rows = self._query(query, parameters, kwparameters)
        if not rows:
            return None
        elif not isinstance(rows, list):
            raise MySQLError("Query is not a select query")
        elif len(rows) > 1:
            raise MySQLError("Multiple rows returned for Database.get() query")
        else:
            return rows[0]
    # rowcount is a more reasonable default return value than lastrowid,
    # but for historical compatibility execute() must return lastrowid.
    def execute(self, query, *parameters, **kwparameters):
        """Executes the given query, returning the lastrowid from the query."""
        return self.execute_lastrowid(query, *parameters, **kwparameters)
    def execute_lastrowid(self, query, *parameters, **kwparameters):
        """Executes the given query, returning the lastrowid from the query."""
        self._execute(query, parameters, kwparameters)
        self._result = self._db.store_result()
        return self._db.insert_id()
    def _query(self, query, parameters, kwparameters, debug=False):
        self._execute(query, parameters, kwparameters, debug)
        self._result = self._db.store_result()
        # store_result() is None for statements without a result set;
        # fall back to the affected-row count captured by _execute().
        if self._result is None:
            return self._rowcount
        fields = [ f[0] for f in self._result.describe() ]
        rows = self._result.fetch_row(0)
        return SelectResult(fields, rows)
    def _execute(self, query, parameters, kwparameters, debug=False):
        if parameters and kwparameters:
            raise ValueError('database.py querying functions can receive *args or **kwargs, but not both')
        query = escape_query(query, parameters or kwparameters)
        if debug:
            print(query)
        self._ensure_connected()
        self._db.query(query)
        self._rowcount = self._db.affected_rows()
    def _ensure_connected(self):
        # Mysql by default closes client connections that are idle for
        # 8 hours, but the client library does not report this fact until
        # you try to perform a query and it fails. Protect against this
        # case by preemptively closing and reopening the connection
        # if it has been idle for too long (7 hours by default).
        if (self._db is None or (time.time() - self._last_use_time > self.max_idle_time)):
            self.reconnect()
            self._last_use_time = time.time()
class Row(object):
    """A fast, ordered, partially-immutable dictlike object (or objectlike dict).

    Backed by two parallel sequences: ``fields[i]`` names ``values[i]``.
    """
    def __init__(self, fields, values):
        self._fields = fields
        self._values = values
    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, so _fields/_values
        # themselves resolve normally.
        try:
            return self._values[self._fields.index(name)]
        except (ValueError, IndexError):
            raise AttributeError(name)
    def __getitem__(self, name):
        try:
            return self._values[self._fields.index(name)]
        except (ValueError, IndexError):
            raise KeyError(name)
    def __setitem__(self, name, value):
        try:
            self._values[self._fields.index(name)] = value
        except (ValueError, IndexError):
            # Unknown field: extend both parallel sequences.
            # NOTE(review): rows built by SelectResult carry tuples, so
            # in-place assignment above raises TypeError (not caught here);
            # confirm whether that "partially immutable" behavior is intended.
            self._fields += (name,)
            self._values += (value,)
    def __contains__(self, name):
        return name in self._fields
    has_key = __contains__
    def __sizeof__(self):
        # BUG FIX: __sizeof__ takes no arguments; the original stray `name`
        # parameter made sys.getsizeof(row) raise TypeError.
        return len(self._fields)
    def __iter__(self):
        return self._fields.__iter__()
    def __len__(self):
        return self._fields.__len__()
    def get(self, name, default=None):
        """Dict-style get: value for *name*, or *default* if absent."""
        try:
            return self.__getitem__(name)
        except KeyError:
            return default
    def keys(self):
        for field in iter(self._fields):
            yield field
    def values(self):
        for value in iter(self._values):
            yield value
    def items(self):
        for item in zip(self._fields, self._values):
            yield item
    def __eq__(self, other):
        # Rows compare by (ordered) contents against other Rows, and by
        # plain dict equality against anything else.
        if isinstance(other, Row):
            return dict.__eq__(dict(self.items()), other) and all(map(operator.eq, self, other))
        else:
            return dict.__eq__(dict(self.items()), other)
    def __ne__(self, other):
        return not self == other
    def __repr__(self, _repr_running={}):
        # _repr_running breaks infinite recursion on self-referential rows
        # (same trick as collections.OrderedDict).
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
        finally:
            del _repr_running[call_key]
    # for simplejson.dumps()
    def _asdict(self):
        return dict(self)
    def nope(self, *args, **kwargs):
        raise NotImplementedError('This object is partially immutable. To modify it, call "foo = dict(foo)" first.')
    update = nope
    pop = nope
    setdefault = nope
    fromkeys = nope
    clear = nope
    __delitem__ = nope
    __reversed__ = nope
class SelectResult(list):
    """A list of Row objects that also remembers its column names."""
    def __init__(self, fieldnames, rows):
        self.fieldnames = tuple(fieldnames)
        self.rows = rows
        list.__init__(self, [Row(self.fieldnames, values) for values in self.rows])
    def width(self):
        """Number of columns in the result."""
        return len(self.fieldnames)
    def __getitem__(self, index):
        # Slicing returns another SelectResult so column metadata survives.
        if isinstance(index, slice):
            return SelectResult(self.fieldnames, self.rows[index])
        return list.__getitem__(self, index)
def escape_query(query, parameters):
    """Interpolate escaped *parameters* into *query*.

    *parameters* may be a sequence (positional ``%s`` style), a dict
    (named ``%(name)s`` style), or empty/None, in which case *query* is
    returned unchanged.

    Raises:
        TypeError: if *parameters* is neither a sequence nor a dict.
    """
    if parameters:
        if isinstance(parameters, (list, tuple)):
            query = query % tuple(map(_escape, parameters))
        elif isinstance(parameters, dict):
            params = {}
            for key, val in parameters.items():
                params[key] = _escape(val)
            query = query % params
        else:
            # BUG FIX: was `assert False`, which is stripped under `python -O`
            # and would silently return the un-escaped query; raise instead.
            raise TypeError('not sure what to do with parameters of type %s' % type(parameters))
    return query
def _escape(param):
    """Escape one parameter; lists/tuples become a comma-joined value list."""
    def as_text(escaped):
        # _mysql.escape may hand back bytes; normalise to str.
        return escaped.decode("utf-8") if isinstance(escaped, bytes) else escaped

    if isinstance(param, (list, tuple)):
        return ','.join(as_text(_mysql.escape(item, CONVERSIONS)) for item in param)
    return as_text(_mysql.escape(param, CONVERSIONS))
| {
"repo_name": "memsql/memsql-python",
"path": "memsql/common/database.py",
"copies": "1",
"size": "9853",
"license": "mit",
"hash": 219747671753173600,
"line_mean": 30.8867313916,
"line_max": 125,
"alpha_frac": 0.5937277986,
"autogenerated": false,
"ratio": 4.1892006802721085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010522638565233086,
"num_lines": 309
} |
"""A lightweight wrapper around _mysql."""
import _mysql
import itertools
import time
try:
from collections import OrderedDict
except:
from ordereddict import OrderedDict
from MySQLdb.converters import conversions
MySQLError = _mysql.MySQLError
OperationalError = _mysql.OperationalError
def connect(*args, **kwargs):
    """Factory shortcut: build and return a :class:`Connection`."""
    return Connection(*args, **kwargs)
class Connection(object):
    """A lightweight wrapper around _mysql DB-API connections.

    The main value we provide is wrapping rows in a dict/object so that
    columns can be accessed by name. Typical usage::

        db = database.Connection("localhost", "mydatabase")
        for article in db.query("SELECT * FROM articles"):
            print article.title

    Cursors are hidden by the implementation, but other than that, the methods
    are very similar to the DB-API.

    We explicitly set the timezone to UTC and the character encoding to
    UTF-8 on all connections to avoid time zone and encoding errors.

    NOTE(review): this module is Python 2 only (`print` statement,
    `unicode`, list-returning `zip` in _query).
    """
    def __init__(self, host, port=3306, database="information_schema", user=None, password=None,
                 max_idle_time=7 * 3600):
        self.database = database
        self.max_idle_time = max_idle_time
        # Keep only the string-keyed (type-based) encoders for escaping.
        self.encoders = dict([ (k, v) for k, v in conversions.items() if not isinstance(k, int) ])
        sys_vars = {
            "character_set_server": "utf8",
            "collation_server": "utf8_general_ci"
        }
        args = {
            "db": database,
            "conv": conversions
        }
        if user is not None:
            args["user"] = user
        if password is not None:
            args["passwd"] = password
        args["host"] = host
        args["port"] = int(port)
        # Force utf8 and the server variables as soon as the connection opens.
        args["init_command"] = 'set names "utf8" collate "utf8_bin"' + ''.join([', @@%s = "%s"' % t for t in sys_vars.items()])
        self._db = None
        self._db_args = args
        self._last_use_time = time.time()
        self.reconnect()
        self._db.set_character_set("utf8")
    def __del__(self):
        self.close()
    def __enter__(self):
        # Context-manager support: `with connect(...) as db: ...`
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def close(self):
        """Closes this database connection."""
        # getattr() guards against __del__ running before __init__ set _db.
        if getattr(self, "_db", None) is not None:
            self._db.close()
            self._db = None
    def connected(self):
        """Return True if a ping succeeds on the current connection."""
        if self._db is not None:
            try:
                self.ping()
                return True
            except _mysql.InterfaceError:
                return False
        return False
    def reconnect(self):
        """Closes the existing database connection and re-opens it."""
        # Connect first: if this raises, the old connection is left intact.
        conn = _mysql.connect(**self._db_args)
        if conn is not None:
            self.close()
            self._db = conn
    def ping(self):
        """ Ping the server """
        return self._db.ping()
    def debug_query(self, query, *parameters):
        # Same as query(), but prints the fully-escaped SQL before running it.
        return self._query(query, parameters, debug=True)
    def query(self, query, *parameters):
        """
        Query the connection and return the rows (or affected rows if not a
        select query). Mysql errors will be propogated as exceptions.
        """
        return self._query(query, parameters)
    def get(self, query, *parameters):
        """Returns the first row returned for the given query."""
        rows = self._query(query, parameters)
        if not rows:
            return None
        elif not isinstance(rows, list):
            raise MySQLError("Query is not a select query")
        elif len(rows) > 1:
            raise MySQLError("Multiple rows returned for Database.get() query")
        else:
            return rows[0]
    # rowcount is a more reasonable default return value than lastrowid,
    # but for historical compatibility execute() must return lastrowid.
    def execute(self, query, *parameters):
        """Executes the given query, returning the lastrowid from the query."""
        return self.execute_lastrowid(query, *parameters)
    def execute_lastrowid(self, query, *parameters):
        """Executes the given query, returning the lastrowid from the query."""
        self._execute(query, parameters)
        self._result = self._db.store_result()
        return self._db.insert_id()
    def _ensure_connected(self):
        # Mysql by default closes client connections that are idle for
        # 8 hours, but the client library does not report this fact until
        # you try to perform a query and it fails. Protect against this
        # case by preemptively closing and reopening the connection
        # if it has been idle for too long (7 hours by default).
        if (self._db is None or (time.time() - self._last_use_time > self.max_idle_time)):
            self.reconnect()
            self._last_use_time = time.time()
    def _query(self, query, parameters, debug=False):
        self._execute(query, parameters, debug)
        # use_result() streams rows instead of buffering them client-side.
        self._result = self._db.use_result()
        if self._result is None:
            return self._rowcount
        # Python 2: zip() returns a list, so [0] picks the field-name column.
        fields = zip(*self._result.describe())[0]
        rows = list(self._result.fetch_row(0))
        ret = SelectResult(fields, rows)
        return ret
    def _execute(self, query, parameters, debug=False):
        if parameters is not None and parameters != ():
            params = []
            for param in parameters:
                # Encode unicode params with the connection's charset before
                # handing them to _mysql.escape.
                if isinstance(param, unicode):
                    params.append(param.encode(self._db.character_set_name()))
                else:
                    params.append(param)
            query = query % self._db.escape(params, self.encoders)
        if isinstance(query, unicode):
            query = query.encode(self._db.character_set_name())
        if debug:
            print query
        self._db.query(query)
        self._rowcount = self._db.affected_rows()
class Row(OrderedDict):
    """A dict that allows for object-like property access syntax."""
    def __getattr__(self, name):
        # Missing keys surface as AttributeError, matching attribute access.
        try:
            value = self[name]
        except KeyError:
            raise AttributeError(name)
        return value
class SelectResult(list):
    """A list of Row objects that also remembers its column names.

    NOTE(review): uses itertools.izip, so Python 2 only.
    """
    def __init__(self, fieldnames, rows):
        self.fieldnames = fieldnames
        self.rows = rows
        data = [Row(itertools.izip(self.fieldnames, row)) for row in self.rows]
        list.__init__(self, data)
    def width(self):
        # Number of columns in the result.
        return len(self.fieldnames)
    def __getitem__(self, i):
        # Slicing returns another SelectResult so column metadata survives.
        if isinstance(i, slice):
            return SelectResult(self.fieldnames, self.rows[i])
        return list.__getitem__(self, i)
| {
"repo_name": "vidyar/memsql-python",
"path": "memsql/common/database.py",
"copies": "1",
"size": "6579",
"license": "mit",
"hash": 3968340851776399400,
"line_mean": 32.0603015075,
"line_max": 127,
"alpha_frac": 0.5950752394,
"autogenerated": false,
"ratio": 4.2335907335907335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016901351586212747,
"num_lines": 199
} |
"""Align an evaluation set and render the output as a text grid."""
import sys
import hmm
class Aligned:
    """A hand-aligned parallel sentence pair."""
    def __init__(self, zh, en, align):
        """zh and en are lists of words (strings). align is moses-formatted."""
        self.zh = zh
        self.en = en
        # Sets of (zh_index, en_index) pairs
        self.sure = set()
        self.possible = set()
        self.parse(align, self.sure, self.possible)
    def parse(self, align, sure, possible):
        """Populate sets sure and possible with alignment links from
        moses-formatted align string."""
        for token in align.split():
            fields = token.split('-')
            link = tuple(int(f) for f in fields[0:2])
            assert link[0] < len(self.zh), "%d: %s" % (link[0], str(self.zh))
            assert link[1] < len(self.en), "%d: %s" % (link[1], str(self.en))
            if len(fields) == 3:
                # A trailing "-P" marks a possible (rather than sure) link.
                assert fields[2] == "P", align
                possible.add(link)
            else:
                sure.add(link)
    def combine(self, forward, backward):
        """Combine forward and backward alignments by intersection."""
        fwd, bwd, ignored = set(), set(), set()
        self.parse(forward, fwd, ignored)
        self.parse(backward, bwd, ignored)
        return fwd.intersection(bwd)
    def render(self, guess):
        """Render this alignment as a grid."""
        # Alignment links plus the Chinese word labelling each row.
        grid = []
        for j, zh_word in enumerate(self.zh):
            cells = [self.render_link(i, j, guess) for i in range(len(self.en))]
            grid.append("".join(cells) + "| " + zh_word)
        grid.append("---" * len(self.en) + "'")
        # Vertical labels: one character of each English word per line.
        tallest = max(len(word) for word in self.en)
        label_rows = [[' '] * len(self.en) for _ in range(tallest)]
        for i, word in enumerate(self.en):
            for depth, ch in enumerate(word):
                label_rows[depth][i] = ch
        grid.extend(" %s " % " ".join(row) for row in label_rows)
        grid.append(" " * len(self.en))
        return "\n".join(grid)
    def render_link(self, i, j, guess):
        # One 3-char cell: [#] sure+guessed, ( ) possible, blank otherwise.
        link = (j, i)
        cell = [" ", " ", " "]
        if link in self.sure:
            cell[0], cell[2] = "[", "]"
        elif link in self.possible:
            cell[0], cell[2] = "(", ")"
        if link in guess:
            cell[1] = "#"
        return "".join(cell)
if __name__ == "__main__":
    """Usage: align.py data.zh data.en data.align zh_en.json en_zh.json"""
    # First three argv entries: parallel sentences + gold alignments;
    # last two: serialized HMM translation models (one per direction).
    data_paths = sys.argv[1:4]
    model_paths = sys.argv[4:6]
    data_files = [open(f) for f in data_paths]
    zh_en_model, en_zh_model = [hmm.HMM(f) for f in model_paths]
    for i, (zh, en, align) in enumerate(zip(*data_files)):
        zh_words = zh.lower().split()
        en_words = en.lower().split()
        a = Aligned(zh_words, en_words, align.strip())
        # Align in both directions, keep only links both models agree on.
        forward = en_zh_model.align(en_words, zh_words).moses_str(False)
        backward = zh_en_model.align(zh_words, en_words).moses_str(True)
        guess = a.combine(forward, backward)
        print("Alignment %d:" % i)
        print(a.render(guess))
| {
"repo_name": "papajohn/zh_en_word_alignment_data",
"path": "src/align.py",
"copies": "1",
"size": "3268",
"license": "mit",
"hash": 1221358594009319700,
"line_mean": 35.7191011236,
"line_max": 79,
"alpha_frac": 0.535495716,
"autogenerated": false,
"ratio": 3.4112734864300625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44467692024300626,
"avg_score": null,
"num_lines": null
} |
import sublime
import sublime_plugin
import re
class AlignAssignmentsCommand(sublime_plugin.TextCommand):
    """Sublime Text command: vertically align `=` signs in the selection.

    NOTE(review): uses view.begin_edit/end_edit, i.e. the Sublime Text 2
    plugin API.
    """
    def run(self, edit):
        # Matches assignment lines while excluding ==, =~ and compound
        # operators such as +=, -=, <=, etc.
        relevant_line_pattern = r"^[^=]+[^-+<>=!%\/|&*^]=(?!=|~)"
        column_search_pattern = r"[\t ]*="

        for region in self.view.sel():
            if not region.empty():
                lines = self.view.lines(region)
                total_lines = len(lines)
                best_column = 0
                target_lines = []
                modified_lines = []

                # find the best column for the =
                for line in lines:
                    string = self.view.substr(line)
                    if re.search(relevant_line_pattern, string):
                        target_lines.append(line)
                        match = re.search(column_search_pattern, string)
                        best_column = match.start(0) if match.start(0) > best_column else best_column

                # reformat the selection
                for line in target_lines:
                    string = self.view.substr(line)
                    before, after = re.split(r"[\t ]*=[\t ]*", string, 1)

                    # we might be dealing withs something like:
                    # foo => bar
                    # array("foo" => $foo,
                    #       "baz" => $baz);
                    #
                    # so pick our join string wisely
                    artifact = " =" if after[0:1] == ">" else " = "
                    value = artifact.join([before.ljust(best_column), after])
                    modified_lines.append({"region": line, "value":value})

                # start from the end and work up to the beginning
                # we do this because, by editing, we mess with the region
                # bounds. In other words if we modify the first region, and
                # it becomes shorter or longer, the bounds for the next region
                # are affected. Starting from the end, this is a non-issue
                while len(modified_lines) > 0:
                    item = modified_lines.pop()
                    # NOTE(review): rebinding `edit` shadows the run() parameter;
                    # harmless under the ST2 begin_edit/end_edit protocol, but
                    # worth confirming intent.
                    edit = self.view.begin_edit("Align Equals")
                    self.view.replace(edit, item["region"], item["value"])
self.view.end_edit(edit) | {
"repo_name": "openfirmware/dotfiles",
"path": "sublime2/User/align_assignments.py",
"copies": "1",
"size": "3209",
"license": "mit",
"hash": 5168565313200912000,
"line_mean": 41.8,
"line_max": 101,
"alpha_frac": 0.557182923,
"autogenerated": false,
"ratio": 4.233509234828496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002997918245646465,
"num_lines": 75
} |
#Align a tomography tilt series using cross correlation
#
#
#developed as part of the tomviz project (www.tomviz.com)
def transform_scalars(dataset):
    """Align each image of a tilt series to the previous image by shifting
    it to the maximum of their 2-D cross correlation (computed via FFT).

    Modifies the dataset's data array in place via tomviz.utils.
    """
    #----USER SPECIFIED VARIABLES-----#
    TILT_AXIS = []  # Specify Tilt Axis Dimensions x=0, y=1, z=2
    #---------------------------------#
    from tomviz import utils
    import numpy as np

    data_py = utils.get_array(dataset)  # get data as numpy array

    if data_py is None:  # Check if data exists
        raise RuntimeError("No data array found!")

    if TILT_AXIS == []:  # If tilt axis is not given, find it
        # Find smallest array dimension, assume it is the tilt angle axis
        if data_py.ndim == 3:
            TILT_AXIS = np.argmin(data_py.shape)
        elif data_py.ndim == 2:
            raise RuntimeError("Data Array is 2 dimensions, it should be 3!")
        else:
            raise RuntimeError("Data Array is not 2 or 3 dimensions!")

    print('Aligning Images by Cross Correlation')
    for i in range(1, np.size(data_py, TILT_AXIS)):  # Align image to previous
        if TILT_AXIS == 2:
            im0 = np.fft.fft2(data_py[:, :, i - 1])
            im1 = np.fft.fft2(data_py[:, :, i])
            # Peak of the circular cross correlation gives the shift.
            xcor = abs(np.fft.ifft2((im0 * im1.conjugate())))
            shift = np.unravel_index(xcor.argmax(), xcor.shape)
            print(shift)
            data_py[:, :, i] = np.roll(data_py[:, :, i], shift[0], axis=0)
            data_py[:, :, i] = np.roll(data_py[:, :, i], shift[1], axis=1)
        elif TILT_AXIS == 1:
            im0 = np.fft.fft2(data_py[:, i - 1, :])
            im1 = np.fft.fft2(data_py[:, i, :])
            xcor = abs(np.fft.ifft2((im0 * im1.conjugate())))
            print(np.amax(xcor))
            shift = np.unravel_index(xcor.argmax(), xcor.shape)
            print(shift)
            # BUG FIX: the slice data_py[:, i, :] is 2-D (axes 0 and 1) and
            # `shift` is a 2-tuple; the original indexed shift[2] / axis=2,
            # which raises IndexError at runtime.
            data_py[:, i, :] = np.roll(data_py[:, i, :], shift[0], axis=0)
            data_py[:, i, :] = np.roll(data_py[:, i, :], shift[1], axis=1)
        elif TILT_AXIS == 0:
            im0 = np.fft.fft2(data_py[i - 1, :, :])
            im1 = np.fft.fft2(data_py[i, :, :])
            xcor = abs(np.fft.ifft2((im0 * im1.conjugate())))
            print(np.amax(xcor))
            shift = np.unravel_index(xcor.argmax(), xcor.shape)
            print(shift)
            # BUG FIX: same as above -- the 2-D slice only has axes 0 and 1.
            data_py[i, :, :] = np.roll(data_py[i, :, :], shift[0], axis=0)
            data_py[i, :, :] = np.roll(data_py[i, :, :], shift[1], axis=1)
        else:
            raise RuntimeError("Python Transform Error: Unknown TILT_AXIS.")

    utils.set_array(dataset, data_py)
print('Align Images Complete') | {
"repo_name": "Hovden/tomviz",
"path": "tomviz/python/Align_Images.py",
"copies": "2",
"size": "2581",
"license": "bsd-3-clause",
"hash": -3612023094479816700,
"line_mean": 41.3278688525,
"line_max": 77,
"alpha_frac": 0.5311894614,
"autogenerated": false,
"ratio": 3.230287859824781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4761477321224781,
"avg_score": null,
"num_lines": null
} |
"""Aligned Packed Encoding Rules (PER) codec.
"""
from operator import attrgetter
from operator import itemgetter
import binascii
import string
import datetime
from ..parser import EXTENSION_MARKER
from . import BaseType
from . import EncodeError
from . import DecodeError
from . import OutOfDataError
from . import compiler
from . import format_or
from . import restricted_utc_time_to_datetime
from . import restricted_utc_time_from_datetime
from . import restricted_generalized_time_to_datetime
from . import restricted_generalized_time_from_datetime
from . import add_error_location
from .compiler import enum_values_split
from .compiler import enum_values_as_dict
from .compiler import clean_bit_string_value
from .compiler import rstrip_bit_string_zeros
from .ber import encode_real
from .ber import decode_real
from .ber import encode_object_identifier
from .ber import decode_object_identifier
from .permitted_alphabet import NUMERIC_STRING
from .permitted_alphabet import PRINTABLE_STRING
from .permitted_alphabet import IA5_STRING
from .permitted_alphabet import BMP_STRING
from .permitted_alphabet import VISIBLE_STRING
def is_unbound(minimum, maximum):
    """True when the constraint range has no usable finite bounds (missing,
    MIN/MAX markers, or an upper bound beyond 65535)."""
    if minimum in [None, 'MIN']:
        return True
    if maximum in [None, 'MAX']:
        return True
    return maximum > 65535
def to_int(chars):
    """Interpret *chars* (bytes-like, big-endian) as an unsigned integer.

    An int passes through unchanged; an empty byte string yields 0.
    """
    if isinstance(chars, int):
        return chars

    # int.from_bytes replaces the original pop-from-front loop, which was
    # quadratic because bytearray.pop(0) shifts the whole buffer each call.
    return int.from_bytes(bytearray(chars), 'big')
def to_byte_array(num, number_of_bits):
    """Split *num* into big-endian bytes covering *number_of_bits* bits
    (rounded up to whole bytes)."""
    result = bytearray()
    remaining = number_of_bits
    while remaining > 0:
        result.insert(0, num & 0xff)
        num >>= 8
        remaining -= 8
    return result
def integer_as_number_of_bits(size):
    """Returns the minimum number of bits needed to fit given positive
    integer.
    """
    return size.bit_length() if size else 0
def integer_as_number_of_bits_power_of_two(size):
    """Returns the minimum power of two number of bits needed to fit given
    positive integer.
    """
    if size == 0:
        return 0

    # Minimum bits for `size` (inline of integer_as_number_of_bits),
    # then round up to the next power of two.
    needed = size.bit_length()
    width = 1
    while width < needed:
        width <<= 1
    return width
def size_as_number_of_bytes(size):
    """Returns the minimum number of bytes needed to fit given positive
    integer.
    """
    if size == 0:
        return 1

    # Round the bit length up to a whole number of octets.
    return (size.bit_length() + 7) // 8
CLASS_PRIO = {
'UNIVERSAL': 0,
'APPLICATION': 1,
'CONTEXT_SPECIFIC': 2,
'PRIVATE': 3
}
class PermittedAlphabet(object):
    """Bidirectional character<->index mapping for a PER permitted-alphabet
    constraint."""
    def __init__(self, encode_map, decode_map):
        self.encode_map = encode_map
        self.decode_map = decode_map

    def __len__(self):
        return len(self.encode_map)

    def encode(self, value):
        if value not in self.encode_map:
            raise EncodeError(
                "Expected a character in '{}', but got '{}' (0x{:02x})'.".format(
                    ''.join(sorted([chr(v) for v in self.encode_map])),
                    chr(value) if chr(value) in string.printable else '.',
                    value))
        return self.encode_map[value]

    def decode(self, value):
        if value not in self.decode_map:
            raise DecodeError(
                "Expected a value in {}, but got {:d}.".format(
                    list(self.decode_map),
                    value))
        return self.decode_map[value]
class Encoder(object):
    """Bit-level encoder.

    Appended bits are packed into the integer ``self.value`` whose width
    is tracked by ``self.number_of_bits``.  Once the accumulator grows
    past 4096 bits it is spilled into ``self.chunks`` (a list of
    ``[value, number_of_bits]`` pairs) to keep the left shifts cheap;
    ``as_bytearray()`` reassembles everything.

    ``align()`` delegates to ``align_always()``, so this encoder always
    pads to a byte boundary when asked to align (aligned PER).
    """

    def __init__(self):
        self.number_of_bits = 0
        self.value = 0
        self.chunks_number_of_bits = 0
        self.chunks = []

    def __iadd__(self, other):
        """Append all bits accumulated in another encoder."""
        for value, number_of_bits in other.chunks:
            self.append_non_negative_binary_integer(value, number_of_bits)

        self.append_non_negative_binary_integer(other.value,
                                                other.number_of_bits)

        return self

    def reset(self):
        """Drop all accumulated bits."""
        self.number_of_bits = 0
        self.value = 0
        self.chunks_number_of_bits = 0
        self.chunks = []

    def are_all_bits_zero(self):
        """Return True if no appended bit is set (bit count is ignored)."""
        return not (any([value for value, _ in self.chunks]) or self.value)

    def number_of_bytes(self):
        """Return the total size so far, rounded up to whole bytes."""
        return (self.chunks_number_of_bits + self.number_of_bits + 7) // 8

    def offset(self):
        """Return a position marker for set_bit(): the pair
        (chunk index, bits currently in the accumulator).
        """
        return (len(self.chunks), self.number_of_bits)

    def set_bit(self, offset):
        """Set (to 1) a single already-appended bit identified by a
        marker previously returned by offset().  Used to flip a reserved
        presence bit after the fact.
        """
        chunk_offset, bit_offset = offset

        if len(self.chunks) == chunk_offset:
            # The bit is still in the live accumulator.
            self.value |= (1 << (self.number_of_bits - bit_offset - 1))
        else:
            # The bit was spilled into a chunk.
            chunk = self.chunks[chunk_offset]
            chunk[0] |= (1 << (chunk[1] - bit_offset - 1))

    def align(self):
        self.align_always()

    def align_always(self):
        """Pad with zero bits up to the next byte boundary."""
        width = 8 * self.number_of_bytes()
        width -= self.chunks_number_of_bits
        width -= self.number_of_bits
        self.number_of_bits += width
        self.value <<= width

    def append_bit(self, bit):
        """Append given bit (0 or 1).
        """
        self.number_of_bits += 1
        self.value <<= 1
        self.value |= bit

    def append_bits(self, data, number_of_bits):
        """Append the ``number_of_bits`` most significant bits of the
        bytes-like ``data``.
        """
        if number_of_bits == 0:
            return

        value = int(binascii.hexlify(data), 16)
        # Drop the trailing bits that are not part of the bit string.
        value >>= (8 * len(data) - number_of_bits)

        self.append_non_negative_binary_integer(value, number_of_bits)

    def append_non_negative_binary_integer(self, value, number_of_bits):
        """Append given non-negative integer value in given number of
        bits.
        """
        # Spill the accumulator into a chunk so the shifts below stay
        # cheap for long encodings.
        if self.number_of_bits > 4096:
            self.chunks.append([self.value, self.number_of_bits])
            self.chunks_number_of_bits += self.number_of_bits
            self.number_of_bits = 0
            self.value = 0

        self.number_of_bits += number_of_bits
        self.value <<= number_of_bits
        self.value |= value

    def append_bytes(self, data):
        """Append given bytes.
        """
        self.append_bits(data, 8 * len(data))

    def as_bytearray(self):
        """Return the accumulated bits as a bytearray, zero padded on
        the right to a whole number of bytes.
        """
        value = 0
        number_of_bits = 0

        # Re-join the spilled chunks and the live accumulator.
        for chunk_value, chunk_number_of_bits in self.chunks:
            value <<= chunk_number_of_bits
            value |= chunk_value
            number_of_bits += chunk_number_of_bits

        value <<= self.number_of_bits
        value |= self.value
        number_of_bits += self.number_of_bits

        if number_of_bits == 0:
            return bytearray()

        number_of_alignment_bits = (8 - (number_of_bits % 8))

        if number_of_alignment_bits != 8:
            value <<= number_of_alignment_bits
            number_of_bits += number_of_alignment_bits

        # Prepend a 0x80 marker byte so leading zero bits survive the
        # round trip through hex(); '0x80' (4 characters) is stripped
        # again below.  The trailing 'L' only appears on Python 2 longs.
        value |= (0x80 << number_of_bits)
        value = hex(value)[4:].rstrip('L')

        return bytearray(binascii.unhexlify(value))

    def append_length_determinant(self, length):
        """Append a length determinant.  Lengths of 16384 and above are
        fragmented: a single fragment marker byte (0xc1..0xc4) is
        written and the (possibly reduced) fragment length is returned
        to the caller.
        """
        if length < 128:
            # Single byte, short form.
            encoded = bytearray([length])
        elif length < 16384:
            # Two bytes with the top bit of the first byte set.
            encoded = bytearray([(0x80 | (length >> 8)), (length & 0xff)])
        elif length < 32768:
            encoded = b'\xc1'
            length = 16384
        elif length < 49152:
            encoded = b'\xc2'
            length = 32768
        elif length < 65536:
            encoded = b'\xc3'
            length = 49152
        else:
            encoded = b'\xc4'
            length = 65536

        self.append_bytes(encoded)

        return length

    def append_length_determinant_chunks(self, length):
        """Generate (offset, fragment_length) pairs, writing one length
        determinant per fragment, until the whole length is covered.
        """
        offset = 0
        chunk_length = length

        while True:
            chunk_length = self.append_length_determinant(chunk_length)

            yield offset, chunk_length

            # A fragment shorter than 16384 terminates the sequence.
            if chunk_length < 16384:
                break

            offset += chunk_length
            chunk_length = length - offset

    def append_normally_small_non_negative_whole_number(self, value):
        """Append a normally small non-negative whole number: values
        below 64 in 7 bits (leading 0 bit), larger ones as a 1 bit
        followed by a length determinant and the value bytes.
        """
        if value < 64:
            self.append_non_negative_binary_integer(value, 7)
        else:
            self.append_bit(1)
            length = (value.bit_length() + 7) // 8
            self.append_length_determinant(length)
            self.append_non_negative_binary_integer(value, 8 * length)

    def append_normally_small_length(self, value):
        """Append a normally small length: 1..64 as (value - 1) in 7
        bits (leading 0 bit), 65..127 as a 1 bit followed by the value
        in 8 bits.
        """
        if value <= 64:
            self.append_non_negative_binary_integer(value - 1, 7)
        elif value <= 127:
            # 0x100 sets the leading 1 bit of the 9 bit field.
            self.append_non_negative_binary_integer(0x100 | value, 9)
        else:
            raise NotImplementedError(
                'Normally small length number >127 is not yet supported.')

    def append_constrained_whole_number(self,
                                        value,
                                        minimum,
                                        maximum,
                                        number_of_bits):
        """Append a constrained whole number as an offset from the
        minimum.  Ranges up to 255 are packed unaligned in
        ``number_of_bits``; a range of exactly 256 is one aligned byte;
        ranges up to 65536 are two aligned bytes; larger ranges are
        aligned and use ``number_of_bits``.
        """
        _range = (maximum - minimum + 1)
        value -= minimum

        if _range <= 255:
            self.append_non_negative_binary_integer(value, number_of_bits)
        elif _range == 256:
            self.align_always()
            self.append_non_negative_binary_integer(value, 8)
        elif _range <= 65536:
            self.align_always()
            self.append_non_negative_binary_integer(value, 16)
        else:
            self.align_always()
            self.append_non_negative_binary_integer(value, number_of_bits)

    def append_unconstrained_whole_number(self, value):
        """Append an unconstrained whole number as a length determinant
        followed by the minimal two's complement byte representation.
        """
        number_of_bits = value.bit_length()

        if value < 0:
            # Two's complement; widen by one byte if the sign bit would
            # not be set.
            number_of_bytes = ((number_of_bits + 7) // 8)
            value = ((1 << (8 * number_of_bytes)) + value)

            if (value & (1 << (8 * number_of_bytes - 1))) == 0:
                value |= (0xff << (8 * number_of_bytes))
                number_of_bytes += 1
        elif value > 0:
            number_of_bytes = ((number_of_bits + 7) // 8)

            # Widen by one byte so the value is not mistaken for a
            # negative number.
            if number_of_bits == (8 * number_of_bytes):
                number_of_bytes += 1
        else:
            number_of_bytes = 1

        self.append_length_determinant(number_of_bytes)
        self.append_non_negative_binary_integer(value,
                                                8 * number_of_bytes)

    def __repr__(self):
        return binascii.hexlify(self.as_bytearray()).decode('ascii')
class Decoder(object):
    """Bit-level decoder.

    The whole input is converted once into ``self.value``, a string of
    '0'/'1' characters.  ``self.number_of_bits`` counts the bits left to
    read; the read position is ``total_number_of_bits - number_of_bits``.

    ``align()`` delegates to ``align_always()`` (aligned PER).
    """

    def __init__(self, encoded):
        self.number_of_bits = (8 * len(encoded))
        self.total_number_of_bits = self.number_of_bits

        if len(encoded) > 0:
            value = int(binascii.hexlify(encoded), 16)
            # A 0x80 marker byte preserves leading zero bits; bin() adds
            # '0b' plus the 8 marker bits, hence the [10:] slice.
            value |= (0x80 << self.number_of_bits)
            self.value = bin(value)[10:]
        else:
            self.value = ''

    def align(self):
        self.align_always()

    def align_always(self):
        """Discard bits up to the next byte boundary.  The total is a
        whole number of bytes, so this aligns the read position.
        """
        width = (self.number_of_bits & 0x7)
        self.number_of_bits -= width

    def number_of_read_bits(self):
        """Return the number of bits consumed so far."""
        return self.total_number_of_bits - self.number_of_bits

    def skip_bits(self, number_of_bits):
        """Discard given number of bits, or raise OutOfDataError."""
        if number_of_bits > self.number_of_bits:
            raise OutOfDataError(self.number_of_read_bits())

        self.number_of_bits -= number_of_bits

    def read_bit(self):
        """Read a bit, returned as the int 0 or 1.
        """
        if self.number_of_bits == 0:
            raise OutOfDataError(self.number_of_read_bits())

        bit = int(self.value[self.number_of_read_bits()])
        self.number_of_bits -= 1

        return bit

    def read_bits(self, number_of_bits):
        """Read given number of bits, returned as bytes with the bits
        left aligned and zero padded on the right to a whole byte.
        """
        if number_of_bits > self.number_of_bits:
            raise OutOfDataError(self.number_of_read_bits())

        offset = self.number_of_read_bits()
        value = self.value[offset:offset + number_of_bits]
        self.number_of_bits -= number_of_bits
        # '10000000' is the 0x80 marker byte preserving leading zeros;
        # hex()[4:] strips it again below.
        value = '10000000' + value
        number_of_alignment_bits = (8 - (number_of_bits % 8))

        if number_of_alignment_bits != 8:
            value += '0' * number_of_alignment_bits

        return binascii.unhexlify(hex(int(value, 2))[4:].rstrip('L'))

    def read_bytes(self, number_of_bytes):
        """Read given number of whole bytes."""
        return self.read_bits(8 * number_of_bytes)

    def read_non_negative_binary_integer(self, number_of_bits):
        """Read a non-negative integer value of given number of bits.
        """
        if number_of_bits > self.number_of_bits:
            raise OutOfDataError(self.number_of_read_bits())

        if number_of_bits == 0:
            return 0

        offset = self.number_of_read_bits()
        value = self.value[offset:offset + number_of_bits]
        self.number_of_bits -= number_of_bits

        return int(value, 2)

    def read_length_determinant(self):
        """Read a length determinant (inverse of
        Encoder.append_length_determinant), including the fragment
        marker bytes 0xc1..0xc4.
        """
        value = self.read_non_negative_binary_integer(8)

        if (value & 0x80) == 0x00:
            # Short form, single byte.
            return value
        elif (value & 0xc0) == 0x80:
            # Two byte form.
            return (((value & 0x7f) << 8)
                    | (self.read_non_negative_binary_integer(8)))
        else:
            try:
                return {
                    0xc1: 16384,
                    0xc2: 32768,
                    0xc3: 49152,
                    0xc4: 65536
                }[value]
            except KeyError:
                raise DecodeError(
                    'Bad length determinant fragmentation value 0x{:02x}.'.format(
                        value))

    def read_length_determinant_chunks(self):
        """Generate fragment lengths until a fragment shorter than 16384
        terminates the sequence.
        """
        while True:
            length = self.read_length_determinant()

            yield length

            if length < 16384:
                break

    def read_normally_small_non_negative_whole_number(self):
        """Inverse of
        Encoder.append_normally_small_non_negative_whole_number."""
        if not self.read_bit():
            decoded = self.read_non_negative_binary_integer(6)
        else:
            length = self.read_length_determinant()
            decoded = self.read_non_negative_binary_integer(8 * length)

        return decoded

    def read_normally_small_length(self):
        """Inverse of Encoder.append_normally_small_length."""
        if not self.read_bit():
            return self.read_non_negative_binary_integer(6) + 1
        elif not self.read_bit():
            return self.read_non_negative_binary_integer(7)
        else:
            raise NotImplementedError(
                'Normally small length number >64 is not yet supported.')

    def read_constrained_whole_number(self,
                                      minimum,
                                      maximum,
                                      number_of_bits):
        """Inverse of Encoder.append_constrained_whole_number; the same
        range-dependent width and alignment rules apply, and the minimum
        is added back to the decoded offset.
        """
        _range = (maximum - minimum + 1)

        if _range <= 255:
            value = self.read_non_negative_binary_integer(number_of_bits)
        elif _range == 256:
            self.align_always()
            value = self.read_non_negative_binary_integer(8)
        elif _range <= 65536:
            self.align_always()
            value = self.read_non_negative_binary_integer(16)
        else:
            self.align_always()
            value = self.read_non_negative_binary_integer(number_of_bits)

        return value + minimum

    def read_unconstrained_whole_number(self):
        """Inverse of Encoder.append_unconstrained_whole_number: a
        length determinant followed by a two's complement value.
        """
        length = self.read_length_determinant()
        decoded = self.read_non_negative_binary_integer(8 * length)
        number_of_bits = (8 * length)

        if decoded & (1 << (number_of_bits - 1)):
            # Negative: subtract 2 ** number_of_bits.
            mask = ((1 << number_of_bits) - 1)
            decoded = (decoded - mask)
            decoded -= 1

        return decoded
class Type(BaseType):
    """Common base for all types in this codec.

    Holds the originating module name and tag, and provides no-op
    constraint hooks that sized/ranged subclasses override.
    """

    def __init__(self, name, type_name):
        super().__init__(name, type_name)
        self.module_name = self.tag = None

    def set_size_range(self, minimum, maximum, has_extension_marker):
        # Size constraints are ignored unless a subclass overrides this.
        pass

    def set_restricted_to_range(self, minimum, maximum, has_extension_marker):
        # Value-range constraints are ignored unless overridden.
        pass
class KnownMultiplierStringType(Type):
    """Base class for restricted character string types whose characters
    are each encoded in a fixed, power-of-two number of bits.

    NOTE(review): subclasses are expected to provide ENCODING,
    PERMITTED_ALPHABET and ALPHABET (the latter is referenced by
    decode_unbound()) — confirm against the rest of the module.
    """

    ENCODING = 'ascii'
    PERMITTED_ALPHABET = PermittedAlphabet({}, {})

    def __init__(self,
                 name,
                 minimum=None,
                 maximum=None,
                 has_extension_marker=False,
                 permitted_alphabet=None):
        super(KnownMultiplierStringType, self).__init__(name,
                                                        self.__class__.__name__)
        self.set_size_range(minimum, maximum, has_extension_marker)

        if permitted_alphabet is None:
            permitted_alphabet = self.PERMITTED_ALPHABET

        self.permitted_alphabet = permitted_alphabet
        self.bits_per_character = integer_as_number_of_bits_power_of_two(
            len(permitted_alphabet) - 1)

        # If the effective alphabet does not shrink the per-character
        # bit width below the class default, fall back to the class
        # alphabet.
        if len(self.PERMITTED_ALPHABET) < 2 ** self.bits_per_character:
            self.permitted_alphabet = self.PERMITTED_ALPHABET

    def set_size_range(self, minimum, maximum, has_extension_marker):
        # Record the SIZE constraint; unbound sizes use length
        # determinant chunks instead of a fixed-width length field.
        self.minimum = minimum
        self.maximum = maximum
        self.has_extension_marker = has_extension_marker

        if is_unbound(minimum, maximum):
            self.number_of_bits = None
        else:
            size = maximum - minimum
            self.number_of_bits = integer_as_number_of_bits(size)

    @add_error_location
    def encode(self, data, encoder):
        if self.has_extension_marker:
            if self.minimum <= len(data) <= self.maximum:
                encoder.append_bit(0)
            else:
                raise NotImplementedError(
                    'String size extension is not yet implemented.')

        if self.number_of_bits is None:
            return self.encode_unbound(data, encoder)
        elif self.minimum != self.maximum:
            # Variable size: constrained length first, then align before
            # the characters (unless the string is empty).
            encoder.append_constrained_whole_number(len(data),
                                                    self.minimum,
                                                    self.maximum,
                                                    self.number_of_bits)

            if self.maximum > 1 and len(data) > 0:
                encoder.align()
        elif self.maximum * self.bits_per_character > 16:
            # Fixed size: only align when longer than 16 bits.
            encoder.align()

        for value in data:
            encoder.append_non_negative_binary_integer(
                self.permitted_alphabet.encode(
                    to_int(value.encode(self.ENCODING))),
                self.bits_per_character)

    def encode_unbound(self, data, encoder):
        # Unbound size: aligned length determinant fragments, each
        # followed by its characters.
        encoder.align()

        for offset, length in encoder.append_length_determinant_chunks(len(data)):
            for entry in data[offset:offset + length]:
                encoder.append_non_negative_binary_integer(
                    self.permitted_alphabet.encode(
                        to_int(entry.encode(self.ENCODING))),
                    self.bits_per_character)

    @add_error_location
    def decode(self, decoder):
        if self.has_extension_marker:
            bit = decoder.read_bit()

            if bit:
                raise NotImplementedError(
                    'String size extension is not yet implemented.')

        if self.number_of_bits is None:
            return self.decode_unbound(decoder)
        else:
            if self.minimum != self.maximum:
                length = decoder.read_constrained_whole_number(self.minimum,
                                                               self.maximum,
                                                               self.number_of_bits)

                if self.maximum > 1 and length > 0:
                    decoder.align()
            elif self.maximum * self.bits_per_character > 16:
                decoder.align()
                length = self.minimum
            else:
                length = self.minimum

        data = bytearray()

        for _ in range(length):
            value = decoder.read_non_negative_binary_integer(self.bits_per_character)
            value = self.permitted_alphabet.decode(value)
            data += to_byte_array(value, self.bits_per_character)

        return data.decode(self.ENCODING)

    def decode_unbound(self, decoder):
        decoder.align()
        decoded = bytearray()
        # NOTE(review): the byte width here is derived from self.ALPHABET
        # rather than the effective permitted alphabet — confirm this is
        # intentional for subclasses with a restricted alphabet.
        orig_bits_per_character = integer_as_number_of_bits_power_of_two(
            len(self.ALPHABET) - 1)

        for length in decoder.read_length_determinant_chunks():
            for _ in range(length):
                value = decoder.read_non_negative_binary_integer(
                    self.bits_per_character)
                value = self.permitted_alphabet.decode(value)
                decoded += to_byte_array(value, orig_bits_per_character)

        return decoded.decode(self.ENCODING)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                               self.name)
class StringType(Type):
    """Base class for character string types encoded as aligned length
    determinant fragments of bytes in a fixed encoding.

    Subclasses set ENCODING and, for multi-byte encodings, the number of
    bytes per character via LENGTH_MULTIPLIER.
    """

    ENCODING = None
    LENGTH_MULTIPLIER = 1

    def __init__(self, name):
        super(StringType, self).__init__(name, self.__class__.__name__)

    @add_error_location
    def encode(self, data, encoder):
        raw = data.encode(self.ENCODING)
        encoder.align()

        # Fragment lengths count characters; byte offsets are scaled by
        # the per-character byte width.
        for offset, length in encoder.append_length_determinant_chunks(len(data)):
            start = self.LENGTH_MULTIPLIER * offset
            stop = start + self.LENGTH_MULTIPLIER * length
            encoder.append_bytes(raw[start:stop])

    @add_error_location
    def decode(self, decoder):
        decoder.align()
        chunks = []

        for length in decoder.read_length_determinant_chunks():
            chunks.append(decoder.read_bytes(self.LENGTH_MULTIPLIER * length))

        return b''.join(chunks).decode(self.ENCODING)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                               self.name)
class MembersType(Type):
    """Base class for SEQUENCE and SET: an ordered collection of root
    members plus optional extension additions, encoded with a presence
    bitmap for optional/defaulted members.
    """

    def __init__(self,
                 name,
                 root_members,
                 additions,
                 type_name):
        super(MembersType, self).__init__(name, type_name)
        self.root_members = root_members
        self.additions = additions
        # Members that get a presence bit: optional ones and ones with a
        # default value.
        self.optionals = [
            member
            for member in root_members
            if member.optional or member.default is not None
        ]

    @add_error_location
    def encode(self, data, encoder):
        if self.additions is not None:
            # Reserve the extension presence bit; it is flipped to 1 via
            # set_bit() below if any addition is actually encoded.
            offset = encoder.offset()
            encoder.append_bit(0)
            self.encode_root(data, encoder)

            if len(self.additions) > 0:
                if self.encode_additions(data, encoder):
                    encoder.set_bit(offset)
        else:
            self.encode_root(data, encoder)

    def encode_root(self, data, encoder):
        # Presence bitmap: for optional members the bit means
        # "present"; for defaulted members it means "differs from the
        # default".
        for optional in self.optionals:
            if optional.optional:
                encoder.append_bit(optional.name in data)
            elif optional.name in data:
                encoder.append_bit(not optional.is_default(data[optional.name]))
            else:
                encoder.append_bit(0)

        for member in self.root_members:
            self.encode_member(member, data, encoder)

    def encode_additions(self, data, encoder):
        # Encode extension additions.
        presence_bits = 0
        addition_encoders = []
        number_of_precence_bits = 0

        try:
            # Each addition is encoded into its own encoder so it can be
            # wrapped in an open type below.
            for addition in self.additions:
                presence_bits <<= 1
                addition_encoder = encoder.__class__()
                number_of_precence_bits += 1

                if isinstance(addition, AdditionGroup):
                    addition.encode_addition_group(data, addition_encoder)
                else:
                    self.encode_member(addition,
                                       data,
                                       addition_encoder,
                                       encode_default=True)

                if addition_encoder.number_of_bits > 0 or addition.name in data:
                    addition_encoders.append(addition_encoder)
                    presence_bits |= 1
        except EncodeError:
            # NOTE(review): an EncodeError stops processing of the
            # remaining additions as well — confirm this best-effort
            # behavior is intended.
            pass

        # Return false if no extension additions are present.
        if not addition_encoders:
            return False

        # Presence bit field.
        number_of_additions = len(self.additions)
        presence_bits <<= (number_of_additions - number_of_precence_bits)
        encoder.append_normally_small_length(number_of_additions)
        encoder.append_non_negative_binary_integer(presence_bits,
                                                   number_of_additions)

        # Embed each encoded extension addition in an open type (add a
        # length field and multiple of 8 bits).
        encoder.align()

        for addition_encoder in addition_encoders:
            addition_encoder.align_always()
            encoder.append_length_determinant(addition_encoder.number_of_bytes())
            encoder += addition_encoder

        return True

    def encode_addition_group(self, data, encoder):
        # A group whose only output is an all-zero presence bitmap is
        # treated as absent: reset the encoder to zero bits.
        self.encode_root(data, encoder)

        if (encoder.are_all_bits_zero()
                and (encoder.number_of_bits == len(self.optionals))):
            encoder.reset()

    def encode_member(self, member, data, encoder, encode_default=False):
        # Encode a single member; missing required members raise.
        name = member.name

        if name in data:
            if member.default is None:
                member.encode(data[name], encoder)
            elif not member.is_default(data[name]) or encode_default:
                # Default-valued members are skipped unless forced.
                member.encode(data[name], encoder)
        elif member.optional or member.default is not None:
            pass
        else:
            raise EncodeError(
                "{} member '{}' not found in {}.".format(
                    self.__class__.__name__,
                    name,
                    data))

    @add_error_location
    def decode(self, decoder):
        if self.additions is not None:
            # Extension presence bit.
            if decoder.read_bit():
                decoded = self.decode_root(decoder)
                decoded.update(self.decode_additions(decoder))
            else:
                decoded = self.decode_root(decoder)
        else:
            decoded = self.decode_root(decoder)

        return decoded

    def decode_root(self, decoder):
        values = {}
        # Presence bits are read in declaration order of the optionals.
        optionals = {
            optional: decoder.read_bit()
            for optional in self.optionals
        }

        for member in self.root_members:
            if optionals.get(member, True):
                value = member.decode(decoder)
                values[member.name] = value
            elif member.has_default():
                values[member.name] = member.default

        return values

    def decode_additions(self, decoder):
        # Presence bit field.
        length = decoder.read_normally_small_length()
        presence_bits = decoder.read_non_negative_binary_integer(length)
        decoder.align()
        decoded = {}

        for i in range(length):
            if presence_bits & (1 << (length - i - 1)):
                # Open type decoding.
                open_type_length = decoder.read_length_determinant()
                offset = decoder.number_of_bits

                if i < len(self.additions):
                    addition = self.additions[i]

                    if isinstance(addition, AdditionGroup):
                        decoded.update(addition.decode(decoder))
                    else:
                        decoded[addition.name] = addition.decode(decoder)
                else:
                    # Unknown addition: skip the whole open type.
                    decoder.skip_bits(8 * open_type_length)

                # Skip the open type's byte-alignment padding.
                alignment_bits = (offset - decoder.number_of_bits) % 8

                if alignment_bits != 0:
                    decoder.skip_bits(8 - alignment_bits)

        return decoded

    def __repr__(self):
        return '{}({}, [{}])'.format(
            self.__class__.__name__,
            self.name,
            ', '.join([repr(member) for member in self.root_members]))
class ArrayType(Type):
    """Base class for SEQUENCE OF and SET OF: a length-prefixed array of
    elements of a single type.
    """

    def __init__(self,
                 name,
                 element_type,
                 minimum,
                 maximum,
                 has_extension_marker,
                 type_name):
        super(ArrayType, self).__init__(name, type_name)
        self.element_type = element_type
        self.minimum = minimum
        self.maximum = maximum
        self.has_extension_marker = has_extension_marker

        if is_unbound(minimum, maximum):
            self.number_of_bits = None
        else:
            size = maximum - minimum
            self.number_of_bits = integer_as_number_of_bits(size)

    @add_error_location
    def encode(self, data, encoder):
        if self.has_extension_marker:
            if self.minimum <= len(data) <= self.maximum:
                encoder.append_bit(0)
            else:
                # Length outside the root range: unconstrained length.
                # NOTE(review): a single length determinant is written
                # here; lengths >= 16384 would be truncated by the
                # fragment marker without a fragmentation loop — confirm.
                encoder.append_bit(1)
                encoder.align()
                encoder.append_length_determinant(len(data))

                for entry in data:
                    self.element_type.encode(entry, encoder)

                return

        if self.number_of_bits is None:
            return self.encode_unbound(data, encoder)
        elif self.minimum != self.maximum:
            encoder.append_constrained_whole_number(len(data),
                                                    self.minimum,
                                                    self.maximum,
                                                    self.number_of_bits)

        for entry in data:
            self.element_type.encode(entry, encoder)

    def encode_unbound(self, data, encoder):
        # Unbound size: aligned length determinant fragments.
        encoder.align()

        for offset, length in encoder.append_length_determinant_chunks(len(data)):
            for entry in data[offset:offset + length]:
                self.element_type.encode(entry, encoder)

    @add_error_location
    def decode(self, decoder):
        length = None

        if self.has_extension_marker:
            bit = decoder.read_bit()

            if bit:
                # Extended length: unconstrained length determinant.
                decoder.align()
                length = decoder.read_length_determinant()

        if length is not None:
            pass
        elif self.number_of_bits is None:
            return self.decode_unbound(decoder)
        elif self.minimum != self.maximum:
            length = decoder.read_constrained_whole_number(self.minimum,
                                                           self.maximum,
                                                           self.number_of_bits)
        else:
            length = self.minimum

        decoded = []

        for _ in range(length):
            decoded_element = self.element_type.decode(decoder)
            decoded.append(decoded_element)

        return decoded

    def decode_unbound(self, decoder):
        decoder.align()
        decoded = []

        for length in decoder.read_length_determinant_chunks():
            for _ in range(length):
                decoded_element = self.element_type.decode(decoder)
                decoded.append(decoded_element)

        return decoded

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__,
                                   self.name,
                                   self.element_type)
class Boolean(Type):
    """BOOLEAN, encoded as a single bit."""

    def __init__(self, name):
        super(Boolean, self).__init__(name, 'BOOLEAN')

    @add_error_location
    def encode(self, data, encoder):
        encoder.append_bit(1 if data else 0)

    @add_error_location
    def decode(self, decoder):
        return decoder.read_bit() == 1

    def __repr__(self):
        return 'Boolean({})'.format(self.name)
class Integer(Type):
    """INTEGER with an optional value range constraint.

    Unconstrained values are encoded as a length determinant followed by
    a two's complement whole number.  Constrained values are encoded as
    an offset from the minimum; for ranges larger than 65536 the number
    of value bytes is encoded first (indefinite form, see
    number_of_indefinite_bits).
    """

    def __init__(self, name):
        super(Integer, self).__init__(name, 'INTEGER')
        self.minimum = None
        self.maximum = None
        self.has_extension_marker = False
        self.number_of_bits = None
        self.number_of_indefinite_bits = None

    def set_restricted_to_range(self, minimum, maximum, has_extension_marker):
        self.has_extension_marker = has_extension_marker

        # MIN/MAX endpoints leave the type unconstrained.
        if minimum == 'MIN' or maximum == 'MAX':
            return

        self.minimum = minimum
        self.maximum = maximum
        size = self.maximum - self.minimum
        self.number_of_bits = integer_as_number_of_bits(size)

        if size <= 65535:
            self.number_of_indefinite_bits = None
        else:
            # Bits needed to encode (number of value bytes - 1).
            number_of_bits = ((self.number_of_bits + 7) // 8 - 1).bit_length()
            self.number_of_indefinite_bits = number_of_bits

    @add_error_location
    def encode(self, data, encoder):
        if self.has_extension_marker:
            if self.minimum <= data <= self.maximum:
                encoder.append_bit(0)
            else:
                # Value outside the root range: encode unconstrained.
                encoder.append_bit(1)
                encoder.align()
                encoder.append_unconstrained_whole_number(data)

                return

        if self.number_of_bits is None:
            encoder.align()
            encoder.append_unconstrained_whole_number(data)
        else:
            if self.number_of_indefinite_bits is None:
                number_of_bits = self.number_of_bits
            else:
                # Large range: encode the value byte count first, then
                # align and encode the value in whole bytes.
                number_of_bytes = size_as_number_of_bytes(data - self.minimum)
                number_of_bits = 8 * number_of_bytes
                encoder.append_constrained_whole_number(
                    number_of_bytes - 1,
                    0,
                    2 ** self.number_of_indefinite_bits,
                    self.number_of_indefinite_bits)
                encoder.align()

            encoder.append_constrained_whole_number(data,
                                                    self.minimum,
                                                    self.maximum,
                                                    number_of_bits)

    @add_error_location
    def decode(self, decoder):
        if self.has_extension_marker:
            if decoder.read_bit():
                decoder.align()

                return decoder.read_unconstrained_whole_number()

        if self.number_of_bits is None:
            decoder.align()

            return decoder.read_unconstrained_whole_number()
        else:
            if self.number_of_indefinite_bits is None:
                number_of_bits = self.number_of_bits
            else:
                # Large range: value byte count first, then the value.
                number_of_bytes = decoder.read_constrained_whole_number(
                    0,
                    2 ** self.number_of_indefinite_bits,
                    self.number_of_indefinite_bits)
                number_of_bytes += 1
                number_of_bits = (8 * number_of_bytes)
                decoder.align()

            return decoder.read_constrained_whole_number(self.minimum,
                                                         self.maximum,
                                                         number_of_bits)

    def __repr__(self):
        return 'Integer({})'.format(self.name)
class Real(Type):
    """REAL, encoded as an aligned length determinant followed by the
    value bytes produced by the module-level encode_real()/decode_real()
    helpers.
    """

    def __init__(self, name):
        super(Real, self).__init__(name, 'REAL')

    @add_error_location
    def encode(self, data, encoder):
        payload = encode_real(data)
        encoder.align()
        encoder.append_length_determinant(len(payload))
        encoder.append_bytes(payload)

    @add_error_location
    def decode(self, decoder):
        decoder.align()
        number_of_bytes = decoder.read_length_determinant()

        return decode_real(bytearray(decoder.read_bytes(number_of_bytes)))

    def __repr__(self):
        return 'Real({})'.format(self.name)
class Null(Type):
    """NULL, which occupies no bits on the wire."""

    def __init__(self, name):
        super(Null, self).__init__(name, 'NULL')

    @add_error_location
    def encode(self, _data, _encoder):
        # Nothing is emitted for NULL.
        pass

    @add_error_location
    def decode(self, _decoder):
        return None

    def __repr__(self):
        return 'Null({})'.format(self.name)
class BitString(Type):
    """BIT STRING.  Values are (bytes, number_of_bits) tuples.

    For named-bit types, trailing zero bits are stripped before
    encoding (down to the minimum size, if any).
    """

    def __init__(self,
                 name,
                 named_bits,
                 minimum,
                 maximum,
                 has_extension_marker):
        super(BitString, self).__init__(name, 'BIT STRING')
        self.minimum = minimum
        self.maximum = maximum
        self.has_extension_marker = has_extension_marker
        self.named_bits = named_bits
        self.has_named_bits = named_bits is not None

        if is_unbound(minimum, maximum):
            self.number_of_bits = None
        else:
            size = self.maximum - self.minimum
            self.number_of_bits = integer_as_number_of_bits(size)

    def is_default(self, value):
        # Compare against self.default after normalizing both sides.
        # NOTE(review): self.default is assigned outside this class —
        # verify against the member machinery.
        clean_value = clean_bit_string_value(value,
                                             self.has_named_bits)
        clean_default = clean_bit_string_value(self.default,
                                               self.has_named_bits)

        return clean_value == clean_default

    def rstrip_zeros(self, data, number_of_bits):
        # Strip trailing zero bits, but never below the minimum size;
        # re-pad the data to a whole number of bytes.
        data, number_of_bits = rstrip_bit_string_zeros(bytearray(data))

        if self.minimum is not None:
            if number_of_bits < self.minimum:
                number_of_bits = self.minimum
                number_of_bytes = ((number_of_bits + 7) // 8)
                data += (number_of_bytes - len(data)) * b'\x00'

        return (data, number_of_bits)

    @add_error_location
    def encode(self, data, encoder):
        data, number_of_bits = data

        if self.has_extension_marker:
            if self.minimum <= number_of_bits <= self.maximum:
                encoder.append_bit(0)
            else:
                raise NotImplementedError(
                    'BIT STRING extension is not yet implemented.')

        if self.has_named_bits:
            data, number_of_bits = self.rstrip_zeros(data, number_of_bits)

        if self.number_of_bits is None:
            return self.encode_unbound(data, number_of_bits, encoder)
        elif self.minimum != self.maximum:
            # Variable size: constrained length, then aligned bits.
            encoder.append_constrained_whole_number(number_of_bits,
                                                    self.minimum,
                                                    self.maximum,
                                                    self.number_of_bits)
            encoder.align()
        elif self.minimum > 16:
            # Fixed size: only align when longer than 16 bits.
            encoder.align()

        encoder.append_bits(data, number_of_bits)

    def encode_unbound(self, data, number_of_bits, encoder):
        # Unbound size: aligned length determinant fragments of bits.
        encoder.align()

        for offset, length in encoder.append_length_determinant_chunks(number_of_bits):
            encoder.append_bits(data[offset // 8:(offset + length + 7) // 8], length)

    @add_error_location
    def decode(self, decoder):
        if self.has_extension_marker:
            if decoder.read_bit():
                raise NotImplementedError(
                    'BIT STRING extension is not yet implemented.')

        if self.number_of_bits is None:
            return self.decode_unbound(decoder)
        else:
            number_of_bits = self.minimum

            if self.minimum != self.maximum:
                number_of_bits = decoder.read_constrained_whole_number(
                    self.minimum,
                    self.maximum,
                    self.number_of_bits)
                decoder.align()
            elif self.minimum > 16:
                decoder.align()

            value = decoder.read_bits(number_of_bits)

        return (value, number_of_bits)

    def decode_unbound(self, decoder):
        decoder.align()
        decoded = []
        number_of_bits = 0

        for length in decoder.read_length_determinant_chunks():
            decoded.append(decoder.read_bits(length))
            number_of_bits += length

        return (b''.join(decoded), number_of_bits)

    def __repr__(self):
        return 'BitString({})'.format(self.name)
class OctetString(Type):
    """OCTET STRING with an optional SIZE constraint."""

    def __init__(self, name, minimum, maximum, has_extension_marker):
        super(OctetString, self).__init__(name, 'OCTET STRING')
        self.set_size_range(minimum, maximum, has_extension_marker)

    def set_size_range(self, minimum, maximum, has_extension_marker):
        self.minimum = minimum
        self.maximum = maximum
        self.has_extension_marker = has_extension_marker

        if is_unbound(minimum, maximum):
            self.number_of_bits = None
        else:
            size = self.maximum - self.minimum

            # A fixed size of 64 KiB or more is encoded like an
            # unbounded string (length determinant fragments).
            if size == 0 and self.maximum >= 65536:
                self.number_of_bits = None
            else:
                self.number_of_bits = integer_as_number_of_bits(size)

    @add_error_location
    def encode(self, data, encoder):
        align = True

        if self.has_extension_marker:
            if self.minimum <= len(data) <= self.maximum:
                encoder.append_bit(0)
            else:
                # Length outside the root range: unconstrained length.
                # NOTE(review): a single length determinant is written
                # here; lengths >= 16384 are not fragmented — confirm.
                encoder.append_bit(1)
                encoder.align()
                encoder.append_length_determinant(len(data))
                encoder.append_bytes(data)

                return

        if self.number_of_bits is None:
            return self.encode_unbound(data, encoder)
        elif self.minimum != self.maximum:
            encoder.append_constrained_whole_number(len(data),
                                                    self.minimum,
                                                    self.maximum,
                                                    self.number_of_bits)
        elif self.maximum <= 2:
            # Fixed size of at most two bytes is not byte aligned.
            align = False

        if align:
            encoder.align()

        encoder.append_bytes(data)

    def encode_unbound(self, data, encoder):
        # Unbound size: aligned length determinant fragments of bytes.
        encoder.align()

        for offset, length in encoder.append_length_determinant_chunks(len(data)):
            encoder.align()
            encoder.append_bytes(data[offset:offset + length])

    @add_error_location
    def decode(self, decoder):
        align = True

        if self.has_extension_marker:
            bit = decoder.read_bit()

            if bit:
                decoder.align()
                length = decoder.read_length_determinant()

                return decoder.read_bytes(length)

        if self.number_of_bits is None:
            return self.decode_unbound(decoder)
        else:
            length = self.minimum

            if self.minimum != self.maximum:
                length = decoder.read_constrained_whole_number(
                    self.minimum,
                    self.maximum,
                    self.number_of_bits)
            elif self.maximum <= 2:
                align = False

            if align:
                decoder.align()

            return decoder.read_bytes(length)

    def decode_unbound(self, decoder):
        decoder.align()
        decoded = []

        for length in decoder.read_length_determinant_chunks():
            decoder.align()
            decoded.append(decoder.read_bytes(length))

        return b''.join(decoded)

    def __repr__(self):
        return 'OctetString({})'.format(self.name)
class ObjectIdentifier(Type):
    """OBJECT IDENTIFIER, carried as an aligned length determinant
    followed by the subidentifier bytes produced by the module-level
    encode_object_identifier()/decode_object_identifier() helpers.
    """

    def __init__(self, name):
        super(ObjectIdentifier, self).__init__(name, 'OBJECT IDENTIFIER')

    @add_error_location
    def encode(self, data, encoder):
        subidentifiers = encode_object_identifier(data)
        encoder.align()
        encoder.append_length_determinant(len(subidentifiers))
        encoder.append_bytes(bytearray(subidentifiers))

    @add_error_location
    def decode(self, decoder):
        decoder.align()
        number_of_bytes = decoder.read_length_determinant()
        encoded = decoder.read_bytes(number_of_bytes)

        return decode_object_identifier(bytearray(encoded), 0, len(encoded))

    def __repr__(self):
        return 'ObjectIdentifier({})'.format(self.name)
class Enumerated(Type):
    """ENUMERATED with optional extension additions.

    Root values are sorted by their numeric value and encoded as an
    index; extension values are encoded as a normally small number after
    a leading 1 bit.  Unknown extension indexes decode to None.
    """

    def __init__(self, name, values, numeric):
        super(Enumerated, self).__init__(name, 'ENUMERATED')
        root, additions = enum_values_split(values)
        # Indexes are assigned in numeric value order.
        root = sorted(root, key=itemgetter(1))

        # Root.
        index_to_data, data_to_index = self.create_maps(root,
                                                        numeric)
        self.root_index_to_data = index_to_data
        self.root_data_to_index = data_to_index
        self.root_number_of_bits = integer_as_number_of_bits(len(index_to_data) - 1)

        # With numeric=True values are exposed as their numbers, else as
        # their names.
        if numeric:
            self.root_data_to_value = {k: k for k in enum_values_as_dict(root)}
        else:
            self.root_data_to_value = {v: k for k, v in enum_values_as_dict(root).items()}

        # Optional additions.
        if additions is None:
            index_to_data = None
            data_to_index = None
        else:
            index_to_data, data_to_index = self.create_maps(additions,
                                                            numeric)

        self.additions_index_to_data = index_to_data
        self.additions_data_to_index = data_to_index

    def create_maps(self, items, numeric):
        # Build index <-> data maps; data is the numeric value (numeric)
        # or the name (not numeric).
        if numeric:
            index_to_data = {
                index: value[1]
                for index, value in enumerate(items)
            }
        else:
            index_to_data = {
                index: value[0]
                for index, value in enumerate(items)
            }

        data_to_index = {
            data: index
            for index, data in index_to_data.items()
        }

        return index_to_data, data_to_index

    def format_root_indexes(self):
        return format_or(sorted(list(self.root_index_to_data)))

    @add_error_location
    def encode(self, data, encoder):
        if self.additions_index_to_data is None:
            index = self.root_data_to_index[data]
            encoder.append_non_negative_binary_integer(index,
                                                       self.root_number_of_bits)
        else:
            if data in self.root_data_to_index:
                # Root value: 0 bit plus the root index.
                encoder.append_bit(0)
                index = self.root_data_to_index[data]
                encoder.append_non_negative_binary_integer(index,
                                                           self.root_number_of_bits)
            else:
                # Extension value: 1 bit plus a normally small number.
                encoder.append_bit(1)
                index = self.additions_data_to_index[data]
                encoder.append_normally_small_non_negative_whole_number(index)

    @add_error_location
    def decode(self, decoder):
        if self.additions_index_to_data is None:
            return self.decode_root(decoder)
        else:
            additions = decoder.read_bit()

            if additions == 0:
                return self.decode_root(decoder)
            else:
                index = decoder.read_normally_small_non_negative_whole_number()

                if index in self.additions_index_to_data:
                    return self.additions_index_to_data[index]
                else:
                    # Unknown extension value.
                    return None

    def decode_root(self, decoder):
        index = decoder.read_non_negative_binary_integer(self.root_number_of_bits)

        try:
            data = self.root_index_to_data[index]
        except KeyError:
            raise DecodeError(
                'Expected enumeration index {}, but got {}.'.format(
                    self.format_root_indexes(),
                    index))

        return data

    def __repr__(self):
        return 'Enumerated({})'.format(self.name)
class Sequence(MembersType):
    """SEQUENCE, a members type with type name 'SEQUENCE'."""

    def __init__(self, name, root_members, additions):
        super().__init__(name, root_members, additions, 'SEQUENCE')
class SequenceOf(ArrayType):
    """SEQUENCE OF, an array type with type name 'SEQUENCE OF'."""

    def __init__(self, name, element_type, minimum, maximum,
                 has_extension_marker):
        super().__init__(name, element_type, minimum, maximum,
                         has_extension_marker, 'SEQUENCE OF')
class Set(MembersType):
    """SET, a members type with type name 'SET'."""

    def __init__(self, name, root_members, additions):
        super().__init__(name, root_members, additions, 'SET')
class SetOf(ArrayType):
    """SET OF, an array type with type name 'SET OF'."""

    def __init__(self, name, element_type, minimum, maximum,
                 has_extension_marker):
        super().__init__(name, element_type, minimum, maximum,
                         has_extension_marker, 'SET OF')
class Choice(Type):
    """ASN.1 CHOICE compiled for PER.

    Values are represented as ``(member_name, member_value)`` tuples.  The
    root alternative index is encoded as a constrained whole number; when
    an extension marker is present, one extra bit selects between root
    alternatives and extension additions, and additions are wrapped in an
    open type (length-prefixed, byte-aligned).
    """
    def __init__(self, name, root_members, additions):
        super(Choice, self).__init__(name, 'CHOICE')
        # Root.
        index_to_member, name_to_index = self.create_maps(root_members)
        self.root_index_to_member = index_to_member
        self.root_name_to_index = name_to_index
        # Highest root index; determines how many bits the index needs.
        self.maximum = (len(root_members) - 1)
        self.root_number_of_bits = integer_as_number_of_bits(self.maximum)
        if self.maximum <= 65535:
            # Small index range: encode the index in a fixed bit field.
            self.number_of_indefinite_bits = None
        else:
            # Very large index range: the index is encoded as a whole number
            # of bytes, preceded by a (byte-count - 1) field of this width.
            number_of_bits = ((self.root_number_of_bits + 7) // 8 - 1).bit_length()
            self.number_of_indefinite_bits = number_of_bits
        # Optional additions.
        if additions is None:
            index_to_member = None
            name_to_index = None
        else:
            index_to_member, name_to_index = self.create_maps(additions)
        self.additions_index_to_member = index_to_member
        self.additions_name_to_index = name_to_index
    def create_maps(self, members):
        # Build index -> member and member-name -> index lookup tables.
        index_to_member = {
            index: member
            for index, member in enumerate(members)
        }
        name_to_index = {
            member.name: index
            for index, member in enumerate(members)
        }
        return index_to_member, name_to_index
    def format_root_indexes(self):
        # Human-readable list of valid root indexes, for error messages.
        return format_or(sorted(list(self.root_index_to_member)))
    def format_names(self):
        # Human-readable list of all member names (root and additions).
        members = list(self.root_index_to_member.values())
        if self.additions_index_to_member is not None:
            members += list(self.additions_index_to_member.values())
        return format_or(sorted([member.name for member in members]))
    @add_error_location
    def encode(self, data, encoder):
        # ``data`` is a (member_name, value) tuple.  With an extension
        # marker present, a leading bit says whether the chosen member is
        # a root alternative (0) or an extension addition (1).
        if self.additions_index_to_member is not None:
            if data[0] in self.root_name_to_index:
                encoder.append_bit(0)
                self.encode_root(data, encoder)
            else:
                encoder.append_bit(1)
                self.encode_additions(data, encoder)
        else:
            self.encode_root(data, encoder)
    def encode_root(self, data, encoder):
        try:
            index = self.root_name_to_index[data[0]]
        except KeyError:
            raise EncodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    data[0]))
        # A single-alternative CHOICE needs no index bits at all.
        if len(self.root_index_to_member) > 1:
            self.encode_root_index(index, encoder)
        member = self.root_index_to_member[index]
        member.encode(data[1], encoder)
    def encode_root_index(self, index, encoder):
        if self.number_of_indefinite_bits is None:
            number_of_bits = self.root_number_of_bits
        else:
            # Large index range: emit (byte-count - 1), align, then the
            # index itself in that many whole bytes.
            number_of_bytes = size_as_number_of_bytes(index)
            number_of_bits = 8 * number_of_bytes
            encoder.append_constrained_whole_number(
                number_of_bytes - 1,
                0,
                2 ** self.number_of_indefinite_bits,
                self.number_of_indefinite_bits)
            encoder.align()
        encoder.append_constrained_whole_number(index,
                                                0,
                                                self.maximum,
                                                number_of_bits)
    def encode_additions(self, data, encoder):
        try:
            index = self.additions_name_to_index[data[0]]
        except KeyError:
            raise EncodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    data[0]))
        # Encode the addition into a scratch encoder of the same kind so
        # its byte length is known before it is embedded.
        addition_encoder = encoder.__class__()
        addition = self.additions_index_to_member[index]
        addition.encode(data[1], addition_encoder)
        # Embed encoded extension addition in an open type (add a
        # length field and multiple of 8 bits).
        addition_encoder.align_always()
        encoder.append_normally_small_non_negative_whole_number(index)
        encoder.align()
        encoder.append_length_determinant(addition_encoder.number_of_bytes())
        encoder += addition_encoder
    @add_error_location
    def decode(self, decoder):
        # Mirror of encode(): the leading bit (if extensible) selects
        # between root alternatives and extension additions.
        if self.additions_index_to_member is not None:
            if decoder.read_bit():
                return self.decode_additions(decoder)
            else:
                return self.decode_root(decoder)
        else:
            return self.decode_root(decoder)
    def decode_root(self, decoder):
        if len(self.root_index_to_member) > 1:
            index = self.decode_root_index(decoder)
        else:
            # Single alternative: no index bits were encoded.
            index = 0
        try:
            member = self.root_index_to_member[index]
        except KeyError:
            raise DecodeError(
                'Expected choice index {}, but got {}.'.format(
                    self.format_root_indexes(),
                    index))
        return (member.name, member.decode(decoder))
    def decode_root_index(self, decoder):
        if self.number_of_indefinite_bits is None:
            number_of_bits = self.root_number_of_bits
        else:
            # Large index range: read (byte-count - 1), align, then the
            # index itself from that many whole bytes.
            number_of_bytes = decoder.read_constrained_whole_number(
                0,
                2 ** self.number_of_indefinite_bits,
                self.number_of_indefinite_bits)
            number_of_bytes += 1
            number_of_bits = (8 * number_of_bytes)
            decoder.align()
        return decoder.read_constrained_whole_number(0,
                                                     self.maximum,
                                                     number_of_bits)
    def decode_additions(self, decoder):
        index = decoder.read_normally_small_non_negative_whole_number()
        # An unknown addition index means the peer uses a newer schema
        # version; the open-type payload is skipped and (None, None)
        # returned below.
        if index in self.additions_index_to_member:
            addition = self.additions_index_to_member[index]
        else:
            addition = None
        # Open type decoding.
        decoder.align()
        length = 8 * decoder.read_length_determinant()
        offset = decoder.number_of_bits
        if addition is None:
            name = None
            decoded = None
        else:
            name = addition.name
            decoded = addition.decode(decoder)
        # Skip whatever the addition decoder did not consume so the
        # stream stays in sync for the next field.
        length -= (offset - decoder.number_of_bits)
        decoder.skip_bits(length)
        return (name, decoded)
    def __repr__(self):
        return 'Choice({}, [{}])'.format(
            self.name,
            ', '.join([repr(member)
                       for member in self.root_name_to_index]))
class UTF8String(Type):
    """ASN.1 UTF8String: UTF-8 bytes in aligned length-determinant chunks."""

    def __init__(self, name):
        super(UTF8String, self).__init__(name, 'UTF8String')

    @add_error_location
    def encode(self, data, encoder):
        raw = data.encode('utf-8')
        encoder.align()
        # The length determinant may fragment long values; write each
        # fragment's byte slice in turn.
        for begin, size in encoder.append_length_determinant_chunks(len(raw)):
            encoder.append_bytes(raw[begin:begin + size])

    @add_error_location
    def decode(self, decoder):
        decoder.align()
        fragments = []
        for size in decoder.read_length_determinant_chunks():
            fragments.append(decoder.read_bytes(size))
        return b''.join(fragments).decode('utf-8')

    def __repr__(self):
        return 'UTF8String({})'.format(self.name)
class NumericString(KnownMultiplierStringType):
    # Permitted characters come from NUMERIC_STRING (defined elsewhere in
    # this module), as byte values.
    ALPHABET = bytearray(NUMERIC_STRING.encode('ascii'))
    # NumericString characters are encoded as compact indexes into the
    # alphabet: char -> index for encoding, index -> char for decoding.
    ENCODE_MAP = {v: i for i, v in enumerate(ALPHABET)}
    DECODE_MAP = {i: v for i, v in enumerate(ALPHABET)}
    PERMITTED_ALPHABET = PermittedAlphabet(ENCODE_MAP,
                                           DECODE_MAP)
class PrintableString(KnownMultiplierStringType):
    # Permitted characters come from PRINTABLE_STRING (defined elsewhere
    # in this module), as byte values.
    ALPHABET = bytearray(PRINTABLE_STRING.encode('ascii'))
    # Identity maps: characters are encoded as their own byte values.
    ENCODE_MAP = {v: v for v in ALPHABET}
    DECODE_MAP = {v: v for v in ALPHABET}
    PERMITTED_ALPHABET = PermittedAlphabet(ENCODE_MAP,
                                           DECODE_MAP)
class IA5String(KnownMultiplierStringType):
    # Permitted characters come from IA5_STRING (defined elsewhere in this
    # module); identity map, so the same table serves encode and decode.
    ALPHABET = bytearray(IA5_STRING.encode('ascii'))
    ENCODE_DECODE_MAP = {v: v for v in ALPHABET}
    PERMITTED_ALPHABET = PermittedAlphabet(ENCODE_DECODE_MAP,
                                           ENCODE_DECODE_MAP)
class BMPString(KnownMultiplierStringType):
    # BMPString characters are two bytes each (UTF-16 big-endian).
    ENCODING = 'utf-16-be'
    # Alphabet is a text string here (BMP_STRING, defined elsewhere in
    # this module), so code points are taken with ord(); identity map.
    ALPHABET = BMP_STRING
    ENCODE_DECODE_MAP = {ord(v): ord(v) for v in ALPHABET}
    PERMITTED_ALPHABET = PermittedAlphabet(ENCODE_DECODE_MAP,
                                           ENCODE_DECODE_MAP)
class VisibleString(KnownMultiplierStringType):
    # Permitted characters come from VISIBLE_STRING (defined elsewhere in
    # this module); identity map, same table for encode and decode.
    ALPHABET = bytearray(VISIBLE_STRING.encode('ascii'))
    ENCODE_DECODE_MAP = {v: v for v in ALPHABET}
    PERMITTED_ALPHABET = PermittedAlphabet(ENCODE_DECODE_MAP,
                                           ENCODE_DECODE_MAP)
class GeneralString(StringType):
    # One byte per character; latin-1 round-trips all byte values.
    ENCODING = 'latin-1'
class GraphicString(StringType):
    # One byte per character; latin-1 round-trips all byte values.
    ENCODING = 'latin-1'
class TeletexString(StringType):
    # One byte per character; ISO 8859-1 round-trips all byte values.
    ENCODING = 'iso-8859-1'
class UniversalString(StringType):
    # Four bytes per character (UTF-32 big-endian), hence the length
    # multiplier of 4.
    ENCODING = 'utf-32-be'
    LENGTH_MULTIPLIER = 4
class ObjectDescriptor(GraphicString):
    # ObjectDescriptor is encoded exactly like a GraphicString.
    pass
class UTCTime(VisibleString):
    """ASN.1 UTCTime, carried on the wire as a VisibleString.

    Converts between datetime objects and the restricted UTC time string
    form before delegating to the VisibleString codec.
    """

    def encode(self, data, encoder):
        text = restricted_utc_time_from_datetime(data)
        return super(UTCTime, self).encode(text, encoder)

    def decode(self, decoder):
        text = super(UTCTime, self).decode(decoder)
        return restricted_utc_time_to_datetime(text)
class GeneralizedTime(VisibleString):
    """ASN.1 GeneralizedTime, carried on the wire as a VisibleString.

    Converts between datetime objects and the restricted generalized time
    string form before delegating to the VisibleString codec.
    """

    def encode(self, data, encoder):
        text = restricted_generalized_time_from_datetime(data)
        return super(GeneralizedTime, self).encode(text, encoder)

    def decode(self, decoder):
        text = super(GeneralizedTime, self).decode(decoder)
        return restricted_generalized_time_to_datetime(text)
class Date(Type):
    """ASN.1 DATE, encoded as the canonical DATE-ENCODING sequence.

    The year is a CHOICE of four constrained integer ranges (with the
    range boundaries mandated by the ASN.1 time-type encoding rules);
    month and day are constrained integers.
    """
    def __init__(self, name):
        super(Date, self).__init__(name, 'DATE')
        # Year alternatives; the range bounds below determine which
        # alternative a given year is encoded with.
        immediate = Integer('immediate')
        near_future = Integer('near_future')
        near_past = Integer('near_past')
        reminder = Integer('reminder')
        immediate.set_restricted_to_range(2005, 2020, False)
        near_future.set_restricted_to_range(2021, 2276, False)
        near_past.set_restricted_to_range(1749, 2004, False)
        # 'MIN' makes the remainder range semi-constrained from below.
        reminder.set_restricted_to_range('MIN', 1748, False)
        year = Choice('year',
                      [immediate, near_future, near_past, reminder],
                      None)
        month = Integer('month')
        day = Integer('day')
        month.set_restricted_to_range(1, 12, False)
        day.set_restricted_to_range(1, 31, False)
        self._inner = Sequence('DATE-ENCODING',
                               [year, month, day],
                               None)
    @add_error_location
    def encode(self, data, encoder):
        # Pick the year alternative matching the ranges configured above.
        if 2005 <= data.year <= 2020:
            choice = 'immediate'
        elif 2021 <= data.year <= 2276:
            choice = 'near_future'
        elif 1749 <= data.year <= 2004:
            choice = 'near_past'
        else:
            choice = 'reminder'
        # Re-shape the datetime.date into the inner sequence's dict form.
        data = {
            'year': (choice, data.year),
            'month': data.month,
            'day': data.day
        }
        return self._inner.encode(data, encoder)
    @add_error_location
    def decode(self, decoder):
        decoded = self._inner.decode(decoder)
        # decoded['year'] is a (choice_name, year) tuple; only the value
        # matters here.
        return datetime.date(decoded['year'][1],
                             decoded['month'],
                             decoded['day'])
class TimeOfDay(Type):
    """ASN.1 TIME-OF-DAY, encoded as a SEQUENCE of three constrained INTEGERs.

    Hours may reach 24 and seconds 60 (leap second) by construction of the
    ranges below.
    """

    def __init__(self, name):
        super(TimeOfDay, self).__init__(name, 'TIME-OF-DAY')
        members = []
        for member_name, upper in [('hours', 24),
                                   ('minutes', 59),
                                   ('seconds', 60)]:
            member = Integer(member_name)
            member.set_restricted_to_range(0, upper, False)
            members.append(member)
        self._inner = Sequence('TIME-OF-DAY-ENCODING', members, None)

    @add_error_location
    def encode(self, data, encoder):
        # Re-shape the datetime.time into the inner sequence's dict form.
        return self._inner.encode({'hours': data.hour,
                                   'minutes': data.minute,
                                   'seconds': data.second},
                                  encoder)

    @add_error_location
    def decode(self, decoder):
        fields = self._inner.decode(decoder)
        return datetime.time(fields['hours'],
                             fields['minutes'],
                             fields['seconds'])
class DateTime(Type):
    """ASN.1 DATE-TIME: a DATE followed by a TIME-OF-DAY."""

    def __init__(self, name):
        super(DateTime, self).__init__(name, 'DATE-TIME')
        self._inner = Sequence('DATE-TIME-ENCODING',
                               [Date('date'), TimeOfDay('time')],
                               None)

    @add_error_location
    def encode(self, data, encoder):
        # The same datetime object supplies both the date and time halves.
        return self._inner.encode({'date': data, 'time': data}, encoder)

    @add_error_location
    def decode(self, decoder):
        parts = self._inner.decode(decoder)
        date_part = parts['date']
        time_part = parts['time']
        return datetime.datetime(date_part.year,
                                 date_part.month,
                                 date_part.day,
                                 time_part.hour,
                                 time_part.minute,
                                 time_part.second)
class OpenType(Type):
    """Open type: raw bytes wrapped in an aligned length determinant."""

    def __init__(self, name):
        super(OpenType, self).__init__(name, 'OpenType')

    @add_error_location
    def encode(self, data, encoder):
        encoder.align()
        encoder.append_length_determinant(len(data))
        encoder.append_bytes(data)

    @add_error_location
    def decode(self, decoder):
        decoder.align()
        return decoder.read_bytes(decoder.read_length_determinant())

    def __repr__(self):
        return 'OpenType({})'.format(self.name)
class Any(Type):
    """ASN.1 ANY placeholder; encoding and decoding are not implemented."""
    def __init__(self, name):
        super(Any, self).__init__(name, 'ANY')
    @add_error_location
    def encode(self, _, _encoder):
        raise NotImplementedError('ANY is not yet implemented.')
    @add_error_location
    def decode(self, _decoder):
        raise NotImplementedError('ANY is not yet implemented.')
    def __repr__(self):
        return 'Any({})'.format(self.name)
class Recursive(Type, compiler.Recursive):
    """Placeholder for a recursively defined type.

    The real inner type is patched in via set_inner_type() after
    compilation finishes, breaking the definition cycle.
    """
    def __init__(self, name, type_name, module_name):
        super(Recursive, self).__init__(name, 'RECURSIVE')
        self.type_name = type_name
        self.module_name = module_name
        # Filled in later by set_inner_type().
        self._inner = None
    def set_inner_type(self, inner):
        self._inner = inner
    @add_error_location
    def encode(self, data, encoder):
        self._inner.encode(data, encoder)
    @add_error_location
    def decode(self, decoder):
        return self._inner.decode(decoder)
    def __repr__(self):
        return 'Recursive({})'.format(self.type_name)
class AdditionGroup(Sequence):
    # A grouped extension addition ([[...]]) is encoded like a SEQUENCE.
    pass
class CompiledType(compiler.CompiledType):
    """Compiled PER type exposing encode/decode on byte strings."""

    def encode(self, data):
        enc = Encoder()
        self._type.encode(data, enc)
        return enc.as_bytearray()

    def decode(self, data):
        dec = Decoder(bytearray(data))
        return self._type.decode(dec)
class Compiler(compiler.Compiler):
    """Compile parsed ASN.1 type descriptors into PER codec objects."""
    def process_type(self, type_name, type_descriptor, module_name):
        compiled_type = self.compile_type(type_name,
                                          type_descriptor,
                                          module_name)
        return CompiledType(compiled_type)
    def compile_type(self, name, type_descriptor, module_name):
        # Dispatch on the ASN.1 type name to the matching codec class.
        # Unknown names fall through to user-defined (or recursive) types.
        module_name = self.get_module_name(type_descriptor, module_name)
        type_name = type_descriptor['type']
        if type_name == 'SEQUENCE':
            compiled = Sequence(
                name,
                *self.compile_members(type_descriptor['members'],
                                      module_name))
        elif type_name == 'SEQUENCE OF':
            compiled = SequenceOf(name,
                                  self.compile_type('',
                                                    type_descriptor['element'],
                                                    module_name),
                                  *self.get_size_range(type_descriptor,
                                                       module_name))
        elif type_name == 'SET':
            # SET members are canonically ordered by tag.
            compiled = Set(
                name,
                *self.compile_members(type_descriptor['members'],
                                      module_name,
                                      sort_by_tag=True))
        elif type_name == 'SET OF':
            compiled = SetOf(name,
                             self.compile_type('',
                                               type_descriptor['element'],
                                               module_name),
                             *self.get_size_range(type_descriptor,
                                                  module_name))
        elif type_name == 'CHOICE':
            # Choice additions are flattened (no addition groups).
            compiled = Choice(name,
                              *self.compile_members(
                                  type_descriptor['members'],
                                  module_name,
                                  flat_additions=True))
        elif type_name == 'INTEGER':
            compiled = Integer(name)
        elif type_name == 'REAL':
            compiled = Real(name)
        elif type_name == 'ENUMERATED':
            compiled = Enumerated(name,
                                  self.get_enum_values(type_descriptor,
                                                       module_name),
                                  self._numeric_enums)
        elif type_name == 'BOOLEAN':
            compiled = Boolean(name)
        elif type_name == 'OBJECT IDENTIFIER':
            compiled = ObjectIdentifier(name)
        elif type_name == 'OCTET STRING':
            compiled = OctetString(name,
                                   *self.get_size_range(type_descriptor,
                                                        module_name))
        elif type_name == 'TeletexString':
            compiled = TeletexString(name)
        elif type_name == 'NumericString':
            # Known-multiplier string types take an optional FROM
            # constraint as a permitted alphabet.
            permitted_alphabet = self.get_permitted_alphabet(type_descriptor)
            compiled = NumericString(name,
                                     *self.get_size_range(type_descriptor,
                                                          module_name),
                                     permitted_alphabet=permitted_alphabet)
        elif type_name == 'PrintableString':
            permitted_alphabet = self.get_permitted_alphabet(type_descriptor)
            compiled = PrintableString(name,
                                       *self.get_size_range(type_descriptor,
                                                            module_name),
                                       permitted_alphabet=permitted_alphabet)
        elif type_name == 'IA5String':
            permitted_alphabet = self.get_permitted_alphabet(type_descriptor)
            compiled = IA5String(name,
                                 *self.get_size_range(type_descriptor,
                                                      module_name),
                                 permitted_alphabet=permitted_alphabet)
        elif type_name == 'BMPString':
            permitted_alphabet = self.get_permitted_alphabet(type_descriptor)
            compiled = BMPString(name,
                                 *self.get_size_range(type_descriptor,
                                                      module_name),
                                 permitted_alphabet=permitted_alphabet)
        elif type_name == 'VisibleString':
            permitted_alphabet = self.get_permitted_alphabet(type_descriptor)
            compiled = VisibleString(name,
                                     *self.get_size_range(type_descriptor,
                                                          module_name),
                                     permitted_alphabet=permitted_alphabet)
        elif type_name == 'GeneralString':
            compiled = GeneralString(name)
        elif type_name == 'UTF8String':
            compiled = UTF8String(name)
        elif type_name == 'GraphicString':
            compiled = GraphicString(name)
        elif type_name == 'UTCTime':
            compiled = UTCTime(name)
        elif type_name == 'UniversalString':
            compiled = UniversalString(name)
        elif type_name == 'GeneralizedTime':
            compiled = GeneralizedTime(name)
        elif type_name == 'DATE':
            compiled = Date(name)
        elif type_name == 'TIME-OF-DAY':
            compiled = TimeOfDay(name)
        elif type_name == 'DATE-TIME':
            compiled = DateTime(name)
        elif type_name == 'BIT STRING':
            compiled = BitString(name,
                                 self.get_named_bits(type_descriptor,
                                                     module_name),
                                 *self.get_size_range(type_descriptor,
                                                      module_name))
        elif type_name == 'ANY':
            compiled = Any(name)
        elif type_name == 'ANY DEFINED BY':
            compiled = Any(name)
        elif type_name == 'NULL':
            compiled = Null(name)
        elif type_name == 'OpenType':
            compiled = OpenType(name)
        elif type_name == 'EXTERNAL':
            # EXTERNAL is compiled as its associated SEQUENCE type.
            compiled = Sequence(
                name,
                *self.compile_members(self.external_type_descriptor()['members'],
                                      module_name))
        elif type_name == 'ObjectDescriptor':
            compiled = ObjectDescriptor(name)
        else:
            # User-defined type: a name already on the compile stack means
            # a recursive definition, handled with a Recursive placeholder.
            if type_name in self.types_backtrace:
                compiled = Recursive(name,
                                     type_name,
                                     module_name)
                self.recursive_types.append(compiled)
            else:
                compiled = self.compile_user_type(name,
                                                  type_name,
                                                  module_name)
        # Apply tag and value-range constraints, if present.
        if 'tag' in type_descriptor:
            compiled = self.set_compiled_tag(compiled, type_descriptor)
        if 'restricted-to' in type_descriptor:
            compiled = self.set_compiled_restricted_to(compiled,
                                                       type_descriptor,
                                                       module_name)
        return compiled
    def set_compiled_tag(self, compiled, type_descriptor):
        # Copy before mutating so shared compiled types are not affected.
        compiled = self.copy(compiled)
        tag = type_descriptor['tag']
        class_prio = CLASS_PRIO[tag.get('class', 'CONTEXT_SPECIFIC')]
        class_number = tag['number']
        compiled.tag = (class_prio, class_number)
        return compiled
    def compile_members(self,
                        members,
                        module_name,
                        sort_by_tag=False,
                        flat_additions=False):
        compiled_members = []
        in_extension = False
        additions = None
        for member in members:
            # The extension marker ('...') toggles between root members
            # and extension additions; the first marker starts the
            # additions list.
            if member == EXTENSION_MARKER:
                in_extension = not in_extension
                if in_extension:
                    additions = []
            elif in_extension:
                self.compile_extension_member(member,
                                              module_name,
                                              additions,
                                              flat_additions)
            else:
                self.compile_root_member(member,
                                         module_name,
                                         compiled_members)
        if sort_by_tag:
            compiled_members = sorted(compiled_members, key=attrgetter('tag'))
        return compiled_members, additions
    def compile_extension_member(self,
                                 member,
                                 module_name,
                                 additions,
                                 flat_additions):
        # A list member is a version bracket ([[...]]): either flattened
        # into individual additions (CHOICE) or kept as one AdditionGroup
        # (SEQUENCE/SET).
        if isinstance(member, list):
            if flat_additions:
                for memb in member:
                    compiled_member = self.compile_member(memb,
                                                          module_name)
                    additions.append(compiled_member)
            else:
                compiled_member, _ = self.compile_members(member,
                                                          module_name)
                compiled_group = AdditionGroup('ExtensionAddition',
                                               compiled_member,
                                               None)
                additions.append(compiled_group)
        else:
            compiled_member = self.compile_member(member,
                                                  module_name)
            additions.append(compiled_member)
    def get_permitted_alphabet(self, type_descriptor):
        # Build a PermittedAlphabet from a FROM constraint, which is a
        # list of single characters and (begin, end) character ranges.
        def char_range(begin, end):
            return ''.join([chr(char)
                            for char in range(ord(begin), ord(end) + 1)])
        if 'from' not in type_descriptor:
            return
        permitted_alphabet = type_descriptor['from']
        value = ''
        for item in permitted_alphabet:
            if isinstance(item, tuple):
                value += char_range(item[0], item[1])
            else:
                value += item
        # Characters are encoded as indexes into the sorted alphabet.
        value = sorted(value)
        encode_map = {ord(v): i for i, v in enumerate(value)}
        decode_map = {i: ord(v) for i, v in enumerate(value)}
        return PermittedAlphabet(encode_map, decode_map)
def compile_dict(specification, numeric_enums=False):
    """Compile a parsed ASN.1 specification dictionary into PER codecs."""
    per_compiler = Compiler(specification, numeric_enums)
    return per_compiler.process()
def decode_length(_data):
    """Always raise: PER frames have no standalone decodable length prefix."""
    raise DecodeError('Decode length is not supported for this codec.')
| {
"repo_name": "eerimoq/asn1tools",
"path": "asn1tools/codecs/per.py",
"copies": "1",
"size": "74856",
"license": "mit",
"hash": 2101994487705196000,
"line_mean": 31.6169934641,
"line_max": 90,
"alpha_frac": 0.5260099391,
"autogenerated": false,
"ratio": 4.345271956811981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5371281895911981,
"avg_score": null,
"num_lines": null
} |
"""Align embeddings using Procrustes method
"""
import numpy as np
def match_coords(C1, C2):
    """Greedily match each row of C1 to the nearest unused row of C2.

    Parameters
    ----------
    C1 : ndarray, shape (n1, n_dims)
        Query coordinates.
    C2 : ndarray, shape (n2, n_dims)
        Candidate coordinates; assumed n2 >= n1 so every query can be
        matched.

    Returns
    -------
    idx : list of int
        For each row of C1, the index of its matched (previously unused)
        row of C2.
    """
    idx = []
    all_indexes = np.arange(C2.shape[0])
    for pt1 in C1:
        remaining = np.setdiff1d(all_indexes, idx)
        diffs = C2[remaining, :] - pt1
        # argmin is O(n) and deterministically picks the first minimum,
        # unlike the previous argsort(...)[0] which sorted all distances.
        nearest = np.argmin(np.sum(diffs * diffs, axis=1))
        idx.append(remaining[nearest])
    return idx
def get_weight_matrix(Acoord, Bcoord, idx):
    """Diagonal Gaussian weight matrix from matched-coordinate distances.

    Rows of ``Acoord`` are paired with rows ``Bcoord[idx]``; weights decay
    with squared distance, scaled by the median distance (floored at 1e-4).
    """
    diff = Acoord - Bcoord[idx, :]
    d = np.sqrt(np.sum(diff * diff, axis=1))
    epsilon = max(np.median(d), 0.0001)
    return np.diag(np.exp(-(d * d) / epsilon))
def iterative_alignment(embeddings, n_iters=1):
    """Iteratively align embeddings with orthogonal Procrustes rotations.

    Parameters
    ----------
    embeddings : list of ndarray, shape (n_points, n_dims)
        Embeddings to align. The first one is the initial target.
    n_iters : int
        Number of alignment passes. Passes after the first align every
        embedding to the mean of the previously aligned set.

    Returns
    -------
    realigned : list of ndarray
        Aligned embeddings, in input order.
    xfms : list of ndarray
        Rotation matrices from the last pass. After the first pass only
        transforms for ``embeddings[1:]`` are present; later passes
        produce one transform per embedding.
    """
    # Fixes over the original: removed the unused ``index`` accumulator,
    # the loop variable shadowing (outer/inner ``i``) and the redundant
    # mean -> array round-trip; behavior is unchanged.
    target = embeddings[0]
    realigned = [target]
    xfms = []
    # First pass: align everything to the first embedding.
    for embedding in embeddings[1:]:
        u, _, v = np.linalg.svd(target.T.dot(embedding), full_matrices=False)
        xfms.append(v.T.dot(u.T))
        realigned.append(embedding.dot(xfms[-1]))
    # Later passes: re-align every original embedding (including the
    # first) to the mean of the previously aligned set.
    for _ in range(1, n_iters):
        target = np.asarray(np.mean(realigned, axis=0)).squeeze()
        realigned = []
        xfms = []
        for embedding in embeddings:
            u, _, v = np.linalg.svd(target.T.dot(embedding),
                                    full_matrices=False)
            xfms.append(v.T.dot(u.T))
            realigned.append(embedding.dot(xfms[-1]))
    return realigned, xfms
def iterative_alignment_with_coords(embeddings, coords=None, n_iters=1, n_samples=0.1, use_mean=False):
    # Weighted Procrustes alignment of embeddings using anatomical
    # coordinates to match points between subjects.
    #
    # embeddings : list of (n_points_i, n_dims) arrays.
    # coords     : list of (n_points_i, 3) coordinate arrays, or None to
    #              fall back to random dummy coordinates / identity weights.
    # n_iters    : number of refinement passes after the first alignment.
    # n_samples  : fraction of points sampled per embedding when building
    #              the pooled target (ignored when use_mean=True).
    # use_mean   : build the target as the mean of realigned embeddings
    #              instead of pooling random samples.
    #
    # Returns (realigned, xfms) from the final pass.
    target = embeddings[0]
    if coords is None:
        # No coordinates: identity weights and trivial point matching.
        targetcoords = None
        dummycoords = np.random.rand(target.shape[0], 3)
        W = np.eye(target.shape[0])
        idx = range(target.shape[0])
    else:
        targetcoords = coords[0]
    realigned = [target]
    # first pass
    for i, embedding in enumerate(embeddings[1:]):
        if targetcoords is not None:
            # Match points to the target by coordinates and weight the
            # Procrustes fit by matched-point distance.
            idx = match_coords(targetcoords, coords[i + 1])
            W = get_weight_matrix(targetcoords, coords[i + 1], idx)
        u, s, v = np.linalg.svd(target.T.dot(W.dot(embedding[idx, :])))
        xfm = v.T.dot(u.T)
        realigned.append(embedding.dot(xfm))
    # get mean target
    # 1. random sampling (doesn't cover all points)
    #  - allows more dense sampling
    #  - keeps coordinates from the real anatomical space
    # 2. mean across subjects
    # multi-pass
    for i in range(n_iters):
        index = []
        target = []
        targetcoords = []
        basecoords = []
        if use_mean:
            target = np.mean(realigned, axis=0).squeeze()
            if coords is None:
                targetcoords = dummycoords
            else:
                targetcoords = np.mean(coords, axis=0)
        else:
            # Pool a random sample of points (and their coordinates) from
            # every realigned embedding into one target.
            for i, embedding in enumerate(realigned):
                index.append(np.random.permutation(embedding.shape[0])[:int(n_samples*embedding.shape[0])])
                target.extend(embedding[index[-1]].tolist())
                if coords is None:
                    targetcoords.extend(dummycoords[index[-1]])
                    basecoords.append(dummycoords)
                else:
                    targetcoords.extend(coords[i][index[-1]])
                    basecoords.append(coords[i])
            target = np.array(target)
            targetcoords = np.array(targetcoords)
        # NOTE(review): in the sampling branch above basecoords was
        # already filled once per embedding, so this loop appends a second
        # copy; only the first len(embeddings) entries are read below, so
        # the duplication is harmless but looks redundant — confirm intent.
        for i, embedding in enumerate(realigned):
            if coords is None:
                basecoords.append(dummycoords)
            else:
                basecoords.append(coords[i])
        realigned = []
        xfms = []
        for i, embedding in enumerate(embeddings):
            idx = match_coords(targetcoords, basecoords[i])
            W = get_weight_matrix(targetcoords, basecoords[i], idx)
            u, s, v = np.linalg.svd(target.T.dot(W.dot(embedding[idx, :])))
            xfms.append(v.T.dot(u.T))
            realigned.append(embedding.dot(xfms[-1]))
    return realigned, xfms
| {
"repo_name": "ThomasYeoLab/CBIG",
"path": "external_packages/python/mapalign-master/mapalign/align.py",
"copies": "1",
"size": "4122",
"license": "mit",
"hash": -6516714628070068000,
"line_mean": 35.1578947368,
"line_max": 107,
"alpha_frac": 0.5761766133,
"autogenerated": false,
"ratio": 3.5051020408163267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9577985004423549,
"avg_score": 0.0006587299385554981,
"num_lines": 114
} |
"""Aligner for texts and their segmentations.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['AlignmentFailed', 'Aligner']
class AlignmentFailed(Exception):
    """Raised when a token cannot be located in the source text."""
    pass
class Aligner(object):
    """Align a text with its tokenization."""
    def align(self, text, tokens):
        """Align text with its tokenization.

        Parameters
        ----------
        text : str
            Text.

        tokens : list of str
            Tokenization of ``text``.

        Returns
        -------
        spans : list of tuple
            List of (``onset``, ``offset``) pairs, where ``spans[i]`` gives the
            onset and offset in characters of ``tokens[i]`` relative to the
            beginning of ``text`` (0-indexed).

        Raises
        ------
        AlignmentFailed
            If some token cannot be found in ``text`` (searching in order).
        """
        spans = []
        search_from = 0
        for token in tokens:
            try:
                # Locate the token at or after the end of the previous one.
                onset = search_from + text[search_from:].index(token)
            except ValueError:
                raise AlignmentFailed(token)
            offset = onset + len(token) - 1
            # Append a tuple, matching the documented return type (the
            # previous implementation appended 2-element lists).
            spans.append((onset, offset))
            search_from = offset + 1
        return spans
| {
"repo_name": "nryant/twokenize_py",
"path": "twokenize_py/align.py",
"copies": "1",
"size": "1237",
"license": "apache-2.0",
"hash": -7878816150672177000,
"line_mean": 25.8913043478,
"line_max": 79,
"alpha_frac": 0.5238480194,
"autogenerated": false,
"ratio": 4.310104529616725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005574136008918618,
"num_lines": 46
} |
"""Alignment algorithms."""
from warnings import warn
import numpy as np
from scipy.linalg import svd, det
from . import earth
from . import dcm
from . import util
def align_wahba(dt, theta, dv, lat, VE=None, VN=None):
    """Estimate attitude matrix by solving Wahba's problem.

    This method is based on solving a least-squares problem for a direction
    cosine matrix A (originally formulated in [1]_)::

        L = sum(||A r_i - b_i||^2, i=1, ..., m) -> min A,
        s. t. A being a right orthogonal matrix.

    Here ``(r_i, b_i)`` are measurements of the same unit vectors in two
    frames.

    The application of this method to self alignment of INS is explained in
    [2]_. In this problem the vectors ``(r_i, b_i)`` are normalized velocity
    increments due to gravity. It is applicable to dynamic conditions as well,
    but in this case a full accuracy can be achieved only if velocity is
    provided.

    The optimization problem is solved using the most straightforward method
    based on SVD [3]_.

    Parameters
    ----------
    dt : double
        Sensors sampling period.
    theta, dv : array_like, shape (n_samples, 3)
        Rotation vectors and velocity increments computed from gyro and
        accelerometer readings after applying coning and sculling
        corrections.
    lat : float
        Latitude of the place.
    VE, VN : array_like with shape (n_samples + 1, 3) or None
        East and North velocity of the target. If None (default), it is
        assumed to be 0. See Notes for further details.

    Returns
    -------
    hpr : tuple of 3 floats
        Estimated heading, pitch and roll at the end of the alignment.
    P_align : ndarray, shape (3, 3)
        Covariance matrix of misalignment angles, commonly known as
        "phi-angle" in INS literature. Its values are measured in degrees
        squared. This matrix is estimated in a rather ad-hoc fashion, see
        Notes.

    Notes
    -----
    If the alignment takes place in dynamic conditions but velocities `VE`
    and `VN` are not provided, the alignment accuracy will be decreased (to
    some extent it will be reflected in `P_align`). Note that `VE` and `VN` are
    required with the same rate as inertial readings (and contain 1 more
    sample). It means that you usually have to do some sort of interpolation.
    In on-board implementation you just provide the last available velocity
    data from GPS and it will work fine.

    The paper [3]_ contains a recipe of computing the covariance matrix given
    that errors in measurements are independent, small and follow a statistical
    distribution with zero mean and known variance. In our case we estimate
    measurement error variance from the optimal value of the optimized function
    (see above). But as our errors are not independent and necessary small
    (nor they follow any reasonable distribution) we don't scale their
    variance by the number of observations (which is commonly done for the
    variance of an average value). Some experiments show that this approach
    gives reasonable values of `P_align`.

    Also note, that `P_align` accounts only for misalignment errors due
    to non-perfect alignment conditions. In addition to that, azimuth accuracy
    is always limited by gyro drifts and level accuracy is limited by the
    accelerometer biases. You should add these systematic uncertainties to the
    diagonal of `P_align`.

    References
    ----------
    .. [1] G. Wahba, "Problem 65–1: A Least Squares Estimate of Spacecraft
           Attitude", SIAM Review, 1965, 7(3), 409.
    .. [2] P. M. G. Silson, "Coarse Alignment of a Ship’s Strapdown Inertial
           Attitude Reference System Using Velocity Loci", IEEE Trans. Instrum.
           Meas., vol. 60, pp. 1930-1941, Jun. 2011.
    .. [3] F. L. Markley, "Attitude Determination using Vector Observations
           and the Singular Value Decomposition", The Journal of the
           Astronautical Sciences, Vol. 36, No. 3, pp. 245-258, Jul.-Sept.
           1988.
    """
    n_samples = theta.shape[0]
    # Velocity in the geographic frame; vertical channel assumed zero.
    Vg = np.zeros((n_samples + 1, 3))
    if VE is not None:
        Vg[:, 0] = VE
    if VN is not None:
        Vg[:, 1] = VN
    lat = np.deg2rad(lat)
    slat, clat = np.sin(lat), np.cos(lat)
    tlat = slat / clat
    # Earth curvature radii, rotation rate vector and gravity at this
    # latitude (conventions per the package's earth module).
    re, rn = earth.principal_radii(lat)
    u = earth.RATE * np.array([0, clat, slat])
    g = np.array([0, 0, -earth.gravity(slat)])
    # Cb0b / Cg0g: attitude of the body / geographic frame relative to its
    # initial orientation, propagated sample by sample.
    Cb0b = np.empty((n_samples + 1, 3, 3))
    Cg0g = np.empty((n_samples + 1, 3, 3))
    Cb0b[0] = np.identity(3)
    Cg0g[0] = np.identity(3)
    # Midpoint velocities for the transport rate computation.
    Vg_m = 0.5 * (Vg[1:] + Vg[:-1])
    rho = np.empty_like(Vg_m)
    rho[:, 0] = -Vg_m[:, 1] / rn
    rho[:, 1] = Vg_m[:, 0] / re
    rho[:, 2] = Vg_m[:, 0] / re * tlat
    for i in range(n_samples):
        # Geographic frame rotates at transport rate + Earth rate; body
        # frame rotates by the gyro-derived rotation vectors.
        Cg0g[i + 1] = Cg0g[i].dot(dcm.from_rv((rho[i] + u) * dt))
        Cb0b[i + 1] = Cb0b[i].dot(dcm.from_rv(theta[i]))
    # Specific force in the geographic frame (Coriolis minus gravity),
    # integrated into the velocity locus V_g0 in the initial geographic
    # frame; V_b0 is the matching locus from accelerometer increments.
    f_g = np.cross(u, Vg) - g
    f_g0 = util.mv_prod(Cg0g, f_g)
    f_g0 = 0.5 * (f_g0[1:] + f_g0[:-1])
    f_g0 = np.vstack((np.zeros(3), f_g0))
    V_g0 = util.mv_prod(Cg0g, Vg) + dt * np.cumsum(f_g0, axis=0)
    V_b0 = np.cumsum(util.mv_prod(Cb0b[:-1], dv), axis=0)
    V_b0 = np.vstack((np.zeros(3), V_b0))
    # Form unit-vector observation pairs from half-interval differences of
    # the two loci.
    k = n_samples // 2
    b = V_g0[k:2 * k] - V_g0[:k]
    b /= np.linalg.norm(b, axis=1)[:, None]
    r = V_b0[k:2 * k] - V_b0[:k]
    r /= np.linalg.norm(r, axis=1)[:, None]
    # Attitude profile matrix for Wahba's problem.
    B = np.zeros((3, 3))
    for bi, ri in zip(b, r):
        B += np.outer(bi, ri)
    n_obs = b.shape[0]
    B /= n_obs
    # SVD solution [3]_; d corrects for a possible reflection so the
    # result is a proper rotation.
    U, s, VT = svd(B, overwrite_a=True)
    d = det(U) * det(VT)
    Cg0b0 = U.dot(np.diag([1, 1, d])).dot(VT)
    Cgb = Cg0g[-1].T.dot(Cg0b0).dot(Cb0b[-1])
    # Ad-hoc covariance estimate following [3]_, see Notes.
    s[-1] *= d
    trace_s = np.sum(s)
    L = 1 - trace_s
    D = trace_s - s
    M = np.identity(3) - np.diag(s)
    if L < 0 or np.any(M < 0):
        L = max(L, 0)
        M[M < 0] = 0
        warn("Negative values encountered when estimating the covariance, "
             "they were set to zeros.")
    R = (L * M / n_obs) ** 0.5 / D
    R = U.dot(R)
    R = Cg0g[-1].T.dot(R)
    R = np.rad2deg(R)
    return dcm.to_hpr(Cgb), R.dot(R.T)
| {
"repo_name": "nmayorov/pyins",
"path": "pyins/align.py",
"copies": "1",
"size": "6144",
"license": "mit",
"hash": 2215627464297501400,
"line_mean": 35.5476190476,
"line_max": 79,
"alpha_frac": 0.6175895765,
"autogenerated": false,
"ratio": 3.1246819338422394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9242271510342239,
"avg_score": 0,
"num_lines": 168
} |
##### alignment_base_split
# Command-line script: splits a codon-aligned fasta alignment into three
# alignments, one per codon position.
import sys
import os
args = sys.argv
arg_len = len(args)
if arg_len <2:
	print("\n**** alignment_base_split.py | Written by DJP, 07/04/16 in Python 3.5 in Edinburgh / SA train ****\n")
	print("This program takes a fasta alignments as input, and outputs 3 fasta alignments:\n first codon (seq_name_base + _first_position.fa)\nsecond codon (seq_name_base + _second_position.fa)\n third codon (seq_name_base + _third_position.fa)")
	print("\n**** WARNING **** \n")
	print("base name for output is taken to be the part of the file name before the first '.' \n")
	print("\n**** USAGE **** \n")
	print("alignment_base_split.py [name of fasta alignment file] \n")
else:
	seqF1 = args[1]
	### add seqs to dictionary
	# NOTE(review): each non-header line is treated as one full sequence,
	# so this assumes unwrapped fasta (sequence on a single line) — confirm
	# inputs are never line-wrapped.
	name_list = []
	seq_list = []
	seq_dict = {}
	seq_name = seqF1
	done = 0
	seq_file_1 = open(seq_name)
	for line in seq_file_1:
		lineA = line.rstrip("\n")
		if lineA.startswith(">"):
			lineB = lineA.replace(">", "")
			name_list.append(lineB)
		else:
			seq_list.append(lineA)
			done = done + 1
			# Progress report every 1000 sequences.
			done_divide = done / 1000
			if done_divide.is_integer():
				print("Read " + str(done) + " sequences from " + seqF1)
	# Pair names with sequences by position (assumes one sequence line per
	# header, see NOTE above).
	for element in range(0,len(name_list)):
		name1 = name_list[element]
		seq1 = seq_list[element]
		seq_dict[name1] = seq1
	seq_N = str(len(name_list))
	print ("\nLoaded " + seq_N + " seqeunces from " + seqF1 + ".\n")
	### split by base
	seq_name_base = seq_name.rsplit('.') ### beware if name of fasta file has more than one '.'
	seq_name_base = seq_name_base[0]
	output_file_0 = open(seq_name_base + '_first_position.fa', "w")
	output_file_1 = open(seq_name_base + '_second_position.fa', "w")
	output_file_2 = open(seq_name_base + '_third_position.fa', "w")
	for el in name_list: # keep order
		seq = seq_dict.get(el)
		#seq = seq.upper()
		seq_len = len(seq)
		# Collect every third base starting at offsets 0, 1 and 2 to get
		# the first, second and third codon positions respectively.
		base_0 = ""
		for number in range(0, seq_len, 3): ## range to give codon 1,2, or 3
			seq_t = seq[number]
			base_0 = base_0 + seq_t
		#print(el + base_0)
		output_file_0.write('>' + el + '\n' + base_0 + '\n')
		base_1 = ""
		for number in range(1, seq_len, 3): ## range to give codon 1,2, or 3
			seq_t = seq[number]
			base_1 = base_1 + seq_t
		#print(el + base_1)
		output_file_1.write('>' + el + '\n' + base_1 + '\n')
		base_2 = ""
		for number in range(2, seq_len, 3): ## range to give codon 1,2, or 3
			seq_t = seq[number]
			base_2 = base_2 + seq_t
		#print(el + base_2)
		output_file_2.write('>' + el + '\n' + base_2 + '\n')
| {
"repo_name": "DarrenJParker/fasta_tools",
"path": "alignment_base_split.py",
"copies": "1",
"size": "2581",
"license": "mit",
"hash": -275477630111508860,
"line_mean": 27.3295454545,
"line_max": 244,
"alpha_frac": 0.5854320031,
"autogenerated": false,
"ratio": 2.6498973305954827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.820390165067417,
"avg_score": 0.10628553660426254,
"num_lines": 88
} |
##### alignment_concat
import sys
import os
import getopt

# Parse the command line; the option letters match the -h help text below.
try:
    opts, args = getopt.getopt(sys.argv[1:], 'i:d:D:o:s:n:g:e:f:h')
except getopt.GetoptError:
    print('ERROR getting options, please see help by specifing -h')
    sys.exit(2) ### close ofter error from try
arg_len = len(opts)
if arg_len == 0:
    print('No options provided. Please see help by specifing -h')
    sys.exit(2)

# Defaults; each may be overridden by a command-line option (see -h).
in_dir_name = None        # -i  directory containing per-gene fasta alignments
file_ext = ".fa"          # -e  extension used to select input files
out_name = "testout.fa"   # -o  concatenated output alignment
delim_g_str = "-"         # -d  delimiter used to split out the gene name
delim_s_str = "-"         # -D  delimiter used to split out the species name
new_delim_str = "_"       # -n  delimiter joining species+gene in new names
sp_group_pos = 1          # -s  1-based field index of the species name
gene_name_pos = 2         # -g  1-based field index of the gene name
filter_filename = None    # -f  optional file listing gene names to keep
#print (opts) ## see args
# Apply each command-line option.
# NOTE(review): `opt in ('-h')` tests substring membership in the string
# '-h' (not tuple membership); it works for single-letter options but is
# fragile.
for opt, arg in opts:
    if opt in ('-h'):
        # Print usage information and exit.
        print("\n**** alignment_concat.py | Written by DJP, 07/04/16 in Python 3.5 in SA / Edinburgh train ****\n")
        print("\n**** Re-written by DJP, 08/04/20 in Python 3.5 in Lausanne ****\n")
        print("This program takes a number of seperate fasta file alignments as input, and concats them")
        print("Sequences in each file DO NOT need be in order before use\n")
        print("\n**** USAGE **** \n")
        print("python3 alignment_concat.py -i [input dir] -o [output file] \n")
        print("\n**** OPTIONS **** \n")
        print("\t-e\tfile extention. Default = .fa")
        print("\t-d\tdelim to get gene name from header. Default = -")
        print("\t-D\tdelim to get sp name from header. Default = -")
        print("\t-n\tnew delim for output file. Default = _")
        print("\t-s\tIndex for sp name after spliting. Default = 1")
        print("\t-g\tIndex for gene name after spliting. Default = 2")
        print("\t-f\tfilter filename. Specify if want to filter out any genes")
        print("\n**** EXAMPLE fasta file ****\n")
        print(">speciesA-gene1\nATTATACACCACGACGAGCAGCAGCCGAGCACGACGCGAG")
        print(">speciesB-gene1\nCCCCCCCCCCCGAGCAGCAGCCGAGCACGACGCGAGCAGC")
        print(">speciesC-gene1\nGGGGGGGGCCACGACGAGCAGCAGCCGAGCACGACGCGAG\n\n")
        print("\n**** EXAMPLE code ****\n")
        print("python3 alignment_concat.py -i in_dir -e .fa -D - -d - -s 1 -g 2 -n _\n\n")
        sys.exit(2)
    elif opt in ('-i'):
        in_dir_name = arg
    elif opt in ('-e'):
        file_ext = arg
    elif opt in ('-o'):
        out_name = arg
    elif opt in ('-d'):
        delim_g_str = arg
    elif opt in ('-D'):
        delim_s_str = arg
    elif opt in ('-s'):
        sp_group_pos = int(arg)
    elif opt in ('-g'):
        gene_name_pos = int(arg)
    elif opt in ('-n'):
        new_delim_str = arg
    elif opt in ('-f'):
        filter_filename = arg
    else:
        # Unrecognised option letter (should be unreachable given getopt spec).
        print("i dont know")
        sys.exit(2)
## Read seqs, unwrap, add into a dict
### read in file
# seq_dict maps "species<new_delim>gene" -> unwrapped sequence string.
seq_dict = {}
N_seqs_per_file = set()   # distinct per-file sequence counts (must end up == 1)
all_new_names = set()     # used to enforce unique species+gene names
gene_name_set = set()
sp_name_set = set()
path = in_dir_name
for path, subdirs, files in os.walk(path):
    for name in files:
        if name.endswith(file_ext):
            #print (os.path.join(path, name))
            # First pass: unwrap multi-line fasta records into a temp file
            # with one sequence per line.
            curr_file = open(os.path.join(path, name))
            output_fasta_name = name + ".TEMP_extract_fasta_file"
            output_file = open(output_fasta_name, "w")
            count = 0
            for line in curr_file:
                count = count + 1
                line = line.rstrip("\n")
                if line.startswith(">") and count == 1:
                    output_file.write(line + "\n")
                elif line.startswith(">") and count > 1:
                    output_file.write("\n" + line + "\n")
                else:
                    output_file.write(line)
            output_file.close()
            ### add seqs to dictionary
            name_list = []
            seq_list = []
            seq_len = set()   # distinct sequence lengths within this file
            done = 0
            seq_file_1 = open(output_fasta_name)
            for line in seq_file_1:
                lineA = line.rstrip("\n")
                if lineA.startswith(">"):
                    # Rebuild the record name as species + new delim + gene.
                    lineB = lineA.replace(">", "")
                    sp = lineB.split(delim_s_str)[sp_group_pos -1]
                    gene_name = lineB.split(delim_g_str)[gene_name_pos -1]
                    new_name = sp + new_delim_str + gene_name
                    gene_name_set.add(gene_name)
                    sp_name_set.add(sp)
                    #print(new_name)
                    if new_name not in all_new_names:
                        all_new_names.add(new_name)
                    else:
                        print("New names are not unique. FIX THIS before going on.")
                        sys.exit(2)
                    name_list.append(new_name)
                else:
                    seq_list.append(lineA)
                    done = done + 1
            # Names and sequences alternate in the temp file, so the two
            # lists are index-aligned.
            for element in range(0,len(name_list)):
                name1 = name_list[element]
                seq1 = seq_list[element]
                seq_dict[name1] = seq1
                seq_len.add(len(seq1))
            N_seqs_per_file.add(len(name_list))
            # All sequences within one alignment file must share a length.
            if len(seq_len) != 1:
                print("alignments not the same length. FIX THIS")
                sys.exit(2)
            #print(seq_dict)
            ## tidyup
            seq_file_1.close()
            os.remove(output_fasta_name)

print("\n\nTotal number of seqs read: " + str(len(seq_dict)))
# Every alignment file must contain the same number of sequences.
if len(N_seqs_per_file) != 1:
    print("Different files have different number of sequences. FIX THIS")
    sys.exit(2)
else:
    print("Number of seqs per file: " + str(list(N_seqs_per_file)[0]))
#print(sp_name_set)
if filter_filename == None:
    print("Number of alignments to be joined: " + str(len(gene_name_set)))

# Sorted orders guarantee a deterministic concatenation across species.
sp_name_list_s = sorted(list(sp_name_set))
gene_name_list_s = sorted(list(gene_name_set))

###########################################################################################
### if filtering genes to a subset
# Restrict gene_name_list_s to genes listed (one per line) in the filter file.
if filter_filename != None:
    gene_name_set_f = set()
    filter_file = open(filter_filename)
    for line in filter_file:
        line = line.strip()
        gene_name_set_f.add(line)
    gene_name_list_f = []
    for el in gene_name_list_s:
        if el in gene_name_set_f :
            gene_name_list_f.append(el)
    gene_name_list_s = gene_name_list_f
    print("Number of alignments to be joined after filtering: " + str(len(gene_name_list_s)))
######################################################################
## output
# Write one concatenated record per species; genes are appended in the same
# sorted order for every species so alignment columns stay consistent.
out_fa_file = open(out_name, "w")
align_len = 0
N_sp = 0
for s in sp_name_list_s:
    out_fa_file.write(">" + s + "\n")
    N_sp = N_sp + 1
    for g in gene_name_list_s:
        rec = seq_dict.get(s + new_delim_str + g)
        if rec is None:
            # A missing species/gene combination used to crash with a bare
            # TypeError on write(); fail with an explicit message instead,
            # matching the script's other error handling.
            print("No sequence for " + s + new_delim_str + g + ". FIX THIS")
            sys.exit(2)
        out_fa_file.write(rec)
        if N_sp == 1:
            # Total alignment length only needs to be summed once.
            align_len = align_len + len(rec)
    out_fa_file.write("\n")
out_fa_file.close()

print("Alignment length: " + str(align_len))
print("\nOutput alignment file: " + out_name + "\n")
print("\nFinished, JMS\n\n")
| {
"repo_name": "DarrenJParker/fasta_tools",
"path": "alignment_concat.py",
"copies": "1",
"size": "6144",
"license": "mit",
"hash": -4466723590678872600,
"line_mean": 26.1834862385,
"line_max": 109,
"alpha_frac": 0.5838216146,
"autogenerated": false,
"ratio": 2.758868432869331,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38426900474693315,
"avg_score": null,
"num_lines": null
} |
# Alignment examples.
from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label
from ocempgui.widgets.Constants import *
def create_alignment_view ():
    """Build and return the demo frame showing all nine alignment variants."""
    def make_aligned (flags, caption):
        # A fixed-size Alignment wrapping a captioned Button.
        box = Alignment (100, 50)
        box.align = flags
        box.child = Button (caption)
        return box

    frm_main = VFrame (Label ("Alignment examples"))
    frm_main.topleft = 10, 10

    # Top row of alignments.
    top_row = (make_aligned (ALIGN_TOP | ALIGN_LEFT, "topleft"),
               make_aligned (ALIGN_TOP, "top"),
               make_aligned (ALIGN_TOP | ALIGN_RIGHT, "topright"))
    frm_top = HFrame ()
    frm_top.children = top_row

    # Vertically centered row.
    center_row = (make_aligned (ALIGN_LEFT, "left"),
                  make_aligned (ALIGN_NONE, "center"),
                  make_aligned (ALIGN_RIGHT, "right"))
    frm_center = HFrame ()
    frm_center.children = center_row

    # Bottom row of alignments.
    bottom_row = (make_aligned (ALIGN_BOTTOM | ALIGN_LEFT, "bottomleft"),
                  make_aligned (ALIGN_BOTTOM, "bottom"),
                  make_aligned (ALIGN_BOTTOM | ALIGN_RIGHT, "bottomright"))
    frm_bottom = HFrame ()
    frm_bottom.children = bottom_row

    frm_main.children = frm_top, frm_center, frm_bottom
    return frm_main
if __name__ == "__main__":
    # Initialize the drawing window.
    # NOTE(review): the name "re" shadows the stdlib re module if it were
    # imported in this file.
    re = Renderer ()
    re.create_screen (350, 300)
    re.title = "Alignment examples"
    re.color = (234, 228, 223)
    re.add_widget (create_alignment_view ())
    # Start the main rendering loop.
    re.start ()
| {
"repo_name": "prim/ocempgui",
"path": "doc/examples/alignment.py",
"copies": "1",
"size": "2185",
"license": "bsd-2-clause",
"hash": 562793848207529500,
"line_mean": 30.6666666667,
"line_max": 79,
"alpha_frac": 0.6672768879,
"autogenerated": false,
"ratio": 3.300604229607251,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9386801184591218,
"avg_score": 0.016215986583206547,
"num_lines": 69
} |
"""Alignment pipeline integration tests.
"""
import os
import time
from django.conf import settings
from djcelery_testworker.testcase import CeleryWorkerTestCase
from main.models import AlignmentGroup
from main.models import Dataset
from main.models import ExperimentSample
from main.testing_util import create_common_entities
from pipeline.pipeline_runner import run_pipeline
from utils.import_util import copy_and_add_dataset_source
from utils.import_util import import_reference_genome_from_local_file
from utils.import_util import import_reference_genome_from_ncbi
from utils import internet_on
# Test fixtures: a small fake genome plus paired-end simulated reads.
TEST_FASTA = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
        'test_genome.fa')

TEST_FASTQ1 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
        '38d786f2', 'test_genome_1.snps.simLibrary.1.fq')

TEST_FASTQ2 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
        '38d786f2', 'test_genome_1.snps.simLibrary.2.fq')


class TestAlignmentPipeline(CeleryWorkerTestCase):
    """End-to-end tests of the alignment pipeline against a live celery worker."""

    def setUp(self):
        """Create a project with one reference genome and one paired-end sample."""
        common_entities = create_common_entities()
        self.project = common_entities['project']
        self.reference_genome = import_reference_genome_from_local_file(
                self.project, 'ref_genome', TEST_FASTA, 'fasta')
        self.experiment_sample = ExperimentSample.objects.create(
                project=self.project, label='sample1')
        copy_and_add_dataset_source(self.experiment_sample, Dataset.TYPE.FASTQ1,
                Dataset.TYPE.FASTQ1, TEST_FASTQ1)
        copy_and_add_dataset_source(self.experiment_sample, Dataset.TYPE.FASTQ2,
                Dataset.TYPE.FASTQ2, TEST_FASTQ2)

    def _wait_for_pipeline(self, async_result):
        """Block until the async pipeline task finishes; fail the test on error.

        This polling loop was previously duplicated in every test method.
        """
        while not async_result.ready():
            time.sleep(1)
        if async_result.status == 'FAILURE':
            self.fail('Async task failed.')

    def _assert_alignment_completed(self, alignment_group_obj):
        """Re-fetch the AlignmentGroup and verify one completed sample alignment."""
        alignment_group_obj = AlignmentGroup.objects.get(
                id=alignment_group_obj.id)
        self.assertEqual(1,
                len(alignment_group_obj.experimentsampletoalignment_set.all()))
        self.assertEqual(AlignmentGroup.STATUS.COMPLETED,
                alignment_group_obj.status)
        return alignment_group_obj

    def test_run_pipeline(self):
        """Tests running the full pipeline.
        """
        sample_list = [self.experiment_sample]
        alignment_group_obj, async_result = run_pipeline('name_placeholder',
                self.reference_genome, sample_list)
        self._wait_for_pipeline(async_result)
        self._assert_alignment_completed(alignment_group_obj)
        # Make sure the initial JBrowse config has been created.
        jbrowse_dir = self.reference_genome.get_jbrowse_directory_path()
        self.assertTrue(os.path.exists(jbrowse_dir))
        self.assertTrue(os.path.exists(os.path.join(jbrowse_dir,
                'indiv_tracks')))

    def test_run_pipeline__genbank_from_ncbi_with_spaces_in_label(self):
        """Tests the pipeline where the genome is imported from NCBI with
        spaces in the name.
        """
        if not internet_on():
            # NCBI unreachable; skip rather than fail spuriously.
            return
        MG1655_ACCESSION = 'NC_000913.3'
        MG1655_LABEL = 'mg1655 look a space'
        ref_genome = import_reference_genome_from_ncbi(self.project,
                MG1655_LABEL, MG1655_ACCESSION, 'genbank')
        sample_list = [self.experiment_sample]
        alignment_group_obj, async_result = run_pipeline('name_placeholder',
                ref_genome, sample_list)
        self._wait_for_pipeline(async_result)
        self._assert_alignment_completed(alignment_group_obj)
| {
"repo_name": "woodymit/millstone",
"path": "genome_designer/tests/integration/test_pipeline_integration.py",
"copies": "3",
"size": "3962",
"license": "mit",
"hash": 6941483204825603000,
"line_mean": 37.4660194175,
"line_max": 80,
"alpha_frac": 0.6617869763,
"autogenerated": false,
"ratio": 3.7306967984934087,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5892483774793409,
"avg_score": null,
"num_lines": null
} |
"""Alignment with bbmap: https://sourceforge.net/projects/bbmap/
"""
import os
from bcbio import utils
from bcbio.pipeline import datadict as dd
from bcbio.ngsalign import alignprep, novoalign, postalign
from bcbio.provenance import do
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.

    Returns the bcbio `data` dict with "work_bam" set to the sorted BAM.
    Raises ValueError when alignment splitting is enabled (unsupported by bbmap).
    """
    # "-cumi" marks samples carrying a separate UMI BAM.
    umi_ext = "-cumi" if "umi_bam" in data else ""
    out_file = os.path.join(align_dir, "{0}-sort{1}.bam".format(dd.get_sample_name(data), umi_ext))
    # NOTE(review): num_cores is looked up but never placed in the bbmap
    # command below -- presumably bbmap auto-detects threads; confirm.
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    # Read group fields come from the `names` metadata dict.
    rg_info = "rgid={rg} rgpl={pl} rgpu={pu} rgsm={sample}".format(**names)
    pair_file = pair_file if pair_file else ""
    final_file = None
    if data.get("align_split"):
        # BBMap does not accept input fastq streams
        raise ValueError("bbmap is not compatible with alignment splitting, set `align_split: false`")
    pair_arg = "in2=%s" % pair_file if pair_file else ""
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
            # bbmap expects the parent of the generated "ref" index directory.
            if index_dir.endswith(("/ref", "/ref/")):
                index_dir = os.path.dirname(index_dir)
            # sam=1.3 required for compatibility with strelka2
            # The command is filled from locals(), so the local variable
            # names above are load-bearing.
            cmd = ("bbmap.sh sam=1.3 mdtag=t {rg_info} path={index_dir} in1={fastq_file} "
                   "{pair_arg} out=stdout.sam | ")
            do.run(cmd.format(**locals()) + tobam_cl, "bbmap alignment: %s" % dd.get_sample_name(data))
    data["work_bam"] = out_file
    return data
def remap_index_fn(ref_file):
    """Return the shared bbmap index directory beside the genome sequence,
    falling back to the reference's own directory when no index exists.
    """
    candidate = os.path.join(os.path.dirname(ref_file), os.pardir, "bbmap")
    if not (os.path.exists(candidate) and os.path.isdir(candidate)):
        return os.path.dirname(ref_file)
    return candidate
| {
"repo_name": "a113n/bcbio-nextgen",
"path": "bcbio/ngsalign/bbmap.py",
"copies": "4",
"size": "1916",
"license": "mit",
"hash": -6166478218451737000,
"line_mean": 48.1282051282,
"line_max": 103,
"alpha_frac": 0.6450939457,
"autogenerated": false,
"ratio": 3.1256117455138663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030719728532506816,
"num_lines": 39
} |
"""Alignment with minimap2: https://github.com/lh3/minimap2
"""
import os
from bcbio import utils
from bcbio.pipeline import datadict as dd
from bcbio.ngsalign import alignprep, novoalign, postalign
from bcbio.provenance import do
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.

    Returns the bcbio `data` dict with "work_bam" set to the sorted BAM.
    """
    # "-cumi" marks samples carrying a separate UMI BAM.
    umi_ext = "-cumi" if "umi_bam" in data else ""
    out_file = os.path.join(align_dir, "{0}-sort{1}.bam".format(dd.get_sample_name(data), umi_ext))
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    rg_info = novoalign.get_rg_info(names)
    # Short-read preset for minimap2.
    preset = "sr"
    pair_file = pair_file if pair_file else ""
    if data.get("align_split"):
        # Splitting: align pieces via named pipes, combining afterwards.
        final_file = out_file
        out_file, data = alignprep.setup_combine(final_file, data)
        fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
    else:
        final_file = None
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
            index_file = None
            # Skip trying to use indices now as they provide only slight speed-ups
            # and give inconsitent outputs in BAM headers
            # If a single index present, index_dir points to that
            # if index_dir and os.path.isfile(index_dir):
            #     index_dir = os.path.dirname(index_dir)
            #     index_file = os.path.join(index_dir, "%s-%s.mmi" % (dd.get_genome_build(data), preset))
            if not index_file or not os.path.exists(index_file):
                # minimap2 builds an index on the fly from the raw reference.
                index_file = dd.get_ref_file(data)
            # The command is filled from locals(), so the local variable
            # names above are load-bearing.
            cmd = ("minimap2 -a -x {preset} -R '{rg_info}' -t {num_cores} {index_file} "
                   "{fastq_file} {pair_file} | ")
            do.run(cmd.format(**locals()) + tobam_cl, "minimap2 alignment: %s" % dd.get_sample_name(data))
    data["work_bam"] = out_file
    return data
def remap_index_fn(ref_file):
    """minimap2 can build indexes on the fly but will also store commons ones.

    Returns the shared "minimap2" directory next to the genome sequence when
    it exists, otherwise the reference's own directory.
    """
    candidate = os.path.join(os.path.dirname(ref_file), os.pardir, "minimap2")
    if os.path.exists(candidate) and os.path.isdir(candidate):
        return candidate
    return os.path.dirname(ref_file)
| {
"repo_name": "vladsaveliev/bcbio-nextgen",
"path": "bcbio/ngsalign/minimap2.py",
"copies": "4",
"size": "2380",
"license": "mit",
"hash": 1056028327380683300,
"line_mean": 45.6666666667,
"line_max": 106,
"alpha_frac": 0.631512605,
"autogenerated": false,
"ratio": 3.1860776439089693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5817590248908969,
"avg_score": null,
"num_lines": null
} |
"""Alignment with SNAP: http://snap.cs.berkeley.edu/
"""
import os
from bcbio import utils
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.ngsalign import alignprep, novoalign, postalign
from bcbio.provenance import do
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.

    Pipes in input, handling paired and split inputs, using interleaving magic
    from: https://biowize.wordpress.com/2015/03/26/the-fastest-darn-fastq-decoupling-procedure-i-ever-done-seen/
    """
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data)))
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    resources = config_utils.get_resources("snap", data["config"])
    rg_info = novoalign.get_rg_info(names)
    if data.get("align_split"):
        final_file = out_file
        out_file, data = alignprep.setup_combine(final_file, data)
        fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
        # Strip the "<(" ... ")" process substitution wrapper so the inner
        # commands can be embedded in the interleaving pipeline below.
        fastq_file = fastq_file[2:-1]
        if pair_file:
            pair_file = pair_file[2:-1]
            stream_input = (r"paste <({fastq_file} | paste - - - -) "
                            r"<({pair_file} | paste - - - -) | tr '\t' '\n'")
        else:
            # NOTE(review): fastq_file was already stripped above, so this
            # second [2:-1] looks suspicious -- kept as-is pending confirmation.
            stream_input = fastq_file[2:-1]
    else:
        # Bug fix: final_file was previously never assigned on this branch,
        # raising NameError at the file_exists check below.
        final_file = None
        assert fastq_file.endswith(".gz")
        if pair_file:
            stream_input = (r"paste <(zcat {fastq_file} | paste - - - -) "
                            r"<(zcat {pair_file} | paste - - - -) | tr '\t' '\n'")
        else:
            stream_input = "zcat {fastq_file}"
    pair_file = pair_file if pair_file else ""
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        # Bug fix: flag paired input with `pair_file != ""` (matching the
        # bbmap/minimap2 aligners); the previous `pair_file is not None` was
        # always True after the normalization above.
        with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
            if pair_file:
                sub_cmd = "paired"
                input_cmd = "-pairedInterleavedFastq -"
            else:
                sub_cmd = "single"
                input_cmd = "-fastq -"
            stream_input = stream_input.format(**locals())
            # The command is filled from locals(), so the local variable
            # names above are load-bearing.
            cmd = ("{stream_input} | snap-aligner {sub_cmd} {index_dir} {input_cmd} "
                   "-R '{rg_info}' -t {num_cores} -M -o -sam - | ")
            do.run(cmd.format(**locals()) + tobam_cl, "SNAP alignment: %s" % names["sample"])
    data["work_bam"] = out_file
    return data
# Optional galaxy location file. Falls back on remap_index_fn if not found
galaxy_location_file = "snap_indices.loc"


def remap_index_fn(ref_file):
    """Map sequence references to snap reference directory, using standard layout.

    Unlike the bbmap/minimap2 equivalents there is no fallback: the snap
    directory must exist.
    """
    snap_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "snap")
    assert os.path.exists(snap_dir) and os.path.isdir(snap_dir), snap_dir
    return snap_dir
| {
"repo_name": "Cyberbio-Lab/bcbio-nextgen",
"path": "bcbio/ngsalign/snap.py",
"copies": "1",
"size": "2929",
"license": "mit",
"hash": 3543153154401651700,
"line_mean": 44.0615384615,
"line_max": 112,
"alpha_frac": 0.6036189826,
"autogenerated": false,
"ratio": 3.250832408435072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4354451391035072,
"avg_score": null,
"num_lines": null
} |
"""Align multiple structures with TMalign."""
from __future__ import division
import itertools
import logging
import math
import os
import subprocess
import tempfile
from cStringIO import StringIO
import networkx
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from biofrills import alnutils
def align_structs(pdb_fnames, seed_fnames=None):
    """Align multiple PDB structures using TM-align.

    Returns a list of aligned SeqRecords.

    NOTE(review): this file uses Python 2 syntax (`except ..., exc`); it will
    not parse under Python 3 without modification.
    """
    # 1. Align all-v-all structure pairs with TM-align
    allpairs = []
    for idx, ref_pdbfn in enumerate(pdb_fnames):
        # Filenames are embedded in commands/IDs; spaces would break both.
        assert ' ' not in ref_pdbfn
        for eqv_pdbfn in pdb_fnames[idx+1:]:
            assert eqv_pdbfn != ref_pdbfn
            logging.info("Aligning %s to %s", eqv_pdbfn, ref_pdbfn)
            try:
                tm_output = subprocess.check_output(['TMalign',
                                                     ref_pdbfn, eqv_pdbfn])
            except OSError:
                # TMalign binary missing or input file unreadable.
                logging.warning("Failed command: TMalign %s %s",
                                ref_pdbfn, eqv_pdbfn)
                for fname in (ref_pdbfn, eqv_pdbfn):
                    if not os.path.isfile(fname):
                        logging.warning("Missing file: %s", fname)
                raise
            except subprocess.CalledProcessError, exc:
                raise RuntimeError("TMalign failed (returned %s):\n%s"
                                   % (exc.returncode, exc.output))
            tm_seqpair = read_tmalign_as_seqrec_pair(tm_output,
                                                     ref_pdbfn, eqv_pdbfn)
            allpairs.append(tm_seqpair)
    # In case of 2 structs, no need to combine alignments -- we're done
    if len(allpairs) == 1:
        recs = allpairs[0][:2]
        return alnutils.remove_empty_cols(recs)
    # 2. Resolve MST pairs & write seed tempfiles
    tmp_seed_fnames = []
    for seedpair in mst_pairs(allpairs):
        # fd, seedfn = tempfile.mkstemp(text=True)
        # SeqIO.write(seedpair, seedfn, 'fasta')
        # SeqIO.write(seedpair, os.fdopen(fd), 'fasta')
        with tempfile.NamedTemporaryFile('w+', delete=False) as handle:
            SeqIO.write(seedpair, handle, 'fasta')
        tmp_seed_fnames.append(handle.name)
    # 3. Use MAFFT to combine TMalign'd pairs into a multiple alignment;
    seq_fd, seq_fname = tempfile.mkstemp(text=True)
    # Create a blank file to appease MAFFT
    os.write(seq_fd, '')
    os.close(seq_fd)
    mafft_output = subprocess.check_output(['mafft',
                                            '--quiet', '--amino', '--localpair',
                                            '--maxiterate', '1000']
                                           + list(itertools.chain(*[('--seed', sfn)
                                                                    for sfn in (seed_fnames or []) + tmp_seed_fnames]))
                                           + [seq_fname])
    # Clean up
    os.remove(seq_fname)
    for sfn in tmp_seed_fnames:
        os.remove(sfn)
    # 4. Emit the aligned sequences
    recs = SeqIO.parse(StringIO(mafft_output), 'fasta')
    recs = clean_and_dedupe_seqs(recs)
    recs = alnutils.remove_empty_cols(recs)
    recs = purge_seqs(recs)
    return list(recs)
def read_tmalign_as_seqrec_pair(tm_output, ref_id, eqv_id):
    """Create a pair of SeqRecords from TMalign output."""
    lines = tm_output.splitlines()
    # Collect every reported TM-score (output format differs by TMalign
    # release) and average them, since which one is reference-normalized
    # is unclear.
    scores = []
    for line in lines:
        if line.startswith('TM-score'):
            # TMalign v. 2012/05/07 or earlier: score is the second token.
            scores.append(float(line.split(None, 2)[1]))
        elif 'TM-score=' in line:
            # TMalign v. 2013/05/11 or so: "TM-score=0.123," style token.
            for token in line.split():
                if token.startswith('TM-score='):
                    _key, _val = token.split('=')
                    scores.append(float(_val.rstrip(',')))
                    break
    mean_score = math.fsum(scores) / len(scores)
    # The last five lines carry the pairwise sequence alignment.
    tail = lines[-5:]
    assert tail[0].startswith('(":"')  # (":" denotes the residues pairs
    assert not tail[-1].strip()
    ref_seq = tail[1].strip()
    eqv_seq = tail[3].strip()
    description = "TMalign TM-score=%f" % mean_score
    return (SeqRecord(Seq(ref_seq), id=ref_id, description=description),
            SeqRecord(Seq(eqv_seq), id=eqv_id, description=description),
            mean_score)
def mst_pairs(pairs):
    """Given all pairwise distances, determine the minimal spanning subset.

    Converts the scored pairs into an undirected graph weighted by inverse
    score and returns the minimum-spanning-tree edges: the smallest set of
    pairs connecting every node.

    Input: iterable of (SeqRecord, SeqRecord, distance)
    Output: iterable of (SeqRecord, SeqRecord)
    """
    graph = networkx.Graph()
    for node_a, node_b, score in pairs:
        # Higher similarity -> lower weight -> preferred by the MST.
        graph.add_edge(node_a, node_b, weight=1.0/score)
    return list(networkx.minimum_spanning_edges(graph, data=False))
def tmscore_from_description(text):
    """Pull the numeric TM-score out of a record description string.

    Returns None when no "TM-score=..." token is present.
    """
    for word in text.split():
        if word.startswith('TM-score'):
            return float(word.split('=', 1)[1])
def clean_and_dedupe_seqs(records, best_score=False):
    """Remove the _seed_ prefix and omit duplicated records (by ID)."""
    prefix = '_seed_'
    seen = {} if best_score else set()
    for record in records:
        # MAFFT marks seed-alignment sequences; restore the bare ID.
        if record.id.startswith(prefix):
            record.id = record.id[len(prefix):]
            record.name = record.id
        if best_score:
            # Keep only the best-aligned copy of each PDB, judged by the
            # TM-score embedded in the description.
            tmscore = tmscore_from_description(record.description)
            if record.id in seen and seen[record.id] >= tmscore:
                # A previously seen copy aligned at least as well; skip.
                continue
            seen[record.id] = tmscore
        else:
            # Keep a duplicate ID only when its aligned sequence differs.
            ident = (record.id, str(record.seq))
            if ident in seen:
                continue
            seen.add(ident)
        yield record
def purge_seqs(records):
    """Drop duplicated records by identical sequence."""
    emitted = set()
    for record in records:
        sequence = str(record.seq)
        if sequence in emitted:
            continue
        emitted.add(sequence)
        yield record
| {
"repo_name": "etal/fammer",
"path": "fammerlib/tmalign.py",
"copies": "2",
"size": "6484",
"license": "bsd-2-clause",
"hash": -5913380124578665000,
"line_mean": 35.0222222222,
"line_max": 84,
"alpha_frac": 0.5851326342,
"autogenerated": false,
"ratio": 3.794031597425395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006103511690871634,
"num_lines": 180
} |
# align
def align_up(alignment, x):
    """Rounds x up to nearest multiple of the alignment."""
    step = alignment
    return (x + step - 1) // step * step
def align_down(alignment, x):
    """Rounds x down to nearest multiple of the alignment."""
    # Identity: x == (x // a) * a + x % a, so subtracting the remainder
    # floors to the nearest multiple.
    return x - (x % alignment)
def align(alignment, x):
    """Rounds x up to nearest multiple of the alignment."""
    # Same computation as align_up, inlined.
    return (x + alignment - 1) // alignment * alignment
# network utils
def ip (host):
    """Resolve host and return its IPv4 address packed into a
    native-endian unsigned int."""
    import socket, struct
    packed = socket.inet_aton(socket.gethostbyname(host))
    return struct.unpack('I', packed)[0]
def get_interfaces():
    """Gets all (interface, IPv4) of the local system."""
    # Shells out to iproute2; output has one line per address, e.g.
    # "2: eth0    inet 10.0.0.5/24 ...".
    import subprocess, re
    d = subprocess.check_output('ip -4 -o addr', shell=True)
    # NOTE(review): on Python 3 check_output returns bytes, so matching with
    # a str pattern would raise TypeError -- presumably written for Python 2.
    ifs = re.findall(r'^\S+:\s+(\S+)\s+inet\s+([^\s/]+)', d, re.MULTILINE)
    # Drop the loopback interface.
    return [i for i in ifs if i[0] != 'lo']
# Stuff
def size(n, abbriv = 'B', si = False):
    """Convert number to human readable form"""
    step = 1000.0 if si else 1024.0
    if n < step:
        # Small values are shown without a fractional part.
        return '%d%s' % (n, abbriv)
    for prefix in ('K', 'M', 'G', 'T'):
        n /= step
        if n <= step:
            return '%s%s%s' % ('%.02f' % n, prefix, abbriv)
    # Anything past tera is reported in peta.
    return '%.02fP%s' % (n, abbriv)
def read(path):
    """Open file, return content."""
    with open(path) as handle:
        return handle.read()
def write(path, data):
    """Create new file or truncate existing to zero length and write data."""
    with open(path, 'w') as handle:
        handle.write(data)
def bash(cmd, timeout = None, return_stderr = False):
    """Run cmd under /bin/bash and return its stdout; with
    return_stderr=True return the (stdout, stderr) pair instead."""
    import subprocess, time
    proc = subprocess.Popen(['/bin/bash', '-c', cmd],
                            stdin = subprocess.PIPE,
                            stdout = subprocess.PIPE,
                            stderr = subprocess.PIPE)
    if timeout is None:
        out, err = proc.communicate()
    else:
        # Poll until the process exits or the deadline passes, then kill
        # any survivor before collecting output.
        deadline = time.time() + timeout
        while time.time() < deadline:
            time.sleep(0.01)
            if proc.poll() is not None:
                break
        if proc.returncode is None:
            proc.kill()
        out, err = proc.communicate()
    return (out, err) if return_stderr else out
| {
"repo_name": "Haabb/pwnfork",
"path": "pwn/util.py",
"copies": "1",
"size": "2371",
"license": "mit",
"hash": -329871686992642750,
"line_mean": 29.3974358974,
"line_max": 78,
"alpha_frac": 0.5326866301,
"autogenerated": false,
"ratio": 3.5125925925925925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45452792226925925,
"avg_score": null,
"num_lines": null
} |
""" align_reports.py
Usage: align_reports.py <report> [--gabor=<REPORT>]
Options:
--gabor=<REPORT> [default: gabor_report.txt]
"""
from collections import OrderedDict
__author__ = 'victor'
from docopt import docopt
from tabulate import tabulate
if __name__ == '__main__':
    args = docopt(__doc__)

    # Parse the neural-net classification report (sklearn-style table).
    report = OrderedDict()
    with open(args['<report>']) as f:
        lines = f.readlines()[2:-2]  # first two lines are headers, last two lines are averages
        for line in lines:
            # no_relation       0.86      0.34      0.49      6191
            relation, precision, recall, f1, support = line.split()
            # Re-render the three ratio columns as percentages.
            precision, recall, f1 = ["{:.2%}".format(float(e)) for e in [precision, recall, f1]]
            report[relation] = (precision, recall, f1, support)

    # Parse the supervised baseline ("gabor") report.
    gabor = {}
    with open(args['--gabor']) as f:
        for line in f:
            # [org:number_of_employees/members]  #: 9  P: 100.00%  R: 0.00%  F1: 0.00%
            relation, _, support, _, precision, _, recall, _, f1 = line.split()
            gabor[relation.strip('[]')] = (precision, recall, f1, support)

    # Merge both reports row-by-row and emit a side-by-side table.
    header = ['relation', 'nn_precision', 'nn_recall', 'nn_f1', 'nn_support', 'sup_precision', 'sup_recall', 'sup_f1', 'sup_support']
    table = []
    for relation in report.keys():
        precision, recall, f1, support = report[relation]
        # Relations missing from the baseline get N/A placeholders.
        g_precision, g_recall, g_f1, g_support = gabor[relation] if relation in gabor else 4 * ['N/A']
        row = [relation, precision, recall, f1, support, g_precision, g_recall, g_f1, g_support]
        table += [row]

    with open(args['<report>'] + '.comparison', 'wb') as f:
        f.write(tabulate(table, headers=header))
| {
"repo_name": "vzhong/sent2rel",
"path": "align_reports.py",
"copies": "1",
"size": "1716",
"license": "mit",
"hash": -9164656366779958000,
"line_mean": 39.8571428571,
"line_max": 133,
"alpha_frac": 0.5734265734,
"autogenerated": false,
"ratio": 3.30635838150289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.437978495490289,
"avg_score": null,
"num_lines": null
} |
"""Align the raw sentences from Read et al (2012) to the PTB tokenization,
outputting as a .json file. Used in bin/prepare_treebank.py
"""
from __future__ import unicode_literals
import plac
from pathlib import Path
import json
from os import path
import os
from spacy.munge import read_ptb
from spacy.munge.read_ontonotes import sgml_extract
def read_odc(section_loc):
    """Read a Read-et-al section file into a list of paragraphs, each a
    list of patched raw sentence strings."""
    # Arbitrary patches applied to the _raw_ text to promote alignment.
    patches = (
        ('. . . .', '...'),
        ('....', '...'),
        ('Co..', 'Co.'),
        ("`", "'"),
        # OntoNotes specific
        (" S$", " US$"),
        ("Showtime or a sister service", "Showtime or a service"),
        ("The hotel and gaming company", "The hotel and Gaming company"),
        ("I'm-coming-down-your-throat", "I-'m coming-down-your-throat"),
    )
    paragraphs = []
    current = []
    with open(section_loc) as handle:
        for raw_line in handle:
            if not raw_line.startswith('['):
                # A non-bracketed line terminates the current paragraph.
                paragraphs.append(current)
                current = []
                continue
            # Sentences look like "[id|text ...]"; keep only the text part.
            text = raw_line.split('|', 1)[1].strip()
            for find, replace in patches:
                text = text.replace(find, replace)
            current.append(text)
    paragraphs.append(current)
    return paragraphs
def read_ptb_sec(ptb_sec_dir):
    """Parse every .parse/.mrg file in a PTB section directory.

    Returns a list with one entry per file, each a list of
    (filename_stem, sentence_string) pairs.
    """
    section = Path(ptb_sec_dir)
    parsed_files = []
    for loc in section.iterdir():
        if not str(loc).endswith(('parse', 'mrg')):
            continue
        stem = loc.parts[-1].split('.')[0]
        with loc.open() as handle:
            contents = handle.read()
        sents = []
        for parse_str in read_ptb.split(contents):
            words, brackets = read_ptb.parse(parse_str, strip_bad_periods=True)
            # Undo PTB token escaping before joining into a sentence.
            sents.append((stem, ' '.join(_reform_ptb_word(w) for w in words)))
        parsed_files.append(sents)
    return parsed_files
def _reform_ptb_word(tok):
tok = tok.replace("``", '"')
tok = tok.replace("`", "'")
tok = tok.replace("''", '"')
tok = tok.replace('\\', '')
tok = tok.replace('-LCB-', '{')
tok = tok.replace('-RCB-', '}')
tok = tok.replace('-RRB-', ')')
tok = tok.replace('-LRB-', '(')
tok = tok.replace("'T-", "'T")
return tok
def get_alignment(raw_by_para, ptb_by_file):
    """Align raw sentences to PTB sentences, marking token splits with <SEP>.

    Returns a list of (file_id, para_id, file_sent_id, (ptb_id, sepped))
    tuples, where `sepped` is the PTB sentence with characters absent from
    the raw text replaced by '<SEP>'. Raises a bare Exception when the
    bookkeeping of skipped sentences does not add up.
    """
    # These are list-of-lists, by paragraph and file respectively.
    # Flatten them into a list of (outer_id, inner_id, item) triples
    raw_sents = _flatten(raw_by_para)
    ptb_sents = list(_flatten(ptb_by_file))
    output = []
    ptb_idx = 0
    n_skipped = 0
    skips = []
    for (p_id, p_sent_id, raw) in raw_sents:
        if ptb_idx >= len(ptb_sents):
            # Ran out of PTB sentences; count remaining raw ones as skipped.
            n_skipped += 1
            continue
        f_id, f_sent_id, (ptb_id, ptb) = ptb_sents[ptb_idx]
        alignment = align_chars(raw, ptb)
        if not alignment:
            # Mismatched pair: remember it for diagnostics and keep the PTB
            # cursor in place so the next raw sentence can try to match.
            skips.append((ptb, raw))
            n_skipped += 1
            continue
        ptb_idx += 1
        sepped = []
        for i, c in enumerate(ptb):
            if alignment[i] is False:
                sepped.append('<SEP>')
            else:
                sepped.append(c)
        output.append((f_id, p_id, f_sent_id, (ptb_id, ''.join(sepped))))
    # Sanity check: every raw sentence must be either aligned or skipped.
    if n_skipped + len(ptb_sents) != len(raw_sents):
        for ptb, raw in skips:
            print(ptb)
            print(raw)
        raise Exception
    return output
def _flatten(nested):
flat = []
for id1, inner in enumerate(nested):
flat.extend((id1, id2, item) for id2, item in enumerate(inner))
return flat
def align_chars(raw, ptb):
    """Map each PTB character position to its raw-text source position.

    Returns a list the length of ptb holding: an int raw index for a matched
    character, True for a space present in both strings, or False for a
    position never matched (a PTB-only space).  Returns None when the two
    strings disagree once all spaces are removed.
    """
    # Without agreement modulo spaces no character alignment is possible.
    if raw.replace(' ', '') != ptb.replace(' ', ''):
        return None
    i = 0
    j = 0
    length = len(raw)
    alignment = [False for _ in range(len(ptb))]
    while i < length:
        # Note: there is no final else — after any of the space branches the
        # assert/assignment below still run in the same iteration, so one pass
        # can consume a space pair AND the following character pair.
        if raw[i] == ' ' and ptb[j] == ' ':
            alignment[j] = True
            i += 1
            j += 1
        elif raw[i] == ' ':
            i += 1
        elif ptb[j] == ' ':
            j += 1
        # NOTE(review): raw[i:1] in the assert message looks like a typo for
        # raw[i:]; a trailing space in raw would also IndexError here — confirm
        # inputs are space-normalized upstream.
        assert raw[i].lower() == ptb[j].lower(), raw[i:1]
        alignment[j] = i
        i += 1; j += 1
    return alignment
def group_into_files(sents):
    """Regroup a flat (f_id, p_id, s_id, (filename, sent)) stream by file.

    Returns [(filename, [(f_id, p_id, s_id, sent), ...]), ...] in input order.
    """
    output = []
    current = []
    current_fn = None
    prev_f_id = 0
    for f_id, p_id, s_id, (filename, sent) in sents:
        if f_id != prev_f_id:
            # A new file id starts: flush the sentences collected so far.
            assert current_fn is not None
            output.append((current_fn, current))
            current = []
        current_fn = filename
        current.append((f_id, p_id, s_id, sent))
        prev_f_id = f_id
    if current:
        assert current_fn is not None
        output.append((current_fn, current))
    return output
def group_into_paras(sents):
    """Regroup a flat (f_id, p_id, s_id, sent) stream into paragraphs.

    Returns a list of paragraphs, each a list of the sentence payloads.
    """
    output = []
    para = []
    prev_p_id = 0
    for f_id, p_id, s_id, sent in sents:
        # Flush only a non-empty paragraph when the paragraph id changes.
        if p_id != prev_p_id and para:
            output.append(para)
            para = []
        para.append(sent)
        prev_p_id = p_id
    if para:
        output.append(para)
    return output
def get_sections(odc_dir, ptb_dir, out_dir):
    """Yield (odc_loc, ptb_sec_dir, out_loc) triples for WSJ sections 00-24."""
    for i in range(25):
        section = '%02d' % i  # zero-padded two-digit section name
        yield (path.join(odc_dir, 'wsj%s.txt' % section),
               path.join(ptb_dir, section),
               path.join(out_dir, 'wsj%s.json' % section))
def align_section(raw_paragraphs, ptb_files):
    """Align raw text against PTB files and regroup the result per file.

    Returns [(filename, paragraphs)] where paragraphs is a list of lists of
    aligned sentence strings.
    """
    aligned = get_alignment(raw_paragraphs, ptb_files)
    grouped = group_into_files(aligned)
    return [(filename, group_into_paras(file_sents))
            for filename, file_sents in grouped]
def do_wsj(odc_dir, ptb_dir, out_dir):
    """Align every WSJ section and dump each result as JSON to out_dir."""
    for odc_loc, ptb_sec_dir, out_loc in get_sections(odc_dir, ptb_dir, out_dir):
        aligned = align_section(read_odc(odc_loc), read_ptb_sec(ptb_sec_dir))
        with open(out_loc, 'w') as out_file:
            json.dump(aligned, out_file)
def do_web(src_dir, onto_dir, out_dir):
    """Pair OntoNotes web-text annotations with their SGML source documents.

    Currently only prints 'Found'/'Miss' per document pair; the parsed data
    is read but not written anywhere (out_dir is unused).
    """
    # map.txt rows look like "<annotation-file> <source-file>"; keep 2-column rows.
    mapping = dict(line.split() for line in open(path.join(onto_dir, 'map.txt'))
                   if len(line.split()) == 2)
    for annot_fn, src_fn in mapping.items():
        # Only English annotation files are considered.
        if not annot_fn.startswith('eng'):
            continue
        ptb_loc = path.join(onto_dir, annot_fn + '.parse')
        src_loc = path.join(src_dir, src_fn + '.sgm')
        if path.exists(ptb_loc) and path.exists(src_loc):
            src_doc = sgml_extract(open(src_loc).read())
            ptb_doc = [read_ptb.parse(parse_str, strip_bad_periods=True)[0]
                       for parse_str in read_ptb.split(open(ptb_loc).read())]
            # NOTE(review): src_doc and ptb_doc are built but never used —
            # the actual alignment/output step appears unimplemented.
            print('Found')
        else:
            print('Miss')
def may_mkdir(parent, *subdirs):
    """Create parent and each nested subdirectory if missing.

    E.g. may_mkdir('out', 'wsj', 'align') ensures out/, out/wsj/ and
    out/wsj/align/ all exist.  Existing directories are left untouched.
    """
    if not path.exists(parent):
        os.mkdir(parent)
    # The range must reach len(subdirs) so the deepest level is created too;
    # the previous range(1, len(subdirs)) stopped one short and never made
    # subdirs[-1] (e.g. out/wsj/align), which main()'s callers rely on.
    for i in range(1, len(subdirs) + 1):
        directories = (parent,) + subdirs[:i]
        subdir = path.join(*directories)
        if not path.exists(subdir):
            os.mkdir(subdir)
def main(odc_dir, onto_dir, out_dir):
    """Build aligned corpora under out_dir (web genre only; WSJ disabled)."""
    for genre in ('wsj', 'web'):
        may_mkdir(out_dir, genre, 'align')
    #do_wsj(odc_dir, path.join(ontonotes_dir, 'wsj', 'orig'),
    #       path.join(out_dir, 'wsj', 'align'))
    web_src = path.join(onto_dir, 'data', 'english', 'metadata', 'context',
                        'wb', 'sel')
    web_onto = path.join(onto_dir, 'data', 'english', 'annotations', 'wb')
    do_web(web_src, web_onto, path.join(out_dir, 'web', 'align'))
if __name__ == '__main__':
    # plac maps main()'s positional parameters onto command-line arguments.
    plac.call(main)
| {
"repo_name": "rebeling/spaCy",
"path": "spacy/munge/align_raw.py",
"copies": "9",
"size": "7279",
"license": "mit",
"hash": -1558720285742534700,
"line_mean": 29.0785123967,
"line_max": 83,
"alpha_frac": 0.5329028713,
"autogenerated": false,
"ratio": 3.208021154693698,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8240924025993699,
"avg_score": null,
"num_lines": null
} |
"""Align the tissue."""
import skimage.morphology
import skimage.measure
from jicbioimage.core.transform import transformation
from jicbioimage.segment import connected_components
from jicbioimage.transform import (
equalize_adaptive_clahe,
threshold_otsu,
remove_small_objects,
)
from util import argparse_get_image
from transform import rotate, erosion_binary
def find_angle(image):
    """Estimate the tissue orientation as the mean region orientation.

    Segments the image (CLAHE -> Otsu -> erosion -> small-object removal ->
    connected components) and averages the "orientation" property over all
    labelled regions.
    """
    prepared = equalize_adaptive_clahe(image)
    prepared = threshold_otsu(prepared)
    prepared = erosion_binary(prepared, selem=skimage.morphology.disk(3))
    prepared = remove_small_objects(prepared, min_size=5000)
    segmentation = connected_components(prepared, background=0)
    regions = skimage.measure.regionprops(segmentation)
    orientations = [region["orientation"] for region in regions]
    return sum(orientations) / len(orientations)
@transformation
def align(image):
    """Return the image rotated by its estimated tissue orientation."""
    return rotate(image, find_angle(image))
def main():
    """Load the image named on the command line and align it."""
    # NOTE(review): align()'s return value is discarded here.
    align(argparse_get_image())
if __name__ == "__main__":
main()
| {
"repo_name": "JIC-CSB/wheat-leaf-segmentation",
"path": "scripts/align.py",
"copies": "1",
"size": "1066",
"license": "mit",
"hash": -4952143167816337000,
"line_mean": 23.7906976744,
"line_max": 67,
"alpha_frac": 0.7166979362,
"autogenerated": false,
"ratio": 3.577181208053691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4793879144253691,
"avg_score": null,
"num_lines": null
} |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import os.path
import unique
import GO_parsing
import copy
import time
import update
def filepath(filename):
    """Resolve a relative database filename via the unique module."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """List the contents of sub_dir via the unique module."""
    return unique.read_directory(sub_dir)
def makeUnique(item):
    """Return the sorted unique entries of item.

    Unhashable entries (e.g. lists) are keyed as tuples and converted back
    to lists on output.
    """
    seen = {}
    tupled = 0
    for entry in item:
        try:
            seen[entry] = []
        except TypeError:
            # Unhashable entry: key on an immutable tuple copy instead.
            seen[tuple(entry)] = []
            tupled = 1
    uniques = []
    for key in seen:
        uniques.append(key if tupled == 0 else list(key))
    uniques.sort()
    return uniques
def customDeepCopy(db):
    """Copy a dict-of-lists so the value lists are independent of the input.

    Keys whose list is empty are omitted from the copy (matching the
    element-by-element build).
    """
    copied = {}
    for key, values in db.items():
        for element in values:
            copied.setdefault(key, []).append(element)
    return copied
def cleanUpLine(line):
    """Strip newline/carriage-return characters and double quotes from a line."""
    line = string.replace(line,'\n','')
    # NOTE(review): '\c' is not a recognized escape, so this matches a literal
    # backslash followed by 'c' — confirm that is the intent.
    line = string.replace(line,'\c','')
    data = string.replace(line,'\r','')
    data = string.replace(data,'"','')
    return data
################### Import exon coordinate/transcript data from Ensembl
def importEnsExonStructureData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db):
    """Collect UCSC polyA and splicing annotations keyed by Ensembl gene ID.

    Both imports are best-effort: a polyA failure resets the result, while a
    splicing failure leaves the polyA annotations gathered so far intact.
    """
    ensembl_ucsc_splicing_annotations={}
    try: ensembl_ucsc_splicing_annotations = importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,{},'polyA')
    except Exception: ensembl_ucsc_splicing_annotations={}
    try: ensembl_ucsc_splicing_annotations = importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,ensembl_ucsc_splicing_annotations,'splicing')
    except Exception: null=[]
    return ensembl_ucsc_splicing_annotations
def importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,ensembl_ucsc_splicing_annotations,data_type):
    """Parse a UCSC knownAlt/polyaDb table and attach its events to Ensembl genes.

    data_type is 'splicing' or 'polyA'.  Events are matched to genes by
    chromosomal overlap; altPromoter coordinates are widened to the extremes
    of the exons they overlap.  Results are appended into (and returned as)
    ensembl_ucsc_splicing_annotations: {ensembl_id: [(start, stop, annots)]}.
    """
    ucsc_gene_coordinates={}
    if data_type == 'splicing': filename = 'AltDatabase/ucsc/'+species+'/knownAlt.txt'
    if data_type == 'polyA': filename = 'AltDatabase/ucsc/'+species+'/polyaDb.txt'
    start_time = time.time()
    fn=filepath(filename); x=0
    verifyFile(filename,species) ### Makes sure file is local and if not downloads
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        if data_type == 'splicing':
            regionid,chr,start,stop,event_call,null,strand = string.split(data,'\t')
        if data_type == 'polyA':
            event_call = 'alternative_polyA'
            ### The polyA table ships in two column layouts; try the longer first.
            try: regionid,chr,start,stop,null,null,strand,start,stop = string.split(data,'\t')
            except Exception: chr,start,stop,annotation,null,strand = string.split(data,'\t')
        start = int(start)+1; stop = int(stop); chr = string.replace(chr,'chr','') ###checked it out and all UCSC starts are -1 from the correspond Ensembl start
        if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
        if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
        try: ucsc_gene_coordinates[chr,start,stop,strand].append(event_call)
        except KeyError: ucsc_gene_coordinates[chr,start,stop,strand] = [event_call]
    print len(ucsc_gene_coordinates),'UCSC annotations imported.'
    ### Re-key Ensembl genes by chromosome -> (gene_start,gene_stop) for overlap search.
    ensembl_chr_coordinate_db={}
    for gene in ensembl_gene_coordinates:
        a = ensembl_gene_coordinates[gene]; a.sort()
        gene_start = a[0]; gene_stop = a[-1]
        chr,strand = ensembl_annotations[gene]
        if chr in ensembl_chr_coordinate_db:
            ensembl_gene_coordinates2 = ensembl_chr_coordinate_db[chr]
            ensembl_gene_coordinates2[(gene_start,gene_stop)] = gene,strand
        else:
            ensembl_gene_coordinates2={}; ensembl_gene_coordinates2[(gene_start,gene_stop)] = gene,strand
            ensembl_chr_coordinate_db[chr]=ensembl_gene_coordinates2
    ### Same chromosome re-keying for the UCSC events.
    ucsc_chr_coordinate_db={}
    for geneid in ucsc_gene_coordinates:
        chr,start,stop,strand = geneid
        if chr in ucsc_chr_coordinate_db:
            ucsc_gene_coordinates2 = ucsc_chr_coordinate_db[chr]
            ucsc_gene_coordinates2[(start,stop)] = geneid,strand
        else:
            ucsc_gene_coordinates2={}; ucsc_gene_coordinates2[(start,stop)] = geneid,strand
            ucsc_chr_coordinate_db[chr] = ucsc_gene_coordinates2
    ensembl_transcript_clusters,no_match_list = getChromosomalOveralap(ucsc_chr_coordinate_db,ensembl_chr_coordinate_db)
    ensembl_ucsc_splicing_event_db = {}
    for clusterid in ensembl_transcript_clusters:
        ens_geneids = ensembl_transcript_clusters[clusterid]
        if len(ens_geneids)==1: ###If a cluster ID associates with multiple Ensembl IDs
            ens_geneid = ens_geneids[0]
            annotations = ucsc_gene_coordinates[clusterid]
            try: ensembl_ucsc_splicing_event_db[ens_geneid].append((clusterid,annotations))
            except KeyError: ensembl_ucsc_splicing_event_db[ens_geneid] = [(clusterid,annotations)]
    for ensembl in ensembl_ucsc_splicing_event_db:
        chr,strand = ensembl_annotations[ensembl]
        key = ensembl,chr,strand
        ###Look through each of the annotations (with coordinate info) for those that are specifically AltPromoters
        ###Their coordinates occur overlapping but before the exon, so we want to change the coordinates
        for (clusterid,annotations) in ensembl_ucsc_splicing_event_db[ensembl]:
            new_coordinates = []
            if 'altPromoter' in annotations:
                chr,bp1,ep1,strand = clusterid
                if key in exon_annotation_db:
                    exon_info_ls = exon_annotation_db[key]
                    for exon_info in exon_info_ls:
                        bp2 = exon_info[0]; ep2 = exon_info[0]; add = 0 ### Changed ep2 to be the second object in the list (previously it was also the first) 4-5-08
                        if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add = 1 ###if the start or stop of the UCSC region is inside the Ensembl start and stop
                        elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add = 1 ###opposite
                        if add == 1:
                            new_coordinates += [bp1,bp2,ep1,ep2] ###record all coordinates and take the extreme values
                new_coordinates.sort()
                if len(new_coordinates)>0:
                    new_start = new_coordinates[0]; new_stop = new_coordinates[-1]
                    clusterid = chr,new_start,new_stop,strand
            annotation_str = string.join(annotations,'|')
            ###replace with new or old information
            start = clusterid[1]; stop = clusterid[2]
            try: ensembl_ucsc_splicing_annotations[ensembl].append((start,stop,annotation_str))
            except KeyError: ensembl_ucsc_splicing_annotations[ensembl] = [(start,stop,annotation_str)]
    if data_type == 'polyA':
        ### Only keep entries for which there are mulitple polyAs per gene
        ensembl_ucsc_splicing_annotations_multiple={}
        for ensembl in ensembl_ucsc_splicing_annotations:
            if len(ensembl_ucsc_splicing_annotations[ensembl])>1:
                ensembl_ucsc_splicing_annotations_multiple[ensembl] = ensembl_ucsc_splicing_annotations[ensembl]
        ensembl_ucsc_splicing_annotations = ensembl_ucsc_splicing_annotations_multiple
    print len(ensembl_ucsc_splicing_annotations),'genes with events added from UCSC annotations.'
    return ensembl_ucsc_splicing_annotations
def getChromosomalOveralap(ucsc_chr_db,ensembl_chr_db):
    """Assign UCSC clusters to the Ensembl genes whose span fully contains them.

    Returns ({ucsc_clusterid: [ensembl_ids]}, [clusterids with no match]).
    """
    print len(ucsc_chr_db),len(ensembl_chr_db); start_time = time.time()
    """Find transcript_clusters that have overlapping start positions with Ensembl gene start and end (based on first and last exons)"""
    ###exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]
    y = 0; l =0; ensembl_transcript_clusters={}; no_match_list=[]
    ###(bp1,ep1) = (47211632,47869699); (bp2,ep2) = (47216942, 47240877)
    for chr in ucsc_chr_db:
        ucsc_db = ucsc_chr_db[chr]
        try:
            for (bp1,ep1) in ucsc_db:
                #print (bp1,ep1)
                x = 0
                gene_clusterid,ucsc_strand = ucsc_db[(bp1,ep1)]
                try:
                    ensembl_db = ensembl_chr_db[chr]
                    for (bp2,ep2) in ensembl_db:
                        y += 1; ensembl,ens_strand = ensembl_db[(bp2,ep2)]
                        #print (bp1,ep1),(bp2,ep2);kill
                        if ucsc_strand == ens_strand:
                            ###if the two gene location ranges overlapping
                            ##########FORCE UCSC mRNA TO EXIST WITHIN THE SPACE OF ENSEMBL TO PREVENT TRANSCRIPT CLUSTER EXCLUSION IN ExonArrayEnsemblRules
                            add = 0
                            if (bp1 >= bp2) and (ep2>= ep1): add = 1 ###if the annotations reside within the gene's start and stop position
                            #if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add = 1 ###if the start or stop of the UCSC region is inside the Ensembl start and stop
                            #elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add = 1 ###opposite
                            if add == 1:
                                #if (bp1 >= bp2) and (ep2>= ep1): a = ''
                                #else: print gene_clusterid,ensembl,bp1,bp2,ep1,ep2;kill
                                x = 1
                                try: ensembl_transcript_clusters[gene_clusterid].append(ensembl)
                                except KeyError: ensembl_transcript_clusters[gene_clusterid] = [ensembl]
                                l += 1
                except KeyError: null=[]#; print chr, 'not found'
                if x == 0: no_match_list.append(gene_clusterid)
        except ValueError:
            for y in ucsc_db: print y;kill
    end_time = time.time(); time_diff = int(end_time-start_time)
    print "UCSC genes matched up to Ensembl in %d seconds" % time_diff
    print "UCSC Transcript Clusters (or accession numbers) overlapping with Ensembl:",len(ensembl_transcript_clusters)
    print "With NO overlapp",len(no_match_list)
    return ensembl_transcript_clusters,no_match_list
def reformatPolyAdenylationCoordinates(species,force):
    """ PolyA annotations are currently only available from UCSC for human, but flat file
    annotations from 2003-2006 are available for multiple species. Convert these to BED format"""
    version={}
    version['Rn'] = '2003(rn3)'
    version['Dr'] = '2003(zv4)'
    version['Gg'] = '2004(galGal2)'
    version['Hs'] = '2006(hg8)'
    version['Mm'] = '2004(mm5)'
    print 'Exporting polyADB_2 coordinates as BED for',species
    ### Obtain the necessary database files
    url = 'http://altanalyze.org/archiveDBs/all/polyAsite.txt'
    output_dir = 'AltDatabase/ucsc/'+species + '/'
    if force == 'yes':
        filename, status = update.download(url,output_dir,'')
    else: filename = output_dir+'polyAsite.txt'
    ### Import the refseq to Ensembl information
    import gene_associations; import OBO_import; import EnsemblImport; import export
    ### Prefer UniGene->Ensembl mappings; fall back to EntrezGene if absent.
    try:
        ens_unigene = gene_associations.getGeneToUid(species,'Ensembl-UniGene')
        print len(ens_unigene),'Ensembl-UniGene entries imported'
        external_ensembl = OBO_import.swapKeyValues(ens_unigene); use_entrez='no'
    except Exception:
        ens_entrez = gene_associations.getGeneToUid(species,'Ensembl-EntrezGene')
        print len(ens_entrez),'Ensembl-EntrezGene entries imported'
        external_ensembl = OBO_import.swapKeyValues(ens_entrez); use_entrez='yes'
    gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
    export_bedfile = output_dir+species+'_polyADB_2_predictions.bed'
    print 'exporting',export_bedfile
    export_data = export.ExportFile(export_bedfile)
    header = '#'+species+'\t'+'polyADB_2'+'\t'+version[species]+'\n'
    export_data.write(header)
    fn=filepath(filename); x=0; not_found={}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        if x==0: x=1
        else:
            siteid,llid,chr,sitenum,position,supporting_EST,cleavage = string.split(data,'\t')
            if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
            if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
            if species in siteid:
                if 'NA' not in chr: chr = 'chr'+chr
                strand = '+'; geneid = siteid
                ### BED is zero-based half-open, hence position-1 for the start.
                pos_start = str(int(position)-1); pos_end = position
                if use_entrez=='no':
                    external_geneid = string.join(string.split(siteid,'.')[:2],'.')
                else: external_geneid=llid
                if external_geneid in external_ensembl:
                    ens_geneid = external_ensembl[external_geneid][0]
                    geneid += '-'+ens_geneid
                    chr,strand,start,end = gene_location_db[ens_geneid]
                else:
                    not_found[external_geneid]=[]
                bed_format = string.join([chr,pos_start,pos_end,geneid,'0','-'],'\t')+'\n' ### We don't know the strand, so write out both strands
                export_data.write(bed_format)
                bed_format = string.join([chr,pos_start,pos_end,geneid,'0',strand],'\t')+'\n'
                export_data.write(bed_format)
    export_data.close()
def verifyFile(filename,species_name):
    """Check that filename exists locally with content; download it if empty.

    With species_name == 'counts' the line count (capped at 11) is returned
    instead of triggering a download.
    """
    fn=filepath(filename); counts=0
    try:
        for line in open(fn,'rU').xreadlines():
            counts+=1
            if counts>10: break
    except Exception:
        counts=0
    if species_name == 'counts': ### Used if the file cannot be downloaded from http://www.altanalyze.org
        return counts
    elif counts == 0:
        if species_name in filename: server_folder = species_name ### Folder equals species unless it is a universal file
        elif 'Mm' in filename: server_folder = 'Mm' ### For PicTar
        else: server_folder = 'all'
        print 'Downloading:',server_folder,filename
        update.downloadCurrentVersion(filename,server_folder,'txt')
    else:
        return counts
if __name__ == '__main__':
    species = 'Hs'; #species_full = 'Drosophila_melanogaster'
    filename = 'AltDatabase/ucsc/'+species+'/polyaDb.txt'
    verifyFile(filename,species) ### Makes sure file is local and if not downloads.
    sys.exit()
    ### NOTE(review): everything below the sys.exit() above is unreachable
    ### scratch code kept for manual testing; species_full is only defined
    ### in the comment above, so the getFTPData calls would NameError if run.
    importEnsExonStructureData(species,[],[],[]);sys.exit()
    reformatPolyAdenylationCoordinates(species,'no');sys.exit()
    #test = 'yes'
    #test_gene = ['ENSG00000140153','ENSG00000075413']
    import UCSCImport; import update
    knownAlt_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/database','knownAlt.txt.gz')
    polyA_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/database','polyaDb.txt.gz')
    output_dir = 'AltDatabase/ucsc/'+species + '/'
    UCSCImport.downloadFiles(knownAlt_dir,output_dir); UCSCImport.downloadFiles(polyA_dir,output_dir);sys.exit()
ensembl_ucsc_splicing_annotations = importEnsExonStructureData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db) | {
"repo_name": "wuxue/altanalyze",
"path": "alignToKnownAlt.py",
"copies": "1",
"size": "16386",
"license": "apache-2.0",
"hash": -9221897234071024000,
"line_mean": 53.0825082508,
"line_max": 187,
"alpha_frac": 0.6408519468,
"autogenerated": false,
"ratio": 3.466469219378041,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4607321166178041,
"avg_score": null,
"num_lines": null
} |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
from build_scripts import GO_parsing
import copy
import time
import update
def filepath(filename):
    """Resolve a relative database filename via the unique module."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """List the contents of sub_dir via the unique module."""
    return unique.read_directory(sub_dir)
def makeUnique(item):
    """Return the sorted unique entries of item.

    Unhashable entries (e.g. lists) are keyed as tuples and converted back
    to lists on output.
    """
    seen = {}
    tupled = 0
    for entry in item:
        try:
            seen[entry] = []
        except TypeError:
            # Unhashable entry: key on an immutable tuple copy instead.
            seen[tuple(entry)] = []
            tupled = 1
    uniques = []
    for key in seen:
        uniques.append(key if tupled == 0 else list(key))
    uniques.sort()
    return uniques
def customDeepCopy(db):
    """Copy a dict-of-lists so the value lists are independent of the input.

    Keys whose list is empty are omitted from the copy (matching the
    element-by-element build).
    """
    copied = {}
    for key, values in db.items():
        for element in values:
            copied.setdefault(key, []).append(element)
    return copied
def cleanUpLine(line):
    """Strip newline/carriage-return characters and double quotes from a line."""
    line = string.replace(line,'\n','')
    # NOTE(review): '\c' is not a recognized escape, so this matches a literal
    # backslash followed by 'c' — confirm that is the intent.
    line = string.replace(line,'\c','')
    data = string.replace(line,'\r','')
    data = string.replace(data,'"','')
    return data
################### Import exon coordinate/transcript data from Ensembl
def importEnsExonStructureData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db):
    """Collect UCSC polyA and splicing annotations keyed by Ensembl gene ID.

    Both imports are best-effort: a polyA failure resets the result, while a
    splicing failure leaves the polyA annotations gathered so far intact.
    """
    ensembl_ucsc_splicing_annotations={}
    try: ensembl_ucsc_splicing_annotations = importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,{},'polyA')
    except Exception: ensembl_ucsc_splicing_annotations={}
    try: ensembl_ucsc_splicing_annotations = importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,ensembl_ucsc_splicing_annotations,'splicing')
    except Exception: null=[]
    return ensembl_ucsc_splicing_annotations
def importUCSCAnnotationData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db,ensembl_ucsc_splicing_annotations,data_type):
    """Parse a UCSC knownAlt/polyaDb table and attach its events to Ensembl genes.

    data_type is 'splicing' or 'polyA'.  Events are matched to genes by
    chromosomal overlap; altPromoter coordinates are widened to the extremes
    of the exons they overlap.  Results are appended into (and returned as)
    ensembl_ucsc_splicing_annotations: {ensembl_id: [(start, stop, annots)]}.
    """
    ucsc_gene_coordinates={}
    if data_type == 'splicing': filename = 'AltDatabase/ucsc/'+species+'/knownAlt.txt'
    if data_type == 'polyA': filename = 'AltDatabase/ucsc/'+species+'/polyaDb.txt'
    start_time = time.time()
    fn=filepath(filename); x=0
    verifyFile(filename,species) ### Makes sure file is local and if not downloads
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        if data_type == 'splicing':
            regionid,chr,start,stop,event_call,null,strand = string.split(data,'\t')
        if data_type == 'polyA':
            event_call = 'alternative_polyA'
            ### The polyA table ships in two column layouts; try the longer first.
            try: regionid,chr,start,stop,null,null,strand,start,stop = string.split(data,'\t')
            except Exception: chr,start,stop,annotation,null,strand = string.split(data,'\t')
        start = int(start)+1; stop = int(stop); chr = string.replace(chr,'chr','') ###checked it out and all UCSC starts are -1 from the correspond Ensembl start
        if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
        if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
        try: ucsc_gene_coordinates[chr,start,stop,strand].append(event_call)
        except KeyError: ucsc_gene_coordinates[chr,start,stop,strand] = [event_call]
    print len(ucsc_gene_coordinates),'UCSC annotations imported.'
    ### Re-key Ensembl genes by chromosome -> (gene_start,gene_stop) for overlap search.
    ensembl_chr_coordinate_db={}
    for gene in ensembl_gene_coordinates:
        a = ensembl_gene_coordinates[gene]; a.sort()
        gene_start = a[0]; gene_stop = a[-1]
        chr,strand = ensembl_annotations[gene]
        if chr in ensembl_chr_coordinate_db:
            ensembl_gene_coordinates2 = ensembl_chr_coordinate_db[chr]
            ensembl_gene_coordinates2[(gene_start,gene_stop)] = gene,strand
        else:
            ensembl_gene_coordinates2={}; ensembl_gene_coordinates2[(gene_start,gene_stop)] = gene,strand
            ensembl_chr_coordinate_db[chr]=ensembl_gene_coordinates2
    ### Same chromosome re-keying for the UCSC events.
    ucsc_chr_coordinate_db={}
    for geneid in ucsc_gene_coordinates:
        chr,start,stop,strand = geneid
        if chr in ucsc_chr_coordinate_db:
            ucsc_gene_coordinates2 = ucsc_chr_coordinate_db[chr]
            ucsc_gene_coordinates2[(start,stop)] = geneid,strand
        else:
            ucsc_gene_coordinates2={}; ucsc_gene_coordinates2[(start,stop)] = geneid,strand
            ucsc_chr_coordinate_db[chr] = ucsc_gene_coordinates2
    ensembl_transcript_clusters,no_match_list = getChromosomalOveralap(ucsc_chr_coordinate_db,ensembl_chr_coordinate_db)
    ensembl_ucsc_splicing_event_db = {}
    for clusterid in ensembl_transcript_clusters:
        ens_geneids = ensembl_transcript_clusters[clusterid]
        if len(ens_geneids)==1: ###If a cluster ID associates with multiple Ensembl IDs
            ens_geneid = ens_geneids[0]
            annotations = ucsc_gene_coordinates[clusterid]
            try: ensembl_ucsc_splicing_event_db[ens_geneid].append((clusterid,annotations))
            except KeyError: ensembl_ucsc_splicing_event_db[ens_geneid] = [(clusterid,annotations)]
    for ensembl in ensembl_ucsc_splicing_event_db:
        chr,strand = ensembl_annotations[ensembl]
        key = ensembl,chr,strand
        ###Look through each of the annotations (with coordinate info) for those that are specifically AltPromoters
        ###Their coordinates occur overlapping but before the exon, so we want to change the coordinates
        for (clusterid,annotations) in ensembl_ucsc_splicing_event_db[ensembl]:
            new_coordinates = []
            if 'altPromoter' in annotations:
                chr,bp1,ep1,strand = clusterid
                if key in exon_annotation_db:
                    exon_info_ls = exon_annotation_db[key]
                    for exon_info in exon_info_ls:
                        bp2 = exon_info[0]; ep2 = exon_info[0]; add = 0 ### Changed ep2 to be the second object in the list (previously it was also the first) 4-5-08
                        if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add = 1 ###if the start or stop of the UCSC region is inside the Ensembl start and stop
                        elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add = 1 ###opposite
                        if add == 1:
                            new_coordinates += [bp1,bp2,ep1,ep2] ###record all coordinates and take the extreme values
                new_coordinates.sort()
                if len(new_coordinates)>0:
                    new_start = new_coordinates[0]; new_stop = new_coordinates[-1]
                    clusterid = chr,new_start,new_stop,strand
            annotation_str = string.join(annotations,'|')
            ###replace with new or old information
            start = clusterid[1]; stop = clusterid[2]
            try: ensembl_ucsc_splicing_annotations[ensembl].append((start,stop,annotation_str))
            except KeyError: ensembl_ucsc_splicing_annotations[ensembl] = [(start,stop,annotation_str)]
    if data_type == 'polyA':
        ### Only keep entries for which there are mulitple polyAs per gene
        ensembl_ucsc_splicing_annotations_multiple={}
        for ensembl in ensembl_ucsc_splicing_annotations:
            if len(ensembl_ucsc_splicing_annotations[ensembl])>1:
                ensembl_ucsc_splicing_annotations_multiple[ensembl] = ensembl_ucsc_splicing_annotations[ensembl]
        ensembl_ucsc_splicing_annotations = ensembl_ucsc_splicing_annotations_multiple
    print len(ensembl_ucsc_splicing_annotations),'genes with events added from UCSC annotations.'
    return ensembl_ucsc_splicing_annotations
def getChromosomalOveralap(ucsc_chr_db,ensembl_chr_db):
    """Assign UCSC clusters to the Ensembl genes whose span fully contains them.

    Returns ({ucsc_clusterid: [ensembl_ids]}, [clusterids with no match]).
    """
    print len(ucsc_chr_db),len(ensembl_chr_db); start_time = time.time()
    """Find transcript_clusters that have overlapping start positions with Ensembl gene start and end (based on first and last exons)"""
    ###exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]
    y = 0; l =0; ensembl_transcript_clusters={}; no_match_list=[]
    ###(bp1,ep1) = (47211632,47869699); (bp2,ep2) = (47216942, 47240877)
    for chr in ucsc_chr_db:
        ucsc_db = ucsc_chr_db[chr]
        try:
            for (bp1,ep1) in ucsc_db:
                #print (bp1,ep1)
                x = 0
                gene_clusterid,ucsc_strand = ucsc_db[(bp1,ep1)]
                try:
                    ensembl_db = ensembl_chr_db[chr]
                    for (bp2,ep2) in ensembl_db:
                        y += 1; ensembl,ens_strand = ensembl_db[(bp2,ep2)]
                        #print (bp1,ep1),(bp2,ep2);kill
                        if ucsc_strand == ens_strand:
                            ###if the two gene location ranges overlapping
                            ##########FORCE UCSC mRNA TO EXIST WITHIN THE SPACE OF ENSEMBL TO PREVENT TRANSCRIPT CLUSTER EXCLUSION IN ExonArrayEnsemblRules
                            add = 0
                            if (bp1 >= bp2) and (ep2>= ep1): add = 1 ###if the annotations reside within the gene's start and stop position
                            #if ((bp1 >= bp2) and (ep2 >= bp1)) or ((ep1 >= bp2) and (ep2 >= ep1)): add = 1 ###if the start or stop of the UCSC region is inside the Ensembl start and stop
                            #elif ((bp2 >= bp1) and (ep1 >= bp2)) or ((ep2 >= bp1) and (ep1 >= ep2)): add = 1 ###opposite
                            if add == 1:
                                #if (bp1 >= bp2) and (ep2>= ep1): a = ''
                                #else: print gene_clusterid,ensembl,bp1,bp2,ep1,ep2;kill
                                x = 1
                                try: ensembl_transcript_clusters[gene_clusterid].append(ensembl)
                                except KeyError: ensembl_transcript_clusters[gene_clusterid] = [ensembl]
                                l += 1
                except KeyError: null=[]#; print chr, 'not found'
                if x == 0: no_match_list.append(gene_clusterid)
        except ValueError:
            for y in ucsc_db: print y;kill
    end_time = time.time(); time_diff = int(end_time-start_time)
    print "UCSC genes matched up to Ensembl in %d seconds" % time_diff
    print "UCSC Transcript Clusters (or accession numbers) overlapping with Ensembl:",len(ensembl_transcript_clusters)
    print "With NO overlapp",len(no_match_list)
    return ensembl_transcript_clusters,no_match_list
def reformatPolyAdenylationCoordinates(species,force):
    """ PolyA annotations are currently only available from UCSC for human, but flat file
    annotations from 2003-2006 are available for multiple species. Convert these to BED format"""
    version={}
    version['Rn'] = '2003(rn3)'
    version['Dr'] = '2003(zv4)'
    version['Gg'] = '2004(galGal2)'
    version['Hs'] = '2006(hg8)'
    version['Mm'] = '2004(mm5)'
    print 'Exporting polyADB_2 coordinates as BED for',species
    ### Obtain the necessary database files
    url = 'http://altanalyze.org/archiveDBs/all/polyAsite.txt'
    output_dir = 'AltDatabase/ucsc/'+species + '/'
    if force == 'yes':
        filename, status = update.download(url,output_dir,'')
    else: filename = output_dir+'polyAsite.txt'
    ### Import the refseq to Ensembl information
    import gene_associations; from import_scripts import OBO_import; from build_scripts import EnsemblImport; import export
    ### Prefer UniGene->Ensembl mappings; fall back to EntrezGene if absent.
    try:
        ens_unigene = gene_associations.getGeneToUid(species,'Ensembl-UniGene')
        print len(ens_unigene),'Ensembl-UniGene entries imported'
        external_ensembl = OBO_import.swapKeyValues(ens_unigene); use_entrez='no'
    except Exception:
        ens_entrez = gene_associations.getGeneToUid(species,'Ensembl-EntrezGene')
        print len(ens_entrez),'Ensembl-EntrezGene entries imported'
        external_ensembl = OBO_import.swapKeyValues(ens_entrez); use_entrez='yes'
    gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
    export_bedfile = output_dir+species+'_polyADB_2_predictions.bed'
    print 'exporting',export_bedfile
    export_data = export.ExportFile(export_bedfile)
    header = '#'+species+'\t'+'polyADB_2'+'\t'+version[species]+'\n'
    export_data.write(header)
    fn=filepath(filename); x=0; not_found={}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        if x==0: x=1
        else:
            siteid,llid,chr,sitenum,position,supporting_EST,cleavage = string.split(data,'\t')
            if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
            if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
            if species in siteid:
                if 'NA' not in chr: chr = 'chr'+chr
                strand = '+'; geneid = siteid
                ### BED is zero-based half-open, hence position-1 for the start.
                pos_start = str(int(position)-1); pos_end = position
                if use_entrez=='no':
                    external_geneid = string.join(string.split(siteid,'.')[:2],'.')
                else: external_geneid=llid
                if external_geneid in external_ensembl:
                    ens_geneid = external_ensembl[external_geneid][0]
                    geneid += '-'+ens_geneid
                    chr,strand,start,end = gene_location_db[ens_geneid]
                else:
                    not_found[external_geneid]=[]
                bed_format = string.join([chr,pos_start,pos_end,geneid,'0','-'],'\t')+'\n' ### We don't know the strand, so write out both strands
                export_data.write(bed_format)
                bed_format = string.join([chr,pos_start,pos_end,geneid,'0',strand],'\t')+'\n'
                export_data.write(bed_format)
    export_data.close()
def verifyFile(filename,species_name):
    """Check that filename exists locally with content; download it if empty.

    With species_name == 'counts' the line count (capped at 11) is returned
    instead of triggering a download.
    """
    fn=filepath(filename); counts=0
    try:
        for line in open(fn,'rU').xreadlines():
            counts+=1
            if counts>10: break
    except Exception:
        counts=0
    if species_name == 'counts': ### Used if the file cannot be downloaded from http://www.altanalyze.org
        return counts
    elif counts == 0:
        if species_name in filename: server_folder = species_name ### Folder equals species unless it is a universal file
        elif 'Mm' in filename: server_folder = 'Mm' ### For PicTar
        else: server_folder = 'all'
        print 'Downloading:',server_folder,filename
        update.downloadCurrentVersion(filename,server_folder,'txt')
    else:
        return counts
if __name__ == '__main__':
    species = 'Hs'; #species_full = 'Drosophila_melanogaster'
    filename = 'AltDatabase/ucsc/'+species+'/polyaDb.txt'
    verifyFile(filename,species) ### Makes sure file is local and if not downloads.
    sys.exit()
    ### NOTE(review): everything below the sys.exit() above is unreachable
    ### scratch code kept for manual testing; species_full is only defined
    ### in the comment above, so the getFTPData calls would NameError if run.
    importEnsExonStructureData(species,[],[],[]);sys.exit()
    reformatPolyAdenylationCoordinates(species,'no');sys.exit()
    #test = 'yes'
    #test_gene = ['ENSG00000140153','ENSG00000075413']
    from build_scripts import UCSCImport; import update
    knownAlt_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/database','knownAlt.txt.gz')
    polyA_dir = update.getFTPData('hgdownload.cse.ucsc.edu','/goldenPath/currentGenomes/'+species_full+'/database','polyaDb.txt.gz')
    output_dir = 'AltDatabase/ucsc/'+species + '/'
    UCSCImport.downloadFiles(knownAlt_dir,output_dir); UCSCImport.downloadFiles(polyA_dir,output_dir);sys.exit()
ensembl_ucsc_splicing_annotations = importEnsExonStructureData(species,ensembl_gene_coordinates,ensembl_annotations,exon_annotation_db) | {
"repo_name": "nsalomonis/AltAnalyze",
"path": "build_scripts/alignToKnownAlt.py",
"copies": "1",
"size": "16855",
"license": "apache-2.0",
"hash": -7294693521039946000,
"line_mean": 53.4506578947,
"line_max": 187,
"alpha_frac": 0.630495402,
"autogenerated": false,
"ratio": 3.521730045967405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9510960783028866,
"avg_score": 0.02825293298770797,
"num_lines": 304
} |
# align vertices into one line
# Purpose: Align vertices to straight line based on 2 vertices.
# Usage: select "start vtx", then select vertices need to align, last select "end vtx" then run the following cmd in python cmdline.
# command(python): import af_alignVert;af_alignVert.main()
#Limitation: Must under Maya 2012+
import maya.cmds as mc
import math as mh
def main():
    """Project the in-between selected vertices onto the straight line
    through the first- and last-selected vertices.

    Usage: select the start vertex, then the vertices to align, then the
    end vertex, and run. Requires selection-order tracking (Maya 2012+).
    """
    # Turn on selection-order tracking so ls(os=1) works.
    if (mc.selectPref(tso=1, q=1)) == 0:
        mc.selectPref(tso=1)
    # First and last selected vertices define the line A -> B.
    sel = mc.ls(fl=1, os=1)
    if len(sel) > 2:
        B = mc.pointPosition(sel[0])
        A = mc.pointPosition(sel[-1])
        otherPoint = list(sel)
        otherPoint.remove(sel[0])
        otherPoint.remove(sel[-1])
        # The line direction is the same for every vertex, so compute the
        # length |AB| and unit vector once, outside the loop (the original
        # recomputed both per vertex).
        Mab = mh.sqrt(mh.pow(B[0]-A[0], 2) + mh.pow(B[1]-A[1], 2) + mh.pow(B[2]-A[2], 2))
        Vab = [(B[0]-A[0])/Mab, (B[1]-A[1])/Mab, (B[2]-A[2])/Mab]
        for point in otherPoint:
            C = mc.pointPosition(point)
            Mac = mh.sqrt(mh.pow(C[0]-A[0], 2) + mh.pow(C[1]-A[1], 2) + mh.pow(C[2]-A[2], 2))
            if Mac == 0:
                # Vertex coincides with A: already on the line; the
                # original divided by zero here.
                continue
            Vac = [(C[0]-A[0])/Mac, (C[1]-A[1])/Mac, (C[2]-A[2])/Mac]
            # Scalar projection of AC onto AB gives the distance e along
            # the line; E is the projected position for this vertex.
            cosA = Vab[0]*Vac[0] + Vab[1]*Vac[1] + Vab[2]*Vac[2]
            e = Mac * cosA
            E = [A[0]+Vab[0]*e, A[1]+Vab[1]*e, A[2]+Vab[2]*e]
            mc.move(E[0], E[1], E[2], point, ws=1, wd=1)
| {
"repo_name": "aaronfang/personal_scripts",
"path": "scripts/af_alignVert.py",
"copies": "2",
"size": "1316",
"license": "mit",
"hash": 3585425033533229600,
"line_mean": 41.4516129032,
"line_max": 132,
"alpha_frac": 0.5911854103,
"autogenerated": false,
"ratio": 2.3087719298245615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3899957340124562,
"avg_score": null,
"num_lines": null
} |
"""A limited wrapper for RT4's database structure
This only covers tickets, queues and custom fields of tickets. No users.
A complete list of models are in raw_models.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Customfield(models.Model):
    """A custom-field definition (RT's 'customfields' table, read-only)."""
    name = models.CharField(max_length=200, blank=True, null=True)
    type = models.CharField(max_length=200, blank=True, null=True)
    description = models.CharField(max_length=255, blank=True, null=True)
    sortorder = models.IntegerField()
    # creator/lastupdatedby are raw user ids; users are deliberately not
    # modeled in this wrapper (see module docstring).
    creator = models.IntegerField()
    created = models.DateTimeField(blank=True, null=True)
    lastupdatedby = models.IntegerField()
    lastupdated = models.DateTimeField(blank=True, null=True)
    disabled = models.SmallIntegerField()
    lookuptype = models.CharField(max_length=255)
    pattern = models.CharField(max_length=65536, blank=True, null=True)
    maxvalues = models.IntegerField(blank=True, null=True)
    basedon = models.IntegerField(blank=True, null=True)
    rendertype = models.CharField(max_length=64, blank=True, null=True)
    valuesclass = models.CharField(max_length=64, blank=True, null=True)

    class Meta:
        # Maps onto RT's existing table; Django must not create/migrate it.
        managed = False
        db_table = 'customfields'

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Queue(models.Model):
    """An RT ticket queue (RT's 'queues' table, read-only)."""
    name = models.CharField(max_length=200)
    description = models.CharField(max_length=255, blank=True, null=True)
    correspondaddress = models.CharField(max_length=120, blank=True, null=True)
    commentaddress = models.CharField(max_length=120, blank=True, null=True)
    initialpriority = models.IntegerField()
    finalpriority = models.IntegerField()
    defaultduein = models.IntegerField()
    # creator/lastupdatedby are raw user ids; users are not modeled here.
    creator = models.IntegerField()
    created = models.DateTimeField(blank=True, null=True)
    lastupdatedby = models.IntegerField()
    lastupdated = models.DateTimeField(blank=True, null=True)
    disabled = models.SmallIntegerField()
    subjecttag = models.CharField(max_length=120, blank=True, null=True)
    lifecycle = models.CharField(max_length=32, blank=True, null=True)

    class Meta:
        # Maps onto RT's existing table; Django must not create/migrate it.
        managed = False
        db_table = 'queues'

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Ticket(models.Model):
    """An RT ticket (RT's 'tickets' table, read-only).

    Custom-field values attached to a ticket are reachable via the
    'customfields' reverse relation on TicketCustomfieldValue.
    """
    effectiveid = models.IntegerField()
    queue = models.ForeignKey('Queue', db_column='queue', related_name='tickets')
    type = models.CharField(max_length=16, blank=True, null=True)
    issuestatement = models.IntegerField()
    resolution = models.IntegerField()
    # owner is a raw user id; users are not modeled here.
    owner = models.IntegerField()
    subject = models.CharField(max_length=200, blank=True, null=True)
    initialpriority = models.IntegerField()
    finalpriority = models.IntegerField()
    priority = models.IntegerField()
    timeestimated = models.IntegerField()
    timeworked = models.IntegerField()
    status = models.CharField(max_length=64, blank=True, null=True)
    timeleft = models.IntegerField()
    told = models.DateTimeField(blank=True, null=True)
    starts = models.DateTimeField(blank=True, null=True)
    started = models.DateTimeField(blank=True, null=True)
    due = models.DateTimeField(blank=True, null=True)
    resolved = models.DateTimeField(blank=True, null=True)
    lastupdatedby = models.IntegerField()
    lastupdated = models.DateTimeField(blank=True, null=True)
    creator = models.IntegerField()
    created = models.DateTimeField(blank=True, null=True)
    disabled = models.SmallIntegerField()
    ismerged = models.SmallIntegerField(blank=True, null=True)

    class Meta:
        # Maps onto RT's existing table; Django must not create/migrate it.
        managed = False
        db_table = 'tickets'

    def __str__(self):
        return '#{}: {}'.format(self.id, self.subject)
class TicketCustomfieldValueManager(models.Manager):
    "Filter out non-Tickets"

    def get_queryset(self):
        # The underlying table stores values for several object types
        # (tickets, articles, ...); restrict the default queryset to
        # rows attached to tickets.
        return super(TicketCustomfieldValueManager,
                     self).get_queryset().filter(objecttype='RT::Ticket')
@python_2_unicode_compatible
class TicketCustomfieldValue(models.Model):
    """The "objectcustomfieldvalues" table points to several other tables

    This model is only for "tickets" objectcustomfieldvalues

    Known hooks:

    * Tickets
    * Articles

    'objectid' is the id of the row
    'objecttype' is the table"""
    # Rename the column: 'objectid' is exposed as a proper FK to Ticket.
    ticket = models.ForeignKey('Ticket', db_column='objectid', related_name='customfields')
    customfield = models.ForeignKey('Customfield', db_column='customfield')
    content = models.CharField(max_length=255, blank=True, null=True)
    creator = models.IntegerField()
    created = models.DateTimeField(blank=True, null=True)
    lastupdatedby = models.IntegerField()
    lastupdated = models.DateTimeField(blank=True, null=True)
    objecttype = models.CharField(max_length=255)
    largecontent = models.TextField(blank=True, null=True)
    contenttype = models.CharField(max_length=80, blank=True, null=True)
    contentencoding = models.CharField(max_length=80, blank=True, null=True)
    sortorder = models.IntegerField()
    disabled = models.IntegerField()

    # Default manager filters to objecttype='RT::Ticket' only.
    objects = TicketCustomfieldValueManager()

    class Meta:
        # Maps onto RT's existing table; Django must not create/migrate it.
        managed = False
        db_table = 'objectcustomfieldvalues'

    def __str__(self):
        return '{}: {}'.format(self.customfield, self.content)
@python_2_unicode_compatible
class CustomfieldValue(models.Model):
    "Allowable content for Customfields"
    customfield = models.ForeignKey('Customfield', db_column='customfield')
    name = models.CharField(max_length=200, blank=True, null=True)
    description = models.CharField(max_length=255, blank=True, null=True)
    sortorder = models.IntegerField()
    # creator/lastupdatedby are raw user ids; users are not modeled here.
    creator = models.IntegerField()
    created = models.DateTimeField(blank=True, null=True)
    lastupdatedby = models.IntegerField()
    lastupdated = models.DateTimeField(blank=True, null=True)
    category = models.CharField(max_length=255, blank=True, null=True)

    class Meta:
        # Maps onto RT's existing table; Django must not create/migrate it.
        managed = False
        db_table = 'customfieldvalues'

    def __str__(self):
        return '{}: {}'.format(self.customfield, self.name)
| {
"repo_name": "UNINETT/django-rtdb",
"path": "src/rtdb/models.py",
"copies": "1",
"size": "6175",
"license": "mit",
"hash": -846170732343019800,
"line_mean": 36.1987951807,
"line_max": 91,
"alpha_frac": 0.712388664,
"autogenerated": false,
"ratio": 3.9156626506024095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00023162230453750726,
"num_lines": 166
} |
"A linear mixed effects model"
from itertools import product
import copy
import numpy as np
from numpy.linalg import LinAlgError
from scipy.sparse import csc_matrix, issparse
from scipy.sparse import eye as sparseeye
from scipy.linalg import inv as scipy_inv
from scipy.linalg import pinv
from pydigree.stats.mixedmodel.likelihood import makeP
from pydigree.stats.mixedmodel.likelihood import full_loglikelihood
from pydigree.stats.mixedmodel.likelihood import REML, ML
from pydigree.stats.mixedmodel.maximization import newtonlike_maximization
from pydigree.stats.mixedmodel.maximization import expectation_maximization
# from pydigree.stats.mixedmodel.maximization import minque
from pydigree.stats.mixedmodel.maximization import grid_search
from pydigree.stats.mixedmodel.maximization import MLEResult
def is_genetic_effect(effect):
    """
    Is this effect a genetic effect?

    :param effect: the effect label to test
    :rtype: bool
    """
    # Set literal: avoids rebuilding a temporary list + set on every call.
    return effect in {'additive', 'dominance', 'mitochondrial'}
def inv(M):
    """Return the inverse of M, densifying sparse input first."""
    dense = M.todense() if issparse(M) else M
    return scipy_inv(dense)
def make_incidence_matrix(individuals, effect_name):
    """Build the sparse incidence matrix Z for a named effect.

    Residual and genetic effects get an identity incidence (one column per
    individual); any other effect is treated as a categorical phenotype and
    gets one indicator column per observed level.

    :raises LinAlgError: if some individual has no level for the effect
    """
    if effect_name.lower() == 'residual' or is_genetic_effect(effect_name):
        # One column per individual: identity incidence.
        return sparseeye(len(individuals))

    levels = sorted({ind.phenotypes[effect_name] for ind in individuals})
    # Missing values are not a valid level
    levels = [lvl for lvl in levels if lvl is not None]
    nlevels = len(levels)

    # One 0/1 indicator per (individual, level) pair, row-major, then
    # reshaped to individuals x levels. (scipy's sparse types don't offer
    # a usable reshape, hence the dense intermediate.)
    indicator_gen = (ind.phenotypes[effect_name] == lvl
                     for ind, lvl in product(individuals, levels))
    indicators = np.fromiter(indicator_gen, dtype=np.uint8)
    indicators = indicators.reshape(-1, nlevels)

    # A row of all zeros means that individual matched no level, i.e. a
    # missing value: refuse to build a broken design matrix.
    if (indicators == 0).all(axis=1).any():
        raise LinAlgError('Missing values in random effect')

    return csc_matrix(indicators)
class RandomEffect(object):
    """A random effect in a mixed model.

    Holds the incidence matrix Z, the covariance matrix G, the level
    labels and the variance component for one effect, and precomputes
    V_i = Z * G * Z'.
    """
    __slots__ = ['label',
                 'variance_component',
                 'incidence_matrix',
                 'covariance_matrix',
                 'levels',
                 'V_i']

    def __init__(self, individuals, label, variance=None,
                 incidence_matrix=None, covariance_matrix=None, levels=None):
        """
        Create the random effect.

        :param individuals: Individuals included
        :param label: name of the effect
        :param variance: variance associated with the effect
        :param incidence_matrix: incidence matrix for the effect; the
            string 'eye' requests an identity matrix, None builds one
            from the individuals' phenotypes
        :param covariance_matrix: covariance matrix for random effect
        :param levels: labels for the levels of the random effect

        :type individuals: iterable
        :type label: string
        :type variance: float
        :type incidence_matrix: matrix, 'eye', or None
        :type covariance_matrix: matrix
        :raises LinAlgError: if the covariance matrix is not square, or
            not conformable with the incidence matrix
        :raises ValueError: if len(levels) does not match the number of
            columns of the incidence matrix
        """
        nobs = len(individuals)
        self.label = label
        self.variance_component = variance

        if isinstance(incidence_matrix, str) and incidence_matrix == 'eye':
            self.incidence_matrix = sparseeye(nobs, nobs)
        elif incidence_matrix is None:
            self.incidence_matrix = make_incidence_matrix(individuals,
                                                          self.label)
        else:
            self.incidence_matrix = incidence_matrix

        if covariance_matrix is None:
            # Number of levels of random effects is the number of
            # columns in the incidence matrix
            nlevel = self.incidence_matrix.shape[1]
            self.covariance_matrix = sparseeye(nlevel, nlevel)
        else:
            # Covariance matrices are square
            if covariance_matrix.shape[0] != covariance_matrix.shape[1]:
                raise LinAlgError('Covariance matrix not square')
            if covariance_matrix.shape[0] != self.incidence_matrix.shape[1]:
                raise LinAlgError('Incidence and covariance matrix '
                                  'not conformable')
            self.covariance_matrix = covariance_matrix

        if not levels:
            self.levels = ['L{}'.format(i) for i in
                           range(self.incidence_matrix.shape[1])]
        else:
            # BUG FIX: validate against the *resolved* incidence matrix.
            # The original compared len(levels) to the raw incidence_matrix
            # argument, which raised AttributeError whenever the caller
            # combined explicit levels with incidence_matrix='eye' or None.
            if len(levels) != self.incidence_matrix.shape[1]:
                raise ValueError('Number of levels not correct')
            self.levels = levels

        self.V_i = self.Z * self.G * self.Z.T

    def __repr__(self):
        return 'Random Effect: {}'.format(self.label)

    @property
    def nlevels(self):
        """
        The number of levels of the random effect

        :rtype: int
        """
        return len(self.levels)

    # Convenience properties for linear algebra
    @property
    def sigma(self):
        """
        Convenience property for returning the variance of the component

        :rtype: float
        """
        return self.variance_component

    @property
    def Z(self):
        "Convenience property for returning the incidence matrix"
        return self.incidence_matrix

    @property
    def G(self):
        "Convenience property for returning the covariance_matrix"
        return self.covariance_matrix
class MixedModel(object):
    """
    Fits linear models in the form of y = X * b + sum(Z_i * u_i) + e, where:
    y is the vector of outcomes
    X is a design matrix of fixed effects
    b is the vector of coefficients correstponding to those fixed effects
    Z_i is an incidence matrix corresponding to random effect i
    u_i is a vector of values corresponding to random effect i
    e is a vector of errors
    """

    def __init__(self, pedigrees, outcome=None, fixed_effects=None,
                 random_effects=None, covariance_matrices=None, only=None):
        # NOTE(review): the covariance_matrices parameter is accepted but
        # never used; covariance matrices come in via RandomEffect objects.
        self.mle = None
        self.pedigrees = pedigrees
        self.outcome = outcome
        self.fixed_effects = fixed_effects if fixed_effects else []
        self.obs = []
        # NOTE(review): self.only is only defined when `only` is passed;
        # code reading self.only unconditionally would raise AttributeError.
        if only is not None:
            self.only = frozenset(only)
        if not random_effects:
            self.random_effects = []
        else:
            if not all(isinstance(x, RandomEffect) for x in random_effects):
                raise ValueError(
                    'Random effects must be of class RandomEffect')
            self.random_effects = random_effects
        # Extra variance component for residual variance. Works like
        # any other random effect.
        residual = RandomEffect(self.observations(), 'Residual')
        self.random_effects.append(residual)
        # Model matrices are built lazily by fit_model().
        self.V = None
        self.X = None
        self.y = None
        self.Zlist = None
        self.beta = None

    def copy(self):
        """
        Returns a deep copy of the model

        :returns: A copy of the model object
        :rtype: MixedModel
        """
        # We want to avoid copying pedigree and individual data, so
        # we'll set the pedigrees attribute to None for a sec, and then
        # change it back
        peds = self.pedigrees
        self.pedigrees = None
        newmm = copy.deepcopy(self)
        newmm.pedigrees = peds
        self.pedigrees = peds
        return newmm

    def fit_model(self):
        """
        Builds X, Z, Y, and R for the model

        :returns: void
        """
        self.y = self._makey()
        self.X = self._makeX()
        self.Zlist = self._makeZs()

        need_vcs = not all(x is not None for x in self.variance_components)
        if need_vcs:
            # Not a great way to start but you need to start with something
            need_vcs = True
            vcs = [0] * len(self.random_effects)
            vcs[-1] = np.var(self.y)
            self.set_variance_components(vcs)

        self.V = self._makeV()
        self.beta = self._makebeta()

        if need_vcs:
            # NOTE(review): this assignment mutates only the local `vcs`
            # list; set_variance_components is not called again, so the
            # refined residual variance never reaches the model state.
            vcs[-1] = np.var(self.y - self.X * self.beta)

    def _fit_results(self):
        # Recompute V and beta from the current variance components.
        self.V = self._makeV()
        self.beta = self._makebeta()

    def clear_model(self):
        """ Clears all parameters from the model """
        self.random_effects = []
        self.fixed_effects = []
        self.Zlist = []
        self.X = None
        self.y = None
        self.beta = None
        self.V = None
        self.obs = None

    def observations(self):
        """
        Returns a list of the fully observed individuals in the model

        Fully observed individuals have observations for each fixed effect and
        and observation for the outcome variable.

        :returns: the fully observed individuals
        :rtype: list of Individuals
        """
        def has_all_fixefs(ind, effects):
            # An individual qualifies only if every fixed effect is both
            # present in its phenotypes and non-None.
            if not effects:
                return True
            for effect in effects:
                if effect not in ind.phenotypes:
                    return False
                elif ind.phenotypes[effect] is None:
                    return False
            return True

        def has_outcome(ind):
            try:
                return ind.phenotypes[self.outcome] is not None
            except KeyError:
                return False

        obs = [x for x in self.pedigrees.individuals
               if (has_all_fixefs(x, self.fixed_effects) and
                   has_outcome(x) and x)]
        # Cache the first computed observation list.
        if not self.obs:
            self.obs = obs
        return obs

    def nobs(self):
        """
        :returns: the number of fully observed individuals in the model
        :rtype: integer
        """
        return len(self.observations())

    @property
    def variance_components(self):
        """
        The current variances associated with each random effect

        :rtype: list of floats
        """
        return [x.sigma for x in self.random_effects]

    @property
    def covariance_matrices(self):
        """
        The covariance matrices associated with each random effect

        :rtype: list of matrices
        """
        return [x.covariance_matrix for x in self.random_effects]

    @property
    def R(self):
        "Covariance matrix of the residual variance"
        # Residual effect is always the last random effect (see __init__).
        return self.random_effects[-1].covariance_matrix

    @property
    def P(self):
        "Projection matrix"
        return makeP(self.X, inv(self.V))

    def residual_variance(self):
        """
        Returns the variance in y not accounted for by random effects

        :rtype: float
        """
        return self.random_effects[-1].sigma

    def _makey(self):
        """ Prepares the vector of outcome variables for model estimation """
        obs = self.observations()
        # Column vector: one outcome per fully observed individual.
        return np.matrix([x.phenotypes[self.outcome] for x in obs]).transpose()

    def _makeX(self):
        """
        Builds the design matrix for the fixed effects in the mixed model.
        Includes a column of ones for the intercept.

        Returns: matrix
        """
        obs = self.observations()
        # First column is the intercept; one further column per fixed effect.
        xmat = [[1] * len(obs)]
        for phen in self.fixed_effects:
            xmat.append([ob.phenotypes[phen] for ob in obs])
        X = np.matrix(list(zip(*xmat)))
        return X

    def _makeZs(self):
        """
        Makes the incidence matrix for random effects

        :rtype: A list of numpy matrices
        """
        Zlist = [ranef.Z for ranef in self.random_effects]
        return [csc_matrix(Z) for Z in Zlist]

    def _makeV(self, vcs=None):
        # V = sum_i sigma_i * Z_i * G_i * Z_i' over all random effects
        # (including the residual).
        if vcs is None and (not all(x is not None for x in self.variance_components)):
            raise ValueError('Variance components not set')

        if vcs is None:
            variance_components = self.variance_components
        else:
            variance_components = vcs

        V = sum(sigma * Z * A * Z.T for sigma, Z, A in
                zip(variance_components,
                    self.Zlist,
                    self.covariance_matrices))

        return V

    def _makebeta(self):
        """
        Calculates BLUEs for the fixed effects portion of the model

        Reference:
        McCulloch & Seale. Generalized, Linear, and Mixed Models. (2001)
        Equation 6.24
        """
        # Generalized least squares: beta = (X' V^-1 X)^+ X' V^-1 y
        vinv = inv(self.V.todense())
        return pinv(self.X.T * vinv * self.X) * self.X.T * vinv * self.y

    def set_outcome(self, outcome):
        """ Sets the outcome for the mixed model """
        self.outcome = outcome
        self.y = self._makey()

    def add_fixed_effects(self, effect):
        """ Adds a fixed effect to the model """
        self.fixed_effects.append(effect)
        self.X = self._makeX()

    def add_random_effect(self, effect):
        """ Adds a random effect to the model """
        if not isinstance(effect, RandomEffect):
            raise ValueError('Random effect must be of type RandomEffect')
        # Insert before the residual effect, which stays last.
        self.random_effects.insert(-1, effect)
        self.Zlist = self._makeZs()

    def add_genetic_effect(self, kind='additive'):
        """
        Adds a genetic effect to the model as a random effect

        :param kind: type of effect to add
        :type kind: 'additive' or 'dominance'
        """
        inds = [x.full_label for x in self.observations()]
        peds = self.pedigrees
        if kind.lower() == 'additive':
            covmat = peds.additive_relationship_matrix(inds)
        elif kind.lower() == 'dominance':
            covmat = peds.dominance_relationship_matrix(inds)
        else:
            raise NotImplementedError(
                'Nonadditive/dominance genetic effects not implemented')
        effect = RandomEffect(
            self.observations(), kind, covariance_matrix=covmat)
        self.add_random_effect(effect)

    def set_variance_components(self, variance_components):
        """
        Manually set variance components for each random effect in the model.
        Useful if you know a priori, say a heritability, and just want to
        predict breeding values for the trait.

        :param variance_components: variances associated with each random effect
        :type variance_components: iterable of numerics
        """
        if not all(x is not None for x in variance_components):
            raise ValueError('Not all variance components are specified')
        for sigma, ranef in zip(variance_components, self.random_effects):
            ranef.variance_component = sigma

    def maximize(self, method="Average Information", restricted=False,
                 starts=None, verbose=False):
        """
        Finds the optimal values for variance components in the model using
        provided optimization methods.

        :param restricted: Uses REML estimation
        :param starts: starting values for the variance components
        :param method: maximization method
        :param verbose: output maximization progress
        :type restricted: bool
        :type method: string
        :type starts: iterable of numerics
        :type verbose: bool:
        """
        # NOTE(review): `self.maximized` is a bool property, so
        # `self.maximized.method` raises AttributeError whenever
        # self.mle is an MLEResult; this was probably meant to be
        # `self.mle.method`.
        if (isinstance(self.mle, MLEResult) and
                self.maximized.method == method):
            return

        self.fit_model()

        if starts is None:
            starts = self._starting_variance_components()

        likefunc = REML if restricted else ML
        llik = likefunc(self, info=method)
        llik.set_parameters(starts)

        # if method.lower().startswith('minque'):
        #     mle = minque(self, value=0, verbose=verbose, starts=starts)
        if method.lower() in {'em', 'emreml', 'expectation-maximization'}:
            mle = expectation_maximization(self, llik, verbose=verbose)
        elif method.lower() == 'grid':
            mle = grid_search(self, llik, nevals=20, oob=False)
        else:
            mle = newtonlike_maximization(self, llik, verbose=verbose)

        self.mle = mle
        self.set_variance_components(mle.parameters)
        self.fit_model()

        # Get the full loglikelihood at the REML maximimum so we
        # can use it later
        self.mle.full_loglikelihood = full_loglikelihood(self.y, self.V,
                                                         self.X, self.beta)

    @property
    def maximized(self):
        """
        Has the model been maximized?

        :rtype: bool
        """
        return isinstance(self.mle, MLEResult)

    def loglikelihood(self, restricted=False, vcs=None, vmat=None):
        """
        Returns the loglikelihood of the model with the current model
        parameters

        :returns: loglikelihood
        :rtype: float
        """
        # Prefer the cached value from maximization when available.
        if self.mle is not None and vcs is None and vmat is None:
            if restricted:
                return self.mle.restricted_loglikelihood
            else:
                return self.mle.full_loglikelihood

        if vcs is not None:
            V = self._makeV(vcs=vcs)
        elif vmat is not None:
            V = vmat
        elif self.V is None:
            # NOTE(review): this branch assigns self.V but leaves the
            # local `V` unbound, so the non-restricted return below would
            # raise UnboundLocalError; it likely should also set V.
            self.V = self._makeV()
        else:
            V = self.V

        if not restricted:
            return full_loglikelihood(self.y, V, self.X, self.beta)
        else:
            return REML(self).loglikelihood()

    @property
    def df(self):
        '''
        The number of observations minus the number of fixed effects, minus
        the number of non-residual random effects

        :rtype: integer
        '''
        # "+ 1" excludes the residual effect from the random-effect count.
        return self.nobs() - self.X.shape[1] - len(self.random_effects) + 1

    @property
    def bic(self):
        """
        Calculates the Bayesian Information Criterion (BIC) for the model

        :rtype: float
        """
        if not self.maximized:
            raise ValueError('Model not maximized!')
        # Add 1 because the intercept has to be estimated
        nparam = len(self.fixed_effects) + len(self.random_effects) + 1
        n = self.nobs()
        loglike = self.loglikelihood()
        return -2 * loglike + nparam * np.log(n)

    def blup(self, idx):
        """
        Get the BLUPs for a random effect

        :param idx: index of effect
        :type idx: int
        :rtype: np.array
        """
        # BLUP: u_hat = G Z' V^-1 (y - X beta)
        rf = self.random_effects[idx]
        res = (self.y - self.X * self.beta)
        blups = rf.G * rf.Z.T * inv(self.V.todense()) * res
        return np.array(blups.T)[0]

    def summary(self):
        """
        Prints a summary of the current model

        :rtype: void
        """
        if not self.maximized:
            raise ValueError('Model not maximized!')

        self._fit_results()

        print()
        print('Linear mixed model fit by {}'.format(self.mle.method))
        print()
        print('Fixed effects:')
        fixefnames = ['(Intercept)'] + self.fixed_effects
        betas = self.beta.T.tolist()[0]
        print('\t'.join(['Name', 'Estimate']))
        for name, beta in zip(fixefnames, betas):
            print('\t'.join(q for q in [name, '{:5.3f}'.format(beta)]))
        print()
        print('Variance components:')
        print('\t'.join(['Component', 'Variance', '% Variance']))
        totalvar = sum(self.variance_components)
        for effect, vc in zip(self.random_effects, self.variance_components):
            print('\t'.join(v for v in [effect.label,
                                        '{:5.3f}'.format(vc),
                                        '{:5.3f}'.format(100 * vc / totalvar)]))
        print()
        print('Observations: {}'.format(self.nobs()))
        print('Loglikelihood: {:10.2f}'.format(self.loglikelihood()))
        print('BIC: {:10.3f}'.format(self.bic))
        print()

    def _variance_after_fixefs(self):
        # Variance of the GLS residuals: the outcome variance left over
        # after removing the fixed-effect fit.
        return np.var(self.y - self.X * self.beta)

    def _starting_variance_components(self, kind='equal'):
        """
        Starting variance components in optimization.
        Valid values:

        'ols': Starting values are all 0 except residual, which is
        var(y - X*Beta)

        'EM': the starting values are the variance components after
        100 iterations of expectation-maximization REML (started from all
        equal values).

        'equal': Chooses all variance components (including residual)
        to be equal.

        :param kind: the method to find starting values
        :type kind: string
        :returns: variance components
        :rtype: numpy array of floats
        """
        # 'minque0': Starting values are those from MINQUE with all weights
        # set equal to 0 except for the residual variance, which is set
        # to 1. This is the default method used by SAS's PROC MIXED.

        # 'minque1': Starting values are those from MINQUE with all weights
        # set equal to 1

        # if kind.lower() == 'minque0':
        #     return minque(self, value=0, return_after=1, return_vcs=True)

        # if kind.lower() == 'minque1':
        #     return minque(self, value=1, return_after=1, return_vcs=True)

        # if kind.lower() == 'minquemean':
        #     zero = minque(self, value=0, return_after=1, return_vcs=True)
        #     one = minque(self, value=1, return_after=1, return_vcs=True)
        #     return (zero + one) / 2.0

        if kind.lower() == 'ols':
            vcs_start = np.zeros(len(self.random_effects))
            vcs_start[-1] = self._variance_after_fixefs()
            return vcs_start

        if kind.lower() == 'equal':
            # Split the post-fixed-effect variance evenly across effects.
            v = self._variance_after_fixefs()
            n = len(self.random_effects)
            vcs_start = [v/float(n)] * n
            return vcs_start

        if kind.lower() == 'em':
            starts = self._starting_variance_components('equal')
            vcs_start = expectation_maximization(self,
                                                 REML(self),
                                                 starts=starts,
                                                 return_after=100)
            return vcs_start.parameters
        else:
            raise ValueError('Unknown method: {}'.format(kind))
| {
"repo_name": "jameshicks/pydigree",
"path": "pydigree/stats/mixedmodel/mixedmodel.py",
"copies": "1",
"size": "22342",
"license": "apache-2.0",
"hash": 2361852839133171700,
"line_mean": 31.8076358297,
"line_max": 86,
"alpha_frac": 0.5799391281,
"autogenerated": false,
"ratio": 4.189386836677293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5269325964777293,
"avg_score": null,
"num_lines": null
} |
""" A line segment component. """
from __future__ import with_statement
from numpy import array, resize
# Enthought library imports.
from kiva.constants import FILL, FILL_STROKE, STROKE
from traits.api import Any, Event, Float, List, Trait, Bool
# Local imports.
from enable.api import border_size_trait, Component
from enable.colors import ColorTrait
class Line(Component):
    """A line segment component

    Draws the stored point list as a connected (optionally closed) path
    and renders a small square marker at each vertex.
    """

    # Event fired when the points are no longer updating.
    # PZW: there seems to be a missing defn here; investigate.

    # An event to indicate that the point list has changed
    updated = Event

    # The color of the line.
    line_color = ColorTrait("black")

    # The dash pattern for the line.
    line_dash = Any

    # The width of the line.
    line_width = Trait(1, border_size_trait)

    # The points that make up this polygon.
    points = List  # List of Tuples

    # The color of each vertex.
    vertex_color = ColorTrait("black")

    # The size of each vertex.
    vertex_size = Float(3.0)

    # Whether to draw the path closed, with a line back to the first point
    close_path = Bool(True)

    #--------------------------------------------------------------------------
    #  'Line' interface
    #--------------------------------------------------------------------------

    def reset(self):
        "Reset the polygon to the initial state"
        # Drop all points, return to the normal event state and notify
        # listeners via the 'updated' event.
        self.points = []
        self.event_state = 'normal'
        self.updated = self
        return

    #--------------------------------------------------------------------------
    #  'Component' interface
    #--------------------------------------------------------------------------

    def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
        "Draw this line in the specified graphics context"
        # A path needs at least two points; a single point still gets its
        # vertex marker drawn below.
        if len(self.points) > 1:
            with gc:
                # Set the drawing parameters.
                gc.set_stroke_color(self.line_color_)
                gc.set_line_dash(self.line_dash)
                gc.set_line_width(self.line_width)

                # Draw the path as lines.
                gc.begin_path()
                offset_points = [(x, y) for x, y in self.points]
                offset_points = resize(array(offset_points), (len(self.points), 2))
                gc.lines(offset_points)
                if self.close_path:
                    gc.close_path()
                gc.draw_path(STROKE)

        if len(self.points) > 0:
            with gc:
                # Draw the vertices.
                self._draw_points(gc)
        return

    #--------------------------------------------------------------------------
    #  Private interface
    #--------------------------------------------------------------------------

    def _draw_points(self, gc):
        "Draw the points of the line"
        # Shortcut out if we would draw transparently (alpha == 0).
        if self.vertex_color_[3] != 0:
            with gc:
                gc.set_fill_color(self.vertex_color_)
                gc.set_line_dash(None)

                offset_points = [(x, y) for x, y in self.points]
                offset_points = resize(array(offset_points), (len(self.points), 2))

                # Markers are squares centered on each vertex.
                offset = self.vertex_size / 2.0
                if hasattr(gc, 'draw_path_at_points'):
                    # Fast path: stamp one square path at every point.
                    path = gc.get_empty_path()
                    path.rect(-offset, -offset,
                              self.vertex_size, self.vertex_size)
                    gc.draw_path_at_points(offset_points, path, FILL_STROKE)
                else:
                    # Fallback: draw each square individually.
                    for x, y in offset_points:
                        gc.draw_rect((x - offset, y - offset,
                                      self.vertex_size, self.vertex_size), FILL)
        return
| {
"repo_name": "tommy-u/enable",
"path": "enable/primitives/line.py",
"copies": "1",
"size": "3775",
"license": "bsd-3-clause",
"hash": -6054470470936187000,
"line_mean": 32.7053571429,
"line_max": 82,
"alpha_frac": 0.4874172185,
"autogenerated": false,
"ratio": 4.4780545670225385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005124142541541731,
"num_lines": 112
} |
"""A link checker using urllib2."""
import ssl
import socket
import urllib.request
import urllib.error
import urllib.parse
import http.client
import regex
import logging
LINK_CHECK_TIMEOUT = 5
def check_url(url, method="HEAD"):
    """Check a URL using the specified method (GET, or HEAD).

    Return None if the URL can be reached with no errors.

    If the URL can't be checked with HEAD (default), tries a GET request.

    Otherwise, return a dict containing a status_code and status_message
    containing the error information.

    :param url: the URL to check
    :param method: the HTTP method to use ("HEAD" or "GET")
    :return: None on success, else {"status_code": ..., "status_message": ...}
    """
    from os2webscanner.utils import capitalize_first
    try:
        # Lazy %-style args: the message is only formatted if emitted.
        logging.info("Checking %s", url)
        request = urllib.request.Request(url, headers={"User-Agent":
                                                       "OS2Webscanner"})
        request.get_method = lambda: method
        # Close the response explicitly; the original leaked the socket by
        # discarding the urlopen() result.
        with urllib.request.urlopen(request, timeout=LINK_CHECK_TIMEOUT):
            pass
        return None
    except (urllib.error.HTTPError,
            urllib.error.URLError,
            http.client.InvalidURL,
            socket.timeout,
            IOError, ssl.CertificateError) as e:
        logging.debug("Error %s", e)
        code = getattr(e, "code", 0)
        if code == 405:
            # Method not allowed, try with GET instead
            return check_url(url, method="GET")
        reason = str(getattr(e, "reason", ""))
        if reason == "":
            reason = str(e)
        # Strip "[Errno -2] "-style prefixes (raw string: same pattern,
        # no invalid-escape warning)
        reason = regex.sub(r"\[.+\] ", "", reason)
        reason = capitalize_first(reason)
        if code != 0:
            reason = "%d %s" % (code, reason)
        return {"status_code": code, "status_message": reason}
| {
"repo_name": "os2webscanner/os2webscanner",
"path": "scrapy-webscanner/linkchecker.py",
"copies": "1",
"size": "1747",
"license": "mpl-2.0",
"hash": 1332120235112983300,
"line_mean": 29.649122807,
"line_max": 73,
"alpha_frac": 0.5907269605,
"autogenerated": false,
"ratio": 4.15952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.525025077002381,
"avg_score": null,
"num_lines": null
} |
# a linked list(here the singly linked list) must have a head reference(pointer), otherwise it would
# be impossible to locate the head or any other node.
from time import time
class ListNode:
    """One node of a singly linked list."""

    def __init__(self, x):
        # Payload stored at this node; successor starts out as None (tail).
        self.val, self.next = x, None

    def __repr__(self):
        return '{}'.format(self.val)
def CreateLinkedList(infolist):
    '''
    Build a singly linked list from the values in `infolist` and return
    the head node (None when the input list is empty).
    '''
    # Build behind a dummy node so the empty-input case needs no
    # special handling.
    dummy = ListNode(None)
    tail = dummy
    for value in infolist:
        tail.next = ListNode(value)
        tail = tail.next
    return dummy.next
def traverse(head):
    """Print every node from head to tail, one per line."""
    node = head
    while node is not None:
        print(node)
        node = node.next
def unorderedSearch(head, target):
    """Return True if any node in the list holds `target` as its value."""
    node = head
    while node is not None:
        if node.val == target:
            return True
        node = node.next
    return False
def removeNode(head, target):
    """Remove the first node whose value equals `target`.

    Returns the list's head after removal.  Returning the head is the
    bug fix: when the removed node *is* the head, rebinding the local
    variable cannot be observed by the caller, so the original version
    silently lost the removal.  Callers that ignored the (previously
    None) return value still work.
    """
    prev = None
    curr = head
    while curr and curr.val != target:
        prev = curr
        curr = curr.next
    if curr:
        if curr is head:
            # Equivalent to "head = head.next", since curr is head here.
            head = curr.next
        else:
            prev.next = curr.next
        curr.next = None  # disconnect the removed node from its next node
    return head
def reverse(head):
    """Iteratively reverse the list in place; return the new head."""
    new_head = None
    node = head
    while node is not None:
        # Tuple assignment: all right-hand values are read before any
        # left-hand name is rebound.
        node.next, new_head, node = new_head, node, node.next
    return new_head
def reverse_rec(head):
    """Recursively reverse the list in place; return the new head."""
    # Empty list or single node: already reversed.
    if head is None or head.next is None:
        return head
    rest = head.next
    new_head = reverse_rec(rest)
    # `rest` is now the tail of the reversed remainder; hook the old
    # head behind it and terminate the list.
    rest.next = head
    head.next = None
    return new_head
def reverse_2b(head):
    """Return the head of a *new* list holding the values in reverse.

    Unlike reverse()/reverse_rec() this does not relink the original
    nodes; it copies the values into fresh nodes.  Bug fix: the original
    built the copied list and then unconditionally returned None,
    throwing the result away.  Now returns the new head (None for an
    empty input), which is backward compatible with callers that ignored
    the return value.
    """
    values = []
    while head:
        values.append(head.val)
        head = head.next
    if not values:
        return None
    new_head = ListNode(values[-1])
    tail = new_head
    # Append values[-2], values[-3], ..., values[0] in order.
    for val in reversed(values[:-1]):
        tail.next = ListNode(val)
        tail = tail.next
    return new_head
def run(func, loops, *args, **kargs):
    """Call func(*args, **kargs) `loops` times; return elapsed seconds."""
    start = time()
    for _ in range(loops):
        func(*args, **kargs)
    return time() - start
if __name__ == '__main__':
    # Ad-hoc benchmark comparing the three reversal implementations.
    nn = None  # unused placeholder
    no = ListNode('a')  # unused placeholder
    n0 = ListNode('A')
    n1 = ListNode('B')
    n2 = ListNode('C')
    n3 = ListNode('D')
    n0.next = n1; n1.next = n2; n2.next = n3
    # NOTE(review): reverse() and reverse_rec() relink the shared nodes in
    # place, so after the first timed call the chain rooted at n0 is a
    # single node -- the three timings do not run on equal inputs.
    # Confirm whether that is intended before trusting the comparison.
    time1 = run(reverse, 1000000, n0)
    time2 = run(reverse_rec, 1000000, n0)
    time3 = run(reverse_2b, 1000000, n0)
    print("reverse: %.6f reverse_rec: %.6f reverse_2b: %.6f" % (time1, time2, time3))
| {
"repo_name": "spencerpomme/coconuts-on-fire",
"path": "DSA/linked_list_node.py",
"copies": "1",
"size": "2633",
"license": "apache-2.0",
"hash": -1096255098069033000,
"line_mean": 22.5089285714,
"line_max": 100,
"alpha_frac": 0.5685529814,
"autogenerated": false,
"ratio": 3.5485175202156336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4617070501615634,
"avg_score": null,
"num_lines": null
} |
# A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.
# Return a deep copy of the list.
# Definition for singly-linked list with a random pointer.
class RandomListNode(object):
    """Singly linked list node carrying an extra 'random' pointer."""

    def __init__(self, x):
        self.label = x      # node payload
        self.next = None    # successor node, or None at the tail
        self.random = None  # arbitrary node in the list, or None
class Solution(object):
    """Deep-copies a linked list with random pointers in O(1) extra space.

    Strategy: weave a copy of each node directly after its original,
    use that adjacency to resolve the random pointers, then unweave
    the two lists.
    """

    def copyRandomList(self, head):
        if not head:
            return head
        # Pass 1: insert each node's copy immediately after it.
        node = head
        while node:
            clone = RandomListNode(node.label)
            clone.next = node.next
            node.next = clone
            node = clone.next
        # Pass 2: a clone's random is the node right after the
        # original's random target.
        node = head
        while node and node.next:
            if node.random:
                node.next.random = node.random.next
            node = node.next.next
        # Pass 3: split the interleaved chain back into the original
        # list and the copied list.
        copied_head = head.next
        node = head
        while node and node.next:
            clone = node.next
            node.next = clone.next
            if node.next:
                clone.next = node.next.next
            node = node.next
        return copied_head
if __name__ == "__main__":
    # Smoke test: deep-copy a four-node chain (random pointers left None).
    node1 = RandomListNode(1)
    node2 = RandomListNode(2)
    node3 = RandomListNode(3)
    node4 = RandomListNode(4)
    node1.next = node2
    node2.next = node3
    node3.next = node4
    s = Solution()
    # res is the head of the copied list; nothing is printed, so this is
    # presumably meant to be inspected in a debugger.
    res = s.copyRandomList(node1)
| {
"repo_name": "seanxwzhang/LeetCode",
"path": "138 Copy List with Random Pointer/solution.py",
"copies": "1",
"size": "1505",
"license": "mit",
"hash": -1187486207535339500,
"line_mean": 29.12,
"line_max": 133,
"alpha_frac": 0.5754152824,
"autogenerated": false,
"ratio": 4.0675675675675675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5142982849967568,
"avg_score": null,
"num_lines": null
} |
# A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.
# Return a deep copy of the list.
# Definition for singly-linked list with a random pointer.
class RandomListNode:
    """Linked-list node with an additional 'random' pointer."""

    def __init__(self, x):
        self.label = x      # node payload
        self.next = None    # next node in the chain
        self.random = None  # any node in the list, or None
class Solution:
    # @param head, a RandomListNode
    # @return a RandomListNode
    def copyRandomList(self, head):
        """Deep-copy a list with random pointers without interleaving.

        Temporarily hijacks each original node's `random` slot to point
        at its copy; a scratch list (dupHead) remembers the original
        random targets so they can be restored afterwards.
        """
        if not head:
            return
        cur = head
        newCur = newHead = RandomListNode(cur.label)
        # Pass 1: copy the chain.  Each copy's `random` temporarily holds
        # the *original* random target; each original's `random` is
        # repointed at its own copy.
        while cur:
            newCur.random = cur.random
            cur.random = newCur
            cur = cur.next
            if cur:
                newCur.next = RandomListNode(cur.label)
                newCur = newCur.next
        newCur = newHead
        # Scratch list: its `random` slots preserve the original random
        # targets (node labels are dummies; only `random` matters).
        dupCur = dupHead = RandomListNode(head.label)
        # Pass 2: resolve each copy's random via target.random, which
        # currently points at that target's copy (set in pass 1).
        while newCur:
            if newCur.random:
                dupCur.random = newCur.random
                newCur.random = newCur.random.random
            dupCur.next = RandomListNode(0)
            newCur, dupCur = newCur.next, dupCur.next
        cur, dupCur = head, dupHead
        # Pass 3: restore the originals' random pointers from scratch.
        while cur:
            cur.random = dupCur.random
            cur, dupCur = cur.next, dupCur.next
        return newHead
"repo_name": "abawchen/leetcode",
"path": "solutions/138_copy_list_with_random_pointer.py",
"copies": "1",
"size": "1314",
"license": "mit",
"hash": 7256814660487912000,
"line_mean": 28.2222222222,
"line_max": 133,
"alpha_frac": 0.5814307458,
"autogenerated": false,
"ratio": 4.080745341614906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5162176087414906,
"avg_score": null,
"num_lines": null
} |
# A linked list, relies on a more distributed representation in which a
# lightweight object, known as a node, is allocated for each element. each
# node maintains a reference to its element and one or more references to
# neighboring nodes in order to collectively represent the linear order of the
# sequence.
# Linked list cannot be accessed using a numeric index.
# head --> Start
# tail --> None
# from head to tail --> traversing
# Deleting the last element of a singly linked list is hard: once we reach
# the tail we cannot step backwards, so finding the node that precedes it
# requires another full traversal from the head.
class LinkedStack:
    """LIFO Stack implementation using a singly linked list for storage."""

    #--------------- nested _Node class ------------------#
    class _Node:
        """Lightweight, nonpublic class for storing a singly linked node."""
        # BUG FIX: the correct spelling is __slots__; the original's
        # "__slot__" was a typo, so no slots optimization was applied.
        __slots__ = '_element', '_next'

        def __init__(self, element, next):
            self._element = element  # payload
            self._next = next        # node below this one on the stack

    #--------------- Stack methods -----------------------#
    def __init__(self):
        """Create an empty stack."""
        self._head = None  # top node of the stack
        self._size = 0     # number of stored elements

    def __len__(self):
        """Return the number of elements in the stack."""
        return self._size

    def is_empty(self):
        """Return True if the stack is empty."""
        return self._size == 0

    def push(self, e):
        """Add element e to the top of the stack."""
        # BUG FIX: the nested class must be reached as self._Node; the
        # bare name _Node raised NameError on every push.
        self._head = self._Node(e, self._head)
        self._size += 1

    def top(self):
        """Return (but do not remove) the element at the top of the stack.
        Raise IndexError exception if the stack is empty."""
        if self.is_empty():
            raise IndexError
        return self._head._element

    def pop(self):
        """Remove and return the element from the top of the stack.
        Raise IndexError exception if the stack is empty."""
        if self.is_empty():
            raise IndexError
        answer = self._head._element
        self._head = self._head._next
        self._size -= 1
        return answer
class LinkedQueue:
    """FIFO queue implementation using a singly linked list for storage."""

    class _Node:
        """Lightweight, nonpublic node: element plus next reference."""
        # Slots for memory efficiency, consistent with LinkedStack._Node.
        __slots__ = '_element', '_next'

        def __init__(self, element, next):
            self._element = element
            self._next = next

    def __init__(self):
        """Create an empty queue."""
        self._head = None  # front node (dequeue side)
        self._tail = None  # back node (enqueue side)
        self._size = 0     # number of stored elements

    def __len__(self):
        """Return the number of elements in the queue."""
        return self._size

    def is_empty(self):
        """Return True if the queue is empty."""
        return self._size == 0

    def first(self):
        """Return (without removing) the front element; IndexError if empty."""
        if self.is_empty():
            raise IndexError
        return self._head._element

    def dequeue(self):
        """Remove and return the front element; IndexError if empty."""
        if self.is_empty():
            raise IndexError
        answer = self._head._element
        self._head = self._head._next
        self._size -= 1
        if self.is_empty():
            # The queue became empty: drop the stale tail reference too.
            self._tail = None
        return answer

    def enqueue(self, e):
        """Append element e to the back of the queue."""
        newest = self._Node(e, None)
        if self.is_empty():
            self._head = newest
        else:
            self._tail._next = newest
        self._tail = newest
        # BUG FIX: the original never incremented _size, so len() stayed 0
        # and first()/dequeue() raised IndexError on a non-empty queue.
        self._size += 1
| {
"repo_name": "XYM1988/Algorithm",
"path": "Chp7/7.1-Singly-Listed-List.py",
"copies": "1",
"size": "3119",
"license": "mit",
"hash": 3130156461613973000,
"line_mean": 27.6146788991,
"line_max": 80,
"alpha_frac": 0.5694132735,
"autogenerated": false,
"ratio": 4.374474053295932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011152881215278512,
"num_lines": 109
} |
"""A linter for docstrings following the google docstring format."""
import ast
from collections import deque
import sys
from enum import Enum
from typing import (
Callable,
Iterator,
List,
Set,
Tuple,
Optional,
Union,
Type,
Any,
)
from .analysis.analysis_visitor import (
AnalysisVisitor,
)
from .analysis.function_and_method_visitor import (
FunctionAndMethodVisitor,
)
from .config import get_logger
from .analysis.analysis_helpers import (
_has_decorator
)
logger = get_logger()
# ast.AsyncFunctionDef only exists on newer Pythons; widen FunctionDef into
# an isinstance()-compatible tuple when async function defs are available.
FunctionDef = ast.FunctionDef  # type: Union[Type[Any], Tuple[Type[Any], Type[Any]]]  # noqa: E501
if hasattr(ast, 'AsyncFunctionDef'):
    FunctionDef = (ast.FunctionDef, ast.AsyncFunctionDef)
def read_program(filename):  # type: (str) -> Union[bytes, str]
    """Read a program from a file.

    Args:
        filename: The name of the file to read. If set to '-', then we will
            read from stdin.

    Returns:
        The program as a single string (bytes when read from a file,
        str when read from stdin); the empty string for empty input.
    """
    if filename == '-':
        program = sys.stdin.read()  # type: Union[bytes, Optional[str]]
    else:
        with open(filename, 'rb') as fin:
            program = fin.read()
    return program or ''
def _get_docstring(fun): # type: (ast.AST) -> Optional[str]
return ast.get_docstring(fun)
def _get_all_functions(tree): # type: (ast.AST) -> Iterator[Union[ast.FunctionDef, ast.AsyncFunctionDef]] # noqa: E501
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
yield node
elif hasattr(ast, 'AsyncFunctionDef'):
if isinstance(node, ast.AsyncFunctionDef):
yield node
def _get_all_classes(tree): # type: (ast.AST) -> Iterator[ast.ClassDef]
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
yield node
def _get_all_methods(tree):  # type: (ast.AST) -> Iterator[Union[ast.FunctionDef, ast.AsyncFunctionDef]]  # noqa: E501
    """Yield every function defined inside any class in the tree."""
    for klass in _get_all_classes(tree):
        yield from _get_all_functions(klass)
def _get_return_type(fn):
# type: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> Optional[str]
if fn.returns is not None and hasattr(fn.returns, 'id'):
return getattr(fn.returns, 'id')
return None
def get_line_number_from_function(fn):
    # type: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> int
    """Get the line number for the end of the function signature.

    The function signature can be farther down when the parameter
    list is split across multiple lines.

    Args:
        fn: The function from which we are getting the line number.

    Returns:
        The line number for the start of the docstring for this
        function.

    """
    if getattr(fn, 'args', None) and fn.args.args:
        # The last parameter's line is where the signature ends.
        return fn.args.args[-1].lineno
    return fn.lineno
class FunctionType(Enum):
    """Distinguishes plain functions from methods and properties."""
    FUNCTION = 1
    METHOD = 2
    PROPERTY = 3
class FunctionDescription(object):
    """Describes a function or method.

    Whereas a `Docstring` object describes a function's docstring,
    a `FunctionDescription` describes the function itself. (What,
    ideally, the docstring should describe.)
    """
    def __init__(self, function_type, function):
        # type: (FunctionType, Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> None
        """Create a new FunctionDescription.

        Args:
            function_type: Type of the function.
            function: The base node of the function.
        """
        self.is_method = (function_type == FunctionType.METHOD)
        self.is_property = (function_type == FunctionType.PROPERTY)
        self.function = function
        self.line_number = get_line_number_from_function(function)
        self.name = function.name
        visitor = AnalysisVisitor()
        try:
            visitor.visit(function)
        except Exception as ex:
            # Analysis is best-effort: a failed visit leaves this object
            # partially populated instead of crashing the whole lint run.
            msg = 'Failed to visit in {}: {}'.format(self.name, ex)
            logger.debug(msg)
            return
        self.argument_names = visitor.arguments
        self.argument_types = visitor.types
        # Methods/properties carry an implicit first parameter (self/cls);
        # drop it unless the function is a @staticmethod.
        if function_type != FunctionType.FUNCTION and len(self.argument_names) > 0:
            if not _has_decorator(function, "staticmethod"):
                self.argument_names.pop(0)
                self.argument_types.pop(0)
        self.has_return = bool(visitor.returns)
        self.has_empty_return = False
        if self.has_return:
            return_value = visitor.returns[0]
            # A bare "return"/"return None" counts as an empty return.
            self.has_empty_return = (
                return_value is not None
                and return_value.value is None
            )
        self.return_type = _get_return_type(function)
        self.has_yield = bool(visitor.yields)
        self.raises = visitor.exceptions
        self.docstring = _get_docstring(function)
        self.variables = [x.id for x in visitor.variables]
        self.raises_assert = bool(visitor.asserts)
        self.is_abstract = visitor.is_abstract
def get_function_descriptions(program):
    # type: (ast.AST) -> List[FunctionDescription]
    """Get function name, args, return presence and docstrings.

    This function should be called on the top level of the
    document (for functions), and on classes (for methods.)

    Args:
        program: The tree representing the entire program.

    Returns:
        A list of function descriptions pulled from the ast.

    """
    visitor = FunctionAndMethodVisitor()
    visitor.visit(program)
    # Keep the original ordering: properties, then methods, then functions.
    typed_groups = (
        (FunctionType.PROPERTY, visitor.properties),
        (FunctionType.METHOD, visitor.methods),
        (FunctionType.FUNCTION, visitor.functions),
    )
    return [
        FunctionDescription(function_type=ftype, function=fun)
        for ftype, group in typed_groups
        for fun in group
    ]
| {
"repo_name": "terrencepreilly/darglint",
"path": "darglint/function_description.py",
"copies": "1",
"size": "6095",
"license": "mit",
"hash": 7630981752976406000,
"line_mean": 28.7317073171,
"line_max": 120,
"alpha_frac": 0.6364232978,
"autogenerated": false,
"ratio": 3.99672131147541,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.513314460927541,
"avg_score": null,
"num_lines": null
} |
"""A linting utility for targets.json
This linting utility may be called as follows:
python <path-to>/lint.py targets TARGET [TARGET ...]
all targets will be linted
"""
# mbed SDK
# Copyright (c) 2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import join, abspath, dirname
if __name__ == "__main__":
    import sys
    # When run as a script, put the repo root on sys.path so the
    # "tools" package imports below resolve.
    ROOT = abspath(join(dirname(__file__), "..", ".."))
    sys.path.insert(0, ROOT)
from copy import copy
from yaml import dump_all
import argparse
from past.builtins import basestring
from tools.targets import Target, set_targets_json_location, TARGET_MAP
def must_have_keys(keys, target_dict):
    """Require keys in an MCU/Board.

    A generator of error strings, one per missing required key.

    :param: keys the keys that must be present
    :param: target_dict the MCU/Board definition to check

    NOTE: parameter renamed from ``dict`` because it shadowed the
    builtin; every caller in this module passes it positionally.
    """
    for key in keys:
        if key not in target_dict:
            yield "%s not found, and is required" % key
def may_have_keys(keys, target_dict):
    """Disallow any key not listed in ``keys`` in an MCU/Board.

    A generator of error strings, one per unexpected key.

    :param: keys the full set of permitted keys
    :param: target_dict the MCU/Board definition to check

    NOTE: parameter renamed from ``dict`` because it shadowed the
    builtin; every caller in this module passes it positionally.
    """
    for key in target_dict.keys():
        if key not in keys:
            yield "%s found, and is not allowed" % key
def check_extra_labels(target_dict):
    """Check that extra_labels does not contain any Target names.

    A generator of error strings.

    NOTE: parameter renamed from ``dict`` because it shadowed the
    builtin; every caller in this module passes it positionally.
    """
    for label in (target_dict.get("extra_labels", []) +
                  target_dict.get("extra_labels_add", [])):
        if label in Target.get_json_target_data():
            yield "%s is not allowed in extra_labels" % label
def check_release_version(target_dict):
    """Verify that release version 5 is combined with support for all
    required toolchains (typo "toolcahins" fixed in the docstring).

    A generator of error strings.

    NOTE: parameter renamed from ``dict`` because it shadowed the
    builtin; every caller in this module passes it positionally.
    """
    if ("release_versions" in target_dict and
            "5" in target_dict["release_versions"] and
            "supported_toolchains" in target_dict):
        for toolc in ["GCC_ARM", "ARM", "IAR"]:
            if toolc not in target_dict["supported_toolchains"]:
                yield ("%s not found in supported_toolchains, and is "
                       "required by mbed OS 5" % toolc)
def check_inherits(target_dict):
    """Forbid multiple inheritance in a target definition.

    A generator of error strings.

    NOTE: parameter renamed from ``dict`` because it shadowed the
    builtin; every caller in this module passes it positionally.
    """
    if "inherits" in target_dict and len(target_dict["inherits"]) > 1:
        yield "multiple inheritance is forbidden"
# Whitelist of capabilities permitted in a target's "device_has" list.
DEVICE_HAS_ALLOWED = ["ANALOGIN", "ANALOGOUT", "CAN", "ETHERNET", "EMAC",
                      "FLASH", "I2C", "I2CSLAVE", "I2C_ASYNCH", "INTERRUPTIN",
                      "LPTICKER", "PORTIN", "PORTINOUT", "PORTOUT",
                      "PWMOUT", "RTC", "TRNG", "SERIAL", "SERIAL_ASYNCH",
                      "SERIAL_FC", "SLEEP", "SPI", "SPI_ASYNCH", "SPISLAVE",
                      "STORAGE", "SYSTICK_CLK_OFF_DURING_SLEEP"]


def check_device_has(target_dict):
    """Check that every ``device_has`` entry is a known capability.

    A generator of error strings.

    NOTE: parameter renamed from ``dict`` because it shadowed the
    builtin; every caller in this module passes it positionally.
    """
    for name in target_dict.get("device_has", []):
        if name not in DEVICE_HAS_ALLOWED:
            yield "%s is not allowed in device_has" % name
# Keys every MCU definition must carry, and the full set it may carry.
MCU_REQUIRED_KEYS = ["release_versions", "supported_toolchains",
                     "c_lib", "public", "inherits", "device_has"]
MCU_ALLOWED_KEYS = ["device_has_add", "device_has_remove", "core",
                    "extra_labels", "features", "features_add",
                    "features_remove", "bootloader_supported", "device_name",
                    "post_binary_hook", "default_toolchain", "config",
                    "extra_labels_add", "extra_labels_remove",
                    "target_overrides"] + MCU_REQUIRED_KEYS


def check_mcu(mcu_json, strict=False):
    """Generate a list of problems with an MCU definition.

    :param: mcu_json the MCU's dict to check
    :param: strict enforce required keys
    """
    errors = list(may_have_keys(MCU_ALLOWED_KEYS, mcu_json))
    if strict:
        errors.extend(must_have_keys(MCU_REQUIRED_KEYS, mcu_json))
    for checker in (check_extra_labels, check_release_version,
                    check_inherits, check_device_has):
        errors.extend(checker(mcu_json))
    if mcu_json.get('public'):
        errors.append("public must be false")
    return errors
# Keys every board definition must carry, and the full set it may carry.
BOARD_REQUIRED_KEYS = ["inherits"]
BOARD_ALLOWED_KEYS = ["supported_form_factors", "is_disk_virtual",
                      "detect_code", "extra_labels", "extra_labels_add",
                      "extra_labels_remove", "public", "config",
                      "forced_reset_timeout", "target_overrides"] + BOARD_REQUIRED_KEYS


def check_board(board_json, strict=False):
    """Generate a list of problems with a board definition.

    :param: board_json the board's dict to check
    :param: strict enforce required keys
    """
    errors = list(may_have_keys(BOARD_ALLOWED_KEYS, board_json))
    if strict:
        errors.extend(must_have_keys(BOARD_REQUIRED_KEYS, board_json))
    for checker in (check_extra_labels, check_inherits):
        errors.extend(checker(board_json))
    return errors
def add_if(target_dict, key, val):
    """Set ``target_dict[key] = val`` only when ``val`` is truthy.

    NOTE: first parameter renamed from ``dict`` because it shadowed the
    builtin; every caller in this module passes it positionally.
    """
    if val:
        target_dict[key] = val
def _split_boards(resolution_order, tgt):
    """Split the resolution order between boards and mcus."""
    mcus = []
    boards = []
    names = iter(resolution_order)
    # Leading entries that look more board-like than MCU-like (more MCU
    # problems than board problems) are boards; the first MCU-like entry
    # and everything after it are MCUs.
    for name in names:
        definition = tgt.json_data[name]
        mcu_errors = len(list(check_mcu(definition, True)))
        board_errors = len(list(check_board(definition, True)))
        if mcu_errors > board_errors:
            boards.append(name)
        else:
            mcus.append(name)
            break
    mcus.extend(names)
    mcus.reverse()
    boards.reverse()
    return mcus, boards
MCU_FORMAT_STRING = {1: "MCU (%s) ->",
2: "Family (%s) -> MCU (%s) ->",
3: "Family (%s) -> SubFamily (%s) -> MCU (%s) ->"}
BOARD_FORMAT_STRING = {1: "Board (%s)",
2: "Module (%s) -> Board (%s)"}
def _generate_hierarchy_string(mcus, boards):
global_errors = []
if len(mcus) < 1:
global_errors.append("No MCUS found in hierarchy")
mcus_string = "??? ->"
elif len(mcus) > 3:
global_errors.append("No name for targets %s" % ", ".join(mcus[3:]))
mcus_string = MCU_FORMAT_STRING[3] % tuple(mcus[:3])
for name in mcus[3:]:
mcus_string += " ??? (%s) ->" % name
else:
mcus_string = MCU_FORMAT_STRING[len(mcus)] % tuple(mcus)
if len(boards) < 1:
global_errors.append("no boards found in hierarchy")
boards_string = "???"
elif len(boards) > 2:
global_errors.append("no name for targets %s" % ", ".join(boards[2:]))
boards_string = BOARD_FORMAT_STRING[2] % tuple(boards[:2])
for name in boards[2:]:
boards_string += " -> ??? (%s)" % name
else:
boards_string = BOARD_FORMAT_STRING[len(boards)] % tuple(boards)
return mcus_string + " " + boards_string, global_errors
def check_hierarchy(tgt):
    """Attempt to assign labels to the target's hierarchy and lint it."""
    resolution_order = copy(tgt.resolution_order_names[:-1])
    mcus, boards = _split_boards(resolution_order, tgt)

    report = {}
    hierarchy_string, hierarchy_errors = _generate_hierarchy_string(
        mcus, boards)
    to_ret = {"hierarchy": hierarchy_string}
    add_if(to_ret, "hierarchy errors", hierarchy_errors)
    # Non-terminal MCUs are linted loosely; the final MCU strictly.
    for name in mcus[:-1]:
        add_if(report, name, list(check_mcu(tgt.json_data[name])))
    if mcus:
        add_if(report, mcus[-1],
               list(check_mcu(tgt.json_data[mcus[-1]], True)))
    # Boards likewise: all loosely, the final one also strictly.
    for name in boards:
        add_if(report, name, list(check_board(tgt.json_data[name])))
    if boards:
        add_if(report, boards[-1],
               list(check_board(tgt.json_data[boards[-1]], True)))
    add_if(to_ret, "target errors", report)
    return to_ret
PARSER = argparse.ArgumentParser(prog="targets/lint.py")
SUBPARSERS = PARSER.add_subparsers(title="Commands")
def subcommand(name, *args, **kwargs):
    # Decorator factory: registers the decorated function as a CLI
    # subcommand.  Each positional arg is a dict of add_argument()
    # parameters with the flag/positional name under the 'name' key.
    def __subcommand(command):
        # The command's docstring becomes the subcommand's help text.
        kwargs['description'] = command.__doc__
        subparser = SUBPARSERS.add_parser(name, **kwargs)
        for arg in args:
            arg = dict(arg)
            opt = arg['name']
            del arg['name']
            if isinstance(opt, basestring):
                # A single positional/flag name.
                subparser.add_argument(opt, **arg)
            else:
                # A tuple of aliases, e.g. ('-v', '--verbose').
                subparser.add_argument(*opt, **arg)
        def _thunk(parsed_args):
            # Map the parsed argparse namespace back onto the wrapped
            # command's keyword arguments, dropping unset options.
            argv = [arg['dest'] if 'dest' in arg else arg['name']
                    for arg in args]
            argv = [(arg if isinstance(arg, basestring)
                     else arg[-1]).strip('-').replace('-', '_')
                    for arg in argv]
            argv = {arg: vars(parsed_args)[arg] for arg in argv
                    if vars(parsed_args)[arg] is not None}
            return command(**argv)
        subparser.set_defaults(command=_thunk)
        return command
    return __subcommand
@subcommand("targets",
            dict(name="mcus", nargs="+", metavar="MCU",
                 choices=TARGET_MAP.keys(), type=str.upper))
def targets_cmd(mcus=None):
    """Find and print errors about specific targets"""
    # FIX: avoid a mutable default argument ([]); None is the sentinel
    # and is normalized below, which is backward compatible because the
    # decorator's thunk omits the parameter entirely when no MCUs parse.
    print(dump_all([check_hierarchy(TARGET_MAP[m]) for m in mcus or []],
                   default_flow_style=False))
@subcommand("all-targets")
def all_targets_cmd():
    """Print all errors about all parts"""
    # Run the hierarchy lint over every known target; dump as YAML docs.
    print(dump_all([check_hierarchy(m) for m in list(TARGET_MAP.values())],
                   default_flow_style=False))
@subcommand("orphans")
def orphans_cmd():
    """Find and print all orphan targets"""
    # FIX: on Python 3 dict.keys() returns a view with no .remove(), so
    # the original crashed with AttributeError; use a real set instead.
    orphans = set(Target.get_json_target_data().keys())
    for tgt in TARGET_MAP.values():
        for name in tgt.resolution_order_names:
            # discard() is a no-op when the name was already removed.
            orphans.discard(name)
    if orphans:
        # Sort for deterministic YAML output.
        print(dump_all([sorted(orphans)], default_flow_style=False))
    return len(orphans)
def main():
    """Entry point: parse the CLI and dispatch to the chosen subcommand."""
    parsed = PARSER.parse_args()
    return parsed.command(parsed)
if __name__ == "__main__":
sys.exit(main())
| {
"repo_name": "adfernandes/mbed",
"path": "tools/targets/lint.py",
"copies": "14",
"size": "10159",
"license": "apache-2.0",
"hash": 2535787793179750000,
"line_mean": 35.6750902527,
"line_max": 87,
"alpha_frac": 0.5983856679,
"autogenerated": false,
"ratio": 3.5471368715083798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A Linux-only demo
#
# For comparison purposes, this is a ctypes version of readdir.py.
import sys
import ctypes
if not sys.platform.startswith('linux'):
raise Exception("Linux-only demo")
# C typedef stand-ins; ino_t/off_t as c_long assumes an LP64 Linux
# platform -- TODO confirm for 32-bit builds.
DIR_p = ctypes.c_void_p
ino_t = ctypes.c_long
off_t = ctypes.c_long
class DIRENT(ctypes.Structure):
    """Mirror of glibc's ``struct dirent``; field layout must match the C ABI."""
    _fields_ = [
        ('d_ino', ino_t), # inode number
        ('d_off', off_t), # offset to the next dirent
        ('d_reclen', ctypes.c_ushort), # length of this record
        ('d_type', ctypes.c_ubyte), # type of file; not supported
                                        # by all file system types
        ('d_name', ctypes.c_char * 256), # filename
    ]
DIRENT_p = ctypes.POINTER(DIRENT)
DIRENT_pp = ctypes.POINTER(DIRENT_p)
# Load the already-linked C library (handle None) and declare argument /
# return types for the directory-walking calls used below.
C = ctypes.CDLL(None)
readdir_r = C.readdir_r
readdir_r.argtypes = [DIR_p, DIRENT_p, DIRENT_pp]
readdir_r.restype = ctypes.c_int
openat = C.openat
openat.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int]
openat.restype = ctypes.c_int
fdopendir = C.fdopendir
fdopendir.argtypes = [ctypes.c_int]
fdopendir.restype = DIR_p
closedir = C.closedir
closedir.argtypes = [DIR_p]
closedir.restype = ctypes.c_int
def walk(basefd, path):
    # Recursively list `path` (opened relative to directory fd `basefd`)
    # using openat()/fdopendir()/readdir_r() via ctypes.  Python 2 syntax.
    # NOTE(review): readdir_r() is deprecated in modern glibc in favour of
    # readdir(); acceptable here since this file is a comparison demo.
    print '{', path
    dirfd = openat(basefd, path, 0)
    if dirfd < 0:
        # error in openat()
        return
    dir = fdopendir(dirfd)
    dirent = DIRENT()
    result = DIRENT_p()
    while True:
        if readdir_r(dir, dirent, result):
            # error in readdir_r()
            break
        if not result:
            break
        name = dirent.d_name
        print '%3d %s' % (dirent.d_type, name)
        # d_type == 4 is DT_DIR; skip '.' and '..' to avoid infinite loops.
        if dirent.d_type == 4 and name != '.' and name != '..':
            walk(dirfd, name)
    closedir(dir)
    print '}'
# "/tmp" is absolute, so openat() ignores the invalid base fd (-1).
walk(-1, "/tmp")
| {
"repo_name": "Peddle/hue",
"path": "desktop/core/ext-py/cffi-1.5.2/demo/readdir_ctypes.py",
"copies": "13",
"size": "1789",
"license": "apache-2.0",
"hash": 8812708136425164000,
"line_mean": 24.9275362319,
"line_max": 71,
"alpha_frac": 0.5818893236,
"autogenerated": false,
"ratio": 3.10051993067591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000879156010230179,
"num_lines": 69
} |
# A Linux-only demo
#
# For comparison purposes, this is a ctypes version of readdir.py.
import sys
import ctypes
if not sys.platform.startswith('linux'):
raise Exception("Linux-only demo")
# C typedef stand-ins; ino_t/off_t as c_long assumes an LP64 Linux
# platform -- TODO confirm for 32-bit builds.
DIR_p = ctypes.c_void_p
ino_t = ctypes.c_long
off_t = ctypes.c_long
class DIRENT(ctypes.Structure):
    """Mirror of glibc's ``struct dirent``; field layout must match the C ABI."""
    _fields_ = [
        ('d_ino', ino_t), # inode number
        ('d_off', off_t), # offset to the next dirent
        ('d_reclen', ctypes.c_ushort), # length of this record
        ('d_type', ctypes.c_ubyte), # type of file; not supported
                                        # by all file system types
        ('d_name', ctypes.c_char * 256), # filename
    ]
DIRENT_p = ctypes.POINTER(DIRENT)
DIRENT_pp = ctypes.POINTER(DIRENT_p)
# Load the already-linked C library (handle None) and declare argument /
# return types for the directory-walking calls used below.
C = ctypes.CDLL(None)
readdir_r = C.readdir_r
readdir_r.argtypes = [DIR_p, DIRENT_p, DIRENT_pp]
readdir_r.restype = ctypes.c_int
openat = C.openat
openat.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int]
openat.restype = ctypes.c_int
fdopendir = C.fdopendir
fdopendir.argtypes = [ctypes.c_int]
fdopendir.restype = DIR_p
closedir = C.closedir
closedir.argtypes = [DIR_p]
closedir.restype = ctypes.c_int
def walk(basefd, path):
    # Recursively list `path` (opened relative to directory fd `basefd`)
    # using openat()/fdopendir()/readdir_r() via ctypes.  Python 2 syntax.
    # NOTE(review): readdir_r() is deprecated in modern glibc in favour of
    # readdir(); acceptable here since this file is a comparison demo.
    print '{', path
    dirfd = openat(basefd, path, 0)
    if dirfd < 0:
        # error in openat()
        return
    dir = fdopendir(dirfd)
    dirent = DIRENT()
    result = DIRENT_p()
    while True:
        if readdir_r(dir, dirent, result):
            # error in readdir_r()
            break
        if not result:
            break
        name = dirent.d_name
        print '%3d %s' % (dirent.d_type, name)
        # d_type == 4 is DT_DIR; skip '.' and '..' to avoid infinite loops.
        if dirent.d_type == 4 and name != '.' and name != '..':
            walk(dirfd, name)
    closedir(dir)
    print '}'
# "/tmp" is absolute, so openat() ignores the invalid base fd (-1).
walk(-1, "/tmp")
| {
"repo_name": "hipnusleo/laserjet",
"path": "resource/pypi/cffi-1.9.1/demo/readdir_ctypes.py",
"copies": "1",
"size": "1858",
"license": "apache-2.0",
"hash": -2683616232218035000,
"line_mean": 24.9275362319,
"line_max": 71,
"alpha_frac": 0.5602798708,
"autogenerated": false,
"ratio": 3.127946127946128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9183959733610297,
"avg_score": 0.0008532530271660706,
"num_lines": 69
} |
"""aliquot parent ref fix
Revision ID: e8b090aa164e
Revises: b06bb829f85d
Create Date: 2020-05-07 12:36:05.140486
"""
from alembic import op
import sqlalchemy as sa
import model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'e8b090aa164e'
down_revision = 'b06bb829f85d'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (e.g. upgrade_rdr)."""
    globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade."""
    globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
    """Make parent_aliquot_id an INTEGER self-referential FK on biobank_aliquot."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('biobank_aliquot', 'parent_aliquot_id',
               type_=mysql.INTEGER)
    # NOTE(review): constraint name None lets the dialect auto-name the FK
    # and the downgrade drops it by name None too -- confirm this resolves
    # under the project's constraint-naming convention.
    op.create_foreign_key(None, 'biobank_aliquot', 'biobank_aliquot', ['parent_aliquot_id'], ['id'])
    # ### end Alembic commands ###
def downgrade_rdr():
    """Revert parent_aliquot_id to VARCHAR(80) and drop the FK."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'biobank_aliquot', type_='foreignkey')
    op.alter_column('biobank_aliquot', 'parent_aliquot_id',
               type_=mysql.VARCHAR(length=80))
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/e8b090aa164e_aliquot_parent_ref_fix.py",
"copies": "1",
"size": "2173",
"license": "bsd-3-clause",
"hash": 6847696966140727000,
"line_mean": 32.953125,
"line_max": 125,
"alpha_frac": 0.7330878969,
"autogenerated": false,
"ratio": 3.6096345514950166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.975424790064025,
"avg_score": 0.01769490955095324,
"num_lines": 64
} |
"""aliquot_specimen_rlims_fk
Revision ID: 69355dfe4d91
Revises: 9a0873b51fe0
Create Date: 2020-05-06 13:43:38.477762
"""
from alembic import op
import sqlalchemy as sa
import model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '69355dfe4d91'
down_revision = '9a0873b51fe0'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (e.g. upgrade_rdr)."""
    globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade."""
    globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
    """Repoint the aliquot FK so specimen_rlims_id references biobank_specimen.rlims_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('biobank_aliquot_ibfk_2', 'biobank_aliquot', type_='foreignkey')
    op.create_foreign_key(None, 'biobank_aliquot', 'biobank_specimen', ['specimen_rlims_id'], ['rlims_id'])
    # ### end Alembic commands ###
def downgrade_rdr():
    """Restore the previous FK definition."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'biobank_aliquot', type_='foreignkey')
    # NOTE(review): the restored constraint maps specimen_rlims_id ->
    # biobank_specimen.order_id, which looks inconsistent with the column
    # name; confirm it matches the actual pre-migration schema.
    op.create_foreign_key('biobank_aliquot_ibfk_2', 'biobank_aliquot', 'biobank_specimen', ['specimen_rlims_id'], ['order_id'])
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/69355dfe4d91_aliquot_specimen_rlims_fk.py",
"copies": "1",
"size": "2186",
"license": "bsd-3-clause",
"hash": 5535463848198537000,
"line_mean": 34.2580645161,
"line_max": 127,
"alpha_frac": 0.7451967063,
"autogenerated": false,
"ratio": 3.46984126984127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.471503797614127,
"avg_score": null,
"num_lines": null
} |
"""ALI-related bricks."""
from theano import tensor
from blocks.bricks.base import Brick, application, lazy
from blocks.bricks.conv import ConvolutionalSequence
from blocks.bricks.interfaces import Initializable, Random
from blocks.select import Selector
class ALI(Initializable, Random):
    """Adversarial learned inference brick.

    Parameters
    ----------
    encoder : :class:`blocks.bricks.Brick`
        Encoder network.
    decoder : :class:`blocks.bricks.Brick`
        Decoder network.
    discriminator : :class:`blocks.bricks.Brick`
        Discriminator network taking :math:`x` and :math:`z` as input.
    """
    def __init__(self, encoder, decoder, discriminator, **kwargs):
        self.encoder = encoder
        self.decoder = decoder
        self.discriminator = discriminator
        super(ALI, self).__init__(**kwargs)
        # Registering sub-bricks as children lets blocks manage their
        # allocation and initialization alongside this brick.
        self.children.extend([self.encoder, self.decoder, self.discriminator])
    @property
    def discriminator_parameters(self):
        # Parameters updated by the discriminator's step rule.
        return list(
            Selector([self.discriminator]).get_parameters().values())
    @property
    def generator_parameters(self):
        # Encoder + decoder jointly act as the "generator" in ALI.
        return list(
            Selector([self.encoder, self.decoder]).get_parameters().values())
    @application(inputs=['x', 'z_hat', 'x_tilde', 'z'],
                 outputs=['data_preds', 'sample_preds'])
    def get_predictions(self, x, z_hat, x_tilde, z, application_call):
        """Score (x, z_hat) pairs against (x_tilde, z) pairs.

        Both pairings are stacked along the batch axis, pushed through
        the discriminator in a single pass and split back apart.
        """
        # NOTE: the unbroadcasts act as a workaround for a weird broadcasting
        # bug when applying dropout
        input_x = tensor.unbroadcast(
            tensor.concatenate([x, x_tilde], axis=0), *range(x.ndim))
        input_z = tensor.unbroadcast(
            tensor.concatenate([z_hat, z], axis=0), *range(x.ndim))
        data_sample_preds = self.discriminator.apply(input_x, input_z)
        # First half of the stacked batch is real data, second half samples.
        data_preds = data_sample_preds[:x.shape[0]]
        sample_preds = data_sample_preds[x.shape[0]:]
        # Expose discriminator accuracy on each half as monitoring channels.
        application_call.add_auxiliary_variable(
            tensor.nnet.sigmoid(data_preds).mean(), name='data_accuracy')
        application_call.add_auxiliary_variable(
            (1 - tensor.nnet.sigmoid(sample_preds)).mean(),
            name='sample_accuracy')
        return data_preds, sample_preds
    @application(inputs=['x', 'z'],
                 outputs=['discriminator_loss', 'generator_loss'])
    def compute_losses(self, x, z, application_call):
        """Return the (discriminator, generator) ALI losses for a batch."""
        z_hat = self.encoder.apply(x)
        x_tilde = self.decoder.apply(z)
        data_preds, sample_preds = self.get_predictions(x, z_hat, x_tilde, z)
        # softplus(-d) == -log(sigmoid(d)): the usual GAN cross-entropy
        # losses written in a numerically stable form.
        discriminator_loss = (tensor.nnet.softplus(-data_preds) +
                              tensor.nnet.softplus(sample_preds)).mean()
        generator_loss = (tensor.nnet.softplus(data_preds) +
                          tensor.nnet.softplus(-sample_preds)).mean()
        return discriminator_loss, generator_loss
    @application(inputs=['z'], outputs=['samples'])
    def sample(self, z):
        """Decode latent samples ``z`` into data-space samples."""
        return self.decoder.apply(z)
    @application(inputs=['x'], outputs=['reconstructions'])
    def reconstruct(self, x):
        """Encode ``x`` and decode it back: the ALI reconstruction."""
        return self.decoder.apply(self.encoder.apply(x))
class COVConditional(Initializable, Random):
    """Change-of-variables conditional.

    Draws Gaussian noise, concatenates it with the input along axis 1
    and maps the result through ``mapping`` to a stochastic output.

    Parameters
    ----------
    mapping : :class:`blocks.bricks.Brick`
        Network mapping the concatenation of the input and a source of
        noise to the output.
    noise_shape : tuple of int
        Shape of the input noise.
    """
    def __init__(self, mapping, noise_shape, **kwargs):
        self.mapping = mapping
        self.noise_shape = noise_shape
        super(COVConditional, self).__init__(**kwargs)
        self.children.extend([self.mapping])
    def get_dim(self, name):
        # Convolutional mappings report (channels, ...) tuples; fully
        # connected ones report flat integers.
        if isinstance(self.mapping, ConvolutionalSequence):
            dim = self.mapping.get_dim(name)
            if name == 'input_':
                # The mapping's input includes the noise channels; the
                # conditional's own input excludes them.
                return (dim[0] - self.noise_shape[0],) + dim[1:]
            else:
                return dim
        else:
            if name == 'output':
                return self.mapping.output_dim
            elif name == 'input_':
                return self.mapping.input_dim - self.noise_shape[0]
            else:
                return self.mapping.get_dim(name)
    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_, application_call):
        # One independent noise draw per example in the batch.
        epsilon = self.theano_rng.normal(
            size=(input_.shape[0],) + self.noise_shape)
        output = self.mapping.apply(
            tensor.concatenate([input_, epsilon], axis=1))
        # Monitoring channels for the output statistics.
        application_call.add_auxiliary_variable(output.mean(), name='avg')
        application_call.add_auxiliary_variable(output.std(), name='std')
        application_call.add_auxiliary_variable(output.min(), name='min')
        application_call.add_auxiliary_variable(output.max(), name='max')
        return output
class GaussianConditional(Initializable, Random):
    r"""Gaussian conditional.

    Uses the reparametrization trick: the mapping predicts
    :math:`\mu` and :math:`\log\sigma`, and the output is
    :math:`\mu + \sigma\epsilon` with unit-Gaussian :math:`\epsilon`.

    Parameters
    ----------
    mapping : :class:`blocks.bricks.Brick`
        Network predicting distribution parameters. It is expected to
        output a concatenation of :math:`\mu` and :math:`\log\sigma`.
    """
    def __init__(self, mapping, **kwargs):
        self.mapping = mapping
        super(GaussianConditional, self).__init__(**kwargs)
        self.children.extend([self.mapping])
    @property
    def _nlat(self):
        # Number of latent variables: half the mapping's output, since
        # that output stacks [mu, log_sigma] (see get_dim below).
        if isinstance(self.mapping, ConvolutionalSequence):
            return self.get_dim('output')[0]
        else:
            return self.get_dim('output')
    def get_dim(self, name):
        # The mapping outputs twice the latent size (mu and log_sigma),
        # so this brick's own output dimension is half of it.
        if isinstance(self.mapping, ConvolutionalSequence):
            dim = self.mapping.get_dim(name)
            if name == 'output':
                return (dim[0] // 2,) + dim[1:]
            else:
                return dim
        else:
            if name == 'output':
                return self.mapping.output_dim // 2
            elif name == 'input_':
                return self.mapping.input_dim
            else:
                return self.mapping.get_dim(name)
    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_, application_call):
        params = self.mapping.apply(input_)
        # Split the predicted parameters along axis 1 into mu / log_sigma.
        mu, log_sigma = params[:, :self._nlat], params[:, self._nlat:]
        sigma = tensor.exp(log_sigma)
        # Reparametrization trick: deterministic transform of unit noise.
        epsilon = self.theano_rng.normal(size=mu.shape)
        output = mu + sigma * epsilon
        # Monitoring channels for the predicted distribution parameters.
        application_call.add_auxiliary_variable(mu.mean(), name='mu_avg')
        application_call.add_auxiliary_variable(mu.std(), name='mu_std')
        application_call.add_auxiliary_variable(mu.min(), name='mu_min')
        application_call.add_auxiliary_variable(mu.max(), name='mu_max')
        application_call.add_auxiliary_variable(sigma.mean(), name='sigma_avg')
        application_call.add_auxiliary_variable(sigma.std(), name='sigma_std')
        application_call.add_auxiliary_variable(sigma.min(), name='sigma_min')
        application_call.add_auxiliary_variable(sigma.max(), name='sigma_max')
        return output
class DeterministicConditional(Initializable, Random):
    """Deterministic conditional.

    Applies ``mapping`` directly; the randomness-free counterpart of
    the other conditionals, kept under the same interface.

    Parameters
    ----------
    mapping : :class:`blocks.bricks.Brick`
        Network producing the output of the conditional.
    """
    def __init__(self, mapping, **kwargs):
        self.mapping = mapping
        super(DeterministicConditional, self).__init__(**kwargs)
        self.children.extend([self.mapping])
    def get_dim(self, name):
        # Dimensions are exactly those of the wrapped mapping.
        return self.mapping.get_dim(name)
    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_, application_call):
        output = self.mapping.apply(input_)
        # Monitoring channels for the output statistics.
        application_call.add_auxiliary_variable(output.mean(), name='avg')
        application_call.add_auxiliary_variable(output.std(), name='std')
        application_call.add_auxiliary_variable(output.min(), name='min')
        application_call.add_auxiliary_variable(output.max(), name='max')
        return output
class XZJointDiscriminator(Initializable):
    """Three-way discriminator.

    Parameters
    ----------
    x_discriminator : :class:`blocks.bricks.Brick`
        Part of the discriminator taking :math:`x` as input. Its
        output will be concatenated with ``z_discriminator``'s output
        and fed to ``joint_discriminator``.
    z_discriminator : :class:`blocks.bricks.Brick`
        Part of the discriminator taking :math:`z` as input. Its
        output will be concatenated with ``x_discriminator``'s output
        and fed to ``joint_discriminator``.
    joint_discriminator : :class:`blocks.bricks.Brick`
        Part of the discriminator taking the concatenation of
        ``x_discriminator``'s and ``z_discriminator``'s outputs
        as input and computing :math:`D(x, z)`.
    """
    def __init__(self, x_discriminator, z_discriminator, joint_discriminator,
                 **kwargs):
        self.x_discriminator = x_discriminator
        self.z_discriminator = z_discriminator
        self.joint_discriminator = joint_discriminator
        super(XZJointDiscriminator, self).__init__(**kwargs)
        # Register sub-bricks so blocks handles their initialization.
        self.children.extend([self.x_discriminator, self.z_discriminator,
                              self.joint_discriminator])
    @application(inputs=['x', 'z'], outputs=['output'])
    def apply(self, x, z):
        """Compute D(x, z) from the two marginal discriminator embeddings."""
        # NOTE: the unbroadcasts act as a workaround for a weird broadcasting
        # bug when applying dropout
        input_ = tensor.unbroadcast(
            tensor.concatenate(
                [self.x_discriminator.apply(x), self.z_discriminator.apply(z)],
                axis=1),
            *range(x.ndim))
        return self.joint_discriminator.apply(input_)
class GAN(Initializable, Random):
    """Generative adversarial networks.

    Parameters
    ----------
    decoder : :class:`blocks.bricks.Brick`
        Decoder network.
    discriminator : :class:`blocks.bricks.Brick`
        Discriminator network.
    """
    def __init__(self, decoder, discriminator, **kwargs):
        self.decoder = decoder
        self.discriminator = discriminator
        super(GAN, self).__init__(**kwargs)
        # Register sub-bricks so blocks handles their initialization.
        self.children.extend([self.decoder, self.discriminator])
    @property
    def discriminator_parameters(self):
        # Parameters updated by the discriminator's step rule.
        return list(
            Selector([self.discriminator]).get_parameters().values())
    @property
    def generator_parameters(self):
        # Only the decoder acts as the generator in a plain GAN.
        return list(
            Selector([self.decoder]).get_parameters().values())
    @application(inputs=['z'], outputs=['x_tilde'])
    def sample_x_tilde(self, z, application_call):
        """Decode ``z`` into a sample and attach monitoring statistics."""
        x_tilde = self.decoder.apply(z)
        application_call.add_auxiliary_variable(x_tilde.mean(), name='avg')
        application_call.add_auxiliary_variable(x_tilde.std(), name='std')
        return x_tilde
    @application(inputs=['x', 'x_tilde'],
                 outputs=['data_preds', 'sample_preds'])
    def get_predictions(self, x, x_tilde, application_call):
        """Discriminator scores for real data and generated samples."""
        # NOTE: the unbroadcasts act as a workaround for a weird broadcasting
        # bug when applying dropout
        data_sample_preds = self.discriminator.apply(
            tensor.unbroadcast(tensor.concatenate([x, x_tilde], axis=0),
                               *range(x.ndim)))
        # First half of the stacked batch is real data, second half samples.
        data_preds = data_sample_preds[:x.shape[0]]
        sample_preds = data_sample_preds[x.shape[0]:]
        application_call.add_auxiliary_variable(
            tensor.nnet.sigmoid(data_preds).mean(), name='data_accuracy')
        application_call.add_auxiliary_variable(
            (1 - tensor.nnet.sigmoid(sample_preds)).mean(),
            name='sample_accuracy')
        return data_preds, sample_preds
    # BUG FIX: the decorator previously declared inputs=['x'] even though
    # the method also takes z (ALI.compute_losses declares both).  The
    # mismatch broke blocks' input bookkeeping for this application.
    @application(inputs=['x', 'z'],
                 outputs=['discriminator_loss', 'generator_loss'])
    def compute_losses(self, x, z, application_call):
        """Return the (discriminator, generator) GAN losses for a batch."""
        x_tilde = self.sample_x_tilde(z)
        data_preds, sample_preds = self.get_predictions(x, x_tilde)
        # softplus(-d) == -log(sigmoid(d)): numerically stable GAN losses.
        discriminator_loss = (tensor.nnet.softplus(-data_preds) +
                              tensor.nnet.softplus(sample_preds)).mean()
        generator_loss = (tensor.nnet.softplus(data_preds) +
                          tensor.nnet.softplus(-sample_preds)).mean()
        return discriminator_loss, generator_loss
    @application(inputs=['z'], outputs=['samples'])
    def sample(self, z):
        """Alias of sample_x_tilde under the conventional 'samples' name."""
        return self.sample_x_tilde(z)
class ConvMaxout(Brick):
    """Convolutional version of the Maxout activation.

    Groups the input channels into ``num_pieces``-sized pieces and takes
    the element-wise maximum within each group, giving
    ``num_channels // num_pieces`` output channels.

    Parameters
    ----------
    num_pieces : int
        Number of linear pieces.
    num_channels : int
        Number of input channels.
    image_size : (int, int), optional
        Input shape. Defaults to ``(None, None)``.
    """
    @lazy(allocation=['num_pieces', 'num_channels'])
    def __init__(self, num_pieces, num_channels, image_size=(None, None),
                 **kwargs):
        super(ConvMaxout, self).__init__(**kwargs)
        self.num_pieces = num_pieces
        self.num_channels = num_channels
        # BUG FIX: image_size was accepted but never stored, so get_dim()
        # crashed with AttributeError when reading self.image_size.
        self.image_size = image_size
    def get_dim(self, name):
        """Report input/output dimensions as (channels, height, width)."""
        if name == 'input_':
            return (self.num_channels,) + self.image_size
        if name == 'output':
            return (self.num_filters,) + self.image_size
        return super(ConvMaxout, self).get_dim(name)
    @property
    def num_filters(self):
        # One output channel per group of num_pieces input channels.
        return self.num_channels // self.num_pieces
    @property
    def num_output_channels(self):
        return self.num_filters
    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        """Apply maxout over channel groups of a (batch, c, h, w) input."""
        # Move channels last so the piece dimension can be split off.
        input_ = input_.dimshuffle(0, 2, 3, 1)
        new_shape = ([input_.shape[i] for i in range(input_.ndim - 1)] +
                     [self.num_filters, self.num_pieces])
        output = tensor.max(input_.reshape(new_shape, ndim=input_.ndim + 1),
                            axis=input_.ndim)
        # Restore the channels-second layout.
        return output.dimshuffle(0, 3, 1, 2)
| {
"repo_name": "IshmaelBelghazi/ALI",
"path": "ali/bricks.py",
"copies": "1",
"size": "13838",
"license": "mit",
"hash": -756317318554520300,
"line_mean": 35.4157894737,
"line_max": 79,
"alpha_frac": 0.6105651106,
"autogenerated": false,
"ratio": 3.9616375608359577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00011278195488721805,
"num_lines": 380
} |
""" ALI related graphs """
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
import scipy
from ali.datasets import GaussianMixtureDistribution
from ali.utils import as_array
def make_2D_latent_view(valid_data,
                        samples_data,
                        gradients_funs=None,
                        densities_funs=None,
                        epoch=None,
                        save_path=None):
    """
    2D views of the latent and visible spaces.

    Draws a 2x2 grid: originals vs. reconstructions (visible space),
    originals vs. samples (visible space), encodings (latent space) and
    prior noise (latent space). Shows the figure, or saves it when
    ``save_path`` is given.

    Parameters
    ----------
    valid_data: dictionary of numpy arrays
        Must hold 'originals', 'labels', 'encodings' and
        'reconstructions' (2D point arrays plus a label vector).
    samples_data: dictionary of numpy arrays
        Must hold 'noise' and 'samples'.
    gradients_funs: dict of functions
        Holds two keys: latent, for the gradients on the latent space
        w.r.t. Z, and visible, for the gradients on the visible space.
        NOTE(review): accepted but currently unused in this function.
    densities_funs: dictionary of functions
        Holds two keys: latent, for the probability density of the latent
        space, and visible, for the probability density on the visible
        space. NOTE(review): accepted but currently unused.
    epoch: int, optional
        Epoch number shown in the subplot titles.
    save_path: str, optional
        When given, the figure is written there instead of shown.
    """
    # Creating figure
    fig = plt.figure()
    # Getting Cmap
    cmap = plt.cm.get_cmap('Spectral', 5)
    # Adding visible subplot
    recons_visible_ax = fig.add_subplot(221, aspect='equal')
    # Train data
    recons_visible_ax.scatter(valid_data['originals'][:, 0],
                              valid_data['originals'][:, 1],
                              c=valid_data['labels'],
                              marker='s', label='originals',
                              alpha=0.3, cmap=cmap)
    recons_visible_ax.scatter(valid_data['reconstructions'][:, 0],
                              valid_data['reconstructions'][:, 1],
                              c=valid_data['labels'],
                              marker='x', label='reconstructions',
                              alpha=0.3,
                              cmap=cmap)
    recons_visible_ax.set_title('Visible space. Epoch {}'.format(str(epoch)))
    # Samples share the axes limits of the reconstruction subplot.
    samples_visible_ax = fig.add_subplot(222, aspect='equal',
                                         sharex=recons_visible_ax,
                                         sharey=recons_visible_ax)
    samples_visible_ax.scatter(valid_data['originals'][:, 0],
                               valid_data['originals'][:, 1],
                               c=valid_data['labels'],
                               marker='s', label='originals',
                               alpha=0.3,
                               cmap=cmap)
    samples_visible_ax.scatter(samples_data['samples'][:, 0],
                               samples_data['samples'][:, 1],
                               marker='o', alpha=0.3, label='samples')
    samples_visible_ax.set_title('Visible space. Epoch {}'.format(str(epoch)))
    # plt.legend(loc="upper left", bbox_to_anchor=[0, 1],
    #           shadow=True, title="Legend", fancybox=True)
    # visible_ax.get_legend()
    # Adding latent subplot
    recons_latent_ax = fig.add_subplot(223, aspect='equal')
    recons_latent_ax.scatter(valid_data['encodings'][:, 0],
                             valid_data['encodings'][:, 1],
                             c=valid_data['labels'],
                             marker='x', label='encodings',
                             alpha=0.3, cmap=cmap)
    recons_latent_ax.set_title('Latent space. Epoch {}'.format(str(epoch)))
    samples_latent_ax = fig.add_subplot(224, aspect='equal',
                                        sharex=recons_latent_ax,
                                        sharey=recons_latent_ax)
    samples_latent_ax.scatter(samples_data['noise'][:, 0],
                              samples_data['noise'][:, 1],
                              marker='o', label='noise',
                              alpha=0.3)
    samples_latent_ax.set_title('Latent space. Epoch {}'.format(str(epoch)))
    # plt.legend(loc="upper left", bbox_to_anchor=[0, 1],
    #           shadow=True, title="Legend", fancybox=True)
    # latent_ax.get_legend()
    plt.tight_layout()
    if save_path is None:
        plt.show()
    else:
        plt.savefig(save_path, transparent=True, bbox_inches='tight')
if __name__ == '__main__':
    # Smoke test: plot a synthetic five-component Gaussian mixture.
    # NOTE(review): under Python 3 `map` returns an iterator, so the
    # `len(means)` call and the repeated iterations below would fail;
    # this script assumes Python 2 list semantics.
    means = map(lambda x: as_array(x), [[0, 0],
                                        [1, 1],
                                        [-1, -1],
                                        [1, -1],
                                        [-1, 1]])
    std = 0.01
    variances = [np.eye(2) * std for _ in means]
    priors = [1.0/len(means) for _ in means]
    gaussian_mixture = GaussianMixtureDistribution(means=means,
                                                   variances=variances,
                                                   priors=priors)
    originals, labels = gaussian_mixture.sample(1000)
    # NOTE(review): the noise is multiplied rather than added, which
    # collapses reconstructions toward zero — confirm '+' was not intended.
    reconstructions = originals * np.random.normal(size=originals.shape,
                                                   scale=0.05)
    encodings = np.random.normal(size=(1000, 2))
    train_data = {'originals': originals, 'labels': labels,
                  'encodings': encodings,
                  'reconstructions': reconstructions}
    valid_data = train_data
    noise = np.random.normal(size=(1000, 2))
    samples = np.random.normal(size=(1000, 2), scale=0.3)
    samples_data = {'noise': noise,
                    'samples': samples}
    #make_2D_latent_view(train_data, valid_data, samples_data)
    # NOTE(review): make_assignement_plots is not defined in this module,
    # so this call raises NameError — confirm the intended function.
    make_assignement_plots(valid_data)
| {
"repo_name": "IshmaelBelghazi/ALI",
"path": "ali/graphing.py",
"copies": "1",
"size": "5528",
"license": "mit",
"hash": -6615574103447395000,
"line_mean": 36.3513513514,
"line_max": 84,
"alpha_frac": 0.5047033285,
"autogenerated": false,
"ratio": 4.236015325670498,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5240718654170498,
"avg_score": null,
"num_lines": null
} |
"""ALI-related training algorithms."""
from collections import OrderedDict
import theano
from blocks.algorithms import GradientDescent, CompositeRule, Restrict
def ali_algorithm(discriminator_loss, discriminator_parameters,
                  discriminator_step_rule, generator_loss,
                  generator_parameters, generator_step_rule):
    """Instantiates a training algorithm for ALI.

    Parameters
    ----------
    discriminator_loss : tensor variable
        Discriminator loss.
    discriminator_parameters : list
        Discriminator parameters.
    discriminator_step_rule : :class:`blocks.algorithms.StepRule`
        Discriminator step rule.
    generator_loss : tensor variable
        Generator loss.
    generator_parameters : list
        Generator parameters.
    generator_step_rule : :class:`blocks.algorithms.StepRule`
        Generator step rule.
    """
    # Differentiate each loss only w.r.t. its own parameter set.
    disc_grads = theano.grad(discriminator_loss, discriminator_parameters)
    gen_grads = theano.grad(generator_loss, generator_parameters)
    all_parameters = discriminator_parameters + generator_parameters
    gradients = OrderedDict()
    for param, grad in zip(all_parameters, disc_grads + gen_grads):
        gradients[param] = grad
    # Restrict each step rule to its own parameters so the two players
    # can use different optimizers within a single GradientDescent.
    step_rule = CompositeRule([
        Restrict(discriminator_step_rule, discriminator_parameters),
        Restrict(generator_step_rule, generator_parameters)])
    return GradientDescent(
        cost=generator_loss + discriminator_loss,
        gradients=gradients,
        parameters=all_parameters,
        step_rule=step_rule)
| {
"repo_name": "IshmaelBelghazi/ALI",
"path": "ali/algorithms.py",
"copies": "1",
"size": "1632",
"license": "mit",
"hash": -468573184335289700,
"line_mean": 36.9534883721,
"line_max": 71,
"alpha_frac": 0.6537990196,
"autogenerated": false,
"ratio": 4.945454545454545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 43
} |
#Alireza.karimi.67@gmail.com
import json
import sqlite3
class DatabaseHandler():
    """Thin sqlite3 wrapper around the sample database ('DataBase.db').

    Table/column and method names (including the historical misspellings
    'Lable' and 'recreats_table_samples') are kept as-is because other
    modules depend on them.
    """
    # Set to True to drop and rebuild the Sample table on startup.
    need_upgrade = False
    conn = None
    def __init__(self):
        self.conn = sqlite3.connect('DataBase.db')
        if self.need_upgrade:
            self.upgrade_tables()
        else:
            self.create_tables()
    def create_tables(self):
        """Create the Sample table if it does not already exist."""
        cursor = self.conn.cursor()
        cursor.execute('''
        CREATE TABLE IF NOT EXISTS Sample(id INTEGER PRIMARY KEY,
        SampleHash TEXT,Lable TEXT)
        ''')
        self.conn.commit()
    def upgrade_tables(self):
        """Drop and recreate the Sample table (destructive)."""
        cursor = self.conn.cursor()
        cursor.execute('''DROP TABLE Sample''')
        # create_tables() commits, covering the DROP above as well.
        self.create_tables()
    def clear_table_Dataset(self):
        """Drop and recreate the Dataset edge table (destructive)."""
        cursor = self.conn.cursor()
        cursor.execute('''DROP TABLE Dataset''')
        cursor.execute('''
        CREATE TABLE IF NOT EXISTS Dataset(id INTEGER PRIMARY KEY, nodefrom INTEGER,
        nodeto INTEGER, nodefromapi TEXT, nodetoapi TEXT,nodeweight INTEGER,sampleid TEXT)
        ''')
        self.conn.commit()
    def insert_a_sample(self, sample_hash, isMal):
        """Insert one sample row; the label is stored as str(isMal)."""
        self.conn.execute('''INSERT INTO Sample(SampleHash,Lable)
        VALUES(?,?)''', (sample_hash, str(isMal)))
        self.conn.commit()
    def select_sample_all(self):
        """Return every Sample row as a list of tuples."""
        cursor = self.conn.cursor()
        query = cursor.execute('SELECT * FROM Sample')
        samples = []
        for row in query:
            samples.append(row)
        return samples
    def select_sample(self, sample_hash):
        """Return the first Sample row with the given hash, or None."""
        cursor = self.conn.cursor()
        query = cursor.execute('SELECT * FROM Sample WHERE SampleHash=?', [sample_hash])
        return query.fetchone()
    def update_sample_lable(self, sample_id, lable):
        """Set the label of samples whose hash matches; return row count."""
        cursor = self.conn.cursor()
        cursor.execute('''UPDATE Sample SET Lable=? WHERE SampleHash LIKE ?''', (lable, sample_id))
        self.conn.commit()
        return cursor.rowcount
    def recreats_table_samples(self):
        """Drop and recreate an empty Sample table (destructive)."""
        cursor = self.conn.cursor()
        cursor.execute('''DROP TABLE Sample''')
        cursor.execute('''
        CREATE TABLE IF NOT EXISTS Sample(id INTEGER PRIMARY KEY,
        SampleHash TEXT,Lable TEXT)
        ''')
        # BUG FIX: the rebuild was never committed (every other mutator
        # in this class commits), leaving the DDL pending.
        self.conn.commit()
| {
"repo_name": "alireza-87/RansomeAnalyzLDA",
"path": "DataBaseHandler/Handler.py",
"copies": "1",
"size": "2334",
"license": "mit",
"hash": 4194801864288765400,
"line_mean": 32.8260869565,
"line_max": 121,
"alpha_frac": 0.5676949443,
"autogenerated": false,
"ratio": 4.370786516853933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5438481461153932,
"avg_score": null,
"num_lines": null
} |
# Alireza.Karimi.67@gmail.com
from __future__ import division
import urllib
import urllib2
from os import walk
import os
import time
from PIL import Image
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from DataBaseHandler import DatabaseHandler
from androdd import dump_all_method
import json
from collections import Counter
import unicodedata
import numpy as np
from shutil import copyfile
import xlsxwriter
class Main():
    """Entry-point helper wiring the database handler to the pipeline."""
    def __init__(self):
        # Opens (or creates) the sqlite sample database.
        self.db = DatabaseHandler()
    def generat(self):
        # NOTE(review): self.constant is never assigned in this class, so
        # calling generat() raises AttributeError — confirm whether a
        # Constant/config helper was meant to be injected here.
        f = []
        for (dirpath, dirnames, filenames) in walk(self.constant.getInputDir()):
            f.extend(filenames)
        return f
def get_all_files_in_directory(directory):
    """Return the names (without paths) of every file under *directory*."""
    collected = []
    for _dirpath, _dirnames, filenames in walk(directory):
        collected += filenames
    return collected
def get_all_files_withpath_in_directory(directory):
    """Return every file under *directory* as '<dirpath>/<name>' strings."""
    paths = []
    for dirpath, _dirnames, filenames in walk(directory):
        # A literal '/' separator is used, matching the original
        # behaviour regardless of platform.
        paths.extend(dirpath + '/' + name for name in filenames)
    return paths
def clean_up_folder(folder):
    """Best-effort delete of every regular file directly inside *folder*.

    Subdirectories are left untouched; individual failures are printed
    and skipped rather than raised.
    """
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        try:
            if not os.path.isfile(path):
                continue
            os.unlink(path)
        except Exception as exc:
            print(exc)
def func_weight_p_op1_op2(sample_mal, sample_mal_1, vector):
    """Weight each opcode pair by P(pair | first opcode) per sample.

    Parameters
    ----------
    sample_mal : list of list of str
        Per sample, the opcode-pair sequence (each entry "op1 op2").
    sample_mal_1 : list of list of str
        Per sample, the single-opcode sequence (parallel to sample_mal).
    vector : iterable of str
        The opcode pairs ("op1 op2") to compute weights for.

    Returns
    -------
    list of dict
        One dict per sample mapping each pair in *vector* to
        count(pair) / count(first opcode), or 0 when the first opcode
        never occurs in that sample.
    """
    cal_class = []
    for idx in range(len(sample_mal)):
        sample_vector = {}
        # Counts of single opcodes and of opcode pairs for this sample.
        singles = Counter(sample_mal_1[idx])
        pairs = Counter(sample_mal[idx])
        for op_seq in vector:
            # BUG FIX: was the Python-2-only statement `print str(op_seq)`,
            # a SyntaxError under Python 3; the parenthesized form prints
            # the same text on both versions.
            print(str(op_seq))
            first_op = op_seq.strip().split()[0]
            pair_count = pairs.get(op_seq, 0)
            single_count = singles.get(first_op, 0)
            # True division (the module imports __future__ division).
            sample_vector[op_seq] = (
                pair_count / single_count if single_count else 0)
        cal_class.append(sample_vector)
    return cal_class
def func_weight_freq(sample_mal):
    """Return per-sample frequency Counters of the opcode sequences."""
    return [Counter(sequence) for sequence in sample_mal]
def write_arff(dataset, class1, class2):
    """Write opcode-pair feature dicts to *dataset* in WEKA ARFF format.

    ``class1`` rows are labelled ``mal`` and ``class2`` rows ``bin``.
    One NUMERIC attribute is emitted per ordered opcode pair drawn from
    the single opcodes seen across both classes (n*n attributes).

    NOTE(review): Python 2 only (``iteritems``, ``print`` statement); the
    file handle is never closed, and the ``image`` matrices are filled
    but never used afterwards.
    """
    # Build the ordered set of distinct single opcodes from both classes,
    # remembering each opcode's index in opcode_bank.
    final_op_set = []
    opcode_bank = {}
    index_helper_x = 0
    seen = set()
    for item in class1:
        for key, value in item.iteritems():
            splitter = key.strip().split()
            if splitter[0] not in seen:
                final_op_set.append(splitter[0])
                opcode_bank[splitter[0]] = index_helper_x
                index_helper_x = index_helper_x + 1
                seen.add(splitter[0])
            if splitter[1] not in seen:
                final_op_set.append(splitter[1])
                opcode_bank[splitter[1]] = index_helper_x
                index_helper_x = index_helper_x + 1
                seen.add(splitter[1])
    for item in class2:
        for key, value in item.iteritems():
            splitter = key.strip().split()
            if splitter[0] not in seen:
                final_op_set.append(splitter[0])
                opcode_bank[splitter[0]] = index_helper_x
                index_helper_x = index_helper_x + 1
                seen.add(splitter[0])
            if splitter[1] not in seen:
                final_op_set.append(splitter[1])
                opcode_bank[splitter[1]] = index_helper_x
                index_helper_x = index_helper_x + 1
                seen.add(splitter[1])
    data_fp = open(dataset, "w")
    data_fp.write('''@RELATION OpcodeSequence
''')
    data_fp.write("\n")
    # One NUMERIC attribute per ordered opcode pair.
    for opc_i in final_op_set:
        for opc_j in final_op_set:
            name = str(opc_i) + str(opc_j)
            data_fp.write("@ATTRIBUTE %s NUMERIC \n" % name)
    data_fp.write("@ATTRIBUTE Class1 {mal,bin} \n")
    data_fp.write("\n")
    data_fp.write("@DATA")
    data_fp.write("\n")
    for item in class1:
        # NOTE(review): image is populated but never read afterwards.
        image = np.array([[0.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))])
        for opc_i in final_op_set:
            for opc_j in final_op_set:
                x = opcode_bank[opc_i]
                y = opcode_bank[opc_j]
                key = str(str(opc_i) + " " + str(opc_j))
                print key
                if key in item:
                    image[x][y] = item[str(opc_i) + " " + str(opc_j)]
                    data_fp.write(str(item[str(opc_i) + " " + str(opc_j)]) + ",")
                else:
                    data_fp.write("0" + ",")
        data_fp.write("mal")
        data_fp.write("\n")
    for item in class2:
        image = np.array([[0.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))])
        for opc_i in final_op_set:
            for opc_j in final_op_set:
                x = opcode_bank[opc_i]
                y = opcode_bank[opc_j]
                key = str(str(opc_i) + " " + str(opc_j))
                print key
                if key in item:
                    image[x][y] = item[str(opc_i) + " " + str(opc_j)]
                    data_fp.write(str(item[str(opc_i) + " " + str(opc_j)]) + ",")
                else:
                    data_fp.write("0" + ",")
        data_fp.write("bin")
        data_fp.write("\n")
def write_arff_n_opcode(i, dataset, class1, class2):
    """Write *class1* ('mal') and *class2* ('bin') feature dicts to
    *dataset* as a WEKA ARFF file.

    Parameters
    ----------
    i : unused
        Kept for signature compatibility with existing callers.
    dataset : str
        Output .arff file path.
    class1, class2 : list of dict
        Per-sample feature dicts; missing features are written as 0.
    """
    # Collect every feature key once, preserving first-seen order.
    final_op_set = []
    seen = set()
    for item in class1:
        for key in item:
            if key not in seen:
                final_op_set.append(key)
                seen.add(key)
    for item in class2:
        for key in item:
            if key not in seen:
                final_op_set.append(key)
                seen.add(key)
    # Use a context manager so the file is flushed and closed (the
    # original handle was leaked, which could drop buffered output).
    with open(dataset, "w") as data_fp:
        data_fp.write('''@RELATION OpcodeSequence
''')
        data_fp.write("\n")
        # Attributes are numbered positionally rather than named.
        for x in range(0, len(final_op_set)):
            name = str(x)
            data_fp.write("@ATTRIBUTE %s NUMERIC \n" % name)
        data_fp.write("@ATTRIBUTE Class1 {mal,bin} \n")
        data_fp.write("\n")
        data_fp.write("@DATA")
        data_fp.write("\n")
        for item in class1:
            for key in final_op_set:
                if key in item:
                    data_fp.write(str(item[key]) + ",")
                else:
                    data_fp.write("0" + ",")
            data_fp.write("mal")
            data_fp.write("\n")
        # BUG FIX: this loop previously iterated class1 again, so the
        # 'bin' rows duplicated the 'mal' samples and class2 was never
        # written (compare write_arff, which iterates class2 here).
        for item in class2:
            for key in final_op_set:
                if key in item:
                    data_fp.write(str(item[key]) + ",")
                else:
                    data_fp.write("0" + ",")
            data_fp.write("bin")
            data_fp.write("\n")
def capture_image(repo,dump_method_dir):
    """Render each APK's opcode-pair weight matrix as a 256x256 PNG.

    For every sample recorded in the database, dumps its methods,
    extracts opcode pairs, converts them to conditional-probability
    weights via func_weight_p_op1_op2 and saves a greyscale image named
    '<sample>.png'.

    NOTE(review): Python 2 only (``print e``, ``iteritems``); im.show()
    opens an external image viewer for every sample.
    """
    db = DatabaseHandler()
    samples = db.select_sample_all()
    vector = []
    sample = []
    sample_name = []
    sample_1 = []
    seen = set()
    for item in samples:
        try:
            # Generate Opcode Seq for every sample
            dump_all_method(repo + item[1], dump_method_dir)
            opcode_sequence = check_opcode(dump_method_dir,2)
            opcode_list1 = check_opcode2(dump_method_dir)
            # Add opcode seq to class belong
            if item[1].endswith(".apk"):
                sample.append(opcode_sequence)
                sample_1.append(opcode_list1)
                sample_name.append(item[1])
                # NOTE(review): this inner loop reuses (shadows) the outer
                # loop variable `item`.
                for item in opcode_sequence:
                    if item not in seen:
                        vector.append(item)
                        seen.add(item)
        except Exception as e:
            # Best-effort: a failing sample is logged and skipped.
            print e
    sample_class = []
    sample_class = func_weight_p_op1_op2(sample, sample_1, vector)
    # Assign a row/column index to every distinct single opcode.
    final_op_set = []
    opcode_bank = {}
    index_helper_x = 0
    seen = set()
    for item in sample_class:
        for key, value in item.iteritems():
            splitter = key.strip().split()
            if splitter[0] not in seen:
                final_op_set.append(splitter[0])
                opcode_bank[splitter[0]] = index_helper_x
                index_helper_x = index_helper_x + 1
                seen.add(splitter[0])
            if splitter[1] not in seen:
                final_op_set.append(splitter[1])
                opcode_bank[splitter[1]] = index_helper_x
                index_helper_x = index_helper_x + 1
                seen.add(splitter[1])
    index_name = 0
    for item in sample_class:
        # Fixed 256x256 canvas; cells without an opcode pair stay 0.
        image = np.array([[0.0 for j in range(256)] for i in range(256)])
        for opc_i in final_op_set:
            for opc_j in final_op_set:
                x = opcode_bank[opc_i]
                y = opcode_bank[opc_j]
                key = str(str(opc_i) + " " + str(opc_j))
                if key in item:
                    image[x][y] = item[str(opc_i) + " " + str(opc_j)]
                else:
                    image[x][y] = 0
        # Rescale to the 0-255 uint8 greyscale range before saving.
        rescaled = (255.0 / image.max() * (image - image.min())).astype(np.uint8)
        im = Image.fromarray(rescaled)
        im.show()
        im.save(str(sample_name[index_name])+'.png', 'PNG')
        index_name = index_name + 1
def opcode_sequence_generator4(repo, dumpMethodDir):
db = DatabaseHandler()
samples = db.select_sample_all()
vector = []
sample_mal = []
sample_bin = []
sample_mal_1 = []
sample_bin_1 = []
sample_bin_name = []
sample_mal_name = []
seen = set()
for item in samples:
try:
# Generate Opcode Seq for every sample
if item[1].endswith(".apk"):
dump_all_method(repo + item[1], dumpMethodDir)
opcode_sequence = check_opcode(dumpMethodDir,2)
opcode_list1 = check_opcode2(dumpMethodDir)
# Add opcode seq to class belong
if item[1].startswith('bin_') and item[1].endswith(".apk"):
sample_bin.append(opcode_sequence)
sample_bin_1.append(opcode_list1)
sample_bin_name.append(item[1])
elif item[1].endswith(".apk"):
sample_mal.append(opcode_sequence)
sample_mal_1.append(opcode_list1)
sample_mal_name.append(item[1])
# Generate a Sequence banck
for item in opcode_sequence:
if item not in seen:
vector.append(item)
seen.add(item)
except Exception as e:
print e
mal_class = []
bin_class = []
mal_class = func_weight_p_op1_op2(sample_mal, sample_mal_1, vector)
bin_class = func_weight_p_op1_op2(sample_bin, sample_bin_1, vector)
write_arff(repo + 'result.arff', mal_class, bin_class)
output_filename = repo + 'resultLDA.txt'
simple_result = repo + 'Expenses01.xlsx'
fp_lda = open(output_filename, "w")
workbook = xlsxwriter.Workbook(simple_result)
worksheet = workbook.add_worksheet()
n_fold = 5
top_edge = []
for i in range(2, 250):
top_edge.append(i)
row_index = 0
for top in top_edge:
total_tp = 0
total_tn = 0
total_fp = 0
total_fn = 0
total_acc = 0
total_tpr = 0
total_fpr = 0
total_final_set = 0
name = "************** TOP" + str(top) + " **************"
fp_lda.write(name)
fp_lda.write('\n')
test_count_mal = int(len(mal_class) / n_fold)
test_count_bin = int(len(bin_class) / n_fold)
p_bin = 0
p_mal = 0
for fold in range(1, n_fold + 1):
train_mal_class = []
train_bin_class = []
test_mal_class = []
test_bin_class = []
test_mal_name = []
test_bin_name = []
for i in range(0, len(bin_class)):
if i >= p_bin * test_count_bin and i < p_bin * test_count_bin + test_count_bin:
test_bin_class.append(bin_class[i])
test_bin_name.append(sample_bin_name[i])
else:
train_bin_class.append(bin_class[i])
p_bin = p_bin + 1
for i in range(0, len(mal_class)):
if i >= p_mal * test_count_mal and i < p_mal * test_count_mal + test_count_mal:
test_mal_class.append(mal_class[i])
test_mal_name.append(sample_mal_name[i])
else:
train_mal_class.append(mal_class[i])
p_mal = p_mal + 1
# calculate MIN mal class for every feature
MIN_total = {}
total_len = len(train_mal_class) + len(train_bin_class)
print "start Calculate Mean Malware Class"
MIN_mal = {}
for feature in vector:
sum_feature = 0
for item in train_mal_class:
if feature in item:
sum_feature = item[feature] + sum_feature
MIN_mal[feature] = sum_feature / len(train_mal_class)
MIN_total[feature] = sum_feature
print "start Calculate Mean Bin Class"
MIN_bin = {}
for feature in vector:
sum_feature = 0
for item in train_bin_class:
if feature in item:
sum_feature = item[feature] + sum_feature
MIN_bin[feature] = sum_feature / len(train_bin_class)
MIN_total[feature] = (MIN_total[feature] + sum_feature) / total_len
print "start Calculate SW"
# Calculate SW
SW = {}
for feature in vector:
sum_feature = 0
for item in train_mal_class:
if feature in item and feature in MIN_mal:
X = item[feature] - MIN_mal[feature]
elif feature in item:
X = item[feature]
elif feature in MIN_mal:
X = MIN_mal[feature]
else:
X = 0
Y = X * X
sum_feature = sum_feature + Y
for item in train_bin_class:
if feature in item and feature in MIN_bin:
X = item[feature] - MIN_bin[feature]
elif feature in item:
X = item[feature]
elif feature in MIN_bin:
X = MIN_bin[feature]
else:
X = 0
Y = X * X
sum_feature = sum_feature + Y
SW[feature] = sum_feature
# Calculate SB
print "start Calculate Mean SB"
malware_persentage = len(train_mal_class) * 100 / total_len
binware_persentage = len(train_mal_class) * 100 / total_len
SB = {}
for features in vector:
if feature in MIN_mal and feature in MIN_bin:
total_mean = MIN_total[features]
SB[features] = (malware_persentage * (MIN_mal[features] - total_mean) * (MIN_mal[features] - total_mean)) + (
binware_persentage * (MIN_bin[features] - total_mean) * (MIN_bin[features] - total_mean))
elif feature in MIN_bin:
total_mean = MIN_total[features]
SB[features] = (malware_persentage * (0 - total_mean) * (0 - total_mean)) + (
binware_persentage * (MIN_bin[features] - total_mean) * (MIN_bin[features] - total_mean))
elif feature in MIN_mal:
total_mean = MIN_total[features]
SB[features] = (malware_persentage * (MIN_mal[features] - total_mean) * (MIN_mal[features] - total_mean)) + (
binware_persentage * (0 - total_mean) * (0 - total_mean))
else:
total_mean = 0
SB[features] = (malware_persentage * (0 - total_mean) * (0 - total_mean)) + (binware_persentage * (0 - total_mean) * (0 - total_mean))
# Calculate ST
print "start Calculate ST"
ST = {}
for item in vector:
if SW[item] != 0:
ST[item] = (SB[item]) / SW[item]
else:
ST[item] = 0
select_top = sorted(ST.iteritems(), key=lambda x: -x[1], reverse=False)[: top]
final_op_set = []
opcode_bank = {}
index_helper_x = 0
seen = set()
for key, value in select_top:
splitter = key.strip().split()
if splitter[0] not in seen:
final_op_set.append(splitter[0])
opcode_bank[splitter[0]] = index_helper_x
index_helper_x = index_helper_x + 1
seen.add(splitter[0])
if splitter[1] not in seen:
final_op_set.append(splitter[1])
opcode_bank[splitter[1]] = index_helper_x
index_helper_x = index_helper_x + 1
seen.add(splitter[1])
len_train = len(train_bin_class) + len(train_mal_class)
test_set_mal = np.zeros((len(test_mal_class), len(final_op_set) * len(final_op_set)))
test_set_bin = np.zeros((len(test_bin_class), len(final_op_set) * len(final_op_set)))
train_set = np.zeros((len_train, len(final_op_set) * len(final_op_set)))
train_lable = []
index_train = 0
for item in train_mal_class:
image = np.array([[1.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))])
for opc_i in final_op_set:
for opc_j in final_op_set:
x = opcode_bank[opc_i]
y = opcode_bank[opc_j]
key = str(str(opc_i) + " " + str(opc_j))
if key in item:
image[x][y] = item[str(opc_i) + " " + str(opc_j)]
else:
image[x][y] = 0
train_set[index_train] = image.flatten()
train_lable.append(1)
index_train = index_train + 1
for item in train_bin_class:
image = np.array([[1.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))])
for opc_i in final_op_set:
for opc_j in final_op_set:
x = opcode_bank[opc_i]
y = opcode_bank[opc_j]
key = str(str(opc_i) + " " + str(opc_j))
if key in item:
image[x][y] = item[str(opc_i) + " " + str(opc_j)]
else:
image[x][y] = 0
train_set[index_train] = image.flatten()
train_lable.append(0)
index_train = index_train + 1
index_test = 0
for item in test_mal_class:
image = np.array([[1.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))])
for opc_i in final_op_set:
for opc_j in final_op_set:
x = opcode_bank[opc_i]
y = opcode_bank[opc_j]
key = str(str(opc_i) + " " + str(opc_j))
if key in item:
image[x][y] = item[str(opc_i) + " " + str(opc_j)]
else:
image[x][y] = 0
test_set_mal[index_test] = image.flatten()
index_test = index_test + 1
index_test = 0
for item in test_bin_class:
image = np.array([[1.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))])
for opc_i in final_op_set:
for opc_j in final_op_set:
x = opcode_bank[opc_i]
y = opcode_bank[opc_j]
key = str(str(opc_i) + " " + str(opc_j))
if key in item:
image[x][y] = item[str(opc_i) + " " + str(opc_j)]
else:
image[x][y] = 0
test_set_bin[index_test] = image.flatten()
index_test = index_test + 1
clf = LinearDiscriminantAnalysis()
clf.fit(train_set, train_lable)
tp = 0
tn = 0
fp = 0
fn = 0
fn_name = []
fp_name = []
index_name = 0
for item in test_set_mal:
result = clf.predict(item.reshape(1, -1))
if result == 1:
tp = tp + 1
else:
fn = fn + 1
fn_name.append(test_mal_name[index_name])
index_name = index_name + 1
index_name = 0
for item in test_set_bin:
result = clf.predict(item.reshape(1, -1))
if result == 0:
tn = tn + 1
else:
fp = fp + 1
fp_name.append(test_bin_name[index_name])
index_name = index_name + 1
acc = (tp + tn) / (tp + tn + fp + fn)
tpr = (tp) / (tp + fn)
fpr = (fp) / (fp + tn)
fp_lda.write('\n')
fp_lda.write('TP : ' + str(tp))
fp_lda.write('\n')
fp_lda.write('TN : ' + str(tn))
fp_lda.write('\n')
fp_lda.write('FP : ' + str(fp))
fp_lda.write('\n')
fp_lda.write('FN : ' + str(fn))
fp_lda.write('\n')
fp_lda.write('ACC : ' + str(acc))
fp_lda.write('\n')
fp_lda.write('LEN : ' + str(len(final_op_set)))
fp_lda.write('\n')
for item in fp_name:
fp_lda.write('fp_name : ' + str(item))
fp_lda.write('\n')
for item in fn_name:
fp_lda.write('fn_name : ' + str(item))
fp_lda.write('\n')
total_tp = total_tp + tp
total_tn = total_tn + tn
total_fp = total_fp + fp
total_fn = total_fn + fn
total_acc = total_acc + acc
total_tpr = total_tpr + tpr
total_fpr = total_fpr + fpr
total_final_set = len(final_op_set) + total_final_set
col_index = 0
worksheet.write(row_index, col_index, total_tp / fold)
col_index = col_index + 1
worksheet.write(row_index, col_index, total_fp / fold)
col_index = col_index + 1
worksheet.write(row_index, col_index, total_tn / fold)
col_index = col_index + 1
worksheet.write(row_index, col_index, total_fn / fold)
col_index = col_index + 1
worksheet.write(row_index, col_index, total_tpr / fold)
col_index = col_index + 1
worksheet.write(row_index, col_index, total_fpr / fold)
col_index = col_index + 1
worksheet.write(row_index, col_index, total_acc / fold)
col_index = col_index + 1
worksheet.write(row_index, col_index, top)
col_index = col_index + 1
worksheet.write(row_index, col_index, total_final_set / fold)
col_index = col_index + 1
row_index = row_index + 1
def opcode_sequence_generator5(repo, dumpMethodDir):
    """Generate n-gram opcode ARFF datasets (n = 2..10) from all DB samples.

    Each APK recorded in the samples table is disassembled into
    dumpMethodDir and its opcode n-grams are routed to the benign list
    ('bin_' filename prefix) or the malware list; one ARFF file per n is
    written under repo.

    NOTE(review): every APK is re-dumped once per n value (9 times), and
    'vector'/'seen' are built but never used after the loop -- confirm both
    are intentional.
    """
    db = DatabaseHandler()
    samples = db.select_sample_all()
    for i in range(2,11):
        vector = []
        sample_mal = []
        sample_bin = []
        seen = set()
        for item in samples:
            try:
                if item[1].endswith(".apk"):
                    dump_all_method(repo + item[1], dumpMethodDir)
                    opcode_sequence = check_opcode(dumpMethodDir,i)
                    # Route the sequences to the class the sample belongs to.
                    if item[1].startswith('bin_') and item[1].endswith(".apk"):
                        sample_bin.append(opcode_sequence)
                    elif item[1].endswith(".apk"):
                        sample_mal.append(opcode_sequence)
                    # Keep an order-preserving bank of distinct sequences.
                    # NOTE(review): this inner loop rebinds 'item', clobbering
                    # the current sample row for the rest of the iteration.
                    for item in opcode_sequence:
                        if item not in seen:
                            vector.append(item)
                            seen.add(item)
            except Exception as e:
                print e
        write_arff_n_opcode(i,repo +str(i)+ '_result.arff', sample_mal, sample_bin)
def opcode_sequence_generator6(repo, dumpMethodDir):
    """Dump each APK once and collect its opcode n-grams for all n in 2..10.

    Unlike opcode_sequence_generator5, each sample is disassembled a single
    time; per-sample lists (index 0 -> n=2, ..., index 8 -> n=10) are stored
    in a benign bank and a malware bank, then one weighted ARFF per n is
    written.  'vector'/'seen' are declared but unused here.
    """
    db = DatabaseHandler()
    samples = db.select_sample_all()
    vector = []
    sample_bin_banck = []
    sample_mal_banck = []
    seen = set()
    for item in samples:
        sample_mal = []
        sample_bin = []
        type = 1
        try:
            if item[1].endswith(".apk"):
                dump_all_method(repo + item[1], dumpMethodDir)
                for i in range(2,11):
                    opcode_sequence = check_opcode(dumpMethodDir, i)
                    # Route the n-gram list to the sample's class.
                    if item[1].startswith('bin_') and item[1].endswith(".apk"):
                        sample_bin.append(opcode_sequence)
                        type = 1
                    elif item[1].endswith(".apk"):
                        sample_mal.append(opcode_sequence)
                        type = 2
            # NOTE(review): 'type' shadows the builtin, and for rows that are
            # not .apk files it keeps its default of 1, so an empty list is
            # appended to the benign bank -- confirm non-.apk rows cannot
            # occur here.
            if type == 1:
                sample_bin_banck.append(sample_bin)
            else:
                sample_mal_banck.append(sample_mal)
            clean_up_folder(dumpMethodDir)
        except Exception as e:
            print e
    for x in range(0,9):
        sample_mal_1 = []
        sample_bin_1 = []
        # Column x of each bank holds every sample's (x+2)-gram list.
        for y in range(0,len(sample_bin_banck)):
            sample_bin_1.append(sample_bin_banck[y][x])
        for y in range(0, len(sample_mal_banck)):
            sample_mal_1.append(sample_mal_banck[y][x])
        mal_class_w = func_weight_freq(sample_mal_1)
        bin_class_w = func_weight_freq(sample_bin_1)
        write_arff_n_opcode(x+2,repo +str(x+2)+ '_result.arff', mal_class_w, bin_class_w)
def scan_with_virus_total(path, db=None):
    """Submit the hash of every file under 'path' to the VirusTotal API.

    File names are expected to be '<hash>.<ext>'; the part before the first
    dot is sent as the resource hash.  'db' is accepted but currently unused.
    """
    files = get_all_files_in_directory(path)
    for afile in files:
        try:
            if '.DS_Store' not in afile:
                make_virus_total_request(afile.split('.')[0])
        except Exception as e:
            print e
def make_virus_total_request(hash, db=None):
    """Query the VirusTotal file/report endpoint for a single hash.

    Returns parse()'s result (the detection count, or 0 when unknown to VT)
    or the string 'Forbidden' on any failure (typically rate limiting).

    NOTE(review): 'hash' shadows the builtin, and the API key is a
    placeholder ('YOUR_LEY') -- must be replaced before use.
    """
    try:
        params = {'apikey': 'YOUR_LEY', 'resource': hash}
        data = urllib.urlencode(params)
        result = urllib2.urlopen('https://www.virustotal.com/vtapi/v2/file/report', data)
        jdata = json.loads(result.read())
        return parse(jdata, hash)
    except Exception as e:
        print e
        return 'Forbidden'
def parse(it, md5, verbose=True, jsondump=True):
    """Extract the detection count from a decoded VirusTotal report.

    it: dict decoded from the VT file/report JSON response.
    md5: sample hash, used only in the not-found message.
    verbose/jsondump: kept for call-site compatibility; currently unused.

    Returns 0 when the resource is unknown to VT ('response_code' == 0),
    otherwise the number of engines that flagged it ('positives').
    """
    if it['response_code'] == 0:
        # Parenthesized single-argument print is valid in Python 2 and 3.
        print(md5 + " -- Not Found in VT")
        return 0
    return it['positives']
def check_opcode(path_to_dir, n):
    """Collect the opcode stream from every '.ag' dump under path_to_dir and
    return the list of space-separated n-gram opcode sequences.

    path_to_dir: directory containing disassembly dumps ('.ag' files).
    n: n-gram size; the original only supported 2..10 via a 9-branch chain,
       this version works for any n >= 1.
    """
    full_address = path_to_dir.strip('\n')
    list_files = get_all_files_withpath_in_directory(full_address)
    list_general = []
    for temp_file in list_files:
        try:
            if temp_file.endswith('.ag'):
                list_opcode = []
                print(temp_file)
                # 'with' closes the dump file; the original leaked the handle.
                with open(temp_file) as file_open:
                    for m in file_open:
                        word = m.strip().split()
                        # Opcode lines start with a decimal offset; the
                        # mnemonic is the third field.  Bug fix: the original
                        # tested len(word) >= 2 and then read word[2], which
                        # raised IndexError on two-field lines and aborted the
                        # rest of the file via the broad except below.
                        if word and word[0][:1] in '0123456789' and len(word) >= 3:
                            list_opcode.append(word[2])
                            list_general.append(word[2])
                print(list_opcode)
        except Exception as e:
            print(e)
    # ' '.join over a sliding slice replaces the original if/elif chain for
    # each n in 2..10 and generalizes the function to any n.
    list_opcode_sequence = []
    for start in range(len(list_general) - n + 1):
        list_opcode_sequence.append(' '.join(list_general[start:start + n]))
    return list_opcode_sequence
def check_opcode2(path_to_dir):
    """Collect the raw opcode stream from every '.ag' dump under path_to_dir.

    Returns all opcodes concatenated in file order, without n-gram grouping
    (see check_opcode for the n-gram variant).
    """
    full_address = path_to_dir.strip('\n')
    list_files = get_all_files_withpath_in_directory(full_address)
    list_general = []
    for temp_file in list_files:
        try:
            if temp_file.endswith('.ag'):
                list_opcode = []
                print(temp_file)
                # 'with' closes the dump file; the original leaked the handle.
                with open(temp_file) as file_open:
                    for m in file_open:
                        word = m.strip().split()
                        # Opcode lines start with a decimal offset; the
                        # mnemonic is the third field.  Bug fix: the original
                        # tested len(word) >= 2 and then read word[2], raising
                        # IndexError on two-field lines and skipping the rest
                        # of the file.
                        if word and word[0][:1] in '0123456789' and len(word) >= 3:
                            list_opcode.append(word[2])
                            list_general.append(word[2])
                print(list_opcode)
        except Exception as e:
            print(e)
    return list_general
def fill_samples_table(repo):
    """Recreate the samples table and register every file found in 'repo'.

    Labels are inserted empty; update_samples_label() fills them in later.
    macOS '.DS_Store' entries are skipped.
    """
    db = DatabaseHandler()
    db.recreats_table_samples()
    files = get_all_files_in_directory(repo)
    for afile in files:
        try:
            if '.DS_Store' not in afile:
                db.insert_a_sample(afile, '')
        except Exception as e:
            print e
def update_samples_label(repo):
    """Label every DB sample via VirusTotal and sort it into a bucket folder.

    For each sample the VT query is retried until it is not rate limited
    ('Forbidden' -> sleep 120 s, retry).  The detection count is stored in
    the DB and the file is copied into repo/0, /1, /5, /10 or /more
    according to how many engines flagged it.
    """
    db = DatabaseHandler()
    samples = db.select_sample_all()
    for item in samples:
        isSend = False
        while not isSend:
            # item[1] is the file name; the hash is the part before the dot.
            lable = make_virus_total_request(item[1].split('.')[0])
            if 'Forbidden' != lable:
                # Normalize to plain ASCII for the DB layer.
                shash = unicodedata.normalize('NFKD', item[1]).encode('ascii', 'ignore')
                rowcount = db.update_sample_lable(shash, lable)
                print item[0], ' -> ', item[1], " : ", lable, ' RowCount : ', str(rowcount)
                # Bucket the sample by detection count.
                if (int(lable) == 0):
                    copyfile(repo + item[1], repo + "0/" + item[1])
                elif (int(lable) == 1):
                    copyfile(repo + item[1], repo + "1/" + item[1])
                elif int(lable) > 1 and int(lable) <= 5:
                    copyfile(repo + item[1], repo + "5/" + item[1])
                elif int(lable) > 5 and int(lable) <= 10:
                    copyfile(repo + item[1], repo + "10/" + item[1])
                else:
                    copyfile(repo + item[1], repo + "more/" + item[1])
                isSend = True
            else:
                # Rate limited: wait two minutes before retrying this sample.
                print item[0], ' -> ', item[1], ' : Forbidden'
                time.sleep(120)
def n_opcode_progress(repo, dump_Method_dir):
    """Register all samples, then build the per-n weighted opcode datasets."""
    fill_samples_table(repo)
    opcode_sequence_generator6(repo, dump_Method_dir)
def run_whole_process(repo, dump_Method_dir):
    """Register all samples, then run the generator-4 dataset pipeline."""
    fill_samples_table(repo)
    opcode_sequence_generator4(repo, dump_Method_dir)
def menu_select():
    """Interactive entry point: print the tool menu and dispatch the choice.

    Paths are hard-coded to the author's machine; adjust 'repo' and
    'dump_Method_dir' before use.
    """
    db = DatabaseHandler()
    repo = '/Users/midnightgeek/Repo/11/l12/'
    dump_Method_dir = '/Users/midnightgeek/Tools/test2'
    # Parenthesized single-argument print is valid in Python 2 and 3.
    print('********* DataSet Generator *********')
    print('Enter 1 For Run LDA')
    print('Enter 2 For Fill Samples Table')
    print('Enter 3 For Lable Sample With VT Api')
    print('Enter 4 For Clear Samples Table')
    print('Enter 5 For capture Image')
    print('Enter 6 For Run n-opcode')
    menu = raw_input("Enter Number : ")
    if menu == '1':
        run_whole_process(repo, dump_Method_dir)
    elif menu == '2':
        # Bug fix: fill_samples_table() takes only the repo path; passing
        # dump_Method_dir as a second argument raised TypeError.
        fill_samples_table(repo)
    elif menu == '3':
        update_samples_label(repo)
    elif menu == '4':
        db.clear_table_samples()
    elif menu == '5':
        fill_samples_table(repo)
        capture_image(repo, dump_Method_dir)
    elif menu == '6':
        n_opcode_progress(repo, dump_Method_dir)
    else:
        print('Wrong Number')
# Script entry point: show the interactive menu.
if __name__ == '__main__':
    menu_select()
| {
"repo_name": "alireza-87/RansomeAnalyzLDA",
"path": "Main.py",
"copies": "1",
"size": "35006",
"license": "mit",
"hash": -5922272111662597000,
"line_mean": 38.2830840046,
"line_max": 323,
"alpha_frac": 0.479146432,
"autogenerated": false,
"ratio": 3.7165304172417453,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9688649397611129,
"avg_score": 0.0014054903261232906,
"num_lines": 869
} |
#@AlirezaKarimi
#alireza.karimi.67@gmail.com
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import ScreenManager, Screen
from game_engine import Engine
from kivy.app import App
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.uix.widget import Widget
from kivy.vector import Vector
from win_popup import WinAlarm
sm = ScreenManager()
class SettingsScreen(Screen):
    """Placeholder settings screen; its layout lives in the .kv file."""
    pass
class MainScreen(Screen):
    """Main menu screen; its layout lives in the .kv file."""
    pass
class GameScreen(Screen):
    """Screen that hosts the PongGame widget and serves the first ball."""
    def __init__(self, **kwargs):
        super(GameScreen, self).__init__(**kwargs)
        self.game = PongGame()
        self.game.serve_ball()
        self.add_widget(self.game)
    def start_game(self):
        # Delegate to the game widget, which starts the engine update loop.
        self.game.start_game()
class PongGame(Widget):
    """Root game widget: owns the ball, both paddles and the update loop."""
    ball = ObjectProperty(None)
    player1 = ObjectProperty(None)
    player2 = ObjectProperty(None)
    # Engine that drives the per-frame update() callback.
    game_engine = Engine()
    def open_main(self):
        """Stop play, reset both scores and return to the main screen."""
        self.game_engine.stop()
        self.player1.score = 0
        self.player2.score = 0
        self.serve_ball()
        sm.current = 'MainScreen'
    def start_game(self):
        """Start the engine; update() will then be called each frame."""
        self.game_engine.start(self.update)
    def stop_game(self):
        """Pause the engine; the scene keeps its current state."""
        self.game_engine.stop()
    def restart_game(self):
        """Zero both scores, re-serve the ball and resume play."""
        self.player1.score = 0
        self.player2.score = 0
        self.serve_ball()
        self.start_game()
    def serve_ball(self, vel=(4, 0)):
        """Center the ball and give it the serve velocity."""
        self.ball.center = self.center
        self.ball.velocity = vel
    def update(self, dt):
        """Per-frame step: move the ball, handle bounces, scoring and wins."""
        self.ball.move()
        self.player1.bounce_ball(self.ball)
        self.player2.bounce_ball(self.ball)
        # Bounce off the top and bottom walls.
        if (self.ball.y < self.y) or (self.ball.top > self.top):
            self.ball.velocity_y *= -1
        # Ball left the field: the other player scores and the ball re-serves.
        if self.ball.x < self.x:
            self.player2.score += 1
            self.serve_ball(vel=(4, 0))
        if self.ball.x > self.width:
            self.player1.score += 1
            self.serve_ball(vel=(-4, 0))
        # First to 1 point wins; show the play-again popup.
        if self.player1.score == 1:
            self.stop_game()
            content = WinAlarm()
            content.bind(on_answer=self.alarm_answer)
            self.popup = Popup(title="Player 1 Win the game", content=content, size_hint=(None, None), size=(300, 200),
                               auto_dismiss=False)
            self.popup.open()
        elif self.player2.score == 1:
            self.stop_game()
            content = WinAlarm()
            content.bind(on_answer=self.alarm_answer)
            self.popup = Popup(title="Player 2 Win the game", content=content, size_hint=(None, None), size=(300, 200),
                               auto_dismiss=False)
            self.popup.open()
    def on_touch_move(self, touch):
        """Drag inside the outer thirds of the field to move the paddles."""
        if touch.x < self.width /3:
            self.player1.center_y = touch.y
        if touch.x > self.width - self.width / 3:
            self.player2.center_y = touch.y
    def alarm_answer(self, instance, play_again):
        """Popup callback: restart the match or go back to the main menu."""
        if play_again == "yes":
            self.popup.dismiss()
            self.restart_game()
        elif play_again == "no":
            self.popup.dismiss()
            sm.current = 'MainScreen'
class PongPaddle(Widget):
    """A player's paddle: keeps the score and reflects the ball on contact."""
    score = NumericProperty(0)
    def bounce_ball(self, ball):
        """Reverse and speed up the ball when it overlaps this paddle.

        The vertical offset from the paddle center is mixed into the new
        velocity, so edge hits deflect more steeply.
        """
        if self.collide_widget(ball):
            speed_up = 1.1
            offset = 0.02 * Vector(0, ball.center_y - self.center_y)
            ball.velocity = speed_up * (offset - ball.velocity)
class PongBall(Widget):
    """The ball; its velocity is a 2-vector property updated by PongGame."""
    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    # Composite property so the ball can be moved with vector arithmetic.
    velocity = ReferenceListProperty(velocity_x, velocity_y)
    def move(self):
        """Advance one frame: position += velocity."""
        self.pos = Vector(*self.velocity) + self.pos
class PongApp(App):
    """Application object: builds the screen graph and shows the main menu."""
    def build(self):
        sm.add_widget(MainScreen(name='MainScreen'))
        sm.add_widget(GameScreen(name='GameScreen'))
        # NOTE(review): registered as 'SettingScreen' (no 's'); navigation
        # code must use that exact name.
        sm.add_widget(SettingsScreen(name='SettingScreen'))
        sm.current = 'MainScreen'
        return sm
# Script entry point: launch the Kivy application.
if __name__ == '__main__':
    PongApp().run()
| {
"repo_name": "alireza-87/KivyPongGame",
"path": "game/main.py",
"copies": "1",
"size": "3936",
"license": "mit",
"hash": -6753706001877033000,
"line_mean": 26.914893617,
"line_max": 119,
"alpha_frac": 0.5945121951,
"autogenerated": false,
"ratio": 3.561990950226244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4656503145326244,
"avg_score": null,
"num_lines": null
} |
alist = [54,26,93,17,77,31,44,55,21]

def bubbleSort(blist):
    """Sort blist in place (ascending) with bubble sort.

    Bug fix: the original body sorted the module-level 'alist' and ignored
    its 'blist' parameter entirely, so calling it on any other list silently
    did nothing to that list.
    """
    for i in range(len(blist) - 1, 0, -1):
        for j in range(i):
            if blist[j] > blist[j + 1]:
                blist[j], blist[j + 1] = blist[j + 1], blist[j]

bubbleSort(alist)
# Parenthesized single-argument print is valid in Python 2 and 3.
print(alist)
def selectionSort(alist):
    """Sort alist in place (ascending) with selection sort.

    On each pass the largest element of the unsorted prefix is found and
    swapped to the end of that prefix.
    """
    for i in range(len(alist) - 1, 0, -1):
        positionOfMax = 0
        for j in range(1, i + 1):
            if alist[j] > alist[positionOfMax]:
                positionOfMax = j
        alist[i], alist[positionOfMax] = alist[positionOfMax], alist[i]

alist = [59,26,93,17,77,31,44,55,2,99]
selectionSort(alist)
# Parenthesized single-argument print is valid in Python 2 and 3.
print(alist)
def quickSort(alist, first, last):
    """Sort alist[first..last] in place with partition-and-recurse quicksort."""
    if first < last:
        splitpoint = partition(alist, first, last)
        quickSort(alist, first, splitpoint - 1)
        quickSort(alist, splitpoint + 1, last)

def partition(alist, first, last):
    """Partition alist[first..last] around the pivot alist[first].

    Returns the pivot's final index; elements left of it are <= pivot,
    elements right of it are >= pivot.
    """
    pivotvalue = alist[first]
    leftmark = first + 1
    rightmark = last
    done = False
    while not done:
        # Advance both marks toward each other until they cross or each
        # finds an out-of-place element.
        while leftmark <= rightmark and alist[leftmark] <= pivotvalue:
            leftmark = leftmark + 1
        while alist[rightmark] >= pivotvalue and rightmark >= leftmark:
            rightmark = rightmark - 1
        if rightmark < leftmark:
            done = True
        else:
            alist[leftmark], alist[rightmark] = alist[rightmark], alist[leftmark]
    # Drop the pivot into its final slot.
    alist[first], alist[rightmark] = alist[rightmark], alist[first]
    return rightmark

alist = [54,26,93,17,77,31,44,55,21, 23]
quickSort(alist, 0, len(alist) - 1)
# Parenthesized single-argument print is valid in Python 2 and 3.
print(alist)
'''
def qsort(array=[12,4,5,6,7,3,1,15,15]):
less = []
equal = []
greater = []
if len(array) > 1:
pivot = array[0]
for x in array:
if x < pivot:
less.append(x)
if x == pivot:
equal.append(x)
if x > pivot:
greater.append(x)
# Don't forget to return something!
return qsort(less)+equal+qsort(greater) # Just use the + operator to join lists
# Note that you want equal ^^^^^ not pivot
else: # You need to hande the part at the end of the recursion - when you only have one element in your array, just return the array.
return array
#print qsort()
''' | {
"repo_name": "behappycc/USB-Driver",
"path": "test/11/Sort.py",
"copies": "1",
"size": "2189",
"license": "mit",
"hash": -8175243303112940000,
"line_mean": 26.375,
"line_max": 138,
"alpha_frac": 0.5883965281,
"autogenerated": false,
"ratio": 3.20497803806735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42933745661673495,
"avg_score": null,
"num_lines": null
} |
#a list is a container for your infomation
#names of thing, text
#each is a variable
attendees = ['Shannon', 'Jenn', 'Grace']
#empty list
coolpeople = []
print attendees[0] # Shannon
print attendees[1] # Jenn
print attendees[2] # Grace
print attendees[0:2] # Shannon, Jenn
#why does it tells it's a list for [0:2], mulitple items return back in a list
#print attendees[3] yeilds an error since there is no 3 in the list
#lot of ways to find the length our list
#first way
print len(attendees)
#second way
number_of_attendees = len(attendees)
print number_of_attendees
#append adds an item to the end
#this is the code for it list.append()
#start with an empty list
attendees_ages = []
attendees_ages.append(28)
print attendees_ages
attendees_ages.append(27)
print attendees_ages
#before we used sclining with print to say show me item with slice numbers, now we can change the list using slice number
days_of_week = ['Monday', 'Tuesday']
days_of_week.append('Wednesday')
days_of_week.append('Thursday')
days_of_week.append('Friday')
days_of_week.append('Saturday')
days_of_week.append('Sunday')
print days_of_week
print len(days_of_week)
#removing items from the list
#pop takes it out
day = days_of_week.pop()
print day
#you get sunday
#always starts from the top just like plates
#by default it pops of the top
print days_of_week
#we popped it off and saved it to a variable
days = days_of_week.pop(3)
print days
#we can remove from wherever we want so the above takes off Thursday
#can you pop off into a list?
daysq = ['M', 'T', 'W', 'R', 'F', 'S', 'N']
dayq = daysq.pop()
print dayq
months = ['January', 'February']
months.extend(['March', 'April'])
print months
#list.append() adds one to the end
#list.extend() adds many
#Remove the first month
months.pop(0)
# Insert 'January' before index 0
months.insert(0, 'January')
print months
#address example
address = "1133 19th St NW Washington, DC 20036"
print address
#now let's split on the spaces
address_as_list = address.split(" ")
print address_as_list
#membership
#The in keyword allows you to check whether a value exists in the list
python_class = ["Jess", "Joy", "Ashley"]
'ann' in 'Shannon'
'Frankenstein' in python_class
#range 0 to 5
print range(5)
#range (start, stop) from start to stop
range(5, 10)
#now lets loopl through this
for number in range(10):
print number
#so instead of the repeating code from earlier we can use loops
days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for day in days_of_week:
print day
#we can also this for numbers and use our formatting form before
for week in range(1, 5):
print "Week {0}".format(week)
#we can nest loops so the we can do both
for week in range(1, 5):
print "Week {0}".format(week)
for day in days_of_week:
print day
#or even nest this deeper
for month in months_in_year:
print month
for week in range(1, 5):
print "Week {0}".format(week)
for day in days_of_week:
print day
#enumerate() is a function that you use with a for loop to get the index (position) of that list item, too.
#remember the class excerise where we all stoop up and told our positions
#zip() is a function that you use with a for loop to use each item in multiple lists all at once
#you can bring them together
#while loops as long as the answer to the question is yes
#we provide a way for the while loop to end
#conditionals are key to the while loop
#while loops you need a yes or no question
if bread >= 2:
print "I'm making a sandwich"
while bread >= 2:
print "I'm making a sandwich"
| {
"repo_name": "JessicaGarson/Hear-Me-Code-TA-notes",
"path": "TAnoteslesson2.py",
"copies": "1",
"size": "3562",
"license": "unlicense",
"hash": 5161256686306035000,
"line_mean": 21.9677419355,
"line_max": 121,
"alpha_frac": 0.7174157303,
"autogenerated": false,
"ratio": 2.9300411522633745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41474568825633745,
"avg_score": null,
"num_lines": null
} |
# A list isn't the most efficient way to store a potentially sparse set of memory segments, but then
# again this isn't exactly designed for huge programs.
from debugwire import DWException
MAX_ADDRESS = 0xffff

class memlist(list):
    """A flat memory image backed by a plain list; gaps are stored as None."""

    def write(self, offset, values):
        """Copy 'values' into the image starting at 'offset'.

        The list is grown (None-padded) as needed.  Raises DWException when
        the write would extend past MAX_ADDRESS.
        """
        end = offset + len(values)
        if end > MAX_ADDRESS:
            raise DWException("Binary is too large.")
        if len(self) < end:
            self.extend([None] * (end - len(self)))
        self[offset:end] = list(values)
def parse_hex(f):
    """Parse an Intel HEX stream (iterable of bytes lines) into a memlist.

    Supports record types 0x00 (data) and 0x01 (EOF); any other record, or a
    malformed prefix, length or checksum, raises DWException.
    """
    mem = memlist()
    for line in f:
        if line[0:1] != b":":
            raise DWException("Invalid hex line prefix")
        lb = bytes.fromhex(line.decode("ascii").strip(":\r\n"))
        count = lb[0]
        # A record is count + 5 framing bytes (count, addr hi/lo, type, cksum).
        if count + 5 != len(lb):
            raise DWException("Invalid hex line length")
        addr = (lb[1] << 8) | lb[2]
        rtype = lb[3]
        # Intel HEX checksum: two's complement of the byte sum, truncated to
        # 8 bits.  Bug fix: the original computed 0x100 - (sum & 0xff), which
        # yields 0x100 (never equal to a stored byte) whenever the summed
        # bytes are a multiple of 256, wrongly rejecting valid records.
        checksum = (0x100 - (sum(lb[:-1]) & 0xff)) & 0xff
        if checksum != lb[-1]:
            raise DWException("Invalid hex line checksum")
        if rtype == 0x00:
            mem.write(addr, lb[4:-1])
        elif rtype == 0x01:
            break
        else:
            raise DWException("Unknown hex line")
    return mem
def parse_elf(f):
    """Parse an AVR ELF binary into a memlist image.

    Every segment with file data is written at its physical load address
    (p_paddr).  Raises DWException for non-AVR machines.
    """
    from elftools.elf.elffile import ELFFile
    from elftools.elf.enums import ENUM_E_MACHINE
    elf = ELFFile(f)
    if elf["e_machine"] != "EM_AVR":
        raise DWException("Invalid ELF architecture")
    mem = memlist()
    for s in elf.iter_segments():
        if s["p_filesz"] > 0:
            mem.write(s["p_paddr"], s.data())
    return mem
def parse_binary(filename):
    """Open 'filename' and dispatch to the ELF or Intel HEX parser by content.

    An ELF magic number selects parse_elf; a plausible first HEX record
    (':' prefix and a 00/01 record type at offset 7) selects parse_hex;
    anything else raises DWException.
    """
    with open(filename, "rb") as f:
        header = f.read(9)
        f.seek(0)
        if header.startswith(b"\x7fELF"):
            return parse_elf(f)
        looks_like_hex = (
            len(header) == 9
            and header.startswith(b":")
            and header[7:9] in (b"00", b"01")
        )
        if looks_like_hex:
            return parse_hex(f)
        raise DWException("Unknown binary file type.")
| {
"repo_name": "mvirkkunen/dwprog",
"path": "binparser.py",
"copies": "1",
"size": "1984",
"license": "mit",
"hash": -4126635697877080000,
"line_mean": 25.4533333333,
"line_max": 100,
"alpha_frac": 0.5564516129,
"autogenerated": false,
"ratio": 3.4444444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9486497834576431,
"avg_score": 0.0028796445536026466,
"num_lines": 75
} |
# A list is symmetric if the first row is the same as the first column,
# the second row is the same as the second column and so on. Write a
# procedure, symmetric, which takes a list as input, and returns the
# boolean True if the list is symmetric and False if it is not.
def symmetric(lista):
    """Return True if the square matrix 'lista' equals its transpose.

    An empty list is symmetric.  A non-square input (row count differing
    from the first row's length) is not symmetric.
    """
    if lista == []:
        return True
    n = len(lista)
    if n != len(lista[0]):
        return False
    # Compare each element with its mirror across the main diagonal.
    for i in range(n):
        for j in range(n):
            if lista[i][j] != lista[j][i]:
                return False
    return True

# Parenthesized single-argument print is valid in Python 2 and 3.
print(symmetric([[1, 2, 3],
                 [2, 3, 4],
                 [3, 4, 1]]))
#>>> True
print(symmetric([["cat", "dog", "fish"],
                 ["dog", "dog", "fish"],
                 ["fish", "fish", "cat"]]))
#>>> True
print(symmetric([["cat", "dog", "fish"],
                 ["dog", "dog", "dog"],
                 ["fish", "fish", "cat"]]))
#>>> False
print(symmetric([[1, 2],
                 [2, 1]]))
#>>> True
print(symmetric([[1, 2, 3, 4],
                 [2, 3, 4, 5],
                 [3, 4, 5, 6]]))
#>>> False
print(symmetric([[1, 2, 3],
                 [2, 3, 1]]))
#>>> False
| {
"repo_name": "xala3pa/Computer-Science-cs101",
"path": "Lesson3/symmetric_square.py",
"copies": "1",
"size": "1246",
"license": "mit",
"hash": -4865042815470545000,
"line_mean": 24.4285714286,
"line_max": 71,
"alpha_frac": 0.4414125201,
"autogenerated": false,
"ratio": 3.56,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45014125201,
"avg_score": null,
"num_lines": null
} |
# A list is used to manage the list of Particles.
from particle import Particle
from crazy_particle import CrazyParticle
class ParticleSystem(object):
    """Manages a collection of particles emitted from a fixed origin."""

    def __init__(self, num, v):
        self.particles = []  # live particles; pruned in run()
        self.origin = v.get()  # copy of the emitter position
        for i in range(num):
            self.particles.append(Particle(self.origin))

    def run(self):
        """Advance every particle one step and drop the dead ones.

        Iterates backwards because items are deleted during iteration.
        """
        for i in reversed(range(len(self.particles))):
            p = self.particles[i]
            p.run()
            if p.isDead():
                del self.particles[i]

    def addParticle(self):
        """Emit one new particle: a 50/50 mix of Particle and CrazyParticle."""
        if int(random(0, 2)) == 0:
            p = Particle(self.origin)
        else:
            p = CrazyParticle(self.origin)
        self.particles.append(p)

    def dead(self):
        """Return True when no particles remain.

        Bug fix: the original called self.particles.isEmpty(), but Python
        lists have no isEmpty() method, so this always raised AttributeError.
        """
        return len(self.particles) == 0
| {
"repo_name": "jdf/processing.py",
"path": "mode/examples/Topics/Simulate/MultipleParticleSystems/particle_system.py",
"copies": "6",
"size": "1129",
"license": "apache-2.0",
"hash": -8542031432512239000,
"line_mean": 29.5135135135,
"line_max": 73,
"alpha_frac": 0.5943312666,
"autogenerated": false,
"ratio": 4.00354609929078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02702702702702703,
"num_lines": 37
} |
#A list of all available Tequilas, to show the user when they type "help"
#all lowercase values of these exact phrases are stored in the weights dictionary
#The script should take upper and lower case values of these exact strings
# Display names for the "help" listing; lookups into 'weights' use the
# lower-cased form of these exact strings.
tequila_list = ["Jose Cuervo", "JLP", "Herradura", "El Jimador", "Arette", "Casa Noble","Don Julio Blanco", "Don Julio", "Don Julio 1942", "KAH", "Patron",\
                "Porfidio", "1800", "1800 Anejo", "Fortaleza", "T1", "Gran Centenario", "Jose Cuervo Reserva"]
#loads my bar and sets it as a list
my_bar_lst = []
# Load the saved bar contents, one entry per line.
with open("mybar.txt", "r+") as bar:
    for line in bar:
        # Bug fix: rstrip removes only a trailing newline; the original
        # sliced off the last character unconditionally, corrupting a final
        # line that has no trailing "\n".
        my_bar_lst.append(line.rstrip("\n"))
#opens the saved mybar, then makes it into a list
#also removes the "\n" on each line
#opens the file, writes my_bar_lst to the file
#with a new line, and closes the file
def save(file):
    """Persist my_bar_lst to mybar.txt, one entry per line.

    The 'file' argument is unused but kept so existing call sites
    (save(bar)) keep working.

    Bug fix: opens in "w" mode.  The original used "r+", which rewrites
    from the start WITHOUT truncating, so stale lines survived at the end
    of the file whenever the list shrank.  "w" also closes the handle via
    'with', instead of relying on the caller's file object.
    """
    with open("mybar.txt", "w") as bar:
        for entry in my_bar_lst:
            bar.write(entry + "\n")
# Empty-bottle weights per tequila, keyed by the lower-cased display name.
# Units match whatever the user's scale reports (presumably kg -- the
# volume math below assumes kg against a kg/L density).  Entries marked
# "guess" have not been measured.
weights = {
    "jose cuervo":0.9, #guess
    "jlp":0.9, #guess
    "herradura": 0.9, #guess
    "el jimador":0.53,
    "arette":0.638,
    "casa noble":0.905,
    "don julio blanco":0.702,
    "don julio":0.789,
    "don julio 1942":.7, #guess
    "kah":.6, #guess
    "patron":0.77,
    "porfidio":0.7,
    "1800":0.817,
    "1800 anejo":0.747,
    "fortaleza":0.791,
    "t1":0.951,
    "gran centenario":0.583,
    "jose cuervo reserva":0.765
}
# Density of tequila, applied uniformly to all brands (~0.95 kg/L).
rho = 0.9501
while True:
print("What would you like to do?")
print("Type 'stocktake' to perform stocktake, 'edit' to edit your bar.")
initinput = input("")
if initinput.lower() == "edit":
while True:
print("Here you can edit your bar. Type 'add' or 'remove' to edit your bar,\
'view' to view what's currently in your bar, or 'all' to see \
everything that's available to add. Type 'back' to go back")
editinput = input("")
if editinput.lower() == "view":
print(my_bar_lst)
print("")
elif editinput.lower() == "all":
for t in weights:
print(t)
elif editinput.lower() == "add":
print("Type the name of the Tequila you would like to add.")
new_name = input("")
if new_name in weights:
my_bar_lst.append(new_name)
print("Successfully added " + new_name + " to your bar.")
save(bar)
else:
print("Whoops! That's not in the database.")
print("Are you sure you've spelt it correctly?")
print("")
elif editinput.lower() =='back':
save(bar)
break
#if comand is 'stocktake', leads to the stocktaking block of code
elif initinput.lower() == "stocktake":
print("Type the name of the Tequila you want to test. Type 'Help' to see a list of all tequilas, \
or 'add' to add a new tequila to the database.")
while True:
name = input("name : ")
if name.lower() == "help":
print("These are all the types of Tequila supported. If you're having difficulty, make sure \
you're spelling them right:")
print(tequila_list)
print("")
elif name.lower() == "back":
save(bar)
break
#if the lowercase version of the name is in the dictionary "weights"
elif name.lower() in weights:
name = name.lower()
#grabs the weight of that name's bottle by putting it into the dicitonary
weight_b = weights[name]
#Gets the current weight of bottle + tequila
weight_t =(input("How much does this weigh? "))
try:
weight_t = float(weight_t)
#makes sure that only a number has been entered, nothing else
except:
print("Whoops! Try *just* inputting numbers, nothing else")
weight_liquid = weight_t - weight_b
#total weight minus weight of bottle
volume = weight_liquid / rho
#density = mass/volume^
#also, assumes we're looking for shots measuring 30ml each
shots = volume / .03
if shots < 100:
#gives it to 2 dp, provided the number is less than 100 (ie, a reasonable number)
print("There are {} shots (of 30ml each) left in this bottle".format(str(shots)[:4]))
print ("")
else:
#in case some silly and crazily big number is entered
print("There are {} shots (of 30ml each) left in this bottle".format(shots))
print("")
#lets me fill any bottle I want with water as a test to see if the calculations work
#asks what bottle I'm using, gets that bottle, and sets density = 1
elif name.lower() == "water":
try:
bottle = input("What bottle are you using? ").lower()
except:
print("Whoops! Are you sure you're spelling that correctly?")
weight_b = weights[bottle]
weight_t =(input("How much does this weigh? "))
try:
weight_t = float(weight_t)
except:
print("Whoops! Try *just* inputting numbers, nothing else")
weight_liquid = weight_t - weight_b
volume = weight_liquid / 1
shots = volume / .03
if shots < 100:
print("There are {} shots (of 30ml each) left in this bottle".format(str(shots)[:4]))
print ("")
else:
print("There are {} shots (of 30ml each) left in this bottle".format(shots))
print("")
elif name.lower() == "add":
print("What is the name of the Tequila you want to add to the databse?")
title = input()
print("What is the weight of the empty bottle? (precise and accurate values are appreciated")
add_weight = input()
weights[title] = add_weight
else:
print("Whoops! I don't recognise that name. Type 'Help' to see a list of accepted names, \
and make sure they're spelt correctly!")
else:
print("I don't recognise that input")
print("")
| {
"repo_name": "Evander7/Tequila-stocktake",
"path": "Tequila 3.py",
"copies": "1",
"size": "7091",
"license": "apache-2.0",
"hash": 6466793610920742000,
"line_mean": 38.52,
"line_max": 156,
"alpha_frac": 0.5058524891,
"autogenerated": false,
"ratio": 4.01528878822197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9925001921109216,
"avg_score": 0.019227871242550997,
"num_lines": 175
} |
# A list of applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = ['nec_portal']
# 'key', 'label', 'path'
AVAILABLE_THEMES = [
('nec_portal', 'NEC_Portal', 'themes/nec_portal'),
]
# The default theme if no cookie is present
DEFAULT_THEME = 'nec_portal'
# Site Branding
SITE_BRANDING = 'NEC Cloud System'
# WEBROOT is the location relative to Webserver root
# should end with a slash.
WEBROOT = '/dashboard/'
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
OPENSTACK_IMAGE_BACKEND = {
'image_formats': [
('', _('Select format')),
('qcow2', _('QCOW2 - QEMU Emulator')),
('raw', _('Raw')),
]
}
# Dictionary of currently available angular features
ANGULAR_FEATURES = {
'images_panel': False,
}
# Map of local copy of service policy files
POLICY_FILES = {
'identity': 'keystone_policy.json',
'ticket': 'aflo_policy.json',
'compute': 'nova_policy.json',
'volume': 'cinder_policy.json',
'image': 'glance_policy.json',
'orchestration': 'heat_policy.json',
'network': 'neutron_policy.json',
'telemetry': 'ceilometer_policy.json',
}
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
# NOTE: This value must be the ID of the default domain, NOT the name.
# Also, you will most likely have a value in the keystone policy file like this
# "cloud_admin": "rule:admin_required and domain_id:<your domain id>"
# This value must match the domain id specified there.
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'
# Default OpenStack Dashboard configuration.
HORIZON_CONFIG['help_url'] = "http://help.example.com"
SECRET_KEY='xxxx'
LOCAL_PATH = '/tmp'
# We recommend you use memcached for development; otherwise after every reload
# of the django development server, you will have to login again. To use
# memcached set CACHES to something like
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'localhost.example.com:11211',
}
}
OPENSTACK_HOST = "127.0.0.1"
OPENSTACK_KEYSTONE_URL = "http://127.0.0.1:5000/v3"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# Versions specified here should be integers or floats, not strings.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be 2.0 or 3.
# Minimum compute version to get the instance locked status is 2.9.
OPENSTACK_API_VERSIONS = {
"identity": 3
}
# A dictionary of settings which can be used to provide the default values for
# properties found in the Launch Instance modal.
LAUNCH_INSTANCE_DEFAULTS = {
'config_drive': True,
'enable_scheduler_hints': True,
'disable_image': True,
'disable_instance_snapshot': True,
'disable_volume': False,
'disable_volume_snapshot': True,
}
# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
# services provided by cinder that is not exposed by its extension API.
OPENSTACK_CINDER_FEATURES = {
'enable_backup': True,
}
# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
# services provided by neutron. Options currently available are load
# balancer service, security groups, quotas, VPN service.
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': True,
'enable_quotas': True,
'enable_ipv6': True,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': True,
'enable_firewall': False,
'enable_vpn': False,
'enable_fip_topology_check': True,
# The profile_support option is used to detect if an external router can be
# configured via the dashboard. When using specific plugins the
# profile_support can be turned on if needed.
'profile_support': None,
#'profile_support': 'cisco',
# Set which provider network types are supported. Only the network types
# in this list will be available to choose from when creating a network.
# Network types include local, flat, vlan, gre, and vxlan.
'supported_provider_types': ['*'],
# Set which VNIC types are supported for port binding. Only the VNIC
# types in this list will be available to choose from when creating a
# port.
# VNIC types include 'normal', 'macvtap' and 'direct'.
'supported_vnic_types': ['*']
}
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
OPENSTACK_ENDPOINT_TYPE = "internalURL"
# Specify a maximum number of items to display in a dropdown.
DROPDOWN_MAX_ITEMS = 1000
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_COOKIE_DOMAIN = ".example.com"
# Override policy.check function.
# Admin dashboard menu disabled by only admin roles.
# Setting of OPENSTACK_KEYSTONE_ADMIN_ROLES is necessary
# to use this method.
def check(actions, request, target=None):
    """Custom POLICY_CHECK_FUNCTION: gate admin dashboards on admin roles.

    When the caller being policy-checked is a horizon Dashboard instance
    (detected by inspecting the caller's stack frame), return True only if
    the requesting user holds at least one of the roles listed in
    OPENSTACK_KEYSTONE_ADMIN_ROLES.  Every other check falls through to the
    default openstack_auth policy engine.

    :param actions: policy actions tuple, passed through to policy.check
    :param request: the current Django request (request.user.roles is read)
    :param target: optional policy target dict, passed through unchanged
    """
    import inspect
    from horizon import Dashboard as dashboard
    frame = None
    try:
        # Grab the caller's caller frame: the object whose policy is
        # being evaluated lives in that frame's locals as 'self'.
        frame = inspect.stack()[2][0]
        arginfo = inspect.getargvalues(frame)
        called_obj = arginfo.locals['self'] if 'self' in arginfo.locals else None
        # Check whether the calling object is a Dashboard.
        if called_obj and isinstance(called_obj, dashboard):
            my_roles = set(role['name'] for role in request.user.roles)
            admin_roles = set(OPENSTACK_KEYSTONE_ADMIN_ROLES)
            return 0 < len(my_roles & admin_roles)
    finally:
        # Drop the frame reference to avoid reference cycles, as
        # recommended by the inspect module documentation.
        if frame:
            del frame
    # Note(Itxaka): This is to prevent circular dependencies and apps not ready
    # If you do django imports in your settings, you are gonna have a bad time
    from openstack_auth import policy
    return policy.check(actions, request, target)
# Role names that unlock the admin dashboard in the custom check() above.
OPENSTACK_KEYSTONE_ADMIN_ROLES = [
    'admin'
]
# Hand policy evaluation over to the custom check() defined in this module.
POLICY_CHECK_FUNCTION = check
| {
"repo_name": "NECCSiPortal/NECCSPortal-dashboard",
"path": "openstack_dashboard/local/local_settings.d/_30_nec_portal.py",
"copies": "1",
"size": "6473",
"license": "apache-2.0",
"hash": -3030332370167824400,
"line_mean": 34.1793478261,
"line_max": 81,
"alpha_frac": 0.7067820176,
"autogenerated": false,
"ratio": 3.7243958573072495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49311778749072493,
"avg_score": null,
"num_lines": null
} |
#A list of common functions used by various parts of the app
from consts import switches
import consts.paths as paths
import datetime
def handle_err_msg(err_msg):
    """Forward err_msg to the Globals singleton's error handler."""
    Globals().handle_err_msg(err_msg)
def add_commas_to_num_str(num_str):
    """Format a numeric string (or number) with thousands separators.

    The value is rounded to two decimal places first, then the decimal
    part is dropped, e.g. "1234567.89" -> "1,234,567".
    """
    # Format with commas and two decimals, then slice off ".dd".
    return "{:,.2f}".format(float(num_str))[:-3]
def format_address(st_num, st, city, state, zip):
    """Build a one-line mailing address.

    Street and city are title-cased, the state is upper-cased; the
    street number and zip are used verbatim.
    """
    return "{} {}, {}, {} {}".format(
        st_num, st.title(), city.title(), state.upper(), zip)
def format_citystatezip(city, state, zip):
    """Join city, state and zip as "city, state zip" (no case changes)."""
    return "{}, {} {}".format(city, state, zip)
class Globals(object):
    """Process-wide singleton holding error-logging helpers and a
    date stamp used to name the daily error-log file.
    NOTE(review): this is Python 2 code (print statement below)."""
    # Line separator appended after each error message in the log file.
    newline = "\n"
    # Cached singleton instance.
    _instance = None
    def __new__(cls, *args, **kwargs):
        # Classic singleton: create the instance once, then reuse it.
        # NOTE(review): forwarding *args/**kwargs to object.__new__ is
        # rejected by modern Pythons when __new__/__init__ take no extra
        # args — confirm this only ever gets called with no arguments.
        if not cls._instance:
            cls._instance = super(Globals, cls).__new__(
                cls, *args, **kwargs)
        return cls._instance
    def __init__(self):
        # Date stamp "YYYY_M_D" (no zero padding) for today's log file.
        # NOTE(review): __init__ re-runs on every Globals() call even
        # though __new__ returns the cached instance.
        now = datetime.datetime.now()
        self.timestamp = str(now.year) + "_" + str(now.month) + "_" + str(now.day)
    def write_err_msg_to_file(self, err_msg):
        # Append the message plus newline to today's error file; the path
        # components come from consts.paths.
        err_msg_path = paths.ERR_MSG_PATH_DIR + paths.ERR_FILE_NAME + "_" + str(self.timestamp) + paths.ERR_FILE_EXTENSION
        with open(err_msg_path, "a") as errfile:
            errfile.write(err_msg)
            errfile.write(self.newline)
    def handle_err_msg(self, err_msg):
        # Route the message to file and/or stdout depending on the
        # consts.switches configuration flags.
        if switches.WRITE_ERR_MSG_TO_FS:
            self.write_err_msg_to_file(err_msg)
        if switches.PRINT_ERR_MSG_TO_SCREEN:
            print "Globals handle_err_msg : " + err_msg
| {
"repo_name": "pbryzek/Freedom",
"path": "common/globals.py",
"copies": "1",
"size": "1619",
"license": "mit",
"hash": -3305529827679223300,
"line_mean": 30.1346153846,
"line_max": 122,
"alpha_frac": 0.5843113033,
"autogenerated": false,
"ratio": 3.2186878727634194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43029991760634195,
"avg_score": null,
"num_lines": null
} |
# Scrabble score for each of the 26 lowercase letters, stored as
# [letter, score] pairs.
values = [
    ["a", 1], ["b", 3], ["c", 3], ["d", 2], ["e", 1], ["f", 4],
    ["g", 2], ["h", 4], ["i", 1], ["j", 8], ["k", 5], ["l", 1],
    ["m", 3], ["n", 1], ["o", 1], ["p", 3], ["q", 10], ["r", 1],
    ["s", 1], ["t", 1], ["u", 1], ["v", 4], ["w", 4], ["x", 8],
    ["y", 4], ["z", 10],
]
def letterScore(letter):
    """Return the Scrabble score of a single lowercase letter.

    Falls off the end (returning None) when the letter is not in the
    table, matching the lookup-table contents above.
    """
    for ltr, score in values:
        if ltr == letter:
            return score
def scrabbleScore(word):
    """Return the total Scrabble score of all letters in *word*.

    An empty word scores 0.  Letters outside the score table make
    letterScore return None, which raises TypeError here — same as
    the original accumulation loop.
    """
    return sum(letterScore(ch) for ch in word)
| {
"repo_name": "chpoon92/python-bridging-lab-exercise-python",
"path": "exercise_1_scrabble_word/ scrabble.py",
"copies": "1",
"size": "1190",
"license": "mit",
"hash": -4768340863694267000,
"line_mean": 26.6744186047,
"line_max": 62,
"alpha_frac": 0.5268907563,
"autogenerated": false,
"ratio": 3.3903133903133904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.93821639048004,
"avg_score": 0.007008048362598065,
"num_lines": 43
} |
# A list of our CSS and JS assets for jingo-minify.
# Each bundle maps a name to an ordered tuple of source paths; order
# matters because later stylesheets may override earlier ones.
CSS = {
    'mkt/admin': (
        'css/zamboni/zamboni.css',
        'css/zamboni/mkt-admin.css',
        'css/zamboni/admin-django.css',
    ),
    'mkt/devreg': (
        # Contains reset, clearfix, etc.
        'css/devreg/base.css',
        # Base styles (body, breadcrumbs, islands, columns).
        'css/devreg/base.styl',
        'css/devreg/breadcrumbs.styl',
        # Typographical styles (font treatments, headings).
        'css/devreg/typography.styl',
        # Header (aux-nav, masthead, site-nav).
        'css/devreg/desktop-account-links.styl',
        'css/devreg/header.styl',
        # Item rows (used on Dashboard).
        'css/devreg/listing.styl',
        'css/devreg/legacy-paginator.styl',
        # Buttons (used for paginator, "Edit" buttons, Refunds page).
        'css/devreg/buttons.styl',
        # Forms (used for tables on "Manage ..." pages).
        'css/devreg/forms.styl',
        # Popups, Modals, Tooltips.
        'css/devreg/notification.styl',
        'css/devreg/overlay.styl',
        'css/devreg/popups.styl',
        'css/devreg/device.styl',
        'css/devreg/tooltips.styl',
        # L10n menu ("Localize for ...").
        'css/devreg/l10n.styl',
        # Tables.
        'css/devreg/data-grid.styl',
        # Landing page
        'css/devreg/landing.styl',
        # "Manage ..." pages.
        'css/devreg/manage.styl',
        'css/devreg/prose.styl',
        'css/devreg/authors.styl',
        'css/devreg/in-app-config.styl',
        'css/devreg/payments.styl',
        'css/devreg/transactions.styl',
        'css/devreg/status.styl',
        'css/devreg/content_ratings.styl',
        # Image Uploads (used for "Edit Listing" Images and Submission).
        'css/devreg/media.styl',
        'css/devreg/invisible-upload.styl',
        # Submission.
        'css/devreg/submit-progress.styl',
        'css/devreg/submit-terms.styl',
        'css/devreg/submit-manifest.styl',
        'css/devreg/submit-details.styl',
        'css/devreg/validation.styl',
        'css/devreg/submit.styl',
        'css/devreg/tabs.styl',
        # Developer Log In / Registration.
        'css/devreg/login.styl',
        # Footer.
        'css/devreg/footer.styl',
        # Marketplace elements.
        'css/lib/marketplace-elements.css',
    ),
    'mkt/reviewers': (
        'css/zamboni/editors.styl',
        'css/devreg/consumer-buttons.styl',
        'css/devreg/content_ratings.styl',
        'css/devreg/data-grid.styl',
        'css/devreg/manifest.styl',
        'css/devreg/reviewers.styl',
        'css/devreg/reviewers-header.styl',
        'css/devreg/reviewers-mobile.styl',
        'css/devreg/legacy-paginator.styl',
        'css/devreg/files.styl',
    ),
    'mkt/ecosystem': (
        'css/devreg/reset.styl',
        'css/devreg/consumer-typography.styl',
        'css/devreg/login.styl',
        'css/devreg/forms.styl',
        'css/ecosystem/landing.styl',
        'css/ecosystem/documentation.styl',
    ),
    'mkt/in-app-payments': (
        'css/devreg/reset.styl',
        'css/devreg/consumer-typography.styl',
        'css/devreg/buttons.styl',
        'css/devreg/in-app-payments.styl',
    ),
    'mkt/in-app-products': (
        'css/devreg/in-app-products.styl',
    ),
    'mkt/lookup': (
        'css/devreg/manifest.styl',
        'css/devreg/lookup-tool.styl',
        'css/devreg/activity.styl',
    ),
    'mkt/gaia': (
        # Gaia building blocks.
        'css/gaia/action_menu.css',
        'css/gaia/switches.css',
        'css/gaia/value_selector.css',
    ),
    'mkt/operators': (
        'css/devreg/legacy-paginator.styl',
        'css/devreg/data-grid.styl',
        'css/devreg/operators.styl',
    ),
}
# JS bundles for jingo-minify: bundle name -> ordered tuple of script
# paths.  Order matters — libraries must precede the code that uses them.
JS = {
    # Used by the File Viewer for packaged apps.
    'zamboni/files': (
        'js/lib/diff_match_patch_uncompressed.js',
        'js/lib/syntaxhighlighter/xregexp-min.js',
        'js/lib/syntaxhighlighter/shCore.js',
        'js/lib/syntaxhighlighter/shLegacy.js',
        'js/lib/syntaxhighlighter/shBrushAppleScript.js',
        'js/lib/syntaxhighlighter/shBrushAS3.js',
        'js/lib/syntaxhighlighter/shBrushBash.js',
        'js/lib/syntaxhighlighter/shBrushCpp.js',
        'js/lib/syntaxhighlighter/shBrushCSharp.js',
        'js/lib/syntaxhighlighter/shBrushCss.js',
        'js/lib/syntaxhighlighter/shBrushDiff.js',
        'js/lib/syntaxhighlighter/shBrushJava.js',
        'js/lib/syntaxhighlighter/shBrushJScript.js',
        'js/lib/syntaxhighlighter/shBrushPhp.js',
        'js/lib/syntaxhighlighter/shBrushPlain.js',
        'js/lib/syntaxhighlighter/shBrushPython.js',
        'js/lib/syntaxhighlighter/shBrushSass.js',
        'js/lib/syntaxhighlighter/shBrushSql.js',
        'js/lib/syntaxhighlighter/shBrushVb.js',
        'js/lib/syntaxhighlighter/shBrushXml.js',
        'js/zamboni/storage.js',
        'js/zamboni/files.js',
    ),
    'mkt/devreg': (
        # tiny module loader
        'js/lib/amd.js',
        'js/lib/jquery-1.11.1.js',
        'js/lib/underscore.js',
        'js/lib/format.js',
        'js/lib/jquery.cookie.js',
        'js/lib/stick.js',
        'js/common/fakefilefield.js',
        'js/devreg/gettext.js',
        'js/devreg/tracking.js',
        'js/devreg/init.js',  # This one excludes buttons initialization, etc.
        'js/devreg/modal.js',
        'js/devreg/overlay.js',
        'js/devreg/capabilities.js',
        'js/devreg/slugify.js',
        'js/devreg/formdata.js',
        'js/devreg/tooltip.js',
        'js/devreg/popup.js',
        'js/devreg/login.js',
        'js/devreg/notification.js',
        'js/devreg/outgoing_links.js',
        'js/devreg/tarako.js',
        'js/devreg/utils.js',
        'js/lib/csrf.js',
        'js/lib/document-register-element.js',
        'js/impala/serializers.js',
        'js/common/keys.js',
        'js/common/upload-base.js',
        'js/common/upload-packaged-app.js',
        'js/common/upload-image.js',
        'js/devreg/l10n.js',
        'js/zamboni/storage.js',  # Used by editors.js, devhub.js
        # jQuery UI
        'js/lib/jquery-ui/jquery-ui-1.10.1.custom.js',
        'js/lib/jquery.minicolors.js',
        'js/devreg/devhub.js',
        'js/devreg/submit.js',
        'js/devreg/tabs.js',
        'js/devreg/edit.js',
        'js/devreg/validator.js',
        # Specific stuff for making payments nicer.
        'js/devreg/payments-enroll.js',
        'js/devreg/payments-manage.js',
        'js/devreg/payments.js',
        # For testing installs.
        'js/devreg/apps.js',
        'js/devreg/test-install.js',
        'js/devreg/tracking_app_submit.js',
        # IARC.
        'js/devreg/content_ratings.js',
        # Marketplace elements.
        'js/lib/marketplace-elements.js',
        # Module initialization.
        'js/devreg/devreg_init.js',
    ),
    'mkt/reviewers': (
        'js/lib/moment-with-langs.min.js',  # JS date lib.
        'js/common/buckets.js',
        'js/devreg/reviewers/editors.js',
        'js/devreg/apps.js',  # Used by install.js
        'js/devreg/reviewers/payments.js',
        'js/devreg/reviewers/install.js',
        'js/devreg/reviewers/buttons.js',
        'js/devreg/manifest.js',  # Used by reviewers.js
        'js/devreg/reviewers/reviewers_commbadge.js',
        'js/devreg/reviewers/reviewers.js',
        'js/devreg/reviewers/expandable.js',
        'js/devreg/reviewers/mobile_review_actions.js',
        'js/common/fakefilefield.js',
        'js/common/formsets.js',  # Used by Reviewer Attachments in devreg/init.js.
        'js/devreg/reviewers/reviewers_init.js',
    ),
    'mkt/in-app-payments': (
        'js/lib/jquery-1.11.1.js',
        'js/devreg/inapp_payments.js',
        'js/devreg/utils.js',
        'js/lib/csrf.js',
        'js/impala/serializers.js',
        'js/devreg/login.js',
    ),
    'mkt/in-app-products': (
        'js/lib/es5-shim.min.js',  # We might already assume these work.
        'js/lib/flight.min.js',
        'js/devreg/in_app_products.js',
    ),
    'mkt/lookup': (
        'js/common/keys.js',
        'js/impala/ajaxcache.js',
        'js/devreg/suggestions.js',
        'js/devreg/manifest.js',
        'js/devreg/lookup-tool.js',
    ),
    'mkt/ecosystem': (
        'js/devreg/ecosystem.js',
    )
}
def jquery_migrated(bundles=None):
    """Return a copy of the JS bundle map with jquery-migrate injected.

    For every bundle containing jquery-1.9.1, insert
    js/lib/jquery-migrate-1.1.0.js immediately after it (before any files
    requiring jQuery are loaded).  Other bundles are left untouched.

    :param bundles: optional bundle mapping; defaults to the module-level
        JS dict (keeps the original no-argument call working).
    :return: a new dict — the input mapping is not mutated.
    """
    new_JS = dict(bundles) if bundles is not None else dict(JS)
    # .items() instead of the py2-only .iteritems(): works on both
    # Python 2 and 3; list() makes mutation during iteration safe.
    for bundle, files in list(new_JS.items()):
        files = list(files)
        try:
            jquery = files.index('js/lib/jquery-1.9.1.js')
        except ValueError:
            # This bundle doesn't ship that jQuery version.
            continue
        # Insert jquery-migrate immediately after jquery (before any files
        # requiring jquery are loaded).
        files.insert(jquery + 1, 'js/lib/jquery-migrate-1.1.0.js')
        new_JS[bundle] = tuple(files)
    return new_JS
def less2stylus(bundles=None):
    """
    This will return a dict of the CSS bundles with `.styl` stylesheets
    instead of `.less` ones.
    Put in your local settings::
        try:
            MINIFY_BUNDLES['css'].update(asset_bundles.less2stylus())
        except AttributeError:
            pass

    :param bundles: optional bundle mapping; defaults to the module-level
        CSS dict (keeps the original no-argument call working).
    :return: a new dict — the input mapping is not mutated.
    """
    import os
    ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    def stylus(fn):
        # Swap the extension only when the .styl twin exists on disk.
        fn_styl = fn.replace('.less', '.styl')
        if os.path.exists(os.path.join(ROOT, 'media', fn_styl)):
            fn = fn_styl
        return fn

    new_CSS = dict(bundles) if bundles is not None else dict(CSS)
    # .items() instead of the py2-only .iteritems(): works on both
    # Python 2 and 3; list() makes mutation during iteration safe.
    for bundle, files in list(new_CSS.items()):
        new_CSS[bundle] = tuple(stylus(f) for f in files)
    return new_CSS
| {
"repo_name": "ngokevin/zamboni",
"path": "mkt/asset_bundles.py",
"copies": "1",
"size": "9529",
"license": "bsd-3-clause",
"hash": 8707214549230948000,
"line_mean": 30.1405228758,
"line_max": 83,
"alpha_frac": 0.5801238325,
"autogenerated": false,
"ratio": 3.0193282636248417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4099452096124842,
"avg_score": null,
"num_lines": null
} |
# A list of our CSS and JS assets for jingo-minify.
# Each bundle maps a name to an ordered tuple of source paths; order
# matters because later stylesheets may override earlier ones.
CSS = {
    'mkt/devreg': (
        # Contains reset, clearfix, etc.
        'css/devreg/base.css',
        # Base styles (body, breadcrumbs, islands, columns).
        'css/devreg/base.styl',
        'css/devreg/breadcrumbs.styl',
        # Typographical styles (font treatments, headings).
        'css/devreg/typography.styl',
        # Header (aux-nav, masthead, site-nav).
        'css/devreg/desktop-account-links.styl',
        'css/devreg/header.styl',
        # Item rows (used on Dashboard).
        'css/devreg/listing.styl',
        'css/devreg/legacy-paginator.styl',
        # Buttons (used for paginator, "Edit" buttons, Refunds page).
        'css/devreg/buttons.styl',
        # Popups, Modals, Tooltips.
        'css/devreg/notification.styl',
        'css/devreg/overlay.styl',
        'css/devreg/popups.styl',
        'css/devreg/device.styl',
        'css/devreg/tooltips.styl',
        # L10n menu ("Localize for ...").
        'css/devreg/l10n.styl',
        # Forms (used for tables on "Manage ..." pages).
        'css/devreg/forms.styl',
        # Tables.
        'css/devreg/data-grid.styl',
        # Landing page
        'css/devreg/landing.styl',
        # "Manage ..." pages.
        'css/devreg/manage.styl',
        'css/devreg/prose.styl',
        'css/devreg/authors.styl',
        'css/devreg/in-app-config.styl',
        'css/devreg/payments.styl',
        'css/devreg/refunds.styl',
        'css/devreg/transactions.styl',
        'css/devreg/status.styl',
        'css/devreg/content_ratings.styl',
        # Image Uploads (used for "Edit Listing" Images and Submission).
        'css/devreg/media.styl',
        'css/devreg/invisible-upload.styl',
        # Submission.
        'css/devreg/submit-progress.styl',
        'css/devreg/submit-terms.styl',
        'css/devreg/submit-manifest.styl',
        'css/devreg/submit-details.styl',
        'css/devreg/validation.styl',
        'css/devreg/submit.styl',
        'css/devreg/tabs.styl',
        # Developer Log In / Registration.
        'css/devreg/login.styl',
        # Footer.
        'css/devreg/footer.styl',
    ),
    'mkt/reviewers': (
        'css/zamboni/editors.styl',
        'css/devreg/consumer-buttons.styl',
        'css/devreg/content_ratings.styl',
        'css/devreg/data-grid.styl',
        'css/devreg/manifest.styl',
        'css/devreg/reviewers.styl',
        'css/devreg/reviewers-header.styl',
        'css/devreg/reviewers-mobile.styl',
        'css/devreg/legacy-paginator.styl',
        'css/devreg/files.styl',
    ),
    'mkt/ecosystem': (
        'css/devreg/reset.styl',
        'css/devreg/consumer-typography.styl',
        'css/devreg/login.styl',
        'css/devreg/forms.styl',
        'css/ecosystem/landing.styl',
        'css/ecosystem/documentation.styl',
    ),
    'mkt/in-app-payments': (
        'css/devreg/reset.styl',
        'css/devreg/consumer-typography.styl',
        'css/devreg/buttons.styl',
        'css/devreg/in-app-payments.styl',
    ),
    'mkt/in-app-products': (
        'css/devreg/in-app-products.styl',
    ),
    'mkt/lookup': (
        'css/devreg/manifest.styl',
        'css/devreg/lookup-tool.styl',
        'css/devreg/activity.styl',
    ),
    'mkt/gaia': (
        # Gaia building blocks.
        'css/gaia/action_menu.css',
        'css/gaia/switches.css',
        'css/gaia/value_selector.css',
    ),
    'mkt/operators': (
        'css/devreg/legacy-paginator.styl',
        'css/devreg/data-grid.styl',
        'css/devreg/operators.styl',
    ),
}
# JS bundles for jingo-minify: bundle name -> ordered tuple of script
# paths.  Order matters — libraries must precede the code that uses them.
JS = {
    'mkt/devreg': (
        # tiny module loader
        'js/lib/amd.js',
        'js/lib/jquery-1.9.1.js',
        'js/lib/underscore.js',
        'js/lib/format.js',
        'js/lib/jquery.cookie.js',
        'js/lib/stick.js',
        'js/lib/csrf.js',
        'js/common/fakefilefield.js',
        'js/devreg/gettext.js',
        'js/devreg/tracking.js',
        'js/devreg/init.js',  # This one excludes buttons initialization, etc.
        'js/devreg/modal.js',
        'js/devreg/overlay.js',
        'js/devreg/capabilities.js',
        'js/devreg/slugify.js',
        'js/devreg/formdata.js',
        'js/devreg/tooltip.js',
        'js/devreg/popup.js',
        'js/devreg/login.js',
        'js/devreg/notification.js',
        'js/devreg/outgoing_links.js',
        'js/devreg/utils.js',
        'js/impala/serializers.js',
        'js/common/keys.js',
        'js/common/upload-base.js',
        'js/common/upload-packaged-app.js',
        'js/common/upload-image.js',
        'js/devreg/l10n.js',
        # jQuery UI
        'js/lib/jquery-ui/jquery-ui-1.10.1.custom.js',
        'js/lib/jquery.minicolors.js',
        'js/devreg/devhub.js',
        'js/devreg/submit.js',
        'js/devreg/tabs.js',
        'js/devreg/edit.js',
        'js/devreg/validator.js',
        # Specific stuff for making payments nicer.
        'js/devreg/payments-enroll.js',
        'js/devreg/payments-manage.js',
        'js/devreg/payments.js',
        # For testing installs.
        'js/devreg/apps.js',
        'js/devreg/test-install.js',
        'js/devreg/tracking_app_submit.js',
        # IARC.
        'js/devreg/content_ratings.js',
        # Module initialization.
        'js/devreg/devreg_init.js',
    ),
    'mkt/reviewers': (
        'js/lib/moment-with-langs.min.js',  # JS date lib.
        'js/zamboni/storage.js',  # Used by editors.js
        'js/common/buckets.js',
        'js/devreg/reviewers/editors.js',
        'js/devreg/apps.js',  # Used by install.js
        'js/devreg/reviewers/payments.js',
        'js/devreg/reviewers/install.js',
        'js/devreg/reviewers/buttons.js',
        'js/devreg/manifest.js',  # Used by reviewers.js
        'js/devreg/reviewers/reviewers_commbadge.js',
        'js/devreg/reviewers/reviewers.js',
        'js/devreg/reviewers/expandable.js',
        'js/devreg/reviewers/mobile_review_actions.js',
        'js/common/fakefilefield.js',
        'js/common/formsets.js',  # TODO: Not used? Only seen in devreg/init.js
        'js/devreg/reviewers/reviewers_init.js',
    ),
    'mkt/in-app-payments': (
        'js/lib/jquery-1.9.1.js',
        'js/devreg/inapp_payments.js',
        'js/lib/csrf.js',
        'js/impala/serializers.js',
        'js/devreg/login.js',
    ),
    'mkt/in-app-products': (
        'js/lib/es5-shim.min.js',  # We might already assume these work.
        'js/lib/flight.min.js',
        'js/devreg/in_app_products.js',
    ),
    'mkt/lookup': (
        'js/common/keys.js',
        'js/impala/ajaxcache.js',
        'js/devreg/suggestions.js',
        'js/devreg/manifest.js',
        'js/devreg/lookup-tool.js',
    ),
    'mkt/ecosystem': (
        'js/devreg/ecosystem.js',
    ),
    'mkt/debug': (
        'js/debug/tinytools.js',
    ),
}
def jquery_migrated(bundles=None):
    """Return a copy of the JS bundle map with jquery-migrate injected.

    For every bundle containing jquery-1.9.1, insert
    js/lib/jquery-migrate-1.1.0.js immediately after it (before any files
    requiring jQuery are loaded).  Other bundles are left untouched.

    :param bundles: optional bundle mapping; defaults to the module-level
        JS dict (keeps the original no-argument call working).
    :return: a new dict — the input mapping is not mutated.
    """
    new_JS = dict(bundles) if bundles is not None else dict(JS)
    # .items() instead of the py2-only .iteritems(): works on both
    # Python 2 and 3; list() makes mutation during iteration safe.
    for bundle, files in list(new_JS.items()):
        files = list(files)
        try:
            jquery = files.index('js/lib/jquery-1.9.1.js')
        except ValueError:
            # This bundle doesn't ship that jQuery version.
            continue
        # Insert jquery-migrate immediately after jquery (before any files
        # requiring jquery are loaded).
        files.insert(jquery + 1, 'js/lib/jquery-migrate-1.1.0.js')
        new_JS[bundle] = tuple(files)
    return new_JS
def less2stylus(bundles=None):
    """
    This will return a dict of the CSS bundles with `.styl` stylesheets
    instead of `.less` ones.
    Put in your local settings::
        try:
            MINIFY_BUNDLES['css'].update(asset_bundles.less2stylus())
        except AttributeError:
            pass

    :param bundles: optional bundle mapping; defaults to the module-level
        CSS dict (keeps the original no-argument call working).
    :return: a new dict — the input mapping is not mutated.
    """
    import os
    ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    def stylus(fn):
        # Swap the extension only when the .styl twin exists on disk.
        fn_styl = fn.replace('.less', '.styl')
        if os.path.exists(os.path.join(ROOT, 'media', fn_styl)):
            fn = fn_styl
        return fn

    new_CSS = dict(bundles) if bundles is not None else dict(CSS)
    # .items() instead of the py2-only .iteritems(): works on both
    # Python 2 and 3; list() makes mutation during iteration safe.
    for bundle, files in list(new_CSS.items()):
        new_CSS[bundle] = tuple(stylus(f) for f in files)
    return new_CSS
| {
"repo_name": "jinankjain/zamboni",
"path": "mkt/asset_bundles.py",
"copies": "1",
"size": "8046",
"license": "bsd-3-clause",
"hash": -6298928333306292000,
"line_mean": 28.8,
"line_max": 79,
"alpha_frac": 0.5662440964,
"autogenerated": false,
"ratio": 3.0362264150943394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9102213309848248,
"avg_score": 0.000051440329218106995,
"num_lines": 270
} |
# A list of the 05AB1E encoding page in unicode values (256 in total).
# Byte value N decodes to osabie_code_page[N]; ASCII 32-126 map to
# themselves.
osabie_code_page = (
    "\u01DD\u0292\u03B1\u03B2\u03B3\u03B4\u03B5\u03B6\u03B7"
    "\u03B8\u000A\u0432\u0438\u043C\u043D\u0442\u0393\u0394"
    "\u0398\u03B9\u03A3\u03A9\u2260\u220A\u220D\u221E\u2081"
    "\u2082\u2083\u2084\u2085\u2086\u0020\u0021\u0022\u0023"
    "\u0024\u0025\u0026\u0027\u0028\u0029\u002A\u002B\u002C"
    "\u002D\u002E\u002F\u0030\u0031\u0032\u0033\u0034\u0035"
    "\u0036\u0037\u0038\u0039\u003A\u003B\u003C\u003D\u003E"
    "\u003F\u0040\u0041\u0042\u0043\u0044\u0045\u0046\u0047"
    "\u0048\u0049\u004A\u004B\u004C\u004D\u004E\u004F\u0050"
    "\u0051\u0052\u0053\u0054\u0055\u0056\u0057\u0058\u0059"
    "\u005A\u005B\u005C\u005D\u005E\u005F\u0060\u0061\u0062"
    "\u0063\u0064\u0065\u0066\u0067\u0068\u0069\u006A\u006B"
    "\u006C\u006D\u006E\u006F\u0070\u0071\u0072\u0073\u0074"
    "\u0075\u0076\u0077\u0078\u0079\u007A\u007B\u007C\u007D"
    "\u007E\u01B5\u20AC\u039B\u201A\u0192\u201E\u2026\u2020"
    "\u2021\u02C6\u2030\u0160\u2039\u0152\u0106\u017D\u01B6"
    "\u0100\u2018\u2019\u201C\u201D\u2022\u2013\u2014\u02DC"
    "\u2122\u0161\u203A\u0153\u0107\u017E\u0178\u0101\u00A1"
    "\u00A2\u00A3\u00A4\u00A5\u00A6\u00A7\u00A8\u00A9\u00AA"
    "\u00AB\u00AC\u03BB\u00AE\u00AF\u00B0\u00B1\u00B2\u00B3"
    "\u00B4\u00B5\u00B6\u00B7\u00B8\u00B9\u00BA\u00BB\u00BC"
    "\u00BD\u00BE\u00BF\u00C0\u00C1\u00C2\u00C3\u00C4\u00C5"
    "\u00C6\u00C7\u00C8\u00C9\u00CA\u00CB\u00CC\u00CD\u00CE"
    "\u00CF\u00D0\u00D1\u00D2\u00D3\u00D4\u00D5\u00D6\u00D7"
    "\u00D8\u00D9\u00DA\u00DB\u00DC\u00DD\u00DE\u00DF\u00E0"
    "\u00E1\u00E2\u00E3\u00E4\u00E5\u00E6\u00E7\u00E8\u00E9"
    "\u00EA\u00EB\u00EC\u00ED\u00EE\u00EF\u00F0\u00F1\u00F2"
    "\u00F3\u00F4\u00F5\u00F6\u00F7\u00F8\u00F9\u00FA\u00FB"
    "\u00FC\u00FD\u00FE\u00FF"
)
def osabie_to_utf8(code):
    """
    Translates the given code encoded in raw bytes into an 05AB1E
    understandable code
    :param code: The code that needs to be converted into unicode values
    :return: An understandable UTF-8 encoded string for the 05AB1E interpreter
    """
    # Map each byte value to its character in the 05AB1E code page.
    return "".join(osabie_code_page[byte] for byte in code)
def utf8_to_osabie(code):
    """
    Translates the given code encoded in UTF-8 into raw behaviour bytes
    :param code: The code that needs to be converted into behaviour bytes
    :return: A string encoded in behaviour bytes
    """
    # Map each character back to its byte position in the code page.
    return "".join(chr(osabie_code_page.index(ch)) for ch in code)
| {
"repo_name": "Emigna/05AB1E",
"path": "lib/encoding.py",
"copies": "1",
"size": "3350",
"license": "mit",
"hash": -5540987796150666000,
"line_mean": 50.5384615385,
"line_max": 79,
"alpha_frac": 0.6211940299,
"autogenerated": false,
"ratio": 2.3843416370106763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35055356669106763,
"avg_score": null,
"num_lines": null
} |
# a list of the numbers that Skulpt has trouble rounding correctly; all others should be true
bugs = [-0.5,-0.025,-0.055,0.045,-0.0025,-0.0035,0.0045,0.0055,-250,-350,-450,-550]
# Verify that round() maps every value in `iterable` to `expect`,
# printing only the failures; values in `bugs` are skipped.
# NOTE(review): Python 2 script (print statements); code left byte-identical.
def helper(iterable,expect,n=None):
    # NOTE(review): `if n:` means n=0 falls into the no-ndigit branch —
    # the truthiness test looks intentional here since n=0 is never passed,
    # but confirm before reusing with ndigits=0.
    if n:
        for i in iterable:
            r = round(i,n)
            # Tolerance: half a unit in the (n+1)-th decimal place.
            if abs(r-expect) > (1/10.0**(n+1)) and i not in bugs:
                print False,i," expected: ",expect," result: ",r,abs(r-expect)
    else:
        for i in iterable:
            r = round(i)
            # Fixed small tolerance for whole-number rounding.
            if abs(r-expect) > 0.000001 and i not in bugs:
                print False,i," expected: ",expect," result: ",r,abs(r-expect)
# Exercise round() across value ranges that should all collapse to a single
# result; helper() prints only the failures, so silent sections passed.
print "\n-1.4 to 1.4, no ndigit"
helper([x/10.0 for x in range(-5,-15,-1)],-1)
helper([x/10.0 for x in range(4,-5,-1)],0)
helper([x/10.0 for x in range(5,15)],1)
print "\n-1.49 to 1.49, no ndigit"
helper([x/100.0 for x in range(-50,-150,-1)],-1)
helper([x/100.0 for x in range(40,-50,-1)],0)
helper([x/100.0 for x in range(50,150)],1)
print "\n-0.064 to -0.025, ndigit=2"
helper([x/1000.0 for x in range(-25,-35,-1)],-0.03,2)
helper([x/1000.0 for x in range(-35,-46,-1)],-0.04,2)
helper([x/1000.0 for x in range(-46,-55,-1)],-0.05,2)
helper([x/1000.0 for x in range(-55,-65,-1)],-0.06,2)
print "\n0.025 to 0.064, ndigit=2"
helper([x/1000.0 for x in range(25,35)],0.03,2)
helper([x/1000.0 for x in range(35,46)],0.04,2)
helper([x/1000.0 for x in range(46,55)],0.05,2)
helper([x/1000.0 for x in range(55,65)],0.06,2)
print "\n-0.0064 to -0.0025, ndigit=3"
helper([x/10000.0 for x in range(-25,-35,-1)],-0.003,3)
helper([x/10000.0 for x in range(-35,-46,-1)],-0.004,3)
helper([x/10000.0 for x in range(-46,-56,-1)],-0.005,3)
helper([x/10000.0 for x in range(-56,-65,-1)],-0.006,3)
print "\n0.0025 to 0.0064, ndigit=3"
helper([x/10000.0 for x in range(25,35)],0.003,3)
helper([x/10000.0 for x in range(35,46)],0.004,3)
helper([x/10000.0 for x in range(46,56)],0.005,3)
helper([x/10000.0 for x in range(56,65)],0.006,3)
# Negative ndigits round to the nearest hundred.
print "\n-649 to -250, ndigit=-2"
helper(range(-250,-350,-1),-300,-2)
helper(range(-350,-450,-1),-400,-2)
helper(range(-450,-550,-1),-500,-2)
helper(range(-550,-650,-1),-600,-2)
print "\n250 to 649, ndigit=-2"
helper(range(250,350),300,-2)
helper(range(350,450),400,-2)
helper(range(450,550),500,-2)
helper(range(550,650),600,-2)
| {
"repo_name": "ArcherSys/ArcherSys",
"path": "skulpt/test/run/t422.py",
"copies": "1",
"size": "2281",
"license": "mit",
"hash": 2691433720624841700,
"line_mean": 37.0166666667,
"line_max": 93,
"alpha_frac": 0.6063130206,
"autogenerated": false,
"ratio": 2.3275510204081633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.820458056465793,
"avg_score": 0.045856695270046766,
"num_lines": 60
} |
# A list of utility functions.
# Read a single data path (if app info configures this way).
def read_data_path(app_info_path):
    """Return the first non-comment line of the file at app_info_path.

    Lines are stripped of surrounding whitespace; a stripped line starting
    with '#' is treated as a comment.  Returns None when the file contains
    only comment lines.  (A blank line returns '' — same as the original.)
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(app_info_path, 'r') as file:
        for line in file:
            clean_line = line.strip()
            if not(clean_line.startswith('#')):
                return clean_line
    return None
# For a given app_info_path, get out the parameters as a dict.
def read_params(app_info_path, sep = ':'):
    """Parse 'key<sep>value' lines from app_info_path into a dict.

    Each line is split on sep; empty fragments are dropped.  Lines with
    fewer than two fragments, and lines whose key starts with '#', are
    skipped.  Only the first value after the key is kept.
    """
    params = {}
    # 'with' guarantees the handle is closed (the original leaked it), and
    # the comprehension replaces len(filter(...)), which breaks on py3
    # where filter() returns an iterator.
    with open(app_info_path, 'r') as file:
        for line in file:
            parts = [p for p in (chunk.strip() for chunk in line.split(sep))
                     if p not in (None, '', ' ', "\t")]
            if len(parts) > 1 and not parts[0].startswith('#'):
                params[parts[0]] = parts[1]
    return params
# Print a JSON/dict data set to CSV. Attributes = columns, default is all attributes.
def to_csv(data_points, filename, columns=None):
    """Write data_points (a list of dicts) to filename as CSV.

    :param data_points: list of dicts; one output row per dict.
    :param columns: selects and orders the output columns; defaults to the
        union of all keys (in arbitrary order).
    Missing values are written as empty cells.
    """
    if columns is None:
        # Union of all keys across every data point.  (The original used
        # the py2-only builtin reduce() for this.)
        keys = set()
        for point in data_points:
            keys.update(point.keys())
        columns = list(keys)
    data = [[str(c) for c in columns]]
    for point in data_points:
        # Fix: the original tested `if column` (truthiness of the column
        # *name*) instead of membership, so any missing key raised
        # KeyError; missing keys now produce an empty cell.
        line = [str(point[column]) if column in point else ''
                for column in columns]
        data.append(line)
    text = "\n".join(",".join(line) for line in data)
    print('Writing file: ' + filename + '...')
    # 'with' closes the file even if the write raises.
    with open(filename, "w") as out_file:
        out_file.write(text)
    print('Done!')
"repo_name": "chrisgarcia001/Little-Projects",
"path": "courthouse/lib/util.py",
"copies": "1",
"size": "1296",
"license": "epl-1.0",
"hash": 3879801570372613000,
"line_mean": 32.2564102564,
"line_max": 91,
"alpha_frac": 0.6435185185,
"autogenerated": false,
"ratio": 2.88,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40235185185,
"avg_score": null,
"num_lines": null
} |
''' a little bit more in the comment...
'''
import dynamics.simulation
from dynamics.frame import Frame
from dynamics.spring import NailSpring
from dynamics.object import Rectangle, Circle, Beam
from dynamics.constraint import Nail, Rod, Pin, Shelf
from dynamics.animation import Animation
from dynamics.constants import foot2meter, inch2meter, meter2foot
from dynamics.misc import length_, rot2radians, radians2rot
from dynamics.constants import lb2kgram, kgram2lb, newton2lb
from dynamics.constants import pine_density, steel_density
from flight import Flight
import scipy
import scipy.interpolate
import numpy as np
from math import pi, sin, cos, sqrt, acos, atan2
from scipy.optimize.optimize import fmin
from scipy.optimize.minpack import fsolve
#from scipy.interpolate.fitpack2 import UnivariateSpline
from pylab import plot
import sys
from PyQt5.QtWidgets import QApplication
# Fix: scipy.set_printoptions was a deprecated alias for NumPy's function
# and has been removed from SciPy; call NumPy (imported above as np) directly.
np.set_printoptions(precision=5, linewidth=200)
def treb( sling_length = 9.3,      # sling length, feet
          ramp_length = 11,        # ramp length, feet
          link_sum = 5.587,        # sum of upper and lower link lengths, feet
          hanger_x = 11.38508,     # feet
          hanger_y = -2,
          hinge_x = (6.+2.)/12.,   # feet
          hinge_y = -4.0,
          alpha=90,                # arm start angle, ccw from horizontal (degrees)
          omega=10,                # cocked angle between upper link and lower link
          cw_drop = 5.0,           # feet
          cw_weight = 4581 + 2000, # pounds
          cw_moment_arm = 10.41,   # distance from hinge to cw center of gravity, feet
          cw_moment = 3.516e6,     # counterweight moment about its CG, lb*ft^2
          upper_link_weight = 2*58.,    # pounds
          lower_link_weight = 2*52.,    # pounds
          link_axle_weight = 106,       # pounds
          connector_rod_weight = 84.8,  # pounds
          connector_brace_weight = 105, # pounds
          pumpkin_weight = 10.0,   # pounds
          sling_weight = 1.7,      # pounds
          sim_duration = 2.0,      # seconds
          dry_fire = False,        # True to disable sling from time 0
          time_step = 0.001,       # seconds
          slide_y = -9,            # feet
          arm_depth = (10.+1./4.)/12.,  # inches
          arm_thick = (5.+1./4.)/12.,   # inches
          arm_end_depth = (6.+5./8)/12.,# inches
          arm_end_thick = (3.+1./8)/12.,# inches
          release_pin_weight = 9,  # pounds
          release_time = 0.0,      # seconds
          debug = True):
    """Build and run one floating-arm trebuchet simulation.

    Geometry arguments are in feet/degrees and weights in pounds; all are
    converted to SI internally.  Constructs the machine, arm, counterweight,
    link, connector and pumpkin frames with their constraints, runs the
    integrator, and attaches throw statistics (sim.range, sim.maxrange,
    sim.launchDegrees, sim.Fmax) before returning the Simulation.

    Raises ValueError when the derived link/arm geometry is inconsistent.
    """
    sim = dynamics.simulation.Simulation(max_time=sim_duration,
                                         time_step=time_step)
    sim.debug=debug
    # convert arguments to metric and radians
    sling_length = foot2meter(sling_length)
    hanger_pos = foot2meter(np.array((hanger_x, hanger_y)))
    del hanger_x, hanger_y
    hinge_pos = foot2meter(np.array((hinge_x, hinge_y)))
    del hinge_x, hinge_y
    slide_y = foot2meter(slide_y)
    arm_depth = foot2meter(arm_depth)
    arm_thick = foot2meter(arm_thick)
    arm_end_depth = foot2meter(arm_end_depth)
    arm_end_thick = foot2meter(arm_end_thick)
    ramp_length = foot2meter(ramp_length)
    link_sum = foot2meter(link_sum)
    sim.release_time = release_time
    # FIX: scipy.deg2rad/rad2deg were NumPy aliases removed from SciPy's
    # namespace; use the NumPy functions directly.
    alpha = np.deg2rad(alpha)
    omega = np.deg2rad(omega)
    cw_drop = foot2meter(cw_drop)
    cw_mass = lb2kgram(cw_weight)
    cw_moment_arm = foot2meter(cw_moment_arm)
    cw_moment = cw_moment / 32.174049 * 0.00029263965  # convert lb to slug, then
                                                       # slug*in^2 to kgram*meter^2
    connector_rod_mass = lb2kgram(connector_rod_weight)
    connector_brace_mass = lb2kgram(connector_brace_weight)
    upper_link_mass = lb2kgram(upper_link_weight)
    lower_link_mass = lb2kgram(lower_link_weight)
    link_axle_mass = lb2kgram(link_axle_weight)
    pumpkin_mass = lb2kgram(pumpkin_weight)
    sling_mass = lb2kgram(sling_weight)
    release_pin_mass = lb2kgram(release_pin_weight)
    # long arm length to reach slide
    long_arm_length = -slide_y / np.sin(alpha) - inch2meter(0)
    # compute rest cw position thru triangulation
    rest_cw_ctr = circle_intersection(hanger_pos, link_sum,
                                      hinge_pos, ramp_length)
    # compute cocked cw position on circle about hinge, up 'drop' meters from rest position
    cocked_cw_ctr = np.array((None, rest_cw_ctr[1] + cw_drop))
    # ramp_length**2 = (x-hinge_x)**2 + (y-hinge_y)**2
    cocked_cw_ctr[0] = hinge_pos[0] + sqrt(ramp_length**2 - (cocked_cw_ctr[1]-hinge_pos[1])**2)
    # cocked connection point is on ellipse w/ foci at hanger and cocked_cw, 'string' length
    # equal to link_sum, 'string' interior angle omega. In maxima:
    #   r2: s-r1
    #   eq1: d^2 = r1^2+r2^2-2*r1*r2*cos(omega)
    #   solve(eq1, r1)
    d = length_(hanger_pos - cocked_cw_ctr)
    s = link_sum
    sol1 = -(sqrt(s**2*cos(omega)**2 + 2*d**2*cos(omega)-s**2+2*d**2) - s*cos(omega) - s)/(2*cos(omega)+2)
    sol2 = (sqrt(s**2*cos(omega)**2 + 2*d**2*cos(omega)-s**2+2*d**2) + s*cos(omega) + s)/(2*cos(omega)+2)
    upper_link_length = min(sol1,sol2)
    lower_link_length = max(sol1,sol2)
    if abs((upper_link_length+lower_link_length-link_sum)/link_sum) > 0.001:
        print("link sum error")
        print("  upper_link_length=", meter2foot(upper_link_length))
        print("  lower_link_length=", meter2foot(lower_link_length))
        print("  link_sum=", meter2foot(link_sum))
        raise ValueError
    cocked_connection_pos = circle_intersection(cocked_cw_ctr, lower_link_length,
                                                hanger_pos, upper_link_length)
    # all link angles measured at top of link
    cocked_upper_link_angle = rot2radians(cocked_connection_pos - hanger_pos)
    cocked_lower_link_angle = rot2radians(cocked_cw_ctr - cocked_connection_pos)
    rest_upper_link_angle = rot2radians(rest_cw_ctr - hanger_pos)
    rest_lower_link_angle = rest_upper_link_angle
    rest_connection_pos = hanger_pos + upper_link_length * radians2rot(rest_upper_link_angle)
    # end of short arm is on ellipse with foci at axle and cocked connection, with 'string' length
    # distance from axle to rest connection point.
    axle_rest_connection_distance = length_(rest_connection_pos)
    ellipse_axis_angle = rot2radians(-cocked_connection_pos)
    ellipse_a = axle_rest_connection_distance / 2.0
    ellipse_f = length_(cocked_connection_pos) / 2.0
    ellipse_e = ellipse_f / ellipse_a
    theta = ellipse_axis_angle - cocked_upper_link_angle
    connector_length = ellipse_a * (1-ellipse_e**2) / (1 - ellipse_e*cos(theta))
    # cocked_connection angle measured at connection point
    cocked_connection_angle = cocked_upper_link_angle
    cocked_short_arm_end = cocked_connection_pos + connector_length * radians2rot(cocked_connection_angle)
    short_arm_length = length_(cocked_short_arm_end)
    if abs((short_arm_length + connector_length - axle_rest_connection_distance)/axle_rest_connection_distance) > 0.001:
        print ("short arm length error:")
        print ("  ellipse_a=", meter2foot(ellipse_a))
        print ("  ellipse_f=", meter2foot(ellipse_f))
        print ("  ellipse_e=", ellipse_e)
        print ("  theta=", np.rad2deg(theta))
        print ("  connector_length=", meter2foot(connector_length))
        print ("  short_arm_length=", meter2foot(short_arm_length))
        print ("  axle_rest_connection_distance=",
               meter2foot(axle_rest_connection_distance))
        raise ValueError
    # short arm angle measured at axle
    cocked_short_arm_angle = rot2radians(cocked_short_arm_end)
    # compute beta, angle from long arm to short arm
    beta = pi + alpha - cocked_short_arm_angle
    # long arm end, cocked
    cocked_long_arm_end = long_arm_length * radians2rot(pi+alpha)
    # other dimensions
    pumpkin_diameter = inch2meter(8.0)
    pumpkin_ctr = cocked_long_arm_end + np.array((sling_length, 0.0))
    if debug:
        # rest short arm angle and position (for printing only)
        rest_short_arm_angle = rot2radians(rest_connection_pos)
        rest_short_arm_end = short_arm_length * radians2rot(rest_short_arm_angle)
        # rest long arm angle and position (for printing only)
        rest_long_arm_angle = (pi+alpha) + (rest_short_arm_angle - cocked_short_arm_angle)
        rest_long_arm_end = long_arm_length * radians2rot(rest_long_arm_angle)
        print("slide_y=", meter2foot(slide_y))
        print("long_arm_length=", meter2foot(long_arm_length))
        print("pumpkin=", meter2foot(pumpkin_ctr))
        print("hanger=", meter2foot(hanger_pos))
        print("cocked_connection=", meter2foot(cocked_connection_pos))
        print("cocked_cw=", meter2foot(cocked_cw_ctr))
        print("cocked_short_arm=", meter2foot(cocked_short_arm_end))
        print("cocked_long_arm=", meter2foot(cocked_long_arm_end))
        print("cocked_lower_link_angle=", np.rad2deg(cocked_lower_link_angle))
        print("rest_lower_link_angle=", np.rad2deg(rest_lower_link_angle))
        print("connector_length=", meter2foot(connector_length))
        print("lower_link_length=", meter2foot(lower_link_length))
        print("rest_cw_ctr=", meter2foot(rest_cw_ctr))
        print("rest_connection=", meter2foot(rest_connection_pos))
        print("rest_short_arm=", meter2foot(rest_short_arm_end))
        print("rest_long_arm=", meter2foot(rest_long_arm_end))
    ### Machine frame origin is at axle
    sim.machineFrame=Frame(sim, "machine", theta=0, origin=(0,0))
    sim.machineFrame.machine=Rectangle(sim.machineFrame,
                                       l=hanger_pos[0]+2.0,
                                       w=-slide_y+1.0,
                                       theta=0,
                                       origin=(hanger_pos[0]/2,
                                               (slide_y)/2),
                                       mass=lb2kgram(5000),
                                       color=(0,0,0))
    front_foot_pos = (hanger_pos[0], slide_y-0.5)
    rear_foot_pos = (0, slide_y - 0.5)
    sim.machineFrame.rear_foot=Rectangle(sim.machineFrame,
                                         l=0.3,
                                         w=0.1,
                                         origin=rear_foot_pos,
                                         mass=0,
                                         color=(0,0,0))
    sim.machineFrame.front_foot=Rectangle(sim.machineFrame,
                                          l=0.3,
                                          w=0.1,
                                          origin=front_foot_pos,
                                          mass=0,
                                          color=(0,0,0))
    ### Arm frame origin is at axle. Framespace has long arm horizontal to the left
    sim.armFrame=Frame(sim, "arm", theta=alpha, origin=(0,0))
    sim.armFrame.long_arm=Beam(sim.armFrame,
                               x0=-long_arm_length, d0=arm_end_depth, t0=arm_end_thick,
                               x1=0, d1=arm_depth, t1=arm_thick,
                               density=pine_density,
                               color=(0.8,0.3,0))
    sim.armFrame.short_arm=dynamics.object.Rectangle(sim.armFrame,
                                                     l=inch2meter(18.99),
                                                     w=inch2meter(8.0),
                                                     theta=-beta,
                                                     origin=(-inch2meter(15.0)*cos(beta),
                                                             inch2meter(15.0)*sin(beta)),
                                                     mass=lb2kgram(53),
                                                     color=(0.8,0.3,0))
    sim.armFrame.connector_pin=dynamics.object.Circle(sim.armFrame,
                                                      radius=inch2meter(2.0),
                                                      origin=(-short_arm_length*cos(beta),
                                                              short_arm_length*sin(beta)),
                                                      mass=lb2kgram(1),
                                                      color=(0.8,0.3,0))
    sim.armFrame.long_arm_plate=dynamics.object.Rectangle(sim.armFrame,
                                                          l=inch2meter(27.5),
                                                          w=inch2meter(8.0),
                                                          theta=0.0,
                                                          origin=(inch2meter(-6.25), 0),
                                                          mass=lb2kgram(63),
                                                          color=(0.8,0.3,0))
    sim.armFrame.release_pin=dynamics.object.Circle(sim.armFrame,
                                                    radius=inch2meter(6),
                                                    origin=(-long_arm_length, 0),
                                                    mass=release_pin_mass, color=(1.0, 1.0, 1.0))
    # Weight frame origin is at pivot point, ramp horizontal to the right
    cocked_ramp_angle = rot2radians(cocked_cw_ctr-hinge_pos)
    sim.weightFrame=dynamics.frame.Frame(sim, "weight", theta=cocked_ramp_angle, origin=hinge_pos)
    sim.weightFrame.ramp = dynamics.object.Rectangle(sim.weightFrame, l=ramp_length, w=inch2meter(4),
                                                     mass=0, color=(0.3,0.5,0.2),
                                                     origin = (ramp_length/2,0))
    sim.weightFrame.cw = dynamics.object.Rectangle(sim.weightFrame, l=foot2meter(2.6), w=foot2meter(2.6),
                                                   color=(0.3,0.5,0.2),
                                                   mass=cw_mass,
                                                   origin = (cw_moment_arm,0),
                                                   moment = cw_moment)
    # Lower link frame origin is at end of ramp
    sim.lowerLinkFrame = dynamics.frame.Frame(sim, "lower link", origin=cocked_cw_ctr,
                                              theta = cocked_lower_link_angle-pi)
    sim.lowerLinkFrame.link = dynamics.object.Rectangle(sim.lowerLinkFrame, l=lower_link_length, w=inch2meter(6),
                                                        mass=lower_link_mass, color=(1.0,0.0,0.0),
                                                        origin=(lower_link_length/2, 0.0))
    sim.lowerLinkFrame.axle=dynamics.object.Circle(sim.lowerLinkFrame,
                                                   radius=inch2meter(3),
                                                   origin=(lower_link_length, 0.0),
                                                   mass=link_axle_mass, color=(1.0, 0.0, 0.0))
    # Upper link frame origin is the hanger
    cocked_upper_link_angle = rot2radians(cocked_connection_pos-hanger_pos)
    sim.upperLinkFrame = dynamics.frame.Frame(sim, "upper link", origin=hanger_pos,
                                              theta = cocked_upper_link_angle)
    sim.upperLinkFrame.link = dynamics.object.Rectangle(sim.upperLinkFrame, l=upper_link_length, w=inch2meter(6),
                                                        mass=upper_link_mass, color=(1.0,0.0,0.0),
                                                        origin=(upper_link_length/2, 0.0))
    # Connector frame origin is the end of the short arm
    sim.connectorFrame = dynamics.frame.Frame(sim, "connector", origin=cocked_short_arm_end,
                                              theta = rot2radians(cocked_connection_pos - cocked_short_arm_end))
    sim.connectorFrame.rod = dynamics.object.Rectangle(sim.connectorFrame, l=connector_length,
                                                       w=inch2meter(2),
                                                       mass=connector_rod_mass,
                                                       color=(0.0, 0.0, 0.0),
                                                       origin=(connector_length/2, 0.0))
    sim.connectorFrame.stiffener = dynamics.object.Rectangle(sim.connectorFrame, l=connector_length,
                                                             w=inch2meter(4.0),
                                                             mass=lb2kgram(100),
                                                             color=(0.0, 0.0, 0.0),
                                                             origin=(connector_length/2, inch2meter(3.0)))
    sim.connectorFrame.brace = dynamics.object.Rectangle(sim.connectorFrame, l=foot2meter(2),
                                                         w=inch2meter(4),
                                                         mass=connector_brace_mass,
                                                         color=(0.0, 0.0, 0.0),
                                                         origin=(connector_length-foot2meter(1), 0.0))
    # Pumpkin
    sim.pumpkinFrame=dynamics.frame.Frame(sim, "pumpkin", origin=pumpkin_ctr)
    sim.pumpkinFrame.pumpkin=dynamics.object.Circle(sim.pumpkinFrame,
                                                    radius=pumpkin_diameter/2.0,
                                                    mass=pumpkin_mass, color=(1.0, 0.5, 0))
    sim.pumpkinFrame.sling=dynamics.object.Circle(sim.pumpkinFrame,
                                                  radius=pumpkin_diameter/2.0,
                                                  mass=sling_mass, color=(1.0, 0.5, 0))
    # initialize frames
    for frame in sim.frames:
        frame.init()
    # define constraints
    sim.rear_foot = Nail(sim, "rear foot",
                         obj=sim.machineFrame.rear_foot,
                         xobj=(0,0),
                         xworld=rear_foot_pos)
    sim.front_foot = NailSpring(sim, "front foot",
                                obj=sim.machineFrame.front_foot,
                                xobj=(0,0),
                                x_world=front_foot_pos,
                                spring_constant=1e6,
                                damping_constant=500e3)
    sim.axle = Pin(sim, "axle",
                   obj0=sim.armFrame.long_arm,
                   xobj0=(0, 0),
                   obj1=sim.machineFrame)
    sim.hinge =Pin(sim, "hinge",
                   obj0=sim.weightFrame.ramp,
                   xobj0=(-ramp_length/2, 0.0),
                   obj1=sim.machineFrame)
    sim.hanger = Pin(sim, "hanger",
                     obj0=sim.upperLinkFrame.link,
                     xobj0=(-upper_link_length/2.0,0.0),
                     obj1=sim.machineFrame)
    sim.linkPin = Pin(sim, "linkPin",
                      obj0=sim.upperLinkFrame.link,
                      xobj0= (upper_link_length/2.0, 0.0),
                      obj1=sim.lowerLinkFrame.link,
                      xobj1 = (lower_link_length/2.0, 0.0))
    sim.rampPin = dynamics.constraint.Pin(sim, "rampPin",
                                          obj0=sim.weightFrame.ramp,
                                          xobj0= (ramp_length/2.0, 0.0),
                                          obj1=sim.lowerLinkFrame.link,
                                          xobj1 = (-lower_link_length/2.0, 0.0))
    sim.connectorPin1 = Pin(sim, "connectorPin1",
                            obj0=sim.armFrame.connector_pin,
                            xobj0=(0.0,0.0),
                            obj1=sim.connectorFrame.rod,
                            xobj1 = (-connector_length/2.0, 0.0))
    sim.connectorPin2 = Pin(sim, "connectorPin2",
                            obj0=sim.upperLinkFrame.link,
                            xobj0=(upper_link_length/2.0,0.0),
                            obj1=sim.connectorFrame.rod,
                            xobj1 = (connector_length/2.0, 0.0))
    sim.sling=Rod(sim, "sling",
                  obj0=sim.armFrame.long_arm, xobj0=(-long_arm_length,
                                                     0),
                  obj1=sim.pumpkinFrame.pumpkin, xobj1=(0.0,0.0),
                  length=sling_length)
    '''
    sim.trigger = Rod(sim, "trigger",
                      obj0=sim.pumpkinFrame.pumpkin,
                      xobj0= (0.0, 0.0),
                      obj1=sim.machineFrame.front_foot,
                      xobj1= (0.0,0.0))
    '''
    sim.slide=Shelf(sim, "slide",
                    obj=sim.pumpkinFrame.pumpkin,
                    xobj=(0,0),
                    height=slide_y)
    if (dry_fire):
        sim.sling.enabled = False
    print( " running simulation")
    # FIX: time.clock was removed in Python 3.8; perf_counter is the
    # documented replacement for wall-clock benchmarking.
    from time import perf_counter as clock
    tstart=clock()
    sim.run(continue_sim, debug=debug)
    print (" done: time=%g sec" % (clock()-tstart))
    if not sim.release_time:
        sim.range = Y2range(sim,sim.Y)
        range_spline = scipy.interpolate.UnivariateSpline(sim.t, sim.range, k=3,s=0.0)
        d0,t0 = max( (range,time) for range,time in zip(sim.range, sim.t) ) # find guess
        sim.tmax = fsolve(range_spline, t0, args=1) # root of first derivative of range
        sim.maxrange = range_spline(sim.tmax)
        launchDegrees_spline = scipy.interpolate.UnivariateSpline(sim.t, Y2launchDegrees(sim.Y), k=3,s=0.0)
        sim.launchDegrees = launchDegrees_spline(sim.tmax)
        print (" distance=%g feet at %g sec" % (meter2foot(sim.maxrange), sim.tmax))
    else:
        sim.range=np.zeros(len(sim.t))
        sim.maxrange=0
    sim.Fmax = max(sim.hanger.Fvec())
    print("  max force on hanger = %g pounds" % (newton2lb(sim.Fmax)))
    return(sim)
def circle_intersection(ctr1, rad1, ctr2, rad2):
    """Return the intersection point of two circles.

    Of the two possible intersections, the one counter-clockwise from the
    vector ctr1->ctr2 is returned.
    """
    base = ctr2 - ctr1
    base_len = length_(base)
    # interior angle at ctr1 (law of cosines): from ctr1->ctr2 toward the intersection
    alpha = acos((base_len**2 + rad1**2 - rad2**2) / (2 * base_len * rad1))
    # angle of ctr1->ctr2 measured from the positive x axis
    beta = rot2radians(base)
    return ctr1 + rad1 * radians2rot(alpha + beta)
def continue_sim(sim, time, y):
    """Step callback for Simulation.run: return True to keep integrating.

    Side effects: drops the slide constraint once its normal force goes
    negative, and (when sim.release_time is set) disables the sling rod
    at that time.
    """
    #if time>0.001:
    #    sim.trigger.enabled = False
    if sim.slide.enabled:
        # once the shelf would have to pull the pumpkin down, let go of it
        shelf_force = sim.slide.forces[0][1]
        if shelf_force < 0.0:
            sim.slide.enabled = False
    if 0:
        # disabled experiment: angle-based sling release
        # (depends on sim.release_angle; left in for reference)
        if sim.sling.enabled:
            v = sim.pumpkinFrame.v
            angle = atan2(v.A[1], v.A[0])
            if v.A[0] > 0.0 and v.A[1] > 0.0 and angle <= sim.release_angle:
                sim.maxrange = Y2range(sim,y)[0]
                sim.sling.enabled = False
                #return False
                return True
    else:
        if sim.release_time:
            # time-based sling release
            if time >= sim.release_time:
                sim.sling.enabled = False
        return True
    if sim.armFrame.theta >= -3*pi/4:
        return True
    if sim.pumpkinFrame.v.A1[1] > 0:
        return True
    return False
def Y2range(sim, Y, with_air_friction=True):
    """Return the predicted throw range (meters) for each state row in Y.

    sim: Simulation holding the pumpkin frame.
    Y: a single state vector or an array of rows; each frame occupies six
       columns (x, y at 6*idx and 6*idx+1, vx, vy at 6*idx+3 and 6*idx+4 --
       presumably theta/omega fill the rest; verify against the integrator).
    with_air_friction: if True, integrate a drag-affected Flight per row;
       otherwise use the vacuum ballistic formula.
    """
    if (len(Y.shape)==1):
        Y = Y.reshape([1,len(Y)])
    idx = sim.pumpkinFrame.idx
    x0 = Y[:,6*idx]
    y0 = Y[:,6*idx+1]
    vx0 = Y[:,6*idx+3]
    vy0 = Y[:,6*idx+4]
    if not with_air_friction:
        # BUG FIX: the module only imports scipy/scipy.interpolate, so
        # scipy.constants.g raised AttributeError; import the subpackage here.
        import scipy.constants
        # vacuum trajectory: time of flight 2*vy/g, range tof*vx
        tof = 2.0 * vy0 / scipy.constants.g
        tof[tof < 0.0] = 0.0
        return tof * vx0
    else:
        # renamed from `range` to avoid shadowing the builtin
        ranges = np.zeros(len(x0))
        flight = Flight(mass=sim.pumpkinFrame.pumpkin.mass,
                        area=pi*sim.pumpkinFrame.pumpkin.radius**2)
        for i in np.arange(len(x0)):
            # only launches moving up and forward can land downrange
            if (vy0[i] > 0) & (vx0[i] > 0):
                flight.run([x0[i], y0[i]], [vx0[i], vy0[i]])
                ranges[i] = flight.range()
        return ranges
def Y2launchDegrees(Y):
    """Return the pumpkin's launch angle in degrees for each state row in Y.

    Columns 33/34 hold the pumpkin's vx/vy (cf. Y2range's 6*idx+3/+4
    layout) -- TODO confirm the frame index against the integrator.
    """
    if Y.ndim == 1:
        Y = Y.reshape([1, len(Y)])
    velocity_x = Y[:, 33]
    velocity_y = Y[:, 34]
    return 180. / pi * np.arctan2(velocity_y, velocity_x)
def trebPEvec(sim):
    """Sum the potential-energy traces of all machine frames (excludes the pumpkin)."""
    frames = (sim.machineFrame, sim.weightFrame, sim.upperLinkFrame,
              sim.lowerLinkFrame, sim.connectorFrame, sim.armFrame)
    total = frames[0].PEvec()
    for frame in frames[1:]:
        total = total + frame.PEvec()
    return total
def trebKEvec(sim):
    """Sum the kinetic-energy traces of all machine frames (excludes the pumpkin)."""
    frames = (sim.machineFrame, sim.weightFrame, sim.upperLinkFrame,
              sim.lowerLinkFrame, sim.connectorFrame, sim.armFrame)
    total = frames[0].KEvec()
    for frame in frames[1:]:
        total = total + frame.KEvec()
    return total
def plotEnergies(sim):
    """Plot machine PE (shifted to zero minimum), machine KE, machine total
    energy, and pumpkin total energy against simulation time.

    PERF: trebPEvec/trebKEvec each walk and sum six frames; the original
    recomputed trebPEvec four times and min() twice -- hoist them once.
    """
    pe = trebPEvec(sim)
    ke = trebKEvec(sim)
    pe_shifted = pe - min(pe)
    plot(sim.t, pe_shifted)
    plot(sim.t, ke)
    plot(sim.t, pe_shifted + ke)
    # NOTE(review): the original plotted the KE trace a second time here;
    # kept so the figure output is unchanged, but it may be unintended.
    plot(sim.t, ke)
    plot(sim.t, sim.pumpkinFrame.KEvec() + sim.pumpkinFrame.PEvec())
def opt(X):
    """Optimizer objective: negative throw distance for design vector X.

    X[0] is the sling length in feet.  Stores the last design in global X0
    and the last Simulation in global sim.  Returns 0.0 when the simulation
    fails (so the optimizer treats blown-up designs as worthless).
    """
    global sim, X0
    X0 = X
    print("X=", X)
    try:
        sim = treb(debug=False, time_step=0.0001, sim_duration=0.8,
                   sling_length=X[0])
        return -sim.maxrange
        #return -sim.maxrange / sim.Fmax**0.10
    except KeyboardInterrupt:
        # FIX: bare `raise` re-raises the active exception and preserves
        # its traceback (the original raised a brand-new KeyboardInterrupt).
        raise
    except Exception:
        # FIX: bare `except:` also swallowed SystemExit/GeneratorExit;
        # only simulation failures should count as zero range.
        return 0.0
#X0 = array([ 8.70381, 6.08564, 10.3123 ])
#X0 = array([ 8, 6, 10 ])
#X0 = [ 9.62859, 6.23794, 9.98966]
#X0 = [ 8.70153, 6.04452, 10.43426]
#X0 = array([ 8.68625, 6.00475, 10.44 ])
#X0 = array([ 8.21222, 5.58682, 11.43518, -9.0])
#X0 = array([8.411, 5.587, 11.433])
# Initial optimizer guess: sling length in feet (consumed by opt() above).
X0 = np.array([9.3])
#lower = array([ 6.0, 3.0, 5.0])
#upper = array([ 12.0, 9.0, 12.0])
#result=scipy.optimize.fmin(opt, X0)
#result=scipy.optimize.fmin_l_bfgs_b(opt, X0, approx_grad=True, bounds=None)
#result=scipy.optimize.anneal(opt, X0, lower=lower, upper=upper, T0=0.001, feps=1e-60, full_output=True)
if __name__ == '__main__':
    # Run a single simulation and animate it (Animation needs a Qt app).
    sim=treb(release_time=0.661, debug=True)
    app = QApplication(sys.argv)
    anim=Animation(sim, Y2range)
| {
"repo_name": "treygreer/treb",
"path": "treb_sim/src/first_in_fright_2013.py",
"copies": "1",
"size": "25655",
"license": "mit",
"hash": 7394276054165479000,
"line_mean": 45.4764492754,
"line_max": 120,
"alpha_frac": 0.5277333853,
"autogenerated": false,
"ratio": 3.4436241610738256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9404148496553397,
"avg_score": 0.013441809964085656,
"num_lines": 552
} |
# A little bit of molecular biology
# Codons are non-overlapping triplets of nucleotides.
# ATG CCC CTG GTA ... - this corresponds to four codons; spaces added for emphasis
# The start codon is 'ATG'
# Stop codons can be 'TGA' , 'TAA', or 'TAG', but they must be 'in frame' with the start codon. The first stop codon usually determines the end of the gene.
# In other words:
# 'ATGCCTGA...' - here TGA is not a stop codon, because the T is part of CCT
# 'ATGCCTTGA...' - here TGA is a stop codon because it is in frame (i.e. a multiple of 3 nucleic acids from ATG)
# The gene is start codon to stop codon, inclusive
# Example:
# dna - GGCATGAAAGTCAGGGCAGAGCCATCTATTTGAGCTTAC
# gene - ATGAAAGTCAGGGCAGAGCCATCTATTTGA
#dna ='GGCATGAAAGTCAGGGCAGAGCCATCTATTGCTTACATTTGCTTCTGACACAACTGTGTTCACTAGCAACCTCAAACAGACACCATGGTGCACCTGACTCCTGAGGAGAAGTCTGCCGTTACTGCCCTGTGGGGCAAGGTGAACGTGGATGAAGTTGGTGGTGAGGCCCTGGGCAGGTTGGTATCAAGGTTACAAGACAGGTTTAAGGAGACCAATAGAAACTGGGCATGTGGAGACAGAGAAGACTCTTGGGTTTCTGATAGGCACTGACTCTCTCTGCCTATTGGTCTATTTTCCCACCCTTAGGCTGCTGGTGGTCTACCCTTGGACCCAGAGGTTCTTTGAGTCCTTTGGGGATCTGTCCACTCCTGATGCTGTTATGGGCAACCCTAAGGTGAAGGCTCATGGCAAGAAAGTGCTCGGTGCCTTTAGTGATGGCCTGGCTCACCTGGACAACCTCAAGGGCACCTTTGCCACACTGAGTGAGCTGCACTGTGACAAGCTGCACGTGGATCCTGAGAACTTCAGGGTGAGTCTATGGGACCCTTGATGTTTTCTTTCCCCTTCTTTTCTATGGTTAAGTTCATGTCATAGGAAGGGGAGAAGTAACAGGGTACAGTTTAGAATGGGAAACAGACGAATGATT'
dna = 'GGGATGTTTGGGCCCTACGGGCCCTGATCGGCT'
def startCodonIndex(seq):
    """Return the index of the first start codon ('ATG') in a CAPS DNA
    sequence string, or -1 if there is none."""
    return seq.find('ATG')
def stopCodonIndex(seq, start_codon):
    """Return the index of the first in-frame stop codon.

    seq: CAPS DNA sequence string.
    start_codon: index of the start codon's first letter.
    Returns the index of the first letter of the first stop codon ('TAA',
    'TGA' or 'TAG') that is a multiple of 3 letters after the start codon,
    or -1 if none exists.

    FIX: guard against start_codon == -1 (the value startCodonIndex returns
    when no 'ATG' exists); the original then scanned an unrelated frame
    starting at index 2.
    """
    codon_length = 3
    if start_codon < 0:
        return -1
    for i in range(start_codon + codon_length, len(seq), codon_length):
        if seq[i:i + codon_length] in ("TAA", "TGA", "TAG"):
            return i
    return -1
def codingDNA(seq):
    """Return the coding sequence (gene) of a CAPS DNA string.

    The gene runs from the start codon through the first in-frame stop
    codon, inclusive.  Returns '' when no complete gene is present.

    BUG FIX: the original sliced the module-level global `dna` instead of
    the `seq` argument, so it silently ignored its input.
    """
    codon_length = 3
    start_idx = startCodonIndex(seq)
    stop_idx = stopCodonIndex(seq, start_idx)
    # no start codon or no in-frame stop codon -> no complete gene
    if start_idx == -1 or stop_idx == -1:
        return ''
    return seq[start_idx: stop_idx + codon_length]
def numCodons(seq):
    """Return the number of complete codons in a coding DNA sequence.

    input: coding DNA sequence
    output: number of codons (an integer; partial trailing codons are ignored)
    """
    # Floor division: each codon is exactly three nucleotides.
    return len(seq) // 3
def transcription(seq):
    """Transcribe a DNA coding sequence into RNA.

    Base mapping: (A->U), (T->A), (C->G), (G->C).  Characters outside
    A/T/C/G are dropped, matching the original behavior.
    """
    complement = {"A": "U", "T": "A", "C": "G", "G": "C"}
    return ''.join(complement.get(base, '') for base in seq)
# Calling the functions on the example sequence above.
# NOTE(review): numCodons is applied to the whole input, so it counts
# codon-length chunks of everything, not just the gene; computing it from
# coding_dna would be more accurate.
codons = numCodons(dna)
start = startCodonIndex(dna)
stop = stopCodonIndex(dna, start)
coding_dna = codingDNA(dna)
coding_rna = transcription(coding_dna)
# Report each derived quantity.
print(("DNA: {}".format(dna)))
print(("CODONS: {}".format(codons)))
print(("START: {}".format(start)))
print(("STOP: {}".format(stop)))
print(("CODING DNA: {}".format(coding_dna)))
print(("TRANSCRIBED RNA: {}".format(coding_rna)))
| {
"repo_name": "WomensCodingCircle/CodingCirclePython",
"path": "Lesson05_Strings/DNAExtravaganzaSOLUTION.py",
"copies": "1",
"size": "3769",
"license": "mit",
"hash": 5725075422715767000,
"line_mean": 39.0957446809,
"line_max": 654,
"alpha_frac": 0.7073494296,
"autogenerated": false,
"ratio": 2.9330739299610893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9107545290037742,
"avg_score": 0.00657561390466947,
"num_lines": 94
} |
#A little encapsulated databse for storing elevation profiles of OSM ways
import os
import sqlite3
try:
import json
except ImportError:
import simplejson as json
import binascii
from struct import pack, unpack
def pack_coords(coords):
    """Encode a list of (x, y) pairs as a base64 string.

    Each pair is packed as two native 32-bit floats (struct format "ff").

    BUG FIX: join the packed chunks with b"" so this works on Python 3,
    where struct.pack returns bytes; on Python 2, b"" is str, so the
    behavior is unchanged.
    """
    return binascii.b2a_base64(b"".join([pack("ff", *coord) for coord in coords]))
def unpack_coords(str):
    """Decode a base64 string produced by pack_coords back into a list of
    (x, y) tuples -- 8 packed bytes (two 32-bit floats) per pair."""
    raw = binascii.a2b_base64(str)
    return [unpack("ff", raw[offset:offset + 8]) for offset in range(0, len(raw), 8)]
class ProfileDB:
    """A small SQLite-backed store mapping OSM way ids to packed elevation
    profiles (lists of (x, y) coordinate pairs)."""

    def __init__(self, dbname, overwrite=False):
        """Open the database file; when overwrite is True, delete any
        existing file and recreate the schema."""
        if overwrite:
            try:
                os.remove(dbname)
            except OSError:
                # file did not exist -- nothing to remove
                pass
        self.conn = sqlite3.connect(dbname)
        if overwrite:
            self.setup()

    def setup(self):
        """Create the profiles table and its id index."""
        c = self.conn.cursor()
        c.execute("CREATE TABLE profiles (id TEXT, profile TEXT)")
        c.execute("CREATE INDEX profile_id ON profiles (id)")
        self.conn.commit()
        c.close()

    def store(self, id, profile):
        """Insert a profile (list of (x, y) pairs) under the given id."""
        c = self.conn.cursor()
        c.execute("INSERT INTO profiles VALUES (?, ?)", (id, pack_coords(profile)))
        # FIX: commit so the insert survives the connection being closed
        # (setup() commits, but store() previously never did).
        self.conn.commit()
        c.close()

    def get(self, id):
        """Return the profile stored under id, or None if absent."""
        c = self.conn.cursor()
        c.execute("SELECT profile FROM profiles WHERE id = ?", (id,))
        try:
            # FIX: cursor.next() is Python-2 only; fetchone() works on both
            # and returns None instead of raising StopIteration.
            row = c.fetchone()
        finally:
            c.close()
        if row is None:
            return None
        return unpack_coords(row[0])

    def execute(self, sql, args=None):
        """Run an arbitrary SQL statement, yielding result rows."""
        c = self.conn.cursor()
        if args:
            for row in c.execute(sql, args):
                yield row
        else:
            for row in c.execute(sql):
                yield row
        c.close()
from sys import argv
def main():
    """CLI entry point: dump one profile, or list all profile ids.

    Usage: python profiledb.py profiledb_filename [profile_id]

    FIX: converted Python-2 print statements to print() calls so the
    script runs under Python 3.
    """
    if len(argv) > 1:
        pdb = ProfileDB(argv[1])
        if len(argv) > 2:
            print(pdb.get(argv[2]))
        else:
            for (id,) in list(pdb.execute("SELECT id from profiles")):
                print(id)
    else:
        print("python profiledb.py profiledb_filename [profile_id]")
if __name__ == '__main__':
    main()
| {
"repo_name": "jeriksson/graphserver",
"path": "pygs/graphserver/ext/osm/profiledb.py",
"copies": "3",
"size": "2230",
"license": "bsd-3-clause",
"hash": -1838021591556148000,
"line_mean": 25.2352941176,
"line_max": 87,
"alpha_frac": 0.5156950673,
"autogenerated": false,
"ratio": 4.032549728752261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03147644892066991,
"num_lines": 85
} |
""" A little example script showing a Capture-Game tournament between
- a random player
- a kill-on-sight player
- a small-network-player with random weights
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.rl.environments.twoplayergames import CaptureGame
from pybrain.rl.agents.capturegameplayers import RandomCapturePlayer, KillingPlayer, ModuleDecidingPlayer
from pybrain.rl.agents.capturegameplayers.clientwrapper import ClientCapturePlayer
from pybrain.rl.experiments import Tournament
from pybrain.tools.shortcuts import buildNetwork
from pybrain import SigmoidLayer
# Build the game and the four local contestants.
# FIX: converted Python-2 print statements to print() calls and narrowed
# the bare except (which also swallowed KeyboardInterrupt/SystemExit).
game = CaptureGame(5)
randAgent = RandomCapturePlayer(game, name='rand')
killAgent = KillingPlayer(game, name='kill')
# the network's outputs are probabilities of choosing the action, thus a sigmoid output layer
net = buildNetwork(game.outdim, game.indim, outclass=SigmoidLayer)
netAgent = ModuleDecidingPlayer(net, game, name='net')
# same network, but greedy decisions:
netAgentGreedy = ModuleDecidingPlayer(net, game, name='greedy', greedySelection=True)
agents = [randAgent, killAgent, netAgent, netAgentGreedy]
# Optionally add a Java-server-backed player; skip it if the server is down.
try:
    javaAgent = ClientCapturePlayer(game, name='java')
    agents.append(javaAgent)
except Exception:
    print('No Java server available.')
print()
print('Starting tournament...')
tourn = Tournament(game, agents)
tourn.organize(50)
print(tourn)
# try a different network, and play again:
net.randomize()
tourn.reset()
tourn.organize(50)
print(tourn)
| {
"repo_name": "daanwierstra/pybrain",
"path": "examples/capturegame/minitournament.py",
"copies": "1",
"size": "1468",
"license": "bsd-3-clause",
"hash": -4417278950011121000,
"line_mean": 29.5833333333,
"line_max": 105,
"alpha_frac": 0.780653951,
"autogenerated": false,
"ratio": 3.4704491725768323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4751103123576832,
"avg_score": null,
"num_lines": null
} |
# A little helper module for plotting of broadbean objects
from typing import Tuple, Union, Dict, List
import numpy as np
import matplotlib.pyplot as plt
from broadbean import Sequence, BluePrint, Element
from broadbean.sequence import SequenceConsistencyError
# The object we can/want to plot
BBObject = Union[Sequence, BluePrint, Element]
def getSIScalingAndPrefix(minmax: Tuple[float, float]) -> Tuple[float, str]:
    """
    Return the scaling exponent and unit prefix. E.g. (-2e-3, 1e-6) will
    return (1e3, 'm')

    Args:
        minmax: The (min, max) value of the signal

    Returns:
        A tuple of the scaling (inverse of the prefix) and the prefix
        string.
    """
    largest = max(abs(minmax[0]), abs(minmax[1]))
    if largest == 0:
        # an all-zero signal gets no prefix
        largest = 1
    exponent = np.log10(largest)
    # Walk down the SI prefixes; the later (smaller-signal) checks win.
    scaling, prefix = 1, ''
    if exponent < 0:
        scaling, prefix = 1e3, 'm'
    if exponent < -3:
        scaling, prefix = 1e6, 'micro '
    if exponent < -6:
        scaling, prefix = 1e9, 'n'
    return (scaling, prefix)
def _plot_object_validator(obj_to_plot: BBObject) -> None:
    """
    Validate the object before forging/plotting.
    """
    if isinstance(obj_to_plot, Sequence):
        # checkConsistency reports problems itself; bail out on failure
        if not obj_to_plot.checkConsistency(verbose=True):
            raise SequenceConsistencyError
    elif isinstance(obj_to_plot, Element):
        obj_to_plot.validateDurations()
    elif isinstance(obj_to_plot, BluePrint):
        # a blueprint needs a sample rate before it can be forged
        assert obj_to_plot.SR is not None
def _plot_object_forger(obj_to_plot: BBObject,
                        **forger_kwargs) -> Dict[int, Dict]:
    """
    Make a forged sequence out of any object.
    Returns a forged sequence.

    Args:
        obj_to_plot: a BluePrint, Element, or Sequence
        **forger_kwargs: passed through to Sequence.forge

    Raises:
        TypeError: if obj_to_plot is none of the supported types
    """
    if isinstance(obj_to_plot, BluePrint):
        # wrap the blueprint in a single-channel element inside a
        # single-position sequence
        elem = Element()
        elem.addBluePrint(1, obj_to_plot)
        seq = Sequence()
        seq.addElement(1, elem)
        seq.setSR(obj_to_plot.SR)
    elif isinstance(obj_to_plot, Element):
        seq = Sequence()
        seq.addElement(1, obj_to_plot)
        seq.setSR(obj_to_plot._meta['SR'])
    elif isinstance(obj_to_plot, Sequence):
        seq = obj_to_plot
    else:
        # FIX: previously fell through and raised an opaque
        # NameError ('seq' referenced before assignment)
        raise TypeError('Cannot forge object of type '
                        '{}'.format(type(obj_to_plot).__name__))
    forged_seq = seq.forge(includetime=True, **forger_kwargs)
    return forged_seq
def _plot_summariser(seq: Dict[int, Dict]) -> Dict[int, Dict[str, np.ndarray]]:
"""
Return a plotting summary of a subsequence.
Args:
seq: The 'content' value of a forged sequence where a
subsequence resides
Returns:
A dict that looks like a forged element, but all waveforms
are just two points, np.array([min, max])
"""
output = {}
# we assume correctness, all postions specify the same channels
chans = seq[1]['data'].keys()
minmax = dict(zip(chans, [(0, 0)]*len(chans)))
for element in seq.values():
arr_dict = element['data']
for chan in chans:
wfm = arr_dict[chan]['wfm']
if wfm.min() < minmax[chan][0]:
minmax[chan] = (wfm.min(), minmax[chan][1])
if wfm.max() > minmax[chan][1]:
minmax[chan] = (minmax[chan][0], wfm.max())
output[chan] = {'wfm': np.array(minmax[chan]),
'm1': np.zeros(2),
'm2': np.zeros(2),
'time': np.linspace(0, 1, 2)}
return output
# the Grand Unified Plotter
def plotter(obj_to_plot: BBObject, **forger_kwargs) -> None:
    """
    The one plot function to be called. Turns whatever it gets
    into a sequence, forges it, and plots that.

    Args:
        obj_to_plot: a BluePrint, Element, or Sequence to visualise
        **forger_kwargs: passed to the forging step
            (via _plot_object_forger)
    """
    # TODO: Take axes as input

    # strategy:
    # * Validate
    # * Forge
    # * Plot

    _plot_object_validator(obj_to_plot)

    seq = _plot_object_forger(obj_to_plot, **forger_kwargs)

    # Get the dimensions.
    chans = seq[1]['content'][1]['data'].keys()
    seqlen = len(seq.keys())

    def update_minmax(chanminmax, wfmdata, chanind):
        # widen the running (min, max) pair for this channel
        (thismin, thismax) = (wfmdata.min(), wfmdata.max())
        if thismin < chanminmax[chanind][0]:
            chanminmax[chanind] = [thismin, chanminmax[chanind][1]]
        if thismax > chanminmax[chanind][1]:
            chanminmax[chanind] = [chanminmax[chanind][0], thismax]
        return chanminmax

    # Then figure out the figure scalings
    minf: float = -np.inf
    inf: float = np.inf
    chanminmax: List[Tuple[float, float]] = [(inf, minf)]*len(chans)
    # scan every position (elements and subsequences) for the global
    # per-channel voltage range
    for chanind, chan in enumerate(chans):
        for pos in range(1, seqlen+1):
            if seq[pos]['type'] == 'element':
                wfmdata = (seq[pos]['content'][1]
                           ['data'][chan]['wfm'])
                chanminmax = update_minmax(chanminmax, wfmdata, chanind)
            elif seq[pos]['type'] == 'subsequence':
                for pos2 in seq[pos]['content'].keys():
                    elem = seq[pos]['content'][pos2]['data']
                    wfmdata = elem[chan]['wfm']
                    chanminmax = update_minmax(chanminmax,
                                               wfmdata, chanind)

    # one row per channel, one column per sequence position
    fig, axs = plt.subplots(len(chans), seqlen)

    # ...and do the plotting
    for chanind, chan in enumerate(chans):

        # figure out the channel voltage scaling
        # The entire channel shares a y-axis
        minmax: Tuple[float, float] = chanminmax[chanind]
        (voltagescaling, voltageprefix) = getSIScalingAndPrefix(minmax)
        voltageunit = voltageprefix + 'V'

        for pos in range(seqlen):
            # 1 by N arrays are indexed differently than M by N arrays
            # and 1 by 1 arrays are not arrays at all...
            if len(chans) == 1 and seqlen > 1:
                ax = axs[pos]
            if len(chans) > 1 and seqlen == 1:
                ax = axs[chanind]
            if len(chans) == 1 and seqlen == 1:
                ax = axs
            if len(chans) > 1 and seqlen > 1:
                ax = axs[chanind, pos]

            # reduce the tickmark density (must be called before scaling)
            ax.locator_params(tight=True, nbins=4, prune='lower')

            if seq[pos+1]['type'] == 'element':
                content = seq[pos+1]['content'][1]['data'][chan]
                wfm = content['wfm']
                # markers default to all-zero (i.e. off) when absent
                m1 = content.get('m1', np.zeros_like(wfm))
                m2 = content.get('m2', np.zeros_like(wfm))
                time = content['time']
                newdurs = content.get('newdurations', [])
            else:
                # subsequences are drawn as a min/max band, not a trace
                arr_dict = _plot_summariser(seq[pos+1]['content'])
                wfm = arr_dict[chan]['wfm']
                newdurs = []
                ax.annotate('SUBSEQ', xy=(0.5, 0.5),
                            xycoords='axes fraction',
                            horizontalalignment='center')
                time = np.linspace(0, 1, 2)  # needed for timeexponent

            # Figure out the axes' scaling
            timeexponent = np.log10(time.max())
            timeunit = 's'
            timescaling: float = 1.0
            if timeexponent < 0:
                timeunit = 'ms'
                timescaling = 1e3
            if timeexponent < -3:
                timeunit = 'micro s'
                timescaling = 1e6
            if timeexponent < -6:
                timeunit = 'ns'
                timescaling = 1e9

            if seq[pos+1]['type'] == 'element':
                ax.plot(timescaling*time, voltagescaling*wfm, lw=3,
                        color=(0.6, 0.4, 0.3), alpha=0.4)

            # shared y-limits, with headroom above for the marker lanes
            ymax = voltagescaling * chanminmax[chanind][1]
            ymin = voltagescaling * chanminmax[chanind][0]
            yrange = ymax - ymin
            ax.set_ylim([ymin-0.05*yrange, ymax+0.2*yrange])

            if seq[pos+1]['type'] == 'element':
                # TODO: make this work for more than two markers

                # marker1 (red, on top)
                y_m1 = ymax+0.15*yrange
                marker_on = np.ones_like(m1)
                marker_on[m1 == 0] = np.nan
                marker_off = np.ones_like(m1)
                # faint line for the whole lane, solid where the marker is on
                ax.plot(timescaling*time, y_m1*marker_off,
                        color=(0.6, 0.1, 0.1), alpha=0.2, lw=2)
                ax.plot(timescaling*time, y_m1*marker_on,
                        color=(0.6, 0.1, 0.1), alpha=0.6, lw=2)

                # marker 2 (blue, below the red)
                y_m2 = ymax+0.10*yrange
                marker_on = np.ones_like(m2)
                marker_on[m2 == 0] = np.nan
                marker_off = np.ones_like(m2)
                ax.plot(timescaling*time, y_m2*marker_off,
                        color=(0.1, 0.1, 0.6), alpha=0.2, lw=2)
                ax.plot(timescaling*time, y_m2*marker_on,
                        color=(0.1, 0.1, 0.6), alpha=0.6, lw=2)

            # If subsequence, plot lines indicating min and max value
            if seq[pos+1]['type'] == 'subsequence':
                # NOTE(review): these two plots use raw `time`, not
                # timescaling*time as the element branch does — confirm
                # whether that is intentional.
                # min:
                ax.plot(time, np.ones_like(time)*wfm[0],
                        color=(0.12, 0.12, 0.12), alpha=0.2, lw=2)
                # max:
                ax.plot(time, np.ones_like(time)*wfm[1],
                        color=(0.12, 0.12, 0.12), alpha=0.2, lw=2)

                ax.set_xticks([])

            # time step lines
            for dur in np.cumsum(newdurs):
                ax.plot([timescaling*dur, timescaling*dur],
                        [ax.get_ylim()[0], ax.get_ylim()[1]],
                        color=(0.312, 0.2, 0.33),
                        alpha=0.3)

            # labels
            if pos == 0:
                ax.set_ylabel('({})'.format(voltageunit))
            if pos == seqlen - 1 and not(isinstance(obj_to_plot, BluePrint)):
                # channel name on a twin axis at the right-hand edge
                newax = ax.twinx()
                newax.set_yticks([])
                if isinstance(chan, int):
                    new_ylabel = f'Ch. {chan}'
                elif isinstance(chan, str):
                    new_ylabel = chan
                newax.set_ylabel(new_ylabel)

            if seq[pos+1]['type'] == 'subsequence':
                ax.set_xlabel('Time N/A')
            else:
                ax.set_xlabel('({})'.format(timeunit))

            # remove excess space from the plot
            if not chanind+1 == len(chans):
                ax.set_xticks([])
            if not pos == 0:
                ax.set_yticks([])
            fig.subplots_adjust(hspace=0, wspace=0)

            # display sequencer information
            if chanind == 0 and isinstance(obj_to_plot, Sequence):
                seq_info = seq[pos+1]['sequencing']
                titlestring = ''
                if seq_info['twait'] == 1:  # trigger wait
                    titlestring += 'T '
                if seq_info['nrep'] > 1:  # nreps
                    titlestring += '\u21BB{} '.format(seq_info['nrep'])
                if seq_info['nrep'] == 0:
                    # nrep == 0 means repeat forever
                    titlestring += '\u221E '
                if seq_info['jump_input'] != 0:
                    if seq_info['jump_input'] == -1:
                        titlestring += 'E\u2192 '
                    else:
                        titlestring += 'E{} '.format(seq_info['jump_input'])
                if seq_info['goto'] > 0:
                    titlestring += '\u21b1{}'.format(seq_info['goto'])

                ax.set_title(titlestring)
| {
"repo_name": "WilliamHPNielsen/broadbean",
"path": "broadbean/plotting.py",
"copies": "1",
"size": "11374",
"license": "mit",
"hash": 7779729029073954000,
"line_mean": 33.6768292683,
"line_max": 79,
"alpha_frac": 0.5025496747,
"autogenerated": false,
"ratio": 3.665485014502095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9666764363998843,
"avg_score": 0.0002540650406504065,
"num_lines": 328
} |
from numpy import *
def symmetricPositiveDefinite(n, maxValue=1):
    '''Generates a n x n random symmetric, positive-definite matrix.

    The optional maxValue argument can be used to specify a maximum
    absolute value for extradiagonal coefficients.
    Diagonal coefficients are at most 2*(n+1)*maxValue in
    absolute value.
    Returns a float array.
    Runs in O(n^2)'''
    # To generate such a matrix we use the fact that a symmetric
    # diagonally dominant matrix is symmetric positive definite.
    # We first generate a random matrix
    # with coefficients between -maxValue and +maxValue.
    # (random.random_integers was deprecated and removed from NumPy;
    # randint with exclusive upper bound maxValue+1 draws from the
    # same inclusive range.)
    A = random.randint(-maxValue, maxValue + 1, (n, n))
    # Then by adding to this matrix its transpose, we obtain
    # a symmetric matrix
    A = A + A.transpose()
    # Finally we make sure it is strictly diagonally dominant by
    # adding 2*n*maxValue times the identity matrix.
    # (Out-of-place add: an in-place `A += ...*eye(n)` would try to
    # cast floats into the integer array and fail on modern NumPy.)
    A = A + 2*n*maxValue*eye(n)
    return A
def symmetricSparsePositiveDefinite(n, nbZeros, maxValue=1):
    '''Generates a n x n random symmetric, positive-definite matrix
    with around nbZeros null coefficients (more precisely nbZeros+-1).
    nbZeros must be between 0 and n*(n-1).
    The optional maxValue argument can be used to specify a maximum
    absolute value for extradiagonal coefficients.
    Diagonal coefficients all equal n*maxValue.
    Runs in O(n^2)'''
    # Same idea as symmetricPositiveDefinite, except that the starting
    # matrix is sparse symmetric: we fill in random symmetric pairs
    # until only ~nbZeros off-diagonal zeros remain.
    M = zeros((n, n))
    remaining = n*(n-1)  # off-diagonal zeros still present
    while remaining > nbZeros:
        r, c = random.randint(n, size=2)
        if r == c or M[r, c] != 0:
            continue
        # keep drawing until we get a nonzero value for this pair
        while M[r, c] == 0:
            M[r, c] = M[c, r] = random.randint(-maxValue, maxValue+1)
        remaining -= 2
    # Make it strictly diagonally dominant (hence positive definite)
    # by adding n*maxValue times the identity matrix.
    M += n*maxValue*eye(n)
    return M
def isSymmetric(M):
    '''Returns true if and only if M is symmetric'''
    # a matrix is symmetric exactly when it equals its own transpose
    transposed = M.transpose()
    return array_equal(M, transposed)
def isDefinitePositive(M):
''' Returns true if and only if M is definite positive'''
# using the fact that if all its eigenvalues are positive,
# M is definite positive
# be careful, as eigvals use numerical methods, some eigenvalues
# which are in reality equal to zero can be found negative
eps = 1e-5
for ev in linalg.eigvals(M):
if ev <= 0-eps:
return False
return True
| {
"repo_name": "ethiery/heat-solver",
"path": "trunk/matgen.py",
"copies": "1",
"size": "2672",
"license": "mit",
"hash": -6380085539740939000,
"line_mean": 34.6266666667,
"line_max": 71,
"alpha_frac": 0.6781437126,
"autogenerated": false,
"ratio": 3.866859623733719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045003336333719,
"avg_score": null,
"num_lines": null
} |
# A little script that calculates the area of each grid cells
# Area output is in sqkm!!!
# Will only work for WGS84 lat/lon grids!
# NOTE(review): Python 2 / ArcGIS 9.3 (arcgisscripting) script — the
# bare `print` statements will not run under Python 3.

import arcgisscripting, sys, os, math

# {{{ STARTUP STUFF...
# First we need to check that the arguments are ok...
gp = arcgisscripting.create(9.3)
try:
    if len(sys.argv) == 0: raise
    # double the backslashes so Windows paths survive later string use
    inGrid = sys.argv[1].replace('\\','\\\\')
    outGrid = sys.argv[2].replace('\\','\\\\')
    try:
        # optional third argument: output workspace
        thisWorkspace = sys.argv[3].replace('\\','\\\\')
    except:
        thisWorkspace = gp.Workspace
except:
    print "\n\nError:", str(sys.exc_info())
    print "\n\nPlease verify the input arguments\n\ncalculategridarea.py <input grid> <output grid> [workspace]\n\n"
    sys.exit()
print "\nCalculating the grid cell area of :\n....." + inGrid + "\nand creating output grid\n....." + outGrid + "\nin workspace\n....." + thisWorkspace + "\n"
tempWs = os.environ["temp"]
# }}}

# Spatial Analyst setup: snap the analysis cell size/extent to the input
gp.toolbox = "SA"
gp.CheckOutExtension("Spatial")
gp.OverWriteOutput = 1
desc = gp.describe(inGrid)
gp.CellSize = desc.MeanCellHeight
gp.Extent = desc.Extent

# rough planar conversions (1 degree of latitude ~ 111120 m)
cellSizeMeters = float(gp.CellSize) * 111120
cellArea = cellSizeMeters * cellSizeMeters
halfCellWidth = float(cellSizeMeters) / 2
earthRadius = 6371  # km — so computed areas come out in sq km
oneDeginRad = 0.0174532925  # pi / 180
cellSizeinRad = float(gp.cellSize) * ((2*math.pi) /360)
halfaDeginRad = 0.00872664625
gp.Workspace = tempWs
# The map-algebra calls below reproduce this original AML workflow:
#SETWINDOW -180 -90 180 90
#SETCELL 0.5
#&SETVAR oneDeginRad = 0.0174532925
#&SETVAR halfaDeginRad = 0.00872664625
#worldxl1 = float(($$colmap * .5) - 180) * %oneDeginRad%
# left edge of each cell, in radians
# NOTE(review): this subtracts Extent.xMax where the AML used 180;
# correct only for extents symmetric about 0 (xMax == -xMin) — verify.
gp.SingleOutputMapAlgebra_sa ("float(($$colmap * " + str(gp.CellSize) + ") - " + str(desc.Extent.xMax) +") * " + str(oneDeginRad), "worldxl1")
#worldxr1 = worldxl1 + %halfaDeginRad%
gp.SingleOutputMapAlgebra_sa ("worldxl1 + " + str(cellSizeinRad), "worldxr1")
#worldyt1 = - float( ($$rowmap * .5) - 90) * %oneDeginRad%
# top edge of each cell, in radians
gp.SingleOutputMapAlgebra_sa ("-1 * (float( ($$rowmap * " + str(gp.CellSize) + ") - " + str(desc.Extent.yMax) + ") * "+ str(oneDeginRad) + ")", "worldyt1")
#worldyb1 = worldyt1 - %halfaDeginRad%
gp.SingleOutputMapAlgebra_sa ("worldyt1 - " + str(cellSizeinRad), "worldyb1")
#term1 = earthRadius * earthRadius
gp.SingleOutputMapAlgebra_sa ("6371 * 6371", "term1")
# spherical cell area = R^2 * (lon_right - lon_left) * (sin(lat_top) - sin(lat_bottom))
#term2 = worldxr1 - worldxl1
#term3a = sin (worldyt1)
#term3b = sin (worldyb1)
#term3 = term3a - term3b
#worldarea1 = term1 * term2 * term3
gp.SingleOutputMapAlgebra_sa ("worldxr1 - worldxl1","term2")
gp.SingleOutputMapAlgebra_sa ("sin (worldyt1)","term3a")
gp.SingleOutputMapAlgebra_sa ("sin (worldyb1)","term3b")
gp.SingleOutputMapAlgebra_sa ("term3a - term3b","term3")
gp.SingleOutputMapAlgebra_sa ("term1 * term2 * term3","worldarea1")
# tag the result as WGS84 and copy it to the requested workspace
gp.DefineProjection_management("worldarea1","GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]")
gp.copy ("worldarea1", thisWorkspace + "/" + outGrid)
print "\n\nCalculation complete, created: " + thisWorkspace + "/" + outGrid + "\n\n"
| {
"repo_name": "fraxen/nordpil_arcpy",
"path": "python/calculategridarea.py",
"copies": "1",
"size": "2949",
"license": "apache-2.0",
"hash": -5420444789425520000,
"line_mean": 36.8076923077,
"line_max": 192,
"alpha_frac": 0.6941336046,
"autogenerated": false,
"ratio": 2.6260017809439002,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38201353855439,
"avg_score": null,
"num_lines": null
} |
# A little script that griddifies a bunch of shape files and then takes the median extent
# for the arctic sea ice
# * rasterize shapefiles
# * get median
# * convert back to polygons
# * reproject
# NOTE(review): Python 2 / ArcGIS 9.3 (arcgisscripting) script.

import arcgisscripting

gp = arcgisscripting.create(9.3)
gp.Workspace = "u:/ws/ice"
gp.toolbox = "SA"
gp.CheckOutExtension("Spatial")
gp.OverWriteOutput = 1
cellSize_PS = 1000  # output cell size
outCS = "Y:/config/Application Data/ESRI/ArcMap/Coordinate Systems/Arctic LAEA Greenwich.prj"
gp.Extent = "MAXOF"
gp.CellSize = cellSize_PS

# process September ("09") and March ("03") extents separately
for month in ["09","03"]:
    # rasterize each year's ice-extent polygon for this month
    for year in range(1979,2000):
        thisYear = str(year)
        print "\n\nRasterizing year " + thisYear
        gp.FeatureToRaster_conversion("extent_N_" + thisYear + month + "_polygon.shp", "INDEX", "ice" + thisYear + month + "_2", gp.CellSize)
        print "Cleaning up year " + thisYear
        # keep only ice-covered cells (value 1); everything else NoData
        gp.SingleOutputMapAlgebra_sa("SETNULL(ISNULL(ice" + thisYear + month + "_2),1)","ice" + thisYear + month + "_3")
    # build one map-algebra expression that sums ice presence (0/1)
    # across all 21 years
    sumString = ""
    for year in range(1979,2000):
        thisGrid = "ice" + str(year) + month + "_3"
        sumString = sumString + "con(ISNULL(" + thisGrid + "),0,1) + "
    print "\n\nSumming up the years..."
    gp.SingleOutputMapAlgebra_sa(sumString + "0","ice" + month + "_4")
    print "\n\nGrabbing the median"
    # median over 21 years = cells with ice in at least 11 of them
    gp.SingleOutputMapAlgebra_sa("SETNULL(ice" + month + "_4 LT 11,1)","ice" + month + "_5")
    # back to polygons, then reproject to the Arctic LAEA system
    gp.RasterToPolygon_conversion("ice" + month + "_5", "ice" + month + "_6.shp", "NO_SIMPLIFY")
    gp.Project_management ("ice" + month + "_6.shp","ice" + month + "_7.shp",outCS)
| {
"repo_name": "fraxen/nordpil_arcpy",
"path": "python/medianice.py",
"copies": "1",
"size": "1491",
"license": "apache-2.0",
"hash": -7900366698218198000,
"line_mean": 39.2972972973,
"line_max": 135,
"alpha_frac": 0.6753856472,
"autogenerated": false,
"ratio": 2.7713754646840147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3946761111884015,
"avg_score": null,
"num_lines": null
} |
# A little script that splits the Arctic into four tiles, and then does some basic raster processing
# * Subsets the arctic in geog projection to four tiles
# * Then reprojects those separately
# * ...and then mosaics them
# NOTE(review): Python 2 / ArcGIS 9.3 (arcgisscripting) script.

import arcgisscripting

gp = arcgisscripting.create(9.3)
gp.Workspace = "u:/ws/grump"
gp.toolbox = "SA"
gp.CheckOutExtension("Spatial")
gp.OverWriteOutput = 1
cellSize_Geo = "MAXOF"   # keep the source resolution while subsetting
cellSize_Result = 1000   # cell size for the reprojected output

# [name, "xmin ymin xmax ymax"] in geographic coordinates; the tiles
# overlap so the mosaic has no seams
tiles = [['nw', '-180 40 -85 90'], ['sw','-90 40 5 90'],['se','-5 40 95 90'],['ne','85 40 180 90']]
for tile in tiles:
    print "\n\nprocessing tile " + tile[0]
    thisTile = tile[0]
    gp.Extent = tile[1]
    gp.CellSize = cellSize_Geo
    print "subsetting tile " + tile[0]
    # identity map-algebra copy, clipped to the tile extent set above
    gp.SingleOutputMapAlgebra_sa("grump_pd2","grump_pd3" + thisTile)
    gp.Extent = "MAXOF"
    print "reprojecting tile " + tile[0]
    gp.ProjectRaster_management ("grump_pd3" + thisTile, "grump_pd4" + thisTile, "Y:/config/Application Data/ESRI/ArcMap/Coordinate Systems/Arctic LAEA Greenwich.prj", "nearest", cellSize_Result)
gp.Extent = "MAXOF"
# remove any previous result before mosaicking
if gp.Exists("grump_pd5"):
    gp.Delete("grump_pd5")
print "\n\nmosaicing tiles..."
# overlapping areas are averaged ("Mean")
gp.MosaicToNewRaster_management ("grump_pd4nw;grump_pd4sw;grump_pd4ne;grump_pd4se", gp.Workspace, "grump_pd5", "#", "#", cellSize_Result, "#", "Mean")
print "\n\nDone!"
| {
"repo_name": "fraxen/nordpil_arcpy",
"path": "python/projectpopden.py",
"copies": "1",
"size": "1286",
"license": "apache-2.0",
"hash": -3588808961283824000,
"line_mean": 37.9696969697,
"line_max": 193,
"alpha_frac": 0.7029548989,
"autogenerated": false,
"ratio": 2.7245762711864407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39275311700864407,
"avg_score": null,
"num_lines": null
} |
# A little script that takes a TCAT tweet file as input, trains an NLTK
# Naive Bayes classifier on a manually labeled tweet file, and writes a
# new file that adds a predicted 'label' column on the right.
# NOTE(review): the original header referenced the VADER sentiment
# library, but this script actually performs Naive Bayes classification.

# change as required:
filename_labeled = 'tcat_trump_5510tweets_labeled.csv'
colname_labeled_text = 'text'
colname_labeled_label = 'label'
filename_tolabel = 'tcat_trump_5510tweets.csv'
colname_tolabel_text = 'text'

import csv
import nltk
from nltk.tokenize import word_tokenize

# read the labeled training data; `with` closes the file promptly
# (the original left the handle open)
train = []
with open(filename_labeled, newline='\n') as csvread_labeled:
    csvreader_labeled = csv.DictReader(csvread_labeled, delimiter=',',
                                       quotechar='"')
    for row in csvreader_labeled:
        train.append((row[colname_labeled_text].lower(),
                      row[colname_labeled_label]))

# create the overall feature vector:
all_words = set(word for passage in train
                for word in word_tokenize(passage[0]))

# create a feature vector for each text passage
t = [({word: (word in word_tokenize(x[0])) for word in all_words}, x[1])
     for x in train]

# train the classifier
classifier = nltk.NaiveBayesClassifier.train(t)
# classifier.show_most_informative_features()

# count the input lines once (includes the header row), closing the
# handle instead of leaking it like the original open().readlines()
with open(filename_tolabel) as line_counter:
    rowcount = sum(1 for _ in line_counter)

# read the lines to label, classify and write to new file
with open(filename_tolabel, newline='\n') as csvread_tolabel, \
     open(filename_tolabel[:-4] + "_BAYES.csv", 'w',
          newline='\n') as csvwrite:
    csvreader_tolabel = csv.DictReader(csvread_tolabel, delimiter=',',
                                       quotechar='"')
    colnames = csvreader_tolabel.fieldnames
    colnames.extend(['label'])
    csvwriter = csv.DictWriter(csvwrite, fieldnames=colnames)
    csvwriter.writeheader()
    for row in csvreader_tolabel:
        line_features = {word: (word in word_tokenize(row[colname_tolabel_text].lower())) for word in all_words}
        row.update({'label': classifier.classify(line_features)})
        csvwriter.writerow(row)
        rowcount -= 1
        # countdown progress indicator
        print(rowcount)
"repo_name": "bernorieder/textprocessing",
"path": "test_bayes.py",
"copies": "1",
"size": "2094",
"license": "unlicense",
"hash": -5939355017558395000,
"line_mean": 36.4107142857,
"line_max": 121,
"alpha_frac": 0.753104107,
"autogenerated": false,
"ratio": 3.1872146118721463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4440318718872146,
"avg_score": null,
"num_lines": null
} |
# A little solver for band-diagonal matrices. Based on NR Ch 2.4.
# NOTE(review): uses the long-obsolete Numeric package (pre-NumPy) and
# Python 2 print statements in the self-test below.
from math import *
from Numeric import *

# toggle partial pivoting in the LU decomposition
do_pivot = True

def bandec(a, m1, m2):
    """LU-decompose, in place, a band-diagonal matrix stored compactly.

    Args:
        a: n x (m1+m2+1) array in Numerical Recipes' compact band
           storage (row i holds the nonzero entries of matrix row i);
           overwritten with the upper-triangular factor
        m1: number of subdiagonals
        m2: number of superdiagonals

    Returns:
        (al, indx, d): the lower-factor multipliers, the pivot row
        recorded for each step, and d = +-1 tracking the parity of row
        swaps (useful for determinant computation).
    """
    n, m = a.shape
    mm = m1 + m2 + 1
    if m != mm:
        raise ValueError('Array has width %d expected %d' % (m, mm))
    al = zeros((n, m1), Float)
    indx = zeros(n, Int)
    # left-shift the first m1 rows so the band storage lines up
    for i in range(m1):
        l = m1 - i
        for j in range(l, mm): a[i, j - l] = a[i, j]
        for j in range(mm - l, mm): a[i, j] = 0
    d = 1.
    l = m1
    for k in range(n):
        dum = a[k, 0]
        pivot = k
        if l < n: l += 1
        if do_pivot:
            # find the largest-magnitude pivot candidate below row k
            for j in range(k + 1, l):
                if abs(a[j, 0]) > abs(dum):
                    dum = a[j, 0]
                    pivot = j
        indx[k] = pivot
        # nudge an exactly-zero pivot to avoid division by zero
        # (matrix is then numerically singular)
        if dum == 0.: a[k, 0] = 1e-20
        if pivot != k:
            # swap rows k and pivot; flip the permutation parity
            d = -d
            for j in range(mm):
                tmp = a[k, j]
                a[k, j] = a[pivot, j]
                a[pivot, j] = tmp
        # eliminate below the pivot, saving the multipliers in al
        for i in range(k + 1, l):
            dum = a[i, 0] / a[k, 0]
            al[k, i - k - 1] = dum
            for j in range(1, mm):
                a[i, j - 1] = a[i, j] - dum * a[k, j]
            a[i, mm - 1] = 0.
    return al, indx, d
def banbks(a, m1, m2, al, indx, b):
    """Solve A x = b given the factorization produced by bandec.

    Args:
        a: the upper-triangular factor returned (in place) by bandec
        m1, m2: the sub-/superdiagonal counts passed to bandec
        al: the multipliers returned by bandec
        indx: the pivot record returned by bandec
        b: right-hand side vector; overwritten in place with the
           solution x

    Based on Numerical Recipes Ch 2.4 (banbks).
    """
    n, m = a.shape
    mm = m1 + m2 + 1
    l = m1
    # forward substitution, replaying the row swaps from bandec
    for k in range(n):
        i = indx[k]
        if i != k:
            tmp = b[k]
            b[k] = b[i]
            b[i] = tmp
        if l < n: l += 1
        for i in range(k + 1, l):
            b[i] -= al[k, i - k - 1] * b[k]
    # back substitution through the banded upper factor
    l = 1
    for i in range(n - 1, -1, -1):
        dum = b[i]
        for k in range(1, l):
            dum -= a[i, k] * b[k + i]
        b[i] = dum / a[i, 0]
        if l < mm: l += 1
if __name__ == '__main__':
    # Smoke test: factor a symmetric tridiagonal matrix with bands
    # (1, 2, 1) and solve against a unit impulse right-hand side.
    a = zeros((10, 3), Float)
    for i in range(10):
        a[i, 0] = 1
        a[i, 1] = 2
        a[i, 2] = 1
    # Single-argument print() is valid on both Python 2 and 3; the
    # original bare `print a` statements were Python-2-only syntax.
    print(a)
    al, indx, d = bandec(a, 1, 1)
    print(a)
    print(al)
    print(indx)
    b = zeros(10, Float)
    b[5] = 1
    banbks(a, 1, 1, al, indx, b)
    print(b)
| {
"repo_name": "bowlofstew/roboto",
"path": "third_party/spiro/curves/band.py",
"copies": "15",
"size": "2061",
"license": "apache-2.0",
"hash": -8047997545675352000,
"line_mean": 23.2470588235,
"line_max": 68,
"alpha_frac": 0.378942261,
"autogenerated": false,
"ratio": 2.6491002570694087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A little something I wrote to compile a bunch of pages from a book I found online.
# NOTE(review): Python 2 script (print statements, xrange, integer /)
# with third-party deps clint and pyquery, and Ghostscript ("gswin64")
# expected on the PATH.

import os
import urllib

from clint.textui import colored
from pyquery import PyQuery as pq

base_url = "http://digital.library.pitt.edu"
save_loc = os.path.expanduser("~/Documents/pdf_dump/")
if not os.path.isdir(save_loc):
    os.mkdir(save_loc)
# page numbers in the online viewer; i-2 below converts to 1-based
# output page numbers
first_page = 3
last_page = 235

# Step 1: scrape each viewer frameset and download the PDF from its
# "main" frame
print colored.red("Fetching pages...")
for i in xrange(first_page, last_page):
    print "%s %d" % (colored.yellow("Page:"), i-2)
    p = pq(url="%s/cgi-bin/t/text/pageviewer-idx?c=pittpress&cc=pittpress&idno=31735057896064&node=31735057896064%%3A1&frm=frameset&view=pdf&seq=%d" % (base_url, i))
    frames = p("frame")
    frame = [pq(f) for f in frames if pq(f).attr["name"] == "main"][0]
    urllib.urlretrieve("%s%s" % (base_url, frame.attr["src"]), os.path.expanduser("%s/pdf-%d.pdf" % (save_loc, i-2)))

# Step 2: merge the page PDFs into batches of 100 with Ghostscript,
# then merge the batches (plus the remainder) into one cdf.pdf
print colored.red("Compiling pages...")
for i in xrange((last_page-first_page)/100):
    print "%s %d" % (colored.yellow("Batch:"), i+1)
    os.system("gswin64 -dNOPAUSE -sDEVICE=pdfwrite -sOUTPUTFILE=%sbatch-%d.pdf -sBATCH %s" % (save_loc, i, " ".join(["%spdf-%d.pdf" % (save_loc, j) for j in xrange(i*100+1, (i+1)*100+1)])))
# remainder batch for the pages past the last full hundred
print "%s %d" % (colored.yellow("Batch:"), (last_page-first_page)/100+1)
os.system("gswin64 -dNOPAUSE -sDEVICE=pdfwrite -sOUTPUTFILE=%sbatch-%d.pdf -sBATCH %s" % (save_loc, (last_page-first_page)/100, " ".join(["%spdf-%d.pdf" % (save_loc, i) for i in xrange(((last_page-first_page)/100)*100+1, last_page-first_page+1)])))
# final merge of all batch files
print "%s %s" % (colored.yellow("Batch:"), (last_page-first_page)/100+2)
os.system("gswin64 -dNOPAUSE -sDEVICE=pdfwrite -sOUTPUTFILE=%scdf.pdf -sBATCH %s" % (save_loc, " ".join(["%sbatch-%d.pdf" % (save_loc, i) for i in xrange((last_page-first_page)/100+1)])))

# Step 3: delete the per-page and per-batch intermediates
print colored.red("Cleaning files...")
print "%s %d-%d" % (colored.yellow("Pages:"), 1, (last_page-first_page))
for i in xrange(1, last_page-first_page+1):
    os.remove("%spdf-%d.pdf" % (save_loc, i))
print "%s %d-%d" % (colored.yellow("Batch:"), 1, (last_page-first_page)/100+2)
for i in xrange((last_page-first_page)/100+1):
    os.remove("%sbatch-%d.pdf" % (save_loc, i))
print colored.red("Finished")
print "%s %s" % (colored.yellow("Output:"), "%scdf.pdf" % (save_loc))
| {
"repo_name": "QuantumPhi/school",
"path": "scripts/pdfcompile.py",
"copies": "1",
"size": "2270",
"license": "mit",
"hash": 7904725609160001000,
"line_mean": 46.2916666667,
"line_max": 248,
"alpha_frac": 0.6524229075,
"autogenerated": false,
"ratio": 2.547699214365881,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8695865689613067,
"avg_score": 0.0008512864505628484,
"num_lines": 48
} |
# A little test of how many bytes/sec python can push
import sys
# Testing on polypore.
# ~50-60 MiB/s:
# for line in sys.stdin:
# print line
# ~ 100 MiB/s on polypore,
# ~ 120 MiB/s on ged
# BUFSIZE = 2 ** 20
# leftover = ""
# while True:
# buf = sys.stdin.read(BUFSIZE)
# if not buf:
# break
# lines = buf.split("\n")
# lines[0] = leftover + lines[0]
# leftover = lines.pop()
# sys.stdout.write("\n".join(lines))
# ~ 69 MiB/s on ged (~55 for w(line); w("\n"))
# BUFSIZE = 2 ** 20
# leftover = ""
# w = sys.stdout.write
# while True:
# buf = sys.stdin.read(BUFSIZE)
# if not buf:
# break
# lines = buf.split("\n")
# lines[0] = leftover + lines[0]
# leftover = lines.pop()
# for line in lines:
# w(line + "\n")
# # ~ 52 MiB/s on ged
# from zs._zs import to_uleb128
# BUFSIZE = 2 ** 20
# leftover = ""
# w = sys.stdout.write
# while True:
# buf = sys.stdin.read(BUFSIZE)
# if not buf:
# break
# lines = buf.split("\n")
# lines[0] = leftover + lines[0]
# leftover = lines.pop()
# for line in lines:
# w(to_uleb128(len(line)) + line)
# ~ 80 MiB/s on ged -- local variables are faster!
# BUFSIZE = 2 ** 20
# def doit():
# leftover = ""
# w = sys.stdout.write
# while True:
# buf = sys.stdin.read(BUFSIZE)
# if not buf:
# break
# lines = buf.split("\n")
# lines[0] = leftover + lines[0]
# leftover = lines.pop()
# for line in lines:
# w(line + "\n")
# doit()
# ~ 48 MiB/s on ged
# BUFSIZE = 2 ** 20
# from zs._zs import to_uleb128
# def doit():
# leftover = ""
# w = sys.stdout.write
# while True:
# buf = sys.stdin.read(BUFSIZE)
# if not buf:
# break
# lines = buf.split("\n")
# lines[0] = leftover + lines[0]
# leftover = lines.pop()
# last_line = ""
# for line in lines:
# w(to_uleb128(len(line)) + line)
# assert last_line <= line
# last_line = line
# doit()
# ~72 MiB/s on ged
#BUFSIZE = 2 ** 20
# ~ 120 MiB/s on ged (!)
# pretty similar from 16-200 at least
# ~ 155 MiB/s on polypore
#BUFSIZE = 16 * 2 ** 10
# polypore:
# 16384: 156 MiB/s
# 32768: 152 MiB/s
# 65536: 155 MiB/s
# Winning variant so far: read big chunks, split into lines, and pack
# them with the C-accelerated pack_data_records.
BUFSIZE = int(sys.argv[1])  # chunk size in bytes, from the command line

from zs._zs import pack_data_records

def doit():
    """Stream stdin to stdout, packing complete lines into ZS records."""
    leftover = ""
    w = sys.stdout.write  # bind once — local lookups are faster
    while True:
        buf = sys.stdin.read(BUFSIZE)
        if not buf:
            break
        lines = buf.split("\n")
        # first piece completes the line the previous chunk cut short;
        # last piece may itself be incomplete, so carry it over
        lines[0] = leftover + lines[0]
        leftover = lines.pop()
        w(pack_data_records(lines, 2* BUFSIZE))
    # NOTE(review): a final unterminated line (non-empty `leftover` at
    # EOF) is silently dropped.

doit()
| {
"repo_name": "njsmith/zs",
"path": "microbenchmarks/split-lines.py",
"copies": "1",
"size": "2678",
"license": "bsd-2-clause",
"hash": -6238224613998915000,
"line_mean": 23.1261261261,
"line_max": 53,
"alpha_frac": 0.5257654966,
"autogenerated": false,
"ratio": 2.7665289256198347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37922944222198346,
"avg_score": null,
"num_lines": null
} |
# A little test server, complete with typelib, we can use for testing.
# Originally submitted with bug:
# [ 753154 ] memory leak wrapping object having _typelib_guid_ attribute
# but modified by mhammond for use as part of the test suite.
import sys, os
import pythoncom
import win32com
import winerror
from win32com.server.util import wrap
class CPippo:
    """Pippo COM test object, linked to the IPippo typelib interface."""
    #
    # COM declarations
    #
    _reg_clsid_ = "{05AC1CCE-3F9B-4d9a-B0B5-DFE8BE45AFA8}"
    _reg_desc_ = "Pippo Python test object"
    _reg_progid_ = "Python.Test.Pippo"
    #_reg_clsctx_ = pythoncom.CLSCTX_LOCAL_SERVER
    ###
    ### Link to typelib
    _typelib_guid_ = '{41059C57-975F-4B36-8FF3-C5117426647A}'
    _typelib_version_ = 1, 0
    _com_interfaces_ = ['IPippo']

    def __init__(self):
        # simple property exercised by the test suite
        self.MyProp1 = 10

    def Method1(self):
        # hand back a freshly wrapped child object
        child = CPippo()
        return wrap(child)

    def Method2(self, in1, inout1):
        # echo the [in] argument, double the [in,out] argument
        doubled = inout1 * 2
        return in1, doubled
def BuildTypelib():
    """Compile pippo.idl with MIDL (when out of date) and register the
    resulting type library."""
    from distutils.dep_util import newer
    here = os.path.dirname(__file__)
    idl_path = os.path.abspath(os.path.join(here, "pippo.idl"))
    tlb_path = os.path.splitext(idl_path)[0] + '.tlb'
    if newer(idl_path, tlb_path):
        print("Compiling %s" % (idl_path,))
        exit_code = os.system('midl "%s"' % (idl_path,))
        if exit_code:
            raise RuntimeError("Compiling MIDL failed!")
        # Can't work out how to prevent MIDL from generating the stubs.
        # just nuke them
        for stub in "dlldata.c pippo_i.c pippo_p.c pippo.h".split():
            os.remove(os.path.join(here, stub))
    print("Registering %s" % (tlb_path,))
    typelib = pythoncom.LoadTypeLib(tlb_path)
    pythoncom.RegisterTypeLib(typelib, tlb_path)
def UnregisterTypelib():
    """Unregister the pippo typelib, ignoring 'not registered' errors."""
    k = CPippo
    try:
        pythoncom.UnRegisterTypeLib(k._typelib_guid_,
                                    k._typelib_version_[0],
                                    k._typelib_version_[1],
                                    0,
                                    pythoncom.SYS_WIN32)
        print("Unregistered typelib")
    except pythoncom.error as details:
        # `details[0]` worked on Python 2, but Python 3 exceptions are
        # not indexable; pywin32's com_error exposes the HRESULT as
        # .hresult (also available as .args[0]).
        if details.hresult == winerror.TYPE_E_REGISTRYACCESS:
            # typelib was never registered - nothing to undo
            pass
        else:
            raise
def main(argv=None):
    """Entry point: register the server by default, or unregister the
    typelib when --unregister is among the arguments."""
    argv = sys.argv[1:] if argv is None else argv
    if '--unregister' in argv:
        # Unregister the type-libraries.
        UnregisterTypelib()
        return
    # Build and register the type-libraries.
    BuildTypelib()
    import win32com.server.register
    win32com.server.register.UseCommandLine(CPippo)

if __name__=='__main__':
    main(sys.argv)
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32com/test/pippo_server.py",
"copies": "5",
"size": "2530",
"license": "apache-2.0",
"hash": -5536596123723634000,
"line_mean": 30.625,
"line_max": 72,
"alpha_frac": 0.5928853755,
"autogenerated": false,
"ratio": 3.337730870712401,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6430616246212401,
"avg_score": null,
"num_lines": null
} |
# A little test server, complete with typelib, we can use for testing.
# Originally submitted with bug:
# [ 753154 ] memory leak wrapping object having _typelib_guid_ attribute
# but modified by mhammond for use as part of the test suite.
import sys, os
import pythoncom
import win32com
import winerror
from win32com.server.util import wrap
class CPippo:
    """Pippo COM test object, linked to the IPippo typelib interface."""
    #
    # COM declarations
    #
    _reg_clsid_ = "{05AC1CCE-3F9B-4d9a-B0B5-DFE8BE45AFA8}"
    _reg_desc_ = "Pippo Python test object"
    _reg_progid_ = "Python.Test.Pippo"
    #_reg_clsctx_ = pythoncom.CLSCTX_LOCAL_SERVER
    ###
    ### Link to typelib
    _typelib_guid_ = '{41059C57-975F-4B36-8FF3-C5117426647A}'
    _typelib_version_ = 1, 0
    _com_interfaces_ = ['IPippo']

    def __init__(self):
        # simple property exercised by the test suite
        self.MyProp1 = 10

    def Method1(self):
        # hand back a freshly wrapped child object
        child = CPippo()
        return wrap(child)

    def Method2(self, in1, inout1):
        # echo the [in] argument, double the [in,out] argument
        doubled = inout1 * 2
        return in1, doubled
def BuildTypelib():
    """Compile pippo.idl with MIDL (when out of date) and register the
    resulting type library.  Python-2-only print statements."""
    from distutils.dep_util import newer
    this_dir = os.path.dirname(__file__)
    idl = os.path.abspath(os.path.join(this_dir, "pippo.idl"))
    tlb=os.path.splitext(idl)[0] + '.tlb'
    # only recompile when the IDL is newer than the existing .tlb
    if newer(idl, tlb):
        print "Compiling %s" % (idl,)
        rc = os.system ('midl "%s"' % (idl,))
        if rc:
            raise RuntimeError("Compiling MIDL failed!")
        # Can't work out how to prevent MIDL from generating the stubs.
        # just nuke them
        for fname in "dlldata.c pippo_i.c pippo_p.c pippo.h".split():
            os.remove(os.path.join(this_dir, fname))
    print "Registering %s" % (tlb,)
    tli=pythoncom.LoadTypeLib(tlb)
    pythoncom.RegisterTypeLib(tli,tlb)
def UnregisterTypelib():
    """Unregister the pippo typelib, ignoring 'not registered' errors.

    Uses the Python-2-only `except E, name` syntax.
    """
    k = CPippo
    try:
        pythoncom.UnRegisterTypeLib(k._typelib_guid_,
                                    k._typelib_version_[0],
                                    k._typelib_version_[1],
                                    0,
                                    pythoncom.SYS_WIN32)
        print "Unregistered typelib"
    except pythoncom.error, details:
        if details[0]==winerror.TYPE_E_REGISTRYACCESS:
            # typelib was never registered - nothing to undo
            pass
        else:
            raise
def main(argv=None):
    """Entry point: register the server by default, or unregister the
    typelib when --unregister is among the arguments."""
    argv = sys.argv[1:] if argv is None else argv
    if '--unregister' in argv:
        # Unregister the type-libraries.
        UnregisterTypelib()
        return
    # Build and register the type-libraries.
    BuildTypelib()
    import win32com.server.register
    win32com.server.register.UseCommandLine(CPippo)

if __name__=='__main__':
    main(sys.argv)
| {
"repo_name": "zhanqxun/cv_fish",
"path": "win32com/test/pippo_server.py",
"copies": "2",
"size": "2605",
"license": "apache-2.0",
"hash": -5916710852646424000,
"line_mean": 30.5625,
"line_max": 72,
"alpha_frac": 0.5750479846,
"autogenerated": false,
"ratio": 3.3919270833333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9913464595510741,
"avg_score": 0.01070209448451841,
"num_lines": 80
} |
"""A little Transport layer to maintain the JSESSIONID cookie that
Javaserver pages use to maintain a session. I'd like to use this
to make xmlrpclib session aware.
Sample usage:
server = Server("http://foobar.com/baz/servlet/xmlrpc.class")
print server.get_jsession_id();
print server.test.sayHello()
print server.get_jsession_id();
print server.test.sayGoodbye()
print server.get_jsession_id();
"""
import xmlrpclib
import Cookie
class JspAuthTransport(xmlrpclib.Transport):
    """xmlrpclib Transport that remembers the JSESSIONID cookie, so
    consecutive XML-RPC calls share one Javaserver session."""

    def __init__(self):
        self.__cookies = Cookie.SmartCookie()

    def request(self, host, handler, request_body, verbose=0):
        # issue the XML-RPC request, attaching our session cookie
        connection = self.make_connection(host)
        if verbose:
            connection.set_debuglevel(1)
        self.send_request(connection, handler, request_body)
        self.__sendJsessionCookie(connection)
        self.send_host(connection, host)
        self.send_user_agent(connection)
        self.send_content(connection, request_body)
        errcode, errmsg, headers = connection.getreply()
        if errcode != 200:
            raise xmlrpclib.ProtocolError(
                host + handler,
                errcode, errmsg,
                headers
                )
        self.verbose = verbose
        # remember any session cookie the server handed back
        self.__processCookies(headers)
        return self.parse_response(connection.getfile())

    def get_jsession_id(self):
        """Return the current JSESSIONID value, or None before the
        server has started a session."""
        if 'JSESSIONID' in self.__cookies:
            return self.__cookies['JSESSIONID'].value
        return None

    def __sendJsessionCookie(self, connection):
        # only send the Cookie header once we actually hold a session id
        if 'JSESSIONID' in self.__cookies:
            connection.putheader('Cookie', '$Version="1"; JSESSIONID=%s'
                                 % self.get_jsession_id())

    def __processCookies(self, headers):
        # fold any Set-Cookie response header into our cookie jar
        if headers.getheader('Set-Cookie'):
            self.__cookies.load(headers.getheader('Set-Cookie'))

    def send_content(self, connection, request_body):
        connection.putheader("Content-Type", "text/xml")
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders()
        if request_body:
            connection.send(request_body)
class Server:
    """A little wrapper to keep the transport and serverproxy together."""

    def __init__(self, uri):
        transport = JspAuthTransport()
        self.transport = transport
        self.serverproxy = xmlrpclib.ServerProxy(uri, transport)

    def __getattr__(self, attr):
        # everything we don't define is delegated to the proxy
        return getattr(self.serverproxy, attr)

    def get_jsession_id(self):
        """Expose the transport's session id."""
        return self.transport.get_jsession_id()
def _test2():
    # smoke test against a public XML-RPC endpoint (Python 2 print;
    # requires network access and a live server)
    server = Server("http://www.oreillynet.com/meerkat/xml-rpc/server.php")
    print server.system.listMethods()

if __name__ == '__main__':
    _test2()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/161816_Enable_xmlrpclib_maintaJSP/recipe-161816.py",
"copies": "1",
"size": "2691",
"license": "mit",
"hash": -6565418088224652000,
"line_mean": 27.3263157895,
"line_max": 75,
"alpha_frac": 0.6224451877,
"autogenerated": false,
"ratio": 3.9170305676855897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003079119394746188,
"num_lines": 95
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.