hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
92469e853a715e9955a582e9ea45ef2f7e9cfdce | 3,585 | py | Python | running_modes/scoring/logging/base_scoring_logger.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 183 | 2020-04-04T02:01:15.000Z | 2022-03-30T21:56:56.000Z | running_modes/scoring/logging/base_scoring_logger.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 39 | 2020-04-05T15:19:56.000Z | 2022-03-09T12:58:21.000Z | running_modes/scoring/logging/base_scoring_logger.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 70 | 2020-04-05T19:25:43.000Z | 2022-02-22T12:04:39.000Z | import json
import logging
import os
from abc import ABC, abstractmethod
import pandas as pd
from typing import List
from running_modes.configurations.general_configuration_envelope import GeneralConfigurationEnvelope
from running_modes.configurations.logging.scoring_log_configuration import ScoringLoggerConfiguration
from running_modes.enums.scoring_runner_enum import ScoringRunnerEnum
from reinvent_scoring.scoring.score_summary import FinalSummary
class BaseScoringLogger(ABC):
    """Abstract base class for scoring-run loggers.

    Persists the run's input configuration and the scored-SMILES table under
    the configured logging path, and exposes a console logger. Subclasses
    implement :meth:`log_message`.
    """

    def __init__(self, configuration: GeneralConfigurationEnvelope):
        self._configuration = configuration
        self._log_config = ScoringLoggerConfiguration(**self._configuration.logging)
        # Directory must exist before the logger (and any file output) is set up.
        self._setup_workfolder()
        self._logger = self._setup_logger()
        self._scoring_runner_enum = ScoringRunnerEnum()

    @abstractmethod
    def log_message(self, message: str):
        raise NotImplementedError("log_message method is not implemented")

    def log_out_input_configuration(self):
        """Serialize the run configuration to <logging_path>/input.json."""
        target = os.path.join(self._log_config.logging_path, "input.json")
        serialized = json.dumps(
            self._configuration,
            default=lambda obj: obj.__dict__,  # fall back to attribute dicts for project objects
            sort_keys=True,
            indent=4,
            separators=(',', ': '),
        )
        with open(target, 'w') as handle:
            handle.write(serialized)

    def log_results(self, score_summary: FinalSummary):
        """Write every scored SMILES row to <logging_path>/scored_smiles.csv."""
        destination = os.path.join(self._log_config.logging_path, "scored_smiles.csv")
        header = self._create_table_header(score_summary)
        rows = self._convolute_score_summary(score_summary)
        pd.DataFrame(rows, columns=header, dtype=str).to_csv(
            destination, header=True, index=False
        )

    def _convolute_score_summary(self, score_summary: FinalSummary) -> List:
        """Build one output row per scored SMILES entry.

        Entries whose index is not in ``valid_idxs`` get a score of 0 and a
        valid flag of 0.
        """
        totals = score_summary.total_score
        component_scores = [component.score for component in score_summary.profile]
        valid_indices = score_summary.valid_idxs
        rows = []
        for position, smile in enumerate(score_summary.scored_smiles):
            valid = 1 if position in valid_indices else 0
            rows.append(
                self._compose_row_entry(
                    position,
                    valid,
                    totals[position] if valid else 0,
                    smile,
                    component_scores,
                )
            )
        return rows

    def _compose_row_entry(self, indx: int, valid: int, score: float, smile: str, component_scores: List) -> List:
        """Assemble one row: smile, total score, per-component scores, valid flag."""
        return [smile, score] + [component[indx] for component in component_scores] + [valid]

    def _create_table_header(self, score_summary: FinalSummary) -> List:
        """Column names matching the layout produced by _compose_row_entry."""
        return (
            [self._scoring_runner_enum.SMILES, self._scoring_runner_enum.TOTAL_SCORE]
            + [component.name for component in score_summary.profile]
            + [self._scoring_runner_enum.VALID]
        )

    def _setup_logger(self):
        """Return the shared "scoring_logger" console logger, configuring it once."""
        logger = logging.getLogger("scoring_logger")
        if not logger.handlers:
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(
                logging.Formatter(
                    fmt="%(asctime)s: %(module)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s",
                    datefmt="%H:%M:%S",
                )
            )
            logger.addHandler(stream_handler)
            logger.setLevel(logging.INFO)
            logger.propagate = False
        return logger

    def _setup_workfolder(self):
        """Ensure the logging directory exists."""
        os.makedirs(self._log_config.logging_path, exist_ok=True)
| 41.206897 | 114 | 0.698187 | 424 | 3,585 | 5.632075 | 0.323113 | 0.065327 | 0.035595 | 0.035176 | 0.072864 | 0.052764 | 0.031826 | 0.031826 | 0.031826 | 0 | 0 | 0.001772 | 0.212831 | 3,585 | 86 | 115 | 41.686047 | 0.844437 | 0 | 0 | 0 | 0 | 0.014085 | 0.046583 | 0.006416 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126761 | false | 0 | 0.140845 | 0 | 0.338028 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
924a0eddccceff04dda1decc76232550991e0be7 | 2,697 | py | Python | shopkit/contrib/payment/stripe_provider/__init__.py | sakkada/django-shopkit | 35e6f8ac73bf6aa40887aa9b1b860d27db8b2975 | [
"BSD-3-Clause"
] | null | null | null | shopkit/contrib/payment/stripe_provider/__init__.py | sakkada/django-shopkit | 35e6f8ac73bf6aa40887aa9b1b860d27db8b2975 | [
"BSD-3-Clause"
] | null | null | null | shopkit/contrib/payment/stripe_provider/__init__.py | sakkada/django-shopkit | 35e6f8ac73bf6aa40887aa9b1b860d27db8b2975 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from ....payment import PaymentProvider, PaymentFailure, PaymentType
from . import forms
from . import models
import stripe
import datetime
class StripeProvider(PaymentProvider):
    """Payment provider backed by stripe.com.

    Exposes a single payment type ('stripe'), collects card/receipt details
    via ``forms.StripeReceiptForm`` and charges the card on confirmation.
    """

    form_class = forms.StripeReceiptForm
    # Receipt model class used to build form instances; expected to be set
    # by a concrete subclass or configuration (left None here).
    payment_class = None

    def enum_types(self, order=None, customer=None):
        """Yield the payment types offered by this provider (exactly one)."""
        yield PaymentType(provider=self, typ='stripe', name='Stripe.com')

    def get_configuration_form(self, order, typ, data):
        """Return a receipt form bound to *data* (or unbound) for *order*."""
        instance = self.payment_class(order=order)
        return self.form_class(data or None, instance=instance)

    def save(self, order, form, typ=None):
        """Record payment metadata on *order* and persist the receipt form.

        Raises PaymentFailure when the submitted form does not validate.
        Note: *order* attributes are mutated even when validation fails.
        """
        order.payment_price = 0
        order.payment_type_name = 'Stripe.com'
        order.payment_type_description = ''
        if form.is_valid():
            form.save()
        else:
            raise PaymentFailure(_("Could not create Stripe Receipt"))

    def confirm(self, order, typ=None):
        """Charge the order total through the Stripe API.

        Creates a Stripe customer from the stored card when no customer id
        exists yet, then creates the charge. Raises PaymentFailure on any
        Stripe error or when neither a card nor a customer id is available.
        On success, the charge response is flattened into the receipt form.
        """
        v = order.receipt
        stripe.api_key = settings.STRIPE_SECRET
        amount = int(order.get_total().net * 100) # in cents, Stripe only does USD
        try:
            if v.stripe_card_id and not v.stripe_customer_id:
                # First-time card: register a customer so the card can be reused.
                customer = stripe.Customer.create(
                    card=v.stripe_card_id,
                    description=order.user.email,
                    email=order.user.email
                )
                customer_id = customer.id
            elif v.stripe_customer_id:
                customer_id = v.stripe_customer_id
            else:
                # PaymentFailure is not a StripeError, so this propagates
                # past the except clause below.
                raise PaymentFailure(_("Requires either a card or customer."))
            charge = stripe.Charge.create(
                amount=amount,
                currency="usd",
                customer=customer_id,
                description=order.user.email,
            )
        except stripe.StripeError:
            raise PaymentFailure(_("Payment denied or network error"))
        data = {}
        try:
            # Flatten the charge response (and its nested card dict) into a
            # single dict shaped for StripeReceiptForm.
            data = charge.__dict__
            data['stripe_charge_id'] = data['id']
            data.update(data['card'].__dict__)
            data['card_type'] = data['type']  # 'type' comes from the card dict merged above
            # Stripe response has creation time as unix timestamp
            data['created'] = \
                datetime.datetime.fromtimestamp(int(data['created']))
            data['stripe_card_id'] = v.stripe_card_id
            data['stripe_customer_id'] = v.stripe_customer_id
        except Exception:
            # NOTE(review): mapping errors are silently swallowed; the form
            # below is then validated against whatever partial data exists.
            pass
        finally:
            # Persist the receipt regardless of whether the mapping succeeded.
            receipt_form = forms.StripeReceiptForm(data, instance=v)
            if receipt_form.is_valid():
                v.receipt = receipt_form.save()
                v.save()
| 35.486842 | 84 | 0.595476 | 296 | 2,697 | 5.233108 | 0.334459 | 0.058102 | 0.051646 | 0.043899 | 0.096837 | 0.034861 | 0 | 0 | 0 | 0 | 0 | 0.002162 | 0.314053 | 2,697 | 75 | 85 | 35.96 | 0.835135 | 0.030404 | 0 | 0.09375 | 0 | 0 | 0.07925 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.015625 | 0.109375 | 0 | 0.234375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
924a32e6e209fa2de53fe9b454a531ee0e4f03f5 | 2,424 | py | Python | lambda/src/handlers/receive_email.py | sjmatta-forks/serverless-southwest-check-in | 6b8cfbf1cb19c222f51a039db024217637db2ce4 | [
"MIT"
] | 49 | 2017-02-14T13:49:37.000Z | 2022-03-24T13:57:05.000Z | lambda/src/handlers/receive_email.py | sjmatta-forks/serverless-southwest-check-in | 6b8cfbf1cb19c222f51a039db024217637db2ce4 | [
"MIT"
] | 39 | 2017-02-08T14:21:17.000Z | 2022-01-11T00:20:22.000Z | lambda/src/handlers/receive_email.py | sjmatta-forks/serverless-southwest-check-in | 6b8cfbf1cb19c222f51a039db024217637db2ce4 | [
"MIT"
] | 13 | 2018-02-18T18:49:49.000Z | 2022-02-16T15:05:35.000Z | import json
import logging
import os
import time
import boto3
import mail
# Set up logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def _get_sfn_execution_name(reservation):
"""
Generate a human-readable execution named composed of the passenger's
check in details followed by a timestamp
"""
name = "{}-{}-{}-{}".format(
reservation['last_name'].lower().replace(' ', '-'),
reservation['first_name'].lower(),
reservation['confirmation_number'].lower(),
int(time.time())
)
return name
def main(event, context):
    """
    This function is triggered as an SES Action when a new e-mail is
    received. It scrapes the email to find the name and confirmation
    number of the passenger to check-in, and then executes the AWS Step
    state machine provided in the `STATE_MACHINE_ARN` environment variable.

    Returns the Step Functions execution description on success, or False
    when the email could not be parsed into a reservation.
    """
    sfn = boto3.client('stepfunctions')
    ses_notification = event['Records'][0]['ses']

    # ARN of the AWS Step State Machine to execute when an email
    # is successfully parsed and a new check-in should run.
    state_machine_arn = os.getenv('STATE_MACHINE_ARN')
    log.debug("State Machine ARN: {}".format(state_machine_arn))
    log.debug("SES Notification: {}".format(ses_notification))

    ses_msg = mail.SesMailNotification(ses_notification['mail'])

    try:
        reservation = mail.find_name_and_confirmation_number(ses_msg)
        log.info("Found reservation: {}".format(reservation))
    except Exception as e:
        log.error("Error scraping email {}: {}".format(ses_msg.message_id, e))
        # Only notify human senders; never bounce a failure back to southwest.com
        if not ses_msg.from_email.endswith('southwest.com'):
            mail.send_failure_notification(ses_msg.from_email)
        # Bug fix: always abort on a scrape failure. Previously, mail coming
        # straight from southwest.com fell through this handler with
        # `reservation` unbound, crashing with a NameError below.
        return False

    # Don't add the email if it's straight from southwest.com
    if not ses_msg.from_email.endswith('southwest.com'):
        reservation['email'] = ses_msg.from_email

    execution = sfn.start_execution(
        stateMachineArn=state_machine_arn,
        name=_get_sfn_execution_name(reservation),
        input=json.dumps(reservation)
    )
    log.debug("State machine started at: {}".format(execution['startDate']))
    log.debug("Execution ARN: {}".format(execution['executionArn']))

    # Remove the startDate from the return value because datetime objects don't
    # easily serialize to JSON.
    del execution['startDate']
    return execution
| 32.32 | 79 | 0.691832 | 315 | 2,424 | 5.174603 | 0.4 | 0.066258 | 0.055215 | 0.03681 | 0.141104 | 0.04908 | 0.04908 | 0.04908 | 0.04908 | 0 | 0 | 0.001545 | 0.198845 | 2,424 | 74 | 80 | 32.756757 | 0.837796 | 0.276403 | 0 | 0.047619 | 0 | 0 | 0.170388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
924c4a9c561c7aed9575e75c33ca46d044434fc2 | 9,184 | py | Python | dyc/methods/MethodBuilder.py | lukerm48/dyc | f7b0a1daf9cdcc4d19bc48cbc4e22c5d5a9b8426 | [
"MIT"
] | null | null | null | dyc/methods/MethodBuilder.py | lukerm48/dyc | f7b0a1daf9cdcc4d19bc48cbc4e22c5d5a9b8426 | [
"MIT"
] | null | null | null | dyc/methods/MethodBuilder.py | lukerm48/dyc | f7b0a1daf9cdcc4d19bc48cbc4e22c5d5a9b8426 | [
"MIT"
] | null | null | null | """
File for classes that handles the actual business logic of
the application. Here is the the place where the actual reading,
parsing and validation of the files happens.
"""
import re
import click
from .MethodInterface import MethodInterface
from ..utils import (
get_leading_whitespace,
get_indent_forward,
get_indent_backward,
count_lines,
)
from ..base import Builder
import os
class MethodBuilder(Builder):
    """Builder that scans a source file for methods lacking documentation
    and records a MethodInterface entry for each candidate.

    Matching is driven entirely by the language configuration
    (``method_indicator``, argument delimiters, ``doc_open``/``doc_close``,
    ``within_scope``), so the same builder works across languages.
    """

    already_printed_filepaths = []  # list of already printed files (class-level, shared across instances)

    def initialize(self, change=None):
        """Scan ``self.filename`` for undocumented methods and store the
        results in ``self.details[self.filename]``.

        Parameters
        ----------
        dict change: optional patch info; when provided, only methods that
            appear in the patch "additions" are kept.
        """
        patches = []
        if change:
            patches = change.get("additions")
        with open(self.filename, 'r') as file:
            file_lines = file.read()  # NOTE: the whole file content as one string, despite the name
        # Language configuration: whether the tokens are already regexes,
        # plus the method/argument delimiters for this language.
        method_support_regex = self.config.get("regex")
        argument_support_regex = self.config.get("arguments").get("regex")
        method_indicator = self.config.get("method_indicator")
        start_parameter = self.config.get("arguments").get("start_parameter")
        end_parameter = self.config.get("arguments").get("end_parameter")
        parameter_split = self.config.get("arguments").get("parameter_split")
        method_end = self.config.get("method_end")
        doc_open = self.config.get("doc_open")
        doc_close = self.config.get("doc_close")
        within_scope = self.config.get("within_scope")
        # Literal tokens must be escaped before being embedded in the pattern.
        if not method_support_regex:
            method_indicator = re.escape(method_indicator)
            #doc_open = re.escape(doc_open)
            #doc_close = re.escape(doc_close)
            method_end = re.escape(method_end)
        if not argument_support_regex:
            start_parameter = re.escape(start_parameter)
            end_parameter = re.escape(end_parameter)
            parameter_split = re.escape(parameter_split)
        # Base groups: (method_indicator)(name)(args)(method_end). An optional
        # doc_close group is prepended when docs live above the method, or an
        # optional doc_open group is appended when docs live inside its scope.
        pattern = "("+method_indicator+")"+"\s+"+"(.*?)"+"\s*"+start_parameter+"(.*?)"+end_parameter+"\s*"+"("+method_end+")"
        if not within_scope:
            pattern = "(" + doc_close + ")?\s*" + pattern
        else:
            pattern = pattern + "\s*(" + doc_open + ")?"
        regex = re.compile(pattern,flags=re.DOTALL)
        match_iter = regex.finditer(file_lines)
        match_list = []
        # only keep methods that don't have documentation before or after them
        # (i.e. the optional doc group did not match)
        for i in match_iter:
            if (not within_scope and i.groups()[0] is None) or (within_scope and i.groups()[len(i.groups())-1] is None):
                match_list.append(i)
        #NOTE: handling of self.config.get('comments') was taken out. Doesn't seem real useful
        found = (len(match_list) > 0)
        for i in match_list:
            start = 0
            end = 0
            parameter_list = ""
            method_name = ""
            # Group indices shift by one depending on where the optional doc
            # group was placed in the pattern above.
            if not within_scope:
                method_name = i.groups()[2]
                parameter_list = i.groups()[3].split(parameter_split)
                start = i.start(2)
                end = i.end(5)
            else:
                method_name = i.groups()[1]
                parameter_list = i.groups()[2].split(parameter_split)
                start = i.start(1)
                end = i.end(4)
            if change and found:
                # get match from file
                line = file_lines[start:end+1]
                # character offset converted to a line number for patch lookup
                lineno = count_lines(file_lines,start)
                found = self._is_line_part_of_patches(lineno, line, patches)
            if not self.details.get(self.filename):
                self.details[self.filename] = dict()
            if found:
                all_string = file_lines[start:end+1] # entire match
                indent = ""
                # Docs inside the scope take the indentation after the match;
                # docs above the method take the indentation before it.
                if(within_scope):
                    indent = get_indent_forward(file_lines,end+1)
                else:
                    indent = get_indent_backward(file_lines,start-1)
                result = MethodInterface(
                    plain=all_string,
                    name=method_name,
                    start=start,
                    end=end,
                    indent=indent,
                    filename=self.filename,
                    arguments=self.extract_arguments(parameter_list),
                    config=self.config,
                    leading_space=get_leading_whitespace(all_string),
                    placeholders=self.placeholders,
                )
                if self.validate(result):
                    self.details[self.filename][result.name] = result

    def validate(self, result):
        """
        Validator that checks if the method candidate should be kept and,
        on first sight of a file, announces that file on the console.

        Parameters
        ----------
        MethodInterface result: The Method Interface result

        Returns True when the method is not in the configured ignore list.
        """
        if not result:
            return False
        name = result.name
        if name not in self.config.get("ignore", []):
            if self.filename not in self.already_printed_filepaths:
                # Print file of method to document (last 3 path components)
                click.echo(
                    "\n\nIn file {} :\n".format(
                        click.style(
                            os.path.join(*self.filename.split(os.sep)[-3:]), fg="red"
                        )
                    )
                )
                self.already_printed_filepaths.append(self.filename)
            return True
        return False

    def extract_arguments(self, args_list):
        """
        Public extract argument method that calls ArgumentDetails
        class to extract args

        Parameters
        ----------
        list args_list: raw argument strings split from the method signature
        """
        args = ArgumentDetails(args_list, self.config.get("arguments", {}))
        args.extract()
        return args.sanitize()

    def prompts(self):
        """
        Abstract prompt method in builder to execute prompts over candidates
        """
        for method_interface in self._method_interface_gen():
            method_interface.prompt() if method_interface else None

    def apply(self):
        """
        Over here we are looping over the result of the
        chosen methods to document and applying the changes to the
        files as confirmed
        """
        # reverse list to edit file from bottom to top, so earlier character
        # offsets stay valid while later insertions are written
        for method_interface in self._method_interface_gen(reverse=True):
            if not method_interface:
                continue
            with open(method_interface.filename, 'r+') as file_handle:
                file_text = file_handle.read()
                file_handle.seek(0)
                if not self.config.get("within_scope"):
                    # write before new line (insert docs above the method)
                    new_line_index = file_text[0:method_interface.start].rfind("\n")
                    # are we at top of file?
                    if new_line_index == -1:
                        new_line_index = method_interface.start-1
                    file_handle.write(file_text[0:new_line_index+1] + method_interface.result + file_text[new_line_index+1:])
                else:
                    # write after new line (insert docs inside the method scope)
                    new_line_index = file_text[method_interface.end:].find("\n")
                    add_line = ""
                    # are we at bottom of file?
                    if new_line_index == -1:
                        add_line = "\n"
                        new_line_index = 0
                    file_handle.write(file_text[0:method_interface.end+new_line_index+1] + add_line + method_interface.result + file_text[method_interface.end+new_line_index+1:])

    def _method_interface_gen(self,reverse=False):
        """
        A generator that yields the method interfaces, optionally in
        reverse insertion order (used by apply() for bottom-up edits).
        """
        if not self.details:
            yield None
        for filename, func_pack in self.details.items():
            if(reverse):
                for method_interface in reversed(func_pack.values()):
                    yield method_interface
            else:
                for method_interface in func_pack.values():
                    yield method_interface
class ArgumentDetails(object):
    """Extracts and sanitizes a method's argument names from the raw
    argument strings split out of a method signature.

    Parameters
    ----------
    list args_list: raw argument strings from the signature
    dict config: the "arguments" section of the builder configuration
    """

    def __init__(self, args_list, config):
        self.args_list = args_list
        self.config = config
        self.args = []

    def extract(self):
        """
        Retrieves arguments from a line, dropping empty entries and any
        argument listed in the config's "ignore" list.
        """
        # Bug fix: config.get("ignore") returned None when the key was
        # absent, so the membership test raised TypeError and the old bare
        # `except: pass` silently discarded every argument.
        ignore = self.config.get("ignore") or []
        self.args = [arg for arg in self.args_list if arg and arg not in ignore]

    def sanitize(self):
        """
        Sanitizes arguments: keeps only the first identifier-like token of
        each argument (stripping type hints / default values).
        """
        sanitized = []
        for arg in self.args:
            # Robustness fix: skip entries with no identifier token instead
            # of raising IndexError on findall(...)[0].
            tokens = re.findall(r"[a-zA-Z0-9_]+", arg)
            if tokens:
                sanitized.append(tokens[0])
        return sanitized
| 39.416309 | 178 | 0.530488 | 990 | 9,184 | 4.740404 | 0.223232 | 0.067121 | 0.041551 | 0.016621 | 0.178351 | 0.106542 | 0.051566 | 0.017899 | 0 | 0 | 0 | 0.005954 | 0.378267 | 9,184 | 232 | 179 | 39.586207 | 0.815937 | 0.135344 | 0 | 0.084848 | 0 | 0 | 0.034344 | 0 | 0.006061 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0.006061 | 0.036364 | 0 | 0.145455 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
924fec3ac075b37f75ff462088795707dc130a3b | 1,539 | py | Python | recipes/kangaru/all/conanfile.py | jaspervandeven/conan-center-index | d3c8a2180c4b5e95f63c08c518e1b1a9a7017a23 | [
"MIT"
] | null | null | null | recipes/kangaru/all/conanfile.py | jaspervandeven/conan-center-index | d3c8a2180c4b5e95f63c08c518e1b1a9a7017a23 | [
"MIT"
] | null | null | null | recipes/kangaru/all/conanfile.py | jaspervandeven/conan-center-index | d3c8a2180c4b5e95f63c08c518e1b1a9a7017a23 | [
"MIT"
] | 1 | 2021-03-10T04:02:57.000Z | 2021-03-10T04:02:57.000Z | from conans import ConanFile, tools, CMake
import os
class KangaruConan(ConanFile):
    """Conan recipe for kangaru, a header-only C++ dependency injection library."""

    name = "kangaru"
    description = "A dependency injection container for C++11, C++14 and later"
    license = "MIT"
    topics = ("conan", "gracicot", "kangaru", "DI", "IoC", "inversion of control")
    homepage = "https://github.com/gracicot/kangaru/wiki"
    url = "https://github.com/conan-io/conan-center-index"
    options = {
        "reverse_destruction": [True, False],
        "no_exception": [True, False],
    }
    default_options = {
        "reverse_destruction": True,
        "no_exception": False,
    }
    no_copy_source = True

    @property
    def _source_subfolder(self):
        # All upstream sources live under this fixed folder name.
        return "source_subfolder"

    def source(self):
        # Fetch the release tarball and normalize the extracted folder name.
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = "-".join([self.name, self.version])
        os.rename(extracted_dir, self._source_subfolder)

    def _configure_cmake(self):
        # Map the recipe options onto kangaru's CMake cache variables.
        cmake = CMake(self)
        cmake.definitions["KANGARU_REVERSE_DESTRUCTION"] = self.options.reverse_destruction
        cmake.definitions["KANGARU_NO_EXCEPTION"] = self.options.no_exception
        cmake.configure(source_folder=self._source_subfolder)
        return cmake

    def build(self):
        self._configure_cmake().build()

    def package(self):
        configured = self._configure_cmake()
        configured.install()
        # Header-only library: drop the lib dir and ship the license file.
        tools.rmdir(os.path.join(self.package_folder, "lib"))
        self.copy(os.path.join(self._source_subfolder, "LICENSE"), "licenses")
| 37.536585 | 92 | 0.638077 | 172 | 1,539 | 5.52907 | 0.418605 | 0.078864 | 0.078864 | 0.060988 | 0.067298 | 0.067298 | 0 | 0 | 0 | 0 | 0 | 0.003401 | 0.235867 | 1,539 | 40 | 93 | 38.475 | 0.805272 | 0 | 0 | 0.058824 | 0 | 0 | 0.234156 | 0.018012 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.058824 | 0.029412 | 0.558824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
925073b67d11908413dc3c2418ab4f3c9e2f45f4 | 8,989 | py | Python | ai.py | qwlea/chess | 06213ebbcd379e1968ebcfed41d38b9a00f9f3c8 | [
"Apache-2.0"
] | null | null | null | ai.py | qwlea/chess | 06213ebbcd379e1968ebcfed41d38b9a00f9f3c8 | [
"Apache-2.0"
] | null | null | null | ai.py | qwlea/chess | 06213ebbcd379e1968ebcfed41d38b9a00f9f3c8 | [
"Apache-2.0"
] | null | null | null | from player import Player
import copy
from random import randint
# This AI class uses MiniMax Alpha-Beta Pruning in order to find the best possible move(s) each turn.
class MiniMax(Player):
    def __init__(self, pid, board, logic):
        """Create the MiniMax AI player.

        :param pid: player id, forwarded to the Player base class
        :param board: the game board, forwarded to the Player base class
        :param logic: game-rules helper used for legal-move and mate checks
        """
        super().__init__(pid, board)
        self._logic = logic
# Board evaluations are handled symmetrically, that is to say, every action that adds to a certain player's score will
# subtract from the opponent's score by the same amount. Score evaluations are handled as follows:
# Piece-Points Values:
# Queen = 90, Rook = 50, Knight = 30, Bishop = 32, Pawn = 10
# Although Bishops have the same literal score as knights in chess, 3:3, bishops are generally considered to be slightly
# more valuable than knights in mid to late game board states as they have much more mobility and have the potential to
# dominate opened-up boards by covering many more squares than a knight could.
# Putting a king in check: +6 points
# Putting a king in check can be very advantageous for gaining a tempo and potentially netting free pieces, but the value
# of checking a player's king cannot be overevaluated lest terrible moves such as free pawn sacrifices become commonplace
# Attacking unprotected pieces: +(0.8 * Piece-Points)
# This one is pretty self explanatory. Attacking unprotected pieces tends to be very strong and forces the opponent to
# react accordingly in order to save the piece. Attacking several unprotected pieces at a time can lead to game-winning
# exchanges that quickly lead to victory.
# Attacking protected pieces: +max(0, 0.2 * Piece-Points-Diff)
# Attacking protected pieces can also be advantageous for aggressive pushes and complicating the board state for the
# opponent.
# Protecting pieces: +(0.5 * Piece-Points)
# For the very reasons mentioned above, protecting pieces is very important. Leaving too many hanging pieces can swiftly
# lead to losing board states. Kings and queens will not be taken into account when calculating this value, as a queen
# can only trade evenly or worse with other pieces and there is no way to "protect" the king in this sense because the
# king cannot be captured. Pawns get a 1.5 value modifier for being protected as it can often be more advantageous to
# protect your pawns with your pieces than it is to protect your pieces with your pawns
# Pieces in center 16 tiles: +2 points
# Controlling the center of the board is one of the most important strategies in chess as almost all of the pieces have
# the possibility of being much more active when positioned around the center of the board.
# Pawn push: +1.5 points
# For each space a pawn is up from its starting point, 1.5 points will be alotted to the player. Pushing pawns too early
# can be disadvantageous and lead to several hanging pawns, but moving pawns up is one of the central game mechanics and
# pawn positioning in late-game board states is very often the difference between who wins and who loses.
# Covering open squares: +0.15 points
# Each open square that is covered by the player nets 0.15 points. This reinforces the "control the center" mentality and
# incentivizes the AI to wrest control of as much of the board as possible. It is difficult to find a great spot for this
# value though, as it can potentially lead to odd queen moves at the beginning of the game that cover many squares but
# otherwise have no reasonable, immediate impact on the board.
    def eval_board(self, opponent, board):
        """Statically evaluate *board* from this player's perspective.

        Returns a symmetric score (see the scoring notes above this method):
        positive favors this player, negative favors *opponent*. Checkmate
        against the opponent returns +inf, against us -inf; stalemate
        either way returns 0.
        """
        score = 0
        # Stale/Checkmate evaluation:
        res = self._logic.safe_check_mate(opponent, self)
        if res == "CM": return float("inf")
        if res == "SM": return 0
        res = self._logic.safe_check_mate(self, opponent)
        if res == "CM": return float("-inf")
        if res == "SM": return 0
        my_pieces = self.get_owned()
        opp_pieces = opponent.get_owned()
        # list of all of the verified legal tiles the players' pieces can move to
        # NOTE(review): my_legal/opp_legal are computed but never used below.
        my_legal = self._logic.get_true_legal_player(self, opponent)
        opp_legal = self._logic.get_true_legal_player(opponent, self)
        # Returns a list of all of the squares covered by the pieces, whether it is a legal move or not
        my_covered = self._logic.get_legal_player(self)
        opp_covered = self._logic.get_legal_player(opponent)
        # Lists of the player's and opponent's attacked pieces respectively
        my_attacked = []
        opp_attacked = []
        # The literal tables below are indexed by piece type // 2; the values
        # match the piece-point scheme documented above (index 1 = queen 90,
        # 2 = rook 50, 3 = knight 30, 4 = bishop 32, 5 = pawn 10; index 0 is
        # presumably the king — TODO confirm against the piece type encoding).
        for coords in my_covered:
            tile = board.get_tile(coords)
            if tile.get_piece() == None: score += 0.15  # covering an open square
            elif tile.get_piece().get_type() % 2 == self.get_id():
                # Protecting one of our own pieces (kings/queens contribute 0;
                # pawns use 15, i.e. the documented 1.5x protection modifier).
                score += [0, 0, 50, 30, 32, 15][tile.get_piece().get_type() // 2] * 0.5
            else:
                opp_attacked.append(tile.get_piece())
        for piece in my_pieces:
            type = piece.get_type() // 2
            # Material value plus center-control and pawn-advancement bonuses.
            score += [0, 90, 50, 30, 32, 10][type]
            if 2 <= piece.get_coords()[0] <= 5 and 2 <= piece.get_coords()[1] <= 5: score += 2
            # Types 10/11 appear to be the two pawn colors moving in opposite
            # directions — TODO confirm against the piece encoding.
            if piece.get_type() == 10: score += (piece.get_coords()[1] - 1) * 1.5
            if piece.get_type() == 11: score += (6 - piece.get_coords()[1]) * 1.5
        for piece in opp_attacked:
            type = piece.get_type() // 2
            # Attacking a defended piece scores less than a hanging one.
            if piece.get_coords() in opp_covered: score += [30, 270, 50, 30, 32, 10][type] * 0.2
            else: score += [7.5, 90, 50, 30, 32, 10][type] * 0.8
        # Mirror of the loops above, subtracting for the opponent's position.
        for coords in opp_covered:
            tile = board.get_tile(coords)
            if tile.get_piece() == None: score -= 0.15
            elif tile.get_piece().get_type() % 2 == opponent.get_id():
                score -= [0, 0, 50, 30, 32, 15][tile.get_piece().get_type() // 2] * 0.5
            else:
                my_attacked.append(tile.get_piece())
        for piece in opp_pieces:
            type = piece.get_type() // 2
            score -= [0, 90, 50, 30, 32, 10][type]
            if 2 <= piece.get_coords()[0] <= 5 and 2 <= piece.get_coords()[1] <= 5: score -= 2
            if piece.get_type() == 10: score -= (piece.get_coords()[1] - 1) * 1.5
            if piece.get_type() == 11: score -= (6 - piece.get_coords()[1]) * 1.5
        for piece in my_attacked:
            type = piece.get_type() // 2
            if piece.get_coords() in my_covered: score -= [30, 270, 50, 30, 32, 10][type] * 0.2
            else: score -= [7.5, 90, 50, 30, 32, 10][type] * 0.8
        return score
def randmove(self, opponent, board):
    """Pick a uniformly random legal move for this player.

    Returns a ((from_x, from_y), (to_x, to_y)) coordinate pair drawn from
    every verified-legal destination of every piece this player owns.
    """
    candidates = [
        (piece.get_coords(), destination)
        for piece in self.get_owned()
        for destination in self._logic.get_true_legal_piece(piece, self, opponent)
    ]
    # randint is inclusive on both ends, so this indexes the whole list.
    return candidates[randint(0, len(candidates) - 1)]
# Alpha-Beta Pruning is a more complex form of minimaxing that can disregard branches in the move tree that are known to
# be obsolete, either by showing the opponent has a worse outcome down the tree or that you will have a better outcome
# down the tree. It always plays in expectation that the opponent will make the moves that benefit you the least and
# continues to make the best possible move based on that scenario.
# In this function, depth represents the total number of turns the function is set to traverse in the turn tree. If
# depth is equal to three on the first call, the AI will search for the best minimum value move within three turns and
# return the most valuable move found from those turns.
# maximizing is a boolean that represents whether or not it is evaluating for the AI or the player. When evaluating for the
# AI, maximizing is set to True and the function will search for the highest score possible the AI can get in the immediate
# turn. If maximizing is set to False, it is the opponent's turn, and the AI will search for the highest score play for the
# opponent, in other words, the score that lowers the AI's score the most. This evaluation method assumes that the
# opponent will always take the best line of play, so it could potentially actually be better in a vacuum at playing
# skilled opponents more so than opponents that make a lot of mistakes/blunders.
# alpha_beta are the max score value and min score values found respectively. Alpha represents the highest score the AI
# has found at the end of its search and Beta represents the lowest score found for the AI that the opponent has made.
# alpha_beta will be stored in a two element list. Alpha starts at -infinity and beta starts at +infinity.
def alpha_beta_prune(self, opponent, board, depth, maximizing, alpha_beta):
    # NOTE(review): this unconditional early return disables the entire
    # routine -- everything below it is dead code. It looks like a
    # debugging stub that was left in; confirm whether the pruning
    # search should be re-enabled before relying on this AI.
    return
    # Base case: stop at depth 0 or when either side is in a mate state
    # ("NM" presumably means "no mate" -- TODO confirm against _logic).
    if depth == 0 or maximizing and self._logic.safe_check_mate(self, opponent) != "NM" or\
            not maximizing and self._logic.safe_check_mate(opponent, self) != "NM":
        return self.eval_board(opponent, board)
| 61.993103 | 124 | 0.679608 | 1,420 | 8,989 | 4.228873 | 0.252817 | 0.030641 | 0.02398 | 0.017319 | 0.240633 | 0.219817 | 0.195171 | 0.157535 | 0.145545 | 0.145545 | 0 | 0.028988 | 0.243965 | 8,989 | 144 | 125 | 62.423611 | 0.85462 | 0.570586 | 0 | 0.142857 | 0 | 0 | 0.005198 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.042857 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9250af11f4ee1e071c1b4ca4904fcbb36dc56250 | 2,971 | py | Python | boxes/scrapers/tasks/scrap_news.py | konradarchicinski/pybox | 37c2dae80e1629e3cf2d3e3e86afaaff06b4de17 | [
"MIT"
] | 1 | 2021-04-16T06:29:41.000Z | 2021-04-16T06:29:41.000Z | boxes/scrapers/tasks/scrap_news.py | konradarchicinski/pybox | 37c2dae80e1629e3cf2d3e3e86afaaff06b4de17 | [
"MIT"
] | null | null | null | boxes/scrapers/tasks/scrap_news.py | konradarchicinski/pybox | 37c2dae80e1629e3cf2d3e3e86afaaff06b4de17 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from boxes.scrapers import DATA_PATH
from pybox.task import Task
from pybox.scraper.news_reader import NewsReader
from datetime import date, timedelta, datetime
def update_reader_settings(iteration, reader_settings):
    """Auxiliary helper: shift the reader's date window one interval back.

    On every iteration after the first, both ``OldestNewsDate`` and
    ``NewestNewsDate`` are moved earlier by the span between them, so
    successive iterations walk backwards through contiguous date windows.

    Args:
        iteration (int): number of current iteration.
        reader_settings (dict): contains reader settings to be updated.

    Returns:
        dict: the same dict, mutated in place with the shifted dates.
    """
    if iteration <= 0:
        return reader_settings
    fmt = "%Y-%m-%d"
    newest = datetime.strptime(reader_settings["NewestNewsDate"], fmt)
    oldest = datetime.strptime(reader_settings["OldestNewsDate"], fmt)
    # Shift by exactly the width of the current window.
    shift = newest - oldest
    reader_settings["OldestNewsDate"] = str((oldest - shift).date())
    reader_settings["NewestNewsDate"] = str((newest - shift).date())
    return reader_settings
def scrap_news(settings):
    """Main task function. It creates `NewsReader` instance which
    is used to scrap and store stories from webpage supplied
    in the settings.

    Args:
        settings (dict): contains settings of this task.
    """
    source = settings["Source"]
    additional_interval = settings["AdditionalInterval"]
    # Everything except the task-level keys is forwarded to the reader.
    reader_settings = {key: settings[key]
                       for key in settings
                       if key not in ["Source", "AdditionalInterval"]}
    # One pass plus one extra pass per requested additional interval;
    # update_reader_settings shifts the date window back on each pass.
    for iteration in range(1 + additional_interval):
        reader_settings = update_reader_settings(iteration, reader_settings)
        reader = NewsReader.initiate(source, reader_settings, DATA_PATH)
        # NOTE(review): `read_archival_news` is accessed but never called --
        # unless it is a property on NewsReader this line is a no-op and
        # parentheses are missing. Verify against the NewsReader class.
        reader.read_archival_news
        reader.news_to_parquet(DATA_PATH)
task = Task(
task_name="NewsScrap",
task_info="""
Task scraps and stores news/stories from supplied webpage.
""")
task.add_setting(
name="Source",
default_value="Reuters",
info="""
The name of the source webpage such as `Reuters` or `Bloomberg`.
""")
task.add_setting(
name="DriverType",
default_value="Chrome",
info="""
Type of the selenium driver to be used.
""")
task.add_setting(
name="NewestNewsDate",
default_value=str(date.today()),
info="""
Date representing newest news that can be stored.
""")
task.add_setting(
name="OldestNewsDate",
default_value=str(date.today() - timedelta(days=1)),
info="""
Date representing oldest news that can be stored.
""")
task.add_setting(
name="AdditionalInterval",
default_value=0,
info="""
Number of additional intervals. It provides functionality
to scrap data from older dates (shifted with the same interval
as one between dates provided in initial settings).
""")
task.run(main_function=scrap_news)
| 30.947917 | 76 | 0.678559 | 357 | 2,971 | 5.501401 | 0.341737 | 0.099796 | 0.035642 | 0.045825 | 0.144603 | 0.120163 | 0.037678 | 0.037678 | 0.037678 | 0 | 0 | 0.001739 | 0.22585 | 2,971 | 95 | 77 | 31.273684 | 0.852174 | 0.18445 | 0 | 0.246154 | 0 | 0 | 0.297629 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030769 | false | 0 | 0.061538 | 0 | 0.107692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92533fd03fb0f3a8993a9f914c09ba1fc8a481b8 | 677 | py | Python | tests/data23/recipe-577194.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-06-05T08:53:26.000Z | 2020-06-05T08:53:26.000Z | tests/data23/recipe-577194.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-06-04T13:47:19.000Z | 2020-06-04T13:47:57.000Z | tests/data23/recipe-577194.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-11-07T17:02:46.000Z | 2020-11-07T17:02:46.000Z | class Thing(object):
"""A thing, does stuff."""
def __init__(self):
    # Sentinel value that process() substitutes when called with default=True.
    self.special = "My special value!"
def process(self, default=True):
    """Accept any argument with no special processing (except True)."""
    value = default
    # Identity check: only the literal True sentinel is replaced, so
    # truthy values like 1 or "yes" pass through untouched.
    if value is True:
        value = self.special
    else:
        if not value:
            print("Non-value given!")
    print(value)
if __name__ == "__main__":
    # Demo: exercise the sentinel default, a truthy value, and a falsy one.
    thing = Thing()
    for sample_arg in (True, "something", None):
        thing.process(sample_arg)
| 35.631579 | 81 | 0.611521 | 84 | 677 | 4.785714 | 0.607143 | 0.059701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.279173 | 677 | 18 | 82 | 37.611111 | 0.82377 | 0.357459 | 0 | 0 | 0 | 0 | 0.119048 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.214286 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
925401b8a6748c444b83c26016063c2cd09a8be6 | 1,233 | py | Python | python/simulator.py | mhoffmann7/MIMO-MRT-Simulator | 9bca63b416b137fa3ded4984c40e669b5bc421bd | [
"MIT"
] | null | null | null | python/simulator.py | mhoffmann7/MIMO-MRT-Simulator | 9bca63b416b137fa3ded4984c40e669b5bc421bd | [
"MIT"
] | null | null | null | python/simulator.py | mhoffmann7/MIMO-MRT-Simulator | 9bca63b416b137fa3ded4984c40e669b5bc421bd | [
"MIT"
] | null | null | null | from get_users_channels import getUsers
from user_association import userAssociation
from mmimo_mrt import computeBitrates
from dpb import Dpb
import numpy as np
def singleUESetSimulation(no_ue, scenario, dpb_algorithm, dpb, assignment_method):
    """Simulate one set of user equipments and return its statistics.

    Returns a (bitrates, positions, cell_edge, action) tuple, where
    cell_edge is the 10th-percentile bitrate (the "cell edge" users).
    """
    path_loss, user_positions, correlation = getUsers(scenario, no_ue)
    path_loss, dpb_action = dpb.dpb(
        dpb_algorithm, assignment_method, path_loss, correlation)
    user_assignment = userAssociation(assignment_method, path_loss)
    user_bitrates = computeBitrates(path_loss, correlation, user_assignment)
    # The 10th percentile characterises the worst-served ("cell edge") users.
    edge_rate = np.quantile(user_bitrates, 0.1)
    return user_bitrates, user_positions, edge_rate, dpb_action
def multipleSimulations(no_sets, no_ue, scenario, dpb_algorithm, assignment_method, seed):
    """Run `no_sets` independent UE-set simulations with a fixed RNG seed.

    Args:
        no_sets (int): number of UE sets to simulate.
        no_ue (int): number of user equipments per set.
        scenario: scenario identifier forwarded to getUsers().
        dpb_algorithm: DPB algorithm selector forwarded to Dpb.dpb().
        assignment_method: user-association method selector.
        seed (int): numpy RNG seed for reproducibility.

    Returns:
        tuple: (cell_edges, actions, bitrates_per_set, positions_per_set),
        where cell_edges and actions are per-set numpy arrays.
    """
    dpb = Dpb('')
    np.random.seed(seed)
    cell_edges = np.zeros(no_sets)
    actions = np.zeros(no_sets)
    bitrates_per_set = []
    positions_per_set = []
    # `set_idx` instead of `set`: the original name shadowed the builtin.
    for set_idx in range(no_sets):
        bitrates, positions, cell_edges[set_idx], actions[set_idx] = singleUESetSimulation(
            no_ue, scenario, dpb_algorithm, dpb, assignment_method)
        bitrates_per_set.append(bitrates)
        positions_per_set.append(positions)
    return cell_edges, actions, bitrates_per_set, positions_per_set
| 34.25 | 137 | 0.763179 | 161 | 1,233 | 5.583851 | 0.329193 | 0.040044 | 0.040044 | 0.050056 | 0.233593 | 0.206897 | 0.14238 | 0.14238 | 0.14238 | 0 | 0 | 0.004808 | 0.156529 | 1,233 | 35 | 138 | 35.228571 | 0.859615 | 0.025953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.208333 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
925480aad2538abcd3adf524e7dc5e68a3d73cf6 | 5,362 | py | Python | anchore_engine/services/policy_engine/api/models/gate_spec.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 1 | 2019-06-27T08:47:48.000Z | 2019-06-27T08:47:48.000Z | anchore_engine/services/policy_engine/api/models/gate_spec.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 4 | 2020-11-07T00:16:02.000Z | 2020-11-08T20:52:06.000Z | anchore_engine/services/policy_engine/api/models/gate_spec.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 1 | 2019-11-23T03:39:28.000Z | 2019-11-23T03:39:28.000Z | # coding: utf-8
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from anchore_engine.services.policy_engine.api.models.base_model_ import Model
from anchore_engine.services.policy_engine.api.models.trigger_spec import TriggerSpec # noqa: F401,E501
from anchore_engine.services.policy_engine.api import util
class GateSpec(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # NOTE(review): generated code -- prefer regenerating from the swagger
    # spec over hand-editing this class.

    def __init__(self, name=None, description=None, state=None, superceded_by=None, triggers=None):  # noqa: E501
        """GateSpec - a model defined in Swagger

        :param name: The name of this GateSpec.  # noqa: E501
        :type name: str
        :param description: The description of this GateSpec.  # noqa: E501
        :type description: str
        :param state: The state of this GateSpec.  # noqa: E501
        :type state: str
        :param superceded_by: The superceded_by of this GateSpec.  # noqa: E501
        :type superceded_by: str
        :param triggers: The triggers of this GateSpec.  # noqa: E501
        :type triggers: List[TriggerSpec]
        """
        # Swagger metadata: attribute name -> python type.
        self.swagger_types = {
            'name': str,
            'description': str,
            'state': str,
            'superceded_by': str,
            'triggers': List[TriggerSpec]
        }
        # Swagger metadata: attribute name -> JSON key.
        self.attribute_map = {
            'name': 'name',
            'description': 'description',
            'state': 'state',
            'superceded_by': 'superceded_by',
            'triggers': 'triggers'
        }

        self._name = name
        self._description = description
        self._state = state
        self._superceded_by = superceded_by
        self._triggers = triggers

    @classmethod
    def from_dict(cls, dikt):
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The GateSpec of this GateSpec.  # noqa: E501
        :rtype: GateSpec
        """
        return util.deserialize_model(dikt, cls)

    @property
    def name(self):
        """Gets the name of this GateSpec.

        Gate name, as it would appear in a policy document  # noqa: E501

        :return: The name of this GateSpec.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this GateSpec.

        Gate name, as it would appear in a policy document  # noqa: E501

        :param name: The name of this GateSpec.
        :type name: str
        """
        self._name = name

    @property
    def description(self):
        """Gets the description of this GateSpec.

        Description of the gate  # noqa: E501

        :return: The description of this GateSpec.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this GateSpec.

        Description of the gate  # noqa: E501

        :param description: The description of this GateSpec.
        :type description: str
        """
        self._description = description

    @property
    def state(self):
        """Gets the state of this GateSpec.

        State of the gate, applied transitively to all triggers if a state other than active  # noqa: E501

        :return: The state of this GateSpec.
        :rtype: str
        """
        return self._state

    @state.setter
    def state(self, state):
        """Sets the state of this GateSpec.

        State of the gate, applied transitively to all triggers if a state other than active  # noqa: E501

        :param state: The state of this GateSpec.
        :type state: str
        :raises ValueError: if ``state`` is not one of the allowed values.
        """
        # Only these lifecycle states are accepted by the model.
        allowed_values = ["active", "deprecated", "eol"]  # noqa: E501
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"
                .format(state, allowed_values)
            )

        self._state = state

    @property
    def superceded_by(self):
        """Gets the superceded_by of this GateSpec.

        The name of another gate that supercedes this on functionally if this is deprecated  # noqa: E501

        :return: The superceded_by of this GateSpec.
        :rtype: str
        """
        return self._superceded_by

    @superceded_by.setter
    def superceded_by(self, superceded_by):
        """Sets the superceded_by of this GateSpec.

        The name of another gate that supercedes this on functionally if this is deprecated  # noqa: E501

        :param superceded_by: The superceded_by of this GateSpec.
        :type superceded_by: str
        """
        self._superceded_by = superceded_by

    @property
    def triggers(self):
        """Gets the triggers of this GateSpec.

        List of the triggers that can fire for this Gate  # noqa: E501

        :return: The triggers of this GateSpec.
        :rtype: List[TriggerSpec]
        """
        return self._triggers

    @triggers.setter
    def triggers(self, triggers):
        """Sets the triggers of this GateSpec.

        List of the triggers that can fire for this Gate  # noqa: E501

        :param triggers: The triggers of this GateSpec.
        :type triggers: List[TriggerSpec]
        """
        self._triggers = triggers
| 28.827957 | 113 | 0.608355 | 641 | 5,362 | 5 | 0.168487 | 0.048674 | 0.113573 | 0.033697 | 0.545086 | 0.474571 | 0.45117 | 0.308892 | 0.280187 | 0.251482 | 0 | 0.018664 | 0.310518 | 5,362 | 185 | 114 | 28.983784 | 0.848255 | 0.469974 | 0 | 0.230769 | 0 | 0 | 0.08384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.184615 | false | 0 | 0.076923 | 0 | 0.369231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92588762a2b3b34b9bfdc6caf756989bb0850c29 | 3,576 | py | Python | redux/g7_sci_extract.py | joungh93/PyRAF_GMOS_IFU | 1750caaf846c426cf1fc761ad539f740c8ae64d9 | [
"MIT"
] | null | null | null | redux/g7_sci_extract.py | joungh93/PyRAF_GMOS_IFU | 1750caaf846c426cf1fc761ad539f740c8ae64d9 | [
"MIT"
] | null | null | null | redux/g7_sci_extract.py | joungh93/PyRAF_GMOS_IFU | 1750caaf846c426cf1fc761ad539f740c8ae64d9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 16:29:33 2019

@author: jlee

QE-corrects and extracts GMOS-IFU science frames with PyRAF, then opens
the extracted slits in DS9 so bad columns can be marked by hand.
"""

import time
start_time = time.time()

import numpy as np
import glob, os
import g0_init_cfg as ic
from astropy.io import fits


# ----- Importing IRAF from the root directory ----- #
# IRAF must be initialised from its login directory, hence the chdir dance.
current_dir = os.getcwd()
os.chdir(ic.dir_iraf)
from pyraf import iraf
from pyraf.iraf import gemini, gmos
os.chdir(current_dir)
iraf.chdir(current_dir)

# Reset task parameters to their defaults before running.
iraf.unlearn('gqecorr')
iraf.unlearn('gfextract')
iraf.unlearn('gfdisplay')


# ---------- QE correction and extraction of the science ---------- #
# NOTE(review): loop nesting reconstructed from context -- the DS9 display
# is assumed to happen once per wavelength directory; verify on re-run.
for d in ic.dir_wav:
    dir_sci = sorted(glob.glob(d+"/*"))
    filenames1 = ""
    if (ic.nslit == 2):
        filenames2 = ""
    for j in np.arange(len(dir_sci)):
        # Moving each science directory
        name_sci = dir_sci[j].split("/")[-1]
        print("Moving path for "+name_sci+"...")
        os.chdir(current_dir+"/"+dir_sci[j])
        iraf.chdir(current_dir+"/"+dir_sci[j])

        # SCI & ARC & FLAT: first frame listed in each input list file.
        sci = np.loadtxt(ic.lst_sci, dtype=str)
        sci0 = sci.item(0)
        arc = np.loadtxt(ic.lst_arc, dtype=str)
        arc0 = arc.item(0)
        flat = np.loadtxt(ic.lst_flat, dtype=str)
        flat0 = flat.item(0)
        response = flat0+'_resp'

        # QE correction (delete stale outputs first; 'q' prefix is added)
        iraf.imdelete('qxbrg@'+ic.lst_sci)
        iraf.imdelete('eqxbrg@'+ic.lst_sci)
        iraf.gqecorr('xbrg'+sci0, refimage='erg'+arc0, fl_correct='yes',
                     fl_vardq='yes', verbose='yes')

        # Science extraction using the flat's fiber traces ('e' prefix)
        iraf.gfextract('qxbrg'+sci0, response=response, recenter='no',
                       trace='no', reference='eqbrg'+flat0, weights='none',
                       fl_vardq='yes', line=ic.pk_line)

        # Displaying the extracted data for checking bad columns
        if (glob.glob("badcol/") == []):
            os.system("mkdir badcol")
        # ---> making a directory for bad column masking region files
        #      ("badcol/sci{}_slit{}.reg", with DS9 saoimage format)

        # Collect the extracted slit extensions for a combined DS9 session.
        filenames1 += (dir_sci[j]+"/eqxbrg"+sci0+".fits[2] ")
        if (ic.nslit == 2):
            filenames2 += (dir_sci[j]+"/eqxbrg"+sci0+".fits[5] ")

        # #####
        # dt1, hd1 = fits.getdata('eqxbrg'+sci0+'.fits', ext=2, header=True)
        # z1l, z1u = np.percentile(dt1, [15, 85])
        # # 2-slit mode
        # if (ic.nslit == 2):
        #     dt2, hd2 = fits.getdata('eqxbrg'+sci0+'.fits', ext=5, header=True)
        #     z2l, z2u = np.percentile(dt2, [15, 85])
        # if (ic.nslit == 1):
        #     z1, z2 = z1l, z1u
        #     ds9_frm = "ds9 eqxbrg"+sci0+".fits[2] -multiframe"
        #     ds9_loc = " -scale lock yes -frame lock image"
        #     ds9_scl = " -scale limits {0:.2f} {1:.2f} &".format(z1, z2)
        # if (ic.nslit == 2):
        #     z1, z2 = 0.5*(z1l+z2l), 0.5*(z1u+z2u)
        #     ds9_frm = "ds9 eqxbrg"+sci0+".fits[2] eqxbrg"+sci0+".fits[5] -multiframe"
        #     ds9_loc = " -scale lock yes -frame lock image"
        #     ds9_scl = " -scale limits {0:.2f} {1:.2f} &".format(z1, z2)
        # os.system(ds9_frm + ds9_loc + ds9_scl)
        # #####

        # Coming back to current path
        os.chdir(current_dir)
        iraf.chdir(current_dir)

    # Open every collected slit in one DS9 session for region marking.
    ds9_opt = "-scalemode zscale -scale lock yes -frame lock image -mode region -regions shape box"
    os.system("ds9 "+ds9_opt+" "+filenames1+"&")
    if (ic.nslit == 2):
        os.system("ds9 "+ds9_opt+" "+filenames2+"&")


# Printing the running time
print('--- %s seconds ---' %(time.time()-start_time))
| 30.827586 | 99 | 0.558445 | 482 | 3,576 | 4.049793 | 0.358921 | 0.035861 | 0.050205 | 0.025615 | 0.277664 | 0.224385 | 0.13832 | 0.11373 | 0.076844 | 0.076844 | 0 | 0.043943 | 0.268177 | 3,576 | 115 | 100 | 31.095652 | 0.701949 | 0.357662 | 0 | 0.137255 | 0 | 0 | 0.119376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137255 | 0 | 0.137255 | 0.039216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
925ad854367cffcab0b72b26714e6ad39f176b24 | 19,279 | py | Python | wrapless.py | mpi3d/goodix-fp-dump | 039940845bd5eeb98cd92d72f267e3be77feb156 | [
"MIT"
] | 136 | 2021-05-05T14:16:17.000Z | 2022-03-31T09:04:18.000Z | wrapless.py | tsunekotakimoto/goodix-fp-dump | b88ecbababd3766314521fe30ee943c4bd1810df | [
"MIT"
] | 14 | 2021-08-20T09:49:39.000Z | 2022-03-20T13:18:05.000Z | wrapless.py | tsunekotakimoto/goodix-fp-dump | b88ecbababd3766314521fe30ee943c4bd1810df | [
"MIT"
] | 11 | 2021-08-02T15:49:11.000Z | 2022-02-06T22:06:42.000Z | from protocol import Protocol
from typing import Optional
import logging
import struct
from dataclasses import dataclass
from usb.core import USBTimeoutError
from Crypto.Cipher import AES
from Crypto.Hash import HMAC, SHA256
from Crypto.Random import get_random_bytes
from crccheck.crc import Crc32Mpeg2
USB_CHUNK_SIZE = 0x40
@dataclass
class Message:
    """One wrapless-protocol message: category, command and payload.

    The setters enforce the on-wire field widths: category and command
    are packed into a single byte (category << 4 | command << 1) and the
    payload length field is 16 bits.
    """

    _category: int
    _command: int
    _payload: bytes

    @property
    def category(self) -> int:
        """Message category nibble (0x0-0xF)."""
        return self._category

    @category.setter
    def category(self, value) -> None:
        assert value <= 0xF
        self._category = value

    @property
    def command(self) -> int:
        """Command number (0-7)."""
        return self._command

    @command.setter
    def command(self, value) -> None:
        assert value <= 0x7
        self._command = value

    @property
    def payload(self) -> bytes:
        """Raw payload bytes."""
        return self._payload

    @payload.setter
    def payload(self, value) -> None:
        assert len(value) <= 0xFFFF
        self._payload = value
class Device:
    """USB transport for a Goodix fingerprint sensor (wrapless protocol).

    Frames messages into USB_CHUNK_SIZE chunks, verifies/computes the
    0xAA-sum checksum (0x88 marks "no checksum"), and exposes the
    production-read/write and GTLS handshake primitives.
    """

    def __init__(self, product: int, protocol, timeout: Optional[float] = 5) -> None:
        logging.debug(f"__init__({product}, {protocol}, {timeout})")

        # 0x27C6 is the Goodix USB vendor ID.
        self.protocol: Protocol = protocol(0x27C6, product, timeout)
        self.gtls_context: Optional[GTLSContext] = None

        # FIXME Empty device reply buffer
        # (Current patch while waiting for a fix)
        self._empty_buffer()

    def _empty_buffer(self) -> None:
        """Drain stale replies until a read times out."""
        logging.debug("_empty_buffer()")
        try:
            while True:
                self.protocol.read(timeout=0.1)
        except USBTimeoutError as error:
            # -7 is the libusb timeout code: buffer is empty, as intended.
            if error.backend_error_code == -7:
                return

            raise error

    def _recv_next_chunk(self, timeout: float) -> bytes:
        """Read one non-empty USB chunk, retrying up to 10 empty reads."""
        for _ in range(10):
            chunk = self.protocol.read(USB_CHUNK_SIZE, timeout=timeout)
            if chunk:
                return chunk
        raise Exception("Too many empty reads")

    def _recv_message_from_device(
        self,
        timeout: float,
    ) -> Message:
        """Reassemble a full message from one or more USB chunks."""
        data = self._recv_next_chunk(timeout)
        logging.debug(f"Received chunk from device: {data.hex(' ')}")

        # Byte 0: category/command; bytes 1-2: little-endian message size.
        command_byte = data[0]
        message_size = struct.unpack("<H", data[1:3])[0]

        while len(data) - 1 < message_size:
            chunk = self._recv_next_chunk(timeout)
            logging.debug(f"Received chunk from device: {chunk.hex(' ')}")
            # Continuation chunks repeat the command byte with bit 0 set.
            contd_command_byte = chunk[0]
            if contd_command_byte & 1 == 0 or contd_command_byte & 0xFE != command_byte:
                raise Exception("Wrong continued chunk")
            data += chunk[1:]

        category = command_byte >> 4
        command = (command_byte & 0xF) >> 1

        data = data[: message_size + 3]
        msg_checksum = data[-1]
        data = data[:-1]
        # 0x88 means the sender did not compute a checksum.
        if msg_checksum != 0x88:
            checksum = 0xAA - sum(data) & 0xFF
            if msg_checksum != checksum:
                raise Exception(
                    f"Wrong checksum, "
                    f"expected: {hex(checksum)}, received: {hex(msg_checksum)}"
                )

        payload = data[3:]

        message = Message(category, command, payload)
        logging.info(f"Received message from device: {message}")

        return message

    def _check_ack(self, command_byte: int, timeout: float) -> None:
        """Expect a category-0xB ACK echoing `command_byte`."""
        message = self._recv_message_from_device(timeout)
        if message.category != 0xB:
            raise Exception("Not an ACK message")
        if message.command != 0:
            raise Exception("ACK should not have commands")
        if command_byte != message.payload[0]:
            raise Exception("ACK wrong command")
        logging.info(f"Received ACK for {hex(command_byte)}")

    def _send_message_to_device(
        self,
        message: Message,
        use_checksum: bool,
        ack_timeout: float,
    ) -> None:
        """Frame, chunk and send a message, then wait for its ACK."""
        command_byte = message.category << 4 | message.command << 1
        data = struct.pack("<B", command_byte)
        # Size counts payload plus the trailing checksum byte.
        data += struct.pack("<H", len(message.payload) + 1)
        data += message.payload
        checksum = 0xAA - sum(data) & 0xFF if use_checksum else 0x88
        data += struct.pack("<B", checksum)

        logging.info(f"Sending message: {data.hex(' ')}")

        is_first = True
        while data:
            if is_first:
                chunk = data[:USB_CHUNK_SIZE]
                data = data[USB_CHUNK_SIZE:]
                is_first = False
            else:
                # Continuation chunks carry the command byte with bit 0 set.
                chunk = struct.pack("<B", command_byte | 1)
                chunk += data[: USB_CHUNK_SIZE - 1]
                data = data[USB_CHUNK_SIZE - 1 :]
            assert len(chunk) <= USB_CHUNK_SIZE
            logging.debug(f"Sending chunk: {chunk.hex(' ')}")
            self.protocol.write(chunk)

        self._check_ack(command_byte, ack_timeout)

    def ping(self) -> None:
        """Send a no-op message to verify the device responds."""
        logging.debug("ping()")
        self._send_message_to_device(Message(0, 0, b"\x00\x00"), True, 0.5)

    def read_firmware_version(self) -> str:
        """Return the device firmware version string."""
        logging.debug("firmware_version()")
        self._send_message_to_device(Message(0xA, 4, b"\x00\x00"), True, 0.5)
        message = self._recv_message_from_device(2)
        if message.category != 0xA or message.command != 4:
            raise Exception("Not a firmware version reply")
        # Version string is NUL-terminated inside the payload.
        return message.payload.split(b"\x00")[0].decode()

    def _production_read(self, read_type: int) -> bytes:
        """Read a production data blob of `read_type` from the MCU."""
        request = Message(0xE, 2, struct.pack("<L", read_type))
        self._send_message_to_device(request, True, 0.5)
        reply = self._recv_message_from_device(1)
        if reply.category != 0xE or reply.command != 2:
            raise Exception("Not a production read reply")

        payload = reply.payload
        # Byte 0: MCU status; then echoed read type; then payload size.
        if payload[0] != 0:
            raise Exception("Production read MCU failed")
        payload = payload[1:]

        msg_read_type = struct.unpack("<L", payload[:4])[0]
        payload = payload[4:]
        if read_type != msg_read_type:
            raise Exception(
                f"Wrong read type in reply, "
                f"expected: {hex(read_type)}, received: {hex(msg_read_type)}"
            )

        payload_size = struct.unpack("<L", payload[:4])[0]
        payload = payload[4:]
        if payload_size != len(payload):
            raise Exception(
                f"Payload does not match reported size: "
                f"{payload_size} != {len(payload)}"
            )

        return payload

    def _production_write(self, data_type: int, data: bytes) -> None:
        """Write a production data blob of `data_type` to the MCU."""
        payload = struct.pack("<L", data_type)
        payload += struct.pack("<L", len(data))  # Header size excluded
        payload += data
        self._send_message_to_device(Message(0xE, 1, payload), True, 0.5)
        reply = self._recv_message_from_device(1)
        if reply.category != 0xE or reply.command != 1:
            raise Exception("Not a production write reply")
        if reply.payload[0] != 0:
            raise Exception("Production write MCU failed")

    def _recv_mcu(self, read_type) -> bytes:
        """Receive a GTLS handshake payload tagged with `read_type`."""
        logging.debug("recv_mcu()")

        message = self._recv_message_from_device(2)
        if message.category != 0xD or message.command != 1:
            raise Exception("Not a GTLS handshake message")

        payload = message.payload
        msg_read_type = struct.unpack("<L", payload[:4])[0]
        if read_type != msg_read_type:
            raise Exception(
                f"Wrong read type in reply, "
                f"expected: {hex(read_type)}, received: {hex(msg_read_type)}"
            )

        payload_size = struct.unpack("<L", payload[4:8])[0]
        if payload_size != len(payload):
            raise Exception(
                f"Payload does not match reported size: "
                f"{payload_size} != {len(payload)}"
            )

        return payload[8:]

    def _send_mcu(self, data_type, data: bytes) -> None:
        """Send a GTLS handshake payload tagged with `data_type`."""
        logging.debug("send_mcu()")

        payload = struct.pack("<L", data_type)
        payload += struct.pack("<L", len(data) + 8)  # Header size included
        payload += data
        self._send_message_to_device(Message(0xD, 1, payload), True, 0.5)

    def read_sealed_psk(self) -> bytes:
        """Read the sealed pre-shared key (production slot 0xB001)."""
        logging.debug("read_sealed_psk()")
        return self._production_read(0xB001)

    def write_sealed_psk(self, sealed_psk: bytes) -> None:
        """Write the sealed pre-shared key (production slot 0xB001)."""
        logging.debug("writing_sealed_psk()")
        return self._production_write(0xB001, sealed_psk)

    def write_psk_white_box(self, psk_white_box: bytes) -> None:
        """Write the PSK white box blob (production slot 0xB002)."""
        logging.debug("write_psk_white_box()")
        self._production_write(0xB002, psk_white_box)

    def read_psk_hash(self) -> bytes:
        """Read the PSK hash (production slot 0xB003)."""
        logging.debug("read_psk_hash()")
        return self._production_read(0xB003)

    def establish_gtls_connection(self, psk) -> None:
        """Create a GTLS context with `psk` and run the handshake."""
        logging.debug("establish_gtls_connection()")
        self.gtls_context = GTLSContext(psk, self)
        self.gtls_context.establish_connection()

    def read_sensor_register(
        self, register: int, read_size: int, timeout: float
    ) -> bytes:
        """Read `read_size` bytes from a sensor register."""
        request = b"\x00"
        request += struct.pack("<H", register)
        request += struct.pack("<H", read_size)
        self._send_message_to_device(Message(0x8, 0x1, request), True, 0.5)
        reply = self._recv_message_from_device(timeout)
        if reply.category != 0x8 or reply.command != 0x1:
            raise Exception("Not a register read message")
        return reply.payload

    def read_otp(self, timeout: float) -> bytes:
        """Read the sensor's one-time-programmable memory."""
        self._send_message_to_device(Message(0xA, 0x3, b"\x00\x00"), True, 0.5)
        reply = self._recv_message_from_device(timeout)
        if reply.category != 0xA or reply.command != 0x3:
            raise Exception("Not a register read message")
        return reply.payload

    def get_sensor_data(self, payload: bytes, timeout: float) -> bytes:
        """Request a capture and return the GTLS-decrypted sensor data."""
        assert len(payload) == 4
        self._send_message_to_device(Message(0x2, 0, payload), True, 0.5)
        message = self._recv_message_from_device(timeout)
        if message.category != 0x2 or message.command != 0:
            raise Exception("Not a sensor data message")
        # Sensor data only arrives encrypted; a live GTLS session is required.
        if self.gtls_context is None or not self.gtls_context.is_connected():
            raise Exception("Invalid GTLS connection state")
        return self.gtls_context.decrypt_sensor_data(message.payload)
class GTLSContext:
def __init__(self, psk: bytes, device: Device):
    """Hold GTLS handshake state for one device connection.

    state machine: 0 = fresh, 2 = client hello sent, 4 = identities
    exchanged, 5 = handshake complete (see the *_step methods).
    """
    self.state = 0
    # Handshake nonces, derived keys/IVs and HMAC counters all start
    # unset (Optional[bytes] / Optional[int]) until the handshake runs.
    for attr in (
        "client_random", "server_random",
        "client_identity", "server_identity",
        "symmetric_key", "symmetric_iv", "hmac_key",
        "hmac_client_counter_init", "hmac_server_counter_init",
        "hmac_client_counter", "hmac_server_counter",
    ):
        setattr(self, attr, None)
    self.psk = psk
    self.device = device
def _client_hello_step(self) -> None:
    """Open the handshake by sending a fresh 0x20-byte client random."""
    if self.state >= 2:
        raise Exception(f"Cannot send client hello, state: {self.state}")
    self.client_random = get_random_bytes(0x20)
    logging.debug(f"client_random: {self.client_random.hex(' ')}")
    self.device._send_mcu(0xFF01, self.client_random)
    self.state = 2
def _server_identity_step(self) -> None:
    """Receive server random+identity, derive session keys, prove ours.

    The 0x44-byte derived session key is consumed as:
    0x10 AES key | 0x10 AES IV | 0x20 HMAC key | 2+2 HMAC counter seeds.
    """
    if self.state != 2:
        raise Exception(f"Cannot receive server identity, state: {self.state}")
    data = self.device._recv_mcu(0xFF02)
    if len(data) != 0x40:
        raise Exception("Wrong payload size")
    # First half: server random; second half: server's identity proof.
    self.server_random = data[:0x20]
    logging.debug(f"server_random: {self.server_random.hex(' ')}")
    self.server_identity = data[0x20:]
    logging.debug(f"server_identity: {self.server_identity.hex(' ')}")

    session_key = _derive_session_key(
        self.psk, self.client_random + self.server_random, 0x44
    )
    self.symmetric_key = session_key[:0x10]
    logging.debug(f"symmetric_key: {self.symmetric_key.hex(' ')}")
    session_key = session_key[0x10:]
    self.symmetric_iv = session_key[:0x10]
    logging.debug(f"symmetric_iv: {self.symmetric_iv.hex(' ')}")
    session_key = session_key[0x10:]
    self.hmac_key = session_key[:0x20]
    logging.debug(f"hmac_key: {self.hmac_key.hex(' ')}")
    session_key = session_key[0x20:]
    self.hmac_client_counter_init = struct.unpack("<H", session_key[:2])[0]
    logging.debug(f"hmac_client_counter_init: {self.hmac_client_counter_init}")
    session_key = session_key[2:]
    self.hmac_server_counter_init = struct.unpack("<H", session_key[:2])[0]
    logging.debug(f"hmac_server_counter_init: {self.hmac_server_counter_init}")
    session_key = session_key[2:]
    # The 0x44-byte key must be fully consumed by the slices above.
    assert not session_key

    # Our identity proof: HMAC over both randoms with the derived key.
    self.client_identity = HMAC.HMAC(
        self.hmac_key, self.client_random + self.server_random, SHA256
    ).digest()
    logging.debug(f"client_identity: {self.client_identity.hex(' ')}")
    # Server sent the same HMAC; mismatch means PSK/derivation disagreement.
    if self.server_identity != self.client_identity:
        raise Exception("Session key not derived correctly")

    self.device._send_mcu(0xFF03, self.client_identity + b"\xee" * 4)
    self.state = 4
def _server_done_step(self) -> None:
    """Consume the server-done message and arm the HMAC counters."""
    if self.state != 4:
        raise Exception(f"Cannot receive server done, state: {self.state}")
    reply = self.device._recv_mcu(0xFF04)
    (status,) = struct.unpack("<L", reply)
    if status != 0:
        raise Exception(f"Wrong handshake result reported: {status}")
    # Running counters start from the seeds derived out of the session key.
    self.hmac_client_counter = self.hmac_client_counter_init
    self.hmac_server_counter = self.hmac_server_counter_init
    self.state = 5
def establish_connection(self) -> None:
    """Run the full three-step GTLS handshake with the device."""
    logging.info("Starting GTLS handshake")
    self._client_hello_step()
    self._server_identity_step()
    self._server_done_step()
    logging.info("GTLS handshake successful")
def is_connected(self):
    """Return True once the GTLS handshake has completed (state 5)."""
    handshake_done = self.state == 5
    return handshake_done
    def decrypt_sensor_data(self, encrypted_message):
        """Verify and decrypt a 0xAA01 sensor-data message.

        The payload between the 8-byte header and the trailing 0x20-byte HMAC
        consists of 15 chunks: even-indexed chunks are appended as-is (they
        are only GEA-encrypted), while odd-indexed 0x3F0-byte chunks are
        additionally AES-CBC encrypted and get decrypted here.  The result is
        authenticated with HMAC-SHA256 and a CRC32/MPEG-2 checksum before the
        final GEA stream decryption.

        :param encrypted_message: full message including the type/length
            header and the trailing HMAC.
        :return: the decrypted sensor data bytes.
        :raises Exception: on type/length mismatch, HMAC or CRC failure.
        """
        data_type = struct.unpack("<L", encrypted_message[:4])[0]
        if data_type != 0xAA01:
            raise Exception("Unexpected data type")
        msg_length = struct.unpack("<L", encrypted_message[4:8])[0]
        if msg_length != len(encrypted_message):
            raise Exception("Length mismatch")
        # Strip header (8 bytes) and trailing HMAC (0x20 bytes).
        encrypted_payload = encrypted_message[8:-0x20]
        payload_hmac = encrypted_message[-0x20:]
        logging.debug(f"HMAC for encrypted payload: {payload_hmac.hex(' ')}")
        gea_encrypted_data = b""
        for block_idx in range(15):
            if block_idx % 2 == 0:
                # Even chunks pass through unchanged: 0x3A7 bytes for the
                # first chunk, the remainder for the last, 0x3F0 otherwise.
                if block_idx == 0:
                    gea_encrypted_data += encrypted_payload[:0x3A7]
                    encrypted_payload = encrypted_payload[0x3A7:]
                elif block_idx == 14:
                    assert len(gea_encrypted_data) == 0x3A7 + 0x3F0 * 13
                    gea_encrypted_data += encrypted_payload
                else:
                    gea_encrypted_data += encrypted_payload[:0x3F0]
                    encrypted_payload = encrypted_payload[0x3F0:]
            else:
                # Odd chunks are AES-CBC decrypted; note a fresh cipher with
                # the same IV is created for every chunk.
                cipher = AES.new(self.symmetric_key, AES.MODE_CBC, iv=self.symmetric_iv)
                gea_encrypted_data += cipher.decrypt(encrypted_payload[:0x3F0])
                encrypted_payload = encrypted_payload[0x3F0:]
        # HMAC covers the server counter plus only the last 0x400 bytes.
        hmac_data = struct.pack("<L", self.hmac_server_counter)
        hmac_data += gea_encrypted_data[-0x400:]
        computed_hmac = HMAC.HMAC(self.hmac_key, hmac_data, SHA256).digest()
        if computed_hmac != payload_hmac:
            raise Exception("HMAC verification failed")
        logging.debug("Encrypted payload HMAC verified")
        # The counter rolls forward (mod 2**32) after every verified message.
        self.hmac_server_counter = (self.hmac_server_counter + 1) & 0xFFFFFFFF
        logging.debug(f"HMAC server counter is now: {self.hmac_server_counter}")
        if len(gea_encrypted_data) < 5:
            raise Exception("Encrypted payload too short")
        # The first five bytes are always discarded (alignment?)
        gea_encrypted_data = gea_encrypted_data[5:]
        # The last four bytes carry a CRC over the remaining GEA data.
        msg_gea_crc = decode_u32(gea_encrypted_data[-4:])
        gea_encrypted_data = gea_encrypted_data[:-4]
        logging.debug(f"GEA data CRC: {hex(msg_gea_crc)}")
        computed_gea_crc = Crc32Mpeg2.calc(gea_encrypted_data)
        if computed_gea_crc != msg_gea_crc:
            raise Exception("CRC check failed")
        logging.debug("GEA data CRC verified")
        # Only the first 4 bytes of the symmetric key seed the GEA cipher.
        gea_key = self.symmetric_key[:4]
        logging.debug(f"GEA key: {gea_key.hex(' ')}")
        return _gea_decrypt(gea_key, gea_encrypted_data)
def _derive_session_key(psk, random_data: bytes, session_key_length: int) -> bytes:
    """Expand *psk* and *random_data* into *session_key_length* bytes of key material.

    Mirrors the TLS P_hash construction with HMAC-SHA256 and the fixed
    label ``b"master secret"``: A(i) = HMAC(psk, A(i-1)), output blocks are
    HMAC(psk, A(i) + seed).
    """
    seed = b"master secret" + random_data
    a_value = seed
    chunks = []
    produced = 0
    while produced < session_key_length:
        a_value = HMAC.HMAC(psk, a_value, SHA256).digest()
        block = HMAC.HMAC(psk, a_value + seed, SHA256).digest()
        chunks.append(block)
        produced += len(block)
    return b"".join(chunks)[:session_key_length]
def decode_u32(data: bytes):
    """Decode a 32-bit value stored as two big-endian 16-bit words, low word first.

    I.e. bytes ``b0 b1 b2 b3`` decode to ``(b2<<24 | b3<<16) | (b0<<8 | b1)``.
    """
    assert len(data) == 4
    low_word = int.from_bytes(data[0:2], "big")
    high_word = int.from_bytes(data[2:4], "big")
    return (high_word << 16) | low_word
def _gea_decrypt(key, encrypted_data) -> bytes:
    """Decrypt *encrypted_data* with a GEA-style 32-bit stream cipher.

    :param key: 4 bytes, consumed as a little-endian 32-bit state seed.
    :param encrypted_data: ciphertext; consumed as little-endian 16-bit
        words, so its length must be even.
    :return: the decrypted bytes, same length as the input.

    NOTE(review): the ``uVar*`` names and the large bit expression look
    decompiler-generated; they are preserved verbatim on purpose.
    """
    key = struct.unpack("<L", key)[0]
    decrypted_data = b""
    for data_idx in range(0, len(encrypted_data), 2):
        # One keystream step: uVar2 mixes selected state bits via shifts,
        # masks and XORs; afterwards the state is shifted with feedback.
        uVar3 = (key >> 1 ^ key) & 0xFFFFFFFF
        uVar2 = (
            (
                (
                    (
                        (
                            (
                                (
                                    (key >> 0xF & 0x2000 | key & 0x1000000) >> 1
                                    | key & 0x20000
                                )
                                >> 2
                                | key & 0x1000
                            )
                            >> 3
                            | (key >> 7 ^ key) & 0x80000
                        )
                        >> 1
                        | (key >> 0xF ^ key) & 0x4000
                    )
                    >> 2
                    | key & 0x2000
                )
                >> 2
                | uVar3 & 0x40
                | key & 0x20
            )
            >> 1
            | (key >> 9 ^ key << 8) & 0x800
            | (key >> 0x14 ^ key * 2) & 4
            | (key * 8 ^ key >> 0x10) & 0x4000
            | (key >> 2 ^ key >> 0x10) & 0x80
            | (key << 6 ^ key >> 7) & 0x100
            | (key & 0x100) << 7
        )
        uVar2 = uVar2 & 0xFFFFFFFF
        uVar1 = key & 0xFFFF
        # State update: shift right by one with a feedback bit in the MSB.
        key = ((key ^ (uVar3 >> 0x14 ^ key) >> 10) << 0x1F | key >> 1) & 0xFFFFFFFF
        input_element = struct.unpack("<H", encrypted_data[data_idx : data_idx + 2])[0]
        # Assemble the 16-bit keystream word and XOR it into the ciphertext.
        stream_val = ((uVar2 >> 8) & 0xFFFF) + (uVar2 & 0xFF | uVar1 & 1) * 0x100
        decrypted_data += struct.pack("<H", input_element ^ stream_val)
    assert len(encrypted_data) == len(decrypted_data)
    return decrypted_data
| 34.989111 | 88 | 0.591317 | 2,300 | 19,279 | 4.735217 | 0.113478 | 0.039849 | 0.020292 | 0.019282 | 0.369388 | 0.279405 | 0.212744 | 0.178404 | 0.146084 | 0.140942 | 0 | 0.034121 | 0.297681 | 19,279 | 550 | 89 | 35.052727 | 0.770236 | 0.008714 | 0 | 0.133333 | 0 | 0 | 0.120387 | 0.017744 | 0 | 0 | 0.023502 | 0.001818 | 0.021429 | 1 | 0.085714 | false | 0 | 0.02381 | 0.009524 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
925c6033603c561ea55693dac40cadc80e60ea16 | 10,293 | py | Python | silverberg/client.py | TimothyZhang/silverberg | fb93ab68988c6ad6f7a4136d2c5b16b32966d0ca | [
"Apache-2.0"
] | 1 | 2019-09-22T04:00:56.000Z | 2019-09-22T04:00:56.000Z | silverberg/client.py | TimothyZhang/silverberg | fb93ab68988c6ad6f7a4136d2c5b16b32966d0ca | [
"Apache-2.0"
] | 14 | 2015-01-22T01:00:50.000Z | 2017-12-06T03:35:46.000Z | silverberg/client.py | TimothyZhang/silverberg | fb93ab68988c6ad6f7a4136d2c5b16b32966d0ca | [
"Apache-2.0"
] | 4 | 2015-03-31T19:49:05.000Z | 2020-03-03T20:44:32.000Z | # Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Client API library for the Silverberg Twisted Cassandra CQL interface.
"""
import re
from silverberg.cassandra import Cassandra
from silverberg.cassandra import ttypes
from twisted.internet.defer import succeed, Deferred
from silverberg.marshal import prepare, unmarshallers
from silverberg.thrift_client import OnDemandThriftClient
from silverberg.cassandra.ttypes import ConsistencyLevel
class CQLClient(object):
    """
    Cassandra CQL Client object.

    Instantiate it and it will on-demand create a connection to the Cassandra
    cluster.

    :param cass_endpoint: A twisted Endpoint
    :type cass_endpoint: twisted.internet.interfaces.IStreamClientEndpoint
    :param keyspace: A keyspace to connect to
    :type keyspace: str.
    :param user: Username to connect with.
    :type user: str.
    :param password: Password to use in conjunction with Username.
    :type password: str.
    :param bool disconnect_on_cancel: Should the underlying TCP connection be
        disconnected when execute deferred is cancelled?

    Upon connecting, the client will authenticate (if parameters are provided)
    and obtain the keyspace definition so that it can de-serialize properly.

    n.b. Cassandra presently doesn't have any real support for password
    authentication in the mainline as the simple access control options
    are disabled; you probably need to secure your Cassandra server using
    different methods and the password code isn't heavily tested.
    """

    def __init__(self, cass_endpoint, keyspace, user=None, password=None,
                 disconnect_on_cancel=False):
        self._client = OnDemandThriftClient(cass_endpoint, Cassandra.Client)
        self._keyspace = keyspace
        self._user = user
        self._password = password
        self._disconnect_on_cancel = disconnect_on_cancel

    def _set_keyspace(self, client):
        """Bind *client* to our keyspace; fires with the client itself."""
        d = client.set_keyspace(self._keyspace)
        return d.addCallback(lambda _: client)

    def _login(self, client):
        """Authenticate *client* with our credentials; fires with the client."""
        creds = {'username': self._user, 'password': self._password}
        authreq = ttypes.AuthenticationRequest(creds)
        d = client.login(authreq)
        d.addCallback(lambda _: client)
        return d

    def _connection(self):
        """Return a Deferred firing with a logged-in, keyspace-bound client."""
        def _handshake(client):
            d = succeed(client)
            # Authentication is only attempted when both credentials are set.
            if self._user and self._password:
                d.addCallback(self._login)
            d.addCallback(self._set_keyspace)
            return d

        ds = self._client.connection(_handshake)
        return ds

    def disconnect(self):
        """
        Disconnect from the cassandra cluster.  Cassandra and Silverberg do
        not require the connection to be closed before exiting.  However, this
        method may be useful if resources are constrained, or for testing
        purposes if using or injecting :class:`TestCQLClient` is impossible.

        :return: a :class:`Deferred` that fires with None when disconnected.
        """
        return self._client.disconnect()

    def describe_version(self):
        """
        Query the Cassandra server for the version.

        :returns: string -- the version tag
        """
        def _vers(client):
            return client.describe_version()

        d = self._connection()
        d.addCallback(_vers)
        return d

    def _unmarshal_result(self, schema, raw_rows, _unmarshallers):
        """Turn raw thrift rows into dicts of de-serialized column values.

        Columns whose type is not covered by *_unmarshallers* keep their raw
        byte value.
        """
        rows = []

        def _unmarshal_val(vtype, val):
            if val is None:
                return
            # Differentiate between regular and collection types.
            # Collection types look like 'ListType(SomeCassandraType)' or
            # 'MapType(KeyType, ValType)', so try to split into 1~3 parts and
            # check if we can marshal them.
            # BUGFIX: the pattern must be a raw string — '\(' and '\)' are
            # invalid escape sequences in a normal string literal and raise
            # SyntaxWarning/DeprecationWarning on modern Python.
            types = re.split(r'\(|,|\)', str(vtype).rstrip(')'))
            # Regular type
            if len(types) == 1:
                if vtype in _unmarshallers:
                    return _unmarshallers[vtype](val)
            # List/Set
            elif len(types) == 2:
                if types[0] in _unmarshallers and types[1] in _unmarshallers:
                    return _unmarshallers[types[0]](types[1], val)
            # Map
            elif len(types) == 3:
                if types[0] in _unmarshallers and types[1] in _unmarshallers \
                        and types[2] in _unmarshallers:
                    return _unmarshallers[types[0]](types[1], types[2], val)
            # XXX: We do not currently implement the full range of types,
            # so we can not unmarshal all of them; in that case just return
            # the raw bytes.
            return val

        for raw_row in raw_rows:
            row = {}
            for raw_col in raw_row.columns:
                specific = schema.value_types[raw_col.name]
                row[raw_col.name] = _unmarshal_val(specific, raw_col.value)
            rows.append(row)
        return rows

    def execute(self, query, args, consistency):
        """
        Execute a CQL query against the server.

        :param query: The CQL query to execute
        :type query: str.
        :param args: The arguments to substitute
        :type args: dict.
        :param consistency: The consistency level
        :type consistency: ConsistencyLevel

        In order to avoid unpleasant issues of CQL injection
        (Hey, just because there's no SQL doesn't mean that Little
        Bobby Tables won't mess things up for you like in XKCD #327)
        you probably want to use argument substitution instead of
        concatting strings together to build a query.

        Thus, like the official CQL driver for non-Twisted python
        that comes with the Cassandra distro, we do variable substitution.

        Example::

            d = client.execute("UPDATE :table SET 'fff' = :val WHERE "
                               "KEY = :key", {"val": 1234, "key": "fff", "table": "blah"})

        :returns: A Deferred that fires with either None, an int, or an
            iterable of `{'column': value, ...}` dictionaries, depending
            on the CQL query.  E.g. an UPDATE would return None,
            whereas a SELECT would return an int or some rows.

        Example output::

            [{"fff": 1222}]
        """
        prep_query = prepare(query, args)

        def _execute(client):
            exec_d = client.execute_cql3_query(prep_query,
                                               ttypes.Compression.NONE, consistency)
            if self._disconnect_on_cancel:
                # Cancelling the returned Deferred tears down the connection.
                cancellable_d = Deferred(lambda d: self.disconnect())
                exec_d.chainDeferred(cancellable_d)
                return cancellable_d
            else:
                return exec_d

        def _proc_results(result):
            if result.type == ttypes.CqlResultType.ROWS:
                return self._unmarshal_result(result.schema, result.rows,
                                              unmarshallers)
            elif result.type == ttypes.CqlResultType.INT:
                return result.num
            else:
                return None

        d = self._connection()
        d.addCallback(_execute)
        d.addCallback(_proc_results)
        return d
class TestingCQLClient(CQLClient):
    """
    Cassandra CQL client variant intended for testing purposes.

    Behaves exactly like :class:`CQLClient` (same constructor: a twisted
    endpoint, a keyspace and optional user/password credentials, with
    authentication performed on connect when both are given), but also
    exposes the underlying Twisted transport and provides pause/resume
    helpers so it can be used in trial tests.

    Keeping one connection alive between tests avoids reconnect latency,
    but trial flags any connection still attached to the reactor as a dirty
    reactor — :meth:`pause` and :meth:`resume` work around that.

    n.b. Cassandra presently doesn't have any real support for password
    authentication in the mainline as the simple access control options
    are disabled; you probably need to secure your Cassandra server using
    different methods and the password code isn't heavily tested.
    """

    @property
    def transport(self):
        """
        Get the underlying Twisted transport.
        """
        return self._client._transport

    def pause(self):
        """
        Pause the client by removing the connection from the reactor.

        Useful in tests if, for instance, latency is a problem and you do
        not want to disconnect and reconnect between every test.  Without
        either disconnecting or pausing, Twisted's testing framework
        (``trial``) fails such tests with a dirty reactor warning.
        """
        transport = self.transport
        if transport:
            transport.stopReading()
            transport.stopWriting()

    def resume(self):
        """
        Resume the client by making sure the reactor is aware of the
        connection again.

        Counterpart of :meth:`pause`; see there for why this exists.
        """
        transport = self.transport
        if transport:
            transport.startReading()
            transport.startWriting()
# Names exported on `from silverberg.client import *`.
__all__ = ["CQLClient", "ConsistencyLevel", "TestingCQLClient"]
| 35.739583 | 102 | 0.644224 | 1,256 | 10,293 | 5.199045 | 0.285032 | 0.006126 | 0.007351 | 0.006738 | 0.306585 | 0.294487 | 0.294487 | 0.294487 | 0.282695 | 0.282695 | 0 | 0.004778 | 0.288351 | 10,293 | 287 | 103 | 35.864112 | 0.886689 | 0.51093 | 0 | 0.09901 | 0 | 0 | 0.01479 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.158416 | false | 0.039604 | 0.069307 | 0.009901 | 0.445545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
925e1e9d69320dc2a3507c471bdf305b87ca4a4f | 1,459 | py | Python | type_checker.py | perimosocordiae/onearg | 1ed8027f864b31d77193f4cb511dd93b7c4166bb | [
"MIT"
] | null | null | null | type_checker.py | perimosocordiae/onearg | 1ed8027f864b31d77193f4cb511dd93b7c4166bb | [
"MIT"
] | null | null | null | type_checker.py | perimosocordiae/onearg | 1ed8027f864b31d77193f4cb511dd93b7c4166bb | [
"MIT"
] | null | null | null | from collections import defaultdict
from parser import PARSER
from syntax_tree import build_ast
from type_objects import BuiltinType
def check_types(syntax_tree):
    """Type-check every definition in *syntax_tree*.

    Returns the table of all known types, a mapping of
    ``name -> {signature -> type}``.  Raises ``TypeError`` on the first
    reference to an unknown type and ``NameError`` on duplicate definitions.
    """
    all_types = defaultdict(dict)
    # Builtin primitive types live under a single None-keyed "overload".
    for primitive in ('int', 'float', 'string', 'void'):
        all_types[primitive][None] = BuiltinType(primitive)
    # Builtin function types are declared in the bundled builtins source.
    builtin_tree = build_ast(PARSER.parseFile('builtins.oa', parseAll=True))
    register_defined_types(builtin_tree, all_types)
    # First pass: collect all user-defined types.
    register_defined_types(syntax_tree, all_types)
    # Second pass: every referenced type must be known; then infer types.
    for defn in syntax_tree:
        for name, bad_type in defn.type.check_sub_types(all_types):
            # This dies on the first bad type, which isn't ideal.
            raise TypeError('Unknown type in %s: %s' % (defn.info(name), bad_type))
        defn.check_types(all_types)
    return all_types
def register_defined_types(syntax_tree, all_types):
    """Record the type of every definition in *syntax_tree* under its signature.

    *all_types* maps ``name -> {signature -> type}``; a repeated
    (name, signature) pair raises ``NameError``.
    """
    for definition in syntax_tree:
        signature = definition.type.signature()
        known_overloads = all_types[definition.name]
        if signature in known_overloads:
            raise NameError('Duplicate definition for `%s`: %s' % (definition.name,
                                                                   signature))
        known_overloads[signature] = definition.type
if __name__ == '__main__':
    import sys
    # CLI entry point: parse the source file given as argv[1] and type-check it.
    tree = build_ast(PARSER.parseFile(sys.argv[1], parseAll=True))
    check_types(tree)
    print('Types check out!')
| 31.042553 | 77 | 0.702536 | 202 | 1,459 | 4.866337 | 0.376238 | 0.073245 | 0.04883 | 0.054934 | 0.19939 | 0.121058 | 0.077314 | 0 | 0 | 0 | 0 | 0.000859 | 0.202193 | 1,459 | 46 | 78 | 31.717391 | 0.843643 | 0.154901 | 0 | 0.068966 | 0 | 0 | 0.08802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.172414 | 0 | 0.275862 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
925f4af91f977c93af580c343c412cbe16f6b8e5 | 766 | py | Python | flowvision/layers/attention/se.py | lixiang007666/vision | 2eced8d6ed2dc42f6451f81f8883318fa0bd24d3 | [
"BSD-3-Clause"
] | null | null | null | flowvision/layers/attention/se.py | lixiang007666/vision | 2eced8d6ed2dc42f6451f81f8883318fa0bd24d3 | [
"BSD-3-Clause"
] | null | null | null | flowvision/layers/attention/se.py | lixiang007666/vision | 2eced8d6ed2dc42f6451f81f8883318fa0bd24d3 | [
"BSD-3-Clause"
] | null | null | null | import oneflow as flow
import oneflow.nn as nn
class SEModule(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools the input over its spatial dimensions, feeds the
    pooled vector through a two-layer 1x1-conv bottleneck, and rescales the
    input channels by the resulting gate.
    """

    def __init__(
        self,
        channels,
        reduction=16,
        rd_channels=None,
        act_layer=nn.ReLU,
        gate_layer=nn.Sigmoid,
        mlp_bias=False,
    ):
        super(SEModule, self).__init__()
        # An explicit bottleneck width wins over channels // reduction.
        if rd_channels is None:
            rd_channels = channels // reduction
        self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias)
        self.act = act_layer(inplace=True)
        self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias)
        self.gate = gate_layer()

    def forward(self, x):
        # Squeeze: spatial mean over dims (2, 3), keeping (N, C, 1, 1).
        pooled = x.mean((2, 3), keepdim=True)
        # Excite: bottleneck MLP implemented with 1x1 convolutions.
        excitation = self.fc2(self.act(self.fc1(pooled)))
        # Scale the input by the per-channel gate.
        return x * self.gate(excitation)
| 29.461538 | 83 | 0.616188 | 109 | 766 | 4.100917 | 0.394495 | 0.134228 | 0.080537 | 0.071588 | 0.107383 | 0.107383 | 0 | 0 | 0 | 0 | 0 | 0.021544 | 0.272846 | 766 | 25 | 84 | 30.64 | 0.780969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
926080df7fe450b583f1afff1fb17e6b9cc7b79a | 2,669 | py | Python | seqauto/management/commands/import_sequencing_info.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | seqauto/management/commands/import_sequencing_info.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | seqauto/management/commands/import_sequencing_info.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | import pandas as pd
from django.core.management.base import BaseCommand
from library.log_utils import console_logger
from seqauto.models import Sequencer, SequencingInfo, EnrichmentKit
from snpdb.models import Lab, LabProject
# Column headers expected in the tab-separated input file (validated and
# consumed by Command.handle below).
LAB_NAME = "lab name"
INSTITUTION = "institution"
DOI = "doi"
PAPER_NAME = "paper name"
YEAR_PUB = "year pub"
ENRICHMENT_KIT = "enrichment kit"
SEQUENCER = "sequencer"
SEQ_DETAILS = "seq details"
FILE_TYPE = "file type"
FILE_COUNT = "file count"
class Command(BaseCommand):
    """Import sequencing/publication details from a tab-separated file."""

    def add_arguments(self, parser):
        parser.add_argument('SequencingInfo', help='csv file for Sequencing Details')

    def handle(self, *args, **options):
        """Read the TSV, validate its headers, then create one SequencingInfo per row.

        Rows that fail to import (unknown lab/kit/sequencer, bad values) are
        reported and skipped so the rest of the file is still processed.
        """
        filename = options["SequencingInfo"]
        logger = console_logger()
        df = pd.read_csv(filename, sep='\t', index_col=None)
        # Fail fast if any expected column is missing from the header row.
        for col in [LAB_NAME, INSTITUTION, DOI, PAPER_NAME, YEAR_PUB,
                    ENRICHMENT_KIT, SEQUENCER, SEQ_DETAILS, FILE_TYPE, FILE_COUNT]:
            if col not in df.columns:
                msg = f"Expected column '{col}' in tab separated file SequencingInfo"
                raise ValueError(msg)
        logger.info("Loaded df")
        # Insert Sequencing Details, best-effort per row.
        for _, row in df.iterrows():
            try:
                # Resolve the LabProject via Lab name; missing objects raise
                # <Model>.DoesNotExist, caught below.
                lab = Lab.objects.get(name=row[LAB_NAME])
                project = LabProject.objects.get(lab=lab.id)
                enrichmentkit = EnrichmentKit.objects.get(name=row[ENRICHMENT_KIT])
                sequencer = Sequencer.objects.get(name=row[SEQUENCER])
                SequencingInfo.objects.create(lab_project=project,
                                              doi=row[DOI],
                                              paper_name=row[PAPER_NAME],
                                              year_published=row[YEAR_PUB],
                                              enrichment_kit=enrichmentkit,
                                              sequencer=sequencer,
                                              seq_details=row[SEQ_DETAILS],
                                              file_type=row[FILE_TYPE],
                                              file_count=row[FILE_COUNT])
                print("saved sequence info for lab '%s'" % row[LAB_NAME])
            except Exception as e:
                # BUGFIX: this was a bare "except:" that swallowed every error
                # (including create() failures) and always claimed the lab did
                # not exist; report the real reason instead.
                print(f"could not import row for lab '{row[LAB_NAME]}': {e}")
        logger.info("saved data")
| 39.25 | 133 | 0.557887 | 283 | 2,669 | 5.123675 | 0.349823 | 0.033793 | 0.033103 | 0.041379 | 0.030345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.359685 | 2,669 | 67 | 134 | 39.835821 | 0.848449 | 0.043837 | 0 | 0 | 0 | 0 | 0.138987 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.104167 | 0 | 0.166667 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92632090c4e3d282320b824c62a3295534a71fb2 | 8,746 | py | Python | tests/test_utils_conf.py | Cologler/anyioc-python | 87aaf52208b8c510c9128f89359300a2158e3637 | [
"MIT"
] | null | null | null | tests/test_utils_conf.py | Cologler/anyioc-python | 87aaf52208b8c510c9128f89359300a2158e3637 | [
"MIT"
] | null | null | null | tests/test_utils_conf.py | Cologler/anyioc-python | 87aaf52208b8c510c9128f89359300a2158e3637 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
import pytest
from pytest import raises
from anyioc import ServiceProvider, LifeTime
from anyioc.utils_conf import BadConfError, load_conf
def from_conf(conf: dict):
provider = ServiceProvider()
load_conf(provider, conf)
return provider
class A:
'a example class'
def __init__(self, gerju: int):
super().__init__()
self.gerju = gerju
self.enter = None
def __enter__(self):
self.enter = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.enter = False
def test_when_conf_is_not_a_dict():
with raises(TypeError) as excinfo:
from_conf([])
assert excinfo.value.args[0] == "conf is not a dict."
def test_load_conf_with_services_dict():
provider = from_conf({
'services': {
'fsdnj': dict(
factory=A,
inject_by='name'
),
'fqufe': dict(
factory=f'{__name__}:A',
inject_by='anno',
lifetime='singleton'
),
'fjndau': dict(
factory=dict(module=__name__, name='A'),
enter=True,
lifetime=LifeTime.scoped
)
}
})
provider.register_value('gerju', 155)
provider.register_value(int, 455)
with provider:
fsdnj = provider['fsdnj']
fqufe = provider['fqufe']
fjndau = provider['fjndau']
assert isinstance(fsdnj, A)
assert isinstance(fqufe, A)
assert isinstance(fjndau, A)
assert fsdnj.gerju == 155
assert fqufe.gerju == 455
assert isinstance(fjndau.gerju, ServiceProvider)
assert fsdnj.enter is None
assert fqufe.enter is None
assert fjndau.enter is True
assert fjndau.enter is False
def test_load_conf_with_services_list():
provider = from_conf({
'services': [
dict(
key='fsdnj',
factory=A,
inject_by='name'
),
dict(
key='fhua',
factory=A,
inject_by=dict(
gerju=int
)
)
]
})
provider.register_value('gerju', 155)
provider.register_value(int, 455)
fsdnj = provider['fsdnj']
assert isinstance(fsdnj, A)
assert fsdnj.gerju == 155
fhua = provider['fhua']
assert isinstance(fhua, A)
assert fhua.gerju == 455
def test_load_conf_with_services_dict_when_key_conflict():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
key='dasdas'
)
}
))
assert excinfo.value.args[0] == "</services['fjndau']> already contains another key: 'dasdas'."
def test_services_when_factory_is_a_random_str():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
factory='dasdas'
)
}
))
assert excinfo.value.args[0] == "</services['fjndau']/factory> should be a `module-name:callable-name` like str."
def test_services_when_factory_module_is_not_a_str():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
factory=dict(module=14, name='djsau')
)
}
))
assert excinfo.value.args[0] == "</services['fjndau']/factory/module> is not a str."
def test_services_when_factory_name_is_not_a_str():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
factory=dict(module=__name__, name=15)
)
}
))
assert excinfo.value.args[0] == "</services['fjndau']/factory/name> is not a str."
def test_services_when_factory_is_not_a_str():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
factory=15
)
}
))
assert excinfo.value.args[0] == "</services['fjndau']/factory> is not either str or dict."
def test_services_when_factory_module_unable_import():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
factory=f'djashfiaushfuia:not_callable'
)
}
))
assert excinfo.value.args[0] == "</services['fjndau']/factory>: unable import module 'djashfiaushfuia'."
def test_services_when_factory_module_has_no_such_attr():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
factory=f'{__name__}:djashfiaushfuia'
)
}
))
assert excinfo.value.args[0] == "</services['fjndau']/factory>: no such attr 'djashfiaushfuia' on module 'test_utils_conf'."
not_callable = object()
def test_services_when_factory_is_not_a_callable():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
factory=f'{__name__}:not_callable'
)
}
))
assert excinfo.value.args[0] == "</services['fjndau']/factory> is not a callable."
def test_services_when_inject_by_is_invaild():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
inject_by='dsadsa',
factory=from_conf
)
}
))
assert excinfo.value.args[0] == "value of </services['fjndau']/inject_by> ('dsadsa') is not one of (anno, inject_by_anno, name, inject_by_name)."
def test_services_when_lifetime_is_invaild():
with raises(BadConfError) as excinfo:
from_conf(dict(
services={
'fjndau': dict(
lifetime='fett',
factory=from_conf
)
}
))
assert excinfo.value.args[0] == "value of </services['fjndau']/lifetime> ('fett') is not one of (transient, scoped, singleton)."
def test_load_conf_with_values_dict():
provider = from_conf({
'values': {
'k': 'v'
}
})
assert provider['k'] == 'v'
def test_load_conf_with_values_list():
provider = from_conf({
'values': [
{
'key': 'k',
'value': 'v'
}, {
'key': 'mod-pytest',
'value': 'pytest',
'ref': True,
}, {
'key': 'obj-pytest.raises',
'value': 'pytest:raises',
'ref': True,
}, {
'key': 'obj-sp',
'value': 'anyioc:ServiceProvider',
'ref': True,
}
]
})
assert provider['k'] == 'v'
assert provider['mod-pytest'] is pytest
assert provider['obj-pytest.raises'] == raises
assert provider['obj-sp'] is ServiceProvider
def test_load_conf_with_binds_dict():
provider = from_conf({
'binds': {
'dsaju': 'fansio'
}
})
provider.register_value('fansio', 'gnis')
assert provider['dsaju'] == 'gnis'
def test_load_conf_with_binds_list():
provider = from_conf({
'binds': [
dict(key='akfw', target='kdsa')
]
})
provider.register_value('kdsa', 'dsafa')
assert provider['akfw'] == 'dsafa'
def test_load_conf_with_groups_dict():
provider = from_conf({
'groups': {
'fask': ['fansio', 'geo']
}
})
provider.register_value('fansio', 'gnis')
provider.register_value('geo', 'egwg')
assert provider['fask'] == ('gnis', 'egwg')
def test_load_conf_with_groups_list():
provider = from_conf({
'groups': [
dict(key='fasfa', keys=['fansio', 'geo'])
]
})
provider.register_value('fansio', 'gnis')
provider.register_value('geo', 'egwg')
assert provider['fasfa'] == ('gnis', 'egwg')
def test_load_conf_with_groups_list_with_item_is_not_a_list():
with raises(BadConfError) as excinfo:
from_conf(dict(
groups=[
dict(key='fasfa', keys=['fansio', 'geo']),
dict(key='fsdgfg', keys=[]),
dict(key='asfa', keys=object())
]
))
assert excinfo.value.args[0] == '</groups[2]/keys> is not a list.'
| 29.153333 | 149 | 0.53533 | 908 | 8,746 | 4.917401 | 0.143172 | 0.041209 | 0.034938 | 0.045689 | 0.54916 | 0.498096 | 0.432923 | 0.390594 | 0.333259 | 0.290258 | 0 | 0.008992 | 0.338783 | 8,746 | 299 | 150 | 29.250836 | 0.763099 | 0.013149 | 0 | 0.400763 | 0 | 0.01145 | 0.162365 | 0.049184 | 0 | 0 | 0 | 0 | 0.133588 | 1 | 0.091603 | false | 0 | 0.022901 | 0 | 0.125954 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9263ae98e68deabf82d6263e3f38c433bcba634f | 34,195 | py | Python | src/eduid_userdb/tests/test_user.py | SUNET/eduid-userdb | 5970880caf0b0e2bdee6c23869ef287acc87af2a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/eduid_userdb/tests/test_user.py | SUNET/eduid-userdb | 5970880caf0b0e2bdee6c23869ef287acc87af2a | [
"BSD-2-Clause-FreeBSD"
] | 12 | 2015-08-28T12:05:32.000Z | 2020-06-23T13:31:29.000Z | src/eduid_userdb/tests/test_user.py | SUNET/eduid-userdb | 5970880caf0b0e2bdee6c23869ef287acc87af2a | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2016-10-24T06:37:33.000Z | 2016-11-21T11:39:39.000Z | import unittest
from datetime import datetime
from hashlib import sha256
from bson import ObjectId
from six import string_types
from eduid_userdb import LockedIdentityNin, OidcAuthorization, OidcIdToken, Orcid
from eduid_userdb.credentials import METHOD_SWAMID_AL2_MFA, U2F, CredentialList, Password
from eduid_userdb.exceptions import EduIDUserDBError, UserHasNotCompletedSignup, UserIsRevoked
from eduid_userdb.mail import MailAddress, MailAddressList
from eduid_userdb.nin import Nin, NinList
from eduid_userdb.phone import PhoneNumber, PhoneNumberList
from eduid_userdb.profile import Profile, ProfileList
from eduid_userdb.tou import ToUList
from eduid_userdb.user import SubjectType, User
__author__ = 'ft'
from eduid_userdb.util import utc_now
def _keyid(kh):
return 'sha256:' + sha256(kh.encode('utf-8')).hexdigest()
class TestNewUser(unittest.TestCase):
    def setUp(self):
        """Build two raw user documents and the matching User fixtures.

        ``data1``/``data2`` mirror old-style MongoDB user documents;
        ``_setup_user1``/``_setup_user2`` construct equivalent ``User``
        objects so tests can compare object attributes against raw data.
        """
        # Minimal user: one verified primary mail, one password, one NIN.
        self.data1 = {
            u'_id': ObjectId('547357c3d00690878ae9b620'),
            u'eduPersonPrincipalName': u'guvat-nalif',
            u'mail': u'user@example.net',
            u'mailAliases': [
                {
                    u'added_timestamp': datetime.fromisoformat('2014-12-18T11:25:19.804000'),
                    u'email': u'user@example.net',
                    u'verified': True,
                    u'primary': True,
                }
            ],
            u'passwords': [
                {
                    u'created_ts': datetime.fromisoformat('2014-11-24T16:22:49.188000'),
                    u'credential_id': '54735b588a7d2a2c4ec3e7d0',
                    u'salt': u'$NDNv1H1$315d7$32$32$',
                    u'created_by': u'dashboard',
                    u'is_generated': False,
                }
            ],
            u'norEduPersonNIN': [u'197801012345'],
            u'subject': u'physical person',
            u'eduPersonEntitlement': [u'http://foo.example.org'],
            u'preferredLanguage': u'en',
        }
        # Richer user: two mails, a phone, password + verified U2F token,
        # and an external profile blob.
        self.data2 = {
            u'_id': ObjectId('549190b5d00690878ae9b622'),
            u'displayName': u'Some \xf6ne',
            u'eduPersonPrincipalName': u'birub-gagoz',
            u'givenName': u'Some',
            u'mail': u'some.one@gmail.com',
            u'mailAliases': [
                {u'email': u'someone+test1@gmail.com', u'verified': True},
                {
                    u'added_timestamp': datetime.fromisoformat('2014-12-17T14:35:14.728000'),
                    u'email': u'some.one@gmail.com',
                    u'verified': True,
                },
            ],
            u'phone': [
                {
                    u'created_ts': datetime.fromisoformat('2014-12-18T09:11:35.078000'),
                    u'number': u'+46702222222',
                    u'primary': True,
                    u'verified': True,
                }
            ],
            u'passwords': [
                {
                    u'created_ts': datetime.fromisoformat('2015-02-11T13:58:42.327000'),
                    u'id': ObjectId('54db60128a7d2a26e8690cda'),
                    u'salt': u'$NDNv1H1$db011fc$32$32$',
                    u'is_generated': False,
                    u'source': u'dashboard',
                },
                {
                    'version': 'U2F_V2',
                    'app_id': 'unit test',
                    'keyhandle': 'U2F SWAMID AL2',
                    'public_key': 'foo',
                    'verified': True,
                    'proofing_method': METHOD_SWAMID_AL2_MFA,
                    'proofing_version': 'testing',
                },
            ],
            u'profiles': [
                {
                    'created_by': 'test application',
                    'created_ts': datetime.fromisoformat('2020-02-04T17:42:33.696751'),
                    'owner': 'test owner 1',
                    'schema': 'test schema',
                    'profile_data': {
                        'a_string': 'I am a string',
                        'an_int': 3,
                        'a_list': ['eins', 2, 'drei'],
                        'a_map': {'some': 'data'},
                    },
                }
            ],
            u'preferredLanguage': u'sv',
            u'surname': u'\xf6ne',
            u'subject': u'physical person',
        }
        self._setup_user1()
        self._setup_user2()
def _setup_user1(self):
mailAliases_list = [
MailAddress(
created_ts=datetime.fromisoformat('2014-12-18T11:25:19.804000'),
email='user@example.net',
is_verified=True,
is_primary=True,
)
]
password_list = [
Password(
created_ts=datetime.fromisoformat('2014-11-24T16:22:49.188000'),
credential_id='54735b588a7d2a2c4ec3e7d0',
salt='$NDNv1H1$315d7$32$32$',
created_by='dashboard',
is_generated=False,
)
]
nin_list = [
Nin(
number='197801012345',
created_ts=datetime.fromisoformat('2014-11-24T16:22:49.188000'),
is_verified=True,
is_primary=True,
created_by='dashboard',
)
]
self.user1 = User(
user_id=ObjectId('547357c3d00690878ae9b620'),
eppn='guvat-nalif',
mail_addresses=MailAddressList(mailAliases_list),
credentials=CredentialList(password_list),
nins=NinList(nin_list),
subject=SubjectType('physical person'),
entitlements=[u'http://foo.example.org'],
language='en',
)
def _setup_user2(self):
mailAliases_list = [
MailAddress(email='someone+test1@gmail.com', is_verified=True),
MailAddress(
email='some.one@gmail.com',
created_ts=datetime.fromisoformat('2014-12-17T14:35:14.728000'),
is_verified=True,
is_primary=True,
),
]
phone_list = [
PhoneNumber(
number='+46702222222',
created_ts=datetime.fromisoformat('2014-12-18T09:11:35.078000'),
is_primary=True,
is_verified=True,
)
]
credential_list = [
Password(
created_ts=datetime.fromisoformat('2015-02-11T13:58:42.327000'),
credential_id='54db60128a7d2a26e8690cda',
salt='$NDNv1H1$db011fc$32$32$',
is_generated=False,
created_by='dashboard',
),
U2F(
version='U2F_V2',
app_id='unit test',
keyhandle='U2F SWAMID AL2',
public_key='foo',
is_verified=True,
proofing_method=METHOD_SWAMID_AL2_MFA,
proofing_version='testing',
),
]
profile = Profile(
created_by='test application',
created_ts=datetime.fromisoformat('2020-02-04T17:42:33.696751'),
owner='test owner 1',
schema='test schema',
profile_data={
'a_string': 'I am a string',
'an_int': 3,
'a_list': ['eins', 2, 'drei'],
'a_map': {'some': 'data'},
},
)
self.user2 = User(
user_id=ObjectId('549190b5d00690878ae9b622'),
eppn='birub-gagoz',
display_name='Some \xf6ne',
given_name='Some',
mail_addresses=MailAddressList(mailAliases_list),
phone_numbers=PhoneNumberList(phone_list),
credentials=CredentialList(credential_list),
profiles=ProfileList([profile]),
language='sv',
surname='\xf6ne',
subject=SubjectType('physical person'),
)
def test_user_id(self):
self.assertEqual(self.user1.user_id, self.data1['_id'])
def test_eppn(self):
self.assertEqual(self.user1.eppn, self.data1['eduPersonPrincipalName'])
def test_given_name(self):
self.assertEqual(self.user2.given_name, self.data2['givenName'])
def test_display_name(self):
self.assertEqual(self.user2.display_name, self.data2['displayName'])
def test_surname(self):
self.assertEqual(self.user2.surname, self.data2['surname'])
def test_mail_addresses(self):
self.assertEqual(self.user1.mail_addresses.primary.email, self.data1['mailAliases'][0]['email'])
def test_passwords(self):
"""
Test that we get back a dict identical to the one we put in for old-style userdb data.
"""
expected = self.data1['passwords']
obtained = self.user1.credentials.to_list_of_dicts()
# modified_ts is added when not present, verify it is current
modified_ts = obtained[0].pop('modified_ts')
now = utc_now()
assert (now - modified_ts).total_seconds() < 2
assert obtained == expected
def test_obsolete_attributes(self):
"""
Test that some obsolete attributes don't cause parse failures.
"""
data = self.user1.to_dict()
data['postalAddress'] = {'foo': 'bar'}
data['date'] = 'anything'
data['csrf'] = 'long and secret string'
data['mailAliases'][0]['verification_code'] = '123456789'
user = User.from_dict(data)
expected = self.user1.to_dict()
obtained = user.to_dict()
assert obtained == expected
data = self.user2.to_dict()
data['phone'][0]['verification_code'] = '123456789'
user = User.from_dict(data)
expected = self.user2.to_dict()
obtained = user.to_dict()
assert obtained == expected
def test_unknown_attributes(self):
"""
Test parsing a document with unknown data in it.
"""
data = self.data1
data['unknown_attribute'] = 'something'
with self.assertRaises(TypeError):
User.from_dict(data)
def test_incomplete_signup_user(self):
"""
Test parsing the incomplete documents left in the central userdb by older Signup application.
"""
data = {
u'_id': ObjectId(),
u'eduPersonPrincipalName': u'vohon-mufus',
u'mail': u'olle@example.org',
u'mailAliases': [{u'email': u'olle@example.org', u'verified': False}],
}
with self.assertRaises(UserHasNotCompletedSignup):
User.from_dict(data)
data['subject'] = 'physical person' # later signup added this attribute
with self.assertRaises(UserHasNotCompletedSignup):
User.from_dict(data)
data[u'mailAliases'][0]['verified'] = True
data['surname'] = 'not signup-incomplete anymore'
data['passwords'] = [
{
u'created_ts': datetime.fromisoformat('2014-09-04T08:57:07.362000'),
u'credential_id': str(ObjectId()),
u'salt': u'salt',
u'created_by': u'dashboard',
u'is_generated': False,
}
]
user = User.from_dict(data)
self.assertEqual(user.surname, data['surname'])
expected = data['passwords']
obtained = user.credentials.to_list_of_dicts()
assert obtained == expected
def test_revoked_user(self):
"""
Test ability to identify revoked users.
"""
data = {
'_id': ObjectId(),
'eduPersonPrincipalName': 'binib-mufus',
'revoked_ts': datetime.fromisoformat('2015-05-26T08:33:56.826000'),
'passwords': [],
}
with self.assertRaises(UserIsRevoked):
User.from_dict(data)
def test_user_with_no_primary_mail(self):
mail = u'yahoo@example.com'
data = {
u'_id': ObjectId(),
u'eduPersonPrincipalName': u'lutol-bafim',
u'mailAliases': [{u'email': mail, u'verified': True}],
u'passwords': [
{
u'created_ts': datetime.fromisoformat('2014-09-04T08:57:07.362000'),
u'credential_id': str(ObjectId()),
u'salt': u'salt',
u'source': u'dashboard',
}
],
}
user = User.from_dict(data)
self.assertEqual(mail, user.mail_addresses.primary.email)
def test_user_with_indirectly_verified_primary_mail(self):
"""
If a user has passwords set, the 'mail' attribute will be considered indirectly verified.
"""
mail = u'yahoo@example.com'
data = {
u'_id': ObjectId(),
u'eduPersonPrincipalName': u'lutol-bafim',
u'mail': mail,
u'mailAliases': [{u'email': mail, u'verified': False}],
u'passwords': [
{
u'created_ts': datetime.fromisoformat('2014-09-04T08:57:07.362000'),
u'credential_id': str(ObjectId()),
u'salt': u'salt',
u'source': u'dashboard',
}
],
}
user = User.from_dict(data)
self.assertEqual(mail, user.mail_addresses.primary.email)
def test_user_with_indirectly_verified_primary_mail_and_explicit_primary_mail(self):
"""
If a user has manage to verify a mail address in the new style with the same address still
set in old style mail property. Do not make old mail address primary if a primary all ready exists.
"""
old_mail = u'yahoo@example.com'
new_mail = u'not_yahoo@example.com'
data = {
u'_id': ObjectId(),
u'eduPersonPrincipalName': u'lutol-bafim',
u'mail': old_mail,
u'mailAliases': [
{u'email': old_mail, u'verified': True, u'primary': False},
{u'email': new_mail, u'verified': True, u'primary': True},
],
u'passwords': [
{
u'created_ts': datetime.fromisoformat('2014-09-04T08:57:07.362000'),
u'credential_id': str(ObjectId()),
u'salt': u'salt',
u'source': u'dashboard',
}
],
}
user = User.from_dict(data)
self.assertEqual(new_mail, user.mail_addresses.primary.email)
def test_user_with_csrf_junk_in_mail_address(self):
"""
For a long time, Dashboard leaked CSRF tokens into the mail address dicts.
"""
mail = u'yahoo@example.com'
data = {
u'_id': ObjectId(),
u'eduPersonPrincipalName': u'test-test',
u'mailAliases': [{u'email': mail, u'verified': True, u'csrf': u'6ae1d4e95305b72318a683883e70e3b8e302cd75'}],
u'passwords': [
{
u'created_ts': datetime.fromisoformat('2014-09-04T08:57:07.362000'),
u'credential_id': str(ObjectId()),
u'salt': u'salt',
u'source': u'dashboard',
}
],
}
user = User.from_dict(data)
self.assertEqual(mail, user.mail_addresses.primary.email)
def test_to_dict(self):
"""
Test that User objects can be recreated.
"""
d1 = self.user1.to_dict()
u2 = User.from_dict(d1)
d2 = u2.to_dict()
self.assertEqual(d1, d2)
def test_modified_ts(self):
"""
Test the modified_ts property.
"""
_time1 = self.user1.modified_ts
assert _time1 is None
# update to current time
self.user1.modified_ts = datetime.utcnow()
_time2 = self.user1.modified_ts
self.assertNotEqual(_time1, _time2)
# set to a datetime instance
self.user1.modified_ts = datetime.utcnow()
self.assertNotEqual(_time2, self.user1.modified_ts)
def test_two_unverified_non_primary_phones(self):
"""
Test that the first entry in the `phone' list is chosen as primary when none are verified.
"""
number1 = u'+9112345678'
number2 = u'+9123456789'
data = {
u'_id': ObjectId(),
u'displayName': u'xxx yyy',
u'eduPersonPrincipalName': u'pohig-test',
u'givenName': u'xxx',
u'mail': u'test@gmail.com',
u'mailAliases': [{u'email': u'test@gmail.com', u'verified': True}],
u'phone': [
{
u'csrf': u'47d42078719b8377db622c3ff85b94840b483c92',
u'number': number1,
u'primary': False,
u'verified': False,
},
{
u'csrf': u'47d42078719b8377db622c3ff85b94840b483c92',
u'number': number2,
u'primary': False,
u'verified': False,
},
],
u'passwords': [
{
u'created_ts': datetime.fromisoformat('2014-06-29T17:52:37.830000'),
u'credential_id': str(ObjectId()),
u'salt': u'$NDNv1H1$foo$32$32$',
u'source': u'dashboard',
}
],
u'preferredLanguage': u'en',
u'surname': u'yyy',
}
user = User.from_dict(data)
self.assertEqual(user.phone_numbers.primary, None)
def test_two_non_primary_phones(self):
"""
Test that the first verified number is chosen as primary, if there is a verified number.
"""
number1 = u'+9112345678'
number2 = u'+9123456789'
data = {
u'_id': ObjectId(),
u'displayName': u'xxx yyy',
u'eduPersonPrincipalName': u'pohig-test',
u'givenName': u'xxx',
u'mail': u'test@gmail.com',
u'mailAliases': [{u'email': u'test@gmail.com', u'verified': True}],
u'phone': [
{
u'csrf': u'47d42078719b8377db622c3ff85b94840b483c92',
u'number': number1,
u'primary': False,
u'verified': False,
},
{
u'csrf': u'47d42078719b8377db622c3ff85b94840b483c92',
u'number': number2,
u'primary': False,
u'verified': True,
},
],
u'passwords': [
{
u'created_ts': datetime.fromisoformat('2014-06-29T17:52:37.830000'),
u'credential_id': str(ObjectId()),
u'salt': u'$NDNv1H1$foo$32$32$',
u'source': u'dashboard',
}
],
u'preferredLanguage': u'en',
u'surname': u'yyy',
}
user = User.from_dict(data)
self.assertEqual(user.phone_numbers.primary.number, number2)
def test_primary_non_verified_phone(self):
"""
Test that if a non verified phone number is primary, due to earlier error, then that primary flag is removed.
"""
data = {
u'_id': ObjectId(),
u'displayName': u'xxx yyy',
u'eduPersonPrincipalName': u'pohig-test',
u'givenName': u'xxx',
u'mail': u'test@gmail.com',
u'mailAliases': [{u'email': u'test@gmail.com', u'verified': True}],
u'phone': [
{
u'csrf': u'47d42078719b8377db622c3ff85b94840b483c92',
u'number': u'+9112345678',
u'primary': True,
u'verified': False,
}
],
u'passwords': [
{
u'created_ts': datetime.fromisoformat('2014-06-29T17:52:37.830000'),
u'credential_id': str(ObjectId()),
u'salt': u'$NDNv1H1$foo$32$32$',
u'source': u'dashboard',
}
],
u'preferredLanguage': u'en',
u'surname': u'yyy',
}
user = User.from_dict(data)
for number in user.phone_numbers.to_list():
self.assertEqual(number.is_primary, False)
def test_primary_non_verified_phone2(self):
"""
Test that if a non verified phone number is primary, due to earlier error, then that primary flag is removed.
"""
data = {
u'_id': ObjectId(),
u'eduPersonPrincipalName': u'pohig-test',
u'mail': u'test@gmail.com',
u'mailAliases': [{u'email': u'test@gmail.com', u'verified': True}],
u'phone': [
{u'number': u'+11111111111', u'primary': True, u'verified': False},
{u'number': u'+22222222222', u'primary': False, u'verified': True,},
],
u'passwords': [
{
u'created_ts': datetime.fromisoformat('2014-06-29T17:52:37.830000'),
u'id': ObjectId(),
u'salt': u'$NDNv1H1$foo$32$32$',
u'source': u'dashboard',
}
],
}
user = User.from_dict(data)
self.assertEqual(user.phone_numbers.primary.number, u'+22222222222')
def test_user_tou_no_created_ts(self):
"""
Basic test for user ToU.
"""
tou_dict = {
'event_id': ObjectId(),
'event_type': 'tou_event',
'version': '1',
'created_by': 'unit test',
}
tou_events = ToUList([tou_dict])
data = self.data1
data.update({'tou': tou_events.to_list_of_dicts()})
user = User.from_dict(data)
# If we create the ToU from a dict w/o created_ts key, the created object will carry a _no_created_ts_in_db
# attr set to True, and therefore the to_dict method will wipe out the created_ts key
self.assertFalse(user.tou.has_accepted('1', reaccept_interval=94608000)) # reaccept_interval seconds (3 years)
def test_user_tou(self):
"""
Basic test for user ToU.
"""
tou_dict = {
'event_id': ObjectId(),
'event_type': 'tou_event',
'version': '1',
'created_by': 'unit test',
'created_ts': datetime.utcnow(),
}
tou_events = ToUList([tou_dict])
data = self.data1
data.update({'tou': tou_events.to_list_of_dicts()})
user = User.from_dict(data)
self.assertTrue(user.tou.has_accepted('1', reaccept_interval=94608000)) # reaccept_interval seconds (3 years)
self.assertFalse(user.tou.has_accepted('2', reaccept_interval=94608000)) # reaccept_interval seconds (3 years)
def test_locked_identity_load(self):
locked_identity = {'created_by': 'test', 'identity_type': 'nin', 'number': '197801012345'}
data = self.data1
data['locked_identity'] = [locked_identity]
user = User.from_dict(data)
self.assertTrue(user.locked_identity)
self.assertIsInstance(user.locked_identity.find('nin').created_by, string_types)
self.assertIsInstance(user.locked_identity.find('nin').created_ts, datetime)
self.assertIsInstance(user.locked_identity.find('nin').identity_type, string_types)
self.assertIsInstance(user.locked_identity.find('nin').number, string_types)
def test_locked_identity_set(self):
locked_identity = {'created_by': 'test', 'identity_type': 'nin', 'number': '197801012345'}
user = User.from_dict(self.data1)
locked_nin = LockedIdentityNin.from_dict(
dict(number=locked_identity['number'], created_by=locked_identity['created_by'],)
)
user.locked_identity.add(locked_nin)
self.assertEqual(user.locked_identity.count, 1)
locked_nin = user.locked_identity.find('nin')
self.assertIsInstance(locked_nin.created_by, string_types)
self.assertIsInstance(locked_nin.created_ts, datetime)
self.assertIsInstance(locked_nin.identity_type, string_types)
self.assertIsInstance(locked_nin.number, string_types)
def test_locked_identity_to_dict(self):
locked_identity = {'created_by': 'test', 'identity_type': 'nin', 'number': '197801012345'}
user = User.from_dict(self.data1)
locked_nin = LockedIdentityNin.from_dict(
dict(number=locked_identity['number'], created_by=locked_identity['created_by'],)
)
user.locked_identity.add(locked_nin)
old_user = User.from_dict(user.to_dict())
self.assertEqual(user.locked_identity.count, 1)
self.assertIsInstance(old_user.locked_identity.to_list()[0].created_by, string_types)
self.assertIsInstance(old_user.locked_identity.to_list()[0].created_ts, datetime)
self.assertIsInstance(old_user.locked_identity.to_list()[0].identity_type, string_types)
self.assertIsInstance(old_user.locked_identity.to_list()[0].number, string_types)
new_user = User.from_dict(user.to_dict())
self.assertEqual(user.locked_identity.count, 1)
self.assertIsInstance(new_user.locked_identity.to_list()[0].created_by, string_types)
self.assertIsInstance(new_user.locked_identity.to_list()[0].created_ts, datetime)
self.assertIsInstance(new_user.locked_identity.to_list()[0].identity_type, string_types)
self.assertIsInstance(new_user.locked_identity.to_list()[0].number, string_types)
def test_locked_identity_remove(self):
locked_identity = {'created_by': 'test', 'identity_type': 'nin', 'number': '197801012345'}
user = User.from_dict(self.data1)
locked_nin = LockedIdentityNin.from_dict(
dict(number=locked_identity['number'], created_by=locked_identity['created_by'],)
)
user.locked_identity.add(locked_nin)
with self.assertRaises(EduIDUserDBError):
user.locked_identity.remove('nin')
def test_orcid(self):
id_token = {
"aud": ["APP_ID"],
"auth_time": 1526389879,
"exp": 1526392540,
"iat": 1526391940,
"iss": "https://op.example.org",
"sub": "subject_identifier",
"nonce": "a_nonce_token",
}
oidc_data = {
"access_token": "b8b8ca5d-b233-4d49-830a-ede934c626d3",
"expires_in": 631138518,
"refresh_token": "a110e7d2-4968-42d4-a91d-f379b55a0e60",
"token_type": "bearer",
}
orcid = "user_orcid"
id_token['created_by'] = 'test'
oidc_id_token = OidcIdToken.from_dict(id_token)
oidc_data['created_by'] = 'test'
oidc_data['id_token'] = oidc_id_token
oidc_authz = OidcAuthorization.from_dict(oidc_data)
orcid_element = Orcid.from_dict(dict(id=orcid, oidc_authz=oidc_authz, created_by='test'))
user = User.from_dict(self.data1)
user.orcid = orcid_element
old_user = User.from_dict(user.to_dict())
self.assertIsNotNone(old_user.orcid)
self.assertIsInstance(old_user.orcid.created_by, string_types)
self.assertIsInstance(old_user.orcid.created_ts, datetime)
self.assertIsInstance(old_user.orcid.id, string_types)
self.assertIsInstance(old_user.orcid.oidc_authz, OidcAuthorization)
self.assertIsInstance(old_user.orcid.oidc_authz.id_token, OidcIdToken)
new_user = User.from_dict(user.to_dict())
self.assertIsNotNone(new_user.orcid)
self.assertIsInstance(new_user.orcid.created_by, string_types)
self.assertIsInstance(new_user.orcid.created_ts, datetime)
self.assertIsInstance(new_user.orcid.id, string_types)
self.assertIsInstance(new_user.orcid.oidc_authz, OidcAuthorization)
self.assertIsInstance(new_user.orcid.oidc_authz.id_token, OidcIdToken)
def test_profiles(self):
self.assertIsNotNone(self.user1.profiles)
self.assertEqual(self.user1.profiles.count, 0)
self.assertIsNotNone(self.user2.profiles)
self.assertEqual(self.user2.profiles.count, 1)
def test_user_verified_credentials(self):
ver = [x for x in self.user2.credentials.to_list() if x.is_verified]
keys = [x.key for x in ver]
self.assertEqual(keys, [_keyid('U2F SWAMID AL2' + 'foo')])
def test_user_unverified_credential(self):
cred = [x for x in self.user2.credentials.to_list() if x.is_verified][0]
self.assertEqual(cred.proofing_method, METHOD_SWAMID_AL2_MFA)
_dict1 = cred.to_dict()
self.assertEqual(_dict1['verified'], True)
self.assertEqual(_dict1['proofing_method'], METHOD_SWAMID_AL2_MFA)
self.assertEqual(_dict1['proofing_version'], 'testing')
cred.is_verified = False
_dict2 = cred.to_dict()
self.assertFalse('verified' in _dict2)
self.assertFalse('proofing_method' in _dict2)
self.assertFalse('proofing_version' in _dict2)
def test_both_mobile_and_phone(self):
""" Test user that has both 'mobile' and 'phone' """
phone = [
{'number': '+4673123', 'primary': True, 'verified': True},
{'created_by': 'phone', 'number': '+4670999', 'primary': False, 'verified': False,},
]
user = User.from_dict(
data={
'_id': ObjectId(),
'eduPersonPrincipalName': 'test-test',
'passwords': [],
'mobile': [{'mobile': '+4673123', 'primary': True, 'verified': True}],
'phone': phone,
}
)
out = user.to_dict()['phone']
assert phone == out, 'The phone objects differ when using both phone and mobile'
def test_both_sn_and_surname(self):
""" Test user that has both 'sn' and 'surname' """
user = User.from_dict(
data={
'_id': ObjectId(),
'eduPersonPrincipalName': 'test-test',
'passwords': [],
'surname': 'Right',
'sn': 'Wrong',
}
)
self.assertEqual('Right', user.to_dict()['surname'])
def test_rebuild_user1(self):
data = self.user1.to_dict()
new_user1 = User.from_dict(data)
self.assertEqual(new_user1.eppn, 'guvat-nalif')
def test_rebuild_user2(self):
data = self.user2.to_dict()
new_user2 = User.from_dict(data)
self.assertEqual(new_user2.eppn, 'birub-gagoz')
def test_mail_addresses_from_dict(self):
"""
Test that we get back a correct list of dicts for old-style userdb data.
"""
mailAliases_list = [
{'email': 'someone+test1@gmail.com', 'verified': True},
{
'created_ts': datetime.fromisoformat('2014-12-17T14:35:14.728000'),
'email': 'some.one@gmail.com',
'verified': True,
'primary': True,
},
]
mail_addresses = MailAddressList(mailAliases_list)
to_dict_output = mail_addresses.to_list_of_dicts()
# The {'email': 'someone+test1@gmail.com', 'verified': True} should've beem flagged as primary: False
found = False
for this in to_dict_output:
if this['email'] == 'someone+test1@gmail.com':
assert this['primary'] == False
# now delete the marking from the to_list_of_dicts output to be able to compare it to the input below
del this['primary']
found = True
assert found == True, 'The non-primary e-mail in the input dict was not marked as non-primary'
assert to_dict_output == mailAliases_list
def test_phone_numbers_from_dict(self):
"""
Test that we get back a dict identical to the one we put in for old-style userdb data.
"""
phone_list = [
{
'created_ts': datetime.fromisoformat('2014-12-18T09:11:35.078000'),
'number': '+46702222222',
'primary': True,
'verified': True,
}
]
phone_numbers = PhoneNumberList(phone_list)
to_dict_result = phone_numbers.to_list_of_dicts()
assert to_dict_result == phone_list
def test_passwords_from_dict(self):
"""
Test that we get back a dict identical to the one we put in for old-style userdb data.
"""
first = {
'created_ts': datetime.fromisoformat('2015-02-11T13:58:42.327000'),
'id': ObjectId('54db60128a7d2a26e8690cda'),
'salt': '$NDNv1H1$db011fc$32$32$',
'is_generated': False,
'source': 'dashboard',
}
second = {
'version': 'U2F_V2',
'app_id': 'unit test',
'keyhandle': 'U2F SWAMID AL2',
'public_key': 'foo',
'verified': True,
'proofing_method': METHOD_SWAMID_AL2_MFA,
'proofing_version': 'testing',
}
password_list = [first, second]
passwords = CredentialList(password_list)
to_dict_result = passwords.to_list_of_dicts()
# adjust for expected changes
first['created_by'] = first.pop('source')
first['credential_id'] = str(first.pop('id'))
second['description'] = ''
expected = [first, second]
assert to_dict_result == expected
def test_phone_numbers(self):
"""
Test that we get back a dict identical to the one we put in for old-style userdb data.
"""
to_dict_result = self.user2.phone_numbers.to_list_of_dicts()
expected = self.data2['phone']
obtained = to_dict_result
# modified_ts is added when not present, verify it is current
modified_ts = obtained[0].pop('modified_ts')
now = utc_now()
assert (now - modified_ts).total_seconds() < 2
assert obtained == expected
| 38.682127 | 120 | 0.555578 | 3,662 | 34,195 | 5.006827 | 0.121518 | 0.017453 | 0.020289 | 0.020944 | 0.62907 | 0.567276 | 0.528497 | 0.477229 | 0.446305 | 0.404854 | 0 | 0.061232 | 0.324199 | 34,195 | 883 | 121 | 38.725934 | 0.732182 | 0.069162 | 0 | 0.426027 | 0 | 0 | 0.199126 | 0.0561 | 0 | 0 | 0 | 0 | 0.119178 | 1 | 0.058904 | false | 0.035616 | 0.020548 | 0.00137 | 0.082192 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
926458d1d9c97fc6abf90370ad0d99e05fe59844 | 13,515 | py | Python | ivy_vision/quantization.py | unifyai/vision | 7a1d0115da1dcc640709a1bc92e7e31eb63ed566 | [
"Apache-2.0"
] | 11 | 2022-02-09T02:51:59.000Z | 2022-03-31T09:35:24.000Z | ivy_vision/quantization.py | unifyai/vision | 7a1d0115da1dcc640709a1bc92e7e31eb63ed566 | [
"Apache-2.0"
] | 5 | 2022-03-11T03:46:18.000Z | 2022-03-31T06:07:31.000Z | ivy_vision/quantization.py | unifyai/vision | 7a1d0115da1dcc640709a1bc92e7e31eb63ed566 | [
"Apache-2.0"
] | 1 | 2022-02-20T12:25:11.000Z | 2022-02-20T12:25:11.000Z | """Collection of Quantization Functions"""
# global
import ivy as _ivy
# local
from ivy_vision import single_view_geometry as _ivy_svg
MIN_DENOMINATOR = 1e-12
MIN_DEPTH_DIFF = 1e-2
def quantize_to_image(pixel_coords, final_image_dims, feat=None, feat_prior=None, with_db=False,
pixel_coords_var=1e-3, feat_var=1e-3, pixel_coords_prior_var=1e12,
feat_prior_var=1e12, var_threshold=(1e-3, 1e12), uniform_pixel_coords=None,
batch_shape=None, dev_str=None):
"""Quantize pixel co-ordinates with d feature channels (for depth, rgb, normals
etc.), from images :math:`\mathbf{X}\in\mathbb{R}^{input\_images\_shape×(2+d)}`,
which may have been reprojected from a host of different cameras (leading to
non-integer pixel values), to a new quantized pixel co-ordinate image with the
same feature channels :math:`\mathbf{X}\in\mathbb{R}^{h×w×(2+d)}`, and with
integer pixel co-ordinates. Duplicates during the quantization are either
probabilistically fused based on variance, or the minimum depth is chosen when
using depth buffer mode.
Parameters
----------
pixel_coords
Pixel co-ordinates *[batch_shape,input_size,2]*
final_image_dims
Image dimensions of the final image.
feat
Features (i.e. depth, rgb, encoded), default is None. *[batch_shape,input_size,d]*
feat_prior
Prior feature image mean, default is None. *[batch_shape,input_size,d]*
with_db
Whether or not to use depth buffer in rendering, default is false
pixel_coords_var
Pixel coords variance *[batch_shape,input_size,2]* (Default value = 1e-3)
feat_var
Feature variance *[batch_shape,input_size,d]* (Default value = 1e-3)
pixel_coords_prior_var
Pixel coords prior variance *[batch_shape,h,w,2]* (Default value = 1e12)
feat_prior_var
Features prior variance *[batch_shape,h,w,3]* (Default value = 1e12)
var_threshold
Variance threshold, for projecting valid coords and clipping
*[batch_shape,2+d,2]* (Default value = (1e-3)
uniform_pixel_coords
Homogeneous uniform (integer) pixel co-ordinate images,
inferred from final_image_dims
if None *[batch_shape,h,w,3]* (Default value = None)
batch_shape
Shape of batch. Assumed no batches if None. (Default value = None)
dev_str
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
Same as x if None. (Default value = None)
Returns
-------
ret
Quantized pixel co-ordinates image with d feature channels
(for depth, rgb, normals etc.) *[batch_shape,h,w,2+d]*,
maybe the quantized variance, *[batch_shape,h,w,2+d]*, and scatter counter image
*[batch_shape,h,w,1]*
"""
# ToDo: make variance fully optional. If not specified,
# then do not compute and scatter during function call for better efficiency.
# config
if batch_shape is None:
batch_shape = pixel_coords.shape[:-2]
if dev_str is None:
dev_str = _ivy.dev_str(pixel_coords)
if feat is None:
d = 0
else:
d = feat.shape[-1]
min_depth_diff = _ivy.array([MIN_DEPTH_DIFF], dev_str=dev_str)
red = 'min' if with_db else 'sum'
# shapes as list
batch_shape = list(batch_shape)
final_image_dims = list(final_image_dims)
num_batch_dims = len(batch_shape)
# variance threshold
if isinstance(var_threshold, tuple) or isinstance(var_threshold, list):
ones = _ivy.ones(batch_shape + [1, 2 + d, 1])
var_threshold = _ivy.concatenate((ones * var_threshold[0], ones * var_threshold[1]), -1)
else:
var_threshold = _ivy.reshape(var_threshold, batch_shape + [1, 2 + d, 2])
# uniform pixel coords
if uniform_pixel_coords is None:
uniform_pixel_coords =\
_ivy_svg.create_uniform_pixel_coords_image(final_image_dims, batch_shape, dev_str=dev_str)
uniform_pixel_coords = uniform_pixel_coords[..., 0:2]
# Extract Values #
feat_prior = _ivy.ones_like(feat) * feat_prior if isinstance(feat_prior, float) else feat_prior
pixel_coords_var = _ivy.ones_like(pixel_coords) * pixel_coords_var\
if isinstance(pixel_coords_var, float) else pixel_coords_var
feat_var = _ivy.ones_like(feat) * feat_var if isinstance(feat_var, float) else feat_var
pixel_coords_prior_var = _ivy.ones(batch_shape + final_image_dims + [2]) * pixel_coords_prior_var\
if isinstance(pixel_coords_prior_var, float) else pixel_coords_prior_var
feat_prior_var = _ivy.ones(batch_shape + final_image_dims + [d]) * feat_prior_var\
if isinstance(feat_prior_var, float) else feat_prior_var
# Quantize #
# BS x N x 2
quantized_pixel_coords = _ivy.reshape(_ivy.cast(_ivy.round(pixel_coords), 'int32'), batch_shape + [-1, 2])
# Combine #
# BS x N x (2+D)
pc_n_feat = _ivy.reshape(_ivy.concatenate((pixel_coords, feat), -1), batch_shape + [-1, 2+d])
pc_n_feat_var = _ivy.reshape(_ivy.concatenate((pixel_coords_var, feat_var), -1), batch_shape + [-1, 2+d])
# BS x H x W x (2+D)
prior = _ivy.concatenate((uniform_pixel_coords, feat_prior), -1)
prior_var = _ivy.concatenate((pixel_coords_prior_var, feat_prior_var), -1)
# Validity Mask #
# BS x N x 1
var_validity_mask = \
_ivy.reduce_sum(_ivy.cast(pc_n_feat_var < var_threshold[..., 1], 'int32'), -1, keepdims=True) == 2+d
bounds_validity_mask = _ivy.logical_and(
_ivy.logical_and(quantized_pixel_coords[..., 0:1] >= 0, quantized_pixel_coords[..., 1:2] >= 0),
_ivy.logical_and(quantized_pixel_coords[..., 0:1] <= final_image_dims[1] - 1,
quantized_pixel_coords[..., 1:2] <= final_image_dims[0] - 1)
)
validity_mask = _ivy.logical_and(var_validity_mask, bounds_validity_mask)
# num_valid_indices x len(BS)+2
validity_indices = _ivy.reshape(_ivy.cast(_ivy.indices_where(validity_mask), 'int32'), [-1, num_batch_dims + 2])
num_valid_indices = validity_indices.shape[-2]
if num_valid_indices == 0:
return _ivy.concatenate((uniform_pixel_coords[..., 0:2], feat_prior), -1), \
_ivy.concatenate((pixel_coords_prior_var, feat_prior_var), -1),\
_ivy.zeros_like(feat[..., 0:1], dev_str=dev_str)
# Depth Based Scaling #
mean_depth_min = None
mean_depth_range = None
pc_n_feat_wo_depth_range = None
pc_n_feat_wo_depth_min = None
var_vals_range = None
var_vals_min = None
if with_db:
# BS x N x 1
mean_depth = pc_n_feat[..., 2:3]
# BS x 1 x 1
mean_depth_min = _ivy.reduce_min(mean_depth, -2, keepdims=True)
mean_depth_max = _ivy.reduce_max(mean_depth, -2, keepdims=True)
mean_depth_range = mean_depth_max - mean_depth_min
# BS x N x 1
scaled_depth = (mean_depth - mean_depth_min) / (mean_depth_range * min_depth_diff + MIN_DENOMINATOR)
if d == 1:
# BS x 1 x 1+D
pc_n_feat_wo_depth_min = _ivy.zeros(batch_shape + [1, 0], dev_str=dev_str)
pc_n_feat_wo_depth_range = _ivy.ones(batch_shape + [1, 0], dev_str=dev_str)
else:
# feat without depth
# BS x N x 1+D
pc_n_feat_wo_depth = _ivy.concatenate((pc_n_feat[..., 0:2], pc_n_feat[..., 3:]), -1)
# find the min and max of each value
# BS x 1 x 1+D
pc_n_feat_wo_depth_max = _ivy.reduce_max(pc_n_feat_wo_depth, -2, keepdims=True) + 1
pc_n_feat_wo_depth_min = _ivy.reduce_min(pc_n_feat_wo_depth, -2, keepdims=True) - 1
pc_n_feat_wo_depth_range = pc_n_feat_wo_depth_max - pc_n_feat_wo_depth_min
# BS x N x 1+D
normed_pc_n_feat_wo_depth = (pc_n_feat_wo_depth - pc_n_feat_wo_depth_min) / \
(pc_n_feat_wo_depth_range + MIN_DENOMINATOR)
# combine with scaled depth
# BS x N x 1+D
pc_n_feat_wo_depth_scaled = normed_pc_n_feat_wo_depth + scaled_depth
# BS x N x (2+D)
pc_n_feat = _ivy.concatenate((pc_n_feat_wo_depth_scaled[..., 0:2], mean_depth,
pc_n_feat_wo_depth_scaled[..., 2:]), -1)
# scale variance
# BS x 1 x (2+D)
var_vals_max = _ivy.reduce_max(pc_n_feat_var, -2, keepdims=True) + 1
var_vals_min = _ivy.reduce_min(pc_n_feat_var, -2, keepdims=True) - 1
var_vals_range = var_vals_max - var_vals_min
# BS x N x (2+D)
normed_var_vals = (pc_n_feat_var - var_vals_min) / (var_vals_range + MIN_DENOMINATOR)
pc_n_feat_var = normed_var_vals + scaled_depth
# ready for later reversal with full image dimensions
# BS x 1 x 1 x D
var_vals_min = _ivy.expand_dims(var_vals_min, -2)
var_vals_range = _ivy.expand_dims(var_vals_range, -2)
# Validity Pruning #
# num_valid_indices x (2+D)
pc_n_feat = _ivy.gather_nd(pc_n_feat, validity_indices[..., 0:num_batch_dims + 1])
pc_n_feat_var = _ivy.gather_nd(pc_n_feat_var, validity_indices[..., 0:num_batch_dims + 1])
# num_valid_indices x 2
quantized_pixel_coords = _ivy.gather_nd(quantized_pixel_coords, validity_indices[..., 0:num_batch_dims + 1])
if with_db:
means_to_scatter = pc_n_feat
vars_to_scatter = pc_n_feat_var
else:
# num_valid_indices x (2+D)
vars_to_scatter = 1 / (pc_n_feat_var + MIN_DENOMINATOR)
means_to_scatter = pc_n_feat * vars_to_scatter
# Scatter #
# num_valid_indices x 1
counter = _ivy.ones_like(pc_n_feat[..., 0:1], dev_str=dev_str)
if with_db:
counter *= -1
# num_valid_indices x 2(2+D)+1
values_to_scatter = _ivy.concatenate((means_to_scatter, vars_to_scatter, counter), -1)
# num_valid_indices x (num_batch_dims + 2)
all_indices = _ivy.flip(quantized_pixel_coords, -1)
if num_batch_dims > 0:
all_indices = _ivy.concatenate((validity_indices[..., :-2], all_indices), -1)
# BS x H x W x (2(2+D) + 1)
quantized_img = _ivy.scatter_nd(_ivy.reshape(all_indices, [-1, num_batch_dims + 2]),
_ivy.reshape(values_to_scatter, [-1, 2 * (2 + d) + 1]),
batch_shape + final_image_dims + [2 * (2 + d) + 1],
reduction='replace' if _ivy.backend == 'mxnet' else red)
# BS x H x W x 1
quantized_counter = quantized_img[..., -1:]
if with_db:
invalidity_mask = quantized_counter != -1
else:
invalidity_mask = quantized_counter == 0
if with_db:
# BS x H x W x (2+D)
quantized_mean_scaled = quantized_img[..., 0:2 + d]
quantized_var_scaled = quantized_img[..., 2 + d:2 * (2 + d)]
# BS x H x W x 1
quantized_depth_mean = quantized_mean_scaled[..., 2:3]
# BS x 1 x 1 x 1
mean_depth_min = _ivy.expand_dims(mean_depth_min, -2)
mean_depth_range = _ivy.expand_dims(mean_depth_range, -2)
# BS x 1 x 1 x (1+D)
pc_n_feat_wo_depth_min = _ivy.expand_dims(pc_n_feat_wo_depth_min, -2)
pc_n_feat_wo_depth_range = _ivy.expand_dims(pc_n_feat_wo_depth_range, -2)
# BS x 1 x 1 x (2+D) x 2
var_threshold = _ivy.expand_dims(var_threshold, -3)
# BS x H x W x (1+D)
quantized_mean_wo_depth_scaled = _ivy.concatenate((quantized_mean_scaled[..., 0:2],
quantized_mean_scaled[..., 3:]), -1)
quantized_mean_wo_depth_normed = quantized_mean_wo_depth_scaled - (quantized_depth_mean - mean_depth_min) / \
(mean_depth_range * min_depth_diff + MIN_DENOMINATOR)
quantized_mean_wo_depth = quantized_mean_wo_depth_normed * pc_n_feat_wo_depth_range + pc_n_feat_wo_depth_min
prior_wo_depth = _ivy.concatenate((prior[..., 0:2], prior[..., 3:]), -1)
quantized_mean_wo_depth = _ivy.where(invalidity_mask, prior_wo_depth, quantized_mean_wo_depth)
# BS x H x W x (2+D)
quantized_mean = _ivy.concatenate((quantized_mean_wo_depth[..., 0:2], quantized_depth_mean,
quantized_mean_wo_depth[..., 2:]), -1)
# BS x H x W x (2+D)
quantized_var_normed = quantized_var_scaled - (quantized_depth_mean - mean_depth_min) / \
(mean_depth_range * min_depth_diff + MIN_DENOMINATOR)
quantized_var = _ivy.maximum(quantized_var_normed * var_vals_range + var_vals_min, var_threshold[..., 0])
quantized_var = _ivy.where(invalidity_mask, prior_var, quantized_var)
else:
# BS x H x W x (2+D)
quantized_sum_mean_x_recip_var = quantized_img[..., 0:2 + d]
quantized_var_wo_increase = _ivy.where(invalidity_mask, prior_var,
(1 / (quantized_img[..., 2 + d:2 * (2 + d)] + MIN_DENOMINATOR)))
quantized_var = _ivy.maximum(quantized_var_wo_increase * quantized_counter,
_ivy.expand_dims(var_threshold[..., 0], -2))
quantized_var = _ivy.where(invalidity_mask, prior_var, quantized_var)
quantized_mean = _ivy.where(invalidity_mask, prior, quantized_var_wo_increase * quantized_sum_mean_x_recip_var)
# BS x H x W x (2+D) BS x H x W x (2+D) BS x H x W x 1
return quantized_mean, quantized_var, quantized_counter
| 42.904762 | 119 | 0.645727 | 2,058 | 13,515 | 3.866375 | 0.103984 | 0.017343 | 0.040468 | 0.029408 | 0.513133 | 0.367727 | 0.269071 | 0.205731 | 0.151942 | 0.113234 | 0 | 0.024127 | 0.25172 | 13,515 | 314 | 120 | 43.041401 | 0.762385 | 0.241731 | 0 | 0.103448 | 0 | 0 | 0.003305 | 0 | 0 | 0 | 0 | 0.003185 | 0 | 1 | 0.006897 | false | 0 | 0.013793 | 0 | 0.034483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9267d808febd8df821dcd1afc41b5923d6971fdd | 12,396 | py | Python | ansible/plugins/action/ms_include_jinja_vars.py | makinacorpus/makina-states | 8ae1ccd1a0b614a7f308229c07e1493b06f5883a | [
"BSD-3-Clause"
] | 18 | 2015-02-22T12:53:50.000Z | 2019-03-15T16:45:10.000Z | ansible/plugins/action/ms_include_jinja_vars.py | makinacorpus/makina-states | 8ae1ccd1a0b614a7f308229c07e1493b06f5883a | [
"BSD-3-Clause"
] | 20 | 2015-01-20T22:35:02.000Z | 2017-11-06T11:17:34.000Z | ansible/plugins/action/ms_include_jinja_vars.py | makinacorpus/makina-states | 8ae1ccd1a0b614a7f308229c07e1493b06f5883a | [
"BSD-3-Clause"
] | 5 | 2015-01-13T04:23:09.000Z | 2019-01-03T17:00:31.000Z | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import six
from os import path, walk
import traceback
import re
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_native
from ansible.plugins.action import ActionBase
from mc_states.api import magicstring
class ActionModule(ActionBase):
    """Action plugin that loads YAML/JSON variable files, renders any
    Jinja2 expressions in them (repeatedly, until the text stabilizes),
    and returns the result as Ansible facts.

    Input comes from exactly one of: a single file (``file`` argument or
    bare task params), a directory tree (``dir`` plus the dir-only options
    ``depth``/``files_matching``/``ignore_files``), or inline ``content``.
    """

    # This plugin never copies files to the managed host.
    TRANSFERS_FILES = False

    def _mutually_exclusive(self):
        """Raise AnsibleError for incompatible argument combinations.

        Rejects ``file`` combined with all dir-only options, and rejects
        specifying more than one of content/dir/file.
        """
        dir_arguments = [
            self.source_dir, self.files_matching, self.ignore_files,
            self.depth
        ]
        if self.source_file and None not in dir_arguments:
            # 'file' given together with every directory-only option set.
            err_msg = (
                "Can not include {0} with file argument"
                .format(", ".join(self.VALID_DIR_ARGUMENTS))
            )
            raise AnsibleError(err_msg)
        elif [
            bool(self.content_source),
            bool(self.source_dir),
            bool(self.source_file)
        ].count(True) > 1:
            # At most one input source may be selected.
            err_msg = (
                "You must choose between file/dir/<inline_content>"
            )
            raise AnsibleError(err_msg)

    def _set_dir_defaults(self):
        """Normalize the directory-mode options (depth, matcher, ignores).

        NOTE(review): the dict branch *returns* an error payload instead of
        raising, but run() never inspects this return value — confirm
        whether that branch should raise AnsibleError instead.
        """
        if not self.depth:
            self.depth = 0  # 0 means unlimited (see _traverse_dir_depth)
        if self.files_matching:
            self.matcher = re.compile(r'{0}'.format(self.files_matching))
        else:
            self.matcher = None
        if not self.ignore_files:
            self.ignore_files = list()
        if isinstance(self.ignore_files, str):
            # Allow a whitespace-separated string of patterns.
            self.ignore_files = self.ignore_files.split()
        elif isinstance(self.ignore_files, dict):
            return {
                'failed': True,
                'message': '{0} must be a list'.format(self.ignore_files)
            }

    def _set_args(self):
        """ Set instance variables based on the arguments that were passed
        """
        self.VALID_DIR_ARGUMENTS = [
            'dir', 'depth', 'files_matching', 'ignore_files'
        ]
        self.VALID_FILE_ARGUMENTS = ['file', '_raw_params']
        self.GLOBAL_FILE_ARGUMENTS = ['name', 'content']
        self.VALID_ARGUMENTS = (
            self.VALID_DIR_ARGUMENTS + self.VALID_FILE_ARGUMENTS +
            self.GLOBAL_FILE_ARGUMENTS
        )
        # Reject any task argument this plugin does not understand.
        for arg in self._task.args:
            if arg not in self.VALID_ARGUMENTS:
                err_msg = '{0} is not a valid option in debug'.format(arg)
                raise AnsibleError(err_msg)
        # 'name' nests results under a key; defaults to the role name,
        # and the sentinel '__GLOBAL__' disables nesting entirely.
        self.return_results_as_name = self._task.args.get('name', None)
        if self._task._role:
            if not self.return_results_as_name:
                self.return_results_as_name = self._task._role._role_name
        if self.return_results_as_name == '__GLOBAL__':
            self.return_results_as_name = None
        self.content_source = self._task.args.get('content', None)
        self.source_dir = self._task.args.get('dir', None)
        self.source_file = self._task.args.get('file', None)
        # Bare params ("- ms_include_jinja_vars: foo.yml") act as either
        # inline content (multiline) or a filename (single line).
        try:
            raw_params = self._task.get_ds().get(self._task.action, None)
        except Exception:
            raw_params = None
        if (
            isinstance(raw_params, six.string_types) and
            not self.content_source and
            not self.source_dir and
            not self.source_file
        ):
            if '\n' in raw_params:
                self.content_source = raw_params
            else:
                self.source_file = raw_params
        if (
            not self.content_source and
            not self.source_dir and
            not self.source_file
        ):
            self.source_file = 'main.yml'  # default input
        self.depth = self._task.args.get('depth', None)
        self.files_matching = self._task.args.get('files_matching', None)
        self.ignore_files = self._task.args.get('ignore_files', None)
        self._mutually_exclusive()

    def run(self, tmp=None, task_vars=None):
        """
        Load yml files recursively from a directory.

        Returns the standard action-plugin result dict with the loaded
        variables under 'ansible_facts' (optionally nested under
        self.return_results_as_name).

        NOTE(review): `failed` is only assigned inside the branch bodies;
        if the directory exists but _traverse_dir_depth yields nothing,
        the later `if failed:` would raise NameError — confirm.
        """
        self.VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
        if not task_vars:
            task_vars = dict()
        self.show_content = True  # may be cleared by _load_files (vault)
        self._set_args()
        self._task_vars = task_vars
        # Merge role default vars so templating inside the files can use them.
        if self._task._role:
            self._defaults = self._task._role.get_default_vars(
                dep_chain=self._task.get_dep_chain())
        else:
            self._defaults = {}
        self._task_vars.update(self._defaults)
        results = dict()
        if self.source_dir:
            # Directory mode: walk and load every matching file.
            self._set_dir_defaults()
            self._set_root_dir()
            if path.exists(self.source_dir):
                for root_dir, filenames in self._traverse_dir_depth():
                    failed, err_msg, updated_results = (
                        self._load_files_in_dir(root_dir, filenames)
                    )
                    if not failed:
                        results.update(updated_results)
                    else:
                        break  # stop at the first failing file
            else:
                failed = True
                err_msg = (
                    '{0} directory does not exist'.format(self.source_dir)
                )
        elif self.content_source:
            # Inline-content mode.
            try:
                failed, err_msg, updated_results = (
                    self._load_content(self.content_source)
                )
                if not failed:
                    results.update(updated_results)
            except AnsibleError as e:
                err_msg = to_native(e)
                raise AnsibleError(err_msg)
        else:
            # Single-file mode; resolve via Ansible's search paths.
            try:
                self.source_file = self._find_needle('jinja_vars', self.source_file)
                failed, err_msg, updated_results = (
                    self._load_files(self.source_file)
                )
                if not failed:
                    results.update(updated_results)
            except AnsibleError as e:
                err_msg = to_native(e)
                raise AnsibleError(err_msg)
        # Optionally nest everything under a single top-level name.
        if (
            self.return_results_as_name and
            self.return_results_as_name != '__GLOBAL__'
        ):
            scope = dict()
            scope[self.return_results_as_name] = results
            results = scope
        result = super(ActionModule, self).run(tmp, task_vars)
        if failed:
            result['failed'] = failed
            result['message'] = err_msg
        result['ansible_facts'] = results
        result['_ansible_no_log'] = not self.show_content
        return result

    def _set_root_dir(self):
        """Resolve self.source_dir to an absolute path, relative to the
        role's jinja_vars directory (role context) or to the playbook's
        directory (non-role context)."""
        if self._task._role:
            if self.source_dir.split('/')[0] == 'jinja_vars':
                # Path already starts with 'jinja_vars': join as-is.
                path_to_use = (
                    path.join(self._task._role._role_path, self.source_dir)
                )
                if path.exists(path_to_use):
                    self.source_dir = path_to_use
            else:
                # Otherwise look under <role>/jinja_vars/<dir>.
                path_to_use = (
                    path.join(
                        self._task._role._role_path, 'jinja_vars', self.source_dir
                    )
                )
                self.source_dir = path_to_use
        else:
            # Relative to the file that defined the task.
            current_dir = (
                "/".join(self._task._ds._data_source.split('/')[:-1])
            )
            self.source_dir = path.join(current_dir, self.source_dir)

    def _traverse_dir_depth(self):
        """ Recursively iterate over a directory and sort the files in
        alphabetical order. Do not iterate pass the set depth.
        The default depth is unlimited.

        Yields:
            (root_dir, sorted_filenames) tuples.
        """
        current_depth = 0
        sorted_walk = list(walk(self.source_dir))
        sorted_walk.sort(key=lambda x: x[0])  # deterministic order
        for current_root, current_dir, current_files in sorted_walk:
            current_depth += 1
            if current_depth <= self.depth or self.depth == 0:
                current_files.sort()
                yield (current_root, current_files)
            else:
                break

    def _ignore_file(self, filename):
        """ Return True if a file matches the list of ignore_files.
        Args:
            filename (str): The filename that is being matched against.
        Returns:
            Boolean
        """
        for file_type in self.ignore_files:
            try:
                # Each ignore entry is a regex anchored at the end.
                if re.search(r'{0}$'.format(file_type), filename):
                    return True
            except Exception:
                err_msg = 'Invalid regular expression: {0}'.format(file_type)
                raise AnsibleError(err_msg)
        return False

    def _is_valid_file_ext(self, source_file):
        """ Verify if source file has a valid extension
        Args:
            source_file (str): The full path of source file or source file.
        Returns:
            Bool
        """
        success = False
        file_ext = source_file.split('.')
        # NOTE(review): split() always yields >= 1 element, so this
        # condition is always true.
        if len(file_ext) >= 1:
            if file_ext[-1] in self.VALID_FILE_EXTENSIONS:
                success = True
            return success
        return success

    def _load_files(self, filename):
        """ Loads a file and converts the output into a valid Python dict.
        Args:
            filename (str): The source file.
        Returns:
            Tuple (bool, str, dict)
        """
        if not self._is_valid_file_ext(filename):
            err_msg = (
                '{0} does not have a valid extension: {1}'
                .format(filename, ', '.join(self.VALID_FILE_EXTENSIONS))
            )
            return True, err_msg, {}
        # show_content is False for vault-encrypted files; propagated to
        # _ansible_no_log in run().
        data, show_content = self._loader._get_file_contents(filename)
        self.show_content = show_content
        return self._load_content(data, show_content, filename=filename)

    def _load_content(self, data, show_content=True, filename='<string>'):
        """Template *data* until it stops changing, parse it, and return
        (failed, err_msg, results).  On failure the offending data is
        printed to aid debugging and failed=True is returned.
        """
        results = dict()
        failed = False
        old_data = None
        err_msg = ''
        try:
            data = magicstring(data)
            if (('{{' in data) or ('{%' in data)):
                # Re-template until a fixed point so nested Jinja
                # expressions are fully expanded.
                while old_data != data:
                    old_data = data
                    data = self._templar.template(data)
                data = self._loader.load(data, show_content)
            else:
                data = self._loader.load(data, show_content)
        except (Exception,) as exc:
            trace = traceback.format_exc()
            failed = True
            print('\nFile: {0}'.format(filename))
            err_msg = (
                '{0} does not render correctly: \n{1}\n{2}\n'
                .format(filename, exc, trace))
            if old_data:
                print('\nOriginal data')
                print(old_data)
            if data and (data != old_data):
                print('\nCurrently Rendered data')
                print(data)
            print('\nError')
            print(err_msg)
            return failed, err_msg, results
        if not data:
            data = dict()  # empty file -> empty vars
        if not isinstance(data, dict):
            failed = True
            err_msg = (
                '{0} must be stored as a dictionary/hash'
                .format(filename)
            )
        else:
            results.update(data)
        return failed, err_msg, results

    def _load_files_in_dir(self, root_dir, var_files):
        """ Load the found yml files and update/overwrite the dictionary.
        Args:
            root_dir (str): The base directory of the list of files that is being passed.
            var_files: (list): List of files to iterate over and load into a dictionary.
        Returns:
            Tuple (bool, str, dict)
        """
        results = dict()
        failed = False
        err_msg = ''
        for filename in var_files:
            stop_iter = False
            # Never include main.yml from a role, as that is the default included by the role
            if self._task._role:
                if filename == 'main.yml':
                    stop_iter = True
                    continue
            filepath = path.join(root_dir, filename)
            if self.files_matching:
                # Skip files not matching the user-supplied regex.
                if not self.matcher.search(filename):
                    stop_iter = True
            if not stop_iter and not failed:
                if path.exists(filepath) and not self._ignore_file(filename):
                    failed, err_msg, loaded_data = self._load_files(filepath)
                    if not failed:
                        results.update(loaded_data)
        return failed, err_msg, results
| 34.529248 | 93 | 0.543482 | 1,399 | 12,396 | 4.556826 | 0.162974 | 0.026353 | 0.032627 | 0.023843 | 0.189647 | 0.154667 | 0.112314 | 0.072784 | 0.060549 | 0.060549 | 0 | 0.003076 | 0.370523 | 12,396 | 358 | 94 | 34.625698 | 0.813918 | 0.083979 | 0 | 0.310714 | 0 | 0 | 0.058341 | 0.002254 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039286 | false | 0 | 0.032143 | 0 | 0.117857 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92682d145301e3cf26643177715c46fcd5839c3d | 952 | py | Python | tools/Vitis-AI-Library/graph_runner/test/format_json.py | hito0512/Vitis-AI | 996459fb96cb077ed2f7e789d515893b1cccbc95 | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tools/Vitis-AI-Library/graph_runner/test/format_json.py | wangyifan778/Vitis-AI | f61061eef7550d98bf02a171604c9a9f283a7c47 | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tools/Vitis-AI-Library/graph_runner/test/format_json.py | wangyifan778/Vitis-AI | f61061eef7550d98bf02a171604c9a9f283a7c47 | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | #
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
def main(path=None):
    """Pretty-print a JSON file in place.

    Loads the JSON document and rewrites the same file with 4-space
    indentation and alphabetically sorted keys.

    Args:
        path: Path of the JSON file to reformat.  Defaults to the first
            command-line argument (sys.argv[1]), preserving the original
            script's behavior; the parameter makes the function usable
            and testable without a command line.
    """
    # Resolve the target once and use it consistently (the original
    # mixed json_file_name with repeated sys.argv[1] lookups).
    json_file_name = sys.argv[1] if path is None else path
    print("read from " + json_file_name)
    with open(json_file_name) as json_file:
        meta_data = json.load(json_file)
    print("write to " + json_file_name)
    with open(json_file_name, "w") as f:
        f.write(json.dumps(meta_data, indent=4, sort_keys=True))


if __name__ == '__main__':
    main()
| 28.848485 | 74 | 0.705882 | 151 | 952 | 4.337748 | 0.582781 | 0.091603 | 0.048855 | 0.048855 | 0.048855 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016949 | 0.194328 | 952 | 32 | 75 | 29.75 | 0.837027 | 0.57563 | 0 | 0 | 0 | 0 | 0.071979 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92692d7f00f3377986985af8973ac66784ff3072 | 6,760 | py | Python | Project/scripts/path_planner.py | manasdesai/Path-Planning-and-Control-Theory- | 6bc5a748b88b7226e069ff4837950fba793e28bb | [
"MIT"
] | 6 | 2020-08-02T17:41:07.000Z | 2022-03-01T18:06:06.000Z | Project/scripts/path_planner.py | manasdesai/Path-Planning-and-Control-Theory- | 6bc5a748b88b7226e069ff4837950fba793e28bb | [
"MIT"
] | 1 | 2020-07-30T07:14:25.000Z | 2020-07-31T12:21:12.000Z | Project/scripts/path_planner.py | manasdesai/Path-Planning-and-Control-Theory- | 6bc5a748b88b7226e069ff4837950fba793e28bb | [
"MIT"
] | 3 | 2021-06-18T12:57:24.000Z | 2022-02-28T04:02:50.000Z | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, Point, PoseStamped
from nav_msgs.msg import Path
from std_msgs.msg import Float32MultiArray, Float64
from path_nav.msg import points, mylist
import math
import random
# Latest obstacle list received on the /obstacles topic (set by get_obs).
# NOTE(review): `global` at module level is a no-op; kept for fidelity.
global obs_in_path
obs_in_path = []
global rate
def get_obs(request):  # callback function
    """ROS subscriber callback: store the latest /obstacles message."""
    global obs_in_path
    obs_in_path = request
# ROS wiring: this node subscribes to obstacle updates and publishes the
# planned path.
rospy.init_node('path_planner', anonymous=True)
sub = rospy.Subscriber('/obstacles', mylist, get_obs)
pub = rospy.Publisher('/path', points, queue_size=10)
rate = rospy.Rate(50)  # 50 Hz publish rate
class RRT:
    """
    Rapidly-exploring Random Tree (RRT) path planner in 2-D.

    Grows a tree from `start` toward randomly sampled points until a
    collision-free connection to `goal` is found or `max_iter` samples
    are exhausted.
    """

    class Node:
        """
        RRT Node: a 2-D point, the waypoints from its parent (as a
        path_nav points message), and a parent link.
        """
        def __init__(self, x, y):
            self.x = x
            self.y = y
            self.path = points()
            self.parent = None

    def __init__(self, start, goal, obstacle_list, rand_area, radius,
                 expand_dis=3.0, path_resolution=0.5, goal_sample_rate=5, max_iter=500):
        """
        Setting Parameter

        start: Start Position [x, y]
        goal: Goal Position [x, y]
        obstacle_list: obstacle positions [(x, y), ...]
        rand_area: random sampling interval [min, max] (used for both axes)
        radius: obstacle clearance radius used in collision checks
        expand_dis: maximum distance a new branch may extend per step
        path_resolution: spacing of waypoints along a branch
        goal_sample_rate: percent chance of sampling the goal directly
        max_iter: maximum number of samples before giving up
        """
        self.start = self.Node(start[0], start[1])  # Start Position
        self.end = self.Node(goal[0], goal[1])  # End/Goal Position
        self.min_rand = rand_area[0]  # random sampling area [min_rand, max_rand]
        self.max_rand = rand_area[1]
        self.expand_dis = expand_dis
        self.path_resolution = path_resolution
        self.goal_sample_rate = goal_sample_rate
        self.max_iter = max_iter
        self.obstacle_list = obstacle_list
        self.node_list = []
        self.path = points()
        self.radius = radius

    def planning(self):
        """
        rrt path planning.

        Returns a points message of waypoints on success, or None if no
        path was found within max_iter samples.
        """
        self.node_list = [self.start]
        for _ in range(self.max_iter):
            rnd_node = self.get_random_node()
            nearest_index = self.get_nearest_node_index(self.node_list, rnd_node)
            nearest_node = self.node_list[nearest_index]
            # Extend from the nearest node toward the sample.
            new_node = self.steer(nearest_node, rnd_node, self.expand_dis)
            if self.check_collision(new_node, self.obstacle_list, self.radius):
                self.node_list.append(new_node)
            # If the newest node is within reach of the goal, try to
            # connect directly.
            if self.calc_dist_to_goal(self.node_list[-1].x, self.node_list[-1].y) <= self.expand_dis:
                final_node = self.steer(self.node_list[-1], self.end, self.expand_dis)
                if self.check_collision(final_node, self.obstacle_list, self.radius):
                    return self.generate_final_course(len(self.node_list) - 1)
        return None  # cannot find path

    def steer(self, from_node, to_node, extend_length=float("inf")):
        """Create a new node stepping from from_node toward to_node in
        increments of path_resolution, up to extend_length."""
        new_node = self.Node(from_node.x, from_node.y)
        d, theta = self.calc_distance_and_angle(new_node, to_node)
        new_node.path.x = [new_node.x]
        new_node.path.y = [new_node.y]
        if extend_length > d:
            extend_length = d  # never overshoot the target
        n_expand = math.floor(extend_length / self.path_resolution)
        for _ in range(int(n_expand)):
            new_node.x += self.path_resolution * math.cos(theta)
            new_node.y += self.path_resolution * math.sin(theta)
            new_node.path.x.append(new_node.x)
            new_node.path.y.append(new_node.y)
        d, _ = self.calc_distance_and_angle(new_node, to_node)
        if d <= self.path_resolution:
            # Close enough: snap the final waypoint onto the target.
            new_node.path.x.append(to_node.x)
            new_node.path.y.append(to_node.y)
        new_node.parent = from_node
        return new_node

    def generate_final_course(self, goal_ind):
        """Walk parent links back from node goal_ind to the root and
        return the waypoints, reversed into start-to-goal order (the
        start point itself is dropped)."""
        path = points()
        path.x = [self.end.x]
        path.y = [self.end.y]
        node = self.node_list[goal_ind]
        while node.parent is not None:
            path.x.append(node.x)
            path.y.append(node.y)
            node = node.parent
        path.x.append(node.x)
        path.y.append(node.y)
        path.x.reverse()
        path.y.reverse()
        # Drop the start point; the robot is already there.
        del path.x[0]
        del path.y[0]
        print("--------------")
        print(path.x)
        print(path.y)
        print("..............")
        return path

    def calc_dist_to_goal(self, x, y):
        """Euclidean distance from (x, y) to the goal."""
        dx = x - self.end.x
        dy = y - self.end.y
        return math.hypot(dx, dy)

    def get_random_node(self):
        """Sample a random node; with goal_sample_rate percent chance,
        return the goal itself to bias growth toward it."""
        if random.randint(0, 100) > self.goal_sample_rate:
            rnd = self.Node(random.uniform(self.min_rand, self.max_rand), random.uniform(self.min_rand, self.max_rand))
        else:  # goal point sampling
            rnd = self.Node(self.end.x, self.end.y)
        return rnd  # random node

    @staticmethod
    def get_nearest_node_index(node_list, rnd_node):
        """Index of the node in node_list closest (squared distance) to
        rnd_node."""
        dlist = [(node.x - rnd_node.x) ** 2 + (node.y - rnd_node.y) ** 2 for node in node_list]
        minind = dlist.index(min(dlist))
        return minind  # minimum index

    @staticmethod
    def check_collision(node, obstacleList, radius):
        """Return True when every waypoint of node's path keeps more
        than sqrt(7)*radius clearance from every obstacle."""
        if node is None:
            return False
        for (ox, oy) in obstacleList:
            dx_list = [ox - x for x in node.path.x]
            dy_list = [oy - y for y in node.path.y]
            d_list = [dx * dx + dy * dy for (dx, dy) in zip(dx_list, dy_list)]
            # NOTE(review): the factor 7 inflates the clearance margin;
            # presumably tuned empirically — confirm.
            if min(d_list) <= 7 * (radius ** 2):
                return False  # collision
        return True  # safe

    @staticmethod
    def calc_distance_and_angle(from_node, to_node):
        """Return (distance, heading angle) from from_node to to_node."""
        dx = to_node.x - from_node.x
        dy = to_node.y - from_node.y
        d = math.hypot(dx, dy)
        theta = math.atan2(dy, dx)
        return d, theta
def main(gx=6.0, gy=6.0):
    """Plan a path from the origin to (gx, gy) with RRT, then publish it
    on /path at the configured rate until shutdown.

    Goal point is (6.0, 6.0); start point is the origin.
    """
    print("start " + __file__)
    radius = 0.25  # obstacle clearance radius passed to the planner
    # ==== Search Path with RRT ====
    global obs_in_path
    # Obstacles received from the /obstacles subscriber callback, e.g.
    # [(0, 1.5), (0, 3), (3.0, 0), ...]
    obstacleList = obs_in_path
    # Set Initial parameters
    rrt = RRT(start=[0, 0], goal=[gx, gy], rand_area=[0, 6], obstacle_list=obstacleList, radius=radius)
    path = rrt.planning()
    if path is None:
        print("Cannot find path")
    else:
        print("found path!!")
        # Keep republishing the result so late subscribers receive it.
        while not rospy.is_shutdown():
            global rate
            pub.publish(path)
            rate.sleep()
    rospy.spin()


if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        pass
| 32.037915 | 205 | 0.568639 | 949 | 6,760 | 3.8451 | 0.173867 | 0.038367 | 0.032886 | 0.01425 | 0.166895 | 0.128528 | 0.107153 | 0.057002 | 0.037819 | 0.017539 | 0 | 0.022313 | 0.310503 | 6,760 | 210 | 206 | 32.190476 | 0.760566 | 0.100296 | 0 | 0.124138 | 0 | 0 | 0.016807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082759 | false | 0.006897 | 0.048276 | 0 | 0.22069 | 0.048276 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
926993a758d4bcc12fbe159bf42b98c618c9eb80 | 6,808 | py | Python | uservoice/client.py | uservoice/uservoice-python | 296af32d861562eeb11886d55f7e00dff30383e9 | [
"MIT"
] | 4 | 2015-01-29T18:05:32.000Z | 2018-05-03T17:02:09.000Z | uservoice/client.py | uservoice/uservoice-python | 296af32d861562eeb11886d55f7e00dff30383e9 | [
"MIT"
] | 13 | 2015-01-21T08:20:48.000Z | 2018-08-20T20:41:01.000Z | uservoice/client.py | uservoice/uservoice-python | 296af32d861562eeb11886d55f7e00dff30383e9 | [
"MIT"
] | 6 | 2015-07-12T21:15:12.000Z | 2020-10-22T16:32:54.000Z | from future import standard_library
standard_library.install_aliases()
from builtins import object
import operator
import array
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import json
import uservoice
from requests_oauthlib import OAuth1
from urllib.parse import parse_qs
import requests
version='0.0.23'
class APIError(RuntimeError):
    """Base error for failures reported by the UserVoice API."""
class Unauthorized(APIError):
    """Raised when the API rejects the request's credentials/token."""
class NotFound(APIError):
    """Raised when the API reports that a record does not exist."""
class RateLimitExceeded(APIError):
    """Raised when the API reports the rate limit has been exceeded."""
class ApplicationError(APIError):
    """Raised when the API reports an application-level error."""
class Client(object):
    """OAuth1 client for the UserVoice API.

    A Client is effectively immutable with respect to credentials: each
    login_* method returns a *new* Client carrying the new token pair.
    Can be used as a context manager (no cleanup is performed on exit).
    """

    def __init__(self, subdomain_name, api_key, api_secret=None, oauth_token='', oauth_token_secret='', callback=None, protocol=None, uservoice_domain=None):
        """Build the API base URL and the OAuth1 signer.

        Without api_secret, requests are unsigned and the api_key is
        appended as a `client` query parameter instead (see request()).
        """
        self.request_token = None
        self.token = oauth_token
        self.secret = oauth_token_secret
        self.default_headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'API-Client': 'uservoice-python-' + version }
        oauth_hooks = {}  # NOTE(review): unused local
        if api_secret:
            self.oauth = OAuth1(api_key, api_secret, resource_owner_key=self.token, resource_owner_secret=self.secret, callback_uri=callback)
        else:
            self.oauth = None
        self.api_url = "{protocol}://{subdomain_name}.{uservoice_domain}".format(
            subdomain_name=subdomain_name,
            protocol=(protocol or 'https'),
            uservoice_domain=(uservoice_domain or 'uservoice.com')
        )
        self.api_key = api_key
        self.api_secret = api_secret
        self.callback = callback
        self.subdomain_name = subdomain_name
        self.uservoice_domain = uservoice_domain
        self.protocol = protocol

    def get_request_token(self, callback=None):
        """Fetch an OAuth request token and return a new Client
        authenticated with it.

        Raises Unauthorized when the response lacks the token fields.
        """
        url = self.api_url + '/oauth/request_token'
        body = {}
        if self.callback or callback:
            body['oauth_callback'] = callback or self.callback
        oauth = OAuth1(self.api_key, self.api_secret, callback_uri=self.callback)
        resp = requests.post(url, body, headers=self.default_headers, auth=oauth)
        # Response is form-encoded, e.g. oauth_token=..&oauth_token_secret=..
        token = parse_qs(resp.text)
        if not 'oauth_token' in token or not 'oauth_token_secret' in token:
            raise Unauthorized('Failed to get request token')
        return self.login_with_access_token(token['oauth_token'][0], token['oauth_token_secret'][0])

    def authorize_url(self):
        """Fetch a request token (stored on self.request_token) and
        return the user-facing authorization URL for it."""
        self.request_token = self.get_request_token()
        url = self.api_url + '/oauth/authorize?oauth_token=' + self.request_token.token
        return url

    def login_with_verifier(self, verifier=None):
        """Exchange self.request_token plus the user's verifier for an
        access token; returns a new Client using that token."""
        url = self.api_url + '/oauth/access_token'
        oauth = OAuth1(self.api_key, self.api_secret, resource_owner_key=self.request_token.token, resource_owner_secret=self.request_token.secret, callback_uri=self.callback, verifier=verifier)
        resp = requests.post(url, auth=oauth)
        token = parse_qs(resp.text)
        return self.login_with_access_token(token['oauth_token'][0], token['oauth_token_secret'][0])

    def login_with_access_token(self, token, secret):
        """Return a new Client configured with the given token pair and
        this client's connection settings."""
        return Client(self.subdomain_name, self.api_key, self.api_secret, oauth_token=token, oauth_token_secret=secret, callback=self.callback,
                      protocol=self.protocol,
                      uservoice_domain=self.uservoice_domain)

    def request(self, method, path, params={}):
        """Perform an HTTP request against the API and return the parsed
        JSON body as a dict.

        Raises a typed APIError subclass when the response contains an
        'errors' payload (or on 404/429 status codes).

        NOTE(review): mutable default argument `params={}` — never
        mutated here, but fragile; also json_body/get_parameters are
        unused locals.
        """
        json_body = None
        get_parameters = {}
        method = method.upper()
        url = self.api_url + path
        # Unsigned mode: identify via the `client` query parameter.
        if self.api_secret == None:
            if '?' in url:
                url += '&client=' + self.api_key
            else:
                url += '?client=' + self.api_key
        json_resp = None
        if method == 'POST':
            json_resp = requests.post(url, json.dumps(params), headers=self.default_headers, auth=self.oauth)
        elif method == 'PUT':
            json_resp = requests.put(url, json.dumps(params), headers=self.default_headers, auth=self.oauth)
        elif method == 'GET':
            json_resp = requests.get(url, headers=self.default_headers, auth=self.oauth)
        elif method == 'DELETE':
            json_resp = requests.delete(url, headers=self.default_headers, auth=self.oauth)
        attrs = {}
        try:
            # Synthesize error payloads for statuses whose bodies may
            # not be JSON.
            if json_resp.status_code == 404:
                attrs = {'errors': {'type': 'record_not_found' }}
            elif json_resp.status_code == 429:
                attrs = {'errors': {'type': 'rate_limit_exceeded' }}
            else:
                attrs = json_resp.json()
        except json.JSONDecodeError as e:
            raise APIError(e)
        if 'errors' in attrs:
            if attrs['errors']['type'] == 'unauthorized':
                raise Unauthorized(attrs)
            elif attrs['errors']['type'] == 'record_not_found':
                raise NotFound(attrs)
            elif attrs['errors']['type'] == 'rate_limit_exceeded':
                raise RateLimitExceeded(attrs)
            elif attrs['errors']['type'] == 'application_error':
                raise ApplicationError(attrs)
            else:
                raise APIError(attrs)
        return attrs

    # handy delegate methods
    def get(self, path, params={}): return self.request('get', path, params)
    def put(self, path, params={}): return self.request('put', path, params)
    def post(self, path, params={}): return self.request('post', path, params)
    def delete(self, path, params={}): return self.request('delete', path, params)

    def get_collection(self, path, **opts):
        """Return a lazily-paginated Collection wrapper for `path`."""
        return uservoice.Collection(self, path, **opts)

    def login_as(self, email):
        """Obtain an access token on behalf of the user identified by
        `email`; returns a new Client, or raises Unauthorized."""
        resp = self.post('/api/v1/users/login_as', {
            'request_token': self.get_request_token().token,
            'user': { 'email': email }
        })
        if 'token' in resp:
            token = resp['token']['oauth_token']
            secret = resp['token']['oauth_token_secret']
            return self.login_with_access_token(token, secret)
        else:
            raise Unauthorized(resp)

    def login_as_owner(self):
        """Obtain an access token for the account owner; returns a new
        Client, or raises Unauthorized."""
        resp = self.post('/api/v1/users/login_as_owner', {
            'request_token': self.get_request_token().token
        })
        if 'token' in resp:
            token = resp['token']['oauth_token']
            secret = resp['token']['oauth_token_secret']
            return self.login_with_access_token(token, secret)
        else:
            raise Unauthorized(resp)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Nothing to clean up; context-manager support is for symmetry.
        pass
| 43.641026 | 194 | 0.611633 | 786 | 6,808 | 5.089059 | 0.152672 | 0.0475 | 0.04125 | 0.042 | 0.38475 | 0.32225 | 0.224 | 0.1915 | 0.149 | 0.137 | 0 | 0.004069 | 0.278055 | 6,808 | 155 | 195 | 43.922581 | 0.809766 | 0.003231 | 0 | 0.160584 | 0 | 0 | 0.107901 | 0.018721 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109489 | false | 0.043796 | 0.080292 | 0.051095 | 0.29927 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
926b0aae20c9db282c44cb01fb1eb01eb50d3739 | 4,754 | py | Python | content/gui/gui_2.py | mm5110/PIC16A | e2dab91439c2627f6a47f4bf6d16de8ba5977fe8 | [
"MIT"
] | 10 | 2020-11-07T04:07:34.000Z | 2021-12-31T10:19:12.000Z | content/gui/gui_2.py | mm5110/PIC16A | e2dab91439c2627f6a47f4bf6d16de8ba5977fe8 | [
"MIT"
] | 16 | 2021-02-03T22:35:01.000Z | 2021-05-24T21:28:56.000Z | content/gui/gui_2.py | mm5110/PIC16A | e2dab91439c2627f6a47f4bf6d16de8ba5977fe8 | [
"MIT"
] | 19 | 2020-11-11T05:44:53.000Z | 2022-02-01T14:10:15.000Z | # Our next aim is to build a simple app that will
# accept user input and show useful output.
# Our app will be a penguin classifier: input some
# measurements of a penguin, and the classifier will
# make an educated guess about the penguin's species.
# This means that we'll need to find a way to bring in
# our machine learning expertise "under the hood" of the app.
# Fortunately, that's actually quite easy to do.
# This time, our focus is simply on acquiring user input.
import tkinter as tk
# see previous lecture for reminders about what these
# lines do.
window = tk.Tk()
window.geometry("600x300")
window.title("Penguin Classifier")
window.minsize(width=300, height=300)
frame_header = tk.Frame(window,
borderwidth=2,
pady=2,
bg = "black")
frame_header.grid(row=0,
column=0)
label_header = tk.Label(frame_header,
text = "Woo penguins",
bg='white',
fg='black',
height=2,
width = 35,
font=("Helvetica 16 bold"))
label_header.grid(row=0, column=0)
frame_center = tk.Frame(window,
borderwidth=2,
pady=2,
bg = "black")
frame_center.grid(row=1, column=0)
frame_bottom = tk.Frame(window,
borderwidth=2,
pady=2,
bg = "black")
frame_bottom.grid(row=2, column=0)
# we are going to create three input fields, each corresponding
# to a column used by our machine learning model.
# To do this, it's helpful to add a frame for each
# input field.
frame_main_1 = tk.Frame(frame_center,
borderwidth=2)
frame_main_2 = tk.Frame(frame_center,
borderwidth=2)
frame_main_3 = tk.Frame(frame_center,
borderwidth=2)
var1_label = tk.Label(frame_main_1,
text = "Island: ")
var2_label = tk.Label(frame_main_2,
text = "Body Mass (g): ")
var3_label = tk.Label(frame_main_3,
text = "Culmen Length (mm): ")
# pack is like grid, but it's useful for when we want to ensure
# that certain elements are placed nearby and side-by-side.
frame_main_1.pack(fill='x', pady=2)
frame_main_2.pack(fill='x', pady=2)
frame_main_3.pack(fill='x', pady=2)
var1_label.pack(side='left')
var2_label.pack(side='left')
var3_label.pack(side='left')
# now we'll do the same thing for the spot that will hold the "answer"
frame_answer = tk.Frame(frame_bottom,
borderwidth=2,
relief='sunken')
frame_answer.pack(fill='x', pady=0)
# So, we are beginning to see the outlines of an interface.
# We still don't have a way to actually acquire or store
# information provided by the user though, so that's next.
# the tk.StringVar() object is what tracks and holds this
# information.
# We then acquire that info from the user.
# For the first variable, we'll use a dropdown menu,
# while for the other two variables we'll use simple text entry.
# Lots of other kinds of data entry mechanisms are also possible.
# For that, we use tk.Entry(). We need to supply
# both the frame in which the entrybox should live
# and the variable to which the entrybox is bound
var1 = tk.StringVar()
var1_entry = tk.OptionMenu(frame_main_1,
var1,
"Torgersen",
"Biscoe",
"Dream")
var1_entry.pack(side='right', padx=1)
var2 = tk.StringVar()
var2_entry = tk.Entry(frame_main_2,
textvariable = var2,
width=10)
var2_entry.pack(side='right', padx=1)
var3 = tk.StringVar()
var3_entry = tk.Entry(frame_main_3,
textvariable = var3,
width=10)
var3_entry.pack(side='right', padx=1)
# We are now able to accept input from the user.
# Let's just check for now that we are able to "remember"
# this information by placing the resulting StringVars
# within labels.
# We'll replace this in a future letter with something more interesting
answer_label = tk.Label(frame_answer,
font=('arial', 16, 'bold'),
bd=16,
anchor="w",
textvariable=var1)
answer_label.grid(row=0, column=0)
# Ok, looks like we are able to acquire and store user input!
# Next time we'll see how to use buttons to trigger events, allowing us
# to perform arbitrary operations on the input.
window.mainloop()
| 31.071895 | 72 | 0.597602 | 660 | 4,754 | 4.222727 | 0.354545 | 0.038751 | 0.021529 | 0.024399 | 0.188375 | 0.140294 | 0.089702 | 0.073197 | 0.04521 | 0.04521 | 0 | 0.025523 | 0.315944 | 4,754 | 152 | 73 | 31.276316 | 0.831488 | 0.404922 | 0 | 0.2 | 0 | 0 | 0.067791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.013333 | 0 | 0.013333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
926f0826b49de22b8199e308698b8a8bb238dfee | 13,417 | py | Python | src/shortcircuit/model/crest/pycrest/eve.py | farshield/shortcircu | 87d45ea85b78e3e7da72b7b44755dc429b4fdf5a | [
"MIT"
] | 35 | 2016-06-22T20:07:31.000Z | 2021-04-07T11:02:08.000Z | src/shortcircuit/model/crest/pycrest/eve.py | farshield/shortcircu | 87d45ea85b78e3e7da72b7b44755dc429b4fdf5a | [
"MIT"
] | 15 | 2016-06-17T09:36:02.000Z | 2020-10-30T11:39:07.000Z | src/shortcircuit/model/crest/pycrest/eve.py | farshield/shortcircu | 87d45ea85b78e3e7da72b7b44755dc429b4fdf5a | [
"MIT"
] | 16 | 2016-10-02T16:09:18.000Z | 2021-05-29T02:51:14.000Z | import os
import base64
import requests
import time
import zlib
from . import version
from compat import bytes_, text_
from errors import APIException
from weak_ciphers import WeakCiphersAdapter
try:
from urllib.parse import urlparse, urlunparse, parse_qsl
except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse, parse_qsl
try:
import pickle
except ImportError: # pragma: no cover
import cPickle as pickle
try:
from urllib.parse import quote
except ImportError: # pragma: no cover
from urllib import quote
import logging
import re
logger = logging.getLogger("pycrest.eve")
cache_re = re.compile(r'max-age=([0-9]+)')
class APICache(object):
    """Abstract cache interface used by APIConnection.

    Subclasses must implement put/get/invalidate; get returns None on a
    miss (see FileCache/DictCache below).
    """

    def put(self, key, value):
        # Store `value` under `key`.
        raise NotImplementedError

    def get(self, key):
        # Return the cached value for `key`, or None when absent.
        raise NotImplementedError

    def invalidate(self, key):
        # Remove `key` from the cache (no-op when absent).
        raise NotImplementedError
class FileCache(APICache):
    """API cache persisted as zlib-compressed pickle files.

    Entries are written through to both an in-process dict and a file
    under `path`; reads prefer the dict and fall back to disk.
    """

    def __init__(self, path):
        self._cache = {}
        self.path = path
        if not os.path.isdir(self.path):
            # Private cache directory, created on first use.
            os.mkdir(self.path, 0o700)

    def _getpath(self, key):
        # One file per key, named by the key's hash.
        filename = '{0}.cache'.format(hash(key))
        return os.path.join(self.path, filename)

    def put(self, key, value):
        blob = zlib.compress(pickle.dumps(value, -1))
        with open(self._getpath(key), 'wb') as handle:
            handle.write(blob)
        self._cache[key] = value

    def get(self, key):
        # Fast path: the in-process copy.
        try:
            return self._cache[key]
        except KeyError:
            pass
        # Slow path: read back from disk; a missing file is a miss.
        try:
            with open(self._getpath(key), 'rb') as handle:
                raw = handle.read()
        except IOError as ex:
            if ex.errno != 2:  # anything but "file does not exist"
                raise
            return None
        return pickle.loads(zlib.decompress(raw))

    def invalidate(self, key):
        self._cache.pop(key, None)
        try:
            os.unlink(self._getpath(key))
        except OSError as ex:
            if ex.errno != 2:  # only swallow "does not exist"
                raise
class DictCache(APICache):
    """Simple in-memory API cache backed by a plain dict."""

    def __init__(self):
        self._dict = {}

    def put(self, key, value):
        self._dict[key] = value

    def get(self, key):
        # dict.get already yields None for missing keys.
        return self._dict.get(key, None)

    def invalidate(self, key):
        # pop with a default so missing keys are a no-op.
        self._dict.pop(key, None)
class APIConnection(object):
TIMEOUT = 3
    def __init__(self, additional_headers=None, user_agent=None, cache_dir=None, cache=None):
        """Configure the requests Session and pick a cache backend.

        cache may be an APICache instance or class; otherwise cache_dir
        selects a FileCache, and the fallback is an in-memory DictCache.

        NOTE(review): if `cache` is truthy but neither an APICache
        instance nor a class, self.cache is never assigned — confirm.
        """
        # Set up a Requests Session
        session = requests.Session()
        if additional_headers is None:
            additional_headers = {}
        if user_agent is None:
            user_agent = "PyCrest/{0}".format(version)
        session.headers.update({
            "User-Agent": user_agent,
            "Accept": "application/json",
        })
        session.headers.update(additional_headers)
        # Work around the CREST endpoint's weak TLS cipher requirements.
        session.mount(
            'https://public-crest.eveonline.com',
            WeakCiphersAdapter()
        )
        self._session = session
        if cache:
            if isinstance(cache, APICache):
                self.cache = cache  # Inherit from parents
            elif isinstance(cache, type):
                self.cache = cache()  # Instantiate a new cache
        elif cache_dir:
            self.cache_dir = cache_dir
            self.cache = FileCache(self.cache_dir)
        else:
            self.cache = DictCache()
def get(self, resource, params=None):
logger.debug('Getting resource %s', resource)
if params is None:
params = {}
# remove params from resource URI (needed for paginated stuff)
parsed_uri = urlparse(resource)
qs = parsed_uri.query
resource = urlunparse(parsed_uri._replace(query=''))
prms = {}
for tup in parse_qsl(qs):
prms[tup[0]] = tup[1]
# params supplied to self.get() override parsed params
for key in params:
prms[key] = params[key]
# check cache
key = (resource, frozenset(self._session.headers.items()), frozenset(prms.items()))
cached = self.cache.get(key)
if cached and cached['expires'] > time.time():
logger.debug('Cache hit for resource %s (params=%s)', resource, prms)
return cached['payload']
elif cached:
logger.debug('Cache stale for resource %s (params=%s)', resource, prms)
self.cache.invalidate(key)
else:
logger.debug('Cache miss for resource %s (params=%s', resource, prms)
logger.debug('Getting resource %s (params=%s)', resource, prms)
try:
res = self._session.get(resource, params=prms, timeout=APIConnection.TIMEOUT)
except requests.exceptions.RequestException:
raise APIException("No response from server")
if res.status_code != 200:
raise APIException("Got unexpected status code from server: %i" % res.status_code)
ret = res.json()
# cache result
key = (resource, frozenset(self._session.headers.items()), frozenset(prms.items()))
expires = self._get_expires(res)
if expires > 0:
self.cache.put(key, {'expires': time.time() + expires, 'payload': ret})
return ret
def post(self, resource, data, params=None):
try:
res = self._session.post(resource, data=data, params=params, timeout=APIConnection.TIMEOUT)
except requests.exceptions.RequestException:
raise APIException("No response from server")
if res.status_code != 200:
raise APIException("Got unexpected status code from server: %i" % res.status_code)
return res
def _get_expires(self, response):
if 'Cache-Control' not in response.headers:
return 0
if any([s in response.headers['Cache-Control'] for s in ['no-cache', 'no-store']]):
return 0
match = cache_re.search(response.headers['Cache-Control'])
if match:
return int(match.group(1))
return 0
class EVE(APIConnection):
def __init__(self, **kwargs):
self.api_key = kwargs.pop('api_key', None)
self.client_id = kwargs.pop('client_id', None)
self.redirect_uri = kwargs.pop('redirect_uri', None)
if kwargs.pop('testing', False):
self._public_endpoint = "http://public-crest-sisi.testeveonline.com/"
self._authed_endpoint = "https://api-sisi.testeveonline.com/"
self._image_server = "https://image.testeveonline.com/"
self._oauth_endpoint = "https://sisilogin.testeveonline.com/oauth"
else:
self._public_endpoint = "https://public-crest.eveonline.com/"
self._authed_endpoint = "https://crest-tq.eveonline.com/"
self._image_server = "https://image.eveonline.com/"
self._oauth_endpoint = "https://login.eveonline.com/oauth"
self._endpoint = self._public_endpoint
self._cache = {}
self._data = None
self.token = None
self.refresh_token = None
self.expires = None
APIConnection.__init__(self, **kwargs)
def __call__(self):
if not self._data:
self._data = APIObject(self.get(self._endpoint), self)
return self._data
def __getattr__(self, item):
return self._data.__getattr__(item)
def auth_uri(self, scopes=None, state=None):
s = [] if not scopes else scopes
grant_type = "token" if self.api_key is None else "code"
return "%s/authorize?response_type=%s&redirect_uri=%s&client_id=%s%s%s" % (
self._oauth_endpoint,
grant_type,
quote(self.redirect_uri, safe=''),
self.client_id,
"&scope=%s" % '+'.join(s) if scopes else '',
"&state=%s" % state if state else ''
)
def _authorize(self, params):
auth = text_(base64.b64encode(bytes_("%s:%s" % (self.client_id, self.api_key))))
headers = {"Authorization": "Basic %s" % auth}
try:
res = self._session.post(
"%s/token" % self._oauth_endpoint,
params=params,
headers=headers,
timeout=APIConnection.TIMEOUT
)
except requests.exceptions.RequestException:
raise APIException("No response from server")
if res.status_code != 200:
raise APIException("Got unexpected status code from API: %i" % res.status_code)
return res.json()
def set_auth_values(self, res):
self.__class__ = AuthedConnection
self.token = res['access_token']
self.refresh_token = res['refresh_token']
self.expires = int(time.time()) + res['expires_in']
self._endpoint = self._authed_endpoint
self._session.headers.update({"Authorization": "Bearer %s" % self.token})
def authorize(self, code):
res = self._authorize(
params={
"grant_type": "authorization_code",
"code": code
}
)
self.set_auth_values(res)
return AuthedConnection(
res,
self._authed_endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache=self.cache
)
def refr_authorize(self, refresh_token):
res = self._authorize(
params={
"grant_type": "refresh_token",
"refresh_token": refresh_token
}
)
self.set_auth_values(res)
return AuthedConnection(
{
'access_token': res['access_token'],
'refresh_token': refresh_token,
'expires_in': res['expires_in']
},
self._authed_endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache=self.cache
)
def temptoken_authorize(self, access_token=None, expires_in=0, refresh_token=None):
self.set_auth_values({
'access_token': access_token,
'refresh_token': refresh_token,
'expires_in': expires_in
})
return AuthedConnection(
{
'access_token': access_token,
'refresh_token': refresh_token,
'expires_in': expires_in
},
self._authed_endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache=self.cache
)
class AuthedConnection(EVE):
def __init__(self, res, endpoint, oauth_endpoint, client_id=None, api_key=None, **kwargs):
EVE.__init__(self, **kwargs)
self.client_id = client_id
self.api_key = api_key
self.token = res['access_token']
self.refresh_token = res['refresh_token']
self.expires = int(time.time()) + res['expires_in']
self._oauth_endpoint = oauth_endpoint
self._endpoint = endpoint
self._session.headers.update({"Authorization": "Bearer %s" % self.token})
def __call__(self):
if not self._data:
self._data = APIObject(self.get(self._endpoint), self)
return self._data
def whoami(self):
if 'whoami' not in self._cache:
self._cache['whoami'] = self.get("https://login.eveonline.com/oauth/verify")
return self._cache['whoami']
def refresh(self):
res = self._authorize(params={"grant_type": "refresh_token", "refresh_token": self.refresh_token})
self.token = res['access_token']
self.expires = int(time.time()) + res['expires_in']
self._session.headers.update({"Authorization": "Bearer %s" % self.token})
return self # for backwards compatibility
def get(self, resource, params=None):
if int(time.time()) >= self.expires:
self.refresh()
return super(self.__class__, self).get(resource, params)
def post(self, resource, data, params=None):
if int(time.time()) >= self.expires:
self.refresh()
return super(self.__class__, self).post(resource, data, params)
class APIObject(object):
def __init__(self, parent, connection):
self._dict = {}
self.connection = connection
for k, v in parent.items():
if type(v) is dict:
self._dict[k] = APIObject(v, connection)
elif type(v) is list:
self._dict[k] = self._wrap_list(v)
else:
self._dict[k] = v
def _wrap_list(self, list_):
new = []
for item in list_:
if type(item) is dict:
new.append(APIObject(item, self.connection))
elif type(item) is list:
new.append(self._wrap_list(item))
else:
new.append(item)
return new
def __getattr__(self, item):
if item in self._dict:
return self._dict[item]
raise AttributeError(item)
def __call__(self, **kwargs):
# Caching is now handled by APIConnection
if 'href' in self._dict:
return APIObject(self.connection.get(self._dict['href'], params=kwargs), self.connection)
else:
return self
def __str__(self): # pragma: no cover
return self._dict.__str__()
def __repr__(self): # pragma: no cover
return self._dict.__repr__()
| 33.128395 | 106 | 0.585675 | 1,540 | 13,417 | 4.905195 | 0.155195 | 0.02502 | 0.020254 | 0.019063 | 0.410643 | 0.330289 | 0.286736 | 0.247683 | 0.241859 | 0.235372 | 0 | 0.003636 | 0.303123 | 13,417 | 404 | 107 | 33.210396 | 0.804278 | 0.030037 | 0 | 0.361194 | 0 | 0 | 0.111103 | 0.00477 | 0 | 0 | 0 | 0 | 0 | 1 | 0.110448 | false | 0.002985 | 0.059701 | 0.014925 | 0.283582 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9270ac95e899e83cadb5cec384f9e4de3f872548 | 4,160 | py | Python | CAD_lib/geometry.py | JohnAD/CAD_lib | 67985e09e146d71b4c2b6997e758f6514fc30f5e | [
"MIT"
] | null | null | null | CAD_lib/geometry.py | JohnAD/CAD_lib | 67985e09e146d71b4c2b6997e758f6514fc30f5e | [
"MIT"
] | null | null | null | CAD_lib/geometry.py | JohnAD/CAD_lib | 67985e09e146d71b4c2b6997e758f6514fc30f5e | [
"MIT"
] | null | null | null | ###################################
#
# Point
#
###################################
class Point(object):
'''
A point.
'''
def __init__(self, x=0.0, y=0.0, z=None):
if hasattr(x, 'x'): # is this an objext containing x,y,z objects? If so, then transfer those.
tx = x.x
ty = y
tz = z
if hasattr(x, 'y'):
ty = x.y
if hasattr(x, 'z'):
tz = x.z
self._assign(tx, ty, tz)
else:
self._assign(x, y, z)
self.area = 0.0 # by definition
self.bounds = (0.0, 0.0, 0.0, 0.0) # by defition
self.length = 0.0 # by definition
self.geom_type = "Point"
return
#
# CLASS SPECIAL METHODS
#
def __repr__(self):
if self.z is None:
result = "<CAD_lib.geometry.Point ({}, {})>".format(self.x, self.y)
else:
result = "<CAD_lib.geometry.Point ({}, {}, {})>".format(self.x, self.y, self.z)
return result
def __str__(self):
if self.z is None:
result = "Point ({}, {})".format(self.x, self.y)
else:
result = "Point ({}, {}, {})".format(self.x, self.y, self.z)
return result
#
# INTERNAL METHODS
#
def _assign(self, x, y, z):
if type(x) is tuple or type(x) is list:
if len(x)==3:
(x,y,z) = x
elif len(x)==2:
(x,y) = x
elif len(x)==1:
x = x[0]
self.x = float(x)
self.y = float(y)
if z is None:
self.z = None
self.coords = [(x, y)]
else:
self.z = float(z)
self.coords = [(x, y, z)]
return
#
# GENERAL OBJECT ROUTINES:
#
# def distance(self, other):
# def representative_point(self):
###################################
#
# Line
#
###################################
class Line(object):
'''
A Line is an ordered list of strictly two Points
'''
def __init__(self, a, b):
self.area = 0
self.length = 0.0 #TBD
self.bounds = None #TBD
self.a = Point(a)
self.b = Point(b)
self._reparse()
return
#
# CLASS SPECIAL METHODS
#
def __repr__(self):
result = "<CAD_lib.geometry.Line {}>".format(str(self.coord))
return result
def __str__(self):
result = "Line {}".format(str(self.coord))
return result
#
# INTERNAL METHODS
#
def _reparse(self):
self._ls = LineString([self.a, self.b])
self.coord = self._ls.coord
return
#
# GENERAL OBJECT ROUTINES:
#
# def distance(self, other):
# def representative_point(self):
###################################
#
# LineString
#
###################################
class LineString(object):
'''
A LineString is an ordered list of Points
'''
def __init__(self, coordinates):
self.area = 0
self.length = 0.0 #TBD
self.bounds = None #TBD
if hasattr(coordinates, "coord"):
self._parse_coordinates(coordinates.coord)
else:
self._parse_coordinates(coordinates)
return
#
# CLASS SPECIAL METHODS
#
def __repr__(self):
result = "<CAD_lib.geometry.LineString with {} Points>".format(len(self._true_coord))
return result
def __str__(self):
result = "LineString {}".format(str(self.coord))
return result
#
# INTERNAL METHODS
#
def _parse_coordinates(self, coordinates):
self._true_coord = []
self.coord = []
for p in coordinates:
true_point = Point(p)
self._true_coord.append(true_point)
if true_point.z is None:
self.coord.append((true_point.x, true_point.y))
else:
self.coord.append((true_point.x, true_point.y, true_point.z))
return
#
# GENERAL OBJECT ROUTINES:
#
# def distance(self, other):
# def representative_point(self):
| 23.771429 | 102 | 0.47476 | 479 | 4,160 | 3.974948 | 0.173278 | 0.013655 | 0.009454 | 0.010504 | 0.532563 | 0.483193 | 0.483193 | 0.395483 | 0.377101 | 0.289916 | 0 | 0.009648 | 0.352163 | 4,160 | 175 | 103 | 23.771429 | 0.696846 | 0.14976 | 0 | 0.354839 | 0 | 0 | 0.063586 | 0.029777 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
927130d7806b470ffda0c0ca41177feb165a188a | 1,658 | py | Python | data_utils/rdp.py | muzik999/road_connectivity | 8d9893752a633f9339e91b7b697f700ca669b729 | [
"MIT"
] | 96 | 2019-06-06T05:13:34.000Z | 2022-03-23T12:01:07.000Z | data_utils/rdp.py | muzik999/road_connectivity | 8d9893752a633f9339e91b7b697f700ca669b729 | [
"MIT"
] | 35 | 2019-07-05T06:03:22.000Z | 2022-03-09T03:11:02.000Z | data_utils/rdp.py | muzik999/road_connectivity | 8d9893752a633f9339e91b7b697f700ca669b729 | [
"MIT"
] | 30 | 2019-07-17T09:37:26.000Z | 2022-02-28T19:09:22.000Z | #!/usr/bin/env python3
"""
The Ramer-Douglas-Peucker algorithm roughly ported from the pseudo-code provided
by http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
The code is taken from
https://github.com/mitroadmaps/roadtracer/blob/master/lib/discoverlib/rdp.py
"""
from math import sqrt
def distance(a, b):
return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def point_line_distance(point, start, end):
"""
Calaculate the prependicuar distance of given point from the line having
start and end points.
"""
if start == end:
return distance(point, start)
else:
n = abs(
(end[0] - start[0]) * (start[1] - point[1])
- (start[0] - point[0]) * (end[1] - start[1])
)
d = sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
return n / d
def rdp(points, epsilon):
"""
Reduces a series of points to a simplified version that loses detail, but
maintains the general shape of the series.
@param points: Series of points for a line geometry represnted in graph.
@param epsilon: Tolerance required for RDP algorithm to aproximate the
line geometry.
@return: Aproximate series of points for approximate line geometry
"""
dmax = 0.0
index = 0
for i in range(1, len(points) - 1):
d = point_line_distance(points[i], points[0], points[-1])
if d > dmax:
index = i
dmax = d
if dmax >= epsilon:
results = rdp(points[: index + 1], epsilon)[:-1] + rdp(points[index:], epsilon)
else:
results = [points[0], points[-1]]
return results
| 28.586207 | 87 | 0.603136 | 234 | 1,658 | 4.252137 | 0.384615 | 0.01809 | 0.042211 | 0.056281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026359 | 0.267793 | 1,658 | 57 | 88 | 29.087719 | 0.793245 | 0.436068 | 0 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.038462 | 0.038462 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
927303a84f42823f69e85cdc571a7f5088e0421b | 8,337 | py | Python | combiner/combiner/tf/models.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-19T04:26:12.000Z | 2022-03-19T04:26:12.000Z | combiner/combiner/tf/models.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | combiner/combiner/tf/models.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
import tensorflow.compat.v1 as tf
import numpy as np
from combiner.tf import attention
from combiner.tf import approx_attention
from combiner.tf import ops
import functools
def get_embedding(vocab_size,
embed_size,
dtype,
num_heads=None,
init_std=1.0,
name='embeddings'):
"""Initialize embedding weights."""
# compatible with both 3d and 4d cases
if num_heads is not None:
shape = [vocab_size, num_heads, embed_size // num_heads]
else:
shape = [vocab_size, embed_size]
with tf.variable_scope(None, name):
weights = tf.get_variable(
'weights', shape=shape,
initializer=tf.random_normal_initializer(stddev=init_std), dtype=dtype)
return weights
def get_position_embedding(max_len, emb_size, dtype,
name='position_embeddings', concatenate=False):
"""Get position encoding."""
def _get_angles_per_position(position, dim, emb_size):
if dtype == tf.float32:
denominator = np.power(10000, (2 * (dim // 2)) / np.float32(emb_size))
else:
denominator = np.power(10000, (2 * (dim // 2)) / np.float16(emb_size))
return position / denominator
# Create the arguments for the sines and cosines.
angles = _get_angles_per_position(np.arange(max_len)[:, np.newaxis],
np.arange(emb_size)[np.newaxis, :],
emb_size)
# Apply sine to the odd positions.
sines = np.sin(angles[:, 0::2])
# Apply cosine to the even positions.
cosines = np.cos(angles[:, 1::2])
if concatenate:
# See e.g. http://jalammar.github.io/illustrated-transformer/.
output = np.concatenate([sines, cosines], axis=-1)
else:
# See e.g.
# https://kazemnejad.com/blog/transformer_architecture_positional_encoding/.
output = np.zeros_like(
angles, dtype=np.float32 if dtype==tf.float32 else np.float16)
output[:, 0::2] = sines
output[:, 1::2] = cosines
with tf.variable_scope(None, name):
weights = tf.get_variable(
'weights', shape=[max_len, emb_size],
initializer=tf.constant_initializer(output), dtype=dtype)
return weights
def embedding_lookup(embeddings, indices, implementation='lookup'):
"""Different types of embedding approaches."""
if implementation == 'lookup':
return tf.nn.embedding_lookup(embeddings, indices)
elif implementation == 'matmul':
onehot = tf.one_hot(indices, depth=embeddings.shape[0].value, axis=-1,
dtype=embeddings.dtype)
return tf.einsum('BLV,VD->BLD', onehot, embeddings)
else:
raise ValueError('Unsupported embedding lookup implementation %s'
% implementation)
def vanilla_transformer_layer(x, config, is_training=True, attn_bias=None,
layer_idx=0): # pylint: disable=unused-argument
"""transformer layer: attention + ffn."""
# Attention
with tf.variable_scope('attn'):
shortcut, x = ops.preprocess(x, config)
x = attention.multihead_attention(x, x, x,
config.model_size,
config.num_heads,
is_training=is_training,
dropatt=config.dropatt,
attn_bias=attn_bias,
bias=config.dense_use_bias)
x = ops.postprocess(shortcut, x, config, is_training)
# FFN
with tf.variable_scope('ffn'):
shortcut, x = ops.preprocess(x, config)
x = ops.ffn(x, is_training, config.dropout)
x = ops.postprocess(shortcut, x, config, is_training)
return x
def transformer_approx_att_layer(x,
config,
is_training=True,
attn_bias=None,
attn_impl=None,
layer_idx=0):
"""transformer layer: approximated attention + ffn."""
# Attention
with tf.variable_scope('attn'):
shortcut, x = ops.preprocess(x, config)
x = attn_impl(x, config, is_training=is_training)
x = ops.postprocess(shortcut, x, config, is_training)
# FFN
with tf.variable_scope('ffn'):
shortcut, x = ops.preprocess(x, config)
x = ops.ffn(x, is_training, config.dropout)
x = ops.postprocess(shortcut, x, config, is_training)
return x
def transformer(inputs, config, is_training=True, input_mask=None,
segment=None, causal=False):
"""Transformer encoder."""
outputs = {}
#### Embeddings
word_embeddings = get_embedding(
config.vocab_size, config.embed_size,
dtype=config.dtype,
init_std=config.embedding_init_std, name='word_embeddings')
outputs['word_embeddings'] = word_embeddings
x = embedding_lookup(word_embeddings, inputs)
if config.pos_sine_init:
pos_embeddings = get_position_embedding(
config.max_seq_len, config.embed_size,
dtype=config.dtype, name='pos_embeddings')
else:
pos_embeddings = get_embedding(
config.max_seq_len, config.embed_size,
dtype=config.dtype, init_std=0.02, name='pos_embeddings')
outputs['pos_embeddings'] = pos_embeddings
x += pos_embeddings[:tf.shape(x)[1]]
if segment is not None:
# Use for multi-segment input (GLUE datasets)
seg_embeddings = get_embedding(config.max_num_seg, config.embed_size,
dtype=config.dtype, name='seg_embeddings')
outputs['seg_embeddings'] = seg_embeddings
x += embedding_lookup(seg_embeddings, segment)
x = ops.dropout(x, is_training, config.dropout)
tf.logging.info('Embedding output: shape %s, dtype %s.', x.shape, x.dtype)
att_type_spec = getattr(config, 'att_type', None)
if att_type_spec is None:
layer_fn = vanilla_transformer_layer
else:
attn_impl = functools.partial(
getattr(approx_attention, att_type_spec, None),
causal=causal)
assert attn_impl is not None, 'unknown attention type %s' % att_type_spec
layer_fn = functools.partial(transformer_approx_att_layer,
attn_impl=attn_impl)
#### Attn bias
attn_bias = attention.attn_bias_from_mask(x, input_mask, causal=causal)
#### Attention blocks
hiddens = []
for idx in range(config.num_layers):
with tf.variable_scope('layer_{:0>3d}'.format(idx)):
x = layer_fn(x,
config,
is_training=is_training,
attn_bias=attn_bias,
layer_idx=idx)
hiddens.append(x)
if att_type_spec == 'axial':
# [B x A1 x A2 x ... x D] -> [B x L x D]
x = tf.reshape(x, [tf.shape(x)[0], -1, tf.shape(x)[-1]])
outputs['hiddens'] = hiddens
outputs['output'] = x
return outputs
def lm_head(hidden, config, embeddings=None, hidden_mapping=None):
"""Compute the logits used for LM/MLM."""
hidden = ops.layer_norm(hidden, name='final_norm')
if hidden_mapping is not None:
# `hidden_mapping` is usually used in MLM to retrieve masked positions
hidden = tf.einsum('BMD,BLM->BLD', hidden,
tf.cast(hidden_mapping, hidden.dtype))
if embeddings is None or not config.inner_prod:
softmax_weight = tf.get_variable(
'softmax_weight', shape=[config.vocab_size, config.embed_size],
initializer=ops.WEIGHT_INITIALIZER, dtype=config.dtype)
else:
softmax_weight = embeddings
softmax_bias = tf.get_variable(
'softmax_bias', shape=[config.vocab_size],
initializer=ops.BIAS_INITIALIZER, dtype=config.dtype)
logits = tf.einsum('BLD,VD->BLV', hidden, softmax_weight) + softmax_bias
return logits
| 36.565789 | 80 | 0.644836 | 1,064 | 8,337 | 4.87782 | 0.24812 | 0.030829 | 0.027746 | 0.026204 | 0.239884 | 0.205588 | 0.186513 | 0.178998 | 0.158767 | 0.144123 | 0 | 0.010209 | 0.248051 | 8,337 | 227 | 81 | 36.726872 | 0.817674 | 0.165767 | 0 | 0.251613 | 0 | 0 | 0.056993 | 0 | 0 | 0 | 0 | 0 | 0.006452 | 1 | 0.051613 | false | 0 | 0.03871 | 0 | 0.148387 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92745101ac9b7fb8a1d621ca0d57ec6d077050b3 | 832 | py | Python | tflens/model/tfstate.py | neovasili/tflens | 1aa46160c1ccf47f32de2fa44b4169ae6439483e | [
"MIT"
] | 3 | 2020-10-15T14:46:20.000Z | 2021-11-03T23:45:56.000Z | tflens/model/tfstate.py | neovasili/tflens | 1aa46160c1ccf47f32de2fa44b4169ae6439483e | [
"MIT"
] | 25 | 2020-10-14T08:09:05.000Z | 2021-06-23T18:05:38.000Z | tflens/model/tfstate.py | neovasili/tflens | 1aa46160c1ccf47f32de2fa44b4169ae6439483e | [
"MIT"
] | null | null | null | from tflens.model.tfstate_resource import TfStateResource
class TfState():
def __init__(self, content: dict):
super().__init__()
self.__content = content
self.__version = self.__content['version']
self.__tf_version = self.__content['terraform_version']
self.__serial = self.__content['serial']
self.__lineage = self.__content['lineage']
self.__outputs = self.__content['outputs']
self.__get_resources(self.__content['resources'])
def __get_resources(self, source_resources: list):
resources = list()
for resource in source_resources or []:
temp_resource = TfStateResource(resource)
resources.append(temp_resource)
self.__resources = resources
def get_resources_count(self):
return len(self.__resources)
def get_resources(self):
return self.__resources
| 27.733333 | 59 | 0.727163 | 94 | 832 | 5.861702 | 0.340426 | 0.15971 | 0.087114 | 0.130672 | 0.101633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.165865 | 832 | 29 | 60 | 28.689655 | 0.793948 | 0 | 0 | 0 | 0 | 0 | 0.063702 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.047619 | 0.095238 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9274d416d7048e207bf11ee0a1a4caf193e5c80c | 13,140 | py | Python | ck_solution/module/mm-simulator/module.py | phesse001/Trust-Tools | baf7b70dbd5e9febca403d792df28a718ca90c93 | [
"MIT"
] | null | null | null | ck_solution/module/mm-simulator/module.py | phesse001/Trust-Tools | baf7b70dbd5e9febca403d792df28a718ca90c93 | [
"MIT"
] | null | null | null | ck_solution/module/mm-simulator/module.py | phesse001/Trust-Tools | baf7b70dbd5e9febca403d792df28a718ca90c93 | [
"MIT"
] | null | null | null | import os
#
# Collective Knowledge ()
#
#
#
#
# Developer:
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# runs march madness program with three sets of inputs
# tbd retrieve actual inputs from meta or ask for inputs
def run(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
#find simulator entry
r=ck.access({'action':'find', 'module_uoa':'program', 'data_uoa':'simulator'})
if r['return']>0: return r # use standard error handling in the CK
path = r['path']
#Access run parameters
r=ck.access({'action':'load', 'repo_uoa':'march-madness', 'module_uoa':'program', 'data_uoa':'simulator'})
if r['return']>0: return r # use standard error handling in the CK
r_vars = r['dict']['run_vars']
#find results entry
r=ck.access({'action':'find', 'module_uoa':'mm-simulator', 'data_uoa':'results'})
if r['return']>0: return r # use standard error handling in the CK
r_path = r['path']
r=ck.access({'action':'compile', 'module_uoa':'program', 'data_uoa':'simulator'})
if r['return']>0: return r # use standard error handling in the CK
y='\nWould you like to apply scaling? (Press 1 for True, 0 for False): '
valid = False
while valid == False:
rx=ck.inp({'text':y})
x=rx['string'].strip()
if x=='': x='0'
if x == '0':
valid = True
apply_scaling = 0
elif x == '1':
valid = True
apply_scaling = 1
else:
ck.out("\nInvalid response, please enter 0 or 1")
y='\nEnter a Home Field Advantage: '
valid = False
while valid == False:
rx=ck.inp({'text':y})
x=rx['string'].strip()
if x=='': x='0'
try:
int(x)
valid = True
hfa = int(x)
except ValueError:
ck.out("invalid input")
r=ck.access({'action':'find', 'module_uoa':'program', 'data_uoa':'simulator'})
if r['return']>0: return r # use standard error handling in the CK
p = r['path']
r=ck.load_json_file({'json_file': p + "/.cm/meta.json"})
if r['return']>0: ck.err(r)
d = r['dict']
d['run_vars']['home_field_advantage_1']= hfa
d['run_vars']['apply_scaling_1']= apply_scaling
r=ck.save_json_to_file({'json_file': p + "/.cm/meta.json", 'dict':d, 'sort_keys':'yes'})
if r['return']>0: ck.err(r)
print("\nHome_Field_Advantage: " + str(hfa) + "\nApply_Scaling: " + str(apply_scaling))
r=ck.access({'action':'run', 'module_uoa':'program', 'data_uoa':'simulator', 'env.home_field_advantage':hfa, 'env.apply_scaling':apply_scaling})
if r['return']>0: return r # use standard error handling in the CK
os.system("cp " + path + "/tmp/stdout.log " + path + "/tmp/stdout1.log")
y='\nWould you like to apply scaling? (Press 1 for True, 0 for False): '
valid = False
while valid == False:
rx=ck.inp({'text':y})
x=rx['string'].strip()
if x=='': x='0'
if x == '0':
valid = True
apply_scaling = 0
elif x == '1':
valid = True
apply_scaling = 1
else:
ck.out("\nInvalid response, please enter 0 or 1")
y='\nEnter a Home Field Advantage: '
valid = False
while valid == False:
rx=ck.inp({'text':y})
x=rx['string'].strip()
if x=='': x='0'
try:
int(x)
valid = True
hfa = int(x)
except ValueError:
ck.out("invalid input")
r=ck.access({'action':'find', 'module_uoa':'program', 'data_uoa':'simulator'})
if r['return']>0: return r # use standard error handling in the CK
p = r['path']
r=ck.load_json_file({'json_file': p + "/.cm/meta.json"})
if r['return']>0: ck.err(r)
d = r['dict']
d['run_vars']['home_field_advantage_2']= hfa
d['run_vars']['apply_scaling_2']= apply_scaling
r=ck.save_json_to_file({'json_file': p + "/.cm/meta.json", 'dict':d, 'sort_keys':'yes'})
if r['return']>0: ck.err(r)
print("\nHome_Field_Advantage: " + str(hfa) + "\nApply_Scaling: " + str(apply_scaling))
r=ck.access({'action':'run', 'module_uoa':'program', 'data_uoa':'simulator', 'env.home_field_advantage':hfa, 'env.apply_scaling':apply_scaling})
if r['return']>0: return r # use standard error handling in the CK
os.system("cp " + path + "/tmp/stdout.log " + path + "/tmp/stdout2.log")
y='\nWould you like to apply scaling? (Press 1 for True, 0 for False): '
valid = False
while valid == False:
rx=ck.inp({'text':y})
x=rx['string'].strip()
if x=='': x='0'
if x == '0':
valid = True
apply_scaling = 0
elif x == '1':
valid = True
apply_scaling = 1
else:
ck.out("\nInvalid response, please enter 0 or 1")
y='\nEnter a Home Field Advantage: '
valid = False
while valid == False:
rx=ck.inp({'text':y})
x=rx['string'].strip()
if x=='': x='0'
try:
int(x)
valid = True
hfa = int(x)
except ValueError:
ck.out("invalid input")
r=ck.access({'action':'find', 'module_uoa':'program', 'data_uoa':'simulator'})
if r['return']>0: return r # use standard error handling in the CK
p = r['path']
r=ck.load_json_file({'json_file': p + "/.cm/meta.json"})
if r['return']>0: ck.err(r)
d = r['dict']
d['run_vars']['home_field_advantage_3']= hfa
d['run_vars']['apply_scaling_3']= apply_scaling
r=ck.save_json_to_file({'json_file': p + "/.cm/meta.json", 'dict':d, 'sort_keys':'yes'})
if r['return']>0: ck.err(r)
print("\nHome_Field_Advantage: " + str(hfa) + "\nApply_Scaling: " + str(apply_scaling))
r=ck.access({'action':'run', 'module_uoa':'program', 'data_uoa':'simulator', 'env.home_field_advantage':hfa, 'env.apply_scaling':apply_scaling})
if r['return']>0: return r # use standard error handling in the CK
os.system("cp " + path + "/tmp/stdout.log " + path + "/tmp/stdout3.log")
#copy files to results entry to use for dashboard graph later
os.system("cp " + path + "/tmp/stdout1.log " + r_path)
os.system("cp " + path + "/tmp/stdout2.log " + r_path)
os.system("cp " + path + "/tmp/stdout3.log " + r_path)
#removes duplicate file of stdout1.log
os.system("rm stdout.log")
print("\nResults can be found in " + path + "/tmp directory\n\nRun 'ck {action} mm-simulator --help to learn more about a specified action\n")
return {'return':0}
##############################################################################
# generate paper
def _read_second_fields(file_path):
    """Return the text after the first space of every line in file_path.

    Each line is expected to look like "<rank> <team name>"; the part after
    the first space is kept, stripped of surrounding whitespace.
    """
    fields = []
    with open(file_path, 'r') as f:
        for line in f:
            fields.append(line.partition(" ")[2].strip())
    return fields


def generate_paper(i):
    """
    Build the LaTeX results table and compile the SOTF paper.

    Input:  {
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error

              (error)      - error text if return > 0
            }
    """
    r = ck.access({'action': 'find', 'module_uoa': 'program', 'data_uoa': 'simulator'})
    if r['return'] > 0: return r  # use standard error handling in the CK
    program_path = r['path']
    # change to tmp directory where all run files are
    os.chdir(program_path + "/tmp")
    # paths to the three simulator output files and the reference results
    stdout_path1 = program_path + "/tmp/stdout1.log"
    stdout_path2 = program_path + "/tmp/stdout2.log"
    stdout_path3 = program_path + "/tmp/stdout3.log"
    correct_path = program_path + "/correct.txt"
    num_displayed_teams = 10
    output_file = "out_table.tex"
    # get CK entry with the paper template
    r = ck.access({'action': 'find', 'module_uoa': 'paper', 'data_uoa': 'ck-march-madness'})
    if r['return'] > 0: return r  # use standard error handling in the CK
    paper_path = r['path']
    # copy tex/bib files to temp
    os.system("cp " + paper_path + "/SOTF.bib " + program_path + "/tmp")
    os.system("cp " + paper_path + "/SOTF.tex " + program_path + "/tmp")
    # read the team columns (was four duplicated loops)
    correct = _read_second_fields(correct_path)
    file_1_array = _read_second_fields(stdout_path1)
    file_2_array = _read_second_fields(stdout_path2)
    file_3_array = _read_second_fields(stdout_path3)
    # bug fix: never index past the shortest column (the original raised
    # IndexError when any file contained fewer than num_displayed_teams lines)
    num_rows = min(num_displayed_teams, len(correct),
                   len(file_1_array), len(file_2_array), len(file_3_array))
    # create table
    with open(output_file, 'w') as output:
        output.write('\\begin {table}')
        output.write('\\caption{CK Simulated Results}')
        output.write('\\begin{tabular}{ |p{2cm}|p{2cm}|p{2cm}|p{2cm}| }\n')
        output.write('\\hline\n')
        output.write(' 2019 Results & Home-Field Advantage 0 & Home-Field Advantage 4& Home-Field Advantage 8\\\\\n')
        output.write('\\hline\n')
        for x in range(num_rows):
            output.write('{} & {} & {} & {}\\\\\n'.format(correct[x], file_1_array[x], file_2_array[x], file_3_array[x]))
        output.write('\\hline\n')
        output.write('\\end{tabular}\n')
        output.write('\\end{table}\n')
    # compile files (pdflatex twice after bibtex so references resolve)
    os.system("pdflatex SOTF")
    os.system("bibtex SOTF")
    os.system("pdflatex SOTF")
    os.system("pdflatex SOTF")
    return {'return': 0}
##############################################################################
# open notebook from experiment run
def open_notebook(i):
    """
    Launch the Jupyter notebook registered in CK for this experiment.

    Input:  {
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error

              (error)      - error text if return > 0
            }
    """
    r = ck.access({'action': 'run', 'module_uoa': 'jnotebook', 'data_uoa': 'notebook'})
    if r['return'] > 0: return r  # use standard error handling in the CK
    # bug fix: the original fell through and returned None, which is not a
    # valid CK result dictionary; report success explicitly as documented
    return {'return': 0}
##############################################################################
def push(i):
    """
    Push the standard error of a selected simulator run to the results server.

    Input:  {
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error

              (error)      - error text if return > 0
            }
    """
    r = ck.access({'action': 'list_files', 'repo_uoa': 'march-madness',
                   'module_uoa': 'mm-simulator', 'data_uoa': 'results',
                   'skip_sort': 'yes'})
    if r['return'] > 0: return r  # use standard error handling in the CK
    file_info = r['list']
    num_files = r['number']
    res = []
    selection = None
    if num_files == 0:
        ck.out('No results to push')
        # bug fix: bail out instead of falling through and crashing on
        # "Selected " + None below
        return {'return': 1, 'error': 'no results to push'}
    elif num_files == 1:
        # only one candidate - select it automatically
        for item in file_info:
            selection = item
            # bug fix: the original printed the undefined name 'count' here
            # (NameError); the single entry is always index 0
            print("0) " + item)
    else:
        ck.out('')
        ck.out('There is more than one result')
        ck.out('')
        count = 0
        for item in file_info:
            print(str(count) + ") " + item)
            count += 1
            res.append(item)
        y = '\nSelect UOA (or press Enter for 0) '
        rx = ck.inp({'text': y})
        x = rx['string'].strip()
        if x == '': x = '0'
        selection = res[int(x)]
    print("\nSelected " + selection + "\n")
    r = ck.access({'action': 'find', 'module_uoa': 'mm-simulator', 'data_uoa': 'results'})
    if r['return'] > 0: return r  # bug fix: check result before using r['path']
    path = r['path']
    # scan from the end of the log; stop at the last "Standard Error" line
    se = None
    with open(path + "/" + selection, 'r') as file1:  # bug fix: close the file
        for line in reversed(list(file1)):
            if "Standard Error" in line:
                se = line.split()[2]
                break
    if se is None:
        # bug fix: 'se' was previously unbound when the marker was missing
        return {'return': 1, 'error': 'no Standard Error line found in ' + selection}
    # Access run parameters
    r = ck.access({'action': 'load', 'repo_uoa': 'march-madness', 'module_uoa': 'program', 'data_uoa': 'simulator'})
    if r['return'] > 0: return r  # use standard error handling in the CK
    r_vars = r['dict']['run_vars']
    if selection == 'stdout3.log':
        hfa = r_vars['home_field_advantage_3']
        apply_scaling = r_vars['apply_scaling_3']
    elif selection == 'stdout2.log':
        hfa = r_vars['home_field_advantage_2']
        apply_scaling = r_vars['apply_scaling_2']
    else:
        hfa = r_vars['home_field_advantage_1']
        apply_scaling = r_vars['apply_scaling_1']
    os.chdir(path + "/..")
    # write the summary consumed by the dashboard, then push it
    with open("results.json", "w") as f:
        f.write('{"hfa": ' + str(hfa) + ', "as": ' + str(apply_scaling) + ', "se": ' + se + '}')
    os.system('cb push-result mm-simulator-results --filename="results.json"')
    return {'return': 0}
| 31.662651 | 148 | 0.54277 | 1,757 | 13,140 | 3.940239 | 0.130336 | 0.055467 | 0.0273 | 0.030334 | 0.682508 | 0.661418 | 0.607251 | 0.588184 | 0.568395 | 0.555684 | 0 | 0.014645 | 0.267275 | 13,140 | 414 | 149 | 31.73913 | 0.704404 | 0.172603 | 0 | 0.599206 | 0 | 0.011905 | 0.317352 | 0.03193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019841 | false | 0 | 0.003968 | 0 | 0.039683 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9276b7e92fbafdac27d50333d01d384cadbdbcef | 1,087 | py | Python | deplugins/cfront_ssl.py | arndtroth/AWSomeOverview | e9010f4356215f5c00bb053a1bc6504c567ecf9d | [
"Apache-2.0"
] | 1 | 2018-06-20T13:27:52.000Z | 2018-06-20T13:27:52.000Z | deplugins/cfront_ssl.py | arndtroth/AWSomeOverview | e9010f4356215f5c00bb053a1bc6504c567ecf9d | [
"Apache-2.0"
] | 1 | 2019-03-15T12:30:56.000Z | 2019-03-15T12:30:56.000Z | deplugins/cfront_ssl.py | arndtroth/AWSomeOverview | e9010f4356215f5c00bb053a1bc6504c567ecf9d | [
"Apache-2.0"
] | null | null | null | """
Class for Data Extraction from CloudFront
"""
import boto3
from deplugins.base import AWSFact
class CloudFront(AWSFact):
    """Collects SSL/TLS configuration facts for every CloudFront distribution."""

    NAME = "CloudFront-SSL"
    OPTION = 'cfront_ssl'
    ORDERED_HEADINGS = ['Id', 'SSLSupportMethod', 'MinimumProtocolVersion', 'IAMCertificateId']

    def get_all_regions(self):
        # CloudFront is a global service, so a single region-less pass suffices.
        return [None]

    def retrieve(self, conn):
        # Tolerate an empty/missing listing via .get() defaults.
        listing = conn.list_distributions().get('DistributionList', {})
        for element in listing.get('Items', []):
            cert = element['ViewerCertificate']
            item = {
                "Id": element['Id'],
                "SSLSupportMethod": cert['SSLSupportMethod'],
                "MinimumProtocolVersion": cert['MinimumProtocolVersion'],
                "IAMCertificateId": cert['IAMCertificateId'],
            }
            self.data.setdefault('N/A', []).append(item)

    def connect(self, region):
        client = boto3.client('cloudfront', region_name=region)
        client.region_name = region
        return client
| 31.057143 | 97 | 0.633855 | 97 | 1,087 | 7.020619 | 0.463918 | 0.105727 | 0.035242 | 0.04699 | 0.096916 | 0.096916 | 0 | 0 | 0 | 0 | 0 | 0.002392 | 0.230911 | 1,087 | 34 | 98 | 31.970588 | 0.812201 | 0.103036 | 0 | 0 | 0 | 0 | 0.286453 | 0.068252 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.095238 | 0.047619 | 0.52381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92772f7652e5d7de73104cce45e1da31b37c887a | 2,348 | py | Python | 733.flood-fill.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 733.flood-fill.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 733.flood-fill.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=733 lang=python3
#
# [733] Flood Fill
#
# https://leetcode.com/problems/flood-fill/description/
#
# algorithms
# Easy (52.32%)
# Likes: 745
# Dislikes: 146
# Total Accepted: 86.6K
# Total Submissions: 163.5K
# Testcase Example: '[[1,1,1],[1,1,0],[1,0,1]]\n1\n1\n2'
#
#
# An image is represented by a 2-D array of integers, each integer representing
# the pixel value of the image (from 0 to 65535).
#
# Given a coordinate (sr, sc) representing the starting pixel (row and column)
# of the flood fill, and a pixel value newColor, "flood fill" the image.
#
# To perform a "flood fill", consider the starting pixel, plus any pixels
# connected 4-directionally to the starting pixel of the same color as the
# starting pixel, plus any pixels connected 4-directionally to those pixels
# (also with the same color as the starting pixel), and so on. Replace the
# color of all of the aforementioned pixels with the newColor.
#
# At the end, return the modified image.
#
# Example 1:
#
# Input:
# image = [[1,1,1],[1,1,0],[1,0,1]]
# sr = 1, sc = 1, newColor = 2
# Output: [[2,2,2],[2,2,0],[2,0,1]]
# Explanation:
# From the center of the image (with position (sr, sc) = (1, 1)), all pixels
# connected
# by a path of the same color as the starting pixel are colored with the new
# color.
# Note the bottom corner is not colored 2, because it is not 4-directionally
# connected
# to the starting pixel.
#
#
#
# Note:
# The length of image and image[0] will be in the range [1, 50].
# The given starting pixel will satisfy 0 and 0 .
# The value of each color in image[i][j] and newColor will be an integer in [0,
# 65535].
#
#
# @lc code=start
class Solution:
    def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
        """Recolor the 4-connected region of image containing (sr, sc) in place.

        Returns the (mutated) image.
        """
        original = image[sr][sc]
        if original == newColor:
            # Nothing to repaint; proceeding would re-visit cells forever.
            return image
        rows, cols = len(image), len(image[0])
        # Iterative DFS over the region of cells matching the original color.
        stack = [(sr, sc)]
        while stack:
            r, c = stack.pop()
            if 0 <= r < rows and 0 <= c < cols and image[r][c] == original:
                image[r][c] = newColor
                stack.extend(((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)))
        return image
# @lc code=end
| 30.493506 | 100 | 0.617121 | 383 | 2,348 | 3.78329 | 0.347258 | 0.012422 | 0.077295 | 0.011042 | 0.140787 | 0.140787 | 0.140787 | 0.120083 | 0.086957 | 0.074534 | 0 | 0.05436 | 0.247871 | 2,348 | 76 | 101 | 30.894737 | 0.766138 | 0.673765 | 0 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9278b5055cf7701d71ff39aa690e2ba8bd96a0df | 2,205 | py | Python | src/graph_transpiler/webdnn/backend/webgpu/kernels/reshape.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | 1 | 2018-07-26T13:52:21.000Z | 2018-07-26T13:52:21.000Z | src/graph_transpiler/webdnn/backend/webgpu/kernels/reshape.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/backend/webgpu/kernels/reshape.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | from typing import List
from webdnn.backend.code_generator.allocator import MemoryLayout
from webdnn.backend.code_generator.injectors.buffer_injector import BufferInjector
from webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector
from webdnn.backend.webgpu.generator import WebGPUDescriptorGenerator
from webdnn.backend.webgpu.kernel import Kernel, GPUSize
from webdnn.backend.webgpu.preset_placeholders import MAX_THREADS_PER_THREADGROUP
from webdnn.graph.operators.reshape import Reshape
from webdnn.util.misc import mul
template = """
kernel void %%FUNC_NAME%%(device float * %%STATIC_BUFFER%%[[buffer(0)]],
device float * %%DYNAMIC_BUFFER%%[[buffer(1)]],
const device int * %%META_BUFFER%% [[buffer(2)]],
uint index[[thread_position_in_grid]],
uint num_threads[[threads_per_grid]])
{
const device float *x = %%LOAD_BUFFER(reshape_x)%%;
device float *y = %%LOAD_BUFFER(reshape_y)%%;
const int N = %%LOAD_BUFFER(reshape_N)%%;
for (int gid = index; gid < N; gid += num_threads) {
y[gid] = x[gid];
}
}
"""
@WebGPUDescriptorGenerator.register_handler(Reshape)
def reshape(op: Reshape, memory_layout: MemoryLayout) -> List[Kernel]:
x = op.inputs["x"]
y = op.outputs["y"]
if memory_layout[x].offset == memory_layout[y].offset:
# Inplace
return []
assert x.order == op.parameters["in_order"]
assert y.order == op.parameters["out_order"]
assert y.size == mul(op.parameters["out_shape"])
buffer_injector = BufferInjector()
buffer_injector.register({
"reshape_x": memory_layout[x],
"reshape_y": memory_layout[y],
"reshape_N": y.size,
})
name_injector = KernelNameInjector(op)
source = template
source = buffer_injector.inject(source)
source = name_injector.inject(source)
kernel = Kernel(
{name_injector.name: source},
name_injector.name,
GPUSize(8, 1, 1),
GPUSize(MAX_THREADS_PER_THREADGROUP, 1, 1),
buffer_injector.buffer,
buffer_injector.unresolved_value_list
)
return [kernel]
| 32.910448 | 91 | 0.673469 | 263 | 2,205 | 5.444867 | 0.307985 | 0.055866 | 0.071229 | 0.043994 | 0.075419 | 0.054469 | 0 | 0 | 0 | 0 | 0 | 0.0046 | 0.211338 | 2,205 | 66 | 92 | 33.409091 | 0.818861 | 0.003175 | 0 | 0 | 0 | 0 | 0.297814 | 0.095173 | 0 | 0 | 0 | 0 | 0.057692 | 1 | 0.019231 | false | 0 | 0.173077 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9278cc591836f5c26f4155dd214702456cfc9574 | 6,624 | py | Python | Ping-pong.py | IlyA1536/Ping-Pong | 662d0e5801fab4bae949afeae6e67d050c8b58f4 | [
"CC0-1.0"
] | null | null | null | Ping-pong.py | IlyA1536/Ping-Pong | 662d0e5801fab4bae949afeae6e67d050c8b58f4 | [
"CC0-1.0"
] | null | null | null | Ping-pong.py | IlyA1536/Ping-Pong | 662d0e5801fab4bae949afeae6e67d050c8b58f4 | [
"CC0-1.0"
] | null | null | null |
from pygame import *
from random import randint
#фоновая музыка
mixer.init()
mixer.music.load('music.mp3')
mixer.music.play()
#подключение звуков
GOAL_sound = mixer.Sound('GOAL.wav')
hit_sound = mixer.Sound('HIT.wav')
#шрифты и надписи
font.init()
font1 = font.SysFont("Arial", 73)
win_left_text = font1.render('Выйграл левый игрок!', True, (255, 0, 0))
win_left_text2 = font1.render('SPACE для перезапуска', True, (255, 0, 0))
win_right_text = font1.render('Выйграл правый игрок!', True, (180, 0, 0))
win_right_text2 = font1.render('SPACE для перезапуска', True, (180, 0, 0))
font2 = font.SysFont("Arial", 20)
off_music = font2.render('Нажмите Р для выкл. музыки', True, (255, 0, 0))
on_music = font2.render('Нажмите O для вкл. музыки', True, (255, 0, 0))
#нам нужны такие картинки:
img_back = "background.jpg"
img_ball = "ball.png"
img_plat_right = 'plat_right.png'
img_plat_left = 'plat_left.png'
left_score = 0 #счёт левого игрока
right_score = 0 #счёт правого игрока
# Parent class shared by all other sprites
class GameSprite(sprite.Sprite):
    """Base sprite: loads a scaled image and can blit itself to the window."""

    def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):
        # initialise the underlying pygame Sprite machinery
        super().__init__()
        # every sprite keeps its scaled image...
        loaded = image.load(player_image)
        self.image = transform.scale(loaded, (size_x, size_y))
        self.speed = player_speed
        # ...and the rectangle it is drawn into
        self.rect = self.image.get_rect()
        self.rect.x = player_x
        self.rect.y = player_y

    def reset(self):
        """Draw the sprite at its current position on the global window."""
        window.blit(self.image, (self.rect.x, self.rect.y))
# Right player's paddle
class Player_right(GameSprite):
    """Paddle moved with the Up/Down arrow keys, clamped to the window."""

    def update(self):
        pressed = key.get_pressed()
        moving_up = pressed[K_UP] and self.rect.y > 5
        moving_down = pressed[K_DOWN] and self.rect.y < win_height - 105
        if moving_up:
            self.rect.y -= self.speed
        if moving_down:
            self.rect.y += self.speed
# Left player's paddle
class Player_left(GameSprite):
    """Paddle moved with the W/S keys, clamped to the window."""

    def update(self):
        pressed = key.get_pressed()
        moving_up = pressed[K_w] and self.rect.y > 5
        moving_down = pressed[K_s] and self.rect.y < win_height - 105
        if moving_up:
            self.rect.y -= self.speed
        if moving_down:
            self.rect.y += self.speed
# Ball sprite class
class Ball(GameSprite):
    # ball movement
    def update(self):
        # NOTE(review): these two bare names are no-op expressions - the
        # actual position change (ball.rect.x += speed_x, etc.) happens in
        # the main loop, so this method currently does nothing. Confirm
        # whether the movement was meant to live here.
        speed_x
        speed_y
#Создаём окошко
win_width = 700
win_height = 500
display.set_caption("Ping-pong")
window = display.set_mode((win_width, win_height))
background = transform.scale(image.load(img_back), (win_width, win_height))
#создаём спрайты
left_plat = Player_left(img_plat_left, 0, 150, 30, 100, 10)
right_plat = Player_right(img_plat_right, 670, 150, 30, 100, 10)
ball = Ball(img_ball, 330, 220, 50, 50, 8)
#переменная "игра закончилась": как только там True, в основном цикле перестают работать спрайты
finish = False
#Основной цикл игры:
run = True #флаг сбрасывается кнопкой закрытия окна
end_game = False #окончание игры
speed_x = 7
speed_y = 7
while run:
#событие нажатия на кнопку “Закрыть”
for e in event.get():
if e.type == QUIT:
run = False
if not finish:
ball.rect.x += speed_x
ball.rect.y += speed_y
#обновляем фон
window.blit(background,(0,0))
#пишем текст на экране (счётчик)
left_score_text = font2.render(str(left_score), 1, (255, 0, 0))
window.blit(left_score_text, (350, 10))
right_score_text = font2.render(str(right_score), 1, (255, 0, 0))
window.blit(right_score_text, (400, 10))
#текст вкл/выкл музыка
window.blit(on_music, (0, 455))
window.blit(off_music, (0, 475))
#производим движения спрайтов
left_plat.update()
right_plat.update()
ball.update()
#обновляем их в новом местоположении при каждой итерации цикла
left_plat.reset()
right_plat.reset()
ball.reset()
#отбивание мяча от платформ
if sprite.collide_rect(left_plat, ball) or sprite.collide_rect(right_plat ,ball):
speed_x *= -1
hit_sound.play()
#увеличени скорости
if speed_x <= 0:
speed_x = speed_x + -1
speed_y = speed_y + -1
else:
speed_x = speed_x + 1
speed_y = speed_y + 1
#отбивание мяча от верхних и нижних стенок
if ball.rect.y > win_height - 50 or ball.rect.y < 0:
speed_y *= -1
hit_sound.play()
#гол левого
if ball.rect.x < 0:
right_score = right_score + 1
GOAL_sound.play()
ball = Ball(img_ball, 330, 220, 50, 50, 8)
speed_x = -7
speed_y = -7
#гол правого
if ball.rect.x > 700:
left_score = left_score + 1
GOAL_sound.play()
ball = Ball(img_ball, 330, 220, 50, 50, 8)
speed_x = 7
speed_y = 7
#условие победы левого игрока
if left_score == 5:
end_game = True
window.blit(win_left_text, (40, 200))
window.blit(win_left_text2, (10, 260))
speed_x = 0
speed_y = 0
#условие победы правого игрока
if right_score == 5:
end_game = True
window.blit(win_right_text, (40, 200))
window.blit(win_right_text2, (10, 260))
speed_x = 0
speed_y = 0
#перезапуск игры
if end_game == True:
if e.type == KEYDOWN:
if e.key == K_SPACE:
end_game = False
left_score = 0
right_score = 0
ball = Ball(img_ball, 330, 220, 50, 50, 8)
speed_x = 7
speed_y = 7
#включение/выключение фоновой музыки
if e.type == KEYDOWN:
if e.key == K_p:
mixer.music.pause()
if e.type == KEYDOWN:
if e.key == K_o:
mixer.music.unpause()
display.update()
#цикл срабатывает каждую 0.05 секунд
time.delay(50)
| 31.69378 | 97 | 0.575483 | 879 | 6,624 | 4.166098 | 0.276451 | 0.022938 | 0.024577 | 0.009831 | 0.29574 | 0.231841 | 0.216002 | 0.183233 | 0.149645 | 0.129437 | 0 | 0.049642 | 0.324879 | 6,624 | 208 | 98 | 31.846154 | 0.769231 | 0.177989 | 0 | 0.279412 | 0 | 0 | 0.043495 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036765 | false | 0 | 0.014706 | 0 | 0.080882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92794c155ed2c47d8f504e48e0719392aee204cd | 1,277 | py | Python | openwhisk/devices.py | jdsheehan/cristata | 7115417c63f193b2eee85a6e0461106880e718aa | [
"Apache-2.0"
] | 5 | 2018-07-30T18:50:55.000Z | 2019-03-20T18:07:05.000Z | openwhisk/devices.py | jdsheehan/cristata | 7115417c63f193b2eee85a6e0461106880e718aa | [
"Apache-2.0"
] | 1 | 2018-06-25T21:08:35.000Z | 2018-07-13T21:23:39.000Z | openwhisk/devices.py | jdsheehan/cristata | 7115417c63f193b2eee85a6e0461106880e718aa | [
"Apache-2.0"
] | 7 | 2018-06-25T21:09:29.000Z | 2020-06-29T14:09:01.000Z | #!/usr/bin/env python
#author mark_purcell@ie.ibm.com
import json
import os
import requests
import csv
from db2 import DB2
# Stored-procedure invocation template; {database} is the procedure name.
SQL_TEMPLATE = 'CALL {database}(); '


def build_query(database):
    """Return the (sql, ok) pair for invoking the given stored procedure."""
    statement = SQL_TEMPLATE.format(database=database)
    return statement, True
def process_response(text):
    """Parse CSV text from the DB2 REST call into a device summary dict.

    The first field of every non-empty record is treated as a device id.
    """
    lines = text.split('\n')
    devices = []
    if lines:
        # csv handles quoted fields that themselves contain commas
        for record in csv.reader(lines, delimiter=',', lineterminator='\\n'):
            if record:
                devices.append(record[0])
    return {'devices': devices, 'deviceCount': len(devices), 'deviceMetadata': ["Device Id"]}
def main(args):
    """OpenWhisk action entry point: query DB2 for devices and summarise them.

    On any failure returns a 400-style error dict instead of raising.
    """
    print('Args %r' % args)
    result = {}
    try:
        handle = DB2(args, ['database', 'database_userid', 'database_password', 'database_rest_url'])
        statement, _ = build_query(args['database'])
        reply = handle.execute(statement)
        if reply.status_code != 200:
            raise Exception(reply.json())
        result = handle.success(process_response(reply.text))
    except Exception as exc:
        result = {'status': 400, 'state': 'Failed', 'result': str(exc)}
        print('Error: %r' % result)
    if args.get('debug', False):
        print('%r' % result)
    return result
| 22.017241 | 98 | 0.588097 | 160 | 1,277 | 4.6 | 0.4875 | 0.074728 | 0.038043 | 0.059783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01594 | 0.263117 | 1,277 | 57 | 99 | 22.403509 | 0.766206 | 0.039154 | 0 | 0 | 0 | 0 | 0.144608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.027027 | 0.135135 | 0 | 0.297297 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
927c35a89cf67b0b0ea4047744d2f4c1543ea266 | 554 | py | Python | faq/urls/normal.py | coordt/django-faq | 1aa924ee5806dd8611fcf4a83ec46182b191ab60 | [
"BSD-3-Clause"
] | 1 | 2019-06-14T15:29:49.000Z | 2019-06-14T15:29:49.000Z | faq/urls/normal.py | natgeosociety/django-faq | 1aa924ee5806dd8611fcf4a83ec46182b191ab60 | [
"BSD-3-Clause"
] | null | null | null | faq/urls/normal.py | natgeosociety/django-faq | 1aa924ee5806dd8611fcf4a83ec46182b191ab60 | [
"BSD-3-Clause"
] | 1 | 2016-05-05T21:56:37.000Z | 2016-05-05T21:56:37.000Z | # -*- coding: utf-8 -*-
from django.conf.urls import url
from faq.views import TopicListView, TopicDetailView, question_detail
# Include these patterns if you want URLs like:
#
# /faq/
# /faq/topic/
# /faq/topic/#question
#
# Route table; the names enable reverse() lookups elsewhere in the project.
urlpatterns = [
    # /faq/ - list of all topics
    url(r'^$', TopicListView.as_view(), name='faq-topic-list'),
    # /faq/<topic>/ - a single topic page
    url(
        r'^(?P<slug>[-\w]+)/$',
        TopicDetailView.as_view(),
        name='faq-topic-detail'),
    # /faq/<topic>/#<question> - per-question URL
    # NOTE(review): a '#' fragment is never sent to the server by browsers,
    # so this pattern only matches when built/reversed internally - confirm
    # it is intended for link generation rather than request matching.
    url(
        r'^(?P<topic_slug>[-\w]+)/#(?P<slug>[-\w]+)/$',
        question_detail,
        name='faq-question-detail'),
]
| 21.307692 | 69 | 0.579422 | 68 | 554 | 4.647059 | 0.470588 | 0.101266 | 0.063291 | 0.082278 | 0.113924 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002294 | 0.212996 | 554 | 25 | 70 | 22.16 | 0.722477 | 0.200361 | 0 | 0.153846 | 0 | 0 | 0.260369 | 0.099078 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
927cacc6f1985cec9c819c64c22a24d06d77cb91 | 22,599 | py | Python | bitcoin/bitcoin.py | lucasmoten/pizero-minipitft | fcc4232348f0ebfbb6891fe52bf37036350a618a | [
"MIT"
] | 1 | 2020-11-29T21:24:31.000Z | 2020-11-29T21:24:31.000Z | bitcoin/bitcoin.py | lucasmoten/pizero-minipitft | fcc4232348f0ebfbb6891fe52bf37036350a618a | [
"MIT"
] | 11 | 2020-12-03T16:40:51.000Z | 2020-12-07T15:47:31.000Z | bitcoin/bitcoin.py | lucasmoten/pizero-minipitft | fcc4232348f0ebfbb6891fe52bf37036350a618a | [
"MIT"
] | 1 | 2022-03-30T05:18:42.000Z | 2022-03-30T05:18:42.000Z | # -*- coding: utf-8 -*-
import time
import subprocess
import digitalio
import board
from PIL import Image, ImageDraw, ImageFont
import adafruit_rgb_display.st7789 as st7789
import requests
#####################################################################################################
# You can disable panels that you don't want to display, or cannot support
enablePanelRunTheNumbers = False
enablePanelRollerCoasterGuy = True
enablePanelMempoolBlocks = True
enablePanelSatsPerFiatUnit = True
# Toggle for whether panels should auto scan to the next panel
autopanel = True
# URLs for endpoints to get data needed for driving display
mempoolurl = "https://mempool.space/api/v1/fees/mempool-blocks"
mempoolblockheighturl = "https://mempool.space/api/blocks/tip/height"
numbersurl = "http://your.own.node:1839/the_numbers_latest.txt"
priceurl = "https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies=usd"
#####################################################################################################
# Panel identifiers: indices into the display rotation
panelDiagnostic = 0 # to enable at runtime, press buttons in order: top-bottom-top-bottom-top
panelRunTheNumbers = 1
panelRollerCoasterGuy = 2
panelMempoolBlocks = 3
panelSatsPerFiatUnit = 4

# Start timer to have a basis for elapsed time
start = time.time()
# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = None

# Config for display baudrate (default max is 24mhz):
BAUDRATE = 64000000

# Setup SPI bus using hardware SPI:
spi = board.SPI()

# Create the ST7789 display:
# x_offset/y_offset position the 135x240 panel within the controller RAM
disp = st7789.ST7789(
    spi,
    cs=cs_pin,
    dc=dc_pin,
    rst=reset_pin,
    baudrate=BAUDRATE,
    width=135,
    height=240,
    x_offset=53,
    y_offset=40,
)

# Button assignment: the two tactile buttons on the mini PiTFT
buttonA = digitalio.DigitalInOut(board.D23)
buttonB = digitalio.DigitalInOut(board.D24)
buttonA.switch_to_input()
buttonB.switch_to_input()

# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
height = disp.width  # we swap height/width to rotate it to landscape!
width = disp.height
image = Image.new("RGB", (width, height))
rotation = 90

# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)

# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
disp.image(image, rotation)

# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height - padding

# Turn on the backlight
backlight = digitalio.DigitalInOut(board.D22)
backlight.switch_to_output()
backlight.value = True

# Panels: rotation state shared with the functions below
minPanel = 1  # diagnostic panel is panel 0. This starts with diagnostics disabled
maxPanel = 5  # exclusive upper bound for panel indices
currentPanel = 2
panelDir = 1  # +1 / -1: direction the scan moves through panels
drawnPanel = 0
dtPanel = time.time() - start
targetFPS = 5  # realistically due to rendering speed wont get much higher than 5 fps
secondsPerPanel = 10
# Colors (hex RGB strings accepted by Pillow)
colorbitcoinorange = "#F7931A"
colorbitcoingrey = "#4D4D4D"
colorblack = "#000000"
colorwhite = "#FFFFFF"
colordarkgrey = "#131313"
colormediumgrey = "#353535"
coloryellow = "#FFFF00"
colorgreen = "#00FF00"
colorblue = "#0000FF"
colorpurple = "#FF00FF"
colorred = "#FF0000"
colormempooltext = "#1BD8F4"
# used for mempool block colors based on median fee
colorfee10 = "#039BE5" # blue
colorfee20 = "#11960F" # green
colorfee50 = "#FDD835" # yellow
colorfee100 = "#905206" # orange
colorfee200 = "#B71C1C" # red
colorfee300 = "#3C11C1" # purple

# gradient array for sats per fiat unit display
satscolors = [
    "#FF0000","#FF3F00","#FF7F00","#FFBF00","#FFFF00","#7FFF00","#00FF00","#00FF7F",
    "#FF3F00","#FF7F00","#FFBF00","#FFFF00","#7FFF00","#00FF00","#00FF7F","#00FFFF",
    "#FF7F00","#FFBF00","#FFFF00","#7FFF00","#00FF00","#00FF7F","#00FFFF","#007FFF",
    "#FFBF00","#FFFF00","#7FFF00","#00FF00","#00FF7F","#00FFFF","#007FFF","#0000FF"
]

# Images for rollercoaster guy: one full-screen 135x240 pose bitmap per price mode
imageRCDown = (Image.open('images/rollercoasterguy-135x240-down.bmp')).convert(mode="RGB")
imageRCFlat = (Image.open('images/rollercoasterguy-135x240-flat.bmp')).convert(mode="RGB")
imageRCFly = (Image.open('images/rollercoasterguy-135x240-fly.bmp')).convert(mode="RGB")
imageRCTopLeft = (Image.open('images/rollercoasterguy-135x240-topleft.bmp')).convert(mode="RGB")
imageRCTopRight = (Image.open('images/rollercoasterguy-135x240-topright.bmp')).convert(mode="RGB")
imageRCUp = (Image.open('images/rollercoasterguy-135x240-up.bmp')).convert(mode="RGB")

# Bitcoin logo
imageBTC = (Image.open('images/bitcoinlogo-100x100.bmp')).convert(mode="RGB")

# Load in some fonts
fontST = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 12)
fontST2 = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 16)
fontBTC = ImageFont.truetype("/usr/share/fonts/truetype/ubuntu/Ubuntu-BI.ttf", 72, encoding="unic")
fontBTC2 = ImageFont.truetype("/usr/share/fonts/truetype/ubuntu/Ubuntu-BI.ttf", 24, encoding="unic")
# Initial stats
# Shell scripts for system monitoring from here:
# https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-disk-usage-and-cpu-load
# The dtXXX variables record (seconds since start) when each value was last
# refreshed, so the main loop can refresh them on independent schedules.
cmd = "hostname -I | cut -d' ' -f1"
IP = "IP: " + subprocess.check_output(cmd, shell=True).decode("utf-8")
cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
CPU = subprocess.check_output(cmd, shell=True).decode("utf-8")
cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%s MB %.2f%%\", $3,$2,$3*100/$2 }'"
dtCPU = time.time() - start
MemUsage = subprocess.check_output(cmd, shell=True).decode("utf-8")
cmd = 'df -h | awk \'$NF=="/"{printf "Disk: %d/%d GB %s", $3,$2,$5}\''
dtMEM = time.time() - start
Disk = subprocess.check_output(cmd, shell=True).decode("utf-8")
cmd = "cat /sys/class/thermal/thermal_zone0/temp | awk '{printf \"CPU Temp: %.1f C\", $(NF-0) / 1000}'"  # pylint: disable=line-too-long
dtDSK = time.time() - start
Temp = subprocess.check_output(cmd, shell=True).decode("utf-8")
dtTMP = time.time() - start

# Initial mempool (fetched only when the mempool panel is enabled)
mempooldata = ""
dtMPB = time.time() - start
if enablePanelMempoolBlocks:
    mempooldata = requests.get(mempoolurl)

# Initial numbers
numbersdata = ""
dtNUM = time.time() - start
if enablePanelRunTheNumbers:
    numbersdata = requests.get(numbersurl)

# Initial price
# NOTE(review): currentprice/pricemode only exist when one of the price
# panels is enabled; any code reading them unconditionally would raise
# NameError - confirm all readers are guarded by the same flags.
pricedata = ""
dtPRC = time.time() - start
if enablePanelRollerCoasterGuy or enablePanelSatsPerFiatUnit:
    pricedata = requests.get(priceurl)
    currentprice = pricedata.json()['bitcoin']['usd']
    pricemode = 0

# count for metrics and debounce control
counter = 0
buttonWait = 0
loopstart = time.time()
buttonsPressed = ""
# Functions ------------------------------------------------------------------------------------------------------
def blackscreen():
    """Clear the whole frame-buffer image to black."""
    full_frame = (0, 0, width, height)
    draw.rectangle(full_frame, outline=0, fill=0)
def check_for_new_price():
    """Refresh the cached BTC/USD price at most every 5 minutes.

    Sets pricemode to -2/-1/0/1/2 depending on how far the price moved
    since the last fetch (used to pick the rollercoaster-guy pose).
    """
    global dtPRC
    global currentprice
    global pricemode
    if elapsed > (dtPRC + 300):  # 5 minutes
        try:
            pricedata = requests.get(priceurl)
            dtPRC = elapsed
        except Exception:
            # fake advance our time so we try again later, then bail out.
            # bug fix: the original fell through and raised NameError on the
            # unbound local 'pricedata' whenever the fetch failed
            dtPRC = dtPRC + 120
            return
        newprice = pricedata.json()['bitcoin']['usd']
        pricediff = newprice - currentprice
        # bug fix: differences of exactly +/-5 or +/-100 previously matched
        # no branch and left pricemode stale; boundaries are now inclusive
        if pricediff >= 100:
            pricemode = 2
        elif pricediff > 5:
            pricemode = 1
        elif pricediff >= -5:
            pricemode = 0
        elif pricediff > -100:
            pricemode = -1
        else:
            pricemode = -2
        currentprice = newprice
def satssquare(dc, dr, sats, satscolor):
    """Fill one 10x10 dot grid cell with up to 100 dots, one per sat.

    dc/dr are the grid column/row of the cell (30 px pitch), sats is the
    number of 2x2 px dots to draw (capped at 100), satscolor the colour.
    """
    dots = min(sats, 100)
    base_x = dc * 30
    base_y = dr * 30
    # fill row-major, same order as the original nested loops
    for idx in range(dots):
        left = base_x + (idx % 10) * 3
        top = base_y + (idx // 10) * 3
        draw.rectangle(((left, top), (left + 1, top + 1)), satscolor, satscolor)
def _draw_centered_text(center_x, top_y, text, font, color):
    """Draw text horizontally centered on center_x with its top edge at top_y."""
    w, h = draw.textsize(text, font)
    ox, oy = font.getoffset(text)
    # include the font's bearing offset so centering matches the rendered glyphs
    w += ox
    h += oy
    draw.text((center_x - (w / 2), top_y), text, font=font, fill=color)


def drawmempoolblock(x=0, y=0, medianFee=999, feeRangeMin=999, feeRangeMax=999, nTx=-1, blockLabel=""):
    """Render one pseudo-3D mempool block with its fee stats at (x, y).

    The front-face colour encodes the median fee band, mirroring the
    colour scheme used by mempool.space.
    """
    blockcolor = colorblack
    textcolor = colorwhite
    if medianFee < 10:
        blockcolor = colorfee10  # blue
    elif medianFee < 20:
        blockcolor = colorfee20  # green
    elif medianFee < 50:
        blockcolor = colorfee50  # yellow
        textcolor = colorblack  # dark text for contrast on the light face
    elif medianFee < 100:
        blockcolor = colorfee100  # orange
    elif medianFee < 200:
        blockcolor = colorfee200  # red
    elif medianFee < 300:
        blockcolor = colorfee300  # purple
    else:
        blockcolor = colorblack
    by = 12  # vertical offset leaving room for the label above the block
    # top face, left face, then front face of the isometric block
    draw.polygon(((x,y+by),(x+103,y+by),(x+118,y+by+15),(x+15,y+by+15)), fill=colormediumgrey, outline=colormediumgrey)
    draw.polygon(((x,y+by),(x+15,y+by+15),(x+15,y+by+118),(x,y+by+103)), fill=colordarkgrey, outline=colordarkgrey)
    draw.polygon(((x+15,y+by+15),(x+118,y+by+15),(x+118,y+by+118),(x+15,y+by+118)), fill=blockcolor, outline=blockcolor)
    # all five labels share the same horizontal centre line (was five
    # copies of the same measure-and-centre code)
    center_x = x + 15 + (103 / 2)
    _draw_centered_text(center_x, y + 0, blockLabel, fontST2, colormempooltext)
    _draw_centered_text(center_x, y + by + 20, "~%s sat/vB" % (str(medianFee)), fontST2, textcolor)
    _draw_centered_text(center_x, y + by + 45, "%s-%s sat/vB" % (str(feeRangeMin), str(feeRangeMax)), fontST, textcolor)
    _draw_centered_text(center_x, y + by + 85, "%s" % (str(nTx)), fontST, textcolor)
    _draw_centered_text(center_x, y + by + 95, "transactions", fontST, textcolor)
def looppanels():
    """Wraps currentPanel back inside the half-open range [minPanel, maxPanel)."""
    global currentPanel
    if currentPanel < minPanel:
        # scrolled past the first panel: jump to the last one
        currentPanel = maxPanel - 1
    elif currentPanel >= maxPanel:
        # scrolled past the last panel: jump back to the first one
        currentPanel = minPanel
def is_current_panel_enabled():
    """Returns False when currentPanel points at a panel the user disabled."""
    panel_switches = (
        (panelMempoolBlocks, enablePanelMempoolBlocks),
        (panelRunTheNumbers, enablePanelRunTheNumbers),
        (panelRollerCoasterGuy, enablePanelRollerCoasterGuy),
        (panelSatsPerFiatUnit, enablePanelSatsPerFiatUnit),
    )
    for panel, enabled in panel_switches:
        if currentPanel == panel and not enabled:
            return False
    return True
def getuserinputs():
    """Handles the two hardware buttons and automatic panel advancement.

    Reads module globals: elapsed, counter, buttonA, buttonB, targetFPS,
    secondsPerPanel.  Mutates the navigation state listed in the global
    declarations below.  Falls back to (and locks onto) the diagnostic
    panel when every other panel is disabled.
    """
    global panelDir
    global currentPanel
    global buttonWait
    global buttonsPressed
    global autopanel
    global minPanel
    global maxPanel
    # User inputs
    if elapsed > buttonWait:
        # just button A (top) pressed
        # NOTE(review): .value truthiness is treated as "pressed" here, but
        # these boards often wire buttons active-low -- confirm the wiring
        if buttonA.value and not buttonB.value:
            panelDir = -1
            currentPanel = currentPanel + panelDir
            buttonWait = elapsed + .4  # debounce: ignore buttons for 0.4 s
            buttonsPressed = buttonsPressed + "A"
            autopanel = False
        # just button B (bottom) pressed
        if buttonB.value and not buttonA.value:
            panelDir = 1
            currentPanel = currentPanel + panelDir
            buttonWait = elapsed + .4
            buttonsPressed = buttonsPressed + "B"
            autopanel = False
        # both buttons put in auto scan mode
        if not buttonA.value and not buttonB.value:
            panelDir = 1
            autopanel = True
            buttonWait = elapsed + .4
        # secret knock: alternating A/B presses unlock the diagnostic panel
        if buttonsPressed[-5:] == "ABABA":
            # enable diagnostic panel
            minPanel = panelDiagnostic
            buttonsPressed = ""
    # advance panel approx every 10 seconds if auto scan mode
    if autopanel and (counter % (targetFPS * secondsPerPanel) == 0):
        currentPanel = currentPanel + panelDir
    looppanels()
    # adjust based on panels enabled; bounded by maxPanel tries so a fully
    # disabled configuration cannot spin forever
    panelTests = maxPanel
    while ((not is_current_panel_enabled()) and (panelTests > 0)):
        currentPanel = currentPanel + panelDir
        panelTests = panelTests - 1
        looppanels()
    if panelTests == 0:
        # nothing is enabled: pin the UI to the diagnostic panel and quit
        autopanel = False
        currentPanel = panelDiagnostic
        minPanel = panelDiagnostic
        maxPanel = panelDiagnostic + 1
        renderPanelDiagnostic()
        exit()
def renderPanelDiagnostic():
    """Draws the system diagnostics panel (IP, CPU, memory, disk, temperature).

    Shells out to standard Linux tools; each statistic is cached with its own
    refresh interval so the per-frame cost stays low.  Also shows the frame
    counter, measured FPS and the sleep target for the main loop.
    """
    global drawnPanel
    global CPU
    global dtCPU
    global MemUsage
    global dtMEM
    global Disk
    global dtDSK
    global Temp
    global dtTMP
    global elapsed
    global maxPanel
    global panelDiagnostic
    drawnPanel = panelDiagnostic
    # clear the whole screen to black before drawing
    draw.rectangle((0, 0, width, height), outline=0, fill=0)
    # Dont update on every cycle. Too taxing, little change
    # Shell scripts for system monitoring from here:
    # https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-disk-usage-and-cpu-load
    if elapsed > (dtCPU + 30): # 30 seconds
        cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
        CPU = subprocess.check_output(cmd, shell=True).decode("utf-8")
        dtCPU = elapsed
    if elapsed > (dtMEM + 90): # 1.5 minutes
        cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%s MB %.2f%%\", $3,$2,$3*100/$2 }'"
        MemUsage = subprocess.check_output(cmd, shell=True).decode("utf-8")
        dtMEM = elapsed
    if elapsed > (dtDSK + 1800): # 30 minutes
        cmd = 'df -h | awk \'$NF=="/"{printf "Disk: %d/%d GB %s", $3,$2,$5}\''
        Disk = subprocess.check_output(cmd, shell=True).decode("utf-8")
        dtDSK = elapsed
    if elapsed > (dtTMP + 20): # 20 seconds
        cmd = "cat /sys/class/thermal/thermal_zone0/temp | awk '{printf \"CPU Temp: %.1f C\", $(NF-0) / 1000}'" # pylint: disable=line-too-long
        Temp = subprocess.check_output(cmd, shell=True).decode("utf-8")
        dtTMP = elapsed
    # Write out the statistics
    x = 0
    y = top
    if maxPanel == panelDiagnostic + 1:
        # diagnostic is the only reachable panel, so every other panel is off
        draw.text((x, y), "NO PANELS ENABLED", font=fontST, fill=colorred)
        y += fontST.getsize("0")[1]
    draw.text((x, y), IP, font=fontST, fill=colorwhite)
    y += fontST.getsize(IP)[1]
    draw.text((x, y), CPU, font=fontST, fill=coloryellow)
    y += fontST.getsize(CPU)[1]
    draw.text((x, y), MemUsage, font=fontST, fill=colorgreen)
    y += fontST.getsize(MemUsage)[1]
    draw.text((x, y), Disk, font=fontST, fill=colorblue)
    y += fontST.getsize(Disk)[1]
    draw.text((x, y), Temp, font=fontST, fill=colorpurple)
    y += fontST.getsize(Temp)[1]
    # Counter, FPS, and Sleep Target aiming for 5 FPS is updated every cycle
    y += 10
    draw.text((x, y), "Counter:" + str(counter), font=fontST, fill=colorbitcoinorange)
    y += fontST.getsize("0")[1]
    fps = counter / elapsed
    draw.text((x, y), "FPS: " + str(fps), font=fontST, fill=colorbitcoinorange)
    y += fontST.getsize("0")[1]
    elapsed = time.time() - loopstart
    st = (counter * (1 / targetFPS)) - elapsed
    draw.text((x, y), "Sleep Target: " + str(st), font=fontST, fill=colorbitcoinorange)
    disp.image(image, rotation)
def renderPanelRunTheNumbers():
    """Draws the block-height / total-supply panel.

    Refreshes the remote data at most every 5 minutes; when a fetch fails
    the next attempt is delayed a minute and the cached response is reused.
    """
    global drawnPanel
    global dtPanel
    global numbersdata
    global dtNUM
    drawnPanel = panelRunTheNumbers
    dtPanel = elapsed
    # Update data if enough time has past
    if elapsed > (dtNUM + 300): # 5 minutes
        try:
            numbersdata = requests.get(numbersurl)
            dtNUM = time.time() - start
        except Exception as e:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # escape; log instead of swallowing silently (matches the
            # mempool panel's error handling style)
            print("renderPanelRunTheNumbers get numbersurl exception")
            print(e)
            # fake advance the last check time by a minute to delay retry
            dtNUM = dtNUM + 60
    blackscreen()
    image.paste(imageBTC, (0, 0, 100, 100))
    xo = 105
    yo = 13
    draw.rectangle((0 + xo, 0, width - xo, height), outline=0, fill=0)
    # assumes numbersdata was initialized at startup -- a first-ever failed
    # fetch would leave it unset
    numbersjson = numbersdata.json()
    lastrunblock = numbersjson['height']
    totalsupply = numbersjson['total_amount']
    draw.text((xo, yo), "Block Height", font=fontST, fill=colorwhite)
    draw.text((xo, yo + 17), str(lastrunblock), font=fontST, fill=colorbitcoinorange)
    draw.text((xo, yo + 50), "Total Supply", font=fontST, fill=colorwhite)
    draw.text((xo, yo + 67), str(totalsupply), font=fontST, fill=colorbitcoinorange)
    draw.text((15, xo), "Run The Numbers!", font=fontBTC2, fill=colorbitcoinorange)
    disp.image(image, rotation)
def renderPanelRollerCoasterGuy():
    """Draws the price panel with a roller-coaster background chosen by trend."""
    global drawnPanel
    global dtPanel
    global dtPRC
    global currentprice
    global pricemode
    drawnPanel = panelRollerCoasterGuy
    dtPanel = elapsed
    blackscreen()
    check_for_new_price()
    rcg = Image.new("RGB", (width, height))
    rcgdraw = ImageDraw.Draw(rcg)
    # price trend -> (background image, price label anchor x, anchor y)
    layouts = {
        -2: (imageRCDown, 120, 15),
        -1: (imageRCTopRight, 10, 5),
        0: (imageRCFlat, 10, 105),
        1: (imageRCTopLeft, 120, 5),
        2: (imageRCUp, 10, 15),
    }
    if pricemode in layouts:
        background, tx, ty = layouts[pricemode]
        rcg.paste(background, (0, 0))
        label = "$" + str(currentprice)
        # fake an outline: two black copies offset +2/-1 px, then the orange text
        rcgdraw.text((tx + 2, ty + 2), label, font=fontBTC2, fill=colorblack)
        rcgdraw.text((tx - 1, ty - 1), label, font=fontBTC2, fill=colorblack)
        rcgdraw.text((tx, ty), label, font=fontBTC2, fill=colorbitcoinorange)
    disp.image(rcg, rotation)
def renderPanelMempoolBlocks():
    """Draws the next two projected mempool blocks.

    Refreshes mempool data at most every 2 minutes; keeps the cached
    response when the fetch fails or returns a non-200 status.
    """
    global drawnPanel
    global dtPanel
    global dtMPB
    global mempoolblockheight
    global mempooldata
    drawnPanel = panelMempoolBlocks
    dtPanel = elapsed
    # Update data if enough time has past
    newmempooldata = mempooldata
    if elapsed > (dtMPB + 120): # 2 minutes
        try:
            newmempooldata = requests.get(mempoolurl)
            dtMPB = elapsed
        except Exception as e:
            print("renderPanelMempoolBlocks get mempoolurl exception")
            print(e)
            # fake advance the last mempool time by a minute to delay next check
            dtMPB = dtMPB + 60
    blackscreen()
    # bugfix: test the freshly fetched response (the original tested the
    # stale cached one) and keep a good response as the new cache so the
    # panel does not refetch on every redraw
    if newmempooldata.status_code == 200:
        mempooldata = newmempooldata
        mempooljson = newmempooldata.json()
    else:
        mempooljson = mempooldata.json()
    try:
        # Left block: second projected block (~20 minutes out)
        pendingblock = 1
        medianFee = int(round(mempooljson[pendingblock]['medianFee']))
        feeRangeMin = int(round(mempooljson[pendingblock]['feeRange'][0]))
        feeRangeMax = int(round(mempooljson[pendingblock]['feeRange'][-1]))
        nTx = int(mempooljson[pendingblock]['nTx'])
        drawmempoolblock(0, 0, medianFee, feeRangeMin, feeRangeMax, nTx, "~ 20 Minutes")
        # Right block: next projected block (~10 minutes out)
        pendingblock = 0
        medianFee = int(round(mempooljson[pendingblock]['medianFee']))
        feeRangeMin = int(round(mempooljson[pendingblock]['feeRange'][0]))
        feeRangeMax = int(round(mempooljson[pendingblock]['feeRange'][-1]))
        nTx = int(mempooljson[pendingblock]['nTx'])
        drawmempoolblock(120, 0, medianFee, feeRangeMin, feeRangeMax, nTx, "~ 10 Minutes")
    except Exception as e:
        print("renderPanelMempoolBlocks render exception")
        print(e)
        # draw obviously-wrong placeholder blocks instead of a blank screen
        drawmempoolblock(0, 0, 666, 1, 999, 9999, "Error Getting")
        drawmempoolblock(120, 0, 999, 999, 999, 9999, "Mempool Data")
    disp.image(image, rotation)
def renderPanelSatsPerFiatUnit():
    """Draws a grid of squares visualizing sats per (fraction of a) dollar.

    Halves the fiat unit until the sat count fits inside the 8-wide grid of
    100-sat squares, then draws full squares plus a partial remainder square
    and a label in the bottom-right corner.
    """
    global drawnPanel
    global dtPanel
    drawnPanel = panelSatsPerFiatUnit
    dtPanel = elapsed
    blackscreen()
    check_for_new_price()
    fiatunit = 1.00
    satsperfiatunit = int(round(100000000.0 / (currentprice / fiatunit)))
    # be as small as need to be to fit within space of 8x4 matrix of 100 per
    while satsperfiatunit > 3200:
        fiatunit = fiatunit / 2
        satsperfiatunit = int(round(100000000.0 / (currentprice / fiatunit)))
    t = str(satsperfiatunit) + " sats/$" + str(fiatunit)
    dc = 0  # destination column (0..7)
    dr = 0  # destination row
    colorindex = 0  # one color per completed row
    while satsperfiatunit > 100:
        # decrement satsperfiatunit
        satsperfiatunit = satsperfiatunit - 100
        # draw satssquare
        satssquare(dc, dr, 100, satscolors[colorindex])
        # advance to next square position
        dc = dc + 1
        if dc >= 8:
            dr = dr + 1
            dc = 0
            # NOTE(review): colorindex grows per row with no bound check --
            # confirm satscolors has enough entries for the worst case
            colorindex = colorindex + 1
    # remainder
    satssquare(dc, dr, satsperfiatunit, satscolors[colorindex])
    # label
    w,h = draw.textsize(t, fontST2)
    ox,oy = fontST2.getoffset(t)
    draw.text((width-w,height-h),t,font=fontST2, fill=colorbitcoinorange)
    disp.image(image, rotation)
# Main routine ---------------------------------------------------------------------------------------------------
while True:
    counter = counter + 1
    elapsed = time.time() - loopstart
    getuserinputs()
    # redraw the active panel; non-diagnostic panels refresh only on panel
    # change or every ~20 seconds to limit network and drawing cost
    if currentPanel == panelDiagnostic:
        renderPanelDiagnostic()
    if currentPanel == panelRunTheNumbers and ((drawnPanel != panelRunTheNumbers) or (elapsed > dtPanel + 20)):
        renderPanelRunTheNumbers()
    if currentPanel == panelRollerCoasterGuy and ((drawnPanel != panelRollerCoasterGuy) or (elapsed > dtPanel + 20)):
        renderPanelRollerCoasterGuy()
    if currentPanel == panelMempoolBlocks and ((drawnPanel != panelMempoolBlocks) or (elapsed > dtPanel + 20)):
        renderPanelMempoolBlocks()
    if currentPanel == panelSatsPerFiatUnit and ((drawnPanel != panelSatsPerFiatUnit) or (elapsed > dtPanel + 20)):
        renderPanelSatsPerFiatUnit()
    # sleep away whatever time is left in this frame to hold targetFPS
    elapsed = time.time() - loopstart
    st = (counter * (1 / targetFPS)) - elapsed
    if st > 0:
        time.sleep(st)
9282e076561a09b5b5e807ad3ccd8b3ba2cf7148 | 264 | py | Python | leap_year_using_inbuilt_funciton.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | 2 | 2019-07-10T06:32:05.000Z | 2019-11-13T07:52:53.000Z | leap_year_using_inbuilt_funciton.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | null | null | null | leap_year_using_inbuilt_funciton.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | 1 | 2019-10-12T06:56:13.000Z | 2019-10-12T06:56:13.000Z | #Python program to check whether the year is leap or not using inbuilt function
import calendar
def leap_verdict(year):
    """Returns the verdict text matching the original print output.

    year: int year to test

    Returns: "is leap year" or "is not a leap year"
    """
    return "is leap year" if calendar.isleap(year) else "is not a leap year"


def main(year=None):
    """Reports whether a year is a leap year.

    year: int year to test; when None (the default), prompts on stdin,
          preserving the original interactive behavior.
    """
    if year is None:
        year = int(input("Enter the year"))
    print(year, leap_verdict(year))


if __name__ == '__main__':
    main()
| 22 | 79 | 0.704545 | 45 | 264 | 3.955556 | 0.622222 | 0.078652 | 0.089888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170455 | 264 | 11 | 80 | 24 | 0.812785 | 0.295455 | 0 | 0 | 0 | 0 | 0.281081 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.222222 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9288db8bce578378c4eef0b2f0dd1396e58bb840 | 4,482 | py | Python | src/PCE_Codes/graphs.py | nasa/UQPCE | 64f2143cddf83ca2e835442db2fc9ef33c6d26be | [
"NASA-1.3"
] | 13 | 2020-11-07T22:03:35.000Z | 2022-01-12T22:08:53.000Z | src/PCE_Codes/graphs.py | nasa/UQPCE | 64f2143cddf83ca2e835442db2fc9ef33c6d26be | [
"NASA-1.3"
] | null | null | null | src/PCE_Codes/graphs.py | nasa/UQPCE | 64f2143cddf83ca2e835442db2fc9ef33c6d26be | [
"NASA-1.3"
] | 4 | 2020-11-23T16:24:03.000Z | 2022-03-25T01:25:21.000Z | from builtins import getattr
from warnings import warn
# third-party imports are optional at parse time; fail fast with a hint
# when the environment is missing them
try:
    from mpi4py.MPI import DOUBLE as MPI_DOUBLE, COMM_WORLD as MPI_COMM_WORLD
    import matplotlib.pyplot as plt
    import numpy as np
except ImportError:  # narrowed from a bare except so unrelated errors surface
    warn('Ensure that all required packages are installed.')
    exit()
from PCE_Codes._helpers import _warn
from PCE_Codes.variables.discrete import DiscreteVariable
# Per-process MPI layout: rank 0 acts as the "manager" that does the
# progress printing; all ranks share plotting work round-robin by rank.
comm = MPI_COMM_WORLD
size = comm.size
rank = comm.rank
is_manager = (rank == 0)
class Graphs:
    """
    Inputs: standardize- boolean for if graphs should be standardized or not

    Creates the plots of the variable values vs some other value. Plots the
    model error vs the predicted responses.
    """

    def __init__(self, standardize):
        # self.input is not read by any method below -- possibly legacy
        self.input = True
        self.standardize = standardize
        self.verbose = False  # gates progress printing on the manager rank

    # NOTE(review): class attribute assigned after __init__; presumably
    # routes warning display through the project's _warn helper -- confirm
    showwarning = _warn

    def factor_plots(self, graph_dir, var_list, plot_data, plot_name,
                     verify=False):
        """
        Inputs: graph_dir- file location where to put plots
                var_list- list of variables
                plot_data- the data to be plotted
                plot_name- 'Predicted' or 'Error'; what data is
                being plotted
                verify- if these points are the verification points or the
                input points

        Generates plots for each variable against plot_data.
        """
        var_count = len(var_list)
        attr = ''
        stand = ''
        if is_manager and self.verbose:
            print(f'Generating {plot_name} vs Factor graphs\n')
        # build the variable attribute name to plot:
        # [std_][verify_]vals depending on the settings
        if self.standardize:
            attr = ''.join((attr, 'std_'))
            stand = ' (Standardized)'
        if verify:
            attr = ''.join((attr, 'verify_'))
        attr = ''.join((attr, 'vals'))
        # round-robin the variables across MPI ranks: rank, rank+size, ...
        j = rank
        while j < var_count:
            curr_var = var_list[j]
            plt.scatter(getattr(curr_var, attr), plot_data)
            plt.title(f'{plot_name} vs {curr_var.name}{stand}')
            plt.xlabel(f'{curr_var.name}')
            plt.ylabel(f'{plot_name}')
            image_path = f'{graph_dir}/{plot_name}_vs_{curr_var.name}'
            # label categorical x-axes with their category names
            if isinstance(curr_var, DiscreteVariable):
                if hasattr(curr_var, 'categories'):
                    if not self.standardize:
                        ticks = curr_var.unstandardize_points(curr_var.x_values)
                    else:
                        ticks = curr_var.x_values
                    plt.xticks(ticks, curr_var.categories)
            plt.savefig(image_path, dpi=600, bbox_inches='tight')
            plt.clf()
            j += size

    def error_vs_pred(self, graph_dir, err, pred, plot_name):
        """
        Inputs: graph_dir- file location where to put plots
                err- difference between predicted vals and actual vals
                pred- the predicted values
                plot_name- the name of the plot

        Generates a plot of the error vs the predicted values.
        """
        if is_manager and self.verbose:
            print('Generating Error vs Predicted graph\n')
        plt.scatter(pred, err)
        plt.title(f'{plot_name}')
        plt.xlabel('predicted values')
        plt.ylabel('error')
        plt.savefig(f'{graph_dir}/{plot_name}', dpi=600, bbox_inches='tight')
        plt.clf()

    def pred_conf(self, graph_dir, pred, act, mean, conf):
        """
        Inputs: graph_dir- file location where to put plots
                pred- the predicted values
                act- actual response values
                mean- predicted mean for each point
                conf- predicted mean confidence intervals for each point

        Generates a plot of the predicted values, the verification points, and
        the prediction interval at each point.
        """
        plot_name = 'Predicted and Actual Responses'
        xs = np.arange(1, len(pred) + 1)
        # interval bars drawn first so the response markers land on top
        plt.errorbar(
            xs, mean, yerr=conf, capsize=4, fmt='ok', markersize=4, alpha=0.4,
            label='predicted interval'
        )
        plt.errorbar(xs, pred, fmt='xb', markersize=7, label='predicted responses')
        plt.errorbar(xs, act, fmt='xg', markersize=7, label='actual responses')
        plt.title(f'{plot_name}')
        plt.xlabel('count')
        plt.ylabel('response')
        plt.legend(loc="upper left")
        plt.savefig(f'{graph_dir}/{plot_name}', dpi=600, bbox_inches='tight')
        plt.clf()
9288fc164e0faa00174f655789c095f5517d439e | 21,121 | py | Python | scripts/kidney.py | vickiwickinger/ThinkBayes2 | 2259f1e83dba9a959b2cc84c7b83318b53b3ee24 | [
"MIT"
] | 1,337 | 2015-01-06T06:23:55.000Z | 2022-03-31T21:06:21.000Z | scripts/kidney.py | vickiwickinger/ThinkBayes2 | 2259f1e83dba9a959b2cc84c7b83318b53b3ee24 | [
"MIT"
] | 43 | 2015-04-23T13:14:15.000Z | 2022-01-04T12:55:59.000Z | scripts/kidney.py | vickiwickinger/ThinkBayes2 | 2259f1e83dba9a959b2cc84c7b83318b53b3ee24 | [
"MIT"
] | 1,497 | 2015-01-13T22:05:32.000Z | 2022-03-30T09:19:53.000Z | """This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import numpy
import random
import sys
import correlation
import thinkplot
import matplotlib.pyplot as pyplot
import thinkbayes2
INTERVAL = 245/365.0      # time between simulated measurements, in years
FORMATS = ['pdf', 'eps']  # output formats for saved figures
MINSIZE = 0.2             # smallest diameter (cm) shown on the plots
MAXSIZE = 20              # diameter (cm) at which simulations stop
BUCKET_FACTOR = 10        # buckets per factor of e in linear size
def log2(x, denom=math.log(2)):
    """Returns the base-2 logarithm of x.

    denom: ln(2), precomputed once as a default argument.
    """
    quotient = math.log(x) / denom
    return quotient
def SimpleModel():
    """Runs calculations based on a simple model.

    Back-of-the-envelope estimates of tumor doubling time and initial size,
    printed to stdout; no return value.
    """
    # time between discharge and diagnosis, in days
    interval = 3291.0

    # doubling time in linear measure is doubling time in volume * 3
    dt = 811.0 * 3

    # number of doublings since discharge
    doublings = interval / dt

    # how big was the tumor at time of discharge (diameter in cm)
    d1 = 15.5
    d0 = d1 / 2.0 ** doublings

    print(('interval (days)', interval))
    print(('interval (years)', interval / 365))
    print(('dt', dt))
    print(('doublings', doublings))
    print(('d1', d1))
    print(('d0', d0))

    # assume an initial linear measure of 0.1 cm
    d0 = 0.1
    d1 = 15.5

    # how many doublings would it take to get from d0 to d1
    doublings = log2(d1 / d0)

    # what linear doubling time does that imply?
    dt = interval / doublings

    print(('doublings', doublings))
    print(('dt', dt))

    # compute the volumetric doubling time and RDT
    vdt = dt / 3
    rdt = 365 / vdt

    print(('vdt', vdt))
    print(('rdt', rdt))

    # probability that a tumor grows faster than this implied RDT,
    # according to the Zhang et al. data
    cdf = MakeCdf()
    p = cdf.Prob(rdt)
    print(('Prob{RDT > 2.4}', 1-p))
def MakeCdf():
    """Builds the CDF of RDT from the data reported by Zhang et al."""
    num_patients = 53.0
    # cumulative patient counts for RDT values -1.5, -0.5, ..., 5.5
    cum_freqs = [0, 2, 31, 42, 48, 51, 52, 53]
    probs = []
    for freq in cum_freqs:
        probs.append(freq / num_patients)
    xs = numpy.arange(-1.5, 6.5, 1.0)
    return thinkbayes2.Cdf(xs, probs)
def PlotCdf(cdf):
    """Plots the actual and fitted distributions.

    Saves two figures: kidney1 (log-scale CCDF, which is roughly linear
    for exponential data) and kidney2 (model CDF vs the data CDF).

    cdf: CDF object
    """
    xs, ps = cdf.xs, cdf.ps
    cps = [1-p for p in ps]  # complementary CDF values

    # CCDF on logy scale: shows exponential behavior
    thinkplot.Clf()
    thinkplot.Plot(xs, cps, 'bo-')
    thinkplot.Save(root='kidney1',
                   formats=FORMATS,
                   xlabel='RDT',
                   ylabel='CCDF (log scale)',
                   yscale='log')

    # CDF, model and data
    thinkplot.Clf()
    thinkplot.PrePlot(num=2)
    mxs, mys = ModelCdf()
    thinkplot.Plot(mxs, mys, label='model', linestyle='dashed')
    thinkplot.Plot(xs, ps, 'gs', label='data')
    thinkplot.Save(root='kidney2',
                   formats=FORMATS,
                   xlabel='RDT (volume doublings per year)',
                   ylabel='CDF',
                   title='Distribution of RDT',
                   axis=[-2, 7, 0, 1],
                   loc=4)
def QQPlot(cdf, fit):
    """Makes a QQ plot comparing data quantiles to model quantiles.

    cdf: actual Cdf of RDT
    fit: fitted model distribution
    """
    diag = [-1.5, 5.5]
    thinkplot.Clf()
    # reference line y = x: points near it indicate a good fit
    thinkplot.Plot(diag, diag, 'b-')
    xs, ps = cdf.xs, cdf.ps
    model_quantiles = [fit.Value(p) for p in ps]
    thinkplot.Plot(xs, model_quantiles, 'gs')
    thinkplot.Save(root='kidney3',
                   formats=FORMATS,
                   xlabel='Actual',
                   ylabel='Model')
def FitCdf(cdf):
    """Estimates the exponential parameter by fitting a line to the log CCDF.

    cdf: Cdf of RDT

    Returns: magnitude of the fitted slope (the exponential parameter)
    """
    xs, ps = cdf.xs, cdf.ps
    # drop the first and last points, where the CCDF is degenerate
    trimmed_xs = xs[1:-1]
    log_ccdf = [math.log(1 - p) for p in ps[1:-1]]
    _inter, slope = correlation.LeastSquares(trimmed_xs, log_ccdf)
    return -slope
def CorrelatedGenerator(cdf, rho):
    """Generates a sequence of values from cdf with correlation.

    Generates a correlated standard Normal series, then transforms to
    values from cdf

    cdf: distribution to choose from
    rho: target coefficient of correlation
    """
    def Transform(x):
        """Maps from a Normal variate to a variate with the given CDF."""
        p = thinkbayes2.NormalCdf(x)
        y = cdf.Value(p)
        return y

    # for the first value, choose from a Normal and transform it
    x = random.gauss(0, 1)
    yield Transform(x)

    # for subsequent values, choose from the conditional distribution
    # based on the previous value.  This is an AR(1) process: sigma keeps
    # the marginal variance at 1 so every draw is standard Normal.
    sigma = math.sqrt(1 - rho**2)
    while True:
        x = random.gauss(x * rho, sigma)
        yield Transform(x)
def UncorrelatedGenerator(cdf, _rho=None):
    """Yields an endless stream of independent draws from cdf.

    _rho: ignored; accepted only so this generator is interchangeable
    with CorrelatedGenerator

    cdf: distribution to draw from
    """
    while True:
        yield cdf.Random()
def RdtGenerator(cdf, rho):
    """Returns an iterator of RDT values with the requested serial correlation.

    cdf: Cdf object
    rho: coefficient of correlation; 0 selects the uncorrelated generator
    """
    if rho != 0.0:
        return CorrelatedGenerator(cdf, rho)
    return UncorrelatedGenerator(cdf)
def GenerateRdt(pc, lam1, lam2):
    """Draws one RDT from a mixture of two exponential distributions.

    With probability pc the tumor shrinks (negative value, parameter lam2);
    otherwise it grows (positive value, parameter lam1).
    """
    shrinking = random.random() < pc
    if shrinking:
        return -random.expovariate(lam2)
    return random.expovariate(lam1)
def GenerateSample(n, pc, lam1, lam2):
    """Draws a sample of n RDT values.

    n: sample size
    pc: probability of negative growth
    lam1: exponential parameter of positive growth
    lam2: exponential parameter of negative growth

    Returns: list of random variates
    """
    sample = []
    for _ in range(n):
        sample.append(GenerateRdt(pc, lam1, lam2))
    return sample
def GenerateCdf(n=1000, pc=0.35, lam1=0.79, lam2=5.0):
    """Draws a sample of RDTs and returns its CDF.

    n: sample size
    pc: probability of negative growth
    lam1: exponential parameter of positive growth
    lam2: exponential parameter of negative growth

    Returns: Cdf of the generated sample
    """
    sample = GenerateSample(n, pc, lam1, lam2)
    return thinkbayes2.MakeCdfFromList(sample)
def ModelCdf(pc=0.35, lam1=0.79, lam2=5.0):
    """Evaluates the mixture-model CDF on a grid of RDT values.

    pc: probablity of negative growth
    lam1: exponential parameter of positive growth
    lam2: exponential parameter of negative growth

    Returns: list of xs, list of ys
    """
    cdf = thinkbayes2.EvalExponentialCdf
    # negative side: the shrinking branch, weighted by pc
    x1 = numpy.arange(-2, 0, 0.1)
    y1 = [pc * (1 - cdf(-x, lam2)) for x in x1]
    # positive side: pc plus the growing branch, weighted by 1-pc
    x2 = numpy.arange(0, 7, 0.1)
    y2 = [pc + (1-pc) * cdf(x, lam1) for x in x2]
    return list(x1) + list(x2), y1+y2
def BucketToCm(y, factor=BUCKET_FACTOR):
    """Converts a bucket number back to a linear dimension.

    y: bucket number
    factor: number of buckets per factor of e

    Returns: linear dimension in cm
    """
    scaled = y / factor
    return math.exp(scaled)
def CmToBucket(x, factor=BUCKET_FACTOR):
    """Computes the bucket for a given linear dimension.

    x: linear dimension in cm
    factor: number of buckets per factor of e

    Returns: rounded bucket number
    """
    log_size = math.log(x)
    return round(factor * log_size)
def Diameter(volume, factor=3/math.pi/4, exp=1/3.0):
    """Converts a sphere's volume to its diameter.

    d = 2r = 2 * (3/4/pi V)^1/3
    """
    radius = (factor * volume) ** exp
    return 2 * radius
def Volume(diameter, factor=4*math.pi/3):
    """Converts a sphere's diameter to its volume.

    V = 4/3 pi (d/2)^3
    """
    radius = diameter / 2.0
    return factor * radius ** 3
class Cache(object):
    """Records each observation point for each tumor."""

    def __init__(self):
        """Initializes the cache.

        joint: map from (age, bucket) to frequency
        sequences: map from bucket to a list of sequences
        initial_rdt: sequence of (V0, rdt) pairs
        """
        self.joint = thinkbayes2.Joint()
        self.sequences = {}
        self.initial_rdt = []

    def GetBuckets(self):
        """Returns an iterable of the bucket numbers in the cache."""
        # bugfix: dict.iterkeys() is Python 2 only and raises AttributeError
        # on Python 3; keys() works on both (list on Py2, view on Py3) and
        # every caller only iterates the result
        return self.sequences.keys()

    def GetSequence(self, bucket):
        """Looks up a bucket in the cache."""
        return self.sequences[bucket]

    def ConditionalCdf(self, bucket, name=''):
        """Forms the cdf of ages for a given bucket.

        bucket: int bucket number
        name: string
        """
        pmf = self.joint.Conditional(0, 1, bucket, name=name)
        cdf = pmf.MakeCdf()
        return cdf

    def ProbOlder(self, cm, age):
        """Computes the probability of exceeding age, given size.

        cm: size in cm
        age: age in years
        """
        bucket = CmToBucket(cm)
        cdf = self.ConditionalCdf(bucket)
        p = cdf.Prob(age)
        return 1-p

    def GetDistAgeSize(self, size_thresh=MAXSIZE):
        """Gets the joint distribution of age and size.

        Map from (age, log size in cm) to log freq

        size_thresh: maximum size (cm) to include

        Returns: new Pmf object
        """
        joint = thinkbayes2.Joint()

        for val, freq in self.joint.Items():
            age, bucket = val
            cm = BucketToCm(bucket)
            if cm > size_thresh:
                continue
            log_cm = math.log10(cm)
            # scale log-frequency so the contour plot has usable levels
            joint.Set((age, log_cm), math.log(freq) * 10)

        return joint

    def Add(self, age, seq, rdt):
        """Adds this observation point to the cache.

        age: age of the tumor in years
        seq: sequence of volumes
        rdt: RDT during this interval
        """
        final = seq[-1]
        cm = Diameter(final)
        bucket = CmToBucket(cm)
        self.joint.Incr((age, bucket))

        self.sequences.setdefault(bucket, []).append(seq)

        # remember the volume at the start of this interval with its RDT,
        # so serial correlation between size and growth can be checked
        initial = seq[-2]
        self.initial_rdt.append((initial, rdt))

    def Print(self):
        """Prints the size (cm) for each bucket, and the number of sequences."""
        for bucket in sorted(self.GetBuckets()):
            ss = self.GetSequence(bucket)
            diameter = BucketToCm(bucket)
            print((diameter, len(ss)))

    def Correlation(self):
        """Computes the correlation between log volumes and rdts."""
        vs, rdts = zip(*self.initial_rdt)
        lvs = [math.log(v) for v in vs]
        return correlation.Corr(lvs, rdts)
class Calculator(object):
    """Encapsulates the state of the computation."""

    def __init__(self):
        """Initializes the cache."""
        self.cache = Cache()

    def MakeSequences(self, n, rho, cdf):
        """Returns a list of sequences of volumes.

        n: number of sequences to make
        rho: serial correlation
        cdf: Cdf of rdts

        Returns: list of n sequences of volumes
        """
        sequences = []
        for i in range(n):
            rdt_seq = RdtGenerator(cdf, rho)
            seq = self.MakeSequence(rdt_seq)
            sequences.append(seq)

            # progress report: simulating many tumors takes a while
            if i % 100 == 0:
                print(i)

        return sequences

    def MakeSequence(self, rdt_seq, v0=0.01, interval=INTERVAL,
                     vmax=Volume(MAXSIZE)):
        """Simulate the growth of a tumor.

        rdt_seq: sequence of rdts
        v0: initial volume in mL (cm^3)
        interval: timestep in years
        vmax: volume to stop at

        Returns: sequence of volumes
        """
        seq = v0,
        age = 0

        for rdt in rdt_seq:
            age += interval
            final, seq = self.ExtendSequence(age, seq, rdt, interval)
            # stop once the tumor exceeds the largest size of interest
            if final > vmax:
                break

        return seq

    def ExtendSequence(self, age, seq, rdt, interval):
        """Generates a new random value and adds it to the end of seq.

        Side-effect: adds sub-sequences to the cache.

        age: age of tumor at the end of this interval
        seq: sequence of values so far
        rdt: reciprocal doubling time in doublings per year
        interval: timestep in years

        Returns: final volume, extended sequence
        """
        initial = seq[-1]
        doublings = rdt * interval
        # exponential growth: rdt doublings per year over this interval
        final = initial * 2**doublings
        new_seq = seq + (final,)
        self.cache.Add(age, new_seq, rdt)

        return final, new_seq

    def PlotBucket(self, bucket, color='blue'):
        """Plots the set of sequences for the given bucket.

        bucket: int bucket number
        color: string
        """
        sequences = self.cache.GetSequence(bucket)
        for seq in sequences:
            n = len(seq)
            age = n * INTERVAL
            # plot each history backwards from the time of diagnosis (t=0)
            ts = numpy.linspace(-age, 0, n)
            PlotSequence(ts, seq, color)

    def PlotBuckets(self):
        """Plots the set of sequences that ended in a given bucket."""
        # 2.01, 4.95 cm, 9.97 cm
        buckets = [7.0, 16.0, 23.0]
        buckets = [23.0]  # only the ~10 cm bucket is actually plotted
        colors = ['blue', 'green', 'red', 'cyan']

        thinkplot.Clf()
        for bucket, color in zip(buckets, colors):
            self.PlotBucket(bucket, color)

        thinkplot.Save(root='kidney5',
                       formats=FORMATS,
                       title='History of simulated tumors',
                       axis=[-40, 1, MINSIZE, 12],
                       xlabel='years',
                       ylabel='diameter (cm, log scale)',
                       yscale='log')

    def PlotJointDist(self):
        """Makes a pcolor plot of the age-size joint distribution."""
        thinkplot.Clf()
        joint = self.cache.GetDistAgeSize()
        thinkplot.Contour(joint, contour=False, pcolor=True)

        thinkplot.Save(root='kidney8',
                       formats=FORMATS,
                       axis=[0, 41, -0.7, 1.31],
                       yticks=MakeLogTicks([0.2, 0.5, 1, 2, 5, 10, 20]),
                       xlabel='ages',
                       ylabel='diameter (cm, log scale)')

    def PlotConditionalCdfs(self):
        """Plots the cdf of ages for each bucket."""
        buckets = [7.0, 16.0, 23.0, 27.0]
        # 2.01, 4.95 cm, 9.97 cm, 14.879 cm
        names = ['2 cm', '5 cm', '10 cm', '15 cm']
        cdfs = []

        for bucket, name in zip(buckets, names):
            cdf = self.cache.ConditionalCdf(bucket, name)
            cdfs.append(cdf)

        thinkplot.Clf()
        thinkplot.PrePlot(num=len(cdfs))
        thinkplot.Cdfs(cdfs)

        thinkplot.Save(root='kidney6',
                       title='Distribution of age for several diameters',
                       formats=FORMATS,
                       xlabel='tumor age (years)',
                       ylabel='CDF',
                       loc=4)

    def PlotCredibleIntervals(self, xscale='linear'):
        """Plots the confidence interval for each bucket."""
        xs = []
        ts = []
        percentiles = [95, 75, 50, 25, 5]
        min_size = 0.3

        # loop through the buckets, accumulate
        # xs: sequence of sizes in cm
        # ts: sequence of percentile tuples
        for _, bucket in enumerate(sorted(self.cache.GetBuckets())):
            cm = BucketToCm(bucket)
            # keep only buckets in the plottable size range
            if cm < min_size or cm > 20.0:
                continue
            xs.append(cm)
            cdf = self.cache.ConditionalCdf(bucket)
            ps = [cdf.Percentile(p) for p in percentiles]
            ts.append(ps)

        # dump the results into a table
        fp = open('kidney_table.tex', 'w')
        PrintTable(fp, xs, ts)
        fp.close()

        # make the figure
        linewidths = [1, 2, 3, 2, 1]
        alphas = [0.3, 0.5, 1, 0.5, 0.3]
        labels = ['95th', '75th', '50th', '25th', '5th']

        # transpose the ts so we have sequences for each percentile rank
        thinkplot.Clf()
        yys = zip(*ts)

        for ys, linewidth, alpha, label in zip(yys, linewidths, alphas, labels):
            options = dict(color='blue', linewidth=linewidth,
                           alpha=alpha, label=label, markersize=2)

            # plot the data points
            thinkplot.Plot(xs, ys, 'bo', **options)

            # plot the fit lines
            fxs = [min_size, 20.0]
            fys = FitLine(xs, ys, fxs)

            thinkplot.Plot(fxs, fys, **options)

            # put a label at the end of each line
            x, y = fxs[-1], fys[-1]
            pyplot.text(x*1.05, y, label, color='blue',
                        horizontalalignment='left',
                        verticalalignment='center')

        # make the figure
        thinkplot.Save(root='kidney7',
                       formats=FORMATS,
                       title='Credible interval for age vs diameter',
                       xlabel='diameter (cm, log scale)',
                       ylabel='tumor age (years)',
                       xscale=xscale,
                       xticks=MakeTicks([0.5, 1, 2, 5, 10, 20]),
                       axis=[0.25, 35, 0, 45],
                       legend=False,
                       )
def PlotSequences(sequences):
    """Plots linear measurement vs time.

    sequences: list of sequences of volumes
    """
    thinkplot.Clf()

    # dashed gray reference line at the 10 cm diameter mark
    options = dict(color='gray', linewidth=1, linestyle='dashed')
    thinkplot.Plot([0, 40], [10, 10], **options)

    for seq in sequences:
        n = len(seq)
        age = n * INTERVAL
        ts = numpy.linspace(0, age, n)
        PlotSequence(ts, seq)

    thinkplot.Save(root='kidney4',
                   formats=FORMATS,
                   axis=[0, 40, MINSIZE, 20],
                   title='Simulations of tumor growth',
                   xlabel='tumor age (years)',
                   yticks=MakeTicks([0.2, 0.5, 1, 2, 5, 10, 20]),
                   ylabel='diameter (cm, log scale)',
                   yscale='log')
def PlotSequence(ts, seq, color='blue'):
    """Plots one tumor history as linear size vs time.

    ts: sequence of times in years
    seq: sequence of volumes
    color: color string
    """
    diameters = [Diameter(v) for v in seq]
    style = dict(color=color, linewidth=1, alpha=0.2)
    thinkplot.Plot(ts, diameters, **style)
def PrintCI(fp, cm, ps):
    """Writes one row of the LaTeX credible-interval table.

    fp: file pointer
    cm: diameter in cm
    ps: percentiles in the order produced upstream; written reversed
    """
    cells = ['%0.1f' % round(cm, 1)]
    for p in reversed(ps):
        cells.append(' & %0.1f ' % round(p, 1))
    fp.write(''.join(cells))
    fp.write('\\\\\n')
def PrintTable(fp, xs, ts):
    """Write the LaTeX table of age percentiles by diameter.

    fp: file pointer
    xs: diameters in cm
    ts: sequence of tuples of percentiles
    """
    header_lines = (
        r'\begin{tabular}{|r||r|r|r|r|r|}',
        r'\hline',
        r'Diameter & \multicolumn{5}{c|}{Percentiles of age} \\',
        r'(cm) & 5th & 25th & 50th & 75th & 95th \\',
        r'\hline',
    )
    for line in header_lines:
        fp.write(line + '\n')
    # only every third diameter, to keep the table short
    for index, (cm, ps) in enumerate(zip(xs, ts)):
        if index % 3 == 0:
            PrintCI(fp, cm, ps)
    fp.write(r'\hline' '\n')
    fp.write(r'\end{tabular}' '\n')
def FitLine(xs, ys, fxs):
    """Fit a line to (log xs, ys); return fitted y values at fxs.

    Applies a log transform to the xs.

    xs: diameter in cm
    ys: age in years
    fxs: diameter in cm
    """
    log_xs = [math.log(x) for x in xs]
    inter, slope = correlation.LeastSquares(log_xs, ys)
    return [inter + slope * math.log(fx) for fx in fxs]
def MakeTicks(xs):
    """Make a pair of sequences for use as pyplot ticks.

    xs: sequence of floats

    Returns (xs, labels), where labels is a sequence of strings.
    """
    return xs, list(map(str, xs))
def MakeLogTicks(xs):
    """Make tick positions on a log10 scale with linear-value labels.

    xs: sequence of floats

    Returns (lxs, labels), where labels is a sequence of strings.
    """
    positions = [math.log10(value) for value in xs]
    return positions, [str(value) for value in xs]
def TestCorrelation(cdf):
    """Test the correlated generator.

    Makes sure that the sequence has the right distribution and
    serial correlation.

    cdf: distribution the generator samples from
    """
    n = 10000
    rho = 0.4
    rdt_seq = CorrelatedGenerator(cdf, rho)
    # bug fix: generators have no .next() method in Python 3 (this file
    # already uses Python 3 print()); use the built-in next() instead
    xs = [next(rdt_seq) for _ in range(n)]
    rho2 = correlation.SerialCorr(xs)
    print((rho, rho2))
    cdf2 = thinkbayes2.MakeCdfFromList(xs)
    thinkplot.Cdfs([cdf, cdf2])
    thinkplot.Show()
def main(script):
    """Run the kidney-tumor analysis end to end and generate the figures.

    script: name of the invoking script (sys.argv[0])
    """
    # report which cache bucket a few example sizes map to (sanity check)
    for size in [1, 5, 10]:
        bucket = CmToBucket(size)
        print(('Size, bucket', size, bucket))
    SimpleModel()
    # fixed seed so the simulated growth sequences are reproducible
    random.seed(17)
    cdf = MakeCdf()
    # fit a rate parameter to the empirical CDF, then generate a fitted CDF
    lam1 = FitCdf(cdf)
    fit = GenerateCdf(lam1=lam1)
    # TestCorrelation(fit)
    PlotCdf(cdf)
    # QQPlot(cdf, fit)
    calc = Calculator()
    # rho: serial correlation of growth rates; 0.0 presumably means
    # uncorrelated intervals — verify against MakeSequences
    rho = 0.0
    sequences = calc.MakeSequences(100, rho, fit)
    PlotSequences(sequences)
    calc.PlotBuckets()
    # larger batch to populate calc.cache; the return value is unused
    _ = calc.MakeSequences(1900, rho, fit)
    print(('V0-RDT correlation', calc.cache.Correlation()))
    print(('15.5 Probability age > 8 year', calc.cache.ProbOlder(15.5, 8)))
    print(('6.0 Probability age > 8 year', calc.cache.ProbOlder(6.0, 8)))
    calc.PlotConditionalCdfs()
    calc.PlotCredibleIntervals(xscale='log')
    calc.PlotJointDist()
if __name__ == '__main__':
    # forward all command-line arguments (script name first) to main
    main(*sys.argv)
| 27.008951 | 80 | 0.573884 | 2,767 | 21,121 | 4.361764 | 0.193712 | 0.013257 | 0.011269 | 0.00348 | 0.203745 | 0.134891 | 0.107051 | 0.097688 | 0.075814 | 0.071008 | 0 | 0.032347 | 0.310591 | 21,121 | 781 | 81 | 27.043534 | 0.796511 | 0.293736 | 0 | 0.17801 | 0 | 0 | 0.067987 | 0.004432 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117801 | false | 0 | 0.02356 | 0 | 0.217277 | 0.049738 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
928cdb12cb361d5fcb3ef68b59a2d730df5d6777 | 1,072 | py | Python | parser.py | wongalvis/WiktionaryCrawler | 4a61e5f28d4bca591b10f9bcc2b3c0d29138fc15 | [
"Unlicense"
] | null | null | null | parser.py | wongalvis/WiktionaryCrawler | 4a61e5f28d4bca591b10f9bcc2b3c0d29138fc15 | [
"Unlicense"
] | null | null | null | parser.py | wongalvis/WiktionaryCrawler | 4a61e5f28d4bca591b10f9bcc2b3c0d29138fc15 | [
"Unlicense"
] | 1 | 2019-03-28T00:25:46.000Z | 2019-03-28T00:25:46.000Z | import config, lang
import pb
import urllib2
import urlnorm
import os
import time
def parse(pages):
    """Extract spellings from cached wiki pages, caching results per page.

    pages: list of page names whose HTML is already cached on disk

    Returns a flat list of non-empty spellings from all pages.
    """
    spelings = []
    speling_dirpath = "data/speling/%s/%s/" % (config.wiki_lang, config.start_cat)
    total = len(pages)
    # enumerate replaces the manual counter; counting starts at 1 as before
    for counter, page in enumerate(pages, start=1):
        pb.update(counter, total)  # progress bar
        cache_path = speling_dirpath + page + ".txt"
        if os.path.exists(cache_path):
            # cached result: one spelling per line
            with open(cache_path, 'r') as f:
                speling_list = f.read().strip("\n").split("\n")
        else:
            speling_list = parse_page(page)
            # cache the parsed spellings for subsequent runs
            with open(cache_path, 'w') as f:
                for speling in speling_list:
                    f.write(speling + "\n")
        spelings.extend(speling_list)
    # drop empty entries introduced by blank lines in the cache files
    return [speling for speling in spelings if speling != ""]
def parse_page(page):
    """Parse one cached HTML page into a list of spellings.

    page: page name, used to locate the cached .html file
    """
    dirpath = "data/pages/%s/%s/" % (config.wiki_lang, config.start_cat)
    # context manager guarantees the file is closed even on parse errors
    with open(dirpath + page + ".html", 'r') as f:
        htmldoc = f.read()
    # language-specific extraction lives in the lang module
    return lang.parse(page, htmldoc)
928fe78301baaab64810fda9828eb05526f5d381 | 416 | py | Python | utils/hmdb_label.py | raozhongyu/3D-ConvNets-for-Action-Recognition | 8ede339ad4988bfee78a419022cff9f719d2af71 | [
"MIT"
] | 46 | 2018-08-25T07:45:07.000Z | 2022-01-27T04:06:38.000Z | utils/hmdb_label.py | raozhongyu/3D-ConvNets-for-Action-Recognition | 8ede339ad4988bfee78a419022cff9f719d2af71 | [
"MIT"
] | 1 | 2018-10-10T12:59:43.000Z | 2018-10-10T12:59:43.000Z | utils/hmdb_label.py | raozhongyu/3D-ConvNets-for-Action-Recognition | 8ede339ad4988bfee78a419022cff9f719d2af71 | [
"MIT"
] | 19 | 2018-10-10T12:58:44.000Z | 2021-04-25T16:12:08.000Z | import os
# Root of the per-action frame directories.
img_path = '/home/deep/datasets/hmdb/'
clip_length = 16  # NOTE(review): unused in this script; presumably consumed elsewhere

# One output line per sample: "<action>/<sample> <label>".  Labels are
# assigned by case-insensitive alphabetical order of the action directories.
with open('ucfTrainTestlist/hmdb.txt', 'w') as f:
    actions = sorted(os.listdir(img_path), key=str.lower)
    # enumerate replaces the manual label counter (still starts at 0)
    for label, action in enumerate(actions):
        print(action)
        samples = sorted(os.listdir(img_path + action), key=str.lower)
        for sample in samples:
            f.write(action + '/' + sample + ' ' + str(label) + '\n')
| 20.8 | 64 | 0.644231 | 61 | 416 | 4.327869 | 0.557377 | 0.079545 | 0.090909 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011976 | 0.197115 | 416 | 19 | 65 | 21.894737 | 0.778443 | 0 | 0 | 0 | 0 | 0 | 0.132212 | 0.120192 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9295f185bea3693f60585923f865715fd849e6ea | 1,232 | py | Python | transl-service-sls/test_handler.py | viktormishyn/transl-service-aws | 2bae073aded82f316123eefd845aa1a1e0a69b5d | [
"MIT"
] | null | null | null | transl-service-sls/test_handler.py | viktormishyn/transl-service-aws | 2bae073aded82f316123eefd845aa1a1e0a69b5d | [
"MIT"
] | null | null | null | transl-service-sls/test_handler.py | viktormishyn/transl-service-aws | 2bae073aded82f316123eefd845aa1a1e0a69b5d | [
"MIT"
] | null | null | null | import json
import pytest
from handler import detect_language, translate_message, handler
@pytest.mark.skip
@pytest.mark.parametrize("test_input, expected", [
    ('This text is written in English', 'en'),
    ('Этот текст написан mostly по-русски', 'ru'),
    ('Ten tekst jest napisany w jezyku polskim, ale bez znakow diakrytycznych', 'pl'),
    ('Текст написаний українською', 'uk')
])
def test_detect_language(test_input, expected):
    """detect_language should return the expected language code."""
    detected = detect_language(test_input)
    assert detected == expected
@pytest.mark.skip
@pytest.mark.parametrize("test_input, source_lang, target_lang, expected", [
    ('day', 'en', 'ru', 'день'),
    ('słońce', 'pl', 'en', 'sun')
])
def test_translate_message(test_input, source_lang, target_lang, expected):
    """translate_message should translate between the given languages."""
    translated = translate_message(test_input, source_lang, target_lang)
    assert translated == expected
@pytest.mark.skip
@pytest.mark.parametrize("event, expected", [
    ({'body': {'message': 'day', 'target_language': 'ru'}}, 'день'),
    ({'body': {'message': 'słońce', 'target_language': 'en'}}, 'sun'),
])
def test_handler(event, expected):
    """handler should return HTTP 200 with the translation in the JSON body."""
    response = handler(event, 'context')
    assert response['statusCode'] == 200
    body = json.loads(response['body'])
    assert body['translated_message'] == expected
| 34.222222 | 86 | 0.693182 | 151 | 1,232 | 5.496689 | 0.410596 | 0.072289 | 0.050602 | 0.072289 | 0.384337 | 0.319277 | 0.319277 | 0.233735 | 0.127711 | 0 | 0 | 0.002836 | 0.141234 | 1,232 | 35 | 87 | 35.2 | 0.781664 | 0 | 0 | 0.206897 | 0 | 0 | 0.314935 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 1 | 0.103448 | false | 0 | 0.103448 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9297372fa443955e53e67f87e3ad4b6f573eebaa | 715 | py | Python | ag/orbit/node/config/interface.py | AlphaGriffin/orbit-node | 6e330a2734a6a5dfbb52d984fe0b2f8dff4755cd | [
"MIT"
] | null | null | null | ag/orbit/node/config/interface.py | AlphaGriffin/orbit-node | 6e330a2734a6a5dfbb52d984fe0b2f8dff4755cd | [
"MIT"
] | null | null | null | ag/orbit/node/config/interface.py | AlphaGriffin/orbit-node | 6e330a2734a6a5dfbb52d984fe0b2f8dff4755cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (C) 2018 Alpha Griffin
# @%@~LICENSE~@%@
from ag.orbit.command import main
from ag.orbit.node.config import get_webapi_interface, set_webapi_interface
def run(args):
    """Show or set the web API bind interface IP.

    args: optional single-element list holding the IP to set; with no
        arguments the currently configured IP is printed instead.
    """
    if args and len(args) != 1:
        raise ValueError("Expecting exactly 1 argument")
    if not args:
        ip = get_webapi_interface()
        print()
        print("    Bind interface IP for web API: {}".format(ip))
    else:
        ip = args[0]
        print()
        print("    Setting web API bind interface IP to: {}".format(ip))
        webapi = set_webapi_interface(ip)
        print()
        print("Web API bind IP saved to: {}".format(webapi))
if __name__ == '__main__':
    # delegate to the shared orbit command entry point, passing run as callback
    main(run)
| 20.428571 | 75 | 0.606993 | 94 | 715 | 4.446809 | 0.5 | 0.143541 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015267 | 0.267133 | 715 | 34 | 76 | 21.029412 | 0.782443 | 0.097902 | 0 | 0.166667 | 0 | 0 | 0.226563 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.166667 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
929893933759cc9c2a16174a003428aa9861cd4f | 1,290 | py | Python | src/main/python/ch03/sec03/demo_corpus_loader.py | zhuyuanxiang/Hanlp-Books-Examples | 17aff14fa22ec6bc422ffa53497f98c22f9c7efe | [
"MIT"
] | 4 | 2021-01-04T07:04:32.000Z | 2021-07-25T12:25:58.000Z | src/main/python/ch03/sec03/demo_corpus_loader.py | zhuyuanxiang/Hanlp-Books-Examples | 17aff14fa22ec6bc422ffa53497f98c22f9c7efe | [
"MIT"
] | null | null | null | src/main/python/ch03/sec03/demo_corpus_loader.py | zhuyuanxiang/Hanlp-Books-Examples | 17aff14fa22ec6bc422ffa53497f98c22f9c7efe | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : 526614962@qq.com
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : Hanlp-Books-Examples
@File : demo_corpus_loader.py
@Version : v0.1
@Time : 2020-12-21 16:27
@License : (C)Copyright 2018-2020, zYx.Tom
@Reference :
@Desc : 手工加载语料库
@理解:
"""
import os
from preamble import CorpusLoader
from tools import beep_end, test_data_path, show_title
def my_cws_corpus():
    """Create (if absent) a tiny word-segmentation corpus; return its path.

    The corpus holds three space-segmented sentences, one per line.
    """
    corpus_path = test_data_path() + 'my_cws_corpus.txt'
    if not os.path.isfile(corpus_path):
        content = "商品 和 服务\n" + "商品 和服 物美价廉\n" + "服务 和 货币"
        with open(corpus_path, 'w', encoding='utf-8') as out:
            out.write(content)
    return corpus_path
def load_cws_corpus(corpus_path):
    """Load a segmented corpus file as a list of sentences via CorpusLoader."""
    sentence_list = CorpusLoader.convert2SentenceList(corpus_path)
    return sentence_list
# ----------------------------------------------------------------------
def main():
    """Load the demo corpus and print each sentence with its words."""
    sentences = load_cws_corpus(my_cws_corpus())
    for sentence in sentences:
        show_title("")
        print("sent: ", sentence)
        print("word: ", end='')
        for token in sentence:
            print(token, end=', ')
        print()
if __name__ == '__main__':
    main()
    # beep_end() presumably signals run completion (helper from tools)
    beep_end()
| 24.807692 | 72 | 0.55969 | 159 | 1,290 | 4.314465 | 0.553459 | 0.116618 | 0.048105 | 0.08309 | 0.067055 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034908 | 0.244961 | 1,290 | 51 | 73 | 25.294118 | 0.669405 | 0.370543 | 0 | 0 | 0 | 0 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0.04 | 0.12 | 0.04 | 0.32 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
929a40a2c2266e63e865c5849f48f7efa2fab62e | 1,895 | py | Python | saged/get_tissue_data.py | ben-heil/saged | a2c039f00bfc97011c2ee1343c39af42e6f2e1e6 | [
"BSD-3-Clause"
] | 3 | 2020-07-24T01:06:04.000Z | 2021-12-20T14:35:21.000Z | saged/get_tissue_data.py | ben-heil/saged | a2c039f00bfc97011c2ee1343c39af42e6f2e1e6 | [
"BSD-3-Clause"
] | 39 | 2020-06-26T12:54:35.000Z | 2022-03-23T14:16:34.000Z | saged/get_tissue_data.py | ben-heil/saged | a2c039f00bfc97011c2ee1343c39af42e6f2e1e6 | [
"BSD-3-Clause"
] | 1 | 2020-05-29T14:24:16.000Z | 2020-05-29T14:24:16.000Z | """
Create a label file and a pickled subset file of the recount3 dataset containing only samples
with manually annotated tissue labels
"""
import argparse
import pickle
from typing import Dict
import pandas as pd
def map_samples_to_labels(file_path: str) -> Dict[str, str]:
    """Map sample external IDs to their curated tissue labels.

    Reads a tab-separated metadata table, keeps only the sample ID and
    curated-tissue columns, and drops samples without a tissue annotation.

    file_path: path (or file-like object) of the tab-separated metadata table

    Returns a dict mapping external_id -> curated tissue label.
    """
    metadata_df = pd.read_csv(file_path, delimiter='\t')
    columns_to_keep = ['external_id', 'recount_pred.curated.tissue']
    # keyword form: the positional `axis` argument to drop() is deprecated
    # and removed in pandas 2.0
    samples_and_labels = metadata_df.drop(
        columns=metadata_df.columns.difference(columns_to_keep))
    # rows with a missing curated tissue are unannotated; discard them
    samples_and_labels = samples_and_labels.dropna(axis='rows')
    samples_and_labels = samples_and_labels.set_index('external_id')
    return samples_and_labels.to_dict()['recount_pred.curated.tissue']
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('count_file', help='The pickled file containing the tpm matrix generated '
                                           'by pickle_tsv.py')
    parser.add_argument('metadata_file', help='The file with info mapping samples to studies')
    parser.add_argument('subset_out', help='The path to save the labeled data to')
    parser.add_argument('label_out', help='The path to save the sample to label mappings to')
    args = parser.parse_args()

    # save the sample -> tissue-label mapping
    sample_to_label = map_samples_to_labels(args.metadata_file)
    with open(args.label_out, 'wb') as out_file:
        pickle.dump(sample_to_label, out_file)

    # load the expression (tpm) matrix; the dead `expression_df = None`
    # pre-assignment was removed
    with open(args.count_file, 'rb') as count_file:
        expression_df = pickle.load(count_file)

    # keep only labeled samples actually present in the tpm matrix
    samples_in_tpm = set(expression_df.index)
    samples_to_keep = [s for s in sample_to_label if s in samples_in_tpm]
    expression_df = expression_df.loc[samples_to_keep, :]

    with open(args.subset_out, 'wb') as out_file:
        pickle.dump(expression_df, out_file)
| 34.454545 | 98 | 0.729815 | 281 | 1,895 | 4.601423 | 0.33452 | 0.048724 | 0.074246 | 0.027842 | 0.122196 | 0.122196 | 0.072699 | 0 | 0 | 0 | 0 | 0.001284 | 0.177836 | 1,895 | 54 | 99 | 35.092593 | 0.828626 | 0.069129 | 0 | 0 | 0 | 0 | 0.191344 | 0.030752 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.125 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
929ea315775a00593ed7a9f890d334c617507497 | 4,136 | py | Python | tests/broker/test_del_parameter.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | tests/broker/test_del_parameter.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | tests/broker/test_del_parameter.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del parameter command."""
import unittest
if __name__ == "__main__":
    # when run directly, load broker test dependencies before the
    # TestBrokerCommand import below
    from broker import utils
    utils.import_depends()
from broker.brokertest import TestBrokerCommand
class TestDelParameter(TestBrokerCommand):
    """Broker tests for the del_parameter command."""

    def test_100_del_testrequired(self):
        """Delete foo/testrequired; the 'foo' plenary disappears afterwards."""
        self.check_plenary_exists("aquilon", "personality", "utpers-dev+next",
                                  "foo")
        self.noouttest(["del_parameter", "--personality", "utpers-dev",
                        "--archetype", "aquilon", "--path", "foo/testrequired"])
        self.check_plenary_gone("aquilon", "personality", "utpers-dev+next",
                                "foo")

    def test_110_del_single_action(self):
        """Delete one action; the sibling action remains in show and cat output."""
        self.noouttest(["del_parameter", "--personality", "utpers-dev",
                        "--archetype", "aquilon", "--path", "actions/testaction2"])
        command = ["show_parameter", "--personality", "utpers-dev",
                   "--archetype", "aquilon", "--personality_stage", "next"]
        out = self.commandtest(command)
        self.matchoutput(out, "testaction", command)
        self.matchclean(out, "testaction2", command)
        # the generated template must also reflect the deletion
        command = ["cat", "--personality", "utpers-dev",
                   "--archetype", "aquilon", "--personality_stage", "next",
                   "--param_tmpl", "actions"]
        out = self.commandtest(command)
        self.matchoutput(out, "testaction", command)
        self.matchclean(out, "testaction2", command)

    def test_115_del_actions(self):
        """Delete the whole actions path; its plenary and template go away."""
        self.check_plenary_exists("aquilon", "personality", "utpers-dev+next", "actions")
        self.noouttest(["del_parameter", "--personality", "utpers-dev",
                        "--archetype", "aquilon", "--path", "actions"])
        command = ["show_parameter", "--personality", "utpers-dev",
                   "--archetype", "aquilon", "--personality_stage", "next"]
        out = self.commandtest(command)
        self.matchclean(out, "testaction", command)
        self.matchclean(out, "actions", command)
        # cat now reports that the template has no parameters at all
        command = ["cat", "--personality", "utpers-dev",
                   "--archetype", "aquilon", "--personality_stage", "next",
                   "--param_tmpl", "actions"]
        out = self.notfoundtest(command)
        self.matchoutput(out, "No parameters found for template actions.",
                         command)
        self.check_plenary_gone("aquilon", "personality", "utpers-dev+next",
                                "actions")

    def test_200_del_bad_path(self):
        """An unknown top-level template name is rejected as a bad request."""
        command = ["del_parameter", "--personality", "utpers-dev",
                   "--archetype", "aquilon", "--path", "bad-path"]
        err = self.badrequesttest(command)
        self.matchoutput(err,
                         "Unknown parameter template bad-path.",
                         command)

    def test_200_del_unknown_path(self):
        """A valid template with an unknown sub-path yields a not-found error."""
        command = ["del_parameter", "--personality", "utpers-dev",
                   "--archetype", "aquilon", "--path", "foo/no-such-path"]
        err = self.notfoundtest(command)
        self.matchoutput(err,
                         "Path foo/no-such-path does not match any parameter "
                         "definitions of archetype aquilon.",
                         command)
if __name__ == '__main__':
    # run this module's tests directly with the standard unittest runner
    suite = unittest.TestLoader().loadTestsFromTestCase(TestDelParameter)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 42.204082 | 89 | 0.60324 | 421 | 4,136 | 5.793349 | 0.35867 | 0.090611 | 0.106601 | 0.107011 | 0.520705 | 0.480115 | 0.456745 | 0.456745 | 0.456745 | 0.412464 | 0 | 0.009772 | 0.257737 | 4,136 | 97 | 90 | 42.639175 | 0.784691 | 0.173356 | 0 | 0.5 | 0 | 0 | 0.318516 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.064516 | 0 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
929f30d54fb67d713942739135dbf52d301c492c | 1,210 | py | Python | tests/json_type_content_validator_tests.py | vklap/flask-json-content-type-validator | 9c6e596b1e47dc1fd4fe0ab738abbc7a4a8ab206 | [
"MIT"
] | 2 | 2019-11-15T17:59:36.000Z | 2020-06-08T04:42:36.000Z | tests/json_type_content_validator_tests.py | vklap/flask-json-content-type-validator | 9c6e596b1e47dc1fd4fe0ab738abbc7a4a8ab206 | [
"MIT"
] | null | null | null | tests/json_type_content_validator_tests.py | vklap/flask-json-content-type-validator | 9c6e596b1e47dc1fd4fe0ab738abbc7a4a8ab206 | [
"MIT"
] | null | null | null | import unittest
import json
from tests.dummy_flask_app import app
class JsonTypeContentValidatorTests(unittest.TestCase):
    """Tests for the JSON Content-Type validator using the dummy Flask app."""

    def setUp(self):
        app.testing = True
        self.app = app.test_client()

    def test_dummy_resource_should_return_data_given_json_content_header(self):
        """With the JSON Content-Type header, the resource echoes the payload."""
        payload = dict(key='value')
        response = self.app.get(
            '/echo-resource',
            data=json.dumps(payload),
            headers={'Content-Type': 'application/json'})
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(payload, json.loads(response.data))

    def test_dummy_resource_should_return_400_given_no_json_content_header(self):
        """Without the JSON Content-Type header, the request is rejected."""
        payload = dict(key='value')
        response = self.app.get('/echo-resource', data=json.dumps(payload))
        self.assertEqual(response.status_code, 400)
        body = json.loads(response.data)
        self.assertDictEqual(body, dict(
            error_code=1000,
            message='Missing Content-Type header application/json'
        ))
| 29.512195 | 85 | 0.643802 | 142 | 1,210 | 5.267606 | 0.359155 | 0.080214 | 0.032086 | 0.053476 | 0.516043 | 0.516043 | 0.259358 | 0.259358 | 0.259358 | 0.259358 | 0 | 0.014493 | 0.258678 | 1,210 | 40 | 86 | 30.25 | 0.819398 | 0.02562 | 0 | 0.230769 | 0 | 0 | 0.093857 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.115385 | false | 0 | 0.115385 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
929fca46bf26c17e3a094d83f8ff90c1a0c57add | 582 | py | Python | webware/MiddleKit/WebBrowser/BrowseObject.py | PeaceWorksTechnologySolutions/w4py3-middlekit | a9554e20c47010e7b0c0deee63e1786482c59a1c | [
"MIT"
] | 2 | 2020-10-31T09:12:58.000Z | 2021-02-20T13:52:14.000Z | webware/MiddleKit/WebBrowser/BrowseObject.py | WebwareForPython/w4py3-middlekit | f740e2d2d3a5c225d6b8f9eb27ac08f8deed47e6 | [
"MIT"
] | 2 | 2020-01-07T15:24:09.000Z | 2020-01-08T15:39:57.000Z | webware/MiddleKit/WebBrowser/BrowseObject.py | PeaceWorksTechnologySolutions/w4py3-middlekit | a9554e20c47010e7b0c0deee63e1786482c59a1c | [
"MIT"
] | 1 | 2021-09-27T21:04:18.000Z | 2021-09-27T21:04:18.000Z | from .StorePage import StorePage
class BrowseObject(StorePage):
    """Page that renders one stored object, identified by class name and serial."""

    def writeContent(self):
        """Fetch the requested object and render it as an HTML table."""
        request = self.request()
        className = request.field('class')
        serialNum = int(request.field('serialNum'))
        target = self.store().fetchObject(className, serialNum, None)
        if target is not None:
            write = self.writeln
            write('<table>')
            write(target.klass().htHeadingsRow())
            write(target.htAttrsRow())
            write('</table>')
        else:
            self.writeln('<p>No object in store for %s.%i.</p>'
                         % (className, serialNum))
92a139101f7375dea0fe1c410b3894330370bd08 | 2,867 | py | Python | scripts/generate-new-list.py | shiaki/sforzando | 24aa5c49693fe783336cf41847b1b361e709d086 | [
"MIT"
] | null | null | null | scripts/generate-new-list.py | shiaki/sforzando | 24aa5c49693fe783336cf41847b1b361e709d086 | [
"MIT"
] | null | null | null | scripts/generate-new-list.py | shiaki/sforzando | 24aa5c49693fe783336cf41847b1b361e709d086 | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
Read results from visual inspection, create new target lists.
'''
import os, sys
import json
import glob, shutil
from collections import OrderedDict
if __name__ == '__main__':

    # read candidate events (order-preserving, so output order is stable)
    with open('candidate-events.json', 'r') as fp:
        cand_events = json.load(fp, object_pairs_hook=OrderedDict)

    # read results of visual inspection
    with open('./visual-inspection.json', 'r') as f:
        vis_insp = json.load(f, object_pairs_hook=OrderedDict)

    # Visual inspection flags:
    #   c: potential close-by host object
    #   y: host object visible
    #   n: host object invisible
    #   q: poor image quality
    #   f: flag for interesting cases
    case_absent, case_visible, case_ambiguous = list(), list(), list()

    # sort each inspected event into visible / absent / ambiguous bins
    for cand_i, cand_info_i in cand_events.items():
        # skip events w/o visual inspection results
        if cand_i not in vis_insp:
            continue
        # concatenate per-survey flag strings; only the values are needed
        result_i = ''.join(vis_insp[cand_i].values())
        is_visible = 'y' in result_i
        is_absent = 'n' in result_i
        if is_visible and not is_absent:
            case_visible.append(cand_i)
        elif is_absent and not is_visible:
            case_absent.append(cand_i)
        else:
            # conflicting or missing flags: cannot tell
            case_ambiguous.append(cand_i)

    # print the three lists of events with a per-survey inspection summary
    fmtstr_event = '{:24} {:32} {:18} {:18} {:16}'
    fmtstr_visinsp = '{:8} {:8} {:8} {:12} {:8}'
    img_srcs = ['SDSS', 'ps1', 'DECaLS', 'MzLS/BASS', 'DES']
    for cand_list in (case_visible, case_absent, case_ambiguous):
        print('\n\n\n\n\n')
        print(fmtstr_event.format('Name', 'Type', 'RA', 'Dec', 'Z'),
              fmtstr_visinsp.format(*img_srcs))
        for cand_i in cand_list:
            cand_info_i = cand_events[cand_i]
            cand_str = fmtstr_event.format(
                cand_i,
                cand_info_i['type'],
                cand_info_i['ra'],
                cand_info_i['dec'],
                cand_info_i['redshift'],
            )
            # summarize each survey: Y / N / ? / N/A (not inspected)
            vinsp_info_i = vis_insp[cand_i]
            insp_repr_i = list()
            for imsrc_i in img_srcs:
                if imsrc_i not in vinsp_info_i:
                    insp_repr_i.append('N/A')
                elif 'y' in vinsp_info_i[imsrc_i]:
                    insp_repr_i.append('Y')
                elif 'n' in vinsp_info_i[imsrc_i]:
                    insp_repr_i.append('N')
                else:
                    insp_repr_i.append('?')
            vinsp_str = fmtstr_visinsp.format(*insp_repr_i)
            print(cand_str, vinsp_str)
92a1643415ce525953e9608207d138948ae44c6e | 736 | py | Python | setup.py | movermeyer/rexlex | 6c451a3b7e9134cbdf895a7ec5682e480480ef1a | [
"BSD-3-Clause"
] | null | null | null | setup.py | movermeyer/rexlex | 6c451a3b7e9134cbdf895a7ec5682e480480ef1a | [
"BSD-3-Clause"
] | null | null | null | setup.py | movermeyer/rexlex | 6c451a3b7e9134cbdf895a7ec5682e480480ef1a | [
"BSD-3-Clause"
] | 1 | 2018-03-05T00:40:04.000Z | 2018-03-05T00:40:04.000Z | #!/usr/bin/env python
from setuptools import find_packages, setup
long_description = """Basic regular expression lexer implementation.
"""

appname = "rexlex"
version = "0.02"

setup(**{
    "name": appname,
    "version": version,
    "author": "Thom Neale",
    # bug fix: a duplicate "packages" key used to carry a hard-coded
    # ['tater'] list that was silently shadowed by this entry; the dead
    # duplicate has been removed (find_packages() was what took effect).
    "packages": find_packages(exclude=['tests*']),
    "package_data": {
        'rexlex.lexer': ['*.py'],
        'rexlex.scanner': ['*.py'],
    },
    "author_email": "twneale@gmail.com",
    "long_description": long_description,
    "description": 'Basic regular expression lexer implementation.',
    "license": "MIT",
    "url": "http://twneale.github.com/rexlex/",
    "platforms": ['any'],
    "scripts": [
    ]
})
| 23 | 68 | 0.588315 | 71 | 736 | 6 | 0.633803 | 0.105634 | 0.107981 | 0.15493 | 0.244131 | 0.244131 | 0 | 0 | 0 | 0 | 0 | 0.005226 | 0.220109 | 736 | 31 | 69 | 23.741935 | 0.736934 | 0.027174 | 0 | 0 | 0 | 0 | 0.453147 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92a2d943773633b0c8d2368e44bf92c1be473d13 | 2,776 | py | Python | alipay/aop/api/domain/EduWorkAddress.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/EduWorkAddress.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/EduWorkAddress.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class EduWorkAddress(object):
    """Work-address value object (detail address, city, district, province, street)."""

    # field names shared by the property accessors and the dict converters
    _FIELDS = ('address', 'city', 'district_name', 'province', 'street_name')

    def __init__(self):
        self._address = None
        self._city = None
        self._district_name = None
        self._province = None
        self._street_name = None

    @property
    def address(self):
        return self._address

    @address.setter
    def address(self, value):
        self._address = value

    @property
    def city(self):
        return self._city

    @city.setter
    def city(self, value):
        self._city = value

    @property
    def district_name(self):
        return self._district_name

    @district_name.setter
    def district_name(self, value):
        self._district_name = value

    @property
    def province(self):
        return self._province

    @province.setter
    def province(self, value):
        self._province = value

    @property
    def street_name(self):
        return self._street_name

    @street_name.setter
    def street_name(self, value):
        self._street_name = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict for the Alipay API."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            # nested API objects serialize themselves; plain values pass through
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an EduWorkAddress from a response dict; None for empty input."""
        if not d:
            return None
        o = EduWorkAddress()
        for name in EduWorkAddress._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| 27.485149 | 77 | 0.569885 | 321 | 2,776 | 4.700935 | 0.133956 | 0.119284 | 0.087475 | 0.059642 | 0.250497 | 0.141816 | 0 | 0 | 0 | 0 | 0 | 0.000536 | 0.32781 | 2,776 | 100 | 78 | 27.76 | 0.808146 | 0.01513 | 0 | 0.120482 | 0 | 0 | 0.088645 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.156627 | false | 0 | 0.024096 | 0.060241 | 0.289157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92a3e68d41a9710b084dfa63bf531cd128f665af | 450 | py | Python | Python/435.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | 1 | 2020-12-10T05:36:15.000Z | 2020-12-10T05:36:15.000Z | Python/435.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | null | null | null | Python/435.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | 3 | 2020-04-06T05:55:08.000Z | 2021-08-29T14:26:54.000Z | class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
    """Return the minimum number of intervals to remove so none overlap.

    Greedy: sort by start point; whenever an interval starts before the
    current window ends, one interval must go — keep the one that ends
    earlier (shrink `right`).
    Fix: the original also bound an unused local `left` from intervals[0].
    """
    if not intervals:
        return 0
    intervals.sort()  # in-place sort by start (then end), as before
    removed = 0
    right = intervals[0][1]  # end of the most recently kept interval
    for start, end in intervals[1:]:
        if start < right:
            # overlap with the kept interval: drop the later-ending one
            removed += 1
            right = min(right, end)
        else:
            right = end
    return removed
| 26.470588 | 71 | 0.451111 | 43 | 450 | 4.72093 | 0.534884 | 0.078818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020921 | 0.468889 | 450 | 17 | 72 | 26.470588 | 0.828452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92a551a7495fb26a88c28fac7bc5841ea1c6677d | 585 | py | Python | tests/basic_tests/test_obj.py | sriteja777/My_Mario | ee90abb1f40d42805cf6a92f9773aa04a1743a44 | [
"MIT"
] | 1 | 2021-03-24T13:52:15.000Z | 2021-03-24T13:52:15.000Z | tests/basic_tests/test_obj.py | sriteja777/My_Mario | ee90abb1f40d42805cf6a92f9773aa04a1743a44 | [
"MIT"
] | null | null | null | tests/basic_tests/test_obj.py | sriteja777/My_Mario | ee90abb1f40d42805cf6a92f9773aa04a1743a44 | [
"MIT"
] | 1 | 2018-12-18T13:59:29.000Z | 2018-12-18T13:59:29.000Z | import sys
# Make the project package importable when pytest runs from elsewhere.
# NOTE(review): absolute developer-machine path — breaks on any other machine.
sys.path.append('/home/sriteja/PycharmProjects/My_Mario/mymario')
import objects as o
import config as c

# Module-level fixture shared (and mutated in place) by every test below.
ob = o.Obj(10, 10, 5,6, 'a')
def test_init():
    """Constructor must store coordinates, marker string and default flag."""
    expected = (10, 10, 5, 6, 'a', False)
    actual = (ob.max_x, ob.max_y, ob.min_x, ob.min_y, ob.string, ob.check_ends)
    assert actual == expected
def test_update():
    """After moving the object, update() must draw its marker into the grid."""
    for attr, value in (("min_x", 4), ("min_y", 5), ("max_y", 9), ("max_x", 9)):
        setattr(ob, attr, value)
    ob.update()
    assert c.DIMENSIONAL_ARRAY[8][3] == 'a'
def test_remove():
    """remove() must clear the grid cell that update() previously marked."""
    for attr, value in (("min_x", 4), ("min_y", 5), ("max_y", 9), ("max_x", 9)):
        setattr(ob, attr, value)
    ob.update()
    ob.remove()
    assert c.DIMENSIONAL_ARRAY[8][3] == ' '
92a5e861cf4d8399bd7cd91a87489badf6a26104 | 3,479 | py | Python | termdoc/htdm.py | jtauber/termdoc | 4dd765437ddf8a1bf3eb8c2bbbda4f71273fee85 | [
"MIT"
] | 7 | 2021-05-20T23:54:30.000Z | 2021-10-21T03:01:21.000Z | termdoc/htdm.py | jtauber/termdoc | 4dd765437ddf8a1bf3eb8c2bbbda4f71273fee85 | [
"MIT"
] | 24 | 2021-05-18T07:56:00.000Z | 2021-08-14T12:35:34.000Z | termdoc/htdm.py | jtauber/termdoc | 4dd765437ddf8a1bf3eb8c2bbbda4f71273fee85 | [
"MIT"
] | null | null | null | import collections
from enum import Enum, auto
class Duplicates(Enum):
    """Policy applied when the same term is added twice to one address."""
    ALLOW, IGNORE, ERROR = auto(), auto(), auto()
class HTDM:
    """Hierarchical term-document matrix.

    Documents are addressed by dot-separated paths (e.g. "a.b.c"); a count
    added at a leaf address is propagated to every ancestor level, so
    per-subtree totals are available at any depth.
    """

    def __init__(self, address_sep=".", duplicates=Duplicates.ALLOW):
        # One counter dict per hierarchy depth: counters[d] maps an address
        # of depth d to a Counter of term frequencies. counters[0] is the root.
        self.counters = []
        self.address_sep = address_sep
        self.duplicates = duplicates

    def depth(self, address):
        """Return the hierarchy depth of *address* (0 for empty/None)."""
        if address:
            depth = len(address.split(self.address_sep))
        else:
            depth = 0
        return depth

    def get_or_create_counter(self, depth):
        """Return the counter dict for *depth*, growing the list as needed."""
        while depth > len(self.counters) - 1:
            self.counters.append(collections.defaultdict(collections.Counter))
        return self.counters[depth]

    def increment_count(self, address, term, count):
        """Add *count* of *term* at *address* and at every ancestor address.

        The duplicate policy only applies to the leaf address itself
        (`first` is True on the first loop iteration only).
        """
        first = True
        while True:
            depth = self.depth(address)
            counter = self.get_or_create_counter(depth)[address]
            if first and term in counter:
                if self.duplicates == Duplicates.IGNORE:
                    return
                elif self.duplicates == Duplicates.ERROR:
                    raise ValueError(f"'{term}' already in '{address}'")
            counter[term] += count
            if depth == 0:
                break
            # Strip the last address component to move one level up.
            address = self.address_sep.join(address.split(self.address_sep)[:-1])
            first = False

    def load(self, filename, field_sep="\t", address_sep=None, prefix=None):
        """Load "address<sep>term[<sep>count]" lines; count defaults to 1.

        An optional *prefix* is prepended to every address before insertion.
        """
        address_sep = address_sep or self.address_sep
        with open(filename) as f:
            for line in f:
                fields = line.strip().split(field_sep)
                if len(fields) == 3:
                    address, term, count_string = fields
                    count = int(count_string)
                elif len(fields) == 2:
                    address, term = fields
                    count = 1
                else:
                    raise ValueError(f"{fields} should have 2 or 3 fields")
                if prefix:
                    address = prefix + address_sep + address
                self.increment_count(address, term, count)

    def save(self, filename, field_sep="\t", prefix=None):
        """Write the leaf entries (optionally restricted to *prefix*) to a file."""
        with open(filename, "w") as f:
            for document, term, count in self.leaf_entries(prefix):
                print(document, term, count, sep=field_sep, file=f)

    def get_counts(self, prefix=""):
        """Return the term Counter aggregated at *prefix* ("" = root total)."""
        depth = self.depth(prefix)
        return self.counters[depth][prefix]

    def prune(self, level):
        """Discard all counters deeper than *level*."""
        self.counters = self.counters[:level]

    def leaves(self):
        """Return the deepest level's address -> Counter mapping."""
        return self.counters[-1]

    def leaf_entries(self, prefix=None):
        """Yield (document, term, count) leaf triples.

        When *prefix* is given, only documents under it are yielded and the
        prefix (plus separator) is stripped from the returned address.
        """
        for document, counter in self.leaves().items():
            if prefix is None or document.startswith(prefix + self.address_sep):
                for term, count in counter.items():
                    if prefix:
                        subtree_document = document[len(prefix + self.address_sep) :]
                    else:
                        subtree_document = document
                    yield subtree_document, term, count

    def graft(self, prefix, subtree):
        """Merge another HTDM's leaves under *prefix* in this matrix."""
        for address, term, count in subtree.leaf_entries():
            self.increment_count(prefix + self.address_sep + address, term, count)

    def copy(self, prefix=None):
        """Return a new HTDM rebuilt from this one's leaves (optionally a subtree)."""
        new = HTDM(address_sep=self.address_sep, duplicates=self.duplicates)
        for document, term, count in self.leaf_entries(prefix):
            new.increment_count(document, term, count)
        return new
| 36.239583 | 85 | 0.569991 | 393 | 3,479 | 4.933842 | 0.216285 | 0.082517 | 0.072202 | 0.030944 | 0.092831 | 0.044353 | 0.044353 | 0.044353 | 0.044353 | 0 | 0 | 0.004314 | 0.333717 | 3,479 | 95 | 86 | 36.621053 | 0.832183 | 0 | 0 | 0.0875 | 0 | 0 | 0.020408 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.025 | 0.0125 | 0.3125 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92abd4bfab45f4773add1ea8e00aef24b3e5cafc | 2,482 | py | Python | gpflowopt/acquisition/poi.py | kyu999/GPflowOpt | 972aa70a57222c66ce9bcfe077f92deb4fbaf227 | [
"Apache-2.0"
] | 258 | 2017-04-28T15:47:35.000Z | 2022-03-31T08:44:40.000Z | gpflowopt/acquisition/poi.py | yanpei18345156216/GPflowOpt | f1c268e6b5dc4d7f458e06c59095901d55b73c32 | [
"Apache-2.0"
] | 123 | 2017-04-28T22:20:47.000Z | 2021-10-01T16:29:47.000Z | gpflowopt/acquisition/poi.py | yanpei18345156216/GPflowOpt | f1c268e6b5dc4d7f458e06c59095901d55b73c32 | [
"Apache-2.0"
] | 69 | 2017-06-06T00:18:01.000Z | 2022-02-25T21:43:39.000Z | # Copyright 2017 Joachim van der Herten
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .acquisition import Acquisition
from gpflow.param import DataHolder
from gpflow import settings
import numpy as np
import tensorflow as tf
# Small jitter added to predicted variances for numerical stability.
stability = settings.numerics.jitter_level
class ProbabilityOfImprovement(Acquisition):
    """
    Probability of Improvement acquisition function for single-objective global optimization.

    Key reference:

    ::

       @article{Kushner:1964,
            author = "Kushner, Harold J",
            journal = "Journal of Basic Engineering",
            number = "1",
            pages = "97--106",
            publisher = "American Society of Mechanical Engineers",
            title = "{A new method of locating the maximum point of an arbitrary multipeak curve in the presence of noise}",
            volume = "86",
            year = "1964"
       }

    .. math::
       \\alpha(\\mathbf x_{\\star}) = \\int_{-\\infty}^{f_{\\min}} \\, p( f_{\\star}\\,|\\, \\mathbf x, \\mathbf y, \\mathbf x_{\\star} ) \\, d f_{\\star}
    """

    def __init__(self, model):
        """
        :param model: GPflow model (single output) representing our belief of the objective
        """
        super(ProbabilityOfImprovement, self).__init__(model)
        # Incumbent: smallest predicted mean over feasible data, kept in a
        # DataHolder so the TF graph can read it; refreshed by _setup().
        self.fmin = DataHolder(np.zeros(1))
        self._setup()

    def _setup(self):
        """Refresh fmin from model predictions at the feasible data points."""
        super(ProbabilityOfImprovement, self)._setup()
        # Restrict to the rows the parent acquisition marks as feasible.
        feasible_samples = self.data[0][self.highest_parent.feasible_data_index(), :]
        samples_mean, _ = self.models[0].predict_f(feasible_samples)
        self.fmin.set_data(np.min(samples_mean, axis=0))

    def build_acquisition(self, Xcand):
        """Build the TF graph for PoI(x) = Phi((fmin - mu(x)) / sigma(x)) at Xcand."""
        candidate_mean, candidate_var = self.models[0].build_predict(Xcand)
        # Clamp the variance away from zero before taking the square root.
        candidate_var = tf.maximum(candidate_var, stability)
        normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var))
        return normal.cdf(self.fmin, name=self.__class__.__name__)
| 36.5 | 154 | 0.670024 | 309 | 2,482 | 5.239482 | 0.550162 | 0.03706 | 0.016059 | 0.019765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015049 | 0.22361 | 2,482 | 67 | 155 | 37.044776 | 0.825117 | 0.522965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.238095 | 0 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92ae05ad8c75372791cf6ad5c9f3d0a8771a5d26 | 3,160 | py | Python | courriers/backends/mailjet.py | multiplay/django-courriers | 4a043ad190652126e6a6a979b196286304cc4be3 | [
"MIT"
] | null | null | null | courriers/backends/mailjet.py | multiplay/django-courriers | 4a043ad190652126e6a6a979b196286304cc4be3 | [
"MIT"
] | null | null | null | courriers/backends/mailjet.py | multiplay/django-courriers | 4a043ad190652126e6a6a979b196286304cc4be3 | [
"MIT"
] | 1 | 2022-02-12T11:34:44.000Z | 2022-02-12T11:34:44.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils.functional import cached_property
from django.core.exceptions import ImproperlyConfigured
# Python 2/3 compatibility: smart_unicode was renamed to smart_text.
try:
    from django.utils.encoding import smart_unicode
except ImportError:
    from django.utils.encoding import smart_text as smart_unicode

from .campaign import CampaignBackend
from ..settings import (MAILJET_API_KEY, MAILJET_API_SECRET_KEY,
                        DEFAULT_FROM_EMAIL, DEFAULT_FROM_NAME,
                        PRE_PROCESSORS)
from ..utils import load_class

import mailjet

# Shared logger for the courriers app.
logger = logging.getLogger('courriers')
class MailjetBackend(CampaignBackend):
    """Newsletter campaign backend backed by the Mailjet HTTP API."""

    def __init__(self):
        # Fail fast if the required API credentials are missing from settings.
        if not MAILJET_API_KEY:
            raise ImproperlyConfigured(_('Please specify your MAILJET API key in Django settings'))

        if not MAILJET_API_SECRET_KEY:
            raise ImproperlyConfigured(_('Please specify your MAILJET API SECRET key in Django settings'))

        self.mailjet_api = mailjet.Api(api_key=MAILJET_API_KEY, secret_key=MAILJET_API_SECRET_KEY)

    @cached_property
    def list_ids(self):
        # Contact-list label -> Mailjet list id, fetched once per instance.
        return dict((l['label'], l['id']) for l in self.mailjet_api.lists.all()['lists'])

    def _subscribe(self, list_id, email):
        # Add a single contact to the given Mailjet list.
        self.mailjet_api.lists.addcontact(
            contact=email,
            id=list_id,
            method='POST'
        )

    def _unsubscribe(self, list_id, email):
        # Remove a single contact from the given Mailjet list.
        self.mailjet_api.lists.removecontact(
            contact=email,
            id=list_id,
            method='POST'
        )

    def _send_campaign(self, newsletter, list_id):
        """Create a Mailjet campaign, attach rendered HTML/text bodies, send it."""
        options = {
            'method': 'POST',
            'subject': smart_unicode(newsletter.name).encode('utf-8'),
            'list_id': list_id,
            'lang': 'en',
            'from': DEFAULT_FROM_EMAIL,
            'from_name': DEFAULT_FROM_NAME,
            'footer': 'default'
        }

        html = render_to_string('courriers/newsletter_raw_detail.html', {
            'object': newsletter,
            'items': newsletter.items.select_related('newsletter'),
            'options': options
        })

        campaign = self.mailjet_api.message.createcampaign(**options)

        # Let the configured pre-processors rewrite the HTML before upload.
        for pre_processor in PRE_PROCESSORS:
            html = load_class(pre_processor)(html)

        extra = {
            'method': 'POST',
            'id': campaign['campaign']['id'],
            'html': smart_unicode(html).encode('utf-8'),
            'text': smart_unicode(render_to_string('courriers/newsletter_raw_detail.txt', {
                'object': newsletter,
                'items': newsletter.items.select_related('newsletter'),
                'options': options
            })).encode('utf-8')
        }

        self.mailjet_api.message.sethtmlcampaign(**extra)

        self.mailjet_api.message.sendcampaign(**{
            'method': 'POST',
            'id': campaign['campaign']['id']
        })

    def _format_slug(self, *args):
        # Join all parts with dashes stripped, e.g. ("ab-c", 1) -> "abc1".
        return u''.join([(u'%s' % arg).replace('-', '') for arg in args])
| 32.57732 | 106 | 0.625316 | 350 | 3,160 | 5.4 | 0.305714 | 0.084656 | 0.051852 | 0.040212 | 0.341799 | 0.318519 | 0.250794 | 0.206349 | 0.077249 | 0.077249 | 0 | 0.00172 | 0.263924 | 3,160 | 96 | 107 | 32.916667 | 0.810834 | 0.006646 | 0 | 0.22973 | 0 | 0 | 0.126554 | 0.022633 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.175676 | 0.027027 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92aeaf3bb38c5d7ba7802606c755df910d5db3eb | 307 | py | Python | rnmu/pme/clique.py | marianotepper/nmu_rfit | c726be892b928b884f81452697b9211cf273e03c | [
"BSD-3-Clause"
] | 8 | 2017-06-13T13:07:34.000Z | 2020-02-13T06:30:42.000Z | rnmu/pme/clique.py | marianotepper/nmu_rfit | c726be892b928b884f81452697b9211cf273e03c | [
"BSD-3-Clause"
] | null | null | null | rnmu/pme/clique.py | marianotepper/nmu_rfit | c726be892b928b884f81452697b9211cf273e03c | [
"BSD-3-Clause"
] | 3 | 2017-06-10T18:30:57.000Z | 2019-03-19T07:28:25.000Z | from networkx import Graph, complement
from networkx.algorithms.clique import find_cliques
def maximal_independent_sets(mat):
    """Return the maximal independent sets of the graph with adjacency *mat*.

    Self-loops are removed first; the independent sets of a graph are
    exactly the cliques of its complement.
    """
    g = Graph(mat)
    g.remove_edges_from([(u, u) for u in g.nodes() if g.has_edge(u, u)])
    return list(find_cliques(complement(g)))
| 23.615385 | 51 | 0.664495 | 46 | 307 | 4.304348 | 0.586957 | 0.121212 | 0.060606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.237785 | 307 | 12 | 52 | 25.583333 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92b048524e06f46f14c0a5fe0c3471d724de29c7 | 4,241 | py | Python | NLP_Preprocessing/utils/utils_preprocessing.py | Centroida/case_ontotext | ad6cb3aaea5edc618bdb90443d95d067410c539f | [
"Apache-2.0"
] | 2 | 2018-02-11T10:34:48.000Z | 2018-02-11T17:31:21.000Z | NLP_Preprocessing/utils/utils_preprocessing.py | Centroida/case_ontotext | ad6cb3aaea5edc618bdb90443d95d067410c539f | [
"Apache-2.0"
] | null | null | null | NLP_Preprocessing/utils/utils_preprocessing.py | Centroida/case_ontotext | ad6cb3aaea5edc618bdb90443d95d067410c539f | [
"Apache-2.0"
] | null | null | null | # -*- coding: iso-8859-15 -*-
import unicodedata
import nltk
from nltk.stem.porter import PorterStemmer
import itertools
from nltk.tokenize import RegexpTokenizer
import pandas as pd
import numpy as np
import re
from nltk.corpus import stopwords
from tqdm import tqdm
import glob
import multiprocessing
from functools import partial
from tqdm import *
import ntpath
import os
import csv
def preprocess_line(line, stopwords):
    """Normalize one (company1, company2, snippet) row into a token string.

    Company mentions become <firstorganization>/<secondorganization> tags,
    digit runs become <number>, sentence dots become <eol>, possessives
    become <owns>; tokens contained in *stopwords* are dropped.
    Fix: tokenize with re.findall directly (equivalent to constructing an
    nltk RegexpTokenizer with the same pattern on every call, but cheaper
    and without the nltk dependency inside the hot loop).
    """
    org_1 = line["company1"].lower()
    org_2 = line["company2"].lower()
    snippet = line["snippet"].lower()
    snippet = snippet.replace(org_1, " <firstorganization> ")
    snippet = snippet.replace(org_2, " <secondorganization> ")
    snippet = re.sub(r"\d+", r"<number>", snippet)
    snippet = re.sub(r"\.", r"<eol>", snippet)
    # NOTE(review): this also strips "end" inside words (e.g. "weekend").
    snippet = re.sub(r"end", r"", snippet)
    snippet = re.sub(r"(?<=[\w>])<eol>", r" <eol>", snippet)
    snippet = re.sub(r"(?![\w<])<eol>", r"<eol> ", snippet)
    snippet = re.sub(r"(?<=')s", r" <owns>", snippet)
    snippet = re.sub(r"(?<=')es", r" <owns>", snippet)
    # Collapse any word glued to an organization tag into the bare tag.
    snippet = re.sub(r"\w{0,}<firstorganization>\w{0,}", "<firstorganization>", snippet)
    snippet = re.sub(r"\w{0,}<secondorganization>\w{0,}", "<secondorganization>", snippet)
    snippet = re.sub(r"-owned", " <owns>", snippet)
    snippet = re.sub(r"-owning", " <owns>", snippet)
    tokens = re.findall(r"[A-za-z<>&\-]+", snippet)
    return " ".join(word for word in tokens if word not in stopwords)
def path_leaf(path):
    """Return the final path component, even when *path* ends in a separator."""
    directory, leaf = ntpath.split(path)
    return leaf if leaf else ntpath.basename(directory)
def get_stopwords(path):
    """Read a stopword list: one word per line, trailing whitespace stripped."""
    with open(path, "r") as handle:
        return [entry.rstrip() for entry in handle]
def load_file_pandas(path, columns, out_path):
    """loads a csv file in a pandas dataframe and selects only the
    columns we are interested in.
    Can pass both array of indices of columns or array of column
    names as strings to the columns parameter"""
    # NOTE(review): `columns` is never used — no column selection happens,
    # contrary to the docstring.
    stopwords = get_stopwords("data/raw_data/stopwords.txt")
    # NOTE(review): "rU" mode is deprecated in Python 3; the handle is also
    # never closed explicitly.
    df = pd.read_csv(open(path, "rU", encoding="utf-8"),
                     encoding="utf-8",
                     engine="c",
                     index_col=False,
                     header=0,
                     error_bad_lines=False)
    print("The shape of the dataframe is: ", df.shape)
    # Clean every snippet row-by-row (tags companies, numbers, eol markers).
    df["snippet"] = df.apply(lambda row: preprocess_line(row, stopwords), axis=1)
    # Append one cleaned snippet per line to the output file.
    with open(out_path, "a") as f:
        for idx, line in df.iterrows():
            snippet = line["snippet"]
            f.write(snippet + '\n')
def preprare_train_test_data(path, columns, out_path_train, out_path_test):
    """Preprocess the CSV at *path* into labelled train/test line files.

    Rows with index < 71560 are appended to *out_path_train*, the rest to
    *out_path_test*; each line is "<snippet> <label>" where label is "1"
    for parent companies and "0" otherwise.
    Fix: the two output files are now closed via context managers — they
    were previously opened and never closed.
    NOTE(review): `columns` is unused and the 71560 split index is hard-coded.
    """
    stopwords = get_stopwords("data/raw_data/stopwords.txt")
    df = pd.read_csv(open(path, "rU", encoding="utf-8"),
                     encoding="utf-8",
                     engine="c",
                     index_col=False,
                     header=0,
                     error_bad_lines=False)
    print("The shape of the dataframe is: ", df.shape)
    df["snippet"] = df.apply(lambda row: preprocess_line(row, stopwords), axis=1)
    with open(out_path_train, "a") as f_train, open(out_path_test, "a") as f_test:
        for idx, line in df.iterrows():
            label = "0"
            if line["is_parent"] == True:
                label = "1"
            snippet = line["snippet"]
            if idx < 71560:
                f_train.write(snippet + " " + label + '\n')
            else:
                f_test.write(snippet + " " + label + '\n')
def iterate_dictionary(path):
    """iterates through all kinds of files in a directory by using the
    glob package and leaves directories alone"""
    if path[-1] != "/":
        path = path + "/"
    return [entry for entry in glob.glob(path + "*") if os.path.isfile(entry)]
def fix_file_lines(path, out_path):
    """Split *path* on #...END#... markers and append each piece — stripped of
    newlines and terminated by a double quote — as one line of *out_path*."""
    with open(path, "r") as src:
        pieces = re.compile("#+END#+\"{0,}").split(src.read())
    with open(out_path, "a") as dst:
        for piece in pieces:
            dst.write(piece.replace("\n", "") + "\"" + "\n")
92b0497c2fa2c3a01be8e9844086d5b63c53e894 | 1,902 | py | Python | rx/testing/reactivetest.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 4,342 | 2015-01-06T09:00:23.000Z | 2022-03-28T15:05:50.000Z | rx/testing/reactivetest.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 613 | 2015-01-07T20:44:56.000Z | 2022-03-20T06:14:20.000Z | rx/testing/reactivetest.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 420 | 2015-01-07T14:30:30.000Z | 2022-03-11T22:47:46.000Z | from typing import Any
import math
import types
from rx.core.notification import OnNext, OnError, OnCompleted
from .recorded import Recorded
from .subscription import Subscription
def is_prime(i: int) -> bool:
    """Return True if *i* is a prime number.

    Fix: use math.isqrt for an exact integer square root; the previous
    int(math.floor(math.sqrt(i))) can be off by one for very large *i*
    because of float rounding.
    """
    if i <= 1:
        return False
    # i is prime iff no j in [2, isqrt(i)] divides it.
    return all(i % j for j in range(2, math.isqrt(i) + 1))
# New predicate tests
class OnNextPredicate:
    """Equality helper matching OnNext notifications whose value satisfies *predicate*."""

    def __init__(self, predicate) -> None:
        self.predicate = predicate

    def __eq__(self, other):
        if other == self:
            return True
        if other is None or other.kind != 'N':
            return False
        return self.predicate(other.value)
class OnErrorPredicate:
    """Equality helper matching OnError notifications whose exception satisfies *predicate*."""

    def __init__(self, predicate):
        self.predicate = predicate

    def __eq__(self, other):
        if other == self:
            return True
        if other is None or other.kind != 'E':
            return False
        return self.predicate(other.exception)
class ReactiveTest:
    """Timing constants and factories for recorded test notifications."""

    created = 100
    subscribed = 200
    disposed = 1000

    @staticmethod
    def on_next(ticks: int, value: Any) -> Recorded:
        """Record an OnNext; a plain function becomes a value predicate."""
        if isinstance(value, types.FunctionType):
            payload = OnNextPredicate(value)
        else:
            payload = OnNext(value)
        return Recorded(ticks, payload)

    @staticmethod
    def on_error(ticks: int, exception: Exception) -> Recorded:
        """Record an OnError; a plain function becomes an exception predicate."""
        if isinstance(exception, types.FunctionType):
            payload = OnErrorPredicate(exception)
        else:
            payload = OnError(exception)
        return Recorded(ticks, payload)

    @staticmethod
    def on_completed(ticks: int) -> Recorded:
        """Record an OnCompleted at *ticks*."""
        return Recorded(ticks, OnCompleted())

    @staticmethod
    def subscribe(start: int, end: int) -> Subscription:
        """Record a subscription window [start, end]."""
        return Subscription(start, end)
| 23.775 | 63 | 0.627234 | 217 | 1,902 | 5.396313 | 0.322581 | 0.056362 | 0.081127 | 0.034159 | 0.278395 | 0.216909 | 0.157131 | 0.157131 | 0.157131 | 0.157131 | 0 | 0.009566 | 0.285489 | 1,902 | 79 | 64 | 24.075949 | 0.852097 | 0.02734 | 0 | 0.375 | 0 | 0 | 0.001085 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.160714 | false | 0 | 0.107143 | 0.035714 | 0.678571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92b1e3a36f771edef543cb8815d84b4cc693e74f | 12,296 | py | Python | my_main2.py | aitor-garcia-p/NCRFpp | 3e83fc6c462941eff65b8b42bfe6630277b92d6b | [
"Apache-2.0"
] | null | null | null | my_main2.py | aitor-garcia-p/NCRFpp | 3e83fc6c462941eff65b8b42bfe6630277b92d6b | [
"Apache-2.0"
] | null | null | null | my_main2.py | aitor-garcia-p/NCRFpp | 3e83fc6c462941eff65b8b42bfe6630277b92d6b | [
"Apache-2.0"
] | null | null | null | from model.sentclassifier import SentClassifier
from model.seqlabel import SeqLabel
from utils.data import Data
import torch
import os
import sys
from utils.functions import normalize_word
import time
from main import recover_nbest_label, get_ner_fmeasure, recover_label, batchify_with_label
def label_input(input_lines, model_dir, model_name, nbest=None):
    """Label raw input lines with a trained NCRF++ model.

    Loads the alphabets/config from <model_dir>/<model_name>.dset, decodes
    *input_lines* with <model_dir>/<model_name>.model and returns
    (decode_results, pred_scores) as produced by load_model_decode.
    """
    data = Data()
    data.HP_gpu = torch.cuda.is_available()
    (model_dset, model_file) = obtain_model_dset_and_file(model_dir, model_name)
    data.load(model_dset)
    data.dset_dir = model_dset
    data.load_model_dir = model_file
    # data.use_crf=False
    # Force CPU decoding regardless of CUDA availability (overrides above).
    data.HP_gpu = False
    # data.show_data_summary()
    # NOTE(review): `status` is computed but never used.
    status = data.status.lower()
    data.fix_alphabet()
    data.nbest = nbest
    # Debug output: inspect the label alphabet before and after decoding.
    print("LABEL ALPHABET SIZE", len(data.label_alphabet.instance2index))
    print("LABEL ALPHABET", data.label_alphabet.instance2index)
    data.raw_texts, data.raw_Ids = read_instance(input_lines, data.word_alphabet, data.char_alphabet, data.feature_alphabets, data.label_alphabet,
                                                 data.number_normalized, data.MAX_SENTENCE_LENGTH, data.sentence_classification, data.split_token)
    decode_results, pred_scores = load_model_decode(data, 'raw')
    print("LABEL ALPHABET SIZE", len(data.label_alphabet.instance2index))
    print("LABEL ALPHABET", data.label_alphabet.instance2index)
    for i in range(12):
        print(i, data.label_alphabet.get_instance(i))
    return decode_results, pred_scores
def obtain_model_dset_and_file(model_dir, model_name):
    """Return the (.dset, .model) file paths for *model_name* under *model_dir*."""
    return (os.path.join(model_dir, model_name + '.dset'),
            os.path.join(model_dir, model_name + '.model'))
def load_model_decode(data, name):
    """Load the saved model referenced by *data* and decode data.raw_Ids.

    *name* is only used for log messages. Returns (pred_results, pred_scores)
    from evaluate().
    """
    print("Load Model from file: ", data.model_dir)
    if data.sentence_classification:
        model = SentClassifier(data)
    else:
        model = SeqLabel(data)
    # model = SeqModel(data)
    ## load model need consider if the model trained in GPU and load in CPU, or vice versa
    # if not gpu:
    #     model.load_state_dict(torch.load(model_dir))
    #     # model.load_state_dict(torch.load(model_dir), map_location=lambda storage, loc: storage)
    #     # model = torch.load(model_dir, map_location=lambda storage, loc: storage)
    # else:
    #     model.load_state_dict(torch.load(model_dir))
    #     # model = torch.load(model_dir)
    # map_location='cpu' lets GPU-trained checkpoints load on CPU-only hosts.
    model.load_state_dict(torch.load(data.load_model_dir, map_location='cpu'))

    print("Decode %s data, nbest: %s ..." % (name, data.nbest))
    start_time = time.time()
    speed, acc, p, r, f, pred_results, pred_scores = evaluate(data, model, name, data.nbest)
    end_time = time.time()
    time_cost = end_time - start_time
    # data.seg: sequence segmentation task -> report P/R/F as well as accuracy.
    if data.seg:
        print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f" % (name, time_cost, speed, acc, p, r, f))
    else:
        print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f" % (name, time_cost, speed, acc))
    return pred_results, pred_scores
def evaluate(data, model, name, nbest=None):
    """Decode data.raw_Ids in batches and score predictions against gold labels.

    Returns (speed, acc, p, r, f, results, scores); when *nbest* is set for a
    sequence-labeling model, *results* holds the n-best label lists and
    *scores* the corresponding path scores, otherwise 1-best results.
    """
    instances = data.raw_Ids
    # NOTE(review): right_token/whole_token are never updated or used here.
    right_token = 0
    whole_token = 0
    nbest_pred_results = []
    pred_scores = []
    pred_results = []
    gold_results = []
    ## set model in eval model
    model.eval()
    batch_size = data.HP_batch_size
    start_time = time.time()
    train_num = len(instances)
    total_batch = train_num // batch_size + 1
    for batch_id in range(total_batch):
        start = batch_id * batch_size
        end = (batch_id + 1) * batch_size
        if end > train_num:
            end = train_num
        instance = instances[start:end]
        if not instance:
            continue
        batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(
            instance, data.HP_gpu, False, data.sentence_classification)
        if nbest and not data.sentence_classification:
            # n-best CRF decoding: keep all paths and their scores.
            scores, nbest_tag_seq = model.decode_nbest(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask,
                                                       nbest)
            nbest_pred_result = recover_nbest_label(nbest_tag_seq, mask, data.label_alphabet, batch_wordrecover)
            nbest_pred_results += nbest_pred_result
            pred_scores += scores[batch_wordrecover].cpu().data.numpy().tolist()
            # select the best sequence to evaluate
            tag_seq = nbest_tag_seq[:, :, 0]
        else:
            tag_seq = model(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask)
        # print("tag:",tag_seq)
        pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)
        pred_results += pred_label
        gold_results += gold_label
    decode_time = time.time() - start_time
    speed = len(instances) / decode_time if decode_time > 0 else float('inf')
    acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme)
    if nbest and not data.sentence_classification:
        return speed, acc, p, r, f, nbest_pred_results, pred_scores
    return speed, acc, p, r, f, pred_results, pred_scores
def read_instance(input_lines, word_alphabet, char_alphabet, feature_alphabets, label_alphabet, number_normalized, max_sent_length,
                  sentence_classification=False, split_token='\\t', char_padding_size=-1, char_padding_symbol='</pad>'):
    """Convert raw text lines into (texts, ids) instances via the alphabets.

    Two input formats are supported:
    - sentence classification: one "<sentence><split_token>...<label>" per line;
    - sequence labeling (CoNLL-style): one "<word> [features...] <label>" per
      line, sentences separated by blank lines.
    Instances longer than *max_sent_length* (when >= 0) are silently dropped.
    Returns (instence_texts, instence_Ids) — parallel lists of
    [words, features, chars, labels] and their alphabet indices.
    """
    feature_num = len(feature_alphabets)
    in_lines = input_lines  # open(input_file,'r', encoding="utf8").readlines()
    instence_texts = []
    instence_Ids = []
    words = []
    features = []
    chars = []
    labels = []
    word_Ids = []
    feature_Ids = []
    char_Ids = []
    label_Ids = []
    ## if sentence classification data format, splited by split_token
    if sentence_classification:
        for line in in_lines:
            if len(line) > 2:
                pairs = line.strip().split(split_token)
                sent = pairs[0]
                if sys.version_info[0] < 3:
                    sent = sent.decode('utf-8')
                original_words = sent.split()
                for word in original_words:
                    words.append(word)
                    # Normalize digits AFTER storing the surface form.
                    if number_normalized:
                        word = normalize_word(word)
                    word_Ids.append(word_alphabet.get_index(word))
                    ## get char
                    char_list = []
                    char_Id = []
                    for char in word:
                        char_list.append(char)
                    if char_padding_size > 0:
                        char_number = len(char_list)
                        if char_number < char_padding_size:
                            char_list = char_list + [char_padding_symbol] * (char_padding_size - char_number)
                        assert (len(char_list) == char_padding_size)
                    for char in char_list:
                        char_Id.append(char_alphabet.get_index(char))
                    chars.append(char_list)
                    char_Ids.append(char_Id)
                # The last field of the line is the sentence label.
                label = pairs[-1]
                label_Id = label_alphabet.get_index(label)
                ## get features
                feat_list = []
                feat_Id = []
                for idx in range(feature_num):
                    # Strip an optional "[tag]" prefix before the feature value.
                    feat_idx = pairs[idx + 1].split(']', 1)[-1]
                    feat_list.append(feat_idx)
                    feat_Id.append(feature_alphabets[idx].get_index(feat_idx))
                ## combine together and return, notice the feature/label as different format with sequence labeling task
                if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)):
                    instence_texts.append([words, feat_list, chars, label])
                    instence_Ids.append([word_Ids, feat_Id, char_Ids, label_Id])
                words = []
                features = []
                chars = []
                char_Ids = []
                word_Ids = []
                feature_Ids = []
                label_Ids = []
        # Flush a trailing instance left in the buffers after the loop.
        if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)):
            instence_texts.append([words, feat_list, chars, label])
            instence_Ids.append([word_Ids, feat_Id, char_Ids, label_Id])
            words = []
            features = []
            chars = []
            char_Ids = []
            word_Ids = []
            feature_Ids = []
            label_Ids = []
    else:
        ### for sequence labeling data format i.e. CoNLL 2003
        for line in in_lines:
            if len(line) > 2:
                # Token line: "<word> [feature...] <label>".
                pairs = line.strip().split()
                word = pairs[0]
                if sys.version_info[0] < 3:
                    word = word.decode('utf-8')
                words.append(word)
                if number_normalized:
                    word = normalize_word(word)
                label = pairs[-1]
                labels.append(label)
                word_Ids.append(word_alphabet.get_index(word))
                label_Ids.append(label_alphabet.get_index(label))
                ## get features
                feat_list = []
                feat_Id = []
                for idx in range(feature_num):
                    feat_idx = pairs[idx + 1].split(']', 1)[-1]
                    feat_list.append(feat_idx)
                    feat_Id.append(feature_alphabets[idx].get_index(feat_idx))
                features.append(feat_list)
                feature_Ids.append(feat_Id)
                ## get char
                char_list = []
                char_Id = []
                for char in word:
                    char_list.append(char)
                if char_padding_size > 0:
                    char_number = len(char_list)
                    if char_number < char_padding_size:
                        char_list = char_list + [char_padding_symbol] * (char_padding_size - char_number)
                    assert (len(char_list) == char_padding_size)
                else:
                    ### not padding
                    pass
                for char in char_list:
                    char_Id.append(char_alphabet.get_index(char))
                chars.append(char_list)
                char_Ids.append(char_Id)
            else:
                # Blank line: sentence boundary — emit the buffered instance.
                if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)):
                    instence_texts.append([words, features, chars, labels])
                    instence_Ids.append([word_Ids, feature_Ids, char_Ids, label_Ids])
                words = []
                features = []
                chars = []
                labels = []
                word_Ids = []
                feature_Ids = []
                char_Ids = []
                label_Ids = []
        # Flush the last sentence when the input has no trailing blank line.
        if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)):
            instence_texts.append([words, features, chars, labels])
            instence_Ids.append([word_Ids, feature_Ids, char_Ids, label_Ids])
            words = []
            features = []
            chars = []
            labels = []
            word_Ids = []
            feature_Ids = []
            char_Ids = []
            label_Ids = []
    return instence_texts, instence_Ids
# TEST STUFF
def load_egunkaria_test_data():
    """Read the Egunkaria Basque NER test file and return its raw lines."""
    path = "C:\\Users\\agarciap\\Data\\DATASETS\\NCRFpp_tests\\TEST_named_ent_eu.test.ixaAlikeNOTAGS"
    with open(path, 'r', encoding='utf-8') as f:
        return f.readlines()
if __name__ == '__main__':
    # Location and base name of the trained model (.dset/.model pair).
    model_dir = 'C:\\Users\\agarciap\\Data\\DATASETS\\NCRFpp_tests\\test_with_crf'
    model_name = 'lstmcrf'
    input_lines = load_egunkaria_test_data()  # 'LONDON B-LOC\nPhil B-PER\nSimmons B-PER\ntook O\nfour O'.split('\n')
    # First whitespace-separated field of each line is the token itself.
    tokens = [x.split(' ')[0] for x in input_lines]
    decode_results, pred_scores = label_input(input_lines, model_dir, model_name, nbest=5)
    print(tokens)
    # NOTE(review): decode_results is nested lists; [0] presumably selects the
    # first sentence/best list — confirm the exact layout.
    for i, decode_result in enumerate(decode_results[0]):
        print(decode_result, '\t', pred_scores[0][i])
    # print(pred_scores)
| 42.546713 | 156 | 0.588728 | 1,496 | 12,296 | 4.554144 | 0.14639 | 0.018788 | 0.021136 | 0.017467 | 0.523118 | 0.497431 | 0.471598 | 0.446499 | 0.416557 | 0.366652 | 0 | 0.00707 | 0.309857 | 12,296 | 288 | 157 | 42.694444 | 0.795781 | 0.084418 | 0 | 0.502128 | 0 | 0.004255 | 0.039501 | 0.013553 | 0 | 0 | 0 | 0 | 0.008511 | 1 | 0.025532 | false | 0.004255 | 0.038298 | 0 | 0.093617 | 0.046809 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92b2ec2594b7f624a5e0c4ef610a7491441323d8 | 694 | py | Python | GRUCon2021/task7/scripts/parse.py | BlackVS/CTFs | ca7d5c9a3dbf1bcfe7607bbfd454eca470634dde | [
"MIT"
] | 10 | 2020-09-06T12:08:32.000Z | 2021-07-19T15:12:30.000Z | GRUCon2021/task7/scripts/parse.py | BlackVS/CTFs | ca7d5c9a3dbf1bcfe7607bbfd454eca470634dde | [
"MIT"
] | null | null | null | GRUCon2021/task7/scripts/parse.py | BlackVS/CTFs | ca7d5c9a3dbf1bcfe7607bbfd454eca470634dde | [
"MIT"
] | 1 | 2021-11-22T05:14:56.000Z | 2021-11-22T05:14:56.000Z | #!/usr/bin/python
import binascii
import dpkt
import struct
import sys

# Parse a USB mass-storage capture.  Packets whose payload starts with the
# b'USBC' signature carry a write command; the fields at 0x51..0x54 and
# 0x56..0x57 (offsets determined empirically, past a 64-byte capture header)
# hold a big-endian LBA and transfer length.  The packets that follow carry
# the written data; reassemble the image by sorting payloads by LBA.
lba = 0
blen = 0  # bug fix: was unbound if a data packet preceded the first USBC packet
packets = []
with open("usbtraffic.pcap", 'rb') as f:
    pcap = dpkt.pcap.Reader(f)
    for ts, buf in pcap:
        if buf[64:68] == b'USBC':
            lba = int.from_bytes(buf[0x51:0x55], 'big')
            blen = int.from_bytes(buf[0x56:0x58], 'big')
            print(hex(lba))
        else:
            # Data packet: attribute it to the most recent command's LBA.
            packets.append((lba, blen, ts, buf[64:]))
packets.sort()
with open("usbtraffic.raw", "wb+") as f:
    for lba, blen, ts, b in packets:
        print("Writing lba={} len={} ts={}".format(lba, blen, ts))
        f.write(b)
| 23.133333 | 66 | 0.567723 | 107 | 694 | 3.663551 | 0.504673 | 0.053571 | 0.068878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040619 | 0.255043 | 694 | 29 | 67 | 23.931034 | 0.717602 | 0.122478 | 0 | 0 | 0 | 0 | 0.11755 | 0 | 0 | 0 | 0.02649 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92b4a1d863351284e48ed8cd2f0cf5dc7bead1a6 | 2,586 | py | Python | online_store/weather/views.py | PhiVaLo/final_project2 | 3d61404f2fe4b9e4503087e523ea16d6c411f57f | [
"Apache-2.0"
] | null | null | null | online_store/weather/views.py | PhiVaLo/final_project2 | 3d61404f2fe4b9e4503087e523ea16d6c411f57f | [
"Apache-2.0"
] | 14 | 2020-12-06T13:31:34.000Z | 2020-12-12T12:55:06.000Z | online_store/weather/views.py | PhiVaLo/final_project2 | 3d61404f2fe4b9e4503087e523ea16d6c411f57f | [
"Apache-2.0"
] | 1 | 2020-12-17T14:33:19.000Z | 2020-12-17T14:33:19.000Z | from django.shortcuts import render
from . utils import get_html_content
from store.utils import cartData
# Create your views here.
def weather(request):
    """Render the weather page.

    If a ``city`` GET parameter is present, fetch Google's weather result
    page for it and scrape the current conditions plus a 7-day forecast.
    Any failure (unknown city, markup change, network error) falls back to
    reporting the input as an unknown location instead of erroring out.
    """
    weather_data = {}
    unknown_location = {}
    try:
        if 'city' in request.GET:
            # Fetch weather data
            city = request.GET.get('city')
            html_content = get_html_content(city)
            # Scraping
            from bs4 import BeautifulSoup
            soup = BeautifulSoup(html_content, 'html.parser')

            # Weather of the week (7 days).  The class names below are
            # Google's obfuscated CSS classes and may break when Google
            # changes its markup.
            def weather_week(day):
                dayy = soup.find('div', attrs={'data-wob-di': day}).find('div', attrs={'class': 'QrNVmd Z1VzSb'}).text
                logo = soup.find('div', attrs={'data-wob-di': day}).find('img', attrs={'class': 'uW5pk'})['src']
                min_temp = soup.find('div', attrs={'data-wob-di': day}).find('div', attrs={'class': 'QrNVmd ZXCv8e'}).find('span', attrs={'style': 'display:inline'}).text
                max_temp = soup.find('div', attrs={'data-wob-di': day}).find('div', attrs={'class': 'vk_gy gNCp2e'}).find('span', attrs={'style': 'display:inline'}).text
                return [dayy, logo, min_temp, max_temp]

            for i in range(7):
                # Scrape each day once (was four calls per day, i.e. 4x the
                # soup traversals for identical results).
                day_info = weather_week(str(i))
                weather_data['day' + str(i)] = day_info[0]
                weather_data['logo' + str(i)] = day_info[1]
                weather_data['min_temp' + str(i)] = day_info[2]
                weather_data['max_temp' + str(i)] = day_info[3]

            # Current weather status
            weather_data.update({
                'region': soup.find('div', attrs={'id': 'wob_loc'}).text,
                'daytime': soup.find('div', attrs={'id': 'wob_dts'}).text,
                'status': soup.find('span', attrs={'id': 'wob_dc'}).text,
                'logo': soup.find('img', attrs={'id': 'wob_tci'})['src'],
                'temp': soup.find('span', attrs={'id': 'wob_tm'}).text,
                'precipitation': soup.find('span', attrs={'id': 'wob_pp'}).text,
                'humidity': soup.find('span', attrs={'id': 'wob_hm'}).text,
                'wind': soup.find('span', attrs={'id': 'wob_ws'}).text,
            })
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate.  Only reached when 'city' was present, so the
        # subscript below cannot itself raise KeyError.
        unknown_location = {'input': request.GET['city']}

    # cart data
    data = cartData(request)
    cartItems = data['cartItems']
    context = {
        'weather': weather_data,
        'unknown_location': unknown_location,
        'cartItems': cartItems,
    }
    return render(request, 'weather/weather_api.html', context)
92b669a43887f9c811d94bcfb545a90bfec0af49 | 718 | py | Python | setup.py | zaibacu/wutu | 642207dcc3bb248fdfb2ee6e32c575cf2ea15fc6 | [
"MIT"
] | 2 | 2015-10-07T12:31:24.000Z | 2015-10-08T04:58:48.000Z | setup.py | zaibacu/wutu | 642207dcc3bb248fdfb2ee6e32c575cf2ea15fc6 | [
"MIT"
] | null | null | null | setup.py | zaibacu/wutu | 642207dcc3bb248fdfb2ee6e32c575cf2ea15fc6 | [
"MIT"
] | null | null | null | from setuptools import setup
from pip.req import parse_requirements
import sys, os
sys.path.append("wutu/")
sys.path.append("wutu/compiler/")
install_reqs = list(parse_requirements("requirements.txt", session={}))
def version():
import version
return version.get_version()
setup(name="wutu",
version=version(),
description="A minimalistic python-angular framework",
author="Šarūnas Navickas",
author_email="zaibacu@gmail.com",
url="https://github.com/zaibacu/wutu",
license="MIT",
packages=["wutu", "wutu.compiler"],
install_requires=[str(ir.req) for ir in install_reqs],
test_suite="nose.collector",
tests_require=["nose"])
| 27.615385 | 71 | 0.669916 | 86 | 718 | 5.488372 | 0.627907 | 0.072034 | 0.055085 | 0.072034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.189415 | 718 | 25 | 72 | 28.72 | 0.810997 | 0 | 0 | 0 | 0 | 0 | 0.250696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92b778782a6c826438fa80906dc1159fa91249e9 | 2,063 | py | Python | memory_loading.py | leobouts/Market_basket_analysis | 3f22db68a3bda69abd82717d15c68b4659d61a80 | [
"MIT"
] | null | null | null | memory_loading.py | leobouts/Market_basket_analysis | 3f22db68a3bda69abd82717d15c68b4659d61a80 | [
"MIT"
] | null | null | null | memory_loading.py | leobouts/Market_basket_analysis | 3f22db68a3bda69abd82717d15c68b4659d61a80 | [
"MIT"
] | null | null | null | import itertools
import threading
import pickle
import time
import sys
def animate():
    """Spin a console progress indicator until the module-global ``done``
    flag is set by one of the counting functions."""
    spinner = itertools.cycle(['|', '/', '-', '\\'])
    while not done:
        sys.stdout.write('\rComputing the matrix ' + next(spinner))
        sys.stdout.flush()
        time.sleep(0.1)
    # Overwrite the spinner line once the work has finished.
    sys.stdout.write('\r... ')
    sys.stdout.write('\rDone! ')
    print("")
def triangular_matrix_of_pairs_counters(user_baskets, movies_basket):
    """Count co-occurrences of movie pairs in a flattened triangular matrix.

    Pickles the resulting counter array to the file ``triangular`` and
    returns it.  Movie ids are assumed to be 1-based integers and each
    basket sorted ascending so that i < j holds for its pairs -- TODO
    confirm with the caller.
    """
    global done
    done = False
    # Spin a console indicator in the background while counting.
    t = threading.Thread(target=animate)
    t.start()
    no_of_movies = len(movies_basket)
    # space needed 2*n^2
    triangular_matrix = [0]*(2*no_of_movies ** 2)
    for basket in user_baskets:
        list_of_pairs = itertools.combinations(basket, 2)
        for pair in list_of_pairs:
            i = pair[0]
            j = pair[1]
            if i < j:
                # store the pair with i < j as a lower triangular matrix
                # with a dimension array
                k = (i-1) * (no_of_movies - i / 2) + j - 1
                # float to int
                k = int(k)
                # update the counter
                triangular_matrix[k] += 1
    # Signal the spinner thread to stop.
    done = True
    with open('triangular', 'wb') as fp:
        pickle.dump(triangular_matrix, fp)
    return triangular_matrix
def hashed_counters_of_pairs(user_baskets):
    """Count co-occurrences of item pairs across baskets in a hash table.

    Unlike the triangular-matrix variant, this stores no zero counters at
    all.  The table is pickled to the file ``hash_table`` and returned.

    Args:
        user_baskets: iterable of baskets, each an iterable of item ids.

    Returns:
        dict: mapping (i, j) pair tuples to their co-occurrence counts.
    """
    global done
    done = False
    # Spin a console indicator in the background while counting.
    t = threading.Thread(target=animate)
    t.start()
    hash_table = {}
    for basket in user_baskets:
        # All unordered pairs of items within this basket.
        for pair in itertools.combinations(basket, 2):
            # dict.get avoids raising and handling a KeyError for every
            # new pair, which is expensive inside this hot loop.
            hash_table[pair] = hash_table.get(pair, 0) + 1
    # Signal the spinner thread to stop.
    done = True
    # write to file
    with open('hash_table', 'wb') as fp:
        pickle.dump(hash_table, fp)
    return hash_table
| 20.838384 | 84 | 0.565681 | 266 | 2,063 | 4.244361 | 0.338346 | 0.063773 | 0.038973 | 0.033658 | 0.267493 | 0.200177 | 0.200177 | 0.200177 | 0.200177 | 0.200177 | 0 | 0.013148 | 0.336403 | 2,063 | 98 | 85 | 21.05102 | 0.811541 | 0.141057 | 0 | 0.307692 | 0 | 0 | 0.041998 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.096154 | 0 | 0.192308 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92b80775e077bc571711817c75463061aa2ef995 | 2,632 | py | Python | cintegrate/cpptypes.py | YanayGoor/cintegrate | 4eac07f68b0505c6734f120cfee656fcb7a9142a | [
"MIT"
] | null | null | null | cintegrate/cpptypes.py | YanayGoor/cintegrate | 4eac07f68b0505c6734f120cfee656fcb7a9142a | [
"MIT"
] | null | null | null | cintegrate/cpptypes.py | YanayGoor/cintegrate | 4eac07f68b0505c6734f120cfee656fcb7a9142a | [
"MIT"
] | null | null | null | from pathlib import PurePath
from elftools.elf.elffile import ELFFile
from .typing import get_class
import ctypes
def die_from_offset(cu, offset):
    """Return the first DIE in compilation unit *cu* located at *offset*.

    Raises IndexError when no DIE has that offset (same behaviour as the
    original list-indexing form).
    """
    matching = list(filter(lambda d: d.offset == offset, cu.iter_DIEs()))
    return matching[0]
def get_type_from_file(filename, name):
    """Look up the DWARF DIE named *name* inside the shared object
    *filename* and return its Python representation via ``get_die_type``.

    Only the first compilation unit of the file is searched; raises
    IndexError if no DIE carries that name.
    """
    with open(str(filename), 'rb') as f:
        elffile = ELFFile(f)
        dwarf_info = elffile.get_dwarf_info()
        cu = list(dwarf_info.iter_CUs())[0]
        # DWARF stores names as bytes, hence the encode before comparing.
        die = [die for die in (cu.iter_DIEs()) if 'DW_AT_name' in die.attributes and die.attributes['DW_AT_name'].value == name.encode('utf-8')][0]
        return get_die_type(filename, cu, die)
def get_die_type(filename, cu, die):
    """Turn a DWARF DIE into a callable or a ctypes-compatible class.

    For a subprogram DIE, the function is loaded from the shared library
    *filename* with ctypes and its argtypes/restype are reconstructed from
    the DIE's formal parameters and return type.  Any other DIE is
    delegated to ``get_class``.
    """
    name = die.attributes['DW_AT_name'].value.decode('utf-8')
    if die.tag == 'DW_TAG_subprogram':
        # C++ symbols carry a mangled linkage name; fall back to the plain
        # name when none is present (e.g. extern "C" / plain C functions).
        linkage_name = name if 'DW_AT_linkage_name' not in die.attributes else die.attributes['DW_AT_linkage_name'].value.decode('utf-8')
        func = getattr(ctypes.cdll.LoadLibrary(str(filename)), linkage_name)
        param = [subdie for subdie in die.iter_children() if subdie.tag == 'DW_TAG_formal_parameter']
        # Each parameter's DW_AT_type is an offset to another DIE describing
        # the actual type.
        param_types = [get_class(cu, die_from_offset(cu, subdie.attributes['DW_AT_type'].value)) for subdie in param]
        func.argtypes = param_types
        func.restype = get_class(cu, die_from_offset(cu, die.attributes['DW_AT_type'].value))
        return func
    return get_class(cu, die)
def cmp_decl_file(filename, files, die):
    """Return True when *die* was declared in the ``.c`` source whose name
    matches *filename*.

    *files* is the DWARF line-program file table; ``DW_AT_decl_file`` is a
    1-based index into it.
    """
    attrs = die.attributes
    if 'DW_AT_decl_file' not in attrs:
        return False
    file_index = attrs['DW_AT_decl_file'].value
    if len(files) < file_index:
        return False
    expected = filename.with_suffix('.c').name.encode('utf-8')
    return files[file_index - 1].name == expected
def get_all_user_types(filename):
    """Collect the types/functions declared in *filename*'s own C source.

    Walks the first compilation unit of the shared object, keeps the named
    DIEs whose ``DW_AT_decl_file`` points back at the matching ``.c`` file
    (filtering out library/system declarations), and converts each with
    ``get_die_type``.
    """
    with open(str(filename), 'rb') as f:
        elffile = ELFFile(f)
        dwarf_info = elffile.get_dwarf_info()
        cu = list(dwarf_info.iter_CUs())[0]
        # The CU line program's file table, indexed by DW_AT_decl_file.
        files = dwarf_info.line_program_for_CU(cu)['file_entry']
        dies = filter(lambda die: 'DW_AT_name' in die.attributes, cu.iter_DIEs())
        dies = filter(lambda die: cmp_decl_file(filename, files, die), dies)
        dies = map(lambda die: get_die_type(filename, cu, die), dies)
        dies = filter(lambda die: die is not None, dies)
        return list(dies)
if __name__ == "__main__":
    # Ad-hoc smoke test: dump the DIE of ``do_stuff`` from the test library.
    filename = './test/resources/cpp_test_file.so'
    with open(str(filename), 'rb') as f:
        elffile = ELFFile(f)
        dwarf_info = elffile.get_dwarf_info()
        cu = list(dwarf_info.iter_CUs())[0]
        # DWARF stores names as bytes, hence the encode before comparing.
        die = [die for die in (cu.iter_DIEs()) if 'DW_AT_name' in die.attributes and die.attributes['DW_AT_name'].value == 'do_stuff'.encode('utf-8')][0]
        print(die)
92ba7faaffb8f7360355154db493fc509d84f145 | 2,305 | py | Python | conf/jupyter/00_findspark.py | vshulyak/workstation_image | c94386aa6f66d49bcd19e742a031be10952aaf74 | [
"MIT"
] | null | null | null | conf/jupyter/00_findspark.py | vshulyak/workstation_image | c94386aa6f66d49bcd19e742a031be10952aaf74 | [
"MIT"
] | null | null | null | conf/jupyter/00_findspark.py | vshulyak/workstation_image | c94386aa6f66d49bcd19e742a031be10952aaf74 | [
"MIT"
] | null | null | null | import os
from functools import partial
import lazy_object_proxy
def start_spark(app_name="Jupyter"):
    """Publish lazily-created Spark globals for notebook use.

    Binds ``spark``, ``sc``, ``hc`` and ``sqlContext`` as module-level
    lazy proxies: nothing actually starts until one of them is first
    touched, which keeps notebook start-up fast.
    """
    def sc_lazy(spark):
        # SparkContext derived from the (lazily created) session.
        return spark.sparkContext

    def hc_lazy(spark):
        return HiveContext(spark.sparkContext)

    global sc
    global hc
    global sqlContext
    global spark

    import findspark
    findspark.init()
    from pyspark.sql import SparkSession
    from pyspark.sql import HiveContext
    # ``.getOrCreate`` is passed WITHOUT parentheses: the Proxy calls it
    # only when the ``spark`` object is first used.
    spark = lazy_object_proxy.Proxy(SparkSession.builder
                                    .appName(app_name)
                                    .enableHiveSupport()
                                    .config("spark.dynamicAllocation.enabled", "true")
                                    .config("spark.dynamicAllocation.minExecutors", "0")
                                    .config("spark.dynamicAllocation.maxExecutors", "11")
                                    .config("spark.dynamicAllocation.cachedExecutorIdleTimeout", "90s")
                                    .config("spark.executor.cores", "1")
                                    .config("spark.executor.memory", "10512m")
                                    .config("spark.memory.storageFraction", "0.2")
                                    .config("spark.serializer",
                                            "org.apache.spark.serializer.KryoSerializer")
                                    .config("spark.kryoserializer.buffer.max", "512m")
                                    .config("spark.driver.memory", "10g")
                                    .config("spark.executor.memoryOverhead", 1024)
                                    .config("spark.driver.memoryOverhead", 512)
                                    .config("spark.driver.maxResultSize", "10000m")
                                    .config("spark.port.maxRetries", 96)
                                    .getOrCreate)
    sc = lazy_object_proxy.Proxy(partial(sc_lazy, spark))
    # NOTE(review): sqlContext is built with hc_lazy, i.e. it is a
    # HiveContext rather than a plain SQLContext -- confirm this is intended.
    sqlContext = lazy_object_proxy.Proxy(partial(hc_lazy, spark))
    hc = lazy_object_proxy.Proxy(partial(hc_lazy, spark))

start_spark()
| 43.490566 | 115 | 0.463774 | 169 | 2,305 | 6.213018 | 0.378698 | 0.146667 | 0.071429 | 0.07619 | 0.098095 | 0.072381 | 0.072381 | 0.072381 | 0 | 0 | 0 | 0.025316 | 0.451627 | 2,305 | 52 | 116 | 44.326923 | 0.80538 | 0 | 0 | 0 | 0 | 0 | 0.204772 | 0.163557 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0.051282 | 0.282051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92baceda72fe1079e3c469a957d61c1676e7dbea | 1,534 | py | Python | live_predictions.py | eugeniodias5/Audio-Emotion-Server | 589974d02e309c6356fdb10679df4cfa4576e2b3 | [
"MIT"
] | null | null | null | live_predictions.py | eugeniodias5/Audio-Emotion-Server | 589974d02e309c6356fdb10679df4cfa4576e2b3 | [
"MIT"
] | null | null | null | live_predictions.py | eugeniodias5/Audio-Emotion-Server | 589974d02e309c6356fdb10679df4cfa4576e2b3 | [
"MIT"
] | null | null | null | """
This file can be used to try a live prediction.
"""
import keras
import librosa
import numpy as np
class LivePredictions:
    """
    Main class of the application.

    Loads a pre-trained Keras emotion-detection model and classifies the
    emotion expressed in an audio file.
    """

    def __init__(self):
        """
        Init method is used to initialize the main parameters.
        """
        # Path to the trained model, relative to the working directory.
        self.path = './model/Emotion_Voice_Detection_Model.h5'
        self.loaded_model = keras.models.load_model(self.path)

    def make_predictions(self, file):
        """
        Method to process the files and create your features.

        Loads the audio, averages 40 MFCC coefficients over time into a
        fixed-length feature vector, reshapes it to shape (1, 40, 1) and
        returns the model's predicted emotion label.
        """
        data, sampling_rate = librosa.load(file)
        # Mean over time of 40 MFCCs -> a single 40-dim feature vector.
        mfccs = np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)
        x = np.expand_dims(mfccs, axis=1)
        x = np.expand_dims(x, axis=0)
        # NOTE(review): ``predict_classes`` was removed in TensorFlow 2.6;
        # newer Keras needs ``np.argmax(model.predict(x), axis=-1)``.
        predictions = self.loaded_model.predict_classes(x)
        return self.convert_class_to_emotion(predictions)

    @staticmethod
    def convert_class_to_emotion(pred):
        """
        Method to convert the predictions (int) into human readable strings.
        """
        label_conversion = {'0': 'neutral',
                            '1': 'calm',
                            '2': 'happy',
                            '3': 'sad',
                            '4': 'angry',
                            '5': 'fearful',
                            '6': 'disgust',
                            '7': 'surprised'}
        # NOTE(review): if ``pred`` matches none of the keys, ``label`` is
        # never bound and the return below raises UnboundLocalError.
        for key, value in label_conversion.items():
            if int(key) == pred:
                label = value
        return label
| 30.078431 | 92 | 0.524772 | 169 | 1,534 | 4.615385 | 0.573965 | 0.015385 | 0.038462 | 0.033333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014418 | 0.367014 | 1,534 | 50 | 93 | 30.68 | 0.788877 | 0.166884 | 0 | 0 | 0 | 0 | 0.080169 | 0.033755 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.107143 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b7cd4df0f0bbe6c33a448d501b266a7c04861f1 | 3,956 | py | Python | Modules/GenWidgets/Nav.py | macromorgan/pocketchip-menu | 1f824b07ba179b386079528f2bf0496ec0c9c94f | [
"MIT"
] | 1 | 2021-11-12T12:57:59.000Z | 2021-11-12T12:57:59.000Z | Modules/GenWidgets/Nav.py | macromorgan/pocketchip-menu | 1f824b07ba179b386079528f2bf0496ec0c9c94f | [
"MIT"
] | null | null | null | Modules/GenWidgets/Nav.py | macromorgan/pocketchip-menu | 1f824b07ba179b386079528f2bf0496ec0c9c94f | [
"MIT"
] | null | null | null | import pygame
import __main__
from Modules.Globals import *
from Modules.NavWidgets.Battery import *
from Modules.NavWidgets.Bluetooth import *
from Modules.NavWidgets.Wifi import *
class Nav():
    """Navigation chrome: page-switch buttons plus status widgets
    (battery, wifi, bluetooth) drawn over the parent menu's pages."""

    def __init__(self, parent):
        """Build the four navigation buttons and the status widgets.

        Args:
            parent: the owning menu; must expose ``screen``, ``pages``,
                ``active_page`` and ``visible``.
        """
        self.visible = True
        self.parent = parent
        self.buttons = []
        self.widgets = []
        # go to power from apps
        self.buttons.append(
            PageButton(
                page=Pages.APPS,
                image='powerIcon.png',
                pos=(
                    EDGE_PADDING,
                    self.parent.screen.get_height() - 22 - EDGE_PADDING
                ),
                size=(45,22),
                function=lambda:self.goToPage(self.parent, page=1)
            )
        )
        # go to apps from power
        self.buttons.append(
            PageButton(
                page=Pages.POWER,
                image='nextIcon.png',
                pos=(
                    self.parent.screen.get_width() - 64,
                    self.parent.screen.get_height()/2 - 32
                ),
                size=(64,64),
                function=lambda:self.goToPage(self.parent, page=2)
            )
        )
        # go to settings from apps
        self.buttons.append(
            PageButton(
                page=Pages.APPS,
                image='settingsIcon.png',
                pos=(
                    self.parent.screen.get_width() - 45 - EDGE_PADDING,
                    self.parent.screen.get_height() - 22 - EDGE_PADDING
                ),
                size=(45,22),
                function=lambda:self.goToPage(self.parent, page=3)
            )
        )
        # go to apps from settings
        self.buttons.append(
            PageButton(
                page=Pages.SETTINGS,
                image='backIcon.png',
                pos=(
                    0,
                    self.parent.screen.get_height()/2 - 32
                ),
                size=(64,64),
                function=lambda:self.goToPage(self.parent, page=2)
            )
        )
        self.widgets.append(Battery(parent=self))
        self.widgets.append(Wifi(parent=self))
        self.widgets.append(Bluetooth(parent=self))

    def goToPage(self, menu, direction=None, page=None):
        """Switch *menu* to an absolute *page* number, or move one step in
        *direction* ('left'/'right'), clamping to the valid page range."""
        # Wake/redraw the screen on any navigation.
        pygame.fastevent.post(pygame.event.Event(pygame.USEREVENT, type="screen_update"))
        if page and 1 <= page <= len(menu.pages):
            menu.active_page = page
        else:
            if direction == "left":
                menu.active_page -= 1
                if menu.active_page < 1:
                    menu.active_page = 1
            elif direction == "right":
                menu.active_page += 1
                if menu.active_page >= len(menu.pages):
                    menu.active_page = len(menu.pages)

    def do(self, event):
        """Forward *event* to the buttons and refresh the widgets that are
        visible on the current page (or persistent)."""
        if self.parent.visible:
            for button in self.buttons:
                button.do(event)
        if self.visible:
            for widget in self.widgets:
                if widget.page == self.parent.active_page or widget.persistent:
                    widget.update()

    def update(self):
        # No per-frame state of its own; widgets update in do().
        pass

    def draw(self, surf):
        """Draw the page title, visible widgets and nav buttons on *surf*."""
        if self.visible is True:
            # draw page title
            font = pygame.font.Font(FONT_LATO,20)
            text = font.render(self.parent.pages[self.parent.active_page-1].title, True, (255, 255, 255))
            text_rect = text.get_rect()
            text_rect.midtop = (surf.get_width() / 2, EDGE_PADDING)
            surf.blit(text, text_rect)
            # draw widgets
            for widget in self.widgets:
                if widget.page == self.parent.active_page or widget.persistent:
                    widget.draw(surf)
            # draw nav buttons
            for button in self.buttons:
                if button.page == self.parent.active_page:
                    button.draw(surf)

if __name__ == '__main__':
    pass
| 31.396825 | 105 | 0.503539 | 416 | 3,956 | 4.680288 | 0.223558 | 0.09245 | 0.050334 | 0.058552 | 0.510529 | 0.447869 | 0.404725 | 0.345146 | 0.313303 | 0.313303 | 0 | 0.022194 | 0.39636 | 3,956 | 125 | 106 | 31.648 | 0.793132 | 0.035137 | 0 | 0.356436 | 0 | 0 | 0.021791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049505 | false | 0.019802 | 0.059406 | 0 | 0.118812 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b7eedc17fc971f906228a3bedf199afe148ad63 | 358 | py | Python | executive/executive/urls.py | b800h/vcloudexecutive | aa556664b454ba5d5112fa6c07dde8db8a7dfab4 | [
"BSD-3-Clause"
] | 1 | 2019-03-14T11:13:00.000Z | 2019-03-14T11:13:00.000Z | executive/executive/urls.py | bmcollier/vcloudexecutive | aa556664b454ba5d5112fa6c07dde8db8a7dfab4 | [
"BSD-3-Clause"
] | null | null | null | executive/executive/urls.py | bmcollier/vcloudexecutive | aa556664b454ba5d5112fa6c07dde8db8a7dfab4 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'executive.views.home', name='home'),
url(r'^cloudcontrol/ajax/', include('cloudcontrol.urlsajax')),
url(r'^cloudcontrol/', include('cloudcontrol.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| 32.545455 | 66 | 0.675978 | 43 | 358 | 5.627907 | 0.488372 | 0.066116 | 0.132231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131285 | 358 | 10 | 67 | 35.8 | 0.778135 | 0.162011 | 0 | 0 | 0 | 0 | 0.262626 | 0.070707 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b7faeb53a37cd7b206440dda10874cb138af114 | 5,356 | py | Python | examples/plot_als_classification.py | arokem/AFQ-Insight | be8db20b34484e96aec255b4862878ffc6a800c8 | [
"BSD-3-Clause"
] | null | null | null | examples/plot_als_classification.py | arokem/AFQ-Insight | be8db20b34484e96aec255b4862878ffc6a800c8 | [
"BSD-3-Clause"
] | 66 | 2019-05-03T22:10:11.000Z | 2022-02-20T18:33:38.000Z | examples/plot_als_classification.py | arokem/AFQ-Insight | be8db20b34484e96aec255b4862878ffc6a800c8 | [
"BSD-3-Clause"
] | 8 | 2018-05-17T04:16:30.000Z | 2022-02-07T18:00:57.000Z | """
=================================================
Classify ALS diagnosis from white matter features
=================================================
Predict ALS diagnosis from white matter features. This example fetches the ALS
classification dataset from Sarica et al [1]_. This dataset contains tractometry
features from 24 patients with ALS and 24 demographically matched control
subjects. The plots display the absolute value of the mean regression
coefficients (averaged across cross-validation splits) for the fractional
anisotropy (FA) features.
To save computational time, we take the first 10 principal components from each
feature group (i.e. from each metric-bundle combination).
For more details on this approach in a research setting, please see [2]_.
.. [1] Alessia Sarica, et al.
   "The Corticospinal Tract Profile in Amyotrophic Lateral Sclerosis"
Human Brain Mapping, vol. 38, pp. 727-739, 2017
DOI: 10.1002/hbm.23412
.. [2] Adam Richie-Halford, Jason Yeatman, Noah Simon, and Ariel Rokem
"Multidimensional analysis and detection of informative features in human brain white matter"
PLOS Computational Biology, 2021
DOI: 10.1371/journal.pcbi.1009136
"""
import matplotlib.pyplot as plt
import numpy as np
import os.path as op
from afqinsight.datasets import download_sarica, load_afq_data
from afqinsight import make_afq_classifier_pipeline
from groupyr.decomposition import GroupPCA
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_validate
# Fetch the Sarica ALS dataset and load the tractometry features, using the
# "class" column (patient vs. control) as the classification target.
workdir = download_sarica()
afqdata = load_afq_data(
    fn_nodes=op.join(workdir, "nodes.csv"),
    fn_subjects=op.join(workdir, "subjects.csv"),
    dwi_metrics=["md", "fa"],
    target_cols=["class"],
    label_encode_cols=["class"],
)

# afqdata is a namedtuple. You can access its fields using dot notation or by
# unpacking the tuple. To see all of the available fields use `afqdata._fields`
X = afqdata.X
y = afqdata.y
groups = afqdata.groups
feature_names = afqdata.feature_names
group_names = afqdata.group_names
subjects = afqdata.subjects
# Here we reduce computation time by taking the first 10 principal components of each feature group and performing SGL logistic regression on those components.
# If you want to train an SGL model without group PCA, set ``do_group_pca = False``. This will increase the number of features by an order of magnitude and slow down execution time.
do_group_pca = True

if do_group_pca:
    n_components = 10

    # The next three lines retrieve the group structure of the group-wise PCA
    # and store it in ``groups_pca``. We do not use the imputer or GroupPCA transformer
    # for anything else
    imputer = SimpleImputer(strategy="median")
    gpca = GroupPCA(n_components=n_components, groups=groups)
    groups_pca = gpca.fit(imputer.fit_transform(X)).groups_out_

    transformer = GroupPCA
    transformer_kwargs = {"groups": groups, "n_components": n_components}
else:
    transformer = False
    transformer_kwargs = None

# Build the impute -> scale -> (group PCA) -> SGL classification pipeline.
pipe = make_afq_classifier_pipeline(
    imputer_kwargs={"strategy": "median"},  # Use median imputation
    use_cv_estimator=True,  # Automatically determine the best hyperparameters
    feature_transformer=transformer,  # See note above about group PCA
    feature_transformer_kwargs=transformer_kwargs,
    scaler="standard",  # Standard scale the features before regression
    groups=groups_pca
    if do_group_pca
    else groups,  # SGL will use the original feature groups or the PCA feature groups depending on the choice above
    verbose=0,  # Be quiet!
    pipeline_verbosity=False,  # No really, be quiet!
    tuning_strategy="bayes",  # Use BayesSearchCV to determine the optimal hyperparameters
    n_bayes_iter=20,  # Consider only this many points in hyperparameter space
    cv=3,  # Use three CV splits to evaluate each hyperparameter combination
    l1_ratio=[0.0, 1.0],  # Explore the entire range of ``l1_ratio``
    eps=5e-2,  # This is the ratio of the smallest to largest ``alpha`` value
    tol=1e-2,  # Set a lenient convergence tolerance just for this example
)
# ``pipe`` is a scikit-learn pipeline and can be used in other scikit-learn functions
scores = cross_validate(
    pipe, X, y, cv=5, return_train_score=True, return_estimator=True
)

print(f"Mean train score: {np.mean(scores['train_score']):5.3f}")
print(f"Mean test score:  {np.mean(scores['test_score']):5.3f}")
print(f"Mean fit time:    {np.mean(scores['fit_time']):5.2f}s")
print(f"Mean score time:  {np.mean(scores['score_time']):5.2f}s")

# Average the absolute regression coefficients over the five CV estimators.
mean_coefs = np.mean(
    np.abs([est.named_steps["estimate"].coef_ for est in scores["estimator"]]), axis=0
)

# Plot the first 180 coefficients: 18 groups x 10 PCA components each,
# i.e. the FA metric's bundles (see groups_pca[:18] below).
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
_ = ax.plot(mean_coefs[:180], color="black", lw=2)
_ = ax.set_xlim(0, 180)

# Shade each bundle's span with its own colour and label it.
colors = plt.get_cmap("tab20").colors
for grp, grp_name, color in zip(groups_pca[:18], group_names, colors):
    _ = ax.axvspan(grp.min(), grp.max() + 1, color=color, alpha=0.8, label=grp_name[1])

# Shrink the axes to leave room for the legend below the plot.
box = ax.get_position()
_ = ax.set_position(
    [box.x0, box.y0 + box.height * 0.375, box.width, box.height * 0.625]
)

_ = ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.2), ncol=3)
_ = ax.set_ylabel(r"$\hat{\beta}$", fontsize=16)
_ = ax.set_xlabel("Group principal component", fontsize=16)
_ = ax.set_title("Group Principal Regression Coefficients (FA only)", fontsize=18)
| 41.51938 | 181 | 0.731516 | 794 | 5,356 | 4.816121 | 0.437028 | 0.012552 | 0.01046 | 0.010983 | 0.042887 | 0.02772 | 0 | 0 | 0 | 0 | 0 | 0.026316 | 0.155713 | 5,356 | 128 | 182 | 41.84375 | 0.819328 | 0.476475 | 0 | 0 | 0 | 0 | 0.154595 | 0.052252 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.106667 | 0 | 0.106667 | 0.053333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b827b627d677ff0e024a5757a45773ff0405784 | 7,004 | py | Python | mlearn/describers.py | ruoitrau86/mlearn | 5b24690344836f53047ede409966d5dd3859098f | [
"BSD-3-Clause"
] | 1 | 2020-05-05T05:11:56.000Z | 2020-05-05T05:11:56.000Z | mlearn/describers.py | ruoitrau86/mlearn | 5b24690344836f53047ede409966d5dd3859098f | [
"BSD-3-Clause"
] | null | null | null | mlearn/describers.py | ruoitrau86/mlearn | 5b24690344836f53047ede409966d5dd3859098f | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
import itertools
import numpy as np
import pandas as pd
from monty.json import MSONable
from pymatgen.core.periodic_table import get_el_sp
from sklearn.base import TransformerMixin, BaseEstimator
class BispectrumCoefficients(BaseEstimator, MSONable, TransformerMixin):
    """
    Bispectrum coefficients to describe the local environment of each
    atom in a quantitative way.
    """

    def __init__(self, rcutfac, twojmax, element_profile, rfac0=0.99363,
                 rmin0=0, diagonalstyle=3, quadratic=False, pot_fit=False):
        """
        Args:
            rcutfac (float): Global cutoff distance.
            twojmax (int): Band limit for bispectrum components.
            element_profile (dict): Parameters (cutoff factor 'r' and
                weight 'w') related to each element, e.g.,
                {'Na': {'r': 0.3, 'w': 0.9},
                 'Cl': {'r': 0.7, 'w': 3.0}}
            rfac0 (float): Parameter in distance to angle conversion.
                Set between (0, 1), default to 0.99363.
            rmin0 (float): Parameter in distance to angle conversion.
                Default to 0.
            diagonalstyle (int): Parameter defining which bispectrum
                components are generated. Choose among 0, 1, 2 and 3,
                default to 3.
            quadratic (bool): Whether including quadratic terms.
                Default to False.
            pot_fit (bool): Whether to output in potentials fitting
                format. Default to False, i.e., returning the bispectrum
                coefficients for each site.
        """
        # Imported locally to avoid a hard lammps dependency at module load.
        from mlearn.potentials.lammps.calcs import SpectralNeighborAnalysis
        self.calculator = SpectralNeighborAnalysis(rcutfac, twojmax,
                                                   element_profile,
                                                   rfac0, rmin0,
                                                   diagonalstyle,
                                                   quadratic)
        self.rcutfac = rcutfac
        self.twojmax = twojmax
        self.element_profile = element_profile
        self.rfac0 = rfac0
        self.rmin0 = rmin0
        self.diagonalstyle = diagonalstyle
        # Elements ordered by electronegativity for deterministic columns.
        self.elements = sorted(element_profile.keys(),
                               key=lambda sym: get_el_sp(sym).X)
        self.quadratic = quadratic
        self.pot_fit = pot_fit

    @property
    def subscripts(self):
        """
        The subscripts (2j1, 2j2, 2j) of all bispectrum components
        involved.
        """
        return self.calculator.get_bs_subscripts(self.twojmax,
                                                 self.diagonalstyle)

    def describe(self, structure, include_stress=False):
        """
        Returns data for one input structure.

        Args:
            structure (Structure): Input structure.
            include_stress (bool): Whether to include stress descriptors.

        Returns:
            DataFrame.

            In regular format, the columns are the subscripts of
            bispectrum components, while indices are the site indices
            in input structure.

            In potentials fitting format, to match the sequence of
            [energy, f_x[0], f_y[0], ..., f_z[N], v_xx, ..., v_xy], the
            bispectrum coefficients are summed up by each specie and
            normalized by a factor of No. of atoms (in the 1st row),
            while the derivatives in each direction are preserved, with
            the columns being the subscripts of bispectrum components
            with each specie and the indices being
            [0, '0_x', '0_y', ..., 'N_z'], and the virial contributions
            (in GPa) are summed up for all atoms for each component in
            the sequence of ['xx', 'yy', 'zz', 'yz', 'xz', 'xy'].
        """
        # Delegate to the batch path and slice out the single structure.
        return self.describe_all([structure], include_stress).xs(0, level='input_index')

    def describe_all(self, structures, include_stress=False):
        """
        Returns data for all input structures in a single DataFrame.

        Args:
            structures (Structure): Input structures as a list.
            include_stress (bool): Whether to include stress descriptors.

        Returns:
            DataFrame with indices of input list preserved. To retrieve
            the data for structures[i], use
            df.xs(i, level='input_index').
        """
        # Column labels: '2j1-2j2-2j' per component, plus quadratic cross
        # terms when enabled.
        columns = list(map(lambda s: '-'.join(['%d' % i for i in s]),
                           self.subscripts))
        if self.quadratic:
            columns += list(map(lambda s: '-'.join(['%d%d%d' % (i, j, k)
                                                    for i, j, k in s]),
                                itertools.combinations_with_replacement(self.subscripts, 2)))

        raw_data = self.calculator.calculate(structures)

        def process(output, combine, idx, include_stress):
            # Per-structure post-processing; ``combine`` selects the
            # potentials-fitting layout (see describe()).
            b, db, vb, e = output
            df = pd.DataFrame(b, columns=columns)
            if combine:
                df_add = pd.DataFrame({'element': e, 'n': np.ones(len(e))})
                df_b = df_add.join(df)
                n_atoms = df_b.shape[0]
                # Sum coefficients per element, normalized by atom count.
                b_by_el = [df_b[df_b['element'] == e] for e in self.elements]
                sum_b = [df[df.columns[1:]].sum(axis=0) for df in b_by_el]
                hstack_b = pd.concat(sum_b, keys=self.elements)
                hstack_b = hstack_b.to_frame().T / n_atoms
                hstack_b.fillna(0, inplace=True)
                # Force derivatives: one row per atom per direction x/y/z.
                dbs = np.split(db, len(self.elements), axis=1)
                dbs = np.hstack([np.insert(d.reshape(-1, len(columns)),
                                           0, 0, axis=1) for d in dbs])
                db_index = ['%d_%s' % (i, d)
                            for i in df_b.index for d in 'xyz']
                df_db = pd.DataFrame(dbs, index=db_index,
                                     columns=hstack_b.columns)
                if include_stress:
                    # Virial contributions summed over atoms per component.
                    vbs = np.split(vb.sum(axis=0), len(self.elements))
                    vbs = np.hstack([np.insert(v.reshape(-1, len(columns)),
                                               0, 0, axis=1) for v in vbs])
                    volume = structures[idx].volume
                    vbs = vbs / volume * 160.21766208  # from eV to GPa
                    vb_index = ['xx', 'yy', 'zz', 'yz', 'xz', 'xy']
                    df_vb = pd.DataFrame(vbs, index=vb_index,
                                         columns=hstack_b.columns)
                    df = pd.concat([hstack_b, df_db, df_vb])
                else:
                    df = pd.concat([hstack_b, df_db])
            return df

        df = pd.concat([process(d, self.pot_fit, i, include_stress)
                        for i, d in enumerate(raw_data)],
                       keys=range(len(raw_data)), names=["input_index", None])
        return df
| 43.234568 | 93 | 0.536408 | 816 | 7,004 | 4.503676 | 0.289216 | 0.035374 | 0.010612 | 0.015238 | 0.17415 | 0.122993 | 0.099048 | 0.051156 | 0.051156 | 0.035918 | 0 | 0.017898 | 0.369789 | 7,004 | 161 | 94 | 43.503106 | 0.814681 | 0.344517 | 0 | 0.053333 | 0 | 0 | 0.01627 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.093333 | 0 | 0.226667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b88ac303411dbb8834614706fb3ea14becb3330 | 68,137 | py | Python | src/models/vli/modified_xlnet.py | maranedah/music_inpainting_benchmark | 567f4ccfe135a7a6c0578a0672145414b61fd227 | [
"MIT"
] | null | null | null | src/models/vli/modified_xlnet.py | maranedah/music_inpainting_benchmark | 567f4ccfe135a7a6c0578a0672145414b61fd227 | [
"MIT"
] | null | null | null | src/models/vli/modified_xlnet.py | maranedah/music_inpainting_benchmark | 567f4ccfe135a7a6c0578a0672145414b61fd227 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTorch XLNet model.
"""
import warnings
from dataclasses import dataclass
from typing import Callable, Iterable, Optional, Tuple, Union, List
import torch
from torch.optim import Optimizer
import math
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_utils import (
PoolerAnswerClass,
PoolerEndLogits,
PoolerStartLogits,
PreTrainedModel,
SequenceSummary,
apply_chunking_to_forward,
)
from transformers.utils import logging
from transformers.models.xlnet.configuration_xlnet import XLNetConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "xlnet-base-cased"
_CONFIG_FOR_DOC = "XLNetConfig"
_TOKENIZER_FOR_DOC = "XLNetTokenizer"
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"xlnet-base-cased",
"xlnet-large-cased",
# See all XLNet models at https://huggingface.co/models?filter=xlnet
]
# For relative bar encoding
MAX_BAR_ENCODING = 200
class AdamW(Optimizer):
    """
    Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay
    Regularization](https://arxiv.org/abs/1711.05101).

    Parameters:
        params (`Iterable[nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (`float`, *optional*, defaults to 1e-3):
            The learning rate to use.
        betas (`Tuple[float,float]`, *optional*, defaults to (0.9, 0.999)):
            Adam's betas parameters (b1, b2).
        eps (`float`, *optional*, defaults to 1e-6):
            Adam's epsilon for numerical stability.
        weight_decay (`float`, *optional*, defaults to 0):
            Decoupled weight decay to apply.
        correct_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`).
        no_deprecation_warning (`bool`, *optional*, defaults to `False`):
            A flag used to disable the deprecation warning (set to `True` to disable the warning).
    """
    def __init__(
        self,
        params: Iterable[nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True,
        no_deprecation_warning: bool = False,
    ):
        if not no_deprecation_warning:
            warnings.warn(
                "This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch"
                " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this"
                " warning",
                FutureWarning,
            )
        #require_version("torch>=1.5.0") # add_ with alpha
        # Validate hyper-parameters up front so misconfiguration fails fast.
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
        super().__init__(
            params,
            dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias),
        )
    def step(self, closure: Callable = None):
        """
        Performs a single optimization step.

        Arguments:
            closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss.
        """
        loss = closure() if closure is not None else None
        for param_group in self.param_groups:
            beta1, beta2 = param_group["betas"]
            for param in param_group["params"]:
                if param.grad is None:
                    continue
                grad = param.grad.data
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
                state = self.state[param]
                if not state:
                    # Lazily initialize step counter and first/second
                    # moment estimates on first use of this parameter.
                    state["step"] = 0
                    state["exp_avg"] = torch.zeros_like(param.data)
                    state["exp_avg_sq"] = torch.zeros_like(param.data)
                exp_avg = state["exp_avg"]
                exp_avg_sq = state["exp_avg_sq"]
                state["step"] += 1
                # In-place exponential moving averages of the gradient
                # and of its elementwise square.
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(param_group["eps"])
                step_size = param_group["lr"]
                if param_group["correct_bias"]:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                param.data.addcdiv_(exp_avg, denom, value=-step_size)
                # Decoupled weight decay (AdamW): decay the weights
                # directly rather than through the gradient, so it does
                # not interact with the m/v moment estimates. This is
                # equivalent to adding the squared weights to the loss
                # under plain (non-momentum) SGD only.
                if param_group["weight_decay"] > 0.0:
                    param.data.add_(param.data, alpha=(-param_group["lr"] * param_group["weight_decay"]))
        return loss
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch. I use a map to keep the PyTorch model as identical to the original PyTorch
    model as possible.

    Args:
        model: PyTorch XLNet model, optionally a task wrapper exposing a
            ``transformer`` attribute.
        config: The model's XLNetConfig.
        tf_weights: Optional dict of TF variable name -> array; only used
            here to check which optional heads exist in the checkpoint.

    Returns:
        dict: TF variable name -> PyTorch parameter, or a list of
        per-layer parameters for biases that the checkpoint stores stacked.
    """
    tf_to_pt_map = {}
    if hasattr(model, "transformer"):
        # Task-specific wrapper: map its heads first, then descend into
        # the core transformer below.
        if hasattr(model, "lm_loss"):
            # We will load also the output bias
            tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
        # NOTE: the "sequnece" misspelling matches the TF checkpoint names.
        if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
            # We will load also the sequence summary
            tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
            tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
        if (
            hasattr(model, "logits_proj")
            and config.finetuning_task is not None
            and f"model/regression_{config.finetuning_task}/logit/kernel" in tf_weights
        ):
            tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/kernel"] = model.logits_proj.weight
            tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/bias"] = model.logits_proj.bias
        # Now load the rest of the transformer
        model = model.transformer
    # Embeddings and output
    tf_to_pt_map.update(
        {
            "model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
            "model/transformer/mask_emb/mask_emb": model.mask_emb,
        }
    )
    # Transformer blocks
    for i, b in enumerate(model.layer):
        layer_str = f"model/transformer/layer_{i}/"
        tf_to_pt_map.update(
            {
                layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
                layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
                layer_str + "rel_attn/o/kernel": b.rel_attn.o,
                layer_str + "rel_attn/q/kernel": b.rel_attn.q,
                layer_str + "rel_attn/k/kernel": b.rel_attn.k,
                layer_str + "rel_attn/r/kernel": b.rel_attn.r,
                layer_str + "rel_attn/v/kernel": b.rel_attn.v,
                layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
                layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
                layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
                layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
                layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
                layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
            }
        )
    # Relative positioning biases
    if config.untie_r:
        # Untied: one bias set per layer; TF stores them stacked, so the
        # map values are lists split at load time.
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        # Tied: a single bias set shared by all layers.
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update(
        {
            "model/transformer/r_r_bias": r_r_list,
            "model/transformer/r_w_bias": r_w_list,
            "model/transformer/r_s_bias": r_s_list,
            "model/transformer/seg_embed": seg_embed_list,
        }
    )
    return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
    """Load tf checkpoints in a pytorch model.

    Args:
        model: Target PyTorch XLNet model.
        config: The model's XLNetConfig.
        tf_path: Path to the TensorFlow checkpoint.

    Returns:
        The same ``model`` with weights copied in from the checkpoint.

    Raises:
        ImportError: If TensorFlow is not installed.
        AssertionError: On shape mismatch between checkpoint and model.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
            # TF dense kernels are (in, out); PyTorch Linear weights are
            # (out, in), so transpose before copying.
            logger.info("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights
            # (stacked per-layer biases mapped to a list of parameters).
            assert (
                len(pointer) == array.shape[0]
            ), f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched"
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert (
                        p_i.shape == arr_i.shape
                    ), f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched"
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info(f"Initialize PyTorch weight {name} for layer {i}")
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert (
                    pointer.shape == array.shape
                ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info(f"Initialize PyTorch weight {name}")
            pointer.data = torch.from_numpy(array)
        # Drop consumed entries (and their Adam slots) so anything left
        # over can be reported below.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/Adam", None)
        tf_weights.pop(name + "/Adam_1", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
class XLNetRelativeAttention(nn.Module):
    """
    XLNet two-stream relative attention, modified for a relative *bar*
    encoding: the position-based score is gathered from a fixed table of
    relative distances (``pos_seq``) instead of being produced by the
    standard ``rel_shift`` trick (see ``rel_attn_core``).
    """
    def __init__(self, config):
        super().__init__()
        if config.d_model % config.n_head != 0:
            # NOTE(review): the message below is missing a closing parenthesis.
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_head}"
            )
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head ** 0.5)  # 1/sqrt(d_head) score scaling
        # Projection tensors, each of shape (d_model, n_head, d_head):
        # query, key, value, output and relative-position projections.
        self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        # Relative-attention biases (position, segment, content) and the
        # two-valued segment embedding, shape (n_head, d_head) each.
        self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
        self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)
    def prune_heads(self, heads):
        # Head pruning is not supported by this implementation.
        raise NotImplementedError
    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        # Kept from the original implementation; not called by
        # rel_attn_core in this modified (gather-based) version.
        x_size = x.shape
        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
        return x
    @staticmethod
    def rel_shift_bnij(x, klen=-1):
        """Relative shift for scores laid out as (batch, head, i, j)."""
        # Kept from the original implementation; the call site in
        # rel_attn_core is commented out in this modified version.
        x_size = x.shape
        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
        # Note: the tensor-slice form was faster in my testing than torch.index_select
        # However, tracing doesn't like the nature of the slice, and if klen changes
        # during the run then it'll fail, whereas index_select will be fine.
        x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
        # x = x[:, :, :, :klen]
        return x
    def rel_attn_core(
        self,
        q_head,
        k_head_h,
        v_head_h,
        k_head_r,
        seg_mat=None,
        attn_mask=None,
        head_mask=None,
        output_attentions=False,
        pos_seq=None,
    ):
        """Core relative positional attention operations.

        Args:
            pos_seq: of shape [bsz, qlen, klen]; integer relative bar
                distances per (query, key) pair, assumed to lie in
                [-MAX_BAR_ENCODING, MAX_BAR_ENCODING) -- TODO confirm
                against the caller that constructs it.
        """
        # content based attention score
        ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
        # position based attention score
        # bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
        # Modified relative encoding: score each query against the whole
        # table of relative positions, then gather the entry matching each
        # pair's relative bar distance (offset by MAX_BAR_ENCODING so that
        # negative distances map to non-negative indices).
        bd_tmp = torch.einsum("ibnd,knd->bnik", q_head + self.r_r_bias, k_head_r) # i: qlen, k: -MAX_BAR_ENCODING ~ MAX_BAR_ENCODING
        pos_seq = pos_seq[:, None, :, :].expand(-1, bd_tmp.shape[1], -1, -1)
        bd = torch.gather(bd_tmp, -1, pos_seq + MAX_BAR_ENCODING)
        # bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            # Use a smaller additive constant under fp16 to avoid overflow.
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
            else:
                attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
        # attention probability
        attn_prob = F.softmax(attn_score, dim=3)
        attn_prob = self.dropout(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
        # attention output
        attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
        if output_attentions:
            return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
        return attn_vec
    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)
        return output
    def forward(
        self,
        h,
        g,
        attn_mask_h,
        attn_mask_g,
        r,
        seg_mat,
        mems=None,
        target_mapping=None,
        head_mask=None,
        output_attentions=False,
        pos_seq=None,
    ):
        # `h` is the content stream; `g` (when given) is the query stream
        # used for two-stream attention during permutation LM training.
        if g is not None:
            # Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content-based key head
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            # content-based value head
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # position-based key head
            # k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
            # Modified: `r` is a 2-D table of relative-position encodings
            # (one row per relative distance), not a per-batch sequence.
            k_head_r = torch.einsum("ih,hnd->ind", r, self.r)
            # k_head_r_3d = torch.einsum("ijbh,hnd->ijbnd", pos_emb, self.r)
            # h-stream
            # content-stream query head
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h,
                k_head_h,
                v_head_h,
                k_head_r,
                seg_mat=seg_mat,
                attn_mask=attn_mask_h,
                head_mask=head_mask,
                output_attentions=output_attentions,
                pos_seq=pos_seq,
            )
            if output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h
            # post processing
            output_h = self.post_attention(h, attn_vec_h)
            # g-stream
            # query-stream query head
            q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
            # core attention ops
            if target_mapping is not None:
                # Project query positions through the target mapping so
                # only the prediction targets attend.
                q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g,
                    k_head_h,
                    v_head_h,
                    k_head_r,
                    seg_mat=seg_mat,
                    attn_mask=attn_mask_g,
                    head_mask=head_mask,
                    output_attentions=output_attentions,
                    pos_seq=pos_seq,
                )
                if output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
                attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g,
                    k_head_h,
                    v_head_h,
                    k_head_r,
                    seg_mat=seg_mat,
                    attn_mask=attn_mask_g,
                    head_mask=head_mask,
                    output_attentions=output_attentions,
                    pos_seq=pos_seq,
                )
                if output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
            # post processing
            output_g = self.post_attention(g, attn_vec_g)
            if output_attentions:
                attn_prob = attn_prob_h, attn_prob_g
        else:
            # Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content heads
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # positional heads
            # type casting for fp16 support
            # print(r.shape, self.r.shape)
            # k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r)
            k_head_r = torch.einsum("ih,hnd->ind", r, self.r)
            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h,
                k_head_h,
                v_head_h,
                k_head_r,
                seg_mat=seg_mat,
                attn_mask=attn_mask_h,
                head_mask=head_mask,
                output_attentions=output_attentions,
                pos_seq=pos_seq,
            )
            if output_attentions:
                attn_vec, attn_prob = attn_vec
            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None
        outputs = (output_h, output_g)
        if output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
class XLNetFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
self.layer_1 = nn.Linear(config.d_model, config.d_inner)
self.layer_2 = nn.Linear(config.d_inner, config.d_model)
self.dropout = nn.Dropout(config.dropout)
if isinstance(config.ff_activation, str):
self.activation_function = ACT2FN[config.ff_activation]
else:
self.activation_function = config.ff_activation
def forward(self, inp):
output = inp
output = self.layer_1(output)
output = self.activation_function(output)
output = self.dropout(output)
output = self.layer_2(output)
output = self.dropout(output)
output = self.layer_norm(output + inp)
return output
class XLNetLayer(nn.Module):
    """One XLNet transformer layer: two-stream relative attention
    followed by a (optionally chunked) position-wise feed-forward block.
    """
    def __init__(self, config):
        super().__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Chunking (if enabled) is applied along the sequence dimension.
        self.seq_len_dim = 1
    def forward(
        self,
        output_h,
        output_g,
        attn_mask_h,
        attn_mask_g,
        r,
        seg_mat,
        mems=None,
        target_mapping=None,
        head_mask=None,
        output_attentions=False,
        pos_seq=None,
    ):
        attn_outputs = self.rel_attn(
            output_h,
            output_g,
            attn_mask_h,
            attn_mask_g,
            r,
            seg_mat,
            mems=mems,
            target_mapping=target_mapping,
            head_mask=head_mask,
            output_attentions=output_attentions,
            pos_seq=pos_seq,
        )
        new_h, new_g = attn_outputs[:2]
        # The g-stream only exists during two-stream attention; run the
        # feed-forward block over whichever streams are present.
        if new_g is not None:
            new_g = apply_chunking_to_forward(
                self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, new_g
            )
        new_h = apply_chunking_to_forward(
            self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, new_h
        )
        # Re-attach attention probabilities when they were requested.
        return (new_h, new_g) + attn_outputs[2:]
    def ff_chunk(self, output_x):
        """Apply the feed-forward block to one chunk of the sequence."""
        return self.ff(output_x)
class XLNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = XLNetConfig
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"
    def _init_weights(self, module):
        """Initialize the weights of one submodule."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            # All projection tensors and relative-attention biases share
            # the same normal initialization.
            attn_params = (
                module.q,
                module.k,
                module.v,
                module.o,
                module.r,
                module.r_r_bias,
                module.r_s_bias,
                module.r_w_bias,
                module.seg_embed,
            )
            for param in attn_params:
                param.data.normal_(mean=0.0, std=std)
        elif isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=std)
@dataclass
class XLNetModelOutput(ModelOutput):
    """
    Output type of :class:`~transformers.XLNetModel`.

    Args:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
            Sequence of hidden-states at the last layer of the model. ``num_predict`` corresponds to
            ``target_mapping.shape[1]``; if ``target_mapping`` is ``None``, it corresponds to ``sequence_length``.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Pre-computed hidden-states that can be fed back (see the :obj:`mems` input) to speed up sequential
            decoding. Token ids whose past is given to this model should not be passed as :obj:`input_ids`.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape
            :obj:`(batch_size, sequence_length, hidden_size)`.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True``):
            Post-softmax attention weights, one tensor per layer, each of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
    """
    last_hidden_state: torch.FloatTensor
    # Optional outputs default to None when not requested.
    mems: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetLMHeadModelOutput(ModelOutput):
    """
    Output type of :class:`~transformers.XLNetLMHeadModel`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Language modeling loss (for next-token prediction).
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
            ``num_predict`` corresponds to ``target_mapping.shape[1]``; if ``target_mapping`` is ``None``, it
            corresponds to ``sequence_length``.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Pre-computed hidden-states that can be fed back (see the :obj:`mems` input) to speed up sequential
            decoding. Token ids whose past is given to this model should not be passed as :obj:`input_ids`.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape
            :obj:`(batch_size, sequence_length, hidden_size)`.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True``):
            Post-softmax attention weights, one tensor per layer, each of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
    """
    # Optional outputs default to None when not requested/not computed.
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    mems: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForSequenceClassificationOutput(ModelOutput):
    """
    Output type of :class:`~transformers.XLNetForSequenceClassification`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Pre-computed hidden-states that can be fed back (see the :obj:`mems` input) to speed up sequential
            decoding. Token ids whose past is given to this model should not be passed as :obj:`input_ids`.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape
            :obj:`(batch_size, sequence_length, hidden_size)`.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True``):
            Post-softmax attention weights, one tensor per layer, each of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
    """
    # Optional outputs default to None when not requested/not computed.
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    mems: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForTokenClassificationOutput(ModelOutput):
    """
    Output type of :class:`~transformers.XLNetForTokenClassificationOutput`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Classification loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Pre-computed hidden-states that can be fed back (see the :obj:`mems` input) to speed up sequential
            decoding. Token ids whose past is given to this model should not be passed as :obj:`input_ids`.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape
            :obj:`(batch_size, sequence_length, hidden_size)`.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True``):
            Post-softmax attention weights, one tensor per layer, each of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
    """
    # Optional outputs default to None when not requested/not computed.
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    mems: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForMultipleChoiceOutput(ModelOutput):
    """
    Output type of :class:`~transformers.XLNetForMultipleChoice`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Classification loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
            Classification scores (before SoftMax). `num_choices` is the second dimension of the input tensors
            (see `input_ids` above).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Pre-computed hidden-states that can be fed back (see the :obj:`mems` input) to speed up sequential
            decoding. Token ids whose past is given to this model should not be passed as :obj:`input_ids`.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape
            :obj:`(batch_size, sequence_length, hidden_size)`.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True``):
            Post-softmax attention weights, one tensor per layer, each of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
    """
    # Optional outputs default to None when not requested/not computed.
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    mems: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForQuestionAnsweringSimpleOutput(ModelOutput):
    """
    Output type of :class:`~transformers.XLNetForQuestionAnsweringSimple`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-start scores (before SoftMax).
        end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-end scores (before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding.
            The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they
            have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Only ``start_logits``/``end_logits`` are expected to always be populated.
    loss: Optional[torch.FloatTensor] = None
    start_logits: torch.FloatTensor = None
    end_logits: torch.FloatTensor = None
    mems: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForQuestionAnsweringOutput(ModelOutput):
    """
    Output type of :class:`~transformers.XLNetForQuestionAnswering`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
        start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top config.start_n_top start token possibilities (beam-search).
        end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities
            (beam-search).
        end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the ``is_impossible`` label of the answers.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding.
            The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they
            have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Beam-search fields (``*_top_*``, ``cls_logits``) are mutually exclusive
    # with ``loss``: they are only returned when no labels are provided.
    loss: Optional[torch.FloatTensor] = None
    start_top_log_probs: Optional[torch.FloatTensor] = None
    start_top_index: Optional[torch.LongTensor] = None
    end_top_log_probs: Optional[torch.FloatTensor] = None
    end_top_index: Optional[torch.LongTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None
    mems: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
XLNET_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
XLNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.XLNetTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (see :obj:`mems` output below) . Can be used to speed up sequential
decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids`
as they have already been computed.
:obj:`use_mems` has to be set to :obj:`True` to make use of :obj:`mems`.
perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`):
Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
- if ``perm_mask[k, i, j] = 0``, i attend to j in batch k;
- if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
If not set, each token attends to all the others (full bidirectional attention). Only used during
pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`):
Mask to indicate the output tokens to use. If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k
is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
(generation).
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
input_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):
Mask to avoid performing attention on padding token indices. Negative of :obj:`attention_mask`, i.e. with 0
for real tokens and 1 for padding which is kept for compatibility with the original code base.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **masked**,
- 0 for tokens that are **not masked**.
You can only uses one of :obj:`input_mask` and :obj:`attention_mask`.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
def __init__(self, config):
    """Initialize the XLNet encoder from *config*.

    Copies the relevant hyper-parameters onto the instance, then builds the
    token embedding table, the mask embedding, the stack of ``n_layer``
    XLNet layers and the dropout module.
    """
    super().__init__(config)

    self.mem_len = config.mem_len  # number of hidden states cached as memory (see cache_mem)
    self.reuse_len = config.reuse_len  # prefix of the current output reusable as memory
    self.d_model = config.d_model
    self.same_length = config.same_length
    self.attn_type = config.attn_type  # "uni" (causal) or "bi" (see forward)
    self.bi_data = config.bi_data
    self.clamp_len = config.clamp_len
    self.n_layer = config.n_layer

    self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
    # NOTE(review): mask_emb is only referenced by commented-out code in this
    # modified forward() (the query stream comes from inputs_embeds_g instead);
    # presumably kept for checkpoint compatibility — confirm.
    self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
    self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
    self.dropout = nn.Dropout(config.dropout)

    self.init_weights()
def get_input_embeddings(self):
    """Return the token embedding module (``nn.Embedding``)."""
    return self.word_embedding
def set_input_embeddings(self, new_embeddings):
    """Replace the token embedding module with *new_embeddings*."""
    self.word_embedding = new_embeddings
def _prune_heads(self, heads_to_prune):
    # Head pruning is not implemented for this model.
    raise NotImplementedError
def create_mask(self, qlen, mlen):
    """Build the causal attention mask for uni-directional attention.

    Returns a float mask of shape ``[qlen, qlen + mlen]`` on ``self.device``
    where 1.0 marks positions a query must NOT attend to and 0.0 marks
    visible positions. The first ``mlen`` columns (cached memory) are always
    visible; the diagonal is masked so a (query-stream) token cannot attend
    to itself. With ``same_length`` set, part of the lower triangle is also
    masked so every query sees the same number of positions.

    Args:
        qlen: query sequence length.
        mlen: memory (prefix) length.
    """
    square = torch.ones([qlen, qlen])
    # diagonal=0 keeps the diagonal masked: input_g must not attend to itself.
    future = torch.triu(square, diagonal=0)
    mask = torch.cat([torch.zeros([qlen, mlen]), future], dim=1)
    if self.same_length:
        past_overflow = torch.tril(square, diagonal=-1)
        mask = torch.cat([mask[:, :qlen] + past_overflow, mask[:, qlen:]], dim=1)
    return mask.to(self.device)
def cache_mem(self, curr_out, prev_mem):
    """Update the layer memory with the current hidden states.

    Optionally truncates ``curr_out`` to ``self.reuse_len``, appends it to
    ``prev_mem`` (if any) along the time dimension, and keeps only the last
    ``self.mem_len`` states. The result is detached from the graph.
    """
    reuse = self.reuse_len
    if reuse is not None and reuse > 0:
        curr_out = curr_out[:reuse]

    # With mem_len unset (None or 0) behave like GPT-2 at inference time and
    # keep the full history; otherwise keep only the last mem_len states
    # (the preferred setting for training and long-form generation).
    cutoff = -self.mem_len if self.mem_len else 0

    merged = curr_out if prev_mem is None else torch.cat([prev_mem, curr_out], dim=0)
    return merged[cutoff:].detach()
@staticmethod
def positional_embedding(pos_seq, inv_freq, bsz=None):
    """Sinusoidal positional embedding.

    Outer product of ``pos_seq`` ([len]) and ``inv_freq`` ([d/2]) followed by
    a sin/cos concatenation, yielding shape ``[len, 1, d]`` (or
    ``[len, bsz, d]`` when *bsz* is given, via broadcasting expand).
    """
    angles = torch.einsum("i,d->id", pos_seq, inv_freq)
    emb = torch.cat([angles.sin(), angles.cos()], dim=-1).unsqueeze(1)
    if bsz is None:
        return emb
    return emb.expand(-1, bsz, -1)
def bar_ids_to_rel_bar_pos_emb(self, inv_freq, bar_ids, mlen=0):
    """Build bar-relative sinusoidal position inputs.

    Args:
        inv_freq: inverse frequencies, presumably of shape [d_model // 2] — see caller.
        bar_ids: of shape [bsz, klen], bar encodings for notes starting from 0.
            For example: [[0, 0, 1, 1, 2, 3], [0, 1, 1, 2, 2, 3]]
        mlen: length of the cached memory prefix included in ``bar_ids``.

    Returns:
        pos_emb_1d: sinusoidal table of shape [2 * MAX_BAR_ENCODING + 1, 2 * len(inv_freq)],
            one row per representable relative bar offset.
        pos_seq: relative bar offsets of shape [bsz, qlen, klen]; entry
            [b, i, j] equals bar_ids[b, j] - bar_ids[b, mlen + i].
    """
    # assert mlen == 0, "Transformer-XL's memory for previous chunks is not support for now"
    bsz = bar_ids.shape[0]
    # qlen = bar_ids.shape[1]
    # klen = qlen + mlen
    klen = bar_ids.shape[1]
    qlen = klen - mlen
    # Pairwise offsets: broadcast every query position's bar id against all key positions.
    pos_seq = bar_ids[:, None, :].repeat(1, qlen, 1)
    pos_seq = pos_seq - pos_seq[:, 0, -qlen:][..., None]
    # sinusoid_inp = torch.einsum("bij,d->ijbd", pos_seq, inv_freq.to(pos_seq.device))
    # pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
    # Instead of embedding every pairwise offset, build one shared 1-D table over
    # all offsets in [-MAX_BAR_ENCODING, MAX_BAR_ENCODING]; consumers index it with pos_seq.
    pos_seq_1d = torch.arange(-MAX_BAR_ENCODING, MAX_BAR_ENCODING+1).to(pos_seq.device)
    sinusoid_inp_1d = torch.einsum("i,d->id", pos_seq_1d, inv_freq.to(pos_seq.device))
    pos_emb_1d = torch.cat([torch.sin(sinusoid_inp_1d), torch.cos(sinusoid_inp_1d)], dim=-1)
    return pos_emb_1d, pos_seq
def relative_positional_encoding(self, bar_ids=None, mlen=None):
    """Create bar-relative positional-encoding inputs for the attention layers.

    This replaces stock XLNet's absolute-offset relative encoding: positions
    are measured in bar offsets derived from ``bar_ids``.

    Args:
        bar_ids: [bsz, klen] bar indices per token (memory prefix included).
        mlen: memory length contained in ``bar_ids``.

    Returns:
        A ``(pos_emb_1d, pos_seq)`` pair as produced by
        :meth:`bar_ids_to_rel_bar_pos_emb`.
    """
    # Standard inverse-frequency schedule: 1 / 10000^(2k / d_model).
    half_dims = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
    inv_freq = 1 / torch.pow(10000, (half_dims / self.d_model))
    # TODO: also work in testing phase?
    return self.bar_ids_to_rel_bar_pos_emb(inv_freq, bar_ids, mlen)
@add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
    tokenizer_class=_TOKENIZER_FOR_DOC,
    checkpoint=_CHECKPOINT_FOR_DOC,
    output_type=XLNetModelOutput,
    config_class=_CONFIG_FOR_DOC,
)
def forward(
    self,
    input_ids=None,
    attention_mask=None,
    mems=None,
    perm_mask=None,
    target_mapping=None,
    token_type_ids=None,
    input_mask=None,
    head_mask=None,
    inputs_embeds=None,
    use_mems=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
    bar_ids=None,
    inputs_embeds_g=None,
    reuse_len=None,
    **kwargs,  # delete after depreciation warning is removed
):
    """Run the two-stream XLNet encoder.

    Customizations vs. stock XLNet visible in this body: the query stream is
    always built from ``inputs_embeds_g`` (never from ``self.mask_emb``), so
    ``output_g`` is never None, and positional encodings are bar-relative
    (``bar_ids`` via ``relative_positional_encoding``) instead of absolute.

    Bug fixed here: the assert guarding ``input_mask`` vs ``attention_mask``
    had its message split across two statements (the second string literal
    was a dangling no-op expression), truncating the assertion message.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    if "use_cache" in kwargs:
        warnings.warn(
            "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems` instead.",
            FutureWarning,
        )
        use_mems = kwargs["use_cache"]

    # NOTE(review): this overwrites the config-derived self.reuse_len with None
    # whenever the caller omits reuse_len — confirm that is intended.
    self.reuse_len = reuse_len  # reuse len should be modified if specified by user

    if self.training:
        use_mems = use_mems if use_mems is not None else self.config.use_mems_train
    else:
        use_mems = use_mems if use_mems is not None else self.config.use_mems_eval

    # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
    # but we want a unified interface in the library with the batch size on the first dimension
    # so we move here the first dimension (batch) to the end
    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        input_ids = input_ids.transpose(0, 1).contiguous()
        qlen, bsz = input_ids.shape[0], input_ids.shape[1]
    elif inputs_embeds is not None:
        inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
        qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
    input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
    attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
    perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
    target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None

    mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
    klen = mlen + qlen

    dtype_float = self.dtype
    device = self.device

    # Attention mask
    # causal attention mask
    if self.attn_type == "uni":
        attn_mask = self.create_mask(qlen, mlen)
        attn_mask = attn_mask[:, :, None, None]
    elif self.attn_type == "bi":
        attn_mask = None
    else:
        raise ValueError(f"Unsupported attention type: {self.attn_type}")

    # data mask: input mask & perm mask
    assert input_mask is None or attention_mask is None, (
        "You can only use one of input_mask (uses 1 for padding) "
        "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
    )
    if input_mask is None and attention_mask is not None:
        input_mask = 1.0 - attention_mask
    if input_mask is not None and perm_mask is not None:
        data_mask = input_mask[None] + perm_mask
    elif input_mask is not None and perm_mask is None:
        data_mask = input_mask[None]
    elif input_mask is None and perm_mask is not None:
        data_mask = perm_mask
    else:
        data_mask = None

    if data_mask is not None:
        # all mems can be attended to
        if mlen > 0:
            mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
            data_mask = torch.cat([mems_mask, data_mask], dim=1)
        if attn_mask is None:
            attn_mask = data_mask[:, :, :, None]
        else:
            # attn_mask += data_mask[:, :, :, None]
            attn_mask = data_mask[:, :, :, None] + attn_mask

    if attn_mask is not None:
        attn_mask = (attn_mask > 0).to(dtype_float)

    if attn_mask is not None:
        # Content stream may attend to its own position; remove the diagonal
        # from the mask by subtracting an identity before re-binarizing.
        non_tgt_mask = -torch.eye(qlen).to(attn_mask)
        if mlen > 0:
            non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
        non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
    else:
        non_tgt_mask = None

    # Word embeddings and prepare h & g hidden states
    if inputs_embeds is not None:
        word_emb_k = inputs_embeds
    else:
        word_emb_k = self.word_embedding(input_ids)
    output_h = self.dropout(word_emb_k)
    if target_mapping is not None:
        # word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
        word_emb_q = inputs_embeds_g.transpose(0, 1).contiguous()
        # else:  # We removed the inp_q input which was same as target mapping
        #     inp_q_ext = inp_q[:, :, None]
        #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
        output_g = self.dropout(word_emb_q)
    else:
        # output_g = None
        # In this modified model the query stream is always fed from
        # inputs_embeds_g, so inputs_embeds_g is effectively required.
        word_emb_q = inputs_embeds_g.transpose(0, 1).contiguous()
        output_g = self.dropout(word_emb_q)

    # Segment embedding
    if token_type_ids is not None:
        # Convert `token_type_ids` to one-hot `seg_mat`
        if mlen > 0:
            mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
            cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
        else:
            cat_ids = token_type_ids
        # `1` indicates not in the same segment [qlen x klen x bsz]
        seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
        seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
    else:
        seg_mat = None

    # Positional encoding (bar-relative; see relative_positional_encoding)
    # pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz, bar_ids=bar_ids)
    pos_emb_1d, pos_seq = self.relative_positional_encoding(bar_ids=bar_ids, mlen=mlen)
    # pos_emb = self.dropout(pos_emb)
    pos_emb_1d = self.dropout(pos_emb_1d)

    # Prepare head mask if needed
    # 1.0 in head_mask indicate we keep the head
    # attention_probs has shape bsz x n_heads x N x N
    # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
    # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
    if head_mask is not None:
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
            head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
        head_mask = head_mask.to(
            dtype=next(self.parameters()).dtype
        )  # switch to float if need + fp16 compatibility
    else:
        head_mask = [None] * self.n_layer

    new_mems = ()
    if mems is None:
        mems = [None] * len(self.layer)

    attentions = [] if output_attentions else None
    hidden_states = [] if output_hidden_states else None
    for i, layer_module in enumerate(self.layer):
        if use_mems:
            # cache new mems
            new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
        if output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        outputs = layer_module(
            output_h,
            output_g,
            attn_mask_h=non_tgt_mask,
            attn_mask_g=attn_mask,
            r=pos_emb_1d,
            seg_mat=seg_mat,
            mems=mems[i],
            target_mapping=target_mapping,
            head_mask=head_mask[i],
            output_attentions=output_attentions,
            pos_seq=pos_seq,
        )
        output_h, output_g = outputs[:2]
        if output_attentions:
            attentions.append(outputs[2])

    # Add last hidden state
    if output_hidden_states:
        hidden_states.append((output_h, output_g) if output_g is not None else output_h)

    output = self.dropout(output_g if output_g is not None else output_h)

    # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
    output = output.permute(1, 0, 2).contiguous()

    if not use_mems:
        new_mems = None

    if output_hidden_states:
        if output_g is not None:
            hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
        else:
            hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)

    if output_attentions:
        if target_mapping is not None:
            # when target_mapping is provided, there are 2-tuple of attentions
            attentions = tuple(
                tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
            )
        else:
            attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)

    if not return_dict:
        return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None)

    return XLNetModelOutput(
        last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions
    )
| 46.132024 | 198 | 0.610931 | 8,955 | 68,137 | 4.460525 | 0.095701 | 0.042059 | 0.010815 | 0.003004 | 0.51084 | 0.461696 | 0.416533 | 0.392525 | 0.373949 | 0.352644 | 0 | 0.010204 | 0.29379 | 68,137 | 1,476 | 199 | 46.163279 | 0.819905 | 0.33371 | 0 | 0.305396 | 0 | 0.01837 | 0.197463 | 0.030265 | 0 | 0 | 0 | 0.000678 | 0.006889 | 1 | 0.030999 | false | 0.002296 | 0.020666 | 0.001148 | 0.137773 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b8bc3d0e40e43824259f72ae47057f59a324480 | 3,470 | py | Python | Colloquiums/2020-2021/Colloquium_2/Exercise_3.py | Szymon-Budziak/ASD_exercises_solutions | 36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | [
"MIT"
] | 7 | 2021-12-28T23:38:42.000Z | 2022-03-29T16:36:16.000Z | Colloquiums/2020-2021/Colloquium_2/Exercise_3.py | Szymon-Budziak/ASD_exercises_solutions | 36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | [
"MIT"
] | null | null | null | Colloquiums/2020-2021/Colloquium_2/Exercise_3.py | Szymon-Budziak/ASD_exercises_solutions | 36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | [
"MIT"
] | 4 | 2021-06-29T20:21:52.000Z | 2022-03-12T10:04:17.000Z | # W roku 2050 Maksymilian odbywa podróż przez pustynię z miasta A do miasta B. Droga pomiędzy miastami
# jest linią prostą na której w pewnych miejscach znajdują się plamy ropy. Maksymilian porusza się
# 24-kołową cysterną, która spala 1 litr ropy na 1 kilometr trasy. Cysterna wyposażona jest w pompę
# pozwalającą zbierać ropę z plam. Aby dojechać z miasta A do miasta B Maksymilian będzie musiał zebrać
# ropę z niektórych plam (by nie zabrakło paliwa), co każdorazowo wymaga zatrzymania cysterny. Niestety,
# droga jest niebezpieczna. Maksymilian musi więc tak zaplanować trasę, by zatrzymać się jak najmniej
# razy. Na szczęście cysterna Maksymiliana jest ogromna - po zatrzymaniu zawsze może zebrać całą ropę
# z plamy (w cysternie zmieściłaby się cała ropa na trasie).
# Zaproponuj i zaimplementuj algorytm wskazujący, w których miejscach trasy Maksymilian powinien się
# zatrzymać i zebrać ropę. Algorytm powinien być możliwe jak najszybszy i zużywać jak najmniej pamięci.
# Uzasadnij jego poprawność i oszacuj złożoność obliczeniową.
# Dane wejściowe reprezentowane są jako dwuwymiarowa tablica liczb naturalnych T, w której wartość
# T[u][v] to objętość ropy na polu o współrzędnych (u, v) (objętość 0 oznacza brak ropy). Współrzędne
# u należą do zbioru {0, 1, ..., n−1} a współrzędne v do zbioru {0, 1, ..., m−1}. Miasto A znajduje się
# na polu (0, 0), zaś miasto B na polu (0, m−1). Maksymilian porusza się jedynie po polach
# (0, 0), (0, 1),..., (0, m−1). Bok każdego pola ma długość 1 kilometra. Plamą ropy jest dowolny spójny
# obszar pól zawierających ropę. Dwa pola należą do spójnego obszaru jeśli mają wspólny bok lub są
# połączone sekwencją pól (zawierających ropę) o wspólnych bokach. Zakładamy, że początkowo cysterna
# jest pusta, ale pole (0, 0) jest częścią plamy ropy, którą można zebrać przed wyruszeniem w drogę.
# Zakładamy również, że zadanie posiada rozwiązanie, t.j. da się dojechać z miasta A do miasta B.
# Algorytm należy zaimplementować w funkcji:
# def plan(T):
# ...
# która przyjmuje tablicę z opisem zadania i zwraca listę współrzędnych v pól na których należy
# zatrzymać cysternę w celu zebrania ropy (cysterna porusza się po tylko polach (0, v), więc wystarczy
# zwrócić współrzędną v). Lista powinna być posortowana w kolejności postojów. Postój na polu (0, 0)
# również jest częścią rozwiązania.
from Exercise_3_tests import runtests
from queue import PriorityQueue
def take_fuel(T, row, col, actual_fuel):
    """Collect (and zero out) the entire oil patch containing cell (row, col).

    Flood-fills the 4-connected component of non-zero cells around
    (row, col), accumulating the total oil volume into ``actual_fuel[0]``
    and setting every visited cell of ``T`` to 0.

    Fix: the original implementation recursed once per cell, so a large
    patch (thousands of cells) hit Python's recursion limit. This version
    uses an explicit stack and produces identical results.
    """
    stack = [(row, col)]
    while stack:
        r, c = stack.pop()
        # Duplicates on the stack are harmless: a revisited cell is already 0.
        actual_fuel[0] += T[r][c]
        T[r][c] = 0
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if 0 <= nr < len(T) and 0 <= nc < len(T[0]) and T[nr][nc] != 0:
                stack.append((nr, nc))
def plan(T):
    """Return the sorted v-coordinates of a minimal set of refuelling stops.

    First collapses every oil patch that touches row 0 onto its row-0 cell
    (mutating ``T``), then drives greedily from v=0 to v=m-1: whenever the
    tank is empty, refuel at the largest patch passed so far. A max-heap is
    simulated by pushing negated volumes into a PriorityQueue.
    """
    width = len(T[0])

    # Collapse patches: after this loop T[0][v] holds the total volume of the
    # patch reachable from (0, v), and the rest of each patch is zeroed.
    collected = [0]
    for v in range(width):
        if T[0][v] != 0:
            collected[0] = 0
            take_fuel(T, 0, v, collected)
            T[0][v] = collected[0]

    candidates = PriorityQueue()
    fuel = 0
    stops = []
    # One iteration per kilometre driven; the destination cell needs no stop.
    for v in range(width - 1):
        if T[0][v] != 0:
            candidates.put((-T[0][v], v))
        if fuel == 0:
            # Tank empty: take the biggest patch seen so far (fewest stops).
            volume, where = candidates.get()
            fuel -= volume  # volume is negative, so this adds fuel
            stops.append(where)
        fuel -= 1
    stops.sort()
    return stops
# Script entry point: run the course-provided test-suite against plan().
runtests(plan)
| 50.289855 | 104 | 0.69366 | 548 | 3,470 | 4.344891 | 0.417883 | 0.029399 | 0.0063 | 0.0126 | 0.058379 | 0.053339 | 0.033599 | 0 | 0 | 0 | 0 | 0.022803 | 0.216427 | 3,470 | 68 | 105 | 51.029412 | 0.851416 | 0.660807 | 0 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.057143 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b8e4161bf16ec89b1640782424942de3ea30db0 | 6,499 | py | Python | datajob/datajob_stack.py | vincentclaes/datajob | 0454c7c6327b2a0c2a2284e35153213787704b71 | [
"Apache-2.0"
] | 90 | 2021-01-04T20:08:20.000Z | 2022-03-14T11:20:24.000Z | datajob/datajob_stack.py | vincentclaes/datajob | 0454c7c6327b2a0c2a2284e35153213787704b71 | [
"Apache-2.0"
] | 93 | 2020-12-12T22:10:33.000Z | 2021-11-21T16:12:24.000Z | datajob/datajob_stack.py | vincentclaes/datajob | 0454c7c6327b2a0c2a2284e35153213787704b71 | [
"Apache-2.0"
] | 13 | 2020-12-12T22:11:01.000Z | 2021-09-22T14:37:09.000Z | import os
from typing import Union
from aws_cdk import core
from aws_cdk.core import CfnOutput
from datajob import logger
from datajob.datajob_context import DataJobContext
from datajob.datajob_execution_input import DataJobExecutionInput
class DataJobStack(core.Stack):
STAGE_NAME = "stage"
def __init__(
    self,
    scope: core.Construct,
    id: str,
    stage: str = None,
    project_root: str = None,
    include_folder: str = None,
    account: str = None,
    region: str = None,
    **kwargs,
) -> None:
    """
    :param scope: aws cdk core construct object.
    :param id: a name for this stack.
    :param stage: the stage name to which we are deploying
    :param project_root: the path to the root of this project
    :param include_folder: specify the path to the folder we would like to include in the deployment bucket.
    :param account: AWS account number
    :param region: AWS region where we want to deploy our datajob to
    :param kwargs: any extra kwargs for the core.Construct
    """
    self.scope = scope
    # Resolve the target AWS environment (account/region, with env-var fallback)
    # before the underlying CDK stack is initialized.
    self.env = DataJobStack._create_environment_object(
        account=account, region=region
    )
    self.stage = self.get_stage(stage)
    # The stage is baked into the stack name so different stages can be
    # deployed side by side.
    self.unique_stack_name = self._create_unique_stack_name(id, self.stage)
    super().__init__(scope=scope, id=self.unique_stack_name, env=self.env, **kwargs)
    self.project_root = project_root
    self.include_folder = include_folder
    self.resources = []  # DataJob resources registered via update_datajob_stack_resources
    self.outputs = {}  # CloudFormation outputs (key -> value), see create_cloudformation_outputs
    self.execution_input = DataJobExecutionInput()
    # Populated lazily by __enter__ via init_datajob_context().
    self.context = None
def __enter__(self):
    """As soon as we enter the contextmanager, we create the datajob
    context.

    :return: datajob stack.
    """
    # init_datajob_context is defined on this class (outside this excerpt)
    # and is expected to set self.context.
    self.init_datajob_context()
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """On leaving the context manager, materialize the stack.

    The registered resources (and the CloudFormation outputs they produce)
    are only created when the ``with`` body completed without an exception;
    otherwise creation is skipped entirely. Exceptions are never suppressed
    (implicitly returns None).

    :param exc_type:
    :param exc_value:
    :param traceback:
    :return: None
    """
    clean_exit = exc_type is None and exc_value is None and traceback is None
    if clean_exit:
        logger.debug("creating resources and synthesizing stack.")
        self.create_resources()
def add(self, task) -> None:
    """Attach *task* to this stack under its unique name and create it.

    Args:
        task: a datajob task/resource object exposing ``unique_name`` and
            ``create()``. (The previous ``str`` annotation was incorrect —
            the attribute access below requires an object.)

    Returns: None
    """
    setattr(self, task.unique_name, task)
    task.create()
def update_datajob_stack_outputs(self, key: str, value: str) -> None:
    """Add a key and value to datajob_stack output variable.

    The entry is later exported as a CloudFormation stack output by
    ``create_cloudformation_outputs`` (an existing key is overwritten).

    Returns: None
    """
    self.outputs[key] = value
def update_datajob_stack_resources(self, resource: object) -> None:
    """Add a DataJob resource to the DataJob stack resources variable.

    Args:
        resource: A DataJobBase implementation. we cannot reference it here
            explicitly in the typing, because then we have a circular
            dependency conflict.

    Returns: None
    """
    # Log the resource being added; previously this logged the stack
    # object itself instead of the resource.
    logger.info(f"adding resource {resource} to stack workflow resources")
    self.resources.append(resource)
@staticmethod
def _create_unique_stack_name(stack_name: str, stage: Union[str, None]) -> str:
"""create a unique name for the datajob stack.
:param stack_name: a name for the stack.
:param stage: the stage name we give our pipeline.
:return: a unique name.
"""
if stage:
return f"{stack_name}-{stage}"
return stack_name
@staticmethod
def _create_environment_object(account: str, region: str) -> core.Environment:
    """Create an aws cdk Environment object.

    Args:
        account: AWS account number: 12 numbers. Falls back to the
            ``AWS_DEFAULT_ACCOUNT`` environment variable when None.
        region: AWS region. e.g. eu-west-1. Falls back to the
            ``AWS_DEFAULT_REGION`` environment variable when None.

    Returns: AWS cdk Environment object.
    """
    if account is None:
        account = os.environ.get("AWS_DEFAULT_ACCOUNT")
    if region is None:
        region = os.environ.get("AWS_DEFAULT_REGION")
    return core.Environment(account=account, region=region)
def create_cloudformation_outputs(self) -> None:
    """Expose every entry of ``self.outputs`` as a cloudformation stack output.

    Returns: None
    """
    # iterating an empty mapping is a no-op, so no explicit guard is needed
    for key, value in self.outputs.items():
        logger.debug(f"adding key {key} and value {value} to the stack output.")
        CfnOutput(scope=self, id=key, value=value)
def create_resources(self) -> None:
    """Create each of the resources of this stack.

    After all resources are created, the cloudformation outputs (if any)
    are registered as well.

    Returns: None
    """
    if self.resources:
        for resource in self.resources:
            logger.debug(f"creating resource: {resource.name}")
            resource.create()
        self.create_cloudformation_outputs()
    else:
        # Previously this message was logged unconditionally, even right
        # after resources had just been created.
        logger.debug("no resources available to create.")
def get_stage(self, stage: str) -> Union[str, None]:
    """Resolve the stage: explicit argument first, then cdk context, else None."""
    if stage:
        logger.debug(
            "a stage parameter is passed directly to the stack object, take this value."
        )
        return stage
    logger.debug("check cdk context if there is not a stage value provided.")
    try:
        return self.get_context_parameter(DataJobStack.STAGE_NAME)
    except ValueError:
        logger.debug("no stage is found on the context. Will return None.")
        return None
def get_context_parameter(self, name: str) -> str:
    """Fetch a cdk context parameter that was passed on the cli.

    :raises ValueError: when the parameter is absent from the cdk context.
    """
    value = self.scope.node.try_get_context(name)
    if value:
        logger.debug(f"context parameter {name} found.")
        return value
    raise ValueError(
        f"we expect a cdk context parameter to be set on the cli with key {name}. "
        f"e.g 'cdk deploy -c stage=my-stage' where stage is the key and my-stage is the value."
    )
def init_datajob_context(self) -> None:
    """Instantiate the DataJobContext for this stack and store it on ``self.context``."""
    self.context = DataJobContext(
        self,
        project_root=self.project_root,
        include_folder=self.include_folder,
    )
| 36.717514 | 113 | 0.62625 | 809 | 6,499 | 4.911001 | 0.211372 | 0.015857 | 0.015102 | 0.009061 | 0.031211 | 0.031211 | 0.017619 | 0.017619 | 0.017619 | 0 | 0 | 0.000654 | 0.294507 | 6,499 | 176 | 114 | 36.926136 | 0.865867 | 0.270349 | 0 | 0.040816 | 0 | 0.010204 | 0.146823 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132653 | false | 0.010204 | 0.071429 | 0 | 0.306122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b8e54d9ba07085ebbc7eda5e53fdf3cf8fed6cb | 5,869 | py | Python | pytorch/exercises/wgan/main_improved_wgan.py | wangyendt/deeplearning_models | 47883b6c65b8d05a0d1c5737f1552df6476ded34 | [
"MIT"
] | 1 | 2020-06-04T11:10:27.000Z | 2020-06-04T11:10:27.000Z | pytorch/exercises/wgan/main_improved_wgan.py | wangyendt/deeplearning_models | 47883b6c65b8d05a0d1c5737f1552df6476ded34 | [
"MIT"
] | null | null | null | pytorch/exercises/wgan/main_improved_wgan.py | wangyendt/deeplearning_models | 47883b6c65b8d05a0d1c5737f1552df6476ded34 | [
"MIT"
] | null | null | null | # encoding: utf-8
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def list_all_files(rootdir, key):
    """Recursively collect files under *rootdir* whose path contains *key*.

    :param rootdir: directory to search.
    :param key: substring that must occur in the file path.
    :return: list of matching file paths.
    """
    # previously this shadowed the builtin `list`, re-imported `os`
    # inside the function, and carried a non-English comment
    matches = []
    for entry in os.listdir(rootdir):  # all entries of the folder
        path = os.path.join(rootdir, entry)
        if os.path.isdir(path):
            matches.extend(list_all_files(path, key))
        elif os.path.isfile(path) and key in path:
            matches.append(path)
    return matches
def load_data():
    """Lazily yield the normalized signal array of every matching data file.

    Looks for files whose path contains ``'10_'`` below the current
    directory and yields :func:`extract_signal` results one by one.
    """
    # the previous `global df, df_ind` declaration was a no-op: neither
    # name was ever assigned or read in this function
    root = '.'
    key = '10_'
    for filename in list_all_files(root, key):
        yield extract_signal(filename)
def extract_signal(f):
    """Load one log file and return normalized touch-down diff windows.

    :param f: path of a tab separated log file (first row is skipped).
    :return: array of shape (window_len, n_events), scaled to [-1, 1].
    """
    data = pd.read_table(f, header=None, skiprows=1)
    rawdata = np.array(data.iloc[:, 19:20])
    force_flag = np.array(data.iloc[:, 2])
    touchdowns = np.where(np.diff(force_flag) == 1)[0]
    # one window of first differences around every touch-down event
    windows = [np.diff(rawdata[td - 3:td + 13, :], axis=0).T.flatten()
               for td in touchdowns]
    x_data = np.array(windows)
    # keep only windows with a clear signal, then put events in columns
    x_data = x_data[np.max(x_data, axis=1) > 20].T
    # min/max scale every column to [0, 1], then stretch to [-1, 1]
    x_data = np.apply_along_axis(
        lambda col: (col - np.min(col)) / (np.max(col) - np.min(col)), 0, x_data
    )
    return (x_data - 0.5) * 2
def make_data():
    """Sample one mini-batch (without replacement) from the global dataset ``datas``."""
    rows = np.random.choice(datas.shape[0], batch_size, replace=False)
    return datas[rows]
def make_noise():
    """Draw one batch of uniform noise in [-1, 1] as generator input."""
    return np.random.uniform(low=-1, high=1, size=(batch_size, generator_len))
def train():
    """Run the improved-WGAN (WGAN-GP) training loop on the global D and G.

    Uses the module-level hyper parameters (epochs, critic_iters, d_lr,
    g_lr, ...) and live-plots generated samples every 50 epochs.
    """
    # Adam with betas=(0.5, 0.9); note: plain WGAN-GP training loop below
    d_optim = torch.optim.Adam(D.parameters(), d_lr, betas=(0.5, 0.9))
    g_optim = torch.optim.Adam(G.parameters(), g_lr, betas=(0.5, 0.9))
    plt.ion()
    wd = []  # history of the estimated Wasserstein distance
    for epoch in range(epochs):
        D.train(), G.train()
        # the critic is updated several times per generator update
        for ci in range(critic_iters):
            data_batch = make_data()
            gen_batch = make_noise()
            data_batch, gen_batch = Variable(torch.FloatTensor(data_batch)), \
                                    Variable(torch.FloatTensor(gen_batch))
            # critic loss: -E[D(x)] + E[D(G(z))] + gradient penalty
            d_loss = -torch.mean(D(data_batch)) + torch.mean(D(G(gen_batch))) + calc_gradient_penalty(data_batch,
                                                                                                      G(gen_batch))
            wasserstein_distance = -torch.mean(D(G(gen_batch))) + torch.mean(D(data_batch))
            print(wasserstein_distance.item())
            # d_loss = -torch.mean(torch.log(D(data_batch)) + torch.log(1 - D(G(gen_batch))))
            # g_loss = torch.mean(torch.log(1 - D(G(gen_batch))))
            d_optim.zero_grad()
            d_loss.backward(retain_graph=True)
            d_optim.step()
        # generator step on a fresh batch
        data_batch = make_data()
        gen_batch = make_noise()
        data_batch, gen_batch = Variable(torch.FloatTensor(data_batch)), \
                                Variable(torch.FloatTensor(gen_batch))
        g_loss = -torch.mean(D(G(gen_batch)))
        g_optim.zero_grad()
        g_loss.backward()
        g_optim.step()
        if epoch % 50 == 0:
            # plot a 4x4 grid of generated (cumulatively summed) signals
            D.eval(), G.eval()
            plt.clf()
            plt.suptitle('epoch=%d, w-dist=%.6f' % (epoch, wasserstein_distance.item()))
            wd.append(wasserstein_distance.item())
            for i in range(16):
                plt.subplot(4, 4, i + 1)
                gen_diff = G(gen_batch).detach().numpy()
                # the two halves of the window are integrated separately
                gen_raw = np.hstack((np.cumsum(gen_diff[:, :int(data_len / 2)], axis=1),
                                     np.cumsum(gen_diff[:, int(data_len / 2):], axis=1)))
                plt.plot(gen_raw[i])
                plt.xlim((0, data_len))
                plt.ylim((-1, 1))
            plt.pause(0.01)
    plt.ioff()
    plt.figure()
    plt.plot(wd)  # Wasserstein distance history over the whole run
    plt.show()
def calc_gradient_penalty(x_real, x_gen):
    """WGAN-GP gradient penalty on random interpolates of real/generated data."""
    mix = torch.rand(batch_size, 1).expand(x_real.size())
    x_hat = mix * x_real + (1 - mix) * x_gen
    critic_out = D(x_hat)
    gradients = torch.autograd.grad(
        outputs=critic_out,
        inputs=x_hat,
        grad_outputs=torch.ones(critic_out.size()),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    # print(gradients)
    gradient_penalty = gp_lambda * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    print(gradient_penalty)
    return gradient_penalty
if __name__ == '__main__':
    # load and stack all training windows: shape (n_samples, data_len)
    datas = load_data()
    datas = np.hstack([d for d in datas]).T
    # hyper parameters
    batch_size = 32
    generator_len = 20
    data_len = 15
    epochs = 200000
    d_lr = 0.000001
    g_lr = 0.000001
    gp_lambda = 0.1
    critic_iters = 5
    # critic / discriminator: MLP mapping a signal to one score
    D = nn.Sequential(
        nn.Linear(data_len, 32),
        # nn.Dropout(0.5),
        nn.ReLU(),
        nn.Linear(32, 16),
        # nn.Dropout(0.5),
        nn.ReLU(),
        nn.Linear(16, 4),
        # nn.Dropout(0.5),
        nn.ReLU(),
        nn.Linear(4, 1)
    )
    # generator: MLP mapping noise to a signal in [-1, 1] (Tanh output)
    G = nn.Sequential(
        nn.Linear(generator_len, 30),
        nn.ReLU(),
        nn.Linear(30, 30),
        nn.ReLU(),
        nn.Linear(30, data_len),
        nn.Tanh()
    )
    # NOTE(review): `x` appears unused after this point — verify before removing
    x = np.tile(np.linspace(-1, 1, data_len), [batch_size, 1])
    # make_data()
    train()
    torch.save(D, 'D.model')
    torch.save(G, 'G.model')
    # D_ = torch.load('D.model')
    # G_ = torch.load('G.model')
    # print(D_, G_)
    # batch_size = 1000
    # gen_data = make_noise()
    # print(gen_data.shape)
    # gen_data = G_(Variable(torch.FloatTensor(gen_data))).detach().numpy()
    # plt.ion()
    # for i in range(gen_data.shape[0]):
    # plt.cla()
    # plt.plot(np.cumsum(gen_data[i, :int(data_len / 2)]))
    # plt.plot(np.cumsum(gen_data[i, int(data_len / 2):]))
    # # gen_raw = np.hstack((np.cumsum(gen_data[:, :int(data_len / 2)], axis=1),
    # # np.cumsum(gen_data[:, int(data_len / 2):], axis=1)))
    # # plt.plot(gen_raw[i])
    # plt.pause(0.2)
| 30.567708 | 114 | 0.552905 | 847 | 5,869 | 3.638725 | 0.217237 | 0.02109 | 0.020441 | 0.021415 | 0.2573 | 0.237833 | 0.194354 | 0.17294 | 0.148605 | 0.148605 | 0 | 0.031938 | 0.295791 | 5,869 | 191 | 115 | 30.727749 | 0.713767 | 0.148066 | 0 | 0.109489 | 0 | 0 | 0.009459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051095 | false | 0 | 0.065693 | 0.007299 | 0.153285 | 0.014599 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b9525401f62a8c3bf39cf1a74d072fa4e2b9b95 | 2,455 | py | Python | chemreac/util/grid.py | bjodah/chemreac | dbe38a10cf6b88e66192bcc998721b61aabbd9dc | [
"BSD-2-Clause"
] | 14 | 2015-03-11T21:46:15.000Z | 2020-06-06T16:01:38.000Z | chemreac/util/grid.py | bjodah/chemreac | dbe38a10cf6b88e66192bcc998721b61aabbd9dc | [
"BSD-2-Clause"
] | 20 | 2015-01-21T16:11:36.000Z | 2020-01-06T10:30:46.000Z | chemreac/util/grid.py | chemreac/chemreac | dbe38a10cf6b88e66192bcc998721b61aabbd9dc | [
"BSD-2-Clause"
] | 3 | 2015-08-13T12:06:17.000Z | 2021-12-17T01:12:20.000Z | # -*- coding: utf-8 -*-
"""
chemreac.util.grid
------------------
Grid related utilities for one-dimensional grid of arbitrary spacing.
"""
from __future__ import print_function, division
from math import log
import numpy as np
from ..units import get_derived_unit, to_unitless
def generate_grid(x0, xend, N, logx=False, unit_registry=None, random=False, use_log2=False):
    """Generate a one-dimensional grid of N+1 bin separators between x0 and xend.

    The grid is linear in x (or in log(x) / log2(x) when ``logx`` /
    ``use_log2`` are set) and can optionally be perturbed randomly in its
    interior points (``random`` is either True or a factor in (0, 1]).
    """
    length_unit = get_derived_unit(unit_registry, 'length')
    _x0 = to_unitless(x0, length_unit)
    _xend = to_unitless(xend, length_unit)
    if logx:
        low, high = log(_x0), log(_xend)
        if use_log2:
            low, high = low / log(2), high / log(2)
    else:
        low, high = _x0, _xend
    result = np.linspace(low, high, N+1)
    if random is False:
        return result
    if random is True:
        random = 1.0
    elif random > 1.0 or random <= 0.0:
        raise ValueError("0 < random <= 1.0, or True => 1.0")
    # perturb interior separators only; endpoints stay fixed
    result[1:-1] += random*(np.random.random(N-1)-0.5)*(_xend-_x0)/(N+2)
    return result
def padded_centers(x, nsidep):
    """Return bin centers of *x* padded with *nsidep* mirrored ghost centers.

    Parameters
    ----------
    x: sequence
        strictly monotonically increasing sequence of positions of
        bin separators.
    nsidep: integer
        number of padding bins: (nstencil-1)/2

    Returns
    -------
    Array of ``len(x)-1 + 2*nsidep`` centers; padding centers are interior
    centers reflected about the first and last separators.
    """
    centers = x[:-1] + np.diff(x)/2
    left_pad = 2*x[0] - centers[:nsidep][::-1]
    right_pad = 2*x[-1] - centers[-nsidep:][::-1]
    return np.concatenate((left_pad, centers, right_pad))
def pxci_to_bi(nstencil, N):
    """Map padded x-center indices (0-based, padding included) to bin indices.

    Parameters
    ----------
    nstencil: integer
        Number of stencil points used
    N: integer
        Number of bins

    Returns
    -------
    list of bin indices; the padding entries mirror the interior bins.
    """
    nsidep = (nstencil-1)//2
    left = [nsidep - 1 - i for i in range(nsidep)]
    interior = list(range(N))
    right = [N - 1 - i for i in range(nsidep)]
    return left + interior + right
def stencil_pxci_lbounds(nstencil, N, lrefl=False, rrefl=False):
    """Lower bound (in padded-center indices) of the stencil for every bin.

    Generates a list of lower bounds in padded centers for each bin index
    for use in a finite difference scheme.

    Parameters
    ----------
    nstencil: int
        Number of stencil points used
    N: int
        Number of bins
    lrefl, rrefl: bool
        left and right reflective boundaries
    """
    nsidep = (nstencil-1)//2
    lo = 0 if lrefl else nsidep
    hi = N + 2*nsidep - (0 if rrefl else nsidep) - nstencil
    bounds = []
    for i in range(N):
        bounds.append(max(lo, min(hi, i)))
    return bounds
| 25.309278 | 93 | 0.600407 | 344 | 2,455 | 4.188953 | 0.360465 | 0.00694 | 0.016655 | 0.013879 | 0.036086 | 0.036086 | 0 | 0 | 0 | 0 | 0 | 0.028318 | 0.266395 | 2,455 | 96 | 94 | 25.572917 | 0.771793 | 0.340937 | 0 | 0.102564 | 0 | 0 | 0.026639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.102564 | 0 | 0.333333 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b95278b15173f39c3e354e42bd5da9df55f3094 | 4,771 | py | Python | power_calibration.py | elvd/rf_sewer_pipes | 18dd433539f8678d9b64e7004283a031811de241 | [
"MIT"
] | null | null | null | power_calibration.py | elvd/rf_sewer_pipes | 18dd433539f8678d9b64e7004283a031811de241 | [
"MIT"
] | null | null | null | power_calibration.py | elvd/rf_sewer_pipes | 18dd433539f8678d9b64e7004283a031811de241 | [
"MIT"
] | null | null | null | #python:
from collections import namedtuple
import numpy as np
from scipy.constants import speed_of_light
import gprMax.input_cmd_funcs as gprmax_cmds
import aux_funcs
Point = namedtuple('Point', ['x', 'y', 'z'])
# ! Simulation model parameters begin
# * Naming parameters
simulation_name = 'Antenna in free space'
geometry_filename = 'power_calibration_2.45ghz'
snapshot_filename = '_'.join([geometry_filename, 'snapshot'])
geometry_mode = '2D'
output_snapshots = True
snapshots_count = 64
fund_freq = 2.45e9
max_harmonic = 5
runtime_multiplier = 1.25
pml_cells_number = 20
# * Tx and Rx parameters
# * The X, Y, and Z offsets are from the middle points of the side
# * surfaces of the simulation domain. They do not include the PML
# * cells distance in them, this is taken care of later in the script.
tx_power = 10.0
tx_offset = Point(10e-2, 0, 0)
rx_offset = Point(10e-2, 0, 0)
waveform_type = 'contsine'
waveform_identifier = 'tx_1'
dipole_polarisation = 'z'
# ! Simulation model parameters end
# * Frequency-derived parameters
fund_freq_GHz = fund_freq / 1e9
fund_wavelength = speed_of_light / fund_freq
# * Some preliminary calculations
lambda_min = speed_of_light / (max_harmonic * fund_freq)
delta_d = lambda_min / 10
# ! Copied this from SO, to round down `delta_d` to a reasonable width
round_digits = int(np.ceil(-np.log10(delta_d))) + 1
round_digits = np.power(10, round_digits)
delta_d = np.trunc(delta_d * round_digits) / round_digits
# * PML command
if geometry_mode == '2D':
pml_command = '{0} {0} 0 {0} {0} 0'.format(pml_cells_number)
elif geometry_mode == '3D':
pml_command = '{0} {0} {0} {0} {0} {0}'.format(pml_cells_number)
# * Model geometry
far_field_distance = aux_funcs.far_field_distance(
fund_freq_GHz, delta_d, 'hertzian'
)
pml_x = pml_cells_number * delta_d
pml_y = pml_cells_number * delta_d
if geometry_mode == '2D':
pml_z = 0
elif geometry_mode == '3D':
pml_z = pml_cells_number * delta_d
model_x = 3 # 5 * far_field_distance
model_y = 3 # 5 * far_field_distance
if geometry_mode == '2D':
model_z = delta_d
elif geometry_mode == '3D':
model_z = 1 # 2 * far_field_distance
domain_x = model_x + 2 * pml_x
domain_y = model_y + 2 * pml_y
domain_z = model_z + 2 * pml_z
longest_dimension = np.max([domain_x, domain_y, domain_z])
simulation_runtime = runtime_multiplier * (longest_dimension / speed_of_light)
# * Calculate Hertzian dipole current from required power
waveform_amplitude = aux_funcs.hertzian_dipole_current(
fund_freq_GHz, tx_power, delta_d
)
if geometry_mode == '2D':
transmitter_position = Point(
0 + (pml_x + tx_offset.x),
domain_y / 2 + tx_offset.y,
0 + tx_offset.z
)
receiver_position = Point(
domain_x - (pml_x + rx_offset.x),
domain_y / 2 + rx_offset.y,
0 + rx_offset.z
)
elif geometry_mode == '3D':
transmitter_position = Point(
0 + (pml_x + tx_offset.x),
domain_y / 2 + tx_offset.y,
domain_z / 2 + tx_offset.z
)
receiver_position = Point(
domain_x - (pml_x + rx_offset.x),
domain_y / 2 + rx_offset.y,
domain_z / 2 + rx_offset.z
)
# * gprMax simulation setup
gprmax_cmds.command('title', simulation_name)
gprmax_cmds.command('pml_cells', pml_command)
gprmax_cmds.domain(x=domain_x, y=domain_y, z=domain_z)
gprmax_cmds.dx_dy_dz(delta_d, delta_d, delta_d)
gprmax_cmds.time_window(simulation_runtime)
pulse_excitation = gprmax_cmds.waveform(waveform_type,
amplitude=waveform_amplitude,
frequency=fund_freq,
identifier=waveform_identifier)
transmitter = gprmax_cmds.hertzian_dipole(dipole_polarisation,
transmitter_position.x,
transmitter_position.y,
transmitter_position.z,
pulse_excitation)
receiver = gprmax_cmds.rx(receiver_position.x,
receiver_position.y,
receiver_position.z)
gprmax_cmds.geometry_view(0, 0, 0,
domain_x, domain_y, domain_z,
delta_d, delta_d, delta_d,
geometry_filename, 'n')
if output_snapshots:
for number in range(snapshots_count):
gprmax_cmds.snapshot(0, 0, 0,
domain_x, domain_y, domain_z,
delta_d, delta_d, delta_d,
((number + 1) *
(simulation_runtime / snapshots_count)),
snapshot_filename + str(number))
#end_python:
| 29.450617 | 78 | 0.640537 | 647 | 4,771 | 4.401855 | 0.236476 | 0.042135 | 0.010534 | 0.008427 | 0.239115 | 0.181531 | 0.140449 | 0.140449 | 0.140449 | 0.140449 | 0 | 0.024649 | 0.268707 | 4,771 | 161 | 79 | 29.63354 | 0.791631 | 0.134982 | 0 | 0.224299 | 0 | 0 | 0.038705 | 0.006086 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.046729 | 0 | 0.046729 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b96748117a2ee7e1202b513f6bf2c96ed0a9759 | 6,226 | py | Python | r2o.py | AnweshGangula/Roam2Obsidian | 5fe70957a6d3e0275c0244b96583657c433010ba | [
"MIT"
] | 1 | 2021-08-06T13:32:17.000Z | 2021-08-06T13:32:17.000Z | r2o.py | AnweshGangula/Roam2Obsidian | 5fe70957a6d3e0275c0244b96583657c433010ba | [
"MIT"
] | null | null | null | r2o.py | AnweshGangula/Roam2Obsidian | 5fe70957a6d3e0275c0244b96583657c433010ba | [
"MIT"
] | 1 | 2021-10-20T20:16:30.000Z | 2021-10-20T20:16:30.000Z | # Save Roam JSON Export file in same folder and run following code in terminal `python r2o.py my-roam-export.json`
import sys
import os
import json
from tqdm import tqdm
import re
from dateutil.parser import parse
from datetime import datetime
# YAML front matter template written at the top of every generated note
yaml = """---
title: {title}
created: {created}
---
"""

months = r"January|February|March|April|May|June|July|August|September|October|November|December"
# fr"() is a combination of f-string and raw strings
# escape literal braces in f-string: https://stackoverflow.com/a/5466478/6908282
# daily note titles, e.g. "May 4th, 2020"
re_daily = re.compile(fr"({months}) ([0-9]+)[a-z]{{2}}, ([0-9]{{4}})")
# links to daily notes, e.g. "[[May 4th, 2020]]"
re_daylink = re.compile(fr"(\[\[)([{months} [0-9]+[a-z]{{2}}, [0-9]{{4}})(\]\])")
# Roam block constructs: {{mentions: ((uid))}}, {{embed: ((uid))}} and ((uid));
# uids are assumed to be exactly 9 characters long
re_blockmentions = re.compile(r"({{mentions: \(\()(.{9})(\)\)}})")
re_blockembed = re.compile(r"({{embed: \(\()(.{9})(\)\)}})")
re_blockref = re.compile(r"(\(\()(.{9})(\)\))")
re_HTML = re.compile("(?<!`)<(?!\s|-).+?>(?!`)")
# Reference to above Regex: https://regex101.com/r/BVWwGK/10
def scan(jdict, page):
    """Recursively index a Roam block (and all its children) by uid.

    Side effect: every child block gets a ``"page"`` back-reference to
    the page it belongs to.

    :return: dict mapping uid -> block dict.
    """
    uid_map = {jdict["uid"]: jdict}
    for child in jdict.get("children", []):
        child["page"] = page
        uid_map.update(scan(child, page))
    return uid_map
def fence_HTMLtags(string):
    """Wrap raw HTML tags in backticks so Obsidian shows them literally.

    Lines that already start a fenced code block (```) are left untouched.
    Regex reference: https://regex101.com/r/BVWwGK/10
    """
    if string.startswith("```"):
        return string
    # \g<0> is the whole match: surround every tag with backticks
    # (https://docs.python.org/3/library/re.html#re.sub)
    return re.sub(re_HTML, r"`\g<0>`", string)
def replace_daylinks(s):
    """Rewrite Roam daily links like ``[[May 4th, 2020]]`` to ``[[2020-05-04]]``."""
    result = s
    while True:
        m = re_daylink.search(result)
        if m is None:
            return result
        iso_day = parse(m.group(2)).isoformat()[:10]
        result = result[: m.end(1)] + iso_day + "]]" + result[m.end(0):]
def replace_blockrefs(s, uid2block, referenced_uids):
    """Replace Roam block refs/embeds/mentions with Obsidian block embeds.

    Matches are tried as embed, then mentions, then plain ``((uid))``.
    Every resolved uid is recorded in *referenced_uids* so the target
    block can later be given a ``^block-id`` anchor. Finishes by
    converting daily-note links.

    :param s: the block text to rewrite.
    :param uid2block: mapping uid -> block dict (see scan()).
    :param referenced_uids: set collecting every uid that was referenced.
    :return: the rewritten string.
    """
    new_s = s
    pos = 0  # resume searching here so an unresolved uid cannot loop forever
    while True:
        m = None
        for pattern in (re_blockembed, re_blockmentions, re_blockref):
            m = pattern.search(new_s, pos)
            if m is not None:
                break
        if m is None:
            break
        uid = m.group(2)
        if uid not in uid2block:
            print("************** uid not found:", uid)
            # Leave the unresolved reference in place and continue after it.
            # Previously the match was never consumed, so this spun forever.
            pos = m.end(0)
        else:
            referenced_uids.add(uid)
            head = new_s[: m.start(1)]
            r_block = uid2block[uid]
            # shall we replace with the text or the link or both
            replacement = ""
            # replacement = r_block['string']
            # Obsidian block ids do not allow '_', so it is mapped to '-'
            replacement += f' ![[{r_block["page"]["title"]}#^{r_block["uid"].replace("_", "-")}]]'
            tail = new_s[m.end(3):]
            new_s = head + replacement + tail
    return replace_daylinks(new_s)
def expand_children(block, uid2block, referenced_uids, level=0):
    """Render the children of *block* as markdown bullet lines, recursively.

    :param block: page or block dict whose "children" are rendered.
    :param uid2block: mapping uid -> block dict (from scan()).
    :param referenced_uids: set of uids referenced somewhere; blocks whose
        uid is in this set get a ^block-id anchor appended.
    :param level: current nesting depth, controls indentation.
    :return: list of markdown lines.
    """
    lines = []
    for b in block.get("children", []):
        prefix = ""
        if level >= 1:
            prefix = " " * level
        s = b["string"]
        children = b.get("children", [])
        headinglevel = b.get("heading", None)
        if headinglevel is not None:
            # Roam heading level maps to that many leading '#'
            prefix = "#" * (headinglevel) + " " + prefix
        if children is None and level == 0:
            pass
        else:
            prefix += "* "
        uid = b["uid"]
        if uid in referenced_uids:
            # Obsidian block ids do not allow '_' -> replace with '-'
            postfix = f' ^{uid.replace("_", "-")}'
        else:
            postfix = ""
        # resolve ((uid)) references / embeds before applying the prefix
        s = prefix + replace_blockrefs(s, uid2block, referenced_uids) + postfix
        if "\n" in s:
            # multi-line block: re-indent continuation lines; fenced code
            # blocks keep the bullet in the prefix and get their closing
            # backticks moved to a fresh line
            prefix = prefix.replace("*", " ") if not s.startswith("```") else prefix
            new_s = s[:-1]
            new_s = new_s.replace("\n", "\n" + prefix)
            new_s += s[-1]
            new_s = (
                new_s[:-3] + "\n" + prefix + new_s[-3:]
                if not s.startswith("```")
                else s
            )  # move closing backticks (```) of code block to new line
            s = new_s + "\n"
        s = fence_HTMLtags(s)
        lines.append(s)
        lines.extend(expand_children(b, uid2block, referenced_uids, level + 1))
    return lines
# Pass 0: load the Roam JSON export given on the command line
j = json.load(open(sys.argv[1], mode="rt", encoding="utf-8", errors="ignore"))
odir = "md"
ddir = "md/daily"
os.makedirs(ddir, exist_ok=True)

print("Pass 1: scan all pages")
uid2block = {}
referenced_uids = set()
pages = []
for page in tqdm(j):
    title = page["title"]
    created = page.get("create-time") or page["edit-time"]
    created = datetime.fromtimestamp(created / 1000).isoformat()[:10]
    children = page.get("children") or []
    is_daily = False
    m = re_daily.match(title)
    if m:
        # daily notes are renamed to ISO dates and written to md/daily
        is_daily = True
        dt = parse(title)
        title = dt.isoformat().split("T")[0]
    page = {
        "uid": None,
        "title": title,
        "created": created,
        "children": children,
        "daily": is_daily,
    }
    uid2block.update(scan(page, page))
    pages.append(page)

print("Pass 2: track blockrefs")
for p in tqdm(pages):
    # dry run only to collect every uid that is referenced somewhere
    expand_children(p, uid2block, referenced_uids)

print("Pass 3: generate")
error_pages = []
for p in tqdm(pages):
    title = p["title"]
    if not title:
        continue
    ofiln = f'{odir}/{p["title"]}.md'
    if p["daily"]:
        ofiln = f'{ddir}/{p["title"]}.md'
    # hack for crazy slashes in titles
    if "/" in title:
        d = odir
        for part in title.split("/")[:-1]:
            d = os.path.join(d, part)
            os.makedirs(d, exist_ok=True)
    lines = expand_children(p, uid2block, referenced_uids)
    try:
        with open(ofiln, mode="wt", encoding="utf-8") as f:
            f.write(yaml.format(**p))
            f.write("\n".join(lines))
    except Exception:
        # previously a bare `except:` which also swallowed KeyboardInterrupt;
        # failing pages are collected and reported at the end of the run
        error_pages.append({"page": p, "content": lines})

if error_pages:
    print("The following pages had errors:")
    for ep in error_pages:
        p = ep["page"]
        t = p["title"]
        c = ep["content"]
        print(f"Title: >{t}<")
        print(f"Content:")
        print(" " + "\n ".join(c))
print("Done!")
| 29.789474 | 114 | 0.540475 | 813 | 6,226 | 4.055351 | 0.273063 | 0.026691 | 0.048832 | 0.014559 | 0.166515 | 0.138004 | 0.057931 | 0.041856 | 0.041856 | 0.016379 | 0 | 0.023367 | 0.292001 | 6,226 | 208 | 115 | 29.932692 | 0.724592 | 0.122229 | 0 | 0.110429 | 0 | 0.018405 | 0.153945 | 0.038899 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030675 | false | 0.02454 | 0.042945 | 0 | 0.104294 | 0.055215 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b99fe63fcf6d06c867423d961d88a13935d459e | 1,465 | py | Python | var/spack/repos/builtin/packages/canu/package.py | mrzv/spack | a0fb2838ea60f020179f480a2db1438da9d2e2ab | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/canu/package.py | matzke1/spack | 9af44814b12639744926c56cdf16ac9e95490011 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/canu/package.py | matzke1/spack | 9af44814b12639744926c56cdf16ac9e95490011 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Canu(MakefilePackage):
    """A single molecule sequence assembler for genomes large and
    small."""

    homepage = "http://canu.readthedocs.io/"
    url = "https://github.com/marbl/canu/archive/v1.5.tar.gz"

    version('1.7.1', sha256='c314659c929ee05fd413274f391463a93f19b8337eabb7ee5de1ecfc061caafa')
    version('1.5', '65df275baa28ecf11b15dfd7343361e3')

    # runtime-only dependencies: canu drives these from its perl pipeline
    depends_on('gnuplot', type='run')
    depends_on('jdk', type='run')
    depends_on('perl', type='run')

    build_directory = 'src'

    def patch(self):
        # Use our perl, not whatever is in the environment
        filter_file(r'^#!/usr/bin/env perl',
                    '#!{0}'.format(self.spec['perl'].command.path),
                    'src/pipelines/canu.pl')

    def install(self, spec, prefix):
        # replicate the Makefile logic here:
        # https://github.com/marbl/canu/blob/master/src/Makefile#L344
        uname = which('uname')
        ostype = uname(output=str).strip()
        machinetype = uname('-m', output=str).strip()
        if machinetype == 'x86_64':
            machinetype = 'amd64'
        target_dir = '{0}-{1}'.format(ostype, machinetype)
        # renamed from `bin`, which shadowed the builtin of the same name
        bin_dir = join_path(target_dir, 'bin')
        install_tree(bin_dir, prefix.bin)
| 34.069767 | 95 | 0.63959 | 177 | 1,465 | 5.237288 | 0.661017 | 0.029126 | 0.030205 | 0.040992 | 0.049622 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077058 | 0.220478 | 1,465 | 42 | 96 | 34.880952 | 0.734676 | 0.271672 | 0 | 0 | 0 | 0 | 0.271251 | 0.111748 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.043478 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b9c7ab513539e60f2d46c3236a9f0d71ce9e6af | 446 | py | Python | socket_sender.py | RashiG27/summer19 | ca6956080b2034793f45716850dca3fb27cb87e2 | [
"Apache-2.0"
] | null | null | null | socket_sender.py | RashiG27/summer19 | ca6956080b2034793f45716850dca3fb27cb87e2 | [
"Apache-2.0"
] | null | null | null | socket_sender.py | RashiG27/summer19 | ca6956080b2034793f45716850dca3fb27cb87e2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import socket
#target machine's ip number
target_ip = '3.82.4.173'
#target machine's port number
target_port = 8888
#create UDP socket
# IPv4--(INET--INET6), type of socket UDP --(DGRAM--STREAM)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
msg = input("Please enter your message : ")
n = msg.encode('ascii')
s.sendto(n, (target_ip,target_port))
print(s.recvfrom(100))
| 20.272727 | 75 | 0.665919 | 68 | 446 | 4.279412 | 0.617647 | 0.089347 | 0.09622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047222 | 0.192825 | 446 | 21 | 76 | 21.238095 | 0.761111 | 0.363229 | 0 | 0 | 0 | 0 | 0.154122 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2b9f17f18f5bbb0bd66f11c7a64aabbb4c285ec5 | 733 | py | Python | step1a/sanproof_1_01/transcode_ab.py | funderburkjim/boesp-prep | a8738671a8f8760feb945f4932bf4f2d19ed4f86 | [
"MIT"
] | null | null | null | step1a/sanproof_1_01/transcode_ab.py | funderburkjim/boesp-prep | a8738671a8f8760feb945f4932bf4f2d19ed4f86 | [
"MIT"
] | 50 | 2021-08-28T23:02:22.000Z | 2022-01-18T18:31:21.000Z | step1a/sanproof_1_02/transcode_ab.py | funderburkjim/boesp-prep | a8738671a8f8760feb945f4932bf4f2d19ed4f86 | [
"MIT"
] | 1 | 2021-09-02T04:36:26.000Z | 2021-09-02T04:36:26.000Z | # coding=utf-8
"""transcode_ab.py
"""
from __future__ import print_function
import sys, re,codecs
sys.path.append('../')
import transcoder
transcoder.transcoder_set_dir('../transcoder')
def transcode(x, tranin, tranout):
    """Transliterate string *x* from encoding *tranin* to encoding *tranout*."""
    return transcoder.transcoder_processString(x, tranin, tranout)
if __name__ == "__main__":
    filein = sys.argv[1]  # assumed Devanagari encoding of Sanskrit
    fileout = sys.argv[2]  # output path for the SLP1 transliteration
    with codecs.open(filein, "r", "utf-8") as f:
        lines = [line.rstrip('\r\n') for line in f]
    print(len(lines), "read from", filein)
    # transliterate every line from Devanagari to SLP1
    newlines = [transcode(line, 'deva', 'slp1') for line in lines]
    with codecs.open(fileout, "w", "utf-8") as f:
        for line in newlines:
            f.write(line + '\n')
    print(len(newlines), "written to", fileout)
| 27.148148 | 63 | 0.709413 | 111 | 733 | 4.531532 | 0.531532 | 0.023857 | 0.053678 | 0.027833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009346 | 0.124147 | 733 | 26 | 64 | 28.192308 | 0.774143 | 0.096862 | 0 | 0 | 0 | 0 | 0.105828 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.263158 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ba3178b86594e0bd355d453a40ea67602520196 | 3,361 | py | Python | COMET/measurement_plugins/OneWireEnv.py | dallaval5u/COMET | 8c5793faafe2797dd4100507aa0fe1e71cf9f6c0 | [
"MIT"
] | null | null | null | COMET/measurement_plugins/OneWireEnv.py | dallaval5u/COMET | 8c5793faafe2797dd4100507aa0fe1e71cf9f6c0 | [
"MIT"
] | null | null | null | COMET/measurement_plugins/OneWireEnv.py | dallaval5u/COMET | 8c5793faafe2797dd4100507aa0fe1e71cf9f6c0 | [
"MIT"
] | null | null | null | from threading import Thread, Timer
from time import time
import logging
try:
import Adafruit_DHT
except:
pass
class OneWireEnv(Thread):
"""This class is for reading out one wire sensors with the rpi"""
def __init__(self, main, framework, update_interval=5000):
"""This starts the background and continuous tasks like humidity and temperature control"""
Thread.__init__(self)
self.main = main
self.framework = framework
self.stop_measurement_loop = self.main.stop_measurement_loop
self.update_interval = float(update_interval)
self.queue_to_main = framework["Message_to_main"]
self.settings = framework["Configs"]["config"]["settings"]
self.sensors = self.settings["Sensors"]
self.log = logging.getLogger(__name__)
self.running = False
# First try if DHT resource is valid and accessible
self.success_DHT = False
try:
import Adafruit_DHT
for name, sensor in self.sensors.items():
sensortype = getattr(Adafruit_DHT, sensor["type"])
humidity, temperature = Adafruit_DHT.read(sensortype, sensor["pin"])
if not humidity and not temperature:
self.log.critical(
"Sensor {} at pin {} for room {} did not answer.".format(
sensortype, sensor["pin"], name
)
)
self.success_DHT = True
except Exception as e:
self.log.error(
"The temperature and humidity controller seems not to be responding. Error:"
+ str(e)
)
# Try to query the
def run(self):
"""This is the update function for temp hum query"""
if self.success_DHT and not self.running:
self.log.critical("Humidity and temp control started...")
self.running = True
elif not self.running:
self.log.critical("Humidity and temp control NOT started...")
return
if not self.stop_measurement_loop and self.success_DHT:
try:
for name, sensor in self.sensors.items():
sensortype = getattr(Adafruit_DHT, sensor["type"])
humidity, temperature = Adafruit_DHT.read_retry(
sensortype, sensor["pin"]
)
self.queue_to_main.put(
{
"Temp_" + name: [float(time()), float(temperature)],
"Hum_" + name: [float(time()), float(humidity)],
}
)
except Exception as err:
self.log.error(
"The temperature and humidity controller seems not to be responding. Error: {!s}".format(
err
)
)
if not self.main.stop_measurement_loop:
self.start_timer(self.run)
else:
self.log.critical(
"Shutting down environment control due to stop of measurement loop"
)
def start_timer(self, callback):
    """Schedule `callback` to run once after `update_interval` milliseconds.

    Renamed the parameter from `object` (which shadowed the builtin) to
    `callback`; the only caller passes it positionally.
    """
    # update_interval is configured in milliseconds; Timer expects seconds.
    Timer(self.update_interval / 1000.0, callback).start()
| 36.532609 | 109 | 0.547456 | 354 | 3,361 | 5.070621 | 0.322034 | 0.027298 | 0.04234 | 0.03844 | 0.288579 | 0.288579 | 0.254039 | 0.254039 | 0.254039 | 0.254039 | 0 | 0.004247 | 0.369533 | 3,361 | 91 | 110 | 36.934066 | 0.84285 | 0.091342 | 0 | 0.178082 | 0 | 0 | 0.135046 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041096 | false | 0.013699 | 0.068493 | 0 | 0.136986 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ba81ec618c7d1270dc12f7a84fbe054a68e7523 | 1,073 | py | Python | CODE/GET_FORECASTS_V001.py | rianashwin/Forecasting-SDPR-Gold-Trust-ETF-Prices--PRD- | 138b99deab4c2afdf89c7c881c3f91ea2732f3ef | [
"MIT"
] | 1 | 2021-09-26T08:03:11.000Z | 2021-09-26T08:03:11.000Z | CODE/GET_FORECASTS_V001.py | rianashwin/Forecasting-SPDR-Gold-Trust-ETF-Prices--PRD- | 138b99deab4c2afdf89c7c881c3f91ea2732f3ef | [
"MIT"
] | null | null | null | CODE/GET_FORECASTS_V001.py | rianashwin/Forecasting-SPDR-Gold-Trust-ETF-Prices--PRD- | 138b99deab4c2afdf89c7c881c3f91ea2732f3ef | [
"MIT"
] | null | null | null | """
Flask app
"""
import pandas as pd
from flask import Flask
import json
app = Flask(__name__)
# runs in local host http://127.0.0.1:5000/latest/all
@app.route('/latest/<horizon>')
def weather(horizon):
    """
    Reads the saved forecast json and returns it (or one entry) as a json string.

    Eg: http://127.0.0.1:5000/latest/all or http://127.0.0.1:5000/latest/54

    Parameters
    ----------
    horizon : string
        Either "all" or an integer between 1 and 90 (as a string)
        representing the desired timestamp.

    Returns
    -------
    output
        Json string to output to page
    """
    with open(r'.\RESULTS\saved_forecasts_PRD.json', 'r') as jsonfile:
        # json.load reads the file object directly; no need for read()+loads.
        file_data = json.load(jsonfile)

    if horizon == "all":
        output = json.dumps(file_data)
    else:
        # NOTE(review): an unknown horizon key raises KeyError (HTTP 500);
        # consider flask.abort(404) if friendlier errors are wanted.
        output = json.dumps(file_data[horizon])

    return output
# Get setup so that if we call the app directly (and it isn't being imported elsewhere)
if __name__ == '__main__':
app.run(debug=True)
| 24.386364 | 97 | 0.652377 | 158 | 1,073 | 4.322785 | 0.531646 | 0.030747 | 0.035139 | 0.039531 | 0.377745 | 0.310395 | 0.310395 | 0.281113 | 0.213763 | 0.213763 | 0 | 0.045949 | 0.229264 | 1,073 | 43 | 98 | 24.953488 | 0.779927 | 0.507922 | 0 | 0 | 0 | 0 | 0.135776 | 0.073276 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2babfb50af910a621784aa5f2878e7604544427c | 535 | py | Python | lut.py | 94JuHo/OpenCV_Lecture | 1a895ce479aa64ddf45563489123f721b754f167 | [
"MIT"
] | null | null | null | lut.py | 94JuHo/OpenCV_Lecture | 1a895ce479aa64ddf45563489123f721b754f167 | [
"MIT"
] | null | null | null | lut.py | 94JuHo/OpenCV_Lecture | 1a895ce479aa64ddf45563489123f721b754f167 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
def invert_image(img):
    """Invert an 8-bit image in place via a lookup table and return it."""
    # lut[v] == 255 - v for every 8-bit value.
    lut = np.arange(255, -1, -1, dtype='uint8')
    # Vectorized fancy-index pass replaces the original per-pixel
    # itemset() double loop (one numpy op instead of h*w Python calls).
    img[:] = lut[img]
    return img


def showImage():
    """Load lena.jpg as grayscale, display it, then display its negative."""
    filename = "Images/lena.jpg"
    img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    cv2.imshow('image', img)

    invert_image(img)
    # OpenCV equivalent: result = cv2.LUT(img, lut)

    cv2.imshow('result', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
showImage() | 22.291667 | 53 | 0.573832 | 75 | 535 | 4.08 | 0.506667 | 0.039216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043928 | 0.276636 | 535 | 24 | 54 | 22.291667 | 0.74677 | 0.065421 | 0 | 0 | 0 | 0 | 0.065126 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bac2bdf3e935053151415509164e32a0e767cd4 | 1,340 | py | Python | project-4/code.py | aakashsingh1210/greyatom-python-for-data-science | c4df27416cfffa574107ba1a7a5dc96ba6d92172 | [
"MIT"
] | null | null | null | project-4/code.py | aakashsingh1210/greyatom-python-for-data-science | c4df27416cfffa574107ba1a7a5dc96ba6d92172 | [
"MIT"
] | null | null | null | project-4/code.py | aakashsingh1210/greyatom-python-for-data-science | c4df27416cfffa574107ba1a7a5dc96ba6d92172 | [
"MIT"
] | null | null | null | # --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# New record to append to the census data
new_record = [[50, 9, 4, 1, 0, 0, 40, 0]]

# Reading file (`path` is provided by the grading environment)
data = np.genfromtxt(path, delimiter=",", skip_header=1)

# Code starts here
census = np.concatenate((data, new_record))

# Column 0: age. Note age_mean is the midrange, not the arithmetic mean.
age = census[:, 0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = (max_age + min_age) / 2
age_std = np.std(age)

# Column 2: race code (0-4); split into per-race subsets.
race = census[:, 2]
race_0 = race[race == 0]
race_1 = race[race == 1]
race_2 = race[race == 2]
race_3 = race[race == 3]
race_4 = race[race == 4]

len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)

# np.argmin returns the FIRST index of the minimum, which matches the
# original if/elif chain's tie-breaking (lower race code wins ties).
minority_race = int(np.argmin([len_0, len_1, len_2, len_3, len_4]))

# Boolean masking: rows with age > 60
senior_citizens = census[census[:, 0] > 60]
working_hours_sum = senior_citizens[:, 6].sum()
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)

# Column 1: education; column 7: pay.
high = census[census[:, 1] > 10]
low = census[census[:, 1] <= 10]
avg_pay_high = high[:, 7].mean()
avg_pay_low = low[:, 7].mean()
print(avg_pay_high)
print(avg_pay_low)
| 23.928571 | 57 | 0.71194 | 256 | 1,340 | 3.429688 | 0.226563 | 0.027335 | 0.039863 | 0.045558 | 0.159453 | 0.159453 | 0.159453 | 0.159453 | 0.159453 | 0.159453 | 0 | 0.063506 | 0.118657 | 1,340 | 55 | 58 | 24.363636 | 0.679932 | 0.061194 | 0 | 0 | 0 | 0 | 0.005843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.046512 | 0 | 0.046512 | 0.069767 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bad03cbdb9b9c6e7b1117ca58db86aba320ec23 | 3,247 | py | Python | old/tkpng3.py | jarvisteach/tinter-png | 08348fde1fc4099248ca7170d0cdfa8cda8f23d7 | [
"MIT"
] | 8 | 2017-07-02T10:45:43.000Z | 2022-02-25T16:30:22.000Z | old/tkpng3.py | jarvisteach/tinter-png | 08348fde1fc4099248ca7170d0cdfa8cda8f23d7 | [
"MIT"
] | null | null | null | old/tkpng3.py | jarvisteach/tinter-png | 08348fde1fc4099248ca7170d0cdfa8cda8f23d7 | [
"MIT"
] | 1 | 2019-07-10T06:49:22.000Z | 2019-07-10T06:49:22.000Z | #!/usr/bin/env python3
# tkpng - example of using tkinter and pypng to display pngs (albeit reduced quality)
# in nothing but pure python. Can use RGBA images, but the alpha is stripped.
# v0.3 - structure rearranged to make classes and functions for reuse
from array import *
from tkinter import *
import png
class PngImageTk(object):
    """A png image loaded and placed into a tkinter.PhotoImage object"""

    def __init__(self, filename):
        # Read image, creating the list of pixel RGB or RGBA rows.
        r = png.Reader(filename)
        # Prefer the RGB8 view; fall back to RGBA8 when an alpha channel
        # is present (asRGB8 raises on alpha images).
        try:
            self.w, self.h, self.pixels, self.meta = r.asRGB8()
        except Exception:
            self.w, self.h, self.pixels, self.meta = r.asRGBA8()
        self.pixeldata = list(self.pixels)  # each row of the image as an array
        self.x = 0  # retained for backward compatibility; no longer used
        self.y = 0
        # PhotoImage is the buffer rows are painted into for a Canvas.
        self.image = PhotoImage(width=self.w, height=self.h)

    def __str__(self):
        # BUG FIX: the original built tuples ("Width:", ..., "\n") and
        # referenced nonexistent self.width/self.height, so str() raised.
        return (
            "Width: {}\n"
            "Height: {}\n"
            "Bitdepth: {}\n"
            "Greyscale: {}\n"
            "Alpha: {}\n"
        ).format(self.w, self.h, self.meta["bitdepth"],
                 self.meta["greyscale"], self.meta["alpha"])

    def chunks(self, l, n):
        """Split list l into consecutive chunks of length n."""
        return [l[i:i+n] for i in range(0, len(l), n)]

    def convert(self):
        """Paint pixeldata into the PhotoImage, one row at a time."""
        values = 4 if self.meta["alpha"] else 3  # RGBA vs RGB tuples per pixel
        for rownum, row in enumerate(self.pixeldata):
            row = row.tolist()  # convert from array to list
            chunked = self.chunks(row, values)
            cells = []
            for item in chunked:
                if self.meta["alpha"]:
                    del item[-1]  # tkinter can't handle alpha values
                cells.append("#%02x%02x%02x" % tuple(item))  # 8-bit RGB hex
            rowline = "{" + " ".join(cells) + " }"
            # BUG FIX: the original incremented self.y BEFORE put(), so row r
            # was painted at image row r+1 and image row 0 stayed blank.
            self.image.put(rowline, (0, rownum))
# MAIN
# Set up the application window.
root = Tk()
root.title("Loaded image.png")
# Load the png (any alpha channel is stripped during conversion).
photo = PngImageTk("image_alpha.png")
# Canvas sized to match the loaded image.
c = Canvas(root, width=photo.w, height=photo.h)
c.pack()
# Convert pixel data into the PhotoImage and place it on the canvas.
photo.convert()
c.create_image(0, 0, image=photo.image, anchor=NW) # anchor NW: image origin at canvas (0, 0)
root.mainloop()
| 41.101266 | 122 | 0.493994 | 385 | 3,247 | 4.14026 | 0.387013 | 0.035132 | 0.016939 | 0.018821 | 0.060226 | 0.036386 | 0.036386 | 0.036386 | 0.036386 | 0 | 0 | 0.014583 | 0.408685 | 3,247 | 78 | 123 | 41.628205 | 0.815625 | 0.28457 | 0 | 0.078431 | 0 | 0 | 0.05529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.058824 | 0.019608 | 0.196078 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2baf47be9d3daf1c604497a9da492e2f6f27a9d5 | 2,083 | py | Python | demos/face_recognition_demo/python/landmarks_detector.py | Ohtani-y/open_model_zoo | 280b59fc6c00455889a1949c795558252fdad96f | [
"Apache-2.0"
] | 2 | 2019-08-20T15:30:19.000Z | 2020-09-01T15:16:33.000Z | demos/face_recognition_demo/python/landmarks_detector.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | [
"Apache-2.0"
] | 4 | 2020-05-22T17:30:43.000Z | 2021-08-02T07:33:16.000Z | demos/face_recognition_demo/python/landmarks_detector.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | [
"Apache-2.0"
] | 2 | 2021-06-25T06:18:58.000Z | 2021-08-04T10:05:32.000Z | """
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from utils import cut_rois, resize_input
from ie_module import Module
class LandmarksDetector(Module):
    """Facial landmarks regression model wrapper.

    Takes face crops and returns POINTS_NUMBER (x, y) landmark pairs per
    crop (coordinates presumably normalized to the crop — confirm against
    the model's documentation).
    """

    POINTS_NUMBER = 5  # number of (x, y) landmark pairs the model outputs

    def __init__(self, ie, model):
        super(LandmarksDetector, self).__init__(ie, model, 'Landmarks Detection')

        # The model must expose exactly one input and one output blob.
        assert len(self.model.input_info) == 1, 'Expected 1 input blob'
        assert len(self.model.outputs) == 1, 'Expected 1 output blob'
        self.input_blob = next(iter(self.model.input_info))
        self.output_blob = next(iter(self.model.outputs))
        self.input_shape = self.model.input_info[self.input_blob].input_data.shape

        # The network emits a flat [1, 2 * POINTS_NUMBER, 1, 1] tensor.
        output_shape = self.model.outputs[self.output_blob].shape
        assert np.array_equal([1, self.POINTS_NUMBER * 2, 1, 1], output_shape), \
            'Expected model output shape {}, got {}'.format([1, self.POINTS_NUMBER * 2, 1, 1], output_shape)

    def preprocess(self, frame, rois):
        """Crop each ROI from the frame and resize it to the model input shape."""
        inputs = cut_rois(frame, rois)
        inputs = [resize_input(input, self.input_shape) for input in inputs]
        return inputs

    def enqueue(self, input):
        # Submit one preprocessed crop for inference under the input blob name.
        return super(LandmarksDetector, self).enqueue({self.input_blob: input})

    def start_async(self, frame, rois):
        """Preprocess all ROIs and enqueue them for inference."""
        inputs = self.preprocess(frame, rois)
        for input in inputs:
            self.enqueue(input)

    def postprocess(self):
        """Return one (POINTS_NUMBER, 2) float64 array per enqueued crop."""
        outputs = self.get_outputs()
        results = [out[self.output_blob].buffer.reshape((-1, 2)).astype(np.float64) for out in outputs]
        return results
| 37.196429 | 108 | 0.697072 | 294 | 2,083 | 4.823129 | 0.404762 | 0.042313 | 0.029619 | 0.038082 | 0.098025 | 0.043724 | 0.043724 | 0.043724 | 0.043724 | 0 | 0 | 0.017523 | 0.205473 | 2,083 | 55 | 109 | 37.872727 | 0.839275 | 0.272204 | 0 | 0 | 0 | 0 | 0.066756 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 1 | 0.172414 | false | 0 | 0.103448 | 0.034483 | 0.448276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bb09c7c624b4b2fa539a4a7b7639cfbbb55950f | 23,581 | py | Python | pyASH/interface.py | dhrone/pyASH | 85da060d135fb8be6475d58d4dc33acf88a3a9b2 | [
"MIT"
] | null | null | null | pyASH/interface.py | dhrone/pyASH | 85da060d135fb8be6475d58d4dc33acf88a3a9b2 | [
"MIT"
] | null | null | null | pyASH/interface.py | dhrone/pyASH | 85da060d135fb8be6475d58d4dc33acf88a3a9b2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 by dhrone. All Rights Reserved.
#
import time
from .iot import Iot
from .utility import get_utc_timestamp
from .objects import ASHO
class Interface(object):
    """Base class for Alexa Smart Home capability interfaces.

    Subclasses declare a Properties container describing the interface's
    reportable state and implement directive handlers (e.g. TurnOn).
    State is read from / written through an optional Iot thing shadow.
    """
    interface = None
    version = None
    properties = None

    def __init__(self, thing=None, uncertaintyInMilliseconds=0):
        # Derive the Alexa interface name from the subclass name unless set.
        if not self.interface: self.interface = 'Alexa.'+self.__class__.__name__
        self.version='3'
        self.uncertaintyInMilliseconds = uncertaintyInMilliseconds
        self.thing = thing
        if self.thing:
            self.iot = thing.iotcls(thing.name)
        else:
            # BUG FIX: self.iot was previously left unset when no thing was
            # given, so later `if self.iot:` checks raised AttributeError.
            self.iot = None

    @property
    def capability(self):
        # BUG FIX: jsonDiscover is a property, not a method; the original
        # `self.jsonDiscover()` attempted to call the returned dict.
        return self.jsonDiscover

    @property
    def jsonDiscover(self):
        """Discovery payload fragment for this interface."""
        if self.properties:
            return { 'type':'AlexaInterface', 'interface':self.interface, 'version': self.version, 'properties': self.properties.jsonDiscover }
        return { 'type':'AlexaInterface', 'interface':self.interface, 'version': self.version }

    @property
    def jsonResponse(self):
        """Current property values formatted for an Alexa response.

        When backed by an Iot shadow, each value and timestamp is refreshed
        from the shadow first.
        """
        if not self.properties: return []
        if self.iot:
            timeStamps = self.iot.timeStamps
            for item, value in self.properties.properties.items():
                uncertaintyInMilliseconds = value.uncertaintyInMilliseconds if value.uncertaintyInMilliseconds is not None else self.uncertaintyInMilliseconds
                self.properties._set( item, (self._formatForProperty(self.iot[item]), timeStamps[item], uncertaintyInMilliseconds) )
        else:
            for k, prop in self.properties.properties.items():
                prop.uncertaintyInMilliseconds = prop.uncertaintyInMilliseconds if prop.uncertaintyInMilliseconds is not None else self.uncertaintyInMilliseconds
        return self.properties.jsonResponse

    def __getitem__(self, property):
        # Refresh from the Iot shadow (if any) before returning the value.
        if self.iot:
            timeStamps = self.iot.timeStamps
            for item, value in self.properties.properties.items():
                uncertaintyInMilliseconds = value.uncertaintyInMilliseconds if value.uncertaintyInMilliseconds is not None else self.uncertaintyInMilliseconds
                self.properties._set( item, (self._formatForProperty(self.iot[item]), timeStamps[item], uncertaintyInMilliseconds) )
        return self.properties[property]

    def __setitem__(self, property, value):
        # Write through to the Iot shadow only when the value changed.
        if self.iot:
            if type(value) is tuple:
                value = value[0]
            if self.iot[property] != value:
                self.iot[property] = self._formatForProperty(value)
        self.properties[property] = value

    def _formatForProperty(self, value):
        """Hook for subclasses to reshape a raw value; identity by default."""
        return value

    class Properties(object):
        """Container mapping property names to Property objects, plus the
        proactivelyReported/retrievable reporting flags."""

        def __init__(self, interface, properties, proactivelyReported, retrievable, uncertaintyInMilliseconds = None):
            properties = properties if type(properties) == list else [ properties ]
            self.interface = interface
            self.properties = {}
            self.proactivelyReported = proactivelyReported
            self.retrievable = retrievable
            self.uncertaintyInMilliseconds = uncertaintyInMilliseconds
            for item in properties:
                # Properties without their own uncertainty inherit the default.
                if item.uncertaintyInMilliseconds is None: item.uncertaintyInMilliseconds = uncertaintyInMilliseconds
                self.properties[item.name] = item

        def __getitem__(self, property):
            return self.properties[property].value

        def __setitem__(self, property, value):
            self._set(property, value)

        def _set(self, property, value):
            # `value` may be a bare value or a
            # (value, timeOfSample, uncertaintyInMilliseconds) triple.
            if type(value) is tuple:
                (value, timeOfSample, uncertaintyInMilliseconds) = value
            else:
                timeOfSample = time.time()
                uncertaintyInMilliseconds = self.uncertaintyInMilliseconds
            # Prefer the property's own uncertainty if it exists and is not
            # None, otherwise take the value received.
            uncertaintyInMilliseconds = self.properties[property].uncertaintyInMilliseconds if property in self.properties and self.properties[property].uncertaintyInMilliseconds is not None else uncertaintyInMilliseconds
            # If both the property's and the received value are None, fall
            # back to the default for this Properties object.
            uncertaintyInMilliseconds = uncertaintyInMilliseconds if uncertaintyInMilliseconds is not None else self.uncertaintyInMilliseconds
            self.properties[property].value = value
            self.properties[property].timeOfSample = timeOfSample
            self.properties[property].uncertaintyInMilliseconds = uncertaintyInMilliseconds

        @property
        def jsonDiscover(self):
            """Discovery fragment: supported names plus reporting flags."""
            proplist = []
            for item in self.properties:
                proplist.append({'name':item})
            return { 'supported': proplist, 'proactivelyReported':self.proactivelyReported, 'retrievable': self.retrievable }

        @property
        def jsonResponse(self):
            """Response fragment: one entry per property with timestamp."""
            proplist = []
            for item, p in self.properties.items():
                ums = p.uncertaintyInMilliseconds if p.uncertaintyInMilliseconds is not None else 0
                proplist.append({'namespace': self.interface, 'name':item, 'value': p.value, 'timeOfSample': get_utc_timestamp(p.timeOfSample), 'uncertaintyInMilliseconds': ums})
            return proplist

    class Property(object):
        """One named property value with sample-time metadata."""

        def __init__(self, name, value=None, timeOfSample=None, uncertaintyInMilliseconds=None):
            self.name = name
            self.pvalue = value
            self.uncertaintyInMilliseconds = uncertaintyInMilliseconds
            # BUG FIX: the original default `timeOfSample=time.time()` was
            # evaluated once at class-definition time, so every Property made
            # without an explicit timestamp shared that stale import-time value.
            self.timeOfSample = time.time() if timeOfSample is None else timeOfSample

        @property
        def value(self):
            # Objects from .objects expose jsonResponse; plain values do not
            # (narrowed from a bare except to AttributeError only).
            try:
                return self.pvalue.jsonResponse
            except AttributeError:
                return self.pvalue

        @value.setter
        def value(self, value):
            self.pvalue = value

    def _setdirective(self, request, propertyName, payloadName, validRange=None):
        """Set propertyName from the directive payload, clamped to validRange."""
        # Should really send an error if out of range; currently we clamp.
        v = request.payload[payloadName]
        if validRange:
            v = v if v in validRange else validRange[0] if v < validRange[0] else validRange[-1]
        self[propertyName] = (v, get_utc_timestamp(), self.uncertaintyInMilliseconds)

    def _adjustdirective(self, request, propertyName, payloadName, validRange=None):
        """Add the payload delta to propertyName, clamped to validRange."""
        v = self[propertyName]+request.payload[payloadName]
        if validRange:
            v = v if v in validRange else validRange[0] if v < validRange[0] else validRange[-1]
        self[propertyName] = (v, get_utc_timestamp(), self.uncertaintyInMilliseconds)
class BrightnessController(Interface):
    """Alexa.BrightnessController: absolute and relative brightness (0-100)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(BrightnessController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        brightness = Interface.Property('brightness')
        self.properties = Interface.Properties(
            self.interface,
            [brightness],
            proactivelyReported=proactivelyReported,
            retrievable=retrievable,
        )

    def SetBrightness(self, request):
        """Handle SetBrightness: store the payload value, clamped to 0-100."""
        self._setdirective(request, 'brightness', 'brightness', range(101))

    def AdjustBrightness(self, request):
        """Handle AdjustBrightness: apply brightnessDelta, clamped to 0-100."""
        self._adjustdirective(request, 'brightness', 'brightnessDelta', range(101))
class Calendar(Interface):
    """Alexa.Calendar interface (meeting-room calendar queries)."""

    def __init__(self, proactivelyReported=False, retrievable=False, *args, **kwargs):
        # NOTE(review): the reporting flags are accepted but unused; this
        # interface exposes no reportable properties.
        super(Calendar, self).__init__()

    def payload(self, organizerName, calendarEventId):
        """Build the response payload for a current-meeting query."""
        return { 'organizerName': organizerName, 'calendarEventId':calendarEventId }


class CameraStreamController(Interface):
    """Alexa.CameraStreamController interface (camera stream discovery)."""

    def __init__(self, cameraStreamConfigurations=None, *args, **kwargs):
        super(CameraStreamController, self).__init__()
        # Normalize to a list (or None) regardless of what was passed in.
        self.cameraStreamConfigurations_value = cameraStreamConfigurations if type (cameraStreamConfigurations) is list else [ cameraStreamConfigurations ] if cameraStreamConfigurations is not None else None

    @property
    def jsonDiscover(self):
        # Camera discovery reports 'cameraStreamConfigurations' instead of
        # the 'properties' key used by the base class.
        cameraStreams = []
        if self.cameraStreamConfigurations_value:
            for item in self.cameraStreamConfigurations_value:
                cameraStreams.append(item)
        return { 'type': 'AlexaInterface', 'interface': self.interface, 'version': self.version, 'cameraStreamConfigurations': cameraStreams }
class ChannelController(Interface):
    """Alexa.ChannelController interface (TV channel state)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(ChannelController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('channel')], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    def _formatForProperty(self, value):
        # Reduce a channel object (or dict) to the keys Alexa reports.
        value = value.json if hasattr(value,'json') else value
        return { k:v for k, v in value.items() if k in ['number','callSign','affiliateCallSign'] }


class ColorController(Interface):
    """Alexa.ColorController interface (color state)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(ColorController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('color')], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    def SetColor(self, request):
        """Handle SetColor: store the payload 'color' value as-is."""
        self._setdirective(request, 'color', 'color')


class ColorTemperatureController(Interface):
    """Alexa.ColorTemperatureController interface (white point in Kelvin)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(ColorTemperatureController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('colorTemperatureInKelvin')], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    def SetColorTemperature(self, request):
        # Clamp to the 1000-10000 K range.
        self._setdirective(request, 'colorTemperatureInKelvin', 'colorTemperatureInKelvin', range(1000,10001))

    def IncreaseColorTemperature(self, request):
        """Step to the next standard white point more than 500 K cooler."""
        ranges_of_cool = [1000, 2200, 2700, 4000, 5500, 7000, 10000]
        v = [x for x in ranges_of_cool if x > self['colorTemperatureInKelvin']+500 ]
        v = v[0] if v else ranges_of_cool[-1]
        self['colorTemperatureInKelvin'] = (v, get_utc_timestamp(), self.uncertaintyInMilliseconds)

    def DecreaseColorTemperature(self, request):
        """Step to the next standard white point more than 500 K warmer."""
        ranges_of_cool = [1000, 2200, 2700, 4000, 5500, 7000, 10000]
        v = [x for x in ranges_of_cool if x < self['colorTemperatureInKelvin']-500 ]
        v = v[-1] if v else ranges_of_cool[0]
        self['colorTemperatureInKelvin'] = (v, get_utc_timestamp(), self.uncertaintyInMilliseconds)
class EndpointHealth(Interface):
    """Alexa.EndpointHealth interface (reports 'connectivity')."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(EndpointHealth, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('connectivity')], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)


class InputController(Interface):
    """Alexa.InputController interface (AV input selection)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(InputController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('input')], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    def SelectInput(self, request):
        """Handle SelectInput: store the payload 'input' name directly."""
        self['input'] = (request.payload['input'], get_utc_timestamp(), self.uncertaintyInMilliseconds)


class LockController(Interface):
    """Alexa.LockController interface (smart lock state)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(LockController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('lockState')], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    # If the lock is slow, a deferred response should be sent.
    # A jammed lock should be indicated by lockState=='JAMMED'.
    def Lock(self, request):
        self['lockState'] = ('LOCKED', get_utc_timestamp(), self.uncertaintyInMilliseconds)

    def Unlock(self, request):
        self['lockState'] = ('UNLOCKED', get_utc_timestamp(), self.uncertaintyInMilliseconds)


class MeetingClientController(Interface):
    """Alexa.MeetingClientController interface (stub)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(MeetingClientController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)

    # Needs special discovery logic.
    # Needs an additional structure for meetings; see
    # https://developer.amazon.com/docs/device-apis/alexa-meetingclientcontroller.html#properties
    # Uses a generic response with no context object.
class PercentageController(Interface):
    """Alexa.PercentageController interface (generic 0-100 percentage)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(PercentageController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('percentage')], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    def SetPercentage(self, request):
        """Handle SetPercentage: store the payload value, clamped to 0-100."""
        self._setdirective(request, 'percentage', 'percentage', range(101))

    def AdjustPercentage(self, request):
        """Handle AdjustPercentage: apply percentageDelta, clamped to 0-100."""
        self._adjustdirective(request, 'percentage', 'percentageDelta', range(101))


class PlaybackController(Interface):
    """Alexa.PlaybackController interface (stub)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(PlaybackController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)

    # Requires special discovery logic.
    # Receives player-state events and must command the matching device action.
    # The response is a generic message; the documented example shows a
    # context object whose properties are empty.
class PowerController(Interface):
    """Alexa.PowerController: exposes and mutates the 'powerState' property."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(PowerController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        state = Interface.Property('powerState')
        self.properties = Interface.Properties(
            self.interface,
            [state],
            proactivelyReported=proactivelyReported,
            retrievable=retrievable,
        )

    def TurnOn(self, request):
        """Handle a TurnOn directive."""
        self['powerState'] = ('ON', get_utc_timestamp(), self.uncertaintyInMilliseconds)

    def TurnOff(self, request):
        """Handle a TurnOff directive."""
        self['powerState'] = ('OFF', get_utc_timestamp(), self.uncertaintyInMilliseconds)
class PowerLevelController(Interface):
    """Alexa.PowerLevelController interface (0-100 power level)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(PowerLevelController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('powerLevel')], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    def SetPowerLevel(self, request):
        """Handle SetPowerLevel: store the payload value, clamped to 0-100."""
        self._setdirective(request, 'powerLevel', 'powerLevel', range(101))

    def AdjustPowerLevel(self, request):
        """Handle AdjustPowerLevel: apply powerLevelDelta, clamped to 0-100."""
        self._adjustdirective(request, 'powerLevel', 'powerLevelDelta', range(101))


class SceneController(Interface):
    """Alexa.SceneController interface (scene activation/deactivation)."""

    def __init__(self, thing=None, proactivelyReported=False, supportsDeactivation=False, *args, **kwargs):
        super(SceneController, self).__init__(thing=thing)
        self.proactivelyReported = proactivelyReported
        self.supportsDeactivation = supportsDeactivation

    @property
    def jsonDiscover(self):
        # Scene discovery reports supportsDeactivation instead of properties.
        return { 'type':'AlexaInterface', 'interface':self.interface, 'version': self.version, 'supportsDeactivation': self.supportsDeactivation, 'proactivelyReported': self.proactivelyReported }
class StepSpeaker(Interface):
    """Alexa.StepSpeaker interface (relative-only volume control)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(StepSpeaker, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)

    # Assumes iot['volume'] tells the speaker how much to change the volume by.
    def AdjustVolume(self, request):
        if self.iot:
            v = self.iot['volume']+request.payload['volumeSteps']
            validRange = range(101)
            # Clamp to 0-100.
            v = v if v in validRange else validRange[0] if v < validRange[0] else validRange[-1]
            self.iot['volume'] = v

    def SetMute(self, request):
        if self.iot:
            self.iot['muted'] = request.payload['mute']


class Speaker(Interface):
    """Alexa.Speaker interface (absolute volume plus mute)."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(Speaker, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('volume'), Interface.Property('muted') ], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    def SetVolume(self, request):
        """Handle SetVolume: store the payload value, clamped to 0-100."""
        self._setdirective(request, 'volume', 'volume', range(101))

    def AdjustVolume(self, request):
        # NOTE(review): reads the delta from payload key 'volume' — verify
        # this matches the directive's actual payload field name.
        self._adjustdirective(request, 'volume', 'volume', range(101))

    def SetMute(self, request):
        self['muted'] = (request.payload['mute'], get_utc_timestamp(), self.uncertaintyInMilliseconds)


class TemperatureSensor(Interface):
    """Alexa.TemperatureSensor interface (reports 'temperature')."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        super(TemperatureSensor, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        self.properties = \
            Interface.Properties(self.interface, [ Interface.Property('temperature') ], \
                proactivelyReported=proactivelyReported, retrievable=retrievable)
class ThermostatController(Interface):
    """Alexa.ThermostatController interface.

    thermostatType selects the setpoint scheme:
    'single' -> targetSetpoint; 'dual' -> lower/upperSetpoint;
    anything else -> all three setpoints.
    """

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, thermostatType='single', *args, **kwargs):
        super(ThermostatController, self).__init__(thing=thing, uncertaintyInMilliseconds=uncertaintyInMilliseconds)
        prop_list = [ Interface.Property('thermostatMode') ]
        if thermostatType.lower() == 'single':
            prop_list.append( Interface.Property('targetSetpoint'))
        elif thermostatType.lower() == 'dual':
            prop_list.append( Interface.Property('lowerSetpoint'))
            prop_list.append( Interface.Property('upperSetpoint'))
        else:
            prop_list.append( Interface.Property('targetSetpoint'))
            prop_list.append( Interface.Property('lowerSetpoint'))
            prop_list.append( Interface.Property('upperSetpoint'))
        self.properties = \
            Interface.Properties(self.interface, prop_list, \
                proactivelyReported=proactivelyReported, retrievable=retrievable)

    def SetTargetTemperature(self, request):
        """Store whichever setpoints the directive payload carries."""
        if 'targetSetpoint' in request.payload:
            self['targetSetpoint'] = (request.payload['targetSetpoint'], get_utc_timestamp(), self.uncertaintyInMilliseconds)
        if 'lowerSetpoint' in request.payload:
            self['lowerSetpoint'] = (request.payload['lowerSetpoint'], get_utc_timestamp(), self.uncertaintyInMilliseconds)
        if 'upperSetpoint' in request.payload:
            self['upperSetpoint'] = (request.payload['upperSetpoint'], get_utc_timestamp(), self.uncertaintyInMilliseconds)

    # Documentation only shows targetSetpoint being adjusted; behavior for
    # dual-mode thermostats and the valid range are unclear.
    # NOTE(review): Temperature and ThermostatMode are not imported in this
    # module — confirm they are meant to come from the .objects module.
    def AdjustTargetTemperature(self, request):
        tsp = Temperature(request.payload['targetSetpointDelta'])
        tsp.value = self['targetSetpoint'].value + tsp.value
        self['targetSetpoint'] = (tsp, get_utc_timestamp(), self.uncertaintyInMilliseconds)

    def SetThermostatMode(self, request):
        tm = ThermostatMode(request.payload['thermostatMode'])
        self['thermostatMode'] = (tm, get_utc_timestamp(), self.uncertaintyInMilliseconds)


class ThermostatControllerSingle(ThermostatController):
    """Convenience subclass: single-setpoint thermostat."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        # Report under the base ThermostatController interface name.
        self.interface = 'Alexa.'+'ThermostatController'
        super(ThermostatControllerSingle, self).__init__(thing=thing, proactivelyReported=proactivelyReported, retrievable=retrievable, uncertaintyInMilliseconds=uncertaintyInMilliseconds, thermostatType='single')


class ThermostatControllerDual(ThermostatController):
    """Convenience subclass: dual-setpoint (lower/upper) thermostat."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        self.interface = 'Alexa.'+'ThermostatController'
        super(ThermostatControllerDual, self).__init__(thing=thing, proactivelyReported=proactivelyReported, retrievable=retrievable, uncertaintyInMilliseconds=uncertaintyInMilliseconds, thermostatType='dual')


class ThermostatControllerTriple(ThermostatController):
    """Convenience subclass: thermostat exposing all three setpoints."""

    def __init__(self, thing=None, proactivelyReported=False, retrievable=False, uncertaintyInMilliseconds=0, *args, **kwargs):
        self.interface = 'Alexa.'+'ThermostatController'
        super(ThermostatControllerTriple, self).__init__(thing=thing, proactivelyReported=proactivelyReported, retrievable=retrievable, uncertaintyInMilliseconds=uncertaintyInMilliseconds, thermostatType='triple')
def getInterfaceClass(interface):
if interface[0:6] == 'Alexa.':
interface = interface[6:]
ret = {
'BrightnessController': BrightnessController,
'Calendar': Calendar,
'CameraStreamController': CameraStreamController,
'ChannelController': ChannelController,
'ColorController': ColorController,
'ColorTemperatureController': ColorTemperatureController,
'EndpointHealth': EndpointHealth,
'InputController': InputController,
'LockController': LockController,
'MeetingClientController': MeetingClientController,
'PercentageController': PercentageController,
'PlaybackController': PlaybackController,
'PowerController': PowerController,
'PowerLevelController': PowerLevelController,
'SceneController': SceneController,
'StepSpeaker': StepSpeaker,
'Speaker': Speaker,
'TemperatureSensor': TemperatureSensor,
'ThermostatController': ThermostatController
}.get(interface, None)
if ret:
return ret
raise INVALID_DIRECTIVE('{0} is not a valid interface'.format(interface))
| 52.872197 | 221 | 0.713329 | 2,113 | 23,581 | 7.823 | 0.136772 | 0.029643 | 0.016636 | 0.020327 | 0.529946 | 0.444041 | 0.375923 | 0.370901 | 0.358863 | 0.338476 | 0 | 0.007738 | 0.188923 | 23,581 | 445 | 222 | 52.991011 | 0.85653 | 0.048217 | 0 | 0.335277 | 0 | 0 | 0.074649 | 0.012932 | 0.008746 | 0 | 0 | 0 | 0 | 1 | 0.198251 | false | 0 | 0.011662 | 0.014577 | 0.338192 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bb22f0b235ccbd5403faa7e90ca5e580a0c0f77 | 1,696 | py | Python | Invisible Pixels/invisible_pixel.py | Yash-Vekaria/Web-Tracking-in-Indian-Partisan-News-Websites | 9fac25ed364b823d13d25346dbaef3042fa459fb | [
"MIT"
] | 1 | 2021-01-31T17:32:38.000Z | 2021-01-31T17:32:38.000Z | Invisible Pixels/invisible_pixel.py | vibhor98/Web-Tracking-in-Indian-Partisan-News-Websites | f68c4ae011a499c0519bed0b0cb953a12f438902 | [
"MIT"
] | null | null | null | Invisible Pixels/invisible_pixel.py | vibhor98/Web-Tracking-in-Indian-Partisan-News-Websites | f68c4ae011a499c0519bed0b0cb953a12f438902 | [
"MIT"
] | 2 | 2021-01-31T16:48:13.000Z | 2021-05-28T15:33:48.000Z | import sqlite3
import json
import pandas as pd
import numpy as np
homepages = list(pd.read_csv('news_homepage_urls.csv')['news_url'])
conn = sqlite3.connect('./topics_url_crawls/topics_url_crawl4/crawl-data_all_topics4.sqlite')
c = conn.cursor()
res = [['image_url', 'content_length', 'req_id', 'visit_id', 'news_site']]
image_count = 0
total_res = 0
total_imgs = 0
homepage_visit_ids = {}
for row in c.execute('SELECT visit_id, site_url FROM site_visits'):
if row[1] in homepages:
homepage_visit_ids[row[0]] = row[1]
for row in c.execute('SELECT hres.url, hres.headers, hres.request_id, hres.visit_id FROM http_responses AS hres'):
if row[3] in homepage_visit_ids:
total_res += 1
header = json.loads(row[1])
both_present = 0
content_len = 0
for tup in header:
if tup[0].lower() == 'content-type':
if tup[1].startswith('image'):
total_imgs += 1
both_present += 1
elif tup[0].lower() == 'content-length':
if int(tup[1]) <= 100000:
both_present += 1
content_len = int(tup[1])
if both_present == 2:
res.append([row[0], content_len, row[2], row[3], homepage_visit_ids[row[3]]])
image_count += 1
if image_count % 100 == 0:
print(image_count, 'images have been crawled.')
break
df = pd.DataFrame(res)
df.to_csv('invisible_pixel_100kb.csv', index=False)
conn.close()
print('Total crawled images:', image_count)
print('Total images in homepages:', total_imgs)
print('Total responses in homepages:', total_res)
| 34.612245 | 114 | 0.608491 | 240 | 1,696 | 4.091667 | 0.358333 | 0.050917 | 0.065173 | 0.01833 | 0.044807 | 0.044807 | 0 | 0 | 0 | 0 | 0 | 0.033735 | 0.26592 | 1,696 | 48 | 115 | 35.333333 | 0.75502 | 0 | 0 | 0.047619 | 0 | 0 | 0.254127 | 0.067217 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bb98a7d05b2fa13ccaa82a80ac1056091559328 | 1,844 | py | Python | loaders/codeforces_parser.py | pandrei7/cf-tests | 4211d2f8c94730a63ceb960cfe487d9d13f9596b | [
"MIT"
] | null | null | null | loaders/codeforces_parser.py | pandrei7/cf-tests | 4211d2f8c94730a63ceb960cfe487d9d13f9596b | [
"MIT"
] | 2 | 2020-04-28T16:50:43.000Z | 2020-09-13T10:36:08.000Z | loaders/codeforces_parser.py | pandrei7/cf-tests | 4211d2f8c94730a63ceb960cfe487d9d13f9596b | [
"MIT"
] | null | null | null | from html.parser import HTMLParser
from .test_data import TestData
class CodeforcesParser(HTMLParser):
"""A class which can parse Codeforces webpages for testcase data."""
def __init__(self):
HTMLParser.__init__(self)
self.in_input = False
self.in_output = False
self.in_pre = False
self.curr_input = ''
self.curr_output = ''
self.tests = []
def add_current_test(self):
# Newer Codeforces tests start with a newline. Remove it if it exists.
input_data = self.curr_input[self.curr_input.find('\n', 0, 1) + 1:]
output_data = self.curr_output[self.curr_output.find('\n', 0, 1) + 1:]
curr_test = TestData(input_data, output_data)
self.tests.append(curr_test)
self.curr_input = ''
self.curr_output = ''
def handle_starttag(self, tag, attrs):
if tag == 'div' and ('class', 'input') in attrs:
self.in_input = True
self.in_output = False
elif tag == 'div' and ('class', 'output') in attrs:
self.in_output = True
self.in_input = False
elif tag == 'pre':
self.in_pre = True
def handle_data(self, data):
if self.in_pre:
if self.in_input:
self.curr_input = data
elif self.in_output:
self.curr_output = data
def handle_endtag(self, tag):
if tag == 'pre':
self.in_pre = False
if self.in_input:
self.in_input = False
elif self.in_output:
self.in_output = False
self.add_current_test()
def get_tests(self, html):
"""Parses the given HTML code and returns a list of testcases."""
self.tests.clear()
self.feed(self.clean_html(html))
return self.tests
def clean_html(self, html):
"""Prepares HTML code to be parsed."""
return (html.replace('<br />', '\n')
.replace('<br/>', '\n')
.replace('<br>', '\n'))
| 28.8125 | 74 | 0.625813 | 262 | 1,844 | 4.209924 | 0.270992 | 0.087035 | 0.059837 | 0.043518 | 0.267452 | 0.076156 | 0 | 0 | 0 | 0 | 0 | 0.00431 | 0.245119 | 1,844 | 63 | 75 | 29.269841 | 0.788075 | 0.122017 | 0 | 0.326531 | 0 | 0 | 0.036182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.040816 | 0 | 0.244898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bbb4aba06003d96ae6df55d6da4fe272ec4958a | 10,644 | py | Python | src/nonogram_solver/nonogram.py | MikeJongen/nonogram-solver | b174fe6c693c1ace3918bf19a9110165a5b42db9 | [
"MIT"
] | null | null | null | src/nonogram_solver/nonogram.py | MikeJongen/nonogram-solver | b174fe6c693c1ace3918bf19a9110165a5b42db9 | [
"MIT"
] | null | null | null | src/nonogram_solver/nonogram.py | MikeJongen/nonogram-solver | b174fe6c693c1ace3918bf19a9110165a5b42db9 | [
"MIT"
] | null | null | null | import json
from nonogram_solver.error import LengthError, ClueError, AxisError, SetSolutionError
class Nonogram:
"""Main nonogram class
Used to hold the data of the nonogram.
Also has some basic creation and helper functions.
"""
printable_values = {0: " ",
-1: "..",
1: "XX"}
def __init__(self, size_x=0, size_y=0, file=None):
"""
Initializes using file or puzzle size
"""
self.clues = dict()
self.size = dict()
if file is None:
# Create empty Nonogram
self.size["x"] = size_x
self.size["y"] = size_y
self.solution = [[0 for y in range(self.size["y"])]
for x in range(self.size["x"])]
self.clues["x"] = [[] for x in range(self.size["y"])]
self.clues["y"] = [[] for y in range(self.size["x"])]
else:
# Load from file
self.load(file)
self.init_row_solvers()
def init_row_solvers(self, solver_class=None):
if None is solver_class:
# Use default
solver_class = Row
self.row_solver = dict()
self.row_solver["x"] = []
self.row_solver["y"] = []
for index in range(self.size["y"]):
self.row_solver["x"].append(solver_class(
*self.get_clue_solution_pair("x", index)))
for index in range(self.size["x"]):
self.row_solver["y"].append(solver_class(
*self.get_clue_solution_pair("y", index)))
def set_clues_x(self, *clues):
"""
Sets clues of for the rows
*clues : list
list of clues, where every clue is a list of ints
"""
return self.set_clues("x", *clues)
def set_clues_y(self, *clues):
"""
Sets clues of for the columns
*clues : list
list of clues, where every clue is a list of ints
"""
return self.set_clues("y", *clues)
def set_clues(self, input_axis, *clues):
"""
Sets clues
input_axis : string
"x" if clues are for rows
"y" if clues are for columns
*clues : list
list of clues, where every clue is a list of ints
"""
if len(clues) != self.size[self._other_axis(input_axis)]:
raise LengthError
for index, clue in enumerate(clues):
min_length_clue = sum(clue) + len(clue) - 1
if(min_length_clue > self.size[input_axis]):
raise ClueError
self.clues[input_axis][index] = clue
self.init_row_solvers()
def get_clue_solution_pair(self, axis, row_index):
clues = self.clues[axis][row_index]
values = self._get_solution_row(axis, row_index)
return (clues, values)
def solve_single_iteration(self, solver_function):
"""
Tries to solve all rows a single time, using given solver function.
Returns True if anything was changed to the solution.
"""
total_change = False
for axis in self.row_solver:
for index, row_solver in enumerate(self.row_solver[axis]):
row_changed = solver_function(row_solver)
if row_changed:
self._set_solution_row(axis, index, row_solver.values)
total_change = True
return total_change
def update_row_solvers(self):
"""
Updates all row solvers with the overall solution, so rows get info from columns
and vice versa. Does not check if new info fits with old info, and does not update
completed rows.
"""
for axis in self.row_solver:
for index, row_solver in enumerate(self.row_solver[axis]):
if row_solver.solved:
# don't waste time updating completed rows
continue
row_solver.update_values(self._get_solution_row(axis, index))
def is_complete(self) -> bool:
"""
Checks if puzzle is completely filled in (no unknown values left)
"""
result = all(x != 0 for row in self.solution for x in row)
return result
def percent_complete(self) -> float:
"""
Returns percentage filled in cells / total cells
"""
filled_elements = self.cells_known()
percent_complete = (filled_elements / self.total_cells()) * 100
return percent_complete
def total_cells(self):
return self.size["x"] * self.size["y"]
def cells_known(self):
"""
Returns total amoutn of cells where the value (filled/blank) is known.
"""
empty_elements = 0
for row in self.solution:
for element in row:
if element == 0:
empty_elements += 1
return self.total_cells() - empty_elements
def is_correct(self) -> bool:
"""
Checks if puzzle solution fits the clues
"""
correct = True
for row_index in range(self.size["y"]):
row = Row(*self.get_clue_solution_pair("x", row_index))
if not row.is_correct():
correct = False
for col_index in range(self.size["x"]):
row = Row(*self.get_clue_solution_pair("y", col_index))
if not row.is_correct():
correct = False
return correct
def reset_solution(self):
self.solution = [[0 for y in range(self.size["y"])]
for x in range(self.size["x"])]
if self.row_solver is not None:
for solver in self.row_solver["y"]:
solver.reset()
for solver in self.row_solver["x"]:
solver.reset()
def print_solution(self):
top_row = "+" + self.size["x"] * "--+"
print(top_row)
for y in range(self.size["y"]):
row = "|"
for x in range(self.size["x"]):
row += f"{self.printable_values[self.solution[x][y]]}|"
print(row)
print(top_row)
def print_clues(self):
print("Horizontal clues:")
for clue in self.clues["x"]:
print(clue)
print("Vertical clues:")
for clue in self.clues["y"]:
print(clue)
def save(self, file, only_clues=False):
file = open(file, 'w')
if only_clues:
data = {"clues": self.clues}
else:
data = {"clues": self.clues,
"solution": self.solution}
json.dump(data, file)
file.close()
def load(self, file):
file = open(file, 'r')
data = json.load(file)
self.clues = data["clues"]
self.size["y"] = len(self.clues["x"])
self.size["x"] = len(self.clues["y"])
if "solution" in data:
self.solution = data["solution"]
else:
self.solution = [[0 for y in range(self.size["y"])]
for x in range(self.size["x"])]
file.close()
def _other_axis(self, axis):
if axis == "x":
return "y"
elif axis == "y":
return "x"
else:
raise AxisError
def _set_solution_row(self, axis, row_index, solution_row,
forced=False):
"""
sets row/column to new value.
solution_row : list
list of int with new values.
forced : bool
if True, overwrite old values.
"""
if axis == "y":
for index, value in enumerate(solution_row):
self._set_solution_value(row_index, index, value,
forced)
elif axis == "x":
for index, value in enumerate(solution_row):
self._set_solution_value(index, row_index, value,
forced)
else:
raise AxisError
def _set_solution_value(self, x, y, new, forced=False):
"""
Sets the value of cell [x, y] to new.
Only sets value if previous value was empty (unless forced == True).
"""
value = self.solution[x][y]
if forced:
self.solution[x][y] = new
else:
if value == 0:
self.solution[x][y] = new
elif ((value, new) == (-1, 1)) or ((value, new) == (1, -1)):
raise SetSolutionError
def _get_solution_row(self, axis, row_index) -> list:
"""
Get a copy of a row/column of the solution
"""
if axis == "y":
row = []
for value in self.solution[row_index]:
row.append(value)
return row
elif axis == "x":
row = []
for value in self.solution:
row.append(value[row_index])
return row
else:
raise AxisError
class Row:
"""
Support class
used for data/function for a single row/column
"""
def __init__(self, clues, values):
self.clues = clues
self.values = values
self.size = len(self.values)
self.solved = False
self.clue_size = sum(self.clues) + len(self.clues) - 1
def _reconstruct_clues(self) -> list:
"""
Get clue list, created from the current state of the row
Interprets empty cells as blank
"""
clues = []
current_clue = 0
for element in self.values:
if element == 1:
current_clue += 1
elif current_clue != 0:
clues.append(current_clue)
current_clue = 0
if current_clue != 0:
clues.append(current_clue)
return clues
def is_complete(self) -> bool:
"""
Checks if row is completely filled in (no unknown values left)
"""
result = all(value != 0 for value in self.values)
return result
def is_correct(self) -> bool:
"""
Checks if row solution fits the clues
Non complete solution is interpreted as incorrect
"""
if not self.is_complete():
return False
reconstructed_clue = self._reconstruct_clues()
if reconstructed_clue != self.clues:
return False
return True
def update_values(self, values):
self.values = values
if self.is_complete():
self.solved = True
def reset(self):
self.values = [0] * self.size
self.solved = False
| 31.868263 | 90 | 0.530252 | 1,297 | 10,644 | 4.215883 | 0.136469 | 0.039503 | 0.028164 | 0.038405 | 0.335955 | 0.308888 | 0.232992 | 0.153255 | 0.126189 | 0.126189 | 0 | 0.004444 | 0.36584 | 10,644 | 333 | 91 | 31.963964 | 0.80563 | 0.160936 | 0 | 0.301887 | 0 | 0 | 0.021027 | 0.005376 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132075 | false | 0 | 0.009434 | 0.004717 | 0.240566 | 0.051887 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bbe278b496ce085135269ff9461df76d529a0fd | 2,468 | py | Python | gromacs_wrapper/make_ndx.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 3 | 2020-02-17T11:11:08.000Z | 2021-12-03T18:54:47.000Z | gromacs_wrapper/make_ndx.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 1 | 2019-12-05T15:32:50.000Z | 2019-12-10T16:13:08.000Z | gromacs_wrapper/make_ndx.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 2 | 2019-09-26T20:21:14.000Z | 2021-07-10T04:37:31.000Z | #!/usr/bin/env python
"""Python wrapper module for the GROMACS make_ndx module
"""
import sys
import json
from command_wrapper import cmd_wrapper
import configuration.settings as settings
from tools import file_utils as fu
class MakeNdx(object):
"""Wrapper for the 5.1.2 version of the make_ndx module
Args:
input_structure_path (str): Path to the input GRO/PDB/TPR file.
output_ndx_path (str): Path to the output index NDX file.
properties (dic):
selection (str): Atom selection string.
"""
def __init__(self, input_structure_path, output_ndx_path, properties, **kwargs):
if isinstance(properties, basestring):
properties=json.loads(properties)
self.input_structure_path = input_structure_path
self.output_ndx_path = output_ndx_path
self.selection = properties.get('selection', None)
self.gmx_path = properties.get('gmx_path',None)
self.mutation = properties.get('mutation',None)
self.step = properties.get('step',None)
self.path = properties.get('path','')
self.mpirun = properties.get('mpirun', False)
self.mpirun_np = properties.get('mpirun_np', None)
def launch(self):
"""Launches the execution of the GROMACS editconf module.
"""
out_log, err_log = fu.get_logs(path=self.path, mutation=self.mutation, step=self.step)
gmx = 'gmx' if self.gmx_path is None else self.gmx_path
cmd = [gmx, 'make_ndx', '-f', self.input_structure_path,
'-o', self.output_ndx_path]
if self.mpirun_np is not None:
cmd.insert(0, str(self.mpirun_np))
cmd.insert(0, '-np')
if self.mpirun:
cmd.insert(0, 'mpirun')
if self.mpirun:
cmd.append('<<<')
cmd.append('\"'+self.selection+'\"')
else:
cmd.insert(0, '|')
cmd.insert(0, '\"'+self.selection+'\"')
cmd.insert(0, 'echo')
command = cmd_wrapper.CmdWrapper(cmd, out_log, err_log)
return command.launch()
#Creating a main function to be compatible with CWL
def main():
system=sys.argv[1]
step=sys.argv[2]
properties_file=sys.argv[3]
prop = settings.YamlReader(properties_file, system).get_prop_dic()[step]
Editconf(input_structure_path=sys.argv[4],
output_ndx_path=sys.argv[5],
properties=prop).launch()
if __name__ == '__main__':
main()
| 35.257143 | 94 | 0.633712 | 325 | 2,468 | 4.627692 | 0.301538 | 0.060505 | 0.071809 | 0.043883 | 0.021277 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007507 | 0.244327 | 2,468 | 69 | 95 | 35.768116 | 0.798928 | 0.181118 | 0 | 0.042553 | 0 | 0 | 0.048534 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.106383 | 0 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bbf71442072bd282493639805a17b1c960bf32f | 5,866 | py | Python | Installer.py | LordOfPolls/Helix3 | 7ad62ce8dd831a31d846457cfba98807f9ba6c19 | [
"MIT"
] | null | null | null | Installer.py | LordOfPolls/Helix3 | 7ad62ce8dd831a31d846457cfba98807f9ba6c19 | [
"MIT"
] | 40 | 2018-06-18T13:43:42.000Z | 2018-07-24T19:24:57.000Z | Installer.py | LordOfPolls/Helix3 | 7ad62ce8dd831a31d846457cfba98807f9ba6c19 | [
"MIT"
] | 2 | 2018-07-17T20:47:23.000Z | 2018-07-27T17:42:53.000Z | from __future__ import print_function
import os
import sys
import time
import traceback
import subprocess
from shutil import disk_usage, rmtree
from base64 import b64decode
pipWorking = False
gitWorking = False
def clear():
os.system('cls' if os.name == 'nt' else 'clear')
def checkGit():
global gitWorking
try:
gitWorking = bool(subprocess.check_output('git --version', shell=True))
except:
gitWorking = False
class PIP():
def __init__(self):
global pipWorking
global gitWorking
self.pipWorking = pipWorking
self.gitWorking = gitWorking
def checkPIP(self):
extraInfo = ""
try:
import pip
self.pipWorking = True
extraInfo = "normally"
except ImportError:
self.pipWorking = False
self.installPIP()
if os.name != "nt" and self.python_m("--version") is None:
try:
print("\033[41mI will install it dammit >:(\033[0m")
print("Updating apt...")
subprocess.run("apt-get update", shell=True)
print("\033[42mAttempting to install pip ^-^\033[0m")
subprocess.run("apt-get install python3-pip", shell=True)
if self.python_m("--version") is None:
raise Exception
return
except:
clear()
print("Im sorry, i cant install pip myself ;-;")
print("Please google how to install pip on your OS")
time.sleep(10)
exit()
else:
try:
import pip
self.pipWorking = True
except:
self.pipWorking = False
print("Unable to use pip module, testing python -m pip")
if self.python_m("--version") is None:
print("PIP is not working on this machine, sorry")
print("Try installing it manually")
time.sleep(10)
exit()
else:
extraInfo = "via command line only"
sys.stdout.write("PIP is working " + extraInfo + "\n")
return True
def installPIP(self):
clear()
sys.stderr.write("PIP isnt installed, installing\n")
try:
import urllib.request
except ImportError:
sys.stderr.write("Unable to download pip\nPlease follow this guide: https://www.makeuseof.com/tag/install-pip-for-python/\n")
time.sleep(10)
exit()
urllib.request.urlretrieve("https://bootstrap.pypa.io/get-pip.py", "get-pip.py")
subprocess.run(sys.executable + " get-pip.py", shell=True)
clear()
os.unlink("get-pip.py")
@staticmethod
def python_m(*args, **kwargs):
try:
return str(subprocess.check_output([sys.executable, '-m', 'pip'] + list(args)))
except subprocess.CalledProcessError:
return None
def install(self, module):
command = "install %s" % module
self.python_m(*command.split())
@staticmethod
def getRequirements(file="requirements.txt"):
lineiter = (line.strip() for line in open(file))
return [line for line in lineiter if line and not line.startswith("#")]
def main():
print("Making sure python is running at the correct version")
if sys.version_info < (3, 5):
print("Helix doesnt support any version of python below 3.5, please use that 3.5 or higher")
time.sleep(5)
exit()
checkGit()
PIP().checkPIP()
if not gitWorking:
print("Unable to use git, please install git shell")
print("https://git-scm.com/book/en/v2/Getting-Started-Installing-Git")
if not os.path.exists("code"):
requirementsDir = "Helix3/requirements.txt"
pull = False
else:
pull = True
requirementsDir = "requirements.txt"
clear()
if gitWorking:
if not pull:
if os.path.exists("Helix3"):
print("Error, helix3 directory already exists, please run Installer.py from that location")
time.sleep(3)
else:
print("Downloading Helix")
subprocess.run("git clone https://github.com/LordOfPolls/Helix3.git")
else:
print("Updating Helix")
y_n = input("Would you like to overwrite any local changes?")
if "y" in y_n.lower():
subprocess.run("git fetch --all", shell=True)
subprocess.run("git reset --hard", shell=True)
else:
subprocess.run("git pull", shell=True)
clear()
requirements = PIP.getRequirements(requirementsDir)
print("Installing {} modules from {}\nMSG from the dev: This can take a **really** long time\n i suggest brewing a nice cup of tea\n\n".format(len(requirements), requirementsDir))
for module in requirements:
clear()
print(
"Installing {} modules from {}\nMSG from the dev: This can take a **really** long time\n i suggest brewing a nice cup of tea\n\n".format(
len(requirements), requirementsDir))
print("Installing {}".format(module))
try:
subprocess.call("pip install {} --no-cache-dir --upgrade".format(module), shell=True)
except subprocess.CalledProcessError as e:
print("Unable to install {}\n{}".format(module, e.returncode))
time.sleep(10)
exit()
clear()
print("All modules installed")
print("Helix should now be usable")
print("Press any 'enter' to quit")
input()
if __name__ == '__main__':
main() | 36.434783 | 183 | 0.56103 | 663 | 5,866 | 4.921569 | 0.334842 | 0.022066 | 0.013485 | 0.018388 | 0.134845 | 0.1232 | 0.097456 | 0.08152 | 0.08152 | 0.08152 | 0 | 0.01121 | 0.33089 | 5,866 | 161 | 184 | 36.434783 | 0.820127 | 0 | 0 | 0.326531 | 0 | 0.027211 | 0.276291 | 0.00392 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061224 | false | 0 | 0.088435 | 0 | 0.190476 | 0.156463 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bc2d93e8d13d4e3616e50c69adba5724129cd36 | 280 | py | Python | src/nfunctions/templatetags/custom_tags.py | AnclaTech/minify | f986353b04687c39b6b90b65bfec61edf838f7a4 | [
"Apache-2.0"
] | 1 | 2019-11-05T18:28:22.000Z | 2019-11-05T18:28:22.000Z | src/nfunctions/templatetags/custom_tags.py | AnclaTech/minify | f986353b04687c39b6b90b65bfec61edf838f7a4 | [
"Apache-2.0"
] | 9 | 2019-12-05T01:04:48.000Z | 2021-06-10T19:26:06.000Z | src/nfunctions/templatetags/custom_tags.py | AnclaTech/minify | f986353b04687c39b6b90b65bfec61edf838f7a4 | [
"Apache-2.0"
] | 1 | 2020-09-24T22:21:14.000Z | 2020-09-24T22:21:14.000Z | from django import template
register = template.Library()
from ..models import Notification
@register.simple_tag
def unread_notification(user):
no = Notification.objects.filter(user=user, read=False).count()
if no >= 1:
return True
else:
return False | 23.333333 | 67 | 0.707143 | 35 | 280 | 5.6 | 0.685714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004464 | 0.2 | 280 | 12 | 68 | 23.333333 | 0.870536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bc2f3579b936e5347d7069c68b383a1d2e7b783 | 499 | py | Python | tests/io/test_fred.py | entelecheia/eKorpKit | 9521ae4c4749419fa2b088d1b9e518e5927b7cb8 | [
"CC-BY-4.0"
] | 2 | 2021-09-15T04:12:07.000Z | 2022-02-04T02:44:09.000Z | tests/io/test_fred.py | entelecheia/eKorpKit | 9521ae4c4749419fa2b088d1b9e518e5927b7cb8 | [
"CC-BY-4.0"
] | null | null | null | tests/io/test_fred.py | entelecheia/eKorpKit | 9521ae4c4749419fa2b088d1b9e518e5927b7cb8 | [
"CC-BY-4.0"
] | null | null | null | import pytest
from ekorpkit import eKonf
@pytest.mark.local()
def test_fred():
cfg = eKonf.compose(config_group="io/fetcher=quandl")
cfg.series_name = "DFEDTAR"
cfg.series_id = ["DFEDTAR", "DFEDTARU"]
cfg.force_download = True
fred = eKonf.instantiate(cfg)
cfg = eKonf.compose(config_group="visualize/plot=lineplot")
cfg.plots[0].y = "DFEDTAR"
cfg.figure.figsize = (15, 8)
cfg.ax.title = "Fed Rate"
eKonf.instantiate(cfg, data=fred.data)
assert True
| 23.761905 | 63 | 0.677355 | 68 | 499 | 4.882353 | 0.617647 | 0.048193 | 0.090361 | 0.126506 | 0.156627 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009828 | 0.184369 | 499 | 20 | 64 | 24.95 | 0.805897 | 0 | 0 | 0 | 0 | 0 | 0.154309 | 0.046092 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bc30fd5ccd085f6e33f22005fb27c74cb901846 | 16,111 | py | Python | app/main.py | Kyle-L/Revenants | 003d79b7b8141442a998e191dc9855f7d1e33183 | [
"MIT"
] | null | null | null | app/main.py | Kyle-L/Revenants | 003d79b7b8141442a998e191dc9855f7d1e33183 | [
"MIT"
] | 5 | 2021-07-24T15:11:28.000Z | 2021-07-24T15:11:49.000Z | app/main.py | Kyle-L/Revenants | 003d79b7b8141442a998e191dc9855f7d1e33183 | [
"MIT"
] | null | null | null | import os
import operator
from flask import Blueprint, redirect, render_template, url_for, request, session
from flask_socketio import SocketIO, emit, join_room, leave_room
from . import socketio
from .helper import *
from .generator import generate_room_code
from .database import *
MIN_PLAYER_COUNT = 4
app = Blueprint('main', __name__)
@app.route('/how-to-play')
def how_to_play():
return render_template('how-to-play.html')
@app.route('/how-to-play/roles')
def roles():
return render_template('roles.html')
@app.route('/license')
def license():
return render_template('license.html')
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
if request.form['action'] == 'host':
session['name'] = request.form.get('name').upper()
session['room'] = generate_room_code(4).upper()
elif request.form['action'] == 'join':
name = request.form.get('name').upper()
room = request.form.get('room').upper()
# If there is already a player with that name, redirect to index.
state = get_room_state(room)
if get_player_from_name(name, room) or (state != 'n/a' and state != 'lobby'):
return render_template('index.html', error_message='blah')
session['name'] = name
session['room'] = room
return redirect(url_for('.game'))
return render_template('index.html')
@app.route('/game')
def game():
name = session.get('name', '')
room = session.get('room', '')
if name == '' or room == '':
return redirect(url_for('.index'))
# If there is already a player with that name, redirect to index.
state = get_room_state(room)
if get_player_from_name(name, room) or (state != 'n/a' and state != 'lobby'):
return redirect(url_for('.index'))
return render_template('game.html', name=name, room=room)
@socketio.on('join')
def joined(data):
""" Called when a client connects.
"""
# Gets the player's name and room.
name = session.get('name')
room = session.get('room')
# Adds a player to the socketio room session.
join_room(room)
# Adds a player from the database.
player_join(request.sid, name, room)
print(f'[{room}] {name} joined.')
emit('update_players', {
'players': get_players_string_lobby(room)}, room=room)
@socketio.on('disconnect')
def left():
""" Called when a client disconnects.
"""
# Gets the player's name and room.
name = session.get('name')
room = session.get('room')
player = get_player(request.sid)
# Removes a player to the socketio room session.
leave_room(room)
# Removes a player from the database.
player_leave(request.sid)
print(f'[{room}] {name} left.')
emit('update_players', {
'players': get_players_string_lobby(room)}, room=room)
if len(get_players(room)) < MIN_PLAYER_COUNT and len(get_players(room)) > 0 and get_room_state(room) != 'lobby':
return_to_lobby(room)
@socketio.on('ready')
def ready(data):
""" Called when a client clicks the ready button.
Args:
data (JSON): The data coming from the players when they ready up.
"""
# Gets the player's name and room.
name = session.get('name')
room = session.get('room')
player_ready(request.sid)
state = get_room_state(room)
# Updates ready status if in the lobby.
if state == 'lobby':
emit('update_players', {'players': get_players_string_lobby(room)}, room=room)
# Updates chosen player if in day/night state.
elif state == 'day' or state == 'night':
if data['chosen_player']:
update_player_chosen_player(request.sid, data['chosen_player'])
emit('update_ready', {'players': get_ready_count_string( room) + ' are ready.'}, room=room)
# Updates the ready count string so the player knows how many people need to ready up.
emit('update_ready', {'players': get_ready_count_string(room) + ' are ready.'}, room=room)
# Transition to the next state if everyone is ready.
if is_room_ready(room) and len(get_players(room)) >= MIN_PLAYER_COUNT:
unready_all_players(room)
# Determines the next state based on the current state.
if state == 'lobby':
start_setup(room)
elif state == 'setup':
start_round(room, 'Night', False)
elif state == 'night':
process_choices_night(room)
elif state == 'night-results':
start_round(room, 'Day', True)
elif state == 'day':
process_choices_day(room)
elif state == 'day-results':
start_round(room, 'Night', False)
elif state == 'win':
return_to_lobby(room)
def return_to_lobby(room: str):
    """Send the whole room back to the lobby state.

    Args:
        room (str): The room code of the players.
    """
    print(f'[{room}] Returning to lobby.')
    reset_game(room)
    update_room_state(room, 'lobby')
    # Broadcast a short countdown before the lobby screen is shown.
    emit(
        'start_lobby',
        {
            'time': 5,
            'message': 'Returning to lobby in...',
            'state_html': 'lobby',
            'state_name': '',
            'alive': True,
        },
        room=room,
    )
def start_setup(room: str):
    """Begin the game's setup state, dealing roles and characters.

    Args:
        room (str): The room code of the players.
    """
    print(f'[{room}] Starting setup.')
    assign_roles(room)
    assign_characters(room)
    update_room_state(room, 'setup')
    # Every player receives a personalised payload containing their own
    # character and (secret) role, so each emit targets a single sid.
    for member in get_players(room):
        emit('start_setup', {
            'time': 5,
            'message': 'Game starting in...',
            'state_html': 'setup',
            'state_name': 'Set Up',
            'name': member.character_name,
            'age': member.character_age,
            'role': get_role_name(member.role),
            'role_description': get_role_description(member.role),
            'alive': member.is_alive,
        }, room=member.id)
def start_round(room: str, round_name: str, is_day: bool):
    """Start a new day or night round for the room.

    Updates the room state, bumps the round counter, and sends every player
    a personalised countdown payload with their role's action and the list
    of choosable players.

    Args:
        room (str): The room code of the players.
        round_name (str): The current round name to be shown in the countdown to the player.
        is_day (bool): Whether it will transition to day or night
    """
    print(f'[{room}] Starting {round_name.lower()}.')
    update_room_state(room, round_name.lower())
    count = increment_round_count(room)
    # No player is excluded from the target lists. Hoisted out of the loop
    # since it never changes per player.
    # NOTE(review): an empty skip_id means players can target themselves —
    # confirm that is intended (otherwise this should be player.id).
    skip_id = ''
    for player in get_players(room):
        payload = {
            'time': 5,
            'message': f'{round_name} starting in...',
            'state_html': 'round',
            'state_name': round_name,
            'role_action': get_role_action(player.role, is_day, count),
            'players_names': get_players_string(room, skip_id),
            'players_ids': get_players_ids(room, skip_id),
            'alive': player.is_alive
        }
        emit('start_round', payload, room=player.id)
def process_choices_night(room: str):
    """Process the choices that were made by players at night.

    Resolution order: antagonist attacks / reveals, regular protection and
    prophet/fool visions first; healer saves second; finally any player
    still marked dies. The collected general and private result messages
    are then handed to the win-condition check.

    Args:
        room (str): The room code of the players.
    """
    print(f'[{room}] Showing night results.')
    result_general_list = list()
    # One private result list per player id.
    result_private_dict = {id: list() for id in get_players_ids(room)}
    # The dict used to determine the protected player (target -> vote count).
    protect_dict = {}
    players = get_players(room)
    # NOTE(review): process_win_conditions unpacks get_role_count(room) into
    # two values; if it returns a tuple here too, `count == 1` below can
    # never be true — confirm what get_role_count returns.
    count = get_role_count(room)
    # The first loop through the players' list will process actions that should happen 1st.
    for player in players:
        if 'antagonist' in player.role:
            if ('crazed' not in player.role) and count == 1:
                # Reveal a fellow antagonist to this antagonist.
                # BUG FIX: this string was assigned to `is_a` but formatted
                # as `is_antag`, raising NameError whenever the branch ran.
                is_antag = 'is a ' + get_role_name('antagonist').capitalize()
                result_private_dict[player.id].append([f'{get_player_string(player.chosen_player)} {is_antag}.'])
            else:
                # Sets the kill status, it is random if the antagonist is crazed.
                kill_status = True if 'crazed' not in player.role else bool(random.getrandbits(1))
                # Marks the player to be killed.
                update_player_marked(player.chosen_player, kill_status)
                # Adds to the general and private results so that players know what happened to the village and them.
                result = 'attacked' if kill_status else 'failed to attack'
                result_general_list.append(f'{get_player_string(player.chosen_player)} {get_result_message_general(result)}')
                result_private_dict[player.chosen_player].append(get_result_message_private(result))
        elif player.role == 'regular':
            # Update the dictionary representing how many regulars are protecting someone.
            if player.chosen_player in protect_dict:
                protect_dict[player.chosen_player] += 1
            else:
                protect_dict[player.chosen_player] = 1
        elif player.role == 'prophet':
            # Update the personal result dict for the player who is a prophet.
            is_antag = 'is the ' if 'antagonist' in get_player(player.chosen_player).role else 'is not the '
            is_antag += get_role_name('antagonist').capitalize()
            result_private_dict[player.id].append([f'{get_player_string(player.chosen_player)} {is_antag}.'])
        elif player.role == 'fool':
            # The fool receives a random (possibly wrong) answer instead.
            is_antag = 'is the ' if bool(random.getrandbits(1)) else 'is not the '
            is_antag += get_role_name('antagonist').capitalize()
            result_private_dict[player.id].append([f'{get_player_string(player.chosen_player)} {is_antag}.'])
    # Unmark the protected player if they've been marked by the antagonist.
    # Guard: max() on an empty dict raises ValueError when no regular chose
    # anyone (e.g. no regulars alive).
    if protect_dict:
        player_protected = max(protect_dict.items(), key=operator.itemgetter(1))[0]
        if protect_dict[player_protected] > 1:
            update_player_marked(player_protected, False)
            # Adds to the general and private results so that players know what happened to the village and them.
            result = 'protected'
            result_general_list.append(f'{get_player_string(player_protected)} {get_result_message_general(result)}')
            result_private_dict[player_protected].append(get_result_message_private(result))
    # The second loop through the players' list will process actions that should happen 2nd.
    for player in players:
        if player.role == 'healer':
            update_player_marked(player.chosen_player, False)
            # Adds to the general and private results so that players know what happened to the village and them.
            result = 'healed'
            result_general_list.append(f'{get_player_string(player.chosen_player)} {get_result_message_general(result)}')
            result_private_dict[player.chosen_player].append(get_result_message_private(result))
    # The final loop through the players' list will determine who survives the night.
    for player in players:
        if player.is_marked:
            update_player_alive(player.id, False)
            # Adds to the general and private results so that players know what happened to the village and them.
            result = 'died'
            result_general_list.append(f'{get_player_string(player.id)} {get_result_message_general(result)}')
            result_private_dict[player.id].append(get_result_message_private(result))
    # Determines if the game was won by a team or the next round should start.
    process_win_conditions(room, players, 'night-results', 'Night', result_general_list, result_private_dict)
def process_choices_day(room: str):
    """Process the choices (lynch votes) made by players during the day.

    Tallies every player's vote, kills the player with the most votes,
    records the result messages, and checks the win conditions.

    Args:
        room (str): The room code of the players.
    """
    print(f'[{room}] Showing day results.')
    result_general_list = list()
    result_private_dict = dict()
    for id in get_players_ids(room):
        result_private_dict[id] = list()
    # The dict used to determine who is killed this round (target -> votes).
    kill_dict = {}
    players = get_players(room)
    # Loop through the players to tally who wants to kill whom.
    for player in players:
        if player.chosen_player in kill_dict:
            kill_dict[player.chosen_player] += 1
        else:
            kill_dict[player.chosen_player] = 1
    # Grab the player from the dict with the most votes and mark them as dead.
    player_killed = max(kill_dict.items(), key=operator.itemgetter(1))[0]
    update_player_alive(player_killed, False)
    # Adds to the general and private results so that players know what happened to the village and them.
    result = 'killed'
    # BUG FIX: the general list previously used the *private* message
    # variant, and the private entry was appended under the stale loop
    # variable `player.id` (the last player iterated) instead of the victim.
    result_general_list = [f'{get_player_string(player_killed)} {get_result_message_general(result)}']
    result_private_dict[player_killed].append(get_result_message_private(result))
    # Determines if the game was won by a team or the next round should start.
    process_win_conditions(room, players, 'day-results', 'Day', result_general_list, result_private_dict)
def _broadcast_win(room: str, state_name: str, winner_role: str, good_won: bool):
    """Broadcast the win screen for one team to the whole room and reset the game."""
    update_room_state(room, 'win')
    payload = {
        'time': 5,
        'message': f'{state_name} results showing in...',
        'state_html': 'win',
        'state_name': get_role_name(winner_role).capitalize() + 's Win!',
        'win_message': f'{get_win_message(good_won)}',
        'players': get_players_string_win(room)
    }
    emit('start_win', payload, room=room)
    reset_game(room)


def process_win_conditions(room: str, players: list, state: str, state_name: str, result_general_list: list, result_private_dict: dict):
    """Moves the game to the next state based on whether the win condition has been met or not.

    Args:
        room (str): The room code of the players.
        players (list): All players in the game.
        state (str): The current state.
        state_name (str): The state name that will display on the player's screen if it goes to the result screen.
        result_general_list (list): A list of general events that happened during the round.
        result_private_dict (dict): A dictionary of lists that correspond to events that are only shared with a single player.
    """
    count_antag, count_rest = get_role_count(room)
    if count_antag >= count_rest:
        # More bad guys than good: the antagonists win.
        _broadcast_win(room, state_name, 'antagonist', False)
    elif count_antag == 0:
        # No living bad guys: the village wins.
        _broadcast_win(room, state_name, 'regular', True)
    else:
        # Continue to the next results screen.
        update_room_state(room, state)
        # Create a payload for each player. Unique because of private results.
        for player in players:
            payload = {
                'time': 5,
                'message': f'{state_name} results showing in...',
                'state_html': 'results',
                'state_name': f'{state_name} Results',
                'results_general': result_general_list,
                'results_private': result_private_dict[player.id] if player.id in result_private_dict else [],
                'alive': player.is_alive
            }
            emit('start_results', payload, room=player.id)
def reset_game(room: str):
    """Resets the players' statuses and indicates the reset to them.

    Args:
        room (str): The room code of the players.
    """
    # Clear per-player state first, then room-level state.
    reset_players(room)
    reset_room(room)
    # Push the refreshed (empty-role) player list to every client in the room.
    emit('update_players', {'players': get_players_string_lobby(room)}, room=room)
| 36.042506 | 136 | 0.638322 | 2,137 | 16,111 | 4.627515 | 0.126345 | 0.024269 | 0.030943 | 0.020932 | 0.560421 | 0.499747 | 0.445343 | 0.415512 | 0.387097 | 0.352412 | 0 | 0.001991 | 0.25194 | 16,111 | 446 | 137 | 36.123318 | 0.818536 | 0.245236 | 0 | 0.348837 | 0 | 0 | 0.180599 | 0.046449 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05814 | false | 0 | 0.031008 | 0.011628 | 0.124031 | 0.034884 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bc4ad754b21348d16285e2026fcdacb8e8dc9ad | 1,075 | py | Python | hatsploit/modules/post/unix/shell/getpid.py | enty8080/HatSplo | 57ea81c2bc73838cbf7d7062d7e665eda1143d18 | [
"MIT"
] | 139 | 2021-02-17T15:52:30.000Z | 2022-03-30T14:50:42.000Z | hatsploit/modules/post/unix/shell/getpid.py | enty8080/HatSplo | 57ea81c2bc73838cbf7d7062d7e665eda1143d18 | [
"MIT"
] | 27 | 2021-03-24T17:14:30.000Z | 2022-03-02T18:50:43.000Z | hatsploit/modules/post/unix/shell/getpid.py | enty8080/HatSplo | 57ea81c2bc73838cbf7d7062d7e665eda1143d18 | [
"MIT"
] | 85 | 2021-02-17T15:39:03.000Z | 2022-03-07T09:08:58.000Z | #!/usr/bin/env python3
#
# This module requires HatSploit: https://hatsploit.netlify.app
# Current source: https://github.com/EntySec/HatSploit
#
from hatsploit.lib.module import Module
from hatsploit.utils.session import SessionTools
class HatSploitModule(Module, SessionTools):
    """Post module that reports the process id of a Unix shell session."""

    # Module metadata displayed by the framework.
    details = {
        'Name': "Unix Shell Get PID",
        'Module': "post/unix/shell/getpid",
        'Authors': [
            'Ivan Nikolsky (enty8080) - module developer'
        ],
        'Description': "Get current session process id.",
        'Platform': "unix",
        'Rank': "medium"
    }

    # User-configurable options.
    options = {
        'SESSION': {
            'Description': "Session to run on.",
            'Value': None,
            'Type': "session->[unix,linux,macos]",
            'Required': True
        }
    }

    def run(self):
        # Resolve the SESSION option into a live session object.
        target = self.get_session(self.parse_options(self.options))
        if not target:
            return
        # `printf $$` expands to the shell's own process id.
        pid = target.send_command("printf $$", True).strip()
        if pid:
            self.print_information(f"PID: {pid}")
| 26.219512 | 65 | 0.572093 | 109 | 1,075 | 5.605505 | 0.59633 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006579 | 0.293023 | 1,075 | 40 | 66 | 26.875 | 0.797368 | 0.126512 | 0 | 0 | 0 | 0 | 0.281585 | 0.052463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.071429 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bc4ad99cec6515a6001bb4ad4c1f6891d5605dd | 293 | py | Python | ex077a.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | ex077a.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | ex077a.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | p = ('Curso', 'Video', 'Python')
cont = 0
contl = 0
while True:
while True:
if p[cont][contl] in 'AaEeIiOoUu':
print(p[cont][contl])
contl += 1
if contl >= len(p[cont]):
break
contl = 0
cont += 1
if cont >= len(p):
break
| 19.533333 | 42 | 0.467577 | 39 | 293 | 3.512821 | 0.410256 | 0.109489 | 0.145985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027473 | 0.37884 | 293 | 14 | 43 | 20.928571 | 0.725275 | 0 | 0 | 0.428571 | 0 | 0 | 0.088737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bc5c9f0df460e7d769b16ab75ae99c2117d9daa | 3,951 | py | Python | JT_Selenium/captcha.py | Gothingbop/JT_Selenium | 088cf370bca464a546e039786c720142cf056a29 | [
"MIT"
] | null | null | null | JT_Selenium/captcha.py | Gothingbop/JT_Selenium | 088cf370bca464a546e039786c720142cf056a29 | [
"MIT"
] | null | null | null | JT_Selenium/captcha.py | Gothingbop/JT_Selenium | 088cf370bca464a546e039786c720142cf056a29 | [
"MIT"
] | null | null | null | import requests
import time
from urllib.parse import urlparse, parse_qs
from selenium.common.exceptions import NoSuchElementException
def SolveCaptcha(api_key, site_key, url):
    """
    Uses the 2Captcha service to solve Captcha's for you.
    Captcha's are held in iframes; to solve the captcha, you need a part of the url of the iframe. The iframe is usually
    inside a div with id=gRecaptcha. The part of the url we need is the query parameter k, this is called the site_key:
    www.google.com/recaptcha/api2/anchor?ar=1&k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9&co=aHR0cHM6Ly93d3cuZGljZS5jb206NDQz&hl=en&v=oqtdXEs9TE9ZUAIhXNz5JBt_&size=normal&cb=rpcg9w84syix
    Here the site_key is 6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
    You also need to supply the url of the current page you're on.
    This function will return a string with the response key from captcha validating the test. This needs to be inserted
    into an input field with the id=g-recaptcha-response.
    :param api_key: The 2Captcha API key.
    :param site_key: The site_key extracted from the Captcha iframe url
    :param url: url of the site you're on
    :return: The response from captcha validating the test
    :raises TimeoutError: if 2Captcha reports the captcha as unsolvable
    :raises Exception: for any other error reported by 2Captcha
    """
    print("Solving Captcha...")
    print("Sending Request...")
    request_response = requests.get("https://2captcha.com/in.php?", params={
        "googlekey": site_key,
        "method": "userrecaptcha",
        "pageurl": url,
        "key": api_key,
        "json": 1,
        "invisible": 0,
    })
    request_response.raise_for_status()
    request_json = request_response.json()
    # BUG FIX: a submit status of 0 means the request itself was rejected
    # (bad API key, bad site key, ...). Surface that error instead of
    # polling forever with an id that is actually an error string.
    if request_json['status'] != 1:
        raise Exception(request_json['request'])
    captcha_id = request_json['request']
    print("Waiting for Response...")
    # 2Captcha recommends waiting before the first poll.
    time.sleep(30)
    answer_response_json = {'status': 0, 'request': 'CAPCHA_NOT_READY'}
    while answer_response_json['request'] == 'CAPCHA_NOT_READY':
        answer_response = requests.get("https://2captcha.com/res.php", params={
            "key": api_key,
            "action": "get",
            "id": captcha_id,
            "json": 1
        })
        # Fail fast on HTTP errors while polling (previously unchecked).
        answer_response.raise_for_status()
        answer_response_json = answer_response.json()
        print(answer_response_json)
        # Only sleep if we actually need to poll again.
        if answer_response_json['request'] == 'CAPCHA_NOT_READY':
            time.sleep(5)
    if answer_response_json['status'] == 1:
        print("Solved!")
        return answer_response_json['request']
    elif answer_response_json['request'] == 'ERROR_CAPTCHA_UNSOLVABLE':
        raise TimeoutError("ERROR_CAPTCHA_UNSOLVABLE")
    else:
        raise Exception(answer_response_json['request'])
def FindSolveCaptcha(api_key, driver, form):
    """Locate the reCAPTCHA inside *form* and solve it via the 2Captcha service.

    After this function runs successfully, the captcha is solved (not
    visibly) and the form can be submitted. If no visible captcha is
    present, the error code '102' is returned instead.

    :param api_key: The 2Captcha API key.
    :param driver: The webdriver controlling the browser.
    :param form: The form that houses the captcha.
    :return:
    """
    try:
        # Walk from the captcha container down to its iframe and pull the
        # site key out of the iframe URL's `k` query parameter.
        widget = form.find_element_by_id("gRecaptcha")
        assert widget.is_displayed(), '102'
        frame = widget.find_element_by_tag_name("iframe")
        assert frame.is_displayed(), '102'
        src = urlparse(frame.get_attribute("src"))
        assert src, '102'
        query = parse_qs(src.query)
        assert query, '102'
        site_key = query['k'][0]
    except NoSuchElementException:
        return '102'
    except AssertionError as err:
        # The assert message doubles as the error code handed back to callers.
        return err.args[0]
    else:
        token = SolveCaptcha(api_key, site_key, driver.current_url)
        answer_field = widget.find_element_by_id("g-recaptcha-response")
        driver.execute_script(f"arguments[0].innerText = '{token}'", answer_field)