id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
11338452 | # coding=utf-8
"""
Config value that will be cast as a boolean
"""
import tomlkit.container
from elib_config._types import Types
from ._config_value import ConfigValue
from ._exc import ConfigValueTypeError
class ConfigValueBool(ConfigValue):
    """
    Configuration value that is validated and returned as a boolean.
    """

    @property
    def type_name(self) -> str:
        """
        :return: user friendly type for this config value
        """
        return Types.boolean

    def _cast(self, raw_value) -> bool:
        # TOML already parses true/false into real bools; anything else
        # (e.g. the quoted strings "true"/"false") is a user mistake.
        if isinstance(raw_value, bool):
            return raw_value
        raise ConfigValueTypeError(
            self.path,
            f'invalid boolean expression: "{raw_value}"; use either "true" or "false" instead, without the quotes.'
        )

    # pylint: disable=useless-super-delegation
    def __call__(self) -> bool:
        return super().__call__()

    def _toml_add_examples(self, toml_obj: tomlkit.container.Container):
        # Emit example lines as TOML comments for the generated config file.
        self._toml_comment(toml_obj, '"true" and "false" are the only valid boolean values')
        self._toml_comment(toml_obj, 'example = true')
        self._toml_comment(toml_obj, 'example = false')
| StarcoderdataPython |
1718709 | <filename>math.py
#!/usr/bin/env python
#
# Simple script that performs arbitrary math operations
#
# Needs PyExpressionEval, install with `pip install py_expression_eval`
#
# Config usage:
# => exec=python
# => params=path/to/math.py
# => name="Math"
#
from __future__ import print_function
from py_expression_eval import Parser
import sys
import os
def serialize(result):
    """Serialize an evaluation result into the launcher's JSON payload.

    Returns a JSON string with an empty ``results`` list when *result* is
    None (evaluation failed), otherwise a single entry whose name shows the
    value formatted with ``%g``.
    """
    template = "{\"backend\":\"math\",\"version\":\"1.0.0\",\"results\":[]}"
    # `is None`, not `== None`: identity is the correct test for the sentinel.
    if result is None:
        return template
    template = "{\"backend\":\"math\",\"version\":\"1.0.0\",\"results\":[{\"name\":\"= %g\",\"icon\":\"accessories-calculator\"}]}"
    return template % result
def main():
    """Read one expression per line from stdin and print the JSON result.

    Runs until EOF on stdin. Evaluation/parse errors produce an empty
    result set instead of terminating the loop.
    """
    parser = Parser()
    while True:
        # Prompt goes to stderr so stdout stays machine-readable.
        sys.stderr.write('> ')
        line = sys.stdin.readline()
        if len(line) == 0:  # EOF
            break
        line = line.rstrip(os.linesep)
        try:
            result = parser.parse(line).evaluate({})
            print(serialize(result))
        except Exception:
            # Was a bare `except:`; Exception keeps KeyboardInterrupt/SystemExit working.
            print(serialize(None))
        sys.stdout.flush()
if __name__ == "__main__": main()
| StarcoderdataPython |
1776983 | <filename>src/asce/__init__.py
# -*- coding: utf-8 -*-
from . import core

__version__ = '0.0.1'

# Public API: re-export the path/template/parameter helpers from asce.core
# so callers can use `asce.load_template(...)` etc. directly.
get_path_list = core.get_path_list
get_template_path_list = core.get_template_path_list
get_parameter_path_list = core.get_parameter_path_list
load_template = core.load_template
load_parameter = core.load_parameter
find_paths_file = core.find_paths_file
| StarcoderdataPython |
1886919 | <reponame>xiaolinzi-xl/Algorithm-Interview-Study<filename>09-dynamic-programming/leetcode_300.py
class Solution:
    """Longest Increasing Subsequence (LeetCode 300).

    `lengthOfLIS` is the exponential recursive solution, `lengthOfLIS_1`
    the O(n^2) bottom-up DP.
    """

    def rebot(self, nums, index):
        """Return the length of the longest increasing subsequence ending at `index`."""
        if index == 0:
            return 1
        res = 1
        for i in range(index - 1, -1, -1):
            if nums[index] > nums[i]:
                res = max(res, self.rebot(nums, i) + 1)
        return res

    def lengthOfLIS(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        if n == 0:
            return 0
        res = 0
        for i in range(n):
            # Call rebot once per index. (The original called it twice per
            # index — once only to fill a dead debug list — doubling the
            # already-exponential work.)
            res = max(res, self.rebot(nums, i))
        return res

    def lengthOfLIS_1(self, nums):
        """O(n^2) bottom-up DP variant: memo[i] = LIS length ending at i."""
        n = len(nums)
        if n == 0:
            return 0
        res = 1
        memo = [1] * n
        for i in range(1, n):
            for j in range(i - 1, -1, -1):
                if nums[i] > nums[j]:
                    memo[i] = max(memo[i], memo[j] + 1)
            res = max(res, memo[i])
        return res
if __name__ == "__main__":
    sample = [10, 9, 2, 5, 3, 7, 101, 18]
    # sample = [4, 10, 4, 3, 8, 9]
    solver = Solution()
    # Both implementations should agree on the sample (answer: 4).
    print(solver.lengthOfLIS(sample))
    print(solver.lengthOfLIS_1(sample))
| StarcoderdataPython |
8062138 | # Copyright 2017-2021 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import json
import sys
from prettytable import prettytable
from src.api.dts import DTS
from src.model.dts_model import DtsEncoder
class DtsOperationsManager:
    """CLI-facing manager for Data Transfer Service (DTS) registry operations.

    Wraps the DTS API client and renders results either as JSON or as
    prettytable output for the terminal.
    """

    # Column headers used when listing several registries at once.
    _DTS_TABLE_HEADERS = ['ID', 'Name', 'Status']
    # Separator between a preference key and its value in CLI arguments.
    _PREF_DELIMITER = "="

    def __init__(self):
        pass

    def create(self, url, name, schedulable, prefixes, preferences, json_out):
        """Register a new DTS and print the created registry."""
        registry = DTS.create(url, name, schedulable, prefixes, self._convert_preferences_list_to_dict(preferences))
        self._print_registry(registry, json_out)

    def list(self, registry_id, json_out):
        """Print one registry (when registry_id is given) or all registries."""
        if registry_id:
            registry = DTS.load(registry_id)
            self._print_registry(registry, json_out)
        else:
            registries = DTS.load_all()
            if json_out:
                self._print_dts_json(registries)
            elif registries:
                self._print_registries_prettytable(registries)
            else:
                click.echo('No data transfer services are available.')

    def delete(self, registry_id, json_out):
        """Delete a registry and print its final state."""
        registry = DTS.delete(registry_id)
        self._print_registry(registry, json_out)

    def upsert_preferences(self, registry_id, preferences_list, json_out):
        """Add or update 'key=value' preferences; exits with code 1 on empty input."""
        if not preferences_list:
            click.echo('Preferences should not be empty!', err=True)
            sys.exit(1)
        updated_registry = DTS.update_preferences(registry_id, self._convert_preferences_list_to_dict(preferences_list))
        self._print_registry(updated_registry, json_out)

    def delete_preferences(self, registry_id, preferences_keys, json_out):
        """Remove the given preference keys; exits with code 1 on empty input."""
        if not preferences_keys:
            click.echo('Preferences keys to be removed should not be empty!', err=True)
            sys.exit(1)
        updated_registry = DTS.delete_preferences(registry_id, preferences_keys)
        self._print_registry(updated_registry, json_out)

    def _convert_preferences_list_to_dict(self, preferences_list):
        """Turn ['key=value', ...] into a dict; exits with code 1 on a malformed entry."""
        preferences_dict = {}
        for preference_entry in preferences_list:
            # maxsplit=1 so preference values may themselves contain '='.
            preference_value_and_key = preference_entry.split(self._PREF_DELIMITER, 1)
            if len(preference_value_and_key) != 2:
                click.echo('Error [%s]: preference declaration should contain a delimiter!' % preference_entry, err=True)
                sys.exit(1)
            else:
                preferences_dict[preference_value_and_key[0]] = preference_value_and_key[1]
        return preferences_dict

    def _print_registry(self, registry, json_out):
        """Render a single registry as JSON or as a key/value table."""
        if json_out:
            self._print_dts_json(registry)
        else:
            self._print_single_registry_pretty(registry)

    def _print_dts_json(self, object):
        # NOTE(review): the parameter name shadows the builtin `object`.
        click.echo(json.dumps(object, cls=DtsEncoder))

    def _print_single_registry_pretty(self, registry):
        """Print one registry as a two-column table plus prefix/preference lists."""
        registry_info_table = prettytable.PrettyTable()
        registry_info_table.field_names = ['key', 'value']
        registry_info_table.align = 'l'
        # 12 is presumably prettytable.PLAIN_COLUMNS — TODO confirm against
        # the installed prettytable version's style constants.
        registry_info_table.set_style(12)
        registry_info_table.header = False
        registry_info_table.add_row(['ID:', registry.id])
        registry_info_table.add_row(['Name:', registry.name])
        registry_info_table.add_row(['URL:', registry.url])
        registry_info_table.add_row(['Created:', registry.created_date])
        registry_info_table.add_row(['Schedulable:', registry.schedulable])
        registry_info_table.add_row(['Status:', registry.status])
        registry_info_table.add_row(['Last heartbeat:', registry.heartbeat or 'No heartbeat was received yet'])
        click.echo(registry_info_table)
        self._print_list_as_table('Prefixes', registry.prefixes)
        self._print_list_as_table('Preferences', self.get_flat_preferences(registry))

    def get_flat_preferences(self, registry):
        """Flatten the preferences dict to 'key: value' strings for display.

        NOTE(review): assumes all preference values are strings — a non-str
        value would raise TypeError on concatenation; confirm upstream.
        """
        flat_preferences = []
        for preference, value in registry.preferences.items():
            flat_preferences.append(preference + ': ' + value)
        return flat_preferences

    def _print_list_as_table(self, header_name, elements):
        """Print a titled list of strings, or a 'none specified' message."""
        click.echo()
        if elements:
            self._echo_title('{}:'.format(header_name))
            for prefix in elements:
                click.echo(prefix)
        else:
            click.echo('No {} specified.'.format(header_name.lower()))

    def _echo_title(self, title, line=True):
        """Print a title, optionally underlined with dashes of matching length."""
        click.echo(title)
        if line:
            for i in title:
                click.echo('-', nl=False)
            click.echo('')

    def _print_registries_prettytable(self, registries):
        """Print all registries as a three-column (ID/Name/Status) table."""
        table = self._init_table()
        for registry in registries:
            table.add_row(self._convert_registry_to_prettytable_row(registry))
        click.echo(table)

    def _init_table(self):
        """Build the empty listing table with standard headers and alignment."""
        table = prettytable.PrettyTable()
        table.field_names = self._DTS_TABLE_HEADERS
        table.align = "l"
        table.header = True
        return table

    def _convert_registry_to_prettytable_row(self, registry):
        """Map a registry model to a [id, name, status] table row."""
        return [registry.id, registry.name, registry.status]
| StarcoderdataPython |
1867631 | #!/usr/bin/env python
import logging
import datetime
import sys
import json
import warnings
sys.path.append('../')
warnings.filterwarnings("ignore")
import pandas as pd
from scipy import stats
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import RandomizedSearchCV
import lightgbm
from utils.splits import set_group_splits
from args import args
# Module logger: INFO level, simple "LEVEL:name: message" format to stderr.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(sh)

if __name__ == '__main__':
    # CLI: exactly one positional argument selecting the task.
    if len(sys.argv) != 2:
        logger.error(f"Usage: {sys.argv[0]} task_name (ia|ps)")
        sys.exit(1)
    task = sys.argv[1]
    if task not in ['ia', 'ps']:
        logger.error("Task values are either ia (imminent admission) or ps (prolonged stay)")
        sys.exit(1)
    # Dataset columns/paths all come from the project-level `args` module.
    ori_df = pd.read_csv(args.dataset_csv, usecols=args.cols, parse_dates=args.dates)
    if task == 'ia':
        logger.info(f"Running hyperparameter search for Imminent Admission Prediction task")
        # Rows labeled -1 are excluded from the imminent-admission task.
        task_df = ori_df.loc[(ori_df['imminent_adm_label'] != -1)][args.imminent_adm_cols].reset_index(drop=True)
        label = 'imminent_adm_label'
    if task == 'ps':
        logger.info(f"Running hyperparameter search for Prolonged Stay Prediction task ")
        task_df = ori_df.loc[(ori_df['chartinterval'] != 0)][args.prolonged_stay_cols].reset_index(drop=True)
        label = 'prolonged_stay_label'
    # Train/test split grouped by hospital admission id to avoid leakage.
    df = set_group_splits(task_df.copy(), group_col='hadm_id', seed=643)
    # Bigram TF-IDF over pre-tokenized notes (analyzer=str.split).
    vectorizer = TfidfVectorizer(min_df=args.min_freq, analyzer=str.split, sublinear_tf=True, ngram_range=(2,2))
    x_train = vectorizer.fit_transform(df.loc[(df['split'] == 'train')]['processed_note'])
    y_train = df.loc[(df['split'] == 'train')][label].to_numpy()
    clf_params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'is_unbalance': True,
    }
    clf = lightgbm.LGBMClassifier(**clf_params)
    # Randomized search space over the main LightGBM knobs.
    param_space = {
        'num_leaves': stats.randint(30, 60),
        'bagging_fraction': stats.uniform(0.2, 0.7),
        'learning_rate': stats.uniform(0.1, 0.9),
        'min_data_in_leaf': stats.randint(2, 20),
        'max_bin': stats.randint(3, 20),
        'boosting': ['gbdt', 'dart'],
        'bagging_freq': stats.randint(3, 31),
        'max_depth': stats.randint(0, 11),
        'feature_fraction': stats.uniform(0.2, 0.7),
        'lambda_l1': stats.uniform(0, 10),
        'num_iterations': stats.randint(100, 200),
    }
    # NOTE(review): the `iid` parameter was removed in scikit-learn >= 0.24;
    # this call requires an older sklearn — confirm the pinned version.
    random_search = RandomizedSearchCV(clf, param_space, n_iter=200, cv=5, iid=False, verbose=1, n_jobs=32)
    logger.info("Starting random search...")
    t1 = datetime.datetime.now()
    random_search.fit(x_train, y_train)
    dt = datetime.datetime.now() - t1
    # args.workdir is presumably a pathlib.Path (the `/` operator) — TODO confirm.
    params_file = args.workdir/f'{task}_best_params.json'
    logger.info(f"Random search completed. Took {dt.days} days, {dt.seconds//3600} hours, and {(dt.seconds//60)%60} minutes. Writing best params to {params_file}")
    json.dump(random_search.best_params_, params_file.open('w'))
| StarcoderdataPython |
8194492 | <gh_stars>0
# @l2g 1881 python3
# [1881] Maximum Value after Insertion
# Difficulty: Medium
# https://leetcode.com/problems/maximum-value-after-insertion
#
# You are given a very large integer n,represented as a string, and an integer digit x.
# The digits in n and the digit x are in the inclusive range [1,9],
# and n may represent a negative number.
# You want to maximize n's numerical value by inserting x anywhere in the decimal representation of n.
# You cannot insert x to the left of the negative sign.
#
# For example, if n = 73 and x = 6, it would be best to insert it between 7 and 3, making n = 763.
# If n = -55 and x = 2, it would be best to insert it before the first 5, making n = -255.
#
# Return a string representing the maximum value of n after the insertion.
#
# Example 1:
#
# Input: n = "99", x = 9
# Output: "999"
# Explanation: The result is the same regardless of where you insert 9.
#
# Example 2:
#
# Input: n = "-13", x = 2
# Output: "-123"
# Explanation: You can make n one of {-213, -123, -132}, and the largest of those three is -123.
#
#
# Constraints:
#
# 1 <= n.length <= 10^5
# 1 <= x <= 9
# The digits in n are in the range [1, 9].
# n is a valid representation of an integer.
# In the case of a negative n, it will begin with '-'.
#
#
class Solution:
    def maxValue(self, n: str, x: int) -> str:
        """Insert digit x (1-9) into decimal string n to maximize its value.

        For a positive n, insert x before the first digit smaller than x;
        for a negative n, insert x before the first digit larger than x
        (never left of the minus sign).
        """
        positive = not n.startswith("-")
        digits = n if positive else n[1:]
        ch = str(x)
        inserted = digits + ch  # fallback: append at the end
        for pos, d in enumerate(digits):
            keep_scanning = (x <= int(d)) if positive else (x >= int(d))
            if not keep_scanning:
                inserted = digits[:pos] + ch + digits[pos:]
                break
        return inserted if positive else "-" + inserted
if __name__ == "__main__":
    import os
    import pytest

    # Run the test module matching this problem number.
    test_path = os.path.join("tests", "test_1881.py")
    pytest.main([test_path])
| StarcoderdataPython |
8055628 | <filename>rosalind/ini4/ini4.py
import sys
if __name__ == "__main__":
    # Rosalind INI4: print the sum of all odd integers in [a, b] inclusive.
    tokens = sys.stdin.readline().strip().split()
    lo, hi = int(tokens[0]), int(tokens[1])
    if lo % 2 == 0:
        lo += 1  # start from the first odd number >= a
    print(sum(range(lo, hi + 1, 2)))
| StarcoderdataPython |
11299183 | import datetime
from django.db import models
from employees.models import Employee
# Create your models here.
class Task(models.Model):
    """A work item assigned by one employee to another."""

    # (value, label) choices for the `status` field.
    state = [
        ("pending", "pending"), ("assigned", "assigned"),
        ("incomplete", "incomplete"), ("complete", "complete")
    ]
    task_name = models.CharField(max_length=300, verbose_name="task name")
    # related_name values distinguish the two reverse accessors on Employee;
    # without them Django rejects two FKs to the same model (fields.E304).
    assigned_by = models.ForeignKey(
        Employee, on_delete=models.CASCADE,
        verbose_name="assigned by", related_name="tasks_assigned"
    )
    assigned_to = models.ForeignKey(
        Employee, on_delete=models.CASCADE,
        verbose_name="assigned to", related_name="tasks_received"
    )
    date_assigned = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)
    # Completion date stored as text (ISO format) since the field is a CharField.
    date_completed = models.CharField(max_length=300)
    status = models.CharField(choices=state, max_length=20, default="pending")

    def get_date_completed(self):
        """Stamp today's date into date_completed once the task is complete."""
        if self.status == "complete":
            # Fixed: was `datetime.day.today()`, which raises AttributeError.
            self.date_completed = datetime.date.today().isoformat()

    def save(self, *args, **kwargs):
        """Refresh the completion date before every save."""
        self.get_date_completed()
        super(Task, self).save(*args, **kwargs)
| StarcoderdataPython |
6458347 | <gh_stars>1-10
from behave import *
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
@given(u'we have navigated to {url}')
def step_impl(context, url):
    """Open a Chrome browser at `url` and stash it on the behave context."""
    browser = webdriver.Chrome()
    browser.get(url)
    context.browser = browser
    # Fixed wait for the page to settle; an explicit WebDriverWait would be
    # more reliable — TODO confirm what the page needs.
    time.sleep(5)
@when(u'we change {settings} to "{soundOff}"')
def step_impl(context, settings, soundOff):
    """Open the settings panel, toggle the sound checkbox and submit.

    Fixed: the parameter was named `day` while the step placeholder is
    `{settings}` — behave passes placeholder values as keyword arguments,
    so the mismatch raised a TypeError. Also fixed the nonexistent
    `find_element_tag_name` / `find_element_by_type` locator methods.
    """
    context.browser.find_element_by_tag_name('Settings').click()
    context.browser.find_element_by_css_selector('input[type=checkbox]').click()
    context.browser.find_element_by_id('submitButton').click()
    # Remember the expected state so the @then step can verify it.
    context.task_name = soundOff
@then(u'there should be no sound')
def step_impl(context):
    """Assert that at least one 'includeMusic' element mentions the saved value."""
    count = 0
    # NOTE(review): 'includeMusic' is an unusual tag name — presumably a
    # custom element; confirm against the application's markup.
    spans = list(context.browser.find_elements_by_tag_name('includeMusic'))
    for span in spans:
        if context.task_name in span.text:
            count = count + 1
    assert count > 0
| StarcoderdataPython |
9712174 | <gh_stars>0
'''
@File : iplocation.py
@Time : 2020/12/14 19:55:19
@Author : <NAME>
@Contact : <EMAIL>
@License : Copyright (c) 2020 <NAME>
'''
import os
import json
import time
import datetime
import urllib.request
import logging as log
from IPy import IP
import IP2Location
from urllib.error import HTTPError
# Maintain a JSON table, and when there is a query task,
# first query the local JSON table whether there is a cache.
# If the local record is too long or there is no record,
# the network API is used to query the network home.
PRIVATE_RANGE = 'PRIVATE_RANGE'
cache = {}
ipLocationCache = "data/url/ipLocationCache.json"
# updateThreshold: unit is day day day!
updateThreshold = 7
def getLocationByBin(ip: str, binPath = "modules/api/IP2LOCATION.BIN"):
    """Resolve the region of a public IP from the local IP2Location database.

    :param ip: dotted-quad IP address string
    :param binPath: path to the IP2LOCATION .BIN database file
    :return: the region string, or PRIVATE_RANGE for non-public addresses
    """
    if IP(ip).iptype() != 'PUBLIC':
        return PRIVATE_RANGE
    ip2locObj = IP2Location.IP2Location()
    ip2locObj.open(binPath)
    res = ip2locObj.get_all(ip)
    return res.region
    # return type(res.region)
def getCitysByIPs(IPs: list) -> set:
    """Resolve a list of IPs to their location strings, using the JSON cache.

    Cache entries older than `updateThreshold` days are refreshed from the
    network. Note: despite the original `list` annotation, a set is returned
    (duplicates are collapsed).
    """
    # Lazily load the on-disk cache on first use.
    if len(cache) == 0 and os.path.exists(ipLocationCache):
        getAll()
    citys = set()
    for ip in IPs:
        if IP(ip).iptype() != 'PUBLIC':
            city = PRIVATE_RANGE
        elif ip in cache:
            # Refresh stale cache entries from the network.
            interval = datetime.datetime.today() - datetime.datetime.strptime(cache[ip]['lastUpdate'],"%Y-%m-%d")
            if interval.days >= updateThreshold:
                city = getLocationFromNet(ip)
            else:
                city = cache[ip]['city']
        else:
            city = getLocationFromNet(ip)
        log.debug("ip: city %s" % city)
        citys.add(city)
    return citys
def getLocationFromNet(ip: str) -> str:
    """Resolve the location of *ip* via the remote geo-IP APIs and cache a hit.

    Tries each API in order and caches the first usable answer.
    Returns "ERROR" when every API fails.
    """
    today = datetime.datetime.today().strftime("%Y-%m-%d")
    for api in (ipapiAPI, pconlineAPI):
        city = api(ip)
        # Treat both the "ERROR" sentinel and an empty string as a miss —
        # the unimplemented pconline stub used to return "" and that empty
        # value was cached and returned as a valid city.
        if city and city != "ERROR":
            putValue(ip, {'city': city, 'lastUpdate': today})
            return city
    return "ERROR"
############################### API START
def ipapiAPI(ip: str) -> str:
    """Query ip-api.com for the location of *ip*.

    :return: "country-region-city" (zh-CN names) on success, "ERROR" on an
             API-level failure. Retries once-per-5s on HTTP 429 (rate limit).

    NOTE(review): exceptions other than HTTPError (URLError, JSON decode
    errors) still propagate to the caller — confirm this is intended.
    """
    try:
        r = urllib.request.urlopen("http://ip-api.com/json/{0}?lang=zh-CN".format(ip))
        content = r.read()
        content = content.decode('utf-8')
        rj = json.loads(content)
        if rj['status'] == 'success':
            return '-'.join([
                rj['country'],
                rj['regionName'],
                rj['city']
            ])
        else:
            log.error(rj)
            return 'ERROR'
    except HTTPError as he:
        if he.code == 429:
            # Rate limited: back off briefly, then retry recursively.
            time.sleep(5)
            log.error(he.__str__() + ' wait 5 seconds try again')
            return ipapiAPI(ip)
        else:
            return "ERROR"
def pconlineAPI(ip: str) -> str:
    """Unimplemented pconline geo-IP lookup.

    Returns the "ERROR" sentinel so callers fall through to other APIs.
    (The previous stub returned "", which getLocationFromNet treated as a
    successful lookup and cached.)
    """
    # TODO: implement the query against whois.pconline.com.cn
    return "ERROR"
############################### API END
def getAll():
    """Load the on-disk JSON location cache into the module-level `cache`."""
    with open(ipLocationCache, "r", encoding="utf-8") as target:
        global cache
        cache = json.load(target)
def putValue(key, value):
    """
    Add or update value of config json.

    Updates the in-memory `cache` and immediately rewrites the whole cache
    file (ensure_ascii=False keeps Chinese place names readable).
    """
    global cache
    cache[key] = value
    with open(ipLocationCache, "w", encoding="utf-8") as target:
        target.write(json.dumps(cache, ensure_ascii=False, indent=2))
def getHeaders(header_raw):
    """
    Gets the request header dictionary from the native request header
    :param header_raw: {str} headers, one "Name: value" pair per line
    :return: {dict} headers
    """
    headers = {}
    for raw_line in header_raw.split("\n"):
        if raw_line == '':
            continue
        name, value = raw_line.split(": ", 1)
        headers[name] = value
    return headers
if __name__ == "__main__":
# citys = getCitysByIPs(["172.16.17.32, 192.168.3.11"])
# print(citys)
# print(getLocationFromNet("172.16.31.10"))
# citys = getCitysByIPs([
# "192.168.3.11",
# "172.16.17.32",
# "172.16.17.32",
# '172.16.31.10'
# ])
# print(citys)
# ip = IP("172.16.31.10")
# ip = IP("127.0.0.1")
# print(ip.iptype())
# ipcnAPI('172.16.17.32')
# print(get_headers(rawHeaders))
# print(ipcnAPI('172.16.17.32'))
print(getLocationByBin('159.226.99.36')) | StarcoderdataPython |
6464417 | <gh_stars>1-10
import sys
import argparse
import signal
import re
import json
from .checks import detectandcheck, allchecks
from .checks import checkrsa, checkcrt, checksshpubkey
from .allkeys import urllookup, loadextrabl
from .scanssh import scanssh
from .scantls import scantls
from .update import update_bl
MAXINPUTSIZE = 10000
count = 0
PRECRT = "-----BEGIN CERTIFICATE-----\n"
POSTCRT = "\n-----END CERTIFICATE-----\n"
def _sighandler(_signum, _handler):
    """SIGHUP handler: report how many keys have been processed so far."""
    sys.stderr.write(f"{count} keys processed\n")
def _printresults(key, where, args):
    """Print one key-check result either as JSON or human-readable text.

    :param key: result dict with at least "type" and "results" keys
    :param where: human-readable origin of the key (file name, host:port, ...)
    :param args: parsed CLI namespace (json/verbose/all/url flags)
    """
    if args.json:
        print(json.dumps(key))
        return
    # Human-readable key label, e.g. "rsa[2048]".
    kn = key["type"]
    if "bits" in key:
        kn += f"[{key['bits']}]"
    if key["type"] == "unsupported":
        print(f"Warning: Unsupported key type, {where}", file=sys.stderr)
    elif key["type"] == "unparseable":
        print(f"Warning: Unparseable input, {where}", file=sys.stderr)
    elif key["type"] == "notfound":
        print(f"Warning: No key found, {where}", file=sys.stderr)
    elif args.verbose or args.all:
        # Only mention clean keys when the user asked for all/verbose output.
        if key["results"] == {}:
            print(f"{kn} key ok, {where}")
    # Vulnerability findings are always printed.
    for check, result in key["results"].items():
        sub = ""
        if "subtest" in result:
            sub = f"/{result['subtest']}"
        print(f"{check}{sub} vulnerability, {kn}, {where}")
        if args.url and "lookup" in result:
            url = urllookup(result["blid"], result["lookup"])
            if url:
                print(url)
        if args.verbose and "debug" in result:
            print(result["debug"])
        # For factored RSA keys, optionally dump the recovered primes.
        if args.verbose and "p" in result:
            print(f"RSA p {result['p']:02x}")
        if args.verbose and "q" in result:
            print(f"RSA q {result['q']:02x}")
def runcli():
    """Command-line entry point: parse arguments, then check keys.

    Input can be files/stdin (certificates, CSRs, public keys, or line-based
    moduli / cert / ssh-key lists) or live TLS/SSH scans of hosts.
    """
    global count
    # SIGHUP prints a progress count without interrupting processing.
    signal.signal(signal.SIGHUP, _sighandler)
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "infiles", nargs="*", help="Input file (certificate, csr or public key)"
    )
    ap.add_argument(
        "-c", "--checks", help="Comma-separated list of checks (default: all)"
    )
    ap.add_argument("--list", action="store_true", help="Show list of possible checks")
    ap.add_argument(
        "-m",
        "--moduli",
        action="store_true",
        help="Input file is list of RSA hex moduli",
    )
    ap.add_argument(
        "--crt-lines", action="store_true", help="Input file is list of base64 certs"
    )
    ap.add_argument(
        "--ssh-lines", action="store_true", help="Input file is list of ssh public keys"
    )
    ap.add_argument("-a", "--all", action="store_true", help="Show all keys")
    ap.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    ap.add_argument("-j", "--json", action="store_true", help="JSON output")
    ap.add_argument(
        "-u", "--url", action="store_true", help="Show private key URL if possible"
    )
    ap.add_argument("--update-bl", action="store_true", help="Update blocklist")
    ap.add_argument(
        "--update-bl-and-urls",
        action="store_true",
        help="Update blocklist and optional URL lookup list",
    )
    ap.add_argument("--extrabl", help="comma-separated list of extra blocklist files")
    ap.add_argument(
        "-t",
        "--tls",
        action="store_true",
        help="Scan TLS (pass hostnames or IPs instead of files)",
    )
    # default ports for https, smtps, imaps, pop3s, ldaps, ftps
    # and 8443 as a common non-default https port
    ap.add_argument(
        "--tls-ports",
        default="443,465,636,990,993,995,8443",
        help="TLS ports (comma-separated)",
    )
    ap.add_argument(
        "-s",
        "--ssh",
        action="store_true",
        help="Scan SSH (pass hostnames or IPs instead of files)",
    )
    ap.add_argument("--ssh-ports", default="22", help="SSH ports (comma-separated)")
    args = ap.parse_args()
    # The three line-based input formats are mutually exclusive.
    if (
        (args.moduli and args.crt_lines)
        or (args.moduli and args.ssh_lines)
        or (args.ssh_lines and args.crt_lines)
    ):
        sys.exit("Multiple input format parameters cannot be combined.")
    if (args.moduli or args.crt_lines or args.ssh_lines) and (args.tls or args.ssh):
        sys.exit("Scan modes and input file modes cannot be combined.")
    # Maintenance modes: update the blocklist data and exit.
    if args.update_bl_and_urls:
        update_bl(lookup=True)
        sys.exit()
    if args.update_bl:
        update_bl()
        sys.exit()
    if args.list:
        for k, v in allchecks.items():
            print(f"{k}/{v['type']} keys: {v['desc']}")
        sys.exit()
    if not args.infiles:
        ap.print_help()
        sys.exit()
    if args.extrabl:
        extrabl = args.extrabl.split(",")
        for bl in extrabl:
            loadextrabl(bl)
    # Validate user-selected checks against the registry of known checks.
    if args.checks:
        userchecks = args.checks.split(",")
        for c in userchecks:
            if c not in allchecks:
                sys.exit(f"{c} is not a valid check")
    else:
        userchecks = allchecks.keys()
    # Live-scan modes: infiles are hostnames/IPs, not paths.
    if args.tls:
        ports = [int(p) for p in args.tls_ports.split(",")]
        for host in args.infiles:
            for port in ports:
                keys = scantls(host, port, userchecks)
                for k in keys:
                    _printresults(k, f"tls:{host}:{port}", args)
    if args.ssh:
        ports = [int(p) for p in args.ssh_ports.split(",")]
        for host in args.infiles:
            for port in ports:
                keys = scanssh(host, port)
                for k in keys:
                    _printresults(k, f"ssh:{host}:{port}", args)
    if args.ssh or args.tls:
        sys.exit(1)
    # File/stdin processing modes. "-" means stdin.
    for fn in args.infiles:
        if fn == "-":
            f = sys.stdin
        else:
            f = open(fn, errors="replace")
        if args.moduli:
            # One hex RSA modulus per line, optionally "Modulus="-prefixed.
            for line in f:
                count += 1
                if line.startswith("Modulus="):
                    line = line[8:]
                n = int(line, 16)
                r = {"type": "rsa", "bits": n.bit_length()}
                r["results"] = checkrsa(n, checks=userchecks)
                _printresults(r, f"modulus {n:02x}", args)
        elif args.crt_lines:
            # One base64 certificate per line, optional description after , ; or space.
            lno = 0
            for line in f:
                desc = f"{fn}[{lno}]"
                ll = re.split("[,; ]", line.rstrip(), maxsplit=1)
                if len(ll) == 2:
                    desc += f" {ll[1]}"
                # Re-wrap the bare base64 body in PEM markers for parsing.
                crt = PRECRT + ll[0] + POSTCRT
                r = checkcrt(crt, checks=userchecks)
                _printresults(r, desc, args)
                lno += 1
                count += 1
        elif args.ssh_lines:
            # One OpenSSH public key per line ("type base64 comment").
            lno = 0
            for line in f:
                desc = f"{fn}[{lno}]"
                ll = line.rstrip().split(" ", 2)
                if len(ll) == 3:
                    desc += f" {ll[2]}"
                r = checksshpubkey(line, checks=userchecks)
                _printresults(r, desc, args)
                lno += 1
                count += 1
        else:
            # Default: treat the whole (size-capped) file as a single key object.
            fcontent = f.read(MAXINPUTSIZE)
            r = detectandcheck(fcontent, checks=userchecks)
            _printresults(r, fn, args)
        if fn != "-":
            f.close()
| StarcoderdataPython |
6441681 | import turtle
#note: the horizontal and vertical distance between the sides is constant
#hence (distance + 2)
def spiral(turtle, counter, sides, distance, angle):
    """Recursively draw a spiral: one segment per call.

    Each segment is `distance` long, turns `angle` degrees to the right and
    the next segment grows by 2 units. Stops once `counter` reaches `sides`.
    `turtle` is any object exposing forward() and right().
    """
    if counter == sides:
        return
    turtle.forward(distance)
    turtle.right(angle)
    spiral(turtle, counter + 1, sides, distance + 2, angle)
def main():
    """Draw two 50-sided spirals: a square one (90°) and a skewed one (91°)."""
    wn = turtle.Screen()
    tess = turtle.Turtle()
    tess.color('blue')
    tess.right(90)
    tess.forward(1)
    distance = 1
    sides = 50
    spiral(tess, 0, sides, distance, 90)
    # Move right of the first spiral before drawing the second one.
    tess.penup()
    tess.goto(sides * 3 ,0)
    tess.pendown()
    tess.left(180)
    spiral(tess, 0, sides, distance, 91)
    #spiralSkewed(tess, 1, upperBound, distance, 90)
    # Keep the window open until it is clicked.
    wn.exitonclick()

main()
| StarcoderdataPython |
12856096 | <reponame>lycantropos/rsrc_web
from .literals import booleans
from .models import (readable_web_streams,
web_streams,
writeable_web_streams)
from .paths import web_url_strings
| StarcoderdataPython |
8199122 | <reponame>ju-sh/colorviews
"""
colorviews module
"""
__all__ = ["Color", "AlphaColor", "scale"]
__version__ = "0.1-alpha1"
__author__ = "<NAME>"
from colorviews.colors import Color, AlphaColor
from colorviews.utils import scale
| StarcoderdataPython |
126353 | <reponame>michdr/satzify
# Short description shown in the app's about/help section.
WHAT_IS_SATZIFY = (
    "Satzify is a simple tool to help with analysing sentences in a given language. "
    "It helps with visualising and highlighting the different parts "
    "of a sentence according to the selected categories and parts to annotate. "
    "Initially and predominantly it has been created to be used for German, and that's where the name is derived from. "
)
LANGUAGE = "🇩🇪"
# spaCy pipeline used for German analysis.
SPACY_MODEL = "de_core_news_sm"
EXAMPLE_TEXT = "Besser ein Spatz in der Hand, als eine Taube auf dem Dach."
# Part-of-speech tags: display name and highlight color per spaCy POS code.
POS = dict(
    NOUN=dict(name="Noun", color="#afa"),
    PRON=dict(name="Pronoun", color="#fea"),
    VERB=dict(name="Verb", color="#8ef"),
    ADJ=dict(name="Adjective", color="#faa"),
    ADV=dict(name="Adverb", color="#d94"),
    ADP=dict(name="Adposition", color="#ccc"),
)
# German grammatical cases. "Genitiv" fixed (was misspelled "Genetiv").
CASES = dict(
    NOM=dict(name="Nominativ", color="#afa"),
    ACC=dict(name="Akkusativ", color="#fea"),
    DAT=dict(name="Dativ", color="#8ef"),
    GEN=dict(name="Genitiv", color="#faa"),
)
# Annotation categories offered in the UI.
ANNOTATIONS = dict(POS=POS, CASES=CASES)
| StarcoderdataPython |
12828565 | <reponame>1024sparrow/traliva
#!/usr/bin/env python3
import sys, re
def get_map(pin_js_paths, pin_css_paths, pout_js, pout_css, pout_js_css):
    """Parse every JS/CSS file into typed text fragments.

    For each input path, appends an entry {'filepath', 'text'} to the
    per-type output list (pout_js or pout_css). The SAME dict object is
    also appended to pout_js_css, so mutating a fragment through either
    list is visible in both — apply_map() relies on this sharing.
    """
    for i_src in [(pin_js_paths, pout_js), (pin_css_paths, pout_css)]:
        for i in i_src[0]:
            with open(i) as f:
                cand = {
                    'filepath': i,
                    'text': _get_text_as_array(f.readlines(), True, True)
                }
                i_src[1].append(cand)
                pout_js_css.append(cand)
    print('get_map()')
    #print('pout_js_css: ', pout_js_css)##
def apply_map(p_js, p_css, p_js_css):
    """Write every mapped file's accumulated text fragments back to disk.

    p_js and p_css are accepted for symmetry with get_map() but are not
    needed here: p_js_css already references every entry. Entries whose
    'filepath' is None are skipped.
    """
    print('apply_map()')
    for entry in p_js_css:
        if entry['filepath'] is None:
            continue
        # str.join instead of repeated += (linear instead of quadratic).
        content = ''.join(fragment['text'] for fragment in entry['text'])
        # Context manager guarantees the file is closed even if write() fails.
        with open(entry['filepath'], 'w') as out:
            out.write(content)
#def process_code_fragment(p_code):
# retval = '>>>>' + p_code + '<<<<'
# #retval = 'XXXX'
# return retval
# p_text - массив отдельных строк
# Должен вернуть массив фрагментов с указанием их типов (0 - комментарий, 1 - код, 2 - содержимое строки)
# [
# {
# type: 1,
# text: 'do_some();\nconsole.log(\''
# },
# {
# type: 2,
# text: 'hello world'
# },
# {
# type: 1,
# text: '\');'
# },
# {
# type: 0,
# text: '//некий комментарий'
# },
# ]
re_one_line_comment = re.compile(r'//.*', re.DOTALL)
def _get_text_as_array(p_text, pp_comment, pp_newlines):
global __type
global __buffer
___type = None
__buffer = ''
retval = []
if not pp_newlines:
pp_comment = False
use_strict_used = False
a = ''
usestrict_pos = None
for line in p_text:
stripline = line.strip()
if not use_strict_used:
if stripline.startswith("'use strict'") or stripline.startswith('"use strict"'):
usestrict_pos = len(a)
a += '#' # любой символ. В результат он не попадёт.
use_strict_used = True
continue
if pp_comment:
a += line
else:
if not pp_newlines:
line_cand = line.strip()
a += re.sub(re_one_line_comment, '', line_cand)
#b = ''
in_comment_1 = False # // ...
in_comment_2 = False # /* ... */
in_comment = False
in_string_1 = False # '
in_string_2 = False # "
in_string_3 = False # ``
string_type = 0 # for in_string_3
#string_content = [] # for in_string_3
#string_state = 0 # for in_string_3
#string_indent = 0 # for in_string_3
"""
`` - тупое экранирование. Сохраняются переносы строки и все символы между '`'
`
asd
` --> '\n\t\tasd\n\t'
1`` - как ``, но дополнительно обрезаются первая и последняя строки
1`
asd
` --> '\t\tasd'
2`` - как 1``, но дополнительно убираются отступы. Вычисляется наибольший общий отступ, и он отрезается. Отступы работают только с пробелами - символ табуляции не считается за отступ.
var a = 2`
var a =
5;
`; --> var a ='var a =\n\t5;';
3`` - убираются крайние пробельные символы и все переносы строки. Если последний символ в строке отличен от '>' и первый символ следующей строки отличен от '<', то в результат вставляется пробел. Первая и последняя строки не обрезаются (так, если что..).
var a = 3`
<table>
<tr>
</tr>
<tr>
</tr>
</table>
` --> var a = '<table><tr></tr><tr></tr></table>'
"""
in_string = False
prev_char = 's' # nor '\\' or '/' or '*'
code_cand = ''
counter = 0
for i in a:
if not (counter is None):
if counter == usestrict_pos:
t = __buffer + code_cand
if __buffer:
retval.append({
'type': __type,
'text': __buffer
})
__buffer = ''
if code_cand:
retval.append({
'type': 1,
'text': code_cand
})
code_cand = ''
retval.append({
'type': 1,
'text': "\n'use strict';\n"
})
__type = 1
counter += 1
continue
counter += 1
skip_current = False
if (not in_comment) and (not in_string) and prev_char == '/' and i == '/':
if len(code_cand) > 0:
code_cand = code_cand[:-1]
#b += process_code_fragment(code_cand) + '/'
_accumulate_array_by_symbols(1, code_cand, retval)
_accumulate_array_by_symbols(0, '/', retval)
code_cand = ''
in_comment_1 = True
in_comment = True
elif in_comment_1 and i == '\n':
if not in_comment_2:
in_comment_1 = False
in_comment = False
elif prev_char == '/' and i == '*':
if not in_comment_1:
if len(code_cand) > 0:
code_cand = code_cand[:-1]
#b += process_code_fragment(code_cand) + '/'
_accumulate_array_by_symbols(1, code_cand, retval)
code_cand = ''
in_comment_2 = True
in_comment = True
if pp_comment:
_accumulate_array_by_symbols(0, '/', retval)
#if not pp_comment:
# b = b[:-1] # удаляем предыдущий символ ('/')
elif prev_char == '*' and i == '/':
if not in_comment_1:
in_comment_2 = False
in_comment = False
skip_current = True
elif prev_char == '\\' and i == '\\':
prev_char = 's'
#b += i
_accumulate_array_by_symbols(__type, i, retval)
continue
elif prev_char != '\\' and i == '"':
if not in_comment and not in_string_1 and not in_string_3:
if in_string:
if in_string_2:
in_string_2 = False
else:
in_string_1 = False
in_string_3 = False
in_string = False
else:
#b += process_code_fragment(code_cand + '"')
skip_current = True
_accumulate_array_by_symbols(1, code_cand + '"', retval)
skip_current = True
code_cand = ''
in_string_2 = True
in_string = True
elif prev_char != '\\' and i == "'":
if not in_comment and not in_string_2 and not in_string_3:
if in_string:
if in_string_1:
in_string_1 = False
else:
in_string_2 = False
in_string_3 = False
in_string = False
else:
#b += process_code_fragment(code_cand + "'")
skip_current = True
_accumulate_array_by_symbols(1, code_cand + "'", retval)
skip_current = True
code_cand = ''
in_string_1 = True
in_string = True
elif prev_char != '\\' and i == "`":
if not in_comment and not in_string_1 and not in_string_2:
if in_string:
#skip_current = True
if in_string_3:
#in_string_3 = False
if string_type == 0 or string_type == 3:
tmp = string_content
else:
tmp = string_content[1:-1] # обрезаем первую и последнюю строки
if string_type == 2:
indent = 10000
for ca in tmp:
cand = 0
for ca_i in ca:
if ca_i == ' ':
cand += 1
else:
break
if cand < indent:
indent = cand
if string_type == 3:
prev = 'q' # any letter symbol
tmp_between_parath = False
for ca in [tmp2.strip() for tmp2 in tmp]:
if len(ca) and len(prev) and prev[-1] != '>' and ca[0] != '<':
_accumulate_array_by_symbols(2, ' ', retval)
tmp_between_parath = False
else:
tmp_between_parath = True
cand = ca
if tmp_between_parath:
while len(cand) and cand[0] == ' ':
cand = cand[1:]
_accumulate_array_by_symbols(2, ca, retval)
prev = ca
else:
for ca in tmp:
if string_type == 2:
cand = ca[indent:]
else:
cand = ca
_accumulate_array_by_symbols(2, cand, retval)
else:
in_string_1 = False
in_string_2 = False
_accumulate_array_by_symbols(1, code_cand + "'", retval)
in_string = False
else:
skip_current = True
#print('::',prev_char,'::::::::::', code_cand)
in_string_3 = True
in_string = True
string_type = 0
string_content = ['']
string_state = 0
string_indent = 0
if prev_char == '1':
string_type = 1
code_cand = code_cand[:-1]
elif prev_char == '2':
string_type = 2
code_cand = code_cand[:-1]
elif prev_char == '3':
string_type = 3
code_cand = code_cand[:-1]
_accumulate_array_by_symbols(1, code_cand + "'", retval)
code_cand = ''
if (not in_comment) and (not skip_current):
if in_string:
if in_string_3:
if i == '\n':
string_content.append('')
else:
ca = i
if i == "'":
ca = '\\\''
string_content[-1] += ca
else:
#b += i
_accumulate_array_by_symbols(2, i, retval)
else:
if in_string_3:
#_accumulate_array_by_symbols(1, "'", retval)
#code_cand += "'"
in_string_3 = False
else:
code_cand += i
else: # комментарии /* ... */
if not in_string:
if pp_comment:
#b += i
_accumulate_array_by_symbols(0, i, retval)
prev_char = i
prev_instring = in_string
#b += process_code_fragment(code_cand)
_accumulate_array_by_symbols(1, code_cand, retval)
_stop_accumulating_array_by_symbols(retval)
return retval
# Module-level accumulator state shared by the two helpers below:
# __buffer collects consecutive fragments of the same type; __type is the
# type tag of the fragment run currently held in __buffer (None = empty).
__buffer = ''
__type = None
def _accumulate_array_by_symbols(pin_type, pin_fragment, pout_target):
    """Append *pin_fragment* to the module-level run buffer.

    Consecutive fragments tagged with the same type are merged into a single
    run; when the type changes, the pending run is flushed to *pout_target*
    as a ``{'type': ..., 'text': ...}`` dict and a new run is started.
    """
    global __buffer
    global __type
    if not pin_fragment:
        return  # nothing to add; keep the current run open
    if pin_type != __type:
        # Type changed: emit the pending run (if any) and start a new one.
        if __buffer:
            pout_target.append({'type': __type, 'text': __buffer})
        __type = pin_type
        __buffer = pin_fragment
    else:
        __buffer += pin_fragment
def _stop_accumulating_array_by_symbols(pout_target):
    """Flush any pending run into *pout_target* and reset the accumulator."""
    global __buffer
    global __type
    pending = __buffer
    if pending:
        pout_target.append({'type': __type, 'text': pending})
    # Reset the shared state so a fresh accumulation can start cleanly.
    __buffer = ''
    __type = None
| StarcoderdataPython |
3518179 | <reponame>arnaudsm/brep
import os
import pathlib
import gzip
class File():
    """Line-oriented reader that transparently handles gzip-compressed files.

    Accepts a plain-text file or a ``.gz`` file (detected by suffix) and
    exposes a uniform str-based interface: ``readline``, iteration,
    ``seek``/``tell``, the context-manager protocol, and ``len()``
    (uncompressed size for gzip files, obtained via the external
    ``gzip -l`` command).
    """
    def __init__(self, file) -> None:
        """Open *file*, given either as a ``str`` path or a ``pathlib.Path``.

        :raises TypeError: if *file* is neither a str nor a Path.
        """
        if isinstance(file, str):
            self.path = pathlib.Path(file)
        elif isinstance(file, pathlib.Path):
            self.path = file
        else:
            # Fixed message typo ("prove a str of") and narrowed the
            # exception to TypeError (still caught by `except Exception`).
            raise TypeError("Please provide a str or pathlib.Path file.")
        self.gzip = self.path.suffix == ".gz"
        if self.gzip:
            self.file = gzip.open(self.path, 'rb')
        else:
            self.file = open(self.path, "r")
    def seek(self, position):
        """Move the underlying file cursor to byte *position*."""
        return self.file.seek(position)
    def tell(self):
        """Return the current byte position of the underlying cursor."""
        return self.file.tell()
    def readline(self):
        """Read one line, decoding bytes to str for gzip files."""
        line = self.file.readline()
        if self.gzip:
            line = line.decode()
        return line
    def __iter__(self):
        # Yield each line as str, decoding on the fly for gzip files.
        for line in self.file:
            if self.gzip:
                line = line.decode()
            yield line
    def __enter__(self):
        # Added so ``with File(...) as f:`` actually works; previously there
        # was no __enter__, so the with-statement raised AttributeError.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Fixed signature: __exit__ must accept the exception triple,
        # otherwise every `with` block exit raised TypeError.
        self.file.close()
    def close(self):
        """Explicitly close the underlying file object."""
        self.file.close()
    def __len__(self):
        """Return the file size in bytes (uncompressed size for gzip)."""
        if self.gzip:
            # `gzip -l` prints a header row then a data row whose second
            # column is the uncompressed size.
            pipe_in = os.popen(f'gzip -l "{self.path}"')
            return int(pipe_in.readlines()[1].split()[1])
        else:
            return os.path.getsize(self.path)
class Search():
    """Iterate the lines of a sorted file that start with a given prefix.

    Performs a byte-offset binary search over the file to find the first
    candidate line, then streams forward yielding stripped lines until the
    prefix stops matching.
    """
    def __init__(self, prefix, filepath) -> None:
        self.file = File(filepath)
        self.prefix = prefix
    def __iter__(self):
        lo = 0
        hi = len(self.file) - 1
        # Binary search on raw byte offsets: seek to the middle, discard the
        # (possibly partial) line under the cursor, and compare the next
        # complete line against the prefix.
        while lo <= hi:
            middle = int((lo + hi) / 2)
            self.file.seek(middle)
            self.file.readline()  # skip the partial line at the probe point
            probe = self.file.readline().strip()
            if self.prefix <= probe or probe.startswith(self.prefix):
                hi = middle - 1
            else:
                lo = middle + 1
        # `lo` now sits just before the first candidate line.
        self.file.seek(lo)
        self.file.readline()
        for candidate in self.file:
            if not candidate.startswith(self.prefix):
                break
            yield candidate.strip()
| StarcoderdataPython |
3363667 | <reponame>Lauszus/socketsocketcan
from socketsocketcan import TCPBus
import can
from datetime import datetime
from time import sleep
# Demo script: open a TCP-backed CAN bus on port 5000, echo every received
# frame via a Printer listener, and transmit a counter frame twice a second
# until the peer disconnects or the user presses Ctrl+C.
bus = TCPBus(5000)
print("socket connected!")
#create a listener to print all received messages
listener = can.Printer()
notifier = can.Notifier(bus,(listener,),timeout=None)
try:
    # Reuse one Message object; only id/data/timestamp change per send.
    msg = can.Message(
        is_extended_id=False,
        dlc=6)
    count = 0
    while bus.is_connected:
        msg.arbitration_id = count
        # Encode the counter into the 6-byte payload (little-endian).
        # NOTE(review): count grows past the 11-bit standard-ID range (2047)
        # after ~17 minutes; presumably the demo is stopped before then.
        msg.data = (count).to_bytes(6,"little")
        bus.send(msg)
        msg.timestamp = datetime.now().timestamp() #just needed for printing
        print(msg) #print sent message
        count+=1
        sleep(0.5)
    print("socket disconnected.")
except KeyboardInterrupt:
    print("ctrl+c, exiting...")
# Always stop the notifier thread; shut the bus down only if still open.
notifier.stop()
if bus.is_connected:
    bus.shutdown()
| StarcoderdataPython |
11326862 | <gh_stars>1000+
import pygments
class SimplifiedHTTPLexer(pygments.lexer.RegexLexer):
    """Simplified HTTP lexer for Pygments.
    It only operates on headers and provides a stronger contrast between
    their names and values than the original one bundled with Pygments
    (:class:`pygments.lexers.text import HttpLexer`), especially when
    Solarized color scheme is used.
    """
    # NOTE(review): this file relies on a bare `import pygments` exposing the
    # `pygments.lexer` and `pygments.token` submodules as attributes; verify
    # those submodules are imported somewhere, otherwise explicit
    # `import pygments.lexer` / `import pygments.token` is needed.
    name = 'HTTP'
    aliases = ['http']
    filenames = ['*.http']
    # Three rules: request line, response status line, and header lines.
    # Each regex group is mapped to a token type via bygroups().
    tokens = {
        'root': [
            # Request-Line
            (r'([A-Z]+)( +)([^ ]+)( +)(HTTP)(/)(\d+\.\d+)',
             pygments.lexer.bygroups(
                 pygments.token.Name.Function,
                 pygments.token.Text,
                 pygments.token.Name.Namespace,
                 pygments.token.Text,
                 pygments.token.Keyword.Reserved,
                 pygments.token.Operator,
                 pygments.token.Number
             )),
            # Response Status-Line
            (r'(HTTP)(/)(\d+\.\d+)( +)(\d{3})( +)(.+)',
             pygments.lexer.bygroups(
                 pygments.token.Keyword.Reserved,  # 'HTTP'
                 pygments.token.Operator,  # '/'
                 pygments.token.Number,  # Version
                 pygments.token.Text,
                 pygments.token.Number,  # Status code
                 pygments.token.Text,
                 pygments.token.Name.Exception,  # Reason
             )),
            # Header
            (r'(.*?)( *)(:)( *)(.+)', pygments.lexer.bygroups(
                pygments.token.Name.Attribute,  # Name
                pygments.token.Text,
                pygments.token.Operator,  # Colon
                pygments.token.Text,
                pygments.token.String  # Value
            ))
        ]
    }
| StarcoderdataPython |
3511275 | <reponame>techshot25/gpytorch<filename>gpytorch/models/pyro_variational_gp.py
#!/usr/bin/env python3
import torch
import pyro
from .abstract_variational_gp import AbstractVariationalGP
class PyroVariationalGP(AbstractVariationalGP):
    """Approximate GP exposing a Pyro `model`/`guide` pair for SVI training."""
    def __init__(self, variational_strategy, likelihood, num_data, name_prefix=""):
        """Store the likelihood, dataset size and Pyro name prefix.

        *num_data* is the total number of training points; it is used in
        `model` to rescale minibatch likelihood terms to the full dataset.
        """
        super(PyroVariationalGP, self).__init__(variational_strategy)
        self.name_prefix = name_prefix
        self.likelihood = likelihood
        self.num_data = num_data
    def guide(self, input, output, *params, **kwargs):
        """Pyro guide: sample inducing values from the variational q(u)."""
        inducing_dist = self.variational_strategy.variational_distribution.variational_distribution
        # Draw samples from q(u) for KL divergence computation
        self.sample_inducing_values(inducing_dist)
        self.likelihood.guide(*params, **kwargs)
    def model(self, input, output, *params, **kwargs):
        """Pyro model: sample u ~ p(u) and score *output* under the likelihood."""
        pyro.module(self.name_prefix + ".gp_prior", self)
        # Get the variational distribution for the function
        function_dist = self(input)
        # Draw samples from p(u) for KL divergence computation
        prior_dist = self.variational_strategy.prior_distribution
        inducing_values_samples = self.sample_inducing_values(prior_dist)
        # Keep only the leading sample dims of the draw, then add singleton
        # axes for each batch dim of p(u) so downstream shapes broadcast.
        sample_shape = inducing_values_samples.shape[:-len(prior_dist.shape())] + \
            torch.Size([1] * len(prior_dist.batch_shape))
        # Go from function -> output
        num_minibatch = function_dist.batch_shape[-1]
        # Scale the minibatch log-likelihood up to the full dataset size.
        with pyro.poutine.scale(scale=float(self.num_data / num_minibatch)):
            return self.likelihood.pyro_sample_output(
                output, function_dist, *params, **kwargs, sample_shape=sample_shape
            )
    def sample_inducing_values(self, inducing_values_dist):
        """
        Sample values from the inducing point distribution `p(u)` or `q(u)`.
        This should only be re-defined to note any conditional independences in
        the `inducing_values_dist` distribution. (By default, all batch dimensions
        are not marked as conditionally indendent.)
        """
        reinterpreted_batch_ndims = len(inducing_values_dist.batch_shape)
        samples = pyro.sample(
            self.name_prefix + ".inducing_values",
            inducing_values_dist.to_event(reinterpreted_batch_ndims)
        )
        return samples
    def transform_function_dist(self, function_dist):
        """
        Transform the function_dist from `gpytorch.distributions.MultivariateNormal` into
        some other variant of a Normal distribution.
        This is useful for marking conditional independencies (useful for inference),
        marking that the distribution contains multiple outputs, etc.
        By default, this funciton transforms a multivariate normal into a set of conditionally
        independent normals when performing inference, and keeps the distribution multivariate
        normal for predictions.
        """
        if self.training:
            return pyro.distributions.Normal(function_dist.mean, function_dist.variance)
        else:
            return function_dist
    def __call__(self, input, *args, **kwargs):
        """Evaluate the GP at *input*, post-processing the returned distribution."""
        function_dist = super().__call__(input, *args, **kwargs)
        # Now make the variational distribution Normal - for conditional indepdence
        function_dist = self.transform_function_dist(function_dist)
        res = function_dist
        return res
| StarcoderdataPython |
351639 | # -*- coding: utf-8 -*-
"""
Created on Wed May 25 15:37:43 2016
@author: mtkessel
"""
import queue as Q
import serial
import threading
import time
import weakref
class LLAP(serial.Serial):
    """Lightweight Local Automation Protocol (LLAP) defines a small device
    protocol that balances simplicity and human readability. The
    class instance provides either client (commander) or server
    (device) context associated with a specific serial port.
    The object maintains the synchronization and strips the
    protocol syntax from the command/response strings.
    A single instance may be used to communicate with multiple devices
    or multiple instance may be defined; each instance of LLAP maintains
    an independent Thread and Queue
    """
    def __init__(self,
                 deviceId = '--',
                 *args,
                 **kwargs# whatever args serial wants, serial gets but don't list them here in case base class changes (not our problem!)
                 ):
        """Initialize comm port object. If a port is given, then the port will be
        opened immediately. Otherwise a Serial port object in closed state
        is returned."""
        # NOTE(review): debug print of the positional serial args; consider
        # removing for production use.
        print(*args)
        self.__running = False           # reader-thread run flag
        self.__deviceName = ''           # cached DEVNAME reply (see changeDeviceId)
        self.__messageReceived = False
        self.__deviceId = deviceId[:2]   # LLAP device ids are exactly 2 chars
        self.__thread = []               # [] means "no reader thread running"
        self.__queue = Q.Queue(1000)     # bounded queue of received messages
        serial.Serial.__init__(self,*args,**kwargs)
        # A finite read timeout is required so the reader thread can poll the
        # __running flag; default to 1 second when none was given.
        if (self.timeout == None):
            self.timeout = 1.0
    def start(self):
        """Start the underlying thread that reads and queues messages
        on this LLAP port
        """
        # NOTE: A weakref.proxy(self) still allows the thread to start correctly
        # (i.e., callable object) but still does not release the referece sufficiently
        # to allow the destructor to actually run
        self.__thread = threading.Thread(target = weakref.proxy(self), name = "Thread_" + self.port)
        self.__thread.start()
    def stop(self, isDestructor = False):
        """Stop the underlying thread that reads and queues messages
        on this LLAP port
        """
        # Stop the thread by setting a flag and joining
        # When the join exits we can test whether the thread
        # really died
        self.__running = False
        if (self.__thread != []):
            print("LLAP Attempting to Stop " + self.__thread.name)
            # Join bound (3s) is above the serial read timeout so a blocked
            # read() can expire and observe __running == False.
            self.__thread.join(3.0)
            if (self.__thread.is_alive()):
                print("ERROR: LLAP Thread Did NOT Stop")
            else:
                print("LLAP " + self.__thread.name + " Stopped")
                self.__thread = []
        else:
            if (False == isDestructor):
                print("LLAP is already Stopped")
    def __del__(self):
        """Destructor: best-effort stop of the reader thread."""
        # WARNING: Destructor is NOT guaranteed to execute immediately
        # but placing a stop here ensures that we attempt to stop the
        # thread, but only if we have broken the circular reference
        # induced by passing the callable self to the Thread; making
        # the callable a separate function does not solve the problem
        # because we would still need to pass self as an argument, which
        # holds the reference that prevents the garbage collector from
        # running the destructor.
        self.stop(True) # inidicate that this stop is coming from a destructor
    def __call__(self):
        """Reader-thread body: parse incoming LLAP frames and queue them."""
        self.__running = True
        print("LLAP " + self.__thread.name + " Started")
        while (self.__running == True):
            # Read one byte at a time while hunting for a frame marker; the
            # serial timeout keeps this loop responsive to stop().
            c = self.read(1);
            try:
                d = c.decode("utf-8")
                if (d == 'a'):
                    # Found a message start, read the remaining characters
                    # and move them to the message queue
                    c = self.read(11)
                    if (self.__queue.full() == True):
                        self.__queue.get() # pop oldest item off first
                    m = c.decode("utf-8")
                    print(m)
                    self.__queue.put(m)
                elif (d == 'n'):
                    # Found an Extended message, read it until the requisite EOF/EOL signal
                    c = self.readline()
                    if (c != -1):
                        # Throw out any trailing control characters (e.g. <CR><LR> or anything else)
                        if (c[-1] <= 32):
                            c = c[:c.__len__()-1]
                        if (c[-1] <= 32):
                            c = c[:c.__len__()-1]
                        if (self.__queue.full() == True):
                            self.__queue.get() # pop oldest item off first
                        m = c.decode("utf-8")
                        print(m)
                        self.__queue.put(m)
                else:
                    # We are out of sync with the message header
                    # or we timed out, just move on
                    pass
            except:
                # NOTE(review): bare except silently drops decode/read errors;
                # presumably intentional to keep the reader alive — confirm.
                pass
        print("LLAP " + self.__thread.name + " Stopping...")
    def get(self, block = True, timeout_sec = None):
        """Pop one queued message; returns (message, deviceId), both None on timeout."""
        deviceId = None
        message = None
        if (timeout_sec == None):
            timeout_sec = self.timeout
        try:
            c = self.__queue.get(block = block, timeout = timeout_sec)
            # Queued strings are "<id><payload>": 2-char device id + payload.
            deviceId = c[0:2]
            message = c[2:]
        except (Q.Empty):
            pass
        return message, deviceId
    def waitFor(self, message, deviceId=None, timeout=None, displayAll=False):
        """Wait for the specified message as a prefix to any received message
        If the incoming message begins with the specified message the wait
        exits, otherwise the specified timeout will be used
        Default timeout applies when no timeout is specified
        """
        if (deviceId == None):
            deviceId = self.__deviceId
        if (timeout == None):
            timeout = self.timeout
        currentMessage = None
        currentId = None
        startTime = time.time()
        currentTime = startTime
        while ((currentTime - startTime) < timeout):
            currentMessage, currentId = self.get()
            if (True == displayAll):
                self.display(currentMessage, currentId)
            if (currentMessage != None):
                # Match on prefix AND device id; otherwise discard and keep waiting.
                if (message == currentMessage[:len(message)]) and (deviceId == currentId):
                    break;
                else:
                    currentMessage = None
                    currentId = None
            currentTime = time.time()
        return currentMessage, currentId
    def send(self,message,deviceId=None):
        """Frame *message* as an LLAP packet ("a" + id + payload) and write it."""
        if (deviceId == None):
            deviceId = self.__deviceId
        l = len(message)
        # Standard LLAP payloads are exactly 9 chars; pad short ones with '-'.
        # NOTE(review): payloads longer than 9 chars are sent untruncated —
        # confirm that is intended for extended messages.
        if (l < 9):
            message = message + (9-l)*'-'
        message = "a" + deviceId + message + "\r\n"
        self.write(message.encode())
    def display(self,message,deviceId=None):
        """Pretty-print *message* prefixed with the cached device name and id."""
        if (message != None):
            if (self.__deviceName != None):
                if (deviceId != None):
                    print(self.__deviceName + " (" + deviceId[:2] + ") " + message)
                else:
                    print(self.__deviceName + " (NOID) " + message)
            else:
                print(message)
    def changeDeviceId(self,deviceId):
        """Ask the device to change its id, then refresh the cached DEVNAME.

        Raises TimeoutError when the device does not acknowledge CHDEVID.
        """
        self.send("CHDEVID"+deviceId)
        self.__deviceId = deviceId[:2]
        message, deviceId = self.waitFor("CHDEVID","--")
        if (message == None):
            raise TimeoutError("Timeout Waiting for CHDEVID")
        self.send("DEVNAME")
        message = self.get()[0]
        if (message != None):
            self.__deviceName = message
| StarcoderdataPython |
5077963 | #!/usr/bin/env python
import ngs_utils.ensembl as ebl
import os
import shutil
from optparse import OptionParser, SUPPRESS_HELP
from os.path import isfile, join, basename, dirname, pardir
from ngs_utils import logger
from ngs_utils.file_utils import file_transaction, adjust_path, safe_mkdir, verify_file
''' Generates coding_regions BED file
Example usage:
python {__file__} -g GRCh37 --canonical | grep -v ^MT | grep -v ^GL | sort -k1,1V -k2,2n | bedtools merge -i - > coding_regions.canonical.clean.sort.merged.bed
'''
def main():
    """Extract CDS regions for a genome build into a canonical BED file.

    Command line: -g/--genome selects the build, -c/--canonical restricts
    the output to canonical transcripts. Invalid input is reported through
    logger.critical.
    """
    parser = OptionParser()
    parser.add_option(
        '-g', '--genome',
        dest='genome',
        help='Genome build. Accepted values: ' + ', '.join(ebl.SUPPORTED_GENOMES),
    )
    parser.add_option(
        '-c', '--canonical',
        dest='canonical',
        action='store_true',
        help='Use canonical only',
    )
    opts, args = parser.parse_args()
    if not opts.genome:
        logger.critical('Error: please, specify genome build name with -g (e.g. `-g hg19`)')
    genome = opts.genome
    logger.debug('Getting features from storage')
    features_bed = ebl.get_all_features(genome)
    if features_bed is None:
        logger.critical('Genome ' + genome + ' is not supported. Supported: ' + ', '.join(ebl.SUPPORTED_GENOMES))
    logger.warn('Extracting features from Ensembl GTF')
    # Keep only coding-sequence features.
    features_bed = features_bed.filter(lambda rec: rec[ebl.BedCols.FEATURE] == 'CDS')
    if opts.canonical:
        features_bed = features_bed.filter(ebl.get_only_canonical_filter(genome))
    logger.warn('Saving CDS regions...')
    output_fpath = adjust_path(join(dirname(__file__), pardir, genome, 'bed', 'CDS-canonical.bed'))
    # file_transaction writes to a temp path and moves it into place on exit.
    with file_transaction(None, output_fpath) as tx:
        features_bed.cut(range(6)).saveas(tx)
    logger.warn('Done, saved to ' + output_fpath)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5078718 | <reponame>windystrife/UnrealEngine_NVIDIAGameWork
import maya.cmds as cmds
import maya.OpenMaya as om
from functools import partial
# This script walks along the surface of a mesh from a single component and find the nearest N verts based on the number you pass into it (count).
# It then selects these verts and returns them as a list called "elements"
def surfaceVertSelector(startElement, *args):
    """Select the nearest ~count vertices around *startElement* on the mesh surface.

    Grows a soft-selection radius in 0.1 steps from the start component(s)
    until at least args[0] vertices fall inside it, then hard-selects them.
    """
    count = args[0]
    #print count
    vertCount = 0
    startDist = 0
    cmds.select(startElement)
    # Find all of the verts on the selected mesh and see if they are less than count. If they are then set count to be all verts.
    allVerts = cmds.polyEvaluate(v=True)
    #print allVerts
    if count > allVerts:
        count = allVerts
    #print args[0]
    # Run through the mesh and using soft select to traverse out and find verts until we reach the count value.
    while vertCount < count:
        startDist = startDist+0.1
        #print "startDist: ", startDist
        # ssf=1 restricts soft select to the surface; ssd is the falloff distance.
        cmds.softSelect(softSelectEnabled=True, ssf=1, ssd=startDist)
        selection = om.MSelectionList()
        # NOTE(review): this local shadows the function name; harmless here
        # since the function is not re-entered, but renaming would be cleaner.
        surfaceVertSelector = om.MRichSelection()
        om.MGlobal.getRichSelection(surfaceVertSelector)
        surfaceVertSelector.getSelection(selection)
        dagPath = om.MDagPath()
        component = om.MObject()
        # `iter` shadows the builtin; it walks mesh-vertex components only.
        iter = om.MItSelectionList( selection,om.MFn.kMeshVertComponent )
        elements = []
        while not iter.isDone():
            iter.getDagPath( dagPath, component )
            dagPath.pop()
            node = dagPath.fullPathName()
            fnComp = om.MFnSingleIndexedComponent(component)
            # Build "path.vtx[i]" strings for every vertex in this component.
            for i in range(fnComp.elementCount()):
                elements.append('%s.vtx[%i]' % (node, fnComp.element(i)))
            iter.next()
        vertCount = len(elements)
        #print "vertCount: ", vertCount
    # Disable soft select and make the gathered vertices the hard selection.
    cmds.softSelect(softSelectEnabled=False)
    cmds.select(elements)
#startElement = cmds.ls(sl=True)
#surfaceVertSelector(startElement, 75)
# Create a UI that will drive the selection tool dynamically using dragCommand.
# The reset button will allow you to change the starting component(s) on your mesh.
def surfaceVertSelectorUI(*args):
    """Build a small window whose slider drives surfaceVertSelector live.

    The Reset button rebuilds the UI so a new start component can be picked.
    """
    startElement = cmds.ls(sl=True, type="float3")
    if not startElement:
        # Guard clause: the tool needs at least one selected component.
        cmds.warning("You must select some components.")
        return
    # Recreate the window from scratch if it is already open.
    if cmds.window("surfaceVertSelector", exists = True):
        cmds.deleteUI("surfaceVertSelector")
    window = cmds.window("surfaceVertSelector", title='Surface Vert Selector', s=False)
    cmds.columnLayout()
    # Dragging the slider re-runs the selector with the slider value as count.
    cmds.intSliderGrp(
        "vertCountIntSliderGrp",
        label='Vert Count',
        field=True,
        minValue=1,
        maxValue=2000,
        value=1,
        step=1,
        cw3=[75, 50, 1000],
        dragCommand=partial(surfaceVertSelector, startElement),
    )
    cmds.button("refreshButton", label="Reset", c=surfaceVertSelectorUI)
    cmds.showWindow(window)
#surfaceVertSelectorUI() | StarcoderdataPython |
3437934 | <reponame>mlweilert/bpnet
"""Small helper-functions for used by modisco classes
"""
import pandas as pd
import numpy as np
from kipoi.readers import HDF5Reader
from bpnet.cli.contrib import ContribFile
from bpnet.functions import mean
import warnings
def bootstrap_mean(x, n=100):
    """Estimate the mean of *x* along axis 0 with *n* bootstrap resamples.

    Returns a ``(mean, std)`` pair: the average of the per-resample means
    and their standard deviation.
    """
    resample_means = [
        x[pd.Series(np.arange(len(x))).sample(frac=1.0, replace=True).values].mean(0)
        for _ in range(n)
    ]
    stacked = np.stack(resample_means)
    return stacked.mean(0), stacked.std(0)
def nan_like(a, dtype=float):
    """Return a new array with the shape of *a*, filled entirely with NaN."""
    return np.full(a.shape, np.nan, dtype=dtype)
def ic_scale(x):
    """Information-content scale *x* via modisco's viz_sequence helper."""
    from modisco.visualization import viz_sequence
    # Background base frequencies passed through to viz_sequence.ic_scale.
    base_background = np.array([0.27, 0.23, 0.23, 0.27])
    return viz_sequence.ic_scale(x, background=base_background)
def shorten_pattern(pattern):
    """Convert a long pattern name to its short form.

    Example: ``metacluster_0/pattern_1 -> m0_p1``. Inputs that are already
    short (no ``/``) are returned unchanged.
    """
    if "/" not in pattern:
        return pattern
    shortened = pattern.replace("metacluster_", "m")
    return shortened.replace("/pattern_", "_p")
def longer_pattern(shortpattern):
    """Convert a short pattern name to its long form.

    Example: ``m1_p1 -> metacluster_1/pattern_1``. Inputs that are already
    long (contain ``/``) are returned unchanged.
    """
    if "/" in shortpattern:
        return shortpattern
    # Order matters: expand "_p" first so the leading "m" is still intact
    # when it is replaced.
    expanded = shortpattern.replace("_p", "/pattern_")
    return expanded.replace("m", "metacluster_")
def extract_name_short(ps):
    """Parse a short pattern name into numeric components.

    ``"m1_p2" -> {"metacluster": 1, "pattern": 2}``
    """
    metacluster_part, pattern_part = ps.split("_")
    return {
        "metacluster": int(metacluster_part.replace("m", "")),
        "pattern": int(pattern_part.replace("p", "")),
    }
def extract_name_long(ps):
    """Parse a long pattern name into numeric components.

    ``"metacluster_0/pattern_1" -> {"metacluster": 0, "pattern": 1}``
    """
    metacluster_part, pattern_part = ps.split("/")
    return {
        "metacluster": int(metacluster_part.replace("metacluster_", "")),
        "pattern": int(pattern_part.replace("pattern_", "")),
    }
def trim_pssm_idx(pssm, frac=0.05):
    """Find (start, stop) indices trimming low-signal edges of a PSSM.

    A position is kept once its total absolute weight exceeds ``frac`` of
    the maximum per-position total. Returns 0-based ``(start, stop)``
    suitable for slicing.
    """
    # Guard: an empty PSSM has nothing to trim. Previously this path crashed
    # (.max() raises on an empty array, and the loop variables were unbound).
    if len(pssm) == 0:
        return 0, 0
    if frac == 0:
        return 0, len(pssm)
    pssm = np.abs(pssm)
    threshold = pssm.sum(axis=-1).max() * frac
    # Scan from the left for the first position above threshold...
    for i in range(len(pssm)):
        if pssm[i].sum(axis=-1) > threshold:
            break
    # ...and from the right for the last one.
    for j in reversed(range(len(pssm))):
        if pssm[j].sum(axis=-1) > threshold:
            break
    return i, j + 1  # + 1 is for using 0-based indexing
def trim_pssm(pssm, frac=0.05):
    """Return *pssm* with its low-signal edges removed (see trim_pssm_idx)."""
    start, stop = trim_pssm_idx(pssm, frac=frac)
    return pssm[start:stop]
| StarcoderdataPython |
3362823 | <reponame>rsouza01/cli-plugin
# -*- coding: utf-8 -*-
# from setuptools import setup, find_packages
import setuptools
# Read the long description and license text from the repository root.
with open('README.md') as f:
    readme = f.read()
# NOTE(review): `license` shadows the builtin of the same name; harmless in
# a setup script, but renaming (e.g. license_text) would be cleaner.
with open('LICENSE') as f:
    license = f.read()
setuptools.setup(
    name='cli-plugin',
    # NOTE(review): 'cli-plugin' contains a hyphen, which is not importable
    # as a Python package name — confirm against the actual package directory.
    packages=['cli-plugin'],
    version='0.2.3',
    description='CLI tool with plugin support - TEMPLATE PROJECT',
    # long_description=readme,
    # long_description_content_type="text/markdown",
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/rsouza01/cli-plugin',
    # NOTE(review): download_url still points at v_0.1.1 while version is
    # 0.2.3 — likely stale; verify before release.
    download_url='https://github.com/rsouza01/cli-plugin/archive/v_0.1.1.tar.gz',
    license=license,
    install_requires=[],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ]
)
| StarcoderdataPython |
9785649 | <reponame>ChangedLater/feeding-nemo<gh_stars>0
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
class Motor:
    """Servo wrapper that drives a PWM pin and tracks the current angle.

    Angles are stored internally in the 90-270 range (user angle + 180,
    clamped), with 180 as the centre position.
    """
    def __init__(self, pin):
        self.pin = pin
        self.cycle = 50          # PWM frequency in Hz
        self.minAngle = 90       # lower clamp of the internal angle range
        self.maxAngle = 270      # upper clamp of the internal angle range
        self.centreDuty = 7.5    # the duty cycle for the centre position
        self.dutyShift = 5       # the duty cycle difference to move by 90 degrees
        self.currentAngle = 180  # start at centre
    def start(self):
        """Configure the pin and start the PWM output at the centre position."""
        GPIO.setup(self.pin, GPIO.OUT)
        self.pwm = GPIO.PWM(self.pin, self.cycle)
        self.pwm.start(self.centreDuty)
    def _moveTo(self, angle):
        """Jump directly to *angle* (internal range) and record it."""
        duty = self.centreDuty + (angle - 180) * (self.dutyShift / 90)
        print(duty)
        self.pwm.ChangeDutyCycle(duty)
        self.currentAngle = angle
    def moveTo(self, angle):
        """Sweep toward *angle* (user range) in steps of at most 10 degrees."""
        # Translate into the internal range and clamp to the travel limits.
        target = min(max(angle + 180, self.minAngle), self.maxAngle)
        direction = -1 if target < self.currentAngle else 1
        while target != self.currentAngle:
            step = min(abs(target - self.currentAngle), 10)
            self._moveTo(self.currentAngle + direction * step)
            #sleep(0.05)
    def close(self):
        """Stop the PWM output and release all GPIO resources."""
        self.pwm.stop()
        GPIO.cleanup()
def create(pin):
    """Initialise GPIO (BCM numbering) and return a Motor bound to *pin*.

    Note: Motor.start() performs its own GPIO.setup on the pin; the setup
    here just makes the pin safe to use immediately after creation.
    """
    GPIO.setmode(GPIO.BCM)
    # Bug fix: previously this configured hard-coded pin 19 regardless of
    # the requested pin.
    GPIO.setup(pin, GPIO.OUT)
    return Motor(pin)
| StarcoderdataPython |
3461471 | <gh_stars>0
import streamlit as st
st.set_page_config(layout='wide', page_title='Seismic Viewer')
import pandas as pd
import numpy as np
from obspy import read
import plotly.graph_objects as go
# event handling
from plotly.callbacks import Points, InputDeviceState
# local module
from load import loadData
def click_fn(trace, points, state):
    # Plotly click callback: log the indices of the clicked points.
    # NOTE(review): `trace` and `state` are required by the callback
    # signature but unused here.
    inds = points.point_inds
    print(inds)
def volumePlot():
    """Build a demo 3-D volume figure of a sin/cos interference field."""
    # Sample a 30x30x30 grid over [-1, 1] along each axis.
    grid_x, grid_y, grid_z = np.mgrid[-1:1:30j, -1:1:30j, -1:1:30j]
    field = np.sin(np.pi * grid_x) * np.cos(np.pi * grid_z) * np.sin(np.pi * grid_y)
    volume = go.Volume(
        x=grid_x.flatten(),
        y=grid_y.flatten(),
        z=grid_z.flatten(),
        value=field.flatten(),
        isomin=-0.1,
        isomax=0.8,
        opacity=0.1,  # needs to be small to see through all surfaces
        surface_count=21,  # needs to be a large number for good volume rendering
        colorscale='RdBu',
    )
    fig = go.Figure(data=volume)
    fig.update_layout(
        autosize=False,
        height=800,
        width=800,
        margin=dict(l=0, r=0, b=0, t=0, pad=0),
        showlegend=False,
    )
    return fig
# @st.cache
def seisPlot(rawData, axisYData, ampGain, timeScaling):
    """Render the seismic traces as a wiggle plot in the Streamlit page.

    NOTE(review): reads the module-level `uploadedFile` for the header and
    hard-codes 24 channels — confirm both against the loader's output.
    """
    if rawData is not None:
        st.sidebar.success('Data terupload')
        # Add histogram data
        fig = go.Figure()
        for i in range(1, 24):
            # Normalise each trace, apply the user gain, then shift it to
            # column i+1 so adjacent traces do not overlap.
            x = (rawData[:,i] / np.linalg.norm(rawData[:,i]))*ampGain + i+1
            y = axisYData[:,i]
            fig.add_trace(go.Scatter(x=x,
                                     y=y,
                                     mode='lines',
                                     line= dict(color='white', width=0.5),
                                     fill='tozeroy'))
        # Time increases downward, as is conventional for seismic sections.
        fig.update_yaxes(autorange='reversed')
        fig.update_layout(autosize=False,
                          height= timeScaling,
                          xaxis_title = 'Number of Traces',
                          yaxis_title='Time (ms)',
                          margin=dict(
                              l=0,
                              r=0,
                              b=0,
                              t=0,
                              pad=0),
                          showlegend=False)
        # Plot!
        st.header(f'Source data: {uploadedFile.name}')
        st.plotly_chart(fig, use_container_width=True)
# Sidebar option & File uploader
# Top-level Streamlit page: a sidebar process selector dispatches to one of
# the app's views; unimplemented views are placeholders.
st.sidebar.header('SEIRA 2.0 PRO')
processSelect = st.sidebar.selectbox('Process', ['Home',
                                                 'Seismic Viewer',
                                                 'Dispersion Curve',
                                                 '1D VS30 Inversion',
                                                 '2D VS30'], )
if processSelect == 'Home':
    st.header('GEOPHYSICAL ENGINEERING \n INSTITUT TEKNOLOGI SUMATERA')
if processSelect == 'Seismic Viewer':
    rawData = None
    uploadedFile = st.sidebar.file_uploader('Seismic data', type=['.sgy'])
    rawData, axisYData = loadData(uploadedFile)
    # EXPANDER PARAMETERS
    paramOption = st.sidebar.beta_expander('ADJUST PARAMETERS')
    ampGain = paramOption.slider('Amplitude Gain', min_value=1, max_value=20)
    timeScaling = paramOption.slider('Time Scale', min_value=480, max_value=1080)
    paramOption.write('Bandpass Filter')
    minFrequency = paramOption.number_input('Min Freq (Hz)', min_value=0, max_value=200)
    maxFrequency = paramOption.number_input('Max Freq (Hz)', min_value=0, max_value=200)
    # NOTE(review): the bandpass controls are collected but never applied to
    # the data before plotting — confirm whether filtering is still TODO.
    applyBandpass = paramOption.checkbox('Apply filter')
    confirmSaveData = paramOption.button('Save to SGY File')
    # EXPANDER PICKING FB
    expanderPick = st.sidebar.beta_expander('PICKING FB')
    # CALLBACK PROCESS
    seisPlot(rawData, axisYData, ampGain, timeScaling)
if processSelect == 'Dispersion Curve':
    points, state = Points(), InputDeviceState()
    # NOTE(review): np.array(10) creates 0-d (scalar) arrays, and on_click
    # is a FigureWidget feature — verify this view actually works as wired.
    datax = np.array(10)
    datay = np.array(10)
    f = go.Scatter(x=datax, y=datay)
    f.on_click(click_fn)
    st.plotly_chart(f)
if processSelect == '1D VS30 Inversion':
    pass
if processSelect == '2D VS30':
    pass
3361535 | <reponame>digambar15/ironic<gh_stars>1-10
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base PXE Interface Methods
"""
from futurist import periodics
from ironic_lib import metrics_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import pxe_utils
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
# driver_info fields that must be present for PXE boot to work.
REQUIRED_PROPERTIES = {
    'deploy_kernel': _("UUID (from Glance) of the deployment kernel. "
                       "Required."),
    'deploy_ramdisk': _("UUID (from Glance) of the ramdisk that is "
                        "mounted at boot time. Required."),
}
# Optional driver_info fields; see _persistent_ramdisk_boot for how
# force_persistent_boot_device values are interpreted.
OPTIONAL_PROPERTIES = {
    'force_persistent_boot_device': _("Controls the persistency of boot order "
                                      "changes. 'Always' will make all "
                                      "changes persistent, 'Default' will "
                                      "make all but the final one upon "
                                      "instance deployment non-persistent, "
                                      "and 'Never' will make no persistent "
                                      "changes at all. The old values 'True' "
                                      "and 'False' are still supported but "
                                      "deprecated in favor of the new ones."
                                      "Defaults to 'Default'. Optional."),
}
# Fields required only when a node enters rescue mode.
RESCUE_PROPERTIES = {
    'rescue_kernel': _('UUID (from Glance) of the rescue kernel. This value '
                       'is required for rescue mode.'),
    'rescue_ramdisk': _('UUID (from Glance) of the rescue ramdisk with agent '
                        'that is used at node rescue time. This value is '
                        'required for rescue mode.'),
}
# Union of the above, exposed through get_properties().
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
COMMON_PROPERTIES.update(RESCUE_PROPERTIES)
class PXEBaseMixin(object):
ipxe_enabled = False
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return COMMON_PROPERTIES
@METRICS.timer('PXEBaseMixin.clean_up_ramdisk')
def clean_up_ramdisk(self, task):
"""Cleans up the boot of ironic ramdisk.
This method cleans up the PXE environment that was setup for booting
the deploy or rescue ramdisk. It unlinks the deploy/rescue
kernel/ramdisk in the node's directory in tftproot and removes it's PXE
config.
:param task: a task from TaskManager.
:param mode: Label indicating a deploy or rescue operation
was carried out on the node. Supported values are 'deploy' and
'rescue'. Defaults to 'deploy', indicating deploy operation was
carried out.
:returns: None
"""
node = task.node
mode = deploy_utils.rescue_or_deploy_mode(node)
try:
images_info = pxe_utils.get_image_info(
node, mode=mode, ipxe_enabled=self.ipxe_enabled)
except exception.MissingParameterValue as e:
LOG.warning('Could not get %(mode)s image info '
'to clean up images for node %(node)s: %(err)s',
{'mode': mode, 'node': node.uuid, 'err': e})
else:
pxe_utils.clean_up_pxe_env(
task, images_info, ipxe_enabled=self.ipxe_enabled)
@METRICS.timer('PXEBaseMixin.validate_rescue')
def validate_rescue(self, task):
"""Validate that the node has required properties for rescue.
:param task: a TaskManager instance with the node being checked
:raises: MissingParameterValue if node is missing one or more required
parameters
"""
pxe_utils.parse_driver_info(task.node, mode='rescue')
def _persistent_ramdisk_boot(self, node):
"""If the ramdisk should be configured as a persistent boot device."""
value = node.driver_info.get('force_persistent_boot_device', 'Default')
if value in {'Always', 'Default', 'Never'}:
return value == 'Always'
else:
return strutils.bool_from_string(value, False)
_RETRY_ALLOWED_STATES = {states.DEPLOYWAIT, states.CLEANWAIT,
states.RESCUEWAIT}
@METRICS.timer('PXEBaseMixin._check_boot_timeouts')
@periodics.periodic(spacing=CONF.pxe.boot_retry_check_interval,
enabled=bool(CONF.pxe.boot_retry_timeout))
def _check_boot_timeouts(self, manager, context):
"""Periodically checks whether boot has timed out and retry it.
:param manager: conductor manager.
:param context: request context.
"""
filters = {'provision_state_in': self._RETRY_ALLOWED_STATES,
'reserved': False,
'maintenance': False,
'provisioned_before': CONF.pxe.boot_retry_timeout}
node_iter = manager.iter_nodes(filters=filters)
for node_uuid, driver, conductor_group in node_iter:
try:
lock_purpose = 'checking PXE boot status'
with task_manager.acquire(context, node_uuid,
shared=True,
purpose=lock_purpose) as task:
self._check_boot_status(task)
except (exception.NodeLocked, exception.NodeNotFound):
continue
    def _check_boot_status(self, task):
        """Power-cycle a single node whose ramdisk boot appears stuck."""
        if not isinstance(task.driver.boot, PXEBaseMixin):
            return
        if not _should_retry_boot(task.node):
            return
        task.upgrade_lock(purpose='retrying PXE boot')
        # Retry critical checks after acquiring the exclusive lock.
        if (task.node.maintenance or task.node.provision_state
                not in self._RETRY_ALLOWED_STATES
                or not _should_retry_boot(task.node)):
            return
        LOG.info('Booting the ramdisk on node %(node)s is taking more than '
                 '%(timeout)d seconds, retrying boot',
                 {'node': task.node.uuid,
                  'timeout': CONF.pxe.boot_retry_timeout})
        manager_utils.node_power_action(task, states.POWER_OFF)
        # NOTE(dtantsur): retry even persistent boot setting in case it did not
        # work for some reason.
        persistent = self._persistent_ramdisk_boot(task.node)
        manager_utils.node_set_boot_device(task, boot_devices.PXE,
                                           persistent=persistent)
        manager_utils.node_power_action(task, states.POWER_ON)
def _should_retry_boot(node):
    """Decide whether a seemingly stuck ramdisk boot should be retried.

    The retry is skipped when the agent heartbeated or the power state
    changed within the retry timeout, suggesting the node is alive and busy.
    """
    # NOTE(dtantsur): this assumes IPA, do we need to make it generic?
    recent_events = ('agent_last_heartbeat', 'last_power_state_change')
    for event in recent_events:
        happened_recently = manager_utils.value_within_timeout(
            node.driver_internal_info.get(event),
            CONF.pxe.boot_retry_timeout)
        if not happened_recently:
            continue
        # Alive and heartbeating, probably busy with something long
        LOG.debug('Not retrying PXE boot for node %(node)s; its '
                  '%(event)s happened less than %(timeout)d seconds ago',
                  {'node': node.uuid, 'event': event,
                   'timeout': CONF.pxe.boot_retry_timeout})
        return False
    return True
| StarcoderdataPython |
6413653 | <gh_stars>10-100
from __future__ import annotations
from entities.entity import Entity
from arweave.arweave_lib import Wallet
from graphql.query import get_tag_value_from_query
from typing import List
# Transaction tag names used when (de)serializing a Collection.
TAG_EXTENDS = "extends"
TAG_TRUSTED_SOURCES = "trustedSources"
TAG_NAME = "name"
TAG_METADATA_TAGS = "metadataTags"
class Collection(Entity):
    """A named collection entity stored as an Arweave transaction.

    Carries the collection name, the wallet addresses trusted to publish
    into it, and the metadata tags expected on member Books.
    """

    def __init__(self, wallet: Wallet = None, name: str = None, metadata_tags: List[str] = None, **kwargs):
        """
        Either creates a new Collection, or loads one from the permaweb.
        :param wallet: Arweave wallet. Used not only for signing and sending the transaction, but its address
        is also automatically used as a trusted source for this collection.
        :param name: Name of this collection
        :param metadata_tags: Metadata tags to be included in Books of this collection.
        :param kwargs: Entity arguments
        """
        self._trusted_sources = []
        # BUG FIX: the default used to be a shared mutable list ([]), so state
        # could leak between instances; build a fresh list per instance.
        self._metadata_tags = metadata_tags if metadata_tags is not None else []
        self._name = name
        super().__init__(wallet=wallet, **kwargs)
        if not self.is_signed:
            if wallet is not None:
                self.add_trusted_source(wallet.address)
            self._transaction.add_tag(TAG_NAME, self._name)

    def extend(self, extended_collection: Collection):
        """
        Indicate that this collection is extending another one. Future use only, currently not supported by the dApp.
        :param extended_collection: Already existing original collection
        """
        self._transaction.add_tag(TAG_EXTENDS, extended_collection.id)

    @property
    def type(self) -> str:
        """Entity type discriminator."""
        return "Collection"

    @property
    def name(self) -> str:
        """Collection name (taken from the ``name`` tag when loaded)."""
        return self._name

    @property
    def trusted_sources(self) -> List[str]:
        """Wallet addresses allowed to publish into this collection."""
        return self._trusted_sources

    @property
    def metadata_tags(self) -> List[str]:
        """Metadata tag names expected on Books of this collection."""
        return self._metadata_tags

    def add_trusted_source(self, trusted_address: str):
        """
        Add another wallet address to the list of trusted sources for this collection. Address is not validated.
        :param trusted_address: String containing the address of another wallet.
        """
        self._trusted_sources.append(trusted_address)

    def sign(self):
        """Serialize the list-valued tags onto the transaction, then sign."""
        self._transaction.add_tag(TAG_TRUSTED_SOURCES, self._tag_separator.join(self._trusted_sources))
        self._transaction.add_tag(TAG_METADATA_TAGS, self._tag_separator.join(self._metadata_tags))
        super().sign()

    def load_from_existing_transaction(self, transaction_id: str) -> dict:
        """Populate name, trusted sources and metadata tags from an existing transaction."""
        result = super().load_from_existing_transaction(transaction_id)
        self._name = get_tag_value_from_query(result, TAG_NAME)
        transaction_trusted_sources = get_tag_value_from_query(result, TAG_TRUSTED_SOURCES)
        if transaction_trusted_sources is not None:
            self._trusted_sources = transaction_trusted_sources.split(self._tag_separator)
        transaction_metadata_tags = get_tag_value_from_query(result, TAG_METADATA_TAGS)
        if transaction_metadata_tags is not None:
            self._metadata_tags = transaction_metadata_tags.split(self._tag_separator)
        return result
| StarcoderdataPython |
249253 | #!/usr/bin/env python
import logging
import os
import sys
# Enable debug logging when the NVHTOP_DEBUG environment variable is set.
if os.environ.get("NVHTOP_DEBUG"):
    LEVEL = logging.DEBUG
else:
    LEVEL = logging.INFO
# Make the package importable when running this script from the source tree.
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=LEVEL
)
from nvhtop.commands import main # isort:skip # NOQA
def run():
    """Console entry point: invoke the CLI with a fixed program name."""
    main(prog="nvidia-htop")
if __name__ == "__main__":
    run()
| StarcoderdataPython |
3443147 | import os
import sys
import errno
import argparse
import logging
import docker
import docker.errors
from .. import dockerutils
# Default format for all log records emitted by this module.
_DEFAULT_LOG_FORMAT = "%(name)s : %(threadName)s : %(levelname)s : %(message)s"
logging.basicConfig(
    stream=sys.stderr,
    format=_DEFAULT_LOG_FORMAT,
    level=logging.INFO,
)
# Shell script executed inside the build container: clones su-exec, builds it
# statically, and copies the binary into the bind-mounted /din_config dir
# (chowned to the invoking host user via DIN_UID/DIN_GID).
SETUP_SCRPT = b"""#!/bin/sh
set -e
apk add --no-cache git musl-dev gcc
cd /tmp
git clone -b "${DIN_REFSPEC}" "${DIN_SU_EXEC_URL}" su-exec
cd su-exec
gcc -static su-exec.c -o su-exec
cp -v su-exec /din_config/
chown "${DIN_UID}:${DIN_GID}" /din_config/su-exec
"""
class SetupApp(dockerutils.BasicDockerApp):
    """Build a static ``su-exec`` binary inside a throwaway Alpine container
    and store it under ``~/.config/docker_inside`` on the host.

    The container runs ``SETUP_SCRPT``; the host config directory is bind
    mounted at ``/din_config`` so the compiled binary outlives the container.
    """

    DEFAULT_SU_EXEC_URL = "https://github.com/ncopa/su-exec.git"
    DEFAULT_IMAGE = "alpine:3.6"
    # Proxy-related variables copied from the host environment into the
    # build container so git/apk work behind a proxy.
    PASSED_HOST_ENV = (
        'https_proxy', 'http_proxy',
        'HTTPS_PROXY', 'HTTP_PROXY',
        'HTTP_PROXY_AUTH'
    )

    @classmethod
    def _parse_args(cls, argv):
        """Parse the command line.

        :param argv: argument list without the program name
        :return: populated ``argparse.Namespace``
        """
        parser = argparse.ArgumentParser()
        loglevel_group = parser.add_mutually_exclusive_group()
        loglevel_group.add_argument('--verbose',
                                    dest='loglevel',
                                    action='store_const',
                                    const=logging.DEBUG)
        loglevel_group.add_argument('--quiet',
                                    dest='loglevel',
                                    action='store_const',
                                    const=logging.ERROR)
        parser.add_argument('--url',
                            default=cls.DEFAULT_SU_EXEC_URL,
                            help="Git URL to su-exec repository")
        parser.add_argument('--name',
                            help="Name of the container")
        parser.add_argument('--home',
                            help="Override path to home directory")
        parser.add_argument('--auto-pull',
                            dest="auto_pull",
                            action="store_true",
                            default=False,
                            help="Pull unavailable images automatically")
        parser.add_argument('--refspec',
                            help="Refspec for su-exec repo (tag/branch; default: master)")
        parser.add_argument('--host-network',
                            action="store_true",
                            default=False,
                            help="Allow access to host network (f.e. if using a proxy on locahost)")
        parser.set_defaults(loglevel=logging.INFO)
        args = parser.parse_args(args=argv)
        return args

    def __init__(self, env=None):
        log = logging.getLogger("DockerInside.Setup")
        self._args = None
        dockerutils.BasicDockerApp.__init__(self, log, env)

    def setup(self, url, home=None, auto_pull=False, name=None, refspec=None):
        """Run the build container and collect the ``su-exec`` binary.

        :param url: git URL of the su-exec repository
        :param home: host home directory (defaults to the current user's)
        :param auto_pull: pull the Alpine image if it is missing
        :param name: optional container name
        :param refspec: branch/tag to build (defaults to ``master``)
        :return: container exit status code (0 on success)
        """
        if home is None:
            home = os.path.expanduser('~')
        if refspec is None:
            refspec = 'master'
        self._assert_image_available(self.DEFAULT_IMAGE, auto_pull)
        cfg_path = os.path.join(home, '.config', 'docker_inside')
        self._log.debug("Configuration directory (host): {0}".format(cfg_path))
        # exist_ok covers the EEXIST-and-is-a-directory case that was
        # previously handled by hand; an existing *file* still raises.
        os.makedirs(cfg_path, 0o755, exist_ok=True)
        script_pack = dockerutils.tar_pack({
            "entrypoint.sh": {
                "payload": SETUP_SCRPT,
                "mode": 0o755,
            }
        })
        volumes = self.volume_args_to_list([
            "{0}:/din_config".format(cfg_path)
        ])
        env = {
            "DIN_UID": os.getuid(),
            "DIN_GID": os.getgid(),
            "DIN_SU_EXEC_URL": url,
            "DIN_REFSPEC": refspec,
        }
        host_env = {k: v for k, v in os.environ.items() if k in self.PASSED_HOST_ENV}
        env.update(host_env)
        logging.debug("Prepared environment: %s", host_env)
        network_mode = 'host' if self._args.host_network else None
        logging.debug("Network mode: %s", "default" if network_mode is None else network_mode)
        cobj = self._dc.containers.create(
            self.DEFAULT_IMAGE,
            command="/entrypoint.sh",
            volumes=volumes,
            environment=env,
            name=name,
            network=network_mode
        )
        try:
            cobj.put_archive('/', script_pack)
            cobj.start()
            for msg in cobj.logs(stdout=True, stderr=True, stream=True):
                logging.debug("{0}".format(msg.decode('utf-8').rstrip('\n')))
            ret = cobj.wait()
            status_code = ret.get('StatusCode', None)
            logging.info("setup returned %s", status_code)
            return status_code
        finally:
            # Always tear the build container down, even on failure.
            cobj.stop()
            cobj.remove()

    def run(self, argv):
        """CLI driver around :meth:`setup`; maps known failures to log output.

        :param argv: argument list without the program name
        :return: exit status from setup(), or 1 on failure
        """
        ret = 1
        self._args = self._parse_args(argv)
        logging.getLogger().setLevel(self._args.loglevel)
        # noinspection PyBroadException
        try:
            ret = self.setup(
                self._args.url,
                home=self._args.home,
                auto_pull=self._args.auto_pull,
                name=self._args.name,
                refspec=self._args.refspec
            )
        except dockerutils.InvalidPath as e:
            logging.exception("{0} '{1}' doesn't exist".format(e.type_, e.path))
        except docker.errors.ImageNotFound:
            logging.exception("Image '{0}' not found".format(self.DEFAULT_IMAGE))
        except Exception:
            logging.exception("Failed to run setup()")
        # BUG FIX: this used to be ``return ret`` inside a ``finally`` block,
        # which silently swallowed even KeyboardInterrupt/SystemExit.
        return ret
def setup_main():
    """Console entry point: run SetupApp against the process arguments."""
    app = SetupApp()
    app.run(sys.argv[1:])
| StarcoderdataPython |
348501 | <reponame>bincrafters/conan-bazel_installer<filename>conanfile.py<gh_stars>1-10
from conans import ConanFile, tools
from conans.errors import ConanException
from conans.errors import ConanInvalidConfiguration
import os
import platform
class BazelInstallerConan(ConanFile):
    """Conan recipe that builds Bazel 0.27.1 from its source distribution."""
    name = "bazel_installer"
    version = "0.27.1"
    description = "The Bazel Build system from Google"
    website = "https://github.com/bazelbuild/bazel"
    url = "https://github.com/bincrafters/conan-bazel_installer"
    license = "Apache-2.0"
    topics = ("conan", "bazel", "build", "bzl")
    homepage = "https://www.bazel.build/"
    settings = "os", "arch"
    short_paths = True
    def config_options(self):
        """Reject unsupported architectures and operating systems early."""
        # Checking against self.settings.* would prevent cross-building profiles from working
        if tools.detected_architecture() not in ["x86", "x86_64"]:
            raise ConanInvalidConfiguration("Unsupported Architecture. This package currently only supports x86 and x86_64.")
        if platform.system() not in ["Windows", "Darwin", "Linux"]:
            raise ConanInvalidConfiguration("Unsupported System. This package currently only support Linux/Darwin/Windows")
    def system_requirements(self):
        """Install 'unzip' via the system package manager on Linux hosts."""
        if tools.os_info.is_linux:
            if tools.os_info.with_apt or tools.os_info.with_yum:
                installer = tools.SystemPackageTool()
                installer.install("unzip")
    def build(self):
        """Download the Bazel dist archive and bootstrap it with compile.sh."""
        if self.settings.os == "Windows":
            # compile.sh needs a POSIX shell; require bash on %PATH%.
            bash = tools.which("bash.exe")
            if bash:
                self.output.info("using bash.exe from: " + bash)
            else:
                raise ConanException("No instance of bash.exe could be found on %PATH%")
        archive_name = "bazel-{0}-dist.zip".format(self.version)
        url = "{0}/releases/download/{1}/{2}".format(self.website, self.version, archive_name)
        tools.get(url, sha256="8051d77da4ec338acd91770f853e4c25f4407115ed86fd35a6de25921673e779")
        if self.settings.os == "Windows":
            bash = tools.which("bash.exe")
            with tools.environment_append({'BAZEL_SH': bash}):
                self.run('"{bash}" -l -c "pacman -S coreutils git curl zip unzip --needed --noconfirm"'.format(bash=bash))
                self.run('"{bash}" -c "./compile.sh"'.format(bash=bash))
        else:
            # fix executable permissions (the zip archive loses the exec bits)
            for root, _, files in os.walk('.'):
                for filename in files:
                    if filename.endswith('.sh') or filename.endswith('.tpl'):
                        filepath = os.path.join(root, filename)
                        os.chmod(filepath, os.stat(filepath).st_mode | 0o111)
            self.run('./compile.sh')
    def package(self):
        """Copy the bootstrapped bazel binary into the package's bin dir."""
        if self.settings.os == "Windows":
            self.copy(pattern="bazel.exe", dst="bin", src="output")
        else:
            self.copy(pattern="bazel", dst="bin", src="output")
    def package_info(self):
        """Expose the packaged bin directory on the consumer's PATH."""
        bin_path = os.path.join(self.package_folder, "bin")
        self.output.info("Appending PATH environment variable with : {0}".format(bin_path))
        self.env_info.path.append(bin_path)
| StarcoderdataPython |
# YouTube: https://youtu.be/6hVTr0bcBc0
# Publication: https://caffeinealgorithm.com/blog/20210922/funcao-input-em-python/
# Tutorial script demonstrating input(): prompt for first name, last name
# and age, then echo them back. (User-facing strings are in Portuguese.)
primeiroNome = input('Insere o teu primeiro nome: ')
ultimoNome = input('Insere o teu último nome: ')
idade = input('Insere a tua idade: ')
print('Primeiro nome:', primeiroNome)
print('Último nome:', ultimoNome)
print('Idade:', idade)
| StarcoderdataPython |
226440 | <reponame>TreZc0/donation-tracker
import urllib.parse
import post_office.models
from django.contrib.auth import get_user_model
from django.test import TestCase, RequestFactory
from django.test import override_settings
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
import tracker.auth
from . import util
AuthUser = get_user_model()
# Minimal mail template: exposes the user and reset URL on separate lines so
# tests can parse them back out of the sent message.
TEST_AUTH_MAIL_TEMPLATE = post_office.models.EmailTemplate(
    content='user:{{user}}\nurl:{{reset_url}}'
)
@override_settings(EMAIL_FROM_USER='<EMAIL>')
class TestRegistrationFlow(TestCase):
    """End-to-end tests of the donation-tracker account registration flow.

    NOTE(review): literals like '<EMAIL>' and '<PASSWORD>' look like
    anonymization placeholders from the data source — confirm against the
    upstream repository before relying on exact values.
    """
    def setUp(self):
        self.factory = RequestFactory()
    def test_registration_flow(self):
        """Happy path: mail an inactive user, follow the link, confirm account."""
        request = self.factory.post(reverse('tracker:register'))
        new_user = AuthUser.objects.create(
            username='dummyuser', email='<EMAIL>', is_active=False
        )
        sent_mail = tracker.auth.send_registration_mail(
            request, new_user, template=TEST_AUTH_MAIL_TEMPLATE
        )
        contents = util.parse_test_mail(sent_mail)
        self.assertEqual(new_user.username, contents['user'][0])
        # Follow the emailed link; it should redirect to the confirmation URL.
        parsed = urllib.parse.urlparse(contents['url'][0])
        resp = self.client.get(parsed.path)
        expected_url = reverse(
            'tracker:confirm_registration',
            kwargs={
                'uidb64': urlsafe_base64_encode(force_bytes(new_user.pk)),
                'token': 'register-<PASSWORD>',
            },
        )
        self.assertRedirects(resp, expected_url)
        resp = self.client.get(expected_url)
        self.assertContains(resp, 'Please set your username and password.')
        resp = self.client.post(
            expected_url,
            {
                'username': 'dummyuser',
                'password': '<PASSWORD>',
                'passwordconfirm': '<PASSWORD>',
            },
        )
        self.assertContains(resp, 'Your user account has been confirmed')
        # The user must now be active and able to log in with the new password.
        new_user.refresh_from_db()
        self.assertTrue(new_user.is_active)
        self.assertTrue(new_user.check_password('<PASSWORD>'))
    def test_register_inactive_user(self):
        """Registering an existing inactive user resends the activation mail."""
        AuthUser.objects.create(
            username='existinguser', email='<EMAIL>', is_active=False
        )
        resp = self.client.post(
            reverse('tracker:register'), data={'email': '<EMAIL>'}
        )
        self.assertContains(resp, 'An e-mail has been sent to your address.')
    def test_register_active_user(self):
        """Registering an already active email produces a form error."""
        AuthUser.objects.create(
            username='existinguser', email='<EMAIL>', is_active=True
        )
        resp = self.client.post(
            reverse('tracker:register'), data={'email': '<EMAIL>'}
        )
        self.assertFormError(
            resp,
            'form',
            'email',
            'This email is already registered. Please log in, (or reset your password if you forgot it).',
        )
| StarcoderdataPython |
6682069 | <reponame>PhilipeRLeal/bootstrap_analyses<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 11:47:43 2020
@author: Philipe_Leal
"""
import pandas as pd
import numpy as np
# Fixture samples: 10 draws each from a standard normal distribution.
a = np.random.normal(size=10)
b = np.random.normal(size=10)
c = np.random.normal(size=10)  # NOTE(review): unused below — remove or use?
def diff_multi(*args):
    """Absolute mean of successive differences over the Cartesian product
    of the given samples (equals |mean(b) - mean(a)| for two samples)."""
    combos = pd.MultiIndex.from_product(list(args))
    per_combo = [np.mean(np.diff(combo)) for combo in combos.to_numpy()]
    expected = abs(np.mean(per_combo))
    print('Expected return: {0}'.format(expected))
    return expected
def diff_2S(a, b):
    """Absolute difference between the means of two samples."""
    expected = abs(np.mean(a) - np.mean(b))
    print('Expected return: {0}'.format(expected))
    return expected
# Smoke-run both estimators on the generated samples.
diff_multi(a,b)
diff_2S(a, b)
9637136 | <filename>ute/utils/visualization.py
#!/usr/bin/env python
"""Class for visualization of trained embedding: PCA or t-SNE methods are used
for dimensionality reduction."""
__author__ = '<NAME>'
__date__ = 'August 2018'
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from ute.utils.util_functions import join_data
from os.path import join
from ute.utils.arg_pars import opt
from ute.utils.util_functions import dir_check, timing
class Visual(object):
    """Accumulates embedding features/labels, reduces them to low dimension
    (PCA or t-SNE) and renders/saves scatter plots of the result."""
    def __init__(self, mode='pca', dim=2, reduce=None, save=False, svg=False, saved_dots=''):
        # mpl.rcParams['image.cmap'] = 'cool'
        self._mode = mode            # 'pca' or 'tsne'
        self._model = None           # fitted sklearn reducer
        self._dim = dim              # target dimensionality
        self._data = None            # vertically stacked feature rows
        self._labels = None          # per-row labels (used as colors)
        self._sizes = []             # per-row marker sizes
        self._counter = 0            # plot iteration counter
        self._result = None          # reduced 2-D coordinates
        self.size = 1  # size of dots
        self.reduce = reduce         # % of rows to fit the reducer on, or None
        self._save = save            # whether plot() writes files to disk
        self.svg = svg               # save as .svg instead of .png
        self.saved_dots = saved_dots # path to precomputed dots, skips fitting
    @property
    def data(self):
        return self._data
    @data.setter
    def data(self, new_data):
        # Appends rows; join_data handles the initial None case.
        self._data = join_data(self._data, new_data, np.vstack)
    @property
    def labels(self):
        return self._labels
    @labels.setter
    def labels(self, new_labels):
        self._labels = join_data(self._labels, new_labels, np.hstack)
        # Keep sizes in lockstep with labels, one entry per new row.
        self._sizes += [self.size] * len(new_labels)
    @timing
    def fit_data(self):
        """Reduce accumulated data to 2-D, or load precomputed dots from disk."""
        if self.saved_dots:
            self._result = np.loadtxt(self.saved_dots)
        else:
            if self._mode == 'pca':
                self._model = PCA(n_components=self._dim, random_state=opt.seed)
            if self._mode == 'tsne':
                self._model = TSNE(n_components=self._dim, perplexity=15, random_state=opt.seed)
            if self.reduce is None:
                self._result = self._model.fit_transform(self._data)
            else:
                # Fit on only the first `reduce` percent of rows, then
                # project the full data set through the fitted model.
                fraction = int(self._data.shape[0] * self.reduce / 100)
                self._model.fit(self._data[:fraction])
                self._result = self._model.transform(self._data)
    def plot(self, iter=0, show=True, prefix=''):
        """Scatter-plot the reduced coordinates; optionally save to disk."""
        if iter is not None:
            self._counter = iter
        # NOTE(review): label 20 is remapped to 10, presumably to reuse a
        # colormap slot — confirm against the dataset's label scheme.
        if 20 in self._labels:
            self._labels = np.array(self._labels)
            mask = self._labels == 20
            self._labels[mask] = 10
        plt.axis('off')
        plt.scatter(self._result[..., 0], self._result[..., 1],
                    c=self._labels, s=self._sizes, alpha=1)
        plt.grid(True)
        if prefix == 'time_':
            plt.colorbar()
        if self._save:
            # plt.figure(figsize=(1))
            dir_check(join(opt.dataset_root, 'plots'))
            dir_check(join(opt.dataset_root, 'plots', opt.subaction))
            # name = ['iter%d_' % self._counter, 'gt_'][gt_plot]
            name = prefix + '%s_%s_' % (opt.subaction, opt.model_name)
            folder_name = opt.log_str
            dir_check(join(opt.dataset_root, 'plots', opt.subaction, folder_name))
            folder_name = join(opt.log_str, opt.vis_mode)
            dir_check(join(opt.dataset_root, 'plots', opt.subaction, folder_name))
            if self.svg:
                name += '_%s.svg' % self._mode
            else:
                name += '_%s.png' % self._mode
            # plt.savefig(join(opt.dataset_root, 'plots', opt.subaction,
            #                  folder_name, name), dpi=400)
            plt.savefig(join(opt.dataset_root, 'plots', opt.subaction,
                             folder_name, name), transparent=True, dpi=300)
            # Also dump the raw 2-D coordinates alongside the figure.
            np.savetxt(join(opt.dataset_root, 'plots', opt.subaction,
                            folder_name, '%s.txt' % opt.vis_mode), self._result)
        if show:
            plt.show()
    def reset(self):
        """Clear the figure and accumulated data, bump the iteration counter."""
        plt.clf()
        self._counter += 1
        self._data = None
        self._labels = None
        self._sizes = []
        self.size = 1
    def color(self, labels, prefix, reset=False):
        """Re-plot the existing reduction with a different label coloring."""
        plt.clf()
        self._labels = labels
        self.plot(show=False, prefix=prefix)
        if reset:
            self.reset()
    def fit(self, data, labels, prefix, reset=True):
        """One-shot convenience: set data/labels, reduce, and plot."""
        self._data = data
        self._labels = labels
        self._sizes += [self.size] * len(labels)
        self.fit_data()
        self.plot(show=False, prefix=prefix)
        if reset:
            self.reset()
def bounds(segm):
    """Yield ``(start, end, label)`` for each maximal run of equal labels.

    ``end`` is exclusive, so runs tile the sequence exactly. Unlike the
    original try/except scan, an empty sequence yields nothing instead of
    raising IndexError.

    :param segm: indexable sequence of labels
    """
    total = len(segm)
    start_idx = 0
    while start_idx < total:
        idx = start_idx
        # Advance to the first position whose label differs (or the end).
        while idx < total and segm[idx] == segm[start_idx]:
            idx += 1
        yield start_idx, idx, segm[start_idx]
        start_idx = idx
def plot_segm(path, segmentation, colors, name=''):
    """Render ground-truth and predicted segmentations as stacked color bars.

    :param path: output file path for the figure
    :param segmentation: dict mapping key -> (segment labels, label2gt map);
        must contain a 'gt' entry, whose mapping part is ignored
    :param colors: mapping from label to a matplotlib face color
    :param name: figure title
    """
    # mpl.style.use('classic')
    fig = plt.figure(figsize=(16, 4))
    plt.axis('off')
    plt.title(name, fontsize=20)
    # plt.subplots_adjust(top=0.9, hspace=0.6)
    gt_segm, _ = segmentation['gt']
    ax_idx = 1
    plots_number = len(segmentation)
    ax = fig.add_subplot(plots_number, 1, ax_idx)
    ax.set_ylabel('GT', fontsize=30, rotation=0, labelpad=40, verticalalignment='center')
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    # make_axes_area_auto_adjustable(ax)
    # plt.title('gt', fontsize=20)
    v_len = len(gt_segm)
    # One colored span per maximal run of identical labels, on a 0..1 axis.
    for start, end, label in bounds(gt_segm):
        ax.axvspan(start / v_len, end / v_len, facecolor=colors[label], alpha=1.0)
    for key, (segm, label2gt) in segmentation.items():
        if key in ['gt', 'cl']:
            continue
        ax_idx += 1
        ax = fig.add_subplot(plots_number, 1, ax_idx)
        ax.set_ylabel('OUTPUT', fontsize=30, rotation=0, labelpad=60, verticalalignment='center')
        ax.set_yticklabels([])
        ax.set_xticklabels([])
        # make_axes_area_auto_adjustable(ax)
        # Map predicted labels through label2gt so colors match the GT row.
        segm = list(map(lambda x: label2gt[x], segm))
        for start, end, label in bounds(segm):
            ax.axvspan(start / v_len, end / v_len, facecolor=colors[label], alpha=1.0)
    fig.savefig(path, transparent=False)
| StarcoderdataPython |
4925874 | <reponame>YouhuaLi/metamath-turing-machines
"""Framework for building Turing machines using a register machine abstraction
and binary decision diagrams in place of subprograms."""
# Tape layout: PC:bit[NNN] 0 0 ( 1 1* 0 )*
#
# each thing after the PC is a unary register.
#
# There is a "dispatch" state which assumes the head is at position zero, and
# reads PC bits through a decision tree to find out what to do.
#
# The decision tree has shared subtrees - this is how we handle "subroutines".
# Naturally these shared subtrees have to handle different "contexts".
#
# we shift 1 left of the PC MSB during carry phases; the initial state is the
# leftmost shift state, so the total shift is always non-negative.
from collections import namedtuple
import argparse
class Halt:
    """Terminal pseudo-state: transitioning into it halts the Turing machine."""

    def __init__(self):
        self.name = 'HALT'
class State:
    """Represents a Turing machine state.
    Instances of State can be initialized either at construction or using
    the be() method; the latter allows for cyclic graphs to be defined."""
    def __init__(self, **kwargs):
        # Two-phase construction: a bare State() is a forward reference that
        # must be completed later via be() or clone().
        self.set = False
        self.name = '**UNINITIALIZED**'
        if kwargs:
            self.be(**kwargs)
    def be(self, name, move=None, next=None, write=None,
           move0=None, next0=None, write0=None,
           move1=None, next1=None, write1=None):
        """Defines a Turing machine state.
        The movement direction, next state, and new tape value can be defined
        depending on the old tape value, or for both tape values at the same time.
        Next state and direction must be provided, tape value can be omitted for no change.

        :param name: human-readable state name (used for output/tracing)
        :param move/next/write: defaults applied for both read symbols
        :param move0/next0/write0: overrides used when a '0' is read
        :param move1/next1/write1: overrides used when a '1' is read
        """
        assert not self.set
        self.set = True
        self.name = name
        # Symbol-specific settings fall back to the symbol-independent ones;
        # an omitted write rewrites the symbol just read (i.e. no change).
        self.move0 = move0 or move
        self.move1 = move1 or move
        self.next0 = next0 or next
        self.next1 = next1 or next
        self.write0 = write0 or write or '0'
        self.write1 = write1 or write or '1'
        assert self.move0 in (-1, 1)
        assert self.move1 in (-1, 1)
        assert self.write0 in ('0', '1')
        assert self.write1 in ('0', '1')
        assert isinstance(self.name, str)
        assert isinstance(self.next0, State) or isinstance(self.next0, Halt)
        assert isinstance(self.next1, State) or isinstance(self.next1, Halt)
    def clone(self, other):
        """Makes this state equivalent to another state, which must already be initialized."""
        assert isinstance(other, State) and other.set
        self.be(name=other.name, move0=other.move0, next0=other.next0,
                write0=other.write0, move1=other.move1, next1=other.next1,
                write1=other.write1)
def make_bits(num, bits):
    """Render *num* as a zero-padded binary string of exactly *bits* digits."""
    assert num < (1 << bits)
    if bits == 0:
        return ''
    return format(num, '0%db' % bits)
def memo(func):
    """Decorator which memoizes a method in ``self._memos``, so the wrapped
    function runs at most once per argument tuple.

    A unique sentinel marks an in-progress computation, so accidental
    recursion is still detected without misclassifying legitimately falsy
    return values (the original used ``if not cached`` and would assert on
    results like 0, '' or None).
    """
    in_progress = object()  # placed in the cache while func is running

    def _wrapper(self, *args):
        key = (func,) + args
        if key not in self._memos:
            self._memos[key] = in_progress
            self._memos[key] = func(self, *args)
        if self._memos[key] is in_progress:
            print("recursion detected", func.__name__, repr(args))
            assert False
        return self._memos[key]
    return _wrapper
# Pseudo-instructions used while laying out a subprogram:
Label = namedtuple('Label', ['name'])
Label.size = 0            # labels occupy no PC slots
Label.is_decrement = False
Goto = namedtuple('Goto', ['name'])
Goto.size = 1             # a goto compiles to a single jump subprogram
Goto.is_decrement = False
Register = namedtuple('Register', 'name index inc dec')
class Subroutine:
    """A compiled subprogram: an internal node of the program BDD.

    Every subprogram owns a power-of-two range of PC values (2**order) and
    may be placed at any suitably aligned PC; its entry state is reached
    with the tape head on the first bit of the PC range it owns.
    """

    def __init__(self, entry, order, name, child_map=None, is_decrement=False):
        self.entry = entry
        self.name = name
        self.order = order
        self.size = 2 ** order          # number of PC values consumed
        self.is_decrement = is_decrement
        self.child_map = child_map if child_map else {}
# Per-instruction record: the subroutine, labels attached to it, goto target.
InsnInfo = namedtuple('InsnInfo', 'sub labels goto')
def make_dispatcher(child_map, name, order, at_prefix=''):
    """Constructs one or more dispatch states to route to a child map.
    Each key in the child map must be a binary string no longer than
    the order, and every binary string of length equal to the order must
    have exactly one child map key as a prefix. The generated states will
    read bits going right and fall into the child states after reading
    exactly the prefix."""
    if at_prefix in child_map:
        # The prefix read so far names a subprogram: hand off to its entry.
        return child_map[at_prefix].sub.entry
    assert len(at_prefix) <= order
    # Otherwise build a branch state that reads one more PC bit.
    switch = State()
    switch.be(move=1, name=name + '[' + at_prefix + ']',
              next0=make_dispatcher(child_map, name, order, at_prefix + '0'),
              next1=make_dispatcher(child_map, name, order, at_prefix + '1'))
    return switch
def cfg_optimizer(parts):
    """Peephole-optimize a flat list of subprograms/labels/gotos.

    Threads chains of gotos to their final destination and drops gotos
    that jump to the immediately following instruction. Returns a tuple
    with the removed entries filtered out.
    """
    parts = list(parts)
    # Thread jumps to jumps
    # Delete jumps to the next instruction
    counter = 0
    label_map = {}   # label name -> index of the instruction it precedes
    rlabel_map = {}  # instruction index -> one of its label names
    goto_map = {}    # index of a Goto instruction -> its target label name
    labels = []
    # First pass: number the non-label instructions and record where each
    # label lands and where each goto points.
    for insn in parts:
        if isinstance(insn, Label):
            labels.append(insn.name)
        else:
            for label in labels:
                label_map[label] = counter
                rlabel_map[counter] = label
            labels = []
            if isinstance(insn, Goto):
                goto_map[counter] = insn.name
            counter += 1
    # Trailing labels refer to the position just past the last instruction.
    for label in labels:
        label_map[label] = counter
        rlabel_map[counter] = label
    def follow(count):
        # Chase goto-to-goto chains; the hop limit guards against cycles.
        for _ in range(10):
            if count not in goto_map:
                break
            count = label_map[goto_map[count]]
        return count
    # print(repr(parts))
    counter = 0
    # Second pass: rewrite or delete gotos based on the maps built above.
    for index, insn in enumerate(parts):
        if isinstance(insn, Label):
            continue
        if isinstance(insn, Goto):
            direct_goes_to = label_map[goto_map[counter]]
            goes_to = follow(direct_goes_to)
            next_goes_to = goto_map.get(counter+1) and follow(counter+1)
            # print("CFGO", insn.name, counter, goes_to, next_goes_to)
            # Drop the goto if it targets the next instruction, or leads to
            # the same place the following goto would anyway.
            if goes_to == counter + 1 or goes_to == next_goes_to:
                parts[index] = None
            elif direct_goes_to != goes_to:
                parts[index] = Goto(rlabel_map[goes_to])
        counter += 1
    # print(repr(parts))
    # NOTE: dead-code elimination below is disabled (kept for reference).
    # Delete dead code
    # label_to_index = {}
    # for index, insn in enumerate(parts):
    #     if isinstance(insn, Label):
    #         label_to_index[insn.name] = index
    # grey_index = [0]
    # black_index = set()
    # while grey_index:
    #     ix = grey_index.pop()
    #     if ix in black_index or ix >= len(parts):
    #         continue
    #     black_index.add(ix)
    #     if isinstance(insn, Goto):
    #         grey_index.append(label_to_index[insn.name])
    #     else:
    #         grey_index.append(ix + 1)
    #     if insn and insn.is_decrement:
    #         grey_index.append(ix + 2)
    # for index in range(len(parts)):
    #     if index not in black_index:
    #         print("DEAD CODE")
    #         parts[index] = None
    return tuple(p for p in parts if p)
class MachineBuilder:
"""Subclassable class of utilities for constructing Turing machines using
BDD-compressed register machines."""
pc_bits = 0
quick = 0
# Quick=0: Print TM
# Quick=1: Simulate TM, print all steps
# Quick=2: Simulate TM, print at dispatch
# Quick=3: Simulate compressed register machine
# Quick=4: as Quick=3 except subroutines can cheat
# Quick=5: subroutines can cheat to the extent of storing non-integers
    def __init__(self, control_args):
        """Set up the register allocator, memo cache and CLI options."""
        self._nextreg = 0           # index of the next unallocated register
        self._memos = {}            # cache backing the @memo decorator
        self.control_args = control_args
    # leaf procs which implement register machine operations
    # on entry to a leaf proc the tape head is just after the PC
    @memo
    def reg_incr(self, index):
        """Primitive subroutine which increments a register.

        Builds a chain of states that skips ``index + 1`` registers to the
        right before falling into the shared increment routine.
        """
        if index == -2:
            entry = self.register_common().inc
        else:
            entry = State()
            entry.be(move=1, next1=entry, next0=self.reg_incr(index-1), name='reg_incr.'+str(index))
        return entry
    @memo
    def reg_decr(self, index):
        """Primitive subroutine which decrements a register. The PC will be
        incremented by 2 if successful; if the register was zero, it will be
        unchanged and the PC will be incremented by 1."""
        if index == -2:
            entry = self.register_common().dec
        else:
            # Skip over one register (a 1-run) and recurse to the next.
            entry = State()
            entry.be(move=1, next1=entry, next0=self.reg_decr(index-1), name='reg_decr.'+str(index))
        return entry
    @memo
    def reg_init(self):
        """Primitive subroutine which initializes a register. Call this N
        times before using registers less than N."""
        return Subroutine(self.register_common().init, 0, 'reg_init')
    @memo
    def register_common(self):
        """Primitive register operations start with the tape head on the first
        1 bit of a register, and exit by running back into the dispatcher.

        Returns a namedtuple of entry states: ``inc`` (increment), ``dec``
        (decrement with success/failure PC offsets), ``init`` (append a
        fresh zero-valued register).
        """
        (inc_shift_1, inc_shift_0, dec_init, dec_check, dec_scan_1,
         dec_scan_0, dec_scan_done, dec_shift_0, dec_shift_1, dec_restore,
         return_0, return2_0, return_1, return2_1, init_f1, init_f2,
         init_scan_1, init_scan_0) = (State() for i in range(18))
        # Initialize routine
        init_f1.be(move=1, next=init_f2, name='init.f1')
        init_f2.be(move=1, next=init_scan_0, name='init.f2')
        init_scan_1.be(move=1, next1=init_scan_1, next0=init_scan_0, name='init.scan_1') # only 0 is possible
        init_scan_0.be(write0='1', move0=-1, next0=return_1, move1=1, next1=init_scan_1, name='init.scan_0')
        # Increment the register, the first 1 bit of which is under the tape head
        inc_shift_1.be(move=1, write='1', next0=inc_shift_0, next1=inc_shift_1, name='inc.shift_1')
        inc_shift_0.be(write='0', next0=return_0, move0=-1, next1=inc_shift_1, move1=1, name='inc.shift_0')
        # Decrementing is a bit more complicated, we need to mark the register we're on
        dec_init.be(write='0', move=1, next=dec_check, name='dec.init')
        dec_check.be(move0=-1, next0=dec_restore, move1=1, next1=dec_scan_1, name='dec.check')
        dec_scan_1.be(move=1, next1=dec_scan_1, next0=dec_scan_0, name='dec.scan_1')
        dec_scan_0.be(move1=1, next1=dec_scan_1, move0=-1, next0=dec_scan_done, name='dec.scan_0')
        # scan_done = on 0 after last reg
        dec_scan_done.be(move=-1, next=dec_shift_0, name='dec.scan_done')
        dec_shift_0.be(write='0', move0=-1, next0=return2_0, move1=-1, next1=dec_shift_1, name='dec.shift_0')
        # if shifting 0 onto 0, we're moving the marker we created
        # let it overlap the fence
        dec_shift_1.be(write='1', move=-1, next0=dec_shift_0, next1=dec_shift_1, name='dec.shift_1')
        dec_restore.be(write='1', move=-1, next=return_1, name='dec.restore')
        # Return paths: walk left back to the PC, bumping it by 1 or 2.
        return_0.be(move=-1, next0=self.nextstate(), next1=return_1, name='return.0')
        return2_0.be(move=-1, next0=self.nextstate_2(), next1=return2_1, name='return2.0')
        return_1.be(move=-1, next0=return_0, next1=return_1, name='return.1')
        return2_1.be(move=-1, next0=return2_0, next1=return2_1, name='return2.1')
        return namedtuple('register_common', 'inc dec init')(inc_shift_1, dec_init, init_f1)
    # Implementing the subroutine model
    @memo
    def dispatchroot(self):
        """A Turing state which issues the correct operation starting from the first PC bit.

        Returned uninitialized here; presumably wired up once the program
        BDD has been built — confirm in the layout code (makesub et al.).
        """
        return State()
    @memo
    def nextstate(self):
        """A Turing state which increments PC by 1, with the tape head on the last PC bit."""
        return self.dispatch_order(0, 1)
    @memo
    def nextstate_2(self):
        """A Turing state which increments PC by 2, with the tape head on the last PC bit."""
        return State(move=-1, next=self.dispatch_order(1, 1), name='nextstate_2')
    @memo
    def dispatch_order(self, order, carry_bit):
        """Constructs Turing states which move from the work area back to the PC head.
        On entry, the head should be order bits left of the rightmost bit of the program
        counter; if carry_bit is set, the bit the head is on will be incremented."""
        if order == self.pc_bits:
            # Walked past the PC MSB: re-enter the dispatch tree.
            return State(move=+1, next=self.dispatchroot(), name='!ENTRY')
        assert order < self.pc_bits
        if carry_bit:
            # Binary increment: a 1 flips to 0 and the carry ripples left.
            return State(write0='1', next0=self.dispatch_order(order + 1, 0),
                         write1='0', next1=self.dispatch_order(order + 1, 1),
                         move=-1, name='dispatch.{}.carry'.format(order))
        else:
            return State(next=self.dispatch_order(order + 1, 0), move=-1,
                         name='dispatch.{}'.format(order))
    @memo
    def noop(self, order):
        """A subprogram of given size which does nothing.
        Used automatically to maintain alignment."""
        # Just walk back to the PC and advance it past this subprogram.
        reverse = State(move=-1, next=self.dispatch_order(order, 1), name='noop.{}'.format(order))
        return Subroutine(reverse, order, reverse.name)
    @memo
    def halt(self):
        """A subprogram which halts the Turing machine when your work is done."""
        return Subroutine(Halt(), 0, 'halt')
    @memo
    def jump(self, order, rel_pc, sub_name):
        """A subprogram which replaces a suffix of the PC, for relative jumps.
        Used automatically by the Goto operator.

        Writes the low ``order`` bits of ``rel_pc`` into the PC while moving
        left, then lets dispatch_order handle the remaining top bit.
        """
        assert rel_pc < (1 << (order + 1))
        steps = [State() for i in range(order + 2)]
        steps[order+1] = self.dispatch_order(order, rel_pc >> order)
        steps[0].be(move=-1, next=steps[1], \
                    name='{}.jump({},{},{})'.format(sub_name, rel_pc, order, 0))
        for i in range(order):
            bit = str((rel_pc >> i) & 1)
            steps[i+1].be(move=-1, next=steps[i+2], write=bit, \
                          name='{}.jump({},{},{})'.format(sub_name, rel_pc, order, i+1))
        return Subroutine(steps[0], 0, '{}.jump({},{})'.format(sub_name, rel_pc, order))
@memo
def rjump(self, rel_pc):
    """A subprogram which adds a constant to the PC, for relative jumps."""
    # steps[i] is a pair of states: index 0 = no carry pending, index 1 = carry
    # pending, implementing a ripple-carry adder over the PC bits.
    steps = [(State(), State()) for i in range(self.pc_bits + 1)]
    # Both carry outcomes converge on re-dispatch; a carry out of the top bit is
    # dropped, so the addition is modulo 2**pc_bits.
    steps.append(2 * (self.dispatch_order(self.pc_bits, 0),))
    steps[0][0].be(move=-1, next=steps[1][0], name='rjump({})({})'.format(rel_pc, 0))
    for i in range(self.pc_bits):
        bit = (rel_pc >> i) & 1
        # Add rel_pc bit i into PC bit i while the head moves left.
        steps[i+1][0].be(move=-1, next0=steps[i+2][0], write0=str(bit), \
                         next1=steps[i+2][bit], write1=str(1-bit), \
                         name='rjump({})({})'.format(rel_pc, i+1))
        steps[i+1][1].be(move=-1, next0=steps[i+2][bit], write0=str(1-bit), \
                         next1=steps[i+2][1], write1=str(bit), \
                         name='rjump({})({}+)'.format(rel_pc, i+1))
    return Subroutine(steps[0][0], 0, 'rjump({})'.format(rel_pc))
# TODO: subprogram compilation needs to be substantially lazier in order to do
# effective inlining and register allocation
def makesub(self, *parts, name):
    """Assigns PC values within a subprogram and creates the dispatcher.

    `parts` is a mix of Label markers, Goto placeholders, and real
    subprograms; returns a Subroutine whose size is the next power of two.
    """
    # first find out where everything is and how big I am
    label_offsets = {}
    label_map = {}
    goto_map = {}
    real_parts = []
    offset = 0
    if not self.control_args.no_cfg_optimize:
        parts = cfg_optimizer(parts)
    if name == 'main()':
        # inject code to initialize registers (a bit of a hack)
        # Round the register count up to a power of two so the init block aligns.
        regcount = self._nextreg
        while regcount & (regcount - 1):
            regcount += 1
        parts = regcount * (self.reg_init(), ) + parts
    for part in parts:
        if isinstance(part, Label):
            # labels take up no space
            label_offsets[part.name] = offset
            label_map.setdefault(offset, []).append(part.name)
            continue # not a real_part
        if isinstance(part, Goto):
            goto_map[offset] = part.name
        # parts must be aligned
        # Pad with the largest noop that keeps offset's low zero bits intact.
        while offset % part.size:
            noop_order = (offset & -offset).bit_length() - 1
            offset += 1 << noop_order
            real_parts.append(self.noop(noop_order))
        real_parts.append(part)
        offset += part.size
    assert offset > 0
    # Round the total size up to a power of two, padding the tail with noops.
    order = 0
    while offset > (1 << order):
        order += 1
    while offset < (1 << order):
        noop_order = (offset & -offset).bit_length() - 1
        offset += 1 << noop_order
        real_parts.append(self.noop(noop_order))
    offset = 0
    child_map = {}
    jumps_required = set()
    # First pass: record the smallest jump order that can reach each Goto target.
    for part in real_parts:
        if isinstance(part, Goto):
            jump_order = 0
            target = label_offsets[part.name]
            while True:
                base = (offset >> jump_order) << jump_order
                rel = target - base
                if rel >= 0 and rel < (1 << (jump_order + 1)):
                    jumps_required.add((jump_order, rel))
                    break
                jump_order += 1
        offset += part.size
    offset = 0
    # Second pass: materialize each Goto as a jump/rjump and build the dispatcher map.
    for part in real_parts:
        if isinstance(part, Goto):
            assert part.name in label_offsets
            target = label_offsets[part.name]
            if self.control_args.relative_jumps:
                part = self.rjump(target - offset)
            else:
                part = None
                for jump_order in range(order + 1):
                    base = (offset >> jump_order) << jump_order
                    rel = target - base
                    if (jump_order, rel) in jumps_required:
                        part = self.jump(jump_order, rel, name)
                        # don't break, we want to take the largest reqd jump
                        # except for very short jumps, those have low enough
                        # entropy to be worthwhile
                        if jump_order < 3:
                            break
                assert part
        offset_bits = make_bits(offset >> part.order, order - part.order)
        goto_line = goto_map.get(offset)
        label_line = label_map.get(offset)
        child_map[offset_bits] = InsnInfo(part, label_line, goto_line)
        offset += 1 << part.order
    return Subroutine(make_dispatcher(child_map, name, order), order, name, child_map=child_map)
# Utilities...
@memo
def register(self, name):
    """Assigns a name to a register, and creates the primitive inc/dec routines.

    Parameters
    ----------
    name : str
        Human-readable register name, embedded in generated state names.

    Returns
    -------
    Register
        Register bound to the next free index, carrying its increment and
        decrement subprograms.
    """
    index = self._nextreg
    self._nextreg += 1
    # (Removed unused local `pad` that was assigned and never read.)
    inc = Subroutine(self.reg_incr(index), 0, 'reg_incr('+name+')')
    dec = Subroutine(self.reg_decr(index), 0, 'reg_decr('+name+')', is_decrement=True)
    return Register(name, index, inc, dec)
def regfile(self, *regs):
    """Assigns names to one or more registers, and creates the primitive inc/dec routines."""
    # One Register object per requested name, in the order given.
    return list(map(self.register, regs))
@memo
def transfer(self, source, *to):
    """Subprogram which moves values between registers.
    The source register will be cleared, and its value will be added to each to register."""
    name = 'transfer(' + ','.join([source.name] + [x.name for x in sorted(to)]) + ')'
    # Loop: decrement source; increment every target; repeat until source is empty,
    # at which point control escapes to the 'zero' label.
    # NOTE(review): exact exit behavior relies on how reg_decr branches around the
    # immediately-following Goto('zero') on zero -- confirm against reg_decr.
    return self.makesub(
        Label('again'),
        source.dec,
        Goto('zero'),
        *([tox.inc for tox in sorted(to)] + [
            Goto('again'),
            Label('zero'),
        ]),
        name=name
    )
class Machine:
    """Manipulates and debugs the generated Turing machine for a MachineBuilder."""

    def __init__(self, builder):
        self.builder = builder
        self.main = builder.main()
        if self.main.order != builder.pc_bits:
            print('pc_bits does not match calculated main order:', self.main.order, builder.pc_bits)
            assert False
        # Wire the main subprogram's dispatcher in as the root dispatcher.
        self.builder.dispatchroot().clone(self.main.entry)
        # Execution starts by walking onto the (all-zero) program counter.
        self.entry = self.builder.dispatch_order(self.builder.pc_bits, 0)
        self.state = self.entry
        # Tape is modeled as two stacks around the current cell; blank cells are '0'.
        self.left_tape = []
        self.current_tape = '0'
        self.right_tape = []
        # Only used to align the state-name column in trace output.
        self.longest_label = max(len(state.name) for state in self.reachable())

    def harness(self, args):
        """Processes command line arguments and runs the test harness for a machine."""
        if not args.dont_compress:
            self.compress()
        if args.print_subs:
            self.print_subs()
        if args.print_tm:
            self.print_machine()
        if args.run_tm:
            # Step until the current state is no longer a State (i.e. a Halt).
            while isinstance(self.state, State):
                self.tm_step()

    def compress(self):
        """Combine pairs of equivalent states in the turing machine."""
        # Fixed point iteration: merge states with identical transition tuples,
        # rewrite references, and repeat until nothing changes.
        while True:
            did_work = False
            unique_map = {}
            replacement_map = {}
            for state in self.reachable():
                tup = (state.next0, state.next1, state.write0, state.write1,
                       state.move0, state.move1)
                if tup in unique_map:
                    replacement_map[state] = unique_map[tup]
                else:
                    unique_map[tup] = state
            for state in self.reachable():
                if state.next0 in replacement_map:
                    did_work = True
                    state.next0 = replacement_map[state.next0]
                if state.next1 in replacement_map:
                    did_work = True
                    state.next1 = replacement_map[state.next1]
            if self.entry in replacement_map:
                did_work = True
                self.entry = replacement_map[self.entry]
            if not did_work:
                break

    def print_subs(self):
        """Dump the subroutines used by this machine."""
        # Depth-first walk over the subprogram graph, printing each one once.
        stack = [self.main]
        seen = set()
        while stack:
            subp = stack.pop()
            if subp in seen:
                continue
            seen.add(subp)
            print()
            print('NAME:', subp.name, 'ORDER:', subp.order)
            for offset, entry in sorted(subp.child_map.items()):
                # Pad the bit-string offset so the arrows line up.
                while len(offset) < subp.order:
                    offset = offset + ' '
                display = ' {offset} -> {child}'.format(offset=offset, child=entry.sub.name)
                if entry.goto:
                    display += ' -> ' + entry.goto
                for label in entry.labels or ():
                    display += ' #' + label
                print(display)
                stack.append(entry.sub)

    def reachable(self):
        """Enumerates reachable states for the generated Turing machine."""
        queue = [self.entry]
        seen = []
        seen_set = set()
        while queue:
            state = queue.pop()
            # Halt pseudo-states and already-visited or never-initialized states
            # are not expanded further.
            if isinstance(state, Halt) or state in seen_set:
                continue
            if not state.set:
                continue
            seen_set.add(state)
            seen.append(state)
            queue.append(state.next1)
            queue.append(state.next0)
        return seen

    def print_machine(self):
        """Prints the state-transition table for the generated Turing machine."""
        reachable = sorted(self.reachable(), key=lambda x: x.name)
        count = {}
        for state in reachable:
            count[state.name] = count.get(state.name, 0) + 1
        index = {}
        renumber = {}
        for state in reachable:
            if count[state.name] == 1:
                continue
            # States sharing a name get a (#N) suffix so rows stay distinguishable.
            index[state.name] = index.get(state.name, 0) + 1
            renumber[state] = state.name + '(#' + str(index[state.name]) + ')'
        dirmap = {1: 'R', -1: 'L'}
        for state in sorted(self.reachable(), key=lambda x: x.name):
            print(renumber.get(state, state.name), '=',
                  state.write0, dirmap[state.move0], renumber.get(state.next0, state.next0.name),
                  state.write1, dirmap[state.move1], renumber.get(state.next1, state.next1.name))

    def tm_print(self):
        """Prints the current state of the Turing machine execution."""
        # Render left stack, bracketed current cell, then the right stack
        # (stored innermost-first, hence reversed).
        tape = ''.join(' ' + x for x in self.left_tape) + \
            '[' + self.current_tape + ']' + ' '.join(reversed(self.right_tape))
        print('{state:{len}} {tape}'.format(len=self.longest_label, \
                                            state=self.state.name, tape=tape))

    def tm_step(self):
        """Executes the Turing machine for a single step."""
        self.tm_print()
        state = self.state
        if self.current_tape == '0':
            write, move, nextstate = state.write0, state.move0, state.next0
        else:
            write, move, nextstate = state.write1, state.move1, state.next1
        self.current_tape = write
        self.state = nextstate
        # Shift the two-stack tape; cells beyond either end read as blank '0'.
        if move == 1:
            self.left_tape.append(self.current_tape)
            self.current_tape = self.right_tape.pop() if self.right_tape else '0'
        elif move == -1:
            self.right_tape.append(self.current_tape)
            self.current_tape = self.left_tape.pop() if self.left_tape else '0'
        else:
            assert False
| StarcoderdataPython |
214485 | <reponame>MaciejNowicki/celery_dill_serializer
import os
from setuptools import setup
VERSION = '0.1.3'
def readme(*paths):
    """Return the contents of the text file at ``os.path.join(*paths)``.

    Used below to load README.rst as the package's long description.
    """
    # Explicit UTF-8: without it, decoding depends on the platform locale.
    with open(os.path.join(*paths), 'r', encoding='utf-8') as f:
        return f.read()
def requirements(*paths):
    """Return the stripped, non-empty lines of the text file at ``os.path.join(*paths)``.

    Used below to load install_requires from requirements.txt.
    """
    # Explicit UTF-8: without it, decoding depends on the platform locale.
    with open(os.path.join(*paths), 'r', encoding='utf-8') as f:
        # Iterate the file directly; an empty stripped line is falsy.
        return [line.strip() for line in f if line.strip()]
setup(
    name='celery_dill_serializer',
    packages=['celery_dill_serializer'],
    version=VERSION,
    description='Dill serializer for Celery 4.0+',
    # Rendered by PyPI as the project page body.
    long_description=readme('README.rst'),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/mathiasose/celery_dill_serializer',
    # Points at the GitHub release tag matching VERSION.
    download_url='https://github.com/mathiasose/celery_dill_serializer/releases/tag/{}'.format(VERSION),
    install_requires=requirements('requirements.txt'),
    keywords=['celery', 'dill', 'serialization'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
    ],
)
| StarcoderdataPython |
1745766 | <filename>my_project/my_project/common/migrations/0001_initial.py<gh_stars>0
# Generated by Django 4.0.3 on 2022-03-28 09:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # NOTE(review): depends on library.0005_delete_notification, so this appears to
    # re-create the Notification model in `common` after its removal from `library`
    # (presumably a move between apps) -- confirm against that migration.

    initial = True

    dependencies = [
        ('library', '0005_delete_notification'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[('Change owner', 'Change Owner'), ('Liked', 'Liked'), ('Reviewed', 'Reviewed'), ('Wanted', 'Wanted'), ('Offered', 'Offered'), ('Deal', 'Deal')], max_length=12)),
                ('is_read', models.BooleanField(default=False)),
                ('received_date', models.DateTimeField(auto_now_add=True)),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='library.book')),
                ('recipient', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='receiver_messages', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='sender_messages', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['received_date'],
            },
        ),
    ]
| StarcoderdataPython |
9646228 | """Did Model."""
from masoniteorm.models import Model
class Did(Model):
    """ORM model mapped to the ``dids`` table."""
    # Explicit table name for Masonite ORM.
    # (Removed dataset-separator residue that was fused onto this line.)
    __table__ = "dids"
3266462 | <gh_stars>1-10
class <caret>B(C):
def __init__(self):
C.__init__(self) | StarcoderdataPython |
6611792 | <gh_stars>1-10
# =================================================#
# gsn_vector_1.ncl
# =================================================#
#
# This file is loaded by default in NCL V6.2.0 and newer
# load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl"
# =================================================#
from pathlib import Path
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
# Load the sample wind dataset and take a single 2-D (lat, lon) slice of U and V.
data_location = Path("/Users/brianpm/Documents/www.ncl.ucar.edu/Applications/Data/cdf/")
data_file = data_location / "uvt.nc"
f = xr.open_dataset(data_file)
u = f["U"][0,0,:,:] # read in example data [2D only here]
v = f["V"][0,0,:,:]
# =================================================#
# create plots
# =================================================#
wks, ax = plt.subplots(figsize=(10,10), constrained_layout=True)
outfile_extension = "png"
outfile_name = "gsn_vector"
outfile_dir = Path("/Users/brianpm/Desktop/")
# NCL resource names carried over from the original script; the values below
# are not consumed by the matplotlib calls in this port.
tiMainString = "Basic Vector Plot"
vcRefMagnitudeF = 5.0 # add a reference vector
vcRefLengthF = 0.045 # what the ref length is
vcGlyphStyle = "CurlyVector" # turn on curly vectors
# Subsample every other grid point to keep the arrows readable.
plot = ax.quiver(u[::2, ::2], v[::2, ::2])
# Getting a "reference vector" is not as nice as it should be.
# Positioning it outside the axes isn't working correctly for me.
# Getting a box around it is also not working.
# Both of these could be fixed with a custom function that would add a box and put the quiverkey inside it;
# this is exactly the kind of functionality that "we" could contribute to Matplotlib.
qk = ax.quiverkey(plot, 0.8, 0.03, 20, '20 m/s', coordinates='axes', labelpos='S', color='red')
wks.savefig(outfile_dir / ".".join([outfile_name , outfile_extension]) , bbox_inches='tight', bbox_extra_artists=(qk,))
print(f'DONE WITH FIRST EXAMPLE: {outfile_dir / ".".join([outfile_name , outfile_extension])}')
# NOTE: As far as I can tell, quiver can only use straight arrows, not "CurlyVector" style.
# But we can try to use streamplot:
# Need the x & y grid
# to make it look better, increase density, scale lines/arrows
shp = u.shape
yvec = np.arange(0, shp[0]) # lat
xvec = np.arange(0, shp[1]) # lon
wks, ax = plt.subplots()
outfile_extension = "png"
outfile_name = "gsn_vectorStreamPlot"
outfile_dir = Path("/Users/brianpm/Desktop/")
ax.streamplot(xvec, yvec, u, v, linewidth=0.5, arrowsize=0.5, density=4)
wks.savefig(outfile_dir / ".".join([outfile_name , outfile_extension]))
# That looks fairly similar to the NCL example
# compare with using a projection to geographic space
lat = f['lat']
lon = f['lon']
lons, lats = np.meshgrid(lon, lat)
wks, ax = plt.subplots(subplot_kw={"projection":ccrs.PlateCarree()})
outfile_extension = "png"
outfile_name = "gsn_vectorStreamPlotCartopy"
outfile_dir = Path("/Users/brianpm/Desktop/")
ax.streamplot(lons, lats, u.values, v.values, density=3, linewidth=0.5, arrowsize=0.5)
ax.coastlines()
wks.savefig(outfile_dir / ".".join([outfile_name , outfile_extension]) )
# When we switch from quiver to streamplot, the idea of a "reference vector"
# does not hold, so we could just color by the magnitude.
wks, ax = plt.subplots(subplot_kw={"projection":ccrs.PlateCarree()})
outfile_extension = "png"
outfile_name = "gsn_vectorStreamPlotCartopyMagnitude"
outfile_dir = Path("/Users/brianpm/Desktop/")
# Wind speed, used to color the streamlines.
magnitude = (u ** 2 + v ** 2) ** 0.5
ax.streamplot(lons, lats, u.values, v.values, density=3, linewidth=0.5, arrowsize=0.5, color=magnitude.values)
ax.coastlines()
wks.savefig(outfile_dir / ".".join([outfile_name , outfile_extension]) )
| StarcoderdataPython |
1677465 | <filename>py2d.py
from math import sqrt
from PIL.Image import Image
import math
class Point2d:
    """A mutable point in the 2-D plane."""

    def __init__(self, x_init, y_init):
        """Store the initial coordinates."""
        self.x = x_init
        self.y = y_init

    def shift(self, x, y):
        """Translate the point in place by (x, y)."""
        self.x = self.x + x
        self.y = self.y + y

    def distanceto(self, b, axis=None):
        """Distance from this point to point ``b``.

        ``axis='x'`` / ``'y'`` return the signed offset along that axis,
        ``axis='all'`` returns the (dx, dy) pair, and the default returns
        the Euclidean distance.
        """
        dx = b.x - self.x
        dy = b.y - self.y
        if axis == 'x':
            return dx
        if axis == 'y':
            return dy
        if axis == 'all':
            return dx, dy
        return sqrt(dx ** 2 + dy ** 2)

    def __repr__(self):
        return "Point(" + str(self.x) + "," + str(self.y) + ")"
class Rect:
    """An axis-aligned rectangle given by its top-left corner and its size."""

    def __init__(self, left, top, width, height):
        self.left = left
        self.top = top
        self.width = width
        self.height = height

    def getbox(self):
        """Return the (left, top, right, bottom) bounding-box tuple."""
        right = self.left + self.width
        bottom = self.top + self.height
        return (self.left, self.top, right, bottom)

    def __repr__(self):
        return 'Rect(left:{}, top:{}, width:{}, height:{})'\
            .format(self.left, self.top, self.width, self.height)
class Pixel:
    """A pixel position on a PIL image, with neighborhood and search helpers."""

    def __init__(self, image: Image, coord=(0, 0)):
        self.x, self.y = coord
        self._image = image

    def isvalid(self):
        """Return True if the pixel lies inside the image bounds."""
        overflow = (self.x >= self._image.width or
                    self.y >= self._image.height)
        underflow = (self.x < 0 or self.y < 0)
        return not (overflow or underflow)

    def getcolor(self):
        """Return the pixel's color, or (0, 0, 0) for out-of-bounds positions."""
        if (not self.isvalid()):
            return (0, 0, 0)
        return self._image.getpixel((self.x, self.y))

    def getneighbors(self):
        """Return the 8 surrounding pixels, starting north and going clockwise."""
        pixels = []
        for k in range(8):
            # k * 45 degrees from north; sin/cos round to -1/0/1 cell offsets.
            # y decreases upward because image coordinates grow downward.
            xy = (self.x + int(round(math.sin(k * math.pi/4))),
                  self.y - int(round(math.cos(k * math.pi/4))))
            pix = Pixel(self._image, xy)
            pixels.append(pix)
        return pixels

    def findpixel(self, color: tuple, direction: float, distmax=50):
        """Walk up to ``distmax`` steps along ``direction`` (in 45-degree units)
        and return the first Pixel matching ``color``.

        Returns None when the walk leaves the image or exhausts ``distmax``.
        """
        x, y = self.x, self.y
        while(distmax > 0):
            xy = (x + int(round(math.cos(direction * math.pi/4))),
                  y - int(round(math.sin(direction * math.pi/4))))
            nextpix = Pixel(self._image, xy)
            if (not nextpix.isvalid()):
                return None
            if (nextpix.getcolor() == color):
                return nextpix
            distmax -= 1
            x, y = xy
        return None

    def __repr__(self):
        return 'Pixel({}, {}) | Color{}'\
            .format(self.x, self.y, self.getcolor())
| StarcoderdataPython |
3201149 | # -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike connector module."""
from crowdstrike.core import CrowdStrike
__all__ = ["CrowdStrike"]
| StarcoderdataPython |
6596142 | <reponame>gianimpronta/SentiHIV
from keras import models
from keras.layers import Dropout, Dense
class ModelBuilder:
    """Builds a Keras Sequential multi-layer perceptron for binary classification."""

    def __init__(self, layers, units, dropout_rate, input_shape):
        # Total number of Dense layers, counting the final sigmoid output layer.
        self.layers = layers
        # Width of each hidden Dense layer.
        self.units = units
        # Dropout probability applied before every Dense layer.
        self.dropout_rate = dropout_rate
        # Shape of one input sample, passed to the first (input) Dropout layer.
        self.input_shape = input_shape

    def build(self):
        """Assemble and return the (uncompiled) Sequential model."""
        # Sigmoid activation, because it's a binary output, 0 or 1
        op_units, op_activation = 1, 'sigmoid'
        model = models.Sequential()
        model.add(Dropout(rate=self.dropout_rate, input_shape=self.input_shape))
        # layers - 1 hidden blocks of Dense(relu) followed by Dropout.
        for _ in range(self.layers - 1):
            model.add(Dense(units=self.units, activation='relu'))
            model.add(Dropout(rate=self.dropout_rate))
        model.add(Dense(units=op_units, activation=op_activation))
        return model
| StarcoderdataPython |
1982417 | <reponame>pplotn/SeismicPro
"""File contains metircs for seismic processing."""
# pylint: disable=no-name-in-module, import-error
import inspect
import numpy as np
from numba import njit, prange
from ..batchflow.models.metrics import Metrics
from .plot_utils import plot_metrics_map
class MetricsMap(Metrics):
    """ Class for metrics aggregation and plotting. This class aims to accumulate
    coordinates and metrics values for current coordinates. Therefore, all calculations
    must be performed outside of the class.

    Parameters
    ----------
    coords : array-like
        Array of arrays or 2d array with coordinates for X and Y axes.
    kwargs : dict
        All of the given kwargs are considered as metrics. The kwargs dict has the following structure:

        ``{metircs_name_1 : metrics_value_1,
           ...
           metrics_name_N : metrics_value_N}``

        Here, the ``metric_name`` is any string while ``metrics_value`` should be represented by
        one of the following formats: a one-dimensional array, or an array of one-dimensional arrays.
        * If one-dimensional array, each value from the array will correspond to a pair of coordinates
        with the same index.
        * If an array of arrays, each array of metrics values will match to a pair of coordinates with
        the same index.

    Attributes
    ----------
    attribute_names : array-like
        Names of given metrics and coords.
    coords : array-like
        Array with shape (N, 2) that contains the X and Y coordinates.
        Where N is a number of given coordinates.
    DEFAULT_METRICS : dict
        Dict with functions for aggregation within a bin.
        Available functions:
            - std
            - min
            - max
            - mean
            - quantile
            - absquantile
    kwargs keys : array-like
        All keys from kwargs become instance attributes that contain the corresponding metric values.

    Raises
    ------
    ValueError
        If kwargs are empty.
        If ndim for given coordinate is not equal to 2.
        If shape of first dim is not equal to 2.
        If the length of the metric array does not match the length of the array with coordinates.
    TypeError
        If given coordinates are not array-like.
        If given metrics is not array-like.

    Note
    ----
    1. The length of the metric array and the coordinate array must match.
    """
    DEFAULT_METRICS = {
        'std' : njit(lambda array: np.nanstd(array)),
        'max' : njit(lambda array: np.nanmax(array)),
        'min' : njit(lambda array: np.nanmin(array)),
        # Bug fix: 'mean' previously aliased np.nanmin, so the (default) 'mean'
        # aggregation silently produced per-bin minima instead of means.
        'mean' : njit(lambda array: np.nanmean(array)),
        'median' : njit(lambda array: np.nanmedian(array)),
        'quantile' : njit(lambda array, q: np.nanquantile(array, q=q)),
        'absquantile' : njit(lambda array, q: np.nanquantile(np.abs(array - np.nanmean(array)), q))
    }

    def __init__(self, coords, **kwargs):
        super().__init__()

        if not kwargs:
            raise ValueError("At least one metric should be passed.")

        if not isinstance(coords, (list, tuple, np.ndarray)):
            raise TypeError("Wrong type of coords have been given. "\
                            "Should be array-like but {} received.".format(type(coords)))
        coords = np.asarray(coords)
        # If received array with dtype object, cast it to dtype int or float. As far as all coordinates must have
        # length 2, resulted array will have 2 dims.
        coords = np.array(coords.tolist()) if coords.ndim == 1 else coords
        if coords.ndim != 2:
            raise ValueError("Received coordinates have wrong number of dims.")
        if coords.shape[1] != 2:
            raise ValueError("An array with coordinates must have shape (N, 2), where N is a number of elements"\
                             " but given array has shape {}".format(coords.shape))
        self.coords = coords

        # Create attributes with metrics.
        for name, metrics in kwargs.items():
            if not isinstance(metrics, (list, tuple, np.ndarray)):
                raise TypeError("Received wrong type of '{}' metrics. "\
                                "Must be array-like but received {}".format(name, type(metrics)))
            metrics = np.asarray(metrics)
            # Check whether metrics contains numeric or iterable. If numeric, reshape it to 2-d array.
            if not isinstance(metrics[0], (list, tuple, np.ndarray)):
                metrics = metrics.reshape(-1, 1)
            setattr(self, name, metrics)

            if len(self.coords) != len(metrics):
                raise ValueError("Length of coordinates array doesn't match with '{0}' attribute. "\
                                 "Given length of coordinates is {1} while "\
                                 "length of '{0}' is {2}.". format(name, len(self.coords), len(metrics)))

        self.attribute_names = ('coords', ) + tuple(kwargs.keys())
        # The dictionary contains functions for aggregating the resulting map.
        self._agg_fn_dict = {'mean': np.nanmean,
                             'max': np.nanmax,
                             'min': np.nanmin}

    def append(self, metrics):
        """ Append coordinates and metrics to global container."""
        # Append all attributes with given metrics values.
        for name in self.attribute_names:
            updated_metrics = np.concatenate([getattr(self, name), getattr(metrics, name)])
            setattr(self, name, updated_metrics)

    def construct_map(self, metrics_name, bin_size=500, agg_func='mean',
                      agg_func_kwargs=None, plot=True, **plot_kwargs):
        """ All obtained coordinates are split into bins of the specified `bin_size`. Each value in the
        resulted map represents the aggregated value of metrics for coordinates that belong to the current
        bin. If there are no values included in the bin, its values is `np.nan`. Otherwise, the value of this
        bin is calculated by calling `agg_func`.

        Parameters
        ----------
        metrics_name : str
            The name of metric to draw.
        bin_size : int, float or array-like with length 2, optional, default 500
            The size of the bin by X and Y axes. Based on the received coordinates, the entire map
            will be divided into bins with the size `bin_size`.
            If int or float, the bin size will be the same for X and Y dimensions.
        agg_func : str or callable, optional, default 'mean'
            Function to aggregate metrics values in one bin.
            If str, the function from `DEFAULT_METRICS` attribute will be used for aggregation.
            If callable, it will be used for aggregation within a bin. The function used must
            be wrapped in the `njit` decorator. The first argument is a 1-d numpy.ndarray,
            containing metric values in a bin, the other arguments can take any numeric values
            and must be passed using the `agg_func_kwargs`.
        agg_func_kwargs : dict, optional
            Kwargs that will be passed to `agg_func` during evaluating.
        plot : bool, optional, default True
            If True, metrics will be plotted.
            Otherwise, the map will be returned without drawing.
        **plot_kwargs : dict
            Kwargs that are passed directly to plotter, see :func:`.plot_utils.plot_metrics_map`.

        Returns
        -------
        metrics_map : two-dimensional np.ndarray
            A matrix, where each value is an aggregated metrics value.

        Raises
        ------
        TypeError
            If agg_func is not str or callable.
        ValueError
            If agg_func is str and is not one of the keys of DEFAULT_METRICS.
            If agg_func is not wrapped with @njit decorator.
        """
        metrics = getattr(self, metrics_name)

        # Each coordinate pair may own a whole array of metric values; repeat the
        # coordinates so every metric value gets its own (x, y).
        coords_repeats = [len(metrics_array) for metrics_array in metrics]
        coords = np.repeat(self.coords, coords_repeats, axis=0)
        metrics = np.concatenate(metrics)

        coords_x = np.array(coords[:, 0], dtype=np.int32)
        coords_y = np.array(coords[:, 1], dtype=np.int32)
        metrics = np.array(metrics, dtype=np.float32)

        if isinstance(bin_size, (int, float, np.number)):
            bin_size = (bin_size, bin_size)

        if isinstance(agg_func, str):
            agg_func = self.DEFAULT_METRICS.get(agg_func, agg_func)
            if not callable(agg_func):
                raise ValueError("'{}' is not valid value for aggregation." \
                                 " Supported values are: '{}'".format(agg_func,
                                                                      "', '".join(self.DEFAULT_METRICS.keys())))
        elif not callable(agg_func):
            raise TypeError("'agg_func' should be either str or callable, not {}".format(type(agg_func)))

        # Check whether the function is njitted.
        if not hasattr(agg_func, 'py_func'):
            raise ValueError("It seems that the aggregation function is not njitted. "\
                             "Please wrap the function with @njit decorator.")

        agg_func_kwargs = dict() if agg_func_kwargs is None else agg_func_kwargs
        args = self._create_args(agg_func.py_func, **agg_func_kwargs)

        metrics_map = self.construct_metrics_map(coords_x=coords_x, coords_y=coords_y,
                                                 metrics=metrics, bin_size=bin_size,
                                                 agg_func=agg_func, args=args)

        if plot:
            ticks_range_x = [coords_x.min(), coords_x.max()]
            ticks_range_y = [coords_y.min(), coords_y.max()]
            plot_metrics_map(metrics_map=metrics_map, ticks_range_x=ticks_range_x,
                             ticks_range_y=ticks_range_y, **plot_kwargs)
        return metrics_map

    def _create_args(self, call, **kwargs):
        """ Constructing tuple with positional arguments to callable `call` based on
        `kwargs` and `call`'s defaults. The function omits the first argument even if
        it was set by `kwargs` because the first argument will be passed during the
        metrics map's calculation.

        Parameters
        ----------
        call : callable
            Function to create positional arguments for.
        kwargs : dict
            Keyword arguments to function `call`.

        Returns
        -------
        args : tuple
            Positional arguments to `call` without first argument.
        """
        params = inspect.signature(call).parameters
        args = [kwargs.get(name, param.default) for name, param in params.items()][1:]

        params_names = list(params.keys())[1:]
        # Identity check against the sentinel: `==` could mis-fire (or raise for
        # array-valued defaults), `is` is the correct comparison for Parameter.empty.
        empty_params = [name for name, arg in zip(params_names, args) if arg is inspect.Parameter.empty]
        if empty_params:
            raise ValueError("Missed value to '{}' argument(s).".format("', '".join(empty_params)))
        return tuple(args)

    @staticmethod
    @njit(parallel=True)
    def construct_metrics_map(coords_x, coords_y, metrics, bin_size, agg_func, args):
        """ Calculate of metrics map.

        Parameters
        ----------
        coords_x : array-like
            Coordinates for X axis.
        coords_x : array-like
            Coordinates for Y axis.
        metrics : array-like
            1d array with metrics values.
        bin_size : tuple with length 2
            The size of bin by X and Y axes.
        agg_func : numba callable in nopython mode
            Function to aggregate metrics values in one bin.
        args : tuple
            Positional arguments to the specified `agg_func`.

        Returns
        -------
        metrics_map : two-dimensional array
            The resulting map with aggregated metric values by bins.
        """
        bin_size_x, bin_size_y = bin_size
        range_x = np.arange(coords_x.min(), coords_x.max() + 1, bin_size_x)
        range_y = np.arange(coords_y.min(), coords_y.max() + 1, bin_size_y)
        metrics_map = np.full((len(range_y), len(range_x)), np.nan)
        for i in prange(len(range_x)): #pylint: disable=not-an-iterable
            for j in prange(len(range_y)): #pylint: disable=not-an-iterable
                # Select every sample falling inside bin (i, j); empty bins stay NaN.
                mask = ((coords_x - range_x[i] >= 0) & (coords_x - range_x[i] < bin_size_x) &
                        (coords_y - range_y[j] >= 0) & (coords_y - range_y[j] < bin_size_y))
                if np.any(mask):
                    metrics_map[j, i] = agg_func(metrics[mask], *args)
        return metrics_map
| StarcoderdataPython |
9717041 | <reponame>Matheus1714/Python<filename>CCI/Find_Roots/Newton_Raphson.py<gh_stars>1-10
import math
def function(x):
    """Evaluate f(x) = x**3 - x - 1, the polynomial whose root is sought."""
    value = x**3 - x - 1
    return value
def differential(x):
    """Evaluate f'(x) = 3*x**2 - 1, the derivative of x**3 - x - 1."""
    slope = 3*x**2 - 1
    return slope
def main():
    """Read 'x0 e' from stdin and run Newton-Raphson until |f| or the step is below e."""
    x0, e = map(float, input().split())
    i = 0
    while True:
        i += 1
        # Newton step: x1 = x0 - f(x0)/f'(x0).
        x1 = x0 - function(x0) / differential(x0)
        converged = abs(function(x1)) < e or abs(x1 - x0) < e
        if converged:
            break
        x0 = x1
    print(f"Root = {x1}")
    print(f"Itarations = {i}")
if __name__=="__main__":
main() | StarcoderdataPython |
3567012 |
import pandas as pd
import numpy as np
# Toy corpus used to exercise the emotion-tagging pipeline below.
txt = ['this is fake text',
       "i don't care what this text says",
       "please don't abandon me",
       "it needs preprocessing but i really don't want to do that right now"]

txt = pd.DataFrame({'text':txt})
# Keep only letters and spaces (drops apostrophes, digits and punctuation).
txt['txt'] = txt['text'].apply(lambda x: "".join([i for i in x if (i.isalpha())|(i==" ")]))
def get_emotions():
    """Load the NRC emotion lexicon, keeping only terms tagged with at least one emotion."""
    lexicon = pd.read_csv("lexicons/nrc_emotions.csv")
    # Drop the stray index column written by a previous to_csv.
    lexicon.drop(['Unnamed: 0'], axis=1, inplace=True)
    has_any_tag = np.sum(lexicon, axis=1) > 0
    return lexicon[has_any_tag].copy()
def add_emotions(df):
    """Return a copy of `df` with per-emotion word counts and normalized shares.

    For every emotion dimension in the NRC lexicon this adds an
    'emotions_<dim>' column counting how many whitespace-separated tokens of
    ``df.txt`` appear in that dimension's term list, plus an
    'emotions_<dim>_norm' column holding each count divided by the row's total
    across all dimensions (NaN when a row has no emotion words at all).
    """
    emotions = get_emotions()
    emotions.set_index('term', inplace=True)
    dimensions = emotions.columns.values
    df1 = df.copy()
    for i in dimensions:
        # Set membership makes the per-token lookup O(1) instead of scanning a list.
        terms = set(emotions[emotions[i] == 1].index)
        df1['emotions_' + i] = df1.txt.apply(lambda x: len([w for w in x.split() if w in terms]))
    # Row totals are the same for every dimension; compute them once.
    total = np.sum(df1[['emotions_' + j for j in dimensions]], axis=1)
    for i in dimensions:
        df1['emotions_' + i + '_norm'] = df1['emotions_' + i] / total
    return df1
pd.set_option("display.max_columns",500)
add_emotions(txt) | StarcoderdataPython |
6642675 | from tkinter import *
from tkinter import ttk
import tkinter.filedialog as filedialog
from tkinter import messagebox
from PIL import Image,ImageDraw,ImageFont
from PIL import ImageTk,ImageGrab
import cv2
import numpy as np
import os
from predictionModel import predictionCNN
# Main application window.
root = Tk()
root.title('Rootster v.0 ')
root.geometry("")
root.option_add('*tearoff', False)
# Install an empty menu bar.
emptymenu = Menu(root)
root.config(menu=emptymenu)
# Fit a working size to the current display (capped at 850).
screenheight = root.winfo_screenheight()
screenwidth = root.winfo_screenwidth()
print('screenheight', screenheight, 'screenwidth', screenwidth)
screenstd = min(screenheight-100, screenwidth-100, 850)

# -----variables------
viewopt_var = StringVar()
scaleval = DoubleVar()
# RGBbands=None
# Currently loaded image and derived grid overlay state.
RGBimg = None
gridimg = None
gridnum = 0
# Zoomable canvas helper; stays None until an image is opened.
zoom = None
hasGrid = False
hasMap = False
# Prediction results and flags.
predictlabels = None
confidence = None
hasPred = False
rownum = 0
colnum = 0
csvname = ''
e1 = None
# ------functions-----
def init_canvas(path):
    """(Re)create the zoomable image canvas for the image file at `path`."""
    import zoom_example
    global panelA, zoom
    rownum = int(rowentry.get())
    colnum = int(colentry.get())
    # 1440x900: canvas dimensions -- NOTE(review): hard-coded, presumably sized
    # for the development display; confirm before changing.
    zoom = zoom_example.Zoom_Advanced(imageframe, panelA, path, rownum, colnum, 1440, 900)
def Open_File():
    """Load the image at the global `filename`, converting HEIC to JPEG first.

    Returns True on success, False when OpenCV/PIL cannot read the file.
    Side effects: sets globals RGBimg and (for HEIC inputs) filename.
    """
    global RGBimg, filename
    head_tail = os.path.split(filename)
    originfile, extension = os.path.splitext(head_tail[1])
    print(originfile, extension)
    if 'HEIC' in extension:
        # Decode HEIC via pyheif, save a JPEG copy next to it, and reload that.
        import pyheif
        heif_file = pyheif.read(filename)
        RGBimg = Image.frombytes(
            heif_file.mode,
            heif_file.size,
            heif_file.data,
            "raw",
            heif_file.mode,
            heif_file.stride,
        )
        RGBimg.save(head_tail[0]+'/'+originfile+'.jpg', "JPEG")
        filename = head_tail[0]+'/'+originfile+'.jpg'
        return True
    '''batch process HEIC pictures'''
    # files=os.listdir(head_tail[0])
    # import pyheif
    # for tempname in files:
    #     if 'HEIC' in tempname:
    #         originfile, extension = os.path.splitext(tempname)
    #         heif_file = pyheif.read(head_tail[0]+'/'+tempname)
    #         RGBimg=Image.frombytes(
    #             heif_file.mode,
    #             heif_file.size,
    #             heif_file.data,
    #             "raw",
    #             heif_file.mode,
    #             heif_file.stride,
    #         )
    #         RGBimg.save(head_tail[0]+'/'+originfile+'.jpg',"JPEG")
    #         filename=head_tail[0]+'/'+originfile+'.jpg'
    #         print(filename)
    # return True
    try:
        # cv2.imread is used as a readability probe; PIL provides the image object.
        Filersc = cv2.imread(filename, flags=cv2.IMREAD_ANYCOLOR)
        h, w, c = np.shape(Filersc)
        print('image size:', h, w)
        # RGBbands=cv2.cvtColor(Filersc,cv2.COLOR_BGR2RGB)
        RGBimg = Image.open(filename)
    except:
        return False
    return True
def zoomimage(opt):
    """Forward a zoom in/out request to the canvas; ignored when no image is loaded."""
    global zoom
    print(opt)
    try:
        zoom.wheel(opt)
    except:
        # Best effort: `zoom` is None until an image has been opened.
        return
def Open_Multifile():
    """'Image' button handler: pick an image file, reset grid/prediction
    state and rebuild the canvas for the new image."""
    global gridbutton,rowentry,colentry,exportbutton,filename,zoombar,mapfilebutton,hasMap,hasGrid,hasPred
    global reversebutton,predictbutton,slider,predictlabels,confidence
    filename=filedialog.askopenfilename()
    root.update()
    if Open_File()!=False:
        # enable the controls that require a loaded image
        gridbutton.configure(state=NORMAL)
        rowentry.configure(state=NORMAL)
        colentry.configure(state=NORMAL)
        exportbutton.configure(state=NORMAL)
        zoomout.configure(state=NORMAL)
        zoomin.configure(state=NORMAL)
        mapfilebutton.configure(state=NORMAL)
        predictbutton.configure(state=NORMAL)
        # reverse/predict stay disabled until a grid exists
        reversebutton.configure(state=DISABLED)
        predictbutton.configure(state=DISABLED)
        predictlabels=None
        confidence=None
        init_canvas(filename)
        slider.unbind('<ButtonRelease-1>')
        hasMap=False
        hasGrid=False
        hasPred=False
def Open_Map():
    """'Map' button handler: load a csv of (index,row,col,label) cells,
    lock the grid size to the map's dimensions and mark label==1 cells."""
    mapfile=filedialog.askopenfilename()
    if '.csv' not in mapfile:
        messagebox.showerror('Error',message='Map file should be a csv file.')
        return
    else:
        import csv
        rows=[]
        transrows=[]
        if os.name=='nt':
            # keep Windows paths as raw strings
            mapfile=r'%s' % mapfile
        with open(mapfile,'r',encoding='utf-8-sig') as f:
            csvreader=csv.reader(f)
            for row in csvreader:
                if len(row)!=0:
                    rows.append(row)
        rows.pop(0)  # drop the header line
        totalgrid=len(rows)
        for i in range(totalgrid):
            # each row must be 4 ints: index, row, col, label
            temprow=[int(rows[i][e]) for e in range(4)]
            transrows.append(temprow)
            # print(temprow)
        arrayrow=np.array(transrows)
        print(arrayrow.shape)
        if arrayrow.shape[1]!=4:
            messagebox.showerror('Error', message='Incorrect contents to open. \n Contents shape is:'
                                 +str(arrayrow.shape[0])+'x'+str(arrayrow.shape[1]))
            return
        # grid size is implied by the largest row/col index (0-based)
        rownum=max(arrayrow[:,1])+1
        colnum=max(arrayrow[:,2])+1
        infected=np.where(arrayrow[:,3]==1)
        infected=list(infected)[0]
        infected=[ele for ele in infected]
        print(totalgrid,rownum,colnum,infected)
        global hasGrid,rowentry,colentry,hasMap
        global reversebutton,predictbutton,gridbutton
        hasGrid=False
        hasMap=True
        # write the map dimensions into the (then locked) entries
        rowentry.configure(state=NORMAL)
        rowentry.delete(0,END)
        rowentry.insert(END,rownum)
        rowentry.configure(state=DISABLED)
        colentry.configure(state=NORMAL)
        colentry.delete(0,END)
        colentry.insert(END,colnum)
        colentry.configure(state=DISABLED)
        reversebutton.configure(state=DISABLED)
        predictbutton.configure(state=NORMAL)
        gridbutton.configure(state=DISABLED)
        zoom.resetlabels()
        setGrid(resetimage=True)
        zoom.labelmulti(infected)
def setGrid(resetimage=False):
    """Draw or remove the white grid overlay on the loaded image.

    With resetimage=True (used when a map is loaded) the grid is always
    (re)drawn from the entry values. Otherwise the 'Grid!' button toggles:
    draw the grid when absent, restore the plain image when present.
    """
    global gridimg,hasGrid,reversebutton,gridbutton
    global rownum, colnum, confidence,slider
    print('hasGrid',hasGrid)
    if resetimage==True:
        rownum=int(rowentry.get())
        colnum=int(colentry.get())
        print(resetimage,rownum,colnum)
        # a new grid invalidates old confidences; disable the threshold slider
        confidence = None
        slider.state(["disabled"])
        slider.unbind('<Leave>')
        rgbwidth, rgbheight = RGBimg.size
        row_stepsize = int(rgbheight / rownum)
        col_stepsize = int(rgbwidth / colnum)
        gridimg = RGBimg.copy()
        draw = ImageDraw.Draw(gridimg)
        row_start = 0
        row_end = rgbheight
        col_start = 0
        col_end = rgbwidth
        # vertical then horizontal grid lines
        for col in range(0, col_end, col_stepsize):
            line = ((col, row_start), (col, row_end))
            draw.line(line, fill='white', width=5)
        for row in range(0, row_end, row_stepsize):
            line = ((col_start, row), (col_end, row))
            draw.line(line, fill='white', width=5)
        del draw
        # gridimg.show()
        zoom.changeimage(gridimg, rownum, colnum, False)
        hasGrid = True
        return
    if hasGrid==False:
        try:
            temprownum=int(rowentry.get())
            tempcolnum=int(colentry.get())
        except:
            # non-numeric entry contents -- ignore the click
            return
        if rownum != 0 and (rownum != temprownum or colnum != tempcolnum):
            # dimensions changed: warn, since existing cell labels are discarded
            res = messagebox.askquestion('Warning', 'Changes happened to rows or cols. Do you want to continue?')
            if res == 'no':
                return
            if res=='yes':
                zoom.resetlabels()
        rownum=temprownum
        colnum=tempcolnum
        confidence=None
        slider.state(["disabled"])
        slider.unbind('<Leave>')
        rgbwidth,rgbheight=RGBimg.size
        row_stepsize=int(rgbheight/rownum)
        col_stepsize=int(rgbwidth/colnum)
        gridimg=RGBimg.copy()
        draw=ImageDraw.Draw(gridimg)
        row_start=0
        row_end=rgbheight
        col_start=0
        col_end=rgbwidth
        for col in range(0,col_end,col_stepsize):
            line=((col,row_start),(col,row_end))
            draw.line(line,fill='white',width=5)
        for row in range(0,row_end,row_stepsize):
            line=((col_start,row),(col_end,row))
            draw.line(line,fill='white',width=5)
        del draw
        # gridimg.show()
        zoom.changeimage(gridimg,rownum,colnum,hasGrid)
        hasGrid=True
        reversebutton.configure(state=NORMAL)
        predictbutton.configure(state=NORMAL)
    else:
        # toggle off: show the plain image again
        zoom.changeimage(gridimg,0,0,hasGrid)
        hasGrid=False
        reversebutton.configure(state=DISABLED)
        predictbutton.configure(state=DISABLED)
def setReverse():
    """'Reverse' button handler: invert the label of every grid cell."""
    zoom.labelall()
def printimageexport():
    # debug trace for the 'Export Grid Pictures' checkbox state
    print(imageexport.get())
def exportopts():
    """Open the modal 'Export options' dialog: a checkbox for exporting the
    cropped per-cell images and an entry with the default csv name."""
    global exportoption,imageexport,csvname,e1
    exportoption=StringVar()
    imageexport=IntVar()
    exportoption.set('P')
    opt_window=Toplevel()
    opt_window.geometry('300x150')
    opt_window.title('Export options')
    # optionframe=Frame(opt_window)
    # optionframe.pack()
    checkframe=Frame(opt_window)
    checkframe.pack()
    # radiostyle=ttk.Style()
    # radiostyle.configure('R.TRadiobutton',foreground='White')
    # Radiobutton(optionframe,text='Export Prediction',variable=exportoption,value='P').pack(side=LEFT,padx=10,pady=10)
    # Radiobutton(optionframe,text='Export Current',variable=exportoption,value='C').pack(side=LEFT,padx=10,pady=10)
    Checkbutton(checkframe,text='Export Grid Pictures',variable=imageexport,command=printimageexport).pack(padx=10,pady=10)
    # default csv name embeds the source image name and current threshold
    head_tail = os.path.split(filename)
    originfile, extension = os.path.splitext(head_tail[1])
    csvname = originfile + '_labeloutput_' + 'confidthres=' + str(slider.get()) + '.csv'
    intro = Label(checkframe, text='Out put csv file name:')
    intro.pack()
    e1 = Entry(checkframe)
    e1.pack()
    e1.insert(END, csvname)
    Button(checkframe,text='Export!',command=lambda: implementexport(opt_window)).pack(padx=10,pady=10)
    # make the dialog modal over the main window
    opt_window.transient(root)
    opt_window.grab_set()
def implementexport(popup):
    """Write the export: one csv row per grid cell (index,row,col,label,
    prediction,confidence), an annotated grid image, and optionally the
    cropped cell images. `popup` is the options dialog to close at the end."""
    outpath=filedialog.askdirectory()
    root.update()
    res=zoom.output()
    npimage=res['npimage']           # per-pixel cell index map (1-based)
    labelimage=res['labeledimage']
    infectedlist=res['infectedlist'] # manual label per cell
    import csv
    head_tail=os.path.split(filename)
    originfile,extension=os.path.splitext(head_tail[1])
    # if exportoption.get()=='P':
    #     outputcsv=outpath+'/'+originfile+'_prediction.csv'
    #     headline=['index','row','col','prediction']
    # if exportoption.get()=='C':
    outputcsv=outpath+'/'+e1.get()
    headline=['index','row','col','label','prediction','confidence']
    with open(outputcsv,mode='w') as f:
        csvwriter=csv.writer(f,lineterminator='\n')
        csvwriter.writerow(headline)
        rownum=int(rowentry.get())
        colnum=int(colentry.get())
        gridnum=rownum*colnum
        outputimg=labelimage.copy()
        draw=ImageDraw.Draw(outputimg)
        for i in range(gridnum):
            index=i+1
            row=int(i/colnum)
            col=i%colnum
            # bounding box of this cell from the index map
            locs=np.where(npimage==index)
            x0=min(locs[1])
            y0=min(locs[0])
            x1=max(locs[1])
            y1=max(locs[0])
            if int(imageexport.get())==1:
                cropimage=RGBimg.crop((x0,y0,x1,y1))
                cropimage.save(outpath+'/'+originfile+'_crop_'+str(index)+'.png','PNG')
            midx=x0+5
            midy=y0+5
            state='crop-'+str(index)
            # draw a white outline around the black label text for contrast
            draw.text((midx-1, midy+1), text=state, fill='white')
            draw.text((midx+1, midy+1), text=state, fill='white')
            draw.text((midx-1, midy-1), text=state, fill='white')
            draw.text((midx+1, midy-1), text=state, fill='white')
            draw.text((midx,midy),text=state,fill='black')
            # if exportoption.get()=='P':
            #     label=predictlabels[i]
            # if exportoption.get()=='C':
            label=infectedlist[i]
            # NOTE(review): `confidence != None` would be clearer as
            # `confidence is not None`; confidence is a list here -- confirm
            if confidence!=None:
                # threshold the confidence with the slider to get a prediction
                pred_label= 1 if list(confidence)[i]>=float(slider.get()) else 0
                confidvalue=list(confidence)[i]
                content=[index,row,col,label,pred_label,confidvalue]
            else:
                content = [index, row, col, label,0,0]
            csvwriter.writerow(content)
            print(index)
        del draw
        f.close()  # NOTE(review): redundant -- the `with` block already closes f
    outputimg.save(outpath+'/'+originfile+'_gridimg'+'.png','PNG')
    messagebox.showinfo('Output Done!',message='Results are output to'+outpath)
    popup.destroy()
def export():
    """'Export' button handler: open the export-options dialog.
    Requires a grid to be present."""
    if hasGrid==False:
        return
    exportopts()
    try:
        print(exportoption.get(),imageexport.get())
    except:
        # dialog was closed before the vars were created
        return
def changeconfidencerange(event):
    """Slider release handler: re-threshold the displayed predictions
    with the new confidence value."""
    # newconfid=scaleval.get()
    newconfid=slider.get()
    print(newconfid)
    # zoom.changeconfidance(newconfid[0],newconfid[1])
    zoom.changeconfidance(newconfid)
def prediction():
    """'Predict' button handler.

    On repeat clicks (confidence already computed) just toggles the
    comparison overlay. Otherwise asks for a .pth weight file and runs the
    CNN; with no file selected, falls back to random demo confidences.
    Finally enables the threshold slider.

    Fix: the toggle used ``hasPred = -hasPred``, but ``-False`` is 0 and
    ``-0`` is 0, so the flag could never flip; ``not`` is the correct
    boolean toggle.
    """
    global predictlabels,confidence,hasPred
    if confidence is not None:
        # predictions already exist: just toggle the comparison view
        zoom.showcomparison(confidence,hasPred)
        hasPred=not hasPred
        return
    dlparameter=filedialog.askopenfilename()
    root.update()
    if dlparameter!='':
        if '.pth' not in dlparameter:
            messagebox.showerror('Document type error',message='Please load weight document ends with .pth')
            return
        # model name is encoded in the weight file name before the first '_'
        tail=dlparameter.find('_')
        dlmodel=dlparameter[:tail]
        dlinput={} #input for deep learning model prediction
        # global rownum,colnum
        rownumdict={'row':rownum}
        colnumdict={'col':colnum}
        imgpath={'imagepath':filename}
        dlparapath={'weight':dlparameter}
        dlmodelvalue={'model':dlmodel}
        dlinput.update(rownumdict)
        dlinput.update(colnumdict)
        dlinput.update(imgpath)
        dlinput.update(dlparapath)
        dlinput.update(dlmodelvalue)
        confidence = predictionCNN(dlinput)
        #dlinput is the arguments for deep learning model prediction
        #return of deep learning model should be probability of being diseases
    else:
        # demo mode: random confidences, one per grid cell
        import random
        gridnum = int(rowentry.get()) * int(colentry.get())
        randomlabel=(np.array(random.sample(range(0,gridnum),int(gridnum/3))),)
        # predictlabels=np.array([0 for i in range(gridnum)])
        # predictlabels[randomlabel]=1
        confidence=list(np.random.uniform(0.00,1.00,gridnum))
        print(len(confidence))
    zoom.showcomparison(list(confidence),hasPred)
    hasPred=not hasPred
    global slider
    slider.set(0.50)
    slider.state(["!disabled"])
    slider.bind('<ButtonRelease-1>',changeconfidencerange)
    # slider.state(NORMAL,changeconfidencerange)
    # global hasGrid
    # hasGrid=False
    # setGrid()
    # zoom.labelmulti(randomlabel)
# ----Display-----
# Main layout: the image canvas on top, a single row of control frames at
# the bottom (file buttons, zoom, grid size, grid actions, threshold, export).
display_fr=Frame(root,width=screenwidth,height=screenheight)
bottomframe=Frame(root)
bottomframe.pack(side=BOTTOM)
display_fr.pack(anchor='center')
imageframe=LabelFrame(display_fr,bd=0)
imageframe.pack()
w=760
l=640
panelA=Canvas(imageframe,width=w,height=l,bg='black')
panelA.grid(padx=20,pady=20)
buttondisplay=LabelFrame(bottomframe,bd=0)
buttondisplay.config(cursor='hand2')
buttondisplay.pack(side=LEFT)
labeloptframe=LabelFrame(bottomframe,bd=0)
labeloptframe.config(cursor='hand2')
labeloptframe.pack(side=LEFT)
gridoptframe=LabelFrame(bottomframe,bd=0)
gridoptframe.config(cursor='hand2')
gridoptframe.pack(side=LEFT)
gridbuttondisplay=LabelFrame(bottomframe,bd=0)
gridbuttondisplay.config(cursor='hand2')
gridbuttondisplay.pack(side=LEFT)
confidframe=LabelFrame(bottomframe,bd=0)
confidframe.config(cursor='hand2')
confidframe.pack(side=LEFT)
outputframe=LabelFrame(bottomframe,bd=0)
outputframe.config(cursor='hand2')
outputframe.pack(side=LEFT)
# ------button opts---------
# Most buttons start DISABLED and are enabled step by step as the user
# opens an image (Open_Multifile) and draws a grid (setGrid).
openfilebutton=Button(buttondisplay,text='Image',cursor='hand2',command=Open_Multifile)
openfilebutton.pack(side=LEFT,padx=20,pady=5)
mapfilebutton=Button(buttondisplay,text='Map',cursor='hand2',command=Open_Map)
mapfilebutton.pack(side=LEFT,padx=20,pady=5)
mapfilebutton.configure(state=DISABLED)
# zoombar=Scale(labeloptframe,from_=50,to=150,orient=HORIZONTAL,command=zoomimage,variable=scaleval)
# scaleval.set(100)
# zoombar.pack(side=LEFT,padx=5)
# zoombar.configure(state=DISABLED,repeatinterval=10)
zoomin=Button(labeloptframe,text=' + ',cursor='hand2',command=lambda: zoomimage(1))
zoomin.pack(side=LEFT)
zoomin.configure(state=DISABLED)
zoomout=Button(labeloptframe,text=' - ',cursor='hand2',command=lambda: zoomimage(0))
zoomout.pack(side=LEFT)
zoomout.configure(state=DISABLED)
rowdef=Label(gridoptframe,text='Row')
rowdef.pack(side=LEFT)
rowentry=Entry(gridoptframe,width=5)
rowentry.insert(END,10)
rowentry.pack(side=LEFT,padx=2)
coldef=Label(gridoptframe,text='Col')
coldef.pack(side=LEFT)
colentry=Entry(gridoptframe,width=5)
colentry.insert(END,10)
colentry.pack(side=LEFT,padx=2)
for widget in gridoptframe.winfo_children():
    widget.config(state=DISABLED)
gridbutton=Button(gridbuttondisplay,text='Grid!',cursor='hand2',command=setGrid)
gridbutton.pack(side=LEFT,padx=10)
gridbutton.configure(state=DISABLED)
reversebutton=Button(gridbuttondisplay,text='Reverse',cursor='hand2',command=setReverse)
reversebutton.pack(side=LEFT,padx=10)
reversebutton.configure(state=DISABLED)
predictbutton=Button(gridbuttondisplay,text='Predict',cursor='hand2',command=prediction)
predictbutton.pack(side=LEFT,padx=10)
predictbutton.configure(state=DISABLED)
confidbutton=Label(confidframe,text='Threshold',cursor='hand2')
confidbutton.pack(side=TOP,padx=10)
# confidbutton.configure(state=DISABLED)
# from tkSliderWidget import Slider
# slider=Slider(confidframe,width=100,height=30,min_val=0.50,max_val=1.00,init_lis=[0.75,0.95],show_value=False)
# slider.pack(side=BOTTOM)
# slider.state(DISABLED,changeconfidencerange)
slider=ttk.Scale(confidframe,from_=0.0,to=1.00,orient=HORIZONTAL)
slider.set(0.50)
slider.pack(side=BOTTOM)
slider.state(["disabled"])
exportbutton=Button(outputframe,text='Export',cursor='hand2',command=export)
exportbutton.pack(side=LEFT,padx=10)
exportbutton.configure(state=DISABLED)
root.mainloop()
| StarcoderdataPython |
1921880 | import argparse
import os
import sys
from src.crawlers.BrowserRobot import BrowserRobot
from src.crawlers.CostcoCrawler import CostcoCrawler
from config.print_path import print_path
from src.crawlers.BestBuyCrawler.BestBuyCrawler import BestBuyCrawler
if __name__ == '__main__':
    # Entry point: currently runs only the Best Buy crawler over 4 pages,
    # keeping deals with >=75% savings and uploading them (no csv output).
    # parser = argparse.ArgumentParser(description='arguments options:')
    # parser.add_argument('-c', '--crawler', type=int, default=5000, help="choose crawler.")
    # args = parser.parse_args()
    # print_path()
    # costco_crwaler = CostcoCrawler(is_dev=False)
    # costco_crwaler.run()
    bb = BestBuyCrawler()
    bb.run(totalPage = 4, percentSavings=75, save_csv=False, upload_deals=True)
    # from src.sandbox.uploadImage import test_upload_posts_together
    # test_upload_posts_together()
| StarcoderdataPython |
3299996 | <reponame>KeiichiHirobe/tcp-keepalives<filename>test-dead2.py
import io
import os
import select
import socket
import time
import utils
# Manual TCP keepalive/user-timeout experiment: connect to a local listener,
# then firewall both directions and watch how the dead connection is detected.
utils.new_ns()
port = 1
# local listener the client will talk to
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.bind(('127.0.0.1', port))
s.listen(16)
tcpdump = utils.tcpdump_start(port)
c = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
c.connect(('127.0.0.1', port))
if True:
    # user timeout 10s; keepalive: 3 probes, first after 1s idle, then every 11s
    c.setsockopt(socket.IPPROTO_TCP, socket.TCP_USER_TIMEOUT, 10*1000)
    c.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    c.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)
    c.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1)
    c.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 11)
time.sleep(0.2)
t0 = time.time()
c.send(b"h"*200)
time.sleep(0.2)
utils.ss(port)
# drop all packets to and from the port: the peer is now unreachable
utils.drop_start(dport=port)
utils.drop_start(sport=port)
utils.ss(port)
c.send(b"h"*17)
utils.ss(port)
time.sleep(0.2)
utils.ss(port)
time.sleep(0.4)
utils.ss(port)
time.sleep(1)
utils.ss(port)
utils.check_buffer(c)
# keep sending once a second; at some point send() should start failing
for i in range(15):
    try:
        c.send(b"h"*17)
    except Exception as e:
        print(e)
    time.sleep(1)
    utils.ss(port)
utils.ss(port)
buf = c.recv(10)
print("[ ] len = %d" % (len(buf),))
e = c.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
print("[ ] SO_ERROR = %s" % (e,))
t1 = time.time()
print("[ ] took: %f seconds" % (t1-t0,))
| StarcoderdataPython |
11210325 | from src.enums.Rotations import Rotations
import random
class Ship:
    """A fixed-size (2-cell) ship that picks a random rotation on creation."""
    type = 'Ship'  # NOTE(review): shadows the builtin `type`; kept for compatibility
    size = 2
    def __init__(self):
        # choose one of the Rotations enum members uniformly at random
        self.rotation = random.choice(list(Rotations))
| StarcoderdataPython |
1939200 | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Ignoring flake8 warnings because init file is used for importing package
# members.
from ggrc.cache.localcache import LocalCache # Noqa
from ggrc.cache.memcache import MemCache # Noqa
from ggrc.cache.cachemanager import CacheManager # Noqa
| StarcoderdataPython |
4924923 | '''Calculate Coefficient of variation'''
# import libraries
import pandas as pd
# Read dataset
dataset = pd.read_csv('19_coefficient_of_variation_players_data.csv')
player_a_mean = dataset['PlayerA'].mean() # Answer 49.8
player_a_sd = dataset['PlayerA'].std() # Answer 44.57
# Player A Coefficient of variation (CV = standard deviation / mean)
# NOTE(review): 'payer_' is a typo for 'player_'; names kept as-is in case
# other modules import them
payer_a_cv = player_a_sd / player_a_mean # 0.89
player_b_mean = dataset['PlayerB'].mean() # Answer 32.7
player_b_sd = dataset['PlayerB'].std() # Answer 24.43
# Player B Coefficient of variation
payer_b_cv = player_b_sd / player_b_mean # 0.747
| StarcoderdataPython |
3498370 | ###############################################################################
# Scalability benchmark
#
# <NAME>
# Dec, 2018
#
# Plotting time taken as dimensionality increases
###############################################################################
import os
import timeit
import numpy as np
import infotheory
import matplotlib.pyplot as plt
def scalibility(fn, dims, time_reps, num_datapoints, data_dist):
    """Time an infotheory measure across dimensionalities.

    Args:
        fn: name of the InfoTools method to time (e.g. 'entropy', 'synergy').
        dims: iterable of dimensionalities to benchmark.
        time_reps: number of timed repetitions per dimensionality.
        num_datapoints: number of random data points added before timing.
        data_dist: 'uniform' or 'normal' -- distribution of the random data.

    Returns:
        np.ndarray of average seconds per call, one entry per dim.

    Bug fixed: the original reassigned the ``data_dist`` parameter inside
    the loop, so from the second dim onward neither branch matched and the
    first dim's data expression (with the wrong dimensionality baked in)
    was silently reused for every remaining dim.
    """
    times = []
    for dim in dims:
        print('\tDimensionality of {}'.format(dim))
        sdim = str(dim)
        # statements executed (untimed) before each measurement
        setup = "import numpy as np;import infotheory;"
        setup += "it = infotheory.InfoTools("+sdim+",1,[100]*"+sdim+",[0]*"+sdim+",[1]*"+sdim+");"
        # variable IDs appropriate for the measure being timed
        if 'entropy' in fn:
            setup += "varids = np.ones("+sdim+");"
        elif ('redun' in fn) or ('synergy' in fn) or ('unique' in fn):
            setup += "varids = np.random.randint(3,size=["+sdim+"]);"
        else:
            setup += "varids = list(map(int, np.round(np.random.rand("+sdim+"))));"
        # build the data expression into a local so the parameter survives
        # into the next iteration
        if data_dist == "uniform":
            data_expr = "np.random.rand("+str(num_datapoints)+","+str(dim)+")"
        elif data_dist == "normal":
            data_expr = "np.random.normal(loc=0.5,scale=0.1,size=["+str(num_datapoints)+","+str(dim)+"])"
        else:
            raise ValueError("data_dist must be 'uniform' or 'normal', got {!r}".format(data_dist))
        setup += "it.add_data("+data_expr+");"
        # the call that actually gets timed
        to_time = "it."+fn+"(varids)"
        t = timeit.timeit(to_time, setup=setup, number=time_reps)
        times.append(t)
    return np.asarray(times)/time_reps  # average seconds per repetition
if __name__ == "__main__":
    # Benchmark both data distributions and plot time vs dimensionality.
    for data_dist in ['uniform', 'normal']:
        # config for timing
        time_reps = 1000 # number of runs to average over
        dims = 2**np.asarray([2,5,8,9,10])
        num_datapoints = 10000
        fns = ['mutual_info','entropy','synergy']
        plt.figure(figsize=[4,3])
        print("Functions to check: {}".format(fns))
        # for each info theoretic measure
        for fn in fns:
            print("Running for {}".format(fn))
            # results are cached on disk so re-runs only re-plot
            filename = fn+'_timeReps{}_{}.npy'.format(time_reps,data_dist)
            try:
                times = np.load(filename)
            except Exception as e:
                print(e)
                times = scalibility(fn, dims, time_reps, num_datapoints, data_dist)
                np.save(filename, times)
            print(times)
            plt.plot(dims, times)
        plt.xlabel('Dimensionality')
        plt.ylabel('Time taken to execute (s)')
        plt.legend(fns)
        # saving with log-scale x-axis
        plt.xscale('log')
        plt.tight_layout()
        #plt.savefig('log_scalability_{}.png'.format(data_dist))
        plt.show()
"""
#### Scalability - scalability.py
The time taken to execute the different information theoretic measures as a function of increasing dimensionality of the data was recorded. The following figure shows the average time taken over 1000 runs for the case where 10000 data points uniformly distributed in [0,1] were added and the different metrics were invoked. From the partial information decomposition metrics, only synergy has been reported because estimating synergy involves estimation of total mutual information, redundant information and unique information twice (once for each source). Note that a uniform distribution likely causes data to cover the entire data space hence demonstrates the worst case scenario for this application where the sparse representation cannot be taken advantage of. In this case the application shows a linear increase in log scale of the time axis with a drop for 1024 dimensions. The reason for this is still under investigation.
On the other hand, when the data was normally distributed with mean 0.5, and standard deviation 0.1, the software could potentially take advantage of the sparse representation and the following figure shows the time taken to invoke the different metrics under this condition.
"""
| StarcoderdataPython |
12818337 | <filename>bus_system/apps/bus_driver/serializer.py
from rest_framework.serializers import ModelSerializer
# Imports from your apps
from bus_system.apps.bus_driver.models import BusDriverModel
class BusDriverSerializer(ModelSerializer):
    """
    Serializer exposing the public BusDriverModel fields.
    Only ``id`` is read-only; the remaining fields are writable.
    """
    class Meta:
        model = BusDriverModel
        fields = (
            'id',
            'avatar',
            'is_available',
            'first_name',
            'surname',
            'identification_number'
        )
        read_only_fields = (
            'id',
        )
| StarcoderdataPython |
4863893 | <gh_stars>0
import requests
from json import loads
def data_return():
    """Fetch the COVID summary API and return (cases, deaths, recoveries)."""
    response = requests.get("https://simplecovidapi.herokuapp.com")
    payload = loads(response.text)
    return payload['cases'], payload['deaths'], payload['recoveries']
| StarcoderdataPython |
5123789 | <gh_stars>0
import tensorflow as tf
import numpy as np
import sys,os
import argparse
parser = argparse.ArgumentParser(description='Finetune AHLF and store finetuned weights.')
parser.add_argument('model_weights', type=str, help='[loaded:] trained model weights')
parser.add_argument('finetuned_weights', type=str, help='[saved:] finetuned model weights')
parser.add_argument('mgf_files', type=str, help='directory with mgf-files with suffixes: [.phos.mgf/.other.mgf]')
args = parser.parse_args()
AUTOTUNE = tf.data.experimental.AUTOTUNE
#tf.compat.v1.disable_eager_execution()
from dataset import get_dataset
from network import network
# Fine-tuning hyperparameters
N_TRAINABLE_DENSE_LAYERS = 3   # how many trailing Dense layers to unfreeze
BATCH_SIZE=64
LEARNING_RATE=1.0e-4
DROPOUT_RATE=0.5
EPOCHS=1
VIRTUAL_EPOCH_SIZE=100         # steps per "epoch" (dataset is streamed)
train = True                   # set False to skip fitting
saving = True                  # set False to skip writing weights
def binary_accuracy(y_true, y_pred):
    """Mean fraction of elements where round(y_pred) equals y_true."""
    #y_pred = tf.math.sigmoid(y_pred)
    rounded_preds = tf.math.round(y_pred)
    hits = tf.cast(tf.math.equal(y_true, rounded_preds), tf.float32)
    return tf.reduce_mean(hits)
callbacks = []
# Build the 13-layer, 64-channel AHLF network over (3600, 2) spectra inputs.
ch = 64
net = network([ch,ch,ch,ch,ch,ch,ch,ch,ch,ch,ch,ch,ch],kernel_size=2,padding='same',dropout=DROPOUT_RATE)
inp = tf.keras.layers.Input((3600,2))
sigm = net(inp)
model = tf.keras.Model(inputs=inp,outputs=sigm)
bce=tf.keras.losses.BinaryCrossentropy(from_logits=False)
model.load_weights(args.model_weights)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE,clipnorm=1.0),loss=bce,metrics=['binary_accuracy','Recall','Precision'])
# Freeze the model
for l in model.layers[-1].layers:
    l.trainable = False
    if type(l)==tf.keras.layers.Dropout:
        l.rate=DROPOUT_RATE
        print('RATE:',l.rate)
# Strategy - tune N terminal dense layers:
# walk the layers from the output backwards and unfreeze the first
# N_TRAINABLE_DENSE_LAYERS Dense layers encountered
i = 0
for l in model.layers[-1].layers[::-1]:
    if type(l)==tf.keras.layers.Dense:
        l.trainable = True
        i+=1
    if i>=N_TRAINABLE_DENSE_LAYERS:
        break
model.layers[-1].summary()
train_data = get_dataset(dataset=[args.mgf_files],maximum_steps=None,batch_size=BATCH_SIZE,mode='training').prefetch(buffer_size=AUTOTUNE)
if train:
    model.fit(train_data,steps_per_epoch=VIRTUAL_EPOCH_SIZE,epochs=EPOCHS,callbacks=callbacks)
if saving:
    model.save_weights(args.finetuned_weights)
| StarcoderdataPython |
8035037 | # Generated by Django 2.2.17 on 2021-03-08 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the obsolete terrain/soil attribute columns from PourPointBasin
    and add the nullable ``superbasin`` choice column."""

    dependencies = [
        ('ucsrb', '0021_pourpointbasin_segment_id'),
    ]

    # One RemoveField per retired column (same order as the auto-generated
    # migration), followed by the single AddField.
    operations = [
        migrations.RemoveField(model_name='pourpointbasin', name=column)
        for column in (
            'SDsphrical', 'avg_slp', 'bbl_prsr', 'bulk_dens', 'cap_drv',
            'center_x', 'center_y', 'dwnst_ppt', 'elev_dif', 'exp_decrs',
            'fc_11', 'fc_12', 'fc_13', 'fc_14', 'fc_15',
            'fc_21', 'fc_22', 'fc_23', 'fc_24', 'fc_25',
            'field_cap', 'lat_con', 'mannings', 'max_inf', 'mean_elev',
            'mean_shade', 'normal_x', 'normal_y', 'normal_z', 'pore_sz',
            'porosity', 'slp_gt60',
            'thc_11', 'thc_12', 'thc_13', 'thc_14', 'thc_15',
            'thc_21', 'thc_22', 'thc_23', 'thc_24', 'thc_25',
            'veg_prop', 'vert_con', 'wilt_pt',
        )
    ] + [
        migrations.AddField(
            model_name='pourpointbasin',
            name='superbasin',
            field=models.CharField(blank=True, choices=[('enti', 'Entiat'), ('metw', 'Methow'), ('okan', 'Okanogan'), ('wena', 'Wenatchee')], default=None, max_length=40, null=True),
        ),
    ]
| StarcoderdataPython |
5154626 | <filename>app1/filters.py
import django_filters
from .models import *
class StdFilter (django_filters.FilterSet):
class Meta:
model = App1Students
fields = '__all__' | StarcoderdataPython |
4938875 | <reponame>luislorenzom/celery_priority_queue_example<gh_stars>0
from time import sleep
from config import celery_app
@celery_app.task(name='priority', queue='priority')
def doing_something(msg):
    """Demo task consumed from the 'priority' queue: print the message,
    then block ~10s to simulate work."""
    print('PRIORITY ~~~~> ' + msg)
    sleep(10)
@celery_app.task(name='fifo', queue='fifo')
def doing_something_important(msg):
    """Demo task consumed from the 'fifo' queue: print the message,
    then block ~10s to simulate work."""
    print('FIFO ~~~~> ' + msg)
    sleep(10)
| StarcoderdataPython |
150609 | #!/usr/bin/env python
# Copyright 2015 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from courseraprogramming import main
from courseraprogramming import utils
import logging
# Set up mocking of the `open` call. See http://www.ichimonji10.name/blog/6/
from sys import version_info
if version_info.major == 2:
import builtins as builtins # pylint:disable=import-error
else:
import builtins # pylint:disable=import-error
def test_chattiness_parsing_quiet():
    """Two -q flags should parse to args.quiet == 2."""
    parser = main.build_parser()
    args = parser.parse_args('-qq version'.split())
    assert args.quiet == 2
def test_chattiness_parsing_verbose():
    """A single -v flag should parse to args.verbose == 1."""
    parser = main.build_parser()
    args = parser.parse_args('-v version'.split())
    assert args.verbose == 1
def test_set_logging_level():
    """-vv should drop the root logger to level 5 (below DEBUG)."""
    parser = main.build_parser()
    args = parser.parse_args('-vv version'.split())
    utils.set_logging_level(args)
    assert logging.getLogger().getEffectiveLevel() == 5 # "Trace"
def test_set_logging_level_noneSpecified():
    """Without -v/-q flags the root logger stays at INFO (or NOTSET if
    logging was never configured)."""
    parser = main.build_parser()
    args = parser.parse_args('version'.split())
    utils.set_logging_level(args)
    assert logging.getLogger().getEffectiveLevel() == logging.INFO or \
        logging.getLogger().getEffectiveLevel() == logging.NOTSET
def test_set_timeout():
    """--timeout 120 should be parsed into args.timeout."""
    parser = main.build_parser()
    args = parser.parse_args('--timeout 120 version'.split())
    assert args.timeout == 120
def test_no_timeout():
    """Without --timeout the default of 60 seconds applies."""
    parser = main.build_parser()
    args = parser.parse_args('version'.split())
    assert args.timeout == 60
| StarcoderdataPython |
8132971 | EPSILON = 1e-10
RANDOM_STATE = 0  # fixed seed for reproducible runs -- TODO confirm consumers
| StarcoderdataPython |
class PCF8574:
    """Driver for the PCF8574 8-bit I2C I/O expander.

    The chip has no direction register: per the datasheet a pin is used as
    an input by writing a 1 to it (quasi-bidirectional I/O). This class
    therefore tracks which pins the caller treats as inputs in
    ``_input_mask`` and ORs that mask into every byte written, so input
    pins stay released while ``_output`` drives the remaining pins.

    Fixes vs. the original block: the class header line was corrupted
    (fused with unrelated text) and the register protocol was undocumented;
    the logic itself is unchanged.
    """

    def __init__(self, i2c, address):
        """i2c: bus providing readfrom()/writeto(); address: 7-bit chip address."""
        self._i2c = i2c
        self._address = address
        self._input = 0       # Buffers the result of read in memory
        self._input_mask = 0  # Mask specifying which pins are set as input
        self._output = 0      # The state of pins set for output
        self._write()         # push initial state: all pins output, driven low

    def _read(self):
        # Read one byte from the chip, keeping only the input-pin bits.
        self._input = self._i2c.readfrom(self._address, 1)[0] \
            & self._input_mask

    def _write(self):
        # Input pins must be written high (released) on this chip.
        self._i2c.writeto(
            self._address,
            bytes([self._output | self._input_mask])
        )

    def read(self, pin):
        """Configure `pin` (0-7) as an input and return its level (0 or 1)."""
        bit_mask = 1 << pin
        self._input_mask |= bit_mask
        self._output &= ~bit_mask
        self._write()  # Update input mask before reading
        self._read()
        return (self._input & bit_mask) >> pin

    def read8(self, *mask):
        """Read all pins at once and return the raw input byte.

        An optional positional mask byte limits which pins are treated as
        inputs (default: all 8).
        """
        # NOTE(review): positional-splat API (read8(mask_byte)) kept for
        # backward compatibility.
        if mask:
            self._input_mask = mask[0]
        else:
            self._input_mask = 0xFF
        self._output = 0
        self._write()  # Update input mask before reading
        self._read()
        return self._input

    def write(self, pin, value):
        """Configure `pin` as an output and drive it high (truthy) or low."""
        bit_mask = 1 << pin
        self._input_mask &= ~bit_mask
        self._output = (
            self._output
            | bit_mask if value else self._output
            & (~bit_mask)
        )
        self._write()

    def write8(self, value):
        """Make all pins outputs and drive them to the byte `value`."""
        self._input_mask = 0
        self._output = value
        self._write()

    def set(self):
        """Drive all pins high."""
        self.write8(0xFF)

    def clear(self):
        """Drive all pins low."""
        self.write8(0x0)

    def toggle(self, pin):
        """Configure `pin` as an output and invert its current output state."""
        bit_mask = 1 << pin
        self._input_mask &= ~bit_mask
        self._output ^= bit_mask
        self._write()
| StarcoderdataPython |
217014 | <reponame>kaustubh-s1/EvalAI
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-09-10 18:09
from __future__ import unicode_literals
import base.utils
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``challenge_templates`` table backing ChallengeTemplate."""

    dependencies = [
        (
            "challenges",
            "0070_add_config_id_field_tophase_leaderboard_and_datasetsplit_models",
        )
    ]

    operations = [
        migrations.CreateModel(
            name="ChallengeTemplate",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("modified_at", models.DateTimeField(auto_now=True)),
                ("title", models.CharField(max_length=500)),
                ("template_file", models.FileField(upload_to=base.utils.RandomFileName("templates"))),
                ("is_active", models.BooleanField(db_index=True, default=False)),
                ("image", models.ImageField(blank=True, null=True, upload_to=base.utils.RandomFileName("templates/preview-images/"), verbose_name="Template Preview Image")),
                ("dataset", models.CharField(default="", max_length=200)),
                ("eval_metrics", django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=200), blank=True, default=["Accuracy"], size=None)),
                ("phases", models.IntegerField(blank=True, default=None, null=True)),
                ("splits", models.IntegerField(blank=True, default=None, null=True)),
                ("slug", models.CharField(default="", max_length=500)),
            ],
            options={"db_table": "challenge_templates", "ordering": ("-created_at",)},
        )
    ]
| StarcoderdataPython |
3537824 | <reponame>Clinical-Genomics/scout
# -*- coding: utf-8 -*-
import logging
from datetime import datetime as datetime
from scout.exceptions import IntegrityError
from scout.constants import VALID_MODELS
LOG = logging.getLogger(__name__)
def build_gene(gene_info, adapter):
    """Build a panel_gene object.

    Args:
        gene_info(dict): raw gene entry from a panel file; must contain a
            truthy "hgnc_id"
        adapter(scout.adapter.MongoAdapter): used to look the gene up

    Returns:
        gene_obj(dict): with keys hgnc_id and symbol, plus the optional keys
            disease_associated_transcripts, reduced_penetrance, mosaicism,
            database_entry_version, inheritance_models,
            custom_inheritance_models and the lower-cased model flags
            (ad, ar, mt, ...)

    Raises:
        KeyError: if gene_info carries no usable hgnc_id
        IntegrityError: if the hgnc_id is unknown to the gene database
    """
    symbol = gene_info.get("hgnc_symbol")
    # A gene has to have a hgnc id.  .get() covers both a missing key and a
    # falsy value in one test; the original raised and re-caught its own
    # KeyError (leaving the caught exception unused) to achieve the same.
    hgnc_id = gene_info.get("hgnc_id")
    if not hgnc_id:
        raise KeyError(
            "Gene {0} is missing hgnc id. Panel genes has to have hgnc_id".format(symbol)
        )
    # Accept the gene if it is known in either genome build.
    hgnc_gene = adapter.hgnc_gene_caption(
        hgnc_identifier=hgnc_id, build="37"
    ) or adapter.hgnc_gene_caption(hgnc_identifier=hgnc_id, build="38")
    if hgnc_gene is None:
        raise IntegrityError("hgnc_id {0} is not in the gene database!".format(hgnc_id))

    gene_obj = dict(hgnc_id=hgnc_id)
    # The database symbol is authoritative; warn when the panel file disagrees.
    gene_obj["symbol"] = hgnc_gene["hgnc_symbol"]
    if symbol != gene_obj["symbol"]:
        LOG.warning(
            "Symbol in database does not correspond to symbol in panel file for gene %s",
            hgnc_id,
        )
        LOG.warning(
            "Using symbol %s for gene %s, instead of %s"
            % (hgnc_gene["hgnc_symbol"], hgnc_id, symbol)
        )

    if gene_info.get("transcripts"):
        gene_obj["disease_associated_transcripts"] = gene_info["transcripts"]
    if gene_info.get("reduced_penetrance"):
        gene_obj["reduced_penetrance"] = True
    if gene_info.get("mosaicism"):
        gene_obj["mosaicism"] = True
    if gene_info.get("database_entry_version"):
        gene_obj["database_entry_version"] = gene_info["database_entry_version"]

    if gene_info.get("inheritance_models"):
        # Split declared models into the recognised set and custom ones; each
        # recognised model also sets a lower-cased boolean flag (e.g. ad=True).
        gene_obj["inheritance_models"] = []
        custom_models = []
        for model in gene_info["inheritance_models"]:
            if model not in VALID_MODELS:
                custom_models.append(model)
                continue
            gene_obj["inheritance_models"].append(model)
            lc_model = model.lower()
            gene_obj[lc_model] = True
        gene_obj["custom_inheritance_models"] = custom_models

    return gene_obj
def build_panel(panel_info, adapter):
    """Build a gene_panel object.

    Args:
        panel_info(dict): a dictionary with panel information; requires
            panel_id (or panel_name), institute, version and date
        adapter(scout.adapter.MongoAdapter)

    Returns:
        panel_obj(dict): with keys panel_name, institute, version, date,
            maintainer, display_name, description and genes

    Raises:
        KeyError: when a required key (id, institute, date) is missing
        IntegrityError: when the institute is unknown or any gene lookup fails
    """
    panel_name = panel_info.get("panel_id", panel_info.get("panel_name"))
    if panel_name:
        panel_name = panel_name.strip()
    else:
        raise KeyError("Panel has to have a id")

    panel_obj = dict(panel_name=panel_name)
    LOG.info("Building panel with name: {0}".format(panel_name))

    # Explicit membership tests replace the original try/except blocks that
    # re-raised their own KeyError (and left the caught exception unused).
    if "institute" not in panel_info:
        raise KeyError("Panel has to have a institute")
    institute_id = panel_info["institute"]
    # Check if institute exists in database
    if adapter.institute(institute_id) is None:
        raise IntegrityError("Institute %s could not be found" % institute_id)
    panel_obj["institute"] = institute_id

    panel_obj["version"] = float(panel_info["version"])

    if "date" not in panel_info:
        raise KeyError("Panel has to have a date")
    panel_obj["date"] = panel_info["date"]

    panel_obj["maintainer"] = panel_info.get("maintainer", [])
    panel_obj["display_name"] = panel_info.get("display_name", panel_obj["panel_name"])
    if panel_obj["display_name"]:
        panel_obj["display_name"] = panel_obj["display_name"].strip()
    panel_obj["description"] = panel_info.get("description")

    # Collect every failing gene so the user sees them all in one message.
    gene_objs = []
    errors = []
    for gene_info in panel_info.get("genes", []):
        try:
            gene_objs.append(build_gene(gene_info, adapter))
        except IntegrityError as err:
            LOG.warning(err)
            errors.append(f"{gene_info.get('hgnc_symbol')} ({gene_info.get('hgnc_id')})")
    if errors:
        raise IntegrityError(
            f"The following genes: {', '.join(errors)} were not found in Scout database."
        )
    panel_obj["genes"] = gene_objs

    return panel_obj
| StarcoderdataPython |
6520499 | """
* Python program to use OpenCV drawing tools to create a mask.
*
"""
import numpy as np
import skimage
from skimage.viewer import ImageViewer
# Load the original image
image = skimage.io.imread("maize-roots.tif")
viewer = ImageViewer(image)
viewer.show()
# Create the basic mask
mask = np.ones(shape=image.shape[0:2], dtype="bool")
# Draw a filled rectangle on the mask image
rr, cc = skimage.draw.rectangle(start=(357, 44), end=(740, 720))
mask[rr, cc] = False
# Apply the mask and display the result
image[mask] = 0
viewer = ImageViewer(mask)
viewer.show()
| StarcoderdataPython |
4813997 | <filename>array/easy/maxProfit.py
""" Summary
在遍历价格数组时,根据这个动态更新的最低价和当前的价格可以算出当前卖股票最大能赚多少钱。
"""
class Solution(object):
    """
    Problem:
        https://leetcode.com/problems/best-time-to-buy-and-sell-stock/

    Example:
        Input: [7, 1, 5, 3, 6, 4]
        Output: 5
    """

    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int
        """
        if not prices:
            return 0
        # Track the cheapest price seen so far; the best sale at each step is
        # the current price minus that running minimum.
        best = 0
        cheapest = prices[0]
        for price in prices[1:]:
            gain = price - cheapest
            if gain > best:
                best = gain
            if price < cheapest:
                cheapest = price
        return best
if __name__ == '__main__':
    # Smoke test: expected output is 5 (buy at 1, sell at 6).
    prices = [7,1,5,3,6,4]
    result = Solution().maxProfit(prices)
    print(result)
| StarcoderdataPython |
396989 | <filename>alto/commands/terra/remove_method.py
import argparse
from firecloud import api as fapi
from alto.utils import *
def main(argv):
    """Delete method snapshots from the Broad Methods Repository.

    The -m/--method argument takes namespace[/name[/version]]:
      * namespace only         -> delete every method in the namespace
      * namespace/name         -> delete every snapshot of that method
      * namespace/name/version -> delete a single snapshot

    Raises:
        ValueError: if no namespace is given or no methods match.
    """
    parser = argparse.ArgumentParser(description='Remove methods from Broad Methods Repository.')
    parser.add_argument('-m', '--method', dest='method', action='store', required=True, help='Method takes the format of namespace/name/version. If only namespace is provided, delete all methods under that namespace. If both namespace and name are provided, delete all snapshots for that method. If namespace, name and version are provided, only delete the specific snapshot.')
    args = parser.parse_args(argv)

    fields = args.method.split('/')
    # str.split() on a string never yields an empty list, so the original
    # "len(fields) == 0" guard could never fire; test the namespace itself.
    if not fields[0]:
        raise ValueError('No namespace specified!')
    method_namespace = fields[0]
    # BUG FIX: the original guards were "len(fields) > 0" and "len(fields) > 1",
    # so fields[1] raised IndexError whenever only a namespace was given (and
    # fields[2] whenever the version was omitted).
    method_name = fields[1] if len(fields) > 1 else None
    method_version = fields[2] if len(fields) > 2 else None

    methods = fapi.list_repository_methods(namespace=method_namespace, name=method_name).json()
    if len(methods) == 0:
        raise ValueError('No methods found')

    if method_name is None:  # delete all methods under specified namespace
        for method in methods:
            if method['namespace'] == method_namespace:
                fapi.delete_repository_method(method['namespace'], method['name'], method['snapshotId'])
                print(f'Deleted {method["namespace"]}/{method["name"]}/{method["snapshotId"]}')
    elif method_version is None:  # delete all versions
        for selected_method in methods:
            if selected_method['namespace'] == method_namespace and selected_method['name'] == method_name:
                fapi.delete_repository_method(selected_method['namespace'], selected_method['name'], selected_method['snapshotId'])
                print(f'Deleted {selected_method["namespace"]}/{selected_method["name"]}/{selected_method["snapshotId"]}')
    else:  # delete the specific version
        # NOTE(review): the requested version is not compared against the
        # returned snapshots; methods[0] is deleted regardless -- confirm
        # whether list_repository_methods already filters by snapshot.
        selected_method = methods[0]
        fapi.delete_repository_method(selected_method['namespace'], selected_method['name'], selected_method['snapshotId'])
        print(f'Deleted {selected_method["namespace"]}/{selected_method["name"]}/{selected_method["snapshotId"]}')
| StarcoderdataPython |
6576153 | # Generated by Django 2.2.7 on 2019-11-30 03:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Bovid model (one animal per row,
    owned by a user through a cascading foreign key)."""

    dependencies = [
        ('core', '0003_tag'),
    ]

    operations = [
        migrations.CreateModel(
            name='Bovid',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mothers_name', models.CharField(blank=True, max_length=150)),
                ('fathers_name', models.CharField(blank=True, max_length=150)),
                ('type_of_bovid', models.CharField(max_length=100)),
                ('breed', models.CharField(blank=True, max_length=100)),
                ('name', models.CharField(max_length=255)),
                ('breeder', models.CharField(blank=True, max_length=250)),
                # NOTE(review): these DateFields are blank=True but not
                # null=True, so the database still requires a value.
                ('date_of_birth', models.DateField(blank=True)),
                ('date_of_death', models.DateField(blank=True)),
                ('date_of_purchase', models.DateField(blank=True)),
                ('date_sold', models.DateField(blank=True)),
                # Automatic bookkeeping timestamps.
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
6678721 | <gh_stars>0
def multiply(a, b):
    """Return the product of *a* and *b*."""
    return a * b
| StarcoderdataPython |
9602520 | <filename>Naive_Bayes/docclass.py
import re
import math
from pysqlite2 import dbapi2 as sql
def sampletrain(cl):
    """Feed the classifier *cl* a tiny fixed training corpus."""
    samples = [
        ('Nobody owns the water.', 'good'),
        ('the quick rabbit jumps fences', 'good'),
        ('buy pharmaceuticals now', 'bad'),
        ('make quick money at the online casino', 'bad'),
        ('the quick brown fox jumps', 'good'),
    ]
    for doc, cat in samples:
        cl.train(doc, cat)
def getwords(doc):
    """Tokenize *doc* into lower-cased words of length 3-19.

    Returns:
        dict mapping each unique word to 1 (presence features).
    """
    # BUG FIX: the original pattern '\W*' can match the empty string; since
    # Python 3.7 re.split() also splits on empty matches, shattering the text
    # into single characters so no token survives the length filter.  '\W+'
    # splits on runs of non-word characters only and behaves identically on
    # Python 2 for this filter.
    splitter = re.compile(r'\W+')
    words = [s.lower() for s in splitter.split(doc)
             if len(s) > 2 and len(s) < 20]
    return dict([(w, 1) for w in words])
def getwordscount(doc):
    """Tokenize *doc* like getwords() but return per-word occurrence counts.

    Returns:
        dict mapping each word (lower-cased, length 3-19) to its count.
    """
    # '\W+' (not '\W*'): a pattern that can match the empty string makes
    # re.split() on Python >= 3.7 split between every character, breaking
    # tokenization entirely.
    splitter = re.compile(r'\W+')
    words = [s.lower() for s in splitter.split(doc)
             if len(s) > 2 and len(s) < 20]
    wordcount = {}
    for word in words:
        wordcount[word] = wordcount.get(word, 0) + 1
    return wordcount
class classifier(object):
    """Base text classifier storing feature/category counts in sqlite.

    Table fc(feature, category, count) holds per-category feature counts and
    cc(category, count) holds per-category document counts.  setdb() must be
    called before any counting method is used; __init__ opens no database.
    """

    def __init__(self, getfeatures, filename=None):
        # fc/cc kept for API compatibility with the original in-memory
        # version; the active implementation stores counts in sqlite.
        self.fc = {}
        self.cc = {}
        self.getfeatures = getfeatures  # callable: document -> {feature: weight}
        self.filename = filename

    def setdb(self, dbfile):
        """Open (or create) the sqlite database holding the count tables."""
        self.con = sql.connect(dbfile)
        self.con.execute(
            'create table if not exists fc(feature, category, count)')
        self.con.execute('create table if not exists cc(category, count)')

    def incf(self, f, cat):
        """Increment the count of feature *f* in category *cat*.

        Security fix: values are bound with '?' placeholders instead of being
        interpolated into the SQL text, so feature strings containing quotes
        can no longer break (or inject into) the statement.
        """
        count = self.fcount(f, cat)
        if count == 0:
            self.con.execute("insert into fc values (?,?,1)", (f, cat))
        else:
            self.con.execute(
                "update fc set count=? where feature=? and category=?",
                (int(count) + 1, f, cat))

    def incc(self, cat):
        """Increment the document count of category *cat* (parameterized)."""
        count = self.catcount(cat)
        if count == 0:
            self.con.execute("insert into cc values (?,1)", (cat,))
        else:
            self.con.execute(
                "update cc set count=? where category=?",
                (int(count) + 1, cat))

    def fcount(self, f, cat):
        """Return how often feature *f* was seen in *cat* (0 if never)."""
        res = self.con.execute(
            "select count from fc where feature=? and category=?",
            (f, cat)).fetchone()
        if res is None:
            return 0
        return float(res[0])

    def catcount(self, cat):
        """Return the number of documents trained for *cat* (0 if none)."""
        res = self.con.execute(
            "select count from cc where category=?", (cat,)).fetchone()
        if res is None:
            return 0
        return float(res[0])

    def totalcount(self):
        """Return the total number of trained documents."""
        res = self.con.execute("select sum(count) from cc").fetchone()
        if res is None:
            return 0
        # NOTE: sum() over an empty table yields a (None,) row, which is
        # returned unchanged -- matching the original behaviour.
        return res[0]

    def categories(self):
        """Return the list of known category names."""
        cur = self.con.execute("select category from cc")
        return [d[0] for d in cur]

    def train(self, item, cat):
        """Register *item* as an example of *cat* and commit the counts."""
        features = self.getfeatures(item)
        for f in features:
            self.incf(f, cat)
        self.incc(cat)
        self.con.commit()

    def fprob(self, f, cat):
        """P(feature | category): fcount / catcount, 0 for unseen categories."""
        if self.catcount(cat) == 0:
            return 0
        return self.fcount(f, cat) / self.catcount(cat)

    def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):
        """Smooth prf(f, cat) toward the assumed prior *ap*, counting the
        prior as *weight* observations of the feature."""
        basicprob = prf(f, cat)
        totals = sum([self.fcount(f, c) for c in self.categories()])
        bp = ((weight * ap) + (totals * basicprob)) / (weight + totals)
        return bp

    def itemclassify(self, item, cat):
        """Average the weighted per-word probabilities of *item* for *cat*,
        weighting repeated words by their occurrence count."""
        wordcount = getwordscount(item)
        prob = 0.0
        for word, count in wordcount.items():
            prob += self.weightedprob(word, cat, self.fprob) * count
        prob = float(prob) / sum(wordcount.values())
        return prob
class naivebayes(classifier):
    """Naive Bayes classifier with per-category decision thresholds."""

    def __init__(self, getfeatures):
        super(naivebayes, self).__init__(getfeatures)
        self.thresholds = {}  # category -> minimum odds ratio required to win

    def setthreshold(self, cat, t):
        """Require the best category to beat runners-up by factor *t*."""
        self.thresholds[cat] = t

    def getthreshold(self, cat):
        """Return the threshold for *cat*, defaulting to 0 (no threshold)."""
        if cat not in self.thresholds:
            return 0
        return self.thresholds[cat]

    def docprob(self, item, cat):
        """P(item | cat): product of weighted per-feature probabilities."""
        features = self.getfeatures(item)
        p = 1
        for f in features:
            p *= self.weightedprob(f, cat, self.fprob)
        return p

    def prob(self, item, cat):
        """Unnormalized P(cat | item) = P(item | cat) * P(cat)."""
        catprob = self.catcount(cat) / self.totalcount()
        docprob = self.docprob(item, cat)
        return docprob * catprob

    def classify(self, item, default=None):
        """Return the most probable category for *item*, or *default* when no
        category wins by its configured threshold."""
        probs = {}
        maxit = 0.0
        best = default
        for cat in self.categories():
            probs[cat] = self.prob(item, cat)
            if probs[cat] > maxit:
                maxit = probs[cat]
                best = cat
        # BUG FIX: the original left 'best' unbound when every category
        # scored 0 (or there were no categories), raising UnboundLocalError
        # and then KeyError on probs[best]; bail out to *default* instead.
        if maxit == 0.0:
            return default
        for cat in probs:
            if cat == best:
                continue
            if probs[cat] * self.getthreshold(best) > probs[best]:
                return default
        return best
class fisherclassifier(classifier):
    """Fisher-method classifier: combines per-feature category probabilities
    with an inverse chi-square test."""

    def __init__(self, getfeatures):
        super(fisherclassifier, self).__init__(getfeatures)
        self.minimums = {}  # category -> minimum fisher score required

    def setminimum(self, cat, minit):
        """Require at least score *minit* before *cat* can be chosen."""
        self.minimums[cat] = minit

    def getminimum(self, cat):
        """Return the minimum score for *cat*, defaulting to 0."""
        if cat not in self.minimums:
            return 0
        return self.minimums[cat]

    def cprob(self, f, cat):
        """P(category | feature): this category's frequency for *f*
        normalized by the feature's frequency across all categories."""
        clf = self.fprob(f, cat)
        if clf == 0:
            return 0
        freqsum = sum([self.fprob(f, c) for c in self.categories()])
        p = clf / freqsum
        return p

    def fisherprob(self, item, cat):
        """Fisher score: feed -2*ln(prod of feature probs) into the inverse
        chi-square function with 2*len(features) degrees of freedom."""
        p = 1
        features = self.getfeatures(item)
        for f in features:
            p *= self.weightedprob(f, cat, self.cprob)
        fscore = -2 * math.log(p)
        return self.invchi2(fscore, len(features) * 2)

    def invchi2(self, chi, df):
        """Inverse chi-square probability for an even *df*.

        BUG FIX: the original used xrange, which does not exist on Python 3;
        range() behaves identically here on both major versions.
        """
        m = chi / 2.0
        sumit = term = math.exp(-m)
        for i in range(1, df // 2):
            term *= m / i
            sumit += term
        return min(sumit, 1.0)

    def classify(self, item, default=None):
        """Return the category with the highest fisher score above its
        configured minimum, or *default* when none qualifies."""
        best = default
        maxit = 0.0
        for c in self.categories():
            p = self.fisherprob(item, c)
            if p > self.getminimum(c) and p > maxit:
                best = c
                maxit = p
        return best
| StarcoderdataPython |
4882258 | <filename>Admission Counselling For Direct Second Year/Web-Application/AdmissionDirectSecondYear/AdmissionDirectSecondYear/urls.py
"""AdmissionDirectSecondYear URL Configuration
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from . import views
from django.views.generic import TemplateView
# Route table: the app URLconfs are all included at the root, followed by
# the admin site and the home page rendered by views.home_page.
urlpatterns = [
    path('',include('AllIndiaColleges.urls')),
    path('',include('DisplayCollegesInMaharashtra.urls')),
    path('',include('TopColleges.urls')),
    path('',include('TopMaharashtraCollegeList.urls')),
    path('',include('WhereICanGetAdmission.urls')),
    path('admin/', admin.site.urls),
    path('',views.home_page,name="home"),
]
'''
url('All-India-Colleges/', TemplateView.as_view(template_name='all_india_college.html'),name='all_india_college'),
url('Top-Maharashtra-College/', TemplateView.as_view(template_name='top_college_of_maharashtra.html'),name='top_maharashta_college'),
url('Show-Seat-Matrix/', TemplateView.as_view(template_name='show_seat_matrix.html'),name='show_seat_matrix'),
url('Top-Colleges/', TemplateView.as_view(template_name='top_colleges.html'),name='top_colleges'),
url('College-I-Can-Get-Admission/', TemplateView.as_view(template_name='college_i_can_get_admission.html'),name='college_i_can_get'),
url('Cut-Off-List/', TemplateView.as_view(template_name='cut_off_list.html'),name='cut_off_list'),
url('Display-Colleges-In-Maharashtra/', TemplateView.as_view(template_name='display_colleges_in_maharashtra.html'),name='display_colleges_in_maharashtra'),
''' | StarcoderdataPython |
9632345 | <reponame>vandurme/TFMTL
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
# Copyright 2018 Johns Hopkins University.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from six.moves import xrange # pylint: disable=redefined-builtin
from mtl.layers.conv2d import conv2d as conv2d_wn
allow_defun = False
def shape_list(x):
    """Return list of dims, statically where possible."""
    x = tf.convert_to_tensor(x)

    # Unknown rank: only the dynamic shape tensor can describe it.
    if x.get_shape().dims is None:
        return tf.shape(x)

    static = x.get_shape().as_list()
    dynamic = tf.shape(x)
    # Use the static size where known, falling back to the dynamic tensor.
    return [dim if dim is not None else dynamic[i]
            for i, dim in enumerate(static)]
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
    """Conditional conv_fn making kernel 1d or 2d depending on inputs shape.

    Requires rank-4 inputs with a statically known rank.  Supports the extra
    padding modes "LEFT"/"CAUSAL" by padding manually on the top/left and
    then running the convolution with VALID padding.
    """
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
        raise ValueError("Inputs to conv must have statically known rank 4. "
                         "Shape: " + str(static_shape))
    # Add support for left padding.
    # NOTE(review): kwargs.get("padding") returns None when the caller omits
    # padding, so .upper() would raise AttributeError -- callers must pass it.
    padding = kwargs.get("padding").upper()
    if padding == "LEFT" or padding == "CAUSAL":
        dilation_rate = (1, 1)
        if "dilation_rate" in kwargs:
            dilation_rate = kwargs["dilation_rate"]
        assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
        # Pad only on the top/left so the convolution is causal.  Width
        # padding is skipped for width-1 (effectively 1d) inputs; when the
        # static width is unknown the decision is made dynamically via tf.cond.
        height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
        cond_padding = tf.cond(
            tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
            lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
        width_padding = 0 if static_shape[2] == 1 else cond_padding
        padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
        inputs = tf.pad(inputs, padding)
        # Set middle two dimensions to None to prevent convolution from complaining
        inputs.set_shape([static_shape[0], None, None, static_shape[3]])
        kwargs["padding"] = "VALID"

    def conv2d_kernel(kernel_size_arg, name_suffix):
        """Call conv2d but add suffix to name."""
        name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix)
        original_name = kwargs.pop("name", None)
        original_force2d = kwargs.pop("force2d", None)
        result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
        # Restore popped kwargs so repeated calls see the original arguments.
        if original_name is not None:
            kwargs["name"] = original_name  # Restore for other calls.
        if original_force2d is not None:
            kwargs["force2d"] = original_force2d
        return result

    return conv2d_kernel(kernel_size, "single")
def conv(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
    """2d convolution with LEFT/CAUSAL padding support (see conv_internal)."""
    return conv_internal(
        tf.layers.conv2d,
        inputs,
        filters,
        kernel_size,
        dilation_rate=dilation_rate,
        **kwargs)
def conv_wn(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs):
    """Like conv() but using the weight-normalized conv2d_wn kernel."""
    return conv_internal(
        conv2d_wn,
        inputs,
        filters,
        kernel_size,
        dilation_rate=dilation_rate,
        **kwargs)
def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
    """1d convolution implemented as a 2d conv over an inserted width-1 axis."""
    return tf.squeeze(
        conv(
            tf.expand_dims(inputs, 2),
            filters, (kernel_size, 1),
            dilation_rate=(dilation_rate, 1),
            **kwargs), 2)
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
    """A block of standard 2d convolutions.

    One convolution per (dilation_rate, kernel_size) pair; see
    conv_block_internal for the supported keyword arguments.
    """
    return conv_block_internal(conv, inputs, filters,
                               dilation_rates_and_kernel_sizes, **kwargs)
def conv_block_wn(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
    """A block of weight-normalized 2d convolutions (layer norm disabled)."""
    return conv_block_internal(conv_wn, inputs, filters,
                               dilation_rates_and_kernel_sizes,
                               use_layer_norm=False, **kwargs)
def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
    """A block of 1d convolutions (see conv1d and conv_block_internal)."""
    return conv_block_internal(conv1d, inputs, filters,
                               dilation_rates_and_kernel_sizes, **kwargs)
def conv_block_internal(conv_fn,
                        inputs,
                        filters,
                        dilation_rates_and_kernel_sizes,
                        initial_nonlinearity=True,
                        nonlinearity=tf.nn.relu,
                        use_layer_norm=True,
                        global_conditioning=None,
                        separabilities=None,
                        **kwargs):
    """Stack of convolutions: [nonlinearity ->] conv_fn [-> layer norm] per
    (dilation_rate, kernel_size) entry, with optional masking, global
    conditioning and per-layer separability."""
    name = kwargs.pop("name") if "name" in kwargs else None
    mask = kwargs.pop("mask") if "mask" in kwargs else None

    if use_layer_norm is True:
        use_normalizer_fn = True

        def norm(x, name):
            return layer_norm(x, filters, name=name)
    else:
        use_normalizer_fn = False

    batch_size = tf.shape(inputs)[0]
    batch_len = tf.shape(inputs)[1]
    # Broadcast the conditioning vector across the time axis so it can be
    # added to every position's input; defaults to zeros (no conditioning).
    if global_conditioning is None:
        global_conditioning = tf.zeros([batch_size, batch_len])
    h = global_conditioning
    final_dim = tf.shape(h)[-1]
    h = tf.expand_dims(h, axis=1)
    h = tf.expand_dims(h, axis=2)
    h = tf.tile(h, [1, batch_len, 1, 1])
    h = tf.reshape(h, (batch_size, batch_len, 1, final_dim))

    with tf.variable_scope(name, "conv_block", [inputs]):
        cur, counter = inputs, -1
        for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
            counter += 1
            # Apply the nonlinearity before every conv except (optionally)
            # the first.
            if initial_nonlinearity or counter > 0:
                cur = nonlinearity(cur)
            if mask is not None:
                cur *= mask
            # A bias is only useful when layer norm (which has its own
            # learned offset) is disabled.
            if separabilities:
                cur = conv_fn(
                    cur + h,
                    filters,
                    kernel_size,
                    dilation_rate=dilation_rate,
                    name="conv_block_%d" % counter,
                    use_bias=use_layer_norm is False,
                    separability=separabilities[counter],
                    **kwargs)
            else:
                cur = conv_fn(
                    cur + h,
                    filters,
                    kernel_size,
                    dilation_rate=dilation_rate,
                    name="conv_block_%d" % counter,
                    use_bias=use_layer_norm is False,
                    **kwargs)
            if use_normalizer_fn:
                cur = norm(cur, name="conv_block_norm_%d" % counter)
        return cur
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
    """Layer normalize the tensor x, averaging over the last dimension."""
    if filters is None:
        filters = shape_list(x)[-1]
    with tf.variable_scope(name, default_name="layer_norm", values=[x],
                           reuse=reuse):
        # Learned per-channel gain and offset applied after normalization.
        scale = tf.get_variable(
            "layer_norm_scale", [filters], initializer=tf.ones_initializer())
        bias = tf.get_variable(
            "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
        result = layer_norm_compute_python(x, epsilon, scale, bias)
        return result
def flatten4d3d(x):
    """Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
    xshape = shape_list(x)
    # (d0, d1, d2, d3) -> (d0, d1*d2, d3); mixes static and dynamic dims.
    result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
    return result
def layer_norm_compute_python(x, epsilon, scale, bias):
    """Layer norm raw computation: normalize over the last axis then apply
    the learned scale and bias."""
    # Cast parameters to x's dtype so mixed-precision inputs work.
    epsilon, scale, bias = [tf.cast(t, x.dtype) for t in [epsilon, scale, bias]]
    # keep_dims is the TF1 spelling (renamed keepdims in TF2).
    mean = tf.reduce_mean(x, axis=[-1], keep_dims=True)
    variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True)
    norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
    return norm_x * scale + bias
| StarcoderdataPython |
261410 | <gh_stars>0
"""
Test the health check endpoints
"""
def test_live(mini_sentry, relay):
    """Internal liveness endpoint polled by kubernetes."""
    upstream = relay(mini_sentry)
    assert upstream.get("/api/relay/healthcheck/live/").status_code == 200
def test_external_live(mini_sentry, relay):
    """Endpoint called by a downstream to see if it has network connection to the upstream."""
    upstream = relay(mini_sentry)
    assert upstream.get("/api/0/relays/live/").status_code == 200
def test_is_healthy(mini_sentry, relay):
    """Internal readiness endpoint used by kubernetes."""
    upstream = relay(mini_sentry)
    # NOTE this is redundant but placed here to clearly show the exposed
    # endpoint (internally the relay fixture waits for the ready health
    # check anyway).
    assert upstream.get("/api/relay/healthcheck/ready/").status_code == 200
| StarcoderdataPython |
6596413 | import os
import lief
import re
import mimetypes
import hashlib
import os.path
import subprocess
from os import path
from string import digits
from . import CtagsHandler
class FileHandler:
    """Dispatches a file to a MIME-type-specific handler that extracts
    identifier/symbol tokens (via ctags, strings, ar or lief) and returns
    them as a comma-separated row prefixed with the file's checksum.

    NOTE(review): the subprocess calls below build shell command strings by
    concatenating file paths with shell=True; paths containing shell
    metacharacters would be unsafe -- confirm inputs are trusted.
    """

    def __init__(self, errorHandler, filepath, filename, filetype, checksum):
        self.debug = errorHandler   # logger-like object with .info()/.error()
        self.filepath = filepath
        self.filename = filename
        self.filetype = filetype    # MIME type string, key into preload_handlers()
        self.checksum = checksum    # precomputed checksum prefixed to each row

    def preload_handlers(self):
        """Map MIME type -> bound handler method (unknown types raise KeyError)."""
        # This should be replaced by a BD
        return {
            'text/x-c': self.handle_cplusplus,
            'text/x-c++': self.handle_cplusplus,
            'text/x-python': self.handle_python,
            'text/x-perl': self.handle_perl,
            'text/x-ruby': self.handle_ruby,
            'text/x-rust': self.handle_rust,
            'text/x-java': self.handle_java,
            'text/x-objective-c': self.handle_objectivec,
            'application/x-mach-binary': self.handle_mach_o,
            'application/x-sharedlib': self.handle_sharedlib,
            'application/x-dosexec': self.handle_strings,
            'application/x-archive': self.handle_ar,
            'font/sfnt': self.handle_strings,
            'application/octet-stream': self.ignore,
            'application/x-ms-pdb': self.ignore,
            'image/vnd.microsoft.icon': self.ignore,
            'text/x-shellscript': self.ignore,
            'text/xml': self.ignore,
            'application/csv': self.ignore,
            'text/x-tex': self.ignore,
            'text/x-makefile': self.ignore,
            'application/json': self.ignore,
            'text/html': self.ignore,
            'image/x-portable-pixmap': self.ignore,
            'image/webp': self.ignore,
            'image/png': self.ignore,
            'image/x-tga': self.ignore,
            'image/g3fax': self.ignore,
            'image/gif': self.ignore,
            'image/jpeg': self.ignore,
            'application/x-wine-extension-ini': self.ignore,
            'audio/mpeg': self.ignore,
            'audio/x-wav': self.ignore,
            'video/mp4': self.ignore,
            'inode/x-empty': self.ignore
        }

    def run_handler(self):
        """Look up and invoke the handler for self.filetype; unregistered
        types are logged and skipped (returning None)."""
        try:
            handlers = self.preload_handlers()
            handler = handlers[self.filetype]
            # print(self.filetype, self.filename)
            return handler(self.filepath, self.filename, self.checksum)
        except KeyError:
            self.debug.error('handler not implemented for ' + self.filetype)
            fullPath = os.path.join(self.filepath, self.filename)
            self.debug.error('skipping ' + fullPath)

    def ignore(self, filepath, filename, checksum):
        """No-op handler for types that are deliberately skipped."""
        self.debug.info('skipping ' + self.filename)
        return ''

    def handle_ar(self, filepath, filename, checksum):
        """Static archive (.a): list members with 'ar t', then mine mangled
        C++ symbols (_Z...) out of 'strings' output.

        NOTE(review): implicitly returns None when the file does not exist.
        """
        fullpath = filepath+"/"+filename
        if(path.exists(fullpath)):
            cmd = 'ar t ' + fullpath
            process = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            (result, error) = process.communicate()
            rc = process.wait()
            process.stdout.close()
            rstTXT = result.decode('utf-8')
            results = self.stripNonAlphaNum(' '.join(rstTXT.split()))
            prow = checksum + "," + ",".join(results)
            prow = prow + "," + os.path.splitext(filename)[0]
            # Second pass: pull printable strings and demangle C++ symbols.
            cmd = 'strings -n 5 ' + fullpath
            process = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            (result, error) = process.communicate()
            rc = process.wait()
            process.stdout.close()
            rstTXT = result.decode('utf-8')
            row = ""
            results = self.stripNonAlphaNum(','.join(rstTXT.split()))
            for x in results:
                if x.startswith('_Z'):
                    row = row + ",".join(self.demangle(x))
            # Deduplicate the collected name fragments.
            row = list(set(row.split(',')))
            prow = prow + ",".join(row) + "," + os.path.splitext(filename)[0]
            return prow

    def handle_strings(self, filepath, filename, checksum):
        """Generic binary: emit every printable string of length >= 5.

        NOTE(review): unlike handle_ar there is no ',' between the checksum
        and the first token, and None is returned for missing files --
        verify both are intended.
        """
        fullpath = filepath+"/"+filename
        if(path.exists(fullpath)):
            cmd = 'strings -n 5 ' + fullpath
            process = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            (result, error) = process.communicate()
            rc = process.wait()
            process.stdout.close()
            rstTXT = result.decode('utf-8')
            results = self.stripNonAlphaNum(' '.join(rstTXT.split()))
            prow = checksum + ",".join(results)
            prow = prow + "," + os.path.splitext(filename)[0]
            return prow

    def handle_sharedlib(self, filepath, filename, checksum):
        """Shared library: demangle the names of all exported dynamic symbols
        parsed by lief."""
        fullPath = os.path.join(filepath, filename)
        libSO = lief.parse(fullPath)
        symbols = []
        # NOTE(review): 'iter' shadows the builtin of the same name.
        iter = filter(lambda e: e.exported, libSO.dynamic_symbols)
        for idx, lsym in enumerate(iter):
            symbols.extend(self.demangle(lsym.name))
        symbols = list(set(symbols))
        prow = checksum + ','
        prow = prow + ",".join(symbols)
        prow = prow + "," + os.path.splitext(filename)[0]
        return prow

    def handle_mach_o(self, filepath, filename, checksum):
        """Mach-O binary: extract symbol name fragments with lief for files
        up to 1 MiB; fall back to 'strings' + demangling when lief found at
        most one symbol (or the file was too large to parse).

        NOTE(review): structure reconstructed from mangled indentation --
        confirm that the strings fallback is meant to apply to >1MiB files.
        """
        fullPath = os.path.join(filepath, filename)
        symbols = []
        if os.stat(fullPath).st_size <= 1048576:
            libSO = lief.parse(fullPath)
            # NOTE(review): remove_digits is built but never used.
            remove_digits = str.maketrans(',', ',', digits)
            for i in libSO.symbols:
                symbol = i.name
                # Split each symbol on non-alphanumeric runs and digits.
                symbol = re.sub("[^a-zA-Z0-9]+", ",", symbol)
                symbol = re.sub("\d+", ",", symbol)
                symbols.extend(symbol.split(','))
            symbols = list(set(symbols))
            while("" in symbols):
                symbols.remove("")
        prow = checksum + ','
        prow = prow + ",".join(symbols)
        if len(symbols) <= 1:
            # Fallback: mine mangled C++ names out of the strings output.
            cmd = 'strings -n 5 ' + fullPath
            process = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            (result, error) = process.communicate()
            rc = process.wait()
            process.stdout.close()
            rstTXT = result.decode('utf-8')
            row = ""
            results = self.stripNonAlphaNum(','.join(rstTXT.split()))
            for sym in results:
                if sym.startswith('_Z'):
                    row = row + ",".join(self.demangle(sym))
            row = list(set(row.split(',')))
            prow = checksum + ','
            prow = prow + ",".join(row)
        prow = prow + "," + os.path.splitext(filename)[0]
        return prow

    def handle_objectivec(self, filepath, filename, checksum):
        """Objective-C source: extract identifiers via ctags."""
        fname = os.path.splitext(filename)[0]
        target = os.path.join(filepath, filename)
        ctags = CtagsHandler.CtagsHandler(target)
        ctags.setLang('objectivec')
        ctags.setLangMap('objectivec:.h.m')
        rst = ','.join([checksum, ctags.run(), fname])
        return rst

    def handle_rust(self, filepath, filename, checksum):
        """Rust source: extract identifiers via ctags."""
        fname = os.path.splitext(filename)[0]
        target = os.path.join(filepath, filename)
        ctags = CtagsHandler.CtagsHandler(target)
        ctags.setLang('Rust')
        ctags.setLangMap('Rust:.rs')
        rst = ','.join([checksum, ctags.run(), fname])
        return rst

    def handle_ruby(self, filepath, filename, checksum):
        """Ruby source: extract identifiers via ctags (includes .rake)."""
        fname = os.path.splitext(filename)[0]
        target = os.path.join(filepath, filename)
        ctags = CtagsHandler.CtagsHandler(target)
        ctags.setLang('ruby')
        ctags.setLangMap('ruby:+.rake')
        rst = ','.join([checksum, ctags.run(), fname])
        return rst

    def handle_perl(self, filepath, filename, checksum):
        """Perl source: extract identifiers via ctags (includes .t tests)."""
        fname = os.path.splitext(filename)[0]
        target = os.path.join(filepath, filename)
        ctags = CtagsHandler.CtagsHandler(target)
        ctags.setLang('Perl')
        ctags.setLangMap('Perl:+.t')
        rst = ','.join([checksum, ctags.run(), fname])
        return rst

    def handle_cplusplus(self, filepath, filename, checksum):
        """C/C++ source: extract identifiers via ctags, including locals."""
        fname = os.path.splitext(filename)[0]
        target = os.path.join(filepath, filename)
        ctags = CtagsHandler.CtagsHandler(target)
        ctags.setOption('--kinds-C++=+l')
        ctags.setOption('-o -')
        rst = ','.join([checksum, ctags.run(), fname])
        return rst

    def handle_python(self, filepath, filename, checksum):
        """Python source: extract identifiers via ctags, skipping imports
        and variables (-iv)."""
        fname = os.path.splitext(filename)[0]
        target = os.path.join(filepath, filename)
        ctags = CtagsHandler.CtagsHandler(target)
        ctags.setLang('python')
        ctags.setOption('--python-kinds=-iv')
        rst = ','.join([checksum, ctags.run(), fname])
        return rst

    def handle_java(self, filepath, filename, checksum):
        """Java source: extract identifiers via ctags (includes AspectJ .aj)."""
        fname = os.path.splitext(filename)[0]
        target = os.path.join(filepath, filename)
        ctags = CtagsHandler.CtagsHandler(target)
        ctags.setLang('Java')
        ctags.setLangMap('java:+.aj')
        rst = ','.join([checksum, ctags.run(), fname])
        return rst

    def stripNonAlphaNum(self, text):
        """Split *text* on runs of non-word characters (unicode-aware)."""
        return re.compile(r'\W+', re.UNICODE).split(text)

    def demangle(self, name):
        """Crude demangling: break *name* on non-letter runs, lower-case the
        fragments and deduplicate them into a set."""
        s = re.sub("[^a-zA-Z]+", ",", name)
        # NOTE(review): str.replace returns a new string; this result is
        # discarded, so the call is a no-op.
        s.replace(',,', ',')
        lst = s.split(",")
        lst = (map(lambda x: x.lower(), lst))
        lst = set(lst)
        # dstring = demangle(name)
        # results = self.stripNonAlphaNum(dstring)
        # return ' '.join(results).split()
        return lst
| StarcoderdataPython |
class EMdata(object):
    """Container for electron-holography data: raw holograms, reconstructed
    amplitude/phase images, phase differences and derived fields.

    All attributes start as None and are filled in by later processing steps.
    """

    def __init__(self):
        # Raw holograms and the aligned second exposure.
        self.holo_1 = self.holo_2 = self.holo_ref = self.holo_2_aligned = None
        # Reconstructed amplitude and phase images.
        self.phase_1 = self.phase_2 = self.phase_ref = None
        self.amplitude_1 = self.amplitude_2 = None
        # Phase differences (with and without correction).
        self.diff_1_ref = self.diff_2_ref = None
        self.diff_2_1_cor = self.diff_2_1_not_cor = None
        # Derived field maps and calibration values.
        self.field = self.field_not_cor = None
        self.pixel = self.constant = None
| StarcoderdataPython |
12856968 | # DBPLoadController.py
# 1) Run Validate on the files to process
# 2) Move any Fileset that is accepted to uploading
# 3) Perform upload
# 4) Move any fully uploaded fileset to database
# 5) Update fileset related tables
# 6) Move updated fileset to complete
import os
from Config import *
from RunStatus import *
from LPTSExtractReader import *
from Log import *
from InputFileset import *
from Validate import *
from S3Utility import *
from SQLBatchExec import *
from UpdateDBPFilesetTables import *
from UpdateDBPBiblesTable import *
from UpdateDBPLPTSTable import *
from UpdateDBPVideoTables import *
from UpdateDBPBibleFilesSecondary import *
class DBPLoadController:
    """Coordinates the DBP load pipeline described in the module header:
    validate input filesets, upload the accepted ones, then update the
    fileset-related database tables, recording per-fileset outcomes in
    RunStatus."""

    def __init__(self, config, db, lptsReader):
        self.config = config
        self.db = db
        self.lptsReader = lptsReader
        self.s3Utility = S3Utility(config)
        # Matches a stock-number marker: "__" followed by 8 uppercase
        # letters/digits.
        self.stockNumRegex = re.compile("__[A-Z0-9]{8}")

    ## This corrects filesets that have stock number instead of damId in the filename.
    def repairAudioFileNames(self, inputFilesets):
        """Rewrite .mp3 filenames whose last 10 basename characters look like
        a stock number, substituting the fileset's damId instead."""
        for inp in inputFilesets:
            for index in range(len(inp.files)):
                file = inp.files[index]
                if file.name.endswith(".mp3"):
                    namePart = file.name.split(".")[0]
                    # The damId is assumed to occupy the last 10 characters
                    # of the basename -- confirm for all naming schemes.
                    damId = namePart[-10:]
                    if self.stockNumRegex.match(damId):
                        inp.files[index].name = namePart[:-10] + inp.filesetId[:10] + ".mp3"

    def validate(self, inputFilesets):
        """Run Validate over the filesets; queue those that produced a CSV
        file for upload and mark the rest as failed in RunStatus."""
        validate = Validate(self.config, self.db)
        validate.process(inputFilesets)
        for inp in inputFilesets:
            if os.path.isfile(inp.csvFilename):
                InputFileset.upload.append(inp)
            else:
                RunStatus.set(inp.filesetId, False)

    def updateBibles(self):
        """Rebuild the DBP bibles table from LPTS; returns True on success."""
        dbOut = SQLBatchExec(self.config)
        bibles = UpdateDBPBiblesTable(self.config, self.db, dbOut, self.lptsReader)
        bibles.process()
        #dbOut.displayStatements()
        dbOut.displayCounts()
        success = dbOut.execute("bibles")
        RunStatus.set(RunStatus.BIBLE, success)
        return success

    def upload(self, inputFilesets):
        """Upload the filesets to S3, build the secondary zip files and
        flush the log."""
        self.s3Utility.uploadAllFilesets(inputFilesets)
        secondary = UpdateDBPBibleFilesSecondary(self.config, None, None)
        secondary.createAllZipFiles(inputFilesets)
        Log.writeLog(self.config)

    def updateFilesetTables(self, inputFilesets):
        """Update the fileset (and, for video, the video) tables for each
        fileset, executing one SQL batch per fileset and recording the
        outcome in RunStatus / InputFileset.complete."""
        # NOTE(review): dead assignment; 'inp' is rebound by the loop below.
        inp = inputFilesets
        dbOut = SQLBatchExec(self.config)
        update = UpdateDBPFilesetTables(self.config, self.db, dbOut)
        video = UpdateDBPVideoTables(self.config, self.db, dbOut)
        for inp in inputFilesets:
            hashId = update.processFileset(inp)
            if inp.typeCode == "video":
                video.processFileset(inp.filesetPrefix, inp.filenames(), hashId)
            # NOTE(review): batch execution per fileset reconstructed from
            # mangled indentation -- confirm it belongs inside the loop.
            dbOut.displayCounts()
            success = dbOut.execute(inp.batchName())
            RunStatus.set(inp.filesetId, success)
            if success:
                InputFileset.complete.append(inp)
            else:
                print("********** Fileset Table %s Update Failed **********" % (inp.filesetId))

    def updateLPTSTables(self):
        """Refresh the LPTS-derived DBP tables; returns True on success."""
        dbOut = SQLBatchExec(self.config)
        lptsDBP = UpdateDBPLPTSTable(self.config, dbOut, self.lptsReader)
        lptsDBP.process()
        #dbOut.displayStatements()
        dbOut.displayCounts()
        success = dbOut.execute("lpts")
        RunStatus.set(RunStatus.LPTS, success)
        return success
if (__name__ == '__main__'):
    config = Config()
    AWSSession.shared() # ensure AWSSession init
    db = SQLUtility(config)
    lptsReader = LPTSExtractReader(config.filename_lpts_xml)
    ctrl = DBPLoadController(config, db, lptsReader)
    # With fileset arguments beyond the environment argument: run the full
    # pipeline.  With no fileset arguments (len(sys.argv) == 2): only
    # refresh the bibles and LPTS tables (see usage comments below).
    if len(sys.argv) != 2:
        InputFileset.validate = InputFileset.filesetCommandLineParser(config, AWSSession.shared().s3Client, lptsReader)
        ctrl.repairAudioFileNames(InputFileset.validate)
        ctrl.validate(InputFileset.validate)
        if ctrl.updateBibles():
            ctrl.upload(InputFileset.upload)
            ctrl.updateFilesetTables(InputFileset.database)
            ctrl.updateLPTSTables()
            for inputFileset in InputFileset.complete:
                print("Completed: ", inputFileset.filesetId)
    else:
        ctrl.updateBibles()
        ctrl.updateLPTSTables()
    RunStatus.exit()
# Get currrent lpts-dbp.xml
# aws --profile DBP_DEV s3 cp s3://dbp-etl-upload-newdata-fiu49s0cnup1yr0q/lpts-dbp.xml /Volumes/FCBH/bucket_data/lpts-dbp.xml
# Clean up filesets in dbp-stating and dbp-vid-staging
# Prepare by getting some local data into a test bucket
# aws s3 --profile dbp-etl-dev sync --acl bucket-owner-full-control /Volumes/FCBH/all-dbp-etl-test/audio/UNRWFW/UNRWFWP1DA s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/UNRWFWP1DA
# aws s3 --profile dbp-etl-dev sync --acl bucket-owner-full-control /Volumes/FCBH/all-dbp-etl-test/HYWWAVN2ET s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/HYWWAVN2ET
# aws s3 --profile dbp-etl-dev sync --acl bucket-owner-full-control /Volumes/FCBH/all-dbp-etl-test/ENGESVP2DV s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/ENGESVP2DV
# No parameter, should execute only bible and lpts updates
# time python3 load/DBPLoadController.py test
# Successful tests with source on local drive
# time python3 load/TestCleanup.py test HYWWAV
# time python3 load/TestCleanup.py test HYWWAVN_ET-usx
# time python3 load/TestCleanup.py test ENGESVP2DV
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ HYWWAVN2ET
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ ENGESVP2DV
# Successful tests with source on s3
# time python3 load/TestCleanup.py test UNRWFWP1DA
# time python3 load/TestCleanup.py test UNRWFWP1DA-opus16
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr UNRWFWP1DA
# time python3 load/TestCleanup.py test HYWWAV
# time python3 load/TestCleanup.py test HYWWAVN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr HYWWAVN2ET
# time python3 load/TestCleanup.py test ENGESVP2DV
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr ENGESVP2DV
# Combined test of two dissimilar filesets on s3
# time python3 load/TestCleanup.py test UNRWFWP1DA
# time python3 load/TestCleanup.py test HYWWAV
# time python3 load/TestCleanup.py test HYWWAVN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr UNRWFWP1DA HYWWAVN2ET
# Some video uploads
# time python3 load/TestCleanup.py test ENGESVP2DV
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ ENGESVP2DV
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ video/ENGESV/ENGESVP2DV
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ video/ENGESX/ENGESVP2DV
# Successful tests with source on local drive and full path
# time python3 load/TestCleanup.py test GNWNTM
# time python3 load/TestCleanup.py test GNWNTMN_ET-usx
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ GNWNTMN2ET
# time python3 load/TestCleanup.py test GNWNTM
# time python3 load/TestCleanup.py test GNWNTMN_ET-usx
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ text/GNWNTM/GNWNTMN2ET
### prepare test data in bucket
### aws --profile DBP_DEV s3 sync /Volumes/FCBH/TextStockNo/Barai_N2BBBWBT_USX/ s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/Barai_N2BBBWBT_USX/
### aws --profile DBP_DEV s3 sync /Volumes/FCBH/TextStockNo/Orma_N2ORCBTL_USX/ s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/Orma_N2ORCBTL_USX/
### aws --profile DBP_DEV s3 sync /Volumes/FCBH/TextStockNo/Urdu_N2URDPAK_USX/ s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/Urdu_N2URDPAK_USX/
# Test stock number upload from Drive with path
# time python3 load/TestCleanup.py test BBBWBT
# time python3 load/TestCleanup.py test BBBWBTN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/ Barai_N2BBBWBT_USX
# time python3 load/TestCleanup.py test ORCBTL
# time python3 load/TestCleanup.py test ORCBTLN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/ Orma_N2ORCBTL_USX
# time python3 load/TestCleanup.py test URDPAK
# time python3 load/TestCleanup.py test URDPAKN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/ Urdu_N2URDPAK_USX
# python3 load/TestCleanup.py test ABIWBT
# python3 load/TestCleanup.py test ABIWBTN_ET-usx
# python3 load/DBPLoadController.py test s3://dbp-etl-mass-batch "Abidji N2ABIWBT/05 DBP & GBA/Abidji_N2ABIWBT/Abidji_N2ABIWBT_USX"
# This one BiblePublisher has two copies of 1CO:16, but I can only find one in the USX file.
# python3 load/TestCleanup.py test ACHBSU
# python3 load/TestCleanup.py test ACHBSUN_ET-usx
# python3 load/DBPLoadController.py test s3://dbp-etl-mass-batch "Acholi N2ACHBSU/05 DBP & GBA/Acholi_N2ACHBSU - Update/Acholi_N2ACHBSU_USX"
# python3 load/TestCleanup.py test CRXWYI
# python3 load/TestCleanup.py test CRXWYIP_ET-usx
# python3 load/TestCleanup.py test CRXWYIN_ET-usx
# python3 load/DBPLoadController.py test s3://dbp-etl-mass-batch "Carrier, Central N2CRXWYI/05 DBP & GBA/Carrier, Central_P1CRXWYI/Carrier, Central_P1CRXWYI_USX"
| StarcoderdataPython |
5032500 | import subprocess
import re
# Read the new version emitted by `poetry version` and mirror it into the
# package's __version__ module, then create a matching git tag.
p = subprocess.check_output("poetry version", shell=True)
# `poetry version` output is matched against str(bytes), so the pattern's
# "\\\\" matches the literal backslash of the "\n" escape in the bytes repr.
# NOTE(review): fragile -- depends on both poetry's output wording and on
# bytes-repr formatting; verify against the installed poetry version.
ver = re.match(".* to (.*)\\\\.*$", str(p)).group(1)
open("attrs_serde/__version__.py", "w").write('__version__ = "{}"'.format(ver))
print("git tag v{}".format(ver))
print(subprocess.check_call("git tag v{}".format(ver), shell=True))
print("now run poetry publish --build, git push --tags")
| StarcoderdataPython |
8104454 | <filename>lingcod/async/migrations/0001_initial.py
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: create (forwards) / drop (backwards) the
    async_urltotaskid table mapping request URLs to Celery task ids."""

    def forwards(self, orm):
        # Adding model 'URLtoTaskID'
        db.create_table('async_urltotaskid', (
            ('url', self.gf('django.db.models.fields.TextField')()),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('task_id', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('async', ['URLtoTaskID'])

    def backwards(self, orm):
        # Deleting model 'URLtoTaskID'
        db.delete_table('async_urltotaskid')

    # Frozen ORM state used by South when running this migration.
    models = {
        'async.urltotaskid': {
            'Meta': {'object_name': 'URLtoTaskID'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'task_id': ('django.db.models.fields.TextField', [], {}),
            'url': ('django.db.models.fields.TextField', [], {})
        }
    }

# Apps whose frozen model state is complete in this migration.
complete_apps = ['async']
| StarcoderdataPython |
1902042 | <reponame>KatharineShapcott/levelup-exercises<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
# Demonstrate the Central Limit Theorem: for several source distributions
# sharing the same mean, the distribution of per-repeat sample means
# approaches a normal as n_samples grows.
# set up parameters
n_samples = 30
n_repeats = 1000
distribution_mean = 10

# One column per distribution: row 0 = pooled raw samples, row 1 = sample
# means.  (BUGFIX: the original created a 5x2 grid but indexed axs[0, 3]
# and axs[1, 3] -- an IndexError -- and re-plotted the exponential data in
# both the gamma and binomial panels.)
fig, axs = plt.subplots(2, 4, constrained_layout=True)


def _plot(col, title, samples):
    # Raw pooled samples on top, per-repeat means underneath.
    axs[0, col].hist(samples.flatten(), 50)
    axs[0, col].set_title('%s: orig.' % title)
    axs[1, col].hist(np.mean(samples, axis=0), 50)
    axs[1, col].set_title('%s: means' % title)


# uniform on [0, 2*mean] so its expectation equals distribution_mean
uniform = np.random.rand(n_samples, n_repeats) * distribution_mean * 2
_plot(0, 'Uniform', uniform)

exp = np.random.exponential(distribution_mean, size=(n_samples, n_repeats))
_plot(1, 'Exponential', exp)

gamma = np.random.gamma(distribution_mean, size=(n_samples, n_repeats))
_plot(2, 'Gamma', gamma)

# binomial with n_flips * p == distribution_mean
p = 0.5
n_flips = distribution_mean / p
binomial = np.random.binomial(n_flips, p, size=(n_samples, n_repeats))
_plot(3, 'Binomial', binomial)

plt.show()
11386934 | <reponame>chschtsch/kiuss<filename>kiuss/local_settings_example.py
import os
# Project base directory (two levels above this settings file).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# NOTE: example placeholder -- set a real, secret value per deployment.
SECRET_KEY = '<KEY>'
# DEBUG = True
# THUMBNAIL_DEBUG = True
# SQLite database stored alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'kiuss.sqlite3'),
    }
}
# Hosts Django will serve when DEBUG is off.
ALLOWED_HOSTS = [
    'kiusscollective.pl',
    'www.kiusscollective.pl',
]
| StarcoderdataPython |
9670351 | from haystack import indexes
from .models import Request
class RequestIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for Request objects.

    `text` is the primary document field (rendered from a template,
    n-gram tokenised for partial matching); `name` mirrors Request.title.
    """
    text = indexes.NgramField(document=True, use_template=True)
    name = indexes.CharField(model_attr='title')

    def get_model(self):
        return Request

    def index_queryset(self, using=None):
        # Index every Request; narrow this queryset to limit what is indexed.
        return self.get_model().objects.all()
| StarcoderdataPython |
3376500 |
def test_something():
    """Placeholder test; intentionally empty (see TODO)."""
    pass
    # TODO: write this test
| StarcoderdataPython |
3464247 | <filename>autodmg_cache_builder/autodmg_utility.py
#!/usr/bin/python
"""Utility functions used by other parts of the AutoDMG build tools."""
import subprocess
import os
import tempfile
import shutil
def run(cmd):
    """Run a command with subprocess, printing output in realtime.

    stderr is merged into stdout; lines are echoed while the process is
    alive, then any remaining buffered output is flushed after it exits.
    Returns the process exit code.  (Python 2 syntax.)
    """
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT
    )
    while proc.poll() is None:
        l = proc.stdout.readline()
        print l,
    # Flush whatever remains after the process has exited.
    print proc.stdout.read()
    return proc.returncode
def pkgbuild(root_dir, identifier, version, pkg_output_file):
    """Build a package from root_dir at pkg_output_file via /usr/bin/pkgbuild."""
    run([
        '/usr/bin/pkgbuild',
        '--root', root_dir,
        '--identifier', identifier,
        '--version', version,
        pkg_output_file,
    ])
def build_pkg(source, output, receipt, destination, cache_dir, comment=''):
    """
    Construct package using pkgbuild.
    source - the directory to build a package from
    output - the name of the package file to build ('.pkg' is appended)
    receipt - the receipt of the package
    destination - the directory path to place the payload in
    cache_dir - the directory to place the built package into
    comment - A message to print out when building
    Returns the path to the built package, or '' if nothing was built.
    (Python 2 syntax.)
    """
    # Only build when the source directory exists and is non-empty.
    if os.path.isdir(source) and os.listdir(source):
        print comment
        pkg_name = '%s.pkg' % output
        # We must copy the contents into a temp folder and build
        prefix = 'cpe_%s' % receipt.split('.')[-1]
        temp_dir = tempfile.mkdtemp(prefix=prefix, dir='/tmp')
        pkg_dir = os.path.join(temp_dir, destination.lstrip('/'))
        # Copy the contents of the folder into place
        shutil.copytree(source, pkg_dir)
        # Build the package
        output_file = os.path.join(cache_dir, pkg_name)
        pkgbuild(
            temp_dir,
            receipt,
            '1.0',
            output_file
        )
        # Clean up after ourselves
        shutil.rmtree(temp_dir, ignore_errors=True)
        # Return the path to the package
        if os.path.isfile(output_file):
            return output_file
    # If nothing was built, return empty string
    return ''
def populate_ds_repo(image_path, repo):
    """Move a built image into the DS repo.

    DeployStudio masters must end in '.hfs.dmg', so a plain '.dmg' is
    renamed first.  An existing image of the same name is preserved by
    renaming it with an '-OLD' suffix before the new one is moved in.
    (Python 2 syntax.)
    """
    repo_hfs = os.path.join(repo, 'Masters', 'HFS')
    image_name = os.path.basename(image_path)
    if not image_path.endswith('.hfs.dmg') and image_path.endswith('.dmg'):
        # DS masters must end in '.hfs.dmg'
        print 'Renaming image to ".hfs.dmg" for DS support'
        image_name = image_name.split('.dmg')[0] + '.hfs.dmg'
    repo_target = os.path.join(repo_hfs, image_name)
    if os.path.isfile(repo_target):
        # If the target already exists, name it "-OLD"
        newname = repo_target.split('.hfs.dmg')[0] + '-OLD.hfs.dmg'
        print "Renaming old image to %s" % newname
        os.rename(repo_target, newname)
    # now copy the newly built image over
    print "Copying new image to DS Repo."
    print "Image path: %s" % image_path
    print "Repo target: %s" % repo_target
    shutil.move(image_path, repo_target)
| StarcoderdataPython |
8123740 | """Unit test package for navigation."""
| StarcoderdataPython |
3577531 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, <NAME>, Social Robotics Lab, University of Freiburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Script to speed up the timings in a recorded bagfile by a given factor.
# !!! Assumes that the bagfile has been synced before using sync_bagfile.py !!!
import roslib; roslib.load_manifest('rosbag')
import sys, rosbag, os.path
import subprocess, yaml
# Validate arguments: RATE INPUT_FILE OUTPUT_FILE.
if len(sys.argv) < 4:
    sys.stderr.write('Missing arguments, syntax is RATE INPUT_FILE OUTPUT_FILE !\n')
    sys.exit(1)

rate = float(sys.argv[1])
infilename = sys.argv[2]
outfilename = sys.argv[3]

# Refuse to clobber an existing output bag.
if os.path.isfile(outfilename):
    print 'Output file ' + outfilename + ' already exists, cannot proceed!!'
    sys.exit(2)

# Count messages up-front (via `rosbag info --yaml`) for progress reporting.
info_dict = yaml.load(subprocess.Popen(['rosbag', 'info', '--yaml', infilename], stdout=subprocess.PIPE).communicate()[0])
msg_count = 0
for topic in info_dict["topics"] :
    msg_count += topic["messages"]

print '\nSpeeding up bag file ' + infilename + ' consisting of ' + str(msg_count) + ' messages by a factor of ' + str(rate)

gotFirstTimestamp = False
msg_index = 0
outbag = rosbag.Bag(outfilename, 'w')
try:
    for topic, msg, timestamp in rosbag.Bag(infilename).read_messages():
        # Remember the very first timestamp as the time origin.
        if not gotFirstTimestamp :
            firstTimestamp = timestamp
            gotFirstTimestamp = True
        # Calculate corrected timestamp
        correctedTimestamp = firstTimestamp + (timestamp - firstTimestamp)/rate
        # Fix header timestamps
        if msg._has_header :
            msg.header.stamp = correctedTimestamp
        # Fix TF transforms
        if topic == "/tf":
            for i in range(0, len(msg.transforms)):
                msg.transforms[i].header.stamp = correctedTimestamp
        # Write message (dropping /rosout log traffic)
        if not topic == "/rosout":
            outbag.write(topic, msg, correctedTimestamp)
        # Show status info
        msg_index+=1
        if msg_index % (msg_count / 10) == 0:
            print str(int(100.0 * msg_index / msg_count + 0.5)) + '% completed'
finally:
    outbag.close()

print 'Bagfile processing complete!\n'
| StarcoderdataPython |
5196210 | <reponame>yelabucsf/scrna-parameter-estimation<filename>analysis/cell_type_effects/ct_coexpression.py
import scanpy as sc
import scipy as sp
import numpy as np
import pickle as pkl
import sys
sys.path.append('/data/home/Github/scrna-parameter-estimation/scmemo')
import estimator, simulate, scmemo, bootstrap, util, hypothesis_test
if __name__ == '__main__':
    data_path = '/data/parameter_estimation/'
    # Cell types of interest and their short labels used in output filenames.
    cts = ['CD4 T cells', 'CD14+ Monocytes', 'FCGR3A+ Monocytes', 'NK cells','CD8 T cells', 'B cells']
    label_converter = dict(zip(cts, ['Th', 'cM', 'ncM', 'NK', 'Tc', 'B']))
    # Keep singlet, annotated, control-condition cells only.
    adata = sc.read(data_path + 'interferon.h5ad')
    adata = adata[(adata.obs.multiplets == 'singlet') & (adata.obs.cell != 'nan'), :].copy()
    adata.X = adata.X.astype(np.int64)
    adata = adata[adata.obs.stim == 'ctrl'].copy()
    # Pre-computed list of high-count transcription factors.
    with open(data_path + 'all_highcount_tfs.pkl', 'rb') as f:
        tfs = pkl.load(f)
    # NOTE(review): only 'CD4 T cells' is processed; the full `cts` loop is
    # deliberately commented out in the iterable.
    for ct in ['CD4 T cells']:#cts:
        print('Starting ct', ct)
        adata_ct = adata.copy()
        # Binary indicator: 1 iff the cell belongs to the current cell type.
        adata_ct.obs['ct'] = adata_ct.obs['cell'].apply(lambda x: int(x == ct))
        scmemo.create_groups(adata_ct, label_columns=['ct', 'ind'], inplace=True)
        scmemo.compute_1d_moments(
            adata_ct, inplace=True, filter_genes=True,
            residual_var=True, use_n_umi=False, filter_mean_thresh=0.07,
            min_perc_group=0.9)
        print('Size of data', adata_ct.shape)
        # TFs that survived gene filtering, tested against all kept genes.
        available_tfs = list(set(tfs) & set(adata_ct.var.index.tolist()))
        target_genes = adata_ct.var.index.tolist()
        print('TF list length', len(available_tfs))
        scmemo.compute_2d_moments(adata_ct, available_tfs, target_genes)
        scmemo.ht_2d_moments(adata_ct, formula_like='1 + ct', cov_column='ct')
        adata_ct.write(data_path + 'result_2d/{}_ct.h5ad'.format(label_converter[ct]))
11330370 | <gh_stars>0
import sys
from collections import defaultdict
def main():
    """
    Entry point: read the input CSV (argv[1]) and write the top-10
    certified occupations (argv[2]) and top-10 states (argv[3]) reports.
    TODO think about better data structure, list of tuples might be better, perhaps a class for a self-sorting tuple manager, essentially a DB
    """
    input_file = sys.argv[1]
    output_file_top10occupations = sys.argv[2]
    output_file_top10states = sys.argv[3]
    # One aggregate per requested field (occupation, worksite state).
    occupation_aggregate, state_aggregate = tuple(get_relevant_data_from_file(input_file))
    occ_perc = get_stats(occupation_aggregate)
    sort_and_print(occupation_aggregate, occ_perc, "TOP_OCCUPATIONS;NUMBER_CERTIFIED_APPLICATIONS;PERCENTAGE\n", output_file_top10occupations)
    state_perc = get_stats(state_aggregate)
    sort_and_print(state_aggregate, state_perc, "TOP_STATES;NUMBER_CERTIFIED_APPLICATIONS;PERCENTAGE\n", output_file_top10states)
def sort_and_print(aggregate, stats, header_string, output_file):
    """
    Write *header_string* plus the top-10 entries of *aggregate* to
    *output_file*, one "KEY;COUNT;PERCENT%" line per entry.

    Parameters
    ----
    aggregate: Dict of counts per key\n
    stats: Dict of percentage shares per key\n
    """
    rows = [[key, count, f"{stats[key]:.1f}%"] for key, count in aggregate.items()]
    # Descending by count, ascending alphabetically on ties (see sort_by).
    rows.sort(key=sort_by)
    top10 = rows[:10]
    body = "\n".join(";".join(str(field) for field in row) for row in top10)
    with open(output_file, 'w') as out:
        out.write(header_string)
        out.write(body)
def sort_by(tuple_like):
    """Sort key: descending by count (index 1), ascending by name (index 0).

    https://stackoverflow.com/questions/24579202/
    ? scaling issues
    """
    name = tuple_like[0]
    count = tuple_like[1]
    return (-count, name)
def get_relevant_data_from_file(filename, binary_selector_field=2, binary_selector_field_value="CERTIFIED", list_of_fields=["SOC_NAME", "WORKSITE_STATE"], separator=";"):
    """
    Aggregate counts of selected columns over rows whose selector column
    matches *binary_selector_field_value*.

    Parameters
    ----
    filename: file to read from
    binary_selector_field, binary_selector_field_value: the column index
        that must equal the value (default: field 2 == "CERTIFIED")
    list_of_fields: column header names to aggregate; positions change
        across inputs, so they are located from the header row (whose
        first field is empty).  Read-only, so the mutable default is safe.
    separator: field separator

    Returns a list of defaultdicts, one per entry of *list_of_fields*
    (BUGFIX: previously hardcoded to exactly two aggregates).
    """
    field_numbers = [0] * len(list_of_fields)
    aggregates = [defaultdict(int) for _ in list_of_fields]
    with open(filename) as f:
        for line in f:
            # BUGFIX: strip the line terminator before splitting; previously
            # the trailing "\n" stayed attached to the last column, so e.g.
            # WORKSITE_STATE keys were "CA\n" instead of "CA".  Quote chars
            # are stripped from field ends, as before.
            words = [w.strip('"') for w in line.rstrip("\r\n").split(separator)]
            if words[0] == "":
                # Header row (first field empty): locate each requested field.
                for i, field in enumerate(list_of_fields):
                    for j, name in enumerate(words):
                        if name == field:
                            field_numbers[i] = j
            elif words[binary_selector_field] == binary_selector_field_value:
                # Matching data row: bump the count for each tracked column.
                for i, col in enumerate(field_numbers):
                    aggregates[i][words[col]] += 1
    return aggregates
def process_data(line):
    """Unimplemented placeholder for per-line processing."""
    pass

def gather_insights():
    """Unimplemented placeholder for insight aggregation."""
    pass
def get_stats(the_dict):
    """
    Given a dictionary of counts, return a new defaultdict mapping each
    key to its percentage share of the total of all values.

    BUGFIX: an empty dict (or all-zero counts) previously raised
    ZeroDivisionError; it now returns an empty defaultdict.
    """
    total_count = sum(the_dict.values())
    percentage_dict = defaultdict(int)
    if not total_count:
        return percentage_dict
    for k, v in the_dict.items():
        percentage_dict[k] = (100 * v) / total_count
    return percentage_dict
# Script entry point.
if(__name__ == "__main__"):
    main()
| StarcoderdataPython |
11366768 | <reponame>p4l1ly/pycapnp<filename>benchmark/addressbook.proto.py
import addressbook_pb2 as addressbook
import os
# Benchmark mode: shadow print with a no-op that just returns its arguments,
# so serialization work is timed without console I/O.
print = lambda *x: x
def writeAddressBook():
    """Build a two-person protobuf AddressBook and return its serialized bytes."""
    addressBook = addressbook.AddressBook()

    alice = addressBook.person.add()
    alice.id = 123
    alice.name = 'Alice'
    alice.email = '<EMAIL>'
    alicePhones = [alice.phone.add()]
    alicePhones[0].number = "555-1212"
    alicePhones[0].type = addressbook.Person.MOBILE

    bob = addressBook.person.add()
    bob.id = 456
    bob.name = 'Bob'
    bob.email = '<EMAIL>'
    bobPhones = [bob.phone.add(), bob.phone.add()]
    bobPhones[0].number = "555-4567"
    bobPhones[0].type = addressbook.Person.HOME
    bobPhones[1].number = "555-7654"
    bobPhones[1].type = addressbook.Person.WORK

    message_string = addressBook.SerializeToString()
    return message_string
def printAddressBook(message_string):
    """Parse serialized AddressBook bytes and 'print' every person and phone.

    (print is shadowed by a no-op above, so the measured work is parsing
    and field access, not console output.)
    """
    addressBook = addressbook.AddressBook()
    addressBook.ParseFromString(message_string)

    for person in addressBook.person:
        print(person.name, ':', person.email)
        for phone in person.phone:
            print(phone.type, ':', phone.number)
        print()
if __name__ == '__main__':
    # Benchmark loop: serialize and re-parse the address book 10000 times.
    for i in range(10000):
        message_string = writeAddressBook()
        printAddressBook(message_string)
| StarcoderdataPython |
12836291 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
from urlresolver import common
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
class YoutubeResolver(Plugin, UrlResolver, PluginSettings):
    """URL resolver that delegates playback to the XBMC YouTube addon."""
    implements = [UrlResolver, PluginSettings]
    name = "youtube"
    domains = [ 'youtube.com', 'youtu.be' ]

    def __init__(self):
        # Priority is configurable via settings; default to 100 when unset.
        p = self.get_setting('priority') or 100
        self.priority = int(p)

    def get_media_url(self, host, media_id):
        """Return a plugin:// URL that plays the video via the youtube addon."""
        #just call youtube addon
        plugin = 'plugin://plugin.video.youtube/?action=play_video&videoid=' + media_id
        return plugin

    def get_url(self, host, media_id):
        """Canonical watch URL for a video id."""
        return 'http://youtube.com/watch?v=%s' % media_id

    def get_host_and_id(self, url):
        """Return ('youtube.com', video_id) for *url*, or False when no id
        can be extracted.

        BUGFIX: video_id is initialised up-front; previously a URL without
        '?' whose path regex matched nothing raised UnboundLocalError.
        """
        video_id = None
        if url.find('?') > -1:
            queries = common.addon.parse_query(url.split('?')[1])
            video_id = queries.get('v', None)
        else:
            r = re.findall('/([0-9A-Za-z_\-]+)', url)
            if r:
                video_id = r[-1]
        if video_id:
            return ('youtube.com', video_id)
        else:
            return False

    def valid_url(self, url, host):
        """True when this resolver handles *url*/*host* and is enabled."""
        if self.get_setting('enabled') == 'false': return False
        return re.match('http[s]*://(((www.|m.)?youtube.+?(v|embed)(=|/))|' +
                        'youtu.be/)[0-9A-Za-z_\-]+',
                        url) or 'youtube' in host or 'youtu.be' in host

    def get_settings_xml(self):
        """Settings XML: base settings plus a pointer to the youtube addon."""
        xml = PluginSettings.get_settings_xml(self)
        xml += '<setting label="This plugin calls the youtube addon - '
        xml += 'change settings there." type="lsep" />\n'
        return xml
| StarcoderdataPython |
4818385 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from io import StringIO
import re
import os
from datetime import datetime
def parse_ssh_config(config=os.getenv("HOME") + "/.ssh/config"):
    """Parse an OpenSSH client config into a list of SshConfigEntry.

    Blank and comment lines are skipped.  Option lines may be written as
    "Key Value" or "Key = Value".

    BUGFIX: an option line appearing before the first Host stanza used to
    raise AttributeError (entry was still None); such lines are now
    ignored.  Regexes use raw strings to avoid invalid-escape warnings.
    """
    entries = []
    with open(config, "r") as fh:
        entry = None
        for line in fh:
            if re.search(r"^\s+$", line) or re.search(r"^\s*#", line):
                continue
            m = re.match(r"^\s*Host\s+(.+)$", line)
            if m:
                # New stanza: flush the previous entry, start a fresh one.
                if entry:
                    entries.append(entry)
                entry = SshConfigEntry(host=m.group(1).strip(), options=dict())
                continue
            if entry is None:
                # Option outside any Host stanza; nothing to attach it to.
                continue
            m = re.match(r"^\s*(.+?)\s+(.+)$", line)
            if m:
                entry.add_option(m.group(1), m.group(2).strip())
                continue
            m = re.match(r"^\s*(.+?)\s*=\s*(.+)$", line)
            if m:
                entry.add_option(m.group(1), m.group(2).strip())
                continue
    # Flush the final stanza.
    if entry:
        entries.append(entry)
    return entries
class SshConfigEntry(object):
    """One Host stanza of an ssh config: a host pattern plus its options."""

    def __init__(self, host, options=None):
        # BUGFIX: the default used to be the mutable `options=dict()`,
        # evaluated once at definition time, so every entry constructed
        # without an explicit dict shared (and mutated) the same mapping.
        self.host = host
        self.options = {} if options is None else options

    def __str__(self):
        out = StringIO()
        out.write("Host {}\n".format(self.host))
        for k, v in self.options.items():
            out.write(" {} {}\n".format(k, v))
        out.write("\n")
        s = out.getvalue()
        out.close()
        return s

    def __eq__(self, other):
        if self.host != other.host:
            return False
        return self.options == other.options

    def add_option(self, name, value):
        self.options[name] = value
| StarcoderdataPython |
1969614 | <reponame>udox/django-social-tools<filename>setup.py
import os
from setuptools import setup
# Long description for PyPI comes straight from the README.
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-social-tools',
    version='0.1',
    packages=['socialtool'],
    include_package_data=True,
    # Pinned runtime dependencies.
    install_requires=[
        'django>=1.5.7,<1.7',
        'pillow>=1.7.8,<2.5',
        'South>=0.7.6,<0.9',
        'python-dateutil==2.2',
        'djangorestframework==2.3.10',
        'python-instagram==0.8.0',
        'requests==2.1.0',
        'requests-oauthlib==0.4.0',
        'simplejson==3.3.1',
        'python-twitter-dev==1.3.1',
    ],
    # Development fork of python-twitter fetched from GitHub.
    dependency_links = ['https://github.com/bear/python-twitter/tarball/master#egg=python-twitter-dev-1.3.1'],
    license='BSD License', # example license
    description='Django app that scrapes social posts from instagram and twitter.',
    long_description=README,
    url='http://www.example.com/',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License', # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
16651 | #!/usr/bin/python2
import sys
import os
import redis
import time
import datetime
# Module-level buckets filled by read_type_keys(), consumed by import_*().
string_keys = []
hash_keys = []
list_keys = []
set_keys = []
zset_keys = []
def import_string(source, dest):
    """Copy every key in string_keys from *source* to *dest*, pipelining
    GET/SET in batches of 1000.  (Python 2 syntax.)"""
    print "Begin Import String Type"
    keys_count = len(string_keys)
    print "String Key Count is:", keys_count
    pipeSrc = source.pipeline(transaction=False)
    pipeDst = dest.pipeline(transaction=False)
    index = 0
    pipe_size = 1000
    while index < keys_count:
        old_index = index
        num = 0
        # Queue up to pipe_size GETs on the source...
        while (index < keys_count) and (num < pipe_size):
            pipeSrc.get(string_keys[index])
            index += 1
            num += 1
        results = pipeSrc.execute()
        # ...then SET the fetched values on the destination in one batch.
        for value in results:
            pipeDst.set(string_keys[old_index], value)
            old_index += 1
        pipeDst.execute()
def import_hash(source, dest):
    """Copy every hash in hash_keys field-by-field, batching HGET/HSET
    in pipelines of 1000.  (Python 2 syntax.)"""
    print "Begin Import Hash Type"
    keys_count = len(hash_keys)
    print "Hash Key Count is:", keys_count
    pipeSrc = source.pipeline(transaction=False)
    pipeDst = dest.pipeline(transaction=False)
    for key in hash_keys:
        hkeys = source.hkeys(key)
        # NOTE: keys_count is reused here as the field count of one hash.
        keys_count = len(hkeys)
        index = 0
        pipe_size = 1000
        while index < keys_count:
            old_index = index
            num = 0
            while (index < keys_count) and (num < pipe_size):
                pipeSrc.hget(key, hkeys[index])
                index += 1
                num += 1
            results = pipeSrc.execute()
            for value in results:
                pipeDst.hset(key, hkeys[old_index], value)
                old_index += 1
            pipeDst.execute()
def import_set(source, dest):
    """Copy every set in set_keys, batching SADDs on the destination in
    pipelines of 1000.  (Python 2 syntax.)"""
    print "Begin Import Set Type"
    keys_count = len(set_keys)
    print "Set Key Count is:", keys_count
    pipeDst = dest.pipeline(transaction=False)
    for key in set_keys:
        sValues = source.smembers(key)
        value_count = len(sValues)
        index = 0
        pipe_size = 1000
        while index < value_count:
            old_index = index
            num = 0
            # pop() consumes the fetched member set as each SADD is queued.
            while (index < value_count) and (num < pipe_size):
                pipeDst.sadd(key, sValues.pop())
                index += 1
                num += 1
            pipeDst.execute()
def import_zset(source, dest):
    """Copy every sorted set in zset_keys: fetch members in ranges, read
    their scores via a pipeline, then ZADD them on the destination.
    (Python 2 syntax; uses the legacy zadd(key, member, score) API.)"""
    print "Begin Import ZSet Type"
    keys_count = len(zset_keys)
    print "ZSet Key Count is:", keys_count
    pipeSrc = source.pipeline(transaction=False)
    pipeDst = dest.pipeline(transaction=False)
    for key in zset_keys:
        zset_size = source.zcard(key)
        index = 0
        pipe_size = 1000
        while index < zset_size:
            members = source.zrange(key, index, index + pipe_size)
            index += len(members)
            for member in members:
                pipeSrc.zscore(key, member)
            scores = pipeSrc.execute()
            i = 0
            for member in members:
                pipeDst.zadd(key, member, scores[i])
                i += 1
            pipeDst.execute()
def import_list(source, dest):
    """Copy every list in list_keys, RPUSHing LRANGE chunks of up to 1000
    items per pipeline batch.  (Python 2 syntax.)"""
    print "Begin Import List Type"
    keys_count = len(list_keys)
    print "List Key Count is:", keys_count
    pipeDst = dest.pipeline(transaction=False)
    for key in list_keys:
        list_size = source.llen(key)
        index = 0
        pipe_size = 1000
        while index < list_size:
            results = source.lrange(key, index, index + pipe_size)
            index += len(results)
            for value in results:
                pipeDst.rpush(key, value)
            pipeDst.execute()
def read_type_keys(source):
    """Classify every key on ``source`` by Redis type and append each key to
    the matching module-level bucket list (``string_keys``, ``list_keys``,
    ``hash_keys``, ``set_keys``, ``zset_keys``), using pipelined TYPE calls
    in batches of 5000.
    """
    keys = source.keys()
    keys_count = len(keys)
    print "Key Count is:", keys_count
    pipe = source.pipeline(transaction=False)
    # for key in keys:
    index = 0
    pipe_size = 5000  # max TYPE commands buffered per pipeline flush
    while index < keys_count:
        old_index = index  # index of the first key in this batch
        num = 0
        while (index < keys_count) and (num < pipe_size):
            pipe.type(keys[index])
            index += 1
            num += 1
        results = pipe.execute()
        # NOTE: the loop variable shadows the ``type`` builtin; harmless here
        # but worth renaming if this code is ever touched.
        for type in results:
            if type == "string":
                string_keys.append(keys[old_index])
            elif type == "list":
                list_keys.append(keys[old_index])
            elif type == "hash":
                hash_keys.append(keys[old_index])
            elif type == "set":
                set_keys.append(keys[old_index])
            elif type == "zset":
                zset_keys.append(keys[old_index])
            else:
                print keys[old_index], " is not find when TYPE"
            old_index += 1
if __name__ == '__main__':
    # Pairs of source/destination Redis endpoints, "host:db" form.
    # NOTE(review): the ":db" suffix of each *source* entry is never used —
    # only the host is parsed out and the port is hard-coded to 6379 below.
    config = {
        "source": ['10.4.1.91:0', '10.4.13.124:0', '10.4.12.16:0', '10.4.2.250:0'],
        "dest": ['127.0.0.1:11', '127.0.0.1:12', '127.0.0.1:2', '127.0.0.1:1']
    }
    start = datetime.datetime.now()
    # Migrate each source instance into the paired destination db, one type
    # family at a time.
    for group in zip(config["source"], config["dest"]):
        print group
        SrcIP = group[0].split(':')[0]
        SrcPort = 6379  # source port is fixed; the config suffix is ignored
        DstIP = group[1].split(':')[0]
        DstPort = 6379
        DstDB = group[1].split(':')[1]
        source = redis.Redis(host=SrcIP, port=SrcPort)
        dest = redis.Redis(host=DstIP, port=DstPort, db=DstDB)
        print "Begin Read Keys"
        # NOTE(review): the module-level *_keys lists appear never to be
        # cleared between loop iterations, so keys classified for earlier
        # hosts would be re-imported on later ones — confirm they are reset
        # elsewhere (their definitions are above this chunk).
        read_type_keys(source)
        print "String Key Count is:", len(string_keys)
        print "Set Key Count is:", len(set_keys)
        print "ZSet Key Count is:", len(zset_keys)
        print "List Key Count is:", len(list_keys)
        print "Hash Key Count is:", len(hash_keys)
        import_string(source, dest)
        import_hash(source, dest)
        import_list(source, dest)
        import_set(source, dest)
        import_zset(source, dest)
    stop = datetime.datetime.now()
    diff = stop - start
    print "Finish, token time:", str(diff)
| StarcoderdataPython |
8101522 | # For production
from algoritms.macro_sostream.micro_cluster import MicroCluster
from algoritms.macro_sostream.macro_cluster import MacroCluster
# For test
# from micro_cluster import MicroCluster
# from macro_cluster import MacroCluster
def new_microcluster(vt):
    # Factory: build a fresh MicroCluster from ``vt``, which is forwarded
    # unchanged (presumably the seeding vector/point — confirm against
    # MicroCluster.__init__).
    return MicroCluster(vt)
def new_macrocluster(microcluster):
    """Wrap a single micro-cluster in a brand-new macro-cluster.

    The macro-cluster inherits the micro-cluster's centroid and radius,
    starts with a micro-cluster count of 1, and holds the given
    micro-cluster as its only member.
    """
    return MacroCluster(
        microcluster.centroid,
        1,  # a fresh macro-cluster contains exactly one micro-cluster
        microcluster.radius,
        [microcluster],
    )
| StarcoderdataPython |
3358379 | # 2019-11-24 20:59:47(JST)
import sys
#import numpy as np
# Day-of-week abbreviations, in order, starting from Sunday.
days = ['SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT']


def main():
    """Read a day abbreviation from stdin and print the number of days
    remaining until the next Sunday (counting that Sunday itself).
    """
    today = sys.stdin.readline().rstrip()
    print(len(days) - days.index(today))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
344496 | <reponame>eakadams/fluffy-happiness
# Fluffy Happiness: Test code to grab pictures of cute animals from the Internet
# Usage: >> python get_fluffy.py [options]
# <NAME> (<EMAIL>)
# Module metadata (names/emails anonymized by the dataset export).
__author__ = "<NAME> smells nice"
__date__ = "$22-oct-2018 22:00:00$"
__version__ = "0.2"
# Imports
import os
import sys
import urllib.request, urllib.error, urllib.parse
import ssl
from random import randint
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from argparse import ArgumentParser, RawTextHelpFormatter
# Build the CLI: a single -k/--keywords option controlling the imgur search.
parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument('-k', '--keywords',
                    default='cute fluffy animal',
                    type=str,
                    help='Specify which kind of search to do(default: %(default)s)')
# Parse the arguments above
args = parser.parse_args()
# Path format: imgur's score-sorted search page, keywords joined with '+'.
path = 'https://imgur.com/search/score?q=%s' % ('+'.join(args.keywords.split()))
# Get data from website.
# NOTE(review): _create_unverified_context() disables TLS certificate
# verification — acceptable for a toy script, but a MITM risk otherwise.
request = urllib.request.Request(path)
response = urllib.request.urlopen(request, context=ssl._create_unverified_context())
read_response = response.readlines()
# Possible cuteness: scrape thumbnail URLs out of the raw HTML by string
# matching. NOTE(review): this is brittle — any markup change on imgur's
# side silently yields zero results.
possible = []
for line in read_response:
    line = line.decode('utf-8')
    print(line)
    if '<img alt="" src="' in line:
        # Thumbnails are embedded as protocol-relative URLs (//i.imgur.com/...)
        image_url = line.split('src="//')[1].split('"')[0]
        possible.append('http://'+image_url)
# Now select a random image to show.
# NOTE(review): randint(0, -1) raises ValueError when no images were found.
rand_int = randint(0,len(possible)-1)
print("I've selected image #%i: %s" % (rand_int,possible[rand_int]))
# Download the image and display it
# note: imgur adds a b to names for some reason (thumbnail suffix);
# stripping it yields the full-size image name.
img_name = (possible[rand_int].split('b.jpg')[0]+'.jpg').split('/')[-1]
image_path = 'https://i.imgur.com/'+img_name
urllib.request.urlretrieve('%s' % image_path,'%s' % img_name)
# Show the image in matplotlib (blocks until the window is closed).
img=mpimg.imread('%s' % img_name)
imgplot = plt.imshow(img)
plt.show()
| StarcoderdataPython |
6406988 |
import os
import shutil
import sys
import time
import glob
import importlib
# as we need to load the shared lib from here, make sure it's in our path:
if os.path.join( os.environ['CMSSW_BASE'], 'src') not in sys.path:
sys.path.append( os.path.join( os.environ['CMSSW_BASE'], 'src') )
# -------------------------------------------------------------------------------------------------------
# C++ source template (filled in via %-formatting with mdName/plType/
# plTypeSan) for a tiny boost::python module exposing one <type>2xml()
# function: it deserializes a binary payload blob through
# eos::portable_iarchive and re-emits it as a boost XML archive string.
payload2xmlCodeTemplate = """
#include <iostream>
#include <string>
#include <memory>
#include <boost/python/class.hpp>
#include <boost/python/module.hpp>
#include <boost/python/init.hpp>
#include <boost/python/def.hpp>
#include <iostream>
#include <string>
#include <sstream>
#include "boost/archive/xml_oarchive.hpp"
#include "CondFormats/Serialization/interface/Serializable.h"
#include "CondFormats/Serialization/interface/Archive.h"
#include "CondCore/Utilities/src/CondFormats.h"
namespace { // Avoid cluttering the global namespace.
std::string %(plTypeSan)s2xml( const std::string &payloadData, const std::string &payloadType ) {
// now to convert
std::unique_ptr< %(plType)s > payload;
std::stringbuf sdataBuf;
sdataBuf.pubsetbuf( const_cast<char *> ( payloadData.c_str() ), payloadData.size() );
std::istream inBuffer( &sdataBuf );
eos::portable_iarchive ia( inBuffer );
payload.reset( new %(plType)s );
ia >> (*payload);
// now we have the object in memory, convert it to xml in a string and return it
std::ostringstream outBuffer;
{
boost::archive::xml_oarchive xmlResult( outBuffer );
xmlResult << boost::serialization::make_nvp( "cmsCondPayload", *payload );
}
return outBuffer.str();
}
} // end namespace
BOOST_PYTHON_MODULE(%(mdName)s)
{
using namespace boost::python;
def ("%(plTypeSan)s2xml", %(plTypeSan)s2xml);
}
"""
# SCRAM BuildFile written into the throw-away package so the generated
# converter module above can be compiled with `scram b`.
buildFileTemplate = """
<flags CXXFLAGS="-Wno-sign-compare -Wno-unused-variable -Os"/>
<use name="CondCore/Utilities"/>
<use name="boost_python"/>
<use name="boost_iostreams"/>
<use name="boost_serialization"/>
<export>
<lib name="1"/>
</export>
"""
# helper function
def sanitize(typeName):
    """Turn a C++ type name into an identifier-safe token.

    Spaces are stripped, '<' becomes '_' and '>' is dropped, e.g.
    'vector<pair<int, bool> >' -> 'vector_pair_int,bool'.
    """
    for old, new in ((' ', ''), ('<', '_'), ('>', '')):
        typeName = typeName.replace(old, new)
    return typeName
class CondXmlProcessor(object):
    """Convert binary conditions payloads from a condDB into XML.

    For a given payload type, a small C++/boost::python converter module is
    either discovered as a pre-built plugin (plugin<Type>_toXML.so) or
    generated on the fly, compiled with scram inside a throw-away package
    under $CMSSW_BASE/src, imported, and then used to turn the payload blob
    into its XML representation.
    """
    def __init__(self, condDBIn):
        # condDBIn: module exposing the ORM classes (e.g. .Payload) that are
        # resolved per-session via session.get_dbtype() below.
        self.conddb = condDBIn
        self._pl2xml_isPrepared = False
        if not os.path.exists( os.path.join( os.environ['CMSSW_BASE'], 'src') ):
            raise Exception("Looks like you are not running in a CMSSW developer area, $CMSSW_BASE/src/ does not exist")
        # Scratch subsystem/package where the converter is generated and
        # built; removed again in __del__ unless doCleanup is cleared.
        self.fakePkgName = "fakeSubSys4pl/fakePkg4pl"
        self._pl2xml_tmpDir = os.path.join( os.environ['CMSSW_BASE'], 'src', self.fakePkgName )
        self.doCleanup = True
    def __del__(self):
        # Best-effort removal of the generated package tree and the copied
        # shared library. doCleanup is cleared when a pre-built plugin was
        # used, the tmp dir pre-existed, or the build failed, so those paths
        # are left alone.
        if self.doCleanup:
            shutil.rmtree( '/'.join( self._pl2xml_tmpDir.split('/')[:-1] ) )
            os.unlink( os.path.join( os.environ['CMSSW_BASE'], 'src', './pl2xmlComp.so') )
        return
    def discover(self, payloadType):
        """Look for a pre-built XML-converter plugin for payloadType.

        Searches the developer area first, then the release area, and
        imports the first match. Returns the imported module, or None if no
        plugin was found.
        """
        # print "discover> checking for plugin of type %s" % payloadType
        # first search in developer area:
        libDir = os.path.join( os.environ["CMSSW_BASE"], 'lib', os.environ["SCRAM_ARCH"] )
        pluginList = glob.glob( libDir + '/plugin%s_toXML.so' % sanitize(payloadType) )
        # if nothing found there, check release:
        if not pluginList:
            libDir = os.path.join( os.environ["CMSSW_RELEASE_BASE"], 'lib', os.environ["SCRAM_ARCH"] )
            pluginList = glob.glob( libDir + '/plugin%s_toXML.so' % sanitize(payloadType) )
        # if pluginList:
        #    print "found plugin for %s (in %s) : %s " % (payloadType, libDir, pluginList)
        # else:
        #    print "no plugin found for type %s" % payloadType
        xmlConverter = None
        if len(pluginList) > 0:
            dirPath, libName = os.path.split( pluginList[0] )
            sys.path.append(dirPath)
            # print "going to import %s from %s" % (libName, dirPath)
            xmlConverter = importlib.import_module( libName.replace('.so', '') )
            # print "found methods: ", dir(xmlConverter)
            # Plugin came from the release/dev area: nothing of ours to clean.
            self.doCleanup = False
        return xmlConverter
    def prepPayload2xml(self, session, payload):
        """Return a Python module exposing <Type>2xml() for the payload's
        type: a pre-built plugin if available, otherwise one generated from
        payload2xmlCodeTemplate and compiled with scram. Returns None if the
        build fails.
        """
        startTime = time.time()
        Payload = session.get_dbtype(self.conddb.Payload)
        # get payload from DB:
        result = session.query(Payload.data, Payload.object_type).filter(Payload.hash == payload).one()
        data, plType = result
        # Substitutions for payload2xmlCodeTemplate.
        info = { "mdName" : "pl2xmlComp",
                 'plType' : plType,
                 'plTypeSan' : sanitize(plType),
        }
        converter = self.discover(plType)
        if converter: return converter
        code = payload2xmlCodeTemplate % info
        tmpDir = self._pl2xml_tmpDir
        if ( os.path.exists( tmpDir ) ) :
            # Refuse to clobber (or later delete) a directory we did not make.
            msg = '\nERROR: %s already exists, please remove if you did not create that manually !!' % tmpDir
            self.doCleanup = False
            raise Exception(msg)
        os.makedirs( tmpDir+'/src' )
        buildFileName = "%s/BuildFile.xml" % (tmpDir,)
        with open(buildFileName, 'w') as buildFile:
            buildFile.write( buildFileTemplate )
            buildFile.close()
        tmpFileName = "%s/src/%s" % (tmpDir, info['mdName'],)
        with open(tmpFileName+'.cpp', 'w') as codeFile:
            codeFile.write(code)
            codeFile.close()
        # Where scram drops the built library for the fake package.
        libDir = os.path.join( os.environ["CMSSW_BASE"], 'tmp', os.environ["SCRAM_ARCH"], 'src', self.fakePkgName, 'src', self.fakePkgName.replace('/',''))
        libName = libDir + '/lib%s.so' % self.fakePkgName.replace('/','')
        # Build in a subshell and copy the result next to $CMSSW_BASE/src so
        # it is importable as 'pl2xmlComp'.
        cmd = "source /afs/cern.ch/cms/cmsset_default.sh;"
        cmd += "(cd %s ; scram b 2>&1 >build.log && cp %s $CMSSW_BASE/src/pl2xmlComp.so )" % (tmpDir, libName)
        ret = os.system(cmd)
        # Keep the tree around for debugging if the build failed.
        if ret != 0 : self.doCleanup = False
        buildTime = time.time()-startTime
        print >> sys.stderr, "buillding done in ", buildTime, 'sec., return code from build: ', ret
        if (ret != 0):
            return None
        return importlib.import_module( 'pl2xmlComp' )
    def payload2xml(self, session, payload):
        """Fetch the payload blob identified by hash ``payload`` and print
        its XML representation to stdout.
        """
        # NOTE(review): xmlConverter is only bound inside this if-branch; a
        # second call on the same instance (when _pl2xml_isPrepared is
        # already True) would hit a NameError below — confirm single-use.
        if not self._pl2xml_isPrepared:
            xmlConverter = self.prepPayload2xml(session, payload)
            if not xmlConverter:
                msg = "Error preparing code for "+payload
                raise Exception(msg)
            self._pl2xml_isPrepared = True
        Payload = session.get_dbtype(self.conddb.Payload)
        # get payload from DB:
        result = session.query(Payload.data, Payload.object_type).filter(Payload.hash == payload).one()
        data, plType = result
        # Converter function is named after the sanitized payload type.
        convFuncName = sanitize(plType)+'2xml'
        sys.path.append('.')
        func = getattr(xmlConverter, convFuncName)
        resultXML = func( str(data), str(plType) )
        print resultXML
| StarcoderdataPython |
287341 | import uuid
from celery.task import task
from spylunking.log.setup_logging import build_colorized_logger
from celery_loaders.work_tasks.custom_task import CustomTask
log = build_colorized_logger(
name='do_some_work')
@task(
    bind=True,
    base=CustomTask,
    queue="do_some_work")
def do_some_work(
        self,
        work_dict):
    """Demo Celery task: log the incoming payload and return a result dict.

    :param work_dict: dictionary of key/values describing the work request
    :returns: dict with a single ``job_results`` string containing a fresh
              UUID, so each invocation yields a distinguishable result
    """
    label = "do_some_work"
    log.info(("task - {} - start "
              "work_dict={}")
             .format(label,
                     work_dict))
    ret_data = {
        "job_results": ("some response key={}").format(
            str(uuid.uuid4()))
    }
    # BUGFIX: the arguments were previously passed as (ret_data, label),
    # producing logs like "task - {...} - result=do_some_work done"; the
    # template "task - {} - result={} done" expects the task name first and
    # the result second.
    log.info(("task - {} - result={} done")
             .format(
                 label,
                 ret_data))
    return ret_data
# end of do_some_work
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.