id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3437522 | # pylint: disable=too-many-locals
import os
import argparse
import argcomplete
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from ml import modelsaving, features
from preprocessing import cleantext
# Column names expected in the input train/test CSV files.
TWEET_COLUMN = 'tweet'
LABEL_COLUMN = 'label'
# Fixed seed passed to every estimator so runs are reproducible.
RANDOM_STATE = 42
def _get_model_by_name(model_type: str):
    """Instantiate a fresh, untrained sklearn classifier by its class name.

    Parameters
    ----------
    model_type : one of LogisticRegression, SVC, RandomForestClassifier,
        GradientBoostingClassifier, ExtraTreesClassifier, MultinomialNB.

    Raises
    ------
    ValueError
        If model_type is not a supported name (the message now includes the
        offending value so CLI errors are actionable).
    """
    # Dispatch table instead of an if/elif chain; lambdas defer construction
    # so only the requested estimator is ever built.
    factories = {
        "LogisticRegression": lambda: LogisticRegression(random_state=RANDOM_STATE),
        "SVC": lambda: SVC(random_state=RANDOM_STATE),
        "RandomForestClassifier": lambda: RandomForestClassifier(random_state=RANDOM_STATE),
        "GradientBoostingClassifier": lambda: GradientBoostingClassifier(random_state=RANDOM_STATE),
        "ExtraTreesClassifier": lambda: ExtraTreesClassifier(random_state=RANDOM_STATE),
        # MultinomialNB takes no random_state: its fit is deterministic.
        "MultinomialNB": lambda: MultinomialNB(),
    }
    if model_type not in factories:
        raise ValueError("Unknown model type: {}".format(model_type))
    print("Model type is: {}".format(model_type))
    return factories[model_type]()
def train(input_train_csv: str,
          input_test_csv: str,
          model_type: str,
          output_dir: str,
          k_related_terms: int) -> None:
    """Train one tweet classifier with grid-searched hyper-parameters.

    Reads train/test CSVs (columns 'tweet' and 'label'), cleans and TF-IDF
    vectorizes the text, grid-searches the chosen model, persists the best
    estimator plus vectorizer and label encoder under output_dir/<model_type>,
    and prints train/test accuracy and a classification report.

    NOTE: annotation corrected from ``k_related_terms: str`` to ``int`` —
    main() parses it with argparse ``type=int``.
    """
    print("Reading files")
    train_df = pd.read_csv(input_train_csv)
    test_df = pd.read_csv(input_test_csv)
    print("Encoding labels")
    y_train = train_df[LABEL_COLUMN].to_list()
    # Encoder is fitted on training labels only; a test label absent from the
    # training set would make transform() raise below.
    labels = list(set(y_train))
    y_test = test_df[LABEL_COLUMN].to_list()
    label_encoder = LabelEncoder()
    label_encoder.fit(labels)
    vec_y_cat_train = label_encoder.transform(y_train)
    vec_y_cat_test = label_encoder.transform(y_test)
    x_train = train_df[TWEET_COLUMN].apply(cleantext.clean_tweet).to_list()
    # get model by its name
    single_model = _get_model_by_name(model_type)
    print("Vectorizing training data")
    # Unigrams + bigrams.
    vectorizer = TfidfVectorizer(ngram_range=(1, 2))
    x_train_tfidf = vectorizer.fit_transform(x_train)
    # hyper parameters for each model
    parameters = {
        'LogisticRegression': {'penalty': ['l2'],
                               'solver': ['liblinear', 'lbfgs'],
                               'C': [1.0, 10]},
        'ExtraTreesClassifier': {'n_estimators': [16, 32]},
        'RandomForestClassifier': {'n_estimators': [16, 32]},
        'GradientBoostingClassifier': {'n_estimators': [16, 32], 'learning_rate': [0.8, 1.0]},
        'SVC': [
            {'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100]},
            {'kernel': ['linear'], 'C': [1, 10, 100]}
        ]
    }
    # grid search cross-validation (5-fold); refit=True retrains the best
    # parameter combination on the full training set.
    clf = GridSearchCV(single_model,
                       parameters[model_type],
                       cv=5,
                       verbose=3,
                       n_jobs=-1,
                       scoring='accuracy',
                       refit=True)
    print("Training")
    clf.fit(x_train_tfidf, vec_y_cat_train)
    print("Best parameters on the validation test:")
    print(clf.best_params_)
    print("Grid scores on validation set:")
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        # +/- two standard deviations around each mean CV score.
        print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
    print()
    print("Saving model")
    model_dir = os.path.join(output_dir, model_type)
    modelsaving.save_model(clf.best_estimator_, model_dir)
    modelsaving.save_vectorizer(vectorizer, model_dir)
    modelsaving.save_label_encoder(label_encoder, model_dir)
    print("Predicting training set")
    predicted = clf.predict(x_train_tfidf)
    accuracy = np.mean(predicted == vec_y_cat_train)
    print("Accuracy on train set: {}".format(accuracy))
    print("Vectorizing test data")
    x_test = test_df[TWEET_COLUMN].apply(cleantext.clean_tweet).to_list()
    x_test_tfidf = vectorizer.transform(x_test)
    print("Predicting test set")
    predicted = clf.predict(x_test_tfidf)
    accuracy = np.mean(predicted == vec_y_cat_test)
    print("Accuracy on test set: {}".format(accuracy))
    target_names = [str(class_name) for class_name in label_encoder.classes_]
    print(classification_report(vec_y_cat_test,
                                predicted,
                                target_names=target_names))
    print("Plotting top K features for each class")
    features.plot_top_k_features(vectorizer, clf.best_estimator_, model_dir, k_related_terms)
def main():
    """CLI entry point: parse arguments and launch training."""
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument("--input-train", type=str,
                                 help='Input CSV file containing training tweets', required=True)
    argument_parser.add_argument("--input-test", type=str,
                                 help='Input CSV file containing test tweets', required=True)
    # BUG FIX: the default used to be 'SVM', which neither _get_model_by_name
    # nor the hyper-parameter grid recognises (it would raise ValueError);
    # 'SVC' is the supported name.
    argument_parser.add_argument("--model", type=str,
                                 help='Model type to train', default='SVC', required=False)
    argument_parser.add_argument("--output-dir", type=str, help='Directory for output', required=True)
    argument_parser.add_argument("--k-related-terms", type=int,
                                 help='Number of related terms to output per company. Default: 10',
                                 required=False, default=10)
    argcomplete.autocomplete(argument_parser)
    args = argument_parser.parse_args()
    train(args.input_train, args.input_test, args.model, args.output_dir, args.k_related_terms)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4835543 | <reponame>btjanaka/competitive-programming-solutions<filename>leetcode/452.py
# Author: btjanaka (<NAME>)
# Problem: (LeetCode) 452
# Title: Minimum Number of Arrows to Burst Balloons
# Link: https://leetcode.com/problems/minimum-number-of-arrows-to-burst-balloons/
# Idea: Represent the horizontal coordinates as a series of "events" telling
# when each balloon starts and ends. Sort these events by their position and go
# through them. Keep track of a list of balloons that are currently being
# covered; when we find an end event, we know we have to pop everything, so we
# mark all balloons in our list as popped and empty the list.
# Difficulty: medium
# Tags: list, sorting
class Solution:
    """LeetCode 452: minimum number of arrows to burst all balloons.

    Sweep-line over balloon start/end "events": whenever a balloon ends and
    has not yet been burst, shoot one arrow there, bursting every balloon
    currently overlapping that position.
    """

    def findMinArrowShots(self, points: 'List[List[int]]') -> int:
        """Return the minimum number of arrows needed to pop every balloon.

        NOTE: annotations are quoted because this module never imports
        typing.List; unquoted they would raise NameError at definition time.
        """
        # Each balloon contributes a start event (kind 0) and an end event
        # (kind 1).  Tuple sorting puts starts before ends at equal x, so
        # balloons touching at a point share one arrow (intervals are
        # inclusive per the problem statement).
        events = []
        for index, point in enumerate(points):
            events.append((point[0], 0, index))
            events.append((point[1], 1, index))
        events.sort()
        popped = [False] * len(points)
        covered = []  # balloons currently overlapping the sweep position
        arrows = 0
        for _pos, kind, index in events:
            if kind == 0:  # balloon starts: joins the overlap set
                covered.append(index)
            else:  # balloon ends
                if popped[index]:
                    continue  # already burst by an earlier arrow
                # Shoot here: every balloon in the overlap set is burst.
                arrows += 1
                while covered:
                    popped[covered.pop()] = True
        return arrows
| StarcoderdataPython |
1946923 | import os
import logging
import numpy as np
'''
@ Multifactorial Evolutionary Algorithm
'''
def sbx_crossover(p1, p2, sbxdi):
    """Simulated binary crossover (SBX) between two unit-box parent genomes.

    Draws one uniform sample per dimension, derives the spread factor from
    the distribution index ``sbxdi``, and returns the two children clipped
    back into [0, 1].
    """
    dims = p1.shape[0]
    u = np.random.rand(dims)
    exponent = 1.0 / (sbxdi + 1)
    # Spread factor: contracting branch for u <= 0.5, expanding otherwise.
    beta = np.where(
        u <= 0.5,
        np.power(2 * u, exponent),
        np.power(2 * (1 - u), -exponent),
    )
    child_a = np.clip(0.5 * ((1 + beta) * p1 + (1 - beta) * p2), 0, 1)
    child_b = np.clip(0.5 * ((1 + beta) * p2 + (1 - beta) * p1), 0, 1)
    return child_a, child_b
def mutate(p, pmdi):
    """Polynomial mutation of a unit-box genome.

    Each gene mutates independently with probability 1/len(p); the mutation
    magnitude is controlled by the distribution index ``pmdi``.  Returns a
    new array; ``p`` is left untouched.
    """
    dims = p.shape[0]
    rate = 1.0 / dims
    # Draw both random vectors up front (u: magnitude, r: gate), preserving
    # the original draw order for reproducibility under a fixed seed.
    u = np.random.uniform(size=[dims])
    r = np.random.uniform(size=[dims])
    power = 1 / (1 + pmdi)
    delta_down = (2 * u) ** power - 1          # perturb towards 0
    delta_up = 1 - (2 * (1 - u)) ** power      # perturb towards 1
    candidate = np.where(u < 0.5, p + delta_down * p, p + delta_up * (1 - p))
    return np.where(r < rate, candidate, p)
'''
@ other utilities
'''
def find_scalar_fitness(factorial_cost):
    """Scalar fitness = 1 / (best per-task rank of each individual).

    ``factorial_cost`` has shape (num_individuals, num_tasks); ranks are
    1-based within each task column, lower cost meaning better rank.
    """
    num_individuals, num_tasks = factorial_cost.shape
    ranks = np.empty((num_individuals, num_tasks), dtype=np.int64)
    for task in range(num_tasks):
        # Inverse permutation of argsort == rank of each individual.
        order = np.argsort(factorial_cost[:, task])
        ranks[order, task] = np.arange(1, num_individuals + 1)
    return 1 / ranks.min(axis=1)
def get_best_individual(population, factorial_cost):
    """Return, per task, the individual with the LOWEST factorial cost.

    Returns (p_bests, y_bests): for each of the K tasks, the best row of
    ``population`` and its cost on that task.

    BUG FIX: the original used np.argmax, which picked the *worst* (highest
    cost) individual; factorial_cost is a cost matrix that is minimized
    (see find_scalar_fitness, where rank 1 = lowest cost = fitness 1).
    """
    K = factorial_cost.shape[1]
    p_bests = []
    y_bests = []
    for k in range(K):
        best_index = np.argmin(factorial_cost[:, k])
        p_bests.append(population[best_index, :])
        y_bests.append(factorial_cost[best_index, k])
    return p_bests, y_bests
def get_population_by_skill_factor(population, skill_factor_list, skill_factor):
    """Select the rows of ``population`` whose skill factor matches."""
    mask = skill_factor_list == skill_factor
    return population[mask]
def get_logger(env_name, exp_id):
    """Return a DEBUG logger writing to data/<env_name>/<exp_id>.csv and stderr.

    Fixes over the original:
    * os.makedirs(..., exist_ok=True) replaces os.mkdir, which raised when
      the top-level 'data' directory did not exist;
    * a logger that is already configured is returned as-is instead of
      getting duplicate handlers (which doubled every log line and deleted
      the file the first handler was writing to).
    """
    os.makedirs('data/%s' % env_name, exist_ok=True)
    filename = 'data/%s/%s.csv' % (env_name, exp_id)
    logger = logging.getLogger(filename)
    if logger.handlers:
        # Already configured by an earlier call with the same env/exp pair.
        return logger
    logger.setLevel(logging.DEBUG)
    # Start each experiment with a fresh file.
    if os.path.exists(filename):
        os.remove(filename)
    fh = logging.FileHandler(filename)
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # Bare message format: the file is consumed as CSV, so no prefixes.
    formatter = logging.Formatter('%(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
6669190 | <filename>python/leetcode/22.py
# 给出 n 代表生成括号的对数,请你写出一个函数,使其能够生成所有可能的并且有效的括号组合。
#
# 例如,给出 n = 3,生成结果为:
#
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
#
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/generate-parentheses
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
from typing import List
class Solution:
    """LeetCode 22: generate all well-formed combinations of n parentheses pairs."""

    def backtracking(self, res: List[str], cur: str, open_count: int, close_count: int, total: int):
        """Depth-first construction of one candidate string.

        res         -- output accumulator
        cur         -- partial string built so far
        open_count  -- number of '(' already used (was `open`: shadowed builtin)
        close_count -- number of ')' already used
        total       -- target number of pairs (was `max`: shadowed builtin)
        """
        # A complete string has exactly 2 * total characters.
        if len(cur) == total * 2:
            res.append(cur)
            return
        # May still open another pair.
        if open_count < total:
            self.backtracking(res, cur + '(', open_count + 1, close_count, total)
        # May only close while there is an unmatched '(' (keeps prefix valid).
        if close_count < open_count:
            self.backtracking(res, cur + ')', open_count, close_count + 1, total)

    def generateParenthesis(self, n: int) -> List[str]:
        """Return every valid parentheses string of n pairs."""
        res: List[str] = []
        self.backtracking(res, "", 0, 0, n)
        return res
if __name__ == '__main__':
    # Demo: print every valid combination for three pairs.
    solver = Solution()
    print(solver.generateParenthesis(3))
9738700 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of methods to be used with find_element_by"""
# Created by a1wen at 28.02.19
# Stolen from Selene
from selenium.webdriver.common.by import By
def by_css(css_selector):
    """Build a ``(By.CSS_SELECTOR, pattern)`` locator tuple."""
    return (By.CSS_SELECTOR, css_selector)


def by_name(name):
    """Build a ``(By.NAME, pattern)`` locator tuple."""
    return (By.NAME, name)


def by_class(name):
    """Build a ``(By.CLASS_NAME, pattern)`` locator tuple."""
    return (By.CLASS_NAME, name)


def by_link_text(text):
    """Build a ``(By.LINK_TEXT, pattern)`` locator tuple."""
    return (By.LINK_TEXT, text)


def by_partial_link_text(partial_text):
    """Build a ``(By.PARTIAL_LINK_TEXT, pattern)`` locator tuple."""
    return (By.PARTIAL_LINK_TEXT, partial_text)


def by_xpath(xpath):
    """Build a ``(By.XPATH, pattern)`` locator tuple."""
    return (By.XPATH, xpath)


def by_tag(tag):
    """Build a ``(By.TAG_NAME, pattern)`` locator tuple."""
    return (By.TAG_NAME, tag)


def by_id(oid):
    """Build a ``(By.ID, pattern)`` locator tuple."""
    return (By.ID, oid)
| StarcoderdataPython |
# Re-export both policy checkers so callers can import them from the
# package root instead of the submodules.
from .checker import XPermittedCrossDomainPolicyChecker
from .notnone import XPCDPNotNoneChecker

# Public API of this package.
__all__ = ['XPermittedCrossDomainPolicyChecker','XPCDPNotNoneChecker']
| StarcoderdataPython |
8055606 | <reponame>carlsummer/python_developer_tools<gh_stars>10-100
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:4/29/2021 2:41 PM
# @File:setup
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))

# Use the README as the long description shown on PyPI.
with open(os.path.join(here, 'README.md'), encoding="utf-8") as f:
    README = f.read()

setup(
    author='zengxiaohui',  # package author
    name='python_developer_tools',  # distribution name; letters, digits, _ and -, unique on pypi.org
    version='0.0.2',  # current package version
    author_email="<EMAIL>",  # NOTE(review): redacted placeholder from the dataset dump
    description='python developer tools',
    long_description=README,
    long_description_content_type='text/markdown',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Topic :: System :: Networking :: Monitoring",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    url='https://github.com/carlsummer/python_developer_tools',
    packages=find_packages(),  # find_packages() collects every folder containing an __init__.py
    # package_dir maps package names to source directories; e.g.
    # package_dir = {'': 'lib'} would mean "root packages live under lib/".
    package_dir={"cv": "cv",
                 "files": "files",
                 "python": "python",
                 "web": "web", },
    # package_data={"docs": ["/docs/keys/*.txt"]},
    # data_files=[
    #     ('docs', ['python_developer_tools/docs/keys/*.txt',]),
    # ],
    install_requires=["numpy", "scipy", "matplotlib"],
)
| StarcoderdataPython |
4878279 | from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django_webtest import WebTest
from ..remote_user_auth import email_to_username
class AuthTests(WebTest):
    """Tests for the X-Forwarded-Email remote-user auth pipeline.

    NOTE(review): every '<EMAIL>' literal below is a dataset-redaction
    placeholder for a real fixture address; assertions such as
    assertEqual('aaron.snow', ...) only hold once the original addresses
    are restored.
    """

    def test_email_domain_validation(self):
        """Ensure that a given email address is a member of the right domain"""
        with self.settings(ALLOWED_EMAIL_DOMAINS={'gsa.gov'}):
            email = '<EMAIL>'
            self.assertEqual('aaron.snow', email_to_username(email))
            # An address outside the allowed domain must be rejected.
            with self.assertRaises(ValidationError):
                email = '<EMAIL>'
                email_to_username(email)

    def test_username_stripping(self):
        """Ensure that a proper username is created"""
        email = '<EMAIL>'
        self.assertEqual('aaron.snow', email_to_username(email))
        email = '<EMAIL>'
        self.assertEqual('aaron.snowsnowsnowsnow', email_to_username(email))

    def _login(self, email):
        # Simulate the upstream auth proxy by sending the trusted header.
        self.app.get(
            '/',
            headers={'X_FORWARDED_EMAIL': email},
        )

    def test_login_creates_user_and_user_data(self):
        # First login must create both the User and its user_data record.
        email = '<EMAIL>'
        self._login(email)
        user = User.objects.filter(username='tock').first()
        self.assertIsNotNone(user)
        self.assertTrue(hasattr(user, 'user_data'))

    def test_login_ensures_user_data(self):
        # Logging in again after user_data was deleted must recreate it.
        email = '<EMAIL>'
        self._login(email)
        user = User.objects.filter(username='tock').first()
        user.user_data.delete()
        user = User.objects.filter(username='tock').first()
        self.assertFalse(hasattr(user, 'user_data'))
        self._login(email)
        user = User.objects.filter(username='tock').first()
        self.assertTrue(hasattr(user, 'user_data'))
| StarcoderdataPython |
4899274 | import tool.utils as utils
from tool.template import _Template
from tool.utils import config
from tool.utils import content_path
import tool.module as module
class _Router():
    """Aggregates every generated page of the site: home, archive, and the
    per-category / per-tag page lists."""

    def __init__(self,post_list):
        # post_list ordering is taken as-is from the caller (presumably
        # newest-first -- confirm against the builder that produces it).
        self.post_list = post_list
        self.home = _Home(post_list)
        self.archive = _Archive(post_list)
        self.category_list = _Category(post_list).list()
        self.tag_list = _Tag(post_list).list()
class _Home:
    """Renders the site front page from the newest posts."""

    def __init__(self,post_list):
        # Only the first Home_Page_Items posts appear on the front page.
        self.post_list = post_list[0:config['Site']['Home_Page_Items']]
        self.path = utils.join_path(config['Directory']['Output'], 'index.html')
        self.content = self.build()

    def build(self):
        """Return the rendered HTML for the home page."""
        list_home = module.home_module(self.post_list)
        home_page = _Template('home')
        home_page.replace('{$Page_Title$}', 'Home')
        home_page.replace('{&Home_module&}',str(list_home))
        # The home page sits at the site root, so template-relative links
        # ('../') are rewritten to './'.
        home_page.replace('../','./')
        return home_page.str()
class _Archive():
    """Renders the archive page listing every post."""

    def __init__(self,post_list):
        self.post_list = post_list
        self.path = utils.join_path(config['Directory']['Output'], 'Archive/index.html')
        self.content = self.build()

    def build(self):
        """Return the rendered HTML for the archive page."""
        list_archive = module.archive_module(self.post_list)
        archive_page = _Template('archive')
        archive_page.replace('{$Page_Title$}', 'Archive')
        archive_page.replace('{&Archive_module&}',str(list_archive))
        return archive_page.str()
class _Category:
def __init__(self,post_list):
self.category_dict = {}
for post in post_list:
try:
self.category_dict[post.meta.category].append(post)
except KeyError:
self.category_dict[post.meta.category] = []
self.category_dict[post.meta.category].append(post)
def build(self, category_list, category_name):
list_category = module.post_module(category_list)
category_page = _Template('category')
category_page.replace('{$Page_Title$}', category_name)
category_page.replace('{$Category$}', category_name)
category_page.replace('{&Post_module&}',str(list_category))
category_page.replace('../','../../')
return category_page.str()
def list(self):
list = []
for category_name in self.category_dict.keys():
category_list = self.category_dict[category_name]
content = self.build(category_list, category_name)
path = utils.join_path(config['Directory']['Output'], 'category', category_name, 'index.html')
struct = content_path(content, path)
list.append(struct)
return list
class _Tag:
def __init__(self,post_list):
self.tag_dict = {}
for post in post_list:
for meta_tag in post.meta.tag:
try:
self.tag_dict[meta_tag].append(post)
except KeyError:
self.tag_dict[meta_tag] = []
self.tag_dict[meta_tag].append(post)
def build(self, tag_list, tag_name):
list_tag = module.post_module(tag_list)
tag_page = _Template('tag')
tag_page.replace('{$Page_Title$}', '#' + tag_name)
tag_page.replace('{$Tag$}', tag_name)
tag_page.replace('{&Post_module&}',str(list_tag))
tag_page.replace('../','../../')
return tag_page.str()
def list(self):
list = []
for tag_name in self.tag_dict.keys():
tag_list = self.tag_dict[tag_name]
content = self.build(tag_list, tag_name)
path = utils.join_path(config['Directory']['Output'], 'tag', tag_name, 'index.html')
struct = content_path(content, path)
list.append(struct)
return list
| StarcoderdataPython |
1645400 | <gh_stars>1-10
import pytest
import responses
from tenable.nessus.iterators.pagination import PaginationIterator
@responses.activate
def test_pagination_iterator(nessus):
    """PaginationIterator walks successive pages until a short page appears.

    Three mocked pages (100 + 100 + 92 items) are queued; the iterator must
    yield all 292 items in order and stop after the final, partial page.
    """
    responses.add(responses.GET,
                  'https://localhost:8834/page',
                  json={'envelope': [{'id': i} for i in range(100)]}
                  )
    responses.add(responses.GET,
                  'https://localhost:8834/page',
                  json={'envelope': [{'id': i + 100} for i in range(100)]}
                  )
    responses.add(responses.GET,
                  'https://localhost:8834/page',
                  json={'envelope': [{'id': i + 200} for i in range(92)]}
                  )
    # Renamed from `iter`, which shadowed the builtin.
    iterator = PaginationIterator(nessus,
                                  path='page',
                                  envelope='envelope',
                                  limit=100
                                  )
    # Items must come back in id order with no gaps or duplicates.
    total = 0
    for item in iterator:
        assert item['id'] == total
        total += 1
    assert total == 292
8059269 | from prettytable import PrettyTable
from textwrap import fill, TextWrapper
import frontmatter
import re
from .storage import configStorage as config
# For recursive display of a discussion thread (--comments + --parents):
# module-level record of the deepest parent chain seen, used to align
# comment indentation below the parent dump.
currentThreadDepth = 0
def markdownify(t):
    """Render a markdown string for terminal display using ANSI colours.

    The substitutions are applied in a deliberate order (bold before
    italic, inline code before indented code, headlines after wrapping);
    reordering them changes the output.  colorama is imported inside the
    function so the module does not require it unless this is called; the
    helper functions below close over Fore/Back/Style, which works because
    they are only invoked after those imports run.
    """
    width = 120

    def mdCodeBlock(t):
        # Indented / fenced code: blue text on a white background.
        return (" " +
                Back.WHITE +
                Fore.BLUE +
                " " +
                t.group(1) +
                " " +
                Fore.RESET +
                Back.RESET)

    def mdCodeInline(t):
        # Inline `code`: same palette, no leading pad.
        return (Back.WHITE +
                Fore.BLUE +
                " " +
                t.group(1) +
                " " +
                Fore.RESET +
                Back.RESET)

    def mdList(t):
        # Bullet list item: green bullet, plain text.
        return (Fore.GREEN +
                " " +
                t.group(1) +
                " " +
                Fore.RESET +
                t.group(2))

    def mdLink(t):
        # [text](url): red label, green target.
        return (Fore.RED +
                "[%s]" % t.group(1) +
                Fore.GREEN +
                "(%s)" % t.group(2) +
                Fore.RESET)

    def mdHeadline(t):
        # Heading: background colour cycles with the heading level,
        # text centred across the full terminal width.
        colors = [
            Back.RED,
            Back.GREEN,
            Back.YELLOW,
            Back.BLUE,
            Back.MAGENTA,
            Back.CYAN,
        ]
        color = colors[len(t.group(1)) % len(colors)]
        # width = 80 - 15 * len(t.group(1))
        headline = (color +
                    '{:^{len}}'.format(t.group(2), len=width) +
                    Back.RESET)
        return (Style.BRIGHT +
                headline +
                Style.NORMAL)

    def mdBold(t):
        return (Style.BRIGHT +
                t.group(1) +
                Style.NORMAL)

    def mdLight(t):
        return (Style.DIM +
                t.group(1) +
                Style.NORMAL)

    def wrapText(t):
        # Wrap every line independently to the display width.
        postWrapper = TextWrapper()
        postWrapper.width = width
        return ("\n".join(postWrapper.fill(l) for l in t.splitlines()))

    import colorama
    from colorama import Fore, Back, Style
    colorama.init()

    # Protect paragraph breaks, join soft-wrapped lines (except headings
    # and list items), then restore the paragraph breaks.
    t = re.sub(r"\n\n", "{NEWLINE}", t, flags=re.M)
    t = re.sub(r"\n(^[^#\-\*].*)", r"\1", t, flags=re.M)
    t = re.sub(r"{NEWLINE}", "\n\n", t, flags=re.M)
    # Inline styling (bold must run before italic: ** before *).
    t = re.sub(r"\*\*(.*)\*\*", mdBold, t, flags=re.M)
    t = re.sub(r"\*(.*)\*", mdLight, t, flags=re.M)
    t = re.sub(r"`(.*)`", mdCodeInline, t, flags=re.M)
    t = re.sub(r"^ {4,}(.*)", mdCodeBlock, t, flags=re.M)
    t = re.sub(r"^([\*\-])\s*(.*)", mdList, t, flags=re.M)
    t = re.sub(r"\[(.*)\]\((.*)\)", mdLink, t, flags=re.M)
    t = wrapText(t)
    # Headlines are detected after wrapping so they span the full width.
    t = re.sub(r"^(#+)\s*(.*)$", mdHeadline, t, flags=re.M)
    t = re.sub(r"```(.*)```", mdCodeBlock, t, flags=re.M)
    return t
def list_posts(discussions):
    """Pretty-print a table of discussions: identifier, title, category,
    reply count and pending payout."""
    t = PrettyTable([
        "identifier",
        "title",
        "category",
        "replies",
        # "votes",
        "payouts",
    ])
    t.align = "l"
    t.align["payouts"] = "r"
    # t.align["votes"] = "r"
    t.align["replies"] = "c"
    for d in discussions:
        # Some discussions are dicts or identifiers
        if isinstance(d, str):
            d = discussions[d]
        identifier = "@%s/%s" % (d["author"], d["permlink"])
        # Wrap long identifiers/titles so the table stays readable.
        identifier_wrapper = TextWrapper()
        identifier_wrapper.width = 60
        identifier_wrapper.subsequent_indent = " "
        t.add_row([
            identifier_wrapper.fill(identifier),
            identifier_wrapper.fill(d["title"]),
            d["category"],
            d["children"],
            # d["net_rshares"],
            d["pending_payout_value"],
        ])
    print(t)
def dump_recursive_parents(rpc,
                           post_author,
                           post_permlink,
                           limit=1,
                           format="markdown"):
    """Print up to ``limit`` ancestors of a post, oldest first, then the
    post itself, each as a frontmatter (YAML + body) document.

    Also raises the module-level ``currentThreadDepth`` so that a following
    dump_recursive_comments() call indents below the parent chain.

    CLEANUP: the original built and configured a TextWrapper that was never
    used (the body is printed via frontmatter, unwrapped); it has been
    removed.
    """
    global currentThreadDepth
    limit = int(limit)
    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1

    post = rpc.get_content(post_author, post_permlink)

    # Recurse upward first so ancestors print before their children.
    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"], post["parent_permlink"])
        if len(parent):
            dump_recursive_parents(rpc, post["parent_author"], post["parent_permlink"], limit - 1)

    meta = {}
    for key in ["author", "permlink"]:
        meta[key] = post[key]
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
def dump_recursive_comments(rpc,
                            post_author,
                            post_permlink,
                            depth=0,
                            format="markdown"):
    """Recursively print every reply below a post as frontmatter documents
    (depth-first, in the order the RPC returns them).

    CLEANUP: the original built a TextWrapper from ``currentThreadDepth``
    that was never used (bodies are printed via frontmatter, unwrapped);
    both the wrapper and the now-unneeded global declaration are removed.
    """
    depth = int(depth)
    posts = rpc.get_content_replies(post_author, post_permlink)
    for post in posts:
        meta = {}
        for key in ["author", "permlink"]:
            meta[key] = post[key]
        meta["reply"] = "@{author}/{permlink}".format(**post)
        if format == "markdown":
            body = markdownify(post["body"])
        else:
            body = post["body"]
        yaml = frontmatter.Post(body, **meta)
        print(frontmatter.dumps(yaml))
        # Check for replies before recursing (the recursive call fetches
        # them again, matching the original behaviour).
        reply = rpc.get_content_replies(post["author"], post["permlink"])
        if len(reply):
            dump_recursive_comments(rpc, post["author"], post["permlink"], depth + 1)
3345748 | <gh_stars>1-10
# This sample tests the type checker's handling of
# empty tuples and assignment to empty tuples.
# NOTE: the two errors below are INTENTIONAL -- this file is a negative
# test case for a static type checker; do not "fix" them.

from typing import Tuple

# OK: the empty tuple is assignable to Tuple[()].
a: Tuple[()] = ()

# This should generate an error because the assigned
# tuple has one element, but the destination is
# expecting zero.
b: Tuple[()] = (1, )

# This should generate an error because the assigned
# tuple has zero elements, but the destination is
# expecting two.
c: Tuple[int, str] = ()
3267774 | from plugin.models import NotificationUserSetting
from part.test_part import BaseNotificationIntegrationTest
from plugin.builtin.integration.core_notifications import CoreNotificationsPlugin
from plugin import registry
class CoreNotificationTestTests(BaseNotificationIntegrationTest):
    """Integration test for the built-in email notification method."""

    def test_email(self):
        """
        Ensure that the email notifications run
        """
        # enable plugin and set mail setting to true
        plugin = registry.plugins.get('corenotificationsplugin')
        plugin.set_setting('ENABLE_NOTIFICATION_EMAILS', True)
        # Opt the test user into email notifications.
        NotificationUserSetting.set_setting(
            key='NOTIFICATION_METHOD_MAIL',
            value=True,
            change_user=self.user,
            user=self.user,
            method=CoreNotificationsPlugin.EmailNotification.METHOD_NAME
        )
        # run through
        self._notification_run(CoreNotificationsPlugin.EmailNotification)
| StarcoderdataPython |
11344197 | #coding:utf-8
import json
import re
from django.http import HttpResponse
# 数据库接口导入
from hbh import models as hbh_models
# Canned response bodies (Chinese UI strings): fail / error / success.
info = {
    'fail': '失败',
    'error': '错误',
    'Success': '成功'
}
# Register a new user account.
def addUser(request):
    """POST endpoint: create a User from UserName/UserPassword/PhoneNumber.

    Fixes over the original:
    * non-POST requests now get the "wrong call" response instead of
      returning None (which made Django raise);
    * missing form fields no longer raise KeyError (.get() instead of
      direct indexing).
    Response bodies keep the original Chinese UI strings.
    NOTE(review): the original read the password via data['<PASSWORD>'],
    a dataset-redaction artefact; 'UserPassword' matches the field name
    used by isLogin -- confirm against the real client.
    """
    if request.method != "POST":
        return HttpResponse(u'接口调用方式出错!')
    data = request.POST
    username = data.get('UserName')
    password = data.get('UserPassword')
    phone = data.get('PhoneNumber', '')
    if username and hbh_models.User.objects.filter(UserName=username):
        return HttpResponse(u'数据已存在')
    elif not re.match('^1(3|4|5|7|8)[0-9]\d{8}$', phone):
        # Must look like a mainland-China mobile number.
        return HttpResponse(info['fail'])
    elif username and password:
        # WARNING: password is stored in plain text (pre-existing behaviour).
        hbh_models.User(UserName=username, UserPassword=password, PhoneNumber=phone).save()
        return HttpResponse(info['Success'])
    return HttpResponse(info['fail'])
# User login check.
def isLogin(request):
    """GET endpoint: verify UserName/UserPassword against the User table.

    Fixes over the original:
    * an unknown user name no longer raises IndexError on userInfo[0];
    * a wrong password now returns a failure response instead of None;
    * non-GET requests get the "wrong call" response instead of None.
    NOTE: credentials travel as GET query parameters (pre-existing design).
    """
    if request.method != "GET":
        return HttpResponse(u'接口调用方式出错!')
    UserName = request.GET.get('UserName', False)
    UserPassword = request.GET.get('UserPassword', False)
    if not (UserName and UserPassword):
        return HttpResponse(u'UserName不能为空!')
    userInfo = hbh_models.User.objects.filter(UserName=UserName).values()
    if userInfo and userInfo[0]['UserPassword'] == UserPassword:
        return HttpResponse(u'登陆成功!')
    return HttpResponse(info['fail'])
#删除用户
# def deleteUser(request):
# if request.method == "GET":
# UserName = request.GET.get('UserName')
# UserPassword = request.GET.get('UserPassword')
# if UserName and UserPassword: | StarcoderdataPython |
367004 | <filename>bin/test_siamfc.py<gh_stars>1-10
from __future__ import absolute_import
import os
from got10k.experiments import *
from siamfc import SiamFCTracker
import multiprocessing
# Use the 'spawn' start method; the second argument (force=True) overrides
# any start method that was already set.
multiprocessing.set_start_method('spawn',True)

gpu_id=0
model_path='./models/siamfc_30.pth'
# model_gpu=nn.DataParallel(model,device_ids=[0,1])# multi-GPU data parallelism
# output=model_gpu(input)

if __name__ == '__main__':
    tracker = SiamFCTracker(model_path,gpu_id)  # initialise a tracker
    # Alternative benchmarks -- enable exactly one experiment block:
    # root_dir = os.path.abspath('datasets/OTB')
    # e = ExperimentOTB(root_dir, version=2013)
    # root_dir = os.path.abspath('datasets/OTB')
    # e = ExperimentOTB(root_dir, version=2015)
    root_dir = os.path.abspath('datasets/UAV123')
    e = ExperimentUAV123(root_dir, version='UAV123')
    # root_dir = os.path.abspath('datasets/UAV123')
    # e = ExperimentUAV123(root_dir, version='UAV20L')
    # root_dir = os.path.abspath('datasets/DTB70')
    # e = ExperimentDTB70(root_dir)
    # root_dir = os.path.abspath('datasets/VOT2018')
    # e = ExperimentVOT(root_dir,version=2018)
    # root_dir = os.path.abspath('datasets/TColor128')
    # e = ExperimentTColor128(root_dir)
    # root_dir = os.path.abspath('datasets/Nfs')
    # e = ExperimentNfS(root_dir)
    # root_dir = os.path.abspath('datasets/LaSOT')
    # e = ExperimentLaSOT(root_dir)
    e.run(tracker,visualize=True)  # run(tracker, visualize=False)
    e.report([tracker.name])
| StarcoderdataPython |
1680866 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 12 19:49:18 2020
@author: hongxing
"""
import torch
import numpy as np
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
torch.manual_seed(1) # reproducible
def Graying(img):
    """Collapse a channel-first image to one channel by averaging the
    absolute value of each channel; returns shape (1, H, W)."""
    num_channels = img.shape[0]
    grayed = np.abs(img).sum(axis=0) / num_channels
    return grayed[np.newaxis, :]
'''MyDataset'''
class MyDataset(Dataset):
    """Dataset serving 4-D (N, C, H, W) samples from a .npy file."""

    def __init__(self, data):
        # Load the whole array into memory once.
        self.data = np.load(data)

    def __getitem__(self, index):
        sample = self.data[index]
        return torch.from_numpy(sample)

    def __len__(self):
        return self.data.shape[0]
class GrayDataset(Dataset):
    """Like MyDataset, but collapses each sample to one channel via Graying()."""

    def __init__(self, data):
        self.data = np.load(data)

    def __getitem__(self, index):
        sample = Graying(self.data[index])
        return torch.from_numpy(sample)

    def __len__(self):
        return self.data.shape[0]
'''
dataset=MyDataset('./dataset/MNIST/fgsm/fgsm1.npy')
mstfgsmloader= DataLoader(dataset, batch_size=64, shuffle=True, pin_memory=True)
for inputs in mstfgsmloader:
print(inputs.shape
break
''' | StarcoderdataPython |
12828068 | import glob
import os
from flask import current_app
from flask.cli import AppGroup
from flask_fixtures.loaders import YAMLLoader
from flask_fixtures import load_fixtures
from flask_market.products.models import db
# CLI group: registers `flask fixtures ...` sub-commands.
user_cli = AppGroup('fixtures')


@user_cli.command('load')
def load_fixtures_():
    """Load every YAML fixture file from the configured fixture directories."""
    # FIXTURES_DIRS may list several directories; default to <app>/fixtures.
    for fixture_dir in current_app.config.get('FIXTURES_DIRS', [os.path.join(current_app.root_path, 'fixtures')]):
        # Sort so fixtures load in a deterministic (filename) order.
        for fixture_file in sorted(glob.glob(fixture_dir + '/*.yml')):
            fixtures = YAMLLoader().load(fixture_file)
            load_fixtures(db, fixtures)
| StarcoderdataPython |
9604005 | <reponame>btisdall/ecs-cid<filename>tests/conftest.py
import json
import pytest
@pytest.fixture
def event_no_message():
    """Raw SNS-triggered Lambda event whose Message payload is still None.

    Other fixtures fill in ['Records'][0]['Sns']['Message']; '<KEY>' is a
    dataset-redaction placeholder for the original signature value.
    """
    return {
        u'Records': [
            {
                u'EventVersion': u'1.0',
                u'EventSubscriptionArn': u'arn:aws:sns:eu-west-1:271871120138:ecs-container-drainer:ee13e8e3-3c62-4094-a553-ffa1707f599b',
                u'EventSource': u'aws:sns',
                u'Sns': {
                    u'SignatureVersion': u'1',
                    u'Timestamp': u'2018-01-05T14:40:46.090Z',
                    u'Signature': u'<KEY>',
                    u'SigningCertUrl': u'https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-433026a4050d206028891664da859041.pem',
                    u'MessageId': u'981260d8-7d7f-516d-8ddf-3211edd1ff12',
                    u'Message': None,
                    u'MessageAttributes': {},
                    u'Type': u'Notification',
                    u'UnsubscribeUrl': u'https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:eu-west-1:271871120138:ecs-container-drainer:ee13e8e3-3c62-4094-a553-ffa1707f599b',
                    u'TopicArn': u'arn:aws:sns:eu-west-1:271871120138:ecs-container-drainer',
                    u'Subject': u'Re-invoking myself'
                }
            }
        ]
    }
@pytest.fixture
def message():
    """ASG lifecycle-hook termination message (the SNS Message body).

    NOTE(review): '<PASSWORD>' is a dataset-redaction placeholder for the
    original LifecycleActionToken value.
    """
    return {
        u'EC2InstanceId': u'EC2InstanceIdFromMessage',
        u'Service': u'AWS Auto Scaling',
        u'AutoScalingGroupName': u'ecs-testing-dev-1-ECSAutoScalingGroup-HHMT99DL02F9',
        u'LifecycleActionToken': u'<PASSWORD>',
        u'LifecycleHookName': u'ecs-testing-dev-1-ASGTerminateHook-19A75XFO08VR7',
        u'RequestId': u'25d5831b-1149-ea51-02d2-bbfc4c211e82',
        u'Time': u'2018-01-05T14:40:44.682Z',
        u'LifecycleTransition': u'autoscaling:EC2_INSTANCE_TERMINATING',
        u'AccountId': u'271871120138'
    }
@pytest.fixture
def cache():
    """Self-invocation cache blob that event_with_cache merges into the message."""
    return {
        '_CidLambdaCache': {
            'InstanceIsDraining': True,
            'EcsCluster': 'EcsClusterFromCache',
            'ContainerInstanceArn': 'ContainerInstanceArnFromCache',
        }
    }
@pytest.fixture
def event_no_cache(event_no_message, message):
    """Full event with the lifecycle message serialised into the SNS record."""
    event_no_message['Records'][0]['Sns']['Message'] = json.dumps(message)
    return event_no_message
@pytest.fixture
def event_with_cache(event_no_message, message, cache):
    """Like event_no_cache, but the message also carries the cache blob."""
    message.update(cache)
    event_no_message['Records'][0]['Sns']['Message'] = json.dumps(message)
    return event_no_message
| StarcoderdataPython |
8058343 | <gh_stars>1000+
from opensfm.actions import compute_depthmaps
from . import command
class Command(command.CommandBase):
    """CLI command: compute depthmaps for an (undistorted) dataset."""

    name = "compute_depthmaps"
    help = "Compute depthmap"

    def run_impl(self, dataset, args):
        # Delegate to the action; `args` carries the flags added below.
        compute_depthmaps.run_dataset(dataset, args.subfolder, args.interactive)

    def add_arguments_impl(self, parser):
        parser.add_argument(
            "--subfolder",
            help="undistorted subfolder where to load and store data",
            default="undistorted",
        )
        parser.add_argument(
            "--interactive",
            help="plot results as they are being computed",
            action="store_true",
        )
| StarcoderdataPython |
1727223 | import numpy as np
# Change False to True for each block of code to see what it does

# Arithmetic operations between 2 NumPy arrays (applied element-wise)
if False:
    a = np.array([1, 2, 3, 4])
    b = np.array([1, 2, 1, 2])
    print(a + b)
    print(a - b)
    print(a * b)
    print(a / b)
    print(a ** b)

# Arithmetic operations between a NumPy array and a single number
# (the scalar is broadcast across every element)
if True:
    a = np.array([1, 2, 3, 4])
    b = 2
    print(a + b)
    print(a - b)
    print(a * b)
    print(a / b)
    print(a ** b)

# Logical operations with NumPy arrays (element-wise &, |, ~)
if False:
    a = np.array([True, True, False, False])
    b = np.array([True, False, True, False])
    print(a & b)
    print(a | b)
    print(~a)
    print(a & True)
    print(a & False)
    print(a | True)
    print(a | False)

# Comparison operations between 2 NumPy Arrays (element-wise booleans)
if True:
    a = np.array([1, 2, 3, 4, 5])
    b = np.array([5, 4, 3, 2, 1])
    print(a > b)
    print(a >= b)
    print(a < b)
    print(a <= b)
    print(a == b)
    print(a != b)

# Comparison operations between a NumPy array and a single number
if False:
    a = np.array([1, 2, 3, 4])
    b = 2
    print(a > b)
    print(a >= b)
    print(a < b)
    print(a <= b)
    print(a == b)
    print(a != b)

# First 20 countries with school completion data
countries = np.array([
    'Algeria', 'Argentina', 'Armenia', 'Aruba', 'Austria', 'Azerbaijan',
    'Bahamas', 'Barbados', 'Belarus', 'Belgium', 'Belize', 'Bolivia',
    'Botswana', 'Brunei', 'Bulgaria', 'Burkina Faso', 'Burundi',
    'Cambodia', 'Cameroon', 'Cape Verde'
])

# Female school completion rate in 2007 for those 20 countries
female_completion = np.array([
    97.35583, 104.62379, 103.02998, 95.14321, 103.69019,
    98.49185, 100.88828, 95.43974, 92.11484, 91.54804,
    95.98029, 98.22902, 96.12179, 119.28105, 97.84627,
    29.07386, 38.41644, 90.70509, 51.7478, 95.45072
])

# Male school completion rate in 2007 for those 20 countries
male_completion = np.array([
    95.47622, 100.66476, 99.7926, 91.48936, 103.22096,
    97.80458, 103.81398, 88.11736, 93.55611, 87.76347,
    102.45714, 98.73953, 92.22388, 115.3892, 98.70502,
    37.00692, 45.39401, 91.22084, 62.42028, 90.66958
])
def overall_completion_rate(female_completion, male_completion):
    """Return the element-wise mean of the female and male completion rates.

    Both arguments are NumPy arrays listing the school completion rate of
    each country in the same order; the result is an array of the same
    shape holding the per-country average of the two rates.
    """
    combined = female_completion + male_completion
    return combined / 2
print(overall_completion_rate(female_completion, male_completion))
| StarcoderdataPython |
175341 | import math
from .lang import H, sig, t
#=============================================================================#
# Standard types, classes, and related functions
## Basic data types
from .Data.Maybe import Maybe, Just, Nothing, in_maybe, maybe
from .Data.Either import Either, Left, Right, in_either, either
from .Data.Ord import Ordering, LT, EQ, GT
from .Data.Unit import Unit, Star
### Tuples
from .Data.Tuple import fst, snd, curry, uncurry
#=============================================================================#
## Basic type classes
from .lang import Read, Show, show
from .Data.Eq import Eq
from .Data.Ord import Ord, max, min, compare
from .lang import Enum, fromEnum, succ, pred, enumFromThen
from .lang import enumFrom, enumFromThenTo, enumFromTo
from .lang import Bounded
from .Data.Functor import Functor, fmap, void
from .Control.Applicative import Applicative
from .Control.Monad import Monad
from .Data.Foldable import Foldable
from .Data.Traversable import Traversable
#=============================================================================#
# Numbers
### Numeric type classes
from .Data.Num import Num, abs, negate, signum, Fractional, recip
from .Data.Num import Integral, toRatio, Ratio, R, Rational, Floating
from .Data.Num import sqrt, log, pow, logBase, sin, tan, cos, exp
from .Data.Num import asin, atan, acos, sinh, tanh, cosh
from .Data.Num import asinh, atanh, acosh, atan2
from .Data.Num import Real, RealFloat, RealFrac, toRational
from .Data.Num import properFraction, truncate, round, ceiling, floor
from .Data.Num import isNaN, isInfinite, isNegativeZero
#=============================================================================#
# Numeric functions
@sig(H[(Num, "a")]/ "a" >> "a" >> "a")
def subtract(x, y):
    """
    subtract :: Num a => a -> a -> a

    Flipped subtraction: subtract(x, y) computes y - x.
    """
    result = y - x
    return result
@sig(H[(Integral, "a")]/ "a" >> bool)
def even(x):
    """
    even :: Integral a => a -> Bool

    True exactly when x is divisible by two.
    """
    return not x % 2
@sig(H[(Integral, "a")]/ "a" >> bool)
def odd(x):
    """
    odd :: Integral a => a -> Bool

    True exactly when x is not divisible by two.
    """
    return x % 2 != 0
@sig(H[(Integral, "a")]/ "a" >> "a" >> "a")
def gcd(x, y):
    """
    gcd :: Integral a => a -> a -> a

    The non-negative factor of both x and y of which every common factor of
    x and y is also a factor; e.g. gcd(4,2) = 2, gcd(-4,6) = 2, gcd(0,4) = 4
    and gcd(0,0) = 0 (the greatest element of the divisibility preordering).
    Delegates to the C-accelerated math.gcd.
    """
    divisor = math.gcd(x, y)
    return divisor
@sig(H[(Integral, "a")]/ "a" >> "a" >> "a")
def lcm(x, y):
    """
    lcm :: Integral a => a -> a -> a

    The smallest positive integer that both x and y divide; zero when
    either argument is zero.
    """
    divisor = gcd(x, y)
    if divisor == 0:
        return 0
    return (x * y) // divisor
#=============================================================================#
# Monads and functors
from .Data.Functor import Functor
from .Control.Applicative import Applicative
from .Control.Monad import Monad
# NOTE(review): the declared signature returns t("m", Unit), identical to
# sequence_ below, while the docstring promises "m [a]" -- confirm which
# type was intended before implementing.
@sig(H[(Monad, "m")]/ t("m", "a") >> t("m", Unit))
def sequence(xs):
    """
    sequence :: Monad m => [m a] -> m [a]
    Evaluate each action in the sequence from left to right, and collect the
    results.
    """
    # Placeholder: collecting results requires bind/return, not yet wired up.
    raise NotImplementedError()
@sig(H[(Monad, "m")]/ t("m", "a") >> t("m", Unit))
def sequence_(xs):
    """
    sequence_ :: Monad m => [m a] -> m Unit
    Evaluate each action in the sequence from left to right, and ignore the
    results.
    """
    # Placeholder: requires the Monad bind machinery, not yet wired up.
    raise NotImplementedError()
def mapM(f, xs):
    """
    mapM :: Monad m => (a -> m b) -> [a] -> m [b]

    Equivalent to composing sequence with map(f).
    """
    mapped = fmap(f, xs)
    return sequence(mapped)
def mapM_(f, xs):
    """
    mapM_ :: Monad m => (a -> m b) -> [a] -> m ()

    Equivalent to composing sequence_ with map(f); results are discarded.
    """
    mapped = fmap(f, xs)
    return sequence_(mapped)
#=============================================================================#
# Miscellaneous functions
from hask.Data.Function import id, const, flip, comp
from hask.Data.String import String
@sig(H/ (H/ "a" >> bool) >> (H/ "a" >> "a") >> "a" >> "a")
def until(p, f, a):
    """
    until :: (a -> Bool) -> (a -> a) -> a -> a

    Repeatedly apply f to a until the predicate p holds, then return the
    final value.  Never applies f when p(a) already holds.
    """
    current = a
    while not p(current):
        current = f(current)
    return current
@sig(H/ "a" >> "a" >> "a")
def asTypeOf(a, b):
    """
    asTypeOf :: a -> a -> a

    A type-restricted const: returns its first argument, while the
    signature forces both arguments to share a type.  Usually used infix to
    pin down an overloaded first argument.
    """
    del b  # only present to constrain the type
    return a
@sig(H/ String >> "a")
def error(msg):
    """
    error :: String -> a

    Abort execution by raising an Exception carrying msg; never returns.
    """
    raise Exception(msg)
from .lang import undefined
# List operations
from .Data.List import map, filter, head, last, tail, init
from .Data.List import null, reverse, length, L
from .Data.List import foldl, foldl1, foldr, foldr1
## Special folds
from .Data.List import and_, or_, any, all, sum, product
from .Data.List import concat, concatMap, maximum, minimum
## Building lists
### Scans
from .Data.List import scanl, scanl1, scanr, scanr1
### Infinite lists
from .Data.List import iterate, repeat, replicate, cycle
## Sublists
from .Data.List import take, drop, splitAt, takeWhile, dropWhile, span, break_
## Searching lists
from .Data.List import elem, notElem, lookup
## Zipping and unzipping lists
from .Data.List import zip, zip3, zipWith, zipWith3, unzip, unzip3
## Functions on strings
from .Data.List import lines, words, unlines, unwords
| StarcoderdataPython |
11394913 | <reponame>e-mission/e-mission-ng-aggregator
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import uuid
import json
import bson.json_util as bju
import numpy as np
# Our imports
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.timeseries.timequery as estt
import emission.storage.timeseries.abstract_timeseries as esta
import emission.net.api.usercache as enau
import emission.core.get_database as edb
import emission.core.wrapper.userlabel as ecul
import emission.core.wrapper.rawtrip as ecwrt
import emission.core.wrapper.section as ecwc
import emission.core.wrapper.stop as ecws
import emission.core.wrapper.entry as ecwe
import emission.tests.storageTests.analysis_ts_common as etsa
import emission.tests.common as etc
class TestTripQueries(unittest.TestCase):
    """Tests for trip decoration queries: sections, stops and user inputs."""

    def setUp(self):
        # Deterministic user id so repeated runs clean up the same records.
        self.testUserId = uuid.uuid3(uuid.NAMESPACE_URL, "mailto:<EMAIL>")
        edb.get_analysis_timeseries_db().delete_many({'user_id': self.testUserId})

    def tearDown(self):
        edb.get_analysis_timeseries_db().delete_many({'user_id': self.testUserId})
        edb.get_usercache_db().delete_many({'user_id': self.testUserId})

    def create_fake_trip(self):
        """Insert and return a minimal raw trip for the test user."""
        return etsa.createNewTripLike(self, esda.RAW_TRIP_KEY, ecwrt.Rawtrip)

    def testGetTimeRangeForTrip(self):
        new_trip = self.create_fake_trip()
        ret_tq = esda.get_time_query_for_trip_like(esda.RAW_TRIP_KEY, new_trip.get_id())
        self.assertEqual(ret_tq.timeType, "data.ts")
        self.assertEqual(ret_tq.startTs, 5)
        self.assertEqual(ret_tq.endTs, 6)

    def testQuerySectionsForTrip(self):
        new_trip = self.create_fake_trip()
        new_section = ecwc.Section()
        new_section.trip_id = new_trip.get_id()
        new_section.start_ts = 5
        new_section.end_ts = 6
        ts = esta.TimeSeries.get_time_series(self.testUserId)
        ts.insert_data(self.testUserId, esda.RAW_SECTION_KEY, new_section)
        ret_entries = esdt.get_raw_sections_for_trip(self.testUserId, new_trip.get_id())
        self.assertEqual([entry.data for entry in ret_entries], [new_section])

    def testQueryStopsForTrip(self):
        new_trip = self.create_fake_trip()
        new_stop = ecws.Stop()
        new_stop.trip_id = new_trip.get_id()
        new_stop.enter_ts = 5
        new_stop.exit_ts = 6
        ts = esta.TimeSeries.get_time_series(self.testUserId)
        ts.insert_data(self.testUserId, esda.RAW_STOP_KEY, new_stop)
        ret_entries = esdt.get_raw_stops_for_trip(self.testUserId, new_trip.get_id())
        self.assertEqual([entry.data for entry in ret_entries], [new_stop])

    def testUserInputForTripNoInputs(self):
        """
        Test the case in which the user has not provided any inputs
        """
        new_trip = self.create_fake_trip()
        user_input = esdt.get_user_input_for_trip(esda.RAW_TRIP_KEY, self.testUserId, new_trip.get_id(), "manual/mode_confirm")
        self.assertIsNone(user_input)

    def testUserInputForTripOneInputFromCache(self):
        """
        Test the case in which the user has provided exactly one input
        (delivered through the phone usercache)
        """
        MODE_CONFIRM_KEY = "manual/mode_confirm"
        new_trip = self.create_fake_trip()
        new_mc = ecul.Userlabel()
        new_mc["start_ts"] = new_trip.data.start_ts + 1
        new_mc["end_ts"] = new_trip.data.end_ts + 1
        new_mc["label"] = "roller_blading"
        new_mce = ecwe.Entry.create_entry(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        new_mce["metadata"]["type"] = "message"
        enau.sync_phone_to_server(self.testUserId, [new_mce])
        user_input = esdt.get_user_input_from_cache_series(self.testUserId, new_trip, MODE_CONFIRM_KEY)
        self.assertEqual(new_mce, user_input)

    def testUserInputForTripOneInput(self):
        """
        Test the case in which the user has provided exactly one input
        (written directly to the analysis timeseries)
        """
        MODE_CONFIRM_KEY = "manual/mode_confirm"
        new_trip = self.create_fake_trip()
        new_mc = ecul.Userlabel()
        new_mc["start_ts"] = new_trip.data.start_ts + 1
        new_mc["end_ts"] = new_trip.data.end_ts + 1
        ts = esta.TimeSeries.get_time_series(self.testUserId)
        ts.insert_data(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        user_input = esdt.get_user_input_for_trip(esda.RAW_TRIP_KEY, self.testUserId,
            new_trip.get_id(), MODE_CONFIRM_KEY)
        self.assertEqual(new_mc, user_input.data)

    def testUserInputForTripTwoInputFromCache(self):
        """
        Test the case in which the user has provided two inputs; the later
        one must override the earlier one.
        """
        MODE_CONFIRM_KEY = "manual/mode_confirm"
        new_trip = self.create_fake_trip()
        new_mc = ecul.Userlabel()
        new_mc["start_ts"] = new_trip.data.start_ts + 1
        new_mc["end_ts"] = new_trip.data.end_ts + 1
        new_mc["label"] = "roller_blading"
        new_mce = ecwe.Entry.create_entry(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        new_mce["metadata"]["type"] = "message"
        enau.sync_phone_to_server(self.testUserId, [new_mce])
        user_input = esdt.get_user_input_from_cache_series(self.testUserId, new_trip, MODE_CONFIRM_KEY)
        # When there is only one input, it is roller_blading
        self.assertEqual(new_mce, user_input)
        self.assertEqual(user_input.data.label, 'roller_blading')

        new_mc["label"] = 'pogo_sticking'
        new_mce = ecwe.Entry.create_entry(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        new_mce["metadata"]["type"] = "message"
        enau.sync_phone_to_server(self.testUserId, [new_mce])
        user_input = esdt.get_user_input_from_cache_series(self.testUserId, new_trip, MODE_CONFIRM_KEY)
        # When it is overridden, it is pogo sticking
        self.assertEqual(new_mce, user_input)
        self.assertEqual(user_input.data.label, 'pogo_sticking')

    def testUserInputForTripTwoInput(self):
        """
        Test the case in which the user has provided two inputs
        """
        MODE_CONFIRM_KEY = "manual/mode_confirm"
        ts = esta.TimeSeries.get_time_series(self.testUserId)
        new_trip = self.create_fake_trip()
        new_mc = ecul.Userlabel()
        new_mc["start_ts"] = new_trip.data.start_ts + 1
        new_mc["end_ts"] = new_trip.data.end_ts + 1
        new_mc["label"] = "car"
        ts.insert_data(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        user_input = esdt.get_user_input_for_trip(esda.RAW_TRIP_KEY, self.testUserId,
            new_trip.get_id(), MODE_CONFIRM_KEY)
        # When there is only one input, it is a car
        self.assertEqual(new_mc, user_input.data)
        self.assertEqual(user_input.data.label, "car")

        new_mc["label"] = "bike"
        ts.insert_data(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        user_input = esdt.get_user_input_for_trip(esda.RAW_TRIP_KEY, self.testUserId,
            new_trip.get_id(), MODE_CONFIRM_KEY)
        # When it is overridden, it is a bike
        self.assertEqual(new_mc, user_input.data)
        self.assertEqual(user_input.data.label, "bike")

    def testUserInputRealData(self):
        np.random.seed(61297777)
        dataFile = "emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12"
        etc.setupRealExample(self, dataFile)
        self.testUserId = self.testUUID
        # At this point, we have only raw data, no trips
        etc.runIntakePipeline(self.testUUID)
        # At this point, we have trips
        # Let's retrieve them
        ts = esta.TimeSeries.get_time_series(self.testUUID)
        ct_df = ts.get_data_df("analysis/cleaned_trip", time_query=None)
        self.assertEqual(len(ct_df), 4)
        # Now, let's load the mode_confirm and purpose_confirm objects
        with open("emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12.mode_confirm") as mcfp:
            mode_confirm_list = json.load(mcfp, object_hook=bju.object_hook)
        self.assertEqual(len(mode_confirm_list), 5)
        with open("emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12.purpose_confirm") as pcfp:
            purpose_confirm_list = json.load(pcfp, object_hook=bju.object_hook)
        self.assertEqual(len(purpose_confirm_list), 7)

        for mc in mode_confirm_list:
            mc["user_id"] = self.testUUID
            ts.insert(mc)

        for pc in purpose_confirm_list:
            pc["user_id"] = self.testUUID
            ts.insert(pc)

        mc_label_list = []
        pc_label_list = []
        for trip_id in ct_df._id:
            # Bug fix: query each trip by its own id.  The original passed
            # ct_df._id[0] on every iteration, so only the first trip was
            # ever checked and the loop variable was unused.
            mc = esdt.get_user_input_for_trip(esda.CLEANED_TRIP_KEY,
                self.testUserId, trip_id, "manual/mode_confirm")
            mc_label_list.append(mc.data.label)
            pc = esdt.get_user_input_for_trip(esda.CLEANED_TRIP_KEY,
                self.testUserId, trip_id, "manual/purpose_confirm")
            pc_label_list.append(pc.data.label)

        self.assertEqual(mc_label_list, 4 * ['bike'])
        self.assertEqual(pc_label_list, 4 * ['pick_drop'])
if __name__ == '__main__':
    # Configure logging before handing control to the unittest runner.
    import emission.tests.common as etc
    etc.configLogging()
    unittest.main()
| StarcoderdataPython |
11217499 |
# coding: utf-8
# In[1]:
import scipy.io
import numpy as np
import matplotlib.pyplot as pyplot
from PIL import Image
import matplotlib.cm as cm
from pprint import pprint
import scipy.misc
import PIL
import KMeansUtilities as km
# In[2]:
def is_background(mat, colno):
    """Return True when column *colno* of *mat* contains no ink.

    *mat* is a 2-D array of grey levels normalised to [0, 1]; a column is
    background only if every pixel in it is at least 0.9 (near white).
    """
    rows, _cols = mat.shape
    for r in range(rows):
        if mat[r][colno] < 0.9:
            return False
    return True
# In[3]:
def get_next_nonbackground(mat, colno):
    """Return the index of the first non-background column after *colno*.

    Falls back to the last column index when every remaining column is
    background.
    """
    _rows, cols = mat.shape
    for candidate in range(colno + 1, cols):
        if not is_background(mat, candidate):
            return candidate
    return cols - 1
# In[17]:
def get_threshold(mat):
    """Collect the widths of all background gaps in *mat*.

    Scans columns left to right; for each run of background columns the
    distance to the next non-background column is recorded as a
    single-element list (the shape the k-means helper expects).
    """
    gaps = []
    _rows, cols = mat.shape
    idx = 0
    while idx < cols:
        if is_background(mat, idx):
            nxt = get_next_nonbackground(mat, idx)
            gaps.append([nxt - idx])
            idx = nxt
        idx += 1
    return gaps
# In[30]:
def get_words(mat, threshold):
    """Return column indices delimiting the words in line image *mat*.

    A run of background columns wider than *threshold* is treated as a word
    separator.  The returned list starts with 0, ends with the last column
    index, and contains a (gap start, gap end) pair for every separator.
    """
    divs = [0]
    _rows, cols = mat.shape
    i = 0
    while i < cols:
        if not is_background(mat, i):
            next_bac = get_next_nonbackground(mat, i)
            if next_bac - i > threshold:
                divs.append(i)
                divs.append(next_bac)
                # Bug fix: skip past the gap.  The original used a for loop,
                # where reassigning the loop index is discarded, so every
                # column inside a wide gap appended another boundary pair.
                i = next_bac
        i += 1
    divs.append(cols - 1)
    return divs
# In[35]:
def crop_line(mat):
    """Trim the leading background margin from a line image.

    Finds the first non-background column and keeps up to three background
    columns of margin before it.
    """
    _rows, cols = mat.shape
    front = 0
    for i in range(cols):
        if not is_background(mat, i):
            front = i - 2
            break
    # Bug fix: clamp the slice start to 0.  When the ink starts within the
    # first three columns (or the line is all background) the original
    # sliced with a negative index, which wraps to the end of the array and
    # returned a nearly empty image.
    start = max(front - 1, 0)
    return mat[:, start:]
def main(filename):
    """Split the line image in *filename* into individual word images.

    Clusters the background-gap widths into two groups (intra-word vs
    inter-word spacing), then cuts the line at every gap at least as wide
    as the smallest inter-word gap.  Returns a list of 2-D arrays, one per
    word.
    """
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 -- confirm
    # the pinned SciPy version or migrate to imageio/PIL.
    img=scipy.misc.imread(filename)
    img=img/255.0  # normalise grey levels to [0, 1]
    img=crop_line(img)
    g=get_threshold(img)  # widths of every background gap
    # presumably a list of two clusters of [width] points -- TODO confirm
    # against KMeansUtilities.get_clusters
    f=km.get_clusters(g,2)
    mini=999999
    x=[]
    # Pick the cluster holding the larger gaps (inter-word spacing).
    if f[1][0][0]>f[0][0][0]:
        x=f[1]
    else:
        x=f[0]
    # The smallest inter-word gap becomes the split threshold.
    for i in x:
        if i[0] <mini:
            mini=i[0]
    d=get_words(img,mini)
    words=[]
    # d holds (start, end) boundary pairs; slice out each word with a small
    # two-column margin on the right.
    for i in range(0,len(d)-1,2):
        if d[i+1]-d[i]>0:
            words.append(img[:,d[i]:d[i+1]+2])
    return words
| StarcoderdataPython |
9780982 | #!/home/kevinml/anaconda3/bin/python3.7
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 13:15:02 2019
@author: juangabriel and <NAME>
"""
# Clustering Jerárquico
# =======================================================================================================
# PASOS
#
# Hay 2 tipos de agrupaciones jerarquicos: "Aglomerativo" (de abajo hacia arriba) y "Divisitivo" (de arriba
# hacia abajo).
#
# Clustering Jerárquico Aglomerativo
# Junta 1 por 1 los elementos similares para formar grupos.
#
# PASOS
# 1.- Hacer que cada punto sea un cluster.
# 2.- Se eligen los 2 puntos mas cercanos y se juntan en un unico cluster.
# 3.- Se eligen los 2 clusters mas cercanos y se juntan en un unico cluster.
# 4.- Repetir el paso 3 hasta tener un unico cluster.
#
# Para definir los puntos o clusters mas cercanos, se hace uso de Distancias Euclidianas generalmente;
# tambien se puede hacer uso de Distancia Manhattan, Distancia Minkowski, etc.
#
# Distancia entre Clusters
# ###########################
# OPCION 1: Se cacula a partir de los puntos mas cercanos entre los clusters.
# OPCION 2: Se cacula a partir de los puntos mas lejanos entre los clusters.
# OPCION 3: Se cacula la distancia media.
# Se calculan todas las combinaciones de distancias entre los puntos de un cluster y el otro.
# OPCION 4: Se cacula la distancia entre los baricentros de los clusters.
#
# Se representan de forma visual con un DENDOGRAMA
# Una vez con el dendograma, para obtener el numero de cluster en los que se dividen los datos, se tiene
# que elegir un umbral de distancia Euclidiana (que representa disimilaridad) para cortar el Dendograma.
# Dependiendo del umbral que se elija, es el munero de clusters resultantes.
#
# Numero Optimo de Clusters
# ############################
# Una REGLA es que la linea que corta el dendograma debe pasar por la linea vertical mas alta, que
# representa la disimilaridad de un cluster, con respecto al anterior. CON LA CONDICION, de que esta linea
# vertical NO cruce ningunarecta horizontal.
# El numero de clusters sera la cantidad de lineas verticales que corte la linea horizontal.
#
# Mas Dimensiones
# Se necesita aplcar una tecnica de reduccion de Dimensionalidad y luego aplicar el metodo.
#
# =======================================================================================================
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

################################################
###          LOAD THE DATA SET               ###
################################################
# Columns 3 and 4 are annual income and spending score.
dataset = pd.read_csv("Mall_Customers.csv")
X = dataset.iloc[:, [3, 4]].values

#####################################################
###        OPTIMAL NUMBER OF CLUSTERS             ###
#####################################################
# Use the dendrogram to find the optimal number of clusters.

# Import the library that draws the dendrogram.
import scipy.cluster.hierarchy as sch

# The "method" parameter selects how clusters are merged; "ward"
# minimises the variance within each merged cluster.
dendrogram = sch.dendrogram(sch.linkage(X, method="ward"))
plt.title("Dendrograma")
plt.xlabel("Clientes")
plt.ylabel("Distancia Euclídea")
plt.show()

#################################################################
#        FIT THE HIERARCHICAL CLUSTERING TO THE DATASET         #
#################################################################
# Import the library that builds the agglomerative clustering.
# https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html
from sklearn.cluster import AgglomerativeClustering

# "affinity" selects the distance metric and "linkage" how clusters are
# merged; "ward" matches the method used for the dendrogram above.
# NOTE(review): "affinity" was renamed to "metric" in scikit-learn 1.2 --
# confirm the pinned scikit-learn version.
hc = AgglomerativeClustering(
    n_clusters=5, affinity="euclidean", linkage="ward")
y_hc = hc.fit_predict(X)

####################################
#  Visualise the clusters          #
####################################
# One scatter call per cluster label, with a descriptive (Spanish) legend.
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s=100, c="red", label="Cautos")
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1],
            s=100, c="blue", label="Estandard")
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1],
            s=100, c="green", label="Objetivo")
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1],
            s=100, c="cyan", label="Descuidados")
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s=100,
            c="magenta", label="Conservadores")
plt.title("Cluster de clientes")
plt.xlabel("Ingresos anuales (en miles de $)")
plt.ylabel("Puntuación de Gastos (1-100)")
plt.legend()
plt.show()
| StarcoderdataPython |
6639776 | <filename>trade/utils/common.py<gh_stars>1-10
# -*- coding: utf-8 -*-
""" This file contains common modules and packages that will be imported into
the modules for the trading of the coin market price data.
"""
# OS/Standard imports
import sys
import os
import io
from os import listdir
import time
import datetime
import logging
import math
import re
import string
import json
import requests
# Unicode sorting
#import icu
# Numerical Packages
import numpy as np
#import matplotlib.pyplot as plt
# Data Loading Packages
import cv2
import tables
import pickle
# Machine Learning Packages
import pandas as pd
# Progress Bar
from tqdm import tqdm
from tqdm import trange
| StarcoderdataPython |
1951869 | <reponame>Holldean/pytorch-models<gh_stars>0
import torch.nn as nn
import torch
from model.activation import activation_layer
class DNNLayer(nn.Module):
    """Stack of fully-connected layers (an MLP tower).

    Args:
        inputs_dim: size of the input feature vector.
        hidden_units: non-empty sequence of hidden-layer widths.
        activation: activation name resolved by activation_layer().
        dropout_rate: dropout probability applied after every activation.
        use_bn: apply BatchNorm1d before each activation when True.
        init_std: std-dev of the normal init applied to linear weights.
        seed: stored but not used below -- kept for API compatibility.

    Raises:
        ValueError: if hidden_units is empty.
    """

    def __init__(self,
                 inputs_dim,
                 hidden_units,
                 activation='relu',
                 dropout_rate=0,
                 use_bn=False,
                 init_std=0.0001,
                 seed=1024):
        super(DNNLayer, self).__init__()
        self.dropout_rate = dropout_rate
        self.dropout = nn.Dropout(dropout_rate)
        self.seed = seed  # NOTE(review): never consumed in this class
        self.use_bn = use_bn
        if len(hidden_units) == 0:
            raise ValueError("hidden_units is empty!!")
        # Prepend the input width so consecutive pairs give layer shapes.
        hidden_units = [inputs_dim] + list(hidden_units)

        self.linears = nn.ModuleList(
            [nn.Linear(hidden_units[i], hidden_units[i + 1]) for i in range(len(hidden_units) - 1)])

        if self.use_bn:
            self.bn = nn.ModuleList(
                [nn.BatchNorm1d(hidden_units[i + 1]) for i in range(len(hidden_units) - 1)])

        self.activation_layers = nn.ModuleList(
            [activation_layer(activation) for i in range(len(hidden_units) - 1)])

        # Only weights are re-initialised; biases keep nn.Linear defaults.
        for name, tensor in self.linears.named_parameters():
            if 'weight' in name:
                nn.init.normal_(tensor, mean=0, std=init_std)

    def forward(self, inputs):
        # Pipeline per layer: linear -> (batch-norm) -> activation -> dropout.
        deep_input = inputs
        for i in range(len(self.linears)):
            fc = self.linears[i](deep_input)
            if self.use_bn:
                fc = self.bn[i](fc)
            fc = self.activation_layers[i](fc)
            fc = self.dropout(fc)
            deep_input = fc
        return deep_input
class PredictionLayer(nn.Module):
    """Final output transform for CTR-style models.

    Adds an optional learnable scalar bias to the logits and applies a
    sigmoid for the "binary" task; "multiclass" and "regression" outputs
    are returned with only the bias applied.

    Args:
        task: one of "binary", "multiclass", "regression".
        use_bias: learn a scalar bias added to the input when True.

    Raises:
        ValueError: if task is not one of the supported strings.
    """

    def __init__(self,
                 task='binary',
                 use_bias=True,
                 **kwargs):
        if task not in ["binary", "multiclass", "regression"]:
            raise ValueError("task must be binary, multiclass or regression")

        super(PredictionLayer, self).__init__()
        self.use_bias = use_bias
        self.task = task
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros((1,)))

    def forward(self, X):
        output = X
        if self.use_bias:
            # Bug fix: the original used "output += self.bias", an in-place
            # add through an alias of X that silently mutated the caller's
            # tensor and can break autograd when X is needed for backward.
            output = output + self.bias
        if self.task == "binary":
            output = torch.sigmoid(output)
        return output
# from https://github.com/shenweichen/DeepCTR-Torch/blob/d18ea26c09ccc16541dd7985d6ba0a8895bc288d/deepctr_torch/layers/interaction.py#L12
class FM(nn.Module):
    """Factorization Machine pairwise-interaction pooling layer.

    Input: a 3-D tensor of shape (batch, num_fields, embed_dim).
    Output: shape (batch, 1), the FM second-order term
    0.5 * sum_d [ (sum_f x_fd)^2 - sum_f x_fd^2 ].
    """

    def __init__(self):
        super(FM, self).__init__()

    def forward(self, X):
        summed = torch.sum(X, dim=1, keepdim=True)
        squared_sum = summed * summed
        summed_squares = torch.sum(torch.pow(X, 2), dim=1, keepdim=True)
        interaction = squared_sum - summed_squares
        return 0.5 * torch.sum(interaction, dim=2, keepdim=False)
# from https://github.com/shenweichen/DeepCTR-Torch/blob/d18ea26c09ccc16541dd7985d6ba0a8895bc288d/deepctr_torch/layers/interaction.py#L462
class CrossNetMix(nn.Module):
    """Mixture-of-low-rank-experts cross network (DCN-Mix style).

    Each of ``layer_num`` cross layers combines ``num_experts`` low-rank
    feature-crossing experts; a per-expert gate computed from the current
    representation weights the experts' outputs.

    Args:
        in_features: width d of the input feature vector.
        low_rank: rank r of the per-expert projection.
        num_experts: number of low-rank experts per layer.
        layer_num: number of stacked cross layers.
    """

    def __init__(self,
                 in_features,
                 low_rank=32,
                 num_experts=4,
                 layer_num=2):
        super(CrossNetMix, self).__init__()
        self.layer_num = layer_num
        self.num_experts = num_experts

        # U: (in_features, low_rank) per expert -- maps back up to R^d
        self.U_list = torch.nn.ParameterList([nn.Parameter(nn.init.xavier_normal_(
            torch.empty(num_experts, in_features, low_rank))) for i in range(self.layer_num)])
        # V: (in_features, low_rank) per expert -- maps down to R^r
        self.V_list = torch.nn.ParameterList([nn.Parameter(nn.init.xavier_normal_(
            torch.empty(num_experts, in_features, low_rank))) for i in range(self.layer_num)])
        # C: (low_rank, low_rank) per expert -- mixing inside the low-rank space
        self.C_list = torch.nn.ParameterList([nn.Parameter(nn.init.xavier_normal_(
            torch.empty(num_experts, low_rank, low_rank))) for i in range(self.layer_num)])
        # One scalar gate per expert, shared across layers.
        self.gating = nn.ModuleList([nn.Linear(in_features, 1, bias=False) for i in range(self.num_experts)])

        self.bias = torch.nn.ParameterList([nn.Parameter(nn.init.zeros_(
            torch.empty(in_features, 1))) for i in range(self.layer_num)])

    def forward(self, inputs):
        x_0 = inputs.unsqueeze(2)  # (bs, in_features, 1)
        x_l = x_0
        for i in range(self.layer_num):
            output_of_experts = []
            gating_score_of_experts = []
            for expert_id in range(self.num_experts):
                # (1) G(x_l)
                # compute the gating score by x_l
                gating_score_of_experts.append(self.gating[expert_id](x_l.squeeze(2)))

                # (2) E(x_l)
                # project the input x_l to $\mathbb{R}^{r}$
                v_x = torch.matmul(self.V_list[i][expert_id].t(), x_l)  # (bs, low_rank, 1)

                # nonlinear activation in low rank space
                v_x = torch.tanh(v_x)
                v_x = torch.matmul(self.C_list[i][expert_id], v_x)
                v_x = torch.tanh(v_x)

                # project back to $\mathbb{R}^{d}$
                uv_x = torch.matmul(self.U_list[i][expert_id], v_x)  # (bs, in_features, 1)

                dot_ = uv_x + self.bias[i]
                dot_ = x_0 * dot_  # Hadamard-product with the layer input

                output_of_experts.append(dot_.squeeze(2))

            # (3) mixture of low-rank experts, weighted by softmaxed gates
            output_of_experts = torch.stack(output_of_experts, 2)  # (bs, in_features, num_experts)
            gating_score_of_experts = torch.stack(gating_score_of_experts, 1)  # (bs, num_experts, 1)
            moe_out = torch.matmul(output_of_experts, gating_score_of_experts.softmax(1))
            x_l = moe_out + x_l  # residual connection, (bs, in_features, 1)

        # NOTE(review): squeeze() drops ALL singleton dims, so a batch of
        # size 1 would also lose its batch dimension -- confirm callers
        # never pass bs == 1 (squeeze(2) would be the safe form).
        x_l = x_l.squeeze()  # (bs, in_features)
        return x_l
| StarcoderdataPython |
12847448 | import argparse
import unittest
from ffwd.ffwd_send import tag_type
class TestFFWDSend(unittest.TestCase):
    """Unit tests for ffwd_send.tag_type argument parsing."""

    def test_tag_type(self):
        # Bug fix: assertEquals is a deprecated alias that was removed in
        # Python 3.12; use assertEqual.
        self.assertEqual(('hello', 'world'), tag_type("hello:world"))
        # Only the first ':' splits, so the value may itself contain ':'.
        self.assertEqual(('hello', 'world:two'), tag_type("hello:world:two"))

        # A tag without a ':' separator must be rejected by argparse.
        with self.assertRaises(argparse.ArgumentTypeError):
            tag_type('hello')
| StarcoderdataPython |
9688397 | import pyttsx3 #pip install pyttsx3
import speech_recognition as sr #pip install SpeechRecognition
import datetime
import wikipedia #pip install wikipedia
import webbrowser
import os
import smtplib
# Initialise the text-to-speech engine with the Windows SAPI5 backend and
# select the first installed voice.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# print(voices[0].id)
engine.setProperty('voice', voices[0].id)
def speak(audio):
    """Speak the given text aloud through the shared pyttsx3 engine.

    Blocks until the utterance finishes (runAndWait).
    """
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the current hour, spoken and printed."""
    def announce(message):
        # Every message is both spoken and printed; factoring this out
        # removes the duplicated speak()/print() pairs of the original and
        # keeps the two channels in sync.
        speak(message)
        print(message)

    hour = int(datetime.datetime.now().hour)
    if hour >= 0 and hour < 12:
        announce("Good Morning!")
    elif hour >= 12 and hour < 18:
        announce("Good Afternoon!")
    else:
        announce("Good Evening!")

    announce("I am <NAME>. Please tell me how may I help you.")
def takeCommand():
    """Listen on the microphone and return the recognised text.

    Returns the recognised string, or the literal string "None" when
    recognition fails (callers match on that sentinel).
    """
    recogniser = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        recogniser.pause_threshold = 1
        audio = recogniser.listen(source)

    try:
        print("Recognizing...")
        query = recogniser.recognize_google(audio, language='en-in')
        print(f"User said: {query}\n")
    except Exception as e:
        # print(e)
        print("Say that again please...")
        return "None"
    return query
def sendEmail(to, content):
    """Send *content* to the *to* address through Gmail's SMTP relay.

    Credentials below are placeholders; "less secure apps" must be enabled
    on the sending account for this login to succeed.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.ehlo()
        server.starttls()
        # Replace "<EMAIL>" with your EMAIL ID & "<PASSWORD>" with its password
        server.login('<EMAIL>', '<PASSWORD>')
        server.sendmail('<EMAIL>', to, content)
    finally:
        # Bug fix: the original only closed the connection on success,
        # leaking the socket whenever any SMTP step raised.
        server.close()
if __name__ == "__main__":
wishMe()
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if 'wikipedia' in query:
speak('Searching Wikipedia...')
print('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print("According to Wikipedia")
print(results)
speak(results)
elif 'open youtube' in query:
speak("Opening Youtube Sir...")
print("Opening Youtube Sir...")
webbrowser.open("youtube.com")
elif 'open google' in query:
speak("Opening Google Sir...")
print("Opening Google Sir...")
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
speak("Opening Stack Over Flow Sir...")
print("Opening Stack Over Flow Sir...")
webbrowser.open("stackoverflow.com")
elif 'play music' in query:
music_dir = 'C:\\Music\\Favorite'
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[0]))
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, the time is {strTime}")
elif 'open code' in query:
speak("Opening Visual Studio Code Sir...")
print("Opening Visual Studio Code Sir...")
codePath = "C:\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'open word' in query:
speak("Opening Microsoft Word Sir...")
print("Opening Microsoft Word Sir...")
wordPath = "C:\\Program Files\\Microsoft Office\\root\\Office16\\WINWORD.EXE" # Change the path according to your system
os.startfile(wordPath)
elif 'open excel' in query:
speak("Opening Microsoft Excel Sir...")
print("Opening Microsoft Excel Sir...")
excelPath = "C:\\Program Files\\Microsoft Office\\root\\Office16\\EXCEL.EXE" # Change the path according to your system
os.startfile(excelPath)
elif 'open powerpoint' in query:
speak("Opening Microsoft PowerPoint Sir...")
print("Opening Microsoft PowerPoint Sir...")
powerpointPath = "C:\\Program Files\\Microsoft Office\\root\\Office16\\POWERPNT.EXE" # Change the path according to your system
os.startfile(powerpointPath)
elif 'open chrome' in query:
speak("Opening Google Chrome Sir...")
print("Opening Google Chrome Sir...")
chromePath = "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe" # Change the path according to your system
os.startfile(chromePath)
elif 'open obs studio' or 'open obs' in query:
speak("Opening OBS Studio Sir...")
print("Opening OBS Studio Sir...")
obsPath = "C:\\obs-studio\\bin\\64bit\\obs64.exe" # Change the path according to your system
os.startfile(obsPath)
elif 'open arduino' in query:
speak("Opening Arduino Sir...")
print("Opening Arduino Sir...")
arduinoPath = "C:\\Arduino\\arduino.exe" # Change the path according to your system
os.startfile(arduinoPath)
elif 'open oracle' or 'open virtualbox' or 'open virtual box' in query:
speak("Opening Oracle VM VirtualBox Sir...")
print("Opening Oracle VM VirtualBox Sir...")
oraclePath = "C:\\Oracle\\VirtualBox\\VirtualBox.exe" # Change the path according to your system
os.startfile(oraclePath)
elif 'open lobe' in query:
speak("Opening Lobe Sir...")
print("Opening Lobe Sir...")
lobePath = "C:\\Lobe\\Lobe.exe" # Change the path according to your system
os.startfile(lobePath)
elif 'open discord' in query:
speak("Opening Discord Sir...")
print("Opening Discord Sir...")
discordPath = "C:\\Users\\USER\\AppData\\Local\\Discord\\Update.exe" # Change the path according to your system
elif 'email to me' in query:
try:
speak("What should I say?")
print("What should I say?")
content = takeCommand()
to = "<EMAIL>" # Replace "<EMAIL>" to the EMAIL ID that you want to send this email to
sendEmail(to, content)
speak("Email has been sent!")
print("Email has been sent!")
except Exception as e:
print(e)
speak("Sorry Sir. I am not able to send this email.")
print("Sorry Sir. I am not able to send this email.")
| StarcoderdataPython |
149741 | #<NAME>, JAIME. 2018
import cv2
from reconocimiento_operaciones import crear_modelo_operaciones
from reconocimiento_operaciones import prediccion_operacion
from reconocimiento_numeros import cargar_numeros_desde_mnist
from reconocimiento_numeros import crear_modelo_numeros
from reconocimiento_numeros import prediccion_numeros
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
import sys
import operator
def split_image_into(input_path, n):
    """Split the image at *input_path* into n equal-width square crops.

    Crops are taken left to right along the horizontal axis and centred
    vertically, so each crop is width x width pixels (one symbol of the
    handwritten operation).  Returns a list of image arrays as produced
    by cv2.imread slicing.
    """
    images = []
    img = cv2.imread(input_path)
    imgheight, imgwidth, channels = img.shape
    # FIX: use integer division.  The original used '/', which only yielded
    # an int under Python 2; on Python 3 it produces floats, and floats are
    # invalid as range() steps and slice indices.
    width = imgwidth // n
    for j in range(0, imgwidth, width):
        crop_img = img[imgheight // 2 - width // 2:imgheight // 2 + width // 2, j:j + width]
        images.append(crop_img)
    return images
def help_menu():
    """Print the command-line usage summary (in Spanish) to stdout."""
    usage_lines = (
        "\nOPCIONES",
        "\n",
        "(*) Entrenar modelo de operaciones con imagenes en directorio 'dir':",
        "\tpython main.py -eo dir ...",
        "\n(*) Entrenar modelo de numeros con imagenes de la base de datos mnist:",
        "\tpython main.py -en ...",
        "\n(*) Predecir operacion en fichero 'fich.jpg' con 'n' numero de elementos (tal que '3+3' -> 3 elementos):",
        "\tpython main.py ... -op fich.jpg n",
        "\n(*) Predecir operando en fichero 'fich.jpg':",
        "\tpython main.py ... -operand fich.jpg",
        "\n(*) Predecir numero en fichero 'fich.jpg':",
        "\tpython main.py ... -num fich.jpg",
        "\n(*) Predecir operacion en fichero 'fich.jpg' con 'n' numero de elementos y resolver:",
        "\tpython main.py ... -op fich.jpg n -r",
    )
    for usage_line in usage_lines:
        print(usage_line)
def get_operator_fn(op):
    """Map a recognised operator symbol to its arithmetic function.

    Raises KeyError for an unknown symbol.
    """
    return {
        '+': operator.add,
        '-': operator.sub,
        'x': operator.mul,
        # FIX: operator.div existed only in Python 2 (classic division) and
        # raised AttributeError on Python 3; truediv is the modern equivalent.
        # Note py2 classic division floored int/int results; truediv does not.
        '/': operator.truediv,
    }[op]
def solve_operation(pred):
    """Evaluate a recognised arithmetic expression.

    `pred` alternates numbers (even indices) and operator symbols (odd
    indices), e.g. ['3', '+', '4'].  Multiplication and division are
    applied first (left to right), then addition and subtraction.
    """
    num = []
    ops = []
    j = 0
    while j < len(pred):
        if j % 2 == 0:
            num.append(int(pred[j]))
        else:
            ops.append(get_operator_fn(pred[j]))
        j += 1
    # Recognise whichever division function get_operator_fn returned
    # (operator.div exists only on Python 2).
    divfns = [operator.truediv]
    if hasattr(operator, 'div'):
        divfns.append(operator.div)
    # Multiplicacion y division tienen prioridad
    j = 0
    while j < len(ops):
        if ops[j] == operator.mul or ops[j] in divfns:
            num[j] = ops[j](num[j], num[j + 1])
            # FIX: the original used list.remove(), which deletes the first
            # element with a matching VALUE, not the intended position, so
            # e.g. '5 - 2 x 5' deleted the leading 5 instead of the operand
            # just consumed.  Delete by index instead.
            del ops[j]
            del num[j + 1]
        else:
            j += 1
    # Suma y resta
    while len(num) != 1:
        num[0] = ops[0](num[0], num[1])
        del ops[0]
        del num[1]
    return num[0]
def main():
    """Command-line driver: train the models and/or run predictions.

    Flags (see help_menu): -eo/-en train the operator/number models;
    -op/-operand/-num select what to predict; -r solves the recognised
    operation.  Returns 1 in all paths (the original convention is kept).

    NOTE(review): the prediction options assume the corresponding training
    flag was given earlier in argv; otherwise modelo_operaciones /
    modelo_numeros are undefined (NameError) — confirm intended usage.
    """
    if(len(sys.argv)<2):
        help_menu()
        return 1
    if(sys.argv[1] == "-h"):
        help_menu()
        return 1
    # First pass: training options.
    i=1
    while i<len(sys.argv):
        if(sys.argv[i] == "-eo"):
            print("\nEntrenando modelo_operaciones\n")
            modelo_operaciones=crear_modelo_operaciones(sys.argv[i+1]) #buscar_y_descargar_imagenes
        if(sys.argv[i] == "-en"):
            print("\nCargando base de datos con imagenes de numeros de mnist\n")
            numeros_data, numeros_labels = cargar_numeros_desde_mnist()
            print("\nCargada base de datos mnist")
            print("\nEntrenando modelo_numeros\n")
            modelo_numeros = crear_modelo_numeros(numeros_data, numeros_labels)
        i+=1
    # Second pass: prediction options (first match wins).
    option = "NONE"
    i=1
    while i<len(sys.argv):
        if(sys.argv[i] == "-op"):
            #sys.argv[i+1] = name of image
            #sys.argv[i+2] = number of elements to be recognized
            pred = []
            images=split_image_into(sys.argv[i+1], int(sys.argv[i+2]))
            j=0
            while j<int(sys.argv[i+2]):
                # Even positions are digits, odd positions are operators.
                if(j%2 == 0):
                    pred.append(prediccion_numeros(modelo_numeros,images[j]))
                else:
                    pred.append(prediccion_operacion(modelo_operaciones, images[j]))
                j+=1
            option = "op"
            break
        elif(sys.argv[i] == "-operand"):
            images = sys.argv[i+1]
            pred = prediccion_operacion(modelo_operaciones, cv2.imread(images))
            option = "operand"
            break
        elif(sys.argv[i] == "-num"):
            images = sys.argv[i+1]
            pred = prediccion_numeros(modelo_numeros, cv2.imread(images))
            option = "num"
            break
        i+=1
    # Report the prediction(s).
    if(option == "NONE"):
        print("No se ha seleccionado ninguna opcion. No se va a predecir nada")
    if(option == "op"):
        i=0
        while i<len(pred):
            print(pred[i])
            i+=1
    if(option == "operand"):
        print("Prediccion de operacion: " + pred)
    if(option == "num"):
        print("Prediccion de numero: " + str(pred))
    # Opcion de resolver operacion
    i=1
    result = None
    while i<len(sys.argv):
        if(option == "op") and (sys.argv[i]=="-r"):
            result=solve_operation(pred)
            break
        i+=1
    if (result != None):
        print("=")
        # FIX: was the Python 2-only statement `print result`; the function
        # call form works on both Python 2 and 3.
        print(result)
    return 1
main()
| StarcoderdataPython |
8091639 | <filename>RoboOps/RoboOps.py
import subprocess
from pathlib import Path
from robot.api import logger
from robot.api.deco import library, keyword
from robot.libraries.BuiltIn import BuiltIn
from robot.running.model import TestCase
from robot.result.model import TestCase as TestCaseResult
import shlex
class ErrorsAreFatal:
    """Robot Framework listener (API v3) that short-circuits a suite on failure.

    Once any test fails, every subsequent test has its keywords stripped so
    nothing else executes, and each is reported as failed with a skip message.
    """

    ROBOT_LISTENER_API_VERSION = 3

    def __init__(self) -> None:
        self.test_failed: bool = False

    def start_test(self, test: TestCase, result: TestCaseResult) -> None:
        logger.console("")  # move to a fresh console line before the test starts
        if not self.test_failed:
            return
        # A previous test failed: empty this test so that nothing runs.
        test.keywords.clear()

    def end_test(self, test: TestCase, result: TestCaseResult) -> None:
        if self.test_failed:
            result.message = "Skipped execution due previous errors"
            result.status = "FAIL" # todo: make it "SKIP" when using RF 4.0
        self.test_failed = self.test_failed or not result.passed

    def close(self) -> None:
        # Reset state between runs; unit tests otherwise observed leaked state.
        self.test_failed = False
@library(scope="TEST SUITE", version="1.0", listener=ErrorsAreFatal())
class RoboOps:
    """Library for creating, sharing and running DevOps tasks easily and efficiently.
    When is imported, any failure within a suite is fatal - preventing other steps from execution and failing whole run.
    == Example ==
    | *** Settings ***
    | Library RoboOps
    | *** Variables ***
    | &{install python env} command=poetry install
    | &{unit tests} command=poetry run coverage run --source=RoboOps -m pytest .
    | &{report coverage} command=poetry run coverage report -m --fail-under=80
    | &{generate wheel} command=poetry build
    | *** Tasks ***
    | Unit Test Stage
    | Roboops Run Command &{install python env}
    | Roboops Run Command &{unit tests}
    | ${coverage} Roboops Run Command &{report coverage}
    | Create File coverage.log ${coverage.stdout.decode()}
    | Roboops Save File Artifact coverage.log coverage.log
    |
    | Build Package Stage
    | Roboops Run Command &{generate wheel}
    """
    def __init__(self, artifacts_dir: str = "artifacts"):
        """RoboOps library can take below optional arguments:
        - ``artifacts_dir`` <str>
        Points to directory where artifacts will be stored."""
        self.artifacts_dir = Path(artifacts_dir)
        # Create the artifacts directory up front; idempotent if it already exists.
        self.artifacts_dir.mkdir(parents=True, exist_ok=True)
    @keyword
    def roboops_run_command(
        self,
        command: str,
        shell: bool = False,
        cwd: "str | None" = None,
        ignore_rc: bool = False,
    ) -> subprocess.CompletedProcess:
        """Runs given command using subprocess.run and returns result ``subprocess.CompletedProcess`` object.
        Arguments:
        - ``command`` <str>:
        Command to be executed.
        - ``shell`` <bool>:
        Specifies if command should be executed in separate shell (see subprocess.run documentation for more details).
        Defaults to ``False``
        - ``cwd`` <str>:
        Sets working directory for given command.
        Defaults to ``None``
        - ``ignore_rc`` <bool>:
        Ignore return code.
        By default if return code of executed command is other than 0, then keyword fails.
        """
        logger.info(
            f"running: '{command}' {'in shell' if shell else ''}", also_console=True
        )
        if not shell:
            # shlex.split keeps quoted arguments intact when subprocess gets a list.
            command = shlex.split(command)  # type: ignore
        result = subprocess.run(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=shell,
            cwd=cwd,
        )
        # Always surface the command output in the Robot log.
        logger.info(result.stdout.decode())
        if result.stderr:
            logger.error(result.stderr.decode())
        if not ignore_rc:
            # Raises CalledProcessError (failing the keyword) on non-zero rc.
            result.check_returncode()
        return result
    @keyword
    def roboops_save_file_artifact(self, source: Path, name: "str | None" = None) -> None:
        """
        Moves given file to ``artifacts_dir`` with given name and add to ``Artifacts`` metadata in report for easy view/download
        Arguments:
        - ``source`` <Path>: Path to file
        - ``name`` <str>: new name of the file. If not provided, original name will be used.
        """
        source = Path(source)
        name = name if name else source.name
        destination = self.artifacts_dir / name
        # NOTE(review): Path.rename fails across filesystems — assumes
        # artifacts_dir is on the same volume as source; shutil.move would be safer.
        source.rename(destination)
        self._add_artifact_to_suite_metadata(name, destination)
    def _add_artifact_to_suite_metadata(self, name: str, file_path: Path) -> None:
        # Robot metadata syntax: "[target|label]" renders as a link in the report.
        entry = f"- [{file_path}|{name}]\n"
        BuiltIn().set_suite_metadata("artifacts", entry, append=True, top=True)
| StarcoderdataPython |
338989 | <filename>lib/silfont/scripts/psfsyncmeta.py<gh_stars>1-10
#!/usr/bin/env python
from __future__ import unicode_literals
'''Sync metadata accross a family of fonts assuming standard UFO file naming'''
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2017 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = '<NAME>'
from silfont.core import execute
from datetime import datetime
import silfont.ufo as UFO
import os
from xml.etree import cElementTree as ET
# Command-line argument specification consumed by silfont.core.execute():
# each entry is (name-or-flag, argparse kwargs, pysilfont-specific kwargs
# such as the framework 'type' and default output file suffix).
argspec = [
    ('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
    ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_sync.log'}),
    ('-s','--single', {'help': 'Sync single UFO against master', 'action': 'store_true', 'default': False},{}),
    ('-m','--master', {'help': 'Master UFO to sync single UFO against', 'nargs': '?' },{'type': 'infont', 'def': None}),
    ('-r','--reportonly', {'help': 'Report issues but no updating', 'action': 'store_true', 'default': False},{}),
    ('-n','--new', {'help': 'append "_new" to file/ufo names', 'action': 'store_true', 'default': False},{}),
    ('--normalize', {'help': 'output all the fonts to normalize them', 'action': 'store_true', 'default': False},{}),
    ]
def doit(args) :
    """Sync metadata across a family of UFO fonts against a master font.

    Compares fontinfo.plist and selected lib.plist fields of each style in
    the family against the master (normally the Regular), logging every
    difference and, unless --reportonly is set, updating the fonts on disk.
    """
    standardstyles = ["Regular", "Italic", "Bold", "BoldItalic"]
    finfoignore = ["openTypeHeadCreated", "openTypeOS2Panose", "postscriptBlueScale", "postscriptBlueShift",
        "postscriptBlueValues", "postscriptOtherBlues", "postscriptStemSnapH", "postscriptStemSnapV", "postscriptForceBold"]
    libfields = ["public.postscriptNames", "public.glyphOrder", "com.schriftgestaltung.glyphOrder"]
    font = args.ifont
    logger = args.logger
    singlefont = args.single
    mfont = args.master
    newfile = "_new" if args.new else ""
    reportonly = args.reportonly
    updatemessage = " to be updated: " if reportonly else " updated: "
    params = args.paramsobj
    precision = font.paramset["precision"]
    # Increase screen logging level to W unless specific level supplied on command-line
    if not(args.quiet or "scrlevel" in params.sets["command line"]) : logger.scrlevel = "W"
    # Process UFO name: standard naming is <family>-<style>.ufo
    (path,base) = os.path.split(font.ufodir)
    (base,ext) = os.path.splitext(base)
    if '-' not in base : logger.log("Non-standard UFO name - must be <family>-<style>", "S")
    (family,style) = base.split('-')
    styles = [style]
    fonts = {}
    fonts[style] = font
    # Process single and master settings
    if singlefont :
        if mfont :
            mastertext = "Master" # Used in log messages
        else : # Check against Regular font from same family
            mfont = openfont(params, path, family, "Regular")
            if mfont is None : logger.log("No regular font to check against - use -m to specify master font", "S")
            mastertext = "Regular"
        fonts["Regular"] = mfont
    else : # Supplied font must be Regular
        if mfont : logger.log("-m --master must only be used with -s --single", "S")
        if style != "Regular" : logger.log("Must specify a Regular font unless -s is used", "S")
        mastertext = "Regular"
        mfont = font
    # Check for required fields in master font
    mfinfo = mfont.fontinfo
    if "familyName" in mfinfo :
        spacedfamily = mfinfo["familyName"][1].text
    else:
        logger.log("No familyName field in " + mastertext, "S")
    if "openTypeNameManufacturer" in mfinfo :
        manufacturer = mfinfo["openTypeNameManufacturer"][1].text
    else:
        logger.log("No openTypeNameManufacturer field in " + mastertext, "S")
    mlib = mfont.lib
    # Open the remaining fonts in the family
    if not singlefont :
        for style in standardstyles :
            if not style in fonts :
                fonts[style] = openfont(params, path, family, style) # Will return None if font does not exist
                if fonts[style] is not None : styles.append(style)
    # Process fonts
    psuniqueidlist = []
    fieldscopied = False
    for style in styles :
        font = fonts[style]
        if font.UFOversion != "2" : logger.log("This script only works with UFO 2 format fonts","S")
        fontname = family + "-" + style
        spacedstyle = "Bold Italic" if style == "BoldItalic" else style
        spacedname = spacedfamily + " " + spacedstyle
        logger.log("************ Processing " + fontname, "P")
        ital = True if "Italic" in style else False
        bold = True if "Bold" in style else False
        # Process fontinfo.plist
        finfo=font.fontinfo
        fieldlist = list(set(finfo) | set(mfinfo)) # Need all fields from both to detect missing fields
        fchanged = False
        for field in fieldlist:
            action = None; issue = ""; newval = ""
            if field in finfo :
                elem = finfo[field][1]
                tag = elem.tag
                text = elem.text
                if text is None : text = ""
                if tag == "real" : text = processnum(text,precision)
            # Field-specific actions
            if field not in finfo :
                if field not in finfoignore : action = "Copyfield"
            elif field == "italicAngle" :
                if ital and text == "0" :
                    issue = "is zero"
                    action = "Warn"
                if not ital and text != "0" :
                    issue = "is non-zero"
                    newval = 0
                    action = "Update"
            elif field == "openTypeNameUniqueID" :
                newval = manufacturer + ": " + spacedname + ": " + datetime.now().strftime("%Y")
                if text != newval :
                    issue = "Incorrect value"
                    action = "Update"
            elif field == "openTypeOS2WeightClass" :
                if bold and text != "700" :
                    issue = "is not 700"
                    newval = 700
                    action = "Update"
                if not bold and text != "400" :
                    issue = "is not 400"
                    newval = 400
                    action = "Update"
            elif field == "postscriptFontName" :
                if text != fontname :
                    newval = fontname
                    issue = "Incorrect value"
                    action = "Update"
            elif field == "postscriptFullName" :
                if text != spacedname :
                    newval = spacedname
                    issue = "Incorrect value"
                    action = "Update"
            elif field == "postscriptUniqueID" :
                if text in psuniqueidlist :
                    issue = "has same value as another font: " + text
                    action = "Warn"
                else :
                    psuniqueidlist.append(text)
            elif field == "postscriptWeightName" :
                newval = 'bold' if bold else 'regular'
                if text != newval :
                    issue = "Incorrect value"
                    action = 'Update'
            elif field == "styleMapStyleName" :
                if text != spacedstyle.lower() :
                    newval = spacedstyle.lower()
                    issue = "Incorrect value"
                    action = "Update"
            elif field in ("styleName", "openTypeNamePreferredSubfamilyName") :
                if text != spacedstyle :
                    newval = spacedstyle
                    issue = "Incorrect value"
                    action = "Update"
            elif field in finfoignore :
                action = "Ignore"
            # Warn for fields in this font but not master
            elif field not in mfinfo :
                issue = "is in " + spacedstyle + " but not in " + mastertext
                action = "Warn"
            # for all other fields, sync values from master
            else :
                melem = mfinfo[field][1]
                mtag = melem.tag
                mtext = melem.text
                if mtext is None : mtext = ""
                # FIX: was `if mtag is 'real'` - identity comparison against a
                # string literal, which is not guaranteed to match (and is a
                # SyntaxWarning on modern Python); use equality like the other
                # tag checks above.
                if mtag == 'real' : mtext = processnum(mtext,precision)
                if tag in ("real", "integer", "string") :
                    if mtext != text :
                        issue = "does not match " + mastertext + " value"
                        newval = mtext
                        action = "Update"
                # FIX: was `tag in ("true, false")` - a substring test against
                # the single string "true, false" that only matched "true" and
                # "false" by accident; use a proper tuple membership test.
                elif tag in ("true", "false") :
                    if tag != mtag :
                        issue = "does not match " + mastertext + " value"
                        action = "FlipBoolean"
                elif tag == "array" : # Assume simple array with just values to compare
                    marray = mfinfo.getval(field)
                    array = finfo.getval(field)
                    if array != marray: action = "CopyArray"
                else : logger.log("Non-standard fontinfo field type in " + fontname, "X")
            # Now process the actions, create log messages etc
            if action is None or action == "Ignore" :
                pass
            elif action == "Warn" :
                logger.log(field + " needs manual correction: " + issue, "W")
            elif action == "Error" :
                logger.log(field + " needs manual correction: " + issue, "E")
            elif action in ("Update", "FlipBoolean", "Copyfield", "CopyArray") : # Updating actions
                fchanged = True
                message = field + updatemessage
                if action == "Update" :
                    message = message + issue + " Old: '" + text + "' New: '" + str(newval) + "'"
                    elem.text = newval
                elif action == "FlipBoolean" :
                    newval = "true" if tag == "false" else "false"
                    message = message + issue + " Old: '" + tag + "' New: '" + newval + "'"
                    finfo.setelem(field, ET.fromstring("<" + newval + "/>"))
                elif action == "Copyfield" :
                    message = message + "is missing so will be copied from " + mastertext
                    fieldscopied = True
                    finfo.addelem(field, ET.fromstring(ET.tostring(mfinfo[field][1])))
                elif action == "CopyArray" :
                    message = message + "Some values different Old: " + str(array) + " New: " + str(marray)
                    finfo.setelem(field, ET.fromstring(ET.tostring(melem)))
                logger.log(message, "W")
            else:
                logger.log("Uncoded action: " + action + " - oops", "X")
        # Process lib.plist - currently just public.postscriptNames and glyph order fields which are all simple dicts or arrays
        lib = font.lib
        lchanged = False
        for field in libfields:
            # Check the values
            action = None; issue = ""; newval = ""
            if field in mlib:
                if field in lib:
                    if lib.getval(field) != mlib.getval(field): # will only work for arrays or dicts with simple values
                        action = "Updatefield"
                else:
                    action = "Copyfield"
            else:
                # FIX: was `field == ("public.GlyphOrder", "public.postscriptNames")`,
                # comparing a string to a tuple (always False, so "Error" was
                # unreachable).  Intent appears to be a membership test on the
                # two public.* fields; also corrects the "GlyphOrder"
                # capitalisation, which matched nothing in libfields.
                action = "Error" if field in ("public.glyphOrder", "public.postscriptNames") else "Warn"
                issue = field + " not in " + mastertext + " lib.plist"
            # Process the actions, create log messages etc
            if action is None or action == "Ignore":
                pass
            elif action == "Warn":
                logger.log(field + " needs manual correction: " + issue, "W")
            elif action == "Error":
                logger.log(field + " needs manual correction: " + issue, "E")
            elif action in ("Updatefield", "Copyfield"): # Updating actions
                lchanged = True
                message = field + updatemessage
                if action == "Copyfield":
                    message = message + "is missing so will be copied from " + mastertext
                    lib.addelem(field, ET.fromstring(ET.tostring(mlib[field][1])))
                elif action == "Updatefield":
                    message = message + "Some values different"
                    lib.setelem(field, ET.fromstring(ET.tostring(mlib[field][1])))
                logger.log(message, "W")
            else:
                logger.log("Uncoded action: " + action + " - oops", "X")
        # Now update on disk
        if not reportonly:
            if args.normalize:
                font.write(os.path.join(path, family + "-" + style + newfile + ".ufo"))
            else: # Just update fontinfo and lib
                if fchanged:
                    filen = "fontinfo" + newfile + ".plist"
                    logger.log("Writing updated fontinfo to " + filen, "P")
                    exists = True if os.path.isfile(os.path.join(font.ufodir, filen)) else False
                    UFO.writeXMLobject(finfo, font.outparams, font.ufodir, filen, exists, fobject=True)
                if lchanged:
                    filen = "lib" + newfile + ".plist"
                    logger.log("Writing updated lib.plist to " + filen, "P")
                    exists = True if os.path.isfile(os.path.join(font.ufodir, filen)) else False
                    UFO.writeXMLobject(lib, font.outparams, font.ufodir, filen, exists, fobject=True)
    if fieldscopied :
        message = "After updating, UFOsyncMeta will need to be re-run to validate these fields" if reportonly else "Re-run UFOsyncMeta to validate these fields"
        logger.log("*** Some fields were missing and so copied from " + mastertext + ". " + message, "P")
    return
def openfont(params, path, family, style) :
    """Open the UFO for family/style under path, or return None if it does not exist."""
    ufodir = os.path.join(path, family + "-" + style + ".ufo")
    if not os.path.isdir(ufodir):
        return None
    return UFO.Ufont(ufodir, params=params)
def processnum(text, precision) :
    """Round a numeric string the same way UFO normalization will.

    A precision of None leaves the text untouched; otherwise the value is
    rounded to that many decimal places and a trailing '.0' is dropped.
    """
    if precision is None:
        return text
    rounded = round(float(text), precision)
    if rounded == int(rounded):
        rounded = int(rounded)  # drop the trailing decimal .0
    return str(rounded)
def cmd():
    """Console-script entry point: run doit() through the pysilfont framework."""
    execute("UFO", doit, argspec)


if __name__ == "__main__":
    cmd()
| StarcoderdataPython |
5100338 | <filename>libraries/AceTime/tools/tzcompiler.py
#!/usr/bin/env python3
#
# Copyright 2018 <NAME>
#
# MIT License.
"""
Main driver for TZ Database compiler which parses the TZ Database files, and
generates zoneinfo files and validation datasets for unit tests.
"""
import argparse
import logging
import sys
from extractor import Extractor
from transformer import Transformer
from argenerator import ArduinoGenerator
from pygenerator import PythonGenerator
from ingenerator import InlineGenerator
from zonelistgenerator import ZoneListGenerator
from validator import Validator
from bufestimator import BufSizeEstimator
from tdgenerator import TestDataGenerator
from arvalgenerator import ArduinoValidationGenerator
from pyvalgenerator import PythonValidationGenerator
def main():
    """Parse the TZ Database files and run the selected data pipeline.

    Depending on --action this generates zonedb source files, unit-test
    validation data, a zone list, or validates transition buffer sizes and
    test data against pytz.

    Usage:
        tzcompiler.py [flags...]
    """
    # Configure command line flags.
    parser = argparse.ArgumentParser(description='Generate Zone Info.')

    # Extractor
    parser.add_argument(
        '--input_dir', help='Location of the input directory', required=True)

    # Transformer
    parser.add_argument(
        '--start_year',
        help='Start year of Zone Eras (default: 2000)',
        type=int,
        default=2000)
    parser.add_argument(
        '--until_year',
        help='Until year of Zone Eras (default: 2038)',
        type=int,
        default=2038)
    parser.add_argument(
        '--granularity',
        help='Truncate UNTIL, AT, SAVE and RULES fields to ' +
        'this many seconds (default: 60)',
        type=int)
    parser.add_argument(
        '--until_at_granularity',
        help='Truncate UNTIL and AT fields to this many seconds ' +
        '(default: --granularity)',
        type=int)
    parser.add_argument(
        '--offset_granularity',
        help='Truncate SAVE, RULES (offset) fields to this many seconds' +
        '(default: --granularity)',
        type=int)
    parser.add_argument(
        '--strict',
        help='Remove zones and rules not aligned at granularity time boundary',
        action='store_true',
        default=False)

    # Flags for the TestDataGenerator. If not given (default 0), then
    # the validation_start_year will be set to start_year, and the
    # validation_until_year will be set to until_year.
    #
    # pytz cannot handle dates after the end of 32-bit Unix time_t type
    # (2038-01-19T03:14:07Z), see
    # https://answers.launchpad.net/pytz/+question/262216, so the
    # validation_until_year cannot be greater than 2038.
    parser.add_argument(
        '--validation_start_year',
        help='Start year of ZoneSpecifier validation (default: start_year)',
        type=int,
        default=0)
    parser.add_argument(
        '--validation_until_year',
        help='Until year of ZoneSpecifier validation (default: 2038)',
        type=int,
        default=0)

    # Data pipeline selectors:
    #
    #   zonedb: generate zonedb files
    #   unittest: generate unit test validation_data.* files
    #   zonelist: generate zones.txt, list of relavant zones
    #   validate: validate both buffer size and validation data
    #   validate_buffer_size: determine max sizes of internal buffers
    #   validate_test_data: compare pytz and zone_specifierusing validation data
    parser.add_argument(
        '--action',
        help='Data pipeline (zonedb|unittest|validate|validate_buffer_size|'
        + 'validate_test_data)',
        required=True)

    # Language selector (for --action unittest or zonedb)
    #   python: generate Python files
    #   arduino: generate C++ files for Arduino
    parser.add_argument(
        '--language',
        help='Target language (arduino|python)')

    # Scope (of the zones in the database):
    #   basic: 241 of the simpler time zones for BasicZoneSpecifier
    #   extended: all 348 time zones for ExtendedZoneSpecifier
    parser.add_argument(
        '--scope',
        help='Size of the generated database (basic|extended)',
        required=True)

    # C++ namespace names for language=arduino. If not specified, it will
    # automatically be set to 'zonedb' or 'zonedbx' depending on the 'scope'.
    parser.add_argument(
        '--db_namespace',
        help='C++ namespace for the zonedb files (default: zonedb or zonedbx)')

    # Enable zone_strings.{h,cpp} if requested
    parser.add_argument(
        '--generate_zone_strings',
        help='Generate Arduino zone_strings.{h,cpp} files',
        action='store_true')

    # Options for file generators
    parser.add_argument(
        '--tz_version', help='Version string of the TZ files', required=True)
    parser.add_argument(
        '--output_dir', help='Location of the output directory')

    # Validator
    parser.add_argument('--zone', help='Name of time zone to validate')
    parser.add_argument('--year', help='Year to validate', type=int)
    parser.add_argument(
        '--viewing_months',
        help='Number of months to use for calculations (13, 14, 36)',
        type=int,
        default=14)
    parser.add_argument(
        '--validate_dst_offset',
        help='Validate the DST offset as well as the total UTC offset',
        action="store_true")
    parser.add_argument(
        '--debug_validator',
        help='Enable debug output from Validator',
        action="store_true")
    parser.add_argument(
        '--debug_specifier',
        help='Enable debug output from ZoneSpecifier',
        action="store_true")
    parser.add_argument(
        '--in_place_transitions',
        help='Use in-place Transition array to determine Active Transitions',
        action="store_true")
    parser.add_argument(
        '--optimize_candidates',
        help='Optimize the candidate transitions',
        action='store_true')

    # Parse the command line arguments
    args = parser.parse_args()

    # Configure logging. This should normally be executed after the
    # parser.parse_args() because it allows us set the logging.level using a
    # flag.
    logging.basicConfig(level=logging.INFO)

    # Set the defaults for validation_start_year and validation_until_year
    # if they were not specified.
    validation_start_year = args.start_year if args.validation_start_year == 0 \
        else args.validation_start_year
    validation_until_year = args.until_year if args.validation_until_year == 0 \
        else args.validation_until_year

    # How the script was invoked
    invocation = ' '.join(sys.argv)

    # Determine zonedb namespace
    if args.db_namespace:
        db_namespace = args.db_namespace
    else:
        db_namespace = ''
        if args.scope == 'basic': db_namespace = 'zonedb'
        if args.scope == 'extended': db_namespace = 'zonedbx'

    # Define scope-dependent granularity if not overridden by flag
    if args.granularity:
        until_at_granularity = args.granularity
        offset_granularity = args.granularity
    else:
        if args.until_at_granularity:
            until_at_granularity = args.until_at_granularity
        else:
            until_at_granularity = 60

        if args.offset_granularity:
            offset_granularity = args.offset_granularity
        else:
            if args.scope == 'basic':
                offset_granularity = 900
            else:
                offset_granularity = 60

    logging.info('Using UNTIL/AT granularity: %d', until_at_granularity)
    logging.info('Using RULES/SAVE (offset) granularity: %d',
        offset_granularity)

    # Extract the TZ files
    logging.info('======== Extracting TZ Data files')
    extractor = Extractor(args.input_dir)
    extractor.parse()
    extractor.print_summary()

    # Transform the TZ zones and rules
    logging.info('======== Transforming Zones and Rules')
    logging.info('Extracting years [%d, %d)', args.start_year, args.until_year)
    transformer = Transformer(extractor.zones_map, extractor.rules_map,
        extractor.links_map, args.language, args.scope, args.start_year,
        args.until_year, until_at_granularity, offset_granularity, args.strict)
    transformer.transform()
    transformer.print_summary()

    # Generate internal versions of zone_infos and zone_policies
    # so that ZoneSpecifier can be created.
    logging.info('======== Generating inlined zone_infos and zone_policies')
    inline_generator = InlineGenerator(
        transformer.zones_map, transformer.rules_map)
    (zone_infos, zone_policies) = inline_generator.generate_maps()
    logging.info('zone_infos=%d; zone_policies=%d', len(zone_infos),
        len(zone_policies))

    # Generate the buf_size estimates for each zone, between start_year and
    # until_year.
    logging.info('======== Estimating transition buffer sizes')
    logging.info('Checking years in [%d, %d)', args.start_year, args.until_year)
    estimator = BufSizeEstimator(zone_infos, zone_policies, args.start_year,
        args.until_year)
    (buf_sizes, max_size) = estimator.estimate()
    logging.info('Num zones=%d; Max buffer size=%d', len(buf_sizes), max_size)

    # Validate the zone_infos and zone_policies if requested
    validate_buffer_size = False
    validate_test_data = False
    if args.action == 'validate':
        validate_buffer_size = True
        validate_test_data = True
    if args.action == 'validate_buffer_size':
        validate_buffer_size = True
    if args.action == 'validate_test_data':
        validate_test_data = True

    if args.action == 'zonedb':
        # Create the Python or Arduino files if requested
        if not args.output_dir:
            logging.error('Must provide --output_dir to generate zonedb files')
            sys.exit(1)
        if args.language == 'python':
            logging.info('======== Creating Python zonedb files')
            generator = PythonGenerator(invocation, args.tz_version,
                Extractor.ZONE_FILES,
                transformer.zones_map,
                transformer.rules_map,
                transformer.all_removed_zones,
                transformer.all_removed_policies,
                transformer.all_notable_zones,
                transformer.all_notable_policies)
            generator.generate_files(args.output_dir)
        elif args.language == 'arduino':
            logging.info('======== Creating Arduino zonedb files')
            generator = ArduinoGenerator(
                invocation=invocation,
                tz_version=args.tz_version,
                tz_files=Extractor.ZONE_FILES,
                scope=args.scope,
                db_namespace=db_namespace,
                generate_zone_strings=args.generate_zone_strings,
                start_year=args.start_year,
                until_year=args.until_year,
                zones_map=transformer.zones_map,
                links_map=transformer.links_map,
                rules_map=transformer.rules_map,
                removed_zones=transformer.all_removed_zones,
                removed_links=transformer.all_removed_links,
                removed_policies=transformer.all_removed_policies,
                notable_zones=transformer.all_notable_zones,
                notable_links=transformer.all_notable_links,
                notable_policies=transformer.all_notable_policies,
                format_strings=transformer.format_strings,
                zone_strings=transformer.zone_strings,
                buf_sizes=buf_sizes)
            generator.generate_files(args.output_dir)
        else:
            raise Exception("Unrecognized language '%s'" % args.language)
    elif args.action == 'zonelist':
        generator = ZoneListGenerator(
            invocation=invocation,
            tz_version=args.tz_version,
            tz_files=Extractor.ZONE_FILES,
            scope=args.scope,
            zones_map=transformer.zones_map)
        generator.generate_files(args.output_dir)
    elif args.action == 'unittest':
        logging.info('======== Generating unit test files')

        # Generate test data for unit test.
        logging.info('Generating test data for years in [%d, %d)',
            validation_start_year, validation_until_year)
        data_generator = TestDataGenerator(args.scope, zone_infos,
            zone_policies, validation_start_year, validation_until_year)
        (test_data, num_items) = data_generator.create_test_data()
        logging.info('Num zones=%d; Num test items=%d', len(test_data),
            num_items)

        # Generate validation data files
        logging.info('Generating test validation files')
        if args.language == 'arduino':
            arval_generator = ArduinoValidationGenerator(
                invocation, args.tz_version, db_namespace, test_data,
                num_items, args.scope)
            arval_generator.generate_files(args.output_dir)
        elif args.language == 'python':
            pyval_generator = PythonValidationGenerator(
                invocation, args.tz_version, test_data, num_items)
            pyval_generator.generate_files(args.output_dir)
        else:
            raise Exception("Unrecognized language '%s'" % args.language)
    elif validate_buffer_size or validate_test_data:
        validator = Validator(
            zone_infos=zone_infos,
            zone_policies=zone_policies,
            viewing_months=args.viewing_months,
            validate_dst_offset=args.validate_dst_offset,
            debug_validator=args.debug_validator,
            debug_specifier=args.debug_specifier,
            zone_name=args.zone,
            year=args.year,
            start_year=validation_start_year,
            until_year=validation_until_year,
            in_place_transitions=args.in_place_transitions,
            optimize_candidates=args.optimize_candidates)

        if validate_buffer_size:
            logging.info('======== Validating transition buffer sizes')
            validator.validate_buffer_size()

        if validate_test_data:
            logging.info('======== Validating test data')
            validator.validate_test_data()
    else:
        # FIX: the original message named non-existent flags
        # '(--zonedb, --validate, --unittest)'; these are --action values.
        logging.error(
            'One of --action (zonedb|zonelist|unittest|validate|'
            'validate_buffer_size|validate_test_data) must be given')
        sys.exit(1)

    logging.info('======== Finished processing TZ Data files.')
# Script entry point: only run the pipeline when executed directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3553634 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
ResNet50 models base on Keras.
This procedure is suitable for the situation that you need to quickly train
a ResNet50 network as a classification.
The program will do fine turning on the weights pretrained by imageNet.
If you want to start training from scratch on the specified data set,
please use resNet50.py
# Reference paper
- [Deep Residual Learning for Image Recognition] (https://arxiv.org/abs/1512.03385)
# weights download address for classic CNN: 'https://github.com/fchollet/deep-learning-models/releases/'
@author: zyb_as
"""
import os
import sys
import keras
from keras.applications.resnet50 import ResNet50
from keras.models import Model
from keras.models import load_model
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import Callback
#-----------------------------------------------------------------------------------------
#--------------------------------parameter defination-------------------------------------
#-----------------------------------------------------------------------------------------
"""
# Before applying this demo to your new classification problem
# please check the settings of the following parameters.
# Besides, don't forget to check the hyperparameters
# weight_load_path load the weights pretrained on imageNet
if the specified file doesn't exist, the program will start download weights from Internet
# weight_save_path the path to save the weights after training
# train_set_path the root path of the training set. Each category should correspond to a folder
# valid_set_path the root path of the validation set. Each category should correspond to a folder
# record_save_path the path to save the training record file
# category_num the category num of the classification problem
"""
# TODO: set basic configuration parameters
weight_load_path = './weights/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weight_save_path = './weights/resnet50_weights_tf_transfor.h5'
train_set_path = 'train_root_path/'
valid_set_path = 'validation_root_path/'
record_save_path = './records'
category_num = 2
batch_size = 32
#-----------------------------------------------------------------------------------------
#--Set CallBack(loss history, early stopiing, model check point, reduce learning rate)----
#-----------------------------------------------------------------------------------------
class LossHistory(Callback):
    """Keras callback that accumulates per-epoch loss/accuracy curves.

    After training, `losses`, `val_losses`, `acc` and `val_acc` hold one
    entry per completed epoch.
    """
    def on_train_begin(self, logs={}):
        # Reset the history buffers at the start of every training run.
        # (The mutable default follows the Keras callback signature.)
        self.losses = []
        self.val_losses = []
        self.acc = []
        self.val_acc = []
    def on_epoch_end(self, batch, logs={}):
        # NOTE(review): the first positional argument is presumably the epoch
        # index (this hook fires per epoch); the name `batch` is misleading --
        # confirm against the Keras Callback API.
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acc.append(logs.get('acc'))
        self.val_acc.append(logs.get('val_acc'))
# record loss history callback
history = LossHistory()
# Callback for early stopping the training
early_stopping = keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0, patience=15, verbose=1, mode='auto')
# set model checkpoint callback (model weights will auto save in weight_save_path)
checkpoint = ModelCheckpoint(weight_save_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', period=1)
# monitor a learning indicator(reduce learning rate when learning effect is stagnant)
reduceLRcallback = ReduceLROnPlateau(monitor='val_acc', factor=0.7, patience=5,
verbose=1, mode='auto', cooldown=0, min_lr=0)
#-----------------------------------------------------------------------------------------
#---------------------------image data generator------------------------------------------
#-----------------------------------------------------------------------------------------
# TODO: try the data augmentation method you want
train_datagen = ImageDataGenerator(rescale=1/255.,
rotation_range = 45,
width_shift_range = 0.2, # degree of horizontal offset(a ratio relative to image width)
height_shift_range = 0.2, # degree of vertical offset(a ratio relatice to image height)
shear_range = 0.2, # the range of shear transformation(a ratio in 0 ~ 1)
zoom_range = 0.25, # degree of random zoom(the zoom range will be [1 - zoom_range, 1 + zoom_range])
horizontal_flip = True, # whether to perform horizontal flip
vertical_flip = True, # whether to perform vertical flip
fill_mode = 'nearest' # mode list: nearest, constant, reflect, wrap
)
val_datagen = ImageDataGenerator(rescale=1/255.)
train_generator = train_datagen.flow_from_directory(
train_set_path,
target_size=(224, 224),
batch_size=batch_size,
class_mode='categorical')
validation_generator = val_datagen.flow_from_directory(
valid_set_path,
target_size=(224, 224),
batch_size=batch_size,
class_mode='categorical')
#-----------------------------------------------------------------------------------------
#--------------------------------continue training----------------------------------------
#-----------------------------------------------------------------------------------------
# load specified weights to continue training(if exist)
if os.path.exists(weight_save_path):
model = load_model(weight_save_path)
# TODO: choose training parameters
epochs = 200
# TODO: choose a optimizer
optimizer = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#lrate = 0.01
#decay = lrate/epochs
#optimizer = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
# compile the model
model.compile(optimizer = optimizer,
loss='categorical_crossentropy',
metrics=['accuracy'])
# number of traning set
train_sample_count = len(train_generator.filenames)
# number of validation set
val_sample_count = len(validation_generator.filenames)
print(train_sample_count, val_sample_count)
# start training(the best model will be automatically save)
model.fit_generator(
train_generator,
steps_per_epoch= int(train_sample_count/batch_size) + 1, # steps_per_epoch defines how many batch in one epoch
epochs=epochs,
validation_data=validation_generator,
validation_steps= int(val_sample_count/batch_size) + 1,
callbacks=[TensorBoard(log_dir=record_save_path), early_stopping, history, checkpoint, reduceLRcallback]
)
sys.exit()
#-----------------------------------------------------------------------------------------
#-------------------------------------build model-----------------------------------------
#-----------------------------------------------------------------------------------------
def add_new_last_layer(base_model, nb_classes):
    """Append a Flatten + softmax classification head to `base_model`.

    # base_model: a headless CNN (here ResNet50 with include_top=False)
    # nb_classes: number of output classes for the new softmax Dense layer
    # returns: a new Model sharing `base_model`'s input tensor
    """
    x = base_model.output
    x = Flatten()(x)
    predictions = Dense(nb_classes, activation='softmax')(x)
    model = Model(inputs = base_model.input, outputs = predictions)
    return model
# load preTrained ResNet50 model without top fully connection layer
if os.path.exists(weight_load_path) == False:
weight_load_path = 'imagenet'
baseModel = ResNet50(weights = weight_load_path, include_top=False, pooling=None, input_shape=(224, 224, 3))
# add new layer
model = add_new_last_layer(baseModel, category_num)
# check model
model.summary()
#-----------------------------------------------------------------------------------------
#---------------------------------transfer learning---------------------------------------
#-----------------------------------------------------------------------------------------
# TODO: choose training parameters
epoch_num = 200
# TODO: choose a optimizer
optimizer = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#lrate = 0.01
#decay = lrate/epochs
#optimizer = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
# compile the model (should be done after recover layers to trainable)
model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics = ['accuracy'])
# number of training set
train_sample_count = len(train_generator.filenames)
# number of validation set
val_sample_count = len(validation_generator.filenames)
print(train_sample_count, val_sample_count)
fitted_model = model.fit_generator(
train_generator,
steps_per_epoch = int(train_sample_count/batch_size),
epochs = epoch_num,
validation_data = validation_generator,
validation_steps = int(val_sample_count/batch_size),
callbacks = [TensorBoard(log_dir=record_save_path), early_stopping, history, checkpoint, reduceLRcallback]
)
| StarcoderdataPython |
3225074 | <filename>derivations/scripts/sympy_addons.py
from sympy.core.symbol import Symbol
from sympy.tensor.indexed import Idx, IndexedBase, Indexed
from sympy.concrete import Product
from sympy.core.compatibility import is_sequence
from sympy.core.singleton import S
from sympy.core.add import Add
from sympy.core.function import Derivative
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.simplify.simplify import simplify
from sympy.concrete import Sum, Product
from sympy.core.containers import Tuple
base_str_total = r'\frac{{\text{{d}} {} }}{{\text{{d}} {} }}'
base_str_partial = r'\frac{{\partial {} }}{{\partial {} }}'
class ImplicitSymbol(Symbol):
    """A SymPy Symbol that implicitly depends on other expressions.

    `functional_form` holds the expression(s) this symbol is a function of.
    Differentiating w.r.t. a variable that appears in the functional form
    produces chain-rule terms whose names use LaTeX total/partial derivative
    notation (`base_str_total` / `base_str_partial`).
    """
    def __new__(cls, name, args, **assumptions):
        obj = Symbol.__new__(cls, name, **assumptions)
        obj.functional_form = args
        # Total-derivative notation for a single argument, partial otherwise.
        obj.base_str = base_str_total if len(obj._get_iter_func()) == 1\
            else base_str_partial
        return obj
    def _get_iter_func(self):
        # Normalize functional_form to an iterable (possibly empty).
        funcof = self.functional_form
        if not funcof:
            return []
        if not hasattr(self.functional_form, '__iter__'):
            funcof = [self.functional_form]
        return funcof
    def _eval_subs(self, old, new):
        # Substitute inside the functional form; rebuild the symbol when an
        # argument changes so the dependency list stays consistent.
        if old == self:
            return new
        funcof = self._get_iter_func()
        for a in funcof:
            if a.has(old):
                new_func = [x if x != a else a.subs(old, new)
                            for x in funcof]
                return self.__class__(str(self), new_func)
        return self
    @property
    def free_symbols(self):
        # The symbol itself plus every free symbol of its arguments.
        return set([self]).union(*[
            x.free_symbols for x in self._get_iter_func()])
    def _eval_diff(self, wrt, **kw_args):
        return self._eval_derivative(wrt)
    def _get_df(self, a, wrt):
        # Build the symbol that denotes d(self)/d(a) in LaTeX form.
        return ImplicitSymbol(self.base_str.format(
            str(self.name), str(a)), args=self.functional_form)
    def _eval_derivative(self, wrt):
        # Chain rule: sum over arguments `a` of d(self)/d(a) * d(a)/d(wrt).
        if self == wrt:
            return S.One
        else:
            funcof = self._get_iter_func()
            i = 0
            l = []
            for a in funcof:
                i += 1
                da = a.diff(wrt)
                if da is S.Zero:
                    continue
                df = self._get_df(a, wrt)
                l.append(df * da)
            return Add(*l)
class MyIndexed(Indexed):
    """Indexed element whose free symbols are just itself, not its indices."""
    @property
    def free_symbols(self):
        return set([self])
class MyIndexedBase(IndexedBase):
    """IndexedBase variant whose indexing returns MyIndexed elements.

    NOTE(review): the `IndexException` raised below is not among this
    module's visible imports -- a rank mismatch would raise NameError
    instead; confirm the import exists elsewhere in the file.
    """
    @property
    def free_symbols(self):
        return set([self])
    def __getitem__(self, indices, **kw_args):
        if is_sequence(indices):
            # Special case needed because M[*my_tuple] is a syntax error.
            if self.shape and len(self.shape) != len(indices):
                raise IndexException("Rank mismatch.")
            return MyIndexed(self,
                             *indices, **kw_args)
        else:
            if self.shape and len(self.shape) != 1:
                raise IndexException("Rank mismatch.")
            return MyIndexed(self,
                             indices, **kw_args)
class IndexedFunc(MyIndexedBase):
    """An IndexedBase whose elements are functions of `functional_form`.

    Indexing an instance yields :class:`IndexedFunc.IndexedFuncValue`,
    which supports chain-rule differentiation through the functional form.
    """
    def __new__(cls, label, args, shape=None, **kw_args):
        obj = IndexedBase.__new__(cls, label, shape=shape, **kw_args)
        obj.functional_form = args
        return obj
    def _eval_simplify(self, ratio=1.7, measure=None):
        # NOTE(review): this calls self._get_iter_func(), which is defined on
        # IndexedFuncValue/ImplicitSymbol but not on this class -- confirm
        # whether this path is ever exercised.
        return self.__class__(self.label,
                              *[simplify(x, ratio=ratio, measure=measure)
                                for x in self._get_iter_func()])
    def _get_subclass(self, *args):
        # Factory hook so subclasses can return their own element type.
        return IndexedFunc.IndexedFuncValue(*args)
    class IndexedFuncValue(MyIndexed):
        """A single indexed element of an IndexedFunc."""
        def __new__(cls, base, *args):
            # args[0] is the functional form; the remaining args are indices.
            functional_form = args[0]
            obj = Indexed.__new__(cls, base, *args)
            obj.functional_form = functional_form
            obj.base_str = base_str_total if len(
                obj._get_iter_func()) == 1 else base_str_partial
            return obj
        @property
        def indices(self):
            # self.args == (base, functional_form, *indices); skip the first two.
            return self.args[2:]
        def _eval_simplify(self, ratio=1.7, measure=None):
            return self.__class__(
                self.base,
                *[simplify(x, ratio=ratio, measure=measure)
                  for x in self._get_iter_func()])
        def _eval_subs(self, old, new):
            # Substitute in the functional form first, then in the indices.
            if self == old:
                return new
            if any(x.has(old) for x in self._get_iter_func()):
                return self.__class__(self.base,
                                      tuple(x.subs(old, new)
                                            for x in self._get_iter_func()),
                                      *self.indices)
            elif any(x.has(old) for x in self.indices):
                return self.__class__(self.base,
                                      self.functional_form,
                                      *tuple(x.subs(old, new) for x in self.indices))
            return self
        def _get_iter_func(self):
            # Normalize functional_form to an iterable.
            funcof = self.functional_form
            if not hasattr(self.functional_form, '__iter__'):
                funcof = [self.functional_form]
            return funcof
        def _get_df(self, a, wrt):
            # Derivative base d(base)/d(a), re-indexed with our own indices.
            return self.base.__class__(self.base_str.format(
                str(self.base), str(a)), args=self.functional_form)[self.indices]
        def _eval_diff(self, wrt, **kw_args):
            return self._eval_derivative(wrt)
        def _eval_derivative(self, wrt):
            if self == wrt:
                return S.One
            elif isinstance(wrt, IndexedFunc.IndexedFuncValue) and wrt.base == self.base:
                # Same base: the derivative is a product of Kronecker deltas
                # over matching index pairs, after sanity-checking the shapes.
                if len(self.indices) != len(wrt.indices):
                    msg = "Different # of indices: d({!s})/d({!s})".format(self,
                                                                           wrt)
                    raise IndexException(msg)
                elif self.functional_form != wrt.functional_form:
                    msg = "Different function form d({!s})/d({!s})".format(self.functional_form,
                                                                           wrt.functional_form)
                    raise IndexException(msg)
                result = S.One
                for index1, index2 in zip(self.indices, wrt.indices):
                    result *= KroneckerDelta(index1, index2)
                return result
            else:
                # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
                i = 0
                l = []
                funcof = self._get_iter_func()
                for a in funcof:
                    i += 1
                    da = a.diff(wrt)
                    if da is S.Zero:
                        continue
                    df = self._get_df(a, wrt)
                    l.append(df * da)
                return Add(*l)
        @property
        def free_symbols(self):
            # Self, plus free symbols contributed by the functional form.
            return super(IndexedFunc.IndexedFuncValue, self).free_symbols.union(*[
                set([x]) if not isinstance(x, IndexedFunc.IndexedFuncValue) else
                x.free_symbols for x in self._get_iter_func()]).union(
                [self])
    def __getitem__(self, indices, **kw_args):
        if is_sequence(indices):
            # Special case needed because M[*my_tuple] is a syntax error.
            if self.shape and len(self.shape) != len(indices):
                raise IndexException("Rank mismatch.")
            return self._get_subclass(self,
                                      self.functional_form,
                                      *indices, **kw_args)
        else:
            if self.shape and len(self.shape) != 1:
                raise IndexException("Rank mismatch.")
            return self._get_subclass(self,
                                      self.functional_form,
                                      indices, **kw_args)
| StarcoderdataPython |
11277524 | import os
from os.path import join, dirname
from dotenv import load_dotenv
import glob
from source.replay_reader import Replay_reader
from source.stats_service import Stats_service
#setup
# Load configuration from the .env file sitting next to this script.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
xml_path = os.environ.get('xml_path')
csv_directory = os.environ.get('csv_directory')
coach_name = os.environ.get('coach_name')
# Parse the coach's replay XML into match records.
rr = Replay_reader()
matches = rr.get_matches_from_coach_xml(xml_path, coach_name)
#for m in matches:
#    print(m.race, m.opponent_race, m.td_plus, m.td_minus, m.cas_plus, m.cas_minus)
# Hand the matches to the stats/export step -- presumably writes CSV via
# pandas (confirm in Replay_reader.panda_magic).
rr.panda_magic(matches)
| StarcoderdataPython |
6668272 | #!/usr/bin/python
import getopt
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr
import sys
# Directories
result_dir = "results/"
plot_dir = "plots/"
# Expected metric column names in result CSV file:
col_human = "Human"
col_coh = "Coherence"
col_cohep = "Coherence+eps"
col_pmi = "PMI"
col_npmi = "NPMI"
col_cos = "Cosine"
col_kl = "KL"
col_euc = "Euclidean"
col_jac = "Jaccard"
col_vari = "Variability"
col_pvari = "Post Variability"
metrics = [col_coh, col_cohep, col_pmi, col_npmi, col_cos, col_kl, col_euc, col_jac, col_vari, col_pvari]
# Compute metric averages
def compute_average(file_name):
    """Print the mean human score and the rounded mean of every metric column."""
    df = pd.read_csv(result_dir + file_name)
    print(col_human, "average:", np.mean(df[col_human]))
    for col in metrics:
        avg = np.mean(df[col])
        print(col, "average:", round(avg, 2))
# Compute correlation between metrics and human scores
def compute_correlation(file_name):
    """Print Pearson and Spearman correlations of each metric vs. human scores."""
    df = pd.read_csv(result_dir + file_name)
    human = df[col_human]
    for col in metrics:
        print(col)
        pearson = pearsonr(human, df[col])[0]
        spearman = spearmanr(df[[col_human, col]])[0]
        print("\tPearson corr.:", round(pearson, 3))
        print("\tSpearman's corr.:", round(spearman, 3))
# Plot human scores against metrics
def plot_scores(file_name):
    """Plot (jittered) human scores against selected metrics, saving a PNG.

    Reads the result CSV from ``result_dir`` and writes the figure to
    ``plot_dir`` as ``plot_<file_name>.png``.
    """
    results = pd.read_csv(result_dir + file_name)
    # Add jitter to human scores to avoid overlap
    human = results[col_human].copy()
    human += np.random.random(len(human)) * 0.25
    # Specify metrics to plot and number of plots per row.
    # Fixed: this previously referenced the undefined names `cNPMI` and
    # `cPVari` (NameError); the module-level column constants are intended.
    plot_metrics = [col_npmi, col_pvari]
    plots_per_row = 2
    point_size = 15
    point_alpha = 0.5
    fig, axs = plt.subplots(math.ceil(len(plot_metrics)/plots_per_row), plots_per_row, figsize=(4.5,2.5))
    fig.set_tight_layout(True)
    plot_num = 0
    row = 0
    for metric in plot_metrics:
        if plot_num == plots_per_row:
            plot_num = 0
            row += 1
        scores = results[metric]
        score_range = max(scores) - min(scores)
        # Select the axes object for this subplot position; plt.subplots
        # returns a scalar-indexed array for one column, a 1-D array for a
        # single row, and a 2-D array otherwise.
        if plots_per_row == 1:
            ax = axs[row]
        elif len(plot_metrics) <= plots_per_row:
            ax = axs[plot_num]
        else:
            ax = axs[row, plot_num]
        ax.scatter(scores, human, s=point_size, alpha=point_alpha)
        ax.set_xlim(min(scores)-0.1*score_range, max(scores)+0.1*score_range)
        # Fixed: the multi-row branch previously set the x-label on
        # axs[plot_num] (the wrong axes) instead of axs[row, plot_num].
        ax.set_xlabel(metric)
        if plots_per_row > 1 and plot_num > 0:
            ax.set_yticklabels([])
        else:
            ax.set_ylabel("Human score")
        # The single-row multi-plot layout also shows the correlations.
        if plots_per_row > 1 and len(plot_metrics) <= plots_per_row:
            ax.set_title(r"$r$: " + str(round(pearsonr(results[col_human], results[metric])[0],3)) + r", $\rho$: " + str(round(spearmanr(results[[col_human, metric]])[0],3)))
        plot_num += 1
    plt.savefig(plot_dir+"plot_"+file_name+".png", dpi=300)
# Path not needed in name of result file
def main(argv):
    """Parse ``-f <resultfile>`` from argv and run averages/correlations/plots.

    Exits with status 2 (after printing usage) on bad or missing options.
    """
    usage = "py compute_results.py -f <resultfile>"
    try:
        opts, args = getopt.getopt(argv, "h:f:",["file="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    # Fixed: `f` was previously unbound when -f was omitted, which raised a
    # NameError below instead of showing the usage message.
    f = None
    for opt, arg in opts:
        if opt == "-h":
            print(usage)
            sys.exit()
        elif opt == "-f":
            f = arg
    if f is None:
        print(usage)
        sys.exit(2)
    compute_average(f)
    compute_correlation(f)
    plot_scores(f)
if __name__ == "__main__":
main(sys.argv[1:]) | StarcoderdataPython |
6583746 | """
Templates for cheetah configuration. This should be used as little as possible:
ideally scripts should be stored separately and be independently testable.
For example, bash scripts can use environment variables for customization
instead of being templates.
"""
CAMPAIGN_ENV_TEMPLATE = """
export CODAR_CHEETAH_EXPERIMENT_DIR="{experiment_dir}"
export CODAR_CHEETAH_MACHINE_CONFIG="{machine_config}"
export CODAR_CHEETAH_APP_CONFIG="{app_config}"
export CODAR_WORKFLOW_SCRIPT="{workflow_script_path}"
export CODAR_WORKFLOW_RUNNER="{workflow_runner}"
export CODAR_CHEETAH_WORKFLOW_LOG_LEVEL="{workflow_debug_level}"
export CODAR_CHEETAH_UMASK="{umask}"
export CODAR_PYTHON="{codar_python}"
"""
GROUP_ENV_TEMPLATE = """
export CODAR_CHEETAH_GROUP_WALLTIME="{walltime}"
export CODAR_CHEETAH_GROUP_MAX_PROCS="{max_procs}"
export CODAR_CHEETAH_SCHEDULER_ACCOUNT="{account}"
# queue on PBS, partition on SLURM
export CODAR_CHEETAH_SCHEDULER_QUEUE="{queue}"
# SLURM specific options
export CODAR_CHEETAH_SCHEDULER_CONSTRAINT="{constraint}"
export CODAR_CHEETAH_SCHEDULER_LICENSE="{license}"
export CODAR_CHEETAH_SCHEDULER_RESERVATION="{reservation}"
# User-defined
export CODAR_CHEETAH_SCHEDULER_CUSTOM="{custom}"
export CODAR_CHEETAH_CAMPAIGN_NAME="{campaign_name}"
export CODAR_CHEETAH_GROUP_NAME="{group_name}"
export CODAR_CHEETAH_GROUP_NODES="{nodes}"
export CODAR_CHEETAH_GROUP_NODE_EXCLUSIVE="{node_exclusive}"
export CODAR_CHEETAH_GROUP_PROCESSES_PER_NODE="{processes_per_node}"
export CODAR_CHEETAH_MACHINE_NAME="{machine_name}"
"""
| StarcoderdataPython |
6693254 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Iterable
import numpy as np
from ... import opcodes as OperandDef
from ...serialize import ValueType, KeyField, TupleField, StringField
from ...core import ExecutableTuple
from ..operands import TensorHasInput, TensorOperandMixin
from ..datasource import tensor as astensor
from ..array_utils import as_same_device, device
from ..core import TensorOrder
class TensorUnravelIndex(TensorHasInput, TensorOperandMixin):
    """Mars operand implementing ``unravel_index``: one output tensor per dim."""
    _op_type_ = OperandDef.UNRAVEL_INDEX
    _input = KeyField('input')
    _dims = TupleField('dims', ValueType.int32)
    _order = StringField('order')
    def __init__(self, dims=None, dtype=None, order=None, **kw):
        super().__init__(_dims=dims, _dtype=dtype, _order=order, **kw)
        # Default to C (row-major) ordering when none is given.
        if self._order is None:
            self._order = 'C'
    @property
    def dims(self):
        return self._dims
    @property
    def order(self):
        return self._order
    @property
    def output_limit(self):
        # The number of outputs equals len(dims), so it is unbounded here.
        return float('inf')
    def _set_inputs(self, inputs):
        super()._set_inputs(inputs)
        self._input = self._inputs[0]
    def __call__(self, indices):
        # Produce len(dims) coordinate tensors, each shaped like `indices`.
        order = TensorOrder.C_ORDER if self._order == 'C' else TensorOrder.F_ORDER
        kws = [{'pos': i, 'order': order} for i in range(len(self._dims))]
        return ExecutableTuple(self.new_tensors([indices], indices.shape, kws=kws, output_limit=len(kws)))
    @classmethod
    def tile(cls, op):
        # Tile chunk-wise: every input chunk yields one chunk per output dim.
        indices = op.inputs[0]
        dims = op.dims
        order = op.outputs[0].order
        out_chunks = [list() for _ in range(len(dims))]
        for in_chunk in indices.chunks:
            chunk_op = op.copy().reset_key()
            chunk_kws = [{'pos': i, 'index': in_chunk.index, 'order': order}
                         for i in range(len(dims))]
            chunks = chunk_op.new_chunks([in_chunk], shape=in_chunk.shape, kws=chunk_kws,
                                         output_limit=len(dims))
            for out_chunk, c in zip(out_chunks, chunks):
                out_chunk.append(c)
        new_op = op.copy()
        kws = [{'chunks': out_chunk, 'nsplits': indices.nsplits, 'shape': o.shape}
               for out_chunk, o in zip(out_chunks, op.outputs)]
        return new_op.new_tensors(op.inputs, kws=kws, output_limit=len(dims), order=order)
    @classmethod
    def execute(cls, ctx, op):
        # Delegate to the backend array module's unravel_index on this chunk.
        inputs, device_id, xp = as_same_device(
            [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
        indices = inputs[0]
        with device(device_id):
            outputs = xp.unravel_index(indices, op.dims, order=op.order)
            for o, output in zip(op.outputs, outputs):
                ctx[o.key] = output
def unravel_index(indices, dims, order='C'):
    """
    Converts a flat index or tensor of flat indices into a tuple
    of coordinate tensors.
    Parameters
    ----------
    indices : array_like
        An integer tensor whose elements are indices into the flattened
        version of a tensor of dimensions ``dims``.
    dims : tuple of ints
        The shape of the tensor to use for unraveling ``indices``.
    order : {'C', 'F'}, optional
        Determines whether the indices should be viewed as indexing in
        row-major (C-style) or column-major (Fortran-style) order.
    Returns
    -------
    unraveled_coords : tuple of Tensor
        Each tensor in the tuple has the same shape as the ``indices``
        tensor.
    See Also
    --------
    ravel_multi_index
    Examples
    --------
    >>> import mars.tensor as mt
    >>> from mars.session import new_session
    >>> sess = new_session().as_default()
    >>> sess.run(mt.unravel_index([22, 41, 37], (7,6)))
    (array([3, 6, 6]), array([4, 5, 1]))
    >>> sess.run(mt.unravel_index(1621, (6,7,8,9)))
    (3, 1, 4, 1)
    """
    indices = astensor(indices)
    # Normalize ``dims`` to a tuple; a bare int means a 1-D shape.
    if isinstance(dims, Iterable):
        dims = tuple(dims)
    else:
        dims = (dims,)
    # Fixed: the previous substring test (``order not in 'CF'``) wrongly
    # accepted the empty string and the two-character string 'CF'.
    if order not in ('C', 'F'):
        raise TypeError("only 'C' or 'F' order is permitted")
    op = TensorUnravelIndex(dims=dims, dtype=np.dtype(np.intp), order=order)
    return op(indices)
| StarcoderdataPython |
def k_th_smallest_distance(nums):
    """Return all ordered element pairs (nums[i], nums[j]) with i < j.

    Fixed: the original looped ``for i in nums`` and then sliced
    ``nums[i:]``, using element *values* as slice indices, which produced
    wrong pairs (including self-pairs) and crashed conceptually for values
    outside the index range. Pairing is now done by position.
    """
    nums_pairs = []
    for idx, first in enumerate(nums):
        for second in nums[idx + 1:]:
            nums_pairs.append((first, second))
    return nums_pairs
nums = [1, 3, 1]
print(k_th_smallest_distance(nums))
def k_th_smallest_distance(nums):
nums.sort()
for index in range(k):
| StarcoderdataPython |
188503 | from getratings.models.ratings import Ratings
class NA_Zed_Jng_Aatrox(Ratings):
pass
class NA_Zed_Jng_Ahri(Ratings):
pass
class NA_Zed_Jng_Akali(Ratings):
pass
class NA_Zed_Jng_Alistar(Ratings):
pass
class NA_Zed_Jng_Amumu(Ratings):
pass
class NA_Zed_Jng_Anivia(Ratings):
pass
class NA_Zed_Jng_Annie(Ratings):
pass
class NA_Zed_Jng_Ashe(Ratings):
pass
class NA_Zed_Jng_AurelionSol(Ratings):
pass
class NA_Zed_Jng_Azir(Ratings):
pass
class NA_Zed_Jng_Bard(Ratings):
pass
class NA_Zed_Jng_Blitzcrank(Ratings):
pass
class NA_Zed_Jng_Brand(Ratings):
pass
class NA_Zed_Jng_Braum(Ratings):
pass
class NA_Zed_Jng_Caitlyn(Ratings):
pass
class NA_Zed_Jng_Camille(Ratings):
pass
class NA_Zed_Jng_Cassiopeia(Ratings):
pass
class NA_Zed_Jng_Chogath(Ratings):
pass
class NA_Zed_Jng_Corki(Ratings):
pass
class NA_Zed_Jng_Darius(Ratings):
pass
class NA_Zed_Jng_Diana(Ratings):
pass
class NA_Zed_Jng_Draven(Ratings):
pass
class NA_Zed_Jng_DrMundo(Ratings):
pass
class NA_Zed_Jng_Ekko(Ratings):
pass
class NA_Zed_Jng_Elise(Ratings):
pass
class NA_Zed_Jng_Evelynn(Ratings):
pass
class NA_Zed_Jng_Ezreal(Ratings):
pass
class NA_Zed_Jng_Fiddlesticks(Ratings):
pass
class NA_Zed_Jng_Fiora(Ratings):
pass
class NA_Zed_Jng_Fizz(Ratings):
pass
class NA_Zed_Jng_Galio(Ratings):
pass
class NA_Zed_Jng_Gangplank(Ratings):
pass
class NA_Zed_Jng_Garen(Ratings):
pass
class NA_Zed_Jng_Gnar(Ratings):
pass
class NA_Zed_Jng_Gragas(Ratings):
pass
class NA_Zed_Jng_Graves(Ratings):
pass
class NA_Zed_Jng_Hecarim(Ratings):
pass
class NA_Zed_Jng_Heimerdinger(Ratings):
pass
class NA_Zed_Jng_Illaoi(Ratings):
pass
class NA_Zed_Jng_Irelia(Ratings):
pass
class NA_Zed_Jng_Ivern(Ratings):
pass
class NA_Zed_Jng_Janna(Ratings):
pass
class NA_Zed_Jng_JarvanIV(Ratings):
pass
class NA_Zed_Jng_Jax(Ratings):
pass
class NA_Zed_Jng_Jayce(Ratings):
pass
class NA_Zed_Jng_Jhin(Ratings):
pass
class NA_Zed_Jng_Jinx(Ratings):
pass
class NA_Zed_Jng_Kalista(Ratings):
pass
class NA_Zed_Jng_Karma(Ratings):
pass
class NA_Zed_Jng_Karthus(Ratings):
pass
class NA_Zed_Jng_Kassadin(Ratings):
pass
class NA_Zed_Jng_Katarina(Ratings):
pass
class NA_Zed_Jng_Kayle(Ratings):
pass
class NA_Zed_Jng_Kayn(Ratings):
pass
class NA_Zed_Jng_Kennen(Ratings):
pass
class NA_Zed_Jng_Khazix(Ratings):
pass
class NA_Zed_Jng_Kindred(Ratings):
pass
class NA_Zed_Jng_Kled(Ratings):
pass
class NA_Zed_Jng_KogMaw(Ratings):
pass
class NA_Zed_Jng_Leblanc(Ratings):
pass
class NA_Zed_Jng_LeeSin(Ratings):
pass
class NA_Zed_Jng_Leona(Ratings):
pass
class NA_Zed_Jng_Lissandra(Ratings):
pass
class NA_Zed_Jng_Lucian(Ratings):
pass
class NA_Zed_Jng_Lulu(Ratings):
pass
class NA_Zed_Jng_Lux(Ratings):
pass
class NA_Zed_Jng_Malphite(Ratings):
pass
class NA_Zed_Jng_Malzahar(Ratings):
pass
class NA_Zed_Jng_Maokai(Ratings):
pass
class NA_Zed_Jng_MasterYi(Ratings):
pass
class NA_Zed_Jng_MissFortune(Ratings):
pass
class NA_Zed_Jng_MonkeyKing(Ratings):
pass
class NA_Zed_Jng_Mordekaiser(Ratings):
pass
class NA_Zed_Jng_Morgana(Ratings):
pass
class NA_Zed_Jng_Nami(Ratings):
pass
class NA_Zed_Jng_Nasus(Ratings):
pass
class NA_Zed_Jng_Nautilus(Ratings):
pass
class NA_Zed_Jng_Nidalee(Ratings):
pass
class NA_Zed_Jng_Nocturne(Ratings):
pass
class NA_Zed_Jng_Nunu(Ratings):
pass
class NA_Zed_Jng_Olaf(Ratings):
pass
class NA_Zed_Jng_Orianna(Ratings):
pass
class NA_Zed_Jng_Ornn(Ratings):
pass
class NA_Zed_Jng_Pantheon(Ratings):
pass
class NA_Zed_Jng_Poppy(Ratings):
pass
class NA_Zed_Jng_Quinn(Ratings):
pass
class NA_Zed_Jng_Rakan(Ratings):
pass
class NA_Zed_Jng_Rammus(Ratings):
pass
class NA_Zed_Jng_RekSai(Ratings):
pass
class NA_Zed_Jng_Renekton(Ratings):
pass
class NA_Zed_Jng_Rengar(Ratings):
pass
class NA_Zed_Jng_Riven(Ratings):
pass
class NA_Zed_Jng_Rumble(Ratings):
pass
class NA_Zed_Jng_Ryze(Ratings):
pass
class NA_Zed_Jng_Sejuani(Ratings):
pass
class NA_Zed_Jng_Shaco(Ratings):
pass
class NA_Zed_Jng_Shen(Ratings):
pass
class NA_Zed_Jng_Shyvana(Ratings):
pass
class NA_Zed_Jng_Singed(Ratings):
pass
class NA_Zed_Jng_Sion(Ratings):
pass
class NA_Zed_Jng_Sivir(Ratings):
pass
class NA_Zed_Jng_Skarner(Ratings):
pass
class NA_Zed_Jng_Sona(Ratings):
pass
class NA_Zed_Jng_Soraka(Ratings):
pass
class NA_Zed_Jng_Swain(Ratings):
pass
class NA_Zed_Jng_Syndra(Ratings):
pass
class NA_Zed_Jng_TahmKench(Ratings):
pass
class NA_Zed_Jng_Taliyah(Ratings):
pass
class NA_Zed_Jng_Talon(Ratings):
pass
class NA_Zed_Jng_Taric(Ratings):
pass
class NA_Zed_Jng_Teemo(Ratings):
pass
class NA_Zed_Jng_Thresh(Ratings):
pass
class NA_Zed_Jng_Tristana(Ratings):
pass
class NA_Zed_Jng_Trundle(Ratings):
pass
class NA_Zed_Jng_Tryndamere(Ratings):
pass
class NA_Zed_Jng_TwistedFate(Ratings):
pass
class NA_Zed_Jng_Twitch(Ratings):
pass
class NA_Zed_Jng_Udyr(Ratings):
pass
class NA_Zed_Jng_Urgot(Ratings):
pass
class NA_Zed_Jng_Varus(Ratings):
pass
class NA_Zed_Jng_Vayne(Ratings):
pass
class NA_Zed_Jng_Veigar(Ratings):
pass
class NA_Zed_Jng_Velkoz(Ratings):
pass
class NA_Zed_Jng_Vi(Ratings):
pass
class NA_Zed_Jng_Viktor(Ratings):
pass
class NA_Zed_Jng_Vladimir(Ratings):
pass
class NA_Zed_Jng_Volibear(Ratings):
pass
class NA_Zed_Jng_Warwick(Ratings):
pass
class NA_Zed_Jng_Xayah(Ratings):
pass
class NA_Zed_Jng_Xerath(Ratings):
pass
class NA_Zed_Jng_XinZhao(Ratings):
pass
class NA_Zed_Jng_Yasuo(Ratings):
pass
class NA_Zed_Jng_Yorick(Ratings):
pass
class NA_Zed_Jng_Zac(Ratings):
pass
class NA_Zed_Jng_Zed(Ratings):
pass
class NA_Zed_Jng_Ziggs(Ratings):
pass
class NA_Zed_Jng_Zilean(Ratings):
pass
class NA_Zed_Jng_Zyra(Ratings):
pass
| StarcoderdataPython |
6675440 | <reponame>mwesterhof/wagtail_managed404<gh_stars>1-10
from django.db import models
from wagtail.admin.edit_handlers import (
FieldPanel, MultiFieldPanel, PageChooserPanel)
from wagtail.core.models import Page, Site
class PageNotFoundEntry(models.Model):
    """Tracks a URL that produced a 404 on a site, plus its configured redirect.

    A redirect target may be either a raw URL or a Wagtail page; the page
    takes priority (see ``redirect_to``).
    """
    site = models.ForeignKey(
        Site, related_name='pagenotfound_entries', on_delete=models.CASCADE)
    # The request path that produced the 404.
    url = models.CharField(max_length=200)
    redirect_to_url = models.CharField(max_length=200, null=True, blank=True)
    redirect_to_page = models.ForeignKey(
        Page, on_delete=models.CASCADE, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    # When and how often this URL was last requested.
    last_hit = models.DateTimeField()
    hits = models.PositiveIntegerField(default=1)
    # Whether to issue a permanent (301) rather than temporary redirect.
    permanent = models.BooleanField(default=False)
    # Wagtail admin edit panels: entry identity, hit stats, redirect config.
    panels = [
        MultiFieldPanel(
            [
                FieldPanel('site'),
                FieldPanel('url'),
            ], heading='entry'),
        MultiFieldPanel(
            [
                FieldPanel('last_hit'),
                FieldPanel('hits'),
            ], heading='general', classname='collapsible'),
        MultiFieldPanel(
            [
                PageChooserPanel('redirect_to_page'),
                FieldPanel('redirect_to_url'),
                FieldPanel('permanent'),
            ], heading='redirect', classname='collapsible'),
    ]
    @property
    def redirect_to(self):
        """Return the effective redirect URL; a linked page wins over a raw URL."""
        if self.redirect_to_page:
            return self.redirect_to_page.url
        return self.redirect_to_url
    def __str__(self):
        return self.url
    class Meta:
        verbose_name_plural = "page not found redirects"
        ordering = ('-hits',)
| StarcoderdataPython |
3500636 | <filename>isobenefit_cities/image_io.py
from PIL import Image
import numpy as np
from matplotlib import cm
def import_2Darray_from_image(filepath):
    """Load an image file as a 2-D grayscale array min-max rescaled to [0, 1].

    Channels are averaged into a single gray value per pixel.
    """
    pic = Image.open(filepath)
    data = np.array(pic.getdata()).reshape(pic.size[1], pic.size[0], -1).mean(axis=2)
    # NOTE(review): a constant image gives max == min, so this divides by
    # zero -- confirm inputs always contain at least two gray levels.
    data_rescaled = (data - data.min()) / (data.max() - data.min())
    return data_rescaled
def plot_image_from_2Darray(normalized_data_array, color_map=cm.gist_earth):
    """Display a [0, 1]-normalized 2-D array as an image via `color_map`."""
    # Map normalized values through the colormap to 8-bit RGBA.
    data_mapped = np.uint8(255 * color_map(normalized_data_array))
    img = Image.fromarray(data_mapped)
    img.show()
def save_image_from_2Darray(canvas, filepath, format='png'):
    """Save a [0, 1]-valued array `canvas` to `filepath` in the given format."""
    # Scale to 8-bit before handing the array to PIL.
    data_mapped = np.uint8(255 * canvas)
    img = Image.fromarray(data_mapped)
    img.save(filepath, format=format)
| StarcoderdataPython |
6430672 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from polyaxon_schemas.ops.build_job.op import BuildConfig, BuildSchema # noqa
| StarcoderdataPython |
6531358 | """Test Tecan automation script generation."""
import unittest
from synbio.containers import Well
from synbio.instructions import Instruction, Transfer
from synbio.picklists import to_tecan
class TestTecan(unittest.TestCase):
    """Tecan picklist generation."""
    def test_to_tecan(self):
        """Create a Tecan picklist and check its A/D/W records."""
        src = Well()
        dest = Well()
        instruction = Instruction(transfers=[Transfer(src, dest, volume=5.0)])
        picklist = to_tecan(instruction, 0)
        lines = picklist.split("\n")
        # One aspirate (A), one dispense (D) and one wash (W) line expected.
        self.assertEqual(3, len(lines))
        self.assertEqual("A;Plate:1;;;1;;5.0;;;", lines[0])
        self.assertEqual("D;Plate:2;;;1;;5.0;;;", lines[1])
        self.assertEqual("W;;;;;;;;;", lines[2])
        with self.assertRaises(ValueError):
            to_tecan(Instruction(), 0)  # needs to have transfers to use in picklist
| StarcoderdataPython |
6511882 | <reponame>jmcroft7/dadpro<gh_stars>0
from flask_app import app
from flask import render_template, request, redirect, jsonify, session
from flask import flash
import requests
# class wrapping the icanhazdadjoke.com JSON API
class Joke:
    """Thin wrapper around the icanhazdadjoke.com dad-joke API."""
    # All endpoints return JSON when this Accept header is sent.
    _BASE_URL = 'https://icanhazdadjoke.com'
    _HEADERS = {"Accept": "application/json"}
    def __init__(self, id, contents):
        self.id = id
        self.contents = contents
    # classmethods
    # ==========================================================
    @classmethod
    def _fetch(cls, path=''):
        """GET ``_BASE_URL + path`` and return the parsed JSON payload.

        The raw payload is printed, preserving the original debug output.
        """
        response = requests.get(cls._BASE_URL + path, headers=cls._HEADERS)
        payload = response.json()
        print(payload)
        return payload
    # ==========================================================
    # retrieve a single random joke string
    @classmethod
    def getquote(cls):
        return cls._fetch()['joke']
    # ==========================================================
    # get a page of jokes from the search endpoint
    @classmethod
    def getlist(cls):
        return cls._fetch('/search')['results']
    # ==========================================================
    # count how many jokes the API reports in total
    @classmethod
    def getlistcount(cls):
        # Fixed: this previously queried the root endpoint, whose single-joke
        # payload (see getquote) carries no 'total_jokes' key, raising a
        # KeyError; the /search endpoint provides the total.
        return cls._fetch('/search')['total_jokes']
    # ==========================================================
    # parse through jokes using a keyword
    @classmethod
    def searching(cls, data):
        return cls._fetch('/search?term={}'.format(data))['results']
    # ==========================================================
    # count how many jokes match the keyword
    @classmethod
    def counting(cls, data):
        return cls._fetch('/search?term={}'.format(data))['total_jokes']
3308079 | import re as _re
from mp.core.expression import Expression as Exp
class EventUnit:
    """One registered event handler, matched by exact name or regex."""

    def __init__(self, event_name: str, method, unique, fixed, hidden, is_regex: bool = False):
        self._event_name = event_name
        # Pre-compile once so regex units match quickly on every lookup.
        self._event_name_compiled = _re.compile(event_name) if is_regex else None
        self._method = method
        self._unique = unique
        self._fixed = fixed
        self._hidden = hidden

    def match_name(self, name: str, hidden: bool = False):
        """Return True when *name* refers to this unit.

        Hidden units only match when the caller opts in via ``hidden=True``.
        """
        if self._hidden and not hidden:
            return False
        pattern = self._event_name_compiled
        if pattern is None:
            return self._event_name == name
        return pattern.search(name) is not None

    def get_method(self):
        """Return the wrapped callable."""
        return self._method

    def set_method(self, method):
        """Replace the wrapped callable."""
        self._method = method

    def __call__(self, *args, **kwargs):
        """Invoke the wrapped callable directly."""
        return self._method(*args, **kwargs)

    @property
    def fixed(self) -> bool:
        return self._fixed

    @property
    def hidden(self) -> bool:
        return self._hidden

    @property
    def unique(self) -> bool:
        return self._unique

    @property
    def name(self) -> str:
        return self._event_name

    def __str__(self):
        return self.name
class EventDelegate:
    """Deferred event trigger: calling the delegate fires ``Exp.EVENT`` with a
    pre-bound event name and visibility flag."""
    def __init__(self, event_name: str, hidden: bool):
        self._event_name = event_name
        self._hidden = hidden
    def __call__(self, *args, **kwargs):
        # Force the bound visibility; any caller-supplied 'hidden' is overridden.
        kwargs['hidden'] = self._hidden
        return Exp.EVENT(self._event_name, *args, **kwargs)
class Event:
    """Registry of EventUnit handlers, dispatched by (possibly regex) name."""
    def __init__(self, verbose: bool = False):
        # Ordered list of all registered units; unique units are additionally
        # indexed by name in _uniques.
        self._events = list()
        self._uniques = dict()
        self._verbose = verbose
    def add(self, event_name: str, method, unique: bool = False,
            fixed: bool = False, hidden: bool = False, is_regex: bool = False):
        """Register *method* under *event_name*; a unique unit first removes
        any unit registered under exactly the same name."""
        event = EventUnit(event_name, method, unique, fixed, hidden, is_regex)
        if unique:
            self.remove(event_name)
            self._uniques[event_name] = event
        self._events.append(event)
    def add_object(self, unit: EventUnit):
        """Register an already-constructed EventUnit (same replacement rule)."""
        assert type(unit) is EventUnit
        if unit._unique:
            self.remove(unit._event_name)
            self._uniques[unit._event_name] = unit
        self._events.append(unit)
    def find(self, event_name: str, hidden: bool = False, get_idx: bool = False, _no_re: bool = False):
        """Return the first matching unit (optionally ``(index, unit)``).

        With ``_no_re=True`` the comparison is literal equality against the
        stored name, bypassing regex matching and visibility filtering.
        Returns None (or ``(-1, None)``) when nothing matches.
        """
        def _compare():
            if _no_re:
                return event._event_name == event_name
            return event.match_name(event_name, hidden)
        for idx, event in enumerate(self._events):
            if _compare():
                if get_idx:
                    return idx, event
                return event
        if get_idx:
            return -1, None
        return None
    def find_unique(self, event_name: str, hidden: bool = False):
        """Return the first unique unit matching *event_name*, or None."""
        for event in self._uniques.values():
            if event.match_name(event_name, hidden):
                return event
        return None
    def remove(self, event_name: str):
        """Remove the unit registered under exactly *event_name*, if present."""
        idx, event = self.find(event_name, hidden=True, get_idx=True, _no_re=True)
        if event_name in self._uniques.keys():
            del self._uniques[event_name]
        if idx >= 0:
            del self._events[idx]
    def __call__(self, event_name: str, *args, hidden: bool = False, **kwargs):
        """Fire every unit matching *event_name*.

        NOTE(review): usually returns a list of non-None responses, but when a
        unique unit answers with a non-None value the single response is
        returned directly -- callers must handle both shapes.
        """
        list_responses = list()
        for event in self._events:
            if event.match_name(event_name, hidden):
                response = event(*args, **kwargs)
                self._add_response(list_responses, response)
                if response is not None and event.unique:
                    return list_responses[-1]
        return list_responses
    @classmethod
    def delegate(cls, event_name: str, hidden: bool = False):
        """Return a callable that later triggers *event_name* via Exp.EVENT."""
        return EventDelegate(event_name, hidden)
    @classmethod
    def _add_response(cls, list_responses, response):
        # None means "handled, nothing to report" and is filtered out.
        if response is not None:
            list_responses.append(response)
| StarcoderdataPython |
class Vehiculo:
    """A vehicle with a name, wheel count, energy level and color."""

    # Class-level defaults, overridden per instance in __init__.
    color = ""
    ruedas = 0
    energia = 0
    nombre = ""

    def __init__(self, nombre, ruedas, energia, color):
        self.nombre = nombre
        self.ruedas = ruedas
        self.energia = energia
        self.color = color

    def __repr__(self):
        return (f'Este es un vehiculo llamado {self.nombre}'
                f' de color {self.color} '
                f'tiene {self.ruedas} ruedas '
                f'y {self.energia} energia')

    def __eq__(self, other):
        """Two vehicles are equal when all four attributes coincide."""
        mine = (self.nombre, self.color, self.ruedas, self.energia)
        theirs = (other.nombre, other.color, other.ruedas, other.energia)
        return mine == theirs

    def mover(self, cantidad):
        """Spend *cantidad* energy moving; raise when there is not enough."""
        if cantidad > self.energia:
            raise Exception(f'El vehiculo {self.nombre} no tiene energía para '
                            f'moverse {cantidad}')
        print(f'El vehiculo {self.nombre} se mueve {cantidad}')
        self.energia -= cantidad

    def tanquear(self, cantidad):
        """Add *cantidad* energy; only strictly positive amounts are valid."""
        if cantidad <= 0:
            raise Exception(f'No puede tanquear con {cantidad}')
        self.energia += cantidad
| StarcoderdataPython |
3255714 | import smv
# NOTE(review): the class name shadows the builtin `input`; kept because SMV
# discovers datasets by class name.
class input(smv.SmvCsvFile):
    """SMV input dataset: the CB1200CZ11 employment CSV."""
    def path(self):
        # Path is resolved relative to the SMV project's data directory.
        return "employment/CB1200CZ11.csv"
| StarcoderdataPython |
1766057 | from collections import Counter
def palindrome_permutation(string):
    """Return True if some permutation of *string* is a palindrome.

    Spaces are ignored. A string can be permuted into a palindrome exactly
    when at most one character occurs an odd number of times.
    """
    condensed = string.replace(" ", "")
    counts = Counter(condensed)
    odd_counts = sum(total % 2 for total in counts.values())
    return odd_counts <= 1
# Demo: "tact coal" has two odd-count letters (o, l), so this prints False.
sample = "tact coal"
print(palindrome_permutation(sample))
| StarcoderdataPython |
def sortByHeight(a):
    """Sort the people (values != -1) ascending while trees (-1) stay put.

    Returns a new list; the input is not modified.
    """
    people = iter(sorted(x for x in a if x != -1))
    return [x if x == -1 else next(people) for x in a]
| StarcoderdataPython |
182558 | <reponame>zhiming-shen/Xen-Blanket-NG
import fcntl
import os
def close_fds(pass_fds=()):
    """Close every file descriptor from 3 up to the system limit.

    Descriptors listed in *pass_fds* (and stdin/stdout/stderr) are left open.
    Intended for use between fork() and exec() in daemon-style code.
    """
    try:
        MAXFD = os.sysconf('SC_OPEN_MAX')
    except (AttributeError, ValueError, OSError):
        # Bare except replaced: only the failures sysconf can actually raise
        # are swallowed (the old code also ate KeyboardInterrupt/SystemExit).
        MAXFD = 256
    keep = frozenset(pass_fds)  # O(1) membership in the loop below
    for i in range(3, MAXFD):
        if i in keep:
            continue
        try:
            os.close(i)
        except OSError:
            # fd was not open; nothing to do.
            pass
def fcntl_setfd_cloexec(file, bool):
    """Set (*bool* truthy) or clear the FD_CLOEXEC flag on descriptor *file*.

    Parameter names are part of the existing call sites and kept as-is even
    though ``bool`` shadows the builtin.
    """
    flags = fcntl.fcntl(file, fcntl.F_GETFD)
    if bool:
        flags |= fcntl.FD_CLOEXEC
    else:
        flags &= ~fcntl.FD_CLOEXEC
    fcntl.fcntl(file, fcntl.F_SETFD, flags)
def waitstatus_description(st):
    """Return a human-readable description of a wait() status word *st*."""
    if os.WIFEXITED(st):
        code = os.WEXITSTATUS(st)
        return "exited with nonzero status %i" % code if code else "exited"
    if os.WIFSIGNALED(st):
        text = "died due to signal %i" % os.WTERMSIG(st)
        return text + " (core dumped)" if os.WCOREDUMP(st) else text
    return "failed with unexpected wait status %i" % st
| StarcoderdataPython |
11273899 | from .apitools import ApiTools
def setup(bot):
    """discord.py extension entry point: register the ApiTools cog on *bot*."""
    bot.add_cog(ApiTools(bot))
| StarcoderdataPython |
6428012 | # Make a class called User. Create two attributes called first_name
# and last_name, and then create several other attributes that are typically stored
# in a user profile. Make a method called describe_user() that prints a summary
# of the user’s information. Make another method called greet_user() that prints
# a personalized greeting to the user.
# Create several instances representing different users, and call both methods
# for each user.
class User:
    """A simple user profile: name, numeric id and location."""

    def __init__(self, name, id_num, location):
        self.name = name
        self.id_num = id_num
        self.location = location

    def describe_user(self):
        """Print a summary of the user's information."""
        print(f"User Name: {self.name.title()} \nID: {self.id_num} \nLocation: {self.location.title()}\n")

    def greet_user(self):
        """Print a personalized greeting to the user."""
        print(f"Hello {self.name.title()}, your account information is:")
# An administrator is a special kind of user. Write a class called
# Admin that inherits from the User class you wrote in Exercise 9-3 (page 162)
# or Exercise 9-5 (page 167).
class Admin(User):
    """A User with an extra list of administrative privileges."""

    def __init__(self, name, id_num, location):
        super().__init__(name, id_num, location)
        # Strings such as "can add post", "can delete post", "can ban user".
        self.privileges = []

    def show_privileges(self):
        """Print each of the administrator's privileges on its own line."""
        for privilege in self.privileges:
            print(f"\t- {privilege}")
# Demo: greet and describe a few ordinary users, then an administrator.
for demo_user in (User('kuljot', 9777, 'california'),
                  User('bill', 9036, 'washington'),
                  User('donald', 4022, 'new york')):
    demo_user.greet_user()
    demo_user.describe_user()

# Create an instance of Admin, and call your method.
administrator = Admin('kuljot biring', 99999, 'california')
administrator.describe_user()
administrator.privileges = ['can add post', 'can delete post', 'can ban user']
administrator.show_privileges()
| StarcoderdataPython |
4926127 | --- Lib/site.py Tue Jul 20 04:28:28 2004
+++ Lib/site.py.new Mon Apr 4 10:47:12 2005
@@ -186,6 +186,7 @@
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
+ sitedirs.append( os.path.join('__PREFIX__', 'lib', 'python2.4', 'site-packages') )
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
| StarcoderdataPython |
# NOTE: Python 2 script (statement-form `print`); will not run under Python 3.
# SECURITY: a real-looking API key is committed here -- it should be revoked
# and loaded from an environment variable instead.
subscription_key = "82d9b1e05e16413d8bcca30f32b1ca83"
assert subscription_key
# You must use the same region in your REST call as you used to get your
# subscription keys. For example, if you got your subscription keys from
# westus, replace "westcentralus" in the URI below with "westus".
#
# Free trial subscription keys are generated in the westcentralus region.
# If you use a free trial subscription key, you shouldn't need to change
# this region.
face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'
# Set image_url to the URL of an image that you want to analyze.
image_url = 'http://farm3.static.flickr.com/2226/2140577195_61d14b7dc3.jpg'
image_path = "/home/ess/Documents/TechFest/EyeCandy/FinalWork/opencv.jpg"
# NOTE(review): the file handle from open() is never closed -- acceptable in a
# one-shot script, but use a with-block if this is reused.
image_data = open(image_path, "rb").read()
import requests
import json
from IPython.display import HTML
headers = {'Content-Type': 'application/octet-stream','Ocp-Apim-Subscription-Key': subscription_key}
params = {
    'returnFaceId': 'true',
    'returnFaceLandmarks': 'false',
    'returnFaceAttributes': 'emotion'
}
#json={"url": image_url}
# POST the raw image bytes; only the 'emotion' face attributes are requested.
response = requests.post(face_api_url, params=params, headers=headers,data=image_data)
faces = response.json()
# The dumps/loads round-trip below is a no-op re-parse of already-decoded JSON.
jsonToPython = json.dumps(faces, indent=4)
datastore = json.loads(jsonToPython)
print datastore
print "\n new"
print datastore[0]["faceAttributes"]["emotion"]["happiness"]
happyValue = datastore[0]["faceAttributes"]["emotion"]["happiness"]
#print json.dumps('faceAttributes')
"""
# Display the original image and overlay it with the face information.
# If you are using a Jupyter notebook, uncomment the following line.
#%matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from matplotlib import patches
from io import BytesIO
response = requests.get(image_url)
image = Image.open(BytesIO(response.content))
plt.figure(figsize=(8, 8))
ax = plt.imshow(image, alpha=0.6)
for face in faces:
fr = face["faceRectangle"]
#em = face["scores"]
fa = face["faceAttributes"]
print "\nNEW ATTT", fa
for facing in fa:
emotin = facing[0][0]
emotin2 = facing[1]
emotin3 = facing[2]
emotin4 = facing[3]
print emotin, emotin2, emotin3, emotin4
#
origin = (fr["left"], fr["top"])
p = patches.Rectangle(
origin, fr["width"], fr["height"], fill=False, linewidth=2, color='b')
ax.axes.add_patch(p)
#ct = "\n".join(["{0:<10s}{1:>.4f}".format(k,v) for k, v in sorted(list(em.items()),key=lambda r: r[1], reverse=True)][:3])
#plt.text(origin[0], origin[1], ct, fontsize=20)
plt.text(origin[0], origin[1], "%s, %d"%(fa["gender"].capitalize(), fa["age"]),
fontsize=20, weight="bold", va="bottom")
_ = plt.axis("off")"""
| StarcoderdataPython |
1784349 | <filename>sanic_ext/extensions/http/cors.py
import re
from dataclasses import dataclass
from datetime import timedelta
from types import SimpleNamespace
from typing import Any, FrozenSet, List, Optional, Tuple, Union
from sanic import HTTPResponse, Request, Sanic
from sanic.exceptions import SanicException
from sanic.helpers import Default, _default
from sanic.log import logger
# Pattern that matches any origin; used to represent a configured "*".
WILDCARD_PATTERN = re.compile(r".*")
# Canonical lower-case names of the CORS / preflight headers used below.
ORIGIN_HEADER = "access-control-allow-origin"
ALLOW_HEADERS_HEADER = "access-control-allow-headers"
ALLOW_METHODS_HEADER = "access-control-allow-methods"
EXPOSE_HEADER = "access-control-expose-headers"
CREDENTIALS_HEADER = "access-control-allow-credentials"
REQUEST_METHOD_HEADER = "access-control-request-method"
REQUEST_HEADERS_HEADER = "access-control-request-headers"
MAX_AGE_HEADER = "access-control-max-age"
VARY_HEADER = "vary"
@dataclass(frozen=True)
class CORSSettings:
    """Immutable snapshot of the app-wide CORS configuration.

    Built once by ``_setup_cors_settings`` and stored on ``app.ctx.cors``;
    per-route overrides (via the ``@cors`` decorator) take precedence through
    ``_get_from_cors_ctx``.
    """
    allow_headers: FrozenSet[str]
    allow_methods: FrozenSet[str]
    allow_origins: Tuple[re.Pattern, ...]
    always_send: bool
    automatic_options: bool
    expose_headers: FrozenSet[str]
    max_age: str
    send_wildcard: bool
    supports_credentials: bool
def add_cors(app: Sanic) -> None:
    """Install CORS handling on *app*.

    Registers a response middleware that decorates every response with the
    appropriate CORS headers, and a startup hook that merges per-handler
    ``@cors`` overrides onto each route group's ctx.
    """
    _setup_cors_settings(app)

    @app.on_response
    async def _add_cors_headers(request, response):
        preflight = (
            request.app.ctx.cors.automatic_options
            and request.method == "OPTIONS"
        )
        # A preflight without Access-Control-Request-Method is not a valid
        # CORS preflight, so no headers are applied at all.
        if preflight and not request.headers.get(REQUEST_METHOD_HEADER):
            logger.info(
                "No Access-Control-Request-Method header found on request. "
                "CORS headers will not be applied."
            )
            return
        _add_origin_header(request, response)
        # If no allowed origin was resolved, skip every other CORS header.
        if ORIGIN_HEADER not in response.headers:
            return
        _add_expose_header(request, response)
        _add_credentials_header(request, response)
        _add_vary_header(request, response)
        if preflight:
            # These three are only meaningful on preflight responses.
            _add_max_age_header(request, response)
            _add_allow_header(request, response)
            _add_methods_header(request, response)

    @app.before_server_start
    async def _assign_cors_settings(app, _):
        # Collapse every handler-level __cors__ override in a route group
        # into a single namespace shared by the whole group.
        for group in app.router.groups.values():
            _cors = SimpleNamespace()
            for route in group:
                cors = getattr(route.handler, "__cors__", None)
                if cors:
                    for key, value in cors.__dict__.items():
                        setattr(_cors, key, value)
            for route in group:
                route.ctx._cors = _cors
def cors(
    *,
    origin: Union[str, Default] = _default,
    expose_headers: Union[List[str], Default] = _default,
    allow_headers: Union[List[str], Default] = _default,
    allow_methods: Union[List[str], Default] = _default,
    supports_credentials: Union[bool, Default] = _default,
    max_age: Union[str, int, timedelta, Default] = _default,
):
    """Per-route CORS override decorator.

    Stores both the raw values and their parsed forms on the handler's
    ``__cors__`` attribute; anything left as ``_default`` falls back to the
    app-wide settings at request time.
    """
    def decorator(f):
        f.__cors__ = SimpleNamespace(
            _cors_origin=origin,
            _cors_expose_headers=expose_headers,
            _cors_supports_credentials=supports_credentials,
            # Parse eagerly so no per-request parsing is needed.
            _cors_allow_origins=(
                _parse_allow_origins(origin)
                if origin is not _default
                else origin
            ),
            _cors_allow_headers=(
                _parse_allow_headers(allow_headers)
                if allow_headers is not _default
                else allow_headers
            ),
            _cors_allow_methods=(
                _parse_allow_methods(allow_methods)
                if allow_methods is not _default
                else allow_methods
            ),
            _cors_max_age=(
                _parse_max_age(max_age) if max_age is not _default else max_age
            ),
        )
        return f

    return decorator
def _setup_cors_settings(app: Sanic) -> None:
    """Validate app CORS config and freeze it into ``app.ctx.cors``.

    Raises:
        SanicException: when credentials are enabled together with a literal
            '*' origin, which the CORS spec forbids.
    """
    if app.config.CORS_ORIGINS == "*" and app.config.CORS_SUPPORTS_CREDENTIALS:
        raise SanicException(
            "Cannot use supports_credentials in conjunction with "
            "an origin string of '*'. See: "
            "http://www.w3.org/TR/cors/#resource-requests"
        )

    allow_headers = _get_allow_headers(app)
    allow_methods = _get_allow_methods(app)
    allow_origins = _get_allow_origins(app)
    expose_headers = _get_expose_headers(app)
    max_age = _get_max_age(app)

    app.ctx.cors = CORSSettings(
        allow_headers=allow_headers,
        allow_methods=allow_methods,
        allow_origins=allow_origins,
        always_send=app.config.CORS_ALWAYS_SEND,
        automatic_options=app.config.CORS_AUTOMATIC_OPTIONS,
        expose_headers=expose_headers,
        max_age=max_age,
        # The wildcard shortcut is only valid when '*' is actually allowed.
        send_wildcard=(
            app.config.CORS_SEND_WILDCARD and WILDCARD_PATTERN in allow_origins
        ),
        supports_credentials=app.config.CORS_SUPPORTS_CREDENTIALS,
    )
def _get_from_cors_ctx(request: Request, key: str, default: Any = None):
    """Look up *key* on the matched route's CORS override namespace.

    Falls back to *default* when there is no route, no such attribute, or the
    stored value is the ``_default`` sentinel (meaning "not overridden").
    """
    route = request.route
    if route:
        value = getattr(route.ctx._cors, key, default)
        if value is not _default:
            return value
    return default
def _add_origin_header(request: Request, response: HTTPResponse) -> None:
    """Resolve and set Access-Control-Allow-Origin for this request.

    Sets nothing when no allowed origin can be determined, which makes the
    caller skip all remaining CORS headers.
    """
    request_origin = request.headers.get("origin")
    origin_value = ""
    allow_origins = _get_from_cors_ctx(
        request,
        "_cors_allow_origins",
        request.app.ctx.cors.allow_origins,
    )
    fallback_origin = _get_from_cors_ctx(
        request,
        "_cors_origin",
        request.app.config.CORS_ORIGINS,
    )
    if request_origin:
        if request.app.ctx.cors.send_wildcard:
            origin_value = "*"
        else:
            # Echo the request origin back if any configured pattern admits it.
            for pattern in allow_origins:
                if pattern.match(request_origin):
                    origin_value = request_origin
                    break
    elif request.app.ctx.cors.always_send:
        # No Origin header on the request, but the app wants the header anyway.
        if WILDCARD_PATTERN in allow_origins:
            origin_value = "*"
        else:
            # A single literal configured origin is usable directly; otherwise
            # fall back to the server's own name.
            if isinstance(fallback_origin, str) and "," not in fallback_origin:
                origin_value = fallback_origin
            else:
                origin_value = request.app.config.get("SERVER_NAME", "")
    if origin_value:
        response.headers[ORIGIN_HEADER] = origin_value
def _add_expose_header(request: Request, response: HTTPResponse) -> None:
    """Set Access-Control-Expose-Headers from config / route overrides."""
    with_credentials = _is_request_with_credentials(request)
    headers = None
    expose_headers = _get_from_cors_ctx(
        request, "_cors_expose_headers", request.app.ctx.cors.expose_headers
    )
    # MDN: The value "*" only counts as a special wildcard value for requests
    # without credentials (requests without HTTP cookies or HTTP
    # authentication information). In requests with credentials, it is
    # treated as the literal header name "*" without special semantics.
    # Note that the Authorization header can't be wildcarded and always
    # needs to be listed explicitly.
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers
    if not with_credentials and "*" in expose_headers:
        headers = ["*"]
    elif expose_headers:
        headers = expose_headers
    if headers:
        response.headers[EXPOSE_HEADER] = ",".join(headers)
def _add_credentials_header(request: Request, response: HTTPResponse) -> None:
    """Set Access-Control-Allow-Credentials when credentials are enabled."""
    allowed = _get_from_cors_ctx(
        request,
        "_cors_supports_credentials",
        request.app.ctx.cors.supports_credentials,
    )
    if not allowed:
        return
    response.headers[CREDENTIALS_HEADER] = "true"
def _add_allow_header(request: Request, response: HTTPResponse) -> None:
    """Set Access-Control-Allow-Headers on a preflight response.

    Only headers that were both requested (Access-Control-Request-Headers)
    and configured as allowed are echoed back.
    """
    with_credentials = _is_request_with_credentials(request)
    # When the request header is absent this yields {""}, which is harmless:
    # the empty string never appears in the configured allow set.
    request_headers = set(
        h.strip().lower()
        for h in request.headers.get(REQUEST_HEADERS_HEADER, "").split(",")
    )
    allow_headers = _get_from_cors_ctx(
        request, "_cors_allow_headers", request.app.ctx.cors.allow_headers
    )
    # MDN: The value "*" only counts as a special wildcard value for requests
    # without credentials (requests without HTTP cookies or HTTP
    # authentication information). In requests with credentials,
    # it is treated as the literal header name "*" without special semantics.
    # Note that the Authorization header can't be wildcarded and always needs
    # to be listed explicitly.
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers
    if not with_credentials and "*" in allow_headers:
        allow_headers = ["*"]
    else:
        allow_headers = request_headers & allow_headers
    if allow_headers:
        response.headers[ALLOW_HEADERS_HEADER] = ",".join(allow_headers)
def _add_max_age_header(request: Request, response: HTTPResponse) -> None:
    """Set Access-Control-Max-Age on preflight responses when configured."""
    max_age = _get_from_cors_ctx(
        request, "_cors_max_age", request.app.ctx.cors.max_age
    )
    if not max_age:
        return
    response.headers[MAX_AGE_HEADER] = max_age
def _add_methods_header(request: Request, response: HTTPResponse) -> None:
    """Set Access-Control-Allow-Methods on a preflight response."""
    # MDN: The value "*" only counts as a special wildcard value for requests
    # without credentials (requests without HTTP cookies or HTTP
    # authentication information). In requests with credentials, it
    # is treated as the literal method name "*" without
    # special semantics.
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods
    methods = None
    with_credentials = _is_request_with_credentials(request)
    allow_methods = _get_from_cors_ctx(
        request, "_cors_allow_methods", request.app.ctx.cors.allow_methods
    )
    if not with_credentials and "*" in allow_methods:
        methods = {"*"}
    elif request.route:
        # Advertise the intersection of the route group's real methods and
        # the configured allow list (or all group methods when unconfigured).
        group = request.app.router.groups.get(request.route.segments)
        if group:
            group_methods = {method.lower() for method in group.methods}
            if allow_methods:
                methods = group_methods & allow_methods
            else:
                methods = group_methods
    if methods:
        response.headers[ALLOW_METHODS_HEADER] = ",".join(methods).upper()
def _add_vary_header(request: Request, response: HTTPResponse) -> None:
    """Add ``Vary: origin`` when more than one origin pattern is configured,
    so caches do not reuse a response across origins."""
    patterns = _get_from_cors_ctx(
        request,
        "_cors_allow_origins",
        request.app.ctx.cors.allow_origins,
    )
    if len(patterns) > 1:
        response.headers[VARY_HEADER] = "origin"
def _get_allow_origins(app: Sanic) -> Tuple[re.Pattern, ...]:
    """Compiled origin patterns from the app's CORS_ORIGINS setting."""
    origins = app.config.CORS_ORIGINS
    return _parse_allow_origins(origins)
def _parse_allow_origins(
    value: Union[str, re.Pattern]
) -> Tuple[re.Pattern, ...]:
    """Normalize an origins setting into a tuple of compiled patterns.

    Accepts "*" (wildcard), a comma-separated string of origins, or a single
    pre-compiled pattern; anything falsy yields an empty tuple.
    """
    origins: Optional[Union[List[str], List[re.Pattern]]] = None
    if value and isinstance(value, str):
        if value == "*":
            origins = [WILDCARD_PATTERN]
        else:
            origins = value.split(",")
    elif isinstance(value, re.Pattern):
        origins = [value]
    return tuple(
        pattern if isinstance(pattern, re.Pattern) else re.compile(pattern)
        for pattern in (origins or [])
    )
def _get_expose_headers(app: Sanic) -> FrozenSet[str]:
expose_headers = (
(
app.config.CORS_EXPOSE_HEADERS
if isinstance(
app.config.CORS_EXPOSE_HEADERS, (list, set, frozenset, tuple)
)
else app.config.CORS_EXPOSE_HEADERS.split(",")
)
if app.config.CORS_EXPOSE_HEADERS
else tuple()
)
return frozenset(header.lower() for header in expose_headers)
def _get_allow_headers(app: Sanic) -> FrozenSet[str]:
    """Parsed CORS_ALLOW_HEADERS from the app config."""
    return _parse_allow_headers(app.config.CORS_ALLOW_HEADERS)
def _parse_allow_headers(value: str) -> FrozenSet[str]:
allow_headers = (
(
value
if isinstance(
value,
(list, set, frozenset, tuple),
)
else value.split(",")
)
if value
else tuple()
)
return frozenset(header.lower() for header in allow_headers)
def _get_max_age(app: Sanic) -> str:
    """Parsed CORS_MAX_AGE from the app config (empty string when unset)."""
    return _parse_max_age(app.config.CORS_MAX_AGE or "")
def _parse_max_age(value) -> str:
max_age = value or ""
if isinstance(max_age, timedelta):
max_age = str(int(max_age.total_seconds()))
return str(max_age)
def _get_allow_methods(app: Sanic) -> FrozenSet[str]:
    """Parsed CORS_METHODS from the app config."""
    return _parse_allow_methods(app.config.CORS_METHODS)
def _parse_allow_methods(value) -> FrozenSet[str]:
allow_methods = (
(
value
if isinstance(
value,
(list, set, frozenset, tuple),
)
else value.split(",")
)
if value
else tuple()
)
return frozenset(method.lower() for method in allow_methods)
def _is_request_with_credentials(request: Request) -> bool:
return bool(request.headers.get("authorization") or request.cookies)
| StarcoderdataPython |
1653590 | from flask import Flask, redirect, url_for, request, render_template, Response, stream_with_context
import ancv_html_scraper as ancv
import logging
from logging.handlers import RotatingFileHandler
from database.db import initialize_db, drop_db
from database.models import Restaurants
import json
from pymongo import MongoClient
app = Flask(__name__)
# Init mongodb
# NOTE(review): the Mongo host is hard-coded (docker-compose service name
# "my_db") -- consider reading it from the environment for other deployments.
app.config['MONGODB_SETTINGS'] = {
    'host': 'mongodb://my_db:27017'
}
initialize_db(app)
@app.route('/')
def index():
    """Render the home/search page; log the caller's remote address."""
    app.logger.info('User: ' + request.environ['REMOTE_ADDR'])
    return render_template('home.html')
@app.route('/success/<city_name>')
def success(city_name):
    """Render restaurant results for *city_name*, using MongoDB as a cache
    in front of the ANCV scraper."""
    # Check if the city is already present in the database
    response = get_restaurant_by_city(city_name)
    if response != None and len(response.get_data(as_text=True)) > 0:
        # NOTE(review): round-tripping the JSON through str() plus a blanket
        # single->double quote replace breaks on any value containing an
        # apostrophe -- serialize with json.dumps instead.
        body = str(response.get_json())
        body = body.replace("'", "\"")
        resto = Restaurants.from_json(body)
        if resto != None:
            return render_template("results.html", result = resto.restaurants, city=city_name)
    # Cache miss: scrape, persist the result, then render it.
    result = ancv.restoLookup(city_name)
    if result != None:
        mongoItem = Restaurants(city=city_name, restaurants=result)
        mongoItem.save()
    return render_template("results.html", result = result, city=city_name)
@app.route('/home',methods = ['POST', 'GET'])
def home():
    """Accept the search form (POST) or query string (GET) and redirect to
    the results page for the lower-cased city name."""
    if request.method == 'POST':
        city = request.form['nm']
        return redirect(url_for('success', city_name=city.lower()))
    else:
        # NOTE(review): a GET without ?nm= makes city None and .lower()
        # raises AttributeError -- consider a default or a 400 response.
        city = request.args.get('nm')
        return redirect(url_for('success', city_name=city.lower()))
# MONGO RESTFUL API
@app.route('/restaurants/<id>')
def get_restaurant(id):
    """Return one restaurant document by its Mongo id as JSON.

    The parameter name shadows the builtin ``id`` but matches the route
    placeholder, so it is kept.
    """
    resto = Restaurants.objects.get(id=id).to_json()
    return Response(resto, mimetype="application/json", status=200)
@app.route('/restaurants/city/<city>')
def get_restaurant_by_city(city):
    """Return the cached restaurants for *city* as JSON, or an empty body.

    Also called directly from success(); in that case request.method is the
    outer request's method, which is what the GET check below relies on.
    """
    if Restaurants.objects.count() == 0 or Restaurants.objects(city=city).count() == 0:
        if request.method == 'GET':
            # Empty iterable -> empty response body with a 200 status.
            return Response([], mimetype="application/json", status=200)
        else:
            return None
    else:
        resto = Restaurants.objects.get(city=city).to_json()
        return Response(resto, mimetype="application/json", status=200)
@app.route('/restaurants')
def get_restaurants():
    """Return every stored restaurant document as JSON."""
    restos = Restaurants.objects().to_json()
    return Response(restos, mimetype="application/json", status=200)
@app.route('/restaurants', methods=['POST'])
def add_restaurant():
    """Create a restaurant document from the request's JSON body.

    Returns the new document's id.
    """
    body = request.get_json()
    resto = Restaurants(**body).save()
    id = resto.id
    return {'id': str(id)}, 200
@app.route('/restaurants/<id>', methods=['PUT'])
def update_resto(id):
    """Update the restaurant with *id* from the request's JSON body."""
    body = request.get_json()
    Restaurants.objects.get(id=id).update(**body)
    return '', 200
@app.route('/restaurants/<id>', methods=['DELETE'])
def delete_resto(id):
    """Delete the restaurant with *id*."""
    Restaurants.objects.get(id=id).delete()
    return '', 200
@app.route('/truncate', methods=['DELETE'])
def delete_all_resto():
    """Drop the whole database (destructive; no auth check is performed)."""
    drop_db(app)
    return '', 200
if __name__ == '__main__':
    # Rotate the log at ~10 kB, keeping one backup.
    handler = RotatingFileHandler('logfile.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.INFO)
    # Bug fix: setFormatter() requires a logging.Formatter instance; passing
    # the raw format string made the first emitted record crash with
    # AttributeError ('str' object has no attribute 'format').
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s"))
    app.logger.addHandler(handler)
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to
    # the whole network -- disable debug outside local development.
    app.run(debug=True, host='0.0.0.0')
4826130 | <filename>libtaxii/__init__.py
# Copyright (c) 2017, The MITRE Corporation
# For license information, see the LICENSE.txt file
"""
The main libtaxii module
"""
import six
from six.moves import urllib
import libtaxii.messages_10 as tm10
import libtaxii.messages_11 as tm11
import libtaxii.clients as tc
from .constants import *
import cgi
from .version import __version__ # noqa
def get_message_from_http_response(http_response, in_response_to):
    """Create a TAXII message from an HTTPResponse object.

    This function parses the :py:class:`httplib.HTTPResponse` by reading the
    X-TAXII-Content-Type HTTP header to determine if the message binding is
    supported. If the X-TAXII-Content-Type header is present and the value
    indicates a supported Message Binding, this function will attempt to parse
    the HTTP Response body.

    If the X-TAXII-Content-Type header is not present, this function will
    attempt to build a Failure Status Message per the HTTP Binding 1.0
    specification.

    If the X-TAXII-Content-Type header is present and indicates an unsupported
    Message Binding, this function will raise a ValueError.

    Args:
        http_response (httplib.HTTPResponse): the HTTP response to
            parse
        in_response_to (str): the default value for in_response_to
    """
    # Dispatch on the concrete response type produced by httplib vs urllib2
    # vs urllib; each variant exposes headers and body slightly differently.
    if isinstance(http_response, six.moves.http_client.HTTPResponse):
        return get_message_from_httplib_http_response(http_response, in_response_to)
    elif isinstance(http_response, urllib.error.HTTPError):
        return get_message_from_urllib2_httperror(http_response, in_response_to)
    elif isinstance(http_response, urllib.response.addinfourl):
        return get_message_from_urllib_addinfourl(http_response, in_response_to)
    else:
        raise ValueError('Unsupported response type: %s.' % http_response.__class__.__name__)
def get_message_from_urllib2_httperror(http_response, in_response_to):
    """ This function should not be called by libtaxii users directly. """
    info = http_response.info()
    # Python 2 httplib message objects expose getheader(); Python 3 email
    # message objects expose get().
    if hasattr(info, 'getheader'):
        taxii_content_type = info.getheader('X-TAXII-Content-Type')
        _, params = cgi.parse_header(info.getheader('Content-Type'))
    else:
        taxii_content_type = info.get('X-TAXII-Content-Type')
        _, params = cgi.parse_header(info.get('Content-Type'))
    encoding = params.get('charset', 'utf-8')
    response_message = http_response.read()

    if taxii_content_type is None:
        # No TAXII header: synthesize a Failure Status Message from the raw
        # HTTP error, per the TAXII HTTP Binding 1.0 specification.
        if isinstance(response_message, six.binary_type):
            response_message = response_message.decode(encoding, 'replace')
        m = str(http_response) + '\r\n' + str(http_response.info()) + '\r\n' + response_message
        return tm11.StatusMessage(message_id='0', in_response_to=in_response_to, status_type=ST_FAILURE, message=m)
    elif taxii_content_type == VID_TAXII_XML_10:  # It's a TAXII XML 1.0 message
        return tm10.get_message_from_xml(response_message, encoding)
    elif taxii_content_type == VID_TAXII_XML_11:  # It's a TAXII XML 1.1 message
        return tm11.get_message_from_xml(response_message, encoding)
    elif taxii_content_type == VID_CERT_EU_JSON_10:
        return tm10.get_message_from_json(response_message, encoding)
    else:
        raise ValueError('Unsupported X-TAXII-Content-Type: %s' % taxii_content_type)
def get_message_from_urllib_addinfourl(http_response, in_response_to):
    """ This function should not be called by libtaxii users directly. """
    info = http_response.info()
    # Python 2 httplib message objects expose getheader(); Python 3 email
    # message objects expose get().
    if hasattr(info, 'getheader'):
        taxii_content_type = info.getheader('X-TAXII-Content-Type')
        _, params = cgi.parse_header(info.getheader('Content-Type'))
    else:
        taxii_content_type = info.get('X-TAXII-Content-Type')
        _, params = cgi.parse_header(info.get('Content-Type'))
    encoding = params.get('charset', 'utf-8')
    response_message = http_response.read()

    if taxii_content_type is None:  # Treat it as a Failure Status Message, per the spec
        message = []
        # NOTE(review): info().dict is a Python 2-era mimetools attribute;
        # confirm this branch still works on Python 3 email.message objects.
        header_dict = six.iteritems(http_response.info().dict)
        for k, v in header_dict:
            message.append(k + ': ' + v + '\r\n')
        message.append('\r\n')
        message.append(response_message)
        m = ''.join(message)
        return tm11.StatusMessage(message_id='0', in_response_to=in_response_to, status_type=ST_FAILURE, message=m)
    elif taxii_content_type == VID_TAXII_XML_10:  # It's a TAXII XML 1.0 message
        return tm10.get_message_from_xml(response_message, encoding)
    elif taxii_content_type == VID_TAXII_XML_11:  # It's a TAXII XML 1.1 message
        return tm11.get_message_from_xml(response_message, encoding)
    elif taxii_content_type == VID_CERT_EU_JSON_10:
        return tm10.get_message_from_json(response_message, encoding)
    else:
        raise ValueError('Unsupported X-TAXII-Content-Type: %s' % taxii_content_type)
def get_message_from_httplib_http_response(http_response, in_response_to):
    """ This function should not be called by libtaxii users directly. """
    # Python 2 responses expose getheader(); assume a get()-style mapping
    # otherwise.
    if hasattr(http_response, 'getheader'):
        taxii_content_type = http_response.getheader('X-TAXII-Content-Type')
        _, params = cgi.parse_header(http_response.getheader('Content-Type'))
    else:
        taxii_content_type = http_response.get('X-TAXII-Content-Type')
        _, params = cgi.parse_header(http_response.get('Content-Type'))
    encoding = params.get('charset', 'utf-8')
    response_message = http_response.read()

    if taxii_content_type is None:  # Treat it as a Failure Status Message, per the spec
        message = []
        header_tuples = http_response.getheaders()
        for k, v in header_tuples:
            message.append(k + ': ' + v + '\r\n')
        message.append('\r\n')
        message.append(response_message)
        m = ''.join(message)
        return tm11.StatusMessage(message_id='0', in_response_to=in_response_to, status_type=ST_FAILURE, message=m)
    elif taxii_content_type == VID_TAXII_XML_10:  # It's a TAXII XML 1.0 message
        return tm10.get_message_from_xml(response_message, encoding)
    elif taxii_content_type == VID_TAXII_XML_11:  # It's a TAXII XML 1.1 message
        return tm11.get_message_from_xml(response_message, encoding)
    else:
        # NOTE(review): unlike the urllib variants above, there is no
        # VID_CERT_EU_JSON_10 branch here, so JSON responses raise -- confirm
        # whether that asymmetry is intentional.
        raise ValueError('Unsupported X-TAXII-Content-Type: %s' % taxii_content_type)
| StarcoderdataPython |
6417088 | import os
import matplotlib.pyplot as plt
import cPickle as pickle
import tensorflow as tf
from showattendtell.core.test_solver import CaptioningSolver
from showattendtell.core.model import CaptionGenerator
from showattendtell.core.utils import load_coco_data
from showattendtell.core.bleu import evaluate
# Directory containing this script; used to resolve data/model paths below.
dir_path = os.path.dirname(os.path.realpath(__file__))
# %matplotlib inline
# Matplotlib display defaults used when visualizing attention/caption output.
plt.rcParams['figure.figsize'] = (8.0, 6.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
def test_model_on_image(img_path):
    """Caption a single image with the pretrained show-attend-and-tell model.

    Loads the training vocabulary, rebuilds the caption generator with the
    published hyper-parameters, restores checkpoint ``model-20`` and returns
    the solver's live-captioning result for ``img_path``.
    """
    vocab_path = os.path.join(dir_path, 'data/train/word_to_idx.pkl')
    with open(vocab_path, 'rb') as vocab_file:
        vocabulary = pickle.load(vocab_file)
    caption_model = CaptionGenerator(
        vocabulary,
        dim_feature=[196, 512],
        dim_embed=512,
        dim_hidden=1024,
        n_time_step=16,
        prev2out=True,
        ctx2out=True,
        alpha_c=1.0,
        selector=True,
        dropout=True,
    )
    checkpoint = os.path.join(dir_path, 'model/lstm/model-20')
    live_solver = CaptioningSolver(caption_model, test_model=checkpoint)
    return live_solver.test_live(img_path)
| StarcoderdataPython |
11346025 | """
Curses keycode parser. Exports (see function docstrings for more info):
- `key_to_name_mods`: parses curses keycode and returns the key name and its modifiers.
- `key_to_username`: parses curses keycode and returns the user-readable key name incl. modifiers
2020 by <NAME>
** LICENSE: The Unlicense **
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
"""
# Named keys and modifiers go up here in case you need to translate strings.
# If comparing against constants, you should only import these ones.
# `KEY_MAP` and any variables below it should not be imported.
# Modifier bit flags; combine with bitwise OR.
NO_MOD = 0
MOD_SHIFT = 1
MOD_CTRL = 2
MOD_ALT = 4
# Human-readable modifier names used by mod_flags_to_strings().
MOD_SHIFT_NAME = "shift"
MOD_CTRL_NAME = "ctrl"
MOD_ALT_NAME = "alt"
# Canonical key names returned by key_to_name_mods().
KEY_BACKSPACE = "backspace"
KEY_TAB = "tab"
KEY_NEWLINE = "return"
KEY_UP_ARROW = "up arrow"
KEY_DOWN_ARROW = "down arrow"
KEY_LEFT_ARROW = "left arrow"
KEY_RIGHT_ARROW = "right arrow"
KEY_ESCAPE = "esc"
KEY_SPACE = "space"
KEY_DELETE = "delete"
KEY_INSERT = "insert"
KEY_PAGEUP = "page up"
KEY_PAGEDOWN = "page down"
KEY_HOME = "home"
KEY_END = "end"
# Main key map
# Maps raw curses key codes (control characters and "KEY_*" capability
# names) to (canonical key name, modifier flags) tuples.
KEY_MAP = {
    "\t": (KEY_TAB, NO_MOD),
    "KEY_BTAB": (KEY_TAB, MOD_SHIFT),  # back-tab
    "\n": (KEY_NEWLINE, NO_MOD),
    "KEY_UP": (KEY_UP_ARROW, NO_MOD),
    "KEY_DOWN": (KEY_DOWN_ARROW, NO_MOD),
    "KEY_LEFT": (KEY_LEFT_ARROW, NO_MOD),
    "KEY_RIGHT": (KEY_RIGHT_ARROW, NO_MOD),
    "KEY_SR": (KEY_UP_ARROW, MOD_SHIFT),  # scroll-reverse
    "KEY_SF": (KEY_DOWN_ARROW, MOD_SHIFT),  # scroll-forward
    "KEY_SLEFT": (KEY_LEFT_ARROW, MOD_SHIFT),
    "KEY_SRIGHT": (KEY_RIGHT_ARROW, MOD_SHIFT),
    "\x1b": (KEY_ESCAPE, NO_MOD),
    # Terminal resize events arrive as KEY_RESIZE; this module maps them
    # onto F11 (real F11 is deliberately skipped when function keys are
    # registered further down).
    "KEY_RESIZE": ("F11", NO_MOD),
    " ": (KEY_SPACE, NO_MOD),
    "\x00": (KEY_SPACE, MOD_CTRL),
    "KEY_BACKSPACE": (KEY_BACKSPACE, NO_MOD),
    "\x08": (KEY_BACKSPACE, MOD_CTRL),
    "KEY_PPAGE": (KEY_PAGEUP, NO_MOD),
    "KEY_SPREVIOUS": (KEY_PAGEUP, MOD_SHIFT),
    "KEY_NPAGE": (KEY_PAGEDOWN, NO_MOD),
    "KEY_SNEXT": (KEY_PAGEDOWN, MOD_SHIFT),
    "KEY_HOME": (KEY_HOME, NO_MOD),
    "KEY_SHOME": (KEY_HOME, MOD_SHIFT),
    "KEY_END": (KEY_END, NO_MOD),
    "KEY_SEND": (KEY_END, MOD_SHIFT),
    "KEY_IC": (KEY_INSERT, NO_MOD),
    "KEY_DC": (KEY_DELETE, NO_MOD),
    "KEY_SDC": (KEY_DELETE, MOD_SHIFT),
}
# Handle common modifiers
# xterm-style modified keys are reported as "<base><digit>", where the digit
# encodes the modifier combination (see the loop below).
key_bases = {
    "kUP": KEY_UP_ARROW,
    "kDN": KEY_DOWN_ARROW,
    "kLFT": KEY_LEFT_ARROW,
    "kRIT": KEY_RIGHT_ARROW,
    "kDC": KEY_DELETE,
    "kIC": KEY_INSERT,
    "kHOM": KEY_HOME,
    "kEND": KEY_END,
    "kNXT": KEY_PAGEDOWN,
    "kPRV": KEY_PAGEUP,
}
# Suffix digits 3..7 encode alt / alt+shift / ctrl / ctrl+shift / ctrl+alt.
for i in range(3, 8):
    mods = NO_MOD
    if i == 3:
        mods = MOD_ALT
    elif i == 4:
        mods = MOD_ALT | MOD_SHIFT
    elif i == 5:
        mods = MOD_CTRL
    elif i == 6:
        mods = MOD_CTRL | MOD_SHIFT
    elif i == 7:
        mods = MOD_CTRL | MOD_ALT
    for base in key_bases:
        KEY_MAP[base + str(i)] = (key_bases[base], mods)
# Init function keys
# Curses numbers modified F-keys in blocks of 12: 1-12 plain, 13-24 shift,
# 25-36 ctrl, 37-48 ctrl+shift, 49-60 alt, 61-72 alt+shift.
for i in range(1, 13):
    if i == 11:
        # F11 is already claimed by the KEY_RESIZE entry in KEY_MAP.
        continue
    KEY_MAP["KEY_F({})".format(i)] = ("F{}".format(i), NO_MOD)
for i in range(13, 25):
    KEY_MAP["KEY_F({})".format(i)] = ("F{}".format(i - 12), MOD_SHIFT)
for i in range(25, 37):
    KEY_MAP["KEY_F({})".format(i)] = ("F{}".format(i - 24), MOD_CTRL)
for i in range(37, 49):
    KEY_MAP["KEY_F({})".format(i)] = ("F{}".format(i - 36), MOD_CTRL | MOD_SHIFT)
for i in range(49, 61):
    KEY_MAP["KEY_F({})".format(i)] = ("F{}".format(i - 48), MOD_ALT)
for i in range(61, 73):
    KEY_MAP["KEY_F({})".format(i)] = ("F{}".format(i - 60), MOD_ALT | MOD_SHIFT)
# Init CTRL keys
# Control characters 0x01-0x1a map to ctrl+a .. ctrl+z, unless already
# claimed above (e.g. "\t", "\n", "\x08", "\x1b").
offset = ord("a") - 1
for i in range(1, 27):
    if chr(i) in KEY_MAP:
        continue
    KEY_MAP[chr(i)] = (chr(i + offset), MOD_CTRL)
def key_to_name_mods(key):
    """Given a key code from curses, find the key name and mods and return it as
    a tuple in the form `(key: str, mods: int flags)`. To work with this, `import *`
    and compare `key` to the constants `KEY_`*, and test `mods` against any `MOD_`*.
    """
    try:
        found = KEY_MAP[key]
    except KeyError:
        if len(key) > 1:
            # Unknown multi-character capability name: pass it through.
            # (Bug fix: this previously returned an empty *list* for the
            # mods, breaking the documented `(str, int flags)` contract.)
            return (key, NO_MOD)
        # Single printable character: derive shift from the case, but only
        # for characters that actually have distinct cases.
        mod = NO_MOD
        if key.lower() != key.upper():
            if key != key.lower():
                mod = MOD_SHIFT
        return (key.lower(), mod)
    return found
def mod_flags_to_strings(mods):
    """Translate a modifier bit mask into an ordered list of modifier names
    (ctrl before alt before shift)."""
    names = []
    for flag, flag_name in ((MOD_CTRL, MOD_CTRL_NAME),
                            (MOD_ALT, MOD_ALT_NAME),
                            (MOD_SHIFT, MOD_SHIFT_NAME)):
        if mods & flag:
            names.append(flag_name)
    return names
def key_to_username(key):
    """Given a key, return a user-readable key name, e.g.: kDC5 -> 'ctrl + delete'."""
    name, mod_flags = key_to_name_mods(key)
    parts = mod_flags_to_strings(mod_flags)
    parts.append(name)
    return " + ".join(parts)
| StarcoderdataPython |
5070621 | <gh_stars>1000+
# -----------------------------------------------------------------------------
# yacc_simple.py
#
# A simple, properly specifier grammar
# -----------------------------------------------------------------------------
from .calclex import tokens
from ply import yacc
# Parsing rules
# Operator precedence, listed lowest to highest; UMINUS is the fictitious
# token used to give unary minus the tightest binding.
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
)
# dictionary of names
# Maps identifiers to their assigned values during evaluation.
names = { }
from .statement import *
from .expression import *
def p_error(t):
    """Yacc error callback: report the value of the offending token."""
    print("Syntax error at '{}'".format(t.value))
import os.path
# Build the LALR tables next to this module (writes parser.out / parsetab).
parser = yacc.yacc(outputdir=os.path.dirname(__file__))
| StarcoderdataPython |
9717179 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
import sys
from .common_imports import StringIO, etree, HelperTestCase, _str, _bytes, _chr
# Python 2/3 compatibility: `unicode` only exists on Python 2.
try:
    unicode
except NameError:
    unicode = str

ascii_uni = _bytes("a").decode("utf8")
# U+F8D2 is a private-use code point ("Klingon"); valid in text content
# but not in XML names.
klingon = _bytes("\\uF8D2").decode("unicode_escape")  # not valid for XML names
invalid_tag = _bytes("test").decode("utf8") + klingon
uni = _bytes("\\xc3\\u0680\\u3120").decode(
    "unicode_escape"
)  # some non-ASCII characters
# A small XML document containing non-ASCII text.
uxml = _bytes(
    "<test><title>test \\xc3\\xa1\\u3120</title><h1>page \\xc3\\xa1\\u3120 title</h1></test>"
).decode("unicode_escape")
class UnicodeTestCase(HelperTestCase):
    """Tests for Unicode handling in tags, attributes, text and parsing."""

    def test__str(self):
        # test the testing framework, namely _str from common_imports
        self.assertEqual(_str("\x10"), _str("\u0010"))
        self.assertEqual(_str("\x10"), _str("\U00000010"))
        self.assertEqual(_str("\u1234"), _str("\U00001234"))

    def test_unicode_xml(self):
        tree = etree.XML("<p>%s</p>" % uni)
        self.assertEqual(uni, tree.text)

    def test_wide_unicode_xml(self):
        if sys.maxunicode < 1114111:
            return  # skip test on narrow Unicode builds
        tree = etree.XML(_bytes("<p>\\U00026007</p>").decode("unicode_escape"))
        self.assertEqual(1, len(tree.text))
        self.assertEqual(
            _bytes("\\U00026007").decode("unicode_escape"), tree.text
        )

    def test_unicode_xml_broken(self):
        # A unicode string with an explicit encoding declaration must be rejected.
        uxml = '<?xml version="1.0" encoding="UTF-8"?>' + "<p>%s</p>" % uni
        self.assertRaises(ValueError, etree.XML, uxml)

    def test_unicode_tag(self):
        el = etree.Element(uni)
        self.assertEqual(uni, el.tag)

    def test_unicode_tag_invalid(self):
        # sadly, Klingon is not well-formed
        self.assertRaises(ValueError, etree.Element, invalid_tag)

    def test_unicode_nstag(self):
        tag = "{http://abc/}%s" % uni
        el = etree.Element(tag)
        self.assertEqual(tag, el.tag)

    def test_unicode_ns_invalid(self):
        # namespace URIs must conform to RFC 3986
        tag = "{http://%s/}abc" % uni
        self.assertRaises(ValueError, etree.Element, tag)

    def test_unicode_nstag_invalid(self):
        # sadly, Klingon is not well-formed
        tag = "{http://abc/}%s" % invalid_tag
        self.assertRaises(ValueError, etree.Element, tag)

    def test_unicode_qname(self):
        qname = etree.QName(uni, uni)
        tag = "{%s}%s" % (uni, uni)
        self.assertEqual(qname.text, tag)
        self.assertEqual(unicode(qname), tag)

    def test_unicode_qname_invalid(self):
        self.assertRaises(ValueError, etree.QName, invalid_tag)

    def test_unicode_attr(self):
        el = etree.Element("foo", {"bar": uni})
        self.assertEqual(uni, el.attrib["bar"])

    def test_unicode_comment(self):
        el = etree.Comment(uni)
        self.assertEqual(uni, el.text)

    def test_unicode_repr1(self):
        x = etree.Element(_str("å"))
        # must not raise UnicodeEncodeError
        repr(x)

    def test_unicode_repr2(self):
        x = etree.Comment(_str("ö"))
        repr(x)

    def test_unicode_repr3(self):
        x = etree.ProcessingInstruction(_str("Å"), _str("\u0131"))
        repr(x)

    def test_unicode_repr4(self):
        x = etree.Entity(_str("ä"))
        repr(x)

    def test_unicode_text(self):
        e = etree.Element("e")

        def settext(text):
            e.text = text

        # Non-characters, lone surrogates and NUL must all be rejected.
        self.assertRaises(ValueError, settext, _str("ab\ufffe"))
        # Bug fix: "\ffff" contained a literal form-feed escape ("\f");
        # the intended value, matching the U+FFFE case above, is the
        # non-character U+FFFF.
        self.assertRaises(ValueError, settext, _str("ö\uffff"))
        self.assertRaises(ValueError, settext, _str("\u0123\ud800"))
        self.assertRaises(ValueError, settext, _str("x\ud8ff"))
        self.assertRaises(ValueError, settext, _str("\U00010000\udfff"))
        self.assertRaises(ValueError, settext, _str("abd\x00def"))
        # should not Raise
        settext(_str("\ud7ff\ue000\U00010000\U0010FFFFäöas"))
        for char_val in range(0xD800, 0xDFFF + 1):
            self.assertRaises(ValueError, settext, "abc" + _chr(char_val))
            self.assertRaises(ValueError, settext, _chr(char_val))
            self.assertRaises(ValueError, settext, _chr(char_val) + "abc")
        # Raw bytes that are not valid text must also be rejected.
        self.assertRaises(ValueError, settext, _bytes("\xe4"))
        self.assertRaises(ValueError, settext, _bytes("\x80"))
        self.assertRaises(ValueError, settext, _bytes("\xff"))
        self.assertRaises(ValueError, settext, _bytes("\x08"))
        self.assertRaises(ValueError, settext, _bytes("\x19"))
        self.assertRaises(ValueError, settext, _bytes("\x20\x00"))
        # should not Raise
        settext(_bytes("\x09\x0A\x0D\x20\x60\x7f"))

    def test_uniname(self):
        Element = etree.Element

        def el(name):
            return Element(name)

        self.assertRaises(ValueError, el, ":")
        self.assertRaises(ValueError, el, "0a")
        self.assertRaises(ValueError, el, _str("\u203f"))
        # should not Raise
        el(_str("\u0132"))

    def test_unicode_parse_stringio(self):
        el = etree.parse(StringIO("<p>%s</p>" % uni)).getroot()
        self.assertEqual(uni, el.text)

    ## def test_parse_fileobject_unicode(self):
    ##     # parse unicode from unnamed file object (not supported by ElementTree)
    ##     f = SillyFileLike(uxml)
    ##     root = etree.parse(f).getroot()
    ##     self.assertEqual(unicode(etree.tostring(root, 'UTF-8'), 'UTF-8'),
    ##                      uxml)
class EncodingsTestCase(HelperTestCase):
def test_illegal_utf8(self):
data = _bytes("<test>\x80\x80\x80</test>", encoding="iso8859-1")
self.assertRaises(etree.XMLSyntaxError, etree.fromstring, data)
def test_illegal_utf8_recover(self):
data = _bytes("<test>\x80\x80\x80</test>", encoding="iso8859-1")
parser = etree.XMLParser(recover=True)
self.assertRaises(etree.XMLSyntaxError, etree.fromstring, data, parser)
def _test_encoding(self, encoding, xml_encoding_name=None):
foo = (
"""<?xml version='1.0' encoding='%s'?>\n<tag attrib='123'></tag>"""
% (xml_encoding_name or encoding)
)
root = etree.fromstring(foo.encode(encoding))
self.assertEqual("tag", root.tag)
doc_encoding = root.getroottree().docinfo.encoding
self.assertTrue(
doc_encoding.lower().rstrip("lbe"),
(xml_encoding_name or encoding).lower().rstrip("lbe"),
)
def test_utf8_fromstring(self):
self._test_encoding("utf-8")
def test_utf8sig_fromstring(self):
self._test_encoding("utf_8_sig", "utf-8")
def test_utf16_fromstring(self):
self._test_encoding("utf-16")
def test_utf16LE_fromstring(self):
self._test_encoding("utf-16le", "utf-16")
def test_utf16BE_fromstring(self):
self._test_encoding("utf-16be", "utf-16")
def test_utf32_fromstring(self):
self._test_encoding("utf-32", "utf-32")
def test_utf32LE_fromstring(self):
self._test_encoding("utf-32le", "utf-32")
def test_utf32BE_fromstring(self):
self._test_encoding("utf-32be", "utf-32")
def test_suite():
    """Assemble the unicode and encodings test cases into one suite."""
    suite = unittest.TestSuite()
    for case_class in (UnicodeTestCase, EncodingsTestCase):
        suite.addTests([unittest.makeSuite(case_class)])
    return suite
| StarcoderdataPython |
4987186 | import pandas as pd
# Metadata lookup tables; the integer fields in SOURCES below appear to be
# indices into these lists -- TODO confirm ('vehicle' has no matching key
# here; 'collector' is presumably the vehicle list).
NAMES = {
    'locations': ["Coal Oil Point"],
    'collector': ["UAV: Matrice 100"],
    'device': ["RTL-SDR New"]
}

# One entry per recorded channel scan. 'altitude-adjust' is an additive
# correction applied to logged altitudes (units not stated here -- verify).
SOURCES = [
    {
        "name": "CHANNEL_SCAN-1",
        'label': "SB 1",
        'location': 0,
        'vehicle': 0,
        'device': 0,
        "altitude-adjust": -14.11,
    },
    {
        "name": "CHANNEL_SCAN-2",
        'label': "SB 1",
        'location': 0,
        'vehicle': 0,
        'device': 0,
        "altitude-adjust": -14.11,
    },
    {
        "name": "CHANNEL_SCAN-3",
        'label': "SB 1",
        'location': 0,
        'vehicle': 0,
        'device': 0,
        "altitude-adjust": -14.11,
    },
]

# Module-level accumulators, populated elsewhere.
RESULTS = pd.DataFrame()
RTL_DATA = pd.DataFrame()
DRONE_LOGS = pd.DataFrame()
| StarcoderdataPython |
3508323 | from scenarios import *
# Join ten IRC channels (#aaa .. #jjj), page through the gateway's channel
# list with RSM (3 items per page, 10 total), then leave every channel.
scenario = (
    # Joining the first channel also establishes the IRC connection.
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#aaa%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    sequences.connection("irc.localhost", '{jid_one}/{resource_one}'),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    # Join the remaining nine channels; each join yields presence + subject.
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#bbb%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#ccc%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#ddd%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#eee%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#fff%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#ggg%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#hhh%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#iii%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#jjj%{irc_server_one}/{nick_one}' ><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
    expect_stanza("/presence"),
    expect_stanza("/message"),
    # Page 1: first three items; remember <last/> as the paging cursor.
    send_stanza("<iq from='{jid_one}/{resource_one}' id='id' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><max>3</max></set></query></iq>"),
    expect_stanza("/iq[@type='result']/disco_items:query",
                  "count(/iq/disco_items:query/disco_items:item[@jid])=3",
                  "/iq/disco_items:query/rsm:set/rsm:first[@index='0']",
                  "/iq/disco_items:query/rsm:set/rsm:last",
                  after = save_value("last", lambda stanza: extract_text("/iq/disco_items:query/rsm:set/rsm:last", stanza))),
    # Page 2 (items 3-5).
    send_stanza("<iq from='{jid_one}/{resource_one}' id='id' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>{last}</after><max>3</max></set></query></iq>"),
    expect_stanza("/iq[@type='result']/disco_items:query",
                  "count(/iq/disco_items:query/disco_items:item[@jid])=3",
                  "/iq/disco_items:query/rsm:set/rsm:first[@index='3']",
                  "/iq/disco_items:query/rsm:set/rsm:last",
                  after = save_value("last", lambda stanza: extract_text("/iq/disco_items:query/rsm:set/rsm:last", stanza))),
    # Page 3 (items 6-8).
    send_stanza("<iq from='{jid_one}/{resource_one}' id='id' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>{last}</after><max>3</max></set></query></iq>"),
    expect_stanza("/iq[@type='result']/disco_items:query",
                  "count(/iq/disco_items:query/disco_items:item[@jid])=3",
                  "/iq/disco_items:query/rsm:set/rsm:first[@index='6']",
                  "/iq/disco_items:query/rsm:set/rsm:last",
                  after = save_value("last", lambda stanza: extract_text("/iq/disco_items:query/rsm:set/rsm:last", stanza))),
    # Final page: one remaining item and the total count of 10.
    send_stanza("<iq from='{jid_one}/{resource_one}' id='id' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>{last}</after><max>3</max></set></query></iq>"),
    expect_stanza("/iq[@type='result']/disco_items:query",
                  "count(/iq/disco_items:query/disco_items:item[@jid])=1",
                  "/iq/disco_items:query/rsm:set/rsm:first[@index='9']",
                  "/iq/disco_items:query/rsm:set/rsm:last",
                  "/iq/disco_items:query/rsm:set/rsm:count[text()='10']"),
    # Leave all ten channels; the ten unavailable presences may arrive in
    # any order, so send everything first and then expect ten of them.
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#aaa%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#bbb%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#ccc%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#ddd%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#eee%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#fff%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#ggg%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#hhh%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#iii%{irc_server_one}/{nick_one}' type='unavailable' />"),
    send_stanza("<presence from='{jid_one}/{resource_one}' to='#jjj%{irc_server_one}/{nick_one}' type='unavailable' />"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
    expect_stanza("/presence[@type='unavailable']"),
)
| StarcoderdataPython |
269 | <reponame>jaraco/hgtools
import os
import pytest
from hgtools import managers
def _ensure_present(mgr):
    """Skip the current test if the manager's VCS tool is not installed."""
    try:
        mgr.version()
    except Exception:
        pytest.skip()
@pytest.fixture
def tmpdir_as_cwd(tmpdir):
    """Yield *tmpdir* with the current working directory switched into it."""
    with tmpdir.as_cwd():
        yield tmpdir
@pytest.fixture
def hg_repo(tmpdir_as_cwd):
    """Create a Mercurial repo in the cwd with two commits touching bar/baz."""
    mgr = managers.MercurialManager()
    _ensure_present(mgr)
    # Commit 1: add an empty bar/baz.
    mgr._invoke('init', '.')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('addremove')
    mgr._invoke('ci', '-m', 'committed')
    # Commit 2: give bar/baz some content.
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('ci', '-m', 'added content')
    return tmpdir_as_cwd
@pytest.fixture
def git_repo(tmpdir_as_cwd):
    """Create a Git repo in the cwd with two commits touching bar/baz."""
    mgr = managers.GitManager()
    _ensure_present(mgr)
    mgr._invoke('init')
    # Git requires an identity before committing.
    mgr._invoke('config', 'user.email', '<EMAIL>')
    mgr._invoke('config', 'user.name', 'HGTools')
    # Commit 1: add an empty bar/baz.
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('add', '.')
    mgr._invoke('commit', '-m', 'committed')
    # Commit 2: give bar/baz some content.
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('commit', '-am', 'added content')
    return tmpdir_as_cwd
def touch(filename):
    """Ensure *filename* exists without truncating any existing content."""
    handle = open(filename, 'a')
    handle.close()
| StarcoderdataPython |
5159630 | import numpy as np
import torch
from tqdm import tqdm
from .. import effdet_wrapper as det
from ..module import BoundingBoxDetector
from ..util import from_PIL_to_cv
class YetAnotherEfficientDetDetector(BoundingBoxDetector):
    """Bounding-box detector backed by the "Yet Another EfficientDet" backbone."""

    def __init__(self, anchor_ratios, anchor_scales,
                 compound_coef, num_classes, model_fn,
                 use_cuda=True, batch_size=50, verbose=False):
        """Load EfficientDet weights from *model_fn* and prepare for inference.

        compound_coef (0-7) selects both the backbone size and the fixed
        network input resolution; num_classes must match the checkpoint.
        use_cuda silently falls back to CPU when CUDA is unavailable.
        """
        if not torch.cuda.is_available():
            use_cuda = False
        self.use_cuda = use_cuda
        self.model = det.EfficientDetBackbone(
            compound_coef=compound_coef,
            num_classes=num_classes,
            ratios=anchor_ratios,
            scales=anchor_scales
        )
        # Load on CPU first, then optionally move to GPU.
        state_dict = torch.load(model_fn, map_location="cpu")
        self.model.load_state_dict(state_dict)
        if use_cuda:
            self.model.to("cuda")
        # Inference only: disable train-time behaviour and gradients.
        self.model.eval()
        self.model.requires_grad_(False)
        # Fixed input resolution per compound coefficient (D0..D7).
        self.input_size = \
            input_size = [512, 640, 768, 896, 1024, 1280, 1280, 1536][compound_coef]
        self.batch_size = batch_size
        self.verbose = verbose

    def _detect(self, pil_images, class_lst, threshold=0.5, iou_threshold=0.5):
        """Run batched detection on a list of PIL images.

        Returns one list per input image of dicts with keys
        ``coordinates`` ((x1, y1, x2, y2) ints), ``class`` (looked up in
        *class_lst* by class id) and ``score``.
        """
        images = [from_PIL_to_cv(img) for img in pil_images]
        ret = []
        it = list(range(0, len(images), self.batch_size))
        if self.verbose:
            it = tqdm(it)
        for i_start in it:
            # Resize/pad the batch to the fixed network input size.
            _, framed_imgs, framed_metas = \
                det.preprocess(images[i_start:i_start+self.batch_size],
                               max_size=self.input_size)
            # Stack to a batch and convert HWC uint8 -> NCHW float32.
            x = torch.stack([torch.from_numpy(f_img)
                             for f_img in framed_imgs], dim=0)
            x = x.to(torch.float32).permute(0, 3, 1, 2)
            if self.use_cuda:
                x = x.to("cuda")
            with torch.no_grad():
                feat, reg, clsf, anchor = self.model(x)
                reg_box = det.BBoxTransform()
                clip_box = det.ClipBoxes()
                # Decode boxes + NMS, then map back to original image coords.
                out = det.postprocess(x, anchor, reg, clsf, reg_box,
                                      clip_box, threshold, iou_threshold)
                out = det.invert_affine(framed_metas, out)
            # NOTE(review): np.int is removed in NumPy >= 1.24; consider int.
            ret += [
                [
                    {
                        "coordinates": tuple(out_image["rois"][j].astype(np.int)),
                        "class": class_lst[out_image["class_ids"][j]],
                        "score": out_image["scores"][j]
                    }
                    for j in range(len(out_image["rois"]))
                ]
                for out_image in out
            ]
        return ret
| StarcoderdataPython |
8105017 | # -*- coding: utf-8 -*-
import os
import cv2
import math
import torch
import numpy as np
import torch.nn.functional as F
class Detection:
    """Face detector based on an OpenCV-DNN RetinaFace Caffe model."""

    def __init__(self):
        caffemodel = "./checkpoint/Widerface-RetinaFace.caffemodel"
        deploy = "./checkpoint/deploy.prototxt"
        self.detector = cv2.dnn.readNetFromCaffe(deploy, caffemodel)
        # NOTE(review): this confidence threshold is defined but never used
        # in get_bbox below, which simply keeps the highest-scoring box.
        self.detector_confidence = 0.7

    def get_bbox(self, img):
        """Return the highest-confidence face box as [x, y, width, height] pixels."""
        height, width = img.shape[0], img.shape[1]
        aspect_ratio = width / height
        # Downscale large images to roughly 192x192 pixels of area
        # (preserving aspect ratio) before running the network.
        if img.shape[1] * img.shape[0] >= 192 * 192:
            img = cv2.resize(img,
                             (int(192 * math.sqrt(aspect_ratio)),
                              int(192 / math.sqrt(aspect_ratio))), interpolation=cv2.INTER_LINEAR)
        # Mean subtraction values are the usual Caffe BGR channel means.
        blob = cv2.dnn.blobFromImage(img, 1, mean=(104, 117, 123))
        self.detector.setInput(blob, 'data')
        out = self.detector.forward('detection_out').squeeze()
        # Keep only the single most confident detection (column 2 = score).
        max_conf_index = np.argmax(out[:, 2])
        # Columns 3-6 are normalized corner coordinates; scale to pixels.
        left, top, right, bottom = out[max_conf_index, 3]*width, out[max_conf_index, 4]*height, out[max_conf_index, 5]*width, out[max_conf_index, 6]*height
        bbox = [int(left), int(top), int(right-left+1), int(bottom-top+1)]
        return bbox
class AntiSpoofPredict(Detection):
    """Anti-spoofing predictor; inherits face detection from Detection."""

    def __init__(self, device_id):
        super(AntiSpoofPredict, self).__init__()
        # Use the requested GPU, falling back to CPU when CUDA is absent.
        self.device = torch.device("cuda:{}".format(device_id) if torch.cuda.is_available() else "cpu")
| StarcoderdataPython |
25810 | <gh_stars>0
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from kitsune.sumo.models import ModelBase, LocaleField
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files
class Media(ModelBase):
    """Generic model for media"""

    # Display title; unique together with locale (see Meta).
    title = models.CharField(max_length=255, db_index=True)
    created = models.DateTimeField(default=datetime.now, db_index=True)
    updated = models.DateTimeField(default=datetime.now, db_index=True)
    updated_by = models.ForeignKey(User, null=True)
    description = models.TextField(max_length=10000)
    locale = LocaleField(default=settings.GALLERY_DEFAULT_LANGUAGE,
                         db_index=True)
    # None for published media; used with creator in unique_together so a
    # user can hold at most one draft at a time.
    is_draft = models.NullBooleanField(default=None, null=True, editable=False)

    class Meta(object):
        abstract = True
        ordering = ['-created']
        # NOTE(review): ('is_draft', 'creator') relies on a `creator` field
        # that only the concrete subclasses define.
        unique_together = (('locale', 'title'), ('is_draft', 'creator'))

    def __unicode__(self):
        return '[%s] %s' % (self.locale, self.title)
@auto_delete_files
class Image(Media):
    """An uploaded gallery image with an optional thumbnail."""

    creator = models.ForeignKey(User, related_name='gallery_images')
    file = models.ImageField(upload_to=settings.GALLERY_IMAGE_PATH,
                             max_length=settings.MAX_FILEPATH_LENGTH)
    thumbnail = models.ImageField(
        upload_to=settings.GALLERY_IMAGE_THUMBNAIL_PATH, null=True,
        max_length=settings.MAX_FILEPATH_LENGTH)

    def get_absolute_url(self):
        return reverse('gallery.media', args=['image', self.id])

    def thumbnail_url_if_set(self):
        """Returns self.thumbnail, if set, else self.file"""
        return self.thumbnail.url if self.thumbnail else self.file.url
@auto_delete_files
class Video(Media):
    """An uploaded gallery video in up to three container formats."""

    creator = models.ForeignKey(User, related_name='gallery_videos')
    webm = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
                            max_length=settings.MAX_FILEPATH_LENGTH)
    ogv = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
                           max_length=settings.MAX_FILEPATH_LENGTH)
    flv = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
                           max_length=settings.MAX_FILEPATH_LENGTH)
    poster = models.ImageField(upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH,
                               max_length=settings.MAX_FILEPATH_LENGTH,
                               null=True)
    thumbnail = models.ImageField(
        upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH, null=True,
        max_length=settings.MAX_FILEPATH_LENGTH)

    def get_absolute_url(self):
        return reverse('gallery.media', args=['video', self.id])

    def thumbnail_url_if_set(self):
        """Returns self.thumbnail.url, if set, else default thumbnail URL"""
        # While the thumbnail is still being generated, show the
        # "in progress" placeholder from settings.
        progress_url = settings.GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL
        return self.thumbnail.url if self.thumbnail else progress_url
1650652 | <reponame>BlueBrain/Spack
from spack import *
from spack.pkg.builtin.py_pyarrow import PyPyarrow as BuiltinPyPyarrow
class PyPyarrow(BuiltinPyPyarrow):
    __doc__ = BuiltinPyPyarrow.__doc__

    # Extra version added on top of the builtin Spack recipe.
    version('6.0.1', sha256='423990d56cd8f12283b67367d48e142739b789085185018eb03d05087c3c8d43')

    # Mirror the builtin recipe's arrow dependencies for each added version,
    # keeping arrow's variants in sync with this package's variants.
    for v in ('@6.0.1',):
        depends_on('arrow+python' + v, when=v)
        depends_on('arrow+parquet+python' + v, when='+parquet' + v)
        depends_on('arrow+cuda' + v, when='+cuda' + v)
        depends_on('arrow+orc' + v, when='+orc' + v)
| StarcoderdataPython |
29220 | <reponame>jiadaizhao/LintCode<filename>1301-1400/1387-Binary Trees With Factors/1387-Binary Trees With Factors.py
class Solution:
    """
    @param A: a list of distinct integers greater than 1
    @return: the number of binary trees whose node values all come from A
             and where every non-leaf equals the product of its two
             children, modulo 10^9 + 7
    """
    def numFactoredBinaryTrees(self, A):
        # Sort in place so every potential child precedes its parent.
        A.sort()
        MOD = 10 ** 9 + 7
        # trees[v] = number of valid trees rooted at value v.
        trees = {}
        for j, root in enumerate(A):
            count = 1  # the single-node tree
            for left in A[:j]:
                quotient, remainder = divmod(root, left)
                # Both children must be values already seen (smaller).
                if remainder == 0 and quotient in trees:
                    count = (count + trees[left] * trees[quotient]) % MOD
            trees[root] = count
        return sum(trees.values()) % MOD
| StarcoderdataPython |
6449490 | from opserver.plugins.alarm_base import *
from opserver.sandesh.alarmgen_ctrl.sandesh_alarm_base.ttypes import *
import json
class ProcessStatus(AlarmBase):
    """Process Failure.
    NodeMgr reports abnormal status for process(es) in NodeStatus.process_info"""

    def __init__(self):
        # Severity SYS_ERR with frequency-check enabled.
        # NOTE(review): at/it/fec/fcs/fct are AlarmBase tuning knobs
        # (timers / frequency-check window and count) -- see AlarmBase for
        # their exact semantics.
        AlarmBase.__init__(self, AlarmBase.SYS_ERR, at=10, it=10, fec=True,
                           fcs=300, fct=4)

    def __call__(self, uve_key, uve_data):
        """Evaluate the alarm against one UVE's data.

        Returns a list of AllOf match results when process_info is missing
        or any process is not RUNNING; returns None otherwise.
        """
        or_list = []
        v2 = None
        v1 = uve_data.get("NodeStatus", None)
        if v1 is not None:
            v2 = v1.get("process_info", None)
        if v2 is None:
            # Missing NodeStatus.process_info itself raises the alarm.
            or_list.append(AllOf(all_of=[AlarmElement(\
                rule=AlarmTemplate(oper="==",
                    operand1=Operand1(keys=["NodeStatus", "process_info"]),
                    operand2=Operand2(json_value="null")),
                json_operand1_value="null")]))
            return or_list
        value = None
        proc_status_list = v2
        for proc_status in proc_status_list:
            value = str(proc_status)
            # Any state other than RUNNING contributes one alarm element,
            # tagged with the offending process name.
            if proc_status["process_state"] != "PROCESS_STATE_RUNNING":
                or_list.append(AllOf(all_of=[AlarmElement(\
                    rule=AlarmTemplate(oper="!=",
                        operand1=Operand1(\
                            keys=["NodeStatus", "process_info", "process_state"]),
                        operand2=Operand2(json_value=\
                            json.dumps("PROCESS_STATE_RUNNING"))),
                    json_operand1_value=json.dumps(proc_status["process_state"]),
                    json_vars={\
                        "NodeStatus.process_info.process_name":\
                            proc_status["process_name"]})]))
        if len(or_list):
            return or_list
        else:
            return None
| StarcoderdataPython |
6547547 | <reponame>M0gician/lightRaven<gh_stars>1-10
from .base import SamplingBase
from .imptsamp import IS
from .pdis import PDIS
from .wis import WIS
from .cwpdis import CWPDIS
| StarcoderdataPython |
4928669 | <filename>sts2/game/arena.py
# Copyright (C) 2020 Electronic Arts Inc. All rights reserved.
import numpy
class Arena:
    """Axis-aligned rectangular playing surface centred on the origin.

    Coordinates are (x, z) pairs; nets sit on the z axis at both ends.
    """

    def __init__(self, arena_size):
        self.arena_size = arena_size
        half_width = arena_size[0] / 2
        half_length = arena_size[1] / 2
        self.max_x = half_width
        self.min_x = -half_width
        self.max_z = half_length
        self.min_z = -half_length
        self.mins = numpy.array([self.min_x, self.min_z])
        self.maxs = numpy.array([self.max_x, self.max_z])
        # One net at each end of the z axis, centred on x = 0.
        self.net_position = [numpy.array([0, self.max_z]), numpy.array([0, self.min_z])]

    def GetNormalizedCoord(self, pos):
        """Map an arena coordinate into [0, 1]^2 (0 = min corner, 1 = max corner)."""
        span = self.maxs - self.mins
        return (pos - self.mins) / span

    def GetArenaCoordFromNormalized(self, pos):
        """Inverse of GetNormalizedCoord: map [0, 1]^2 back to arena coordinates."""
        span = self.maxs - self.mins
        return self.mins + pos * span
| StarcoderdataPython |
3496889 | import gym
import jax
import jax.numpy as jnp
import coax
import haiku as hk
from numpy import prod
import optax
# the name of this script
name = 'ppo'
# the Pendulum MDP
env = gym.make('Pendulum-v0')
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func_pi(S, is_training):
    """Haiku forward pass for a Gaussian policy: state -> {'mu', 'logvar'}.

    A shared two-layer trunk feeds two separate heads producing the mean
    and log-variance of the action distribution, reshaped to the env's
    action-space shape. The final layers are zero-initialized
    (w_init=jnp.zeros).
    """
    shared = hk.Sequential((
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
    ))
    mu = hk.Sequential((
        shared,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(prod(env.action_space.shape), w_init=jnp.zeros),
        hk.Reshape(env.action_space.shape),
    ))
    logvar = hk.Sequential((
        shared,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(prod(env.action_space.shape), w_init=jnp.zeros),
        hk.Reshape(env.action_space.shape),
    ))
    return {'mu': mu(S), 'logvar': logvar(S)}
def func_v(S, is_training):
    """Haiku forward pass for the state-value function: state -> scalar V(S)."""
    seq = hk.Sequential((
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(1, w_init=jnp.zeros), jnp.ravel
    ))
    return seq(S)
# define function approximators
pi = coax.Policy(func_pi, env)
v = coax.V(func_v, env)
# target network: behaviour policy, moved towards pi by soft updates below
pi_targ = pi.copy()
# experience tracer: 5-step bootstrapped transitions with discount 0.9
tracer = coax.reward_tracing.NStep(n=5, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=512)
# policy regularizer (avoid premature exploitation)
policy_reg = coax.regularizers.EntropyRegularizer(pi, beta=0.01)
# updaters: TD critic update and clipped-surrogate (PPO) policy update
simpletd = coax.td_learning.SimpleTD(v, optimizer=optax.adam(1e-3))
ppo_clip = coax.policy_objectives.PPOClip(pi, regularizer=policy_reg, optimizer=optax.adam(1e-4))
# train
while env.T < 1000000:
    s = env.reset()
    for t in range(env.spec.max_episode_steps):
        # Act with the *target* policy; keep log-prob for the PPO ratio.
        a, logp = pi_targ(s, return_logp=True)
        s_next, r, done, info = env.step(a)
        # trace rewards
        tracer.add(s, a, r, done, logp)
        while tracer:
            buffer.add(tracer.pop())
        # learn: only once the buffer is full, then clear it (on-policy-ish)
        if len(buffer) >= buffer.capacity:
            for _ in range(int(4 * buffer.capacity / 32)):  # 4 passes per round
                transition_batch = buffer.sample(batch_size=32)
                metrics_v, td_error = simpletd.update(transition_batch, return_td_error=True)
                # The critic's TD error serves as the advantage estimate.
                metrics_pi = ppo_clip.update(transition_batch, td_error)
                env.record_metrics(metrics_v)
                env.record_metrics(metrics_pi)
            buffer.clear()
            # Move the behaviour policy a step towards the learner policy.
            pi_targ.soft_update(pi, tau=0.1)
        if done:
            break
        s = s_next
    # generate an animated GIF to see what's going on
    if env.period(name='generate_gif', T_period=10000) and env.T > 5000:
        T = env.T - env.T % 10000  # round to 10000s
        coax.utils.generate_gif(
            env=env, policy=pi, filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| StarcoderdataPython |
110508 |
from .code import stupid_sum, stupid_mul
| StarcoderdataPython |
4978262 | import numpy
from setuptools import setup
from setuptools.extension import Extension
from Cython.Distutils import build_ext
# Cython extension module, compiled against the NumPy C headers.
ext_modules=[
    Extension('_filter', ['speaker_filter/_filter.pyx'], include_dirs=[numpy.get_include()]),
]
setup(
    name = 'diy-speaker-filter',
    cmdclass = {'build_ext': build_ext},  # let Cython drive the extension build
    ext_modules = ext_modules,
    install_requires=['numpy', 'cython']
)
6705666 | <filename>library/migrations/0026_auto_20180825_2056.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-08-25 20:56
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration: give Profession.code a uuid4 default."""

    dependencies = [
        ('library', '0025_auto_20180825_2049'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profession',
            name='code',
            # uuid.uuid4 is passed as a callable so each row gets a fresh UUID
            field=models.UUIDField(default=uuid.uuid4),
        ),
    ]
| StarcoderdataPython |
11398906 | from __future__ import absolute_import, print_function
import os
import pytest
from bokeh.io import output_file
from .webserver import SimpleWebServer
@pytest.fixture(scope='session', autouse=True)
def server(request):
    """Session-wide web server used by the browser tests.

    Rewritten as a yield fixture (the modern pytest idiom) instead of
    ``request.addfinalizer``: everything after ``yield`` runs as teardown,
    so the server is stopped at the end of the session exactly as before.
    """
    server = SimpleWebServer()
    server.start()
    yield server
    server.stop()
@pytest.fixture(scope='session')
def base_url(request, server):
    """Root URL of the session-wide test web server."""
    return 'http://{}:{}'.format(server.host, server.port)
@pytest.fixture
def output_file_url(request, base_url):
    """Point bokeh output at an HTML file named after the current test.

    The file is created next to the test module and removed again once the
    test finishes.
    """
    html_name = request.function.__name__ + '.html'
    html_path = request.fspath.dirpath().join(html_name).strpath
    output_file(html_path)

    def cleanup():
        os.remove(html_path)

    request.addfinalizer(cleanup)
    return '{}/{}'.format(base_url, html_path)
| StarcoderdataPython |
8041844 | <reponame>jolo-dev/show-solidarity<filename>tests/test_lambda.py<gh_stars>0
# from function.put_object import handler
# import json
# from os import path
# import pytest
# @pytest.fixture
# def test_event():
# with open(path.join(path.dirname(__file__), "event.json"), "r") as f:
# event = json.load(f)
# return event
# def test_lambda_handler(test_event):
# response = handler(test_event, None)
# assert response is not None
| StarcoderdataPython |
4883766 | <reponame>LukeHackett/rpi-temperature-sensor
#!/usr/bin/env python3
import os
import json
import time
import argparse
import urllib.request
import Adafruit_DHT
# Sensor Settings
# Set sensor type : Options are DHT11,DHT22 or AM2302
SENSOR = Adafruit_DHT.DHT11
# BCM pin the sensor data line is wired to; env var override (note: the
# default is an int while an env override is a str — callers cast with int()).
GPIO_PIN = os.getenv('GPIO_PIN', 17)
# Open Weather Application ID
# Sign up for an account here: https://www.openweathermap.org/ (default key is a sample key)
APP_ID = os.getenv('OPEN_WEATHER_APP_ID', 'b6907d289e10d714a6e88b30761fae22')
def argument_parser():
    """Build the command-line parser: one required positional `location`
    plus optional `--city` / `--country` for the external weather lookup."""
    parser = argparse.ArgumentParser(prog='sensor', description='description of the program')
    parser.add_argument('location', type=str, help="the sensor's local location, e.g. bedroom or lounge")
    optional_flags = (
        ('--city', "the sensor's city"),
        ('--country', "the sensor's country"),
    )
    for flag, help_text in optional_flags:
        parser.add_argument(flag, type=str, help=help_text)
    return parser
def fetch_readings_from_sensor():
    """Read (temperature, humidity) from the local DHT sensor.

    read_retry polls up to 15 times (waiting 2 seconds between retries);
    the DHT11 is timing-sensitive, so either value may be None if no valid
    reading was obtained.
    """
    readings = Adafruit_DHT.read_retry(SENSOR, int(GPIO_PIN))
    humidity, temperature = readings
    return temperature, humidity
def fetch_readings_from_api(city=None, country=None):
    """Fetch (temperature, humidity) for city/country from OpenWeather.

    Returns (None, None) unless BOTH city and country are provided.
    Fix: the HTTP response was never closed (resource leak) — it is now
    managed with a context manager.
    """
    if city and country:
        # todo add &units=metric to enable metric readings
        url = "https://samples.openweathermap.org/data/2.5/weather?q={0},{1}&appid={2}".format(city, country, APP_ID)
        with urllib.request.urlopen(url) as response:
            body = json.loads(response.read())
        return float(body['main']['temp']), float(body['main']['humidity'])
    return None, None
def record_reading(reading):
    """Serialize *reading* to UTF-8 JSON and print it.

    The HTTP upload is currently stubbed out (the request/response code is
    intentionally disabled); the function always returns None.
    """
    payload = json.dumps(reading)
    payload = str(payload).encode('utf-8')
    # Upload intentionally disabled — no endpoint configured yet:
    #   request = urllib.request.Request('', data=payload)
    #   response = urllib.request.urlopen(request)
    print(payload)
    return None
def epoch_in_seconds():
    """Current Unix time, truncated to whole seconds."""
    now = time.time()
    return int(now)
def celsius_to_kelvin(value):
    """Convert a Celsius temperature to Kelvin (always returns float)."""
    kelvin = value + 273.15
    return float(kelvin)
def celsius_to_fahrenheit(value):
    """Convert a Celsius temperature to Fahrenheit (always returns float)."""
    fahrenheit = (value * 9 / 5) + 32
    return float(fahrenheit)
def get_reading(location, celsius, humidity, timestamp=None):
    """Assemble a reading dict for upload/printing.

    'location':  human-readable sensor location (e.g. 'bedroom')
    'celsius':   temperature in Celsius; also converted to K and F
    'humidity':  relative humidity (passed through unchanged)
    'timestamp': Unix seconds; defaults to now when omitted

    Fix: compare against None with ``is`` rather than ``==`` (PEP 8; also
    avoids surprises with objects defining a custom __eq__).
    """
    if timestamp is None:
        timestamp = epoch_in_seconds()
    return {
        'location': location,
        'temperature': {
            'celsius': round(celsius, 2),
            'kelvin': round(celsius_to_kelvin(celsius), 2),
            'fahrenheit': round(celsius_to_fahrenheit(celsius), 2)
        },
        'humidity': humidity,
        'timestamp': timestamp
    }
if __name__ == "__main__":
# Parse the application arguments
parser = argument_parser()
args = parser.parse_args()
# Use a consistent timestamp for readings
timestamp = epoch_in_seconds()
# Record the sensor's temperature and humidity
sensor_temperature, sensor_humidity = fetch_readings_from_sensor()
if sensor_temperature and sensor_humidity:
reading = get_reading(args.location, sensor_temperature, sensor_humidity, timestamp)
record_reading(reading)
# Record the external location's temperature and humidity (if configured)
location_temperature, location_humidity = fetch_readings_from_api(args.city, args.country)
if location_temperature and location_humidity:
reading = get_reading(args.city, location_temperature, location_humidity, timestamp)
record_reading(reading)
| StarcoderdataPython |
1783425 | <filename>test/unit/messages/bloxroute/test_bloxroute_version_manager_v7.py<gh_stars>10-100
from unittest import skip
from bxcommon import constants
from bxcommon.messages.bloxroute.ack_message import AckMessage
from bxcommon.messages.bloxroute.bdn_performance_stats_message import BdnPerformanceStatsMessage
from bxcommon.messages.bloxroute.block_confirmation_message import BlockConfirmationMessage
from bxcommon.messages.bloxroute.broadcast_message import BroadcastMessage
from bxcommon.messages.bloxroute.get_txs_message import GetTxsMessage
from bxcommon.messages.bloxroute.hello_message import HelloMessage
from bxcommon.messages.bloxroute.key_message import KeyMessage
from bxcommon.messages.bloxroute.notification_message import NotificationMessage
from bxcommon.messages.bloxroute.ping_message import PingMessage
from bxcommon.messages.bloxroute.pong_message import PongMessage
from bxcommon.messages.bloxroute.transaction_cleanup_message import TransactionCleanupMessage
from bxcommon.messages.bloxroute.tx_message import TxMessage
from bxcommon.messages.bloxroute.tx_service_sync_blocks_short_ids_message import TxServiceSyncBlocksShortIdsMessage
from bxcommon.messages.bloxroute.tx_service_sync_complete_message import TxServiceSyncCompleteMessage
from bxcommon.messages.bloxroute.tx_service_sync_req_message import TxServiceSyncReqMessage
from bxcommon.messages.bloxroute.tx_service_sync_txs_message import TxServiceSyncTxsMessage
from bxcommon.messages.bloxroute.txs_message import TxsMessage
from bxcommon.messages.bloxroute.v13.pong_message_v13 import PongMessageV13
from bxcommon.messages.bloxroute.v7.tx_message_v7 import TxMessageV7
from bxcommon.test_utils.abstract_bloxroute_version_manager_test import AbstractBloxrouteVersionManagerTest
class BloxrouteVersionManagerV7Test(
    AbstractBloxrouteVersionManagerTest[
        HelloMessage,
        AckMessage,
        PingMessage,
        PongMessageV13,
        BroadcastMessage,
        TxMessageV7,
        GetTxsMessage,
        TxsMessage,
        KeyMessage,
        TxServiceSyncReqMessage,
        TxServiceSyncBlocksShortIdsMessage,
        TxServiceSyncTxsMessage,
        TxServiceSyncCompleteMessage,
        BlockConfirmationMessage,
        TransactionCleanupMessage,
        NotificationMessage,
        BdnPerformanceStatsMessage,
    ]
):
    """Round-trip conversion tests for protocol version 7.

    The generic parameter list tells the abstract base which "old" message
    class corresponds to each current message type; only Tx and Pong differ
    from the current protocol here.  Test method names and the hooks below
    are contracts of AbstractBloxrouteVersionManagerTest (defined elsewhere).
    """

    def version_to_test(self) -> int:
        # Protocol version this suite downgrades to / upgrades from.
        return 7

    def old_tx_message(self, original_message: TxMessage) -> TxMessageV7:
        """Build the v7 representation of a current TxMessage.

        v7 carried a quota type instead of the newer transaction flag, and
        had no timestamp field.
        """
        return TxMessageV7(
            original_message.message_hash(),
            original_message.network_num(),
            original_message.source_id(),
            original_message.short_id(),
            original_message.tx_val(),
            original_message.transaction_flag().get_quota_type(),
        )

    def compare_tx_current_to_old(
        self,
        converted_old_message: TxMessageV7,
        original_old_message: TxMessageV7,
    ):
        # Downgrade check: every v7-visible attribute must survive conversion.
        self.assert_attributes_equal(
            original_old_message,
            converted_old_message,
            [
                "message_hash",
                "tx_val",
                "source_id",
                "network_num",
                "quota_type",
            ],
        )

    def compare_tx_old_to_current(
        self,
        converted_current_message: TxMessage,
        original_current_message: TxMessage,
    ):
        # Upgrade check: v7 has no timestamp, so the converted message must
        # carry the NULL sentinel, while all shared attributes round-trip.
        self.assertEqual(
            constants.NULL_TX_TIMESTAMP, converted_current_message.timestamp()
        )
        self.assert_attributes_equal(
            converted_current_message,
            original_current_message,
            [
                "message_hash",
                "tx_val",
                "source_id",
                "network_num",
                "quota_type",
            ],
        )

    def old_pong_message(self, original_message: PongMessage) -> PongMessageV13:
        # v7 pong predates the extra timing fields; only the nonce carries over.
        return PongMessageV13(original_message.nonce())

    # The base-class tests below are intentionally disabled for v7; the
    # compare_* hooks above replace the generic tx comparisons.
    @skip
    def test_tx_message(self):
        pass

    @skip
    def test_txtxs_message(self):
        pass

    @skip
    def test_bdn_performance_stats_message(self):
        pass

    @skip
    def test_broadcast_message(self):
        pass
| StarcoderdataPython |
3321566 | <gh_stars>0
import random
import time
import matplotlib.pyplot as plt
class Find():
    """Toy 'evolve the word' demo: randomly guess letters of self.word,
    lock in correct ones, and track a cumulative reward per generation.

    NOTE(review): this class relies on its own off-by-one bug to terminate —
    read compare() before "fixing" anything here.
    """

    def __init__(self):
        # Candidate letters.  NOTE(review): random_generator() draws indices
        # with randint(1, len-1), so 'A' (index 0) can never be guessed.
        self.alphabet=['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
        self.word= 'SORRY'                           # target word to discover
        self.splitted= [char for char in self.word]  # target as a list of letters
        self.predicted=[]       # current random guesses, one letter per position
        self.predicted_new=[]   # letters confirmed correct so far
        self.reward_t= 0        # unused accumulator (kept as-is)
        self.l=0                # index of the letter currently being matched
        self.state_n=0          # generation counter
        self.state=[]           # generation history (x axis for plotting)
        self.reward_graph_n=0   # cumulative reward
        self.reward_graph=[]    # cumulative-reward history (y axis)

    def random_generator(self):
        """Append one random letter per target position to self.predicted
        and return the (growing) list.  Note predicted is only reset in
        reward() once it reaches the target length."""
        for i in range(len(self.splitted)):
            r= random.randint(1,len(self.alphabet)-1)
            self.predicted.append(self.alphabet[r])
        print(self.predicted)
        return self.predicted

    def compare(self):
        """Return True/False for the letter at position self.l.

        NOTE(review): the while condition uses <= len(self.splitted); once
        self.l reaches len(...), the indexing below raises IndexError.  The
        driver script swallows that with a bare except — the exception is
        currently the ONLY way the program terminates.  The loop also
        returns on its first iteration, so it never actually iterates.
        """
        predicted_r= self.random_generator()
        while self.l<=(len(self.splitted)):
            print(self.l)
            if self.splitted[self.l]==predicted_r[self.l]:
                return True
            else:
                return False

    def reward(self):
        """Advance one generation: +1 (and move to the next letter) on a
        match, -1 otherwise.  Appends to the plotting histories."""
        c= self.compare()
        self.state_n+=1
        self.state.append(self.state_n)
        if c==True:
            self.predicted_new.append(self.predicted[self.l])
            print(self.predicted_new)
        if len(self.predicted)>=len(self.splitted):
            self.predicted=[]
        if c==False:
            self.reward_graph_n+=-1
            self.reward_graph.append(self.reward_graph_n)
            return -1
        if c==True:
            self.reward_graph_n+=1
            self.l+=1
            self.reward_graph.append(self.reward_graph_n)
            return 1
ff= Find()
try:
    # NOTE(review): ff.reward is a bound method (not called), so it is never
    # equal to an int and this condition is always True.  The loop only ends
    # when compare() raises IndexError, caught by the bare except below.
    while ff.reward!=len(ff.splitted):
        print(ff.reward())
        print('Generation:',ff.state)
        print('Rewards',ff.reward_graph)
except:
    pass
# Plot cumulative reward per generation.
plt.plot(ff.state,ff.reward_graph)
plt.show()
| StarcoderdataPython |
5046054 | <gh_stars>1-10
import json
import discord_pass
import fnmatch
import datetime
import math
import asyncio
# (unit name, seconds per unit) pairs, largest first — intended for
# humanizing durations into weeks/days/hours/minutes/seconds.
intervals = (
    ('weeks', 604800),  # 60 * 60 * 24 * 7
    ('days', 86400),    # 60 * 60 * 24
    ('hours', 3600),    # 60 * 60
    ('minutes', 60),
    ('seconds', 1),
)
def check_blacklist(ctx):
    """Return True when the command author's id appears in the blacklist
    file storage/black.json (a JSON list of user ids), else False."""
    with open("storage/black.json") as f:
        blacklisted_ids = json.load(f)
    return ctx.author.id in blacklisted_ids
import os
import sys
import traceback
from dislash import InteractionClient, ActionRow, Button, ButtonStyle
import discord
from discord.ext import commands
class Tessarect(commands.Cog):
    """Core cog: guild join/leave logging, global error handling, utilities."""

    def __init__(self, bot):
        self.bot = bot
        # dislash client: enables slash commands / button interactions
        self.inter_client = InteractionClient(bot)
        self.theme_color = 0xFF0800    # red — used for error embeds
        self._theme_color = 0x315399   # blue — used for informational embeds
        # Channel id the bot logs guild joins/leaves to
        self.logs_channel = 929333502577606656
        self.description="Important cog for Tessarect Tessarect Another general purpose discord bot but with Economy commands and much more Well Attractive , Economy and Leveling Bot with tons of features. Utitlity Bot , Music Bot , Economy Bot , Moderation Bot and much more \n[p]help for more info"
@commands.Cog.listener()
async def on_guild_join(self, guild):
embed = discord.Embed(
title="I Have Joined A New Guild!",
description=f"{guild.name} \nOwner {guild.owner}",
timestamp=datetime.datetime.now(),
color=self._theme_color,
)
embed.add_field(name="Server ID",value=guild.id)
embed.add_field(
name=f"This Guild Has {guild.member_count} Members!",
value=f"Yay Another Server! We Are Now At {len(self.bot.guilds)} Guilds!",
)
await self.bot.get_channel(self.logs_channel).send(embed=embed)
owner=guild.owner
emx = discord.Embed(title='Hello',description=f"Hey {owner.mention} , thank you for adding me to {guild.name} , It is a informative embed for you to start up , you can join our support server [here](https://dsc.gg/tessarectsupport) for any kind of help ",color=discord.Color.gold())
emx.add_field(name="Tessarect",value = f'<:arrow_right:940608259075764265> Another general purpose discord bot but with Economy commands and much more Well Attractive , Economy and Leveling Bot with tons of features. Utitlity Bot , Music Bot , Economy Bot , Moderation Bot and much more .')
emx.add_field(name="**Seting Up**",value="<:arrow_right:940608259075764265> Type `[p]help`to know all commands ,(that's a lot !) , do `[p]stats` for getting stats .`[p]setup` for basic configuration")
await owner.send(embed=emx)
    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        """Log the guild the bot was removed from to the log channel."""
        embed = discord.Embed(
            title="<:downarrow:941994549822226452> ",
            description=f"Guild Elevator Down",
            timestamp=datetime.datetime.now(),
            color=discord.Color.red(),
        )
        embed.add_field(name="Guild",value=f"{guild.name} \n<:owner:946288312220536863> Owner: {guild.owner}")
        # Show the departed guild's icon plus a stock "down arrow" thumbnail.
        embed.set_image(url=guild.icon_url)
        embed.set_thumbnail(url="https://image.shutterstock.com/image-vector/down-arrow-vector-line-icon-260nw-1162804441.jpg")
        embed.add_field(name="Server ID",value=guild.id,inline=False)
        embed.add_field(name="Current Guild count",value=len(self.bot.guilds),inline=False)
        await self.bot.get_channel(self.logs_channel).send(embed=embed)
@commands.Cog.listener()
async def on_ready(self):
print("Tessarect cog loaded successfully")
    @commands.command(pass_context=True)
    async def cloc(self, ctx):
        """Outputs the total count of lines of code in the currently installed repo."""
        # Script pulled and edited from https://github.com/kyco/python-count-lines-of-code/blob/python3/cloc.py
        # Placeholder message edited in place once counting finishes.
        message = await ctx.send(embed=discord.Embed(title="Scrolling Code",color=self._theme_color))
        # Get our current working directory — assumed to be the bot's home
        path = os.getcwd()
        # Set up some lists
        extensions = []
        code_count = []
        # extension -> human-readable label for the result embed
        ext_dict = {
            "py":"Python (.py)",
            "yml":"Yaml files (for some silly stuff ) (.yml)",
            "sh":"Shell Script (.sh)",
            "txt":"Text files (.txt)",
            "md":"Markdown Files (.md)"}
        # Collect only extensions from our include list that actually occur
        extensions = self.get_extensions(path, list(ext_dict))
        for run in extensions:
            extension = "*."+run
            temp = 0
            # NOTE(review): `dir` shadows the builtin; files opened by the
            # generator below are closed only by refcounting (CPython).
            for root, dir, files in os.walk(path):
                for items in fnmatch.filter(files, extension):
                    value = root + "/" + items
                    temp += sum(+1 for line in open(value, 'rb'))
            code_count.append(temp)
        # Set up our output: one embed field per extension, pluralized count
        fields = [{"name":ext_dict.get(extensions[x],extensions[x]),"value":"{:,} line{}".format(code_count[x],"" if code_count[x]==1 else "s")} for x in range(len(code_count))]
        dd=discord.Embed(
            title="Counted Lines of Code",
            description="My lazy devs took the time to sloppily write the following to bring me life...",
            color=discord.Color.blurple()
        )
        dd.set_footer(text="Thesse counts are fetched from my github repo and may not be up to date")
        dd.set_thumbnail(url=self.bot.user.avatar_url)
        for x in fields:
            dd.add_field(name=x['name'],value=x['value'])
        return await message.edit(embed=dd)
# Helper function to get extensions
def get_extensions(self, path, excl):
extensions = []
for root, dir, files in os.walk(path):
for items in fnmatch.filter(files, "*"):
temp_extensions = items.rfind(".")
ext = items[temp_extensions+1:]
if ext not in extensions:
if ext in excl:
extensions.append(ext)
return extensions
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Global command-error handler.

        Known command framework errors get a themed embed reply; anything
        else is assigned an error code, persisted to storage/errors.json,
        reported to the dev log channel, and surfaced to the user with
        interactive buttons.
        """
        # Custom blacklist check failures surface as this exact message.
        if str(error) =="You are blacklisted":
            await ctx.send(embed=discord.Embed(description=f'Blacklisted User Attempted to use a command\n Status : Command blocked \n Note {ctx.author.mention}, you cant use me because you were blacklisted by a verified developer for some reason , for appealing go to my support server and ask the devs ',color=discord.Color.dark_red()) )
            return
        # Commands with their own local error handler take precedence.
        if hasattr(ctx.command, "on_error"):
            return
        # Load the persisted error database (used by the fallback branch).
        with open ('storage/errors.json', 'r') as f:
            data = json.load(f)
        # Unwrap CommandInvokeError to the underlying exception when present.
        error = getattr(error, "original", error)
        if isinstance(error, commands.BotMissingPermissions):
            missing = [
                perm.replace("_", " ").replace("guild", "server").title()
                for perm in error.missing_perms
            ]
            if len(missing) > 2:
                fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
            else:
                fmt = " and ".join(missing)
            embed = discord.Embed(
                title="Missing Permissions",
                description=f"I am missing **{fmt}** permissions to run this command :(",
                color=self.theme_color,
            )
            embed.set_author(name=ctx.author,icon_url=ctx.author.avatar_url)
            # NOTE(review): this embed is built but never sent before the
            # return — confirm whether a ctx.send was intended here.
            return
        elif isinstance(error, commands.CommandNotFound):
            return
        elif isinstance(error, commands.DisabledCommand):
            embed = discord.Embed(
                title="Command disabled",
                description=f"Looks like This command is disabled for use !",
                color=self.theme_color,
            )
            await ctx.send(embed=embed)
            return
        elif isinstance(error, commands.CommandOnCooldown):
            embed = discord.Embed(
                title="Whoa Slow it down!!!!",
                description=f"Retry that command after {datetime.timedelta(seconds=error.retry_after)}.",
                color=self.theme_color,
            )
            embed.set_author(name=ctx.author,icon_url=ctx.author.avatar_url)
            #embed.set_thumbnail(url="https://cdn.discordapp.com/emojis/922468797108080660.png")
            await ctx.send(embed=embed)
            return
        elif isinstance(error, commands.MissingPermissions):
            missing = [
                perm.replace("_", " ").replace("guild", "server").title()
                for perm in error.missing_perms
            ]
            if len(missing) > 2:
                fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
            else:
                fmt = " and ".join(missing)
            embed = discord.Embed(
                title="Insufficient Permission(s)",
                description=f"You need the **{fmt}** permission(s) to use this command.",
                color=self.theme_color,
            )
            embed.set_author(name=ctx.author,icon_url=ctx.author.avatar_url)
            #embed.set_thumbnail(url="https://cdn.discordapp.com/emojis/922468797108080660.png")
            await ctx.send(embed=embed)
            return
        elif isinstance(error, commands.UserInputError):
            if isinstance(error, commands.MissingRequiredArgument):
                embed = discord.Embed(
                    title="<:blobno:941713015424839691> Required Argument Missing",
                    description=f"You need to provide me some inputs for that command , check it's help page for more info\n <:rightarrow:941994550124245013> Correct Usage: `{ctx.prefix}{ctx.command.name} {ctx.command.signature}`",
                    color=self.theme_color,
                )
                embed.set_author(name=ctx.author,icon_url=ctx.author.avatar_url)
                #embed.set_thumbnail(url="https://cdn.discordapp.com/emojis/922468797108080660.png")
                await ctx.send(embed=embed)
            else:
                embed = discord.Embed(
                    title="Wrong Inputs",
                    description=f"Maybe you forgot to specify inputs or gave an extra input or some invalid input \n<:rightarrow:941994550124245013>: `{error}`",
                    color=self.theme_color)
                embed.set_author(name=ctx.author,icon_url=ctx.author.avatar_url)
                #embed.set_thumbnail(url="https://cdn.discordapp.com/emojis/922468797108080660.png")
                await ctx.send(embed=embed)
        elif isinstance(error, commands.NoPrivateMessage):
            try:
                await ctx.author.send("This command cannot be used in direct messages.")
            except discord.Forbidden:
                raise error
            return
        elif isinstance(error, commands.MaxConcurrencyReached):
            embed = discord.Embed(
                title="<:dnd_status:946652840053600256> Bot Busy !",
                description=f"Please use that command few moments after ",
                color=self.theme_color,
            )
            embed.set_author(name=ctx.author,icon_url=ctx.author.avatar_url)
            #embed.set_thumbnail(url="https://cdn.discordapp.com/emojis/922468797108080660.png")
            await ctx.send(embed=embed)
            return
        elif isinstance(error, discord.errors.Forbidden):
            # Replying may itself be forbidden — guard the notification too.
            try:
                embed = discord.Embed(
                    title="Forbidden",
                    description=f"Error 403 Forbidden | Missing perms\n Bot is missing permissions \n Recommended giving Permission `8` (admin)",
                    color=self.theme_color,
                )
                embed.set_author(name=ctx.author,icon_url=ctx.author.avatar_url)
                #embed.set_thumbnail(url="https://cdn.discordapp.com/emojis/922468797108080660.png")
                await ctx.send(embed=embed)
            except:
                print("Failed forbidden")
            return
        elif isinstance(error, commands.CheckFailure):
            embed = discord.Embed(
                title="Permissions Denied",
                description=f"You do not have permissions to use this command",
                color=self.theme_color,
            )
            #embed.set_thumbnail(url="https://cdn.discordapp.com/emojis/922468797108080660.png")
            await ctx.send(embed=embed)
            return
        else:
            # Unknown error: assign a code, persist, report to devs and user.
            devlogs=self.bot.get_channel(979345665081610271)
            err_code=discord_pass.secure_password_gen(10)
            log=discord.Embed(title=f"<:messagealert:942777256160428063> Error ( {err_code} )",description=f"```\n{error}\n```",color=self._theme_color,timestamp=ctx.message.created_at)
            row2 = ActionRow(
                Button(
                    style=ButtonStyle.grey,
                    label="Error Code",
                    custom_id="test_button"),
                Button(
                    style=ButtonStyle.green,
                    label="Troubleshooting",
                    custom_id="tr")
            )
            # NOTE(review): msgcon is never used below.
            msgcon=ctx.message.content
            # Deduplicate: same error text + command + invocation == known.
            known_error=False
            print(data)
            for x in data:
                if data[x]['error']==str(error) and data[x]['command used']==str(ctx.command) and data[x]['full text']==str(ctx.message.content):
                    known_error=True
            if not known_error:
                data[str(err_code)] = {}
                data[str(err_code)]['error']=str(error)
                data[str(err_code)]['authorid']=str(ctx.author.id)
                data[str(err_code)]['author']=str(ctx.author)
                data[str(err_code)]['guild']=str(ctx.guild.id)
                data[str(err_code)]['full text']=str(ctx.message.content)
                data[str(err_code)]['command used']=str(ctx.command)
                data[str(err_code)]['time']=str(ctx.message.created_at)
                with open ('storage/errors.json', 'w') as f:
                    json.dump(data, f, indent=4)
            if not known_error:
                log.add_field(name="Desc",value=f"```json\n{str(data[str(err_code)])}\n```",inline=False)
                await devlogs.send(embed=log)
            uem=discord.Embed(title="Oops!",description=f'It seems like an unexpected error happened\n|| ** {error} ** ||',color=0xe74c3c).add_field(name="Known Error?",value=known_error)
            uem.set_author(name=ctx.author,icon_url=ctx.author.avatar_url)
            msg2=await ctx.send(embed=uem,components=[row2])
            # Button handlers are only live for 20 seconds.
            on_click = msg2.create_click_listener(timeout=20)

            @on_click.not_from_user(ctx.author, cancel_others=False, reset_timeout=True)
            async def on_wrong_user(inter):
                # Only the command author may use the buttons.
                await inter.reply("You're not the author dude , dont come in between enjoy your milk", ephemeral=True)

            @on_click.matching_id("test_button")
            async def on_test_button(inter):
                await inter.reply(embed=discord.Embed(description='This error is known , kindly be patient , devs are working ' if known_error else f"Your error code is **{err_code} **",color=discord.Color.dark_theme()),ephemeral=True)

            @on_click.matching_id("tr")
            async def on_test(inter):
                await inter.reply("""**Basic Troubleshooting**
<:checkboxsquare:942779132159356959>Retry again
<:checkboxsquare:942779132159356959>Check bot's/your permissions
<:checkboxsquare:942779132159356959>check command help
<:checkboxsquare:942779132159356959>Ask developers
<:checkboxsquare:942779132159356959>Try after sometime
<:checkboxsquare:942779132159356959>Blacklisted cant use anything
<:checkboxsquare:942779132159356959>Drink milk and enjoy other commands
**Cant solve it?**
<:checkboxcheck:942779132117409863> Join our support server
<:checkboxcheck:942779132117409863> Open issue on github""",ephemeral=True)

            @on_click.timeout
            async def on_timeout():
                # Strip the buttons once the listener expires.
                #await ActionRow.disable_buttons(row2)
                try:
                    await msg2.edit(components=[])
                except Exception as e:
                    print(e)
            # Final fallback: dump the traceback to stderr for operators.
            print("Ignoring exception in command {}:".format(ctx.command), file=sys.stderr)
            traceback.print_exception(
                type(error), error, error.__traceback__, file=sys.stderr
            )
def setup(bot):
    """discord.py extension entry point: register the Tessarect cog."""
    cog = Tessarect(bot)
    bot.add_cog(cog)
6623217 | import setuptools
# Long description for PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="multiview_gpu",
    version="0.1.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="GPU-accelerated multiview clustering and dimensionality reduction",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/dani-garcia/multiview_gpu",
    keywords=["multiview", "clustering", "dimensionality reduction"],
    packages=setuptools.find_packages(),
    install_requires=[
        'numpy',
    ],
    # TensorFlow is optional so users can pick the CPU or GPU build.
    extras_require={
        "tf": ["tensorflow>=1.12.0"],
        "tf_gpu": ["tensorflow-gpu>=1.12.0"],
    },
    setup_requires=[
        "pytest-runner"
    ],
    tests_require=[
        "pytest",
        "pytest-benchmark"
    ],
    classifiers=[
        "Programming Language :: Python",
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        "Development Status :: 3 - Alpha",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)
3207290 | #!usr/bin/python
"""
Script to download Open Street Map data of land use. Converts to GeoJSON
<NAME>
WB-DIME
Jan 31 2017
"""
from country_bounding_boxes import (
country_subunits_containing_point,
country_subunits_by_iso_code
)
from utils.overpass_client import OverpassClient
from numpy import floor
from modules.getFeatures import find_between
import json
import urllib2
# Query string to OSM to provide landuse data, with the query key later
# placed in the middle
# Overpass QL template halves; the tag key (e.g. 'landuse') is concatenated
# between query_begin and query_end at request time.
query_begin = '''\
way
['''
query_end=''']
({query_bb_s},{query_bb_w},{query_bb_n},{query_bb_e});
(._;>;);
out;
'''
# Output format code returned to callers alongside the filename
# (the script converts the OSM response to GeoJSON).
datatype="GeoJSON"
def script(countryISO='US',query='landuse',outputFolder='data/',partOfData=1,
outputFile='OSMdata_'):
"""
Main function executed by top
'countryISO': Country for which BBox data should be downloaded. Can also contain custom boundary box
'query': Tag for OSM query to search for
'partOfData': Part of total data of a country to be processed
Returns list with [filename,datatype], where datatype is the
GDAL_CODE
"""
partOfData=float(partOfData)
subunits=[]
countryISOlist=countryISO.split()
if countrISOlist[1]: #if there is more than one entry
bbox=[]
bbox.append(float(contryISOlist[0]))
bbox.append(float(contryISOlist[1]))
bbox.append(float(contryISOlist[2]))
bbox.append(float(contryISOlist[3]))
else:
#Load country data
for c in country_subunits_by_iso_code(countryISO):
subunits.append(c)
#Chose subunits, if more than 1
subunit=1
if len(subunits)>1: #if there are subunits
cnt = 1
print "Subunits:"
for c in subunits:
print cnt,"- ",c.name
cnt += 1
subunit=input('Chose subunit: ')
elif len(subunits)==0: #if nothing found
print "Error: No country or entry with ISO code",countryISO
exit()
#Get BBox data for country
print "Acquiring data for",subunits[subunit-1].name
bbox = subunits[subunit-1].bbox #0-w, 1-s, 2-e, 3-n
w = bbox[0]
s = bbox[1]
e = bbox[2]
n = bbox[3]
print "Coordinates:",w,s,e,n
print "Key:",query
# Country is split into 100 boxes, as (for the us) sample is too big
# (timeout)
# Number of Boxes = (samples-1)^2 boxes.
#calculate number of boxes
mindiff=min([abs(w-e),abs(n-s)])
samples=int(floor((mindiff*8)+2))
print "Number of queries:",(samples-1)**2
#samples = 11 # 100 boxes
fullquery = query_begin+query+query_end
#Get Elements from OSM
overpass_client = OverpassClient(endpoint='fr')
d = overpass_client.get_bbox_elements(
ql_template=fullquery,
bb_s=s, bb_w=w, bb_n=n, bb_e=e,
samples=samples)
lene=len(d)
print 'Total elements found: %d' % lene
boundery_index=int(floor(partOfData*lene))
d=d[0:boundery_index]
dr=list(reversed(d))
lene=boundery_index
fileName=outputFolder+'/'+outputFile+str(subunits[subunit-1].name).replace(" ","_")+".json"
#Create GeoJSON string
geojson=[]
geojson.append('''
{
"type": "FeatureCollection",
"crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::4326" } },
"features": [''')
#create faster node searchs
print "Create library for faster node searches"
#get max id
cnt = 0.
ids={}
for e in d:
print "\t Library creation:",int(cnt*100./lene),"% done.\r",
cnt+=1
if e['type']=='node':
ids[e['id']]=[e['lon'],e['lat']]
#creade list of nodes with ids.
#coordlist=[]
#cnt = 0.
#for i in range(0,maxid+1):
# print "\t Step 2/3:",cnt*100./maxid+1,"% done.\r",
# cnt+=1
# coordlist.append([0,0])
#cnt = 0.
#rint ""
#for e in d:
# print "\t Step 3/3:",cnt*100./lene,"% done.\r",
# cnt+=1
# if e['type']=='node':
# coordlist(e['id'])[0]=e['lon']
# coordlist(e['id'])[1]=e['lat']
#loop through elements and append to GeoJSON string
cnt = 0.
cnte = 0
print ""
print "Convert to GeoJSON file",fileName
writetag=[]
writecoord=[]
for e in d :
cnt+=1
print "\tGet elemenents:",int(cnt*100./lene),"% done.\r",
if e['type']=='way':
# if e['area']=='yes':
writetag.append(e['tags'][query])
writecoord.append([])
for node in e['nodes']:
try:
lon=str(ids[node][0])
lat=str(ids[node][1])
except KeyError:
print ''
print '\tNode',node,'not found in library. Download informations from openstreetmap.org ...'
response=urllib2.urlopen('http://api.openstreetmap.org/api/0.6/node/'+str(node))
fullxml = str(response.read())
lon=find_between(fullxml,"lon=\"","\"",lastfirst=True)
lat=find_between(fullxml,"lat=\"","\"",lastfirst=True)
writecoord[cnte].append([lon,lat])
cnte+=1
cnte2=0
print ''
for tag in writetag:
print "\tCreate GeoJSON:",int(cnte2*100./cnte),"% done.\r",
geojson.append('''
{
"type": "Feature",'''+\
"\n\t\t\t\"properties\":{\"Descriptio\":\""+\
tag+"\"},")
geojson.append('''
"geometry" : {
"type": "MultiPolygon",
"coordinates":[[[''')
for coord in writecoord[cnte2]:
geojson.append("["+coord[0]+","+coord[1]+"],")
cnte2+=1
geojson[-1]=geojson[-1][0:-1]+"]]]}},"
#geojson=geojson[0:-1]+"]]]}},"
geojson=''.join(geojson)
geojson=geojson[0:-1]+"\n]\n}"
print " "
with open(fileName, 'w+') as f:
json.dump(geojson,f) #save as geojson file
#replace escape characters
with open(fileName, 'r') as file :
filedata = file.read()[1:-1]
# Replace the target string
filedata = filedata.replace('\\n', '')
filedata = filedata.replace('\\t', '')
filedata = filedata.replace('\\"', '"')
# Save the result
with open(fileName, 'w') as file:
file.write(filedata)
print "Written to",fileName
return [fileName,datatype]
| StarcoderdataPython |
8141196 | import math
import random
import pytorch_lightning as pl
import torch
import os
import pickle
import cv2
import numpy as np
from torch.utils import data
from datasets.init_dataset import get_dataset
from ACT_utils.ACT_utils import tubelet_in_out_tubes, tubelet_has_gt
from MOC_utils.gaussian_hm import gaussian_radius, draw_umich_gaussian
from ACT_utils.ACT_aug import apply_distort, apply_expand, crop_image
from pprint import pprint
class UCFDataset(data.Dataset):
    def __init__(self,
                 root_dir,
                 mode,  # train or val
                 pkl_filename = 'UCF101v2-GT.pkl',
                 K=7,
                 skip=1,
                 downratio=4,
                 # NOTE(review): mutable default lists are shared across
                 # instances — harmless here only if never mutated in place.
                 mean=[0.40789654, 0.44719302, 0.47026115],
                 std=[0.28863828, 0.27408164, 0.27809835],
                 resize=(288, 288),  # (h, w)
                 max_objs=128):
        """Index UCF101 tubelet samples from the ground-truth pickle.

        Populates self._indices with (video, start_frame) pairs, one per
        K-frame tubelet that lies fully inside its video's tubes and has
        ground truth.
        """
        super().__init__()
        self.root_dir = root_dir
        self.mode = mode
        self.K = K
        self.skip = skip  # TODO implement skiping frames in getitem
        self._resize_height = resize[0]
        self._resize_width = resize[1]
        self.down_ratio = downratio
        self.mean = mean
        self.std = std
        self.max_objs = max_objs
        pkl_file = os.path.join(root_dir, pkl_filename)
        with open(pkl_file, 'rb') as fid:
            pkl = pickle.load(fid, encoding='iso-8859-1')
        # Copy every pickle key onto self; all except 'labels' are prefixed
        # with an underscore, yielding the attributes listed below.
        for k in pkl:
            setattr(self, ('_' if k != 'labels' else '') + k, pkl[k])
        # labels, _nframes, _train_videos, _test_videos
        # _gttubes, _resolution
        self.num_classes = len(self.labels)
        self._indices = []
        video_list = self._train_videos if mode == 'train' else self._test_videos
        for v in video_list:
            # Flatten all ground-truth tubes of this video into one list.
            vtubes = sum(self._gttubes[v].values(), [])
            self._indices += [(v, i) for i in range(1, self._nframes[v] + 2 - self.K, self.K)
                              if tubelet_in_out_tubes(vtubes, i, self.K) and tubelet_has_gt(vtubes, i, self.K)]
        self.init_aug_params()
def init_aug_params(self):
self._mean_values = [104.0136177, 114.0342201, 119.91659325]
self.distort_param = {
'brightness_prob': 0.5,
'brightness_delta': 32,
'contrast_prob': 0.5,
'contrast_lower': 0.5,
'contrast_upper': 1.5,
'hue_prob': 0.5,
'hue_delta': 18,
'saturation_prob': 0.5,
'saturation_lower': 0.5,
'saturation_upper': 1.5,
'random_order_prob': 0.0,
}
self.expand_param = {
'expand_prob': 0.5,
'max_expand_ratio': 4.0,
}
self.batch_samplers = [{
'sampler': {},
'max_trials': 1,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.1, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.3, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.5, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.7, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'min_jaccard_overlap': 0.9, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
'sample_constraint': {'max_jaccard_overlap': 1.0, },
'max_trials': 50,
'max_sample': 1,
}, ]
def __len__(self):
return len(self._indices)
def imagefile(self, v, i):
return os.path.join(self.root_dir, 'rgb-images', v, '{:0>5}.jpg'.format(i))
def flip_video(self, images, frame, v):
do_mirror = random.getrandbits(1) == 1
# filp the image
if do_mirror:
images = [im[:, ::-1, :] for im in images]
h, w = self._resolution[v]
gt_bbox = {}
for ilabel, tubes in self._gttubes[v].items():
for t in tubes:
if frame not in t[:, 0]:
continue
assert frame + self.K - 1 in t[:, 0]
# copy otherwise it will change the gt of the dataset also
t = t.copy()
if do_mirror:
# filp the gt bbox
xmin = w - t[:, 3]
t[:, 3] = w - t[:, 1]
t[:, 1] = xmin
boxes = t[(t[:, 0] >= frame) * (t[:, 0] < frame + self.K), 1:5]
assert boxes.shape[0] == self.K
if ilabel not in gt_bbox:
gt_bbox[ilabel] = []
# gt_bbox[ilabel] ---> a list of numpy array, each one is K, x1, x2, y1, y2
gt_bbox[ilabel].append(boxes)
return images, gt_bbox
def make_gttbox(self, frame, v):
gt_bbox = {}
for ilabel, tubes in self._gttubes[v].items():
for t in tubes:
if frame not in t[:, 0]:
continue
assert frame + self.K - 1 in t[:, 0]
t = t.copy()
boxes = t[(t[:, 0] >= frame) * (t[:, 0] < frame + self.K), 1:5]
assert boxes.shape[0] == self.K
if ilabel not in gt_bbox:
gt_bbox[ilabel] = []
gt_bbox[ilabel].append(boxes)
return gt_bbox
def resize_video(self, images, gt_bbox):
original_h, original_w = images[0].shape[:2]
output_h = self._resize_height // self.down_ratio
output_w = self._resize_width // self.down_ratio
# resize the original img and it's GT bbox
for ilabel in gt_bbox:
for itube in range(len(gt_bbox[ilabel])):
gt_bbox[ilabel][itube][:, 0] = gt_bbox[ilabel][itube][:, 0] / original_w * output_w
gt_bbox[ilabel][itube][:, 1] = gt_bbox[ilabel][itube][:, 1] / original_h * output_h
gt_bbox[ilabel][itube][:, 2] = gt_bbox[ilabel][itube][:, 2] / original_w * output_w
gt_bbox[ilabel][itube][:, 3] = gt_bbox[ilabel][itube][:, 3] / original_h * output_h
images = [cv2.resize(im, (self._resize_width, self._resize_height), interpolation=cv2.INTER_LINEAR) for im in images]
return images, gt_bbox
def normalize(self, images):
data = [np.empty((3, self._resize_height, self._resize_width), dtype=np.float32) for i in range(self.K)]
mean = np.tile(np.array(self.mean, dtype=np.float32)[:, None, None], (1, 1, 1))
std = np.tile(np.array(self.std, dtype=np.float32)[:, None, None], (1, 1, 1))
for i in range(self.K):
data[i][0:3, :, :] = np.transpose(images[i], (2, 0, 1))
data[i] = ((data[i] / 255.) - mean) / std
return data
def draw_ground_truths(self, gt_bbox):
output_h = self._resize_height // self.down_ratio
output_w = self._resize_width // self.down_ratio
hm = np.zeros((self.num_classes, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, self.K * 2), dtype=np.float32)
mov = np.zeros((self.max_objs, self.K * 2), dtype=np.float32)
index = np.zeros((self.max_objs), dtype=np.int64)
index_all = np.zeros((self.max_objs, self.K * 2), dtype=np.int64)
mask = np.zeros((self.max_objs), dtype=np.uint8)
num_objs = 0
for ilabel in gt_bbox:
for itube in range(len(gt_bbox[ilabel])):
key = self.K // 2
# key frame's bbox height and width (both on the feature map)
key_h, key_w = gt_bbox[ilabel][itube][key, 3] - gt_bbox[ilabel][itube][key, 1], gt_bbox[ilabel][itube][key, 2] - gt_bbox[ilabel][itube][key, 0]
# create gaussian heatmap
radius = gaussian_radius((math.ceil(key_h), math.ceil(key_w)))
radius = max(0, int(radius))
# ground truth bbox's center in key frame
center = np.array([(gt_bbox[ilabel][itube][key, 0] + gt_bbox[ilabel][itube][key, 2]) / 2, (gt_bbox[ilabel][itube][key, 1] + gt_bbox[ilabel][itube][key, 3]) / 2], dtype=np.float32)
center_int = center.astype(np.int32)
assert 0 <= center_int[0] and center_int[0] <= output_w and 0 <= center_int[1] and center_int[1] <= output_h
# draw ground truth gaussian heatmap at each center location
draw_umich_gaussian(hm[ilabel], center_int, radius)
for i in range(self.K):
center_all = np.array([(gt_bbox[ilabel][itube][i, 0] + gt_bbox[ilabel][itube][i, 2]) / 2, (gt_bbox[ilabel][itube][i, 1] + gt_bbox[ilabel][itube][i, 3]) / 2], dtype=np.float32)
center_all_int = center_all.astype(np.int32)
# wh is ground truth bbox's height and width in i_th frame
wh[num_objs, i * 2: i * 2 + 2] = 1. * (gt_bbox[ilabel][itube][i, 2] - gt_bbox[ilabel][itube][i, 0]), 1. * (gt_bbox[ilabel][itube][i, 3] - gt_bbox[ilabel][itube][i, 1])
# mov is ground truth movement from i_th frame to key frame
mov[num_objs, i * 2: i * 2 + 2] = (gt_bbox[ilabel][itube][i, 0] + gt_bbox[ilabel][itube][i, 2]) / 2 - \
center_int[0], (gt_bbox[ilabel][itube][i, 1] + gt_bbox[ilabel][itube][i, 3]) / 2 - center_int[1]
# index_all are all frame's bbox center position
index_all[num_objs, i * 2: i * 2 + 2] = center_all_int[1] * output_w + center_all_int[0], center_all_int[1] * output_w + center_all_int[0]
# index is key frame's boox center position
index[num_objs] = center_int[1] * output_w + center_int[0]
# mask indicate how many objects in this tube
mask[num_objs] = 1
num_objs = num_objs + 1
return hm, wh, mov, index, index_all, mask
def __getitem__(self, id):
v, frame = self._indices[id]
# Read images
images = [cv2.imread(self.imagefile(v, frame + i)).astype(np.float32) for i in range(0,self.K,self.skip)]
if self.mode == 'train':
# apply data augmentation
images, gt_bbox = self.flip_video(images, frame, v)
images = apply_distort(images, self.distort_param)
images, gt_bbox = apply_expand(images, gt_bbox, self.expand_param, self._mean_values)
images, gt_bbox = crop_image(images, gt_bbox, self.batch_samplers)
else:
# no data augmentation or flip when validation
gt_bbox = self.make_gttbox(frame, v)
# Resize the video
images, gt_bbox = self.resize_video(images, gt_bbox)
data = self.normalize(images)
hm, wh, mov, index, index_all, mask = self.draw_ground_truths(gt_bbox)
return {'input': data, 'hm': hm, 'mov': mov, 'wh': wh, 'mask': mask, 'index': index, 'index_all': index_all}
def _draw_bb(self, video, frame, index):
i = index
for label in self._gttubes[video]:
# print(label)
tubes = self._gttubes[video][label]
for tube in tubes:
x = np.where(tube[..., 0] == i)[0]
if (len(x) != 0):
x = int(x)
x1, y1, x2, y2 = tube[x, 1:]
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
frame = cv2.rectangle(frame, (x1, y1), (x2, y2), color=(255, 0, 0), thickness=2)
return frame
def save_video(self, index, fps=25, drawbb=True, save_dir='.'):
video, start_frame = self._indices[index]
h, w = self._resolution[video]
save_path = video.split(os.path.sep)[-1] + '_'+ str(index) + '.mp4'
save_path = os.path.join(save_dir, save_path)
out = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
for i in range(start_frame, start_frame+self.K, self.skip):
frame = cv2.imread(self.imagefile(video, i))
if drawbb:
frame = self._draw_bb(video, frame, i)
out.write(frame)
out.release()
class VideoDataModule(pl.LightningDataModule):
    """LightningDataModule wrapping UCFDataset train/val dataloaders.

    Parameters mirror UCFDataset; ``num_workers=None`` means "all CPUs but
    one" (0 == load in the main process).
    """
    def __init__(self,
                 root_dir,
                 pkl_file,
                 K,
                 resize,
                 batch_size,
                 num_workers=None,
                 pin_memory=False):
        super().__init__()
        self.root_dir = root_dir
        self.pkl_file = pkl_file
        self.batch_size = batch_size
        if num_workers is None:
            # Fix: os.cpu_count() may return None (count undeterminable),
            # which previously raised TypeError on `None - 1`; also clamp so
            # a single-core machine gets 0 workers instead of -1.
            num_workers = max(0, (os.cpu_count() or 1) - 1)
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.Dataset = get_dataset("ucf101") #ucf101 or hmdb
        self.num_classes = self.Dataset.num_classes
        self.K = K
        self.resize = resize
    def train_dataloader(self):
        """Shuffled training loader over UCFDataset in 'train' mode."""
        return torch.utils.data.DataLoader(
            UCFDataset(root_dir=self.root_dir,
                       pkl_filename=self.pkl_file,
                       mode='train',
                       K=self.K,
                       resize=self.resize,
                       ),
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            drop_last=True,
        )
    def val_dataloader(self):
        """Deterministic (unshuffled) validation loader in 'val' mode."""
        return torch.utils.data.DataLoader(
            UCFDataset(root_dir=self.root_dir,
                       pkl_filename=self.pkl_file,
                       mode='val',
                       K=self.K,
                       resize=self.resize,
                       ),
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            drop_last=True,
        )
# TEST CASES
def test_dataset():
    """Smoke test: build a UCFDataset, pull one sample and print its shapes."""
    ds = UCFDataset(root_dir='../data/ucf24', mode='train',
                    pkl_filename='SalsaSpin.pkl', K=7)
    print("len of dataset ", len(ds))
    sample = ds[0]
    print(sample.keys())  # 'input', 'hm', 'mov', 'wh', 'mask', 'index', 'index_all']
    print(sample['input'][0].shape)
    for target in ('hm', 'mov', 'wh'):
        print(sample[target].shape)
    # To dump every tubelet as an annotated video instead:
    # save_dir = '../SalsaSpin'
    # os.makedirs(save_dir, exist_ok=True)
    # for i in range(len(ds)):
    #     ds.save_video(i, fps=1, save_dir=save_dir, drawbb=True)
if __name__ == '__main__':
    # Manual smoke test for VideoDataModule: build both dataloaders on the
    # small SalsaSpin subset and print the shape of one batch from each.
    datamodule = VideoDataModule(root_dir='../data/ucf24',
                                 pkl_file="SalsaSpin.pkl",
                                 K=7,
                                 resize=(288, 288),
                                 batch_size=2,
                                 num_workers=0,
                                 pin_memory=False
                                 )
    print("Number of classes ", datamodule.num_classes)
    train_dl = datamodule.train_dataloader()
    print("Len of train_dl", len(train_dl))
    # grab the first training batch only
    for data in train_dl:
        break
    print(data.keys())  # 'input', 'hm', 'mov', 'wh', 'mask', 'index', 'index_all']
    print(data['hm'].shape)
    print(data['mov'].shape)
    print(data['wh'].shape)
    val_dl = datamodule.val_dataloader()
    print("Len of val_dl", len(val_dl))
    # grab the first validation batch only
    for data in val_dl:
        break
print(data.keys()) | StarcoderdataPython |
1893741 | # internal
from src.ui.resources import icons
from .order_details import OrderDetails
from src.ui.components import BaseWidget, Table
# pyqt
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSize, QDateTime
from PyQt5.QtWidgets import (QMainWindow, QWidget, QFrame, QTabWidget,
QHBoxLayout, QVBoxLayout, QFormLayout,
QPushButton, QLabel, QLineEdit, QComboBox,
QCheckBox, QDateTimeEdit, QSpinBox)
########
# Menu #
########
class Menu(BaseWidget):
    """Application side menu: a vertical stack of navigation buttons."""
    def setupWidget(self):
        # (attribute name, label, icon resource) for every entry, listed in
        # top-to-bottom display order.
        specs = [
            ('btnStatus', 'Status', ':/icons/btnStatus.png'),
            ('btnInvoices', 'Invoices', ':/icons/btnInvoices.png'),
            ('btnWooCommerce', 'WooCommerce', ':/icons/btnWooCommerce.png'),
            ('btnSettings', 'Settings', ':/icons/btnSettings.png'),
            ('btnLogs', 'Logs', ':/icons/btnLogs.png'),
            ('btnHelp', 'Help', ':/icons/btnHelp.png'),
            ('btnAbout', 'About', ':/icons/btnAbout.png'),
        ]
        for attr, label, icon in specs:
            button = QPushButton(label)
            button.setIcon(QIcon(icon))
            button.setIconSize(QSize(24, 24))
            self.generalLayout.addWidget(button)
            # keep the per-button attributes (self.btnStatus, ...) that the
            # rest of the application wires signals to
            setattr(self, attr, button)
        # push all buttons to the top of the menu
        self.generalLayout.addStretch(1)
    def setStyles(self):
        self.setStyleSheet("""
            QPushButton {
                height: 50px;
                text-align: left;
                padding-left: 20px
            }
        """)
############
# Contents #
############
class BaseTab(BaseWidget):
    """Base class for content tabs: a BaseWidget with zero layout margins."""
    def setupLayout(self):
        super().setupLayout()
        # tabs fill their whole area, so drop BaseWidget's default margins
        self.generalLayout.setContentsMargins(0, 0, 0, 0)
class StatusTab(BaseTab):
    """Status Tab: shows the engine state and start/stop controls.

    The colored right border of the service frame is driven by the dynamic
    Qt property "state" (see setStyles); after changing the property the
    stylesheet is re-applied so the new selector takes effect.
    """
    def bootstrap(self):
        super().bootstrap()
        # default state
        self.stop()
    def setupWidget(self):
        """Build the service frame: name, state label, start/stop buttons."""
        # service frame
        self.serviceFrame = QFrame()
        self.serviceFrame.setObjectName('ServiceFrame')
        self.generalLayout.addWidget(self.serviceFrame)
        # service layout
        self.serviceLayout = QVBoxLayout()
        self.serviceFrame.setLayout(self.serviceLayout)
        # service name
        self.serviceName = QLabel('<h3>Recipient Engine</h3>')
        self.serviceName.setFixedHeight(60)
        self.serviceLayout.addWidget(self.serviceName)
        # service state
        serviceStateLayout = QHBoxLayout()
        serviceStateLayout.setContentsMargins(0, 10, 0, 10)
        self.serviceStateLabel = QLabel('<b>status: </b>')
        self.serviceStateVal = QLabel()
        serviceStateLayout.addWidget(self.serviceStateLabel)
        serviceStateLayout.addWidget(self.serviceStateVal)
        serviceStateLayout.addStretch(1)
        self.serviceLayout.addLayout(serviceStateLayout)
        # service control
        serviceControlLayout = QHBoxLayout()
        self.btnStart = QPushButton('Start')
        self.btnStop = QPushButton('Stop')
        serviceControlLayout.addWidget(self.btnStart)
        serviceControlLayout.addWidget(self.btnStop)
        serviceControlLayout.addStretch(1)
        self.serviceLayout.addLayout(serviceControlLayout)
        # add stretch at the end
        self.generalLayout.addStretch(1)
    def setStyles(self):
        self.setStyleSheet("""
            #ServiceFrame{
                border: 1px solid silver;
                border-right-width: 2px;
            }
            #ServiceFrame[state="start"]{
                border-right-color: #26d926;
            }
            #ServiceFrame[state="stop"]{
                border-right-color: red;
            }
            #ServiceFrame[state="connecting"]{
                border-right-color: orange;
            }
            QPushButton{
                height: 25px;
            }
        """)
    def start(self):
        """Reflect the running state: enable Stop, green border."""
        self.serviceStateVal.setText('running')
        self.btnStart.setDisabled(True)
        self.btnStop.setEnabled(True)
        self.serviceFrame.setProperty('state', 'start')
        # re-apply the stylesheet so the [state=...] selector is re-evaluated
        self.serviceFrame.setStyleSheet(self.serviceFrame.styleSheet())
    def stop(self):
        """Reflect the stopped state: enable Start, red border."""
        self.serviceStateVal.setText('stopped')
        self.btnStart.setEnabled(True)
        self.btnStop.setDisabled(True)
        self.serviceFrame.setProperty('state', 'stop')
        self.serviceFrame.setStyleSheet(self.serviceFrame.styleSheet())
    def connecting(self):
        """Reflect the connecting state: both buttons disabled, orange border."""
        self.serviceStateVal.setText('connecting')
        self.btnStart.setDisabled(True)
        self.btnStop.setDisabled(True)
        self.serviceFrame.setProperty('state', 'connecting')
        self.serviceFrame.setStyleSheet(self.serviceFrame.styleSheet())
    def connecting_count(self, counter):
        """Show the reconnect countdown (seconds until the next attempt)."""
        self.serviceStateVal.setText('Connecting after {} seconds...'.format(counter))
class InvoicesTab(BaseTab):
    """Invoices Tab: Orders and Invoices tables with context-aware buttons.

    The visible control buttons change with the active sub-tab: Orders shows
    Refresh/Save all, Invoices shows Remove (see tabHandler).
    """
    # sub-tab indices
    ORDERS = 0
    INVOICES = 1
    def setupWidget(self):
        # tabs
        self.tabs = QTabWidget()
        self.generalLayout.addWidget(self.tabs)
        # - orders
        self.ordersTable = Table(['ID', 'Order', 'Date', 'Status', 'Total'])
        self.tabs.addTab(self.ordersTable, 'Orders')
        # - invoices
        self.invoicesTable = Table(['ID', 'Customer', 'OrderID', 'SavedDate'])
        self.tabs.addTab(self.invoicesTable, 'Invoices')
        # controls
        self.controlLayout = QHBoxLayout()
        self.controlLayout.addStretch(1)
        self.generalLayout.addLayout(self.controlLayout)
        self.btnRefresh = QPushButton('Refresh')
        self.btnSaveAll = QPushButton('Save all')
        self.btnRemove = QPushButton('Remove')
        # Remove is only relevant on the Invoices sub-tab; hidden by default
        self.btnRemove.setHidden(True)
        self.controlLayout.addWidget(self.btnRefresh)
        self.controlLayout.addWidget(self.btnSaveAll)
        self.controlLayout.addWidget(self.btnRemove)
        # attach order details dialog
        self.orderDetails = OrderDetails(self)
    def setStyles(self):
        self.setStyleSheet("""
            QTabBar::tab{
                min-height: 10ex;
                min-width: 30ex;
            }
            QPushButton{
                height: 25px;
                width: 80px;
            }
        """)
    def connectSignals(self):
        self.tabs.currentChanged.connect(self.tabHandler)
    def tabHandler(self, index):
        """Toggle the control buttons to match the newly selected sub-tab."""
        if index == self.ORDERS:
            self.btnRemove.setHidden(True)
            self.btnRefresh.setHidden(False)
            self.btnSaveAll.setHidden(False)
        else:
            self.btnRemove.setHidden(False)
            self.btnRefresh.setHidden(True)
            self.btnSaveAll.setHidden(True)
class WooCommerceTab(BaseTab):
    """WooCommerce Tab: Products/Categories tables with CRUD controls."""
    # sub-tab indices
    PRODUCTS = 0
    CATEGORIES = 1
    def setupWidget(self):
        # tabs
        self.tabs = QTabWidget()
        self.generalLayout.addWidget(self.tabs)
        # - products
        self.productsTable = Table(['ID', 'Name', 'WCID', 'LastUpdate'])
        self.tabs.addTab(self.productsTable, 'Products')
        # - categories
        self.categoriesTable = Table(['ID', 'Name', 'WCID', 'LastUpdate'])
        self.tabs.addTab(self.categoriesTable, 'Categories')
        # controls (shared by both sub-tabs)
        controlLayout = QHBoxLayout()
        controlLayout.addStretch(1)
        self.btnAdd = QPushButton('Add')
        self.btnEdit = QPushButton('Edit')
        self.btnRemove = QPushButton('Remove')
        self.btnUpdate = QPushButton('Update')
        controlLayout.addWidget(self.btnAdd)
        controlLayout.addWidget(self.btnEdit)
        controlLayout.addWidget(self.btnRemove)
        controlLayout.addWidget(self.btnUpdate)
        self.generalLayout.addLayout(controlLayout)
    def setStyles(self):
        self.setStyleSheet("""
            QTabBar::tab{
                min-height: 10ex;
                min-width: 30ex;
            }
            QPushButton{
                height: 25px;
                width: 80px;
            }
        """)
class SettingsTab(BaseTab):
    """Settings Tab: Moein DB, WooCommerce API and invoice-filter settings.

    `get()` / `set()` translate between the Qt widgets and a plain nested
    dict so callers can persist settings without touching Qt types.
    """
    # wire format used when (de)serializing QDateTime values
    DATETIME_FORMAT = 'yyyy-MM-ddTHH:mm:ss'
    # human-friendly format shown inside the QDateTimeEdit widgets
    DATETIME_DISPLAY_FORMAT = 'yyyy-MM-dd @ HH:mm:ss'
    def setupWidget(self):
        """Build the three settings sub-tabs plus Clear/Save controls."""
        # tabs
        self.tabs = QTabWidget()
        self.generalLayout.addWidget(self.tabs)
        # moein settings
        self.moeinForm = QFormLayout()
        moeinFormFrame = QFrame()
        moeinFormFrame.setLayout(self.moeinForm)
        self.tabs.addTab(moeinFormFrame, 'Moein')
        # - server
        self.server = QLineEdit()
        self.moeinForm.addRow(QLabel('Server'), self.server)
        # - username
        self.username = QLineEdit()
        self.moeinForm.addRow(QLabel('Username'), self.username)
        # - password (masked input)
        self.password = QLineEdit()
        self.password.setEchoMode(QLineEdit.Password)
        self.moeinForm.addRow(QLabel('Password'), self.password)
        # - database
        self.database = QLineEdit()
        self.moeinForm.addRow(QLabel('DataBase'), self.database)
        # woocommerce settings
        self.wcForm = QFormLayout()
        wcFormFrame = QFrame()
        wcFormFrame.setLayout(self.wcForm)
        self.tabs.addTab(wcFormFrame, 'WooCommerce')
        # - url
        self.url = QLineEdit()
        self.wcForm.addRow(QLabel('URL'), self.url)
        # - consumer key
        self.ckey = QLineEdit()
        self.wcForm.addRow(QLabel('Consumer Key'), self.ckey)
        # - secret key
        self.skey = QLineEdit()
        self.wcForm.addRow(QLabel('Secret Key'), self.skey)
        # - version
        self.version = QComboBox()
        self.version.addItems(['wc/v3', 'wc/v2', 'wc/v1'])
        self.wcForm.addRow(QLabel('Version'), self.version)
        # invoices settings
        self.invoicesForm = QFormLayout()
        invoicesFormFrame = QFrame()
        invoicesFormFrame.setLayout(self.invoicesForm)
        self.tabs.addTab(invoicesFormFrame, 'Invoices')
        # - status
        # -- options: one checkbox per WooCommerce order status
        self.cbxPending = QCheckBox('Pending')
        self.cbxProcessing = QCheckBox('Processing')
        self.cbxOnHold = QCheckBox('On Hold')
        self.cbxCompleted = QCheckBox('Completed')
        self.cbxCancelled = QCheckBox('Cancelled')
        self.cbxRefunded = QCheckBox('Refunded')
        self.cbxFailed = QCheckBox('Failed')
        self.cbxTrash = QCheckBox('Trash')
        self.cbxAny = QCheckBox('Any')
        # keys are the WooCommerce API status slugs used by get()/set()
        self.statusOptions = {
            'pending': self.cbxPending,
            'processing': self.cbxProcessing,
            'on-hold': self.cbxOnHold,
            'completed': self.cbxCompleted,
            'cancelled': self.cbxCancelled,
            'refunded': self.cbxRefunded,
            'failed': self.cbxFailed,
            'trash': self.cbxTrash,
            'any': self.cbxAny
        }
        # -- options layout: three rows of checkboxes
        statusOptionsLayout = QVBoxLayout()
        statusOptions1Layout = QHBoxLayout()
        statusOptions2Layout = QHBoxLayout()
        statusOptions3Layout = QHBoxLayout()
        statusOptionsLayout.addLayout(statusOptions1Layout)
        statusOptionsLayout.addLayout(statusOptions2Layout)
        statusOptionsLayout.addLayout(statusOptions3Layout)
        statusOptionsLayout.addSpacing(5)
        # any as gp1
        statusOptions1Layout.addWidget(self.cbxAny)
        # pending, processing, on-hold and complete as gp 2
        statusOptions2Layout.addWidget(self.cbxPending)
        statusOptions2Layout.addWidget(self.cbxProcessing)
        statusOptions2Layout.addWidget(self.cbxOnHold)
        statusOptions2Layout.addWidget(self.cbxCompleted)
        # cancelled, refunded, failed and trash as gp3
        statusOptions3Layout.addWidget(self.cbxCancelled)
        statusOptions3Layout.addWidget(self.cbxRefunded)
        statusOptions3Layout.addWidget(self.cbxFailed)
        statusOptions3Layout.addWidget(self.cbxTrash)
        self.invoicesForm.addRow(QLabel('Status'), statusOptionsLayout)
        # - after
        self.after = QDateTimeEdit()
        self.after.setCalendarPopup(True)
        self.after.setDisplayFormat(self.DATETIME_DISPLAY_FORMAT)
        self.invoicesForm.addRow(QLabel('After'), self.after)
        # - before
        self.before = QDateTimeEdit()
        self.before.setCalendarPopup(True)
        self.before.setDisplayFormat(self.DATETIME_DISPLAY_FORMAT)
        self.invoicesForm.addRow(QLabel('Before'), self.before)
        # - guest
        self.guest = QSpinBox()
        self.invoicesForm.addRow(QLabel('Guest Customer ID'), self.guest)
        # controls
        controlLayout = QHBoxLayout()
        controlLayout.addStretch(1)
        self.btnClear = QPushButton('Clear')
        self.btnSave = QPushButton('Save')
        controlLayout.addWidget(self.btnClear)
        controlLayout.addWidget(self.btnSave)
        self.generalLayout.addLayout(controlLayout)
    def setStyles(self):
        self.setStyleSheet("""
            QTabBar::tab{
                min-height: 10ex;
                min-width: 30ex;
            }
            QLabel{
                height: 20px;
                margin-right: 50px;
            }
            QComboBox, QLineEdit, QDateTimeEdit, QSpinBox{
                height: 20px;
            }
            QPushButton{
                height: 25px;
                width: 80px;
            }
        """)
    def get(self):
        """Return all settings as a nested dict of plain Python values."""
        return {
            'wc': {
                'url': self.url.text(),
                'ckey': self.ckey.text(),
                'skey': self.skey.text(),
                'version': self.version.currentText()
            },
            'moein': {
                'server': self.server.text(),
                'username': self.username.text(),
                'password': self.password.text(),
                'database': self.database.text()
            },
            'invoices': {
                'status': [option for option, cbx in self.statusOptions.items() if cbx.isChecked()],
                'after': self.after.dateTime().toString(self.DATETIME_FORMAT),
                'before': self.before.dateTime().toString(self.DATETIME_FORMAT),
                'guest': self.guest.value()
            }
        }
    def set(self, settings):
        """Populate every widget from a nested settings dict (inverse of get)."""
        # woocommerce
        wc = settings.get('wc')
        self.url.setText(wc.get('url'))
        self.ckey.setText(wc.get('ckey'))
        self.skey.setText(wc.get('skey'))
        self.version.setCurrentText(wc.get('version'))
        # moein
        moein = settings.get('moein')
        self.server.setText(moein.get('server'))
        self.username.setText(moein.get('username'))
        # Fix: this line was corrupted (invalid placeholder instead of the
        # dict lookup); restore reading the stored password.
        self.password.setText(moein.get('password'))
        self.database.setText(moein.get('database'))
        # invoices
        invoices = settings.get('invoices')
        for option in invoices.get('status'):
            self.statusOptions[option].setChecked(True)
        self.after.setDateTime(QDateTime.fromString(invoices.get('after'), self.DATETIME_FORMAT))
        self.before.setDateTime(QDateTime.fromString(invoices.get('before'), self.DATETIME_FORMAT))
        self.guest.setValue(invoices.get('guest'))
    def clear(self):
        """Blank out the free-text fields.

        NOTE(review): the invoice filters (status checkboxes, dates, guest)
        and the WooCommerce version combo are deliberately left untouched --
        confirm this is the intended Clear behavior.
        """
        # woocommerce
        self.url.clear()
        self.ckey.clear()
        self.skey.clear()
        # moein
        self.server.clear()
        self.username.clear()
        self.password.clear()
        self.database.clear()
class LogsTab(BaseTab):
    """Logs Tab (placeholder: the logs view is not implemented yet)."""
    def setupWidget(self):
        # no widgets yet
        pass
class HelpTab(BaseTab):
    """Help Tab (placeholder: the help view is not implemented yet)."""
    def setupWidget(self):
        # no widgets yet
        pass
class AboutTab(BaseTab):
    """About Tab (placeholder: the about view is not implemented yet)."""
    def setupWidget(self):
        # no widgets yet
        pass
class Contents(BaseWidget):
    """Stacked container holding every tab; exactly one tab is visible."""
    # Tab indices -- must match the registration order in setupWidget().
    STATUS = 0
    INVOICES = 1
    WOOCOMMERCE = 2
    SETTINGS = 3
    LOGS = 4
    HELP = 5
    ABOUT = 6
    def __init__(self, parent=None):
        # self.tabs must exist before BaseWidget.__init__ triggers the
        # bootstrap/showTab machinery.
        self.tabs = []
        super().__init__(parent)
    def bootstrap(self):
        super().bootstrap()
        self.showTab(self.STATUS)  # Status is the landing tab
    def setupWidget(self):
        """Instantiate every tab, register it and add it to the layout."""
        self.status = StatusTab()
        self.invoices = InvoicesTab()
        self.woocommerce = WooCommerceTab()
        self.settings = SettingsTab()
        self.logs = LogsTab()
        self.help = HelpTab()
        self.about = AboutTab()
        self.tabs = [self.status, self.invoices, self.woocommerce,
                     self.settings, self.logs, self.help, self.about]
        for tab in self.tabs:
            self.generalLayout.addWidget(tab)
    def showTab(self, tabId):
        """Hide every tab, then reveal the one at index *tabId*."""
        for tab in self.tabs:
            tab.hide()
        self.tabs[tabId].show()
###############
# Main Window #
###############
class Main(QMainWindow):
    """Top-level application window: menu (left, 30%) + contents (right, 70%)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bootstrap()
    def bootstrap(self):
        self.setupLayout()
        self.setupMain()
    def setupLayout(self):
        """Create the central widget, window chrome and application icon."""
        self.centralWidget = QWidget(self)
        self.setCentralWidget(self.centralWidget)
        self.generalLayout = QHBoxLayout()
        self.centralWidget.setLayout(self.generalLayout)
        self.setWindowTitle('Recipient')
        self.setGeometry(100, 100, 809, 500)
        self.setMinimumSize(809, 500)
        # Register the icon at every available size so the platform can pick
        # the best match for taskbar/titlebar/alt-tab.
        icon = QIcon()
        for px in (16, 24, 32, 48, 96, 256):
            icon.addFile(':/icons/windowIcon{}.png'.format(px), QSize(px, px))
        self.setWindowIcon(icon)
    def setupMain(self):
        """Split the window between the side menu and the contents pane."""
        self.menu = Menu(self)
        self.generalLayout.addWidget(self.menu, 30)
        self.contents = Contents(self)
        self.generalLayout.addWidget(self.contents, 70)
| StarcoderdataPython |
11392003 | <gh_stars>0
# coding=utf-8
import mock
from dmutils.asset_fingerprint import AssetFingerprinter
# The class-level patch replaces get_asset_file_contents for every test
# method, which receives the mock as its second argument.
@mock.patch(
    'dmutils.asset_fingerprint.AssetFingerprinter.get_asset_file_contents'
)
class TestAssetFingerprint(object):
    """Unit tests for AssetFingerprinter's URL generation and hash caching."""
    def test_url_format(self, get_file_content_mock):
        # the query string is the md5 of the (mocked) file contents
        get_file_content_mock.return_value = """
            body {
                font-family: nta;
            }
        """
        asset_fingerprinter = AssetFingerprinter(
            asset_root='/suppliers/static/'
        )
        assert (
            asset_fingerprinter.get_url('application.css') ==
            '/suppliers/static/application.css?418e6f4a6cdf1142e45c072ed3e1c90a'  # noqa
        )
        assert (
            asset_fingerprinter.get_url('application-ie6.css') ==
            '/suppliers/static/application-ie6.css?418e6f4a6cdf1142e45c072ed3e1c90a'  # noqa
        )
    def test_building_file_path(self, get_file_content_mock):
        # the asset path is resolved relative to the default 'app/static/' root
        get_file_content_mock.return_value = """
            document.write('Hello world!');
        """
        fingerprinter = AssetFingerprinter()
        fingerprinter.get_url('javascripts/application.js')
        fingerprinter.get_asset_file_contents.assert_called_with(
            'app/static/javascripts/application.js'
        )
    def test_hashes_are_consistent(self, get_file_content_mock):
        # identical contents must fingerprint identically regardless of name
        get_file_content_mock.return_value = """
            body {
                font-family: nta;
            }
        """
        asset_fingerprinter = AssetFingerprinter()
        assert (
            asset_fingerprinter.get_asset_fingerprint('application.css') ==
            asset_fingerprinter.get_asset_fingerprint('same_contents.css')
        )
    def test_hashes_are_different_for_different_files(
        self, get_file_content_mock
    ):
        asset_fingerprinter = AssetFingerprinter()
        get_file_content_mock.return_value = """
            body {
                font-family: nta;
            }
        """
        css_hash = asset_fingerprinter.get_asset_fingerprint('application.css')
        get_file_content_mock.return_value = """
            document.write('Hello world!');
        """
        js_hash = asset_fingerprinter.get_asset_fingerprint('application.js')
        assert (
            js_hash != css_hash
        )
    def test_hash_gets_cached(self, get_file_content_mock):
        # after the first lookup the URL comes from the internal cache, so
        # the file is read exactly once
        get_file_content_mock.return_value = """
            body {
                font-family: nta;
            }
        """
        fingerprinter = AssetFingerprinter()
        assert (
            fingerprinter.get_url('application.css') ==
            '/static/application.css?418e6f4a6cdf1142e45c072ed3e1c90a'
        )
        # poison the cache to prove subsequent calls never re-read the file
        fingerprinter._cache[
            'application.css'
        ] = 'a1a1a1'
        assert (
            fingerprinter.get_url('application.css') ==
            'a1a1a1'
        )
        fingerprinter.get_asset_file_contents.assert_called_once_with(
            'app/static/application.css'
        )
class TestAssetFingerprintWithUnicode(object):
    """Regression test: fingerprinting must cope with non-ASCII file content."""
    def test_can_read_self(self):
        # The literal below embeds a non-ASCII character into this very file,
        # which get_url() then reads and hashes.
        unicode_literal = 'Ralph’s apostrophe'  # noqa: F841
        AssetFingerprinter(filesystem_path='tests/').get_url('test_asset_fingerprint.py')
| StarcoderdataPython |
5178704 | <gh_stars>1-10
"""
Author: <NAME>
Created: 10.01.2021
"""
from datetime import datetime
from granturismo_stats import api
# Export the details and leaderboard of each Daily Race; file names include
# the current ISO calendar week so weekly runs don't overwrite each other.
week_number = datetime.now().isocalendar()[1]
for mode in (api.SportsMode.DAILY_A, api.SportsMode.DAILY_B, api.SportsMode.DAILY_C):
    # one event-details JSON and one leaderboard CSV per daily race
    event_details = api.get_event_details(mode)
    event_details.dump_json(f"event_details_daily{mode.value}_week{week_number}.json")
    leaderboard = api.get_event_leaderboard(mode)
    leaderboard.to_csv(f"leaderboard_daily{mode.value}_week{week_number}.csv")
| StarcoderdataPython |
# Savings simulator (pt-BR prompts): count how many months until savings
# growing at 0.7%/month overtake a car price inflating at 0.4%/month.
v_carro = float(input("Informe o valor: "))
total_mes_carro = (0.004 * v_carro) +v_carro  # car price after one month of inflation
p_inicial = 10000  # starting savings amount -- presumably BRL, TODO confirm
renda_total = (0.007 * p_inicial) + p_inicial  # savings after one month of yield
mes = -1
while True:
    mes += 1
    # NOTE(review): the car price was already compounded once before the
    # loop and is compounded again on the first iteration -- confirm intended.
    total_mes_carro = (0.004 * total_mes_carro) + total_mes_carro
    if renda_total < total_mes_carro:
        renda_total = (0.007 * renda_total) + renda_total
    else:
        break
print(f'Você so podera comprar um carro com {mes} meses!')
1609812 | """
MyToolBox is a collection of reusable tools.
Author: <EMAIL>
Copyright (C) CERN 2013-2021
"""
import sys
# Package metadata, also exposed through the standard dunder attributes.
AUTHOR = "<NAME> <<EMAIL>>"
COPYRIGHT = "Copyright (C) CERN 2013-2021"
VERSION = "0.1.0"
DATE = "01 Mar 2013"
__author__ = AUTHOR
__version__ = VERSION
__date__ = DATE
# Interpreter major-version flags: PY2 is True when running under Python 2.x.
PY2 = sys.hexversion < 0x03000000
PY3 = not PY2
| StarcoderdataPython |
1813326 | <gh_stars>1-10
# Print n verses of the "bottles on the wall" song for drink s, counting
# down; the last two verses use special wording and the final verse has no
# trailing blank line.
count = int(input())
drink = input()
for remaining in range(count, 0, -1):
    if remaining == 1:
        print(f"1 bottle of {drink} on the wall, 1 bottle of {drink}.")
        print(f"Take it down, pass it around, no more bottles of {drink}.")
        continue
    if remaining == 2:
        print(f"2 bottles of {drink} on the wall, 2 bottles of {drink}.")
        print(f"Take one down, pass it around, 1 bottle of {drink} on the wall.")
    else:
        print(f"{remaining} bottles of {drink} on the wall, {remaining} bottles of {drink}.")
        print(f"Take one down, pass it around, {remaining-1} bottles of {drink} on the wall.")
    print()
6574016 | import pytest
@pytest.fixture(scope='function')
def talon(ctre):
    # fresh WPI_TalonSRX on CAN device id 1 for every test
    return ctre.WPI_TalonSRX(1)
@pytest.fixture(scope='function')
def cdata(talon, hal_data):
    # simulated HAL CAN record backing the `talon` fixture (device id 1)
    return hal_data['CAN'][1]
# Behavior tests: each exercises one WPI_TalonSRX call and checks the
# resulting simulated HAL CAN record (control mode + raw value).
def test_talon_init(ctre, hal_data):
    # constructing a talon registers its CAN record in the simulated HAL
    assert 1 not in hal_data['CAN']
    ctre.WPI_TalonSRX(1)
    assert 1 in hal_data['CAN']
    assert hal_data['CAN'][1]['type'] == 'talonsrx'
def test_talon_get(talon):
    assert talon.get() == 0
def test_talon_set1(talon, cdata):
    # one-argument set() uses PercentOutput; full output maps to raw 1023
    talon.set(1)
    assert talon.get() == 1
    assert cdata['control_mode'] == talon.ControlMode.PercentOutput
    assert cdata['value'] == 1023
def test_talon_set2(talon, cdata):
    # Velocity mode: get() does not echo the commanded setpoint
    talon.set(talon.ControlMode.Velocity, 1)
    print(talon.get())
    assert talon.get() != 1
    assert cdata['control_mode'] == talon.ControlMode.Velocity
    assert cdata['value'] == 1
def test_talon_set3(talon, cdata):
    # Position mode with an extra (ignored here) argument
    talon.set(talon.ControlMode.Position, 1, 55)
    assert talon.get() != 1
    assert cdata['control_mode'] == talon.ControlMode.Position
    assert cdata['value'] == 1
def test_talon_set4(talon, cdata):
    # Current mode: amps are stored as milliamps in the CAN record
    talon.set(talon.ControlMode.Current, 1.1)
    assert cdata['control_mode'] == talon.ControlMode.Current
    assert cdata['value'] == 1100
def test_talon_disable(talon, cdata):
    talon.disable()
    assert cdata['control_mode'] == talon.ControlMode.Disabled
    assert cdata['value'] == 0
def test_talon_stopMotor(talon, cdata):
    # stopMotor() behaves like disable()
    talon.stopMotor()
    assert cdata['control_mode'] == talon.ControlMode.Disabled
    assert cdata['value'] == 0
def test_talon_setinverted(talon, cdata):
    assert cdata['inverted'] == False
    talon.setInverted(True)
    assert cdata['inverted'] == True
# The following APIs are not implemented in the simulation layer yet; each
# test simply documents that calling them raises NotImplementedError.
@pytest.mark.xfail(raises=NotImplementedError)
def test_talon_initSendable(talon, sendablebuilder):
    talon.set(4)
    talon.initSendable(sendablebuilder)
    sendablebuilder.updateTable()
    assert sendablebuilder.getTable().getNumber("Value", 0.0) == 4
    sendablebuilder.properties[0].setter(3)
    assert talon.get() == 3
@pytest.mark.xfail(raises=NotImplementedError)
def test_talon_configForwardLimitSwitchSource(talon):
    talon.configForwardLimitSwitchSource(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_talon_configReverseLimitSwitchSource(talon):
    talon.configReverseLimitSwitchSource(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_talon_configPeakCurrentLimit(talon):
    talon.configPeakCurrentLimit(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_talon_configPeakCurrentDuration(talon):
    talon.configPeakCurrentDuration(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_talon_configContinuousCurrentLimit(talon):
    talon.configContinuousCurrentLimit(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_talon_enableCurrentLimit(talon):
    talon.enableCurrentLimit(True)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_changeMotionControlFramePeriod(talon):
    talon.changeMotionControlFramePeriod(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_clearMotionProfileHasUnderrun(talon):
    talon.clearMotionProfileHasUnderrun(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_clearMotionProfileTrajectories(talon):
    talon.clearMotionProfileTrajectories()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_clearStickyFaults(talon):
    talon.clearStickyFaults(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configAllowableClosedloopError(talon):
    talon.configAllowableClosedloopError(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configClosedLoopRamp(talon):
    talon.configClosedLoopRamp(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configForwardSoftLimitEnable(talon):
    talon.configForwardSoftLimitEnable(True, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configForwardSoftLimitThreshold(talon):
    talon.configForwardSoftLimitThreshold(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configGetCustomParam(talon):
    talon.configGetCustomParam(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configGetParameter(talon):
    talon.configGetParameter(1,2,3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configMaxIntegralAccumulator(talon):
    talon.configMaxIntegralAccumulator(1,2,3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configMotionAcceleration(talon):
    talon.configMotionAcceleration(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configMotionCruiseVelocity(talon):
talon.configMotionCruiseVelocity(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configNeutralDeadband(talon):
talon.configNeutralDeadband(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configNominalOutputForward(talon):
talon.configNominalOutputForward(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configNominalOutputReverse(talon):
talon.configNominalOutputReverse(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configOpenLoopRamp(talon):
talon.configOpenLoopRamp(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configPeakOutputForward(talon):
talon.configPeakOutputForward(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configPeakOutputReverse(talon):
talon.configPeakOutputReverse(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configRemoteFeedbackFilter(talon):
talon.configRemoteFeedbackFilter(1,2,3,4)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configReverseSoftLimitEnable(talon):
talon.configReverseSoftLimitEnable(True,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configReverseSoftLimitThreshold(talon):
talon.configReverseSoftLimitThreshold(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configSelectedFeedbackSensor(talon):
talon.configSelectedFeedbackSensor(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configSensorTerm(talon):
talon.configSensorTerm(1,2,3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configSetCustomParam(talon):
talon.configSetCustomParam(1,2,3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configSetParameter(talon):
talon.configSetParameter(1,2,3,4,5)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configVelocityMeasurementPeriod(talon):
talon.configVelocityMeasurementPeriod(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configVelocityMeasurementWindow(talon):
talon.configVelocityMeasurementWindow(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configVoltageCompSaturation(talon):
talon.configVoltageCompSaturation(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_configVoltageMeasurementFilter(talon):
talon.configVoltageMeasurementFilter(1,2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_config_IntegralZone(talon):
talon.config_IntegralZone(1,2,3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_config_kD(talon):
talon.config_kD(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_config_kF(talon):
talon.config_kF(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_config_kI(talon):
talon.config_kI(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_config_kP(talon):
talon.config_kP(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_enableHeadingHold(talon):
talon.enableHeadingHold(True)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_enableVoltageCompensation(talon):
talon.enableVoltageCompensation(True)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_follow(talon, ctre):
master = ctre.WPI_TalonSRX(3)
talon.follow(master)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getActiveTrajectoryHeading(talon):
talon.getActiveTrajectoryHeading()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getActiveTrajectoryPosition(talon):
talon.getActiveTrajectoryPosition()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getActiveTrajectoryVelocity(talon):
talon.getActiveTrajectoryVelocity()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getBaseID(talon):
talon.getBaseID()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getBusVoltage(talon):
talon.getBusVoltage()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getClosedLoopError(talon):
talon.getClosedLoopError(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getControlMode(talon):
talon.getControlMode()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getDeviceID(talon):
talon.getDeviceID()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getErrorDerivative(talon):
talon.getErrorDerivative(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getFaults(talon, ctre):
errcode, faults = talon.getFaults()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getFirmwareVersion(talon):
talon.getFirmwareVersion()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getIntegralAccumulator(talon):
talon.getIntegralAccumulator(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getLastError(talon):
talon.getLastError()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getMotionProfileStatus(talon, ctre):
retcode, m = talon.getMotionProfileStatus()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getMotionProfileTopLevelBufferCount(talon):
talon.getMotionProfileTopLevelBufferCount()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getMotorOutputPercent(talon):
talon.getMotorOutputPercent()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getMotorOutputVoltage(talon):
talon.getMotorOutputVoltage()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getOutputCurrent(talon):
talon.getOutputCurrent()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getSelectedSensorPosition(talon):
talon.getSelectedSensorPosition(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getSelectedSensorVelocity(talon):
talon.getSelectedSensorVelocity(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getSensorCollection(talon):
talon.getSensorCollection()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getStatusFramePeriod(talon):
talon.getStatusFramePeriod(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getStickyFaults(talon):
errcode, stickyfaults = talon.getStickyFaults()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_getTemperature(talon):
talon.getTemperature()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_hasResetOccurred(talon):
talon.hasResetOccurred()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_isMotionProfileTopLevelBufferFull(talon):
talon.isMotionProfileTopLevelBufferFull()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_neutralOutput(talon):
talon.neutralOutput()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_overrideLimitSwitchesEnable(talon):
talon.overrideLimitSwitchesEnable(True)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_overrideSoftLimitsEnable(talon):
talon.overrideSoftLimitsEnable(True)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_processMotionProfileBuffer(talon):
talon.processMotionProfileBuffer()
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_pushMotionProfileTrajectory(talon, ctre):
point = ctre.TrajectoryPoint(1, 2, 3, 0, 0, True, True,
ctre.TrajectoryPoint.TrajectoryDuration.T0ms)
talon.pushMotionProfileTrajectory(point)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_selectDemandType(talon):
talon.selectDemandType(True)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_selectProfileSlot(talon):
talon.selectProfileSlot(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_setControlFramePeriod(talon):
talon.setControlFramePeriod(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_setIntegralAccumulator(talon):
talon.setIntegralAccumulator(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_setInverted(talon):
talon.setInverted(True)
assert talon.getInverted() == True
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_setNeutralMode(talon):
talon.setNeutralMode(1)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_setSelectedSensorPosition(talon):
talon.setSelectedSensorPosition(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_setSensorPhase(talon):
talon.setSensorPhase(True)
@pytest.mark.xfail(raises=NotImplementedError)
def test_basemotorcontroller_setStatusFramePeriod(talon):
talon.setStatusFramePeriod(1, 2, 3)
def test_basemotorcontroller_valueUpdated(talon):
talon.valueUpdated()
| StarcoderdataPython |
def main():
    """Print basic library/package information for the memrise scraper."""
    print("""Library Memrise Scraping - Author: <NAME>""")
    print(f"Package: {__package__}")
    print("Github: https://github.com/tquangsdh20/memrise")


if __name__ == "__main__":
    # Fixed: the original guard was commented out and also wrong --
    # `if __name__ == main():` compared __name__ to main's return value.
    main()
| StarcoderdataPython |
9691756 | <filename>motor.py
from gpiozero import Motor
import pygame
from pygame.locals import *
# Drive a motor on GPIO pins 2 (forward) and 3 (backward) from the keyboard:
# hold W to go forward, S to go backward, SPACE to stop; releasing W/S zeroes
# the output. Closing the window exits the loop.
motor = Motor(2, 3)
# NOTE(review): gpiozero's Motor has no enable() method (an enable pin is
# given to the constructor instead), so the original `motor.enable()` call
# would raise AttributeError and has been removed.

pygame.init()  # required before using the pygame event system; a display
               # window is presumably also needed for key events -- TODO confirm

done = False  # fixed: `false` is not a Python name (NameError)
while not done:
    event = pygame.event.poll()
    if event.type == pygame.QUIT:
        done = True  # fixed: `true` -> True
    elif event.type == pygame.KEYUP:
        # Releasing either drive key cuts the motor output.
        if event.key == pygame.K_w:
            motor.value = 0
        if event.key == pygame.K_s:
            motor.value = 0
    else:
        keys = pygame.key.get_pressed()
        if keys[pygame.K_w]:
            motor.forward()
        elif keys[pygame.K_s]:
            motor.backward()  # fixed: the gpiozero method is backward(), not backwards()
        elif keys[pygame.K_SPACE]:
            motor.stop()
| StarcoderdataPython |
1660903 | <gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
# Import third-party packages.
import numpy
import torch
class Agent:
    """Trainer for a predictive forward model over a replay memory.

    The agent owns a torch module ``model(state, action) -> next_state``,
    an MSE objective, and an RMSprop optimizer with L2 weight decay.
    """

    # Training hyper-parameters.
    BATCH_SIZE = 32      # samples drawn per optimisation step
    MIN_SAMPLE = 64      # minimum replay size before training starts
    LR = 0.00025         # RMSprop learning rate
    L2 = 0.0030          # L2 weight-decay coefficient

    def __init__(self, model, memory):
        """Set up the model, its loss/optimizer, and the replay memory.

        :param model: predictive torch module called as ``model(state, action)``.
        :param memory: replay buffer exposing ``__len__``, ``sample`` and ``batch``.
        """
        self.model = model
        self.model_loss = torch.nn.MSELoss()
        self.model_optimizer = torch.optim.RMSprop(
            self.model.parameters(), lr=self.LR, weight_decay=self.L2)
        self.memory = memory
        # Externally tracked performance bookkeeping.
        self.perf = 0.0
        self.dperf = 0.0

    def load(self, path="agent", model="model"):
        """Restore model weights from ``<path>/<model>.txt``."""
        target = "{}/{}.txt".format(path, model)
        self.model.load_state_dict(torch.load(target))

    def save(self, path="agent", model="model"):
        """Write model weights to ``<path>/<model>.txt``."""
        target = "{}/{}.txt".format(path, model)
        torch.save(self.model.state_dict(), target)

    # ------------------------- #
    # --- 0. Model methods --- #
    # ------------------------- #
    def model_parameter_norm(self):
        """Return the sum of the norms of all model parameters as a float."""
        total = torch.tensor(0.0)
        for weights in self.model.parameters():
            total = total + torch.norm(weights)
        return total.item()

    def model_performance(self):
        """Mean squared one-step prediction error over the full memory batch."""
        self.model.eval()
        state, action, next_state = self.memory.batch()
        residual = self.model(state, action) - next_state
        return torch.mean(residual * residual).item()

    def optimize(self):
        """Run one optimisation step on a random mini-batch.

        :return: the mini-batch loss, or None while the memory is too small.
        """
        self.model.train()
        # Not enough experience collected yet -- skip training.
        if len(self.memory) <= self.MIN_SAMPLE:
            return None
        state, action, next_state = self.memory.sample(self.BATCH_SIZE)
        prediction = self.model(state, action)
        self.model_optimizer.zero_grad()
        loss = self.model_loss(prediction, next_state)
        loss.backward()
        self.model_optimizer.step()
        return loss.item()
| StarcoderdataPython |
1611678 | <reponame>legionscript/Django-3.2-Series<gh_stars>0
# Generated by Django 3.2.3 on 2021-05-31 20:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Artist table.
    # Do not hand-edit field definitions; generate a follow-up migration instead.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('bio', models.TextField(blank=True, help_text='Give some info about the artist', null=True)),
                ('song_total', models.IntegerField()),
                ('choices', models.TextField(choices=[('1', 'Choice 1'), ('2', 'Choice 2')])),
                ('favorite', models.BooleanField(default=False)),
                # last_modified/created are maintained automatically by Django.
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('profile_picture', models.ImageField(upload_to='uploads')),
                ('download', models.FileField(upload_to='uploads')),
            ],
        ),
    ]
| StarcoderdataPython |
9744843 | """
======================================================
Experiment Infrastructure (:mod:`mlpy.experiments`)
======================================================
.. currentmodule:: mlpy.experiments
.. autosummary::
:toctree: generated/
:nosignatures:
Experiment
Tasks
=====
.. autosummary::
:toctree: generated/
:nosignatures:
~task.Task
~task.EpisodicTask
~task.SearchTask
"""
from __future__ import division, print_function, absolute_import
import time
__all__ = ['task']
class Experiment(object):
    """Drives an environment through episodes in wall-clock time.

    Wraps an environment and repeatedly steps it with real time deltas.
    When the environment reports completion (e.g. all agents reached their
    goal state) the experiment resets itself; if the environment is still
    complete right after the reset, the experiment exits.

    Parameters
    ----------
    env : Environment
        The environment in which to run the agent(s).
    """

    def __init__(self, env):
        # Wall-clock timestamp of the most recent tick.
        self._t = 0.0
        self._env = env

    def reset(self):
        """Restart the clock and reset the environment."""
        self._t = time.time()
        self._env.reset(self._t)

    def enter(self):
        """Restart the clock and enter the environment."""
        self._t = time.time()
        self._env.enter(self._t)

    def update(self):
        """Advance the environment by the time elapsed since the last tick."""
        now = time.time()
        elapsed = now - self._t
        self._t = now
        self._env.update(elapsed)

    def exit(self):
        """Leave the environment."""
        self._env.exit()

    def run(self):
        """Run episodes until the environment stays complete after a reset."""
        self.enter()
        while True:
            # Step the current episode until the environment reports done.
            while not self._env.is_complete():
                self.update()
            self.reset()
            # If the reset left the environment complete, we are finished.
            if not self._env.is_complete():
                continue
            self.exit()
            break
| StarcoderdataPython |
6482548 | <gh_stars>1-10
import numpy as np
import sys
import os
import requests
import pylru
import schedule
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))#get parent dir path: memory_py
sys.path.insert(0, parentdir)
from SolrClient import SolrClient
from utils.query_util import tokenize
from utils.solr_util import solr_qa
from utils.embedding_util import ff_embedding, mlt_ff_embedding
from qa.base import BaseKernel
from amq.sim import BenebotSim
bt = BenebotSim()
THRESHOLD = 0.90  # minimum similarity score before falling back to the canned reply
REACH = 1  # NOTE(review): not referenced in the visible code -- confirm before removing
CACHE_SIZE = 50  # LRU cache capacity for get_responses results (added 2017/12/26)
#'http://localhost:11403/solr'
class Qa:
    """Question answering over a Solr index, with an LRU cache of results."""

    def __init__(self, core, question_key='question', answer_key='answers', solr_addr = 'http://10.89.100.14:8999/solr'):
        self.core = core  # name of the Solr core to query
        self.question_key = question_key  # Solr doc key holding the question text
        self.answer_key = answer_key  # Solr doc key holding the answer list
        self.base = BaseKernel()
        self.solr = SolrClient(solr_addr)
        self.cache = pylru.lrucache(CACHE_SIZE)  # LRU cache of past lookups (added 2017/12/26)

    def get_responses(self, query, user='solr'):
        '''
        Return the best match for *query* as (best_query, best_answer, best_score).

        2017/12/26: results are memoised in an LRU cache, so a repeated
        query is served without hitting Solr again.
        '''
        if query in self.cache:
            best_query = self.cache[query]['query']
            best_answer = self.cache[query]['answer']
            best_score = self.cache[query]['score']
            return best_query, best_answer,best_score
        docs = solr_qa(self.core, query, field=self.question_key, solr=self.solr)
        if len(docs) == 0:
            # No hit on the configured client -- retry with the default one.
            docs = solr_qa(self.core, query, field=self.question_key)
        else:
            doc = np.random.choice(docs)  # exact match: pick one of the hits at random
            best_query = doc[self.question_key]
            best_answer = doc[self.answer_key]
            best_score = 1
            cached = {"query": best_query, "answer": best_answer, "score": best_score}
            self.cache[query] = cached
            return best_query, np.random.choice(best_answer), best_score
        print(docs)  # NOTE(review): debug print left in -- consider logging instead
        best_query = None
        best_answer = None
        best_score = -1
        # index: rank within the candidate list; doc: one candidate question doc.
        # Only the first ~10 candidates are re-scored with the embedding model.
        for index, doc in enumerate(docs):
            if index > 10:
                break
            b = doc[self.answer_key]
            g = doc[self.question_key]  # a similar question from the Solr index
            score, _g = self.m_similarity(query, g)
            # score,_g = self.bt_similarity(query, g)
            if score > best_score:
                best_score = score
                best_query = _g
                best_answer = b
        if best_score < THRESHOLD:
            # Nothing similar enough -- return the canned fallback reply.
            print('redirecting to third party', best_score)
            answer = '您好!您可以输入以下常见问题进行咨询:\n*科沃斯旺宝产品介绍。\n*如何购买科沃斯旺宝?\n*' \
                     '科沃斯旺宝可以在哪些行业中应用?\n*科沃斯旺宝有哪些使用实例?\n*科沃斯可以为用户和合作' \
                     '伙伴提供哪些服务?\n\n请在下方对话框中提交您的问题,小科将竭尽全力为您解答哟~'
            cached = {'query': query, 'answer': answer, 'score': best_score}
            self.cache[query] = cached
            return query, answer, best_score
        else:
            cached = {"query": best_query, "answer": best_answer, "score": best_score}
            self.cache[query] = cached
            return best_query, np.random.choice(best_answer), best_score

    def embed(self, tokens):
        # Mean of the per-token feed-forward embeddings.
        embeddings = [ff_embedding(word) for word in tokens]
        embeddings = np.asarray(embeddings)
        # print(embeddings.shape)
        embedding = np.mean(embeddings, axis=0)
        # print(embedding)
        return embedding

    def similarity(self, query1, query2):
        # Cosine similarity between the mean token embeddings of two queries.
        def cos(embed1, embed2):
            num = np.dot(embed1, embed2.T)
            denom = np.linalg.norm(embed1) * np.linalg.norm(embed2)
            cos = num / denom
            sin = 0.5 + 0.5 * cos  # NOTE(review): computed but never used
            return cos
        tokens1 = tokenize(query1, 3)
        tokens2 = tokenize(query2, 3)
        embed1 = self.embed(tokens1)
        embed2 = self.embed(tokens2)
        return cos(embed1, embed2)

    def m_similarity(self, query1, m_query2):
        # Batched similarity: tokens joined with ',', candidates joined with
        # '@@', then delegated to mlt_ff_embedding, which returns the best
        # (score, matched-candidate) pair.
        tokens1 = ','.join(tokenize(query1, 3))
        tokens2 = '@@'.join([','.join(tokenize(t, 3)) for t in m_query2])
        score, _g = mlt_ff_embedding(tokens1, tokens2)
        return score, _g

    def bt_similarity(self, query1, query2):
        # Similarity via the BenebotSim service (alternative scorer).
        result = bt.getSim(query1, query2, True)
        score = result.get('sim')
        _g = query2
        return score,_g
    # def clear_cache(self):
def ceshi():
    """Ad-hoc manual check: print the similarity of two sample queries."""
    engine = Qa('interactive')
    first = '我的名字是小明'
    second = '要买抽油烟机'
    print(engine.similarity(first, second))
def main():
    """Query the 'zx_weixin_qa' core once and print the best match found."""
    engine = Qa('zx_weixin_qa')
    question = '科沃斯旺宝服务。'
    matched_query, matched_answer, matched_score = engine.get_responses(question)
    print('best_query:{}'.format(matched_query))
    print('best answer:{}'.format(matched_answer))
    print('best score:{}'.format(matched_score))


if __name__ == '__main__':
    main()
3385820 | #
# Copyright (c) 2015 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
echo client with handmade ssession wrappers (see ssession_wrappers.py)
for none event handled transport, like plain socket
"""
import socket
import ssession_wrappers
# Hard-coded Themis secure-session key material (raw EC key bytes).
# client_private is this client's private key; server_public is the trusted
# public key returned by get_pub_key_by_id for peer id b"server".
client_private = b"\x52\x45\x43\x32\x00\x00\x00\x2d\x51\xf4\xaa\x72\x00\x9f\x0f\x09\xce\xbe\x09\x33\xc2\x5e\x9a\x05\x99\x53\x9d\xb2\x32\xa2\x34\x64\x7a\xde\xde\x83\x8f\x65\xa9\x2a\x14\x6d\xaa\x90\x01"
server_public = b"\x55\x45\x43\x32\x00\x00\x00\x2d\x75\x58\x33\xd4\x02\x12\xdf\x1f\xe9\xea\x48\x11\xe1\xf9\x71\x8e\x24\x11\xcb\xfd\xc0\xa3\x6e\xd6\xac\x88\xb6\x44\xc2\x9a\x24\x84\xee\x50\x4c\x3e\xa0"
class Transport(object):
    """Blocking TCP transport for the secure-session wrappers.

    Supplies the send/receive/get_pub_key_by_id callbacks that the
    ssession wrapper expects, on top of a plain socket to localhost.
    """

    def __init__(self):
        self.socket = socket.socket()
        self.socket.connect(("127.0.0.1", 26260))

    def __del__(self):
        self.socket.close()

    def send(self, message):
        """Send callback: push *message* fully onto the socket."""
        self.socket.sendall(message)

    def receive(self, buffer_length):
        """Receive callback: read up to *buffer_length* bytes."""
        return self.socket.recv(buffer_length)

    def get_pub_key_by_id(self, user_id):
        """Trusted-key lookup; the only known peer id is b"server"."""
        if user_id == b"server":
            return server_public
        raise Exception("no such id")
# Establish a secure session over the transport, exchange a few echo
# messages, then tell the server we are done.
transport = Transport()
session = ssession_wrappers.SSessionClient(b"client", client_private,
                                           transport)
for _ in range(9):
    session.send(b"This is a test message")
    reply = session.receive()
    print("receive: ", reply)
session.send(b"finish")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.