id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8078241 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .pointnet import PointNet
from .pooling import Pooling
from .. ops.transform_functions import PCRNetTransform as transform
class iPCRNet(nn.Module):
    """Iterative PCRNet: estimates a rigid transform (R, t) registering a
    source point cloud onto a template by repeatedly applying a single-pass
    alignment module and composing the incremental poses."""

    def __init__(self, feature_model=PointNet(), droput=0.0, pooling='max'):
        # NOTE: parameter name `droput` (sic) is kept as-is so existing
        # keyword callers are not broken.
        super().__init__()
        self.feature_model = feature_model
        self.pooling = Pooling(pooling)

        # MLP regressing a 7D pose (quaternion + translation) from the
        # concatenated template/source global feature vectors.
        layers = [nn.Linear(self.feature_model.emb_dims * 2, 1024), nn.ReLU(),
                  nn.Linear(1024, 1024), nn.ReLU(),
                  nn.Linear(1024, 512), nn.ReLU(),
                  nn.Linear(512, 512), nn.ReLU(),
                  nn.Linear(512, 256), nn.ReLU()]
        if droput > 0.0:
            layers.append(nn.Dropout(droput))
        layers.append(nn.Linear(256, 7))
        self.linear = nn.Sequential(*layers)

    # Single Pass Alignment Module (SPAM)
    def spam(self, template_features, source, est_R, est_t):
        """One refinement step: predict an incremental 7D pose and fold it
        into the running (est_R, est_t); also returns the re-transformed
        source cloud."""
        batch_size = source.size(0)
        self.source_features = self.pooling(self.feature_model(source))
        y = torch.cat([template_features, self.source_features], dim=1)
        pose_7d = self.linear(y)
        pose_7d = transform.create_pose_7d(pose_7d)

        # Incremental rotation and translation predicted this pass.
        identity = torch.eye(3).to(source).view(1, 3, 3).expand(batch_size, 3, 3).contiguous()
        est_R_temp = transform.quaternion_rotate(identity, pose_7d).permute(0, 2, 1)
        est_t_temp = transform.get_translation(pose_7d).view(-1, 1, 3)

        # Compose with the accumulated transform:
        #   t <- R_inc @ t + t_inc ;  R <- R_inc @ R
        est_t = torch.bmm(est_R_temp, est_t.permute(0, 2, 1)).permute(0, 2, 1) + est_t_temp
        est_R = torch.bmm(est_R_temp, est_R)

        source = transform.quaternion_transform(source, pose_7d)  # Ps' = est_R*Ps + est_t
        return est_R, est_t, source

    def forward(self, template, source, max_iteration=8):
        """Iteratively align `source` to `template` for `max_iteration`
        passes and return the accumulated transform plus diagnostics."""
        est_R = torch.eye(3).to(template).view(1, 3, 3).expand(template.size(0), 3, 3).contiguous()    # (Bx3x3)
        est_t = torch.zeros(1, 3).to(template).view(1, 1, 3).expand(template.size(0), 1, 3).contiguous()  # (Bx1x3)
        template_features = self.pooling(self.feature_model(template))

        # A single loop also covers max_iteration == 1; the original
        # special-cased that value redundantly.
        for _ in range(max_iteration):
            est_R, est_t, source = self.spam(template_features, source, est_R, est_t)

        result = {'est_R': est_R,             # source -> template
                  'est_t': est_t,             # source -> template
                  'est_T': transform.convert2transformation(est_R, est_t),  # source -> template
                  'r': template_features - self.source_features,
                  'transformed_source': source}
        return result
if __name__ == '__main__':
    # Smoke test: register two random point clouds, batch of 10 clouds with
    # 1024 xyz points each.
    template, source = torch.rand(10,1024,3), torch.rand(10,1024,3)
    pn = PointNet()
    net = iPCRNet(pn)
    result = net(template, source)
    import ipdb; ipdb.set_trace()  # drop into a debugger to inspect `result`
1671408 | import os
import time
import datetime
import tweepy
def get_authorized_api():
    """Build an authorized tweepy API client from the GB_* env variables."""
    auth = tweepy.OAuthHandler(os.getenv('GB_CONSUMER_KEY'),
                               os.getenv('GB_CONSUMER_SECRET'))
    auth.set_access_token(os.getenv('GB_ACCESS_TOKEN'),
                          os.getenv('GB_TOKEN_SECRET'))
    return tweepy.API(auth)
def rate_limiter(cursor):
    """Yield items from a tweepy cursor, sleeping 60s through rate limits
    and other Twitter API errors.

    Fix: when the cursor is exhausted, `cursor.next()` raises StopIteration.
    Letting StopIteration escape a generator body raises RuntimeError under
    PEP 479 (Python 3.7+), so it is caught and converted to a clean return.
    """
    while True:
        try:
            yield cursor.next()
        except StopIteration:
            return  # cursor exhausted -- end the generator cleanly
        except tweepy.RateLimitError:
            time.sleep(60)
        except tweepy.error.TweepError:
            time.sleep(60)
def get_user_recent_tweets(api, id):
    """Yield the recent timeline tweets of the given user, rate-limited."""
    # NOTE(review): `id` shadows the builtin; kept for interface stability.
    for tweet in rate_limiter(tweepy.Cursor(api.user_timeline, id=id).items()):
        yield tweet
def search_messages(api, search_params):
    """Yield tweets matching `search_params` (passed to api.search),
    rate-limited."""
    for tweet in rate_limiter(tweepy.Cursor(api.search, **search_params).items()):
        yield tweet
def search_and_reply(search_params, payload):
    """Reply with `payload` to every matching tweet, at most once per author.

    Authors already replied to are recovered from our own recent timeline so
    restarts do not double-post.
    """
    api = get_authorized_api()
    me_account = api.me()
    previous_targets = {
        tweet.in_reply_to_user_id
        for tweet in get_user_recent_tweets(api, me_account.id)
        if tweet.in_reply_to_user_id
    }
    for tweet in search_messages(api, search_params):
        author = tweet.author
        if author.id in previous_targets:
            continue
        print("Tweeting at: {}".format(author.screen_name))
        api.update_status(
            payload.format(target=author.screen_name),
            in_reply_to_status_id=tweet.id
        )
        previous_targets.add(author.id)
if __name__ == '__main__':
    # Only reply to tweets newer than this many days.
    OLDEST_REPLY_DAYS = 14
    SEARCH_PARAMS = {
        # NOTE(review): the trailing `OR -williams -toto` looks malformed
        # (a dangling OR before two negated terms) -- confirm against the
        # intended Twitter search syntax.
        'q': '"gummi\ bears\ theme" OR "gummi\ bears\ song" OR "gummi\ bears\ intro" OR "gummy\ bears\ theme" OR "gummy\ bears\ song" OR "gummy\ bears\ intro" OR -williams -toto',
        'lang': 'en',
        'since': (
            datetime.datetime.now() - datetime.timedelta(days=OLDEST_REPLY_DAYS)
        ).strftime('%Y-%m-%d')
    }
    PAYLOAD = (
        "@{target} i thought you might like to know that the "
        "Gummi Bears Theme Song was sung by <NAME>' son."
    )
    search_and_reply(SEARCH_PARAMS, PAYLOAD)
| StarcoderdataPython |
5003026 | """
Definition of ParentTreeNode:
class ParentTreeNode:
def __init__(self, val):
self.val = val
self.parent, self.left, self.right = None, None, None
"""
class Solution:
    """
    @param: root: The root of the tree
    @param: A: node in the tree
    @param: B: node in the tree
    @return: The lowest common ancestor of A and B
    """
    def lowestCommonAncestorII(self, root, A, B):
        # Collect A's ancestor chain (excluding root) into a set, then climb
        # from B until we hit a node on that chain; if we reach the root
        # without a hit, the root itself is the LCA.
        ancestors_of_a = set()
        node = A
        while node is not root:
            ancestors_of_a.add(node)
            node = node.parent
        node = B
        while node is not root:
            if node in ancestors_of_a:
                return node
            node = node.parent
        return root
11264560 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME>, 2018
from flask import Blueprint, render_template, request
from admin.database import Database
from utils.basic_auth import requires_auth
from utils.responses import Success, Error
# Module-level singletons: the blueprint registered by the app factory and a
# shared Database facade.
blueprint = Blueprint('researcher_tokens', __name__)
database = Database()
@blueprint.route('/')
@requires_auth
def index():
    """Render the admin page listing every active researcher invite token."""
    tokens = [{
        'token': token.token,
        'survey_id': token.survey_id,
        'pretty_name': token.survey.pretty_name,
        'admin_email': database.survey_admin.get_admin_email(token.survey),
        'created_at': token.created_at.replace(microsecond=0),
        'usages': token.usages
    } for token in database.token.researcher_invite.get_active()]
    page_data = {
        'title': 'Researcher Invite Tokens - Itinerum Control Panel',
        'researcher_tokens': tokens
    }
    return render_template('researcher_tokens.index.html', **page_data)
| StarcoderdataPython |
1794761 | import os
import re
import ai_info
import global_config
import collections
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from read_data.utils import *
from read_data.perf import *
from analyze_interference import computeExpectedCost
# from read_data.resource import *
# from analyze_interference import *
# Static mapping between cluster node nicknames and the host names that
# appear in the CBTOOL csv dumps, plus its inverse.
NODE_TO_HOSTNAME = {"puri": "puri.mimuw.edu.pl", "kulcha": "kulcha.mimuw.edu.pl",
                    "baati": "ip_10_2_1_93", "dosa": "ip_10_2_1_91"}
HOSTNAME_TO_NODE = {host: node for node, host in NODE_TO_HOSTNAME.items()}
# CBTOOL prefixes application-level metrics with "app_".
METRIC_CBTOOL_PREFIX = "app_"
class SchedulerExperimentRecord:
    """A single scheduler experiment run -- one (composition, shuffle,
    scheduler) triple located under `base_path/expid`."""

    def __init__(self, base_path, expid, composition_id, shuffle_id, custom_scheduler, ai_types, exp_series):
        self.base_path = base_path
        self.expid = expid
        self.path = os.path.join(base_path, expid)
        self.composition_id = composition_id
        self.shuffle_id = shuffle_id
        self.custom_scheduler = custom_scheduler
        self.ai_types = ai_types
        self.exp_series = exp_series
        # (start, end) measurement window; computed lazily in aggregatePerf().
        self.split_interval = None
        # Fail fast if any AI's VMs span more than one host.
        self.checkOneAIOneHost()

    def getSplitInterval(self, df):
        """Return the steady-state window: 2 minutes after the *last* AI
        produced its first datapoint (warm-up excluded), until the last
        observed datapoint."""
        mins = [df.loc[df["ai_name"] == name, "datetime"].min() for name in df["ai_name"].unique()]
        return max(mins) + pd.Timedelta(minutes=2), df["datetime"].max()

    def aggregatePerfForAiNameAndMetric(self, df, d, metric, ai_name):
        """Extend the accumulator dict `d` with aggregates for one metric."""
        # NOTE(review): `ai_name` is unused here; `df` is assumed to be
        # pre-filtered to a single AI by the caller.
        df = df.loc[df[metric].notna(), :]
        if df.empty:
            msg = f"Performance aggregation failed: no datapoints for " \
                  f"{d} metric={metric}"
            raise ValueError(msg)
        return getPerfAggregateForMetricHelper(df, d, metric)

    def aggregatePerfForAiName(self, df, ai_name):
        """Aggregate all metrics of one AI into a single-row DataFrame."""
        d = {"exp_id": self.expid, "composition_id": self.composition_id, "shuffle_id": self.shuffle_id,
             "scheduler": self.custom_scheduler, "ai_name": ai_name}
        df = df.loc[(df["ai_name"] == ai_name), :]
        host_names = df["host_name"].unique()
        if len(host_names) != 1:
            raise KeyError(f"Unexpected number of host names for single ai {len(host_names)}!=1")
        d.update({"host_name": host_names[0]})
        ai_roles = df["role"].unique()
        ai_types = [ai_info.AI_ROLE_TO_TYPE[role] for role in ai_roles]
        if len(ai_types) != 1:
            raise KeyError(f"Unexpected number of ai types for single ai {len(ai_types)}!=1")
        d.update({"type": ai_types[0]})
        for m in ai_info.AI_TYPE_TO_METRICS[ai_types[0]]:
            metric = f"{METRIC_CBTOOL_PREFIX}{m}"
            d = self.aggregatePerfForAiNameAndMetric(df, d, metric, ai_name)
        return toSingleRowDF(d)

    def aggregatePerf(self, df):
        """Aggregate per-AI performance over the steady-state window."""
        self.split_interval = self.getSplitInterval(df)
        ts = self.split_interval
        df = df.loc[dfInterval(df, *ts), :]
        ai_names = df["ai_name"].unique()
        results = pd.DataFrame()
        for ai_name in ai_names:
            result = self.aggregatePerfForAiName(df, ai_name)
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # pd.concat is the modern equivalent -- confirm pandas pin.
            results = results.append(result, ignore_index=True)
        return results

    def computeAINameToHostAndTypeMap(self, df):
        """Map ai_name -> (node, type) for this experiment, verifying the
        mapping is consistent across all rows."""
        df = df.loc[df["exp_id"] == self.expid, :]
        ai_name_to_host_and_type = {}
        for _, row in df.iterrows():
            t = row["type"]
            node = HOSTNAME_TO_NODE[row["host_name"]]
            ai_name = row["ai_name"]
            new_record = (node, t)
            present_record = ai_name_to_host_and_type.get(ai_name, new_record)
            if present_record != new_record:
                raise ValueError(f"{ai_name} - two vms give different results {present_record} vs {new_record}")
            ai_name_to_host_and_type[ai_name] = new_record
        return ai_name_to_host_and_type

    def checkOneAIOneHost(self):
        """Raise if any AI in the CBTOOL VM-management csv spans >1 host."""
        path = os.path.join(self.path, f"VM_management_{self.expid}.csv")
        # skiprows=57 jumps over the CBTOOL header preamble.
        df = pd.read_csv(path, skiprows=57)
        for ai in df["ai_name"].unique():
            host_names = df.loc[(df["ai_name"] == ai), "host_name"].unique()
            if len(host_names) != 1:
                raise ValueError(f"Ai spans more than one host {self.expid} {ai} {host_names}")
class SchedulerExperimentSeries:
    """A directory of scheduler experiments: loads every run, aggregates and
    rescales performance, and derives per-AI and per-experiment costs."""

    def __init__(self, base_path, config, ai_count, skip_compositions=()):
        self.type = "scheduler"
        self.base_path = base_path
        _, self.name = os.path.split(base_path)
        self.ai_role_count = ai_info.AI_ROLE_TO_COUNT.copy()
        self.rescale_map = config.rescale_map  # hostname to exp_series
        self.ai_count = ai_count
        self.ai_types = config.tasks
        self.experiments = dict()
        self.dfs = {}
        self.df = None
        self.schedules = pd.DataFrame()
        if config.ai_role_count:
            self.ai_role_count.update(config.ai_role_count)
        # Discover experiment directories and build one record per run.
        for exp_match in self.getExperimentPathsMatches(base_path):
            composition_id, shuffle_id, custom_scheduler = exp_match.groups()
            composition_id = int(composition_id)
            if composition_id in skip_compositions:
                print(f"Skipping composition {composition_id}")
                continue
            shuffle_id = int(shuffle_id)
            custom_scheduler = "" if custom_scheduler is None else str(custom_scheduler)
            exp = SchedulerExperimentRecord(base_path, exp_match.string,
                                            composition_id, shuffle_id, custom_scheduler, self.ai_types, self)
            self.experiments[(composition_id, shuffle_id, custom_scheduler)] = exp
        # Pipeline: raw perf -> per-AI aggregates -> rescaled -> cost.
        self.readPerf()
        self.aggregatePerf()
        self.rescalePerf()
        self.computeCost()

    def getPerfMetricsForType(self, t1):
        """Return the CBTOOL column name of the primary metric of AI type t1."""
        return METRIC_CBTOOL_PREFIX + ai_info.AI_TYPE_TO_METRICS[t1][0]

    def readPerf(self):
        """Load raw performance rows of every experiment into dfs['perf']."""
        print("Getting perf data")
        perf = pd.DataFrame()
        for k, exp in self.experiments.items():
            composition_id, shuffle_id, custom_scheduler = k
            df = readExp(exp)
            df["exp_id"] = exp.expid
            df["composition_id"] = composition_id
            df["shuffle_id"] = shuffle_id
            df["custom_scheduler"] = custom_scheduler
            perf = perf.append(df, ignore_index=True)
        self.dfs["perf"] = perf

    def aggregatePerf(self):
        """Aggregate raw rows per experiment/AI; result becomes self.df."""
        results = pd.DataFrame()
        perf = self.dfs["perf"]
        for k, exp in self.experiments.items():
            df = perf.loc[(perf["exp_id"] == exp.expid), :]
            result = exp.aggregatePerf(df)
            results = results.append(result, ignore_index=True)
        self.dfs["perf_agg"] = results
        self.df = results

    def rescalePerf(self):
        """Divide each AI's avg/std metrics by the per-node, per-type
        baseline factor from rescale_map, into rescaled_* columns."""
        for expid in self.df["exp_id"].unique():
            df = self.df.loc[self.df["exp_id"] == expid]
            for ai_name in df["ai_name"].unique():
                df2 = df.loc[df["ai_name"] == ai_name, :]
                host_names = df2["host_name"].unique()
                if host_names.size != 1:
                    raise ValueError(f"Unexpected number of host names for single ai_name {ai_name} "
                                     f"{len(host_names)} != 1")
                node = HOSTNAME_TO_NODE[host_names[0]]
                t = df2["type"].min()
                for metric in ai_info.AI_TYPE_TO_METRICS[t]:
                    factor = self.rescale_map[node][t][metric]
                    # NOTE(review): mask mixes self.df and the filtered view
                    # `df`; pandas aligns the shorter bool series on index,
                    # which works here but is fragile.
                    select = (self.df["exp_id"] == expid) & (df["ai_name"] == ai_name)
                    for mt in ["avg_", "std_"]:
                        input_col = f"{mt}{metric}"
                        output_col = f"rescaled_{input_col}"
                        self.df.loc[select, output_col] = self.df.loc[select, input_col] / factor

    def computeCost(self):
        """Derive a scalar `cost` per AI (inverse throughput, or latency as
        is) and a per-experiment `max_cost`."""
        for expid in self.df["exp_id"].unique():
            df = self.df.loc[self.df["exp_id"] == expid]
            for ai_name in df["ai_name"].unique():
                df2 = df.loc[df["ai_name"] == ai_name, :]
                t = df2["type"].min()
                metric = ai_info.AI_TYPE_TO_METRICS[t][0]
                output_col = "cost"
                input_col = f"rescaled_avg_{metric}"
                select = (self.df["exp_id"] == expid) & (df["ai_name"] == ai_name)
                if metric == "throughput":
                    # Higher throughput is better, so cost is its inverse.
                    self.df.loc[select, output_col] = 1. / self.df.loc[select, input_col]
                else:
                    self.df.loc[select, output_col] = self.df.loc[select, input_col]
            select = self.df.exp_id == expid
            self.df.loc[select, "max_cost"] = self.df.loc[select, "cost"].max()

    def printExperimentResults(self, savefig=False):
        """Scatter-plot max observed cost per composition for each scheduler;
        optionally save under global_config.PLOTS_DIR."""
        xs = []
        ys_map = {s: [] for s in sorted(self.df["scheduler"].unique())}
        for i, composition in enumerate(sorted(self.df["composition_id"].unique())):
            xs.append(i)
            for scheduler in sorted(self.df["scheduler"].unique()):
                select = (self.df["composition_id"] == composition) & (self.df["scheduler"] == scheduler)
                ys_map[scheduler].append(self.df.loc[select, "max_cost"].min())
        fig, ax = plt.subplots()
        plt.title("Scheduler cost")
        for k, v in ys_map.items():
            # k[1:] strips the leading "_" of the scheduler suffix.
            ax.scatter(xs, v, label=k[1:])
        ax.set_ylabel("Max observed cost")
        ax.legend()
        if savefig:
            file_name = f"scheduler_results_{self.name}"
            file_name = os.path.join(global_config.PLOTS_DIR, file_name)
            plt.savefig(file_name)
        else:
            plt.show()
        return xs, ys_map

    @staticmethod
    def getExperimentPathsMatches(base_path):
        """Return regex matches for every experiment directory name of the
        form '<composition>scheduler<shuffle>[_variant]'."""
        path_regex = "([0-9]{1,4})scheduler([0-9]{1,2})(_custom|_random|_round_robin|_default){0,1}"

        def matchExpidRegex(e):
            i = e.split("/")[-1]
            return re.fullmatch(path_regex, i)

        pattern = os.path.join(base_path, f"*")
        expids = glob.glob(pattern)
        matches = [matchExpidRegex(e) for e in expids]
        return [m for m in matches if bool(m)]

    def computeScheduleSummarySingle(self, exp, hosts, columns, index):
        """Count, per host and AI type, how many AIs one experiment placed;
        returns a single-row DataFrame with 'all' roll-ups."""
        result = pd.DataFrame(np.zeros((1, len(columns)), dtype=np.int32), index=index, columns=columns)
        ai_name_to_host_and_type = exp.computeAINameToHostAndTypeMap(self.df)
        for _, host_and_type in ai_name_to_host_and_type.items():
            result.loc[:, host_and_type] += 1
            result.loc[:, (host_and_type[0], "all")] += 1
        for host in hosts:
            for t in ("all",) + self.ai_types:
                result.loc[:, ("all", t)] += result.loc[:, (host, t)]
        return result

    # TODO shuffle id resilient
    def computeScheduleSummary(self):
        """Build self.schedules: placement counts indexed by (composition,
        scheduler) with (host, type) MultiIndex columns (shuffle 0 only)."""
        self.schedules = pd.DataFrame()
        hosts = list(self.df["host_name"].unique())
        hosts = sorted([HOSTNAME_TO_NODE[h] for h in hosts])
        columns = pd.MultiIndex.from_product([["all"] + hosts, ("all",) + tuple(self.ai_types)])
        for composition_id in sorted(self.df["composition_id"].unique()):
            for shuffle_id in [0]:
                for scheduler in sorted(self.df["scheduler"].unique()):
                    exp = self.experiments[(composition_id, shuffle_id, scheduler)]
                    index = pd.MultiIndex.from_tuples([(composition_id, scheduler)], names=["composition", "scheduler"])
                    result = self.computeScheduleSummarySingle(exp, hosts, columns, index)
                    self.schedules = self.schedules.append(result)

    def extractNodeToLoads(self, composition_id, shuffle_id, scheduler):
        """Return {node: per-type AI-count vector} for one schedule.
        NOTE(review): `shuffle_id` is unused -- schedules are keyed by
        (composition, scheduler) only."""
        results = {}
        schedule = self.schedules.loc[(composition_id, scheduler)]
        hosts = [h for h in schedule.index.levels[0] if h != "all"]
        for host in hosts:
            result = np.zeros(len(self.ai_types))
            for i, ai_type in enumerate(self.ai_types):
                result[i] = schedule[(host, ai_type)]
            results[host] = result
        return results

    def extractActualCosts(self, composition_id, shuffle_id, scheduler):
        """Return parallel lists of '<node> <type>' labels and observed costs
        for one experiment."""
        xs = []
        values = []
        df = self.df
        select = (df["composition_id"] == composition_id) & (df["shuffle_id"] == shuffle_id) & (df["scheduler"] == scheduler)
        df = df.loc[select, :]
        for _, row in df.iterrows():
            values.append(row["cost"])
            node = HOSTNAME_TO_NODE[row["host_name"]]
            t = row["type"]
            xs.append(f"{node} {t}")
        return xs, values
class SchedulerMeanMetricComputer:
    """Compares actual vs model-predicted per-(host, type) costs across an
    experiment series and summarizes the error with a chosen metric."""

    # One datapoint label: which host/type it belongs to and under which
    # composition/scheduler it was observed.
    RecordId = collections.namedtuple("RecordId", "host type composition scheduler")

    def __init__(self, ai_types, node_to_coeffs, xs, ys_actual, ys_expected):
        self.ai_types = ai_types
        self.node_to_coeffs = node_to_coeffs
        self.xs = xs                    # list[RecordId], parallel to the ys
        self.ys_actual = ys_actual
        self.ys_expected = ys_expected

    def computeMetricForType(self, t=None, metric_fn=mean_squared_error):
        """Error metric over all datapoints, or only those of AI type `t`."""
        data = self.getDataForType(t)
        _, ys_actual, ys_expected = zip(*data)
        return metric_fn(ys_actual, ys_expected)

    def getDataForType(self, t):
        """Return (x, y_actual, y_expected) triples, filtered to type `t`
        unless `t` is None."""
        data = zip(self.xs, self.ys_actual, self.ys_expected)
        if t is None:
            return data
        return [(x, y1, y2) for (x, y1, y2) in data if x.type == t]

    def computeMetrics(self, metric_fn=mean_squared_error):
        """Return {'all': metric, <type>: metric, ...}."""
        result = dict()
        result["all"] = self.computeMetricForType(metric_fn=metric_fn)
        for t in self.ai_types:
            result[t] = self.computeMetricForType(t, metric_fn)
        return result

    @staticmethod
    def createFromExpSeries(exp_series, node_to_coeffs):
        """Build a computer from every (composition, scheduler) pair of an
        experiment series (shuffle 0)."""
        xs_result, ys_actual_result, ys_expected_result = [], [], []
        df = exp_series.df

        def toRecordId(x, composition, scheduler):
            # Fix: the original declared parameters `c`/`s` but silently used
            # the loop variables from the enclosing scope; now the arguments
            # passed at the call site are actually used.
            host, t = x.split(" ")
            return SchedulerMeanMetricComputer.RecordId(host, t, composition, scheduler)

        for composition in df["composition_id"].unique():
            for scheduler in df["scheduler"].unique():
                node_to_loads = exp_series.extractNodeToLoads(composition, 0, scheduler)
                xs, ys_expected = computeExpectedCostMultipleNodes(exp_series.ai_types, node_to_loads, node_to_coeffs)
                expected_cost_map = dict(zip(xs, ys_expected))
                xs, ys_actual = exp_series.extractActualCosts(composition, 0, scheduler)
                for x, y_actual in zip(xs, ys_actual):
                    xs_result.append(toRecordId(x, composition, scheduler))
                    ys_actual_result.append(y_actual)
                    ys_expected_result.append(expected_cost_map[x])
        return SchedulerMeanMetricComputer(exp_series.ai_types, node_to_coeffs, xs_result,
                                           ys_actual_result, ys_expected_result)
def computeExpectedCostMultipleNodes(ai_types, node_to_loads, node_to_coefficients):
    """Run the per-node cost model for every node and flatten the results
    into parallel '<node> <type>' label and value lists."""
    xs = []
    values = []
    for node, loads in node_to_loads.items():
        values.extend(computeExpectedCost(loads, node_to_coefficients[node]))
        xs.extend(f"{node} {t}" for t in ai_types)
    return xs, values
def plotActualVsExpectedCost(exp_series, node_to_coefficients, composition_id, savefig=False):
    """Side-by-side scatter plots of model-predicted vs observed costs for
    each scheduler of one composition (shuffle 0)."""
    k = len(exp_series.df["scheduler"].unique())
    fig, axs = plt.subplots(1, k, figsize=(k * 5, 4))
    schedulers, actual_res, model_res = [], [], []

    def updateResultList(result, result_list, ymax):
        # Track the global y maximum so all subplots share one scale.
        ymax = max([ymax] + result[1])
        result_list.append(result)
        return ymax

    ymax = 0
    # First pass: gather observed and predicted costs per scheduler.
    for i, scheduler in enumerate(sorted(exp_series.df["scheduler"].unique())):
        schedulers.append(scheduler)
        actual = exp_series.extractActualCosts(composition_id, 0, scheduler)
        ymax = updateResultList(actual, actual_res, ymax)
        node_to_loads = exp_series.extractNodeToLoads(composition_id, 0, scheduler)
        model = computeExpectedCostMultipleNodes(exp_series.ai_types, node_to_loads, node_to_coefficients)
        ymax = updateResultList(model, model_res, ymax)
    # Second pass: draw one subplot per scheduler.
    for i, (scheduler, actual, model) in enumerate(zip(schedulers, actual_res, model_res)):
        xs_model, values_model = model
        xs_actual, values_actual = actual
        ax = axs[i]
        # scheduler[1:] strips the leading "_" of the scheduler suffix.
        ax.set_title(f"{scheduler[1:]} scheduler")
        ax.set_ylabel("Performance cost")
        ax.scatter(xs_model, values_model, label="predicted")
        ax.scatter(xs_actual, values_actual, label="observed")
        ax.tick_params('x', labelrotation=60)
        ax.set_ylim(ymin=0, ymax=ymax)
        ax.legend()
    if savefig:
        file_name = f"scheduler_observed_vs_predicted_{exp_series.name}_{composition_id}"
        file_name = os.path.join(global_config.PLOTS_DIR, file_name)
        plt.savefig(file_name)
    else:
        plt.show()
    return schedulers, actual_res, model_res
| StarcoderdataPython |
1965623 | #!/bin/python
"""
Copyright 2016 <NAME>
1. Bubble Sort
O(n^2)
"""
def sort_bubblesort(my_list):
    """In-place bubble sort; returns the (mutated) input list.  O(n^2).

    Rewritten with print()/range() so the function runs under both
    Python 2 and Python 3 (the original used the Python-2-only print
    statement and xrange).
    """
    for pos_upper in range(len(my_list) - 1, 0, -1):
        for i in range(pos_upper):
            if my_list[i] > my_list[i + 1]:
                my_list[i], my_list[i + 1] = my_list[i + 1], my_list[i]
            # Debug trace of every comparison step (kept from the original).
            print("pos_upper: " + str(pos_upper) + " i: " + str(i) + " my_list: " + str(my_list))
    return my_list
if __name__ == "__main__":
    # Demo run: sort a fixed list and show before/after (print() form works
    # under both Python 2 and 3; the original print statement did not).
    my_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    print(my_list)
    print(sort_bubblesort(my_list))
1878644 | <filename>python-backend/app/api/variances/resources/variance_resource.py
from flask import request
from flask_restplus import Resource
from sqlalchemy_filters import apply_pagination, apply_filters
from werkzeug.exceptions import BadRequest

from app.extensions import api
from ..models.variance import Variance
from ..models.variance_application_status_code import VarianceApplicationStatusCode
from ..response_models import PAGINATED_VARIANCE_LIST
from ...utils.access_decorators import requires_any_of, MINE_VIEW
from ...utils.resources_mixins import UserMixin, ErrorMixin
# Pagination defaults for the variance list endpoint.
PAGE_DEFAULT = 1
PER_PAGE_DEFAULT = 25
class VarianceResource(Resource, UserMixin, ErrorMixin):
    @api.doc(
        description='Get a list of variances.',
        params={
            'page': f'The page number of paginated records to return. Default: {PAGE_DEFAULT}',
            'per_page': f'The number of records to return per page. Default: {PER_PAGE_DEFAULT}',
            'variance_application_status_code':
            'Comma-separated list of code statuses to include in results. Default: All status codes.',
        })
    @requires_any_of([MINE_VIEW])
    @api.marshal_with(PAGINATED_VARIANCE_LIST, code=200)
    def get(self):
        """Return a paginated, optionally status-filtered list of variances."""
        records, pagination_details = self._apply_filters_and_pagination(
            page_number=request.args.get('page', PAGE_DEFAULT, type=int),
            page_size=request.args.get('per_page', PER_PAGE_DEFAULT, type=int),
            application_status=request.args.get('variance_application_status_code', type=str))
        if not records:
            # NOTE(review): a Query object is normally truthy, so this guard
            # only fires if the helper ever returns None -- confirm intent.
            # (`BadRequest` is imported from werkzeug.exceptions; the original
            # file raised it without importing it.)
            raise BadRequest('Unable to fetch variances.')
        return {
            'records': records.all(),
            'current_page': pagination_details.page_number,
            'total_pages': pagination_details.num_pages,
            'items_per_page': pagination_details.page_size,
            'total': pagination_details.total_results,
        }

    def _apply_filters_and_pagination(self,
                                      page_number=PAGE_DEFAULT,
                                      page_size=PER_PAGE_DEFAULT,
                                      application_status=None):
        """Filter variances by status codes (defaulting to all active codes)
        and paginate the result."""
        # Idiom fix: list comprehension instead of list(map(lambda ...)).
        status_filter_values = [
            code.variance_application_status_code
            for code in VarianceApplicationStatusCode.active()
        ]
        if application_status is not None:
            status_filter_values = application_status.split(',')
        filtered_query = apply_filters(
            Variance.query,
            [{
                'field': 'variance_application_status_code',
                'op': 'in',
                'value': status_filter_values
            }])
        return apply_pagination(filtered_query, page_number, page_size)
| StarcoderdataPython |
307293 | <reponame>RealGeeks/django-cache-purge-hooks
import mock
from sampleproject.sample.models import TestModel
from django.test.utils import override_settings
import pytest
@pytest.mark.django_db
@mock.patch('cache_purge_hooks.backends.varnishbackend.VarnishManager.purge')
def test_purge_on_save(purge):
    # Saving a model instance must trigger a varnish purge of its URL when
    # the varnish backend is configured.
    with override_settings(CACHE_PURGE_HOOKS_BACKEND='cache_purge_hooks.backends.varnishbackend.VarnishManager'):
        a = TestModel()
        a.save()
        purge.assert_called_with('/foo')
11339271 | from modules.motion_detector import MotionDetector
class DetectMotionPipe:
    """Pipeline stage that annotates a frame dict with detected motion."""

    def __init__(self, conf):
        # `conf` is expanded into MotionDetector keyword arguments.
        self.detector = MotionDetector(**conf)

    def __call__(self, data):
        return self.detect(data)

    def detect(self, data):
        """Store the detector's output for data['image'] under
        'motion_locations' and return the (mutated) frame dict."""
        data["motion_locations"] = self.detector.detect(data["image"])
        return data
| StarcoderdataPython |
3436795 | import yaml
def handle_pr_push(payload):
    """Print a YAML Argo workflow scaffolded for the PR's head commit."""
    (branch, pr_number, commit_sha, org, repo) = extract_pr_info(payload)
    print(yaml.dump(scaffold_workflow(repo, org, f"pr-{pr_number}", commit_sha)))
def handle_master_push(payload):
    """Print a YAML Argo workflow scaffolded for a push to master."""
    (commit_sha, org, repo) = extract_master_info(payload)
    print(yaml.dump(scaffold_workflow(repo, org, "master", commit_sha)))
def extract_pr_info(payload):
    """Pull (branch, pr_number, head_sha, org, repo) out of a GitHub
    pull_request webhook payload."""
    pull_request = payload["pull_request"]
    head = pull_request["head"]
    head_repo = head["repo"]
    return (head["ref"],
            pull_request["number"],
            head["sha"],
            head_repo["owner"]["login"],
            head_repo["name"])
def extract_master_info(payload):
    """Pull (commit_sha, org, repo) out of a GitHub push webhook payload.

    NOTE(review): this reads the `before` sha (pre-push HEAD), not `after`;
    confirm that is the commit the caller intends to build.
    """
    repository = payload["repository"]
    return (payload["before"],
            repository["owner"]["name"],
            repository["name"])
def make_template(name, image, command, args):
    """Build an Argo workflow container template with fixed small resource
    limits and GitHub credentials injected from the `github` secret."""
    github_creds_env = {
        "name": "GITHUB_CREDS",
        "valueFrom": {
            "secretKeyRef": {
                "name": "github",
                "key": "creds"
            }
        }
    }
    container = {
        "image": image,
        "command": command,
        "args": args,
        "resources": {
            "limits": {
                "memory": "32Mi",
                "cpu": "100m"
            }
        },
        "env": [github_creds_env]
    }
    return {"name": name, "container": container}
def git_clone_template(service_name, org):
    """Template that clones https://github.com/<org>/<service_name>.git."""
    return make_template("clone", "alpine/git", ["git"], ["clone", f"https://github.com/{org}/{service_name}.git"])
def scaffold_workflow(service_name, org, branch, commit_sha):
    """Build a minimal Argo Workflow manifest that clones the service repo.

    The workflow name is generated by the cluster from the
    `metadata.generateName` prefix.
    """
    return {
        "apiVersion": "argoproj.io/v1alpha1",
        "kind": "Workflow",
        "metadata": {
            # Fix: the Kubernetes ObjectMeta field is camelCase
            # `generateName`; the original key "generatename" is not a valid
            # field and would be rejected/ignored by the API server.
            "generateName": f"metapipeline-{service_name}-{branch}-{commit_sha[0:7]}-"
        }, "spec": {
            "entrypoint": "clone",
            "templates": [git_clone_template(service_name, org)]
        }
    }
11379114 | import time
from idle_time import IdleMonitor
import json
from datetime import datetime
from src import actions
from pynput import keyboard
from threading import Thread
import subprocess
import requests
computer_code = "258752"  # identifies this machine to the api-bta.tk backend
# Rolling buffer of recent keystrokes and a "something was typed" flag, both
# updated from the keyboard-listener thread.
pressed_keys = ""
pressed = False
process = {}  # last observed process-name -> instance-count snapshot
user_automations = {}  # automation id (str) -> parsed action definition
# Action names the backend may reference, mapped to local implementations.
system_actions = {
    "lock": actions.lock_screen,
    "rickroll": actions.rickroll,
    "photo": actions.photo,
    "wait": actions.wait,
    "shell_run": actions.shell_run,
    "sound": actions.sound,
    "send_keys": actions.send_keys,
    "write": actions.write,
    "python": actions.python,
}
def refresh_automations():
    """Re-download this computer's automations and rebuild the module-level
    `user_automations` map (id -> action dict, plus per-trigger fired
    flags)."""
    global user_automations
    user_automations = {}
    body = {"code": computer_code}
    automations = json.loads(requests.post('https://api-bta.tk/actions/automations',
                                           json=body
                                           ).text)
    for auto in automations:
        action = json.loads(auto["action"])
        user_automations[str(auto["id"])] = action
        if action["trigger"]:
            # One "already fired" latch per trigger-condition set.  (The key
            # "tiggered" (sic) is used consistently elsewhere; do not rename.)
            user_automations[str(auto["id"])]["tiggered"] = [False] * len(action["trigger"])


# Load the initial automation set at startup.
refresh_automations()
def on_release(q):
    """pynput callback: record the released key into the rolling buffer and
    set the `pressed` flag.

    Fix: KeyCode objects expose `char` as None for keys without a printable
    representation (e.g. ctrl-combos); the original concatenated that None
    into the buffer and crashed with TypeError.
    """
    global pressed_keys, pressed
    pressed = True
    if getattr(q, "char", None) is not None:
        pressed_keys += q.char
    elif q == keyboard.Key.space:
        pressed_keys += " "
    # Keep only the last 20 characters.
    if len(pressed_keys) > 20:
        pressed_keys = pressed_keys[1:]


# Start listening for key releases on a background thread.
keyboard.Listener(on_release=on_release).start()
def process_listener():
    """Snapshot running process names (via src/get_process.sh) and return the
    names that are new, or have more instances, since the previous call.
    Updates the module-level `process` snapshot as a side effect."""
    global process
    temp_process = {}
    new_process = []
    ex = subprocess.Popen(['sh', 'src/get_process.sh'], stdout=subprocess.PIPE)
    std, _ = ex.communicate()
    for proc in std.decode().splitlines():
        if proc.startswith("kworker"):
            continue  # kernel worker threads are noise
        if not proc in process:
            new_process.append(proc)
        # Count instances of each process name in this snapshot.
        temp_process[proc] = (
            1 if not proc in temp_process else temp_process[proc] + 1)
    for proc in temp_process:
        # Also report processes whose instance count grew.
        if (proc not in process or temp_process[proc] > process[proc]) and not proc in new_process:
            new_process.append(proc)
    process = temp_process
    return new_process


# Prime the baseline snapshot so the first real check only reports changes.
process_listener()
def check_conditions(conditions):
    """Return True when every condition in `conditions` currently holds.

    Supported keys: inactivity (idle seconds threshold), hour, minute,
    typed (True = any keystroke, or a substring of recent keystrokes) and
    process (a newly started process name).  Absent keys count as satisfied.
    Consumes the keyboard buffer/flag on a `typed` match.
    """
    global pressed_keys, pressed
    triggers_cond = {
        "inactivity": False,
        "hour": False,
        "minute": False,
        "typed": False,
        "process": False
    }
    now = datetime.now()
    for cond in triggers_cond:
        if cond not in conditions:
            triggers_cond[cond] = True  # unspecified conditions always pass
        else:
            if cond == "inactivity":
                triggers_cond[cond] = conditions["inactivity"] < monitor.get_idle_time()
            elif cond == "hour":
                triggers_cond[cond] = conditions["hour"] == int(
                    now.strftime("%H"))
            elif cond == "minute":
                triggers_cond[cond] = conditions["minute"] == int(
                    now.strftime("%M"))
            elif cond == "typed":
                if conditions["typed"] == True:
                    # Any keystroke since the last check; flag consumed here.
                    triggers_cond[cond] = pressed
                    pressed = False
                else:
                    if conditions["typed"].lower() in pressed_keys:
                        triggers_cond[cond] = True
                        pressed_keys = ""  # consume the buffer on a match
                    else:
                        triggers_cond[cond] = False
            elif cond == "process":
                triggers_cond[cond] = conditions["process"] in process_listener()
    return all(value for value in triggers_cond.values())
def is_triggered(triggers, auto_index):
    """Return True if any trigger-condition set of automation `auto_index`
    fires.  Time-based sets latch (marked fired) until a reset_cond() call;
    typed/process sets are edge-detected by check_conditions itself."""
    for i, trigger in enumerate(triggers):
        if user_automations[auto_index]["tiggered"][i]:
            continue  # already fired; wait for a reset
        if check_conditions(trigger):
            if not any(cond in trigger for cond in ["typed", "process"]):
                user_automations[auto_index]["tiggered"][i] = True
            return True
    return False
def run_action(conditions):
    """Return True when an action's own condition list allows it to run.

    A literal True means unconditional; otherwise at least one condition
    set must hold.
    """
    if conditions == True:  # noqa: E712 -- the backend sends a literal true
        return True
    return any(check_conditions(cond) for cond in conditions)
# System-wide idle-time monitor used by the inactivity condition.
monitor = IdleMonitor.get_monitor()


def reset_cond(condition):
    """Clear the fired latch of every trigger that uses `condition`, so it
    may fire again (called when idle time resets or the hour/minute rolls
    over)."""
    for auto_i in user_automations:
        auto = user_automations[auto_i]
        if auto["trigger"]:
            for j, trigger in enumerate(auto["trigger"]):
                if condition in trigger:
                    user_automations[auto_i]["tiggered"][j] = False
print("Checking for automations")
last_values = {
"inactivity": 0,
"hour": "",
"minute": ""
}
counter = 0
while (True):
if counter == 10:
refresh_automations()
print(user_automations)
counter = 0
now = datetime.now()
if monitor.get_idle_time() < last_values["inactivity"]:
reset_cond("inactivity")
if now.strftime("%H") != last_values["hour"]:
reset_cond("hour")
if now.strftime("%M") != last_values["minute"]:
reset_cond("minute")
last_values["inactivity"] = monitor.get_idle_time()
last_values["hour"] = now.strftime("%H")
last_values["minute"] = now.strftime("%M")
for auto_i in user_automations:
auto = user_automations[auto_i]
if auto["trigger"] == False:
continue
if is_triggered(auto["trigger"], auto_i):
for action in auto["actions"]:
if run_action(action["condition"]) and action["action"] in system_actions:
for i in range(action["repeat"] if "repeat" in action else 1):
params = (action["params"]
if "params" in action else [])
system_actions[action["action"]](*params)
time.sleep(1)
counter += 1
| StarcoderdataPython |
# NOTE(review): interactive H2O example fragment -- `air_model` is presumably
# a previously trained GBM model object; confirm where it is defined.
# View the specified parameters of your GBM model
air_model.params
# Examine the performance of the trained model
air_model
240698 | <filename>app/api/__init__.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# @Time : 2019/4/18 11:03 AM
# @Author : ShaHeTop-Almighty-ares
# @Email : <EMAIL>
# @File : __init__.py.py
# @Software: PyCharm
from flask import Blueprint
from flask_restful import Api
from .restful_demo.restful_demo import RestfulDemoApi, DemoApi
from .method_view_demo.method_view_demo import MethodViewDemo
from .route_demo.route_demo import module_01_index
method_view_api = Blueprint('cms', __name__)
restful_api = Blueprint('api', __name__)
api = Api(restful_api)
"""
flask restful 路由注册
带参数url 可以一起注册
无参数:http://0.0.0.0:9999/api/demo
带参数:http://0.0.0.0:9999/api/demo/123/456
api.add_resource(DemoApi, '/demo', '/demo/<page>/<size>', endpoint='demo')
"""
api.add_resource(DemoApi, '/demo', '/demo/<page>/<size>', endpoint='demo')
api.add_resource(RestfulDemoApi, '/', endpoint='restful_demo_api')
api.init_app(restful_api)
"""
Method View 类视图路由注册
带参数 url 需要分开注册
无参数: http://0.0.0.0:9999/cms/demo
带参数: http://0.0.0.0:9999/cms/demo/999/888
"""
method_view_api.add_url_rule('/', view_func=MethodViewDemo.as_view('demo'))
method_view_api.add_url_rule('/<page>/<size>/', view_func=MethodViewDemo.as_view('demo_pram'))
"""
路由注册
@method_view_api.route('/', methods=["GET", "POST"])
def index():
return jsonify('this cms')
上面方式等价于以下方式(统一管理路由):
def index():
return jsonify('this cms')
route_admin.add_url_rule('/index', methods=["GET", "POST"], endpoint='index', view_func=index)
"""
method_view_api.add_url_rule('/m1', methods=["GET", "POST"], endpoint='module_01_index', view_func=module_01_index)
"""
静态文件处理/访问方式
http://0.0.0.0:9999/static/flask.jpg
http://0.0.0.0:9999/static/images/flask.jpg
"""
"""
Static file handling / access patterns:
http://0.0.0.0:9999/static/flask.jpg
http://0.0.0.0:9999/static/images/flask.jpg
"""
@method_view_api.route('/<path:path>/images')
def static_file(path):
    # Serve a file from the blueprint's static folder.
    return method_view_api.send_static_file(path)
# Default locations for oremda runtime state, the plasma-store socket and
# mounted data.
DEFAULT_OREMDA_VAR_DIR = "/tmp"
DEFAULT_PLASMA_SOCKET_PATH = f"{DEFAULT_OREMDA_VAR_DIR}/plasma.sock"
DEFAULT_DATA_DIR = "/data"
from setuptools import setup

setup(
    name='rfpimp',
    version='1.2',
    url='https://github.com/parrt/random-forest-importances',
    license='MIT',
    py_modules=['rfpimp'],
    author='<NAME>, <NAME>',
    author_email='<EMAIL>, <EMAIL>',
    # Fix: depend on 'scikit-learn'; the bare 'sklearn' PyPI name is a
    # deprecated dummy package and modern pip rejects installing it.
    install_requires=['numpy', 'pandas', 'scikit-learn', 'matplotlib'],
    description='Permutation and drop-column importance for scikit-learn random forests',
    keywords='scikit-learn random forest feature permutation importances',
    classifiers=['License :: OSI Approved :: MIT License',
                 'Intended Audience :: Developers']
)
233634 |
# Copyright (c) 2011, <NAME> [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
# Python 2 to 3 workarounds: bind version-neutral aliases for the string
# base type and the lazy range iterator used throughout this module.
# NOTE(review): under Python 2 this binds _strobj to str rather than
# basestring, so unicode values would fail isinstance checks -- confirm
# this is intended.
import sys
if sys.version_info[0] == 2:
    _strobj = str
    _xrange = xrange
elif sys.version_info[0] == 3:
    _strobj = str
    _xrange = range
import collections
import csv
import itertools
import inspect
import math
import sqlite3
import warnings
from pprint import pprint as pp
from copy import copy, deepcopy
from collections import OrderedDict, Counter, namedtuple
import pylab
import scipy
import numpy as np
import pystaggrelite3
from dictset import DictSet
from . import stats
from .stats.qsturng import qsturng, psturng
from .misc.texttable import Texttable as TextTable
from .misc.support import *
from . import plotting
# base.py holds DataFrame and Pyvttbl
# this file is a bit long but they can't be split without
# running into circular import complications
class DataFrame(OrderedDict):
"""holds the data in a dummy-coded group format"""
def __init__(self, *args, **kwds):
"""
initialize a :class:`DataFrame` object.
| Subclass of :mod:`collections`. :class:`OrderedDict`.
| Understands the typical initialization :class:`dict` signatures.
| Keys must be hashable.
| Values become numpy.arrays or numpy.ma.MaskedArrays.
"""
super(DataFrame, self).__init__()
#: sqlite3 connection
self.conn = sqlite3.connect(':memory:')
#: sqlite3 cursor
self.cur = self.conn.cursor()
#: list of sqlite3 aggregates
self.aggregates = tuple('avg count count group_concat ' \
'group_concat max min sum total tolist' \
.split())
# Bind pystaggrelite3 aggregators to sqlite3
for n, a, f in pystaggrelite3.getaggregators():
self.bind_aggregate(n, a, f)
#: prints the sqlite3 queries to stdout before
#: executing them for debugging purposes
self.PRINTQUERIES = False
#: controls whether plot functions return the test dictionaries
self.TESTMODE = False
#: holds the factors conditions in a DictSet Singleton
self.conditions = DictSet()
#: dict to map keys to sqlite3 types
self._sqltypesdict = {}
super(DataFrame, self).update(*args, **kwds)
def bind_aggregate(self, name, arity, func):
"""
binds a sqlite3 aggregator to :class:`DataFrame`
args:
name: string to be associated with the aggregator
arity: the number of inputs required by the aggregator
func: the aggregator class
returns:
None
| :class:`DataFrame`.aggregates is a list of the available aggregators.
| For information on rolling your own aggregators see:
http://docs.python.org/library/sqlite3.html
"""
self.conn.create_aggregate(name, arity, func)
self.aggregates = list(self.aggregates)
self.aggregates.append(name)
self.aggregates = tuple(self.aggregates)
def _get_sqltype(self, key):
"""
returns the sqlite3 type associated with the provided key
args:
key: key in :class:`DataFrame` (raises KeyError if key not in self)
returns:
a string specifiying the sqlite3 type associated with the data in self[key]:
{ 'null', 'integer', 'real', 'text'}
"""
return self._sqltypesdict[key]
def _get_nptype(self, key):
"""
returns the numpy type object associated with the provided key
args:
key: key in :class:`DataFrame` (raises KeyError if key not in self)
returns:
a numpy object specifiying the type associated with the data in self[key]:
========= ================
sql type numpy type
========= ================
'null' np.dtype(object)
'integer' np.dtype(int)
'real' np.dtype(float)
'text' np.dtype(str)
========= ================
"""
return {'null' : np.dtype(object),
'integer' : np.dtype(int),
'real' : np.dtype(float),
'text' : np.dtype(str)}[self._sqltypesdict[key]]
def _get_mafillvalue(self, key):
"""
returns the default fill value for invalid data associated with the provided key.
args:
key: key in :class:`DataFrame` (raises KeyError if key not in self)
returns:
string, float, or int associated with the data in self[key]
========= ============
sql type default
========= ============
'null' '?'
'integer' 999999
'real' 1e20
'text' 'N/A'
========= ============
| returned values match the defaults associated with np.ma.MaskedArray
"""
return {'null' : '?',
'integer' : 999999,
'real' : 1e20,
'text' : 'N/A'}[self._sqltypesdict[key]]
def read_tbl(self, fname, skip=0, delimiter=',',labels=True):
"""
loads tabulated data from a plain text file
args:
fname: path and name of datafile
kwds:
skip: number of lines to skip before looking for column labels. (default = 0)
delimiter: string to seperate values (default = "'")
labels: bool specifiying whether first row (after skip) contains labels.
(default = True)
returns:
None
| Checks and renames duplicate column labels as well as checking
| for missing cells. readTbl will warn and skip over missing lines.
"""
# open and read dummy coded data results file to data dictionary
fid = open(fname, 'r')
csv_reader = csv.reader(fid, delimiter=delimiter)
data = OrderedDict()
mask = {}
colnames = []
for i, row in enumerate(csv_reader):
# skip requested rows
if i < skip:
pass
# read column labels from ith+1 line
elif i == skip and labels:
colnameCounter = Counter()
for k, colname in enumerate(row):
colname = colname.strip()#.replace(' ','_')
colnameCounter[colname] += 1
if colnameCounter[colname] > 1:
warnings.warn("Duplicate label '%s' found"
%colname,
RuntimeWarning)
colname += '_%i'%colnameCounter[colname]
colnames.append(colname)
data[colname] = []
mask[colname] = []
# if labels is false we need to make labels
elif i == skip and not labels:
colnames = ['COL_%s'%(k+1) for k in range(len(row))]
for j,colname in enumerate(colnames):
if _isfloat(row[j]):
data[colname] = [float(row[j])]
mask[colname] = [0]
else:
data[colname] = [row[i]]
if row[i] == '':
mask[colname] = [1]
else:
mask[colname] = [0]
# for remaining lines where i>skip...
else:
if len(row) != len(colnames):
warnings.warn('Skipping line %i of file. '
'Expected %i cells found %i'\
%(i+1, len(colnames), len(row)),
RuntimeWarning)
else:
for j, colname in enumerate(colnames):
colname = colname.strip()
if _isfloat(row[j]):
data[colname].append(float(row[j]))
mask[colname].append(0)
else:
data[colname].append(row[j])
if row[j] == '':
mask[colname].append(1)
else:
mask[colname].append(0)
# close data file
fid.close()
self.clear()
for k, v in list(data.items()):
## In __setitem__ the conditions DictSet and datatype are set
self.__setitem__(k, v, mask[k])
del data
def __setitem__(self, key, item, mask=None):
"""
assign a column in the table
args:
key: hashable object to associate with item
item: an iterable that is put in an np.array or np.ma.array
kwds:
mask: mask value passed to np.ma.MaskedArray.__init__()
returns:
None
| df.__setitem__(key, item) <==> df[key] = item
| The assigned item must be iterable. To add a single row use
the insert method. To another table to this one use
the attach method.
example:
>>> ...
>>> print(df)
first last age gender
==================================
Roger Lew 28 male
Bosco Robinson 5 male
Megan Whittington 26 female
John Smith 51 male
<NAME> 49 female
>>> import numpy as np
>>> df['log10(age)'] = np.log10(df['age'])
>>> print(df)
first last age gender log10(age)
===============================================
<NAME> 28 male 1.447
<NAME> 5 male 0.699
<NAME> 26 female 1.415
<NAME> 51 male 1.708
<NAME> 49 female 1.690
>>>
"""
# check item
if not hasattr(item, '__iter__'):
raise TypeError("'%s' object is not iterable"%type(item).__name__)
if key in list(self.keys()):
del self[key]
# a mask was provided
if mask != None:
# data contains invalid entries and a masked array should be created
# this needs to be nested incase mask != None
if not all([m==0 for m in mask]):
# figure out the datatype of the valid entries
self._sqltypesdict[key] = \
self._determine_sqlite3_type([d for d,m in zip(item,mask) if not m])
# replace invalid values
fill_val = self._get_mafillvalue(key)
x = np.array([(d, fill_val)[m] for d,m in zip(item,mask)])
# call super.__setitem__
super(DataFrame, self).\
__setitem__(key, \
np.ma.array(x, mask=mask, dtype=self._get_nptype(key)))
# set or update self.conditions DictSet
self.conditions[key] = self[key]
# return if successful
return
# no mask provided or mask is all true
self._sqltypesdict[key] = self._determine_sqlite3_type(item)
super(DataFrame, self).\
__setitem__(key, np.array(item, dtype=self._get_nptype(key)))
self.conditions[key] = self[key]
## def __iter__(self):
## raise NotImplementedError('use .keys() to iterate')
def __delitem__(self, key):
"""
delete a column from the table
args:
key: associated with the item to delete
returns:
None
| df.__delitem__(key) <==> del df[key]
example:
>>> ...
>>> print(df)
first last age gender log10(age)
===============================================
<NAME> 28 male 1.447
Bosco Robinson 5 male 0.699
Megan Whittington 26 female 1.415
John Smith 51 male 1.708
Jane Doe 49 female 1.690
>>> del df['log10(age)']
>>> print(df)
first last age gender
==================================
Roger Lew 28 male
Bosco Robinson 5 male
Megan Whittington 26 female
John Smith 51 male
Jane Doe 49 female
>>>
"""
del self._sqltypesdict[key]
del self.conditions[key]
super(DataFrame, self).__delitem__(key)
def __str__(self):
"""
returns human friendly string representation of object
args:
None
returns:
string with easy to read representation of table
| df.__str__() <==> str(df)
"""
if self == {}:
return '(table is empty)'
tt = TextTable(max_width=100000000)
dtypes = [t[0] for t in self.types()]
dtypes = list(''.join(dtypes).replace('r', 'f'))
tt.set_cols_dtype(dtypes)
aligns = [('l','r')[dt in 'fi'] for dt in dtypes]
tt.set_cols_align(aligns)
tt.header(list(self.keys()))
if self.shape()[1] > 0:
tt.add_rows(list(zip(*list(self.values()))), header=False)
tt.set_deco(TextTable.HEADER)
# output the table
return tt.draw()
def row_iter(self):
"""
iterate over the rows in table
args:
None
returns:
iterator that yields OrderedDict objects with (key,value) pairs
cooresponding to the data in each row
example:
>>> print(df)
first last age gender
==================================
Roger Lew 28 male
Bosco Robinson 5 male
Megan Whittington 26 male
John Smith 51 female
Jane Doe 49 female
>>> for case in df.row_iter():
print(case)
OrderedDict([('first', 'Roger'), ('last', 'Lew'), ('age', 28), ('gender', 'male')])
OrderedDict([('first', 'Bosco'), ('last', 'Robinson'), ('age', 5), ('gender', 'male')])
OrderedDict([('first', 'Megan'), ('last', 'Whittington'), ('age', 26), ('gender', 'male')])
OrderedDict([('first', 'John'), ('last', 'Smith'), ('age', 51), ('gender', 'female')])
OrderedDict([('first', 'Jane'), ('last', 'Doe'), ('age', 49), ('gender', 'female')])
>>>
"""
for i in _xrange(self.shape()[1]):
yield OrderedDict([(k, self[k][i]) for k in self])
def types(self):
"""
returns a list of the sqlite3 datatypes of the columns
args:
None
returns:
an ordered list of sqlite3 types.
| order matches self.keys()
"""
if len(self) == 0:
return []
return [self._sqltypesdict[k] for k in self]
def shape(self):
"""
returns the size of the data in the table as a tuple
args:
None
returns:
tuple (number of columns, number of rows)
"""
if len(self) == 0:
return (0, 0)
return (len(self), len(list(self.values())[0]))
def _are_col_lengths_equal(self):
"""
private method to check if the items in self have equal lengths
args:
None
returns:
returns True if all the items are equal
returns False otherwise
"""
if len(self) < 2:
return True
# if self is not empty
counts = list(map(len, list(self.values())))
if all(c - counts[0] + 1 == 1 for c in counts):
return True
else:
return False
def _determine_sqlite3_type(self, iterable):
"""
determine the sqlite3 datatype of iterable
args:
iterable: a 1-d iterable (list, tuple, np.array, etc.)
returns:
sqlite3 type as string: 'null', 'integer', 'real', or 'text'
"""
if len(iterable) == 0:
return 'null'
elif all(map(_isint, iterable)):
return 'integer'
elif all(map(_isfloat, iterable)):
return 'real'
else:
return 'text'
def _execute(self, query, t=None):
"""
private method to execute sqlite3 query
| When the PRINTQUERIES bool is true it prints the queries
before executing them
"""
if t == None:
t=tuple()
if self.PRINTQUERIES:
print(query)
if len(t) > 0:
print(' ', t)
print()
self.cur.execute(query, t)
def _executemany(self, query, tlist):
"""
private method to execute sqlite3 queries
| When the PRINTQUERIES bool is true it prints the queries
before executing them. The execute many method is about twice
as fast for building tables as the execute method.
"""
if self.PRINTQUERIES:
print(query)
print(' ', tlist[0])
print(' ...\n')
self.cur.executemany(query, tlist)
def _get_indices_where(self, where):
"""
determines the indices cooresponding to the conditions specified by the where
argument.
args:
where: a string criterion without the 'where'
returns:
a list of indices
"""
# preprocess where
tokens = []
nsubset2 = set()
names = list(self.keys())
for w in where.split():
print(w)
if w in names:
tokens.append(_sha1(w))
nsubset2.add(w)
else:
tokens.append(w)
where = ' '.join(tokens)
super(DataFrame, self).__setitem__(('INDICES','integer'),
list(range(self.shape()[1])))
nsubset2.add('INDICES')
# build the table
self.conn.commit()
self._execute('drop table if exists GTBL')
self.conn.commit()
query = 'create temp table GTBL\n ('
query += ', '.join('%s %s'%(_sha1(n), self._get_sqltype(n)) for n in nsubset2)
query += ')'
self._execute(query)
# build insert query
query = 'insert into GTBL values ('
query += ','.join('?' for n in nsubset2) + ')'
self._executemany(query, list(zip(*[self[n] for n in nsubset2])))
self.conn.commit()
super(DataFrame, self).__delitem__(('INDICES','integer'))
# get the indices
query = 'select %s from GTBL where %s'%(_sha1('INDICES'), where)
self._execute(query)
def _build_sqlite3_tbl(self, nsubset, where=None):
"""
build or rebuild sqlite table with columns in nsubset based on
the where list.
args:
nsubset: a list of keys to include in the table
where: criterion the entries in the table must satisfy
returns:
None
| where can be a list of tuples. Each tuple should have three
elements. The first should be a column key (label). The second
should be an operator: in, =, !=, <, >. The third element
should contain value for the operator.
| where can also be a list of strings. or a single string.
| sqlite3 table is built in memory and has the id TBL
"""
if where == None:
where = []
if isinstance(where, _strobj):
where = [where]
# 1. Perform some checking
##############################################################
if not hasattr(where, '__iter__'):
raise TypeError( "'%s' object is not iterable"
% type(where).__name__)
# 2. Figure out which columns need to go into the table
# to be able to filter the data
##############################################################
nsubset2 = set(nsubset)
for item in where:
if isinstance(item, _strobj):
tokens = item.split()
if tokens[0] not in list(self.keys()):
raise KeyError(tokens[0])
nsubset2.update(w for w in tokens if w in list(self.keys()))
else: # tuple
if item[0] in list(self.keys()):
nsubset2.add(item[0])
# orders nsubset2 to match the order in self.keys()
nsubset2 = [n for n in self if n in nsubset2]
# 3. Build a table
##############################################################
self.conn.commit()
self._execute('drop table if exists TBL2')
self.conn.commit()
query = 'create temp table TBL2\n ('
query += ', '.join('%s %s'%(_sha1(n), self._get_sqltype(n)) for n in nsubset2)
query += ')'
self._execute(query)
# build insert query
query = 'insert into TBL2 values ('
query += ','.join('?' for n in nsubset2) + ')'
# because sqlite3 does not understand numpy datatypes we need to recast them
# using astype to numpy.object
self._executemany(query, list(zip(*[self[n].astype(np.object) for n in nsubset2])))
self.conn.commit()
# 4. If where == None then we are done. Otherwise we need
# to build query to filter the rows
##############################################################
if where == []:
self._execute('drop table if exists TBL')
self.conn.commit()
self._execute('alter table TBL2 rename to TBL')
self.conn.commit()
else:
# Initialize another temporary table
self._execute('drop table if exists TBL')
self.conn.commit()
query = []
for n in nsubset:
query.append('%s %s'%(_sha1(n), self._get_sqltype(n)))
query = ', '.join(query)
query = 'create temp table TBL\n (' + query + ')'
self._execute(query)
# build filter query
query = []
for item in where:
# process item as a string
if isinstance(item, _strobj):
tokens = []
for word in item.split():
if word in list(self.keys()):
tokens.append(_sha1(word))
else:
tokens.append(word)
query.append(' '.join(tokens))
# process item as a tuple
else:
try:
(k,op,value) = item
except:
raise Exception('could not upack tuple from where')
if _isfloat(value):
query.append(' %s %s %s'%(_sha1(k), op, value))
elif isinstance(value,list):
if _isfloat(value[0]):
args = ', '.join(str(v) for v in value)
else:
args = ', '.join('"%s"'%v for v in value)
query.append(' %s %s (%s)'%(_sha1(k), op, args))
else:
query.append(' %s %s "%s"'%(_sha1(str(k)), op, value))
query = ' and '.join(query)
nstr = ', '.join(_sha1(n) for n in nsubset)
query = 'insert into TBL select %s from TBL2\n where '%nstr + query
# run query
self._execute(query)
self.conn.commit()
# delete TBL2
self._execute('drop table if exists TBL2')
self.conn.commit()
def _get_sqlite3_tbl_info(self):
"""
private method to get a list of tuples containing information
relevant to the current sqlite3 table
args:
None
returns:
list of tuples:
| Each tuple cooresponds to a column.
| Tuples include the column name, data type, whether or not the
| column can be NULL, and the default value for the column.
"""
self.conn.commit()
self._execute('PRAGMA table_info(TBL)')
return list(self.cur)
def pivot(self, val, rows=None, cols=None, aggregate='avg',
where=None, attach_rlabels=False, method='valid'):
"""
produces a contingency table according to the arguments and keywords
provided.
args:
val: the colname to place as the data in the table
kwds:
rows: list of colnames whos combinations will become rows
in the table if left blank their will be one row
cols: list of colnames whos combinations will become cols
in the table if left blank their will be one col
aggregate: function applied across data going into each cell
of the table <http://www.sqlite.org/lang_aggfunc.html>_
where: list of tuples or list of strings for filtering data
method:
'valid': only returns rows or columns with valid entries.
'full': return full factorial combinations of the
conditions specified by rows and cols
returns:
:class:`PyvtTbl` object
"""
if rows == None:
rows = []
if cols == None:
cols = []
if where == None:
where = []
##############################################################
# pivot programmatic flow #
##############################################################
# 1. Check to make sure the table can be pivoted with the #
# specified parameters #
# 2. Create a sqlite table with only the data in columns #
# specified by val, rows, and cols. Also eliminate #
# rows that meet the exclude conditions #
# 3. Build rnames and cnames lists #
# 4. Build query based on val, rows, and cols #
# 5. Run query #
# 6. Read data to from cursor into a list of lists #
# 7. Query grand, row, and column totals #
# 8. Clean up #
# 9. flatten if specified #
# 10. Initialize and return PyvtTbl Object #
##############################################################
# 1. Check to make sure the table can be pivoted with the
# specified parameters
##############################################################
# This may seem excessive but it provides better feedback
# to the user if the errors can be parsed out before had
# instead of crashing on confusing looking code segments
# check to see if data columns have equal lengths
if not self._are_col_lengths_equal():
raise Exception('columns have unequal lengths')
# check the supplied arguments
if val not in list(self.keys()):
raise KeyError(val)
if not hasattr(rows, '__iter__'):
raise TypeError( "'%s' object is not iterable"
% type(cols).__name__)
if not hasattr(cols, '__iter__'):
raise TypeError( "'%s' object is not iterable"
% type(cols).__name__)
for k in rows:
if k not in list(self.keys()):
raise KeyError(k)
for k in cols:
if k not in list(self.keys()):
raise KeyError(k)
# check for duplicate names
dup = Counter([val] + rows + cols)
del dup[None]
if not all(count == 1 for count in list(dup.values())):
raise Exception('duplicate labels specified')
# check aggregate function
aggregate = aggregate.lower()
if aggregate not in self.aggregates:
raise ValueError("supplied aggregate '%s' is not valid"%aggregate)
# check to make sure where is properly formatted
# todo
# 2. Create a sqlite table with only the data in columns
# specified by val, rows, and cols. Also eliminate
# rows that meet the exclude conditions
##############################################################
self._build_sqlite3_tbl([val] + rows + cols, where)
# 3. Build rnames and cnames lists
##############################################################
# Refresh conditions list so we can build row and col list
self._execute('select %s from TBL'
%', '.join(_sha1(n) for n in [val] + rows + cols))
Zconditions = DictSet(list(zip([val]+rows+cols, list(zip(*list(self.cur))))))
# rnames_mask and cnanes_mask specify which unique combinations of
# factor conditions have valid entries in the table.
# 1 = valid
# 0 = not_valid
# Build rnames
if rows == []:
rnames = [1]
rnames_mask = [1]
else:
rnames = []
rnames_mask = []
conditions_set = set(zip(*[self[n] for n in rows]))
for vals in Zconditions.unique_combinations(rows):
rnames_mask.append(tuple(vals) in conditions_set)
rnames.append(list(zip(rows,vals)))
# Build cnames
if cols == []:
cnames = [1]
cnames_mask = [1]
else:
cnames = []
cnames_mask = []
conditions_set = set(zip(*[self[n] for n in cols]))
for vals in Zconditions.unique_combinations(cols):
cnames_mask.append(tuple(vals) in conditions_set)
cnames.append(list(zip(cols,vals)))
# 4. Build query based on val, rows, and cols
##############################################################
# Here we are using string formatting to build the query.
# This method is generally discouraged for security, but
# in this circumstance I think it should be okay. The column
# labels are protected with leading and trailing underscores.
# The rest of the query is set by the logic.
#
# When we pass the data in we use the (?) tuple format
if aggregate == 'tolist':
agg = 'group_concat'
else:
agg = aggregate
query = ['select ']
if rnames == [1] and cnames == [1]:
query.append('%s( %s ) from TBL'%(agg, _sha1(val)))
else:
if rnames == [1]:
query.append(_sha1(val))
else:
query.append(', '.join(_sha1(r) for r in rows))
if cnames == [1]:
query.append('\n , %s( %s )'%(agg, _sha1(val)))
else:
for cs in cnames:
query.append('\n , %s( case when '%agg)
if all(map(_isfloat, list(zip(*cols))[1])):
query.append(
' and '.join(('%s=%s'%(_sha1(k), v) for k, v in cs)))
else:
query.append(
' and '.join(('%s="%s"'%(_sha1(k) ,v) for k, v in cs)))
query.append(' then %s end )'%_sha1(val))
if rnames == [1]:
query.append('\nfrom TBL')
else:
query.append('\nfrom TBL group by ')
for i, r in enumerate(rows):
if i != 0:
query.append(', ')
query.append(_sha1(r))
# 5. Run Query
##############################################################
self._execute(''.join(query))
# 6. Read data from cursor into a list of lists
##############################################################
data, mask = [],[]
val_type = self._get_sqltype(val)
fill_val = self._get_mafillvalue(val)
# keep the columns with the row labels
if attach_rlabels:
cnames = [(r, '') for r in rows].extend(cnames)
cnames_mask = [1 for i in _xrange(len(rows))].extend(cnames_mask)
if aggregate == 'tolist':
if method=='full':
i=0
for row in self.cur:
while not rnames_mask[i]:
data.append([[fill_val] for j in _xrange(len(cnames))])
mask.append([[True] for j in _xrange(len(cnames))])
i+=1
data.append([])
mask.append([])
for cell, _mask in zip(list(row)[-len(cnames):], cnames_mask):
if cell == None or not _mask:
data[-1].append([fill_val])
mask[-1].append([True])
else:
if val_type == 'real' or val_type == 'integer':
split =cell.split(',')
data[-1].append(list(map(float, split)))
mask[-1].append([False for j in _xrange(len(split))])
else:
split =cell.split(',')
data[-1].append(split)
mask[-1].append([False for j in _xrange(len(split))])
i+=1
else:
for row in self.cur:
data.append([])
mask.append([])
for cell, _mask in zip(list(row)[-len(cnames):], cnames_mask):
if _mask:
if cell == None:
data[-1].append([fill_val])
mask[-1].append([True])
elif val_type == 'real' or val_type == 'integer':
split =cell.split(',')
data[-1].append(list(map(float, split)))
mask[-1].append([False for j in _xrange(len(split))])
else:
split =cell.split(',')
data[-1].append(split)
mask[-1].append([False for j in _xrange(len(split))])
# numpy arrays must have the same number of dimensions so we need to pad
# cells to the maximum dimension of the data
max_len = max(_flatten([[len(c) for c in L] for L in data]))
for i,L in enumerate(data):
for j,c in enumerate(L):
for k in _xrange(max_len - len(data[i][j])):
data[i][j].append(fill_val)
mask[i][j].append(True)
else:
if method=='full':
i=0
for row in self.cur:
while not rnames_mask[i]:
data.append([fill_val for j in _xrange(len(cnames))])
mask.append([True for j in _xrange(len(cnames))])
i+=1
row_data = list(row)[-len(cnames):]
data.append([(fill_val,v)[m] for v,m in zip(row_data, cnames_mask)])
mask.append([not m for v,m in zip(row_data, cnames_mask)])
i+=1
else:
for row in self.cur:
row_data = list(row)[-len(cnames):]
data.append([v for v,m in zip(row_data, cnames_mask) if m])
mask.append([False for m in cnames_mask if m])
# 7. Get totals
##############################################################
row_tots, col_tots, grand_tot = [], [], np.nan
row_mask, col_mask = [], []
if aggregate not in ['tolist', 'group_concat', 'arbitrary']:
query = 'select %s( %s ) from TBL'%(agg, _sha1(val))
self._execute(query)
grand_tot = list(self.cur)[0][0]
if cnames != [1] and rnames != [1]:
query = ['select %s( %s ) from TBL group by'%(agg, _sha1(val))]
query.append(', '.join(_sha1(r) for r in rows))
self._execute(' '.join(query))
if method=='full':
i=0
row_tots=[]
row_mask=[]
for tup in self.cur:
while not rnames_mask[i]:
row_tots.append(fill_val)
row_mask.append(True)
i+=1
row_tots.append(tup[0])
row_mask.append(False)
i+=1
else:
row_tots = [tup[0] for tup in self.cur]
row_mask = [False for z in row_tots]
query = ['select %s( %s ) from TBL group by'%(agg, _sha1(val))]
query.append(', '.join(_sha1(r) for r in cols))
self._execute(' '.join(query))
if method=='full':
i=0
col_tots=[]
col_mask=[]
for tup in self.cur:
while not cnames_mask[i]:
col_tots.append(fill_val)
col_mask.append(True)
i+=1
col_tots.append(tup[0])
col_mask.append(False)
i+=1
else:
col_tots = [tup[0] for tup in self.cur]
col_mask = [False for z in col_tots]
row_tots = np.ma.array(row_tots, mask=row_mask)
col_tots = np.ma.array(col_tots, mask=col_mask)
# 8. Clean up
##############################################################
self.conn.commit()
# 9. Build rnames and cnames if method=='valid'
##############################################################
if method=='valid':
rnames = [n for n,m in zip(rnames,rnames_mask) if m]
cnames = [n for n,m in zip(cnames,cnames_mask) if m]
# 10. Initialize and return PyvtTbl Object
##############################################################
##
## print(data)
## print(mask)
## print(rnames)
## print(cnames)
## print(col_tots)
## print(row_tots)
## print(grand_tot)
## print()
##
return PyvtTbl(data, val, Zconditions, rnames, cnames, aggregate,
mask=mask,
row_tots=row_tots, col_tots=col_tots, grand_tot=grand_tot,
attach_rlabels=attach_rlabels)
def select_col(self, key, where=None):
"""
determines rows in table that satisfy the conditions given by where and returns
the values of key in the remaining rows
args:
key: column label of data to return
kwds:
where: constraints to apply to table before returning data
returns:
a list
example:
>>> ...
>>> print(df)
first last age gender
==================================
Roger Lew 28 male
Bosco Robinson 5 male
Megan Whittington 26 female
John Smith 51 male
Jane Doe 49 female
>>> df.select_col('age', where='gender == "male"')
[28, 5, 51]
>>>
"""
if where == None:
where = []
# 1.
# check to see if data columns have equal lengths
if not self._are_col_lengths_equal():
raise Exception('columns have unequal lengths')
# 2.
# check the supplied arguments
if key not in list(self.keys()):
raise KeyError(val)
## # check to make sure exclude is mappable
## # todo
##
## # warn if exclude is not a subset of self.conditions
## if not set(self.keys()) >= set(tup[0] for tup in where):
## warnings.warn("where is not a subset of table conditions",
## RuntimeWarning)
if where == []:
return copy(self[key])
else:
self._build_sqlite3_tbl([key], where)
self._execute('select * from TBL')
return [r[0] for r in self.cur]
def sort(self, order=None):
"""
sort the table in-place
kwds:
order: is a list of factors to sort by
to reverse order append " desc" to the factor
returns:
None
example:
>>> from pyvttbl import DataFrame
>>> from collections import namedtuple
>>> Person = namedtuple('Person',['first','last','age','gender'])
>>> df =DataFrame()
>>> df.insert(Person('Roger', 'Lew', 28, 'male')._asdict())
>>> df.insert(Person('Bosco', 'Robinson', 5, 'male')._asdict())
>>> df.insert(Person('Megan', 'Whittington', 26, 'female')._asdict())
>>> df.insert(Person('John', 'Smith', 51, 'male')._asdict())
>>> df.insert(Person('Jane', 'Doe', 49, 'female')._asdict())
>>> df.sort(['gender', 'age'])
>>> print(df)
first last age gender
==================================
Megan Whittington 26 female
Jane Doe 49 female
Bosco Robinson 5 male
Roger Lew 28 male
John Smith 51 male
>>>
"""
if order == None:
order = []
# Check arguments
if self == {}:
raise Exception('Table must have data to sort data')
# check to see if data columns have equal lengths
if not self._are_col_lengths_equal():
raise Exception('columns have unequal lengths')
if not hasattr(order, '__iter__'):
raise TypeError( "'%s' object is not iterable"
% type(order).__name__)
# check or build order
if order == []:
order = list(self.keys())
# there are probably faster ways to do this, we definitely need
# to treat the words as tokens to avoid problems were column
# names are substrings of other column names
for i, k in enumerate(order):
ks = k.split()
if ks[0] not in list(self.keys()):
raise KeyError(k)
if len(ks) == 1:
order[i] = _sha1(ks[0])
elif len(ks) == 2:
if ks[1].lower() not in ['desc', 'asc']:
raise Exception("'order arg must be 'DESC' or 'ASC'")
order[i] = '%s %s'%(_sha1(ks[0]), ks[1])
elif len(ks) > 2:
raise Exception('too many parameters specified')
# build table
self._build_sqlite3_tbl(list(self.keys()))
# build and excute query
query = 'select * from TBL order by ' + ', '.join(order)
self._execute(query)
# read sorted order from cursor
d = []
for row in self.cur:
d.append(list(row))
d = list(zip(*d)) # transpose
for i, n in enumerate(self.keys()):
self[n] = list(d[i])
def where(self, where):
"""
Applies the where filter to a copy of the DataFrame, and
returns the new DataFrame. The associated DataFrame is not copied.
args:
where: criterion to apply to new table
returns:
a new :class:`DataFrame`
example:
>>> ...
>>> print(df)
first last age gender
==================================
Roger Lew 28 male
Bosco Robinson 5 male
Megan Whittington 26 female
John Smith 51 male
<NAME> 49 female
>>> print(df.where('age > 20 and age < 45'))
first last age gender
==================================
<NAME> 28 male
<NAME> 26 female
>>>
"""
new = DataFrame()
self._build_sqlite3_tbl(list(self.keys()), where)
self._execute('select * from TBL')
for n, values in zip(list(self.keys()), list(zip(*list(self.cur)))):
new[n] = list(values)
return new
def where_update(self, where):
"""
Applies the where filter in-place.
args:
where: criterion to apply to table
returns:
None
"""
self._build_sqlite3_tbl(list(self.keys()), where)
self._execute('select * from TBL')
for n, values in zip(list(self.keys()), list(zip(*list(self.cur)))):
del self[n]
self[n] = list(values)
def validate(self, criteria, verbose=False, report=False):
    """
    validate the data in the table.

    args:
        criteria: a dict whose keys should correspond to columns in the
            table.  The values should be functions which take a single
            parameter and return a boolean.

    kwds:
        verbose:
            True: provide real-time feedback ('.' per pass, 'X' per fail)
            False: don't provide feedback (default)
        report:
            True: print a report upon completion
            False: don't print report (default)

    returns:
        True: the criteria was satisfied
        False: the criteria was not satisfied (including when criteria
               contains keys not present in the table)
    """
    # do some checking
    if self == {}:
        raise Exception('table must have data to validate data')

    try:
        c = set(criteria.keys())   # columns the caller wants checked
        s = set(self.keys())       # columns actually in the table
    except:
        raise TypeError('criteria must be mappable type')

    # check if the criteria dict has keys that aren't in self;
    # (c ^ (c & s)) is the set of criteria keys missing from the table
    all_keys_found = bool((c ^ (c & s)) == set())

    # if the user doesn't want a detailed report we don't have
    # to do as much book keeping and can greatly simplify the
    # logic
    if not verbose and not report:
        if all_keys_found:
            return all(all(map(criteria[k], self[k])) for k in criteria)
        else:
            return False

    # loop through specified columns and apply the
    # validation function to each value in the column
    valCounter = Counter()   # tallies results: True/False, 'n', 'code_failures'
    reportDict = {}          # per-column list of failure messages
    for k in (c & s):
        reportDict[k] = []
        if verbose:
            print('\nValidating %s:'%k)

        for i,v in enumerate(self[k]):
            try:
                func = criteria[k]
                result = func(v)
            except:
                # a raising validator counts as a failure, tracked separately
                result = False
                valCounter['code_failures'] +=1

            valCounter[result] += 1
            valCounter['n'] += 1

            if result:
                if verbose:
                    print('.', end='')
            else:
                reportDict[k].append(
                    "Error: on index %i value "
                    "'%s' failed validation"%(i, str(v)))
                if verbose:
                    print('X', end='')
        if verbose:
            print()

    # do some book keeping; the test passes only when every tested
    # value returned True AND no criteria keys were missing
    pass_or_fail = (valCounter['n'] == valCounter[True]) & all_keys_found

    # print a report if the user has requested one
    if report:
        print('\nReport:')
        for k in (c&s):
            if len(reportDict[k]) > 0:
                print('While validating %s:'%k)
            for line in reportDict[k]:
                print(' ',line)

        print( ' Values tested:', valCounter['n'],
               '\n Values passed:', valCounter[True],
               '\n Values failed:', valCounter[False])

        if valCounter['code_failures'] != 0:
            print('\n (%i values failed because '
                  'func(x) did not properly execute)'
                  %valCounter['code_failures'])

        if not all_keys_found:
            print('\n Error: criteria dict contained '
                  'keys not found in table:'
                  '\n ', ', '.join(c ^ (c & s)))

        if pass_or_fail:
            print('\n***Validation PASSED***')
        else:
            print('\n***Validation FAILED***')

    # return the test result
    return pass_or_fail
def attach(self, other):
    """
    Append the rows of another :class:`DataFrame` to self.

    args:
        other: a :class:`DataFrame` whose column names and sqlite
               types match those of self

    returns:
        None
    """
    # sanity checks before touching any data
    if not isinstance(other, DataFrame):
        raise TypeError('second argument must be a DataFrame')

    if not self._are_col_lengths_equal():
        raise Exception('columns in self have unequal lengths')

    if not other._are_col_lengths_equal():
        raise Exception('columns in other have unequal lengths')

    if set(self.keys()) != set(other.keys()):
        raise Exception('self and other must have the same columns')

    if any(self._get_sqltype(n) != other._get_sqltype(n) for n in self):
        raise Exception('types of self and other must match')

    # perform attachment column by column
    for name in list(self.keys()):
        self[name] = np.concatenate((self[name], other[name]))

    # rebuild the conditions DictSet to reflect the appended values
    self.conditions = DictSet([(n, list(self[n])) for n in self])
def insert(self, row):
    """
    insert a row into the table

    args:
        row: should be mappable. e.g. a dict or a list with key/value pairs.

    returns:
        None

    raises:
        TypeError: when row is not mappable
        Exception: when row's keys don't match the table's columns
    """
    # verify that row is mappable and collect its keys
    try:
        c = set(dict(row).keys())   # keys supplied by the row
        s = set(self.keys())        # columns already in the table
    except:
        raise TypeError('row must be mappable type')

    # the easy case
    if self == {}:
        # if the table is empty try and unpack the table as
        # a row so it preserves the order of the column names
        if isinstance(row, list):
            for (k, v) in row:
                self[k] = [v]
                self.conditions[k] = [v]
        else:
            for (k, v) in list(row.items()):
                self[k] = [v]
                self.conditions[k] = [v]
    elif c - s == set():
        # every key in the row already exists as a column: append the
        # values, coercing each to its column's numpy dtype
        for (k, v) in list(OrderedDict(row).items()):
            self[k]=np.concatenate((self[k],
                       np.array([v], dtype=self._get_nptype(k))))
            self.conditions[k].add(v)
    else:
        raise Exception('row must have the same keys as the table')
def write(self, where=None, fname=None, delimiter=','):
    """
    write the contents of the DataFrame to a plaintext file

    kwds:
        where: criterion to apply to table before writing to file
        fname: the path + name of the output file; when None a name is
               built by joining the column labels with 'X' plus an
               extension matching the delimiter (.csv, .tsv, or .txt)
        delimiter: string to separate row cells (default = ",")

    raises:
        Exception: when the table is empty or has ragged columns
        TypeError: when fname is supplied but is not a string
    """
    if where is None:
        where = []

    if self == {}:
        raise Exception('Table must have data to print data')

    # check to see if data columns have equal lengths
    if not self._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')

    if self.shape()[1] < 1:
        raise Exception('Table must have at least one row to print data')

    # check or build fname
    if fname is not None:
        if not isinstance(fname, _strobj):
            raise TypeError('fname must be a string')
    else:
        # NOTE(review): .replace('1','') strips literal '1' characters
        # from the labels -- intent unclear, preserved as-is
        lnames = [str(n).lower().replace('1','') for n in list(self.keys())]
        fname = 'X'.join(lnames)

        if delimiter == ',':
            fname += '.csv'
        elif delimiter == '\t':
            fname += '.tsv'
        else:
            fname += '.txt'

    # csv.writer requires a text-mode file opened with newline='' on
    # Python 3; the previous 'wb' mode raised TypeError on writerow
    with open(fname, 'w', newline='') as fid:
        wtr = csv.writer(fid, delimiter=delimiter)
        wtr.writerow(list(self.keys()))

        if where == []:
            wtr.writerows(list(zip(*list(self[n] for n in self))))
        else:
            self._build_sqlite3_tbl(list(self.keys()), where)
            self._execute('select * from TBL')
            wtr.writerows(list(self.cur))
def descriptives(self, key, where=None):
    """
    Conducts a descriptive statistical analysis of the data in self[key].

    args:
        key: column label

    kwds:
        where: criterion to apply to table before running analysis

    returns:
        a :mod:`pyvttbl.stats`. :class:`Descriptives` object
    """
    if where == None:
        where = []

    if self == {}:
        raise Exception('Table must have data to calculate descriptives')

    # check to see if data columns have equal lengths
    if not self._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')

    if key not in list(self.keys()):
        raise KeyError(key)

    # pull the (optionally filtered) column and hand it to the analysis
    values = self.select_col(key, where=where)
    result = stats.Descriptives()
    result.run(values, key)
    return result
def summary(self, where=None):
    """
    prints the descriptive information for each column in DataFrame

    kwds:
        where: criterion to apply to table before running analysis

    returns:
        None
    """
    # self.keys() yields plain column labels (see descriptives/write);
    # the previous loop tried to unpack each key into (cname, dtype),
    # which raises ValueError for string labels.  Look the sqlite type
    # up explicitly instead.
    for cname in list(self.keys()):
        dtype = self._get_sqltype(cname)
        if dtype in ['real', 'integer']:
            print(self.descriptives(cname, where))
            print()
        else:
            print('%s contains non-numerical data\n'%cname)
def marginals(self, key, factors, where=None):
    # docstring is borrowed from stats.Marginals below
    if where == None:
        where = []

    if self == {}:
        raise Exception('Table must have data to find marginals')

    # check to see if data columns have equal lengths
    if not self._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')

    # delegate the actual computation to the stats module
    result = stats.Marginals()
    result.run(self, key, factors, where)
    return result
marginals.__doc__ = stats.Marginals.__doc__
def anova1way(self, val, factor, posthoc='tukey', where=None):
    """
    Conducts a one-way analysis of variance
    on val over the conditions in factor. The conditions do not necessarily
    need to have equal numbers of samples.

    args:
        val: dependent variable
        factor: a dummy coded column label

    kwds:
        posthoc:
            'tukey': conduct Tukey posthoc tests
            'SNK': conduct Newman-Keuls posthoc tests
        where:
            conditions to apply before running analysis

    return:
        an :class:`pyvttbl.stats.Anova1way` object
    """
    if where is None:
        where = []

    if self == {}:
        # message corrected -- it was copy-pasted from marginals()
        raise Exception('Table must have data to conduct an ANOVA')

    # check to see if data columns have equal lengths
    if not self._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')

    # build list of lists for ANOVA1way object: one list of dependent
    # values per level of `factor`
    list_of_lists = []
    pt = self.pivot(val, rows=[factor],
                    aggregate='tolist',
                    where=where)
    for L in pt:
        list_of_lists.append(L.flatten().tolist())

    # build list of condition labels (second member of each rname tuple)
    conditions_list = [tup[1] for [tup] in pt.rnames]

    a = stats.Anova1way()
    a.run(list_of_lists, val, factor, conditions_list, posthoc=posthoc)
    return a
def chisquare1way(self, observed, expected_dict=None,
                  alpha=0.05, where=None):
    """
    conducts a one-way chi-square goodness-of-fit test on the data in observed

    args:
        observed: column label containing categorical observations

    kwds:
        expected_dict: a dictionary object with keys matching the categories
                       in observed and values with the expected counts. The
                       categories in the observed column must be a subset of
                       the keys in the expected_dict. If expected_dict is None,
                       the total N is assumed to be equally distributed across
                       all groups.
        alpha: the type-I error probability
        where:
            conditions to apply before running analysis

    return:
        an :class:`pyvttbl.stats.ChiSquare1way` object
    """
    # check the expected_dict
    if expected_dict is not None:
        try:
            expected_dict2 = dict(copy(expected_dict))
        except:
            # __name__ is an attribute, not a callable -- the old code
            # called it and raised TypeError instead of this message
            raise TypeError("'%s' is not a mappable type"
                            %type(expected_dict).__name__)

        if not self.conditions[observed] <= set(expected_dict2.keys()):
            raise Exception('expected_dict must contain a superset of '
                            'the observed categories')
    else:
        # Counter returns 0 for missing keys, matching the "equally
        # distributed" fallback below (expected_list is discarded)
        expected_dict2 = Counter()

    # find the counts
    observed_dict=Counter(self.select_col(observed, where))

    # build arguments for ChiSquare1way
    observed_list = []
    expected_list = []
    conditions_list = sorted(set(observed_dict.keys()) |
                             set(expected_dict2.keys()))
    for key in conditions_list:
        observed_list.append(observed_dict[key])
        expected_list.append(expected_dict2[key])

    if expected_dict is None:
        expected_list = None

    # run analysis
    x = stats.ChiSquare1way()
    x.run(observed_list, expected_list, conditions_list=conditions_list,
          measure=observed, alpha=alpha)
    return x
def chisquare2way(self, rfactor, cfactor, alpha=0.05, where=None):
    """
    conducts a two-way chi-square goodness-of-fit test on the data in observed

    args:
        rfactor: column key
        cfactor: column key

    kwds:
        alpha: the type-I error probability
        where:
            conditions to apply before running analysis

    return:
        an :class:`pyvttbl.stats.ChiSquare2way` object
    """
    # pull the two categorical columns, filtered by `where`
    observed_rows = self.select_col(rfactor, where)
    observed_cols = self.select_col(cfactor, where)

    analysis = stats.ChiSquare2way()
    analysis.run(observed_rows, observed_cols, alpha=alpha)
    return analysis
def correlation(self, variables, coefficient='pearson',
                alpha=0.05, where=None):
    """
    produces a correlation matrix and conducts step-down significance testing
    on the column labels in variables.

    args:
        variables: column keys to include in correlation matrix

    kwds:
        coefficient:
            { 'pearson', 'spearman', 'kendalltau', 'pointbiserial' }
        alpha: the type-I error probability
        where:
            conditions to apply before running analysis

    return:
        an :class:`pyvttbl.stats.Correlation` object
    """
    # gather the columns in sorted-label order so the matrix rows/cols
    # line up with the labels handed to the analysis
    ordered = sorted(variables)
    data = [list(self.select_col(v, where)) for v in ordered]

    result = stats.Correlation()
    result.run(data, ordered, coefficient=coefficient, alpha=alpha)
    return result
def ttest(self, aname, bname=None, pop_mean=0., paired=False,
          equal_variance=True, where=None):
    """
    conducts a one-sample, paired, or independent samples t-test

    args:
        aname: column key

    kwds:
        bname: second column key.  If not specified a one-sample t-test
               is performed comparing the values in column aname with a
               hypothesized population mean.
        pop_mean: specifies the null population mean for one-sample t-test.
                  Ignored if bname is supplied
        paired:
            True: a paired t-test is conducted
            False: an independent samples t-test is conducted
        equal_variance:
            True: assumes aname and bname have equal variance
            False: assumes aname and bname have unequal variance
        where:
            conditions to apply before running analysis

    return:
        an :class:`pyvttbl.stats.Ttest` object
    """
    # (summary line corrected -- the old docstring was copy-pasted
    # from correlation())
    if where is None:
        where = []

    if self == {}:
        # message corrected -- it was copy-pasted from marginals()
        raise Exception('Table must have data to conduct a t-test')

    # check to see if data columns have equal lengths
    if not self._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')

    adata = self.select_col(aname, where=where)
    if bname is not None:
        bdata = self.select_col(bname, where=where)
    else:
        bdata = None

    t = stats.Ttest()
    t.run(adata, bdata, pop_mean=pop_mean,
          paired=paired, equal_variance=equal_variance,
          aname=aname, bname=bname)
    return t
def histogram(self, key, where=None, bins=10,
              range=None, density=False, cumulative=False):
    """
    Conducts a histogram analysis of the data in self[key].

    args:
        key: column label of dependent variable

    kwds:
        where: criterion to apply to table before running analysis
        bins: number of bins (default = 10)
        range: list of length 2 defining min and max bin edges
        density: when True normalize counts to a density
        cumulative: when True accumulate counts across bins

    returns:
        a :mod:`pyvttbl.stats`. :class:`Histogram` object
    """
    if where == None:
        where = []

    if self == {}:
        raise Exception('Table must have data to calculate histogram')

    # check to see if data columns have equal lengths
    if not self._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')

    if key not in list(self.keys()):
        raise KeyError(key)

    # sort the (optionally filtered) values and run the analysis
    values = sorted(self.select_col(key, where=where))

    result = stats.Histogram()
    result.run(values, cname=key, bins=bins, range=range,
               density=density, cumulative=cumulative)
    return result
def anova(self, dv, sub='SUBJECT', wfactors=None, bfactors=None,
          measure='', transform='', alpha=0.05):
    """
    conducts a between, within, or mixed, analysis of variance

    args:
        dv: label containing dependent variable

    kwds:
        wfactors: list of within variable factor labels
        bfactors: list of between variable factor labels
        sub: label coding subjects (or the isomorphism)
        measure: string to describe dv (outputs '<dv> of
                 <measure>') intended when dv name is generic
                 (e.g., MEAN, RMS, SD, ...)
        alpha: the type-I error probability
        transform: string specifying a data transformation

            ======================= =============== ==================
             STRING OPTION           TRANSFORM       COMMENTS
            ======================= =============== ==================
             ''                      X               default
             'log','log10'           numpy.log(X)    base 10 transform
             'reciprocal','inverse'  1/X
             'square-root','sqrt'    numpy.sqrt(X)
             'arcsine','arcsin'      numpy.arcsin(X)
             'windsor 10'            windsor(X, 10)  10% windsor trim
            ======================= =============== ==================

    return:
        a :class:`pyvttbl.stats.Anova` object
    """
    # delegate everything to the stats module
    analysis = stats.Anova()
    analysis.run(self, dv, sub=sub, wfactors=wfactors, bfactors=bfactors,
                 measure=measure, transform=transform, alpha=alpha)
    return analysis
# thin delegate to pyvttbl.plotting.histogram_plot; docstring is
# borrowed from the helper below
def histogram_plot(self, val, **kwargs):
    return plotting.histogram_plot(self, val, **kwargs)
histogram_plot.__doc__ = plotting.histogram_plot.__doc__
# thin delegate to pyvttbl.plotting.scatter_plot; docstring is
# borrowed from the helper below
def scatter_plot(self, aname, bname, **kwargs):
    return plotting.scatter_plot(self, aname, bname, **kwargs)
scatter_plot.__doc__ = plotting.scatter_plot.__doc__
# thin delegate to pyvttbl.plotting.box_plot; docstring is borrowed
# from the helper below
def box_plot(self, val, factors=None, **kwargs):
    return plotting.box_plot(self, val, factors=factors, **kwargs)
box_plot.__doc__ = plotting.box_plot.__doc__
# thin delegate to pyvttbl.plotting.interaction_plot; docstring is
# borrowed from the helper below
def interaction_plot(self, val, xaxis, **kwargs):
    return plotting.interaction_plot(self, val, xaxis, **kwargs)
interaction_plot.__doc__ = plotting.interaction_plot.__doc__
# thin delegate to pyvttbl.plotting.scatter_matrix; docstring is
# borrowed from the helper below
def scatter_matrix(self, variables, **kwargs):
    return plotting.scatter_matrix(self, variables, **kwargs)
scatter_matrix.__doc__ = plotting.scatter_matrix.__doc__
class _ptmathmethod(object):
    """
    Defines a wrapper for arithmetic array methods (add, mul...).

    Instances act as method descriptors on PyvtTbl: calling the wrapped
    method applies the underlying np.ma.MaskedArray operation to the
    table data and, where possible, to the row/column/grand totals,
    returning a new PyvtTbl.
    """
    def __init__ (self, methodname):
        self.__name__ = methodname
        # borrow the docstring from the underlying MaskedArray method
        self.__doc__ = getattr(np.ma.MaskedArray, methodname).__doc__
        self.obj = None
    def __get__(self, obj, objtype=None):
        "Gets the calling object."
        # NOTE(review): the bound instance is cached on the shared
        # descriptor itself, so interleaved access from two PyvtTbl
        # instances is not re-entrant
        self.obj = obj
        return self
    def __call__ (self, other, *args):
        "Execute the call behavior."
        instance = self.obj
        # apply the MaskedArray operation to the table data itself
        func = getattr(super(PyvtTbl, instance), self.__name__)
        data = np.ma.MaskedArray(func(other, *args), subok=False)

        if isinstance(other, PyvtTbl):
            # table (op) table: combine the totals pairwise
            func = getattr(instance.row_tots, self.__name__)
            row_tots = func(other.row_tots, *args)

            func = getattr(instance.col_tots, self.__name__)
            col_tots = func(other.col_tots, *args)

            # wrap the scalar grand totals in 1-element arrays so the
            # masked-aware method can be reused, then unwrap
            func = getattr(np.ma.array([instance.grand_tot]), self.__name__)
            grand_tot = func(other.grand_tot, *args)[0]

        elif _isfloat(other):
            # table (op) scalar: apply the scalar to each total
            func = getattr(instance.row_tots, self.__name__)
            row_tots = func(other, *args)

            func = getattr(instance.col_tots, self.__name__)
            col_tots = func(other, *args)

            func = getattr(np.ma.array([instance.grand_tot]), self.__name__)
            grand_tot = func(other, *args)[0]

        else:
            # totals are undefined for other operand types: mask them all
            row_tots = np.ma.masked_equal(np.zeros(len(instance.row_tots)), 0.)
            col_tots = np.ma.masked_equal(np.zeros(len(instance.col_tots)), 0.)
            grand_tot = np.ma.masked

        return PyvtTbl(data,
                       val=instance.val,
                       conditions=instance.conditions,
                       rnames=instance.rnames,
                       cnames=instance.cnames,
                       aggregate='N/A',
                       row_tots=row_tots,
                       col_tots=col_tots,
                       grand_tot=grand_tot,
                       attach_rlabels=instance.attach_rlabels)
class PyvtTbl(np.ma.MaskedArray, object):
"""
container holding the pivoted data
"""
def __new__(cls, data, val, conditions, rnames, cnames, aggregate, **kwds):
    """
    creates a new PyvtTbl from scratch

    args:
        data: np.ma.array object holding pivoted data
        val: string label for the data in the table
        conditions: DictSet representing the factors and levels in the table
        rnames: list of row labels
        cnames: list of column labels
        aggregate: string describing the aggregate function applied to the data

    kwds:
        row_tots: row totals in a MaskedArray
        col_tots: column totals in a MaskedArray
        grand_tot: float holding grand total
        attach_rlabels: bool specifying whether row labels are part of the table
        mask, copy, dtype, fill_value, subok, keep_mask, hard_mask:
            forwarded to np.ma.MaskedArray.__new__

    | subclassing Numpy objects is a little different from subclassing other objects.
    | see: http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
    """
    # identity test: `data == None` compares elementwise when data is
    # an ndarray, making the `if` ambiguous (ValueError) for arrays
    if data is None:
        data = []

    maparms = dict(copy=kwds.get('copy',False),
                   dtype=kwds.get('dtype',None),
                   fill_value=kwds.get('fill_value',None),
                   subok=kwds.get('subok',True),
                   keep_mask=kwds.get('keep_mask',True),
                   hard_mask=kwds.get('hard_mask',False))

    mask = kwds.get('mask', np.ma.nomask)
    # ndmin=2 guarantees the table is at least 2-dimensional
    obj = np.ma.MaskedArray.__new__(cls, data, mask=mask, ndmin=2, **maparms)

    # make sure we end up with a PyvtTbl view of the data
    if not kwds.get('subok',True) or not isinstance(obj, PyvtTbl):
        obj = obj.view(cls)

    # add attributes to instance
    obj.val = val
    obj.conditions = conditions
    obj.rnames = rnames
    obj.cnames = cnames
    obj.aggregate = aggregate

    if 'row_tots' in kwds:
        obj.row_tots = np.ma.array(kwds['row_tots'])
    else:
        obj.row_tots = []#np.ma.masked_equal(np.zeros(len(cnames)), 0.)

    if 'col_tots' in kwds:
        obj.col_tots = np.ma.array(kwds['col_tots'])
    else:
        obj.col_tots = []#np.ma.masked_equal(np.zeros(len(rnames)), 0.)

    obj.grand_tot = kwds.get('grand_tot', np.ma.masked)
    obj.where = kwds.get('where', [])
    obj.attach_rlabels = kwds.get('attach_rlabels', False)

    # remember the masked-array construction flags for later rebuilds
    obj.subok = maparms['subok']
    obj.keep_mask = maparms['keep_mask']
    obj.hard_mask = maparms['hard_mask']

    return obj
def __array_finalize__(self, obj):
    # copy the pyvttbl metadata from the source object, falling back
    # to neutral defaults when numpy produces a view/copy from a bare
    # array (slicing, ufuncs, etc.)
    self.val = getattr(obj, 'val', None)
    self.conditions = getattr(obj, 'conditions', DictSet())
    self.rnames = getattr(obj, 'rnames', [1])
    self.cnames = getattr(obj, 'cnames', [1])
    self.aggregate = getattr(obj, 'aggregate', 'avg')

    if hasattr(obj, 'row_tots'):
        self.row_tots = np.ma.array(obj.row_tots)
    else:
        # no totals available: fully-masked placeholder of matching length
        self.row_tots = np.ma.masked_equal(np.zeros(len(self.cnames)), 0.)

    if hasattr(obj, 'col_tots'):
        self.col_tots = np.ma.array(obj.col_tots)
    else:
        self.col_tots = np.ma.masked_equal(np.zeros(len(self.rnames)), 0.)

    self.grand_tot = getattr(obj, 'grand_tot', np.ma.masked)
    self.where = getattr(obj, 'where', [])
    self.attach_rlabels = getattr(obj, 'attach_rlabels', False)

    self.subok = getattr(obj, 'subok', True)
    self.keep_mask = getattr(obj, 'keep_mask', True)
    self.hard_mask = getattr(obj, 'hard_mask', False)

    # let MaskedArray finish its own finalization
    np.ma.MaskedArray.__array_finalize__(self, obj)
__array_finalize__.__doc__ = np.ma.MaskedArray.__array_finalize__.__doc__
def transpose(self):
    """
    returns a transposed PyvtTbl object

    Rows and columns swap roles, so the row/column labels and the
    row/column totals swap as well; the grand total is unchanged.
    """
    return PyvtTbl(super(PyvtTbl,self).transpose(),
                   self.val,
                   self.conditions,
                   self.cnames,   # new row labels <- old column labels
                   self.rnames,   # new column labels <- old row labels
                   self.aggregate,
                   row_tots=self.col_tots,
                   col_tots=self.row_tots,
                   grand_tot=self.grand_tot,
                   attach_rlabels=self.attach_rlabels,
                   subok=self.subok,
                   keep_mask=self.keep_mask,
                   hard_mask=self.hard_mask)
def astype(self, dtype):
    """
    returns a copy of the table cast to `dtype`

    args:
        dtype: data-type to cast the table data, the row/column totals,
               and (when present) the grand total to

    returns:
        a new :class:`PyvtTbl` with the same labels and metadata
    """
    # Build the cast array directly.  The previous implementation
    # round-tripped through eval(repr(...)), which is slow and breaks
    # on values whose reprs are not evaluable literals (nan, inf).
    if hasattr(self.mask, '__iter__'):
        mask = self.mask.tolist()
    else:
        mask = self.mask
    data = np.ma.array(self.tolist(), mask=mask, dtype=dtype)

    # Only cast the grand total when it actually holds a value; the old
    # tuple-indexing trick evaluated dtype(grand_tot) unconditionally,
    # even when grand_tot was the masked constant.
    if self.grand_tot is np.ma.masked:
        grand_tot = np.ma.masked
    else:
        grand_tot = dtype(self.grand_tot)

    return PyvtTbl(data,
                   self.val,
                   self.conditions,
                   self.rnames,
                   self.cnames,
                   self.aggregate,
                   row_tots=self.row_tots.astype(dtype),
                   col_tots=self.col_tots.astype(dtype),
                   grand_tot=grand_tot,
                   attach_rlabels=self.attach_rlabels,
                   subok=self.subok,
                   keep_mask=self.keep_mask,
                   hard_mask=self.hard_mask)
######################################################################
# Adapted from numpy.ma.core

def _get_flat(self):
    "Return a flat iterator."
    return np.ma.core.MaskedIterator(self)

def _set_flat (self, value):
    "Set a flattened version of self to value."
    # assign through a raveled view so the write lands in self's data
    y = self.ravel()
    y[:] = value

flat__doc__ = """\
Flat iterator object to iterate over PyvtTbl.

| A `MaskedIterator` iterator is returned by ``x.flat`` for any PyvtTbl
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.

| Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
"""

flat = property(fget=_get_flat, fset=_set_flat, doc=flat__doc__)
######################################################################
def flatten(self):
    """
    returns the PyvtTbl flattened as a plain np.ma.MaskedArray
    """
    # Flatten via MaskedArray, then rebuild a plain np.ma.array so the
    # result is not a PyvtTbl.  The previous implementation
    # round-tripped through eval(repr(...)), which is slow and breaks
    # on values whose reprs are not evaluable literals (nan, inf).
    obj = super(PyvtTbl,self).flatten()
    if hasattr(obj.mask, '__iter__'):
        mask = obj.mask.tolist()
    else:
        mask = obj.mask
    return np.ma.array(obj.tolist(), mask=mask)
# this is so Sphinx can find it
def __iter__(self):
    # delegate straight to MaskedArray iteration
    return super(PyvtTbl, self).__iter__()
__iter__.__doc__ = np.ma.MaskedArray.__iter__.__doc__
def ndenumerate(self):
    """
    Multidimensional index iterator.

    returns:
        an iterator yielding ((row, col), value) pairs in row-major order
    """
    nrows = self.shape[0]
    ncols = self.shape[1]
    for r in _xrange(nrows):
        for c in _xrange(ncols):
            yield (r, c), self[r, c]
def _get_rows(self):
    """
    returns the row factor labels, or [1] when no rows were specified
    """
    # rnames == [1] is the sentinel for "no row factors"
    if self.rnames == [1]:
        return [1]
    return [str(factor) for (factor, cond) in self.rnames[0]]
def _get_cols(self):
    """
    returns the column factor labels, or [1] when no columns were specified
    """
    # cnames == [1] is the sentinel for "no column factors"
    if self.cnames == [1]:
        return [1]
    return [str(factor) for (factor, cond) in self.cnames[0]]
def to_dataframe(self):
    """
    returns a DataFrame excluding row and column totals
    """
    if self == []:
        return DataFrame()

    rows = self._get_rows()
    cols = self._get_cols()

    # initialize DataFrame
    df = DataFrame()

    # no rows or cols were specified
    if self.rnames == [1] and self.cnames == [1]:
        # build the header
        # NOTE(review): nothing is inserted into df in this branch --
        # the single table value is never added; suspected oversight,
        # confirm against callers before changing
        header = ['Value']
    elif self.rnames == [1]: # no rows were specified
        # build the header: one column per table cell, labelled by its
        # factor=level conditions
        header = [',\n'.join('%s=%s'%(f, c) for (f, c) in L) \
                  for L in self.cnames]

        if self.ndim == 2:
            rdata = self[0,:].flatten().tolist()
        else:
            # 3-D table: each cell flattens to a list of values
            rdata = [self[0,j].flatten().tolist()
                     for j in _xrange(len(self.cnames))]

        df.insert(list(zip(header, rdata)))
    elif self.cnames == [1]: # no cols were specified
        # build the header
        header = rows + ['Value']
        for i, L in enumerate(self.rnames):
            if isinstance(self[i,0], PyvtTbl):
                rdata = [c for (f, c) in L] + [self[i,0].flatten().tolist()]
            else:
                rdata = [c for (f, c) in L] + [self[i,0]]
            df.insert(list(zip(header, rdata)))
    else: # table has rows and cols
        # build the header: row factor labels followed by one column
        # per table column, labelled by its factor=level conditions
        header = copy(rows)
        for L in self.cnames:
            header.append(',\n'.join('%s=%s'%(f, c) for (f, c) in L))

        for i, L in enumerate(self.rnames):
            if self.ndim == 2:
                rdata =[c for (f, c) in L] + self[i,:].flatten().tolist()
            else:
                # NOTE(review): unlike the 2-D path this drops the row
                # condition values from rdata -- confirm intent
                rdata = [self[i,j].flatten().tolist()
                         for j in _xrange(len(self.cnames))]
            df.insert(list(zip(header, rdata)))

    return df
def __getitem__(self, indx):
    """
    Return the item described by indx, as a PyvtTbl

    args:
        indx: index to array
              can be int, tuple(int, int), tuple(slice, int),
              tuple(int, slice) or tuple(slice, slice)
              x[int] <==> x[int,:]

    returns:
        PyvtTbl that is at least 2-dimensional
        (unless indx is tuple(int, int))

    | x.__getitem__(indx) <==> x[indx]
    """
    # x[i] <==> x[i,:] <==> x[i, slice(None, None, None)]
    if _isint(indx) or isinstance(indx, slice):
        return self.__getitem__((indx,slice(None, None, None)))

    obj = super(PyvtTbl, self).__getitem__(indx)

    if isinstance(obj, PyvtTbl):
        # work out the row/column counts of the result so the data can
        # be reshaped and the labels resliced to match
        if self.rnames == [1] or _isint(indx[0]):
            m = 1
        else:
            m = len(self.rnames[indx[0]])

        if self.cnames == [1] or _isint(indx[1]):
            n = 1
        else:
            n = len(self.cnames[indx[1]])

        if np.prod(obj.shape) == m*n:
            obj = np.reshape(obj, (m,n))
        else:
            # more elements than m*n cells: keep the extra data in a
            # trailing axis (presumably list-valued cells -- confirm)
            obj = np.reshape(obj, (m,n,-1))

        # reslice the labels to the selection
        obj.rnames = self.rnames[indx[0]]
        obj.cnames = self.cnames[indx[1]]
        # an int index collapses a dimension; rewrap so labels stay lists
        if _isint(indx[0]): obj.rnames = [obj.rnames]
        if _isint(indx[1]): obj.cnames = [obj.cnames]

        # totals are not sliced along with the data; mask them out
        obj.row_tots = np.ma.masked_equal(np.zeros(m), 0.)
        obj.col_tots = np.ma.masked_equal(np.zeros(n), 0.)

        obj.val = self.val

    return obj
def __str__(self):
    """
    returns a human friendly string representation of the table

    The layout adapts to whether row factors and/or column factors were
    specified, and totals are appended only when they are unmasked.
    """
    if self == []:
        return '(table is empty)'

    # decide which totals to display based on their masks
    show_col_tots = any(np.invert(self.col_tots.mask))
    # NOTE(review): this also inspects col_tots.mask -- looks like a
    # copy-paste; row_tots.mask was probably intended.  Confirm before
    # changing.
    show_row_tots = any(np.invert(self.col_tots.mask))
    show_grand_tot = _isfloat(self.grand_tot) and not math.isnan(self.grand_tot)

    rows = self._get_rows()
    cols = self._get_cols()

    # initialize table
    tt = TextTable(max_width=0)

    # no rows or cols were specified
    if self.rnames == [1] and self.cnames == [1]:
        # build the header
        header = ['Value']

        # initialize the texttable and add stuff
        # NOTE(review): the second call repeats set_cols_dtype;
        # set_cols_align(['l']) was probably intended -- confirm
        tt.set_cols_dtype(['t'])
        tt.set_cols_dtype(['l'])
        tt.add_row(self)

    elif self.rnames == [1]: # no rows were specified
        # build the header: one column per cell, labelled by conditions
        header = [',\n'.join('%s=%s'%(f, c) for (f, c) in L) \
                  for L in self.cnames]
        if show_grand_tot:
            header.append('Total')

        # initialize the texttable and add stuff
        # False and True evaluate as 0 and 1 for integer addition
        # and list indexing
        tt.set_cols_dtype(['a'] * (len(self.cnames)+show_grand_tot))
        tt.set_cols_align(['r'] * (len(self.cnames)+show_grand_tot))
        if self.ndim == 2:
            tt.add_row(self[0,:].flatten().tolist()+
                       ([],[self.grand_tot])[show_grand_tot])
        else:
            # 3-D table: each cell flattens to a list of values
            rdata = [self[0,j].flatten().tolist()
                     for j in _xrange(len(self.cnames))]
            tt.add_row(rdata + ([],[self.grand_tot])[show_grand_tot])

    elif self.cnames == [1]: # no cols were specified
        # build the header
        header = rows + ['Value']

        # initialize the texttable and add stuff
        tt.set_cols_dtype(['t'] * len(rows) + ['a'])
        tt.set_cols_align(['l'] * len(rows) + ['r'])
        for i, L in enumerate(self.rnames):
            if isinstance(self[i,0], PyvtTbl):
                tt.add_row([c for (f, c) in L] + [self[i,0].flatten().tolist()])
            else:
                tt.add_row([c for (f, c) in L] + [self[i,0]])

        if show_grand_tot:
            tt.footer(['Total'] +
                      ['']*(len(rows)-1) +
                      [self.grand_tot])

    else: # table has rows and cols
        # build the header: row factor labels, then one column per cell
        header = copy(rows)
        for L in self.cnames:
            header.append(',\n'.join('%s=%s'%(f, c) for (f, c) in L))
        if show_row_tots:
            header.append('Total')

        dtypes = ['t'] * len(rows) + ['a'] * (len(self.cnames)+show_row_tots)
        aligns = ['l'] * len(rows) + ['r'] * (len(self.cnames)+show_row_tots)
        numcols = len(dtypes)

        # initialize the texttable and add stuff
        tt.set_cols_dtype(dtypes)
        tt.set_cols_align(aligns)

        if show_col_tots:
            # data rows carry their row total; footer carries the
            # column totals and the grand total
            for i, L in enumerate(self.rnames):
                tt.add_row([c for (f, c) in L] +
                           self[i,:].flatten().tolist() +
                           [self.row_tots[i]])
            tt.footer(['Total'] +
                      ['']*(len(rows)-1) +
                      self.col_tots.tolist() +
                      [self.grand_tot])
        else:
            for i, L in enumerate(self.rnames):
                if self.ndim == 2:
                    tt.add_row([c for (f, c) in L] +
                               self[i,:].flatten().tolist())
                else:
                    # 3-D table: each cell flattens to a list of values
                    rdata = [self[i,j].flatten().tolist()
                             for j in _xrange(len(self.cnames))]
                    tt.add_row([c for (f, c) in L] + rdata)

    # add header and decoration
    tt.header(header)
    tt.set_deco(TextTable.HEADER | TextTable.FOOTER)

    # return the formatted table
    return '%s(%s)\n%s'%(self.aggregate, self.val, tt.draw())
def __repr__(self):
"""
returns a machine friendly string representation of the object
"""
if self == []:
return 'PyvtTbl()'
args = repr(self.tolist())
args += ", '%s'"%self.val
args += ", %s"%repr(self.conditions)
args += ", %s"%repr(self.rnames)
args += ", %s"%repr(self.cnames)
args += ", '%s'"%self.aggregate
kwds = []
if self.row_tots != None:
# sometimes np.ma.array.mask is a bool, somtimes it is a list.
# if we just copy the mask over it will first create a list and then
# keep appending to the list everytime the object is reprized. Not sure if
# if this is a bug or intentional. Anyways handling the masked string this
# way makes it so repr(eval(repr(myPyvttbl))) = repr(myPyvttbl)
mask_str =''
if any(_flatten([self.row_tots.mask])):
mask_str = ', mask=%s'%repr(self.row_tots.mask)
kwds.append(', row_tots=np.ma.array(%s%s)'%\
(self.row_tots.tolist(), mask_str))
if self.col_tots != None:
mask_str =''
if any(_flatten([self.col_tots.mask])):
mask_str = ', mask=%s'%repr(self.col_tots.mask)
kwds.append(', col_tots=np.ma.array(%s%s)'%\
(self.col_tots.tolist(), mask_str))
if self.grand_tot != None:
kwds.append(', grand_tot=%s'%repr(self.grand_tot))
if self.where != []:
if isinstance(self.where, _strobj):
kwds.append(", where='%s'"%self.where)
else:
kwds.append(", where=%s"%self.where)
if self.attach_rlabels != False:
kwds.append(', attach_rlabels=%s'%self.attach_rlabels)
# masked array related parameters
if any(_flatten([self.mask])) and hasattr(self.mask, '__iter__'):
kwds.append(', mask=%s'%repr(self.mask.tolist()))
if self.dtype != None:
kwds.append(', dtype=%s'%repr(self.dtype))
if self.fill_value != None:
kwds.append(', fill_value=%s'%repr(self.fill_value))
if self.subok != True:
kwds.append(', subok=%s'%repr(self.subok))
if self.keep_mask != True:
kwds.append(', keep_mask=%s'%repr(self.keep_mask))
if self.hard_mask != False:
kwds.append(', hard_mask=%s'%repr(self.hard_mask))
if len(kwds)>1:
kwds = ''.join(kwds)
return ('PyvtTbl(%s%s)'%(args,kwds)).replace('\n','')
__add__ = _ptmathmethod('__add__')
__add__.__doc__ = np.ma.MaskedArray.\
__add__.__doc__.replace('masked array', 'PyvtTbl')
__radd__ = _ptmathmethod('__add__')
__radd__.__doc__ = np.ma.MaskedArray.\
__radd__.__doc__.replace('masked array', 'PyvtTbl')
__sub__ = _ptmathmethod('__sub__')
__sub__.__doc__ = np.ma.MaskedArray.\
__sub__.__doc__.replace('masked array', 'PyvtTbl')
__rsub__ = _ptmathmethod('__rsub__')
__rsub__.__doc__ = np.ma.MaskedArray.\
__rsub__.__doc__.replace('masked array', 'PyvtTbl')
__pow__ = _ptmathmethod('__pow__')
__pow__.__doc__ = np.ma.MaskedArray.\
__pow__.__doc__.replace('masked array', 'PyvtTbl')
__mul__ = _ptmathmethod('__mul__')
__mul__.__doc__ = np.ma.MaskedArray.\
__mul__.__doc__.replace('masked array', 'PyvtTbl')
__rmul__ = _ptmathmethod('__mul__')
__rmul__.__doc__ = np.ma.MaskedArray.\
__rmul__.__doc__.replace('masked array', 'PyvtTbl')
__div__ = _ptmathmethod('__div__')
__div__.__doc__ = np.ma.MaskedArray.\
__div__.__doc__.replace('masked array', 'PyvtTbl')
# __rdiv__ = _ptmathmethod('__rdiv__')
# __rdiv__.__doc__ = np.ma.MaskedArray.\
# __rdiv__.__doc__.replace('masked array', 'PyvtTbl')
__truediv__ = _ptmathmethod('__truediv__')
__truediv__.__doc__ = np.ma.MaskedArray.\
__truediv__.__doc__.replace('masked array', 'PyvtTbl')
__rtruediv__ = _ptmathmethod('__rtruediv__')
__rtruediv__.__doc__ = np.ma.MaskedArray.\
__rtruediv__.__doc__.replace('masked array', 'PyvtTbl')
__floordiv__ = _ptmathmethod('__floordiv__')
__floordiv__.__doc__ = np.ma.MaskedArray.\
__floordiv__.__doc__.replace('masked array', 'PyvtTbl')
__rfloordiv__ = _ptmathmethod('__rfloordiv__')
__rfloordiv__.__doc__ = np.ma.MaskedArray.\
__rfloordiv__.__doc__.replace('masked array', 'PyvtTbl')
## __eq__ = _ptmathmethod('__eq__')
## __ne__ = _ptmathmethod('__ne__')
## __lt__ = _ptmathmethod('__lt__')
## __le__ = _ptmathmethod('__le__')
## __gt__ = _ptmathmethod('__gt__')
## __ge__ = _ptmathmethod('__ge__')
##
## copy = _tsarraymethod('copy', ondates=True)
## compress = _tsarraymethod('compress', ondates=True)
## cumsum = _tsarraymethod('cumsum', ondates=False)
## cumprod = _tsarraymethod('cumprod', ondates=False)
## anom = _tsarraymethod('anom', ondates=False)
##
## sum = _tsaxismethod('sum')
## prod = _tsaxismethod('prod')
## mean = _tsaxismethod('mean')
## var = _tsaxismethod('var')
## std = _tsaxismethod('std')
## all = _tsaxismethod('all')
## any = _tsaxismethod('any')
##
##df = DataFrame()
##df['first']='<NAME> <NAME>'.split()
##df['last']='<NAME>'.split()
##df['age']=[28,5,26,51,49]
##df['gender']=['male','male','male','female','fem
| StarcoderdataPython |
1669808 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
from unittest import skip
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
@patch("os.path.exists", new = MagicMock(return_value=True))
class TestPhoenixQueryServer(RMFTestCase):
    """Resource-level unit tests for the Phoenix Query Server service script.

    Each test runs phoenix_queryserver.py with a command ('configure',
    'start', 'stop') against a JSON configuration fixture and asserts the
    exact sequence of resources (directories, XML/template config files,
    Execute commands) the script is expected to declare, for both the
    non-secure and the Kerberos ("secured") cluster configurations.

    NOTE: the octal literals (0755, 0644, 0775) indicate this file targets
    Python 2.
    """

    # location of the service scripts inside the stack definition
    COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
    STACK_VERSION = "2.3"

    def test_configure_default(self):
        """'configure' on a non-secure cluster lays down only the default config."""
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
                           classname = "PhoenixQueryServer",
                           command = "configure",
                           config_file="hbase_default.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )
        self.assert_configure_default()
        self.assertNoMoreResources()

    def test_start_default(self):
        """'start' on a non-secure cluster configures, then launches the query server."""
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
                           classname = "PhoenixQueryServer",
                           command = "start",
                           config_file="hbase_default.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )
        self.assert_configure_default()
        self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py start',
                                  environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/usr/hdp/current/hbase-regionserver/conf'},
                                  user = 'hbase'
        )
        self.assertNoMoreResources()

    def test_stop_default(self):
        """'stop' issues a graceful stop with a kill-on-timeout fallback, then removes the pid file."""
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
                           classname = "PhoenixQueryServer",
                           command = "stop",
                           config_file="hbase_default.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )
        self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
            on_timeout = '! ( ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/phoenix-hbase-server.pid`',
            timeout = 30,
            environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/usr/hdp/current/hbase-regionserver/conf'},
            user = 'hbase'
        )
        self.assertResourceCalled('Execute', 'rm -f /var/run/hbase/phoenix-hbase-server.pid',
        )
        self.assertNoMoreResources()

    def test_configure_secured(self):
        """'configure' on a Kerberos cluster lays down the secured config."""
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
                           classname = "PhoenixQueryServer",
                           command = "configure",
                           config_file="hbase_secure.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )
        self.assert_configure_secured()
        self.assertNoMoreResources()

    def test_start_secured(self):
        """'start' on a Kerberos cluster configures, then launches the query server."""
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
                           classname = "PhoenixQueryServer",
                           command = "start",
                           config_file="hbase_secure.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )
        self.assert_configure_secured()
        self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py start',
                                  environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/usr/hdp/current/hbase-regionserver/conf'},
                                  user = 'hbase'
        )
        self.assertNoMoreResources()

    def test_stop_secured(self):
        """'stop' on a Kerberos cluster behaves identically to the default stop."""
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
                           classname = "PhoenixQueryServer",
                           command = "stop",
                           config_file="hbase_secure.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )
        self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
            on_timeout = '! ( ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/phoenix-hbase-server.pid`',
            timeout = 30,
            environment = {'JAVA_HOME': '/usr/jdk64/jdk1.8.0_40', 'HBASE_CONF_DIR': '/usr/hdp/current/hbase-regionserver/conf'},
            user = 'hbase'
        )
        self.assertResourceCalled('Execute', 'rm -f /var/run/hbase/phoenix-hbase-server.pid',
        )
        self.assertNoMoreResources()

    @skip("there's nothing to upgrade to yet")
    def test_start_default_24(self):
        """'start' against the 2.4 regionserver fixture (skipped until a 2.4 upgrade target exists)."""
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
                           classname = "PhoenixQueryServer",
                           command = "start",
                           config_file="hbase-rs-2.4.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES)
        self.assertResourceCalled('Directory', '/etc/hbase',
                                  mode = 0755)
        self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-regionserver/conf',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  recursive = True)
        self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['hbase-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site'])
        self.assertResourceCalled('XmlConfig', 'core-site.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['core-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
        )
        self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/conf/hbase-env.sh',
                                  owner = 'hbase',
                                  content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']))
        self.assertResourceCalled('Directory', '/var/run/hbase',
                                  owner = 'hbase',
                                  recursive = True)
        self.assertResourceCalled('Directory', '/var/log/hbase',
                                  owner = 'hbase',
                                  recursive = True)
        self.assertResourceCalled('File',
                                  '/usr/lib/phoenix/bin/log4j.properties',
                                  mode=0644,
                                  group='hadoop',
                                  owner='hbase',
                                  content='log4jproperties\nline2')
        self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py start',
                                  not_if = 'ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1',
                                  user = 'hbase')
        self.assertNoMoreResources()

    def assert_configure_default(self):
        """Assert the full resource sequence produced by 'configure' on a non-secure cluster."""
        self.assertResourceCalled('Directory', '/etc/hbase',
                                  mode = 0755
        )
        self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-regionserver/conf',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  recursive = True,
        )
        self.assertResourceCalled('Directory', '/hadoop/hbase',
                                  owner = 'hbase',
                                  mode=0775,
                                  recursive = True,
                                  cd_access='a'
        )
        self.assertResourceCalled('Directory', '/hadoop/hbase/local',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  mode=0775,
                                  recursive = True,
        )
        self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  mode=0775,
                                  recursive = True,
        )
        self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['hbase-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
        )
        self.assertResourceCalled('XmlConfig', 'core-site.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['core-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
        )
        self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['hdfs-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
        )
        self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                                  owner = 'hdfs',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hadoop-client/conf',
                                  configurations = self.getConfig()['configurations']['hdfs-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
        )
        self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['hbase-policy'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
        )
        self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/conf/hbase-env.sh',
                                  owner = 'hbase',
                                  content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
        )
        self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/hadoop-metrics2-hbase.properties',
                                  owner = 'hbase',
                                  template_tag = 'GANGLIA-RS',
        )
        self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/regionservers',
                                  owner = 'hbase',
                                  template_tag = None,
        )
        self.assertResourceCalled('Directory', '/var/run/hbase',
                                  owner = 'hbase',
                                  recursive = True,
        )
        self.assertResourceCalled('Directory', '/var/log/hbase',
                                  owner = 'hbase',
                                  recursive = True,
        )
        self.assertResourceCalled('File',
                                  '/usr/hdp/current/hbase-regionserver/conf/log4j.properties',
                                  mode=0644,
                                  group='hadoop',
                                  owner='hbase',
                                  content='log4jproperties\nline2'
        )

    def assert_configure_secured(self):
        """Assert the 'configure' resource sequence for a Kerberos cluster
        (identical to the default plus the JAAS template)."""
        self.assertResourceCalled('Directory', '/etc/hbase',
                                  mode = 0755
        )
        self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-regionserver/conf',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  recursive = True,
        )
        self.assertResourceCalled('Directory', '/hadoop/hbase',
                                  owner = 'hbase',
                                  mode=0775,
                                  recursive = True,
                                  cd_access='a'
        )
        self.assertResourceCalled('Directory', '/hadoop/hbase/local',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  mode=0775,
                                  recursive = True,
        )
        self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  mode=0775,
                                  recursive = True,
        )
        self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['hbase-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
        )
        self.assertResourceCalled('XmlConfig', 'core-site.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['core-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
        )
        self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['hdfs-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
        )
        self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                                  owner = 'hdfs',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hadoop-client/conf',
                                  configurations = self.getConfig()['configurations']['hdfs-site'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
        )
        self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
                                  owner = 'hbase',
                                  group = 'hadoop',
                                  conf_dir = '/usr/hdp/current/hbase-regionserver/conf',
                                  configurations = self.getConfig()['configurations']['hbase-policy'],
                                  configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
        )
        self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/conf/hbase-env.sh',
                                  owner = 'hbase',
                                  content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
        )
        self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/hadoop-metrics2-hbase.properties',
                                  owner = 'hbase',
                                  template_tag = 'GANGLIA-RS',
        )
        self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/regionservers',
                                  owner = 'hbase',
                                  template_tag = None,
        )
        self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-regionserver/conf/hbase_queryserver_jaas.conf',
                                  owner = 'hbase',
                                  template_tag = None,
        )
        self.assertResourceCalled('Directory', '/var/run/hbase',
                                  owner = 'hbase',
                                  recursive = True,
        )
        self.assertResourceCalled('Directory', '/var/log/hbase',
                                  owner = 'hbase',
                                  recursive = True,
        )
        self.assertResourceCalled('File',
                                  '/usr/hdp/current/hbase-regionserver/conf/log4j.properties',
                                  mode=0644,
                                  group='hadoop',
                                  owner='hbase',
                                  content='log4jproperties\nline2'
        )
6596753 | import re
import typing
from Custom.LineNumberReader import LineNumberReader
from Exceptions.Parse import YjcParseError
from Token.TokenYJC import TokenYJC, IdentifierTokenYJC, NumTokenYJC, StrTokenYJC
class LexerYJC:
    """Regex-based lexer: tokenizes source text line by line into a queue of
    TokenYJC objects (numbers, strings, identifiers/operators), skipping
    '//' comments, with read/peek lookahead access."""

    # token sub-patterns combined into one alternation below
    IntegerLiteral = "[0-9]+"
    IdentifierLiteral = "[A-Z_a-z][A-Z_a-z0-9]*|==|<=|>=|&&|\\|\\||\S"
    StringLiteral='"(\\\\"|\\\\\\\\|\\\\n|[^"])*"'
    CommentLiteral="//.*"
    # optional leading whitespace, then one optional token alternative
    regexPattern = f"\s*(({CommentLiteral})|({IntegerLiteral})|({StringLiteral})|{IdentifierLiteral})?"

    # group indexes into the compiled pattern (see the two notes below,
    # translated: "group 0 is the entire match, as in Java" and
    # "group 1 is everything after the leading \s whitespace")
    "在java 的 group 中 0 代表整个串串"
    EntireFlag=0
    "第一个括号里的东西 是空格*个空格 即\\s代表的"
    SpaceFlag=1
    CommentFlag=2
    NumFlag=3
    StrFlag = 4
    IdentifierFlag = 5

    def __init__(self):
        # compiled token pattern, lookahead queue and end-of-input flag
        self.patternCompiled=re.compile(self.regexPattern)
        self.queue:typing.List[TokenYJC]
        self.queue=[]
        self.hasMore:bool
        self.hasMore=True

    def setRawString(self,rawString):
        """Attach the raw source text to lex via a line-number-tracking reader."""
        self.reader = LineNumberReader(rawString)

    def read(self):
        """Pop and return the next token, or TokenYJC.EOF when input is exhausted."""
        if self.fillQueue(0):
            token=self.queue.pop(0)
            return token
        else:
            return TokenYJC.EOF

    def peek(self,i):
        """Return the i-th lookahead token without consuming it
        (False when fewer than i+1 tokens remain)."""
        if self.fillQueue(i):
            return self.queue[i]
        else:
            return False

    def fillQueue(self,i:int):
        """Ensure at least i+1 tokens are buffered; False once input runs out."""
        while i>=len(self.queue):
            if self.hasMore:
                self.readLine()
            else:
                return False
        else:
            # while/else: reached when the loop condition becomes false,
            # i.e. the queue now holds enough tokens
            return True

    def readLine(self):
        """Lex one source line into tokens and append a trailing EOL token.

        Raises YjcParseError on I/O failure or an unmatchable token.
        """
        try:
            try:
                line=next(self.reader.lineGenerator)
            except StopIteration:
                # generator exhausted: signal end of input
                line=None
        except IOError as e:
            raise YjcParseError(e.__str__())
        else:
            if line==None:
                self.hasMore=False
                return
            lineNo=self.reader.getLineNumber()
            pos=0
            endPos=len(line)
            tempLine=line[:]
            # repeatedly match from the current position to the line end
            while pos<endPos:
                matcher = self.patternCompiled.match(tempLine)
                if matcher:
                    self.addToken(lineNo,matcher)
                    pos=matcher.end()+pos
                    tempLine=line[pos:endPos]
                else:
                    raise YjcParseError("Bad Token at line %s"%lineNo)
            self.queue.append(IdentifierTokenYJC(lineNo, TokenYJC.EOL))

    def addToken(self,lineNo:int,matcher:typing.Match):
        """Convert one regex match into the appropriate token and enqueue it."""
        matchedOne=matcher.group(self.SpaceFlag)
        if matchedOne:
            if not matcher.group(self.CommentFlag):  # if it is not a comment
                token_:TokenYJC
                if matcher.group(self.NumFlag):
                    token_=NumTokenYJC(lineNo, int(matchedOne))
                elif matcher.group(self.StrFlag):
                    token_=StrTokenYJC(lineNo, self.toStringLiteral(matchedOne))
                else:
                    token_=IdentifierTokenYJC(lineNo, matchedOne)
                self.queue.append(token_)

    def toStringLiteral(self, matchedString:typing.AnyStr):
        """Strip the opening quote and resolve the escape sequences
        \\\" , \\\\ and \\n inside a matched string literal.

        :param matchedString: the raw literal as matched (leading quote included)
        :return: the unescaped string content
        """
        stringLength=len(matchedString)
        # start at 1 to skip the opening quote character
        strPointer=1
        stringBuilder=""
        while strPointer<stringLength:
            singleStr=matchedString[strPointer]
            if singleStr=="\\" and strPointer+1<stringLength:
                singleStrNext=matchedString[strPointer + 1]
                if singleStrNext=='"' or singleStrNext=="\\":
                    strPointer+=1
                    singleStr=matchedString[strPointer]
                elif singleStrNext=="n":
                    strPointer+=1
                    singleStr="\n"
            stringBuilder+=singleStr
            # NOTE(review): the closing quote is appended too, since the loop
            # runs to the final character — confirm whether callers expect it
        return stringBuilder
| StarcoderdataPython |
1955144 | <gh_stars>0
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack.commands
class command(stack.commands.HostArgumentProcessor, stack.commands.set.command):
    """Shared base for 'stack set host interface ...' commands: helpers to
    validate and resolve a host's network interfaces from the database."""

    def verifyInterface(self, host, interface):
        """
        Returns True IFF the host has the specified interface.
        """
        # NOTE(review): host/interface are %-interpolated directly into SQL —
        # SQL injection risk if the values are not validated upstream; prefer
        # parameterized queries if db.select supports them.
        exists = False
        # any returned row means the (host, device) pair exists
        for row in self.db.select("""
            * from
            networks net, nodes n where
            n.name = '%s' and net.device = '%s' and
            n.id = net.node
            """ % (host, interface)):
            exists = True
        return exists

    def getInterface(self, host, network):
        """
        Returns the interface name of a host for the specified network.
        """
        # NOTE(review): same %-interpolated SQL injection concern as above.
        interface = None
        # empty loop body: 'interface' ends up bound to the device column of
        # the last matching row (or stays None when there is no match)
        for interface, in self.db.select("""
            net.device from
            networks net, subnets s, nodes n where
            n.name='%s' and s.name='%s' and
            n.id = net.node and
            s.id = net.subnet
            """ % (host, network)):
            pass
        return interface
| StarcoderdataPython |
9671112 | #
#
# Public Archive of Days Since Timers
# Setuptools Configuration
#
#
import os
from setuptools import find_packages, setup

# use the package README as the long description shown on PyPI
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# normalize the working directory to the project root so that setup.py
# behaves the same regardless of where it is invoked from
os.chdir(os.path.normpath(os.path.join(
    os.path.abspath(__file__), os.pardir)))

setup(
    name='django-padsweb',
    version='0.587',
    packages=find_packages(),
    include_package_data=True,
    description='''Yet another long-term stopwatch web app. Works just
    like the countless "days since" personal calendar apps found on
    Google Play and the App Store, but intended to be accessible
    from about any reasonably modern device with a web browser and
    an internet connection.''',
    long_description=README,
    url='https://github.com/mounaiban/padsweb',
    author='Mounaiban',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django :: 2.0',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: Operating System Independent',
        'Programming Language :: Python :: 3.6',
    ],
)
| StarcoderdataPython |
3279470 | <filename>tests/unit/test_docs.py
"""Test for the sphinx documentation."""
import subprocess
import glob
import os
import pytest
import regex
# module stems that intentionally have no dedicated documentation folder
UNDOCUMENTED_FILES = ["__init__", "constants", "lib_mapper", "protocol_mapper", "variables"]

# sphinx source/build directory pairs exercised by the dummy-build test
SPHINX_DIRECTORIES = [
    {
        "source_dir": "docs/source/",
        "build_dir": "docs/build/",
    }
]

# docs module folders paired with the netutils package (paths relative to the test file)
MODULE_FOLDERS = [{"doc_module_folder": "../../docs/source/netutils/", "netutils_module_folder": "../../netutils/"}]

# expected first and last README line of each docs section; these values
# must match README.md exactly so the included slices stay in sync
START_END_LINES = [
    {
        "name": "overview",
        "start_value": "# netutils\n",
        "end_value": "* VLANs - Provide the ability to convert configuration into lists or lists into configuration.\n",
    },
    {
        "name": "installation",
        "start_value": "Option 1: Install from PyPI.\n",
        "end_value": "```\n",
    },
    {
        "name": "examples",
        "start_value": "While all functions come with examples in the docstrings, for quick reference of the types of problems this library intends to\n",
        "end_value": "These are just some examples of the many functions provided by this library.\n",
    },
    {
        "name": "attribution",
        "start_value": "The library was built to be a centralized place for common network automation code to be accessed. While in most cases it is\n",
        "end_value": "* https://github.com/ansible/ansible/pull/26566\n",
    },
    {
        "name": "contributing",
        "start_value": "Pull requests are welcomed and automatically built and tested against multiple versions of Python through TravisCI.\n",
        "end_value": "Sign up [here](http://slack.networktocode.com/)\n",
    },
]

# README contents loaded once at import time for all line-based assertions
with open("README.md", "r", encoding="utf-8") as file:
    README_LIST = file.readlines()
def _get_readme_line(folder_name, start_end):
    """Return the ``:start-line:``/``:end-line:`` value from a section's index.rst.

    Args:
        folder_name: docs section folder (e.g. ``"overview"``).
        start_end: either ``"start"`` or ``"end"``, selecting which directive to read.

    Returns:
        The integer line value of the requested directive.

    Raises:
        Exception: when the directive cannot be found in the index.rst.
    """
    regex_dict = {"start": r"(:start-line:\s+(?P<value>\d+))", "end": r"(:end-line:\s+(?P<value>\d+))"}
    # BUG FIX: initialize match so an empty index.rst raises the intended
    # Exception below instead of a NameError after the loop
    match = None
    with open(f"{SPHINX_DIRECTORIES[0]['source_dir']}/{folder_name}/index.rst", "r", encoding="utf-8") as index_file:
        for line in index_file.readlines():
            match = regex.search(regex_dict[start_end], line)
            if match:
                break
    if match:
        return int(match.groupdict()["value"])
    raise Exception(
        f"Not able to find {start_end} line value from {SPHINX_DIRECTORIES[0]['source_dir']}/{folder_name}/index.rst. Ensure each line is spelled correctly and exists. "
    )
@pytest.mark.parametrize("data", SPHINX_DIRECTORIES)
def test_sphinx_build(data):
    """The sphinx dummy build must exit cleanly (warnings treated as errors via -W)."""
    build_cmd = ["sphinx-build", "-b", "dummy", "-W", data["source_dir"], data["build_dir"]]
    completed = subprocess.run(build_cmd, stdout=subprocess.PIPE)  # pylint: disable=W1510
    assert completed.returncode == 0
@pytest.mark.parametrize("data", MODULE_FOLDERS)
def test_folders_present_for_module(data):
    """Every netutils module (minus the intentionally undocumented ones)
    must have a matching documentation folder."""
    module_names = [path[15:-3] for path in glob.glob(data["netutils_module_folder"] + "*.py")]
    documented_modules = set(module_names) - set(UNDOCUMENTED_FILES)
    doc_folder_names = [path[27:-1] for path in glob.glob(data["doc_module_folder"] + "*/")]
    for module_name in documented_modules:
        assert module_name in doc_folder_names
@pytest.mark.parametrize("data", MODULE_FOLDERS)
def test_folders_contain_index(data):
    """Each module documentation folder must provide an index.rst."""
    for doc_folder in glob.glob(data["doc_module_folder"]):
        assert "index.rst" in os.listdir(doc_folder)
@pytest.mark.parametrize("start_end", START_END_LINES, ids=[section["name"] for section in START_END_LINES])
def test_docs_start_end_lines(start_end):
    """Each README slice referenced from the docs must still begin and end on
    the expected lines, keeping docs includes in sync with README edits."""
    start_line_value = _get_readme_line(start_end["name"], "start")
    end_line_value = _get_readme_line(start_end["name"], "end")
    # :start-line: is inclusive and :end-line: is exclusive, hence the -1
    assert README_LIST[start_line_value] == start_end["start_value"]
    assert README_LIST[end_line_value - 1] == start_end["end_value"]
def test_docs_start_end_lines_fail():
    """Negative sanity check: a deliberately wrong end_value must NOT match
    the README, proving the positive test above is able to fail."""
    end_line_value = _get_readme_line("overview", "end")
    overview = {
        "name": "overview",
        "start_value": "# netutils\n",
        # intentionally incorrect last line
        "end_value": "This is what I think the last line of the overview section will be.\n",
    }
    assert README_LIST[end_line_value - 1] != overview["end_value"]
1620673 | <reponame>pcf26536/politico-api
from api.ver1.utils import error
def system_unavailable(e):
    """Error handler for unexpected runtime exceptions.

    Logs the exception and returns a generic HTTP 500 response via the
    project's `error` helper, hiding internal details from the client.
    """
    # BUG FIX: str(e) instead of e.args[0] — exceptions raised without
    # arguments would previously cause an IndexError inside the handler
    print('Runtime Exception: ' + str(e))
    return error(
        message='System unavailable, please try again later!',
        code=500
    )
| StarcoderdataPython |
90731 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 6 08:59:23 2022
@author: <NAME>
"""
import operator
import itertools
ewns = [[0] * 360] * 360
def theta_python(theta1, theta2):
    """Map angle coordinates (theta1 in [-180, 179], theta2 in [-180, 179])
    to non-negative (east-west, north-south) array indices."""
    east_west = 180 + theta1
    north_south = 179 - theta2
    return east_west, north_south
return ew, ns
def python_theta(ew, ns):
    """Inverse of theta_python: map (east-west, north-south) array indices
    back to (theta1, theta2) angle coordinates."""
    return ew - 180, 179 - ns
def antipode_theta(theta1, theta2):
    """Return the antipodal angle pair: each angle is shifted by 180 degrees,
    wrapping so the result stays within the signed angle range."""
    def _flip(angle):
        return angle - 180 if angle >= 0 else angle + 180

    return _flip(theta1), _flip(theta2)
# shared accumulator of (theta1, theta2) pairs already visited
visited = []


def recursive_colorer(starttheta1, starttheta2, val, visited):
    """Flood-fill-style traversal over (theta1, theta2) space.

    NOTE(review): this function appears unfinished — 'val' is never used and
    the recursion (see below) does not terminate. Do not call as-is.
    """
    visited.append((starttheta1, starttheta2))
    # stop at the boundary of the angle grid
    if starttheta1 > 178 or starttheta1 < -179 or starttheta2 > 178 or starttheta2 < -179:
        return
    else:
        # the 3x3 neighbourhood around the starting pair, minus visited pairs
        poss1 = [starttheta1, starttheta1 - 1, starttheta1 + 1]
        poss2 = [starttheta2, starttheta2 - 1, starttheta2 + 1]
        cartesian_product = itertools.product(poss1, poss2)
        cartesian_list = list(set(cartesian_product) - set(visited))
        # cartesian_list.remove((starttheta1, starttheta2))
        for pair in cartesian_list:
            # NOTE(review): recursing with the constants (0, 0, 0) instead of
            # pair[0], pair[1] means only (0, 0) is ever marked visited, so
            # the neighbour set never shrinks and this recurses without bound
            # (RecursionError). Presumably intended:
            # recursive_colorer(pair[0], pair[1], val, visited) — confirm.
            recursive_colorer(0, 0, 0, visited)
            # cartesian_list.append(list(set(recursive_colorer(pair[0], pair[1], 0))))
        print(cartesian_list)
        return cartesian_list


# NOTE(review): this module-level call triggers the unbounded recursion above.
print(recursive_colorer(0, 0, 0, visited))
| StarcoderdataPython |
151654 | <reponame>royqh1979/programming_with_python
from easygraphics.turtle import *
def Fill(size, level):
if level == 0:
fd(size)
return
Fill(size / 3, level - 1)
lt(90)
Fill(size / 3, level - 1)
for i in range(3):
rt(90)
Fill(size / 3, level - 1)
for i in range(3):
lt(90)
Fill(size / 3, level - 1)
rt(90)
Fill(size / 3, level - 1)
def main():
    """Set up the turtle world and draw a level-4 fill curve, then wait."""
    create_world(800, 600)
    set_speed(500)
    # start near the bottom of the canvas so the curve fits on screen
    setxy(0, -200)
    Fill(400, 4);
    pause()
    close_world()


easy_run(main)
5189750 | from building import Building
from material import Material
class IndoorMaterial(Material, Building):
    """Class that stores methods for the indoor material.
    Mainly to calculate the material volume inside a building.
    """

    def __init__(self, material='cinderblock'):
        # initialize both parents explicitly (cooperative super() is not
        # used by the Building/Material base classes)
        Building.__init__(self)
        Material.__init__(self, material=material)
        self.set_material_volume()

    def set_material_volume(self):
        """Sets the total volume of material inside the modeled interior of
        the building.
        """
        A_room = self.get_room_area()
        penetration_depth = self.get_penetration_depth()
        # V_mat: room surface area times the contaminant penetration depth
        self.V_mat = A_room * penetration_depth

    def get_material_volume(self):
        """Returns the total volume of material inside the modeled interior of
        the building.
        """
        return self.V_mat

    def get_penetration_depth(self):
        """Returns the depth (in the model's length units) to which the
        contaminant penetrates the material.
        This is pretty arbitrarily chosen.
        """
        material = self.get_material()
        # depth to which contaminant has been adsorbed/penetrated into the material
        penetration_depth = {'cinderblock': 5e-3, 'wood': 1e-3,
                             'drywall': 1e-2, 'carpet': 1e-2, 'paper': 1e-4, 'none': 0, 'soil': 0}
        return penetration_depth[material]
| StarcoderdataPython |
6585197 | <gh_stars>0
"""Models used for API request and response validation."""
from pydantic import BaseModel
class AuthReq(BaseModel):
    """The request format for the authorization API request."""

    email: str  # login identifier
    password: str  # user password
class AccountReq(BaseModel):
    """The request format for account API request."""

    password: str  # password confirming the account operation
class MessageReq(BaseModel):
    """The request format for the contact message API request."""

    category: str = ""  # optional message category; defaults to empty
    first: str  # sender's first name
    last: str  # sender's last name
    message: str  # message body
class BaseResp(BaseModel):
    """General API response parameters."""

    status: str  # outcome indicator
    message: str  # human-readable detail
class AuthResp(BaseResp):
    """Response parameters for the authorization API."""

    token: str  # credential issued on successful authorization
| StarcoderdataPython |
8070551 | <reponame>dshirle7/capstone2
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import random
class BlackjackEnv(gym.Env):
    # Blackjack environment that runs on a single deck of cards
    metadata = {'render.modes': ['human']}

    def __init__(self):
        """Define spaces; state is initialized lazily by reset()."""
        # actions: 0 = stay, 1 = hit
        self.action_space = spaces.Discrete(2)
        # Observation Space:
        # 1. Player's value (assuming aces are 11)
        # 2. Dealer's shown value
        # 3. Whether the player's ace could count as a 1 or 11
        # 4. Whether the dealer's ace could count as a 1 or 11
        # NOTE(review): hand values exceed the declared MultiDiscrete bounds
        # (player totals reach 21+, not 0-9) — confirm the intended encoding.
        self.observation_space = spaces.MultiDiscrete([10, 10, 2, 2])
        self.state = None
        self.done = False
        self.steps_beyond_done = None
        self.deck = None

    def step(self, action):
        """Apply one action and return (observation, reward, done, info)."""
        # Convert hits to stays if the player is already at 21
        # NOTE(review): when action == 1 AND state[0] == 21, BOTH branches run
        # (stay, then hit with reward overwritten to 0) — confirm intent.
        if action == 0 or self.state[0] == 21:
            reward = self.stay()
        if action == 1:
            self.hit()
            reward = 0
        if not self.done:
            pass
        elif self.steps_beyond_done is None:
            # The episode is over
            self.steps_beyond_done = 0
        else:
            # step() called after the episode already finished
            if self.steps_beyond_done == 0:
                pass
                # logger.warn("You are calling 'step' even though this environment has already returned done = True")
            self.steps_beyond_done += 1
            reward = 0
        return np.array(self.state), reward, self.done, {}

    def reset(self):
        """Rebuild and shuffle the deck, deal opening cards, return the state."""
        self.done = False
        # single 52-card deck: 2-9 four times each, sixteen 10-valued cards
        # (10/J/Q/K), and four aces counted as 11
        self.deck = [i for i in range(2, 10)] * 4
        self.deck.extend([10] * 16)
        self.deck.extend([11] * 4)
        random.shuffle(self.deck)
        self.state = [0] * 4
        # one card to the dealer, then player cards until reaching at least 12
        self.hit(1)
        while self.state[0] < 12:
            self.hit(0)
        return np.array(self.state)

    def shuffle(self):
        """Reshuffle whatever cards remain in the deck."""
        random.shuffle(self.deck)

    def render(self, mode='human'):
        """Print the raw state list."""
        print(self.state)

    def hit(self, player=0):
        # Deals the next card to the appropriate position
        # player 0 = the agent, player 1 = the dealer
        if player not in [0, 1]:
            raise ValueError(f'hit() can only handle player of 0 or 1; received {player}')
        # If the player gets an ace, show it in the state
        if self.deck[-1] == 11:
            self.state[player + 2] = 1
        # Add the card's value to the player's current total
        self.state[player] += self.deck.pop()
        if self.state[player] > 21 and self.state[player + 2] == 0:
            # The player just went bust
            self.done = True
        elif self.state[player] > 21 and self.state[player + 2] == 1:
            # The player must use their soft ace as a 1
            self.state[player] += -10
            self.state[player + 2] = 0
        else:
            pass

    def stay(self):
        # Agent has chosen to stay
        # This function represents dealer's behavior afterwards
        # Then it evaluates the state and determines who wins
        self.done = True
        # dealer draws up to 17 (standard house rule)
        while self.state[1] < 17:
            self.hit(1)
        if self.state[1] > 21:
            # dealer busts: agent wins
            reward = 1
            return reward
        elif self.state[0] > self.state[1]:
            reward = 1
            return reward
        elif self.state[0] == self.state[1]:
            # push (tie) pays half
            reward = 0.5
            return reward
        reward = 0
        return reward
29490 | <gh_stars>1-10
import sys


def count_tilings(n):
    """Number of ways to tile a 2xn board with 2x1 and 2x2 pieces, mod 10007.

    Recurrence: d[i] = d[i-1] + 2*d[i-2] (append a vertical domino, or a
    2x2 square / two horizontal dominoes), with d[1] = 1 and d[2] = 3.

    Args:
        n: Board width, a non-negative integer.

    Returns:
        The tiling count modulo 10007 (returns n itself for n < 2, matching
        the original early exit: 0 ways for width 0, 1 way for width 1).
    """
    if n < 2:
        return n
    prev2, prev1 = 1, 3  # d[1], d[2]
    for _ in range(3, n + 1):
        prev2, prev1 = prev1, (prev1 + 2 * prev2) % 10007
    return prev1 % 10007


if __name__ == "__main__":
    # Fast stdin read, as in the original competitive-programming style.
    input = sys.stdin.readline
    print(count_tilings(int(input())))
| StarcoderdataPython |
3565236 | <reponame>worldbank/moz-tech-reserva-de-nome
from django.apps import AppConfig
class CoreConfig(AppConfig):
    """Django app configuration for the reserva.core application."""

    name = "reserva.core"
    # Display name shown in the Django admin (Portuguese: "name reservation").
    verbose_name = "Reserva de nome"
| StarcoderdataPython |
1842669 | <reponame>rtibbles/python-delta-crdts
import pytest
from delta_crdt import LWWReg
from .helpers import transmit
@pytest.fixture
def lwwreg():
    """A fresh last-writer-wins register with a fixed replica id."""
    return LWWReg("test id")
def test_can_create(lwwreg):
    # Smoke test: constructing the fixture must not raise.
    pass
def test_starts_empty(lwwreg):
    # A new register compares equal to None until the first write.
    assert lwwreg == None  # noqa
def test_can_write_values(lwwreg):
    # Writes at later timestamps overwrite earlier values.
    lwwreg.write(1, "a")
    assert lwwreg == "a"
    lwwreg.write(2, "b")
    assert lwwreg == "b"
@pytest.fixture
def replicas():
    """Two registers that received concurrent writes at timestamps 0 and 1."""
    replica1 = LWWReg("id1")
    replica2 = LWWReg("id2")
    for replica, (first, second) in ((replica1, ("a", "b")),
                                     (replica2, ("c", "d"))):
        replica.write(0, first)
        replica.write(1, second)
    return replica1, replica2
def test_concurrent_writes(replicas):
    # Before any delta exchange, each replica only sees its own writes.
    replica1, replica2 = replicas
    assert replica1 == "b"
    assert replica2 == "d"
def test_changes_converge(replicas):
    """After exchanging all deltas, both replicas converge on one value."""
    replica1, replica2 = replicas
    # Bug fix: the original used map(), which is lazy in Python 3, so the
    # apply() calls never ran and the replicas were never synchronized.
    for delta in replica2.deltas:
        replica1.apply(transmit(delta))
    for delta in replica1.deltas:
        replica2.apply(transmit(delta))
    assert replica1 == "d"
    assert replica2 == "d"
| StarcoderdataPython |
336302 | <filename>core/rules.py
#!/usr/bin/python2.4
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module describing rules applying to gLearn courses.
Depends on the rules_impl module for actual implementation of rules.
Rules can be applied to different actions:
- whenever a user signs up for a program, activity or specific schedule, some
rules need to be validated before the user action gets validated
- whenever a user unregisters from a particular activity
- when a user completes an activity, in order to grant some certification
Some use cases:
- a particular course may not accept more than 20 people at any given time
- SWEs are allowed to register for no more than 10 trainings / month
- a class cannot have more than 20% of sales people
- any google employee cannot take more than $10K worth of training in 2009
- a user is certified in Python only after completing 3 python trainings
"""
import copy
import logging
import os
from google.appengine.api import memcache
from ragendja import dbutils
import uuid
from core import errors
from core import utils
# Number of maximum retries to regenerate the rule context.
_NUM_RETRIES = 1

# os.environ key flagging prediction (dry-run) mode for rule evaluation.
_PREDICTION_MODE = 'PREDICTION_MODE'
class RuleNames(utils.ChoiceBase):
  """This class is a helper to get the available rule names."""

  # Each constant maps to the name of a rule class implemented in
  # core.rules_impl (resolved via GetRule()).
  EMPLOYEE_TYPE_RESTRICTION = 'EmployeeTypeRestriction'
  LOCK_PAST_ACTIVITY = 'LockPastActivity'
  MANAGER_APPROVAL = 'ManagerApproval'
  MAX_PEOPLE_ACTIVITY = 'MaxNumberRegisteredByActivity'
  TIME_CANCEL_ACTIVITY = 'TimeCancelByActivity'
  TIME_CANCEL_ACCESS_POINT_TAG = 'TimeCancelByAccessPointTag'
  TIME_REGISTER_BY_ACTIVITY = 'TimeFrameRegistrationByActivity'
  TIME_REGISTER_BY_ACCESS_POINT_TAG = 'TimeFrameRegistrationByAccessPointTag'
class RuleConfig(dbutils.FakeModel):
  """Rule with parameters to be processed by the rule engine..

  Attributes:
    rule_name: A string name of RuleBase class.
    parameters: A dictionary containing the function parameters and the
      values to be used when the rule engine calls the function.
    description: A string having the reason for using this rule, defaults to
      rule description.
    key: A unique key identifying this rule.

  Example:
    description: 'Instructor quiz is ideal only for under 10 members'.
    rule_name: 'NumberRegisteredByAccessPoint'.
    parameters: {max_register': 10}.
  """

  # FakeModel serializes exactly these attributes to/from datastore.
  fields = ('rule_name', 'parameters', 'description', 'key')
  # objects = ContentTypeManager()

  def __init__(self, rule_name, parameters, description=None, key=None):
    self.rule_name = rule_name
    # Transform unicode to regular strings for key names. FakeModel attributes
    # are stored as unicode strings (JSON dumps) in datastore.
    self.parameters = dict((str(k), v) for k, v in parameters.iteritems())
    self.description = description
    if not key:
      # We generate a unique key for this rule. We convert to string so it can
      # get JSON serialized when stored in datastore (FakeModel uses JSON)
      self.key = str(uuid.uuid1())
    else:
      # The key is already provided. This happens when the RuleConfig is
      # deserialized from datastore. Note that this key is always created with
      # a call to uuid.uuid1() in the first place.
      self.key = key

  def CreateRules(self, eval_context, offline, namespace=''):
    """Creates the rule associated with this rule configuration.

    Args:
      eval_context: A EvalContext object.
      offline: A boolean to indicate that this rule is evaluated offline.
        Rules which run online are not run in a thread safe environment.
        In offline mode, only one process will evaluate rules at any given time.
      namespace: An optional string to control the namespace where this rule
        stores its state. Note that the given namespace may not be the one
        exactly used - but it will be part of the final namespace (so providing
        a unique namespace here will avoid collisions).

    Returns:
      A list of RuleBase instance(s), [] if no rules available.
    """
    # NOTE(review): GetRule() returns None for an unknown rule_name, which
    # would make the next line raise AttributeError -- confirm that callers
    # validate rule_name before building configs.
    rule = GetRule(self.rule_name)
    process_lists = rule.CanProcessMultipleSchedules()
    if process_lists:
      rule_instance = rule(key=self.key,
                           eval_context=eval_context,
                           offline=offline,
                           namespace_prefix=namespace,
                           **self.parameters)
      return [rule_instance]
    else:
      # This rule can only work off individual schedule/access points
      # We break down the list of schedules/access points in individual items.
      rules = []
      for schedule, ap in zip(eval_context.schedule_list,
                              eval_context.access_point_list):
        context = copy.copy(eval_context)
        context.schedule_list = [schedule]
        context.access_point_list = [ap]
        rule_instance = rule(key=self.key,
                             eval_context=context,
                             offline=offline,
                             namespace_prefix=namespace,
                             **self.parameters)
        rules.append(rule_instance)
      return rules

  def GetDescription(self):
    """The description of this rule or the default rule description."""
    if self.description:
      return self.description
    else:
      return GetRule(self.rule_name).GetDescription()

  def __repr__(self):
    tmp = 'RuleConfig(%s, %s, description=%s, key=%s)'
    return tmp % (self.rule_name, self.parameters, self.description, self.key)

  # Name required by appengine patch.
  # Suppress pylint invalid method name warning.
  # pylint: disable-msg=C6409
  @classmethod
  def all(cls):
    """Method needed by the django admin interface to list possible values."""
    return []
class RuleBase(object):
  """Abstract base class shared by all rules.

  Attributes:
    eval_context: A EvalContext object describing what is being evaluated.
    key: Unique key associated with this rule.
    offline: True when the rule is evaluated by the offline processor;
      online evaluation is not thread safe, offline evaluation is.
    online: Convenience negation of offline.
    namespace: Memcache namespace derived from the mode, prefix and key.
  """

  def __init__(self, key, eval_context, offline, namespace_prefix=''):
    self.key = key
    self.eval_context = eval_context
    self.offline = offline
    self.online = not offline
    # Online and offline evaluations must never share cached state, so the
    # mode is baked into the namespace alongside the caller's prefix.
    mode = 'online'
    if offline:
      mode = 'offline'
    self.namespace = '%s_%s%s' % (mode, namespace_prefix, key)

  @classmethod
  def GetDescription(cls):
    """Returns the rule description; defaults to the class name."""
    return cls.__name__

  @classmethod
  def IsCertify(cls):
    """Returns True iff this rule applies to program certification."""
    return issubclass(cls, RuleCertify)

  @classmethod
  def IsRegister(cls):
    """Returns True iff this rule applies to registration for a program,
    activity or schedule."""
    return issubclass(cls, RuleRegister)
class RuleCertify(RuleBase):
  """Base class for a program certification rule."""

  def __init__(self, *args, **kargs):
    # No extra state; the subclass exists so rules can be classified
    # through RuleBase.IsCertify().
    super(RuleCertify, self).__init__(*args, **kargs)

  def Evaluate(self):
    """Evaluates the rule.

    Returns:
      An integer from 0 to 100 representing the percentage toward certification.
    """
    raise NotImplementedError
class RuleRegister(RuleBase):
  """Base class for a program / activity / schedule registration rule."""

  def __init__(self, *args, **kargs):
    # No extra state; the subclass exists so rules can be classified
    # through RuleBase.IsRegister().
    super(RuleRegister, self).__init__(*args, **kargs)

  def Evaluate(self, initial_state, target_state):
    """Evaluates the rule.

    Args:
      initial_state: Original state of the user registration.
        One of utils.RegistrationStatus states or None.
      target_state: State that the user is trying to transition to.
        One of utils.RegistrationStatus states, must be different from
        initial_state.

    Returns:
      A dict with following keys:
      - status: a utils.REGISTRATION_STATUS, outcome of rule evaluation.
      - rule_tags: An optional string list of tags for the registration. These
        tags can be used to gather registrations with some properties later.
        Suppose there is a rule that places registered users to a baseball game
        on waitlist until it can confirm the weather for New York is sunny on
        game day. It can provide a unique tag for New York weather for that day
        and all registrations in the system that depend on this event can later
        be retrieved by the rule or rule engine for reprocessing.
    """
    raise NotImplementedError

  def ProcessOutcome(self, eval_state, final_state):
    """Method called by the rule engine after rules have been evaluated.

    Default implementation does nothing.

    Args:
      eval_state: The state as returned by the call to _Evaluate() on this rule.
        One of utils.RegistrationStatus states.
      final_state: The final state after evaluation of all rules.
        One of utils.RegistrationStatus states.
    """
    pass

  @classmethod
  def CanProcessMultipleSchedules(cls):
    """Returns whether this rule can process list of access_points/schedules.

    A rule evaluation can either work on a list of schedule/access points for a
    particular user or on individual schedule/access point.
    In the latter case, the evaluation of the rule will only consider the first
    schedule/access point of the eval_context lists when Evaluate() is invoked.

    Returns:
      A boolean indicating if the rule can process list of schedules/access
      points for registration.
    """
    return True

  @classmethod
  def IsResourceRule(cls):
    """Returns whether this rule handles resource allocation.

    Returns:
      True iff this rule handles resource allocation.
    """
    return issubclass(cls, RuleRegisterResource)

  # Suppress pylint unused argument for default implementation.
  # pylint: disable-msg=W0613
  @classmethod
  def TagsToReprocessOnChange(cls, rule_config, program_or_activity=None):
    """Tags that identify registrations in need of reprocessing on rule change.

    A rule can decide which rule tags need to be reprocessed when it is changed.

    Args:
      rule_config: Ruleconfig associated with the rule.
      program_or_activity: models.Program or models.Activity relevant to that
        rule.

    Returns:
      The list of rule tags that identify registrations that need reprocessing.
    """
    # By default if the rules don't implement this function then no
    # registrations are reprocessed.
    return []
class RuleRegisterResource(RuleRegister):
  """Base class for registration rules that deal with resource allocation."""

  def _BuildContext(self):
    """Builds the context associated with this rule.

    Each rule may need to gather some values in order to evaluate. This is
    called the context of the rule. This context is cached and reused during
    rule evaluation by the rule engine. This method is automatically called to
    regenerate the cache when the cache gets flushed.
    For example, a rule which restricts $10k/year of training for a team would
    need to retrieve all the trainings that people from that team registered for
    every time someone registers again.
    This operation could be expensive. To make it faster, the rule can cache
    information using its context.

    Returns:
      A dictionary of key / value pairs where the value is an integer.
      Default implementations returns {}. Rules should override as needed.
    """
    return {}

  def _Regenerate(self):
    """Regenerates the rule context."""
    context = self._BuildContext()
    if self.online and not IsPredictionMode():
      # We do not want to count ourself twice.
      # We know that the UserRegistration entry that triggered the call is
      # already stored in datastore when we reach this code - except if we
      # are predicting outcome.
      for key, value in context.iteritems():
        context[key] = value - 1
        assert value > 0
    # The following code adds the key/values only if not present and then
    # sets an __init__ flag which contains an enumeration of the keys.
    # Since the memcache.add() calls are not transactional, it is possible
    # that __init__ key can be set but still the values associated with the keys
    # would not be present (for example cache gets flushed between add and
    # add_multi call. This is a very remote/rare case though, and this situation
    # still needs to be addressed in the _Incr / _Decr method anyway since keys
    # can get evicted from the cache at any time.
    # We add the values if and only they are not present.
    memcache.add_multi(context, namespace=self.namespace)
    # The __init__ contains a list of available keys for this context
    memcache.add('__init__', context.keys(), namespace=self.namespace)

  def _Incr(self, key, retries=_NUM_RETRIES):
    """Atomically increments a key's value.

    Args:
      key: String key to increment. Stored within the local rule context.
      retries: Maximum number of retries to increment the value for the key.

    Returns:
      New long integer value, or None if key was not in the cache, or could not
      be incremented for any other reason.
    """
    if IsPredictionMode():
      # If predicting outcome/online, we really don't need to increment, since
      # we are not competing with other requests to enroll.
      # No decrement either. We just need to get the current value.
      value = self._Get(key)
      if not value:
        value = 1
      else:
        value += 1
      return value
    try:
      value = memcache.incr(key, namespace=self.namespace)
      # Either cache is not populated yet or it was flushed.
      if value is None:
        # We check if value should be there.
        existing_keys = memcache.get('__init__', namespace=self.namespace)
        if existing_keys is None or key in existing_keys:
          # Info from memcache is stale, we regenerate
          self._Regenerate()
          existing_keys = memcache.get('__init__', namespace=self.namespace)
          if existing_keys is None or key in existing_keys:
            # key should be available now, we try again
            if retries != 0:
              value = self._Incr(key, retries - 1)
            else:
              value = None
        else:
          # the key is new, it was just not in the cache
          # This situation can happen because some rules will create keys on the
          # fly - not deriving them from the datastore. For example a key based
          # on a schedule/access point will be generated the first time a user
          # tries to register for that schedule/access point
          added = memcache.add(key, 1, namespace=self.namespace)
          if not added:
            # we try again
            if retries != 0:
              value = self._Incr(key, retries - 1)
            else:
              value = None
          else:
            # Key was added, we updated existing keys
            existing_keys.append(key)
            memcache.set('__init__', existing_keys, namespace=self.namespace)
            value = 1
    except (TypeError, ValueError), e:
      # Can happen if key is too long or invalid
      logging.error(e)
      value = None
    if value is None:
      # Despite all our efforts to inctrement the value, we could not
      # That could be because memcache is not accessible etc.
      # We can not afford to stay in such state.
      logging.error('Can not increment value for namespace %s, key %s',
                    self.namespace, key)
      raise errors.AppengineError
    return value

  def _Decr(self, key):
    """Atomically decrements a key's value.

    Args:
      key: Key to decrement. Stored within the local rule context.
    """
    # The logic for _Decr is simpler than _Incr because if a key is not in the
    # cache during _Decr, then a following call to _Incr will take care of
    # rebuilding the correct value for that particular key. Rules which call
    # _Decr to not care about the value of the key after call is complete. Only
    # subsequent calls to _Incr are of interest.
    try:
      if not IsPredictionMode():
        memcache.decr(key, namespace=self.namespace)
    except (TypeError, ValueError), e:
      # Can happen if key is too long or invalid
      logging.error(e)

  def _Get(self, key, retries=_NUM_RETRIES):
    """Looks up a single key.

    Args:
      key: The key to look up. Retrieved from the local rule context.
      retries: Maximum number of retries to get the value for the key.

    Returns:
      The value of the key, if found, else None.
    """
    try:
      value = memcache.get(key, namespace=self.namespace)  # @UndefinedVariable
      # Either cache is not populated yet or it was flushed.
      if value is None and retries != 0:
        self._Regenerate()
        return self._Get(key, retries - 1)
    except (TypeError, ValueError), e:
      # Can happen if key is too long or invalid
      logging.error(e)
      value = None
    return value

  def Evaluate(self, initial_state, target_state):
    """Evaluates the rule.

    Prior to calling this method, a transient models.UserRegistration MUST have
    been persisted by the caller in online mode in order to book resources
    for that rule while the rule is being evaluated.

    Args:
      initial_state: Original state of the user registration.
        One of utils.RegistrationStatus states or None.
      target_state: State that the user is trying to transition to. One of
        utils.RegistrationStatus states, must be different from initial_state.

    Returns:
      A dict with following keys:
      - status: one of utils.REGISTRATION_STATUS values as outcome of the rule
      - rule_tags: Array with a single resource identifier string key that the
        resource registration rule is using to constrain this registration.
      - resource_remaining: int remaining value of resource after which rule
        will deny registration. Can be negative. For example -5 for max people
        rule means that user is number 5 on the waitlist. Corresponds to the
        single resource identifier present in the goupd_ids array above.
        None iff resource_key is None.
    """
    raise NotImplementedError

  # Suppress pylint unused argument for default implementation.
  # pylint: disable-msg=W0613
  @classmethod
  def TagsToReprocessOnChange(cls, rule_config, program_or_activity=None):
    """Tags that identify registrations in need of reprocessing on rule change.

    For resource rules, the tags that are affected when a rule is changed are
    set of all tags that were issued by the rule in the Evaluate function
    above for registering users in the given program or activity.

    Args:
      rule_config: Ruleconfig associated with the rule.
      program_or_activity: models.Program or models.Activity relevant to that
        rule.

    Returns:
      The list of rule tags that identify registrations that need reprocessing.
    """
    # Unlike other registration rules resource registration rules are forced to
    # provide an implementation for this function.
    raise NotImplementedError
def ExtractCertifyRules(rules):
  """Returns only the certification rules among the given rule names.

  Args:
    rules: A string list of rule class names deriving from RuleBase.

  Returns:
    The subset of names whose classes are certification rules.
  """
  return _ExtractRules(rules, 'IsCertify')
def ExtractRegisterRules(rules):
  """Returns only the registration rules among the given rule names.

  Args:
    rules: A string list of RuleBase names.

  Returns:
    The subset of names whose classes are registration rules.
  """
  return _ExtractRules(rules, 'IsRegister')
def _ExtractRules(rules, rule_attribute):
  """Filters rule names whose class passes the given classmethod predicate.

  Args:
    rules: A string list of rule class names.
    rule_attribute: Name of a boolean classmethod (e.g. 'IsCertify').

  Returns:
    The names whose resolved class exists and satisfies the predicate.
  """
  extracted = []
  for name in rules:
    rule_class = GetRule(name)
    if rule_class is not None and getattr(rule_class, rule_attribute)():
      extracted.append(name)
  return extracted
def GetRule(rule_name):
  """Returns class representing the given rule.

  Args:
    rule_name: A string containing the name of the rule.

  Returns:
    The class implementing rule_name, or None when no such rule exists.
  """
  # getattr with a default replaces the original try/except AttributeError.
  return getattr(_GetRulesImplModule(), rule_name, None)
def ListRules():
  """Returns a list of all classes defined in the rules implementation."""
  candidates = vars(_GetRulesImplModule()).values()
  return [candidate for candidate in candidates if isinstance(candidate, type)]
def _GetRulesImplModule():
  """Returns the rules_impl module, imported lazily."""
  # Not importing at top of file to avoid circular dependency.
  # pylint: disable-msg=C6204
  from core import rules_impl
  return rules_impl
def SetPredictionMode(is_predict):
  """Turns prediction mode on or off for all subsequent rule evaluations.

  While enabled, every rule evaluation assumes it is predicting the outcome
  of a registration rather than performing an actual enrollment.

  Args:
    is_predict: Boolean, whether to enable prediction mode.
  """
  # os.environ is per-request on App Engine, which scopes the flag safely.
  os.environ[_PREDICTION_MODE] = str(is_predict)
def IsPredictionMode():
  """Returns True iff rule evaluation is currently in prediction mode."""
  return os.environ.get(_PREDICTION_MODE, 'False') == 'True'
| StarcoderdataPython |
3496969 | <filename>tests/utils/test_serialize.py
import tempfile
import pytest
from myia import xtype
from myia.abstract import ANYTHING
from myia.ir import Constant, Graph, isomorphic
from myia.prim.ops import scalar_add, switch
from myia.utils import dump, load
from ..common import to_abstract_test
parametrize = pytest.mark.parametrize
def dumpload(o):
    """Round-trips *o* through dump/load and returns the reloaded value.

    Fix: the original never closed the temporary file, leaking a file
    handle per call; the context manager now closes (and deletes) it
    after load completes.
    """
    with tempfile.TemporaryFile() as f:
        pos = f.tell()
        dump(o, f.fileno())
        f.seek(pos)
        return load(f.fileno())
@parametrize('v', [
    'potato',
    [],
    4242,
    (22,),
])
def test_roundtrip(v):
    # These values must deserialize to an equal but distinct object.
    v2 = dumpload(v)
    assert v is not v2
    assert v == v2
@parametrize('v', [
    None,
    (),
    22,
    switch,
    ANYTHING,
    xtype.Bool,
    xtype.i8,
    xtype.u64,
    xtype.f32,
])
def test_same(v):
    # Singletons, primitives and interned values must round-trip to the
    # very same object, not just an equal one.
    v2 = dumpload(v)
    assert v is v2
def test_dump_undefined():
    # A plain object() has no registered serializer, so dump must raise.
    with pytest.raises(Exception):
        dumpload(object())
def test_exception():
    # Exceptions deserialize to a LoadedException that carries the
    # formatted original message.
    e2 = dumpload(Exception("this is bad"))
    assert e2.message == "Exception: this is bad\n"
    assert repr(e2) == 'LoadedException'
# Module-level fixture graph: g(p1, p2) = (p1 + p2) + 1, with abstract
# (type) annotations attached to every node so serialization of the
# abstract values is exercised alongside the graph structure.
g = Graph()
p1 = g.add_parameter()
p1.abstract = to_abstract_test(2)
p2 = g.add_parameter()
p2.abstract = to_abstract_test(2)
mid = g.apply(scalar_add, p1, p2)
mid.abstract = to_abstract_test(4)
c = Constant(1)
c.abstract = to_abstract_test(1)
g.output = g.apply(scalar_add, mid, c)
g.output.abstract = to_abstract_test(5)
g.return_.abstract = to_abstract_test(5)
# The return node's operator input deliberately has no abstract value.
g.return_.inputs[0].abstract = None
@parametrize('node', [
    p1,
    c,
    mid,
])
def test_anfnode(node):
    # A deserialized node matches the original structurally (type, arity,
    # debug info) but is a distinct object belonging to a distinct graph.
    node2 = dumpload(node)
    assert type(node) is type(node2)
    assert len(node.inputs) == len(node2.inputs)
    # more inputs assert?
    if node.graph is None:
        assert node2.graph is None
    else:
        assert isinstance(node2.graph, Graph) and node.graph is not node2.graph
    assert node.debug.name == node2.debug.name
    assert node.debug.id == node2.debug.id
    # This is because of intern()
    assert node.abstract is node2.abstract
def test_graph():
    # Round-tripping the module-level graph yields an isomorphic copy
    # whose parameters belong to the new graph object.
    g2 = dumpload(g)
    assert g is not g2
    assert isinstance(g2, Graph)
    assert isomorphic(g, g2)
    assert g2.parameters[0].graph is g2
| StarcoderdataPython |
8022595 | <gh_stars>0
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from passlib.context import CryptContext
# to get a string like this run:
# openssl rand -hex 32
# NOTE(review): hard-coded signing secret; move to env/config before production.
SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30

# Passlib context used to hash and verify passwords with bcrypt.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# OAuth2 "password" flow; clients obtain tokens from the /token endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
def verify_password(plain_password, hashed_password):
    """Checks a plaintext password against its stored bcrypt hash."""
    is_match = pwd_context.verify(plain_password, hashed_password)
    return is_match
def get_password_hash(password):
    """Returns the bcrypt hash of a plaintext password."""
    return pwd_context.hash(password)
async def get_current_user(token: str = Depends(oauth2_scheme)):
    """Resolves the current user from a Bearer JWT.

    Decodes the token with the module SECRET_KEY, takes the "sub" claim as
    the username and looks the user up.

    Raises:
        HTTPException: 401 when the token is invalid, has no "sub" claim,
            or the user does not exist.
    """
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        username: str = payload.get("sub")
        if username is None:
            raise credentials_exception
        # NOTE(review): TokenData is not defined or imported in this file --
        # confirm it is provided by a sibling module.
        token_data = TokenData(username=username)
    except JWTError:
        raise credentials_exception
    # NOTE(review): get_user and fake_users_db are also undefined here --
    # confirm they are provided elsewhere.
    user = get_user(fake_users_db, username=token_data.username)
    if user is None:
        raise credentials_exception
    return user
8192358 | import re
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
def preProc(tweet, stemmer):
    """Normalizes a tweet: strip non-letters, lowercase, drop stopwords, stem.

    Args:
        tweet: Raw tweet text.
        stemmer: An NLTK stemmer instance (e.g. PorterStemmer).

    Returns:
        The cleaned tweet as a single space-joined string.
    """
    # Performance fix: build the stopword set once per call; the original
    # re-read the NLTK stopword corpus for EVERY word, which is
    # O(words * corpus size) and dominated preprocessing time.
    stop_words = set(stopwords.words('english'))
    tweet = re.sub('[^A-Za-z]', ' ', tweet)
    words = tweet.lower().split()
    stemmed = [stemmer.stem(word) for word in words if word not in stop_words]
    return ' '.join(stemmed)
# load dataset (expects a 'text' column and a 'target' label column)
data = pd.read_csv('./dataset/train.csv', index_col=0)

# pre-processing tweets
stemmer = PorterStemmer()
corpus = [preProc(tweet, stemmer) for tweet in data.text]

# Bag-of-words features over unigrams, bigrams and trigrams, capped at 5000.
fe = CountVectorizer(max_features=5000, ngram_range=(1,3))
fe.fit(corpus)

# transform tweets using CountVectorizer feature extractor
xData = fe.transform(corpus).toarray()
yData = data.target

# Multinomial Naive Bayes scored with default cross-validation folds.
clf = MultinomialNB()
score = np.mean(cross_val_score(clf, xData, yData))
print('Accuracy for base model:', round(score,2))
| StarcoderdataPython |
9689252 | <reponame>txwhhny/vtk
#!/usr/bin/env python
# we need to use composite data pipeline with multiblock datasets
alg = vtk.vtkAlgorithm()
pip = vtk.vtkCompositeDataPipeline()
alg.SetDefaultExecutivePrototype(pip)
del pip
# Create the RenderWindow, Renderer and both Actors
#
Ren1 = vtk.vtkRenderer()
Ren1.SetBackground(0.33,0.35,0.43)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(Ren1)
renWin.SetSize(300,300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
pvTemp59 = vtk.vtkXMLRectilinearGridReader()
pvTemp59.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/cth.vtr")
pvTemp59.UpdateInformation()
pvTemp59.SetCellArrayStatus("X Velocity",0)
pvTemp59.SetCellArrayStatus("Y Velocity",0)
pvTemp59.SetCellArrayStatus("Z Velocity",0)
pvTemp59.SetCellArrayStatus("Mass for Armor Plate",0)
pvTemp59.SetCellArrayStatus("Mass for Body, Nose",0)
pvTemp79 = vtk.vtkExtractCTHPart()
pvTemp79.SetInputConnection(pvTemp59.GetOutputPort())
pvTemp79.AddVolumeArrayName("Volume Fraction for Armor Plate")
pvTemp79.AddVolumeArrayName("Volume Fraction for Body, Nose")
pvTemp79.SetClipPlane(None)
pvTemp79.GenerateSolidGeometryOn();
pvTemp104 = vtk.vtkLookupTable()
pvTemp104.SetNumberOfTableValues(256)
pvTemp104.SetHueRange(0.6667,0)
pvTemp104.SetSaturationRange(1,1)
pvTemp104.SetValueRange(1,1)
pvTemp104.SetTableRange(0,1)
pvTemp104.SetVectorComponent(0)
pvTemp104.Build()
pvTemp79.Update()
compositeData = pvTemp79.GetOutput();
dataList = compositeData.NewIterator()
dataList.InitTraversal()
while not dataList.IsDoneWithTraversal():
# get next object in the composite dataset
currData = dataList.GetCurrentDataObject()
# construct mapper
pvTemp87 = vtk.vtkDataSetMapper()
pvTemp87.SetInputData(currData)
pvTemp87.SetScalarRange(0,1)
pvTemp87.UseLookupTableScalarRangeOn()
pvTemp87.SetScalarVisibility(1)
pvTemp87.SetScalarModeToUsePointFieldData()
pvTemp87.SelectColorArray("Part Index")
pvTemp87.SetLookupTable(pvTemp104)
# construct actor
pvTemp88 = vtk.vtkActor()
pvTemp88.SetMapper(pvTemp87)
pvTemp88.GetProperty().SetRepresentationToSurface()
pvTemp88.GetProperty().SetInterpolationToGouraud()
pvTemp88.GetProperty().SetAmbient(0)
pvTemp88.GetProperty().SetDiffuse(1)
pvTemp88.GetProperty().SetSpecular(0)
pvTemp88.GetProperty().SetSpecularPower(1)
pvTemp88.GetProperty().SetSpecularColor(1,1,1)
Ren1.AddActor(pvTemp88)
# update iterator
dataList.GoToNextItem()
renWin.Render()
alg.SetDefaultExecutivePrototype(None)
# --- end of script --
| StarcoderdataPython |
import games as g

# Create a 700x700 level; the trailing (100, 100) is presumably the player
# start position -- confirm against the games module's Game signature.
level = g.Game(700, 700, 100, 100)
# Wall segments; the arguments look like (y1, y2, x1, x2) coordinate spans
# -- TODO confirm addwall's parameter order against the games module.
level.addwall(600, 650, 150, 750)
level.addwall(450, 500, 0, 650)
level.addwall(300, 350, 150, 750)
level.addwall(150, 200, 0, 650)
5143059 | <gh_stars>0
from vizdoommaze.envs.vizdoomenv import VizdoomEnv
class VizdoomMazeFour6(VizdoomEnv):
    """Maze-four variant #6 of the VizDoom maze environment."""

    def __init__(self):
        # 79 is presumably the scenario/level index understood by
        # VizdoomEnv -- confirm against vizdoomenv's level table.
        super(VizdoomMazeFour6, self).__init__(79)
3570978 | #!/usr/bin/python
# $Id: ircView.py,v 1.4 2001/10/09 22:19:57 ivo Exp $
import sys
from string import strip
from threading import Thread
from time import sleep
from view import guiMessage
from genericView import genericView
from msgSupport import msgSupport
from serverView import serverView
from util.debug import debug, DEBUG, NOTICE, ERROR
class ircView(genericView, msgSupport, Thread):
    """Console-based IRC view: reads commands from stdin on its own thread
    and forwards parsed guiMessage events to a registered callback."""

    def __init__(self):
        genericView.__init__(self, "most")
        Thread.__init__(self)
        self.handler = None  # callback invoked with (view, event)
        self.running = 1     # loop flag for run(); cleared by mainquit()

    def setcallback(self, cb):
        # Registers the callback that receives (item, event) pairs.
        self.handler = cb

    def msg_handler(self, item, event):
        """ pass to handler """
        if self.handler:
            # perhaps do something with event, i.e. source?
            self.handler(item, event)

    def new_view(self, name):
        # Creates and starts a per-server view thread.
        newview = serverView()
        newview.start()
        return newview

    def run(self):
        # Thread main loop: turn each non-empty stdin line into a guiMessage
        # and hand it to the registered callback.
        self.running = 1
        while self.running:
            text = raw_input()
            event = None
            if strip(text) != "":
                if text[0] == '/':
                    # Leading slash: a command, parsed by guiMessage itself.
                    event = guiMessage.guiMessage(text[1:])
                    # if event.type == guiMessage.UNKNOWN ...
                else:
                    # Plain text: wrap as a MSG event for the current target.
                    event = guiMessage.guiMessage()
                    event.type = guiMessage.MSG
                    if hasattr(self, 'name'):
                        event.source = self.name
                        event.target = self.name # This should invoke some method
                    event.data = text
                if self.handler:
                    self.handler(self, event)

    def mainloop(self):
        #self.start()
        pass

    def mainquit(self):
        # Signals run() to exit after its next blocking raw_input returns.
        self.running = 0

    def threads_enter(self):
        # GUI-thread locking stubs; no-ops for the console view.
        pass

    def threads_leave(self):
        pass
_view = None
def getIrcView():
    """Returns the shared ircView instance, creating and caching it lazily."""
    global _view
    if not _view:
        # First request: build the module-level singleton console view.
        _view = ircView()
    return _view
def parse_config(file=None):
    """Stub: configuration parsing is not implemented yet."""
    pass
| StarcoderdataPython |
4865739 | # Created by Kelvin_Clark on 3/10/22, 5:34 PM
| StarcoderdataPython |
1653691 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'cal2.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    # Generated by pyuic5 from cal2.ui — manual edits are lost on regeneration.
    # A 4x6-grid calculator: a QLabel display on top, digit/operator buttons
    # below, each wired to a method_* slot at the bottom of setupUi().
    def setupUi(self, MainWindow):
        """Create and lay out all widgets on MainWindow, then connect signals.

        NOTE(review): the stylesheets below contain "QPushBotton" (typo for
        "QPushButton"); those selector blocks never match and are dead rules —
        confirm before fixing, as correcting them would change button colours.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(341, 476)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("../../Downloads/mathematics-operation-addition-manic-math-multiplication-and-division-flashcard-math-games-mathematics.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        MainWindow.setStyleSheet("background-color: rgb(15, 15, 15);")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        # --- digit / operator buttons (green hover = digits, yellow = ops) ---
        self.push_5 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_5.sizePolicy().hasHeightForWidth())
        self.push_5.setSizePolicy(sizePolicy)
        self.push_5.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_5.setObjectName("push_5")
        self.gridLayout.addWidget(self.push_5, 3, 1, 1, 1)
        self.push_3 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_3.sizePolicy().hasHeightForWidth())
        self.push_3.setSizePolicy(sizePolicy)
        self.push_3.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_3.setObjectName("push_3")
        self.gridLayout.addWidget(self.push_3, 2, 2, 1, 1)
        self.push_equal = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_equal.sizePolicy().hasHeightForWidth())
        self.push_equal.setSizePolicy(sizePolicy)
        self.push_equal.setStyleSheet("QPushButton{\n"
"font: 75 36pt \"MS Shell Dlg 2\";\n"
" color: rgb(255, 255, 127);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"color: rgb(255, 255, 255);\n"
" background-color: rgb(230, 230, 0);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(232, 77, 0);\n"
"}")
        self.push_equal.setObjectName("push_equal")
        self.gridLayout.addWidget(self.push_equal, 5, 3, 1, 1)
        self.push_clear = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_clear.sizePolicy().hasHeightForWidth())
        self.push_clear.setSizePolicy(sizePolicy)
        self.push_clear.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(185, 0, 0);\n"
"}\n"
"\n"
"QPushBotton:pressed{\n"
"\n"
" \n"
" color: rgb(27, 0, 0);\n"
"}")
        self.push_clear.setShortcut("")
        self.push_clear.setObjectName("push_clear")
        self.gridLayout.addWidget(self.push_clear, 1, 0, 1, 2)
        self.push_min = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_min.sizePolicy().hasHeightForWidth())
        self.push_min.setSizePolicy(sizePolicy)
        self.push_min.setStyleSheet("QPushButton{\n"
"font: 75 36pt \"MS Shell Dlg 2\";\n"
" color: rgb(255, 255, 127);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"color: rgb(255, 255, 255);\n"
" background-color: rgb(230, 230, 0);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(232, 77, 0);\n"
"}")
        self.push_min.setObjectName("push_min")
        self.gridLayout.addWidget(self.push_min, 2, 3, 1, 1)
        self.push_del = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_del.sizePolicy().hasHeightForWidth())
        self.push_del.setSizePolicy(sizePolicy)
        self.push_del.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(185, 0, 0);\n"
"}\n"
"\n"
"QPushBotton:pressed{\n"
"\n"
" \n"
" color: rgb(27, 0, 0);\n"
"}")
        self.push_del.setObjectName("push_del")
        self.gridLayout.addWidget(self.push_del, 1, 2, 1, 1)
        self.push_point = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_point.sizePolicy().hasHeightForWidth())
        self.push_point.setSizePolicy(sizePolicy)
        self.push_point.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_point.setObjectName("push_point")
        self.gridLayout.addWidget(self.push_point, 5, 2, 1, 1)
        self.push_1 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_1.sizePolicy().hasHeightForWidth())
        self.push_1.setSizePolicy(sizePolicy)
        self.push_1.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_1.setObjectName("push_1")
        self.gridLayout.addWidget(self.push_1, 2, 0, 1, 1)
        self.push_2 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_2.sizePolicy().hasHeightForWidth())
        self.push_2.setSizePolicy(sizePolicy)
        self.push_2.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton:pressed{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_2.setObjectName("push_2")
        self.gridLayout.addWidget(self.push_2, 2, 1, 1, 1)
        self.push_mult = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_mult.sizePolicy().hasHeightForWidth())
        self.push_mult.setSizePolicy(sizePolicy)
        self.push_mult.setStyleSheet("QPushButton{\n"
"font: 75 36pt \"MS Shell Dlg 2\";\n"
" color: rgb(255, 255, 127);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"color: rgb(255, 255, 255);\n"
" background-color: rgb(230, 230, 0);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(232, 77, 0);\n"
"}")
        self.push_mult.setObjectName("push_mult")
        self.gridLayout.addWidget(self.push_mult, 3, 3, 1, 1)
        self.push_9 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_9.sizePolicy().hasHeightForWidth())
        self.push_9.setSizePolicy(sizePolicy)
        self.push_9.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_9.setObjectName("push_9")
        self.gridLayout.addWidget(self.push_9, 4, 2, 1, 1)
        self.push_8 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_8.sizePolicy().hasHeightForWidth())
        self.push_8.setSizePolicy(sizePolicy)
        self.push_8.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_8.setObjectName("push_8")
        self.gridLayout.addWidget(self.push_8, 4, 1, 1, 1)
        self.push_4 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_4.sizePolicy().hasHeightForWidth())
        self.push_4.setSizePolicy(sizePolicy)
        self.push_4.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_4.setObjectName("push_4")
        self.gridLayout.addWidget(self.push_4, 3, 0, 1, 1)
        self.push_zero = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_zero.sizePolicy().hasHeightForWidth())
        self.push_zero.setSizePolicy(sizePolicy)
        self.push_zero.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_zero.setObjectName("push_zero")
        self.gridLayout.addWidget(self.push_zero, 5, 0, 1, 2)
        self.push_6 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_6.sizePolicy().hasHeightForWidth())
        self.push_6.setSizePolicy(sizePolicy)
        self.push_6.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_6.setObjectName("push_6")
        self.gridLayout.addWidget(self.push_6, 3, 2, 1, 1)
        self.push_div = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_div.sizePolicy().hasHeightForWidth())
        self.push_div.setSizePolicy(sizePolicy)
        self.push_div.setStyleSheet("QPushButton{\n"
"font: 75 36pt \"MS Shell Dlg 2\";\n"
" color: rgb(255, 255, 127);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"color: rgb(255, 255, 255);\n"
" background-color: rgb(230, 230, 0);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(232, 77, 0);\n"
"}")
        self.push_div.setObjectName("push_div")
        self.gridLayout.addWidget(self.push_div, 4, 3, 1, 1)
        self.push_plus = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_plus.sizePolicy().hasHeightForWidth())
        self.push_plus.setSizePolicy(sizePolicy)
        self.push_plus.setStyleSheet("QPushButton{\n"
"font: 75 36pt \"MS Shell Dlg 2\";\n"
" color: rgb(255, 255, 127);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"color: rgb(255, 255, 255);\n"
" background-color: rgb(230, 230, 0);\n"
"}\n"
"\n"
"QPushBotton:pressed{\n"
"color: rgb(232, 77, 0);\n"
"}")
        self.push_plus.setObjectName("push_plus")
        self.gridLayout.addWidget(self.push_plus, 1, 3, 1, 1)
        self.push_7 = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.push_7.sizePolicy().hasHeightForWidth())
        self.push_7.setSizePolicy(sizePolicy)
        self.push_7.setStyleSheet("QPushButton{\n"
"font: 75 20pt \"MS Shell Dlg 2\";\n"
" \n"
" color: rgb(255, 255, 255);\n"
"border-radius:30px;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"background-color: rgb(52, 157, 77);\n"
"}\n"
"\n"
"QPushBotton{\n"
"color: rgb(185, 185, 185);\n"
"}")
        self.push_7.setObjectName("push_7")
        self.gridLayout.addWidget(self.push_7, 4, 0, 1, 1)
        # --- result display (row 0, spanning all four columns) ---
        self.label = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(50)
        sizePolicy.setVerticalStretch(13)
        sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
        self.label.setSizePolicy(sizePolicy)
        self.label.setStyleSheet("font: 75 36pt \"MS Shell Dlg 2\";\n"
"\n"
"color: rgb(255, 255, 255);")
        self.label.setText("")
        self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 4)
        self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 341, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # --- signal wiring: each button drives its method_* slot ---
        self.push_1.clicked.connect(self.method_1)
        self.push_2.clicked.connect(self.method_2)
        self.push_3.clicked.connect(self.method_3)
        self.push_4.clicked.connect(self.method_4)
        self.push_5.clicked.connect(self.method_5)
        self.push_6.clicked.connect(self.method_6)
        self.push_7.clicked.connect(self.method_7)
        self.push_8.clicked.connect(self.method_8)
        self.push_9.clicked.connect(self.method_9)
        self.push_zero.clicked.connect(self.method_zero)
        self.push_point.clicked.connect(self.method_point)
        self.push_plus.clicked.connect(self.method_plus)
        self.push_min.clicked.connect(self.method_min)
        self.push_mult.clicked.connect(self.method_mult)
        self.push_div.clicked.connect(self.method_div)
        self.push_equal.clicked.connect(self.method_equal)
        self.push_clear.clicked.connect(self.method_clear)
        self.push_del.clicked.connect(self.method_del)
def method_1(self):
text=self.label.text()
self.label.setText(text+"1")
def method_2(self):
text=self.label.text()
self.label.setText(text+"2")
def method_3(self):
text=self.label.text()
self.label.setText(text+"3")
def method_4(self):
text=self.label.text()
self.label.setText(text+"4")
def method_5(self):
text=self.label.text()
self.label.setText(text+"5")
def method_6(self):
text=self.label.text()
self.label.setText(text+"6")
def method_7(self):
text=self.label.text()
self.label.setText(text+"7")
def method_8(self):
text=self.label.text()
self.label.setText(text+"8")
def method_9(self):
text=self.label.text()
self.label.setText(text+"9")
def method_zero(self):
text=self.label.text()
self.label.setText(text+"0")
def method_point(self):
text=self.label.text()
self.label.setText(text+".")
def method_plus(self):
text=self.label.text()
self.label.setText(text+"+")
def method_min(self):
text=self.label.text()
self.label.setText(text+"-")
def method_mult(self):
text=self.label.text()
self.label.setText(text+"*")
def method_div(self):
text=self.label.text()
self.label.setText(text+"/")
def method_clear(self):
self.label.setText("")
def method_del(self):
text=self.label.text()
self.label.setText(text[:len(text)-1])
def method_equal(self):
text=self.label.text()
try:
ans=eval(text)
self.label.setText(str(ans))
except:
self.label.setText("Wrong Input")
    def retranslateUi(self, MainWindow):
        """Set (translatable) button captions and keyboard shortcuts.

        Generated by pyuic5; shortcuts mirror each button's meaning so the
        calculator is fully keyboard-driven (e.g. Backspace = Del).
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Calculator"))
        self.push_5.setText(_translate("MainWindow", "5"))
        self.push_5.setShortcut(_translate("MainWindow", "5"))
        self.push_3.setText(_translate("MainWindow", "3"))
        self.push_3.setShortcut(_translate("MainWindow", "3"))
        self.push_equal.setText(_translate("MainWindow", "="))
        self.push_equal.setShortcut(_translate("MainWindow", "="))
        self.push_clear.setText(_translate("MainWindow", "Clear"))
        self.push_min.setText(_translate("MainWindow", "-"))
        self.push_min.setShortcut(_translate("MainWindow", "-"))
        self.push_del.setText(_translate("MainWindow", "Del"))
        self.push_del.setShortcut(_translate("MainWindow", "Backspace"))
        self.push_point.setText(_translate("MainWindow", "."))
        self.push_point.setShortcut(_translate("MainWindow", "."))
        self.push_1.setText(_translate("MainWindow", "1"))
        self.push_1.setShortcut(_translate("MainWindow", "1"))
        self.push_2.setText(_translate("MainWindow", "2"))
        self.push_2.setShortcut(_translate("MainWindow", "2"))
        # Multiply shows "x" but is typed with "*".
        self.push_mult.setText(_translate("MainWindow", "x"))
        self.push_mult.setShortcut(_translate("MainWindow", "*"))
        self.push_9.setText(_translate("MainWindow", "9"))
        self.push_9.setShortcut(_translate("MainWindow", "9"))
        self.push_8.setText(_translate("MainWindow", "8"))
        self.push_8.setShortcut(_translate("MainWindow", "8"))
        self.push_4.setText(_translate("MainWindow", "4"))
        self.push_4.setShortcut(_translate("MainWindow", "4"))
        self.push_zero.setText(_translate("MainWindow", "0"))
        self.push_zero.setShortcut(_translate("MainWindow", "0"))
        self.push_6.setText(_translate("MainWindow", "6"))
        self.push_6.setShortcut(_translate("MainWindow", "6"))
        # Divide shows the division sign but is typed with "/".
        self.push_div.setText(_translate("MainWindow", "÷"))
        self.push_div.setShortcut(_translate("MainWindow", "/"))
        self.push_plus.setText(_translate("MainWindow", "+"))
        self.push_plus.setShortcut(_translate("MainWindow", "+"))
        self.push_7.setText(_translate("MainWindow", "7"))
        self.push_7.setShortcut(_translate("MainWindow", "7"))
# Standalone entry point: build the main window from the generated UI class
# and run the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
11270728 | <gh_stars>0
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.public_key_sign_key_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from proto import common_pb2
from proto import ecdsa_pb2
from proto import tink_pb2
from tink import tink_config
from tink.signature import public_key_sign
from tink.signature import public_key_sign_key_manager
from tink.signature import public_key_verify_key_manager
def setUpModule():
    """Register all Tink key managers once, before any test in this module."""
    tink_config.register()
def new_ecdsa_key_template(hash_type, curve_type, encoding):
    """Build a KeyTemplate describing an ECDSA private key.

    Args:
      hash_type: hash enum from common_pb2 (e.g. SHA256).
      curve_type: elliptic-curve enum from common_pb2 (e.g. NIST_P256).
      encoding: signature-encoding enum from ecdsa_pb2 (e.g. DER).

    Returns:
      A tink_pb2.KeyTemplate with the serialized EcdsaKeyFormat as value.
    """
    fmt = ecdsa_pb2.EcdsaKeyFormat()
    fmt.params.hash_type = hash_type
    fmt.params.curve = curve_type
    fmt.params.encoding = encoding
    template = tink_pb2.KeyTemplate()
    template.type_url = (
        'type.googleapis.com/google.crypto.tink.EcdsaPrivateKey')
    template.value = fmt.SerializeToString()
    return template
class PublicKeySignKeyManagerTest(absltest.TestCase):
    """Tests for the ECDSA sign/verify key managers backed by the C++ registry."""
    def setUp(self):
        super(PublicKeySignKeyManagerTest, self).setUp()
        # Private-key (signing) and public-key (verification) managers for
        # the same ECDSA key type.
        self.key_manager = public_key_sign_key_manager.from_cc_registry(
            'type.googleapis.com/google.crypto.tink.EcdsaPrivateKey')
        self.key_manager_verify = public_key_verify_key_manager.from_cc_registry(
            'type.googleapis.com/google.crypto.tink.EcdsaPublicKey')
    def test_primitive_class(self):
        self.assertEqual(self.key_manager.primitive_class(),
                         public_key_sign.PublicKeySign)
    def test_key_type(self):
        self.assertEqual(self.key_manager.key_type(),
                         'type.googleapis.com/google.crypto.tink.EcdsaPrivateKey')
    def test_new_key_data(self):
        # Generate a fresh key and check every field of the resulting proto.
        key_template = new_ecdsa_key_template(common_pb2.SHA256,
                                              common_pb2.NIST_P256, ecdsa_pb2.DER)
        key_data = self.key_manager.new_key_data(key_template)
        self.assertEqual(key_data.type_url, self.key_manager.key_type())
        key = ecdsa_pb2.EcdsaPrivateKey()
        key.ParseFromString(key_data.value)
        public_key = key.public_key
        self.assertEqual(key.version, 0)
        self.assertEqual(public_key.version, 0)
        self.assertEqual(public_key.params.hash_type, common_pb2.SHA256)
        self.assertEqual(public_key.params.curve, common_pb2.NIST_P256)
        self.assertEqual(public_key.params.encoding, ecdsa_pb2.DER)
        # P-256 scalar: 32-byte private key value.
        self.assertLen(key.key_value, 32)
    def test_signature_success(self):
        # Round trip: sign with the private key, verify with the public key.
        priv_key = self.key_manager.new_key_data(
            new_ecdsa_key_template(common_pb2.SHA256, common_pb2.NIST_P256,
                                   ecdsa_pb2.DER))
        pub_key = self.key_manager.public_key_data(priv_key)
        verifier = self.key_manager_verify.primitive(pub_key)
        signer = self.key_manager.primitive(priv_key)
        data = b'data'
        signature = signer.sign(data)
        # Starts with a DER sequence
        self.assertEqual(bytearray(signature)[0], 0x30)
        verifier.verify(signature, data)
# Allow running this test module directly via the absl test runner.
if __name__ == '__main__':
    absltest.main()
| StarcoderdataPython |
223813 | <reponame>gauravaror/pydano
import requests
from collections import Counter
import argparse
import tqdm
import json
import os
import logging
import random
import functools
import pandas as pd
from pydano.blockfrost.top_holder import TopHolders
# Command-line interface for the token-holder airdrop script.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--allow_multiple",
    help="Allow multiple entry from a single person",
    action="store_true",
)
parser.add_argument(
    "--min_holdings",
    help="Minimum holding to be eligible for airdrop",
    type=int,
    default=1,
)
parser.add_argument("--log_level", help="Set log level to", type=str, default="INFO")
# One or more policy ids; when several are given, only wallets holding
# assets under every policy are considered (intersection, see below).
parser.add_argument(
    "--policy_id",
    help="Policy Id to get top holders for",
    type=str,
    nargs="+",
    default=None,
)
parser.add_argument("--api_key", help="Blockfrost API Key", type=str)
parser.add_argument(
    "--total_pages", help="Total Pages to request", type=int, default=1000
)
parser.add_argument(
    "--use_cache", help="Use cache instead of running", action="store_true"
)
parser.add_argument("--mainnet", help="Use Mainnet", action="store_true")
# Path to a JSON file containing a list of stake addresses to exclude.
parser.add_argument(
    "--exclude_address", help="Exclude Addresses", type=str, default=None
)
parser.add_argument(
    "--sample_airdrop",
    help="Number of people to sample for airdrop",
    type=int,
    default=1,
)
parser.add_argument(
    "--token_name",
    help="Used to add token in airdrop transaction file",
    type=str,
    default=None,
)
args = parser.parse_args()
logging.getLogger().setLevel(args.log_level)
# NOTE(review): --total_pages has default=1000, so this guard can only fire
# for an explicit 0 — confirm whether 0 should be rejected or the check dropped.
if not args.total_pages:
    raise Exception("Give total_pages")
if len(args.policy_id) == 0:
    raise ValueError("Need atleast one policy_id to do the airdrop")
# Collect a holders Counter per policy id.
holders = []
for policy_id in args.policy_id:
    top_holders = TopHolders(
        policy_id, args.api_key, args.total_pages, args.use_cache, args.mainnet
    )
    top_holders.gather_assets()
    top_holders.query_assets()
    holders.append(top_holders.get_holders_counter())
# Multiple policies: keep only wallets present under ALL policies by
# intersecting the Counters, written back into the last TopHolders instance.
# NOTE(review): this reaches into the internal attribute `c` — verify against
# the TopHolders implementation.
if len(args.policy_id) > 1:
    top_holders.c = functools.reduce(lambda a, b: a & b, holders)
holders = top_holders.get_all_holders()
df = pd.DataFrame(holders)
df.to_csv("top_holders.csv")
# Optionally drop explicitly excluded stake addresses (JSON list file).
if args.exclude_address:
    exclude_addresses = json.load(open(args.exclude_address, "r"))
    df = df[~df.stake_address.isin(exclude_addresses)]
raffle_addresses = []
if args.min_holdings:
    df = df[df.holding_count >= args.min_holdings]
# Build the raffle pool: one ticket per wallet, or one per held asset when
# --allow_multiple is set.
for idx, row in df.iterrows():
    count = row["holding_count"] if args.allow_multiple else 1
    addr = row["stake_address"]
    if not addr:
        continue
    raffle_addresses.extend([addr] * count)
print(
    f"Total addresses: {len(raffle_addresses)}, unique addresses: {len(set(raffle_addresses))}"
)
print(f"Doing airdrop from: {len(raffle_addresses)}")
# Draw the winners and resolve each stake address to a payment address.
addresses = random.sample(raffle_addresses, args.sample_airdrop)
addresses = list(
    map(
        lambda x: {"stake": x, "address": top_holders.get_payment_address(x)[0]},
        addresses,
    )
)
if args.token_name:
    for i in addresses:
        i["token_name"] = args.token_name
json.dump(addresses, open("airdrop_pydano_transaction.json", "w"), indent=4)
print(f"Do airdrop to:", addresses)
print("Holders found", len(holders))
| StarcoderdataPython |
1609581 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
__author__ = 'jfernandez'
import http
from lettuce import world
class ProductSdcRequest:
    """Thin client for the SDC product catalog REST API.

    Authenticates against Keystone once at construction time and then issues
    XML requests against the SDC endpoint, storing each HTTP response in the
    lettuce `world.response` for the test steps to inspect.
    """
    def __init__(self, keystone_url, sdc_url, tenant, user, password, vdc):
        """
        Init class vars and get initial token from keystone
        """
        self.sdc_url = sdc_url
        self.vdc = vdc
        self.keystone_url = keystone_url
        self.user = user
        self.password = password
        self.tenant = tenant
        # Token is fetched once; long test runs may need re-authentication.
        self.token = self.__get__token()
    def __get__token(self):
        """ Get token from keystone """
        return http.get_token(self.keystone_url + '/tokens', self.tenant, self.user, self.password)
    def __get_product_sdc(self, url):
        """ Get product from SDC (GET with auth/tenant headers). """
        headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,
                   'Content-Type': "application/xml"}
        return http.get(url, headers)
    def __add_product_sdc(self, url, product_sdc_payload):
        """ Add product to SDC catalog (POST with XML payload). """
        headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,
                   'Content-Type': "application/xml"}
        return http.post(url, headers, product_sdc_payload)
    def __delete_product_sdc(self, url):
        """ Delete product from SDC catalog """
        headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,
                   'Content-Type': "application/xml"}
        return http.delete(url, headers)
    def __delete_node(self, url):
        # Same headers as the other helpers; used for chefClient node removal.
        headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,
                   'Content-Type': "application/xml"}
        return http.delete(url, headers)
    def get_product(self, product_name):
        """ Get product from SDC catalog; response stored in world.response. """
        url = "%s/%s/%s/%s" % (self.sdc_url, "catalog", "product", product_name)
        world.response = self.__get_product_sdc(url)
    def get_product_release(self, product_name, product_release):
        """ Get product release from SDC catalog """
        url = "%s/%s/%s/%s/%s/%s" % (self.sdc_url, "catalog", "product", product_name, "release", product_release)
        world.response = self.__get_product_sdc(url)
    def add_product(self, product_name, product_description):
        """ ADD product to SDC catalog """
        url = "%s/%s/%s" % (self.sdc_url, "catalog", "product")
        payload = "<product><name>%s</name><description>%s</description></product>" \
                  % (product_name, product_description)
        world.response = self.__add_product_sdc(url, payload)
    def add_product_with_installator(self, product_name, product_description, installator):
        """ ADD product to SDC catalog with a custom installator metadata. """
        url = "%s/%s/%s" % (self.sdc_url, "catalog", "product")
        payload = "<product><name>%s</name><description>%s</description>" \
                  "<metadatas><key>installator</key><value>%s</value></metadatas></product>" \
                  % (product_name, product_description, installator)
        world.response = self.__add_product_sdc(url, payload)
    def add_product_with_attributes(self, product_name, product_description, attribute_list):
        """ ADD product to SDC catalog with attributes.

        attribute_list: iterable of dicts with 'key', 'value' and 'type'.
        """
        url = "%s/%s/%s" % (self.sdc_url, "catalog", "product")
        attribute_list_xml = ""
        for attribute in attribute_list:
            attribute_list_xml += "<attributes><key>%s</key><value>%s</value><type>%s</type></attributes>" % \
                                  (attribute['key'], attribute['value'], attribute['type'])
        payload = "<product><name>%s</name><description>%s</description>%s</product>" \
                  % (product_name, product_description, attribute_list_xml)
        world.response = self.__add_product_sdc(url, payload)
    def add_product_with_metadatas(self, product_name, product_description, metadata_list):
        """ ADD product to SDC catalog with metadatas.

        metadata_list: iterable of dicts with 'key' and 'value'.
        """
        url = "%s/%s/%s" % (self.sdc_url, "catalog", "product")
        metadata_list_xml = ""
        for metadata in metadata_list:
            metadata_list_xml += "<metadatas><key>%s</key><value>%s</value></metadatas>" % \
                                 (metadata['key'], metadata['value'])
        payload = "<product><name>%s</name><description>%s</description>%s</product>" \
                  % (product_name, product_description, metadata_list_xml)
        world.response = self.__add_product_sdc(url, payload)
    def add_product_with_attributes_and_installator(self, product_name, product_description, attribute_list,
                                                    installator):
        """ ADD product with attributes plus an installator metadata.

        Attribute 'type' is optional here, unlike add_product_with_attributes.
        """
        url = "%s/%s/%s" % (self.sdc_url, "catalog", "product")
        attribute_list_xml = ""
        for attribute in attribute_list:
            if 'type' in attribute:
                attribute_list_xml += "<attributes><key>%s</key><value>%s</value><type>%s</type></attributes>" % \
                                      (attribute['key'], attribute['value'], attribute['type'])
            else:
                attribute_list_xml += "<attributes><key>%s</key><value>%s</value></attributes>" % \
                                      (attribute['key'], attribute['value'])
        installator_metadata = "<metadatas><key>installator</key><value>%s</value></metadatas>" % installator
        payload = "<product><name>%s</name><description>%s</description>%s%s</product>" \
                  % (product_name, product_description, installator_metadata, attribute_list_xml)
        world.response = self.__add_product_sdc(url, payload)
    def add_product_release(self, product_name, product_release):
        """ Add product release to SDC catalog """
        url = "%s/%s/%s/%s/%s" % (self.sdc_url, "catalog", "product", product_name, "release")
        payload = "<productReleaseDto><version>%s</version></productReleaseDto>" % product_release
        world.response = self.__add_product_sdc(url, payload)
    def delete_product(self, product_name):
        """ Delete product from SDC catalog """
        url = "%s/%s/%s/%s" % (self.sdc_url, "catalog", "product", product_name)
        world.response = self.__delete_product_sdc(url)
    def delete_product_release(self, product_name, product_release):
        """ Delete product release from SDC catalog """
        url = "%s/%s/%s/%s/%s/%s" % (self.sdc_url, "catalog", "product", product_name, "release", product_release)
        world.response = self.__delete_product_sdc(url)
    def delete_node(self, node_name):
        """ Delete node from Chef-Server and Puppet-Master """
        url = "%s/%s/%s/%s/%s" % (self.sdc_url, "vdc", self.vdc, "chefClient", node_name)
        world.response = self.__delete_node(url)
def create_product_and_release(self, product_name, product_release, installator=None):
""" Helper: Create product and product release """
self.get_product(product_name)
if world.response.status is not 200:
if installator:
self.add_product_with_installator(product_name, 'QA Tests - PaaS Manager', installator)
else:
self.add_product(product_name, 'QA Tests - PaaS Manager')
self.add_product_release(product_name, product_release)
else:
self.get_product_release(product_name, product_release)
if world.response.status is not 200:
self.add_product_release(product_name, product_release)
world.product_and_release_list.append({'product_name': product_name, 'product_release': product_release})
def create_product_and_release_with_attributes(self, product_name, product_release, attribute_list):
""" Helper: Create product with attributes and it release """
self.get_product(product_name)
if world.response.status is not 200:
self.add_product_with_attributes(product_name, 'QA Tests - PaaS Manager', attribute_list)
self.add_product_release(product_name, product_release)
else:
self.get_product_release(product_name, product_release)
if world.response.status is not 200:
self.add_product_release(product_name, product_release)
world.product_and_release_list.append({'product_name': product_name, 'product_release': product_release})
def create_product_and_release_with_metadatas(self, product_name, product_release, metadata_list):
""" Helper: Create product with custom metadatas and it release """
self.get_product(product_name)
if world.response.status is not 200:
self.add_product_with_metadatas(product_name, 'QA Tests - PaaS Manager - Metadatas', metadata_list)
self.add_product_release(product_name, product_release)
else:
self.get_product_release(product_name, product_release)
if world.response.status is not 200:
self.add_product_release(product_name, product_release)
world.product_and_release_list.append({'product_name': product_name, 'product_release': product_release})
def create_product_and_release_with_attributes_and_installator(self, product_name, product_release, attribute_list,
installator):
""" Helper: Create product with attributes and installator, and its release """
self.get_product(product_name)
if world.response.status is not 200:
self.add_product_with_attributes_and_installator(product_name, 'QA Tests - PaaS Manager', attribute_list,
installator)
self.add_product_release(product_name, product_release)
else:
self.get_product_release(product_name, product_release)
if world.response.status is not 200:
self.add_product_release(product_name, product_release)
world.product_and_release_list.append({'product_name': product_name, 'product_release': product_release})
def delete_product_and_release(self, product_name, product_release):
""" Helper: Delete product and product release """
self.get_product_release(product_name, product_release)
if world.response.status is 200:
self.delete_product_release(product_name, product_release)
self.delete_product(product_name)
else:
self.get_product(product_name)
if world.response.status is 200:
self.delete_product(product_name)
# world.product_and_release_list.remove({'product_name': product_name, 'product_release': product_release})
| StarcoderdataPython |
from setuptools import setup
from os import path

# Read the package long description from the README next to this file.
_here = path.abspath(path.dirname(__file__))
with open(path.join(_here, 'README.md'), encoding='utf-8') as readme_file:
    _readme_text = readme_file.read()

setup(
    name='zarenacord.py',
    author='Bravestone',
    version='2.0.1',
    url='https://github.com/Zarenalabs/zarenacord',
    description='A mirror package for zarenacord. Please install that instead.',
    license='MIT',
    long_description=_readme_text,
    long_description_content_type='text/markdown',
    packages=[],  # mirror package: ships no code, only the dependency below
    install_requires=['zarenacord>=2.0.0'],
)
8195074 | """Modules that handles how the post shall be sent according to the chat (adming group or channel)"""
from random import choice
from telegram import Message, Bot, InlineKeyboardMarkup
from modules.data.data_reader import config_map
from modules.data.meme_data import MemeData
from modules.utils.keyboard_util import get_approve_kb, get_vote_kb
def send_post_to(message: Message, bot: Bot, destination: str, user_id: int = None) -> Message:
    """Sends the post to the specified destination:
    admin -> to the admin group, so it can be approved
    channel -> to the channel, so it can be enjoyed by the users (and voted, if comments are disabled)
    channel_group -> to the group associated to the channel, so that users can vote the post (if comments are enabled)

    Args:
        message (Message): message that contains the post to send
        bot (Bot): bot
        destination (str): destination of the message (admin | channel | channel_group)
        user_id (int, optional): id of the user that originated the post. Defaults to None.

    Returns:
        Message: message used to send a post to a specific destination, or None for an unknown destination
    """
    text = message.text
    photo = message.photo
    voice = message.voice
    audio = message.audio
    video = message.video
    animation = message.animation
    sticker = message.sticker
    caption = message.caption
    reply_markup = None
    post_message = None
    if destination == "admin":  # send the post to the admin group so it can be approved
        chat_id = config_map['meme']['group_id']
        reply_markup = get_approve_kb()
    elif destination == "channel":  # send the post to the channel...
        chat_id = config_map['meme']['channel_id']
        if not config_map['meme']['comments']:  # ... append the voting Inline Keyboard, if comments are not to be supported
            reply_markup = get_vote_kb()
    elif destination == "channel_group":  # sends a support message with the voting Inline Keyboard in the comment session
        post_message = send_helper_message(user_id=user_id,
                                           chat_id=config_map['meme']['channel_group_id'],
                                           reply_message_id=message.message_id,
                                           bot=bot,
                                           reply_markup=get_vote_kb())
    else:
        # Bug fix: the error message used to reference the wrong function name
        # ("send_message_to") and misspelled "invalid".
        print("[error] send_post_to: invalid destination")
        return None
    if post_message is None:
        # Forward whichever media type the original message carried; only photo
        # and video messages can carry a caption.
        if text:
            post_message = bot.sendMessage(chat_id=chat_id, text=text, reply_markup=reply_markup)
        elif photo:
            post_message = bot.sendPhoto(chat_id=chat_id, photo=photo[-1], caption=caption, reply_markup=reply_markup)
        elif voice:
            post_message = bot.sendVoice(chat_id=chat_id, voice=voice, reply_markup=reply_markup)
        elif audio:
            post_message = bot.sendAudio(chat_id=chat_id, audio=audio, reply_markup=reply_markup)
        elif video:
            post_message = bot.sendVideo(chat_id=chat_id, video=video, caption=caption, reply_markup=reply_markup)
        elif animation:
            post_message = bot.sendAnimation(chat_id=chat_id, animation=animation, reply_markup=reply_markup)
        elif sticker:
            post_message = bot.sendSticker(chat_id=chat_id, sticker=sticker, reply_markup=reply_markup)
    if destination == "admin":  # insert the post among the pending ones
        MemeData.insert_pending_post(user_message=message, admin_message=post_message)
    elif destination == "channel":  # insert the post among the published ones and show the credit...
        if not config_map['meme']['comments']:  # ... but only if the user can vote directly on the post
            MemeData.insert_published_post(post_message)
            send_helper_message(user_id=user_id,
                                chat_id=config_map['meme']['channel_id'],
                                reply_message_id=post_message.message_id,
                                bot=bot)
    elif destination == "channel_group":  # insert the first comment among the published posts, so that votes can be tracked
        MemeData.insert_published_post(post_message)
    return post_message
def send_helper_message(user_id: int,
                        chat_id: int,
                        reply_message_id: int,
                        bot: Bot,
                        reply_markup: InlineKeyboardMarkup = None) -> Message:
    """Send a helper message crediting the author of a post, optionally
    carrying the voting inline keyboard (used when comments are enabled).

    Args:
        user_id (int): id of the user that originated the post
        chat_id (int): id of the chat to which send the helper message
        reply_message_id (int): id of the message the helper message will reply to
        bot (Bot): bot
        reply_markup (InlineKeyboardMarkup, optional): voting Inline Keyboard. Defaults to None.

    Returns:
        Message: helper message
    """
    # Default to a random pseudonym; override it only when the author opted in
    # to be credited and actually has a Telegram username.
    signature = anonym_name()
    if MemeData.is_credited(user_id=user_id):
        tg_username = bot.getChat(user_id).username
        if tg_username:
            signature = "@" + tg_username
    return bot.send_message(chat_id=chat_id,
                            text="by: " + signature,
                            reply_markup=reply_markup,
                            reply_to_message_id=reply_message_id)
def show_admins_votes(chat_id: int, message_id: int, bot: Bot, approve: bool):
    """After a post has been approved or rejected, list the admins that voted
    that way, remove the post's keyboard and reply with the summary.

    Args:
        chat_id (int): id of the admin group
        message_id (int): id of the post in question in the group
        bot (Bot): bot
        approve (bool): whether the vote is approve or reject
    """
    voters = MemeData.get_admin_list_votes(g_message_id=message_id, group_id=chat_id, approve=approve)
    summary = "Approvato da:\n" if approve else "Rifiutato da:\n"
    for admin_id in voters:
        username = bot.get_chat(admin_id).username
        # Fall back to the first name for admins without a public username.
        summary += f"@{username}\n" if username else f"{bot.get_chat(admin_id).first_name}\n"
    bot.edit_message_reply_markup(chat_id=chat_id, message_id=message_id, reply_markup=None)
    bot.send_message(chat_id=chat_id, text=summary, reply_to_message_id=message_id)
def anonym_name() -> str:
    """Pick a pseudonym for an anonymous user.

    Returns:
        str: one of the predefined pseudonyms, chosen at random
    """
    pseudonyms = (
        "anonimo", "ciccio", "tizio", "puzzola", "patato", "literally who",
        "mucro", "topolino", "cribbio", "signorina", "pensione a Cuba",
        "aranciataLover", "hotlena", "darkangelcraft", "I PUFFI", "pippo",
        "my love", "?", "signor nessuno", "V per Vedetta (ops)", "bonk",
        "foot", "cycle", "impostore", "spook", "gessetto impaurito",
        "shitposter", "weeb",
    )
    return choice(pseudonyms)
| StarcoderdataPython |
# Paths of the datasets used throughout the project.
CLEAN_VEHICLES_DATASET = "../datasets/clean_vehicles.csv"
CLEAN_VEHICLES_2_DATASET = "../datasets/clean_vehicles_2.csv"

# Bounding corner points of the USA in geographic coordinates (lat, lon).
TOP_LEFT_PT = (52.920556, -172.437778)
TOP_RIGHT_PT = (71.388889, -156.479167)
BOTTOM_RIGHT_PT = (44.816667, -66.916667)
BOTTOM_LEFT_PT = (18.744167, -155.681111)

# US states (plus the District of Columbia) mapped to their two-letter codes.
STATES_DICT = {
    'Alabama': 'al', 'Alaska': 'ak', 'Arizona': 'az', 'Arkansas': 'ar',
    'California': 'ca', 'Colorado': 'co', 'Connecticut': 'ct',
    'District of Columbia': 'dc', 'Delaware': 'de', 'Florida': 'fl',
    'Georgia': 'ga', 'Hawaii': 'hi', 'Idaho': 'id', 'Illinois': 'il',
    'Indiana': 'in', 'Iowa': 'ia', 'Kansas': 'ks', 'Kentucky': 'ky',
    'Louisiana': 'la', 'Maine': 'me', 'Maryland': 'md', 'Massachusetts': 'ma',
    'Michigan': 'mi', 'Minnesota': 'mn', 'Mississippi': 'ms', 'Missouri': 'mo',
    'Montana': 'mt', 'Nebraska': 'ne', 'Nevada': 'nv', 'New Hampshire': 'nh',
    'New Jersey': 'nj', 'New Mexico': 'nm', 'New York': 'ny',
    'North Carolina': 'nc', 'North Dakota': 'nd', 'Ohio': 'oh',
    'Oklahoma': 'ok', 'Oregon': 'or', 'Pennsylvania': 'pa',
    'Rhode Island': 'ri', 'South Carolina': 'sc', 'South Dakota': 'sd',
    'Tennessee': 'tn', 'Texas': 'tx', 'Utah': 'ut', 'Vermont': 'vt',
    'Virginia': 'va', 'Washington': 'wa', 'West Virginia': 'wv',
    'Wisconsin': 'wi', 'Wyoming': 'wy',
}
11362704 | <gh_stars>100-1000
# Loader classes re-exported at package level for convenient importing.
from .earnings import EarningsCalendarLoader
from .buyback_auth import (
    CashBuybackAuthorizationsLoader,
    ShareBuybackAuthorizationsLoader
)
from .dividends import (
    DividendsByAnnouncementDateLoader,
    DividendsByExDateLoader,
    DividendsByPayDateLoader,
)
from .equity_pricing_loader import USEquityPricingLoader
# Explicit public API of this package: exactly the loaders imported above.
__all__ = [
    'CashBuybackAuthorizationsLoader',
    'DividendsByAnnouncementDateLoader',
    'DividendsByExDateLoader',
    'DividendsByPayDateLoader',
    'EarningsCalendarLoader',
    'ShareBuybackAuthorizationsLoader',
    'USEquityPricingLoader',
]
11368488 | import numpy
import pyaudio
def main():
    """Placeholder entry point; audio capture logic is not implemented yet."""
    # Bug fix: a function body cannot consist of a comment alone - the module
    # previously failed to import with a SyntaxError.
    pass
| StarcoderdataPython |
3519000 | <filename>pyesmini/pyesminiRM.py
from ctypes import *
import ctypes
from .shared import *
# Mirrors the PositionData struct declared in esminiRMLib.hpp: a position in
# both world (x, y, z, h, p, r) and road (roadId, laneId, laneOffset, s) frames.
class PositionData(Structure):
    _fields_ = [
        ('x', c_float),
        ('y', c_float),
        ('z', c_float),
        ('h', c_float),  # heading
        ('p', c_float),  # pitch
        ('r', c_float),  # roll
        ('hRelative', c_float),  # heading relative to the lane direction
        ('roadId', c_int),
        ('laneId', c_int),
        ('laneOffset', c_float),
        ('s', c_float),  # distance along the road
    ]
# Mirrors the RoadLaneInfo struct declared in esminiRMLib.hpp: lane geometry
# and attributes at a probed point along the road.
class RoadLaneInfo(Structure):
    _fields_ = [
        ('pos', c_float * int(3)),  # world position (x, y, z)
        ('heading', c_float),
        ('pitch', c_float),
        ('roll', c_float),
        ('width', c_float),
        ('curvature', c_float),
        ('speed_limit', c_float),
    ]
# Mirrors the RoadProbeInfo struct declared in esminiRMLib.hpp: lane info at
# the probe point plus its location relative to the querying position.
class RoadProbeInfo(Structure):
    _fields_ = [
        ('road_lane_info', RoadLaneInfo),
        ('relative_pos', c_float * int(3)),  # probe position relative to the query position
        ('relative_h', c_float),  # probe heading relative to the query heading
    ]
# Mirrors the PositionDiff struct declared in esminiRMLib.hpp: the delta
# between two road positions (longitudinal, lateral, lane id).
class PositionDiff(Structure):
    _fields_ = [
        ('ds', c_float),  # longitudinal distance
        ('dt', c_float),  # lateral distance
        ('dLaneId', c_int),
    ]
class PyEsminiRM:
    """Thin ctypes wrapper around the esmini RoadManager C API (esminiRMLib)."""

    def __init__(self, odrFilename, fromFile=True):
        """Load the platform-specific shared library and initialize the road
        manager with an OpenDRIVE file.

        @param odrFilename Path to the OpenDRIVE (.xodr) file
        @param fromFile Kept for backward compatibility; only file-based
               initialization is currently supported
        """
        dir_path = os.path.dirname(os.path.realpath(__file__))
        if platform == "linux" or platform == "linux2":
            self.se = CDLL(dir_path + "/libesminiRMLib.so")
        elif platform == "darwin":
            self.se = CDLL(dir_path + "/libesminiRMLib.dylib")
        elif platform == "win32":
            # Escaped backslash avoids the invalid "\l" escape sequence warning.
            self.se = CDLL(dir_path + "\\libesminiRMLib.dll")
        else:
            print("Unsupported platform: {}".format(platform))
            raise Exception("Loading shared library: shared library not found")

        self.se.RM_Init.argtypes = [String]
        self.se.RM_Init.restype = c_int
        if fromFile:
            self.se.RM_Init(odrFilename)

        # Declare the C signatures of every wrapped function once, up front.
        self.se.RM_CreatePosition.argtypes = []
        self.se.RM_CreatePosition.restype = c_int
        self.se.RM_GetNrOfPositions.argtypes = []
        self.se.RM_GetNrOfPositions.restype = c_int
        self.se.RM_DeletePosition.argtypes = [c_int]
        self.se.RM_DeletePosition.restype = c_int
        self.se.RM_GetNumberOfRoads.argtypes = []
        self.se.RM_GetNumberOfRoads.restype = c_int
        self.se.RM_GetIdOfRoadFromIndex.argtypes = [c_int]
        self.se.RM_GetIdOfRoadFromIndex.restype = c_int
        self.se.RM_GetRoadLength.argtypes = [c_int]
        self.se.RM_GetRoadLength.restype = c_float
        self.se.RM_GetRoadNumberOfLanes.argtypes = [c_int, c_float]
        self.se.RM_GetRoadNumberOfLanes.restype = c_int
        self.se.RM_GetLaneIdByIndex.argtypes = [c_int, c_int, c_float]
        self.se.RM_GetLaneIdByIndex.restype = c_int
        # Bug fix: RM_GetLaneIdByIndex was declared a second time with a
        # conflicting signature, while RM_SetLanePosition - which
        # setLanePosition() needs - was never declared.
        self.se.RM_SetLanePosition.argtypes = [c_int, c_int, c_int, c_float, c_float, c_bool]
        self.se.RM_SetLanePosition.restype = c_int
        self.se.RM_SetS.argtypes = [c_int, c_float]
        self.se.RM_SetS.restype = c_int
        self.se.RM_SetWorldPosition.argtypes = [c_int, c_float, c_float, c_float, c_float, c_float, c_float]
        self.se.RM_SetWorldPosition.restype = c_int
        # Bug fix: this declaration appeared twice; the duplicate was removed.
        self.se.RM_SetWorldXYHPosition.argtypes = [c_int, c_float, c_float, c_float]
        self.se.RM_SetWorldXYHPosition.restype = c_int
        self.se.RM_PositionMoveForward.argtypes = [c_int, c_float, c_int]
        self.se.RM_PositionMoveForward.restype = c_int
        self.se.RM_GetPositionData.argtypes = [c_int, POINTER(PositionData)]
        self.se.RM_GetPositionData.restype = c_int
        self.se.RM_GetSpeedLimit.argtypes = [c_int]
        self.se.RM_GetSpeedLimit.restype = c_float
        self.se.RM_GetLaneInfo.argtypes = [c_int, c_float, POINTER(RoadLaneInfo), c_int]
        self.se.RM_GetLaneInfo.restype = c_int
        self.se.RM_GetProbeInfo.argtypes = [c_int, c_float, POINTER(RoadProbeInfo), c_int]
        self.se.RM_GetProbeInfo.restype = c_int
        self.se.RM_SubtractAFromB.argtypes = [c_int, c_int, POINTER(PositionDiff)]
        self.se.RM_SubtractAFromB.restype = c_int

    def close(self):
        """Close down the road manager and release its resources."""
        self.se.RM_Close.argtypes = []
        self.se.RM_Close.restype = c_int
        return self.se.RM_Close()

    def createPosition(self):
        """Create a position object and return its handle."""
        return self.se.RM_CreatePosition()

    def getNrOfPositions(self):
        """Return the number of created position objects."""
        return self.se.RM_GetNrOfPositions()

    def deletePosition(self, handle):
        """Delete one position object, or all of them when handle is -1.

        @return True on success, False otherwise
        """
        return self.se.RM_DeletePosition(handle) >= 0

    def getNumberOfRoads(self):
        """Return the total number of roads in the loaded OpenDRIVE network."""
        return self.se.RM_GetNumberOfRoads()

    def getIdOfRoadFromIndex(self, index):
        """Return the road ID of the road with the given index."""
        return self.se.RM_GetIdOfRoadFromIndex(index)

    def getRoadLength(self, id):
        """Return the length of the road with the given ID (0.0 if unknown)."""
        return self.se.RM_GetRoadLength(id)

    def getRoadNumberOfLanes(self, roadId, s):
        """Return the number of drivable lanes at distance s along a road."""
        return self.se.RM_GetRoadNumberOfLanes(roadId, s)

    def getLaneIdByIndex(self, roadId, laneIndex, s):
        """Return the lane ID of the lane with the given index at distance s."""
        return self.se.RM_GetLaneIdByIndex(roadId, laneIndex, s)

    def setLanePosition(self, handle, roadId, laneId, laneOffset, s, align=True):
        """Set position from road coordinates; world coordinates are derived.

        @param align If True the heading is reset to the lane driving direction
        @return True on success, False otherwise
        """
        # Bug fix: this previously called RM_GetLaneIdByIndex with an
        # incompatible argument list; RM_SetLanePosition is the intended entry.
        return self.se.RM_SetLanePosition(handle, roadId, laneId, laneOffset, s, align) >= 0

    def setS(self, handle, s):
        """Set the s (distance) part of a lane position.

        @return True on success, False otherwise
        """
        return self.se.RM_SetS(handle, s) >= 0

    def stWorldPosition(self, handle, x, y, z, h, p, r):
        """Set position from world coordinates; road coordinates are derived.

        Name kept (typo and all) for backward compatibility; prefer the
        correctly spelled setWorldPosition alias.

        @return True on success, False otherwise
        """
        return self.se.RM_SetWorldPosition(handle, x, y, z, h, p, r) >= 0

    # Correctly spelled alias for the historical stWorldPosition name.
    setWorldPosition = stWorldPosition

    def setWorldXYHPosition(self, handle, x, y, h):
        """Set position from world X, Y and heading; Z, pitch and road
        coordinates are derived. (The original defined this method twice with
        an identical signature; the duplicate has been removed.)

        @return True on success, False otherwise
        """
        return self.se.RM_SetWorldXYHPosition(handle, x, y, h) >= 0

    def positionMoveForward(self, handle, dist, strategy):
        """Move the position forward along the road.

        @param strategy How to move in a junction with multiple route options
               (see Junction::JunctionStrategyType)
        @return True on success, False otherwise
        """
        return self.se.RM_PositionMoveForward(handle, dist, strategy) >= 0

    def getPositionData(self, handle):
        """Return a PositionData struct for the handle, or None on failure."""
        data = PositionData()
        if self.se.RM_GetPositionData(handle, data) < 0:
            return None
        return data

    def getSpeedLimit(self, handle):
        """Return True when a speed limit could be retrieved, False otherwise.

        NOTE(review): the C call returns the actual speed limit (float) but the
        value is discarded here, matching the original behaviour; callers most
        likely want the number itself - confirm before changing the return type.
        """
        return self.se.RM_GetSpeedLimit(handle) >= 0

    def getLaneInfo(self, handle, lookahead_distance, lookAheadMode):
        """Return lane information at the point lookahead_distance ahead.

        @param lookAheadMode Measurement strategy (see roadmanager
               Position::LookAheadMode enum)
        @return RoadLaneInfo on success, None on failure
        """
        info = RoadLaneInfo()
        if self.se.RM_GetLaneInfo(handle, lookahead_distance, info, lookAheadMode) < 0:
            return None
        return info

    def getProbeInfo(self, handle, lookahead_distance, lookAheadMode):
        """Like getLaneInfo, plus the probe location relative to the current
        position.

        @return RoadProbeInfo on success, None on failure
        """
        info = RoadProbeInfo()
        if self.se.RM_GetProbeInfo(handle, lookahead_distance, info, lookAheadMode) < 0:
            return None
        return info

    def subtractAFromB(self, handleA, handleB):
        """Compute the difference between two position objects.

        NOTE(review): the computed PositionDiff struct is discarded and only
        the raw C return code is returned, matching the original behaviour;
        callers that need the (ds, dt, dLaneId) values should be migrated
        deliberately once the C return convention is confirmed.
        """
        positionDiff = PositionDiff()
        return self.se.RM_SubtractAFromB(handleA, handleB, positionDiff)
| StarcoderdataPython |
1620585 | # coding=utf-8
from setuptools import setup
from cms_support.utils.constants import Constants
# long_description=open('README.md').read(),
# https://betterscientificsoftware.github.io/python-for-hpc/tutorials/python-pypi-packaging/
# All package metadata (name, version, author, ...) is centralized in
# cms_support.utils.constants.Constants so it stays in sync with the code.
setup(
    name=Constants.PACKAGE_NAME,
    version=Constants.VERSION,
    author=Constants.AUTHOR,
    author_email=Constants.EMAIL,
    packages=[Constants.PACKAGE_NAME, Constants.PACKAGE_NAME+'.sites', Constants.PACKAGE_NAME+'.transfers'],
    scripts=[],
    url=Constants.URL_PROJECT,
    license='LICENSE',
    description='Tools to accelerate monitoring in the CMS computing grid',
    install_requires=open('requirements.txt').read().split("\n"),  # one requirement per line
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
| StarcoderdataPython |
381330 | """File that FLASK_APP should be set to. This allows the usage
of make run (for running the dev server), and flask shell
(for debugging the app)"""
from app import app, db, routes  # pylint:disable=unused-import
from app.models import Users, Polls, Options
# Ensure all database tables exist before the app starts serving requests.
db.create_all()
@app.shell_context_processor
def make_shell_context():
    """Flask testing shell that can be run by calling 'flask shell',
    given that FLASK_APP has been set to pollz.py"""
    # Expose the database handle and the model classes in the shell namespace.
    return dict(db=db, Users=Users, Polls=Polls, Options=Options)
| StarcoderdataPython |
8079717 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 3 15:59:51 2018
请实现一个函数,将一个字符串中的每个空格替换成“%20”。
例如,当字符串为We Are Happy.则经过替换之后的字符串为We%20Are%20Happy。
@author: fengxue
"""
def Replace_blank(string):
    """Replace every space in *string* with '%20' (URL-style encoding).

    >>> Replace_blank('we are happy')
    'we%20are%20happy'
    """
    # str.replace performs the substitution in a single C-level pass instead of
    # the original quadratic character-by-character string concatenation.
    return string.replace(' ', '%20')


print(Replace_blank('we are happy'))
| StarcoderdataPython |
3361062 | <filename>elite/loader/maddavo/__init__.py
# -*- coding: UTF8
from elite.loader.maddavo import system
from elite.loader.maddavo import station
from elite.loader.maddavo import prices
from elite.loader.maddavo import items
from datetime import datetime, timedelta
def updateAll(mydb):
    """Refresh station and price data from maddavo, at most once per hour."""
    last_download = mydb.getConfig('lastMaddavoDownload')
    if last_download:
        last_download = datetime.strptime(last_download, "%Y-%m-%d %H:%M:%S")
        if last_download > datetime.now() - timedelta(minutes=60):
            # Downloaded less than an hour ago - nothing to do.
            return
    print("update from maddavo")
    station.loader(mydb).update()
    prices.loader(mydb).update()
6637073 | <reponame>rodrigo-moliveira/PositioningSolver<gh_stars>0
from ..basics.DataType import DataTypeFactory
"""
Utility maps and dicts to deal with the constellations and services.
-> Available constellations: Galileo and GPS
"""
# Constellations supported by this module.
AvailableConstellations = {"GPS", "GAL", "UNKNOWN"}
# Rinex format
# Service/attribute codes (frequency band digit + tracking attribute letter).
GPSAvailableServices = {'1C', '1S', '1L', '1X', '1P', '1W', '1Y', '1M',
                        '2C', '2D', '2S', '2L', '2X', '2P', '2W', '2Y', '2M',
                        '5I', '5Q', '5X'}
GALAvailableServices = {'1A', '1B', '1C', '1X', '1Z',
                        '5I', '5Q', '5X',
                        '7I', '7Q', '7X',
                        '8I', '8Q', '8X',
                        '6A', '6B', '6C', '6X', '6Z'}
# RINEX single-letter system identifiers for each constellation.
ConstellationToCodeMap = {"GPS": "G", "GAL": "E", "UNKNOWN": "U"}
CodeToConstellationMap = {"G": "GPS",
                          "E": "GAL"}
def get_code_type_from_service(services, constellation):
    """Map service codes to pseudo-range (code) data types for a constellation.

    :param services: iterable of RINEX service/attribute codes (e.g. '1C')
    :param constellation: 'GPS' or 'GAL'; any other value yields an empty list
    :return: list of DataType objects 'C<band>' for each recognized service
    """
    if constellation == "GPS":
        known_services = GPSAvailableServices
    elif constellation == "GAL":
        known_services = GALAvailableServices
    else:
        return []
    # The data type is the observation code 'C' plus the frequency band digit.
    return [DataTypeFactory("C" + service[0]) for service in services if service in known_services]
def get_carrier_type_from_service(services, constellation):
    """Map service codes to carrier-phase data types for a constellation.

    :param services: iterable of RINEX service/attribute codes (e.g. '1C')
    :param constellation: 'GPS' or 'GAL'; any other value yields an empty list
    :return: list of DataType objects 'L<band>' for each recognized service
    """
    if constellation == "GPS":
        known_services = GPSAvailableServices
    elif constellation == "GAL":
        known_services = GALAvailableServices
    else:
        return []
    # The data type is the observation code 'L' plus the frequency band digit.
    return [DataTypeFactory("L" + service[0]) for service in services if service in known_services]
| StarcoderdataPython |
class SelfAttention(nn.Module):
    """Multi-head self attention layer

    Args:
        k (int): Size of attention embeddings
        heads (int): Number of attention heads

    Attributes:
        to_keys: Transforms input to k x k*heads key vectors
        to_queries: Transforms input to k x k*heads query vectors
        to_values: Transforms input to k x k*heads value vectors
        unify_heads: combines queries, keys and values to a single vector
    """
    def __init__(self, k, heads=8, dropout=0.1):
        super().__init__()
        self.k, self.heads = k, heads
        # Each projection produces all heads at once: (b, t, k) -> (b, t, k*heads).
        self.to_keys = nn.Linear(k, k * heads, bias=False)
        self.to_queries = nn.Linear(k, k * heads, bias=False)
        self.to_values = nn.Linear(k, k * heads, bias=False)
        # Projects the concatenated head outputs back down to embedding size k.
        self.unify_heads = nn.Linear(k * heads, k)
        # DotProductAttention is project-defined; presumably scaled dot-product
        # attention with dropout - confirm its exact semantics at its definition.
        self.attention = DotProductAttention(dropout)
    def forward(self, x):
        """Implements forward pass of self-attention layer

        Args:
            x (torch.Tensor): batch x t x k sized input
        """
        b, t, k = x.size()
        h = self.heads
        # We reshape the queries, keys and values so that each head has its own dimension
        queries = self.to_queries(x).view(b, t, h, k)
        keys = self.to_keys(x).view(b, t, h, k)
        values = self.to_values(x).view(b, t, h, k)
        out = self.attention(queries, keys, values, b, h, t, k)
        # NOTE(review): 'out' is assumed to come back as (b, t, h*k) for the
        # final linear layer - determined by DotProductAttention; verify there.
        return self.unify_heads(out)
274234 | #!/usr/bin/env python3
"""Convert hector outputstreams to pickled python objects
This program was run on the sample_outputstream*.csv files distributed with
hector to produce the *.dat files in the data directory of this package.
"""
import pandas as pd
import pickle
# Convert each RCP scenario's hector CSV output into a pickled DataFrame.
for rcp in ['rcp26', 'rcp45', 'rcp60', 'rcp85']:
    infile = f'hector-outputstream-{rcp}.csv'
    # Lines starting with '#' hold hector header metadata, not data rows.
    outputstream = pd.read_csv(infile, comment='#')
    # Bug fix: use a context manager so the output file is closed even if
    # pickling raises, instead of the manual open()/close() pair.
    with open(f'hector-outputstream-{rcp}.dat', 'wb') as outfile:
        pickle.dump(outputstream, outfile)
| StarcoderdataPython |
5174825 | <reponame>Matsui54/defx.nvim
# ============================================================================
# FILE: space.py
# AUTHOR: <NAME> <<EMAIL>.Matsu at g<EMAIL>>
# License: MIT license
# ============================================================================
from pynvim import Nvim
import typing
from defx.base.column import Base
from defx.context import Context
class Column(Base):
    """Defx column that renders a single blank space, used as a separator."""

    def __init__(self, vim: Nvim) -> None:
        super().__init__(vim)
        self.name = 'space'

    def get(self, context: Context,
            candidate: typing.Dict[str, typing.Any]) -> str:
        """Render the column content for one candidate: always one space."""
        return ' '

    def length(self, context: Context) -> int:
        """The rendered width of this column is always a single cell."""
        return 1
| StarcoderdataPython |
88600 | <filename>opponents/behaviour_parser.py
from __future__ import unicode_literals
import re
import sys
from ordered_xml import OrderedXMLElement
if sys.version_info[0] < 3:
from io import open
class ParseError(Exception):
    """Represents an error encountered during parsing.

    Positional args: (message, position in the parsed sequence, optional trace).
    """

    def __init__(self, msg, parsed_seq, trace=None):
        # Forward everything to Exception so the details land in self.args.
        super(ParseError, self).__init__(msg, parsed_seq, trace)

    def __str__(self):
        # Only the human-readable message is shown when the error is printed.
        return self.args[0]
# Opening tag: captures tag name, raw attribute string, and '/' for self-closing tags.
_tag_start = re.compile(r'.*?\<([a-zA-Z0-9\-\_]+)\s*?(.*?)\s*?(\/?)\>', re.DOTALL)
# One attribute: name plus an optional quoted value (inner text in group 3).
_attribute = re.compile(r"([a-zA-Z0-9\-\_]+)(?:\s*\=\s*(\"(.*?)\"))?")
# Optional whitespace followed by an optional XML comment; may match empty.
_comment = re.compile(r'\s*(?:\<\!\-\-(.*?)\-\-\>)?', re.DOTALL)
_decl_tag = re.compile(r'\<\?(.*?)\?\>', re.DOTALL) # ignored for now
# Allowed XML nesting for a full opponent behaviour file; a None value marks a
# text-only leaf element with no children of interest to the parser.
base_tag_spec = {
    'opponent': {
        'first': None,
        'last': None,
        'label': None,
        'gender': None,
        'poses': {
            'pose': {
                'sprite': None,
                'directive': { 'keyframe': None, 'animFrame': None }
            }
        },
        'size': None,
        'timer': None,
        'intelligence': None,
        'tags': { 'tag': None },
        'start': { 'state': None },
        'wardrobe': { 'clothing': None },
        'behaviour': {
            'stage': {
                'case': { 'priority': None, 'condition': None, 'state': None }
            }
        },
        'epilogue': {
            'title': None,
            'screen': {
                'start': None,
                'text': {
                    'x': None,
                    'y': None,
                    'width': None,
                    'arrow': None,
                    'content': None,
                }
            }
        }
    }
}
# Allowed XML nesting for an opponent's meta.xml descriptor file.
meta_tag_spec = {
    'opponent': {
        'enabled': None,
        'first': None,
        'last': None,
        'label': None,
        'pic': None,
        'gender': None,
        'height': None,
        'from': None,
        'writer': None,
        'artist': None,
        'description': None,
        'has_ending': None,
        'layers': None,
        'tags': { 'tag': None },
        'costume': None
    }
}
# Allowed XML nesting for the opponent listing/catalog file.
listing_tag_spec = {
    'catalog': {
        'individuals': {
            'opponent': None
        },
        'groups': {
            'group': None
        }
    }
}
# Advance past whitespace and XML comments, returning the new index.
def _skip_chars(seq, index):
    """Skip any run of whitespace and comments in *seq* starting at *index*."""
    while True:
        matched = _comment.match(seq, index)
        # The pattern can match the empty string; that means nothing is left
        # to skip, so stop to avoid looping forever.
        if matched is None or not matched.group(0):
            return index
        index += len(matched.group(0))
def _consume_char(seq, char, index, suppress_eof_error=False):
    """Try to consume *char* at *index* (after skipping whitespace/comments).

    Returns ``(True, new_index)`` on success, or ``(None, index)`` when the
    upcoming characters do not match. Raises :class:`ParseError` at end of
    input unless *suppress_eof_error* is set.
    """
    index = _skip_chars(seq, index)
    # Bugfix: the original tested ``index >= len(seq) - 1``, which reported
    # "Unexpected end of input" while one character was still available.
    if index >= len(seq) and not suppress_eof_error:
        raise ParseError("Unexpected end of input", index)
    if seq.startswith(char, index):
        return True, index + len(char)
    else:
        return None, index
def _consume_re(seq, regex, index, suppress_eof_error=False):
    """Try to match *regex* at *index* (after skipping whitespace/comments).

    Returns ``(match, new_index)``; *match* is None and *index* unchanged
    when the regex does not match. Raises :class:`ParseError` at end of
    input unless *suppress_eof_error* is set.
    """
    index = _skip_chars(seq, index)
    # Bugfix: the original tested ``index >= len(seq) - 1``, which reported
    # "Unexpected end of input" while one character was still available.
    if index >= len(seq) and not suppress_eof_error:
        raise ParseError("Unexpected end of input", index)
    match = regex.match(seq, index)
    if match is not None:
        index = match.end()
    return match, index
def parse_attribute_list(seq, elem):
    """Parse ``name="value"`` pairs from *seq* into ``elem.attributes``.

    Attributes written without a value (e.g. ``<tag checked>``) are stored
    as ``True``. Returns the index at which scanning stopped.
    """
    attr_match, index = _consume_re(seq, _attribute, 0, True)
    while attr_match is not None:
        value = attr_match.group(3)
        # Bugfix: for a value-less attribute, ``group(3)`` returns None (it
        # never raises IndexError), so the original's ``except IndexError``
        # branch was dead and the attribute was stored as None instead of
        # the intended True.
        elem.attributes[attr_match.group(1)] = True if value is None else value
        attr_match, index = _consume_re(seq, _attribute, index, True)
    return index
def parse_tag(seq, index, tag_spec, progress_cb=None):
    """Parse one XML element from *seq* starting at *index*.

    *tag_spec* maps each allowed tag name at this level to its own child
    spec (or None for text-only tags). Returns ``(OrderedXMLElement,
    new_index)``. Raises ParseError carrying a trace of enclosing tags on
    malformed input.
    """
    if progress_cb is not None:
        progress_cb(index)
    _start_index = index
    match, index = _consume_re(seq, _tag_start, index)
    if match is None:
        raise ParseError("Expected opening tag", index)
    # Position of the '<' itself, used for the error context below.
    _tag_start_index = index - len(match.group(0))
    tag_type = match.group(1)
    if tag_type not in tag_spec:
        raise ParseError("Unexpected tag type '{}'".format(tag_type), index)
    elem = OrderedXMLElement(tag_type)
    if len(match.group(2)) > 0:
        parse_attribute_list(match.group(2), elem)
    # group(3) is "/" for self-closing tags such as <br />.
    simple_tag_match = (len(match.group(3)) > 0)
    try:
        # For simple tags (for example: <br />) just return the empty element
        if not simple_tag_match:
            # This tag contains either child text or child elements.
            child_tag_spec = tag_spec[tag_type]
            if child_tag_spec is None:
                # For text-only nodes, just grab everything up to the closing tag as the node's contents.
                tag_close_regex = re.compile(r'(.*?)\<\s*?\/{}\s*?\>'.format(re.escape(tag_type)), re.DOTALL)
                match, index = _consume_re(seq, tag_close_regex, index)
                if match is None:
                    raise ParseError("Could not find closing tag for <{:s}> element".format(tag_type), index)
                elem.text = match.group(1)
            else:
                # Otherwise, parse this node's child elements.
                # The tag-close regex here is slightly different from the one above.
                # we only want to know if the start of the string has a tag close.
                tag_close_regex = re.compile(r'\<\s*?\/{}\s*?\>'.format(re.escape(tag_type)))
                closing_tag_match, index = _consume_re(seq, tag_close_regex, index)
                while closing_tag_match is None:
                    # Recurse for each child until the closing tag appears.
                    child, index = parse_tag(seq, index, child_tag_spec, progress_cb)
                    elem.children.append(child)
                    closing_tag_match, index = _consume_re(seq, tag_close_regex, index)
    except ParseError as e:
        # Prepend this element to the error trace so the final message shows
        # the chain of enclosing tags down to the failure point.
        context = seq[_tag_start_index:_tag_start_index+50].strip()
        trace = "\n in {:s} (pos. {:d}): {:s} ...".format(tag_type, _tag_start_index, context)
        if e.args[2] is not None:
            trace = e.args[2] + trace
        raise ParseError(e.args[0], e.args[1], trace)
    return elem, index
def parse(seq, tag_spec=base_tag_spec, progress_cb=None):
    """Parse an XML document string against *tag_spec*.

    Returns the root OrderedXMLElement. Any ParseError is re-raised with
    line/column information computed from the absolute error index.
    """
    # Skip a leading <?xml ...?> declaration if present.
    _, index = _consume_re(seq, _decl_tag, 0)
    try:
        base_elem, _ = parse_tag(seq, index, tag_spec, progress_cb)
        return base_elem
    except ParseError as e:
        error_index = e.args[1]
        # find line number and position of error:
        # NOTE(review): error_line is zero-based; editors usually show
        # 1-based line numbers.
        error_line = 0
        error_pos = 0
        cur_idx = 0
        for i, line in enumerate(seq.split('\n')):
            # The +1 accounts for the newline stripped by split().
            if error_index < cur_idx + len(line) + 1:
                error_line = i
                error_pos = error_index - cur_idx
                break
            cur_idx += len(line)+1
        raise ParseError("{:s} at line {:d}, position {:d} (abs. position {:d})".format(e.args[0], error_line, error_pos, error_index), None)
def parse_file(fname, progress_cb=None):
    """Parse a behaviour XML file using ``base_tag_spec``.

    *progress_cb*, when given, is invoked as ``progress_cb(total_length,
    current_index)`` while parsing advances.
    """
    with open(fname, encoding='utf-8') as infile:
        contents = infile.read()
    if progress_cb is None:
        return parse(contents)
    def report(cur_index):
        return progress_cb(len(contents), cur_index)
    return parse(contents, base_tag_spec, report)
def parse_meta(fname, progress_cb=None):
    """Parse a meta.xml file using ``meta_tag_spec``.

    *progress_cb*, when given, is invoked as ``progress_cb(total_length,
    current_index)`` while parsing advances.
    """
    with open(fname, encoding='utf-8') as infile:
        contents = infile.read()
    if progress_cb is None:
        return parse(contents, meta_tag_spec)
    def report(cur_index):
        return progress_cb(len(contents), cur_index)
    return parse(contents, meta_tag_spec, report)
def parse_listing(fname, progress_cb=None):
    """Parse the opponent listing file using ``listing_tag_spec``.

    *progress_cb*, when given, is invoked as ``progress_cb(total_length,
    current_index)`` while parsing advances.
    """
    with open(fname, encoding='utf-8') as infile:
        contents = infile.read()
    if progress_cb is None:
        return parse(contents, listing_tag_spec)
    def report(cur_index):
        return progress_cb(len(contents), cur_index)
    return parse(contents, listing_tag_spec, report)
| StarcoderdataPython |
363353 | <reponame>xe1gyq/EekMex
#!/usr/bin/python
import logging
from random import randint
try:
#import pyupm_bmpx8x as upmBmpx8x
import Adafruit_BMP.BMP085 as BMP085
except ImportError:
pass
def emPressureGet(mode=None):
    """Return a barometric pressure reading.

    When *mode* is None the value is read from a BMP085 sensor on I2C bus 1;
    any other mode returns a simulated random reading in [1000, 2000].
    """
    if mode is not None:
        # Simulation path: no sensor hardware required.
        return randint(1000,2000)
    # Hardware path (requires the Adafruit_BMP package to be importable).
    sensor = BMP085.BMP085(busnum=1)
    return sensor.read_pressure()
# End of File
| StarcoderdataPython |
5179260 | import uuid
class Request:
    """Stores the details of a single download request made by a user."""

    def __init__(self, kind, value, user, date):
        """Create a new, not-yet-downloaded request.

        kind:  one of "search", "url", "IPFS hash" or "git URL".
        value: the payload associated with *kind*.
        user:  the user who made the request.
        date:  a date-time object for when the request was made.
        """
        self.kind = kind
        self.value = value
        self.user = user
        self.date = date
        # A freshly created request has never been downloaded.
        self.downloaded_status = False
        # Random identifier so individual requests can be tracked.
        self.uuid = str(uuid.uuid4())
        # Filled in only once the request has been fetched to disk.
        self.file_location = ""

    def set_downloaded_status(self, state):
        """Record whether the request has been downloaded (boolean)."""
        self.downloaded_status = state

    def set_file_location(self, loc):
        """Record where on disk the downloaded file lives."""
        self.file_location = loc
| StarcoderdataPython |
3598457 | <reponame>sokil/VotingEngine
from app import db
from sqlalchemy import Column, String, Integer, ForeignKey
class Vote(db.Model):
    """One cast vote: the points a user gave to one variant of a voting."""
    __tablename__ = 'votes'
    id = Column(Integer, primary_key=True)
    # The voting this vote belongs to.
    voting_id = Column(Integer, ForeignKey('votings.id'))
    # The concrete variant that received the points.
    voting_variant_id = Column(Integer, ForeignKey('voting_variants.id'))
    # Number of points awarded.
    point = Column(Integer)
    # The user who cast the vote.
    user_id = Column(Integer, ForeignKey('users.id'))
9684522 | <gh_stars>1-10
from unittest import TestCase
from pathlib import Path
from defx.util import deft_to_jsonl_converter
class DeftToJsonlConverterTest(TestCase):
    """Tests the conversion script from deft format into jsonl"""

    @staticmethod
    def test_chapter_detection():
        """A chapter start is a bare number token followed by a period."""
        true_example_1 = {'tokens': ['5', '.']}
        assert deft_to_jsonl_converter._is_chapter_start(true_example_1)
        true_example_2 = {'tokens': ['5320032', '.']}
        assert deft_to_jsonl_converter._is_chapter_start(true_example_2)
        false_example_1 = {'tokens': ['5d', '.']}
        assert not deft_to_jsonl_converter._is_chapter_start(false_example_1)
        false_example_2 = {'tokens': ['.']}
        assert not deft_to_jsonl_converter._is_chapter_start(false_example_2)
        false_example_3 = {'tokens': ['Some', 'regular', 'sentence', '.']}
        assert not deft_to_jsonl_converter._is_chapter_start(false_example_3)

    @staticmethod
    def test_parse_sentence_with_chapter():
        """A chapter-start sentence parses as an unannotated 'NoDef' example."""
        with open('tests/fixtures/deft_format_sentence_with_chapter.deft') as f:
            example = deft_to_jsonl_converter._parse_example(f)
        assert example['tokens'] == [
            "5", ".", "Science", "includes", "such", "diverse", "fields",
            "as", "astronomy", ",", "biology", ",", "computer", "sciences",
            ",", "geology", ",", "logic", ",", "physics", ",", "chemistry",
            ",", "and", "mathematics", "(", "[", "link", "]", ")", "."
        ]
        # All 31 tokens are outside any annotation span.
        assert example['tags'] == ['O'] * 31
        assert len(example['ner_ids']) == 31
        assert example['ner_ids'] == ['-1'] * 31
        assert example['sentence_labels'] == [{
            'label': 'NoDef',
            'start_token_idx': 0,
            'end_token_idx': 31
        }]

    @staticmethod
    def test_parse_sentence_with_annotations():
        """Term/Definition spans, NER ids and relation links are extracted."""
        with open('tests/fixtures/deft_format_sentence_with_annotations.deft') as f:
            example = deft_to_jsonl_converter._parse_example(f)
        assert example['tokens'][:4] == ["However", ",", "those", "fields"]
        assert example['tokens'][-3:] == ["natural", "sciences", "."]
        assert len(example['tokens']) == 21
        assert example['sentence_labels'] == [{
            'label': 'HasDef',
            'start_token_idx': 0,
            'end_token_idx': 21
        }]
        # Definition span T127 covers tokens 2..15; Term span T128 is 18..19.
        assert example['tags'][2] == "B-Definition"
        assert example['tags'][3:16] == ["I-Definition"] * 13
        assert example['ner_ids'][2:16] == ["T127"] * 14
        assert example['tags'][18] == "B-Term"
        assert example['tags'][19] == "I-Term"
        assert example['ner_ids'][18:20] == ["T128"] * 2
        # The definition tokens point at the term they define.
        assert example['relation_roots'][2:16] == ['T128'] * 14
        assert example['relations'][2:16] == ['Direct-Defines'] * 14
        # Relations pointing to the token itself should be ignored
        assert example['relation_roots'][18:20] == ['-1'] * 2
        assert example['relations'][18:20] == ['0'] * 2

    @staticmethod
    def test_convert_file():
        """Whole-file conversion yields one example per chapter."""
        input_file = Path('tests/fixtures/deft_format_samples.deft')
        examples = deft_to_jsonl_converter._convert_deft_file(input_file)
        assert len(examples) == 2
        assert examples[0]['tokens'] == [
            "5", ".", "Science", "includes", "such", "diverse", "fields", "as",
            "astronomy", ",", "biology", ",", "computer", "sciences", ",",
            "geology", ",", "logic", ",", "physics", ",", "chemistry", ",",
            "and", "mathematics", "(", "[", "link", "]", ")", ".", "However",
            ",", "those", "fields", "of", "science", "related", "to", "the",
            "physical", "world", "and", "its", "phenomena", "and", "processes",
            "are", "considered", "natural", "sciences", ".", "Thus", ",", "a",
            "museum", "of", "natural", "sciences", "might", "contain", "any",
            "of", "the", "items", "listed", "above", "."
        ]
        # Three sentences, the middle one carrying a definition.
        assert examples[0]['sentence_labels'] == [
            {
                'label': 'NoDef',
                'start_token_idx': 0,
                'end_token_idx': 31
            },
            {
                'label': 'HasDef',
                'start_token_idx': 31,
                'end_token_idx': 52
            },
            {
                'label': 'NoDef',
                'start_token_idx': 52,
                'end_token_idx': 68
            }
        ]
1991635 | <gh_stars>0
import click
import paho.mqtt.client as mqtt
@click.command()
@click.option('--name', prompt='enter your name', help='enter user name')
@click.option('--age', prompt='enter your age', help='enter user age')
def init(name, age):
    """CLI entry point: build an MQTT client and echo the prompted inputs."""
    client = mqtt.Client()
    print(client)
    print(name)
    print(age)
| StarcoderdataPython |
4986921 | <gh_stars>0
class HeadQuartersOffice():
    '''Defines the HQ class: wraps a ResourcePlanningOffice and mirrors its
    fields into a data dictionary.'''
    # Name-mangled backing fields for the properties below.
    __rpio = None
    __name = None
    __title = None
    __data = None
    __dataframe = None

    @property
    def rpio( self ):
        # Returns None implicitly when unset.
        if self.__rpio is not None:
            return self.__rpio

    @rpio.setter
    def rpio( self, code ):
        if code is not None:
            self.__rpio = str( code )
            self.__data[ 'rpio' ] = self.__rpio

    @property
    def name( self ):
        if self.__name is not None:
            return self.__name

    @name.setter
    def name( self, name ):
        if name is not None:
            self.__name = str( name )
            self.__data[ 'name' ] = self.__name

    @property
    def title( self ):
        if self.__title is not None:
            return self.__title

    @title.setter
    def title( self, title ):
        if title is not None:
            self.__title = str( title )
            self.__data[ 'title' ] = self.__title

    @property
    def data( self ):
        # Dictionary view of the fields set so far.
        if self.__data is not None:
            return self.__data

    @property
    def table( self ):
        if self.__dataframe is not None:
            return self.__dataframe

    def __init__( self, rpio ):
        # NOTE(review): ResourcePlanningOffice is not defined or imported in
        # this module — confirm it is available at runtime.
        self.__rpio = ResourcePlanningOffice( str( rpio ) )
        self.__name = self.__rpio.name
        self.__data = { 'rpio': self.__rpio,
                        'name': self.__name }
        # NOTE(review): this stores the DataFrame *class*, not an instance —
        # possibly ``pd.DataFrame()`` was intended; ``pd`` is also not
        # imported in this module. Confirm.
        self.__dataframe = pd.DataFrame

    def __str__( self ):
        if self.__name is not None:
            return self.__name
| StarcoderdataPython |
3308693 | <gh_stars>0
import os
from torchaudio.datasets.utils import (
download_url,
extract_archive
)
import shutil
# Default folder (under root) where the dataset is extracted.
FOLDER_IN_ARCHIVE = "SpeechCommands"
# Default dataset version to download.
URL = "speech_commands_v0.02"
# md5 checksums for the Speech Commands archives, keyed by download URL.
_CHECKSUMS_SC = {
    "https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz":
        "3cd23799cb2bbdec517f1cc028f8d43c",
    "https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz":
        "6b74f3901214cb2c2934e98196829835",
}
def download_speech_commands(root: str,
                             url: str = URL,
                             folder_in_archive: str = FOLDER_IN_ARCHIVE) -> None:
    """Download the Dataset Speech Commands (download the dataset if it is not found at root path).
    Otherwise it only extracts the dataset.

    Args:
        root (str): Path to the directory where the dataset is found or downloaded.
        url (str, optional): The URL to download the dataset from,
            or the type of the dataset to download.
            Allowed type values are ``"speech_commands_v0.01"`` and ``"speech_commands_v0.02"``
            (default: ``"speech_commands_v0.02"``)
        folder_in_archive (str, optional):
            The top-level directory of the dataset in the root directory. Location of the extracted dataset (default: ``"SpeechCommands"``)
    """
    if url in [
        "speech_commands_v0.01",
        "speech_commands_v0.02",
    ]:
        # Short dataset names are expanded to the full download URL.
        base_url = "https://storage.googleapis.com/download.tensorflow.org/data/"
        ext_archive = ".tar.gz"
        url = os.path.join(base_url, url + ext_archive)
    basename = os.path.basename(url)
    archive = os.path.join(root, basename)
    # Strip the two-part ".tar.gz" extension to get the version directory.
    basename = basename.rsplit(".", 2)[0]
    folder_in_archive = os.path.join(folder_in_archive, basename)
    # build path
    _path = os.path.join(root, folder_in_archive)
    if not os.path.isdir(_path):
        if not os.path.isfile(archive):
            # Only hit the network when the archive is not already on disk.
            checksum = _CHECKSUMS_SC.get(url, None)
            download_url(url, root, hash_value=checksum, hash_type="md5")
        print("Extracting...")
        extract_archive(archive, _path)
        print("Success")
    else:
        print("Already downloaded and extracted")
# sha256 checksums for the LibriSpeech archives, keyed by download URL.
_CHECKSUMS_LS = {
    "http://www.openslr.org/resources/12/dev-clean.tar.gz":
        "76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3",
    "http://www.openslr.org/resources/12/dev-other.tar.gz":
        "12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365",
    "http://www.openslr.org/resources/12/test-clean.tar.gz":
        "39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23",
    "http://www.openslr.org/resources/12/test-other.tar.gz":
        "d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29",
    "http://www.openslr.org/resources/12/train-clean-100.tar.gz":
        "d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2",
    "http://www.openslr.org/resources/12/train-clean-360.tar.gz":
        "146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf",
    "http://www.openslr.org/resources/12/train-other-500.tar.gz":
        "ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2"
}
def download_LibriSpeech(root: str,url: str = "train-clean-100",
                         folder_in_archive: str = "LibriSpeech") -> None:
    """Download the Dataset LibriSpeech (download the dataset if it is not found at root path).
    Otherwise it only extracts the dataset and put it in root/folder_in_archive/split

    Args:
        root (str): Path to the directory where the dataset is found or downloaded.
        url (str): which dataset split to fetch, or "all" for every split.
        folder_in_archive (str, optional):
            The top-level directory of the dataset. (default: ``"LibriSpeech"``)
    """
    # "all" recursively downloads every split, then returns.
    if url == "all":
        download_LibriSpeech(root,"train-clean-100",folder_in_archive)
        download_LibriSpeech(root,"train-clean-360",folder_in_archive)
        download_LibriSpeech(root,"train-other-500",folder_in_archive)
        download_LibriSpeech(root,"dev-clean",folder_in_archive)
        download_LibriSpeech(root,"dev-other",folder_in_archive)
        download_LibriSpeech(root,"test-clean",folder_in_archive)
        download_LibriSpeech(root,"test-other",folder_in_archive)
        return
    if url in [
        "dev-clean",
        "dev-other",
        "test-clean",
        "test-other",
        "train-clean-100",
        "train-clean-360",
        "train-other-500",
    ]:
        # Expand a split name to its full download URL.
        ext_archive = ".tar.gz"
        base_url = "http://www.openslr.org/resources/12/"
        url = os.path.join(base_url, url + ext_archive)
    basename = os.path.basename(url)
    archive = os.path.join(root, basename)
    print("Downloading "+basename+"...")
    # Keep only the split name (e.g. "dev-clean") for the target directory.
    basename = basename.split(".")[0]
    folder_in_archive = os.path.join(folder_in_archive,"split", basename)
    _path = os.path.join(root, folder_in_archive)
    if not (os.path.isdir(_path)):
        if not os.path.isfile(archive):
            checksum = _CHECKSUMS_LS.get(url, None)
            download_url(url, root, hash_value=checksum)
        print("Extracting...")
        extract_archive(archive)
        print("Success")
        # The archive extracts to root/LibriSpeech/<split>; relocate it
        # under root/LibriSpeech/split/<split>.
        print("Moving files in root/LibriSpeech/split")
        source_dir=os.path.join(root,"LibriSpeech",basename)
        shutil.move(source_dir, _path)
        print("Done!")
    else:
        print("Already downloaded and extracted")
def download_LibriSpeech_Word(root: str,
                              folder_in_archive: str = "LibriSpeech") -> None:
    """Download the Dataset LibriSpeech Word (download the dataset if it is not found at root path).
    Otherwise it only extracts the dataset

    Use the same folder structure than in https://github.com/bepierre/SpeechVGG, that is

    |_LibriSpeech
       |_ word_labels
       |_ split
          |____ test-clean
          |____ test-other
          |____ dev-clean
          |____ dev-other
          |____ train-clean-100
          |____ train-clean-360
          |____ train-other-500

    Args:
        root (str): Path to the directory where the dataset is found or downloaded.
        folder_in_archive (str, optional):
            The top-level directory of the dataset. (default: ``"LibriSpeech"``)
    """
    curr_dir= os.path.join(root,folder_in_archive)
    # download the compressed files first in the root directory and store decompressed versions in root/folder_in_archive/split
    download_LibriSpeech(root,"all",folder_in_archive)
    def download_word_meta(dir):
        # The word-alignment labels cannot be auto-downloaded; fail loudly
        # when they are missing.
        if not os.path.exists(os.path.join(dir,"word_labels")):
            raise ValueError("You should add the word_labels directory by downloading it on https://imperialcollegelondon.app.box.com/s/yd541e9qsmctknaj6ggj5k2cnb4mabkc?page=1")
    # check that word_labels directory exists
    download_word_meta(curr_dir)
9749466 | '''
Faça um programa que simula o lançamento de dois dados, d1 e d2, n vezes, e tem como saída o número
de cada dado e a relação entre eles (>,<,=) de cada lançamento.
'''
# não entendi nada, portanto não vou fazer
| StarcoderdataPython |
6589987 | """ This script will test the submodules used by the scattering module"""
import torch
import os
import numpy as np
import pytest
from kymatio import HarmonicScattering3D
from kymatio.scattering3d.utils import generate_weighted_sum_of_gaussians
# Collect the usable 3-D scattering backends. The skcuda backend needs a
# working CUDA + skcuda + cupy stack; the plain torch backend always works
# and is appended last. ``devices`` lists where tests may run.
backends = []
try:
    if torch.cuda.is_available():
        from skcuda import cublas
        import cupy
        from kymatio.scattering3d.backend.torch_skcuda_backend import backend
        backends.append(backend)
except Exception:
    # Bugfix: narrowed from a bare ``except:`` so that only real errors
    # (e.g. ImportError) are swallowed, not KeyboardInterrupt/SystemExit.
    pass
from kymatio.scattering3d.backend.torch_backend import backend
backends.append(backend)
if torch.cuda.is_available():
    devices = ['cuda', 'cpu']
else:
    devices = ['cpu']
def relative_difference(a, b):
    """Return the L1 difference of *a* and *b*, normalised by the larger L1 norm."""
    numerator = np.abs(a - b).sum()
    denominator = max(np.abs(a).sum(), np.abs(b).sum())
    return numerator / denominator
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("backend", backends)
def test_FFT3d_central_freq_batch(device, backend):
    """The DC (zero-frequency) bin of the 3D FFT equals the sum of the input.

    Bugfix: the original looped ``for device in devices`` inside the body,
    shadowing the parametrized ``device`` argument, and compared against the
    string 'gpu' which never occurs in ``devices`` ('cuda'/'cpu'), so the
    CUDA path was dead code.
    """
    x = torch.zeros(1, 32, 32, 32, 2).float()
    if device == 'cuda':
        x = x.cuda()
    a = x.sum()
    y = backend.fft(x)
    c = y[:,0,0,0].sum()
    assert (c-a).abs().sum() < 1e-6
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("backend", backends)
def test_fft3d_error(backend, device):
    """fft must reject tensors without a trailing complex dimension of 2."""
    x = torch.zeros(8, 1)
    with pytest.raises(TypeError) as record:
        backend.fft(x)
    assert "should be complex" in record.value.args[0]
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("backend", backends)
@pytest.mark.parametrize("inplace", [False, True])
def test_cdgmm3d(device, backend, inplace):
    """Check complex pointwise multiplication and its error handling."""
    if backend.name == 'torch' or device != 'cpu':
        # Not all backends currently implement the inplace variant
        x = torch.zeros(2, 3, 4, 2).to(device)
        x[...,0] = 2
        x[...,1] = 3
        y = torch.zeros_like(x)
        y[...,0] = 4
        y[...,1] = 5
        # Expected complex product computed by hand from (real, imag) pairs.
        prod = torch.zeros_like(x)
        prod[...,0] = x[...,0]*y[...,0] - x[...,1]*y[...,1]
        prod[...,1] = x[...,0]*y[...,1] + x[...,1]*y[...,0]
        z = backend.cdgmm3d(x, y, inplace=inplace)
        assert (z-prod).norm().cpu().item() < 1e-7
        if inplace:
            # The inplace variant must write the result back into x.
            assert (x-z).norm().cpu().item() < 1e-7
        # Non-contiguous first operand is converted with a warning.
        with pytest.warns(UserWarning) as record:
            x = torch.randn((3, 4, 3, 2), device=device)
            x = x[:,0:3,...]
            y = torch.randn((3, 3, 3, 2), device=device)
            backend.cdgmm3d(x, y)
        assert "A is converted" in record[0].message.args[0]
        # Non-contiguous second operand is converted with a warning.
        with pytest.warns(UserWarning) as record:
            x = torch.randn((3, 3, 3, 2), device=device)
            y = torch.randn((3, 4, 3, 2), device=device)
            y = y[:,0:3,...]
            backend.cdgmm3d(x, y)
        assert "B is converted" in record[0].message.args[0]
        # Mismatched spatial shapes are rejected.
        with pytest.raises(RuntimeError) as record:
            x = torch.randn((3, 3, 3, 2), device=device)
            y = torch.randn((4, 4, 4, 2), device=device)
            backend.cdgmm3d(x, y)
        assert "not compatible" in record.value.args[0]
        # Broadcasting over a leading batch dimension is allowed.
        x = torch.randn((2, 3, 3, 3, 2), device=device)
        y = torch.randn((3, 3, 3, 2), device=device)
        backend.cdgmm3d(x, y)
        # A trailing dimension other than 2 is not a complex tensor.
        with pytest.raises(TypeError) as record:
            x = torch.randn((3, 3, 3, 1), device=device)
            y = torch.randn((3, 3, 3, 1), device=device)
            backend.cdgmm3d(x, y)
        assert "should be complex" in record.value.args[0]
        # This one is a little tricky. We can't have the number of dimensions be
        # greater than 4 since that triggers the "not compatible" error.
        with pytest.raises(RuntimeError) as record:
            x = torch.randn((3, 3, 2), device=device)
            y = torch.randn((3, 3, 2), device=device)
            backend.cdgmm3d(x, y)
        assert "must be simply a complex" in record.value.args[0]
        # Create a tensor that behaves like `torch.Tensor` but is technically a
        # different type.
        class FakeTensor(torch.Tensor):
            pass
        with pytest.raises(RuntimeError) as record:
            x = FakeTensor(3, 3, 3, 2)
            y = torch.randn(3, 3, 3, 2)
            backend.cdgmm3d(x, y)
        assert "should be same type" in record.value.args[0]
    if backend.name == 'torch_skcuda':
        # The skcuda backend must refuse CPU tensors outright.
        x = torch.randn((3, 3, 3, 2), device=torch.device('cpu'))
        y = torch.randn((3, 3, 3, 2), device=torch.device('cpu'))
        with pytest.raises(RuntimeError) as record:
            backend.cdgmm3d(x, y)
        assert "for cpu tensors" in record.value.args[0]
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("backend", backends)
def test_complex_modulus(backend, device):
    """modulus() must put |x| in the real slot and 0 in the imaginary slot."""
    x = torch.randn(4, 3, 2).to(device)
    # Expected modulus from the (real, imag) pair in the last dimension.
    xm = torch.sqrt(x[...,0] ** 2 + x[...,1] ** 2)
    y = backend.modulus(x)
    assert (y[...,0] - xm).norm() < 1e-7
    assert (y[...,1]).norm() < 1e-7
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("backend", backends)
def test_against_standard_computations(device, backend):
    """Compare the full transform against stored reference values."""
    if backend.name == "torch_skcuda" and device == "cpu":
        pytest.skip("The skcuda backend does not support CPU tensors.")
    # Reference inputs and outputs are shipped next to this test file.
    file_path = os.path.abspath(os.path.dirname(__file__))
    data = torch.load(os.path.join(file_path, 'test_data_3d.pt'))
    x = data['x'].to(device)
    scattering_ref = data['Sx'].to(device)
    J = data['J']
    L = data['L']
    integral_powers = data['integral_powers']
    M = x.shape[1]
    batch_size = x.shape[0]
    N, O = M, M
    sigma = 1
    scattering = HarmonicScattering3D(J=J, shape=(M, N, O), L=L,
                                      sigma_0=sigma, method='integral',
                                      integral_powers=integral_powers, max_order=2, backend=backend, frontend='torch').to(device)
    # Order 0 is computed directly from the input; orders 1+2 come from the
    # scattering transform itself.
    order_0 = backend.compute_integrals(x, integral_powers)
    scattering.max_order = 2
    scattering.method = 'integral'
    scattering.integral_powers = integral_powers
    orders_1_and_2 = scattering(x)
    # WARNING: These are hard-coded values for the setting J = 2.
    n_order_1 = 3
    n_order_2 = 3
    # Extract orders and make order axis the slowest in accordance with
    # the stored reference scattering transform.
    order_1 = orders_1_and_2[:,0:n_order_1,...]
    order_2 = orders_1_and_2[:,n_order_1:n_order_1+n_order_2,...]
    # Permute the axes since reference has (batch index, integral power, j,
    # ell) while the computed transform has (batch index, j, ell, integral
    # power).
    order_1 = order_1.permute(0, 3, 1, 2)
    order_2 = order_2.permute(0, 3, 1, 2)
    order_1 = order_1.reshape((batch_size, -1))
    order_2 = order_2.reshape((batch_size, -1))
    orders_1_and_2 = torch.cat((order_1, order_2), 1)
    order_0 = order_0.cpu().numpy().reshape((batch_size, -1))
    start = 0
    end = order_0.shape[1]
    order_0_ref = scattering_ref[:,start:end].cpu().numpy()
    orders_1_and_2 = orders_1_and_2.cpu().numpy().reshape((batch_size, -1))
    start = end
    end += orders_1_and_2.shape[1]
    orders_1_and_2_ref = scattering_ref[:, start:end].cpu().numpy()
    order_0_diff_cpu = relative_difference(order_0_ref, order_0)
    orders_1_and_2_diff_cpu = relative_difference(
        orders_1_and_2_ref, orders_1_and_2)
    assert order_0_diff_cpu < 1e-6, "CPU : order 0 do not match, diff={}".format(order_0_diff_cpu)
    assert orders_1_and_2_diff_cpu < 1e-6, "CPU : orders 1 and 2 do not match, diff={}".format(orders_1_and_2_diff_cpu)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("backend", backends)
def test_solid_harmonic_scattering(device, backend):
    """First-order coefficients of a single Gaussian match the closed form."""
    if backend.name == "torch_skcuda" and device == "cpu":
        pytest.skip("The skcuda backend does not support CPU tensors.")
    # Compare value to analytical formula in the case of a single Gaussian
    centers = np.zeros((1, 1, 3))
    weights = np.ones((1, 1))
    sigma_gaussian = 3.
    sigma_0_wavelet = 3.
    M, N, O, J, L = 128, 128, 128, 1, 3
    grid = np.fft.ifftshift(np.mgrid[-M//2:-M//2+M, -N//2:-N//2+N, -O//2:-O//2+O].astype('float32'), axes=(1,2,3))
    x = torch.from_numpy(generate_weighted_sum_of_gaussians(grid, centers,
                         weights, sigma_gaussian)).to(device).float()
    scattering = HarmonicScattering3D(J=J, shape=(M, N, O), L=L,
                                      sigma_0=sigma_0_wavelet,max_order=1, method='integral',
                                      integral_powers=[1], frontend='torch',backend=backend).to(device)
    scattering.max_order = 1
    scattering.method = 'integral'
    scattering.integral_powers = [1]
    s = scattering(x)
    for j in range(J+1):
        sigma_wavelet = sigma_0_wavelet*2**j
        # Analytical first-order response of a Gaussian to a solid harmonic
        # wavelet at scale j: coefficient ell equals k**ell.
        k = sigma_wavelet / np.sqrt(sigma_wavelet**2 + sigma_gaussian**2)
        for l in range(1, L+1):
            err = torch.abs(s[0, j, l, 0] - k ** l).sum()/(1e-6+s[0, j, l, 0].abs().sum())
            assert err<1e-4
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("backend", backends)
def test_larger_scales(device, backend):
    """The transform must run when J is large relative to the input size."""
    if backend.name == "torch_skcuda" and device == "cpu":
        pytest.skip("The skcuda backend does not support CPU tensors.")
    shape = (32, 32, 32)
    L = 3
    sigma_0 = 1
    x = torch.randn((1,) + shape).to(device)
    # Smoke test only: no reference values, just exercise larger J.
    for J in range(3, 4+1):
        scattering = HarmonicScattering3D(J=J, shape=shape, L=L, sigma_0=sigma_0, frontend='torch', backend=backend).to(device)
        scattering.method = 'integral'
        Sx = scattering(x)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("backend", backends)
def test_scattering_methods(device, backend):
    """Smoke-test 'standard' and 'local' methods, with and without
    rotation covariance."""
    if backend.name == "torch_skcuda" and device == "cpu":
        pytest.skip("The skcuda backend does not support CPU tensors.")
    shape = (32, 32, 32)
    J = 4
    L = 3
    sigma_0 = 1
    x = torch.randn((1,) + shape).to(device)
    scattering = HarmonicScattering3D(J=J, shape=shape, L=L, sigma_0=sigma_0,
                                      frontend='torch',backend=backend).to(device)
    scattering.method = 'standard'
    Sx = scattering(x)
    scattering.rotation_covariant = False
    Sx = scattering(x)
    # 'local' evaluates at explicit points; use the centre of the volume.
    points = torch.zeros(1, 1, 3)
    points[0,0,:] = torch.tensor(shape)/2
    scattering.method = 'local'
    scattering.points = points
    Sx = scattering(x)
    scattering.rotation_covariant = False
    Sx = scattering(x)
| StarcoderdataPython |
1679685 | import traceback
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from pycompss.api.multinode import multinode
from pycompss.api.parameter import FILE_IN, FILE_OUT
from biobb_common.tools import file_utils as fu
from biobb_pmx.pmx import mutate
import os
import sys
@constraint(computingUnits="1")
@task(input_structure_path=FILE_IN, output_structure_path=FILE_OUT,
      on_failure="IGNORE")
def mutate_pc(input_structure_path, output_structure_path,
              properties, **kwargs):
    """PyCOMPSs task wrapping biobb_pmx Mutate.

    Mutates the structure at *input_structure_path* and writes the result to
    *output_structure_path*. On any failure (or if the tool produced no
    output) a sentinel "failed" output file is written instead so the
    surrounding workflow can continue.
    """
    try:
        # NOTE(review): these PMI_* variables are cleared, presumably so the
        # COMPSs worker's MPI/PMI environment does not leak into the tool's
        # subprocess — confirm.
        os.environ.pop('PMI_FD', None)
        os.environ.pop('PMI_JOBID', None)
        os.environ.pop('PMI_RANK', None)
        os.environ.pop('PMI_SIZE', None)
        mutate.Mutate(input_structure_path=input_structure_path, output_structure_path=output_structure_path,
                      properties=properties, **kwargs).launch()
        if not os.path.exists(output_structure_path):
            fu.write_failed_output(output_structure_path)
    except Exception:
        traceback.print_exc()
        fu.write_failed_output(output_structure_path)
    finally:
        # Flush so worker logs are complete even on failure.
        sys.stdout.flush()
        sys.stderr.flush()
82496 | <filename>Pacote Dowload/pythonProject/aula017.py
# Worked list-method examples from the lesson (kept for reference):
# num=[2,5,9,1]
# num[2]=3                 # position 2 changes from 9 to 3
# num.append(7)            # appends the value 7
# num.sort(reverse=True)   # sorts in reverse order
# num.insert(2,0)          # inserts 0 at position 2, shifting the rest
# num.pop(2)               # removes the value at position 2
# num.insert(2,2)          # inserts the value 2 at position 2
# num.remove(2)            # removes the first occurrence of 2
# if 4 in num:
#     num.remove(4)
# else:
#     print('Não achei o número 4')
# print(num)
# print(f'Essa lista tem {len(num)} elementos.')

# Read five integers from the user, then echo each with its position.
valores=list()
for cont in range (0,5):
    valores.append(int(input('Digite um valor: ')))
for c, v in enumerate (valores):
    print(f'Na posição {c} encontrei o valor {v}!')
print('Chegei ao final da lista')
| StarcoderdataPython |
3525832 | import datetime
from tornado.web import RequestHandler
from tornado import gen
from bson.objectid import ObjectId
class Handler(RequestHandler):
    """Serves the contest overview page."""

    @gen.coroutine
    def get(self, _id):
        """Render the contest overview, enforcing the password when one is set."""
        try:
            contest = yield self._find_contest(_id)
            if contest["password"]:
                self._check_permission(_id)
        except RuntimeError as err:
            # Both "not found" and "no permission" render as a message page.
            self.render("message.html", text=str(err))
        else:
            username = self._get_cookie_username()
            self.render("contest/overview.html", contest=contest, now=datetime.datetime.now(), username=username)

    def _check_permission(self, _id):
        """Raise RuntimeError unless the secure contest cookie matches *_id*."""
        contest_id = self.get_secure_cookie("contest")
        if not contest_id or _id != contest_id.decode():
            raise RuntimeError("Please enter password")

    @gen.coroutine
    def _find_contest(self, _id):
        """Look up the contest document by ObjectId; raise if missing."""
        contest = yield self.settings["database"]["contest"].find_one({
            "_id": ObjectId(_id),
        })
        if not contest:
            raise RuntimeError("No record")
        return contest

    def _get_cookie_username(self):
        """Return the logged-in username from the secure cookie, or None."""
        username = self.get_secure_cookie("username")
        if not username:
            return None
        return username.decode()
| StarcoderdataPython |
357958 | import discord
from app.classes.bot import Bot
async def get_guild_leaderboard(bot: Bot, guild: discord.Guild) -> dict:
    """Build the XP leaderboard for *guild*.

    Returns a dict mapping user id -> ``{"name", "xp", "level", "rank"}``
    for up to the top 200 members by XP, skipping bots and users who are no
    longer resolvable in the guild.
    """
    leaderboard = {}
    top_users = await bot.db.fetch(
        """SELECT * FROM members
        WHERE guild_id=$1
        AND xp > 0
        ORDER BY xp DESC
        LIMIT 200""",
        guild.id,
    )
    # Resolve all member objects in one batched cache call.
    uids = [int(u["user_id"]) for u in top_users]
    user_lookup = await bot.cache.get_members(uids, guild)
    current_rank = 0
    for u in top_users:
        obj = user_lookup.get(int(u["user_id"]))
        if not obj:
            # Member left the guild (or is not cached); skip them.
            continue
        if obj.bot:
            continue
        # Rank counts only members actually shown on the board.
        current_rank += 1
        leaderboard[obj.id] = {
            "name": str(obj),
            "xp": u["xp"],
            "level": u["level"],
            "rank": current_rank,
        }
    return leaderboard
async def get_rank(bot: Bot, guild: discord.Guild, user_id: int) -> int:
    """Return *user_id*'s position on the guild leaderboard, or None."""
    leaderboard = await get_guild_leaderboard(bot, guild)
    entry = leaderboard.get(user_id)
    if not entry:
        return None
    return entry["rank"]
| StarcoderdataPython |
11244371 | from collections import defaultdict
def foodmenu(order, menu):
    """Flatten the recipe for *order* into total ingredient amounts.

    *menu* maps a dish name to a list of one-key dicts; an int value is an
    ingredient amount, while a list value is a sub-recipe to be expanded.
    Returns a defaultdict(int) of ingredient -> total amount.

    NOTE(review): Python 2 only — relies on ``dict.values()``/``dict.keys()``
    returning indexable lists; under Python 3 these are views and indexing
    raises TypeError.
    """
    res = defaultdict(int)
    queue = [menu[order]]
    while queue:
        menu_list = queue.pop(0)
        while menu_list:
            item = menu_list.pop(0)
            # item.values() is either [amount] or [[sub-components...]]
            if type(item.values()[0]) == type(1):
                # Plain ingredient: accumulate its amount.
                res[item.keys()[0]] += item.values()[0]
            else:
                # Sub-recipe (e.g. {water:100}): expand into the current list.
                for each in item.values()[0]:
                    menu_list.append(each)
    return res
def food(order, menu):
    """Flatten the recipe for *order* into total ingredient amounts.

    *menu* maps a dish name to a list of one-key dicts; an int value is an
    ingredient amount, while a list value is a sub-recipe expanded in place.
    Returns a defaultdict(int) of ingredient -> total amount.
    """
    res = defaultdict(int)
    queue = [menu[order]]
    while queue:
        item_list = queue.pop(0)
        while item_list:
            item = item_list.pop(0)
            # list(...) keeps this working on Python 3, where dict views are
            # not indexable (on Python 2 it is an equivalent copy).
            key = list(item.keys())[0]
            value = list(item.values())[0]
            if isinstance(value, int):
                res[key] += value
            else:
                # Sub-recipe: expand its components into the current list.
                for component in value:
                    item_list.append(component)
    # Bugfix: the original appended sub-components to an undefined name
    # ``menu_list`` (NameError) and never returned ``res``.
    return res
# Demo (Python 2 syntax: ``print`` statement).
menu = {"pizza":[{"floar":100},{"water":100},{"meat":[{"water":100}]}]}
print foodmenu("pizza", menu)
11394730 | <reponame>xmyqsh/jittor
import setuptools
from setuptools import setup, find_packages
import os
path = os.path.dirname(__file__)
with open(path + "/README.src.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='jittor',
version='1.0.0',
# scripts=[],
author="<NAME>",
author_email="<EMAIL>",
description="a Just-in-time(JIT) deep learning framework",
long_description=long_description,
long_description_content_type="text/markdown",
url="http://jittor.org",
# packages=setuptools.find_packages(),
python_requires='>=3.7',
packages=["jittor", "jittor.test", "jittor.models", "jittor.utils", "jittor_utils"],
package_dir={'':path+'/python'},
package_data={'': ['*', '*/*', '*/*/*','*/*/*/*','*/*/*/*/*','*/*/*/*/*/*']},
# include_package_data=True,
install_requires=[
"pybind11",
"numpy",
"tqdm",
"pillow",
"astunparse",
],
) | StarcoderdataPython |
4876863 | # Copied from https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/65938
# Full credit to <NAME> for the basic implementation of the original Focal Loss
# paper: https://arxiv.org/abs/1708.02002
import torch
import torch.nn as nn
from torch.nn import functional as F
class FocalLoss(nn.Module):
    '''
    Pixel-wise focal loss (Lin et al., https://arxiv.org/abs/1708.02002).
    Down-weighs easy negative samples, e.g. for high background-foreground
    imbalance.  Works with binary (logit) as well as probabilistic input.
    '''
    def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):
        '''
        Arguments:
            alpha:  Hyperparam; global scaling factor (refer to paper)
            gamma:  Hyperparam; focusing exponent (refer to paper)
            logits: boolean: True -> input is raw scores, False -> values in [0,1]
            reduce: boolean: True -> return the mean loss, False -> the
                    element-wise loss map (same role as `reduction` in
                    torch.nn.BCEWithLogitsLoss)
        '''
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.logits = logits
        self.reduce = reduce

    def forward(self, inputs, targets):
        '''
        Arguments:
            inputs:  refer to torch.nn.functional.binary_cross_entropy_with_logits
            targets: refer to torch.nn.functional.binary_cross_entropy_with_logits
        return:
            loss (scalar mean if self.reduce, else element-wise tensor)
        '''
        bce_fn = (F.binary_cross_entropy_with_logits if self.logits
                  else F.binary_cross_entropy)
        bce = bce_fn(inputs, targets, reduction='none')
        # exp(-BCE) recovers p_t, the probability assigned to the true class
        p_t = torch.exp(-bce)
        focal = self.alpha * (1 - p_t) ** self.gamma * bce
        return torch.mean(focal) if self.reduce else focal
class ClassWiseFocalLoss(FocalLoss):
    '''
    See FocalLoss
    + Allows to apply different alpha and gamma for each class
        alpha: class-class imbalance
        gamma: class-background imbalance
    '''
    def __init__(self, alpha=(1, 1, 1), gamma=(2, 2, 2), logits=True, reduce=False):
        '''
        See FocalLoss
        Expects gamma and alpha to be sequences of the same length
        (one entry per class).

        Defaults are tuples: the original mutable-list defaults
        ([1, 1, 1] / [2, 2, 2]) would be shared across every call of the
        constructor, a classic Python pitfall.  Callers may still pass
        lists; only the per-element values are read.
        '''
        super().__init__(alpha, gamma, logits, reduce)

    def forward(self, inputs, targets):
        '''
        Expects targets and inputs to be structured like:
        batches x classes x X x Y

        Arguments:
            see FocalLoss
        return:
            loss
        '''
        if self.logits:
            BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
        else:
            BCE_loss = F.binary_cross_entropy(inputs, targets, reduction='none')
        pt = torch.exp(-BCE_loss)  # -BCE_loss = log(pt)
        F_loss = torch.zeros_like(pt)
        # apply the per-class hyperparameters along the class axis (dim 1)
        for i, (alpha, gamma) in enumerate(zip(self.alpha, self.gamma)):
            F_loss[:, i, :, :] = alpha * (1 - pt[:, i, :, :]) ** gamma * BCE_loss[:, i, :, :]
        if self.reduce:
            return torch.mean(F_loss)
        else:
            return F_loss
| StarcoderdataPython |
6660166 | #!/usr/bin/env python
##############################################################################
#
# diffpy.Structure by DANSE Diffraction group
# <NAME>
# (c) 2007 trustees of the Michigan State University.
# All rights reserved.
#
# File coded by: <NAME>
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE_DANSE.txt for license information.
#
##############################################################################
"""Parser for basic CIF file format
http://www.iucr.org/iucr-top/cif/home.html
"""
import sys
import os
import re
import copy
import numpy
from diffpy.Structure import Structure, Lattice, Atom
from diffpy.Structure import StructureFormatError
from diffpy.Structure.Parsers import StructureParser
##############################################################################
# class P_cif
##############################################################################
class P_cif(StructureParser):
    """Simple parser for CIF structure format.
    Reads Structure from the first block containing _atom_site_label key.
    Following blocks, if any, are ignored.
    Data members:
        format -- structure format name
        ciffile -- instance of CifFile from PyCifRW
        stru -- Structure instance used for cif input or output
    Data members used for input only:
        spacegroup -- instance of SpaceGroup used for symmetry expansion
        eau -- instance of ExpandAsymmetricUnit from SymmetryUtilities
        asymmetric_unit -- list of atom instances for the original asymmetric
            unit in the CIF file
        labelindex -- dictionary mapping unique atom label to index of atom
            in self.asymmetric_unit
        cif_sgname -- space group name obtained by looking up the value of
            _space_group_name_Hall, _symmetry_space_group_name_Hall,
            _space_group_name_H-M_alt, _symmetry_space_group_name_H-M
            items.  None when neither is defined.
    NOTE(review): this class uses Python 2 only syntax
    ("except E, err", "raise E, msg, tb", the unicode builtin, dict.has_key)
    and will not run unmodified on Python 3.
    """
    ########################################################################
    # static data and methods
    ########################################################################
    # dictionary set of class methods for translating CIF values
    # to Atom attributes; the keys double as method names, resolved in
    # _get_atom_setters() by prepending "_tr" to each CIF item name.
    _atom_setters = dict.fromkeys((
        '_tr_ignore',
        '_tr_atom_site_label',
        '_tr_atom_site_type_symbol',
        '_tr_atom_site_fract_x',
        '_tr_atom_site_fract_y',
        '_tr_atom_site_fract_z',
        '_tr_atom_site_cartn_x',
        '_tr_atom_site_cartn_y',
        '_tr_atom_site_cartn_z',
        '_tr_atom_site_U_iso_or_equiv',
        '_tr_atom_site_B_iso_or_equiv',
        '_tr_atom_site_adp_type', '_tr_atom_site_thermal_displace_type',
        '_tr_atom_site_occupancy',
        '_tr_atom_site_aniso_U_11',
        '_tr_atom_site_aniso_U_22',
        '_tr_atom_site_aniso_U_33',
        '_tr_atom_site_aniso_U_12',
        '_tr_atom_site_aniso_U_13',
        '_tr_atom_site_aniso_U_23',
        '_tr_atom_site_aniso_B_11',
        '_tr_atom_site_aniso_B_22',
        '_tr_atom_site_aniso_B_33',
        '_tr_atom_site_aniso_B_12',
        '_tr_atom_site_aniso_B_13',
        '_tr_atom_site_aniso_B_23',
        ))
    # Debye-Waller conversion factor: B = 8 * pi**2 * U
    BtoU = 1.0/(8 * numpy.pi**2)
    def _tr_ignore(a, value):
        # no-op translator used for CIF items that carry no Atom data
        return
    _tr_ignore = staticmethod(_tr_ignore)
    def _tr_atom_site_label(a, value):
        a.label = value
        # set element when not specified by _atom_site_type_symbol
        if not a.element:
            P_cif._tr_atom_site_type_symbol(a, value)
    _tr_atom_site_label = staticmethod(_tr_atom_site_label)
    # 3 regexp groups for nucleon number, atom symbol, and oxidation state
    _psymb = re.compile(r'(\d+-)?([a-zA-Z]+)(\d[+-])?')
    def _tr_atom_site_type_symbol(a, value):
        rx = P_cif._psymb.match(value)
        # fall back to the raw value when the pattern does not match
        smbl = rx and rx.group(0) or value
        # normalize capitalization, e.g. "FE" -> "Fe"
        a.element = smbl[:1].upper() + smbl[1:].lower()
    _tr_atom_site_type_symbol = staticmethod(_tr_atom_site_type_symbol)
    def _tr_atom_site_fract_x(a, value):
        a.xyz[0] = leading_float(value)
    _tr_atom_site_fract_x = staticmethod(_tr_atom_site_fract_x)
    def _tr_atom_site_fract_y(a, value):
        a.xyz[1] = leading_float(value)
    _tr_atom_site_fract_y = staticmethod(_tr_atom_site_fract_y)
    def _tr_atom_site_fract_z(a, value):
        a.xyz[2] = leading_float(value)
    _tr_atom_site_fract_z = staticmethod(_tr_atom_site_fract_z)
    def _tr_atom_site_cartn_x(a, value):
        a.xyz_cartn[0] = leading_float(value)
    _tr_atom_site_cartn_x = staticmethod(_tr_atom_site_cartn_x)
    def _tr_atom_site_cartn_y(a, value):
        a.xyz_cartn[1] = leading_float(value)
    _tr_atom_site_cartn_y = staticmethod(_tr_atom_site_cartn_y)
    def _tr_atom_site_cartn_z(a, value):
        a.xyz_cartn[2] = leading_float(value)
    _tr_atom_site_cartn_z = staticmethod(_tr_atom_site_cartn_z)
    def _tr_atom_site_U_iso_or_equiv(a, value):
        a.Uisoequiv = leading_float(value)
    _tr_atom_site_U_iso_or_equiv = staticmethod(_tr_atom_site_U_iso_or_equiv)
    def _tr_atom_site_B_iso_or_equiv(a, value):
        # CIF stores B; Atom stores U -- convert on the way in
        a.Uisoequiv = P_cif.BtoU * leading_float(value)
    _tr_atom_site_B_iso_or_equiv = staticmethod(_tr_atom_site_B_iso_or_equiv)
    def _tr_atom_site_adp_type(a, value):
        a.anisotropy = value not in ("Uiso", "Biso")
    _tr_atom_site_adp_type = staticmethod(_tr_atom_site_adp_type)
    # old CIF name for the same item
    _tr_atom_site_thermal_displace_type = _tr_atom_site_adp_type
    def _tr_atom_site_occupancy(a, value):
        a.occupancy = leading_float(value)
    _tr_atom_site_occupancy = staticmethod(_tr_atom_site_occupancy)
    def _tr_atom_site_aniso_U_11(a, value):
        a.U11 = leading_float(value)
    _tr_atom_site_aniso_U_11 = staticmethod(_tr_atom_site_aniso_U_11)
    def _tr_atom_site_aniso_U_22(a, value):
        a.U22 = leading_float(value)
    _tr_atom_site_aniso_U_22 = staticmethod(_tr_atom_site_aniso_U_22)
    def _tr_atom_site_aniso_U_33(a, value):
        a.U33 = leading_float(value)
    _tr_atom_site_aniso_U_33 = staticmethod(_tr_atom_site_aniso_U_33)
    def _tr_atom_site_aniso_U_12(a, value):
        a.U12 = leading_float(value)
    _tr_atom_site_aniso_U_12 = staticmethod(_tr_atom_site_aniso_U_12)
    def _tr_atom_site_aniso_U_13(a, value):
        a.U13 = leading_float(value)
    _tr_atom_site_aniso_U_13 = staticmethod(_tr_atom_site_aniso_U_13)
    def _tr_atom_site_aniso_U_23(a, value):
        a.U23 = leading_float(value)
    _tr_atom_site_aniso_U_23 = staticmethod(_tr_atom_site_aniso_U_23)
    def _tr_atom_site_aniso_B_11(a, value):
        a.U11 = P_cif.BtoU * leading_float(value)
    _tr_atom_site_aniso_B_11 = staticmethod(_tr_atom_site_aniso_B_11)
    def _tr_atom_site_aniso_B_22(a, value):
        a.U22 = P_cif.BtoU * leading_float(value)
    _tr_atom_site_aniso_B_22 = staticmethod(_tr_atom_site_aniso_B_22)
    def _tr_atom_site_aniso_B_33(a, value):
        a.U33 = P_cif.BtoU * leading_float(value)
    _tr_atom_site_aniso_B_33 = staticmethod(_tr_atom_site_aniso_B_33)
    def _tr_atom_site_aniso_B_12(a, value):
        a.U12 = P_cif.BtoU * leading_float(value)
    _tr_atom_site_aniso_B_12 = staticmethod(_tr_atom_site_aniso_B_12)
    def _tr_atom_site_aniso_B_13(a, value):
        a.U13 = P_cif.BtoU * leading_float(value)
    _tr_atom_site_aniso_B_13 = staticmethod(_tr_atom_site_aniso_B_13)
    def _tr_atom_site_aniso_B_23(a, value):
        a.U23 = P_cif.BtoU * leading_float(value)
    _tr_atom_site_aniso_B_23 = staticmethod(_tr_atom_site_aniso_B_23)
    def _get_atom_setters(cifloop):
        """Find translators of CifLoop items to data in Atom instance.
        Static method.
        cifloop -- instance of CifLoop
        Return a list of setter functions in the order of cifloop.keys();
        items with no registered translator map to the _tr_ignore no-op.
        """
        rv = []
        for p in cifloop.keys():
            fncname = "_tr" + p
            if fncname in P_cif._atom_setters:
                f = getattr(P_cif, fncname)
            else:
                f = P_cif._tr_ignore
            rv.append(f)
        return rv
    _get_atom_setters = staticmethod(_get_atom_setters)
    ########################################################################
    # normal methods
    ########################################################################
    def __init__(self):
        StructureParser.__init__(self)
        self.format = "cif"
        self.ciffile = None
        self.stru = None
        self.spacegroup = None
        self.eau = None
        self.asymmetric_unit = None
        self.labelindex = {}
        self.cif_sgname = None
        pass
    def parse(self, s):
        """Create Structure instance from a string in CIF format.
        Return Structure instance or raise StructureFormatError.
        """
        # CifFile seems to be only able to read from existing files,
        # so dump the string to a temporary file first.
        # NOTE(review): Python 2 only -- on Python 3 os.write() would
        # require bytes, not str.
        import tempfile
        out, tmpfile = tempfile.mkstemp()
        os.write(out, s)
        os.close(out)
        try:
            rv = self.parseFile(tmpfile)
        finally:
            os.remove(tmpfile)
            self.filename = None
        return rv
    def parseLines(self, lines):
        """Parse list of lines in CIF format.
        lines -- list of strings stripped of line terminator
        Return Structure instance or raise StructureFormatError.
        """
        s = "\n".join(lines) + '\n'
        return self.parse(s)
    def parseFile(self, filename):
        """Create Structure from an existing CIF file.
        filename -- path to structure file
        Return Structure object.
        Raise StructureFormatError or IOError.
        """
        import CifFile
        from StarFile import StarError
        # CifFile fails when filename is a unicode string
        if type(filename) is unicode:
            filename = str(filename)
        self.filename = filename
        try:
            fileurl = fixIfWindowsPath(filename)
            self.ciffile = CifFile.CifFile(fileurl)
            for blockname, ignore in self.ciffile.items():
                self._parseCifBlock(blockname)
                # stop after reading the first structure
                if self.stru: break
        # Python 2 syntax; the 3-argument raise re-raises with the
        # original traceback so the failure points at the CIF parser.
        except (StarError, ValueError, IndexError), err:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            emsg = str(err).strip()
            raise StructureFormatError, emsg, exc_traceback
        # all good here
        return self.stru
    def _parseCifBlock(self, blockname):
        """Translate CIF file block, skip blocks without _atom_site_label.
        Updates data members stru, eau.
        blockname -- name of top level block in self.ciffile
        No return value.
        """
        block = self.ciffile[blockname]
        if not block.has_key('_atom_site_label'): return
        # here block contains structure, initialize output data
        self.stru = Structure()
        self.labelindex.clear()
        # execute specialized block parsers
        self._parse_lattice(block)
        self._parse_atom_site_label(block)
        self._parse_atom_site_aniso_label(block)
        self._parse_space_group_symop_operation_xyz(block)
        return
    def _parse_lattice(self, block):
        """Obtain lattice parameters from a CifBlock.
        This method updates self.stru.lattice.
        block -- instance of CifBlock
        No return value.
        """
        if not block.has_key('_cell_length_a'): return
        # obtain lattice parameters
        try:
            latpars = (
                leading_float(block['_cell_length_a']),
                leading_float(block['_cell_length_b']),
                leading_float(block['_cell_length_c']),
                leading_float(block['_cell_angle_alpha']),
                leading_float(block['_cell_angle_beta']),
                leading_float(block['_cell_angle_gamma']),
            )
        except KeyError, err:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            emsg = str(err)
            raise StructureFormatError, emsg, exc_traceback
        self.stru.lattice = Lattice(*latpars)
        return
    def _parse_atom_site_label(self, block):
        """Obtain atoms in asymmetric unit from a CifBlock.
        This method inserts Atom instances to self.stru and
        updates labelindex dictionary.
        block -- instance of CifBlock
        No return value.
        """
        # process _atom_site_label
        atom_site_loop = block.GetLoop('_atom_site_label')
        # get a list of setters for atom_site values
        prop_setters = P_cif._get_atom_setters(atom_site_loop)
        # index of the _atom_site_label item for the labelindex dictionary
        ilb = atom_site_loop.keys().index('_atom_site_label')
        # loop through the values and pass them to the setters
        sitedatalist = zip(*atom_site_loop.values())
        for values in sitedatalist:
            curlabel = values[ilb]
            self.labelindex[curlabel] = len(self.stru)
            self.stru.addNewAtom()
            a = self.stru.getLastAtom()
            for fset, val in zip(prop_setters, values):
                fset(a, val)
        return
    def _parse_atom_site_aniso_label(self, block):
        """Obtain value of anisotropic thermal displacements from a CifBlock.
        This method updates U members of Atom instances in self.stru.
        The labelindex dictionary has to be defined beforehand.
        block -- instance of CifBlock
        No return value.
        """
        if not block.has_key('_atom_site_aniso_label'): return
        # something to do here:
        adp_loop = block.GetLoop('_atom_site_aniso_label')
        # index of the _atom_site_label column
        ilb = adp_loop.keys().index('_atom_site_aniso_label')
        # get a list of setters for this loop
        prop_setters = P_cif._get_atom_setters(adp_loop)
        sitedatalist = zip(*adp_loop.values())
        for values in sitedatalist:
            # match the ADP row back to its atom via the shared label
            idx = self.labelindex[values[ilb]]
            a = self.stru[idx]
            for fset, val in zip(prop_setters, values):
                fset(a, val)
        return
    def _parse_space_group_symop_operation_xyz(self, block):
        """Process symmetry operations from a CifBlock.  The method
        updates spacegroup and eau data according to symmetry
        operations defined in _space_group_symop_operation_xyz or
        _symmetry_equiv_pos_as_xyz items in CifBlock.
        block -- instance of CifBlock
        No return value.
        """
        from diffpy.Structure.SpaceGroups import IsSpaceGroupIdentifier
        from diffpy.Structure.SpaceGroups import SpaceGroup, GetSpaceGroup
        self.asymmetric_unit = list(self.stru)
        sym_synonyms = ('_space_group_symop_operation_xyz',
                        '_symmetry_equiv_pos_as_xyz')
        sym_loop_name = [n for n in sym_synonyms if block.has_key(n)]
        # recover explicit list of symmetry operations
        symop_list = []
        if sym_loop_name:
            # sym_loop exists here and we know its cif name
            sym_loop_name = sym_loop_name[0]
            sym_loop = block.GetLoop(sym_loop_name)
            for eqxyz in sym_loop.GetLoopItem(sym_loop_name):
                op = getSymOp(eqxyz)
                symop_list.append(op)
        # determine space group number
        sg_nameHall = (block.get('_space_group_name_Hall', '') or
                block.get('_symmetry_space_group_name_Hall', ''))
        sg_nameHM = (block.get('_space_group_name_H-M_alt', '') or
                block.get('_symmetry_space_group_name_H-M', ''))
        self.cif_sgname = (sg_nameHall or sg_nameHM or None)
        sgid = (int(block.get('_space_group_IT_number', '0')) or
                int(block.get('_symmetry_Int_Tables_number', '0')) or
                sg_nameHM)
        # try to reuse existing space group
        self.spacegroup = None
        if sgid and IsSpaceGroupIdentifier(sgid):
            sgstd = GetSpaceGroup(sgid)
            oprep_std = [str(op) for op in sgstd.iter_symops()]
            oprep_std.sort()
            oprep_cif = [str(op) for op in symop_list]
            oprep_cif.sort()
            # make sure symmetry operations have the same order
            if oprep_std == oprep_cif:
                self.spacegroup = copy.copy(sgstd)
                self.spacegroup.symop_list = symop_list
            # use standard definition when symmetry operations were not listed
            elif not symop_list:
                self.spacegroup = sgstd
        # define new spacegroup when symmetry operations were listed, but
        # there is no match to an existing definition
        if symop_list and self.spacegroup is None:
            new_short_name = "CIF " + (sg_nameHall or 'data')
            new_crystal_system = (
                    block.get('_space_group_crystal_system') or
                    block.get('_symmetry_cell_setting') or
                    'TRICLINIC' ).upper()
            self.spacegroup = SpaceGroup(
                    short_name=new_short_name,
                    crystal_system=new_crystal_system,
                    symop_list=symop_list)
        self._expandAsymmetricUnit()
        return
    def _expandAsymmetricUnit(self):
        """Perform symmetry expansion of self.stru using self.spacegroup.
        This method updates data in stru and eau.
        No return value.
        """
        from diffpy.Structure.SymmetryUtilities import ExpandAsymmetricUnit
        # get reverse-ordered unique indices
        corepos = [a.xyz for a in self.stru]
        coreUijs = [a.U for a in self.stru]
        self.eau = ExpandAsymmetricUnit(self.spacegroup, corepos, coreUijs)
        # build a nested list of new atoms:
        newatoms = []
        for i, ca in enumerate(self.stru):
            eca = []    # expanded core atom
            for j in range(self.eau.multiplicity[i]):
                a = Atom(ca)
                a.xyz = self.eau.expandedpos[i][j]
                # symmetry mates get a numeric suffix, e.g. "Na1_2"
                if j > 0:
                    a.label += '_' + str(j + 1)
                if a.anisotropy:
                    a.U = self.eau.expandedUijs[i][j]
                eca.append(a)
            newatoms.append(eca)
        # insert new atoms where they belong
        self.stru[:] = sum(newatoms, [])
        return
    ########################################################################
    # conversion to CIF
    ########################################################################
    def toLines(self, stru):
        """Convert Structure stru to a list of lines in basic CIF format.
        The output is always written in space group P1 with every atom
        listed explicitly.
        Return list of strings.
        """
        import time
        lines = []
        # may be replaced with filtered Structure.title
        # for now, we can add the title as a comment
        if stru.title.strip() != "":
            title_lines = stru.title.split('\n')
            lines.extend([ "# " + line.strip() for line in title_lines ])
            lines.append("")
        lines.append("data_3D")
        iso_date = "%04i-%02i-%02i" % time.gmtime()[:3]
        lines.extend([
            "%-31s %s" % ("_audit_creation_date", iso_date),
            "%-31s %s" % ("_audit_creation_method", "P_cif.py"),
            "",
            "%-31s %s" % ("_symmetry_space_group_name_H-M", "'P1'"),
            "%-31s %s" % ("_symmetry_Int_Tables_number", "1"),
            "%-31s %s" % ("_symmetry_cell_setting", "triclinic"),
            "" ])
        # there should be no need to specify equivalent positions for P1
        # _symmetry_equiv_posi_as_xyz x,y,z
        lines.extend([
            "%-31s %.6g" % ("_cell_length_a", stru.lattice.a),
            "%-31s %.6g" % ("_cell_length_b", stru.lattice.b),
            "%-31s %.6g" % ("_cell_length_c", stru.lattice.c),
            "%-31s %.6g" % ("_cell_angle_alpha", stru.lattice.alpha),
            "%-31s %.6g" % ("_cell_angle_beta", stru.lattice.beta),
            "%-31s %.6g" % ("_cell_angle_gamma", stru.lattice.gamma),
            "" ])
        # build a list of site labels and adp (displacement factor) types
        element_count = {}
        a_site_label = []
        a_adp_type = []
        for a in stru:
            # per-element counter makes labels like "Na1", "Na2", "Cl1"
            cnt = element_count[a.element] = element_count.get(a.element,0)+1
            a_site_label.append( "%s%i" % (a.element, cnt) )
            # an ADP is isotropic when U is a scalar multiple of identity
            if numpy.all(a.U == a.U[0,0]*numpy.identity(3)):
                a_adp_type.append("Uiso")
            else:
                a_adp_type.append("Uani")
        # list all atoms
        lines.extend([
            "loop_",
            "  _atom_site_label",
            "  _atom_site_type_symbol",
            "  _atom_site_fract_x",
            "  _atom_site_fract_y",
            "  _atom_site_fract_z",
            "  _atom_site_U_iso_or_equiv",
            "  _atom_site_adp_type",
            "  _atom_site_occupancy" ])
        for i in range(len(stru)):
            a = stru[i]
            line = "  %-5s %-3s %11.6f %11.6f %11.6f %11.6f %-5s %.4f" % (
                    a_site_label[i], a.element, a.xyz[0], a.xyz[1], a.xyz[2],
                    a.Uisoequiv, a_adp_type[i], a.occupancy )
            lines.append(line)
        # find anisotropic atoms
        idx_aniso = [ i for i in range(len(stru)) if a_adp_type[i] != "Uiso" ]
        if idx_aniso != []:
            lines.extend([
                "loop_",
                "  _atom_site_aniso_label",
                "  _atom_site_aniso_U_11",
                "  _atom_site_aniso_U_22",
                "  _atom_site_aniso_U_33",
                "  _atom_site_aniso_U_12",
                "  _atom_site_aniso_U_13",
                "  _atom_site_aniso_U_23" ])
            for i in idx_aniso:
                a = stru[i]
                line = "  %-5s %9.6f %9.6f %9.6f %9.6f %9.6f %9.6f" % (
                        a_site_label[i], a.U[0,0], a.U[1,1], a.U[2,2],
                        a.U[0,1], a.U[0,2], a.U[1,2] )
                lines.append(line)
        return lines
    # End of toLines
# End of class P_cif
##############################################################################
# Routines
##############################################################################
# Pre-compiled pattern for an optionally signed float with optional
# exponent, anchored at the start of the string; used by leading_float().
rx_float = re.compile(r'[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?')

def leading_float(s):
    """Extract the first float from *s*, ignoring trailing characters.

    Useful for the CIF "value(esd)" syntax, e.g. "1.234(5)" -> 1.234.
    A bare "." maps to 0.0 (ICSD CIF files use it for unknown Uiso).

    Return float; raise ValueError when no number can be extracted.
    """
    stripped = s.strip()
    match = rx_float.match(stripped)
    if match:
        return float(match.group())
    if stripped == '.':
        # ICSD CIF files may contain "." for unknown Uiso
        return 0.0
    # no leading number at all -- let float() raise a ValueError
    return float(stripped)
# helper dictionary for getSymOp(): maps a single "x"/"y"/"z" token
# (optionally signed) to its contribution to one row of the 3x3
# rotation matrix of a symmetry operation.
symvec = {
    'x' : numpy.array([1, 0, 0], dtype=float),
    'y' : numpy.array([0, 1, 0], dtype=float),
    'z' : numpy.array([0, 0, 1], dtype=float),
    '-x' : numpy.array([-1, 0, 0], dtype=float),
    '-y' : numpy.array([0, -1, 0], dtype=float),
    '-z' : numpy.array([0, 0, -1], dtype=float),
}
# explicit "+" prefixes are synonyms for the unsigned tokens
symvec['+x'] = symvec['x']
symvec['+y'] = symvec['y']
symvec['+z'] = symvec['z']
def getSymOp(s):
    """Create SpaceGroups.SymOp instance from a string.
    s -- formula for equivalent coordinates, for example 'x,1/2-y,1/2+z'
    Return instance of SymOp.
    """
    from diffpy.Structure.SpaceGroups import SymOp
    snoblanks = s.replace(' ','')
    eqlist = snoblanks.split(',')
    R = numpy.zeros((3,3), dtype=float)
    t = numpy.zeros(3, dtype=float)
    for i in (0, 1, 2):
        # split the formula into alternating numeric and [+-]xyz tokens;
        # odd slots hold the xyz terms, even slots the numeric ones
        eqparts = re.split('(?i)([+-]?[xyz])', eqlist[i])
        for Rpart in eqparts[1::2]:
            R[i,:] += symvec[Rpart.lower()]
        for tpart in eqparts[::2]:
            # SECURITY NOTE(review): eval() on text coming from the CIF
            # file -- acceptable only for trusted input; a fraction
            # parser would be safer.  The "1.0*...+0" wrapping forces
            # float arithmetic and tolerates empty tokens.
            t[i] += eval('1.0*%s+0' % tpart)
    # normalize translations into the [0, 1) unit cell range
    t -= numpy.floor(t)
    rv = SymOp(R, t)
    return rv
def fixIfWindowsPath(filename):
    """Convert Windows-style path to valid local URL.

    CifFile loads files using urlopen, which fails for Windows-style
    drive paths such as "C:\\dir\\file.cif".

    filename -- path to be fixed

    Return fixed URL when run on Windows, otherwise return filename
    unchanged.
    """
    # only drive-letter paths on an NT system need fixing
    if os.name != "nt":
        return filename
    if not re.match(r'^[a-z]:\\', filename, re.I):
        return filename
    import urllib
    return urllib.pathname2url(filename)
def getParser():
    """Return new parser object for the basic CIF format."""
    return P_cif()
# End of file
| StarcoderdataPython |
181200 | from tuiuiu.tuiuiusearch.views.frontend import search # noqa
| StarcoderdataPython |
6484541 | <reponame>alanStocco/leetcode-solutions
class Solution(object):
    """LeetCode 417 "Pacific Atlantic Water Flow", solved by flooding
    uphill from each ocean border and intersecting the two reachable
    sets."""

    def dfs(self, i, j, matrix, explored, prev):
        """Depth-first flood from cell (i, j), walking only to cells at
        least as high as *prev* (water flows downhill, so we search in
        reverse).

        explored -- set of (row, col) cells already proven reachable
        prev     -- height of the cell we arrived from (-1 seeds borders)
        """
        n_rows, n_cols = len(matrix), len(matrix[0])
        outside = i < 0 or i >= n_rows or j < 0 or j >= n_cols
        if outside or (i, j) in explored or matrix[i][j] < prev:
            return
        explored.add((i, j))
        here = matrix[i][j]
        for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):  # up, down, left, right
            self.dfs(i + di, j + dj, matrix, explored, here)

    def pacificAtlantic(self, matrix):
        """Return all cells (as (row, col) pairs) from which water can
        reach both the Pacific (top/left edges) and the Atlantic
        (bottom/right edges)."""
        if not matrix:
            return []
        pacific, atlantic = set(), set()
        n_rows, n_cols = len(matrix), len(matrix[0])
        for col in range(n_cols):
            self.dfs(0, col, matrix, pacific, -1)             # top edge
            self.dfs(n_rows - 1, col, matrix, atlantic, -1)   # bottom edge
        for row in range(n_rows):
            self.dfs(row, 0, matrix, pacific, -1)             # left edge
            self.dfs(row, n_cols - 1, matrix, atlantic, -1)   # right edge
        return list(pacific & atlantic)
# Ad-hoc smoke test.  Note each assignment overwrites the previous one,
# so only the final 1x1 matrix is actually exercised below.
sol = Solution()
matrix = [[1,2,2,3,5],[3,2,3,4,4],[2,4,5,3,1],[6,7,1,4,5],[5,1,1,2,4]]
matrix =[]
matrix = [[1]]
print(sol.pacificAtlantic(matrix)) | StarcoderdataPython |
4912377 | import numpy as np
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
from gazebo_msgs.msg import ModelStates
from cv_bridge import CvBridge, CvBridgeError
import cv2
from sensor_msgs.msg import Image
class TurtleBot2catchEnv(robot_gazebo_env.RobotGazeboEnv):
    """Gazebo robot environment with a line-follower car, an
    object-disposer robot and an object box.

    Spawns both robots via ROSLauncher, wires up camera / model-state
    subscribers and cmd_vel publishers, and exposes the virtual methods
    a task (Training) environment must implement.
    (The original docstring, "Superclass for all CubeSingleDisk
    environments", was copied boilerplate.)
    """
    def __init__(self, ros_ws_abspath):
        # converts sensor_msgs/Image messages to OpenCV arrays
        self.bridge = CvBridge()
        rospy.logdebug("Start TurtleBot2catchEnv INIT...")
        # Variables that we give through the constructor.
        # None in this case
        # We launch the ROSlaunch that spawns the robot into the world
        #ROSLauncher(rospackage_name="dql_robot",
        #            launch_file_name="old_not_mine/put_robots_in_world.launch",
        #            ros_ws_abspath=ros_ws_abspath)
        ROSLauncher(rospackage_name="dql_robot",
                    launch_file_name="put_line_follower_car_in_world.launch",
                    ros_ws_abspath=ros_ws_abspath)
        ROSLauncher(rospackage_name="dql_robot",
                    launch_file_name="put_object_disposer_robot_in_world.launch",
                    ros_ws_abspath=ros_ws_abspath)
        # Internal Vars
        # Doesnt have any accesibles
        self.controllers_list = []
        # It doesnt use namespace
        self.robot_name_space = ""
        # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
        super(TurtleBot2catchEnv, self).__init__(controllers_list=self.controllers_list,
                                            robot_name_space=self.robot_name_space,
                                            reset_controls=False,
                                            start_init_physics_parameters=False,
                                            reset_world_or_sim="WORLD")
        self.gazebo.unpauseSim()
        #self.controllers_object.reset_controllers()
        self._check_all_sensors_ready()
        # We Start all the ROS related Subscribers and publishers
        # subscribe to front camera of line follower robot
        rospy.Subscriber("/line_follower_car/front_camera/image_raw",Image,self._camera_rgb_image_raw_callback_line_follower_car)
        # subscribe to front camera of object disposer robot
        rospy.Subscriber("/object_disposer_robot/front_camera/image_raw",Image, self._camera_rgb_image_raw_callback_object_disposer_car)
        # publish speed to line follower robot
        self._cmd_vel_pub_line_follower_car=rospy.Publisher('/line_follower_car/cmd_vel_car',
                            Twist, queue_size=1)
        self.twist=Twist()
        # publish speed to object_disposer car
        self._cmd_vel_pub_object_disposer_robot=rospy.Publisher('/object_disposer_robot/cmd_vel_car',
                            Twist, queue_size=1)
        rospy.Subscriber("/gazebo/model_states", ModelStates ,self._model_state_callback)
        self._check_publishers_connection()
        self.gazebo.pauseSim()
        rospy.logdebug("Finished TurtleBot2Env INIT...")
    # Methods needed by the RobotGazeboEnv
    # ----------------------------
    def _model_state_callback(self,msg):
        # Cache the (x, y) positions of all tracked models each time
        # Gazebo publishes /gazebo/model_states.
        # NOTE(review): models.index() raises ValueError if a model has
        # not been spawned yet -- assumed spawned before first message.
        models = msg.name
        #predator_idx = models.index('predator')
        #prey_idx = models.index('prey')
        object_disposer_robot_idx=models.index('object_disposer_robot')
        line_follower_car_idx=models.index('line_follower_car')
        object_box_idx=models.index('object_box')
        #self.predator_position = [msg.pose[predator_idx].position.x, msg.pose[predator_idx].position.y]
        #self.prey_position = [msg.pose[prey_idx].position.x, msg.pose[prey_idx].position.y]
        self.line_follower_car_position = [msg.pose[line_follower_car_idx].position.x, msg.pose[line_follower_car_idx].position.y]
        self.object_disposer_robot_position=[msg.pose[object_disposer_robot_idx].position.x, msg.pose[object_disposer_robot_idx].position.y]
        self.object_box_position=[msg.pose[object_box_idx].position.x, msg.pose[object_box_idx].position.y]
        # only the x and y quaternion components are kept
        self.object_box_orientation=[msg.pose[object_box_idx].orientation.x, msg.pose[object_box_idx].orientation.y]
    def _check_all_systems_ready(self):
        """
        Checks that all the sensors, publishers and other simulation systems are
        operational.
        """
        self._check_all_sensors_ready()
        return True
    # CubeSingleDiskEnv virtual methods
    # ----------------------------
    def _check_all_sensors_ready(self):
        # NOTE(review): no actual sensor checks are performed here; the
        # method only logs.
        rospy.logdebug("START ALL SENSORS READY")
        rospy.logdebug("ALL SENSORS READY")
    #def _front_camera_rgb_image_raw_callback_prey(self, data):
    #    self.front_camera_rgb_image_raw_prey = self.bridge.imgmsg_to_cv2(data,"rgb8")
    #def _back_camera_rgb_image_raw_callback_prey(self, data):
    #    self.back_camera_rgb_image_raw_prey = self.bridge.imgmsg_to_cv2(data,"rgb8")
    #def _camera_rgb_image_raw_callback_predator(self, data):
    #    self.camera_rgb_image_raw_predator = self.bridge.imgmsg_to_cv2(data,"rgb8")
    def _camera_rgb_image_raw_callback_object_disposer_car(self, data):
        # latest front-camera frame of the object-disposer robot
        self.camera_rgb_image_raw_object_disposer_car = self.bridge.imgmsg_to_cv2(data,"rgb8")
    def _camera_rgb_image_raw_callback_line_follower_car(self, data):
        # Line-detection callback: threshold the line color in a
        # horizontal band of the image and publish a velocity command.
        self.camera_rgb_image_raw_line_follower_car = self.bridge.imgmsg_to_cv2(data,"rgb8")
        # NOTE(review): the frame was decoded as "rgb8" but is converted
        # with COLOR_BGR2HSV, so the channel order is swapped -- the
        # thresholds below presumably compensate; verify against the sim.
        hsv = cv2.cvtColor(self.camera_rgb_image_raw_line_follower_car, cv2.COLOR_BGR2HSV)
        lower_yellow = np.array([ 10, 10, 10])
        upper_yellow = np.array([220, 245, 90])
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        h, w, d = self.camera_rgb_image_raw_line_follower_car.shape
        # NOTE(review): Python 2 integer division; on Python 3 "3*h/4"
        # is a float and the mask slicing below would raise TypeError
        # (use 3*h//4 when porting).
        search_top = 3*h/4
        search_bot = 3*h/4 + 20
        # keep only a 20-pixel band near the bottom of the image
        mask[0:search_top, 0:w] = 0
        mask[search_bot:h, 0:w] = 0
        M = cv2.moments(mask)
        if M['m00'] > 0:
            #print "i did it!"
            # centroid of the detected line segment
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            cv2.circle(self.camera_rgb_image_raw_line_follower_car, (cx, cy), 20, (0,0,255), -1)
            # The proportional controller is implemented in the following
            # lines, responsible for linear scaling of an error to drive
            # the control output -- currently disabled: the published
            # command is always zero (err is computed but unused).
            err = cx - w/2
            #self.twist.linear.x = 10.0
            #self.twist.angular.z = -float(err) / 12
            self.twist.linear.x = 0.0
            self.twist.angular.z = 0.0
            self._cmd_vel_pub_line_follower_car.publish(self.twist)
            # throttle this callback; blocks the subscriber thread
            time.sleep(0.025)
    #def _right_camera_rgb_image_raw_callback_predator(self, data):
    #    self.right_camera_rgb_image_raw_predator = self.bridge.imgmsg_to_cv2(data,"rgb8")
    #def _LaserScan_callback_predator(self, data):
    #    self.LaserScan_predator = data.ranges
    def _check_publishers_connection(self):
        """
        Checks that all the publishers are working.
        Blocks until the object-disposer cmd_vel publisher has at least
        one subscriber (or ROS shuts down).
        :return:
        """
        rate = rospy.Rate(10)  # 10hz
        #while (self._cmd_vel_pub_predator.get_num_connections() == 0 or self._cmd_vel_pub_prey.get_num_connections() == 0 ) and not rospy.is_shutdown():
        while (self._cmd_vel_pub_object_disposer_robot.get_num_connections() == 0) and not rospy.is_shutdown():
            rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
            try:
                rate.sleep()
            except rospy.ROSInterruptException:
                # This is to avoid error when world is rested, time when backwards.
                pass
        rospy.logdebug("_cmd_vel_pub Publisher Connected")
        rospy.logdebug("All Publishers READY")
    # Methods that the TrainingEnvironment will need to define here as virtual
    # because they will be used in RobotGazeboEnv GrandParentClass and defined in the
    # TrainingEnvironment.
    # ----------------------------
    def _init_env_variables(self):
        """Inits variables needed to be initialised each time we reset at the start
        of an episode.
        """
        raise NotImplementedError()
    def _compute_reward(self, observations, done):
        """Calculates the reward to give based on the observations given.
        """
        raise NotImplementedError()
    def _set_action(self, action):
        """Applies the given action to the simulation.
        """
        raise NotImplementedError()
    def _get_obs(self):
        # virtual: return the current observation for the agent
        raise NotImplementedError()
    def _is_done(self, observations):
        """Checks if episode done based on observations given.
        """
        raise NotImplementedError()
    # Methods that the TrainingEnvironment will need.
    # ----------------------------
    def move_base(self, linear_speed, angular_speed,sleep_time = 0.1, epsilon=0.05, update_rate=10, min_laser_distance=-1):
        # Publish a Twist command to the object-disposer robot.
        # NOTE(review): sleep_time, epsilon, update_rate and
        # min_laser_distance are accepted for interface compatibility
        # but ignored here.
        cmd_vel_value = Twist()
        cmd_vel_value.linear.x = linear_speed
        cmd_vel_value.angular.z = angular_speed
        rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
        self._check_publishers_connection()
        self._cmd_vel_pub_object_disposer_robot.publish(cmd_vel_value)
    def get_camera_rgb_image_raw(self):
        # latest decoded frame from the object-disposer front camera
        return self.camera_rgb_image_raw_object_disposer_car
    # def get_prey_position(self):
    #     return self.prey_position
    # def get_predator_position(self):
    #     return self.predator_position
    def get_object_disposer_robot_position(self):
        # [x, y] world position cached by _model_state_callback
        return self.object_disposer_robot_position
    def get_line_follower_car_position(self):
        # [x, y] world position cached by _model_state_callback
        return self.line_follower_car_position
    def get_object_box_position(self):
        # [x, y] world position cached by _model_state_callback
        return self.object_box_position
    def get_object_box_orientation(self):
        # [qx, qy] quaternion components cached by _model_state_callback
        return self.object_box_orientation
5012799 | <reponame>tjensen/fsuipc-airspaces<filename>setup.py
import setuptools

# Read the README for the long_description shown on PyPI.
# The encoding is pinned to UTF-8 so the build does not depend on the
# platform's default locale (fixes UnicodeDecodeError on non-UTF-8 systems).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="fsuipc-airspaces",
    version="1.0.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="FSUIPC client for reporting flight simulator aircraft positioning to Airspaces",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT",
    url="https://github.com/tjensen/fsuipc-airspaces",
    packages=setuptools.find_packages(exclude=["tests"]),
    # Ship the PEP 561 marker so type checkers pick up inline annotations.
    package_data={"fsuipc_airspaces": ["py.typed"]},
    zip_safe=False,  # Enables mypy to find the installed package
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: Win32 (MS Windows)",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Games/Entertainment :: Simulation"
    ],
    python_requires=">=3.6",
    install_requires=["fsuipc==1.1.0"],
    entry_points={
        "console_scripts": [
            "fsuipc_airspaces = fsuipc_airspaces.fsuipc_airspaces:main"
        ]
    }
)
| StarcoderdataPython |
8099226 | <reponame>CaioCavalcanti/hackerhank
#!/bin/python3
"""
A linked list is said to contain a cycle if any node is visited more than once while traversing the list.
Complete the function provided for you in your editor.
It has one parameter: a pointer to a Node object named head that points to the head of a linked list.
Your function must return a boolean denoting whether or not there is a cycle in the list.
If there is a cycle, return true; otherwise, return false.
Note: If the list is empty, head will be null.
Output:
If the list contains a cycle, your function must return true.
If the list does not contain a cycle, it must return false.
The binary integer corresponding to the boolean value returned by your function is printed to stdout by our hidden code checker.
"""
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
    """One node of a singly linked list: a payload plus a link to the next node."""

    def __init__(self, node_data):
        """Store the payload; a fresh node starts unlinked (``next`` is None)."""
        self.next = None
        self.data = node_data
class SinglyLinkedList:
    """Singly linked list supporting O(1) appends via a tail pointer."""

    def __init__(self):
        """Create an empty list: neither head nor tail exists yet."""
        self.head = self.tail = None

    def insert_node(self, node_data):
        """Append a new node holding ``node_data`` at the end of the list."""
        fresh = SinglyLinkedListNode(node_data)
        if self.head is None:
            # First element: it becomes the head...
            self.head = fresh
        else:
            # ...otherwise link it after the current tail.
            self.tail.next = fresh
        self.tail = fresh
def print_singly_linked_list(node, sep, fptr):
    """Write each node's data to ``fptr``, joined by ``sep`` (no trailing separator)."""
    current = node
    while current is not None:
        fptr.write(str(current.data))
        current = current.next
        # Only emit the separator when another element follows.
        if current is not None:
            fptr.write(sep)
def has_cycle(head):
    """Detect a cycle using Floyd's tortoise-and-hare algorithm.

    Returns True if following ``next`` pointers from ``head`` ever revisits
    a node, False otherwise (an empty list has no cycle).
    """
    if head is None:
        return False
    slow = fast = head
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        # The fast pointer laps the slow one only inside a cycle.
        if slow is fast:
            return True
    return False
if __name__ == '__main__':
    # HackerRank harness: reads the test cases from stdin and writes each
    # result (1 = cycle, 0 = no cycle) to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    tests = int(input())
    for tests_itr in range(tests):
        # `index` selects which existing node the tail will point back to.
        index = int(input())
        llist_count = int(input())
        llist = SinglyLinkedList()
        for _ in range(llist_count):
            llist_item = int(input())
            llist.insert_node(llist_item)
        # Default target is a fresh dangling node, so if `index` never
        # matches, attaching it does NOT create a cycle.
        extra = SinglyLinkedListNode(-1)
        temp = llist.head
        for i in range(llist_count):
            if i == index:
                extra = temp
            if i != llist_count - 1:
                temp = temp.next
        # Point the tail at the chosen node (possibly forming a cycle).
        temp.next = extra
        result = has_cycle(llist.head)
        fptr.write(str(int(result)) + '\n')
    fptr.close()
267102 | <gh_stars>0
import torch
import torch.nn.functional as F
from random import shuffle
import numpy as np
class CosineClusters():
    """K-means-style clustering over (id, vector) items using cosine
    similarity (or negated Euclidean distance when ``Euclidean`` is True)."""

    def __init__(self, num_clusters=100, Euclidean=False):
        self.clusters = []  # the Cluster objects
        self.item_cluster = {}  # item id -> the Cluster it currently belongs to
        self.Euclidean = Euclidean
        # Create the requested number of empty clusters.
        for i in range(0, num_clusters):
            self.clusters.append(Cluster(self.Euclidean))

    def add_random_training_items(self, items):
        """Assign items to clusters round-robin (initial seeding)."""
        cur_index = 0
        for index, item in enumerate(items):
            self.clusters[cur_index].add_to_cluster(item)
            textid = item[0]
            self.item_cluster[textid] = self.clusters[cur_index]
            cur_index += 1
            if cur_index >= len(self.clusters):
                cur_index = 0

    def add_items_to_best_cluster(self, items):
        """One unsupervised re-assignment pass; returns how many items moved."""
        added = 0
        for item in items:
            new = self.add_item_to_best_cluster(item)
            if new:
                added += 1
        return added

    def add_item_to_best_cluster(self, item):
        """Move ``item`` to its best-fitting cluster; True if it changed cluster."""
        best_cluster = None
        best_fit = float("-inf")
        previous_cluster = None
        # Remove the item from its current cluster before re-matching, so it
        # does not bias its own centroid.
        textid = item[0]
        if textid in self.item_cluster:
            previous_cluster = self.item_cluster[textid]
            previous_cluster.remove_from_cluster(item)
        for cluster in self.clusters:
            fit = cluster.cosine_similary(item, Euclidean=self.Euclidean)
            if fit > best_fit:
                best_fit = fit
                best_cluster = cluster
        # Add the item to the best-matching cluster after re-matching.
        best_cluster.add_to_cluster(item)
        self.item_cluster[textid] = best_cluster
        if best_cluster == previous_cluster:
            return False
        else:
            return True

    def get_items_cluster(self, item):
        """Return the cluster ``item`` belongs to, or None if unassigned."""
        textid = item[0]
        if textid in self.item_cluster:
            return self.item_cluster[textid]
        else:
            return None

    def get_centroids(self, number_per_cluster=1):
        """Per cluster, the members closest to its centroid."""
        centroids = []
        for cluster in self.clusters:
            centroids.append(cluster.get_centroid(number_per_cluster))
        return centroids

    def get_outliers(self, number_per_cluster=1):
        """Per cluster, the members farthest from its centroid."""
        outliers = []
        for cluster in self.clusters:
            outliers.append(cluster.get_outlier(number_per_cluster))
        return outliers

    def get_randoms(self, number_per_cluster=1):
        """Per cluster, randomly sampled members."""
        randoms = []
        for cluster in self.clusters:
            randoms.append(cluster.get_random_members(number_per_cluster))
        return randoms

    def shape(self):
        """Return the per-cluster sizes as a printable string."""
        lengths = []
        for cluster in self.clusters:
            lengths.append(cluster.size())
        return str(lengths)
class Cluster():
    """A single cluster: its member items, the running sum of their feature
    vectors, and a cache of member-to-centroid similarities."""

    def __init__(self, Euclidean=False):
        self.members = {}           # item id -> (id, vector) items in this cluster
        self.feature_vector = None  # element-wise sum of all member vectors
        self.distance = []          # [similarity, id, vector] rows, sorted best-first
        self.Euclidean = Euclidean  # True: negated Euclidean distance; False: cosine

    def add_to_cluster(self, item):
        """Add ``item`` (an (id, vector) pair) and fold its vector into the sum."""
        dataid = item[0]
        data = item[1]
        self.members[dataid] = item
        # Explicit `is None` replaces the original try/except around the
        # ambiguous element-wise `== None` comparison on numpy arrays.
        if self.feature_vector is None:
            self.feature_vector = data
        else:
            self.feature_vector = self.feature_vector + data

    def remove_from_cluster(self, item):
        """Remove ``item`` from the cluster (no-op if it is not a member)."""
        dataid = item[0]
        data = item[1]
        exists = self.members.pop(dataid, False)
        if exists:
            self.feature_vector = self.feature_vector - data

    def cosine_similary(self, item, Euclidean=False):
        """Similarity of ``item`` to the cluster centroid.

        Returns cosine similarity, or the *negated* Euclidean distance when
        ``Euclidean`` is True, so "larger means closer" holds in both modes.
        Assumes the cluster is non-empty (callers guard on size).
        """
        data = item[1]
        center_vec = self.feature_vector / len(self.members)
        if Euclidean:
            return -np.sqrt(np.sum(np.square(data - center_vec)))
        # Only build tensors on the cosine path; the Euclidean branch is pure numpy.
        item_tensor = torch.FloatTensor(data)
        center_tensor = torch.FloatTensor(center_vec)
        similarity = F.cosine_similarity(item_tensor, center_tensor, 0)
        return similarity.item()  # item() converts tensor value to float

    def size(self):
        """Number of items currently in the cluster."""
        return len(self.members)

    def distance_sort(self):
        """Recompute member similarities and return them sorted best-first."""
        self.distance = []
        for textid in self.members.keys():
            item = self.members[textid]
            similarity = self.cosine_similary(item, Euclidean=self.Euclidean)
            self.distance.append([similarity, item[0], item[1]])
        self.distance.sort(reverse=True, key=lambda x: x[0])
        return self.distance

    def get_centroid(self, number=1):
        """The ``number`` members closest to the centroid ([] if empty)."""
        if len(self.members) == 0:
            return []
        return self.distance_sort()[:number]

    def get_outlier(self, number=1):
        """The ``number`` members farthest from the centroid ([] if empty)."""
        if len(self.members) == 0:
            return []
        return self.distance_sort()[-number:]

    def get_random_members(self, number=1):
        """``number`` members sampled uniformly at random (with replacement)."""
        if len(self.members) == 0:
            return []
        ranked = self.distance_sort()
        randoms = []
        for i in range(0, number):
            randoms.append(ranked[np.random.randint(len(self.members))])
        return randoms
if __name__ == '__main__':
    # Demo: cluster three synthetic Gaussian blobs into 4 clusters and plot
    # (1) the raw data, (2) the learned assignment, and (3) the per-cluster
    # centroids / outliers / random samples.
    from sklearn.datasets import make_blobs
    n_samples = 1500
    random_state = 170
    X, y = make_blobs(n_samples=n_samples, random_state=random_state)
    num_clusters = 4
    max_epochs = 10
    data = X
    # Items are (string id, vector) pairs, as expected by CosineClusters.
    NEWdata = [[str(index), item] for index, item in enumerate(data)]
    # shuffle(NEWdata)
    # print(NEWdata)
    # raise 'pass'
    # shuffle(NEWdata)
    cosine_clusters = CosineClusters(num_clusters, Euclidean=True)
    cosine_clusters.add_random_training_items(NEWdata)
    for index, cluster in enumerate(cosine_clusters.clusters):
        print(cluster.feature_vector)
    print(set(cosine_clusters.item_cluster.values()))
    # Iterate re-assignment passes until no item changes cluster.
    for i in range(0, max_epochs):
        print("Epoch "+str(i))
        added = cosine_clusters.add_items_to_best_cluster(NEWdata)
        if added == 0:
            break
    # centroids_per = list(set(cosine_clusters.item_cluster.values()))
    sample_y = [cosine_clusters.clusters.index(_) for _ in cosine_clusters.item_cluster.values()]
    # print(sample_y)
    centroids = cosine_clusters.get_centroids(2)
    outliers = cosine_clusters.get_outliers(2)
    randoms = cosine_clusters.get_randoms(2)
    centroids + outliers + randoms
    # print(set(cosine_clusters.item_cluster.values()))
    # print(cosine_clusters.clusters)
    for index, cluster in enumerate(cosine_clusters.clusters):
        sample_sort = cluster.distance_sort()
        # print('centroids:\t',centroids[index])
        # print('outliers:\t',outliers[index])
        # print('randoms:\t',randoms[index])
        # assert sample_sort[0][1] == centroids[index][0]
        # assert sample_sort[-1][1] == outliers[index][0]
    D_id_color = [u'orchid', u'darkcyan', u'dodgerblue', u'turquoise', u'darkviolet']
    import matplotlib.pyplot as plt
    plt.figure(figsize=(18, 6))
    plt.subplot(131)
    plt.scatter(X[:, 0], X[:, 1])
    plt.subplot(132)
    # One scatter per cluster label, colored consistently via D_id_color.
    for label in [*range(len(cosine_clusters.clusters))]:
        indices = [i for i, l in enumerate(sample_y) if l == label]
        current_tx = np.take(data[:, 0], indices)
        current_ty = np.take(data[:, 1], indices)
        color = D_id_color[label]
        print(current_tx.shape)
        plt.scatter(current_tx, current_ty, c=color, label=label)
    plt.legend(loc='best')
    plt.subplot(133)
    plt.scatter(X[:, 0], X[:, 1], alpha=0.2, color='gray')
    # f2 extracts just the vectors from [similarity, id, vector] rows.
    f2 = lambda x:[_[2] for _ in x]
    for label in [*range(len(cosine_clusters.clusters))]:
        color = D_id_color[label]
        plt.scatter(np.array(f2(centroids[label]))[:,0], np.array(f2(centroids[label]))[:,1], c=color, label=f'{label} centroids')
        plt.scatter(np.array(f2(outliers[label]))[:,0], np.array(f2(outliers[label]))[:,1], marker='*', c=color, label=f'{label} outliers')
        plt.scatter(np.array(f2(randoms[label]))[:,0], np.array(f2(randoms[label]))[:,1], marker='^', c=color, label=f'{label} randoms')
        # sample_sort = cluster.distance_sort()
        # print('centroids:\t',centroids[index])
        # print('outliers:\t',outliers[index])
        # print('randoms:\t',randoms[index])
    # for index, cluster in enumerate(cosine_clusters.clusters):
    #     for item in outliers[index]:
    #         plt.scatter(item[-1][0], current_ty, c=color, label=label)
    plt.legend(loc='best')
    plt.show()
368314 | <reponame>lemonviv/SplitNNDemo<gh_stars>0
import matplotlib.pyplot as plt
def plot_train(train_losses, train_accuracies, test_accuracies):
    """Plot per-epoch loss, training accuracy and test accuracy side by side
    and save the figure to ../tmp/bank-splitnn.png.

    Args
    :param train_losses: historical train loss for each epoch
    :param train_accuracies: historical train accuracy for each epoch
    :param test_accuracies: historical test accuracy for each epoch
    :return:
    """
    panels = (
        (train_losses, "Losses"),
        (train_accuracies, "Training Accuracy"),
        (test_accuracies, "Testing Accuracy"),
    )
    plt.figure(figsize=(18, 3))
    # Draw the three series into a 1x3 grid of subplots.
    for position, (series, title) in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        plt.plot(range(len(series)), series, "-ro")
        plt.title(title)
    plt.savefig("../tmp/bank-splitnn.png")
1774077 | <reponame>ArakniD/InvenTree
# Generated by Django 3.0.7 on 2020-08-20 04:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines StockItem.customer as a
    # nullable FK limited to customer companies, with SET_NULL on delete.

    dependencies = [
        ('company', '0023_auto_20200808_0715'),
        ('stock', '0048_auto_20200807_2344'),
    ]

    operations = [
        migrations.AlterField(
            model_name='stockitem',
            name='customer',
            field=models.ForeignKey(blank=True, help_text='Customer', limit_choices_to={'is_customer': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assigned_stock', to='company.Company', verbose_name='Customer'),
        ),
    ]
| StarcoderdataPython |
6696727 | <gh_stars>10-100
"""Utilities for RGB images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import scipy.misc
import scipy.signal
import PIL.Image
def transform(data, translation, theta):
    """Create a new image by translating and rotating the current image.

    Args:
        data: The image array to transform.
        translation: The XY translation vector.
        theta: Rotation angle in radians, with positive meaning
            counter-clockwise.

    Returns:
        An image of the same type that has been rotated and translated.
    """
    # Note the index order: translation[1] becomes the x (column) offset and
    # translation[0] the y (row) offset.
    translation_map = np.float32([[1, 0, translation[1]],
                                  [0, 1, translation[0]]])
    translation_map_affine = np.r_[translation_map, [[0, 0, 1]]]
    # cv2.getRotationMatrix2D expects degrees; rotate about the image center.
    theta = np.rad2deg(theta)
    rotation_map = cv2.getRotationMatrix2D(
        (data.shape[1] / 2, data.shape[0] / 2), theta, 1)
    rotation_map_affine = np.r_[rotation_map, [[0, 0, 1]]]
    # Compose in 3x3 homogeneous form: translation is applied first, then
    # rotation; drop the homogeneous row for warpAffine.
    full_map = rotation_map_affine.dot(translation_map_affine)
    full_map = full_map[:2, :]
    # Nearest-neighbour interpolation avoids blending pixel values.
    transformed_data = cv2.warpAffine(
        data, full_map, (data.shape[1], data.shape[0]),
        flags=cv2.INTER_NEAREST)
    return transformed_data.astype(data.dtype)
def crop(data, height, width, c0=None, c1=None):
    """Crop the image centered around c0, c1.

    Args:
        data: The image array to crop.
        height: The height of the desired image.
        width: The width of the desired image.
        c0: The center height point at which to crop. If not specified, the
            center of the image is used.
        c1: The center width point at which to crop. If not specified, the
            center of the image is used.

    Raises:
        ValueError: If the resulting crop does not match the requested size.

    Returns:
        A cropped Image of the same type.
    """
    # compute crop center px
    height = int(np.round(height))
    width = int(np.round(width))
    if c0 is None:
        c0 = float(data.shape[0]) / 2
    if c1 is None:
        c1 = float(data.shape[1]) / 2
    # crop using PIL
    desired_start_row = int(np.floor(c0 - float(height) / 2))
    desired_end_row = int(np.floor(c0 + float(height) / 2))
    desired_start_col = int(np.floor(c1 - float(width) / 2))
    desired_end_col = int(np.floor(c1 + float(width) / 2))
    pil_image = PIL.Image.fromarray(data)
    # NOTE(review): PIL.Image.crop accepts boxes partially outside the image;
    # presumably out-of-bounds regions come back zero-filled -- confirm if
    # callers rely on cropping near the border.
    cropped_pil_image = pil_image.crop(
        (desired_start_col,
         desired_start_row,
         desired_end_col,
         desired_end_row)
    )
    crop_data = np.array(cropped_pil_image)
    if crop_data.shape[0] != height or crop_data.shape[1] != width:
        raise ValueError('Crop dims are incorrect.')
    return crop_data.astype(data.dtype)
def inpaint(data, rescale_factor=1.0, window_size=3):
    """Fills in the zero pixels in the RGB image.

    NOTE(review): relies on scipy.misc.imresize, which was deprecated in
    SciPy 1.0 and removed in 1.3 -- this function requires an older SciPy
    (with Pillow installed) or a port to cv2.resize; confirm the pinned
    environment.

    Parameters:
        data: The raw image.
        rescale_factor: Amount to rescale the image for inpainting, smaller
            numbers increase speed.
        window_size: Size of window to use for inpainting.

    Returns:
        new_data: The inpainted image.
    """
    # Resize the image
    resized_data = scipy.misc.imresize(data, rescale_factor, interp='nearest')
    # Inpaint smaller image. A pixel counts as missing when all of its
    # channels sum to zero.
    mask = 1 * (np.sum(resized_data, axis=2) == 0)
    inpainted_data = cv2.inpaint(resized_data, mask.astype(np.uint8),
                                 window_size, cv2.INPAINT_TELEA)
    # Fill in zero pixels with inpainted and resized image.
    filled_data = scipy.misc.imresize(inpainted_data, 1.0 / rescale_factor,
                                      interp='bilinear')
    new_data = np.copy(data)
    new_data[data == 0] = filled_data[data == 0]
    return new_data
| StarcoderdataPython |
6432118 | <reponame>Michaeljurado24/nengo
"""Helper functions for constructing nengo models that implement actions."""
import numpy as np
import nengo
def convolution(module, target_name, effect, n_neurons_cconv, synapse):
    """Implement an action_objects.Convolution.

    Parameters
    ----------
    module : spa.Module
        The module that will own this convolution
    target_name : string
        The name of the object to send the convolution result to
    effect : action_objects.Convolution
        The details of the convolution to implement
    n_neurons_cconv : int
        Number of neurons in each product population
    synapse : float (or nengo.Synapse)
        The synapse to use for connections into and out of the convolution

    Returns the created nengo.networks.CircularConvolution.
    """
    source1 = effect.source1
    source2 = effect.source2
    target_module = module.spa.get_module(target_name)
    target, target_vocab = module.spa.get_module_input(target_name)
    s1_output, s1_vocab = module.spa.get_module_output(source1.name)
    s2_output, s2_vocab = module.spa.get_module_output(source2.name)
    # The cconv network lives inside the target module; the connections are
    # built in the top-level SPA network so they can span modules.
    with target_module:
        cconv = nengo.networks.CircularConvolution(
            n_neurons_cconv,
            s1_vocab.dimensions,
            invert_a=False,
            invert_b=False,
            label="cconv_%s" % str(effect),
        )
    with module.spa:
        # compute the requested transform
        t = s1_vocab.parse(str(effect.transform)).get_convolution_matrix()
        # handle conversion between different Vocabularies
        if target_vocab is not s1_vocab:
            t = np.dot(s1_vocab.transform_to(target_vocab), t)
        nengo.Connection(cconv.output, target, transform=t, synapse=synapse)
        t1 = s1_vocab.parse(source1.transform.symbol).get_convolution_matrix()
        if source1.inverted:
            # eye(D)[-arange(D)] is the index-reversal permutation used here
            # to realise the inverted source.
            D = s1_vocab.dimensions
            t1 = np.dot(t1, np.eye(D)[-np.arange(D)])
        nengo.Connection(s1_output, cconv.input_a, transform=t1, synapse=synapse)
        t2 = s2_vocab.parse(source2.transform.symbol).get_convolution_matrix()
        if source2.inverted:
            D = s2_vocab.dimensions
            t2 = np.dot(t2, np.eye(D)[-np.arange(D)])
        # The second input must be expressed in source1's vocabulary, since
        # the cconv network was sized for s1_vocab.
        if s1_vocab is not s2_vocab:
            t2 = np.dot(s2_vocab.transform_to(s1_vocab), t2)
        nengo.Connection(s2_output, cconv.input_b, transform=t2, synapse=synapse)
    return cconv
| StarcoderdataPython |
6627452 | # -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'MacSeNet'
import torch
import torch.nn as nn
from torch.autograd import Variable
class DFFN(nn.Module):
    def __init__(self, N, l_dim):
        """
        Constructing blocks for a two-layer FFN (encoder/decoder pair).
        Args :
            N     : (int) Original dimensionallity of the input.
            l_dim : (int) Dimensionallity of the latent variables.
        """
        super(DFFN, self).__init__()
        print('Constructing 2-FFN')
        self._N = N
        self._ldim = l_dim
        self.activation_function = torch.nn.ReLU()
        # Encoder
        self.ih_matrix = nn.Linear(self._N, self._ldim, bias=True)
        # Decoder
        self.ho_matrix = nn.Linear(self._ldim, self._N, bias=True)
        # Initialize the weights
        self.initialize_ffn()

    def initialize_ffn(self):
        """
        Manual weight/bias initialization (Xavier/Glorot normal).
        """
        # xavier_normal_ is the in-place initializer; the underscore-less
        # alias was deprecated and later removed from torch.nn.init.
        nn.init.xavier_normal_(self.ih_matrix.weight)
        nn.init.xavier_normal_(self.ho_matrix.weight)
        print('Initialization of the FFN done...')
        return None

    def forward(self, input_x):
        """Encode then decode ``input_x`` (a float32 numpy array).

        Returns:
            (reconstruction, wrapped input); the input is wrapped with
            requires_grad=True so gradients w.r.t. it are available.
        """
        # torch.cuda.is_available() checks for a usable GPU at runtime;
        # torch.has_cudnn only reflects compile-time support and can be True
        # on machines without a GPU, making .cuda() fail.
        if torch.cuda.is_available():
            x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
        else:
            x = Variable(torch.from_numpy(input_x), requires_grad=True)
        # Encoder
        hl_rep = self.activation_function(self.ih_matrix(x))
        # Decoder
        y_out = self.activation_function(self.ho_matrix(hl_rep))
        return y_out, x
class DNN(nn.Module):
    def __init__(self, N, l_dim):
        """
        Constructing blocks for a deep neural network
        for MSS (four linear layers with ReLU activations).
        Args :
            N     : (int) Original dimensionallity of the input.
            l_dim : (int) Dimensionallity of the latent variables.
        """
        super(DNN, self).__init__()
        print('Constructing a Deep Neural Network')
        self._N = N
        self._ldim = l_dim
        self.activation_function = torch.nn.ReLU()
        # Layers: input -> latent -> latent -> latent -> output
        self.ih_matrix = nn.Linear(self._N, self._ldim, bias=True)
        self.hh_matrix = nn.Linear(self._ldim, self._ldim, bias=True)
        self.hh_b_matrix = nn.Linear(self._ldim, self._ldim, bias=True)
        self.ho_matrix = nn.Linear(self._ldim, self._N, bias=True)
        # Initialize the weights
        self.initialize_ffn()

    def initialize_ffn(self):
        """
        Manual weight/bias initialization (Xavier/Glorot normal).
        """
        # xavier_normal_ is the in-place initializer; the underscore-less
        # alias was deprecated and later removed from torch.nn.init.
        nn.init.xavier_normal_(self.ih_matrix.weight)
        nn.init.xavier_normal_(self.hh_matrix.weight)
        nn.init.xavier_normal_(self.hh_b_matrix.weight)
        nn.init.xavier_normal_(self.ho_matrix.weight)
        print('Initialization of the DNN done...')
        return None

    def forward(self, input_x):
        """Run ``input_x`` (a float32 numpy array) through all four layers.

        Returns:
            (output, wrapped input); the input is wrapped with
            requires_grad=True so gradients w.r.t. it are available.
        """
        # Runtime GPU check instead of the compile-time torch.has_cudnn flag,
        # which can be True on GPU-less machines and crash .cuda().
        if torch.cuda.is_available():
            x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
        else:
            x = Variable(torch.from_numpy(input_x), requires_grad=True)
        hl_rep = self.activation_function(self.ih_matrix(x))
        hl_rep = self.activation_function(self.hh_matrix(hl_rep))
        hl_rep = self.activation_function(self.hh_b_matrix(hl_rep))
        y_out = self.activation_function(self.ho_matrix(hl_rep))
        return y_out, x
class FFN(nn.Module):
    def __init__(self, N):
        """
        Constructing blocks for a single layer FFN,
        for pre-training.
        Args :
            N : (int) Original dimensionallity of the input.
        """
        super(FFN, self).__init__()
        print('Constructing FFN')
        self._N = N
        self.activation_function = torch.nn.ReLU()
        # Single Layer
        self.io_matrix = nn.Linear(self._N, self._N, bias=True)
        # Initialize the weights
        self.initialize_ffn()

    def initialize_ffn(self):
        """
        Manual weight/bias initialization (Xavier/Glorot normal).
        """
        # xavier_normal_ is the in-place initializer; the underscore-less
        # alias was deprecated and later removed from torch.nn.init.
        nn.init.xavier_normal_(self.io_matrix.weight)
        print('Initialization of the FFN done...')
        return None

    def forward(self, input_x):
        """Apply the single linear layer + ReLU to ``input_x`` (float32 numpy).

        Returns:
            (output, wrapped input); the input is wrapped with
            requires_grad=True so gradients w.r.t. it are available.
        """
        # Runtime GPU check instead of the compile-time torch.has_cudnn flag,
        # which can be True on GPU-less machines and crash .cuda().
        if torch.cuda.is_available():
            x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
        else:
            x = Variable(torch.from_numpy(input_x), requires_grad=True)
        return self.activation_function(self.io_matrix(x)), x
# EOF
| StarcoderdataPython |
8083491 | """Helper functions to use across tests."""
from django.http import JsonResponse
def mock_check_auth_success(request, needs_auth, func):
    """Mocks `helpers.check_auth`, for success: skips the auth check entirely
    and just invokes ``func`` on the request."""
    del needs_auth  # intentionally ignored -- this mock always "authenticates"
    return func(request)
def mock_render_error_response(error_str):
    """Mocks `helpers.render_error_response`, for failure: wraps the message
    in an HTTP 400 JSON body."""
    payload = {"error": error_str}
    return JsonResponse(payload, status=400)
| StarcoderdataPython |
6605800 | <reponame>asergeenko/get_some_food
# Generated by Django 3.1 on 2021-11-30 22:10
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration creating the shopping-list models:
    # ProductList (owned by a user, shareable with others) and ProductItem
    # (a product with an amount, attached to a list).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('foodlist', '0013_auto_20211201_0039'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProductList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('shared_with',
                 models.ManyToManyField(blank=True, related_name='product_lists', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ProductItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.FloatField(default=1.0)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodlist.product')),
                ('product_list',
                 models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='foodlist.productlist')),
            ],
        ),
    ]
| StarcoderdataPython |
1781085 | <filename>accelerator/shell/job.py
############################################################################
# #
# Copyright (c) 2020 <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from traceback import print_exc
from datetime import datetime
import errno
from accelerator.compat import ArgumentParser
from accelerator.setupfile import encode_setup
from accelerator.compat import FileNotFoundError
from accelerator.unixhttp import call
from .parser import name2job, JobNotFound
def show(url, job, show_output):
    """Print a human-readable report for ``job``: its setup.json, dataset
    list, subjobs, produced files, a currency check, and (optionally) its
    captured output.

    :param url: base server URL, used for the job_is_current check
    :param job: job object (provides path, json_load, open, filename, output)
    :param show_output: when True, print the job's output inline
    """
    print(job.path)
    print('=' * len(job.path))
    setup = job.json_load('setup.json')
    setup.pop('_typing', None)
    # Render the epoch timestamps as local date/time strings.
    setup.starttime = str(datetime.fromtimestamp(setup.starttime))
    if 'endtime' in setup:
        setup.endtime = str(datetime.fromtimestamp(setup.endtime))
    print(encode_setup(setup, as_str=True))
    # datasets.txt is optional; silently skip jobs without datasets.
    try:
        with job.open('datasets.txt') as fh:
            print()
            print('datasets:')
            for line in fh:
                print(' %s/%s' % (job, line[:-1],))
    except IOError:
        pass
    # post.json only exists for jobs that ran to completion.
    try:
        post = job.json_load('post.json')
    except FileNotFoundError:
        # Red ANSI warning.
        print('\x1b[31mWARNING: Job did not finish\x1b[m')
        post = None
    if post and post.subjobs:
        print()
        print('subjobs:')
        for sj in sorted(post.subjobs):
            print(' ', sj)
    if post and post.files:
        print()
        print('files:')
        for fn in sorted(post.files):
            print(' ', job.filename(fn))
    if post and not call(url + '/job_is_current/' + job):
        # Blue ANSI note: the job's source code has changed since it ran.
        print('\x1b[34mJob is not current\x1b[m')
    print()
    out = job.output()
    if show_output:
        if out:
            print('output (use --just-output/-O to see only the output):')
            print(out)
            if not out.endswith('\n'):
                print()
        else:
            print(job, 'produced no output')
            print()
    elif out:
        print('%s produced %d bytes of output, use --output/-o to see it' % (job, len(out),))
        print()
def main(argv, cfg):
    """CLI entry point: resolve each jobid/path/method argument to a job and
    display it (full report, just output, or just path).

    :return: 0 on success, 1 if any job failed to resolve or display.
    """
    descr = 'show setup.json, dataset list, etc for jobs'
    parser = ArgumentParser(prog=argv.pop(0), description=descr)
    # The three display modes are mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-o', '--output', action='store_true', help='show job output')
    group.add_argument('-O', '--just-output', action='store_true', help='show only job output')
    group.add_argument('-P', '--just-path', action='store_true', help='show only job path')
    parser.add_argument(
        'jobid',
        nargs='+', metavar='jobid/path/method',
        help='method shows the latest (current) job with that method\n' +
             '(i.e. the latest finished job with current source code)\n' +
             'you can use spec~ or spec~N to go back N current jobs\n' +
             'with that method or spec^ or spec^N to follow .previous'
    )
    args = parser.parse_intermixed_args(argv)
    res = 0
    for path in args.jobid:
        try:
            job = name2job(cfg, path)
            if args.just_output:
                out = job.output()
                print(out, end='' if out.endswith('\n') else '\n')
            elif args.just_path:
                print(job.path)
            else:
                show(cfg.url, job, args.output)
        except JobNotFound as e:
            print(e)
            res = 1
        except Exception as e:
            # Let broken pipes (e.g. piping into `head`) propagate instead of
            # reporting them as job failures.
            if isinstance(e, IOError) and e.errno == errno.EPIPE:
                raise
            print_exc()
            print("Failed to show %r" % (path,))
            res = 1
    return res
| StarcoderdataPython |
5175798 | # -*- coding: utf-8 -*-
import scrapy
from locations.hours import OpeningHours
from locations.items import GeojsonPointItem
class STBankSpider(scrapy.Spider):
    """Scrapes S&T Bank branch locations from the site's page-data JSON feed."""
    name = "stbank"
    item_attributes = {"brand": "S&T Bank"}
    allowed_domains = ["stbank.com"]
    start_urls = ["https://www.stbank.com/page-data/locations/page-data.json"]

    def parse(self, response):
        # The endpoint delivers every location in a single JSON document.
        for location in response.json()["result"]["pageContext"]["locationData"]:
            yield self.parse_location(location)

    def parse_location(self, location):
        """Convert one location record into a GeojsonPointItem."""
        hours = OpeningHours()
        for day, intervals in location["hours"].items():
            # Closed days carry an "isClosed" key instead of intervals.
            if "isClosed" in intervals:
                continue
            for interval in intervals["openIntervals"]:
                # Day names are truncated to the two-letter form OpeningHours expects.
                hours.add_range(
                    day[:2].capitalize(), interval["start"], interval["end"]
                )
        properties = {
            "lat": location["geocodedCoordinate"]["latitude"],
            "lon": location["geocodedCoordinate"]["longitude"],
            "ref": location["locationUri"],
            "name": location["meta"]["id"],
            "addr_full": location["address"]["line1"],
            "city": location["address"]["city"],
            "state": location["address"]["region"],
            "postcode": location["address"]["postalCode"],
            "country": location["address"]["countryCode"],
            "website": "https://www.stbank.com" + location["locationUri"],
            "phone": location["mainPhone"],
            "extras": {"fax": location.get("fax")},
            "opening_hours": hours.as_opening_hours(),
        }
        return GeojsonPointItem(**properties)
| StarcoderdataPython |
1626001 | """Defines the Data Classes used."""
import datetime
from datetime import datetime as dt
# pylint: disable=too-many-instance-attributes, too-many-public-methods
class StationData:
"""A representation of all data available for a specific Station ID."""
def __init__(self, data):
self._air_density = data["air_density"]
self._air_temperature = data["air_temperature"]
self._brightness = data["brightness"]
self._dew_point = data["dew_point"]
self._feels_like = data["feels_like"]
self._heat_index = data["heat_index"]
self._lightning_strike_last_time = data["lightning_strike_last_time"]
self._lightning_strike_last_distance = data["lightning_strike_last_distance"]
self._lightning_strike_count = data["lightning_strike_count"]
self._lightning_strike_count_last_1hr = data["lightning_strike_count_last_1hr"]
self._lightning_strike_count_last_3hr = data["lightning_strike_count_last_3hr"]
self._precip_accum_last_1hr = data["precip_accum_last_1hr"]
self._precip_accum_local_day = data["precip_accum_local_day"]
self._precip_accum_local_yesterday = data["precip_accum_local_yesterday"]
self._precip_rate = data["precip_rate"]
self._precip_minutes_local_day = data["precip_minutes_local_day"]
self._precip_minutes_local_yesterday = data["precip_minutes_local_yesterday"]
self._pressure_trend = data["pressure_trend"]
self._relative_humidity = data["relative_humidity"]
self._solar_radiation = data["solar_radiation"]
self._station_pressure = data["station_pressure"]
self._sea_level_pressure = data["sea_level_pressure"]
self._station_name = data["station_name"]
self._timestamp = data["timestamp"]
self._uv = data["uv"]
self._wind_avg = data["wind_avg"]
self._wind_bearing = data["wind_bearing"]
self._wind_chill = data["wind_chill"]
self._wind_gust = data["wind_gust"]
@property
def air_density(self) -> float:
"""Return Air Density."""
return self._air_density
@property
def air_temperature(self) -> float:
"""Return Outside Temperature."""
return self._air_temperature
@property
def brightness(self) -> int:
"""Return Brightness in Lux."""
return self._brightness
@property
def dew_point(self) -> float:
"""Return Outside Dewpoint."""
return self._dew_point
@property
def feels_like(self) -> float:
"""Return Outside Feels Like Temp."""
return self._feels_like
@property
def freezing(self) -> bool:
"""Return True if Freezing Outside."""
if self.air_temperature < 0:
return True
return False
@property
def heat_index(self) -> float:
"""Return Outside Heat Index."""
return self._heat_index
@property
def lightning(self) -> bool:
"""Return True if it is Lightning."""
if self.lightning_strike_count > 0:
return True
return False
@property
def lightning_strike_last_time(self) -> datetime:
"""Return the date and time of last strike."""
return self._lightning_strike_last_time
@property
def lightning_strike_last_distance(self) -> int:
"""Return the distance away of last strike."""
return self._lightning_strike_last_distance
@property
def lightning_strike_count(self) -> int:
"""Return the daily strike count."""
return self._lightning_strike_count
@property
def lightning_strike_count_last_1hr(self) -> int:
"""Return the strike count last 1hr."""
return self._lightning_strike_count_last_1hr
@property
def lightning_strike_count_last_3hr(self) -> int:
"""Return the strike count last 3hr."""
return self._lightning_strike_count_last_3hr
@property
def precip_accum_last_1hr(self) -> float:
"""Return Precipition for the Last Hour."""
return self._precip_accum_last_1hr
@property
def precip_accum_local_day(self) -> float:
"""Return Precipition for the Day."""
return self._precip_accum_local_day
@property
def precip_accum_local_yesterday(self) -> float:
"""Return Precipition for Yesterday."""
return self._precip_accum_local_yesterday
@property
def precip_rate(self) -> float:
"""Return current precipitaion rate."""
return self._precip_rate
@property
def precip_minutes_local_day(self) -> int:
"""Return Precipition Minutes Today."""
return self._precip_minutes_local_day
@property
def precip_minutes_local_yesterday(self) -> int:
"""Return Precipition Minutes Yesterday."""
return self._precip_minutes_local_yesterday
@property
def pressure_trend(self) -> int:
"""Return the Pressure Trend."""
return self._pressure_trend
@property
def relative_humidity(self) -> int:
"""Return relative Humidity."""
return self._relative_humidity
@property
def raining(self) -> bool:
"""Return True if it is raining."""
if self.precip_rate > 0:
return True
return False
@property
def solar_radiation(self) -> int:
    """Return Solar Radiation."""
    return self._solar_radiation

@property
def station_pressure(self) -> float:
    """Return Station Pressure."""
    return self._station_pressure

@property
def sea_level_pressure(self) -> float:
    """Return Sea Level Pressure."""
    return self._sea_level_pressure

@property
def timestamp(self) -> str:
    """Return the Data Timestamp."""
    return self._timestamp

@property
def station_name(self) -> str:
    """Return the Station Name."""
    return self._station_name

@property
def uv(self) -> float:
    """Return the UV Index."""
    return self._uv

@property
def wind_avg(self) -> float:
    """Return the Wind Speed Average."""
    return self._wind_avg

@property
def wind_bearing(self) -> int:
    """Return the Wind Bearing as Degree."""
    return self._wind_bearing

@property
def wind_chill(self) -> float:
    """Return the Wind Chill."""
    return self._wind_chill

@property
def wind_gust(self) -> float:
    """Return the Wind Gust Speed."""
    return self._wind_gust
@property
def wind_direction(self) -> str:
    """Return the wind direction as a 16-point compass symbol.

    Each sector is 22.5 degrees wide, centered on its compass point;
    the trailing duplicate "N" absorbs bearings in (348.75, 360].
    """
    direction_array = [
        "N", "NNE", "NE", "ENE",
        "E", "ESE", "SE", "SSE",
        "S", "SSW", "SW", "WSW",
        "W", "WNW", "NW", "NNW",
        "N",
    ]
    # Normalize into [0, 360) first so out-of-range bearings (e.g. 372
    # or negative degrees) cannot raise IndexError; values in the valid
    # 0-360 range map exactly as before.
    bearing = self._wind_bearing % 360
    return direction_array[int((bearing + 11.25) / 22.5)]
class ForecastDataDaily:
    """A representation of Day Based Forecast Weather Data."""

    def __init__(self, data):
        """Store the forecast values from the *data* dict.

        *data* must contain the keys read below; ``timestamp``,
        ``sunrise`` and ``sunset`` are expected to support
        ``.isoformat()`` (datetime-like objects).
        """
        self._timestamp = data["timestamp"]
        self._epochtime = data["epochtime"]
        self._conditions = data["conditions"]
        self._icon = data["icon"]
        self._sunrise = data["sunrise"]
        self._sunset = data["sunset"]
        self._temp_high = data["air_temp_high"]
        self._temp_low = data["air_temp_low"]
        self._precip = data["precip"]
        self._precip_probability = data["precip_probability"]
        self._precip_icon = data["precip_icon"]
        self._precip_type = data["precip_type"]
        self._wind_avg = data["wind_avg"]
        self._wind_bearing = data["wind_bearing"]
        self._current_condition = data["current_condition"]
        self._current_icon = data["current_icon"]
        self._temp_high_today = data["temp_high_today"]
        self._temp_low_today = data["temp_low_today"]

    @staticmethod
    def _strip_cc_prefix(icon: str) -> str:
        """Return *icon* without a leading "cc-" prefix, if present.

        Fix: the old code sliced ``icon[3:]`` whenever "cc-" appeared
        anywhere in the string, corrupting names like "xcc-rain".
        """
        if icon.startswith("cc-"):
            return icon[3:]
        return icon

    @property
    def timestamp(self) -> str:
        """Forecast DateTime as an ISO-8601 string."""
        return self._timestamp.isoformat()

    @property
    def epochtime(self):
        """Forecast Epoch Time."""
        return self._epochtime

    @property
    def conditions(self) -> str:
        """Return condition text."""
        return self._conditions

    @property
    def icon(self) -> str:
        """Condition Icon (with any "cc-" prefix stripped)."""
        return self._strip_cc_prefix(self._icon)

    @property
    def sunrise(self) -> str:
        """Return Sunrise Time for Location as an ISO-8601 string."""
        return self._sunrise.isoformat()

    @property
    def sunset(self) -> str:
        """Return Sunset Time for Location as an ISO-8601 string."""
        return self._sunset.isoformat()

    @property
    def temp_high(self) -> float:
        """Return High temperature."""
        return self._temp_high

    @property
    def temp_low(self) -> float:
        """Return Low temperature."""
        return self._temp_low

    @property
    def precip(self) -> float:
        """Return Precipitation."""
        return self._precip

    @property
    def precip_probability(self) -> int:
        """Precipitation Probability."""
        return self._precip_probability

    @property
    def precip_icon(self) -> str:
        """Precipitation Icon."""
        return self._precip_icon

    @property
    def precip_type(self) -> str:
        """Precipitation Type."""
        return self._precip_type

    @property
    def wind_avg(self) -> float:
        """Return Wind Speed Average, rounded to 2 decimals."""
        return round(self._wind_avg, 2)

    @property
    def wind_bearing(self) -> int:
        """Return Wind Bearing in degrees (truncated to int)."""
        return int(self._wind_bearing)

    @property
    def current_condition(self) -> str:
        """Return Current condition text."""
        return self._current_condition

    @property
    def current_icon(self) -> str:
        """Current Condition Icon (with any "cc-" prefix stripped)."""
        return self._strip_cc_prefix(self._current_icon)

    @property
    def temp_high_today(self) -> float:
        """Return High temperature for current day."""
        return self._temp_high_today

    @property
    def temp_low_today(self) -> float:
        """Return Low temperature for current day."""
        return self._temp_low_today
class ForecastDataHourly:
    """A representation of Hour Based Forecast Weather Data."""

    def __init__(self, data):
        """Store the forecast values from the *data* dict.

        *data* must contain the keys read below; ``timestamp`` is
        expected to support ``.isoformat()`` (a datetime-like object).
        """
        self._timestamp = data["timestamp"]
        self._epochtime = data["epochtime"]
        self._conditions = data["conditions"]
        self._icon = data["icon"]
        self._temperature = data["air_temperature"]
        self._pressure = data["sea_level_pressure"]
        self._humidity = data["relative_humidity"]
        self._precip = data["precip"]
        self._precip_probability = data["precip_probability"]
        self._precip_icon = data["precip_icon"]
        self._precip_type = data["precip_type"]
        self._wind_avg = data["wind_avg"]
        self._wind_gust = data["wind_gust"]
        self._wind_bearing = data["wind_direction"]
        self._wind_direction = data["wind_direction_cardinal"]
        self._uv = data["uv"]
        self._feels_like = data["feels_like"]
        self._current_condition = data["current_condition"]
        self._current_icon = data["current_icon"]
        self._temp_high_today = data["temp_high_today"]
        self._temp_low_today = data["temp_low_today"]

    @staticmethod
    def _strip_cc_prefix(icon: str) -> str:
        """Return *icon* without a leading "cc-" prefix, if present.

        Fix: the old code sliced ``icon[3:]`` whenever "cc-" appeared
        anywhere in the string, corrupting names like "xcc-rain".
        """
        if icon.startswith("cc-"):
            return icon[3:]
        return icon

    @property
    def timestamp(self) -> str:
        """Forecast DateTime as an ISO-8601 string."""
        return self._timestamp.isoformat()

    @property
    def epochtime(self):
        """Forecast Epoch Time."""
        return self._epochtime

    @property
    def conditions(self) -> str:
        """Return condition text."""
        return self._conditions

    @property
    def icon(self) -> str:
        """Condition Icon (with any "cc-" prefix stripped)."""
        return self._strip_cc_prefix(self._icon)

    @property
    def temperature(self) -> float:
        """Return temperature."""
        return self._temperature

    @property
    def pressure(self) -> float:
        """Return Sea Level Pressure."""
        return self._pressure

    @property
    def humidity(self) -> int:
        """Return Relative Humidity."""
        return self._humidity

    @property
    def precip(self) -> float:
        """Return Precipitation."""
        return self._precip

    @property
    def precip_probability(self) -> int:
        """Precipitation Probability."""
        return self._precip_probability

    @property
    def precip_icon(self) -> str:
        """Precipitation Icon."""
        return self._precip_icon

    @property
    def precip_type(self) -> str:
        """Precipitation Type."""
        return self._precip_type

    @property
    def wind_avg(self) -> float:
        """Return Wind Speed Average."""
        return self._wind_avg

    @property
    def wind_gust(self) -> float:
        """Return Wind Gust."""
        return self._wind_gust

    @property
    def wind_bearing(self) -> int:
        """Return Wind Bearing in degrees."""
        return self._wind_bearing

    @property
    def wind_direction(self) -> str:
        """Return Wind Direction Cardinal."""
        return self._wind_direction

    @property
    def uv(self) -> float:
        """Return UV Index."""
        return self._uv

    @property
    def feels_like(self) -> float:
        """Return Feels Like Temperature."""
        return self._feels_like

    @property
    def current_condition(self) -> str:
        """Return Current condition text."""
        return self._current_condition

    @property
    def current_icon(self) -> str:
        """Current Condition Icon (with any "cc-" prefix stripped)."""
        return self._strip_cc_prefix(self._current_icon)

    @property
    def temp_high_today(self) -> float:
        """Return High temperature for current day."""
        return self._temp_high_today

    @property
    def temp_low_today(self) -> float:
        """Return Low temperature for current day."""
        return self._temp_low_today
class DeviceData:
    """A representation of Devices attached to the station."""

    def __init__(self, data):
        # *data* must contain the keys read below; ``obs_time`` is
        # expected to support ``.isoformat()`` (a datetime-like object).
        self._timestamp = data["obs_time"]
        self._device_type = data["device_type"]
        self._device_type_desc = data["device_type_desc"]
        self._device_name = data["device_name"]
        self._device_id = data["device_id"]
        self._battery = data["battery"]
        self._serial_number = data["serial_number"]
        self._firmware_revision = data["firmware_revision"]
        self._hardware_revision = data["hardware_revision"]

    @property
    def timestamp(self) -> str:
        """Return observation time as an ISO-8601 string."""
        return self._timestamp.isoformat()

    @property
    def device_type(self) -> str:
        """Returns Device Type."""
        return self._device_type

    @property
    def device_type_desc(self) -> str:
        """Returns Device Type Description."""
        return self._device_type_desc

    @property
    def device_name(self) -> str:
        """Returns Device Name."""
        return self._device_name

    @property
    def device_id(self) -> str:
        """Returns Device ID."""
        return self._device_id

    @property
    def battery(self) -> float:
        """Returns Battery (volts)."""
        return self._battery

    @property
    def serial_number(self) -> str:
        """Returns Device Serial Number."""
        return self._serial_number

    @property
    def firmware_revision(self) -> str:
        """Returns Device FW Version."""
        return self._firmware_revision

    @property
    def hardware_revision(self) -> str:
        """Returns Device HW Version."""
        return self._hardware_revision
| StarcoderdataPython |
3357851 | <filename>backend/api/models.py
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from datetime import datetime
# Resolve the project's configured user model; this rebinds the ``User``
# name imported above so the rest of the module uses the active model.
User = get_user_model()
| StarcoderdataPython |
57124 | <filename>build.py
#!/usr/bin/env python3
# vim: set fileencoding=utf-8:
"""
Build creativecommons.org primary site includes (scripts, styles, navigation
header, and navigation footer) based on WordPress REST API
"""
# Standard library
import argparse
import copy
import os
import sys
import traceback
# Third-party
import colorama
import jinja2
import requests
colorama.init()
# ANSI style shortcuts used for terminal output.
C_GRAY = f"{colorama.Style.DIM}{colorama.Fore.WHITE}"
C_RESET = colorama.Style.RESET_ALL
C_WHITE = f"{colorama.Style.BRIGHT}{colorama.Fore.WHITE}"
# Environment name -> site domain.
DOMAINS = {
    "prod": "creativecommons.org",
    "stage": "stage.creativecommons.org",
}
# WordPress REST API endpoints fetched by this script; each endpoint maps
# to a ``format_<name>`` function (see main()).
ENDPOINTS = [
    "/wp-json/ccnavigation-header/menu",
    "/wp-json/ccnavigation-footer/menu",
    "/wp-json/cc-wpscripts/get",
    "/wp-json/cc-wpstyles/get",
]
# Timeout (seconds) applied to every HTTP request.
REQUESTS_TIMEOUT = 5
class ScriptError(Exception):
    """Fatal script error carrying a process exit code (default 1)."""

    def __init__(self, message, code=None):
        # Default to exit status 1 when no explicit code is supplied.
        self.code = code if code else 1
        super().__init__("({}) {}".format(self.code, message))
def debug_function_name(args, name):
    """Print a highlighted section heading for *name* in debug mode."""
    if not args.debug:
        return
    print()
    print()
    print(f"{C_WHITE}## {name}{C_RESET}")
def process_header_footer_data(args, data_full):
    """Split menu entries into full-URL and path-only variants.

    Returns two dicts of the form ``{"prefix": ..., "json": ...}``; the
    second one has the CC domain prefix stripped from every entry URL.
    A debug table of the processed entries is printed via debug_info().
    """
    prefix = f"https://{args.domain}"
    data_path = copy.deepcopy(data_full)
    info = [["ID", "Title", "Uniform Resource Locator (URL)"]]
    for index, entry in enumerate(data_full):
        url_full = entry["url"]
        url_path = remove_prefix(copy.copy(url_full), prefix)
        data_path[index]["url"] = url_path
        if url_full != url_path:
            shown = f"{C_GRAY}https://{args.domain}{C_RESET}{url_path}"
        else:
            shown = url_full
        info.append([entry["ID"], entry["title"], shown])
    debug_info(args, info)
    return (
        {"prefix": prefix, "json": data_full},
        {"prefix": "", "json": data_path},
    )
def process_scripts_styles_data(args, data_full):
    """Split script/style URLs into full-URL and path-only variants.

    Returns two dicts of the form ``{"prefix": ..., "json": ...}``; the
    second one has the CC domain prefix stripped from every URL value.
    A debug table of the processed entries is printed via debug_info().
    """
    prefix = f"https://{args.domain}"
    data_path = copy.deepcopy(data_full)
    info = [["ID", "Uniform Resource Locator (URL)"]]
    for id_, url_full in data_full.items():
        url_path = remove_prefix(copy.copy(url_full), prefix)
        data_path[id_] = url_path
        if url_full != url_path:
            shown = f"{C_GRAY}https://{args.domain}{C_RESET}{url_path}"
        else:
            shown = url_full
        info.append([id_, shown])
    debug_info(args, info)
    return (
        {"prefix": prefix, "json": data_full},
        {"prefix": "", "json": data_path},
    )
def remove_prefix(text, prefix):
    """Return *text* without a leading *prefix*; unchanged otherwise."""
    if not text.startswith(prefix):
        return text
    return text[len(prefix):]
def debug_info(args, info):
    """Print *info* as a Markdown table, but only in debug mode."""
    if not args.debug:
        return
    print()
    print(list_of_lists_to_md_table(info))
    print()
def file_template_path(args, project, file_name):
    """Return the template path for *project*/*file_name*, logging it in
    debug mode."""
    path = os.path.join("templates", project, file_name)
    if args.debug:
        print(f" - Template: {path}")
    return path
def file_include_path(args, project, type_, file_name, action="Written "):
    """Return the include path for *project*/*type_*/*file_name*.

    *type_* is expected to be "full" (full CC URLs) or "path" (path-only
    CC URLs).  In debug mode, also log the *action* performed.
    """
    file_include = os.path.join("includes", project, type_, file_name)
    if args.debug:
        # Map the include type to a readable label; fall back to the raw
        # value instead of raising UnboundLocalError (the old code left
        # ``message`` unbound for any type other than "full"/"path").
        message = {"full": "full", "path": "path-only"}.get(type_, type_)
        print(f" - {action}: {file_include} ({message} CC URLs)")
    return file_include
def render_write_include(args, file_template, file_include, data, mode="w"):
    """Render *file_template* with *data* and write it to *file_include*.

    *mode* is passed through to open(), so "a" appends instead of
    overwriting.  Raises ScriptError when the template cannot be found.
    """
    try:
        template = args.j2env.get_template(file_template)
    except jinja2.exceptions.TemplateNotFound as err:
        raise ScriptError(f"Template not found: {err}")
    rendered = template.render(data=data).strip()
    with open(file_include, mode, encoding="utf-8") as out_file:
        out_file.write(f"{rendered}\n")
def list_of_lists_to_md_table(rows):
    """Convert a list (rows) of lists (columns) to a Markdown table.

    The last (right-most) column is left unpadded so that it wraps as
    cleanly as possible.

    Based on solution provided by antak in http://stackoverflow.com/a/12065663
    CC-BY-SA 4.0 (International)
    https://creativecommons.org/licenses/by-sa/4.0/
    """
    # Column widths: widest cell (rendered as text) in each column.
    widths = [max(len(str(cell)) for cell in col) for col in zip(*rows)]

    def render_row(row):
        last = len(row) - 1
        cells = [
            str(cell) if i == last else str(cell).ljust(widths[i])
            for i, cell in enumerate(row)
        ]
        return f"| {' | '.join(cells)} |"

    lines = [render_row(row) for row in rows]
    # Divider row: full column width except the last column, which is
    # sized to the header text (keeps the table narrow on the right).
    header = rows[0]
    last = len(header) - 1
    divider = [
        "-" * (len(header[i]) if i == last else widths[i])
        for i in range(len(header))
    ]
    lines.insert(1, f"| {' | '.join(divider)} |")
    return "\n".join(lines)
def setup():
    """Instantiate and configure argparse.

    Return the parsed argparse namespace, with ``domain`` resolved from
    the chosen environment.  Exits with a usage error when the HTTP
    Basic Auth options are missing (stage) or supplied (prod).
    """

    def default_from_env(ENV_KEY):
        # Use the environment variable's value as the option default
        # when it is set and non-empty.
        default_value = None
        if ENV_KEY in os.environ and os.environ[ENV_KEY]:
            default_value = os.environ[ENV_KEY]
        return default_value

    default_password = default_from_env("FETCH_PASSWORD")
    default_username = default_from_env("FETCH_USERNAME")
    ap = argparse.ArgumentParser(description=__doc__)
    ap.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="debug mode: list changes without modification",
    )
    ap.add_argument(
        "--domain", default=DOMAINS["prod"], help=argparse.SUPPRESS,
    )
    ap.add_argument(
        "env",
        help="specify which environment to fetch from",
        choices=["prod", "stage"],
    )
    ap.add_argument(
        "-p",
        "--password",
        default=default_password,
        help=(
            "HTTP Basic Auth password (required with 'stage' environment)."
            " The FETCH_PASSWORD environment variable may also be used."
        ),
    )
    ap.add_argument(
        "-u",
        "--username",
        default=default_username,
        # Fix: this help text previously named FETCH_PASSWORD.
        help=(
            "HTTP Basic Auth username (required with 'stage' environment)."
            " The FETCH_USERNAME environment variable may also be used."
        ),
    )
    args = ap.parse_args()
    args.domain = DOMAINS[args.env]
    if args.env == "prod" and (args.username or args.password):
        ap.error(
            "the 'prod' environment does not use HTTP Basic Auth: do not use"
            " the --username and --password options"
        )
    if args.env == "stage" and (not args.username or not args.password):
        ap.error(
            "the 'stage' environment requires both the --username and"
            " --password options for HTTP Basic Auth"
        )
    return args
def request_data(args, end_url, json=True):
    """GET *end_url* and return its JSON (or raw bytes when json=False).

    HTTP Basic Auth is used when both credentials are configured.
    Raises ScriptError on HTTP, connection, or timeout failures.
    """
    if args.username and args.password:
        auth = requests.auth.HTTPBasicAuth(args.username, args.password)
    else:
        auth = None
    try:
        response = requests.get(end_url, auth=auth, timeout=REQUESTS_TIMEOUT)
        response.raise_for_status()
        fetched_data = response.json() if json else response.content
    except requests.HTTPError as e:
        raise ScriptError(f"FAILED to retrieve data due to HTTP {e}", 1)
    except requests.exceptions.ConnectionError:
        raise ScriptError(
            "FAILED to retrieve data due to ConnectionError for url:"
            f" {end_url}",
            1,
        )
    except requests.exceptions.Timeout:
        raise ScriptError(
            f"FAILED to retrieve data due to Timeout for url: {end_url}", 1,
        )
    return fetched_data
def prime_style_script_cache(args):
    """Prime script/style cache.

    Fetch the site home page once before hitting the REST endpoints —
    presumably to warm the WordPress script/style data (TODO confirm).
    """
    request_data(args, f"https://{args.domain}/", json=False)
def format_ccnavigation_header(args, data):
    """Write the navigation-header include (full and path-only URLs)."""
    debug_function_name(args, sys._getframe(0).f_code.co_name)
    data_full, data_path = process_header_footer_data(args, data)
    print("- Legalcode (legacy)")
    project = "legalcode"
    file_name = "site-header.html"
    file_template = file_template_path(args, project, file_name)
    # Write one include per URL variant.
    for type_, variant in (("full", data_full), ("path", data_path)):
        file_include = file_include_path(args, project, type_, file_name)
        render_write_include(args, file_template, file_include, variant)
def format_ccnavigation_footer(args, data):
    """Write the navigation-footer include (full and path-only URLs)."""
    debug_function_name(args, sys._getframe(0).f_code.co_name)
    data_full, data_path = process_header_footer_data(args, data)
    print("- Legalcode (legacy)")
    project = "legalcode"
    file_name = "site-footer.html"
    file_template = file_template_path(args, project, file_name)
    # Write one include per URL variant.
    for type_, variant in (("full", data_full), ("path", data_path)):
        file_include = file_include_path(args, project, type_, file_name)
        render_write_include(args, file_template, file_include, variant)
def format_cc_wpscripts(args, data):
    """Append the footer script tags to the site-footer include."""
    debug_function_name(args, sys._getframe(0).f_code.co_name)
    data_full, data_path = process_scripts_styles_data(args, data)
    print("- Legalcode (legacy)")
    project = "legalcode"
    file_template = file_template_path(args, project, "footer-scripts.html")
    include_name = "site-footer.html"
    # Append (mode="a") the scripts to each previously written footer.
    for type_, variant in (("full", data_full), ("path", data_path)):
        file_include = file_include_path(
            args, project, type_, include_name, action="Appended"
        )
        render_write_include(
            args, file_template, file_include, variant, mode="a"
        )
def format_cc_wpstyles(args, data):
    """Write the HTML-head style include (full and path-only URLs)."""
    debug_function_name(args, sys._getframe(0).f_code.co_name)
    data_full, data_path = process_scripts_styles_data(args, data)
    print("- Legalcode (legacy)")
    project = "legalcode"
    file_name = "html-head.html"
    file_template = file_template_path(args, project, file_name)
    # Write one include per URL variant.
    for type_, variant in (("full", data_full), ("path", data_path)):
        file_include = file_include_path(args, project, type_, file_name)
        render_write_include(args, file_template, file_include, variant)
def main():
    """Fetch each WordPress endpoint and dispatch to its formatter."""
    args = setup()
    args.j2env = jinja2.Environment(loader=jinja2.FileSystemLoader("./"))
    prime_style_script_cache(args)
    for endpoint in ENDPOINTS:
        end_url = f"https://{args.domain}{endpoint}"
        # e.g. "/wp-json/cc-wpstyles/get" -> format_cc_wpstyles
        format_function = f"format_{endpoint.split('/')[2].replace('-', '_')}"
        data = request_data(args, end_url)
        globals()[format_function](args, data)
    if args.debug:
        print()
        print()
if __name__ == "__main__":
    try:
        main()
    except SystemExit as e:
        sys.exit(e.code)
    except KeyboardInterrupt:
        print("INFO (130) Halted via KeyboardInterrupt.", file=sys.stderr)
        sys.exit(130)
    except ScriptError as e:
        print("CRITICAL {}".format(e), file=sys.stderr)
        sys.exit(e.code)
    except Exception:  # top-level safety net: report and exit non-zero
        print("ERROR (1) Unhandled exception:", file=sys.stderr)
        # Fix: print_exc() prints the traceback itself and returns None,
        # so the old print(traceback.print_exc()) also wrote "None".
        print(traceback.format_exc(), file=sys.stderr)
        sys.exit(1)
| StarcoderdataPython |
240796 | <reponame>edilson/django_ecommerce
from django.conf.urls import url
from . import views
urlpatterns = [
    # Add a product (identified by slug) to the shopping cart.
    url(r'^adicionar/(?P<slug>[\w_-]+)/$', views.CreateCartItemView.as_view(), name='create_cartitem'),
    # Show the current cart contents.
    url(r'^carrinho/$', views.CartItemView.as_view(), name='cart_item'),
    # Checkout page.
    url(r'^finalizando/$', views.CheckoutView.as_view(), name='checkout'),
    # List the user's orders.
    url(r'^meus-pedidos/$', views.OrderListView.as_view(), name='order_list'),
    # Detail view for a single order, selected by primary key.
    url(r'^meus-pedidos/(?P<pk>\d+)/$', views.OrderDetailView.as_view(), name='order_detail'),
]
| StarcoderdataPython |
259424 | <gh_stars>10-100
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# make-provision-release.py
#
# This script takes a lispers.net tgz file and adds lisp.config.provision-xtr,
# provision.py and RL.provision-xtr files to it so the tgz can be provisioned
# for a particular instance-ID and mapping system.
#
# Usage: python make-provision-release.py [<release> <iid> <build-tag>]
#
# -----------------------------------------------------------------------------
from __future__ import print_function
import os
try:
from commands import getoutput
except:
from subprocess import getoutput
#endtry
import sys
from builtins import input
#------------------------------------------------------------------------------
def bold(string):
    # Wrap *string* in ANSI bold escape codes.
    return "".join(("\033[1m", string, "\033[0m"))
#enddef
def green(string):
    # Wrap *string* in ANSI bright-green escape codes.
    return "".join(("\033[92m", string, "\033[0m"))
#enddef
#------------------------------------------------------------------------------
#
# First check that this is running in the build directory.
#
# Sanity checks: must be run from a directory named "build" that
# contains a "releases" subdirectory.
curdir = getoutput("pwd").split("/")
if curdir[-1] != "build":
    print("Need to be in directory named 'build'")
    sys.exit(1)
if not os.path.exists("./releases"):
    print("Directory './releases' needs to be in build directory")
    sys.exit(1)

#
# Get input parameters (from argv, or interactively when not supplied).
#
if len(sys.argv) == 4:
    version = sys.argv[1]
    iid = sys.argv[2]
    tag = sys.argv[3]
else:
    version = input("Enter version number (in format x.y): ")
    iid = input("Enter LISP xTR instance-ID: ")
    tag = input("Enter tag to be part of tgz filename: ")
    print("")

release = "./releases/release-{}".format(version)
if not os.path.exists(release):
    print("Could not find directory {}".format(release))
    sys.exit(1)

tgz = "lispers.net-x86-release-{}.tgz".format(version)
tmp = "/tmp/{}".format(tgz)

# NOTE(review): version/iid/tag are interpolated into shell commands
# below without quoting -- only run this with trusted input.

#
# Untar the release into a /tmp work directory so the provisioning files
# can be combined into a "provisioned release" tarball.
#
print("Adding provision files to release {} tarball files ...".format(
    bold(version)), end=" ")
os.system("mkdir -p {}; cp {}/{} {}".format(tmp, release, tgz, tmp))
os.system("chmod 755 {}/{}".format(tmp, tgz))
os.system("cd {}; tar zxf {}; rm {}".format(tmp, tgz, tgz))
os.system("cp lisp.config.provision-xtr provision-lisp.py {}".format(tmp))
os.system("cp RL.provision-xtr {}/RL".format(tmp))
print("done")

#
# Customize the RL file for the requested instance-ID.  RL-template is
# removed since the RL file is now instance-specific.
#
print("Customize RL.provision-xtr file with provisioning parameters ...",
      end=" ")
rl_path = "{}/RL".format(tmp)
with open(rl_path, "r") as rl_file:
    buf = rl_file.read()
buf = buf.replace('set IID = "0"', 'set IID = "{}"'.format(iid))
with open(rl_path, "w") as rl_file:
    rl_file.write(buf)
os.system("rm {}/RL-template".format(tmp))
print("done")

#
# Create the new "provisioned release" tarball.
#
ptgz = "lispers.net-x86-release-{}-iid-{}-{}.tgz".format(version, iid, tag)
print("Creating provisioned tarball for release {}, instance-ID {} ...".format(
    bold(version), bold(iid)), end=" ")
os.system("cd {}; export COPYFILE_DISABLE=true; tar czf {} *".format(tmp, ptgz))
print("done")

#
# Copy to the release directory where the standard release resides, then
# clean up the work directory and tell the user where the file is.
#
os.system("cp {}/{} {}/{}".format(tmp, ptgz, release, ptgz))
os.system("rm -fr {}".format(tmp))

print("New file {} written to {} directory".format(green(ptgz),
    bold("releases/release-{}".format(version))))
sys.exit(0)
| StarcoderdataPython |
315144 | <filename>apysc/_display/x_interface.py
"""Class implementation for the x-coordinate interface.
"""
from typing import Dict
from apysc._animation.animation_move_interface import AnimationMoveInterface
from apysc._animation.animation_x_interface import AnimationXInterface
from apysc._type.attr_linking_interface import AttrLinkingInterface
from apysc._type.int import Int
from apysc._type.revert_interface import RevertInterface
class XInterface(
        AnimationXInterface, AnimationMoveInterface, RevertInterface,
        AttrLinkingInterface):
    """Mix-in that provides an animatable, revertible ``x``
    (x-coordinate) attribute for display objects."""

    # Current x-coordinate; created lazily by
    # _initialize_x_if_not_initialized.
    _x: Int

    def _initialize_x_if_not_initialized(self) -> None:
        """
        Initialize the _x attribute if it hasn't been initialized yet.
        """
        if hasattr(self, '_x'):
            return
        self._x = Int(0)

        self._append_x_attr_linking_setting()

    def _append_x_attr_linking_setting(self) -> None:
        """
        Append an x attribute linking setting.
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_=self._append_x_attr_linking_setting,
                locals_=locals(),
                module_name=__name__, class_=XInterface):
            self._append_applying_new_attr_val_exp(
                new_attr=self._x, attr_name='x')
            self._append_attr_to_linking_stack(
                attr=self._x, attr_name='x')

    @property
    def x(self) -> Int:
        """
        Get a x-coordinate.

        Returns
        -------
        x : Int
            X-coordinate.

        References
        ----------
        - Display object x and y interfaces document
        - https://bit.ly/2ToA5ba
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_='x', locals_=locals(),
                module_name=__name__, class_=XInterface):
            from apysc._type import value_util
            self._initialize_x_if_not_initialized()
            # Return a copy so callers cannot mutate the stored value.
            x: ap.Int = value_util.get_copy(value=self._x)
            return x

    @x.setter
    def x(self, value: Int) -> None:
        """
        Update x-coordinate.

        Parameters
        ----------
        value : int or Int
            X-coordinate value.

        References
        ----------
        - Display object x and y interfaces document
        - https://bit.ly/2ToA5ba
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_='x', locals_=locals(),
                module_name=__name__, class_=XInterface):
            from apysc._type.number_value_interface import NumberValueInterface
            from apysc._validation import number_validation
            # Plain Python ints are validated and wrapped into ap.Int.
            if not isinstance(value, NumberValueInterface):
                number_validation.validate_integer(integer=value)
                value = ap.Int(value=value)
            self._x = value
            self._x._append_incremental_calc_substitution_expression()
            self._append_x_update_expression()

            self._append_x_attr_linking_setting()

    def _append_x_update_expression(self) -> None:
        """
        Append the x position updating expression (JavaScript).
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_=self._append_x_update_expression, locals_=locals(),
                module_name=__name__, class_=XInterface):
            from apysc._type import value_util
            self._initialize_x_if_not_initialized()
            value_str: str = value_util.get_value_str_for_expression(
                value=self._x)
            expression: str = (
                f'{self.variable_name}.x({value_str});'
            )
            ap.append_js_expression(expression=expression)

    # Snapshot name -> saved x value; managed by _make_snapshot/_revert.
    _x_snapshots: Dict[str, int]

    def _make_snapshot(self, *, snapshot_name: str) -> None:
        """
        Make a value's snapshot.

        Parameters
        ----------
        snapshot_name : str
            Target snapshot name.
        """
        self._initialize_x_if_not_initialized()
        self._set_single_snapshot_val_to_dict(
            dict_name='_x_snapshots',
            value=int(self._x._value), snapshot_name=snapshot_name)

    def _revert(self, *, snapshot_name: str) -> None:
        """
        Revert a value if snapshot exists.

        Parameters
        ----------
        snapshot_name : str
            Target snapshot name.
        """
        if not self._snapshot_exists(snapshot_name=snapshot_name):
            return
        self._x._value = self._x_snapshots[snapshot_name]
| StarcoderdataPython |
255739 | <filename>start.py
import glob
from zipfile import ZipFile
files = glob.glob('./*.zip')

# Try every 4-digit numeric password against each encrypted archive.
# (Replaces four nested digit loops with a single zero-padded counter,
# and fixes the invalid "password = <PASSWORD>" placeholder line that
# made the original file a SyntaxError.)
for number in range(10000):
    guess = "%04d" % number
    for n, zip_file in enumerate(files):
        if zip_file is None:
            # Password already found for this archive.
            continue
        with ZipFile(zip_file) as zf:
            try:
                zf.setpassword(bytes(guess, 'utf-8'))
                # testzip() returns None when every member decrypts and
                # passes its CRC check, i.e. the password is correct.
                if zf.testzip() is None:
                    print("File: %s Password: %s" % (zip_file, guess))
                    files[n] = None
            except Exception:
                # Wrong password (or unreadable member): try next guess.
                pass
165175 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division
from eight import *
from .lca import LCA
from numpy.linalg import solve
class DenseLCA(LCA):
    def solve_linear_system(self):
        """
        Solve the linear system :math:`Ax=B` with a dense solver.

        The (sparse) technosphere matrix is converted to a dense array
        and solved directly with ``numpy.linalg.solve``.

        NOTE(review): unlike the sparse base-class implementation, no
        factorization is cached here -- every call re-solves the dense
        system from scratch.
        """
        return solve(self.technosphere_matrix.toarray(), self.demand_array)
| StarcoderdataPython |
5039796 | #coding=utf-8
'''
@author: <NAME>
@since: 2015-02-13
'''
import httplib
import json
import random
import re
import sys
import time
import urllib2
reload(sys)
sys.setdefaultencoding('utf8')
def getRandomIP():
    """Compare IP geolocation answers from four providers.

    For each address in ``china.ip``, query Baidu LBS, 133ip, ip138 and
    chinaz, and write the results side by side to ``ipcompare.csv``
    (GB2312-encoded).  Python 2 only (httplib/urllib2); needs network.
    """
    file_read = open('china.ip', 'r')
    ip_lines = file_read.readlines()
    file_read.close()
    file_write = open('ipcompare.csv', 'w')
    file_write.write("ip,LBS ip定位API, IP138,133ip,http://ip.chinaz.com/ \n".encode("gb2312"))
    file_write.flush()
    cookiecoiunt = 1
    for ipraw in ip_lines:
        print(ipraw.split("\n")[0].rstrip())
        ip = ipraw.split("\n")[0].rstrip()

        # --- Provider 1: Baidu LBS IP-location API ---
        iplocation = "/location/ip?ak=wOexvA0egnE0qUUWHYcyY4wX&ip=" + ip
        result = getRequest("api.map.baidu.com", iplocation)
        location = json.loads(result)
        lbsiplocation = ""
        try:
            province = location['content']['address_detail']['province']
            city = location['content']['address_detail']['city']
            district = location['content']['address_detail']['district']
            street = location['content']['address_detail']['street']
            print(province + "," + city + "," + district + "," + street)
            # Fix: the identifier was previously garbled as "dist rict",
            # which made this module a SyntaxError.
            lbsiplocation = province + "," + city + "," + district + "," + street
        except Exception:
            print('request: ' + iplocation)
            print('result: ' + result)

        # --- Provider 2: 133ip JSON API ---
        ip133context = "/gn/jk.php?an=1&q=" + ip
        result = getRequest("www.133ip.com", ip133context)
        location = json.loads(result)
        ip133location = ""
        try:
            ip133location = location['s1']
        except Exception:
            # Fix: previously printed the (still empty) ip133location
            # instead of the request that failed.
            print('request: ' + ip133context)
            print('result: ' + result)

        # --- Providers 3 & 4: ip138 and chinaz (HTML scraping) ---
        chinazcontext = "http://ip.chinaz.com/?IP="
        chinazLocation = ""
        ip138Location = ""
        headers = { 'User-Agent' : 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)', 'Cookie' : str(cookiecoiunt) + ""}
        try:
            ip138context = "http://www.ip138.com/ips138.asp?ip=" + ip + "&action=2"
            m = urllib2.urlopen(ip138context).read().decode("gb2312")
            pattern = re.compile(r'<ul[\s]+class=\"ul1\">.*</ul>')
            match = pattern.findall(m)
            pattern = re.compile(r"<li>[^<li>]*</li>")
            result = pattern.findall(match[0])
            print(result[0][13:-5] + "---------------ip138--------------")
            ip138Location = result[0][13:-5]

            # NOTE(review): *headers* above is built but never passed —
            # preserved as-is (headers={}) to keep behavior unchanged;
            # confirm whether the Cookie header was intended here.
            request = urllib2.Request(chinazcontext + ip, headers={})
            m = urllib2.urlopen(request).read()
            pattern = re.compile(r'<strong class=\"red\">查询结果.*</strong>')
            match = pattern.findall(m)
            pattern = re.compile(r"==>> .*</strong>")
            result = pattern.findall(match[0])
            print(result[0][20:-9] + "-----------chinaz----------------")
            chinazLocation = result[0][20:-9]
        except Exception:
            print('result: ' + result[0])
            print(m)

        # Assemble the CSV row; skip the row when GB2312 encoding fails.
        item = ""
        try:
            item = (ip + "," + lbsiplocation + "," + ip138Location + "," + ip133location + "," + chinazLocation + "\n").encode("gb2312")
        except Exception:
            continue
        file_write.write(item)
        print("SUCCESS----------------------")
        file_write.flush()
        cookiecoiunt = cookiecoiunt + 1
    file_write.close()
def getRequest(requestUrl,context):
    """Issue a plain HTTP GET for *context* on host *requestUrl*.

    Returns the raw response body, printing the status line and payload
    for debugging.  Uses the Python 2 ``httplib`` API.
    """
    conn = httplib.HTTPConnection(requestUrl)
    conn.request("GET", context)
    r1 = conn.getresponse()
    print(r1.status, r1.reason)
    data1 = r1.read()
    print(data1)
    conn.close()
    return data1
getRandomIP() | StarcoderdataPython |
8068205 | <gh_stars>0
from dataclasses import dataclass
from bindings.csw.vertical_csref_type import VerticalCsrefType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class UsesVerticalCs(VerticalCsrefType):
    """
    Association to the vertical coordinate system used by this CRS.
    """

    class Meta:
        # Serialization metadata: XML element name and target namespace.
        name = "usesVerticalCS"
        namespace = "http://www.opengis.net/gml"
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.