id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
6644052 | <filename>infrastructure/app_stack.py
from aws_cdk import (
core as _core,
aws_apigateway as _apigateway,
aws_lambda as _lambda,
aws_iam as _iam,
)
class AppStack(_core.Stack):
    """CDK stack: a rate-limiting demo Lambda fronted by an API Gateway
    proxy, with IAM permissions for a DynamoDB token-bucket table."""

    def __init__(self, scope: _core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Pre-built deployment package produced by the lambda build step.
        # NOTE(review): Code.asset() is deprecated in newer CDK releases in
        # favour of Code.from_asset() -- confirm against the pinned version.
        code = _lambda.Code.asset('lambda/.dist/lambda.zip')
        function = _lambda.Function(self,
                                    'rate-limit-demo',
                                    function_name='rate-limit-demo',
                                    runtime=_lambda.Runtime.PYTHON_3_6,
                                    code=code,
                                    handler='demo_handler.handler',
                                    tracing=_lambda.Tracing.ACTIVE,
                                    )
        # ARN of the DynamoDB table used for token buckets, rendered for
        # this stack's own account and region.
        bucket_tokens_table = _core.Stack.format_arn(self,
                                                     service='dynamodb',
                                                     resource='table',
                                                     sep='/',
                                                     resource_name='buckets_table')
        # Allow the function to create, describe and read/write that table.
        rate_limit_ddb_statement = _iam.PolicyStatement()
        rate_limit_ddb_statement.add_resources(bucket_tokens_table)
        rate_limit_ddb_statement.add_actions('dynamodb:DescribeTable')
        rate_limit_ddb_statement.add_actions('dynamodb:CreateTable')
        rate_limit_ddb_statement.add_actions('dynamodb:GetItem')
        rate_limit_ddb_statement.add_actions('dynamodb:PutItem')
        rate_limit_ddb_statement.add_actions('dynamodb:UpdateItem')
        function.add_to_role_policy(rate_limit_ddb_statement)
        # Proxy all API Gateway requests straight to the Lambda function.
        api = _apigateway.LambdaRestApi(self, 'rate-limit-demo-api', handler=function)
| StarcoderdataPython |
12834846 | from ..commandparser import Member
from ..discordbot import unmoot_user
import discord
# Command metadata -- presumably read by the bot's command framework;
# confirm against commandparser/discordbot.
name = 'unmoot'                    # command name
channels = None                    # no channel restriction
roles = ('helper', 'trialhelper')  # roles allowed to invoke the command
args = '<member>'                  # usage string shown in help
async def run(message, member: Member):
    'Removes a moot from a member'
    unmoot_reason = f'Unmooted by {str(message.author)}'
    await unmoot_user(member.id, reason=unmoot_reason)
    confirmation = discord.Embed(description=f'<@{member.id}> has been unmooted.')
    await message.send(embed=confirmation)
| StarcoderdataPython |
import random

# Number-guessing game: the player narrows down a random integer in [1, 100].
randNumber = random.randint(1, 100)
# print(randNumber)  # reveals the answer; delete before playing the game
userGUESS = None
guesses = 0
while userGUESS != randNumber:
    # NOTE(review): non-numeric input raises ValueError and aborts the game.
    userGUESS = int(input("Enter your guess: "))
    guesses += 1
    if userGUESS == randNumber:
        print("You guessed it right!!")
    else:
        if userGUESS > randNumber:
            print("ENTER A SMALLER NUMBER")
        else:
            print("ENTER A LARGER NUMBER ")
print(f"You guessed the number in {guesses} guesses")
| StarcoderdataPython |
6590586 | #!/usr/bin/env python
def remove_junk(lst):
    """Filter a list of 2-tuples, keeping the first element of each pair
    that lies in the byte-index range [0, 32).

    The second element of each pair is ignored (it was unused in the
    original as well).

    INPUT:
        lst: iterable of 2-tuples (value, _).
    OUTPUT:
        list of the kept first elements, in order.
    """
    return [v for v, _unused in lst if 0 <= v < 32]
# Parse the measurement dump: each record is four consecutive lines
# (p value, pin trace, round-1 data, round-2 data).
f = open('some_result_order_big_t.txt', 'r')
p_text = f.readline()
measure = []
while p_text:
    # SECURITY NOTE(review): eval() on file contents executes arbitrary
    # code; acceptable only because the input file is generated locally.
    # Consider ast.literal_eval for literal-only data.
    pin_trace = eval(f.readline().strip())
    _1_round = eval(f.readline().strip())
    _2_round = eval(f.readline().strip())
    # First 16 pin entries belong to round 1, the next 16 to round 2.
    measure.append({"p": eval(p_text.strip()), "pin": { "r_1" : pin_trace[0:16], "r_2" : pin_trace[16:32]}, "r_1" : remove_junk(_1_round), "r_2" : remove_junk(_2_round)})
    p_text = f.readline()
def false_true_positive(sample, reference):
    """Compare *sample* against *reference* and gather hit statistics.

    INPUT:
        sample: list of observed values.
        reference: list of expected (ground-truth) values.
    OUTPUT:
        tuple of (indexes in reference of matched sample values,
                  true-positive rate over sample,
                  false-positive rate over sample,
                  false-negative rate over reference).

    Raises ZeroDivisionError when sample or reference is empty
    (unchanged from the original behaviour).
    """
    true_lst_index = []
    t_p = 0
    f_p = 0
    f_n = 0
    for v in sample:
        try:
            true_lst_index.append(reference.index(v))
            t_p += 1
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; list.index only raises ValueError.
        except ValueError:
            f_p += 1
    for v in reference:
        if v not in sample:  # expected value never observed
            f_n += 1
    return (true_lst_index,
            float(t_p) / len(sample),
            float(f_p) / len(sample),
            float(f_n) / len(reference))
def order_stat(true_samples):
    """Return the fraction of elements that did NOT have to be dropped to
    make the sequence non-decreasing (greedy first-inversion removal)."""
    working = list(true_samples)
    dropped = 0
    idx = 0
    while idx < len(working):
        for nxt in range(idx + 1, len(working)):
            if working[idx] > working[nxt]:
                # NOTE: remove() deletes the first occurrence of the VALUE,
                # which can differ from position nxt when duplicates exist;
                # this mirrors the original algorithm exactly.
                working.remove(working[nxt])
                dropped += 1
                break
        idx += 1
    return 1 - (float(dropped) / len(true_samples))
# Report per-measurement precision/recall and ordering statistics for both
# rounds.
# BUG FIX: the original used Python 2 `print {...}` statements, which are a
# SyntaxError under Python 3; print() calls work under both interpreters.
for m in measure:
    true_lst_index, true_pos, false_pos, false_neg = false_true_positive(m['r_1'], m["pin"]["r_1"])
    order = order_stat(tuple(true_lst_index))
    print({"true_pos": true_pos, "false_pos": false_pos, "false_neg": false_neg, "ordered": order})
    true_lst_index, true_pos, false_pos, false_neg = false_true_positive(m['r_2'], m["pin"]["r_2"])
    order = order_stat(tuple(true_lst_index))
    print({"true_pos": true_pos, "false_pos": false_pos, "false_neg": false_neg, "ordered": order})
| StarcoderdataPython |
8177130 | <gh_stars>10-100
# Copyright (c) 2020 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils import is_array
class _Picker(object):
_LABEL = None
def __init__(self, items=None, max_num=None):
self._items = list()
if is_array(items):
self._items = items
self._max_num = dict()
if isinstance(max_num, dict):
for tag in items:
if tag in max_num:
self._max_num[tag] = max_num[tag]
else:
self._max_num[tag] = None
else:
for tag in items:
self._max_num[tag] = None
self._counter = dict()
for tag in self._items:
self._counter[tag] = 0
@property
def items(self):
return self._items
@items.setter
def items(self, values):
assert is_array(values)
self._items = values
@property
def max_num(self):
return self._max_num
@property
def counter(self):
return self._counter
def are_all_counters_max(self):
for tag in self._counter:
if tag not in self._max_num:
return False
if self._max_num[tag] is None:
return False
if self._counter[tag] < self._max_num[tag]:
return False
return True
def are_all_counters_limited(self):
for tag in self._counter:
if tag not in self._max_num:
return False
if self._max_num[tag] is None or self._max_num[tag] < 0:
return False
return True
def get_counter(self, tag):
assert tag in self._counter
return self._counter[tag]
def increase_counter(self, tag):
assert tag in self._counter
self._counter[tag] += 1
def set_max_num(self, tag, value):
assert value > 0 or value is None
self._max_num[tag] = value
if tag not in self._items:
self._items.append(tag)
self._counter[tag] = 0
def get_max_num_items(self, tag):
if tag in self._max_num:
return self._max_num[tag]
else:
return None
def reset(self):
for tag in self._counter:
self._counter[tag] = 0
def get_selection(self):
raise NotImplementedError()
| StarcoderdataPython |
1839581 | <filename>python/odd-even.py
# Print, for every integer from 1 to n, whether it is odd or even.
n = int(input("enter a number="))
i = 1
while i <= n:
    if i & 1:  # lowest bit set -> odd
        print(i, "is odd")
    else:
        print(i, "is even")
    i = i + 1
print("loop ends")
| StarcoderdataPython |
8088555 | import typing
import pandas as pd
import scipy.stats
from pyextremes.models.model_emcee import Emcee
from pyextremes.models.model_mle import MLE
def get_model(
    model: str,
    extremes: pd.Series,
    distribution: typing.Union[str, scipy.stats.rv_continuous],
    distribution_kwargs: typing.Optional[dict] = None,
    **kwargs,
) -> typing.Union[MLE, Emcee]:
    """
    Get distribution fitting model and fit it to given extreme values.
    Parameters
    ----------
    model : str
        Name of model.
        Supported models:
            MLE - Maximum Likelihood Estimate (MLE) model.
                Based on 'scipy' package (scipy.stats.rv_continuous.fit).
            Emcee - Markov Chain Monte Carlo (MCMC) model.
                Based on 'emcee' package by <NAME>.
    extremes : pandas.Series
        Time series of extreme events.
    distribution : str or scipy.stats.rv_continuous
        Distribution name compatible with scipy.stats
        or a subclass of scipy.stats.rv_continuous.
        See https://docs.scipy.org/doc/scipy/reference/stats.html
    distribution_kwargs : dict, optional
        Special keyword arguments, passed to the `.fit` method of the distribution.
        These keyword arguments represent parameters to be held fixed.
        Names of parameters to be fixed must have 'f' prefixes. Valid parameters:
            - shape(s): 'fc', e.g. fc=0
            - location: 'floc', e.g. floc=0
            - scale: 'fscale', e.g. fscale=1
        By default, no parameters are fixed.
        See documentation of a specific scipy.stats distribution
        for names of available parameters.
    kwargs
        Keyword arguments passed to a model .fit method.
        MLE model:
            MLE model takes no additional arguments.
        Emcee model:
            n_walkers : int, optional
                The number of walkers in the ensemble (default=100).
            n_samples : int, optional
                The number of steps to run (default=500).
            progress : bool or str, optional
                If True, a progress bar will be shown as the sampler progresses.
                If a string, will select a specific tqdm progress bar.
                Most notable is 'notebook', which shows a progress bar
                suitable for Jupyter notebooks.
                If False (default), no progress bar will be shown.
                This progress bar is a part of the `emcee` package.
    Returns
    -------
    model : MLE or Emcee
        Distribution fitting model fitted to the `extremes`.
    """
    # Shared constructor arguments for either model class.
    distribution_model_kwargs = {
        "extremes": extremes,
        "distribution": distribution,
        "distribution_kwargs": distribution_kwargs,
        **kwargs,
    }
    if model == "MLE":
        return MLE(**distribution_model_kwargs)
    elif model == "Emcee":
        return Emcee(**distribution_model_kwargs)
    else:
        # FIX: error message grammar ("available models", plural).
        raise ValueError(
            f"invalid value in '{model}' for the 'model' argument, "
            f"available models: 'MLE', 'Emcee'"
        )
| StarcoderdataPython |
1816661 | <reponame>faezakamran/sentence-transformers
from . import SentenceEvaluator
import logging
import os
import csv
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
from sklearn.metrics import average_precision_score
import numpy as np
from typing import List
from ..readers import InputExample
logger = logging.getLogger(__name__)
class BinaryClassificationEvaluator(SentenceEvaluator):
    """
    Evaluate a model based on the similarity of the embeddings by calculating the accuracy of identifying similar and
    dissimilar sentences.
    The metrics are the cosine similarity as well as euclidean and Manhattan distance
    The returned score is the accuracy with a specified metric.
    The results are written in a CSV. If a CSV already exists, then values are appended.
    The labels need to be 0 for dissimilar pairs and 1 for similar pairs.
    :param sentences1: The first column of sentences
    :param sentences2: The second column of sentences
    :param labels: labels[i] is the label for the pair (sentences1[i], sentences2[i]). Must be 0 or 1
    :param name: Name for the output
    :param batch_size: Batch size used to compute embeddings
    :param show_progress_bar: If true, prints a progress bar
    :param write_csv: Write results to a CSV file
    """

    def __init__(self, sentences1: List[str], sentences2: List[str], labels: List[int], name: str = '', batch_size: int = 32, show_progress_bar: bool = False, write_csv: bool = True):
        self.sentences1 = sentences1
        self.sentences2 = sentences2
        self.labels = labels

        # Every pair needs one sentence in each column and a binary label.
        assert len(self.sentences1) == len(self.sentences2)
        assert len(self.sentences1) == len(self.labels)
        for label in labels:
            assert (label == 0 or label == 1)

        self.write_csv = write_csv
        self.name = name
        self.batch_size = batch_size
        # NOTE(review): dead branch -- the parameter defaults to False,
        # never None, so this INFO/DEBUG auto-detection cannot trigger.
        if show_progress_bar is None:
            show_progress_bar = (logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG)
        self.show_progress_bar = show_progress_bar

        # NOTE(review): 'manhatten' is misspelled, but these are persisted
        # CSV column names -- renaming would break existing result files.
        self.csv_file = "binary_classification_evaluation" + ("_"+name if name else '') + "_results.csv"
        self.csv_headers = ["epoch", "steps",
                            "cossim_accuracy", "cossim_accuracy_threshold", "cossim_f1", "cossim_precision", "cossim_recall", "cossim_f1_threshold", "cossim_ap",
                            "manhatten_accuracy", "manhatten_accuracy_threshold", "manhatten_f1", "manhatten_precision", "manhatten_recall", "manhatten_f1_threshold", "manhatten_ap",
                            "euclidean_accuracy", "euclidean_accuracy_threshold", "euclidean_f1", "euclidean_precision", "euclidean_recall", "euclidean_f1_threshold", "euclidean_ap",
                            "dot_accuracy", "dot_accuracy_threshold", "dot_f1", "dot_precision", "dot_recall", "dot_f1_threshold", "dot_ap"]

    @classmethod
    def from_input_examples(cls, examples: List[InputExample], **kwargs):
        """Alternate constructor: build the evaluator from InputExample pairs."""
        sentences1 = []
        sentences2 = []
        scores = []
        for example in examples:
            sentences1.append(example.texts[0])
            sentences2.append(example.texts[1])
            scores.append(example.label)
        return cls(sentences1, sentences2, scores, **kwargs)

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """Run the evaluation, optionally append a CSV row, and return the
        best Average Precision across the four similarity functions."""
        if epoch != -1:
            if steps == -1:
                out_txt = f" after epoch {epoch}:"
            else:
                out_txt = f" in epoch {epoch} after {steps} steps:"
        else:
            out_txt = ":"

        logger.info("Binary Accuracy Evaluation of the model on " + self.name + " dataset" + out_txt)

        scores = self.compute_metrices(model)

        # Main score is the max of Average Precision (AP)
        main_score = max(scores[short_name]['ap'] for short_name in scores)

        # Flatten the nested score dict in csv_headers order; header names
        # encode "<similarity-fn>_<metric>".
        file_output_data = [epoch, steps]
        for header_name in self.csv_headers:
            if '_' in header_name:
                sim_fct, metric = header_name.split("_", maxsplit=1)
                file_output_data.append(scores[sim_fct][metric])

        if output_path is not None and self.write_csv:
            csv_path = os.path.join(output_path, self.csv_file)
            if not os.path.isfile(csv_path):
                # New file: write the header row first.
                with open(csv_path, newline='', mode="w", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(self.csv_headers)
                    writer.writerow(file_output_data)
            else:
                with open(csv_path, newline='', mode="a", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(file_output_data)

        return main_score

    def compute_metrices(self, model):
        """Encode all sentences once, then compute accuracy/F1/precision/
        recall/AP (with best thresholds) for cosine, Manhattan, Euclidean
        and dot-product similarities."""
        # Deduplicate sentences so each is embedded only once.
        sentences = list(set(self.sentences1 + self.sentences2))
        embeddings = model.encode(sentences, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        emb_dict = {sent: emb for sent, emb in zip(sentences, embeddings)}
        embeddings1 = [emb_dict[sent] for sent in self.sentences1]
        embeddings2 = [emb_dict[sent] for sent in self.sentences2]

        cosine_scores = 1 - paired_cosine_distances(embeddings1, embeddings2)
        manhattan_distances = paired_manhattan_distances(embeddings1, embeddings2)
        euclidean_distances = paired_euclidean_distances(embeddings1, embeddings2)

        embeddings1_np = np.asarray(embeddings1)
        embeddings2_np = np.asarray(embeddings2)
        dot_scores = [np.dot(embeddings1_np[i], embeddings2_np[i]) for i in range(len(embeddings1_np))]

        labels = np.asarray(self.labels)
        output_scores = {}
        # `reverse` is True when HIGHER scores mean MORE similar (cosine,
        # dot); False for distances, where lower means more similar.
        for short_name, name, scores, reverse in [['cossim', 'Cosine-Similarity', cosine_scores, True], ['manhatten', 'Manhatten-Distance', manhattan_distances, False], ['euclidean', 'Euclidean-Distance', euclidean_distances, False], ['dot', 'Dot-Product', dot_scores, True]]:
            acc, acc_threshold = self.find_best_acc_and_threshold(scores, labels, reverse)
            f1, precision, recall, f1_threshold = self.find_best_f1_and_threshold(scores, labels, reverse)
            # Distances are negated so that higher always means more similar
            # for average_precision_score.
            ap = average_precision_score(labels, scores * (1 if reverse else -1))

            logger.info("Accuracy with {}: {:.2f}\t(Threshold: {:.4f})".format(name, acc * 100, acc_threshold))
            logger.info("F1 with {}: {:.2f}\t(Threshold: {:.4f})".format(name, f1 * 100, f1_threshold))
            logger.info("Precision with {}: {:.2f}".format(name, precision * 100))
            logger.info("Recall with {}: {:.2f}".format(name, recall * 100))
            logger.info("Average Precision with {}: {:.2f}\n".format(name, ap * 100))

            output_scores[short_name] = {
                'accuracy' : acc,
                'accuracy_threshold': acc_threshold,
                'f1': f1,
                'f1_threshold': f1_threshold,
                'precision': precision,
                'recall': recall,
                'ap': ap
            }

        return output_scores

    @staticmethod
    def find_best_acc_and_threshold(scores, labels, high_score_more_similar: bool):
        """Sweep all score thresholds and return (best accuracy, threshold)."""
        assert len(scores) == len(labels)
        # Sort so the "more similar" end comes first; then a prefix of the
        # rows corresponds to predicting "similar" at a given threshold.
        rows = list(zip(scores, labels))
        rows = sorted(rows, key=lambda x: x[0], reverse=high_score_more_similar)

        max_acc = 0
        best_threshold = -1

        positive_so_far = 0
        remaining_negatives = sum(labels == 0)

        for i in range(len(rows)-1):
            score, label = rows[i]
            if label == 1:
                positive_so_far += 1
            else:
                remaining_negatives -= 1

            acc = (positive_so_far + remaining_negatives) / len(labels)
            if acc > max_acc:
                max_acc = acc
                # Threshold is the midpoint between adjacent sorted scores.
                best_threshold = (rows[i][0] + rows[i+1][0]) / 2

        return max_acc, best_threshold

    @staticmethod
    def find_best_f1_and_threshold(scores, labels, high_score_more_similar: bool):
        """Sweep all score thresholds and return
        (best F1, its precision, its recall, threshold)."""
        assert len(scores) == len(labels)

        scores = np.asarray(scores)
        labels = np.asarray(labels)

        rows = list(zip(scores, labels))
        rows = sorted(rows, key=lambda x: x[0], reverse=high_score_more_similar)

        best_f1 = best_precision = best_recall = 0
        threshold = 0
        nextract = 0  # pairs predicted "similar" at the current threshold
        ncorrect = 0  # true positives among them
        total_num_duplicates = sum(labels)

        for i in range(len(rows)-1):
            score, label = rows[i]
            nextract += 1

            if label == 1:
                ncorrect += 1

            if ncorrect > 0:
                precision = ncorrect / nextract
                recall = ncorrect / total_num_duplicates
                f1 = 2 * precision * recall / (precision + recall)
                if f1 > best_f1:
                    best_f1 = f1
                    best_precision = precision
                    best_recall = recall
                    threshold = (rows[i][0] + rows[i + 1][0]) / 2

        return best_f1, best_precision, best_recall, threshold
| StarcoderdataPython |
1838777 | import numpy as np
import pandas as pd
import os.path
import re
def extract_file_name(file_path, extract_file_extension):
    """Takes a file route and returns the name with or without its extension.
    This function is OS independent.

    INPUT:
        file_path: string.
        extract_file_extension: boolean.
    OUTPUT:
        string representing the file name.
    EXAMPLES:
        'bar.txt', './bar.txt', 'C:\\foo\\bar.txt' or './foo/bar.txt' will
        all return 'bar.txt' when extract_file_extension is True, otherwise
        'bar'.  A name with no extension (e.g. 'bar') is returned unchanged
        instead of being silently truncated.
    """
    file_name_with_extension = os.path.split(file_path)[-1]
    if extract_file_extension:
        return file_name_with_extension
    # BUG FIX: the original used rfind('.'), which returns -1 when there is
    # no dot, chopping the last character off extensionless names (and
    # returning '' for dot-files).  os.path.splitext handles both cases.
    return os.path.splitext(file_name_with_extension)[0]
def convert_to_csv_format(data_row):
    """Convert one Compend 2000 data row to CSV: strip surrounding
    whitespace/tabs, turn the remaining tab separators into commas and
    terminate the row with a newline.

    EXAMPLE:
        '\\tvalue 1\\tvalue 2\\t' -> 'value 1,value 2\\n'
    """
    trimmed_row = data_row.strip()
    return trimmed_row.replace('\t', ',') + '\n'
def extract_HSD_file_name(line):
    """Extract the quoted high-speed-data file name from a Compend 2000
    marker line.

    EXAMPLE:
        'Fast data in =HYPERLINK("n762a_castrol_2-h001.tsv")' returns
        'n762a_castrol_2-h001.tsv'.
    """
    opening_quote = line.find('"')
    closing_quote = line.find('"', opening_quote + 1)
    return line[opening_quote + 1:closing_quote]
def skip_lines(file, last_skippable_line):
    """Advance *file* past its skippable lines and return the last line
    skipped.

    last_skippable_line may be either a string giving the beginning of the
    last line to ignore, or a positive integer giving how many lines to
    skip.  The file object is consumed in place.

    Raises TypeError for any other argument type.
    """
    if isinstance(last_skippable_line, str):
        # Consume lines until one starts with the given prefix; that line
        # is the last one skipped.  (Returns None if never found.)
        for current_line in file:
            if current_line.startswith(last_skippable_line):
                return current_line
    elif isinstance(last_skippable_line, int):
        for _ in range(last_skippable_line):
            current_line = file.readline()
        return current_line
    else:
        raise TypeError('last_skippable_line is not a string or integer')
def extract_adquisition_rate(line):
    """Return the acquisition rate in Hz parsed from *line*.

    The line must contain an integer immediately followed by ' Hz';
    a RuntimeError is raised otherwise.

    EXAMPLE:
        'High speed data using 1000 Hz Trigger Frequency.' -> 1000
    """
    found = re.search(r'(\d+) Hz', line)
    if found is None:
        raise RuntimeError(f'Adquisition rate not found in line: {line}')
    return int(found.group(1))
def calculate_movement_directions(data, stroke_label, direction_label):
    """Add a movement-direction column: the sign (+1/-1/0) of the
    one-step stroke difference.

    The first row has no predecessor and is dropped, after which the index
    is shifted back so it starts at 0 again (side effect of the original
    implementation, kept for compatibility).

    INPUTS:
        data: DataFrame, modified in place.
        stroke_label: string, label of the stroke column in data.
        direction_label: string, label for the new direction column.
    """
    # BUG FIX: pandas.Series.append was removed in pandas 2.0; the
    # append/shift construction it implemented is exactly Series.diff().
    data[direction_label] = np.sign(data.loc[:, stroke_label].diff())
    data.dropna(inplace=True)
    data.index -= 1
def filter_out_outer_values(data, stroke_label, length_factor):
    """Return a copy of the rows whose stroke value lies in the wear
    track's central region.

    The region is bounded by the stroke extremes scaled by
    length_factor / 2 on each side.

    INPUT:
        data: DataFrame.
        stroke_label: string, label of the stroke column in data.
        length_factor: float between 0.0 and 1.0.
    OUTPUT:
        DataFrame.
    EXAMPLE:
        length_factor 0.1 on a 10 mm stroke keeps values within
        0.1 * 10 / 2 = 0.5 mm of the track centre.
    """
    stroke = data.loc[:, stroke_label]
    upper_bound = stroke.max() * length_factor / 2
    lower_bound = stroke.min() * length_factor / 2
    in_center = (stroke <= upper_bound) & (stroke >= lower_bound)
    return data.loc[in_center].copy()
def calculate_cycle_values(data, direction_label, cycle_label, initial_cycle):
    """Add a cycle-number column derived from the direction column.

    A new cycle starts each time the direction returns to the value found
    in the first row after having changed.  Counting starts at
    initial_cycle + 1 for the first row.

    INPUT:
        data: DataFrame, modified in place.
        direction_label: string, label of the direction column in data.
        cycle_label: string, label for the new cycle column.
        initial_cycle: integer.
    """
    class Tracker:
        """Mutable state shared with assign_cycle, kept out of its scope."""
        cycle = initial_cycle
        # BUG FIX: DataFrame.get_value was removed in pandas 1.0; .at is
        # the supported fast scalar accessor.
        initial_sign = data.at[0, direction_label]
        former_sign = -initial_sign

    def assign_cycle(sign):
        """Map one direction sign (1 or -1) to its cycle number, relative
        to the initial sign and the sign seen just before."""
        if Tracker.initial_sign == sign and Tracker.former_sign != sign:
            Tracker.cycle += 1
        Tracker.former_sign = sign
        return Tracker.cycle

    data[cycle_label] = data.loc[:, direction_label].apply(assign_cycle)
| StarcoderdataPython |
1637159 | # find two numbers that are closest to each other among a list of numbers and print min difference.
# a = [3, 5, 8, 9]
# b = sorted(a)
# minDiff = abs(b[1] - b[0])
# count = 0
# while count < len(b) - 1:
# minDiff = min(minDiff, abs(b[count] - b[count + 1]))
# count += 1
# print(minDiff)
| StarcoderdataPython |
29810 | # data structure module
| StarcoderdataPython |
8137863 | # -*- coding: utf-8 -*-
"""Fun with Car Plate Numbers!
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kgkowDKhtVaVJl4hacZrjE03nekWu9C_
Good morning! You have completed the math trail on car plate numbers in a somewhat (semi-)automated way.
Can you actually solve the same tasks with code? Read on and you will be amazed how empowering programming can be to help make mathematics learning more efficient and productive! :)
# Task
Given the incomplete car plate number `SLA9??2H`
Find the missing ?? numbers.
A valid Singapore car plate number typically starts with 3 letters, followed by 4 digits and ending with a 'check' letter.
For example, for the valid car plate number is 'SDG6136T',
- The first letter is 'S' for Singapore.
- The next two letters and the digits are used to compute the check letter, using the following steps:
- Ignoring the first letter 'S', the letters are converted to their positions in the alphabet. For example, 'D' is 4, 'G' is 7 and 'M' is 13.
- The converted letters and the digits form a sequence of 6 numbers. For example, 'DG6136' will give (4, 7, 6, 1, 3, 6).
- The sequence of 6 numbers is multiplied term by term by the sequence of 6 weights (9, 4, 5, 4, 3, 2) respectively, summed up and then divided by 19 to obtain the remainder.
- For example, '476136' will give 4x9 + 7x4 + 6x5 + 1x4 + 3x3 + 6x2 = 119, and this leaves a remainder of 5 after dividing by 19.
- The 'check' letter is obtained by referring to the following table. Thus the check letter corresponding to remainder 5 is T.
```
| Remainder | 'check' letter | Remainder | 'check' letter | Remainder | 'check' letter |
| 0 | A | 7 | R | 13 | H |
| 1 | Z | 8 | P | 14 | G |
| 2 | Y | 9 | M | 15 | E |
| 3 | X | 10 | L | 16 | D |
| 4 | U | 11 | K | 17 | C |
| 5 | T | 12 | J | 18 | B |
| 6 | S | | | | |
```
Reference: https://sgwiki.com/wiki/Vehicle_Checksum_Formula
Pseudocode
```
FOR i = 0 to 99
Car_Plate = 'SJT9' + str(i) + '2H'
IF Check_Letter(Car_Plate) is True
print (Car_Plate) on screen
ENDIF
NEXT
```
"""
# Map letters A-Z to their 1-based positions in the alphabet, used for the
# letters part of the car plate number.  A dictionary is good for this.
letter_map = {}
for i in range(26):  # BUG FIX: was range(27), which also mapped '[' to 27
    char = chr(ord('A') + i)
    letter_map[char] = i + 1
# print(letter_map)  # outputs {'A': 1, 'B': 2, 'C': 3, ..., 'Z': 26}

# Map the weighted-sum remainder (mod 19) to the checksum letter.
check_map = {0: 'A', 1: 'Z', 2: 'Y', 3: 'X', 4: 'U', 5: 'T', 6: 'S', 7: 'R',
             8: 'P', 9: 'M', 10: 'L', 11: 'K', 12: 'J', 13: 'H', 14: 'G',
             15: 'E', 16: 'D', 17: 'C', 18: 'B'}
def check_letter(car_plate):
    """Return True when the last character of *car_plate* matches the
    checksum letter computed from its first two letters and four digits.

    Expects the plate WITHOUT the leading 'S', e.g. 'DG6136T'.
    """
    weights = (9, 4, 5, 4, 3, 2)
    weighted_sum = 0
    for position, character in enumerate(car_plate[:-1]):
        if position < 2:
            value = letter_map[character]  # letters -> alphabet position
        else:
            value = int(character)         # digits taken at face value
        weighted_sum += value * weights[position]
    return check_map[weighted_sum % 19] == car_plate[-1]
# main
# Verify the worked example: 'DG6136T' should be reported as valid.
car_plate = 'DG6136T'  # you can use this to verify the given example
if check_letter(car_plate):
    print('S' + car_plate, car_plate[3:5])
print()

# Brute-force the two missing digits of 'SLA9??2H'.
for i in range(100):  # this loop repeats 100 times for you! :)
    car_plate = 'LA9' + str(i).zfill(2) + '2H'  # 'LA9002H', 'LA9012H', ...
    if check_letter(car_plate):
        print('S' + car_plate, car_plate[3:5])

# main
# Same idea for 'SLA??68Y': brute-force the two missing digits.
for i in range(100):
    car_plate = 'LA' + str(i).zfill(2) + '68Y'
    if check_letter(car_plate):
        print('S' + car_plate, car_plate[2:4])

# Stray Colab cell: demonstrates zero padding ('0'.zfill(2) == '00').
'0'.zfill(2)
"""# Challenge
- How many car_plate numbers start with SMV and end with D?
"""
# main
# Count how many valid plate numbers start with SMV and end with D.
count = 0
for i in range(10000):
    car_plate = 'MV' + str(i).zfill(4) + 'D'
    if check_letter(car_plate):
        count += 1
print(count)

# main
# Collect (and print) every valid SMV...D plate, then show how many.
wanted = []
for i in range(10000):
    car_plate = 'MV' + str(i).zfill(4) + 'D'
    if check_letter(car_plate):
        print('S' + car_plate, end=' ')
        wanted.append('S' + car_plate)
print(len(wanted))
"""# More challenges!
Suggest one or more variations of problems you can solve with car plate numbers using the power of Python programming. Some ideas include:
* Check if a given car plate number is valid
* Which valid car plate numbers have a special property (eg prime number, contains at least two '8' digits, does not contain the lucky number 13, etc.)
* If there are the same number of available car plate numbers each series (eg SMV and SMW)
* (your idea here)
Submit a pull request with your ideas and/or code to contribute to learning Mathematics using programming to benefit the world! :)
"""
"""# This is really more than car plate numbers!
You have just learned an application of mathematics called modulus arithmetic in generating check letters/digits. Do you know that actually the following are also applications of modulus arithmetic?
* Singapore NRIC numbers (http://www.ngiam.net/NRIC/NRIC_numbers.ppt)
* international ISBNs (https://en.wikipedia.org/wiki/International_Standard_Book_Number)
* credit card numbers (https://en.wikipedia.org/wiki/Luhn_algorithm)
* universal product codes (https://en.wikipedia.org/wiki/Universal_Product_Code)
Can you research on other applications modulus arithmetic has? Better still, contribute by submitting Python code to unleash the power of automation!
You can submit a pull request by doing one of the following:
- suggesting a new application for modulus arithmetic
- creating a new .py file
- uploading an existing .py file
We look forward to your pull requests! :)
""" | StarcoderdataPython |
1652067 | <reponame>fbailly/BioptimPaperExamples
from time import time
import biorbd_casadi as biorbd
from bioptim import Solver, OdeSolver
from .gait.load_experimental_data import LoadData
from .gait.ocp import prepare_ocp, get_phase_time_shooting_numbers, get_experimental_data
def generate_table(out):
    """Solve the gait-tracking optimal control problem with two ODE solvers
    (RK4 and collocation) and append one solver-summary record per run to
    *out*.

    *out* appears to expose a ``solver`` list and a ``Solver`` record type
    -- inferred from usage; confirm against the caller.
    """
    root_path = "/".join(__file__.split("/")[:-1])
    # Define the problem -- model path (one model per gait phase/contact).
    biorbd_model = (
        biorbd.Model(root_path + "/models/Gait_1leg_12dof_heel.bioMod"),
        biorbd.Model(root_path + "/models/Gait_1leg_12dof_flatfoot.bioMod"),
        biorbd.Model(root_path + "/models/Gait_1leg_12dof_forefoot.bioMod"),
        biorbd.Model(root_path + "/models/Gait_1leg_12dof_0contact.bioMod"),
    )

    # --- files path ---
    c3d_file = root_path + "/data/normal01_out.c3d"
    q_kalman_filter_file = root_path + "/data/normal01_q_KalmanFilter.txt"
    qdot_kalman_filter_file = root_path + "/data/normal01_qdot_KalmanFilter.txt"
    data = LoadData(biorbd_model[0], c3d_file, q_kalman_filter_file, qdot_kalman_filter_file)

    # --- phase time and number of shooting ---
    phase_time, number_shooting_points = get_phase_time_shooting_numbers(data, 0.01)
    # --- get experimental data ---
    q_ref, qdot_ref, markers_ref, grf_ref, moments_ref, cop_ref = get_experimental_data(data, number_shooting_points, phase_time)

    for i, ode_solver in enumerate([OdeSolver.RK4(), OdeSolver.COLLOCATION()]):
        # Models are re-loaded each iteration so every OCP starts from a
        # fresh, unmodified model state.
        biorbd_model = (
            biorbd.Model(root_path + "/models/Gait_1leg_12dof_heel.bioMod"),
            biorbd.Model(root_path + "/models/Gait_1leg_12dof_flatfoot.bioMod"),
            biorbd.Model(root_path + "/models/Gait_1leg_12dof_forefoot.bioMod"),
            biorbd.Model(root_path + "/models/Gait_1leg_12dof_0contact.bioMod"),
        )
        ocp = prepare_ocp(
            biorbd_model=biorbd_model,
            final_time=phase_time,
            nb_shooting=number_shooting_points,
            markers_ref=markers_ref,
            grf_ref=grf_ref,
            q_ref=q_ref,
            qdot_ref=qdot_ref,
            nb_threads=8,
            ode_solver=ode_solver,
        )

        # Ipopt configuration (MA57 linear solver, exact Hessian).
        solver = Solver.IPOPT()
        solver.set_linear_solver("ma57")
        solver.set_convergence_tolerance(1e-3)
        solver.set_hessian_approximation("exact")
        solver.set_maximum_iterations(3000)
        solver.set_print_level(0)

        # --- Solve the program --- #
        tic = time()
        sol = ocp.solve(solver=solver)
        toc = time() - tic  # wall-clock convergence time

        # Merge the multi-phase solution to read global dimensions.
        sol_merged = sol.merge_phases()

        out.solver.append(out.Solver("Ipopt"))
        out.solver[i].nx = sol_merged.states["all"].shape[0]
        out.solver[i].nu = sol_merged.controls["all"].shape[0]
        out.solver[i].ns = sol_merged.ns[0]
        out.solver[i].ode_solver = ode_solver
        out.solver[i].n_iteration = sol.iterations
        out.solver[i].cost = sol.cost
        out.solver[i].convergence_time = toc
        out.solver[i].compute_error_single_shooting(sol)
| StarcoderdataPython |
# NICOS instrument setup: manually maintained helium pressure readouts.
description = 'Helium pressures'
group = 'lowlevel'

devices = dict(
    # Two gauge sensors on "Center 3", entered by hand (ManualMove).
    center3_sens1 = device('nicos.devices.generic.ManualMove',
        description = 'Center 3 Sensor 1',
        default = 3.5e-6,
        abslimits = (0, 1000),
        fmtstr = '%.1g',
        unit = 'mbar',
    ),
    center3_sens2 = device('nicos.devices.generic.ManualMove',
        description = 'Center 3 Sensor 2',
        default = 3.5e-6,
        abslimits = (0, 1000),
        fmtstr = '%.1g',
        unit = 'mbar',
    ),
    # Supply-bottle pressure, in bar rather than mbar.
    He_pressure = device('nicos.devices.generic.ManualMove',
        description = 'Pressure of He bottle',
        default = 69.4,
        abslimits = (0, 100),
        fmtstr = '%.1g',
        unit = 'bar',
    ),
)
| StarcoderdataPython |
1941314 | <reponame>Bricktheworld/rhombus-api-examples-python
from json.decoder import JSONDecodeError
import requests
import argparse
import sys
import json
import time
import os
import tkinter
from PIL import Image, ImageTk
class IsDeskOccupied:
    """Determine whether user-defined camera areas are occupied, using the
    human-motion bounding boxes reported by the Rhombus Systems API.

    Areas are stored as relative (0-1.0) coordinates and can be managed
    either from the command line or from a simple tkinter GUI.
    """

    #Set up workspace for API calls to Rhombus Systems
    def __init__(self,args):
        #Initialize argument parser
        self.argParser = self.__initArgParser()
        self.args = self.argParser.parse_args(args)
        if self.args.dgui:
            # Debug GUI supersedes the interactive GUI.
            # (bugfix: this used `==`, a no-op comparison, instead of `=`)
            self.args.gui = False
        if not self.args.time:
            self.args.time = time.time()
        # bugfix: argparse yields strings, so compare numerically instead of
        # str > float (which raises TypeError on Python 3)
        elif float(self.args.time) > time.time():
            self.args.time = time.time()
        if not self.args.duration:
            self.args.duration = 60
        self.args.time = int(self.args.time)
        self.args.duration = int(self.args.duration)
        #Create a session to set default call Header information
        self.session = requests.session()
        self.session.headers = {"x-auth-scheme": "api-token","x-auth-apikey": self.args.apiKey}
        #Set a default base URL for api calls
        self.url = "https://api2.rhombussystems.com/api/"
        self.areas = []#Stores User Defined Areas
        self.humanMovementBounds = []#Stores bounds of Human Movement Events
        self.img = None
        self.width = None
        self.height = None

    #Define arguments which the user may be prompted for
    @staticmethod
    def __initArgParser():
        """Build the ArgumentParser for all supported command line options."""
        argParser = argparse.ArgumentParser(description = 'Determine wether defined workspaces are occupied or not on a camera by camera basis')
        argParser.add_argument("apiKey", help = 'Your personal api key, get this from your Rhombus Console')
        argParser.add_argument("uuid", help = 'UUID of target Camera')
        argParser.add_argument("--time","--t",help = 'Time to check occupancy (Seconds Since Epoch)(Defaults to current time)')
        argParser.add_argument("--duration","--d",help='Duration before time to check for occupancy in seconds (Defaults to 60 seconds)')
        argParser.add_argument("--newArea","--n", help = 'Input a string to define a new area bounding box, must include quotation marks eg: "(0,0)(.5,.2)", coordinates are given as values 0-1.0 in relation to camera resolution')
        argParser.add_argument("--removeArea","--r", help = 'Open a command line menu to remove an area', action = 'store_true')
        argParser.add_argument("--gui",help = 'Opens an image from the target camera and overlays existing areas over it. Draw and delete areas. Overrides concurrent CLA addition and Deletion actions',action = 'store_true')
        argParser.add_argument("--dgui",help = 'Runs check of defined areas then Opens a Debugging GUI which displays motion event bounding boxes alongside user defined areas',action = 'store_true')
        argParser.add_argument("--loadAreas","--l", help = "Name of or path to JSON file containing Areas to load")
        argParser.add_argument("--saveAreas","--s", help = "Name of or path to JSON file to Save Areas to")
        return argParser

    #Load user defined areas from uuid.csv
    def setup(self):
        """Fetch a reference frame (also establishes camera width/height) and
        optionally load previously saved areas from JSON."""
        ##Get a camera frame for GUI
        #Must be done even if GUI/DGUI is not selected as the image provides height and width information for camera
        payload = {
            "cameraUuid": self.args.uuid,
            "timestampMs": (self.args.time*1000)
        }
        frameURI = self.session.post(self.url+"video/getExactFrameUri", json = payload)
        if frameURI.status_code != 200:#Check for unexpected status code
            print("Failure to get frameURI (event/getExactFrameUri)")
            return
        frameURI = json.loads(frameURI.text)["frameUri"]
        #Save frame to jpg
        with open("tempFrame.jpg", "wb") as output_fp:
            frame_resp = self.session.get(frameURI)
            output_fp.write(frame_resp.content)
            output_fp.flush()
        self.img = Image.open('tempFrame.jpg')
        self.width = int(self.img.width)
        self.height = int(self.img.height)
        #If user is loading areas, load areas from JSON
        if self.args.loadAreas:
            try:
                with open(self.args.loadAreas+'.txt') as jsonInput:
                    self.areas=json.load(jsonInput)
            except FileNotFoundError:
                pass#Pass if an error is encountered while loading, self.areas is initialized as [] to still allow user interaction in the event of a failure here
            except JSONDecodeError:
                pass

    #Define a new area and save if user wants
    def newArea(self,newCoords):
        """Store a new area given as relative [x1, y1, x2, y2] coordinates.

        Coordinates may arrive as strings (CLI path) or numbers (GUI path);
        they are normalised to float so later pixel-scaling arithmetic
        (e.g. ``area[0] *= self.width`` in the GUI) always works.
        """
        newArea = {
            "posOneX":float(newCoords[0]),
            "posOneY":float(newCoords[1]),
            "posTwoX":float(newCoords[2]),
            "posTwoY":float(newCoords[3])
        }
        self.areas.append(newArea)
        if self.args.saveAreas:
            with open(self.args.saveAreas+'.txt','w') as jsonOutput:
                json.dump(self.areas,jsonOutput)

    #Open a commandline interface to remove an area, and save updated area list to uuid.csv
    def removeArea(self):
        """Interactively list all areas (in pixel coordinates) and remove the
        one whose index the user enters."""
        if self.areas == []:
            print("No areas to remove.")
            return
        i = 1
        for area in self.areas:
            area = list(area.values())
            area[0] = float(area[0]) * self.width
            area[1] = float(area[1]) * self.height
            area[2] = float(area[2]) * self.width
            area[3] = float(area[3]) * self.height
            print("%i : (%i,%i),(%i,%i)"%(i,area[0],area[1],area[2],area[3]))#Print area bounds and associated index
            i+=1
        print("Enter the index of the area to remove:")
        removeMe = int(input())#Take user input
        self.areas.pop(removeMe-1)#Remove specified index from list
        if self.args.saveAreas:
            with open(self.args.saveAreas+'.txt','w') as jsonOutput:
                json.dump(self.areas,jsonOutput)

    #Check areas for occupancy
    def checkAreas(self):
        """Fetch motion bounding boxes and print an occupancy verdict per area."""
        if self.areas == []:
            print("No areas to check.")
            return
        ##Summary
        #Camera UUID, Start Time and Duration -> camera/getFootageBoundingBoxes -> Bounding Box Coords
        #Bounding Box Coords vs. user defined areas -> occupancy
        #For each Area:
        #For each Human Motion Event:
        #if 4 bounds contained: Area Occupied
        #if >=2 bounds contained: Area May Be Occupied
        #Else: Area Not Occupied
        ##Get Recent Bounding Boxes
        payload = {
            "cameraUuid": self.args.uuid,
            "duration": self.args.duration,
            "startTime": self.args.time
        }
        currentBoundingBoxes = self.session.post(self.url+"camera/getFootageBoundingBoxes",json = payload)
        if currentBoundingBoxes.status_code != 200:#Check for unexpected status code
            print("Failure to get currentEvents (event/getPolicyAlertGroupsForDevice)")
            return
        currentBoundingBoxes = json.loads(currentBoundingBoxes.text)
        currentBoundingBoxes = currentBoundingBoxes["footageBoundingBoxes"]
        for box in currentBoundingBoxes:
            activity = box.get("a")
            if activity == "MOTION_HUMAN":
                #convert event bounds to pixel coordinates and store
                #Convert Permyiads to coordinates -> (value/10000) * total dimension
                left = int((box.get("l")/10000) * self.width)
                right = int((box.get("r")/10000) * self.width)
                top = int((box.get("t")/10000) * self.height)
                bottom = int((box.get("b")/10000) * self.height)
                self.humanMovementBounds.append([left,top,right,bottom,activity])
        ##Compare user defined workspaces to Human Movement Bounding Boxes
        print("\nChecking occupancy as of %i minutes ago" % int((time.time()-self.args.time)/60))
        print("[-1: Area unoccupied | 0: Area may be occupied | 1: Area occupied]\n")
        i = 1
        for area in self.areas:
            #Parse relative values into coordinates
            area = list(area.values())
            area[0] = float(area[0]) * self.width
            area[1] = float(area[1]) * self.height
            area[2] = float(area[2]) * self.width
            area[3] = float(area[3]) * self.height
            print("%i: (%i,%i)(%i,%i)" % (i,area[0],area[1],area[2],area[3])) #i: (x,y)(x,y)
            i+=1
            occupied = -1 #Occupied value starts as empty, may change from empty but will not return to empty
            for bound in self.humanMovementBounds:
                overlapCount = 0
                #Determine number of corners in area
                top = (area[1] < bound[1] < area[3]) or (area[3] < bound[1] < area[1])#is the top bound within the user defined area?
                bottom = (area[1] < bound[3] < area[3]) or (area[3] < bound[3] < area[1])#is the bottom bound within the user defined area?
                if area[0] < bound[0] < area[2] or area[2] < bound[0] < area[0]:#is the left bound within the user defined area?
                    overlapCount+=top#Top Left Corner
                    overlapCount+=bottom#Bottom Left Corner
                if area[0] < bound[2] < area[2] or area[2] < bound[2] < area[0]:#is the right bound within the user defined area?
                    overlapCount+=top#Top Right Corner
                    overlapCount+=bottom#Bottom Right Corner
                if overlapCount == 4: #if a relevant event is entirely within the bounds of the work area
                    occupied = 1 #Space is occupied, break from loop
                    break
                if overlapCount >= 2: #if a relevant event is partially within the bounds of the work area
                    occupied = 0 #Space may be occupied, continue through loop
                    continue
            print('\t',occupied) #print status of the area

    #Display an image which will allow a user to manage defined areas
    def gui(self):
        """Open a tkinter window showing the camera frame.  Double click twice
        to define a new area; single click inside an area to delete it."""
        global selectRect #the dashed rectangle which outlines a selection in progress
        global selecting #a control boolean which tracks the state of user interaction - starts as False
        global botx,boty,topx,topy #coordinates for selectRect
        topx,topy,botx,boty = 0,0,0,0
        selectRect = None
        selecting = False
        ###Gui methods
        def draw():
            global selectRect
            canvas.delete("all")#Clear the Canvas
            canvas.create_image(0, 0, image=self.img, anchor=tkinter.NW)#Draw Image on Canvas
            selectRect = canvas.create_rectangle(topx, topy, topx, topy,dash=(2,2), fill='', outline='white')#Draw selectRect
            i = 1
            for area in self.areas:
                #Parse relative values into coordinates
                area = list(area.values())
                area[0] = float(area[0]) * self.width
                area[1] = float(area[1]) * self.height
                area[2] = float(area[2]) * self.width
                area[3] = float(area[3]) * self.height
                if area[0] < area[2]:#Place number identifying area in top left corner
                    canvas.create_text(area[0],area[1],text = str(i),fill = 'green',anchor=tkinter.NW, font=("Arial",25))
                else:
                    canvas.create_text(area[2],area[1],text = str(i),fill = 'green',anchor=tkinter.NW, font=("Arial",25))
                i+=1
                canvas.create_rectangle(area[0], area[1], area[2], area[3], fill='', outline='green',width = 2)#Draw user defined areas
            if self.args.dgui:
                for area in self.humanMovementBounds:
                    if area[4] == "MOTION_HUMAN":
                        canvas.create_rectangle(area[0], area[1], area[2], area[3], fill='',outline='yellow',width = 2)#If in debug mode, draw motion bounds
                    else:
                        canvas.create_rectangle(area[0], area[1], area[2], area[3], fill='',outline='purple',width = 2)#If in debug mode, draw motion bounds
        #two part process for adding a new area to track, triggered by double clicking mouse 1
        def addArea(event):
            global selecting
            global selectRect
            global topx,topy, botx, boty
            if not selecting:#If first trigger, start selecting an area
                topx,topy = event.x,event.y #Set first corner of selection to mouse position
                selecting = True
                return
            else:#On second trigger, finish selecting an area and save selected area
                newCoords = [topx,topy,botx,boty]
                #Parse coordinates into relative values
                newCoords[0]/=self.width
                newCoords[1]/=self.height
                newCoords[2]/=self.width
                newCoords[3]/=self.height
                self.newArea(newCoords)#Add new area
                #Reset selectRect
                topy,topx,boty,botx = 0,0,0,0
                canvas.coords(selectRect, topx, topy, botx, boty)
                #end selecting
                selecting = False
                draw()
        #Frame updates associated with motion
        def updateSelectRect(event):
            #Always update title with current mouse position
            window.title("Double Click to define new areas - Click to Remove :"+str(event.x)+","+str(event.y))
            global selecting
            if not selecting:#If not currently selecting an area, return
                return
            #Update selectRect and its coordinates
            global selectRect
            global topy, topx, botx, boty
            botx, boty = event.x, event.y #set second corner of selectRect to mouse position
            canvas.coords(selectRect, topx, topy, botx, boty)
        #Remove the most recently added area which contains the mouse cursor
        def removeArea(event):
            i = len(self.areas)-1
            for area in self.areas[::-1]:#Iterate through self.areas in reverse
                area = list(area.values())
                area[0] *= self.width
                area[1] *= self.height
                area[2] *= self.width
                area[3] *= self.height
                if (area[0] < event.x < area[2]) or (area[2] < event.x < area[0]):#If mouse x pos is in area
                    if (area[1] < event.y < area[3]) or (area[3] < event.y < area[1]):#If mouse y pos is in area
                        self.areas.pop(i)#Remove current index from list, break from loop
                        break
                i-=1
            #write updated list to saveAreas.txt if user wants to save new areas
            if self.args.saveAreas:
                with open(self.args.saveAreas+'.txt','w') as jsonOutput:
                    json.dump(self.areas,jsonOutput)
            draw()#redraw frame
        ###Gui Main
        window = tkinter.Tk()
        window.title("Double Click to define new areas - Click to Remove")
        self.img = ImageTk.PhotoImage(self.img)
        window.geometry(str(self.width)+'x'+str(self.height))
        window.configure(background='grey')
        #create canvas
        canvas = tkinter.Canvas(window, width=self.width, height=self.height,borderwidth=0, highlightthickness=0)
        canvas.pack(expand=True)
        draw()
        canvas.bind('<Double-Button-1>', addArea)#On double click
        canvas.bind('<Button-1>', removeArea)#On single click
        canvas.bind('<Motion>', updateSelectRect)#On mouse motion
        window.mainloop()

    def execute(self):
        """Entry point: fetch the frame, apply CLI/GUI actions, report occupancy."""
        self.setup()#Prep predefined areas and get backdrop for UI
        if(self.args.gui):#gui overrides other CLA actions
            self.gui()
        else:
            if(self.args.newArea):
                #Parse the area to add from commandline "(x,y)(x,y)"
                #(renamed local: previously shadowed the builtin `str`)
                parts = self.args.newArea.split(')(')
                newCoords = parts[0].split('(')[1].split(',')
                newCoords+=(parts[1].split(')')[0].split(','))
                self.newArea(newCoords)
            if(self.args.removeArea):
                self.removeArea()
            self.checkAreas()#Report occupied status of areas
            if self.args.dgui:#Display debug GUI
                self.gui()
        os.remove('tempFrame.jpg')#Remove frame when done
        return
if __name__ == "__main__":
    # Run the occupancy checker with the command line arguments.
    IsDeskOccupied(sys.argv[1:]).execute()
| StarcoderdataPython |
315559 | <reponame>nestorPons/agendaOnLine<filename>.server/usuarios.py
import pymysql

# Connect to the application database.
conexion = pymysql.connect(host="localhost",
                           user="root",
                           passwd="<PASSWORD>",
                           database="app")
cursor = conexion.cursor()

# Fetch every row from the 'tickets' table.
registros = "SELECT * FROM tickets;"
cursor.execute(registros)
filas = cursor.fetchall()
for fila in filas:
    # Set total=12 for each ticket.  Parameterized query instead of string
    # concatenation: avoids SQL injection and quoting bugs.
    cursor.execute("UPDATE tickets SET total=%s WHERE id=%s", (12, fila[0]))
    print(fila)

# Commit the updates and release the connection.
conexion.commit()
conexion.close()
| StarcoderdataPython |
1916921 | <filename>script.py
# Demo script: check a YouTube video's transcript for profanity.
# The block below is the older, lower-level API kept for reference.
# from profanity_police.checker import Checker
# from profanity_police.youtube import YoutubeTranscript
# y_transcript = YoutubeTranscript(url = "https://www.youtube.com/watch?v=Vev2ybF2Z6g&ab_channel=AllIndiaBakchod")
# y_transcript.get_original_languages()
# checker = Checker()
# transcript = y_transcript.get_transcript(language_code = "en-GB")
# if not transcript:
#     print("Transcript not found")
# else:
#     swear_words_in_transcript = checker.check_swear_word(transcript["transcript"], "en-GB")
#     print(swear_words_in_transcript)
from profanity_police.transcript_checker import TranscriptChecker
# High-level API: fetches the transcript and checks it in one call.
checker = TranscriptChecker()
print(checker.check_transcript(source = "youtube", video_id = "Vev2ybF2Z6g", language_code = "en"))
11371964 | <gh_stars>0
import random
class Tile(object):
    """One cell of the simulation grid; tracks the entity occupying it.

    NOTE(review): Python 2 code (print statements in the commented-out
    debug output); keep Python 2 semantics when editing.
    """
    def __init__(self, x, y):
        self.position = (x, y)
        self.occupied = False
        self.occupier = ''   # entity type name: 'Zombie', 'Victim' or 'Hunter'
        self.entity = None   # the entity instance itself, if any
    def update(self):
        """Per-tick hook; debug printing is currently disabled."""
        #print "X:%r, Y:%r: " % self.position,
        #if self.occupied: print "%s on Tile." % self.occupier
        #else: print "Tile empty."
        pass
    def is_occupied(self):
        return self.occupied
    def get_occupier(self):
        return self.occupier
    def get_entity(self):
        return self.entity
    def occupy(self, entity_type):
        """Place a new entity of *entity_type* on this tile."""
        # Imported lazily to avoid circular imports between entity modules.
        from hunter import Hunter
        from zombie import Zombie
        from victim import Victim
        self.occupied = True
        self.occupier = entity_type
        if self.occupier == "Zombie":
            self.entity = Zombie(self.position)
        elif self.occupier == "Victim":
            self.entity = Victim(self.position)
        elif self.occupier == "Hunter":
            self.entity = Hunter(self.position)
    def vacated(self):
        """Clear the tile back to its empty state."""
        self.occupied = False
        self.occupier = ''
        self.entity = None
    def print_pos(self):
        print self.position
| StarcoderdataPython |
9621088 | import os
import xml.etree.ElementTree as ET
import zipfile
class Model:
    """An MPS model: its reference string, used languages and root nodes."""

    def __init__(self, ref):
        self.ref = ref
        self.language_usages = {}
        self.roots = []

    def add_language_usage(self, usage):
        """Index a language usage under its id."""
        self.language_usages[usage.id] = usage

    def add_devkit_usage(self, usage):
        """Devkit usages are currently ignored."""
        pass

    def uuid(self):
        """The model UUID: the ref without its two-character prefix."""
        return self.ref[2:]

    def add_root_node(self, node):
        """Record *node* as one of the model's root nodes."""
        self.roots.append(node)
class LanguageUsage:
    """A ``<use>`` entry: one language used by a model."""

    def __init__(self, id, name, version):
        self.id, self.name, self.version = id, name, version
class LanguageDefinition:
    """A language declared by a jar's module.xml: namespace plus UUID."""

    def __init__(self, namespace, uuid):
        self.namespace, self.uuid = namespace, uuid
class Node:
    """A single MPS node: a concept instance with properties, references
    and children, each keyed by the defining member's id."""

    def __init__(self, id, concept_def):
        self.id = id
        self.concept_def = concept_def
        self.properties = {}
        self.references = {}
        self.children = {}

    def set_property(self, property_def, value):
        """Store (definition, value) under the property's id."""
        self.properties[property_def.id] = (property_def, value)

    def set_reference(self, reference_def, value_id, resolve):
        """Store (definition, target node id, resolve hint) under the
        reference's id.

        bugfix: this previously wrote into ``self.properties``, so
        ``self.references`` was never populated.
        """
        self.references[reference_def.id] = (reference_def, value_id, resolve)

    def add_child(self, child_def, value):
        """Append *value* under the child role, creating the slot on first use."""
        if child_def.id not in self.children:
            self.children[child_def.id] = (child_def, [])
        self.children[child_def.id][1].append(value)
class ModelReference:
    """An ``<import>`` entry pointing at another model."""

    def __init__(self, ref, implicit):
        self.ref, self.implicit = ref, implicit
class ImportingTable:
    """Per-model lookup tables mapping local indices (as used inside one
    .mps file) to imported models, concepts, properties, children and
    references."""

    def __init__(self):
        self.models = {}
        self.concepts = {}
        self.properties = {}
        self.children = {}
        self.references = {}

    def load_language(self, lang):
        """Index every concept of *lang*, and each concept's members, by
        their local index."""
        for c_id in lang.concepts:
            c = lang.concepts[c_id]
            self.concepts[c.index] = c
            for p_id in c.properties:
                p = c.properties[p_id]
                self.properties[p.index] = p
            for r_id in c.references:
                r = c.references[r_id]
                self.references[r.index] = r
            for ch_id in c.children:
                ch = c.children[ch_id]
                self.children[ch.index] = ch

    def register_model(self, index, ref, implicit):
        self.models[index] = ModelReference(ref, implicit)

    @staticmethod
    def __find(table, index, what):
        """Return ``table[index]`` or raise a descriptive error.

        Shared by all find_* methods; the message format matches the
        original per-method messages (typo "ot found" fixed).
        """
        if index in table:
            return table[index]
        raise Exception("%s not found %s" % (what, index))

    def find_model(self, index):
        return self.__find(self.models, index, "Model")

    def find_concept(self, index):
        return self.__find(self.concepts, index, "Concept")

    def find_property(self, index):
        return self.__find(self.properties, index, "Property")

    def find_child(self, index):
        return self.__find(self.children, index, "Child relationship")

    def find_reference(self, index):
        return self.__find(self.references, index, "Reference")
class ImportedLanguage:
    """A language listed in a model's registry, holding its concepts by id."""

    def __init__(self, id, name):
        self.id, self.name = id, name
        self.concepts = {}

    def register_concept(self, concept):
        """Index *concept* under its id."""
        self.concepts[concept.id] = concept
class ImportedConcept:
    """A concept declared in the registry, with its members indexed by id."""

    def __init__(self, id, name, flags, index):
        self.id, self.name = id, name
        self.flags, self.index = flags, index
        self.properties = {}
        self.children = {}
        self.references = {}

    def register_property(self, property):
        self.properties[property.id] = property

    def register_child(self, child):
        self.children[child.id] = child

    def register_reference(self, reference):
        self.references[reference.id] = reference
class ImportedConceptProperty:
    """A property member of an imported concept."""

    def __init__(self, id, name, index):
        self.id, self.name, self.index = id, name, index
class ImportedConceptChild:
    """A child (containment) member of an imported concept."""

    def __init__(self, id, name, index):
        self.id, self.name, self.index = id, name, index
class ImportedConceptReference:
    """A reference member of an imported concept."""

    def __init__(self, id, name, index):
        self.id, self.name, self.index = id, name, index
class Environment:
    """Loads MPS language definitions (from .jar files) and models (from
    .mps XML files) found under a directory tree."""

    def __init__(self):
        self.verbose = False
        self.languages = {}   # uuid -> LanguageDefinition
        self.models = {}      # uuid -> Model

    def __log(self, message):
        """Print *message* only when verbose mode is on."""
        if self.verbose:
            print("ENV %s" % message)

    def register_language(self, language_definition):
        self.languages[language_definition.uuid] = language_definition

    def __load_language_usage(self, node):
        """Parse a ``<use>`` element into a LanguageUsage."""
        return LanguageUsage(node.attrib['id'], node.attrib['name'], node.attrib['version'])

    def load_devkit_usage(self, node):
        # Devkit usages are currently ignored.
        pass

    def __load_node(self, xml_node, imp_table):
        """Recursively parse a ``<node>`` element, resolving role/concept
        indices through *imp_table*."""
        id = xml_node.attrib['id']
        concept_index = xml_node.attrib['concept']
        concept_def = imp_table.find_concept(concept_index)
        node = Node(id, concept_def)
        for cn in xml_node:
            if cn.tag == 'property':
                property_index = cn.attrib['role']
                property_def = imp_table.find_property(property_index)
                node.set_property(property_def, cn.attrib['value'])
            elif cn.tag == 'ref':
                ref_index = cn.attrib['role']
                ref_def = imp_table.find_reference(ref_index)
                # instead of having the attribute 'node' a reference could have the attribute 'to'
                # in that case we need to add a reference to a node in another model
                if 'node' in cn.attrib:
                    node.set_reference(ref_def, cn.attrib['node'], cn.attrib['resolve'])
                elif 'to' in cn.attrib:
                    to = cn.attrib['to']
                    model_index, node_index = to.split(":", 1)
                    # NOTE(review): cross-model references are validated
                    # (find_model raises if unknown) but never attached to
                    # the node — TODO: complete this branch.
                    model_def = imp_table.find_model(model_index)
                else:
                    raise Exception()
            elif cn.tag == 'node':
                child_index = cn.attrib['role']
                child_def = imp_table.find_child(child_index)
                node.add_child(child_def, self.__load_node(cn, imp_table))
            else:
                raise Exception(cn.tag)
        return node

    def __load_imported_concept(self, xml_node):
        """Parse a registry ``<concept>`` element and its members."""
        c = ImportedConcept(xml_node.attrib['id'], xml_node.attrib['name'], xml_node.attrib['flags'], xml_node.attrib['index'])
        for child in xml_node:
            if child.tag == 'property':
                c.register_property(ImportedConceptProperty(child.attrib['id'], child.attrib['name'], child.attrib['index']))
            elif child.tag == 'child':
                c.register_child(ImportedConceptChild(child.attrib['id'], child.attrib['name'], child.attrib['index']))
            elif child.tag == 'reference':
                c.register_reference(ImportedConceptReference(child.attrib['id'], child.attrib['name'], child.attrib['index']))
            else:
                raise Exception(child.tag)
        return c

    def __load_imported_language(self, xml_node):
        """Parse a registry ``<language>`` element and all its concepts."""
        lang = ImportedLanguage(xml_node.attrib['id'], xml_node.attrib['name'])
        for c in xml_node:
            lang.register_concept(self.__load_imported_concept(c))
        return lang

    def load_mps_file(self, path):
        """Parse one .mps XML file and register the resulting Model."""
        tree = ET.parse(path)
        root = tree.getroot()
        model = Model(root.attrib['ref'])
        self.__log("Record model %s" % model.uuid())
        imp_table = ImportingTable()
        for language_node in root.find('languages'):
            if language_node.tag == 'use':
                model.add_language_usage(self.__load_language_usage(language_node))
            elif language_node.tag == 'devkit':
                model.add_devkit_usage((self.load_devkit_usage(language_node)))
            else:
                raise Exception("Unknown tag %s" % language_node.tag)
        imported_languages = [self.__load_imported_language(n) for n in root.find('registry')]
        for lang in imported_languages:
            imp_table.load_language(lang)
        for import_node in root.find('imports'):
            implicit = False
            if 'implicit' in import_node.attrib:
                implicit = import_node.attrib['implicit']
            imp_table.register_model(import_node.attrib['index'], import_node.attrib['ref'], implicit)
        for child in root:
            if child.tag == 'node':
                model.add_root_node(self.__load_node(child, imp_table))
        self.models[model.uuid()] = model

    def load_jar_file(self, path):
        """Register a language definition if the jar ships META-INF/module.xml."""
        # bugfix: the ZipFile was never closed; use it as a context manager
        with zipfile.ZipFile(path, 'r') as zf:
            module_entry = [zi for zi in zf.infolist() if zi.filename == "META-INF/module.xml"]
            if len(module_entry) == 1:
                data = zf.read("META-INF/module.xml")
                root = ET.fromstring(data)
                if root.attrib['type'] == 'language':
                    language_def = LanguageDefinition(root.attrib['namespace'], root.attrib['uuid'])
                    self.register_language(language_def)
                elif root.attrib['type'] == 'solution':
                    pass
                else:
                    raise Exception("Unknown type %s" % root.attrib['type'])

    def load_dir(self, path):
        """Recursively load every .mps and .jar file under *path*."""
        for content in os.listdir(path):
            childname = os.path.join(path, content)
            if os.path.isdir(childname):
                self.load_dir(childname)
            else:
                filename, file_extension = os.path.splitext(childname)
                if file_extension == ".mps":
                    self.load_mps_file(childname)
                elif file_extension == ".jar":
                    self.load_jar_file(childname)

    def verify(self):
        # Consistency checks are not implemented yet.
        pass
def main():
    """Example driver: load an MPS installation and a sample repository.

    NOTE(review): paths are machine-specific; parameterise before reuse.
    """
    environment = Environment()
    #environment.load_jar_file("/home/federico/tools/MPS3.3.4/languages/languageDesign/jetbrains.mps.lang.structure.jar")
    environment.load_dir("/home/federico/tools/MPS3.3.4")
    # Verbose logging only for the second (project) directory.
    environment.verbose = True
    environment.load_dir("/home/federico/repos/mps-lwc-16")

if __name__ == '__main__':
    main()
8173174 | <reponame>KevZho/redditbot<filename>kol/request/ApiRequest.py
import kol.Error as Error
from GenericRequest import GenericRequest
from kol.util import Configuration
import json
class ApiRequest(GenericRequest):
    """Request against KoL's api.php endpoint, parsing the JSON response.

    NOTE(review): Python 2 code (uses the ``unicode`` builtin).
    """
    def __init__(self, session):
        super(ApiRequest, self).__init__(session)
        self.url = session.serverURL + "api.php"
        # Create a user agent string.
        userAgent = Configuration.get("userAgent")
        if userAgent == None:
            userAgent = "RedditBot by KevZho (#2434890)"
        self.requestData["for"] = userAgent
    def parseResponse(self):
        """Decode the JSON body; api.php signals errors by returning a bare
        string instead of an object, which is raised as a generic Error."""
        self.jsonData = json.loads(self.responseText)
        if type(self.jsonData) == str or type(self.jsonData) == unicode:
            raise Error.Error(self.jsonData, Error.REQUEST_GENERIC)
| StarcoderdataPython |
6660112 | <reponame>lmicra/paco
# -*- coding: utf-8 -*-
import asyncio
from .partial import partial
from .decorator import overload
from .concurrent import ConcurrentExecutor
from .assertions import assert_corofunction, assert_iter
@overload
@asyncio.coroutine
def some(coro, iterable, limit=0, timeout=None, loop=None):
    """
    Returns `True` if at least one element in the iterable satisfies the
    asynchronous coroutine test. If any iteratee call returns `True`,
    iteration stops and `True` will be returned.

    This function is a coroutine.

    This function can be composed in a pipeline chain with ``|`` operator.

    Arguments:
        coro (coroutine function): coroutine function for test values.
        iterable (iterable|asynchronousiterable): an iterable collection
            yielding coroutines functions.
        limit (int): max concurrency limit. Use ``0`` for no limit.
        timeout (int|float): can be used to control the maximum number
            of seconds to wait before returning. timeout can be an int or
            float. If timeout is not specified or None, there is no limit to
            the wait time.
        loop (asyncio.BaseEventLoop): optional event loop to use.

    Raises:
        TypeError: if input arguments are not valid.

    Returns:
        bool: `True` if at least on value passes the test, otherwise `False`.

    Usage::

        async def gt_3(num):
            return num > 3

        await paco.some(gt_3, [1, 2, 3, 4, 5])
        # => True

    """
    assert_corofunction(coro=coro)
    assert_iter(iterable=iterable)

    # Reduced accumulator value: flips to True once any element passes
    passes = False

    # If no items in iterable, return False
    if len(iterable) == 0:
        return passes

    # Create concurrent executor
    pool = ConcurrentExecutor(limit=limit, loop=loop)

    # Reducer partial function for deferred coroutine execution
    @asyncio.coroutine
    def tester(element):
        nonlocal passes
        # Short-circuit: another element already passed the test
        if passes:
            return None

        if (yield from coro(element)):
            # Flag that the test passed
            passes = True

            # Force stop pending coroutines
            pool.cancel()

    # Iterate and attach coroutine for defer scheduling
    for element in iterable:
        pool.add(partial(tester, element))

    # Wait until all coroutines finish
    yield from pool.run(timeout=timeout)

    return passes
| StarcoderdataPython |
3527903 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cloudeyeclient.common import display
from cloudeyeclient.common import resource
from cloudeyeclient.common import utils
class Metric(resource.Resource, display.Display):
    """Cloud Eye metric resource instance."""

    list_column_names = [
        "Namespace",
        "Metric Name",
        "Dimension",
        "Unit",
    ]

    list_favorite_column_names = [
        "Namespace",
        "Metric Name",
        "Dimension",
    ]

    # column_mapping = {
    #     "name": "scaling_group_name",
    #     "status": "scaling_group_status",
    # }

    @property
    def dimension(self):
        """All dimensions rendered as ``name=value`` pairs joined with ``;``."""
        dims = self.dimensions
        if not dims:
            return ''
        return ';'.join(d['name'] + '=' + d['value'] for d in dims)
def condition_formatter(condition):
    """Render an alarm *condition* dict as a human readable sentence."""
    template = ("Event {filter}{comparison_operator}{value} occurs {count}"
                " times in {period} seconds")
    return template.format(**condition)
class Alarm(resource.Resource, display.Display):
    """Cloud Eye alarm resource instance."""
    # Columns shown in list output.
    list_column_names = [
        "id",
        "name",
        "desc",
        "metric namespace",
        "metric name",
        "status"
    ]

    # Columns shown in detail (show) output.
    show_column_names = [
        "id",
        "name",
        "desc",
        "metric namespace",
        "metric name",
        "metric dimensions",
        "condition",
        "enabled",
        "action enabled",
        "update time",
        "status"
    ]

    # Maps display column names to the underlying resource attributes.
    column_2_property = {
        "id": "alarm_id",
        "name": "alarm_name",
        "desc": "alarm_description",
        "enabled": "alarm_enabled",
        "action enabled": "alarm_action_enabled",
        "status": "alarm_state",
    }

    @property
    def metric_namespace(self):
        """Namespace of the metric the alarm watches."""
        return self.metric["namespace"]

    @property
    def metric_name(self):
        """Name of the metric the alarm watches."""
        return self.metric["metric_name"]

    @property
    def metric_dimensions(self):
        """Metric dimensions rendered as ``name=value`` pairs joined by ``;``."""
        dimensions = self.metric['dimensions']
        # bugfix: the old code compared a list with an int
        # (``dimensions > 0``), which raises TypeError on Python 3;
        # truthiness is what was intended.
        if dimensions:
            return ';'.join([dim['name'] + '=' + dim['value']
                             for dim in dimensions])
        return ''

    # Custom renderers for selected columns.
    formatter = {
        "condition": condition_formatter,
        "update time": utils.format_time
    }
class Quota(resource.Resource, display.Display):
    """Cloud Eye quota resource instance."""
    # Columns shown when listing quotas.
    list_column_names = [
        "type",
        "quota",
        "used",
        "unit",
    ]
class MetricData(resource.Resource, display.Display):
    """Cloud Eye metric data resource instance."""
    # Render raw timestamps as human readable times in listings.
    formatter = {
        "timestamp": utils.format_time
    }
| StarcoderdataPython |
11222537 | <gh_stars>10-100
"""Client class implementation"""
import asyncio
import reprlib
import logging
from collections import abc
from contextlib import suppress
import json
from typing import Optional, List, Union, Set, AsyncIterator, Type, Any
from types import TracebackType
import aiohttp
from aiocometd.transports import create_transport
from aiocometd.transports.abc import Transport
from aiocometd.constants import DEFAULT_CONNECTION_TYPE, ConnectionType, \
MetaChannel, SERVICE_CHANNEL_PREFIX, TransportState
from aiocometd.exceptions import ServerError, ClientInvalidOperation, \
TransportTimeoutError, ClientError
from aiocometd.utils import is_server_error_message
from aiocometd.extensions import Extension, AuthExtension
from aiocometd.typing import ConnectionTypeSpec, SSLValidationMode, \
JsonObject, JsonDumper, JsonLoader
LOGGER = logging.getLogger(__name__)
class Client: # pylint: disable=too-many-instance-attributes
    """CometD client"""
    #: Predefined server error messages by channel name
    _SERVER_ERROR_MESSAGES = {
        MetaChannel.HANDSHAKE: "Handshake request failed.",
        MetaChannel.CONNECT: "Connect request failed.",
        MetaChannel.DISCONNECT: "Disconnect request failed.",
        MetaChannel.SUBSCRIBE: "Subscribe request failed.",
        MetaChannel.UNSUBSCRIBE: "Unsubscribe request failed."
    }
    #: Default connection types list
    _DEFAULT_CONNECTION_TYPES = [ConnectionType.WEBSOCKET,
                                 ConnectionType.LONG_POLLING]
    #: Timeout to give to HTTP session to close itself
    _HTTP_SESSION_CLOSE_TIMEOUT = 0.250
    def __init__(self, url: str,
                 connection_types: Optional[ConnectionTypeSpec] = None, *,
                 connection_timeout: Union[int, float] = 10.0,
                 ssl: Optional[SSLValidationMode] = None,
                 max_pending_count: int = 100,
                 extensions: Optional[List[Extension]] = None,
                 auth: Optional[AuthExtension] = None,
                 json_dumps: JsonDumper = json.dumps,
                 json_loads: JsonLoader = json.loads,
                 loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
        """
        :param url: CometD service url
        :param connection_types: List of connection types in order of \
        preference, or a single connection type name. If ``None``, \
        [:obj:`~ConnectionType.WEBSOCKET`, \
        :obj:`~ConnectionType.LONG_POLLING`] will be used as a default value.
        :param connection_timeout: The maximum amount of time to wait for the \
        transport to re-establish a connection with the server when the \
        connection fails.
        :param ssl: SSL validation mode. None for default SSL check \
        (:func:`ssl.create_default_context` is used), False for skip SSL \
        certificate validation, \
        `aiohttp.Fingerprint <https://aiohttp.readthedocs.io/en/stable/\
        client_reference.html#aiohttp.Fingerprint>`_ for fingerprint \
        validation, :obj:`ssl.SSLContext` for custom SSL certificate \
        validation.
        :param max_pending_count: The maximum number of messages to \
        prefetch from the server. If the number of prefetched messages reach \
        this size then the connection will be suspended, until messages are \
        consumed. \
        If it is less than or equal to zero, the count is infinite.
        :param extensions: List of protocol extension objects
        :param auth: An auth extension
        :param json_dumps: Function for JSON serialization, the default is \
        :func:`json.dumps`
        :param json_loads: Function for JSON deserialization, the default is \
        :func:`json.loads`
        :param loop: Event :obj:`loop <asyncio.BaseEventLoop>` used to
        schedule tasks. If *loop* is ``None`` then
        :func:`asyncio.get_event_loop` is used to get the default
        event loop.
        """
        #: CometD service url
        self.url = url
        #: List of connection types to use in order of preference
        if isinstance(connection_types, ConnectionType):
            self._connection_types = [connection_types]
        elif isinstance(connection_types, abc.Iterable):
            self._connection_types = list(connection_types)
        else:
            self._connection_types = self._DEFAULT_CONNECTION_TYPES
        # NOTE(review): explicit event-loop arguments (here and the loop=
        # parameters passed to asyncio APIs below) are deprecated/removed in
        # newer Python versions (3.8+/3.10+) -- confirm the supported Python
        # range before upgrading.
        self._loop = loop or asyncio.get_event_loop()
        #: queue for consuming incoming event messages
        self._incoming_queue: "Optional[asyncio.Queue[JsonObject]]" = None
        #: transport object
        self._transport: Optional[Transport] = None
        #: marks whether the client is open or closed
        self._closed = True
        #: The maximum amount of time to wait for the transport to re-establish
        #: a connection with the server when the connection fails
        self.connection_timeout = connection_timeout
        #: SSL validation mode
        self.ssl = ssl
        #: the maximum number of messages to prefetch from the server
        self._max_pending_count = max_pending_count
        #: List of protocol extension objects
        self.extensions = extensions
        #: An auth extension
        self.auth = auth
        #: Function for JSON serialization
        self._json_dumps = json_dumps
        #: Function for JSON deserialization
        self._json_loads = json_loads
        #: http session
        self._http_session: Optional[aiohttp.ClientSession] = None
    def __repr__(self) -> str:
        """Formal string representation"""
        cls_name = type(self).__name__
        fmt_spec = "{}({}, {}, connection_timeout={}, ssl={}, " \
                   "max_pending_count={}, extensions={}, auth={}, loop={})"
        return fmt_spec.format(cls_name,
                               reprlib.repr(self.url),
                               reprlib.repr(self._connection_types),
                               reprlib.repr(self.connection_timeout),
                               reprlib.repr(self.ssl),
                               reprlib.repr(self._max_pending_count),
                               reprlib.repr(self.extensions),
                               reprlib.repr(self.auth),
                               reprlib.repr(self._loop))
    @property
    def closed(self) -> bool:
        """Marks whether the client is open or closed"""
        return self._closed
    @property
    def subscriptions(self) -> Set[str]:
        """Set of subscribed channels"""
        if self._transport:
            return self._transport.subscriptions
        return set()
    @property
    def connection_type(self) -> Optional[ConnectionType]:
        """The current connection type in use if the client is open,
        otherwise ``None``"""
        if self._transport is not None:
            return self._transport.connection_type
        return None
    @property
    def pending_count(self) -> int:
        """The number of pending incoming messages
        Once :obj:`open` is called the client starts listening for messages
        from the server. The incoming messages are retrieved and stored in an
        internal queue until they get consumed by calling :obj:`receive`.
        """
        if self._incoming_queue is None:
            return 0
        return self._incoming_queue.qsize()
    @property
    def has_pending_messages(self) -> bool:
        """Marks whether the client has any pending incoming messages"""
        return self.pending_count > 0
    async def _get_http_session(self) -> aiohttp.ClientSession:
        """Factory method for getting the current HTTP session
        :return: The current session if it's not None, otherwise it creates a
            new session.
        """
        # it would be nicer to create the session when the class gets
        # initialized, but this seems to be the right way to do it since
        # aiohttp produces log messages with warnings that a session should be
        # created in a coroutine
        if self._http_session is None:
            self._http_session = aiohttp.ClientSession(
                json_serialize=self._json_dumps
            )
        return self._http_session
    async def _close_http_session(self) -> None:
        """Close the http session if it's not already closed"""
        # graceful shutdown recommended by the documentation
        # https://aiohttp.readthedocs.io/en/stable/client_advanced.html\
        # #graceful-shutdown
        if self._http_session is not None and not self._http_session.closed:
            await self._http_session.close()
            await asyncio.sleep(self._HTTP_SESSION_CLOSE_TIMEOUT)
    def _pick_connection_type(self, connection_types: List[str]) \
            -> Optional[ConnectionType]:
        """Pick a connection type based on the *connection_types*
        supported by the server and on the user's preferences
        :param connection_types: Connection types \
        supported by the server
        :return: The connection type with the highest precedence \
        which is supported by the server
        """
        # silently drop any server-offered type names we don't recognize
        server_connection_types = []
        for type_string in connection_types:
            with suppress(ValueError):
                server_connection_types.append(ConnectionType(type_string))
        intersection = (set(server_connection_types) &
                        set(self._connection_types))
        if not intersection:
            return None
        # min by index == the first mutually supported type in the user's
        # preference list
        result = min(intersection, key=self._connection_types.index)
        return result
    async def _negotiate_transport(self) -> Transport:
        """Negotiate the transport type to use with the server and create the
        transport object
        :return: Transport object
        :raise ClientError: If none of the connection types offered by the \
        server are supported
        """
        self._incoming_queue = asyncio.Queue(maxsize=self._max_pending_count)
        http_session = await self._get_http_session()
        # the handshake is performed over DEFAULT_CONNECTION_TYPE; if the
        # negotiated type differs, the transport is replaced below
        transport = create_transport(DEFAULT_CONNECTION_TYPE,
                                     url=self.url,
                                     incoming_queue=self._incoming_queue,
                                     ssl=self.ssl,
                                     extensions=self.extensions,
                                     auth=self.auth,
                                     json_dumps=self._json_dumps,
                                     json_loads=self._json_loads,
                                     http_session=http_session,
                                     loop=self._loop)
        try:
            response = await transport.handshake(self._connection_types)
            self._verify_response(response)
            LOGGER.info("Connection types supported by the server: %r",
                        response["supportedConnectionTypes"])
            connection_type = self._pick_connection_type(
                response["supportedConnectionTypes"]
            )
            if not connection_type:
                raise ClientError("None of the connection types offered by "
                                  "the server are supported.")
            if transport.connection_type != connection_type:
                # extract and reuse the client_id from the initial transport
                client_id = transport.client_id
                # extract and reuse the reconnect_advice from the initial
                # transport
                advice = transport.reconnect_advice
                # close the initial transport
                await transport.close()
                # create the negotiated transport
                transport = create_transport(
                    connection_type,
                    url=self.url,
                    incoming_queue=self._incoming_queue,
                    client_id=client_id,
                    ssl=self.ssl,
                    extensions=self.extensions,
                    auth=self.auth,
                    json_dumps=self._json_dumps,
                    json_loads=self._json_loads,
                    reconnect_advice=advice,
                    http_session=http_session,
                    loop=self._loop)
            return transport
        except Exception:
            # on any failure, release both the transport and the session so
            # a failed open() leaves no dangling network resources
            await transport.close()
            await self._close_http_session()
            raise
    async def open(self) -> None:
        """Establish a connection with the CometD server
        This method works mostly the same way as the `handshake` method of
        CometD clients in the reference implementations.
        :raise ClientError: If none of the connection types offered by the \
        server are supported
        :raise ClientInvalidOperation: If the client is already open, or in \
        other words if it isn't :obj:`closed`
        :raise TransportError: If a network or transport related error occurs
        :raise ServerError: If the handshake or the first connect request \
        gets rejected by the server.
        """
        if not self.closed:
            raise ClientInvalidOperation("Client is already open.")
        LOGGER.info("Opening client with connection types %r ...",
                    [t.value for t in self._connection_types])
        self._transport = await self._negotiate_transport()
        response = await self._transport.connect()
        self._verify_response(response)
        self._closed = False
        assert self.connection_type is not None
        LOGGER.info("Client opened with connection_type %r",
                    self.connection_type.value)
    async def close(self) -> None:
        """Disconnect from the CometD server"""
        if not self.closed:
            if self.pending_count == 0:
                LOGGER.info("Closing client...")
            else:
                LOGGER.warning(
                    "Closing client while %s messages are still pending...",
                    self.pending_count)
            try:
                if self._transport:
                    await self._transport.disconnect()
                    await self._transport.close()
                await self._close_http_session()
            finally:
                # mark closed even if disconnect/close raised
                self._closed = True
                LOGGER.info("Client closed.")
    async def subscribe(self, channel: str) -> None:
        """Subscribe to *channel*
        :param channel: Name of the channel
        :raise ClientInvalidOperation: If the client is :obj:`closed`
        :raise TransportError: If a network or transport related error occurs
        :raise ServerError: If the subscribe request gets rejected by the \
        server
        """
        if self.closed:
            raise ClientInvalidOperation("Can't send subscribe request while, "
                                         "the client is closed.")
        await self._check_server_disconnected()
        assert self._transport is not None
        response = await self._transport.subscribe(channel)
        self._verify_response(response)
        LOGGER.info("Subscribed to channel %s", channel)
    async def unsubscribe(self, channel: str) -> None:
        """Unsubscribe from *channel*
        :param channel: Name of the channel
        :raise ClientInvalidOperation: If the client is :obj:`closed`
        :raise TransportError: If a network or transport related error occurs
        :raise ServerError: If the unsubscribe request gets rejected by the \
        server
        """
        if self.closed:
            raise ClientInvalidOperation("Can't send unsubscribe request "
                                         "while, the client is closed.")
        await self._check_server_disconnected()
        assert self._transport is not None
        response = await self._transport.unsubscribe(channel)
        self._verify_response(response)
        LOGGER.info("Unsubscribed from channel %s", channel)
    async def publish(self, channel: str, data: JsonObject) -> JsonObject:
        """Publish *data* to the given *channel*
        :param channel: Name of the channel
        :param data: Data to send to the server
        :return: Publish response
        :raise ClientInvalidOperation: If the client is :obj:`closed`
        :raise TransportError: If a network or transport related error occurs
        :raise ServerError: If the publish request gets rejected by the server
        """
        if self.closed:
            raise ClientInvalidOperation("Can't publish data while, "
                                         "the client is closed.")
        await self._check_server_disconnected()
        assert self._transport is not None
        response = await self._transport.publish(channel, data)
        self._verify_response(response)
        return response
    def _verify_response(self, response: JsonObject) -> None:
        """Check the ``successful`` status of the *response* and raise \
        the appropriate :obj:`~aiocometd.exceptions.ServerError` if it's False
        If the *response* has no ``successful`` field, it's considered to be
        successful.
        :param response: Response message
        :raise ServerError: If the *response* is not ``successful``
        """
        if is_server_error_message(response):
            self._raise_server_error(response)
    def _raise_server_error(self, response: JsonObject) -> None:
        """Raise the appropriate :obj:`~aiocometd.exceptions.ServerError` for \
        the failed *response*
        :param response: Response message
        :raise ServerError: If the *response* is not ``successful``
        """
        channel = response["channel"]
        message = type(self)._SERVER_ERROR_MESSAGES.get(channel)
        if not message:
            # no predefined message for this channel: distinguish service
            # channel requests from plain publishes
            if channel.startswith(SERVICE_CHANNEL_PREFIX):
                message = "Service request failed."
            else:
                message = "Publish request failed."
        raise ServerError(message, response)
    async def receive(self) -> JsonObject:
        """Wait for incoming messages from the server
        :return: Incoming message
        :raise ClientInvalidOperation: If the client is closed, and has no \
        more pending incoming messages
        :raise ServerError: If the client receives a confirmation message \
        which is not ``successful``
        :raise TransportTimeoutError: If the transport can't re-establish \
        connection with the server in :obj:`connection_timeout` time.
        """
        if not self.closed or self.has_pending_messages:
            response = await self._get_message(self.connection_timeout)
            self._verify_response(response)
            return response
        raise ClientInvalidOperation("The client is closed and there are "
                                     "no pending messages.")
    async def __aiter__(self) -> AsyncIterator[JsonObject]:
        """Asynchronous iterator
        :raise ServerError: If the client receives a confirmation message \
        which is not ``successful``
        :raise TransportTimeoutError: If the transport can't re-establish \
        connection with the server in :obj:`connection_timeout` time.
        """
        # iterate until receive() reports that the client is closed and
        # drained (signalled by ClientInvalidOperation)
        while True:
            try:
                yield await self.receive()
            except ClientInvalidOperation:
                break
    async def __aenter__(self) -> "Client":
        """Enter the runtime context and call :obj:`open`
        :raise ClientInvalidOperation: If the client is already open, or in \
        other words if it isn't :obj:`closed`
        :raise TransportError: If a network or transport related error occurs
        :raise ServerError: If the handshake or the first connect request \
        gets rejected by the server.
        :return: The client object itself
        :rtype: Client
        """
        try:
            await self.open()
        except Exception:
            await self.close()
            raise
        return self
    async def __aexit__(self, exc_type: Type[BaseException],
                        exc_val: BaseException,
                        exc_tb: TracebackType) -> None:
        """Exit the runtime context and call :obj:`open`"""
        await self.close()
    async def _get_message(self, connection_timeout: Union[int, float]) \
            -> JsonObject:
        """Get the next incoming message
        :param connection_timeout: The maximum amount of time to wait for the \
        transport to re-establish a connection with the server when the \
        connection fails.
        :return: Incoming message
        :raise TransportTimeoutError: If the transport can't re-establish \
        connection with the server in :obj:`connection_timeout` time.
        :raise ServerError: If the connection gets closed by the server.
        """
        # race three awaitables: connection timeout, the next queued message,
        # and a server-side disconnect; the first to finish wins
        tasks: List[asyncio.Future[Any]] = []
        # task waiting on connection timeout
        if connection_timeout:
            timeout_task = asyncio.ensure_future(
                self._wait_connection_timeout(connection_timeout),
                loop=self._loop
            )
            tasks.append(timeout_task)
        assert self._incoming_queue is not None
        # task waiting on incoming messages
        get_task = asyncio.ensure_future(self._incoming_queue.get(),
                                         loop=self._loop)
        tasks.append(get_task)
        assert self._transport is not None
        # task waiting on server side disconnect
        server_disconnected_task = asyncio.ensure_future(
            self._transport.wait_for_state(
                TransportState.SERVER_DISCONNECTED),
            loop=self._loop
        )
        tasks.append(server_disconnected_task)
        try:
            done, pending = await asyncio.wait(
                tasks,
                return_when=asyncio.FIRST_COMPLETED,
                loop=self._loop)
            # cancel all pending tasks
            for task in pending:
                task.cancel()
            # handle the completed task
            if get_task in done:
                return get_task.result()
            if server_disconnected_task in done:
                await self.close()
                raise ServerError("Connection closed by the server",
                                  self._transport.last_connect_result)
            raise TransportTimeoutError("Lost connection with the server.")
        except asyncio.CancelledError:
            # cancel all tasks
            for task in tasks:
                task.cancel()
            raise
    async def _wait_connection_timeout(self, timeout: Union[int, float]) \
            -> None:
        """Wait for and return when the transport can't re-establish \
        connection with the server in *timeout* time
        :param timeout: The maximum amount of time to wait for the \
        transport to re-establish a connection with the server when the \
        connection fails.
        """
        assert self._transport is not None
        # every time the transport drops into CONNECTING, give it *timeout*
        # seconds to reach CONNECTED again; return (i.e. signal timeout) if
        # it fails
        while True:
            await self._transport.wait_for_state(TransportState.CONNECTING)
            try:
                await asyncio.wait_for(
                    self._transport.wait_for_state(TransportState.CONNECTED),
                    timeout, loop=self._loop
                )
            except asyncio.TimeoutError:
                break
    async def _check_server_disconnected(self) -> None:
        """Checks whether the current transport'state is
        :obj:`TransportState.SERVER_DISCONNECTED` and if it is then closes the
        client and raises an error
        :raise ServerError: If the current transport's state is \
        :obj:`TransportState.SERVER_DISCONNECTED`
        """
        if (self._transport and
                self._transport.state == TransportState.SERVER_DISCONNECTED):
            await self.close()
            raise ServerError("Connection closed by the server",
                              self._transport.last_connect_result)
| StarcoderdataPython |
9695181 | import time
from typing import Dict, List, Optional, Union
from labml_db import Model, Key, Index
from . import project
from .status import create_status, Status
from .. import settings
class Computer(Model['Computer']):
    """Persistent record of a single monitored computer (one per computer_uuid)."""
    name: str  # display name; taken from the first update that provides one
    comment: str  # free-form description; taken from the first update that provides one
    start_time: float  # creation time as epoch seconds (None until defaults are applied)
    computer_ip: str
    computer_uuid: str
    is_claimed: bool  # False only for computers created under the shared float project
    status: Key[Status]  # reference to the latest Status record
    configs: Dict[str, any]  # NOTE(review): builtin `any` used as an annotation; typing.Any was presumably intended -- confirm
    errors: List[Dict[str, str]]
    @classmethod
    def defaults(cls):
        """Default field values used when a new Computer record is created."""
        return dict(name='',
                    comment='',
                    start_time=None,
                    computer_uuid='',
                    is_claimed=True,
                    computer_ip='',
                    status=None,
                    configs={},
                    errors=[]
                    )
    @property
    def url(self) -> str:
        """URL of this computer's detail page in the web app."""
        return f'{settings.WEB_URL}/computer?uuid={self.computer_uuid}'
    def update_computer(self, data: Dict[str, any]) -> None:
        """Merge an incoming update payload into this record and persist it.
        ``name`` and ``comment`` are only set from the first update that
        provides them; ``configs`` entries are merged cumulatively.
        """
        if not self.name:
            self.name = data.get('name', '')
        if not self.comment:
            self.comment = data.get('comment', '')
        if 'configs' in data:
            self.configs.update(data.get('configs', {}))
        self.save()
    def get_data(self) -> Dict[str, Union[str, any]]:
        """Full representation of this computer for the frontend.
        NOTE(review): 'configs' is always an empty list here even though the
        record stores configs -- confirm whether that is intentional.
        """
        return {
            'computer_uuid': self.computer_uuid,
            'name': self.name,
            'comment': self.comment,
            'start_time': self.start_time,
            'configs': [],
        }
    def get_summary(self) -> Dict[str, str]:
        """Compact representation of this computer for list views."""
        return {
            'computer_uuid': self.computer_uuid,
            'name': self.name,
            'comment': self.comment,
            'start_time': self.start_time,
        }
class ComputerIndex(Index['Computer']):
    # Global lookup index mapping computer_uuid -> Key[Computer].
    pass
def get(computer_uuid: str, labml_token: str = '') -> Optional[Computer]:
    """Look up a computer by UUID within the project for *labml_token*.

    Returns the loaded ``Computer`` model, or ``None`` when the project has
    no computer registered under that UUID.
    """
    proj = project.get_project(labml_token)
    computer_key = proj.computers.get(computer_uuid)
    if computer_key is None:
        return None
    return computer_key.load()
def get_or_create(computer_uuid: str, labml_token: str = '', computer_ip: str = '') -> Computer:
    """Fetch the computer registered under *computer_uuid*, creating and
    registering a fresh record (with a new status entry) when absent."""
    proj = project.get_project(labml_token)
    existing = proj.computers.get(computer_uuid)
    if existing is not None:
        return existing.load()
    # Computers tracked under the shared "float" project start out unclaimed.
    claimed = labml_token != settings.FLOAT_PROJECT_TOKEN
    new_computer = Computer(computer_uuid=computer_uuid,
                            start_time=time.time(),
                            computer_ip=computer_ip,
                            is_claimed=claimed,
                            status=create_status().key)
    # Register in the owning project and the global index, persisting both.
    proj.computers[computer_uuid] = new_computer.key
    new_computer.save()
    proj.save()
    ComputerIndex.set(computer_uuid, new_computer.key)
    return new_computer
def get_computers(labml_token: str) -> List[Computer]:
    """Load every computer registered under the project for *labml_token*."""
    proj = project.get_project(labml_token)
    return [computer_key.load() for computer_key in proj.computers.values()]
def get_computer(computer_uuid: str) -> Optional[Computer]:
    """Resolve *computer_uuid* through the global index; ``None`` if unknown."""
    computer_key = ComputerIndex.get(computer_uuid)
    return computer_key.load() if computer_key else None
def get_status(computer_uuid: str) -> Union[None, Status]:
    """Return the latest status record of the computer, or ``None``."""
    computer = get_computer(computer_uuid)
    return computer.status.load() if computer else None
| StarcoderdataPython |
4822338 | <gh_stars>1-10
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
import os
import inspect
import glob
import numpy as np
import matplotlib.pylab as plt
from simpa.utils.libraries.literature_values import OpticalTissueProperties
from simpa.utils.serializer import SerializableSIMPAClass
class Spectrum(SerializableSIMPAClass, object):
    """
    An instance of this class represents a spectrum over wavelength (e.g. the
    absorption spectrum of a chromophore). The sampled values are linearly
    interpolated onto a 1 nm grid at construction time so lookups are O(1).
    """
    def __init__(self, spectrum_name: str, wavelengths: np.ndarray, values: np.ndarray):
        """
        :param spectrum_name: name identifying this spectrum
        :param wavelengths: sample wavelengths [nm]
        :param values: spectral values corresponding to *wavelengths*
        :raises ValueError: if *wavelengths* and *values* differ in shape
        """
        self.spectrum_name = spectrum_name
        self.wavelengths = wavelengths
        self.max_wavelength = int(np.max(wavelengths))
        self.min_wavelength = int(np.min(wavelengths))
        self.values = values
        if np.shape(wavelengths) != np.shape(values):
            raise ValueError("The shape of the wavelengths and the absorption coefficients did not match: " +
                             str(np.shape(wavelengths)) + " vs " + str(np.shape(values)))
        # Pre-compute the linear interpolation on an integer 1 nm grid.
        new_wavelengths = np.arange(self.min_wavelength, self.max_wavelength+1, 1)
        self.new_absorptions = np.interp(new_wavelengths, self.wavelengths, self.values)
    def get_value_over_wavelength(self):
        """
        :return: numpy array with the available wavelengths and the corresponding absorption properties
        """
        return np.asarray([self.wavelengths, self.values])
    def get_value_for_wavelength(self, wavelength: int) -> float:
        """
        :param wavelength: the wavelength to retrieve a optical absorption value for [cm^{-1}].
            Must be an integer value between the minimum and maximum wavelength.
        :return: the best matching linearly interpolated absorption value for the given wavelength.
        """
        return self.new_absorptions[wavelength-self.min_wavelength]
    def __eq__(self, other):
        """Structural equality on name, wavelengths and values.

        BUG FIX: the previous implementation returned a 3-tuple of the
        individual comparisons instead of combining them with ``and``; a
        non-empty tuple is always truthy, so *any* two Spectrum instances
        compared equal. The element-wise ndarray comparisons are also
        replaced with :func:`np.array_equal` to yield a proper bool.
        """
        if isinstance(other, Spectrum):
            return (self.spectrum_name == other.spectrum_name
                    and np.array_equal(self.wavelengths, other.wavelengths)
                    and np.array_equal(self.values, other.values))
        return super().__eq__(other)
    def serialize(self) -> dict:
        """Serialize this spectrum into the SIMPA dictionary format."""
        serialized_spectrum = self.__dict__
        return {"Spectrum": serialized_spectrum}
    @staticmethod
    def deserialize(dictionary_to_deserialize: dict):
        """Recreate a Spectrum from a dictionary produced by :meth:`serialize`."""
        deserialized_spectrum = Spectrum(spectrum_name=dictionary_to_deserialize["spectrum_name"],
                                         wavelengths=dictionary_to_deserialize["wavelengths"],
                                         values=dictionary_to_deserialize["values"])
        return deserialized_spectrum
class SpectraLibrary(object):
    """Collection of Spectrum instances loaded from bundled *.npz data files.

    Iterating the library yields the spectra in *reverse* load order
    (see __next__).
    """
    def __init__(self, folder_name: str, additional_folder_path: str = None):
        self.spectra = list()
        self.add_spectra_from_folder(folder_name)
        if additional_folder_path is not None:
            self.add_spectra_from_folder(additional_folder_path)
    def add_spectra_from_folder(self, folder_name):
        """Load every *.npz spectrum file found in *folder_name* (resolved
        relative to this module) into the library."""
        base_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        for absorption_spectrum in glob.glob(os.path.join(base_path, folder_name, "*.npz")):
            # strip the directory and the ".npz" suffix to get the name
            name = absorption_spectrum.split(os.path.sep)[-1][:-4]
            numpy_data = np.load(absorption_spectrum)
            values = numpy_data["values"]
            wavelengths = numpy_data["wavelengths"]
            self.spectra.append(Spectrum(spectrum_name=name, values=values, wavelengths=wavelengths))
    def __next__(self):
        # self.i counts down from len(self.spectra), so iteration yields the
        # spectra last-to-first.
        if self.i > 0:
            self.i -= 1
            return self.spectra[self.i]
        raise StopIteration()
    def __iter__(self):
        self.i = len(self.spectra)
        return self
    def get_spectra_names(self):
        """Return the names of all spectra in the library."""
        return [spectrum.spectrum_name for spectrum in self]
    def get_spectrum_by_name(self, spectrum_name: str) -> Spectrum:
        """Return the spectrum named *spectrum_name*.

        :raises LookupError: if no spectrum with that name exists.
        """
        for spectrum in self:
            if spectrum.spectrum_name == spectrum_name:
                return spectrum
        raise LookupError(f"No spectrum for the given name exists ({spectrum_name}). Try one of: {self.get_spectra_names()}")
class AnisotropySpectrumLibrary(SpectraLibrary):
    """Library of anisotropy spectra shipped in ``anisotropy_spectra_data``."""
    def __init__(self, additional_folder_path: str = None):
        super().__init__("anisotropy_spectra_data", additional_folder_path)
    @staticmethod
    def CONSTANT_ANISOTROPY_ARBITRARY(anisotropy: float = 1):
        """Build a wavelength-independent anisotropy spectrum."""
        endpoints = np.asarray([450, 1000])
        constant_values = np.asarray([anisotropy, anisotropy])
        return Spectrum("Constant Anisotropy (arb)", endpoints, constant_values)
class ScatteringSpectrumLibrary(SpectraLibrary):
    """Library of scattering spectra shipped in ``scattering_spectra_data``."""
    def __init__(self, additional_folder_path: str = None):
        super(ScatteringSpectrumLibrary, self).__init__("scattering_spectra_data", additional_folder_path)
    @staticmethod
    def CONSTANT_SCATTERING_ARBITRARY(scattering: float = 1):
        """Build a wavelength-independent scattering spectrum."""
        return Spectrum("Constant Scattering (arb)", np.asarray([450, 1000]),
                        np.asarray([scattering, scattering]))
    @staticmethod
    def scattering_from_rayleigh_and_mie_theory(name: str, mus_at_500_nm: float = 1.0, fraction_rayleigh_scattering: float = 0.0,
                                                mie_power_law_coefficient: float = 0.0):
        """Build a scattering spectrum on a 1 nm grid (450-1000 nm) from the
        Rayleigh/Mie power-law model
        ``mu_s(lambda) = a * (f_ray * (lambda/500)^-4 + (1 - f_ray) * (lambda/500)^-b_mie)``
        (cf. Jacques 2013, "Optical properties of biological tissues").

        :param name: name of the resulting spectrum
        :param mus_at_500_nm: scattering coefficient at the 500 nm reference wavelength
        :param fraction_rayleigh_scattering: Rayleigh fraction ``f_ray``
        :param mie_power_law_coefficient: Mie power-law exponent ``b_mie``
        """
        wavelengths = np.arange(450, 1001, 1)
        # BUG FIX: the Rayleigh term scales with lambda^-4; the exponent was
        # previously written as `1e-4` (i.e. 0.0001), which made the Rayleigh
        # component effectively wavelength-independent.
        scattering = (mus_at_500_nm * (fraction_rayleigh_scattering * (wavelengths / 500) ** -4 +
                      (1 - fraction_rayleigh_scattering) * (wavelengths / 500) ** -mie_power_law_coefficient))
        return Spectrum(name, wavelengths, scattering)
class AbsorptionSpectrumLibrary(SpectraLibrary):
    """Library of absorption spectra shipped in ``absorption_spectra_data``."""
    def __init__(self, additional_folder_path: str = None):
        super().__init__("absorption_spectra_data", additional_folder_path)
    @staticmethod
    def CONSTANT_ABSORBER_ARBITRARY(absorption_coefficient: float = 1):
        """Build a wavelength-independent absorption spectrum."""
        endpoints = np.asarray([450, 1000])
        constant_values = np.asarray([absorption_coefficient, absorption_coefficient])
        return Spectrum("Constant Absorber (arb)", endpoints, constant_values)
def get_simpa_internal_absorption_spectra_by_names(absorption_spectrum_names: list):
    """Resolve each name in *absorption_spectrum_names* against the built-in
    absorption spectrum library and return the matching Spectrum objects."""
    library = AbsorptionSpectrumLibrary()
    return [library.get_spectrum_by_name(name) for name in absorption_spectrum_names]
def view_saved_spectra(save_path=None, mode="absorption"):
    """
    Opens a matplotlib plot and visualizes the available absorption spectra.
    :param save_path: If not None, then the figure will be saved as a png file to the destination.
    :param mode: string that is "absorption", "scattering", or "anisotropy"
    """
    # Dispatch table replacing the three near-identical if blocks; an
    # unknown mode (as before) simply yields an empty plot.
    libraries = {
        "absorption": AbsorptionSpectrumLibrary,
        "scattering": ScatteringSpectrumLibrary,
        "anisotropy": AnisotropySpectrumLibrary,
    }
    plt.figure(figsize=(11, 8))
    library_class = libraries.get(mode)
    if library_class is not None:
        for spectrum in library_class():
            plt.semilogy(spectrum.wavelengths,
                         spectrum.values,
                         label=spectrum.spectrum_name)
    ax = plt.gca()
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.set_ylabel(mode)
    ax.set_xlabel("Wavelength [nm]")
    ax.set_title(f"{mode} spectra for all absorbers present in the library")
    ax.legend(loc='best', bbox_to_anchor=(1, 0.5))
    if save_path is not None:
        plt.savefig(save_path + f"{mode}_spectra.png")
    plt.show()
    plt.close()
| StarcoderdataPython |
266849 | <filename>panda/src/tinydisplay/ztriangle.py
#!/usr/bin/env python
""" This simple Python script can be run to generate
ztriangle_code_*.h, ztriangle_table.*, and ztriangle_*.cxx, which
are a poor man's form of generated code to cover the explosion of
different rendering options while scanning out triangles.
Each different combination of options is compiled to a different
inner-loop triangle scan function. The code in
tinyGraphicsStateGuardian.cxx will select the appropriate function
pointer at draw time. """
# This is the number of generated ztriangle_code_*.h and
# ztriangle_*.cxx files we will produce. You may change this freely;
# you should also change the Sources.pp file accordingly.
# (Python 2 generator script: uses `print >>` redirection and the builtin
# `reduce`.)
NumSegments = 4
# We generate an #include "ztriangle_two.h" for each combination of
# these options.
Options = [
    # depth write
    [ 'zon', 'zoff' ],
    # color write
    [ 'cstore', 'cblend', 'cgeneral', 'coff', 'csstore', 'csblend' ],
    # alpha test
    [ 'anone', 'aless', 'amore' ],
    # depth test
    [ 'znone', 'zless' ],
    # texture filters
    [ 'tnearest', 'tmipmap', 'tgeneral' ],
    ]
# The total number of different combinations of the various Options, above.
OptionsCount = reduce(lambda a, b: a * b, map(lambda o: len(o), Options))
# The various combinations of these options are explicit within
# ztriangle_two.h.
ExtraOptions = [
    # shade model
    [ 'white', 'flat', 'smooth' ],
    # texturing
    [ 'untextured', 'textured', 'perspective', 'multitex2', 'multitex3' ],
    ]
# The expansion of all ExtraOptions combinations into a linear list.
ExtraOptionsMat = []
for i in range(len(ExtraOptions[0])):
    for j in range(len(ExtraOptions[1])):
        ExtraOptionsMat.append([i, j])
FullOptions = Options + ExtraOptions
# Maps each option keyword to the C preprocessor definitions that
# implement it in the generated scanline code.
CodeTable = {
    # depth write
    'zon' : '#define STORE_Z(zpix, z) (zpix) = (z)',
    'zoff' : '#define STORE_Z(zpix, z)',

    # color write
    'cstore' : '#define STORE_PIX(pix, rgb, r, g, b, a) (pix) = (rgb)',
    'cblend' : '#define STORE_PIX(pix, rgb, r, g, b, a) (pix) = PIXEL_BLEND_RGB(pix, r, g, b, a)',
    'cgeneral' : '#define STORE_PIX(pix, rgb, r, g, b, a) zb->store_pix_func(zb, pix, r, g, b, a)',
    'coff' : '#define STORE_PIX(pix, rgb, r, g, b, a)',

    # color write, sRGB
    'csstore' : '#define STORE_PIX(pix, rgb, r, g, b, a) (pix) = SRGBA_TO_PIXEL(r, g, b, a)',
    'csblend' : '#define STORE_PIX(pix, rgb, r, g, b, a) (pix) = PIXEL_BLEND_SRGB(pix, r, g, b, a)',

    # alpha test
    'anone' : '#define ACMP(zb, a) 1',
    'aless' : '#define ACMP(zb, a) (((int)(a)) < (zb)->reference_alpha)',
    'amore' : '#define ACMP(zb, a) (((int)(a)) > (zb)->reference_alpha)',

    # depth test
    'znone' : '#define ZCMP(zpix, z) 1',
    'zless' : '#define ZCMP(zpix, z) ((ZPOINT)(zpix) < (ZPOINT)(z))',

    # texture filters
    'tnearest' : '#define CALC_MIPMAP_LEVEL(mipmap_level, mipmap_dx, dsdx, dtdx)\n#define ZB_LOOKUP_TEXTURE(texture_def, s, t, level, level_dx) ZB_LOOKUP_TEXTURE_NEAREST(texture_def, s, t)',
    'tmipmap' : '#define CALC_MIPMAP_LEVEL(mipmap_level, mipmap_dx, dsdx, dtdx) DO_CALC_MIPMAP_LEVEL(mipmap_level, mipmap_dx, dsdx, dtdx)\n#define INTERP_MIPMAP\n#define ZB_LOOKUP_TEXTURE(texture_def, s, t, level, level_dx) ZB_LOOKUP_TEXTURE_MIPMAP_NEAREST(texture_def, s, t, level)',
    'tgeneral' : '#define CALC_MIPMAP_LEVEL(mipmap_level, mipmap_dx, dsdx, dtdx) DO_CALC_MIPMAP_LEVEL(mipmap_level, mipmap_dx, dsdx, dtdx)\n#define INTERP_MIPMAP\n#define ZB_LOOKUP_TEXTURE(texture_def, s, t, level, level_dx) ((level == 0) ? (texture_def)->tex_magfilter_func(texture_def, s, t, level, level_dx) : (texture_def)->tex_minfilter_func(texture_def, s, t, level, level_dx))',
    }
# Template for each generated ztriangle_*.cxx translation unit; %s is the
# segment number.
ZTriangleStub = """
/* This file is generated code--do not edit. See ztriangle.py. */
#include <stdlib.h>
#include <stdio.h>
#include "pandabase.h"
#include "zbuffer.h"
/* Pick up all of the generated code references to ztriangle_two.h,
which ultimately calls ztriangle.h, many, many times. */
#include "ztriangle_table.h"
#include "ztriangle_code_%s.h"
"""
# Odometer-style counter over Options; one digit per option group.
ops = [0] * len(Options)
class DoneException(Exception):
    """Raised by incrementOptions() once every option combination has been
    exhausted. Deriving from Exception keeps `raise DoneException` valid
    under Python 3 (where classic classes cannot be raised) while remaining
    fully compatible with the Python 2 code in this generator script."""
    pass
# We write the code that actually instantiates the various
# triangle-filling functions to ztriangle_code_*.h.
# Module-level generator state:
code = None       # file object of the currently-open ztriangle_code_*.h
codeSeg = None    # 1-based index of the current output segment
fnameDict = {}    # full function name -> (segment, index in that segment's table)
fnameList = None  # function names emitted into the current segment, in order
def incrementOptions(ops, i = -1):
    """Advance the option-counter vector *ops* by one, odometer-style,
    starting from the least-significant (rightmost) place *i*.
    Raises DoneException once every combination in Options is exhausted."""
    while i >= -len(ops):
        if ops[i] + 1 < len(Options[i]):
            # This place can still be incremented; done.
            ops[i] += 1
            return
        # Carry: reset this place and move to the next-more-significant one.
        ops[i] = 0
        i -= 1
    raise DoneException
def getFname(ops):
    # Returns the function name corresponding to the indicated ops
    # vector.
    keywords = [FullOptions[place][choice] for place, choice in enumerate(ops)]
    if keywords[-1].startswith('multitex'):
        # We don't bother with white_multitex or flat_multitex.
        keywords[-2] = 'smooth'
    return 'FB_triangle_%s' % ('_'.join(keywords))
def getFref(ops):
    # Returns a string that evaluates to a pointer reference to the
    # indicated function (an entry of a per-segment function table).
    seg, index = fnameDict[getFname(ops)]
    return 'ztriangle_code_%s[%s]' % (seg, index)
def closeCode():
    """ Close the previously-opened code file. """
    # Before closing, append the segment's function-pointer table so the
    # generated .h is self-contained. (Python 2 `print >>` syntax.)
    if code:
        print >> code, ''
        print >> code, 'ZB_fillTriangleFunc ztriangle_code_%s[%s] = {' % (codeSeg, len(fnameList))
        for fname in fnameList:
            print >> code, '  %s,' % (fname)
        print >> code, '};'
        code.close()
def openCode(count):
    """ Open the code file appropriate to the current segment.  We
    write out the generated code into a series of smaller files,
    instead of one mammoth file, just to make it easier on the
    compiler.

    `count` is the number of option combinations emitted so far; the
    segment index is derived from its fraction of OptionsCount.  Updates
    the module globals `code`, `codeSeg` and `fnameList`.
    """
    global code, codeSeg, fnameList
    # NOTE(review): under Python 2 this is integer division unless
    # NumSegments/count/OptionsCount are floats -- presumably intentional.
    seg = int(NumSegments * count / OptionsCount) + 1
    if codeSeg != seg:
        closeCode()
        codeSeg = seg
        fnameList = []
        # Open a new file.
        code = open('ztriangle_code_%s.h' % (codeSeg), 'wb')
        print >> code, '/* This file is generated code--do not edit. See ztriangle.py. */'
        print >> code, ''
        # Also generate ztriangle_*.cxx, to include the above file.
        # NOTE(review): `zt` is never explicitly closed; relies on GC.
        zt = open('ztriangle_%s.cxx' % (codeSeg), 'wb')
        print >> zt, ZTriangleStub % (codeSeg)
# First, generate the code.  Walk every combination of Options (via the
# `ops` odometer) and emit one #define/#include stanza per combination;
# ztriangle_two.h expands each stanza into several concrete functions.
count = 0
try:
    while True:
        openCode(count)
        for i in range(len(ops)):
            keyword = Options[i][ops[i]]
            print >> code, CodeTable[keyword]
        # This reference gets just the initial fname: omitting the
        # ExtraOptions, which are implicit in ztriangle_two.h.
        fname = getFname(ops)
        print >> code, '#define FNAME(name) %s_ ## name' % (fname)
        print >> code, '#include "ztriangle_two.h"'
        print >> code, ''
        # We store the full fnames generated by the above lines
        # (including the ExtraOptions) in the fnameDict and fnameList
        # tables.
        for eops in ExtraOptionsMat:
            fops = ops + eops
            fname = getFname(fops)
            fnameDict[fname] = (codeSeg, len(fnameList))
            fnameList.append(fname)
        count += 1
        incrementOptions(ops)
        # Sanity check: we must never emit more combinations than expected.
        assert count < OptionsCount
except DoneException:
    # incrementOptions signals exhaustion by raising DoneException.
    pass
assert count == OptionsCount
closeCode()
# Now, generate the table of function pointers.
# The external reference for the table containing the above function
# pointers gets written here.
table_decl = open('ztriangle_table.h', 'wb')
print >> table_decl, '/* This file is generated code--do not edit. See ztriangle.py. */'
print >> table_decl, ''
# The actual table definition gets written here.
table_def = open('ztriangle_table.cxx', 'wb')
print >> table_def, '/* This file is generated code--do not edit. See ztriangle.py. */'
print >> table_def, ''
print >> table_def, '#include "pandabase.h"'
print >> table_def, '#include "zbuffer.h"'
print >> table_def, '#include "ztriangle_table.h"'
print >> table_def, ''
# Forward-declare the per-segment function arrays emitted by closeCode().
for i in range(NumSegments):
    print >> table_def, 'extern ZB_fillTriangleFunc ztriangle_code_%s[];' % (i + 1)
print >> table_def, ''
def writeTableEntry(ops):
    """Recursively emit one level of the nested fill_tri_funcs initializer.

    `ops` is the (partial) option vector chosen so far; each recursion
    level corresponds to one dimension of FullOptions.  Writes into the
    module-global `table_def` file.
    """
    indent = ' ' * (len(ops) + 1)
    i = len(ops)
    numOps = len(FullOptions[i])
    if i + 1 == len(FullOptions):
        # The last level: write out the actual function names.
        for j in range(numOps - 1):
            print >> table_def, indent + getFref(ops + [j]) + ','
        # Last entry at this level carries no trailing comma.
        print >> table_def, indent + getFref(ops + [numOps - 1])
    else:
        # Intermediate levels: write out a nested reference.
        for j in range(numOps - 1):
            print >> table_def, indent + '{'
            writeTableEntry(ops + [j])
            print >> table_def, indent + '},'
        print >> table_def, indent + '{'
        writeTableEntry(ops + [numOps - 1])
        print >> table_def, indent + '}'
# Build the C array-size suffix, e.g. "[2][3][4]", one dimension per
# FullOptions level, then emit the table definition and its extern decl.
arraySizeList = []
for opList in FullOptions:
    arraySizeList.append('[%s]' % (len(opList)))
arraySize = ''.join(arraySizeList)
print >> table_def, 'const ZB_fillTriangleFunc fill_tri_funcs%s = {' % (arraySize)
print >> table_decl, 'extern const ZB_fillTriangleFunc fill_tri_funcs%s;' % (arraySize)
writeTableEntry([])
print >> table_def, '};'
4947849 | <gh_stars>0
class JsonValidateError(Exception):
    """Exception raised when JSON validation fails."""
class ErrorMsg:
    """Namespace of user-facing error-message templates for JSON validation.

    Templates containing ``{}`` placeholders are meant to be filled with
    str.format() before being displayed.
    """
    # Document-level errors.
    empty_error = "Json cannot be empty."
    syntax_error = "Json element should be string or number."
    # Container-specific errors.
    list_error = "List cannot contains key-value pair."
    dict_error = "Dictionary need a key."
    # Structural errors; the placeholder is the offending token.
    map_error = "{} need to close."
    # Location templates: message, row, column.
    error_format = "{} located in row {}, column {}."
    unknown_error = "{} unknown error in row{}, column {}."
    open_sign_error = "Element need to be close."
    no_match_error = "Sign not match."
| StarcoderdataPython |
6416743 | import os
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import classification_report
from ml_model import RandomForest
from ml_model import SVM
from mics import classifier_mics
if __name__ == '__main__':
    # Load the data (one CSV per neuroimaging feature, plus labels).
    data_alff = pd.read_csv('./pe_feature/DATA_Recog/data_ALFF.csv', sep=',', engine='python')
    data_falff = pd.read_csv('./pe_feature/DATA_Recog/data_fALFF.csv', sep=',', engine='python')
    data_dc = pd.read_csv('./pe_feature/DATA_Recog/data_DC.csv', sep=',', engine='python')
    data_vmhc_global = pd.read_csv('./pe_feature/DATA_Recog/data_global_VMHC.csv', sep=',', engine='python')
    data_vmhc = pd.read_csv('./pe_feature/DATA_Recog/data_NonGlobal_VMHC.csv', sep=',', engine='python')
    data_reho = pd.read_csv('./pe_feature/DATA_Recog/data_ReHo.csv', sep=',', engine='python')
    data_vbm = pd.read_csv('./pe_feature/DATA_Recog/data_VBM.csv', sep=',', engine='python')
    label = pd.read_csv('./pe_feature/DATA_Recog/label.csv', sep=',', engine='python', header=None)
    # NOTE(review): `data` is never defined above -- presumably one of the
    # data_* frames (or their concatenation) was intended.  As written the
    # next line raises NameError.  TODO confirm which feature set is meant.
    X = np.array(data.iloc[:, 0:-1])
    y = np.array(label.iloc[:, -1])
    labelName = ['0', '1']
    posLabel = 0
    # print('data column')
    # print(data.columns)
    # Standardize the data.
    # NOTE(review): fit() here is redundant -- fit_transform() below refits,
    # and this X overwrites the X built from iloc above.
    ss = StandardScaler().fit(data)
    X = ss.fit_transform(data)
    print('X的均值:% s' % X.mean())
    print('X的标准差: % s' % X.std())
    # Split into stratified 50/50 training and validation sets.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, stratify=y)
    # (Random forest model construction and results: baseline plus
    # randomized parameter search.)
    '''
    随机森林模型构建及结果,包括baseline和随机参数搜索法结果
    '''
    # Analyze the data with a random forest model.
    path = os.path.join('Result', 'Random Forest')
    rf = RandomForest(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test,
                      labelName=labelName, path=path)
    baselinePath = os.path.join(path, 'baseline')
    pred_train, y_score_train, pred_test, y_score_test = rf.rf_baseline(
        columeName=data.columns,
        n_estimators=100,
        max_features='auto',
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        bootstrap=True
    )
    # Keep only the positive-class probability column.
    y_score_train = y_score_train[:, 1]
    y_score_test = y_score_test[:, 1]
    # Report ROC and the other headline metrics for the RF baseline.
    mics = classifier_mics(y_train, pred_train, y_score_train, y_test, pred_test, y_score_test, baselinePath)
    auc = mics.draw_roc(pos_index=posLabel)
    acc, precision, recall, f1 = mics.mics_sum()
    print('---------------------------------------------------')
    print('Random Forest Baseline Model-classification report:')
    print(classification_report(y_test, pred_test, target_names=labelName))
    print('auc:%s, acc:%s, precision:%s, recall:%s, f1:%s' % (auc, acc, precision, recall, f1))
    print('---------------------------------------------------')
    # RF model with hyperparameters chosen by randomized search.
    # NOTE(review): `cross_val` below is built but never used -- the search
    # call passes the literal cross_val=5 instead.  TODO confirm intent.
    cross_val = RepeatedKFold(n_splits=3, n_repeats=1, random_state=10)
    # Parameter ranges for RandomizedSearchCV.
    n_estimators = [int(x) for x in np.linspace(start=100, stop=1000, num=10)]
    max_features = ['log2', 'sqrt']
    max_depth = [int(x) for x in np.linspace(start=1, stop=15, num=15)]
    min_samples_split = [int(x) for x in np.linspace(start=2, stop=50, num=10)]
    min_samples_leaf = [int(x) for x in np.linspace(start=2, stop=50, num=10)]
    bootstrap = [True, False]
    # Parameter ranges for GridSearchCV (alternative, commented out).
    # n_estimators = [300, 500, 700]
    # max_features = ['sqrt']
    # max_depth = [7, 11, 15]
    # min_samples_split = [7, 23, 44]
    # min_samples_leaf = [18, 28, 34]
    # bootstrap = [False]
    # Build the RF parameter-search model.
    RandomSearchCVPath = os.path.join(path, 'RandomSearchCV')
    pred_train, y_score_train, pred_test, y_score_test = rf.rf_paraSearch(n_estimators, max_features, max_depth,
                                                                          min_samples_split,
                                                                          min_samples_leaf,
                                                                          bootstrap,
                                                                          cross_val=5,
                                                                          search_method='RandomizedSearch')
    # Report ROC and the other headline metrics for the searched RF model.
    y_score_train = y_score_train[:, 1]
    y_score_test = y_score_test[:, 1]
    mics = classifier_mics(y_train, pred_train, y_score_train, y_test, pred_test, y_score_test, RandomSearchCVPath)
    auc = mics.draw_roc(pos_index=posLabel)
    acc, precision, recall, f1 = mics.mics_sum()
    print('---------------------------------------------------')
    print('Random Forest RandomSearch Model-classification report:')
    print(classification_report(y_test, pred_test, target_names=labelName))
    print('auc:%s, acc:%s, precision:%s, recall:%s, f1:%s' % (auc, acc, precision, recall, f1))
    print('---------------------------------------------------')
1828526 | <reponame>lucijabrezocnik/NiaPy<filename>NiaPy/task/task.py
# encoding=utf8
"""The implementation of tasks."""
import logging
from enum import Enum
from matplotlib import pyplot as plt
from numpy import inf, random as rand
from NiaPy.util.utility import (
limit_repair,
fullArray
)
from NiaPy.util.exception import (
FesException,
GenException,
RefException
)
from NiaPy.task.utility import Utility
# Module-level logger used by StoppingTask to report fitness improvements.
logging.basicConfig()
logger = logging.getLogger("NiaPy.task.Task")
logger.setLevel("INFO")
class OptimizationType(Enum):
    r"""Enum representing type of optimization.

    Attributes:
        MINIMIZATION (int): Represents minimization problems and is default optimization type of all algorithms.
        MAXIMIZATION (int): Represents maximization problems.
    """

    # The numeric values act as multipliers on fitness values (see
    # Task.eval), so maximization is implemented by negating the fitness.
    MINIMIZATION = 1.0
    MAXIMIZATION = -1.0
class Task:
    r"""Class representing problem to solve with optimization.

    Date:
        2019

    Author:
        <NAME> and others

    Attributes:
        D (int): Dimension of the problem.
        Lower (numpy.ndarray): Lower bounds of the problem.
        Upper (numpy.ndarray): Upper bounds of the problem.
        bRange (numpy.ndarray): Search range between upper and lower limits.
        optType (OptimizationType): Optimization type to use.

    See Also:
        * :class:`NiaPy.util.Utility`
    """

    # Class-level defaults; instances overwrite these in __init__.
    D = 0
    benchmark = None
    Lower, Upper, bRange = inf, inf, inf
    optType = OptimizationType.MINIMIZATION

    def __init__(self, D=0, optType=OptimizationType.MINIMIZATION, benchmark=None, Lower=None, Upper=None, frepair=limit_repair, **kwargs):
        r"""Initialize task class for optimization.

        Arguments:
            D (Optional[int]): Number of dimensions.
            optType (Optional[OptimizationType]): Set the type of optimization.
            benchmark (Union[str, Benchmark]): Problem to solve with optimization.
            Lower (Optional[numpy.ndarray]): Lower limits of the problem.
            Upper (Optional[numpy.ndarray]): Upper limits of the problem.
            frepair (Optional[Callable[[numpy.ndarray, numpy.ndarray, numpy.ndarray, Dict[str, Any]], numpy.ndarray]]): Function for reparing individuals components to desired limits.

        See Also:
            * `func`:NiaPy.util.Utility.__init__`
            * `func`:NiaPy.util.Utility.repair`
        """
        # dimension of the problem
        self.D = D
        # set optimization type
        self.optType = optType
        # set optimization function
        self.benchmark = Utility().get_benchmark(benchmark) if benchmark is not None else None
        if self.benchmark is not None:
            # NOTE(review): the inner conditional is redundant inside this
            # branch.  self.Fun is only ever assigned when a benchmark is
            # provided, so eval() raises AttributeError otherwise -- verify
            # that callers always supply a benchmark before evaluating.
            self.Fun = self.benchmark.function() if self.benchmark is not None else None
        # set Lower limits: explicit argument wins, then the benchmark's
        # bounds, then a zero array as a last resort.
        if Lower is not None:
            self.Lower = fullArray(Lower, self.D)
        elif Lower is None and benchmark is not None:
            self.Lower = fullArray(self.benchmark.Lower, self.D)
        else:
            self.Lower = fullArray(0, self.D)
        # set Upper limits (same precedence as Lower)
        if Upper is not None:
            self.Upper = fullArray(Upper, self.D)
        elif Upper is None and benchmark is not None:
            self.Upper = fullArray(self.benchmark.Upper, self.D)
        else:
            self.Upper = fullArray(0, self.D)
        # set range
        self.bRange = self.Upper - self.Lower
        # set repair function
        self.frepair = frepair

    def dim(self):
        r"""Get the number of dimensions.

        Returns:
            int: Dimension of problem optimizing.
        """
        return self.D

    def bcLower(self):
        r"""Get the array of lower bound constraint.

        Returns:
            numpy.ndarray: Lower bound.
        """
        return self.Lower

    def bcUpper(self):
        r"""Get the array of upper bound constraint.

        Returns:
            numpy.ndarray: Upper bound.
        """
        return self.Upper

    def bcRange(self):
        r"""Get the range of bound constraint.

        Returns:
            numpy.ndarray: Range between lower and upper bound.
        """
        # Computed fresh rather than returning self.bRange.
        return self.Upper - self.Lower

    def repair(self, x, rnd=rand):
        r"""Repair solution and put the solution in the random position inside of the bounds of problem.

        Arguments:
            x (numpy.ndarray): Solution to check and repair if needed.
            rnd (mtrand.RandomState): Random number generator.

        Returns:
            numpy.ndarray: Fixed solution.

        See Also:
            * :func:`NiaPy.util.limitRepair`
            * :func:`NiaPy.util.limitInversRepair`
            * :func:`NiaPy.util.wangRepair`
            * :func:`NiaPy.util.randRepair`
            * :func:`NiaPy.util.reflectRepair`
        """
        return self.frepair(x, self.Lower, self.Upper, rnd=rnd)

    def nextIter(self):
        r"""Increments the number of algorithm iterations."""
        # Base class keeps no counters; CountingTask overrides this.

    def start(self):
        r"""Start stopwatch."""

    def eval(self, A):
        r"""Evaluate the solution A.

        Arguments:
            A (numpy.ndarray): Solution to evaluate.

        Returns:
            float: Fitness/function values of solution.
        """
        # Multiplying by optType.value (+1/-1) turns maximization problems
        # into minimization internally.
        return self.Fun(self.D, A) * self.optType.value

    def isFeasible(self, A):
        r"""Check if the solution is feasible.

        Arguments:
            A (Union[numpy.ndarray, Individual]): Solution to check for feasibility.

        Returns:
            bool: `True` if solution is in feasible space else `False`.
        """
        # Element-wise numpy comparisons; any out-of-bounds component
        # makes the whole solution infeasible.
        return False not in (A >= self.Lower) and False not in (A <= self.Upper)

    def stopCond(self):
        r"""Check if optimization task should stop.

        Returns:
            bool: `True` if stopping condition is meet else `False`.
        """
        # Base task never stops; subclasses implement real criteria.
        return False
class CountingTask(Task):
    r"""Optimization task that also counts function evaluations and iterations.

    Attributes:
        Iters (int): Number of algorithm iterations/generations performed.
        Evals (int): Number of fitness-function evaluations performed.

    See Also:
        * :class:`NiaPy.util.Task`
    """

    def __init__(self, **kwargs):
        r"""Initialize the counting task with both counters at zero.

        Args:
            **kwargs (Dict[str, Any]): Additional arguments forwarded to Task.

        See Also:
            * :func:`NiaPy.util.Task.__init__`
        """
        Task.__init__(self, **kwargs)
        self.Iters = 0
        self.Evals = 0

    def eval(self, A):
        r"""Evaluate solution A and bump the evaluation counter.

        Arguments:
            A (numpy.ndarray): Solution to evaluate.

        Returns:
            float: Fitness/function value of the solution.

        See Also:
            * :func:`NiaPy.util.Task.eval`
        """
        fitness = Task.eval(self, A)
        self.Evals = self.Evals + 1
        return fitness

    def evals(self):
        r"""Return how many fitness evaluations have been made.

        Returns:
            int: Number of evaluations made.
        """
        return self.Evals

    def iters(self):
        r"""Return how many algorithm iterations have been made.

        Returns:
            int: Number of generations/iterations made by algorithm.
        """
        return self.Iters

    def nextIter(self):
        r"""Advance the iteration/generation counter by one."""
        self.Iters = self.Iters + 1
class StoppingTask(CountingTask):
    r"""Optimization task with implemented checking for stopping criterias.

    Attributes:
        nGEN (int): Maximum number of algorithm iterations/generations.
        nFES (int): Maximum number of function evaluations.
        refValue (float): Reference function/fitness values to reach in optimization.
        x (numpy.ndarray): Best found individual.
        x_f (float): Best found individual function/fitness value.

    See Also:
        * :class:`NiaPy.util.CountingTask`
    """

    def __init__(self, nFES=inf, nGEN=inf, refValue=None, logger=False, **kwargs):
        r"""Initialize task class for optimization.

        Arguments:
            nFES (Optional[int]): Number of function evaluations.
            nGEN (Optional[int]): Number of generations or iterations.
            refValue (Optional[float]): Reference value of function/fitness function.
            logger (Optional[bool]): Enable/disable logging of improvements.

        Note:
            Storing improvements during the evolutionary cycle is
            captured in self.n_evals and self.x_f_vals

        See Also:
            * :func:`NiaPy.util.CountingTask.__init__`
        """
        CountingTask.__init__(self, **kwargs)
        # -inf default means the reference-value criterion can never fire.
        self.refValue = (-inf if refValue is None else refValue)
        self.logger = logger
        self.x, self.x_f = None, inf
        self.nFES, self.nGEN = nFES, nGEN
        # Parallel lists recording (evaluation count, best fitness) at
        # every improvement, for later convergence plotting.
        self.n_evals = []
        self.x_f_vals = []

    def eval(self, A):
        r"""Evaluate solution.

        Args:
            A (numpy.ndarray): Solution to evaluate.

        Returns:
            float: Fitness/function value of solution.

        See Also:
            * :func:`NiaPy.util.StoppingTask.stopCond`
            * :func:`NiaPy.util.CountingTask.eval`
        """
        # Once stopped, return an "infinitely bad" value so algorithms
        # that keep calling eval() cannot improve further.
        if self.stopCond():
            return inf * self.optType.value
        x_f = CountingTask.eval(self, A)
        if x_f < self.x_f:
            # New best found: record it for the convergence history.
            self.x_f = x_f
            self.n_evals.append(self.Evals)
            self.x_f_vals.append(x_f)
            if self.logger:
                logger.info('nFES:%d => %s' % (self.Evals, self.x_f))
        return x_f

    def stopCond(self):
        r"""Check if stopping condition reached.

        Returns:
            bool: `True` if number of function evaluations or number of algorithm iterations/generations or reference values is reach else `False`.
        """
        # NOTE(review): uses strict `>` for the reference-value test while
        # ThrowingTask.stopCondE uses `>=` -- verify which is intended.
        return (self.Evals >= self.nFES) or (self.Iters >= self.nGEN) or (self.refValue > self.x_f)

    def stopCondI(self):
        r"""Check if stopping condition reached and increase number of iterations.

        Returns:
            bool: `True` if number of function evaluations or number of algorithm iterations/generations or reference values is reach else `False`.

        See Also:
            * :func:`NiaPy.util.StoppingTask.stopCond`
            * :func:`NiaPy.util.CountingTask.nextIter`
        """
        r = self.stopCond()
        CountingTask.nextIter(self)
        return r

    def return_conv(self):
        r"""Get values of x and y axis for plotting covariance graph.

        Returns:
            Tuple[List[int], List[float]]:
                1. List of ints of function evaluations.
                2. List of ints of function/fitness values.
        """
        # Expand the sparse improvement history into one point per
        # evaluation, repeating the best value across the gaps.
        r1, r2 = [], []
        for i, v in enumerate(self.n_evals):
            r1.append(v), r2.append(self.x_f_vals[i])
            if i >= len(self.n_evals) - 1: break
            diff = self.n_evals[i + 1] - v
            if diff <= 1: continue
            for j in range(diff - 1): r1.append(v + j + 1), r2.append(self.x_f_vals[i])
        return r1, r2

    def plot(self):
        """Plot a simple convergence graph."""
        # Blocking call: plt.show() halts until the window is closed.
        fess, fitnesses = self.return_conv()
        plt.plot(fess, fitnesses)
        plt.xlabel('nFes')
        plt.ylabel('Fitness')
        plt.title('Convergence graph')
        plt.show()
class ThrowingTask(StoppingTask):
    r"""Task that throw exceptions when stopping condition is meet.

    See Also:
        * :class:`NiaPy.util.StoppingTask`
    """

    def __init__(self, **kwargs):
        r"""Initialize optimization task.

        Args:
            **kwargs (Dict[str, Any]): Additional arguments.

        See Also:
            * :func:`NiaPy.util.StoppingTask.__init__`
        """
        StoppingTask.__init__(self, **kwargs)

    def stopCondE(self):
        r"""Throw exception for the given stopping condition.

        Raises:
            * FesException: Thrown when the number of function/fitness evaluations is reached.
            * GenException: Thrown when the number of algorithms generations/iterations is reached.
            * RefException: Thrown when the reference values is reached.
            * TimeException: Thrown when algorithm exceeds time run limit.
        """
        # dtime = datetime.now() - self.startTime
        if self.Evals >= self.nFES:
            raise FesException()
        if self.Iters >= self.nGEN:
            raise GenException()
        # if self.runTime is not None and self.runTime >= dtime: raise TimeException()
        # NOTE(review): `>=` here differs from StoppingTask.stopCond's
        # strict `>` on the same comparison -- confirm which is intended.
        if self.refValue >= self.x_f:
            raise RefException()

    def eval(self, A):
        r"""Evaluate solution.

        Args:
            A (numpy.ndarray): Solution to evaluate.

        Returns:
            float: Function/fitness values of solution.

        See Also:
            * :func:`NiaPy.util.ThrowingTask.stopCondE`
            * :func:`NiaPy.util.StoppingTask.eval`
        """
        # Raises instead of returning a sentinel when a criterion is met.
        self.stopCondE()
        return StoppingTask.eval(self, A)
5125568 | <filename>RB_Utils.py
import ntpath
import sys
import os
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.backends.backend_pdf
import math
import numpy as np
def readVCF(inVCF):
    """Parse a VCF file and collect per-chromosome SNV positions and classes.

    Returns a pair of dictionaries keyed by chromosome name:
      1. chromosome -> list of variant positions (kept as strings);
      2. chromosome -> list of collapsed mutation classes (see
         classifyMutationType).
    Non-SNV records (classified as "null") are skipped.

    NOTE(review): the per-sample FORMAT fields are parsed into
    sample_format_dict but never returned or used; variantNumber and
    prior_pos are likewise dead state -- confirm whether they were meant
    to be exposed.
    """
    array_Column_Header = ()
    array_vcfInfo = []
    chr = ""
    pos = ""
    id = ""
    ref = ""
    alt = ""
    qual = ""
    filter = ""
    info = ""
    info_dict = {}
    format = ""
    format_dict = {}
    sample_format_dict = {}
    variantNumber = 0
    mutation_type = ""
    prior_chr = ""
    prior_pos = 0
    first_line_data_flag = True
    # Variables required to build the rainfall plot.
    variant_pos_list = [] # list of SNP position
    variant_pos_list_dict = {} # key is chromosome | value is list of SNP position
    mutationType_list = [] # list of mutation type
    mutationType_list_dict = {} # key is chromosome | value is list of mutation type
    with open(inVCF) as vcfFile:
        for line in vcfFile:
            if line[0:2] == "##": # skip meta-information header lines
                continue
            elif line[0:2] == "#C":
                # Column header line: remember it so sample columns can be
                # matched to sample names below.
                array_Column_Header = line.split()
                continue
            array_vcfInfo = line.split()
            chr = array_vcfInfo[0]
            pos = array_vcfInfo[1]
            id = array_vcfInfo[2]
            ref = array_vcfInfo[3]
            alt = array_vcfInfo[4]
            qual = array_vcfInfo[5]
            filter = array_vcfInfo[6]
            info = array_vcfInfo[7]
            format = array_vcfInfo[8]
            # Parse each sample column (index > 8) into a FORMAT-key dict;
            # missing trailing values are padded with ".".
            for idx in list(range(len(array_Column_Header))):
                format_dict = {}
                if idx > 8:
                    dummyFormatHead = format.split(":")
                    dummyFormatInfo = array_vcfInfo[idx].split(":")
                    dummySample = array_Column_Header[idx]
                    for colNum in list(range(len(dummyFormatHead))):
                        key = dummyFormatHead[colNum]
                        if colNum >= len(dummyFormatInfo):
                            value = "."
                        else:
                            value = dummyFormatInfo[colNum]
                        format_dict.update({key: value})
                    sample_format_dict.update({dummySample: format_dict})
                    continue
            # Rainfall-plot bookkeeping for this record starts here.
            #print("555")
            mutation_type = classifyMutationType(ref , alt)
            if mutation_type == "null":
                continue # Skip other variant that is not SNP
            if(first_line_data_flag == True):
                prior_pos = pos
                prior_chr = chr
                first_line_data_flag = False
            if prior_chr == chr:
                variant_pos_list.append(pos)
                mutationType_list.append(mutation_type)
            elif prior_chr != chr:
                # Chromosome changed: flush the finished chromosome's lists
                # and start fresh ones for the new chromosome.
                variant_pos_list_dict.update({prior_chr: variant_pos_list})
                mutationType_list_dict.update({prior_chr: mutationType_list})
                variant_pos_list = []
                mutationType_list = []
                variant_pos_list.append(pos)
                mutationType_list.append(mutation_type)
                prior_chr = chr
            prior_pos = pos
    # Flush the last chromosome processed.
    variant_pos_list_dict.update({prior_chr: variant_pos_list})
    mutationType_list_dict.update({prior_chr: mutationType_list})
    return variant_pos_list_dict, mutationType_list_dict
def classifyMutationType(ref, alt):
    """Collapse a ref/alt single-base substitution into one of six classes.

    Complementary substitutions are merged into the pyrimidine-based class
    (e.g. both C>A and G>T map to "CA").  Anything that is not a recognized
    single-base substitution (indels, multi-base alleles, or unexpected base
    pairs) yields the sentinel "null", which callers such as readVCF use to
    skip the record.

    Args:
        ref (str): Reference allele.
        alt (str): Alternate allele.

    Returns:
        str: One of "CA", "CG", "CT", "TA", "TC", "TG", or "null".
    """
    # Normalize case up front.  The original code called str.upper() and
    # discarded the result (strings are immutable), so lowercase alleles
    # silently failed every comparison below.
    mutationType = str(ref + alt).upper()
    if len(mutationType) > 2:
        return "null"
    if mutationType == "CA" or mutationType == "GT":
        return "CA"
    if mutationType == "CG" or mutationType == "GC":
        return "CG"
    if mutationType == "CT" or mutationType == "GA":
        return "CT"
    if mutationType == "TA" or mutationType == "AT":
        return "TA"
    if mutationType == "TC" or mutationType == "AG":
        return "TC"
    if mutationType == "TG" or mutationType == "AC":
        return "TG"
    # Previously this fell off the end and returned None, which bypassed
    # the caller's `== "null"` skip check; return the sentinel explicitly.
    return "null"
def plotRainFallMutationType(variant_pos_list_dict: dict, mutationType_list_dict: dict, savePath, saveFilePrefix):
    """Write a multi-page PDF with one rainfall plot per chromosome.

    For every chromosome (chrM/MT excluded) the log10 distance between
    consecutive variants is plotted against genomic position, with one
    scatter series per mutation class.

    Args:
        variant_pos_list_dict (dict): chromosome -> list of variant
            positions (as produced by readVCF; values may be strings).
        mutationType_list_dict (dict): chromosome -> list of mutation
            classes, parallel to the positions.
        savePath: Directory in which the PDF is written.
        saveFilePrefix: Filename prefix for the output PDF.

    NOTE(review): math.log10 raises ValueError if two consecutive variants
    share the same position (distance 0) -- TODO confirm inputs are
    deduplicated.  column_max/row_max/n/axisPlot and the second saveFile
    assignment are unused leftovers.
    """
    saveFile = os.path.join(savePath, saveFilePrefix + '_plotTestest.pdf')
    pdf = matplotlib.backends.backend_pdf.PdfPages(saveFile)
    column_max = 4
    row_max = 6
    count = 0
    for chrName in variant_pos_list_dict.keys():
        if chrName == "chrM" or chrName == "MT":
            continue
        count+=1
        #plt.figure()
        fig, ax = plt.subplots(figsize=(20,10))
        mutation_list = mutationType_list_dict.get(chrName)
        variant_list = variant_pos_list_dict.get(chrName)
        n = len(mutation_list)
        axisPlot = []
        # One (x, y) series per mutation class.
        listCA_x = []
        listCA_y = []
        listCG_x = []
        listCG_y = []
        listCT_x = []
        listCT_y = []
        listTA_x = []
        listTA_y = []
        listTC_x = []
        listTC_y = []
        listTG_x = []
        listTG_y = []
        # x is the position of each variant (from the second one on);
        # y is log10 of its distance to the previous variant.
        for idx in range(1,len(variant_list)):
            query_idx = idx-1
            y = math.log10(int(variant_list[query_idx+1]) - int(variant_list[query_idx]))
            x = int(variant_list[query_idx+1])
            mutationType = mutation_list[query_idx+1]
            if mutationType == "CA":
                listCA_x.append(x)
                listCA_y.append(y)
            elif mutationType == "CG":
                listCG_x.append(x)
                listCG_y.append(y)
            elif mutationType == "CT":
                listCT_x.append(x)
                listCT_y.append(y)
            elif mutationType == "TA":
                listTA_x.append(x)
                listTA_y.append(y)
            elif mutationType == "TC":
                listTC_x.append(x)
                listTC_y.append(y)
            elif mutationType == "TG":
                listTG_x.append(x)
                listTG_y.append(y)
        # Draw the six scatter series with fixed colors and labels.
        for idx in range(6):
            if idx == 0:
                color = "blue"
                mutationType="C>A"
                ax.scatter(listCA_x, listCA_y, c=color, label=mutationType, alpha=0.75, edgecolors='none')
            elif idx == 1:
                color = "black"
                mutationType = "C>G"
                ax.scatter(listCG_x, listCG_y, c=color, label=mutationType, alpha=0.75, edgecolors='none')
            elif idx == 2:
                color = "red"
                mutationType = "C>T"
                ax.scatter(listCT_x, listCT_y, c=color, label=mutationType, alpha=0.75, edgecolors='none')
            elif idx == 3:
                color = "pink"
                mutationType = "T>A"
                ax.scatter(listTA_x, listTA_y, c=color, label=mutationType, alpha=0.75, edgecolors='none')
            elif idx == 4:
                color = "yellow"
                mutationType = "T>C"
                ax.scatter(listTC_x, listTC_y, c=color, label=mutationType, alpha=0.75, edgecolors='none')
            elif idx == 5:
                color = "green"
                mutationType = "T>G"
                ax.scatter(listTG_x, listTG_y, c=color, label=mutationType, alpha=0.75, edgecolors='none')
        # NOTE(review): this per-chromosome saveFile is never used; all
        # pages go into the single PdfPages opened above.
        saveFile = os.path.join(savePath,saveFilePrefix+'_plot_'+chrName+'.pdf')
        ax.legend()
        ax.grid(True)
        #plt.subplot(row_max, column_max, count)
        #plt.figure(count)
        plt.title("Rainfall plot of "+chrName)
        plt.xlabel("Genomic position")
        plt.ylabel("Intermutation distance (log(bp))")
        plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        #plt.savefig(saveFile, bbox_inches='tight')
        pdf.savefig(fig, bbox_inches='tight')
        plt.close(fig)
    pdf.close()
    #plt.show()
#def plotMultipleRainFallMutationType()
def plotHeatMapTrinucleotide(Trinucleotide_raw_File):
    """Parse a trinucleotide-context count file and draw one heatmap per
    mutation-type section.

    The file is expected to contain, per section: a header line starting
    with "M" carrying the mutation type (4th whitespace token), a line
    starting with "B" listing 3' bases (comma-separated), and data rows
    whose first comma-separated field is the 5' base followed by integer
    counts.

    NOTE(review): in the "M" branch, freq_data_array is read before it has
    been assigned when the second section header is reached -- the array is
    only built after the loop.  np.asarray(freq_data) was presumably
    intended there; confirm against a real input file.  Also the final
    loop redraws every mutation type onto the same axes and calls the
    blocking plt.show() once per type.
    """
    x_3prime = []
    y_5prime = []
    freq_data = []
    fileName = ntpath.basename(Trinucleotide_raw_File)
    sampleName = fileName.split("_")[0]
    firstLineFlag = True
    firstSecondLineFlag = True
    data_row_count = 0
    mutation_type_dict = {}
    with open(Trinucleotide_raw_File) as rawDataFile:
        for line in rawDataFile:
            line = line.strip('\n')
            if firstLineFlag==True:
                # Skip the very first line of the file.
                firstLineFlag=False
                continue
            elif line[0] == "M":
                # New mutation-type section: flush the previous section.
                if firstSecondLineFlag == False:
                    mutation_type_dict.update({mutation_type: freq_data_array})
                info = line.split()
                mutation_type=info[3]
                firstSecondLineFlag = False
            elif line[0] == "B":
                # 3'-base header row; first field is a label, rest are bases.
                info = line.split(",")
                freq_data = []
                for idx in range(1,len(info)):
                    x_3prime.append(str(info[idx]))
            else:
                # Data row: 5' base label followed by integer counts.
                info = line.split(",")
                for idx in range(len(info)):
                    if idx == 0:
                        y_5prime.append(str(info[idx]))
                        value_list = []
                    else:
                        value_list.append(int(info[idx]))
                freq_data.append(value_list)
        # Flush the final section after the file is exhausted.
        freq_data_array = np.asarray(freq_data)
        mutation_type_dict.update({mutation_type: freq_data_array})
    # Continue Here
    # Draw the heatmaps.
    fig, ax = plt.subplots()
    for key, value in mutation_type_dict.items():
        mutation_type = key
        freq_data_array = value
        im, cbar = heatmap(freq_data_array,y_5prime,x_3prime,ax=ax,cmap="YlGn",cbarlabel="InterMutation (log10)")
        fig.tight_layout()
        plt.show()
    print()
def heatmap(data, row_labels, col_labels, ax=None,
            cbar_kw=None, cbarlabel="", **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.

    Arguments:
        data       : A 2D numpy array of shape (N,M)
        row_labels : A list or array of length N with the labels
                     for the rows
        col_labels : A list or array of length M with the labels
                     for the columns
    Optional arguments:
        ax         : A matplotlib.axes.Axes instance to which the heatmap
                     is plotted.  If not provided, use current axes or
                     create a new one.
        cbar_kw    : A dictionary with arguments to
                     :meth:`matplotlib.Figure.colorbar`.  Defaults to no
                     extra arguments when omitted or None.
        cbarlabel  : The label for the colorbar
    All other arguments are directly passed on to the imshow call.

    Returns:
        (im, cbar): the created AxesImage and Colorbar.
    """
    # Guard against the mutable-default-argument pitfall: a `{}` default
    # would be one dict object shared across every call to this function.
    if cbar_kw is None:
        cbar_kw = {}
    if not ax:
        ax = plt.gca()

    # Plot the heatmap
    im = ax.imshow(data, **kwargs)

    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")

    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)

    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")

    # Turn spines off and create white grid.
    for edge, spine in ax.spines.items():
        spine.set_visible(False)
    ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)

    return im, cbar
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=("black", "white"),
                     threshold=None, **textkw):
    """
    A function to annotate a heatmap.

    Arguments:
        im         : The AxesImage to be labeled.
    Optional arguments:
        data       : Data used to annotate.  If None, the image's data is used.
        valfmt     : The format of the annotations inside the heatmap.
                     This should either use the string format method, e.g.
                     "$ {x:.2f}", or be a :class:`matplotlib.ticker.Formatter`.
        textcolors : A sequence of two color specifications.  The first is
                     used for values below a threshold, the second for those
                     above.  (A tuple default avoids the shared-mutable-
                     default pitfall of the previous list default.)
        threshold  : Value in data units according to which the colors from
                     textcolors are applied.  If None (the default) uses the
                     middle of the colormap as separation.
    Further arguments are passed on to the created text labels.

    Returns:
        list: the created matplotlib Text objects.
    """
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()

    # Normalize the threshold to the images color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max())/2.

    # Set default alignment to center, but allow it to be
    # overwritten by textkw.
    kw = dict(horizontalalignment="center",
              verticalalignment="center")
    kw.update(textkw)

    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)

    # Loop over the data and create a `Text` for each "pixel".
    # Change the text's color depending on the data.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Boolean indexes the two-element color sequence (0 or 1).
            kw.update(color=textcolors[im.norm(data[i, j]) > threshold])
            text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)

    return texts
5086362 | from __future__ import division
import numpy as np
import pandas as pd
from matplotlib.colors import to_rgb
import warnings
from logomaker.src.error_handling import check
from logomaker.src.matrix import ALPHABET_DICT
# Default color schemes for specific sets of characters.
CHARS_TO_COLORS_DICT = {
    tuple('ACGT'): 'classic',
    tuple('ACGU'): 'classic',
    tuple('ACDEFGHIKLMNPQRSTVWY'): 'weblogo_protein',
}

# RGB values used by the WebLogo-style protein scheme below.
weblogo_blue = [.02, .09, .74]
weblogo_pink = [.83, .11, .75]
weblogo_green = [.13, .83, .15]
weblogo_red = [.83, .04, .08]
weblogo_black = [0, 0, 0]

# COLOR_SCHEME_DICT provides a default set of logo colorschemes
# that can be passed to the 'color_scheme' argument of Logo()
three_ones = np.ones(3)

# Keys may group several characters into one string; values are RGB
# triples, hex strings, or named colors (see _expand_color_dict).
COLOR_SCHEME_DICT = {
    'classic': {
        'G': [1, .65, 0],
        'TU': [1, 0, 0],
        'C': [0, 0, 1],
        'A': [0, .5, 0]
    },

    'grays': {
        'A': .2 * three_ones,
        'C': .4 * three_ones,
        'G': .6 * three_ones,
        'TU': .8 * three_ones
    },

    'base_pairing': {
        'TAU': [1, .55, 0],
        'GC': [0, 0, 1]
    },

    # Suggested by <NAME>
    # https://twitter.com/rfriedman22/status/1085722502649786368
    'colorblind_safe': {
        'A': '#009980',
        'C': '#59B3E6',
        'G': '#E69B04',
        'TU': '#1A1A1A'
    },

    # Weblogo: http://weblogo.berkeley.edu/examples.html
    # BlockLogo: http://research4.dfci.harvard.edu/cvc/blocklogo/HTML/examples.php
    'weblogo_protein': {
        'RHK' : weblogo_blue, # positive charge
        'DE' : weblogo_red, # negative charge
        'QN' : weblogo_pink, # polar uncharged long
        'GCSTY' : weblogo_green, # polar uncharged short and special cases (???)
        'ILMAFVPW' : weblogo_black # hydrophobic
    },

    # Skylign: http://skylign.org/logo/example
    'skylign_protein': {
        'F': [.16, .99, .18],
        'Y': [.04, .40, .05],
        'L': [.99, .60, .25],
        'V': [1.0, .80, .27],
        'I': [.80, .60, .24],
        'H': [.40, .02, .20],
        'W': [.42, .79, .42],
        'A': [.99, .60, .42],
        'S': [.04, .14, .98],
        'T': [.17, 1.0, 1.0],
        'M': [.80, .60, .80],
        'N': [.21, .40, .40],
        'Q': [.40, .41, .79],
        'R': [.59, .02, .04],
        'K': [.40, .20, .03],
        'E': [.79, .04, .22],
        'G': [.95, .94, .22],
        'D': [.99, .05, .11],
        'P': [.10, .61, .99],
        'C': [.09, .60, .60]
    },

    # dmslogo: https://jbloomlab.github.io/dmslogo/dmslogo.colorschemes.html
    'dmslogo_charge': {
        'A': '#000000',
        'C': '#000000',
        'D': '#0000FF',
        'E': '#0000FF',
        'F': '#000000',
        'G': '#000000',
        'H': '#FF0000',
        'I': '#000000',
        'K': '#FF0000',
        'L': '#000000',
        'M': '#000000',
        'N': '#000000',
        'P': '#000000',
        'Q': '#000000',
        'R': '#FF0000',
        'S': '#000000',
        'T': '#000000',
        'V': '#000000',
        'W': '#000000',
        'Y': '#000000'
    },

    # dmslogo: https://jbloomlab.github.io/dmslogo/dmslogo.colorschemes.html
    'dmslogo_funcgroup': {
        'A': '#f76ab4',
        'C': '#ff7f00',
        'D': '#e41a1c',
        'E': '#e41a1c',
        'F': '#84380b',
        'G': '#f76ab4',
        'H': '#3c58e5',
        'I': '#12ab0d',
        'K': '#3c58e5',
        'L': '#12ab0d',
        'M': '#12ab0d',
        'N': '#972aa8',
        'P': '#12ab0d',
        'Q': '#972aa8',
        'R': '#3c58e5',
        'S': '#ff7f00',
        'T': '#ff7f00',
        'V': '#12ab0d',
        'W': '#84380b',
        'Y': '#84380b'
    },

    'hydrophobicity': {
        'RKDENQ': [0, 0, 1],
        'SGHTAP': [0, .5, 0],
        'YVMCLFIW': [0, 0, 0]
    },

    'chemistry': {
        'GSTYC': [0, .5, 0],
        'QN': [.5, 0, .5],
        'KRH': [0, 0, 1],
        'DE': [1, 0, 0],
        'AVLIPWFM': [0, 0, 0]
    },

    'charge': {
        'KRH': [0, 0, 1],
        'DE': [1, 0, 0],
        'GSTYCQNAVLIPWFM': [.5, .5, .5]
    },

    'NajafabadiEtAl2017': {
        'DEC': [.42, .16, .42],
        'PG': [.47, .47, 0.0],
        'MIWALFV': [.13, .35, .61],
        'NTSQ': [.25, .73, .28],
        'RK': [.74, .18, .12],
        'HY': [.09, .47, .46],
    },
}
def list_color_schemes():
    """
    Provides user with a list of valid color_schemes built into Logomaker.

    returns
    -------
    colors_df: (dataframe)
        A pandas dataframe listing each color_scheme and the corresponding
        set of characters for which colors are specified.
    """
    # Build one row per registered scheme, then construct the frame in
    # a single pass instead of growing it cell by cell with .loc.
    rows = []
    for scheme_name in COLOR_SCHEME_DICT.keys():
        scheme = COLOR_SCHEME_DICT[scheme_name]

        # Concatenate every key string of the scheme and sort the
        # individual characters alphabetically.
        sorted_chars = sorted(''.join(scheme.keys()))

        rows.append({'color_scheme': scheme_name,
                     'characters': ''.join(sorted_chars)})

    colors_df = pd.DataFrame(rows)
    return colors_df
def _restrict_dict(in_dict, keys_to_keep):
""" Restricts a in_dict to keys that fall within keys_to_keep. """
return dict([(k, v) for k, v in in_dict.items() if k in keys_to_keep])
def _expand_color_dict(color_dict):
""" Expands the string keys in color_dict, returning new_dict that has
the same values but whose keys are single characters. These single
characters are both uppercase and lowercase versions of the characters
in the color_dict keys. """
new_dict = {}
for key in color_dict.keys():
value = color_dict[key]
for char in key:
new_dict[char.upper()] = value
new_dict[char.lower()] = value
return new_dict
def get_rgb(color_spec):
    """
    Safely returns an RGB np.ndarray given a valid color specification.

    parameters
    ----------
    color_spec: (str or array-like)
        Either a color specification string understood by matplotlib's
        to_rgb(), or an array-like of three numbers in [0, 1].

    returns
    -------
    rgb: (np.ndarray)
        Length-3 array of RGB values.
    """
    # TODO: the following code should be reviewed for edge-cases:
    # initalizing rgb to handle referenced before assignment type error
    rgb = None

    # If color specification is a string
    if isinstance(color_spec, str):
        try:
            rgb = np.array(to_rgb(color_spec))

        # to_rgb raises ValueError when it does not recognize color_spec.
        # Catch only that (a bare `except:` would also swallow unrelated
        # failures such as KeyboardInterrupt/SystemExit) and surface a
        # logomaker-style error to the user instead.
        except ValueError:
            check(False, 'invalid choice: color_spec=%s' % color_spec)

    # Otherwise, if color_specification is array-like, it should
    # specify RGB values; convert to np.ndarray
    elif isinstance(color_spec, (list, tuple, np.ndarray)):

        # color_spec must have length 3 to be RGB
        check(len(color_spec) == 3,
              'color_scheme, if array, must be of length 3.')

        # color_spec must only contain numbers between 0 and 1
        check(all(0 <= x <= 1 for x in color_spec),
              'Values of color_spec must be between 0 and 1 inclusive.')

        # Cast color_spec as RGB
        rgb = np.array(color_spec)

    # Otherwise, throw error
    else:
        check(False, 'type(color_spec) = %s is invalid.' % type(color_spec))

    # Return RGB as an np.ndarray
    return rgb
def get_color_dict(color_scheme, chars):
    """
    Return a color_dict constructed from a user-specified color_scheme and
    a list of characters.

    parameters
    ----------
    color_scheme: (None, dict, str, or array-like)
        Color scheme specification: None (choose default from chars), a dict
        mapping character strings to colors, a named scheme / matplotlib
        color string, or an RGB triple applied to every character.

    chars: (str or array-like)
        The single characters for which colors are needed.

    returns
    -------
    color_dict: (dict)
        Maps each character in chars to a color value (RGB np.ndarray for
        the named-scheme and array branches; whatever to_rgb returns for
        the dict branch).
    """
    # Check that chars is a list
    check(isinstance(chars, (str, list, tuple, np.ndarray)),
          "chars must be a str or be array-like")

    # Check that chars has length of at least 1
    check(len(chars) >= 1, 'chars must have length >= 1')

    # Sort characters
    chars = list(chars)
    chars.sort()

    # Check that all entries in chars are strings of length 1
    for i, c in enumerate(chars):
        c = str(c)  # convert from unicode to string to work with python 2
        check(isinstance(c, str) and len(c) == 1,
              'entry number %d in chars is %s; ' % (i, repr(c)) +
              'must instead be a single character')

    # if color_scheme is None, choose default based on chars,
    # then recurse once with the concrete scheme
    if color_scheme is None:
        key = tuple(chars)
        color_scheme = CHARS_TO_COLORS_DICT.get(key, 'gray')
        color_dict = get_color_dict(color_scheme, chars)

    # otherwise, if color_scheme is a dictionary
    elif isinstance(color_scheme, dict):

        # make sure all the keys are strings
        for key in color_scheme.keys():
            check(isinstance(key, str),
                  'color_scheme dict contains a key (%s) ' % repr(key) +
                  'that is not of type str.')

        # expand the dictionary
        color_dict = _expand_color_dict(color_scheme)

        # set all values to rgb
        for key in color_dict.keys():
            color_dict[key] = to_rgb(color_dict[key])

    # otherwise, if color_scheme is a string, it must either be a valid key in
    # COLOR_SCHEME_DICT or a named matplotlib color
    elif isinstance(color_scheme, str):

        # If a valid key, get the color scheme dict and expand
        if color_scheme in COLOR_SCHEME_DICT.keys():
            tmp_dict = COLOR_SCHEME_DICT[color_scheme]
            color_dict = _expand_color_dict(tmp_dict)

            # Force each color to rgb
            for c in color_dict.keys():
                color = color_dict[c]
                rgb = to_rgb(color)
                color_dict[c] = np.array(rgb)

        # Otherwise, try to convert color_scheme to RGB value, then create
        # color_dict using keys from chars and setting all values to RGB value.
        else:
            try:
                rgb = to_rgb(color_scheme)
                color_dict = dict([(c, rgb) for c in chars])

            # to_rgb raises ValueError when it does not recognize the
            # string; catch only that (not a bare except) and raise a
            # logomaker-style error to the user.
            except ValueError:
                check(False, 'invalid choice: color_scheme=%s' % color_scheme)

    # Otherwise, if color_scheme is array-like, it should be an RGB value
    elif isinstance(color_scheme, (list, tuple, np.ndarray)):

        # color_scheme must have length 3 to be RGB
        check(len(color_scheme) == 3,
              'color_scheme, if array, must be of length 3.')

        # Cast color_scheme as RGB.
        # Bug fix: np.ndarray(color_scheme) interprets the sequence as a
        # *shape* and returns an uninitialized array; np.array is the
        # correct constructor for converting the values.
        rgb = np.array(color_scheme)

        # Construct color_dict with rgb as value for every character in chars
        color_dict = dict([(c, rgb) for c in chars])

    # Otherwise, raise error
    else:
        check(False,
              'Error: color_scheme has invalid type %s'
              % type(color_scheme))

    # If all the characters in chars are not also within the keys of
    # color_dict, add them with color 'black'
    if not set(chars) <= set(color_dict.keys()):
        for c in chars:
            if not c in color_dict:
                warnings.warn(" Warning: Character '%s' is not in color_dict. " % c +
                              "Using black.")
                color_dict[c] = to_rgb('black')

    return color_dict
| StarcoderdataPython |
9753687 | """
twinpy
deals with twin boundary
"""
# Package version string for the twinpy package.
__version__ = "1.0.0"
| StarcoderdataPython |
363447 | from batou.utils import Address
from batou.component import Component, Attribute
from batou.lib.file import File
from batou.lib.buildout import Buildout
class Test(Component):
    """Minimal batou component wiring an address into a file and a buildout.

    NOTE(review): runtime behavior depends on the batou framework; the
    comments below describe only what is visible in this snippet.
    """

    # Component attribute converted via Address; default value 'default:8080'.
    address = Attribute(Address, 'default:8080')

    def configure(self):
        """Assemble the component's sub-components."""
        # File whose content templates in this component's listen address.
        self += File('test', content='asdf {{component.address.listen}}')
        # zc.buildout environment with pinned buildout/python/setuptools.
        self += Buildout(version='2.3.1',
                         python='2.7',
                         setuptools='17.1')
| StarcoderdataPython |
1744051 | import requests
import json
from pathlib import Path
class LabelStudioAPI:
    """Thin wrapper around a local Label Studio REST API (localhost:8080).

    Every request authenticates with the user token via an
    'Authorization: Token <token>' header.
    """

    def __init__(self, token, debug=True):
        # API token sent with every request.
        self.token = token
        # NOTE(review): debug is stored but never read by any method below.
        self.debug = debug

    def Project(self, title='Demo', labelXML="""""", description='New Task', action='create', projID='1'):
        """Create, update or delete a Label Studio project.

        action: 'create' (POST), 'update' (PATCH) or 'delete' (DELETE);
        anything else just prints "Not valid action".
        projID is only used by the 'delete' action.
        labelXML is the labeling-config XML passed as 'label_config'.

        Returns the response object for 'create'/'delete'; the 'update'
        and invalid-action branches return None.
        """
        if action == 'update':
            # NOTE(review): PATCH is sent to the *collection* URL without
            # projID — this likely should be .../api/projects/<projID>/.
            # Confirm against the Label Studio API before relying on it.
            url = 'http://localhost:8080/api/projects/'
            headers = {'Authorization': 'Token ' + self.token}
            payload = {'title': title, 'description': description, 'label_config': labelXML}
            res = requests.patch(url, data=payload, headers=headers)
            print(res.status_code, res.content)
        elif action == 'create':
            url = 'http://localhost:8080/api/projects/'
            headers = {'Authorization': 'Token ' + self.token}
            payload = {'title': title, 'description': description, 'label_config': labelXML}
            res = requests.post(url, data=payload, headers=headers)
            print(res.status_code, res.content)
            # 201 Created signals success; response is returned either way.
            if res.status_code == 201:
                print('Successfully created NEW Project ' + title)
                return res
            else:
                print('Could not create NEW project')
                return res
        elif action == 'delete':
            url = 'http://localhost:8080/api/projects/' + projID + '/'
            headers = {'Authorization': 'Token ' + self.token}
            res = requests.delete(url, headers=headers)
            print(res.status_code, res.content)
            # 204 No Content signals a successful delete.
            if res.status_code == 204:
                print('Successfully deleted Project ' + projID)
                return res
            else:
                print('Could not delete')
                return res
        else:
            print("Not valid action")

    def connectS3ProjStorage(self, projID="1", title="S3", bucket_name="", region_name="US", accessKey="",
                             secretKey="", s3_url=""):
        """Attach an S3 bucket as source storage for a project.

        Returns the response object (201 on success). The bucket still has
        to be synced afterwards (see syncS3Bucket) to import the data.
        """
        url = 'http://localhost:8080/api/storages/s3'
        headers = {'Authorization': 'Token ' + self.token}
        payload = {"project": projID, "title": title, "bucket": bucket_name, "region_name": region_name,
                   "s3_endpoint": s3_url, "aws_access_key_id": accessKey, "aws_secret_access_key": secretKey,
                   "use_blob_urls": True, "presign_ttl": "1"}
        res = requests.post(url, data=payload, headers=headers)
        print(res.status_code)
        if res.status_code == 201:
            print('S3 connected')
            return res
        else:
            print('Could not connect S3')
            return res
        # NOTE(review): unreachable — both branches above return first.
        print("Sync S3 bucket to see all your data in label studio")

    def delS3ProjBucket(self, storageID='1'):
        """Remove an S3 storage connection by its storage id."""
        url = 'http://localhost:8080/api/storages/s3/' + storageID
        headers = {'Authorization': 'Token ' + self.token}
        # payload = {}
        res = requests.delete(url, headers=headers)
        print(res.status_code)
        return res

    def syncS3Bucket(self, storageID='1'):
        """Trigger a sync of the connected S3 storage into Label Studio."""
        url = 'http://localhost:8080/api/storages/s3/' + storageID + '/sync'
        headers = {'Authorization': 'Token ' + self.token}
        # payload = {}
        res = requests.post(url, headers=headers)
        print(res.status_code)
        return res

    # export Annotations in all kinds of widely accepted data annotation formats,
    # JSON, CSV, COCO, PASCAL VOC (VOC)
    def exportAnnotations(self, projID='1', exportFormat='JSON',
                          exportPath='/home/sumit/PycharmProjects/CortxProject/local/'):
        """Export a project's annotations to a local file.

        Supported exportFormat values: 'JSON', 'CSV', 'COCO', 'VOC'.
        Returns the written file's basename, or None for an unsupported
        format.

        NOTE(review): the default exportPath is a hard-coded user-specific
        directory — pass an explicit path in real use.
        """
        folder = Path(exportPath)
        url = 'http://localhost:8080/api/projects/' + projID + '/export?exportType=' + exportFormat
        headers = {'Authorization': 'Token ' + self.token}
        res = requests.get(url, headers=headers)
        print(res.content)
        if exportFormat == 'JSON':
            # Re-parse and re-dump so the file wraps the payload in
            # a top-level {'Data': ...} object.
            python_data = json.loads(res.text)
            object_name = ("annotations" + projID + ".json")
            file_name = folder / ("annotations" + projID + ".json")
            with open(file_name, 'w') as responseFile:
                json.dump({'Data': python_data}, responseFile)
            print("JSON data annotation local export completed.")
            return object_name
        elif exportFormat == 'CSV':
            object_name = ("annotations" + projID + ".csv")
            file_name = folder / ('annotations' + projID + '.csv')
            f = open(file_name, "w")
            f.write(res.text)
            f.close()
            print("CSV data annotation local export completed")
            return object_name
        elif exportFormat == 'COCO':
            # COCO and VOC exports arrive as zip archives; write raw bytes.
            object_name = ("annotationsCOCO" + projID + ".zip")
            file_name = folder / ('annotationsCOCO' + projID + '.zip')
            with open(file_name, 'wb') as out_file:
                out_file.write(res.content)
            print("COCO data annotation local export completed")
            return object_name
        elif exportFormat == 'VOC':
            object_name = ("annotationsPASCAL" + projID + ".zip")
            file_name = folder / ('annotationsPASCAL' + projID + '.zip')
            with open(file_name, 'wb') as out_file:
                out_file.write(res.content)
            print("PASCAL data annotation local export completed")
            return object_name
        else:
            print("Not supported export format, currently supported are JSON,CSV,COCO,PASCAL")
def main():
    """Demo driver: create a Label Studio project from a local label XML."""
    # NOTE(review): a real-looking API token is hard-coded here; treat it
    # as a placeholder and load it from the environment in production.
    token = '303a7e45277180b93567209aeca063088856ddf8'
    a = LabelStudioAPI(token=token)

    # loading the xml file
    # NOTE(review): 'label_ceate.xml' looks like a typo for
    # 'label_create.xml' — kept as-is because it is a runtime path; confirm.
    with open('label_ceate.xml', 'r') as f:
        label_create = f.read()

    # Bug fix: LabelStudioAPI defines Project(), not makeProject(); the
    # original call raised AttributeError at runtime.
    a.Project(title='Hello', labelXML=label_create)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
6629054 | <reponame>KRNKRS/mgear<gh_stars>10-100
"""
Shifter's Component guide class.
"""
from functools import partial
import maya.cmds as cmds
# pyMel
import pymel.core as pm
from pymel.core import datatypes
# mgear
import mgear
from mgear import string
from mgear.maya import dag, vector, transform, applyop, attribute, curve, icon
from mgear.maya.shifter import guide, gui
import mainSettingsUI as msui
from mgear.vendor.Qt import QtWidgets, QtCore
##########################################################
# COMPONENT GUIDE
##########################################################
class ComponentGuide(guide.Main):
    """Main class for component guide creation.

    This class handles all the parameters and objectDefs creation.
    It also know how to parse its own hierachy of object to retrieve position
    and transform.
    Finally it also now how to export itself as xml_node.

    Attributes:
        paramNames (list): List of parameter name cause it's actually important
            to keep them sorted.
        paramDefs (dic): Dictionary of parameter definition.
        values (dic): Dictionary of options values.
        valid (bool): We will check a few things and make sure the guide we are
            loading is up to date.
            If parameters or object are missing a warning message will be
            display and the guide should be updated.
        tra (dic): dictionary of global transform
        atra (list): list of global transform
        pos (dic): dictionary of global postion
        apos (list): list of global position
        prim (dic): dictionary of primitive
        blades (dic): dictionary of blades
        size (float): Size reference of the component. Default = .1
        save_transform (list): Transform of object name in this list will
            be saved
        save_primitive (list): Primitive of object name in this list will
            be saved
        save_blade (list): Normal and BiNormal of object will be saved
        minmax (dic): Define the min and max object for multi location objects
    """

    compType = "component"  # Component type
    compName = "component"  # Component default name
    compSide = "C"
    compIndex = 0  # Component default index
    description = ""  # Description of the component
    connectors = []
    compatible = []
    ctl_grp = ""

    # ====================================================
    # Init method.
    def __init__(self):
        # Parameters names, definition and values.
        # List of parameter name cause it's actually important to keep
        # them sorted.
        self.paramNames = []
        # Dictionary of parameter definition.
        self.paramDefs = {}
        # Dictionary of options values.
        self.values = {}
        # We will check a few things and make sure the guide we are loading is
        # up to date.
        # If parameters or object are missing a warning message will be display
        # and the guide should be updated.
        self.valid = True
        self.root = None
        self.id = None
        # parent component identification
        self.parentComponent = None
        self.parentLocalName = None
        # List and dictionary used during the creation of the component
        self.tra = {}  # dictionary of global transform
        self.atra = []  # list of global transform
        self.pos = {}  # dictionary of global postion
        self.apos = []  # list of global position
        self.prim = {}  # dictionary of primitive
        self.blades = {}
        self.size = .1
        # self.root_size = None
        # List and dictionary used to define data of the guide that
        # should be saved
        # Transform of object name in this list will be saved
        self.save_transform = []
        # Primitive of object name in this list will be saved
        self.save_primitive = []
        # Normal and BiNormal of object will be saved
        self.save_blade = []
        # Define the min and max object for multi location objects
        self.minmax = {}
        # Init the guide
        self.postInit()
        self.initialHierarchy()
        self.addParameters()

    def postInit(self):
        """Define the objects name and categories.

        Note:
            REIMPLEMENT. This method should be reimplemented in each component.
        """
        self.save_transform = ["root"]
        return

    # ====================================================
    # OBJECTS AND PARAMETERS
    def initialHierarchy(self):
        """Initial hierachy.

        It's no more than the basic set of parameters and layout
        needed for the setting property.
        """
        # Parameters --------------------------------------
        # This are the necessary parameter for component guide definition
        self.pCompType = self.addParam("comp_type", "string", self.compType)
        self.pCompName = self.addParam("comp_name", "string", self.compName)
        self.pCompSide = self.addParam("comp_side", "string", self.compSide)
        self.pCompIndex = self.addParam(
            "comp_index", "long", self.compIndex, 0)
        self.pConnector = self.addParam("connector", "string", "standard")
        self.pUIHost = self.addParam("ui_host", "string", "")
        self.pCtlGroup = self.addParam("ctlGrp", "string", "")

        # Items -------------------------------------------
        # NOTE(review): typeItems and connectorItems are built below but
        # never used in this method — confirm whether they are dead code
        # left over from a removed enum parameter.
        typeItems = [self.compType, self.compType]
        for type in self.compatible:
            typeItems.append(type)
            typeItems.append(type)

        connectorItems = ["standard", "standard"]
        for item in self.connectors:
            connectorItems.append(item)
            connectorItems.append(item)

    def addObjects(self):
        """Create the objects of the guide.

        Note:
            REIMPLEMENT. This method should be reimplemented in each component.
        """
        self.root = self.addRoot()

    def addParameters(self):
        """Create the parameter definitions of the guide.

        Note:
            REIMPLEMENT. This method should be reimplemented in each component.
        """
        return

    # ====================================================
    # SET / GET
    def setFromHierarchy(self, root):
        """Set the component guide from given hierarchy.

        Args:
            root (dagNode): The root of the hierarchy to parse.
        """
        self.root = root
        self.model = self.root.getParent(generations=-1)

        # ---------------------------------------------------
        # First check and set the settings
        if not self.root.hasAttr("comp_type"):
            mgear.log("%s is not a proper guide." %
                      self.root.longName(), mgear.sev_error)
            self.valid = False
            return

        self.setParamDefValuesFromProperty(self.root)

        # ---------------------------------------------------
        # Then get the objects
        for name in self.save_transform:
            if "#" in name:
                i = 0
                # Collect numbered locators until the max is reached or a
                # locator is missing.
                while not self.minmax[name].max > 0 or i < \
                        self.minmax[name].max:
                    localName = string.replaceSharpWithPadding(name, i)

                    node = dag.findChild(self.model, self.getName(localName))
                    if not node:
                        break

                    self.tra[localName] = node.getMatrix(worldSpace=True)
                    self.atra.append(node.getMatrix(worldSpace=True))
                    self.pos[localName] = node.getTranslation(space="world")
                    self.apos.append(node.getTranslation(space="world"))

                    i += 1

                if i < self.minmax[name].min:
                    mgear.log("Minimum of object requiered for " +
                              name + " hasn't been reached!!",
                              mgear.sev_warning)
                    self.valid = False
                    continue

            else:
                node = dag.findChild(self.model, self.getName(name))
                if not node:
                    mgear.log("Object missing : %s" % (
                        self.getName(name)), mgear.sev_warning)
                    self.valid = False
                    continue

                self.tra[name] = node.getMatrix(worldSpace=True)
                self.atra.append(node.getMatrix(worldSpace=True))
                self.pos[name] = node.getTranslation(space="world")
                self.apos.append(node.getTranslation(space="world"))

        for name in self.save_blade:

            node = dag.findChild(self.model, self.getName(name))
            if not node:
                mgear.log("Object missing : %s" % (
                    self.getName(name)), mgear.sev_warning)
                self.valid = False
                continue

            self.blades[name] = vector.Blade(node.getMatrix(worldSpace=True))

        self.size = self.getSize()

    # ====================================================
    # DRAW
    def draw(self, parent):
        """Draw the guide in the scene.

        Args:
            parent (dagNode): the parent of the component.
        """
        self.parent = parent
        self.setIndex(self.parent)

        self.addObjects()

        pm.select(self.root)

        # TODO: add function to scale the points of the icons
        # Set the size of the root
        # self.root.size = self.root_size

    def drawFromUI(self, parent):
        """Draw the guide in the scene from the UI command.

        Args:
            parent (dagNode): the parent of the component.
        """
        if not self.modalPositions():
            mgear.log("aborded", mgear.sev_warning)
            return False

        self.draw(parent)

        transform.resetTransform(self.root, r=False, s=False)

        gui.Guide_UI.inspectSettings()

        return True

    def modalPositions(self):
        """Launch a modal dialog to set position of the guide."""
        self.jNumberVal = False
        self.dirAxisVal = False
        self.jSpacVal = False
        for name in self.save_transform:

            if "#" in name:

                # Build the modal layout: joint count, direction axis and
                # spacing fields plus a Continue button.
                def _addLocMultiOptions():
                    pm.setParent(q=True)
                    pm.columnLayout(adjustableColumn=True, cal="right")
                    pm.text(label='', al="center")
                    fl = pm.formLayout()
                    jNumber = pm.intFieldGrp(v1=3, label="Joint Number")
                    pm.setParent('..')
                    pm.formLayout(fl, e=True, af=(jNumber, "left", -30))
                    dirSet = ["X", "-X", "Y", "-Y", "Z", "-Z"]
                    fl = pm.formLayout()
                    dirAxis = pm.optionMenu(label="Direction")
                    dirAxis.addMenuItems(dirSet)
                    pm.setParent('..')
                    pm.formLayout(fl, e=True, af=(dirAxis, "left", 70))
                    fl = pm.formLayout()
                    jSpac = pm.floatFieldGrp(v1=1.0, label="spacing")
                    pm.setParent('..')
                    pm.formLayout(fl, e=True, af=(jSpac, "left", -30))
                    pm.text(label='', al="center")
                    pm.button(label='Continue', c=partial(
                        _retriveOptions, jNumber, dirAxis, jSpac))

                    pm.setParent('..')

                # Read the dialog values back onto self and dismiss it.
                def _retriveOptions(jNumber, dirAxis, jSpac, *args):
                    self.jNumberVal = jNumber.getValue()[0]
                    self.dirAxisVal = dirAxis.getValue()
                    self.jSpacVal = jSpac.getValue()[0]
                    pm.layoutDialog(dismiss="Continue")

                def _show():
                    pm.layoutDialog(ui=_addLocMultiOptions)

                _show()

                if self.jNumberVal:
                    # Convert the chosen axis/spacing into a per-joint
                    # offset vector.
                    if self.dirAxisVal == "X":
                        offVec = datatypes.Vector(self.jSpacVal, 0, 0)
                    elif self.dirAxisVal == "-X":
                        offVec = datatypes.Vector(self.jSpacVal * -1, 0, 0)
                    elif self.dirAxisVal == "Y":
                        offVec = datatypes.Vector(0, self.jSpacVal, 0)
                    elif self.dirAxisVal == "-Y":
                        offVec = datatypes.Vector(0, self.jSpacVal * -1, 0)
                    elif self.dirAxisVal == "Z":
                        offVec = datatypes.Vector(0, 0, self.jSpacVal)
                    elif self.dirAxisVal == "-Z":
                        offVec = datatypes.Vector(0, 0, self.jSpacVal * -1)

                    newPosition = datatypes.Vector(0, 0, 0)
                    for i in range(self.jNumberVal):
                        newPosition = offVec + newPosition
                        localName = string.replaceSharpWithPadding(name, i)
                        self.tra[localName] = transform.getTransformFromPos(
                            newPosition)

        return True

    # ====================================================
    # UPDATE
    def setIndex(self, model):
        """Update the component index to get the next valid one.

        Args:
            model (dagNode): The parent model of the guide.
        """
        self.model = model.getParent(generations=-1)

        # Find next index available
        while True:
            obj = dag.findChild(self.model, self.getName("root"))
            if not obj or (self.root and obj == self.root):
                break
            self.setParamDefValue("comp_index", self.values["comp_index"] + 1)

    def symmetrize(self):
        """Inverse the transform of each element of the guide."""
        if self.values["comp_side"] not in ["R", "L"]:
            mgear.log("Can't symmetrize central component", mgear.sev_error)
            return False

        # Mirror the side token in every string parameter (e.g. L -> R).
        for name, paramDef in self.paramDefs.items():
            if paramDef.valueType == "string":
                self.setParamDefValue(
                    name, mgear.string.convertRLName(self.values[name]))
        for name, t in self.tra.items():
            self.tra[name] = transform.getSymmetricalTransform(t)
        for name, blade in self.blades.items():
            self.blades[name] = vector.Blade(
                transform.getSymmetricalTransform(blade.transform))

        return True

    def rename(self, root, newName, newSide, newIndex):
        """Rename the component.

        Args:
            root (dagNode): The parent of the component
            newName (str): The new name.
            newSide (str): Side of the component.
            newIndex (int): index of the comonent.
        """
        self.parent = root

        # store old properties
        oldIndex = self.parent.attr("comp_index").get()
        oldSide = self.parent.attr("comp_side").get()
        oldName = self.parent.attr("comp_name").get()
        oldSideIndex = oldSide + str(oldIndex)

        # change attr side in root
        self.parent.attr("comp_name").set(newName)
        self.parent.attr("comp_side").set(newSide)

        # set new index and update to the next valid
        self.setParamDefValue("comp_name", newName)
        self.setParamDefValue("comp_side", newSide)
        self.setParamDefValue("comp_index", newIndex)
        self.setIndex(self.parent)

        self.parent.attr("comp_index").set(self.values["comp_index"])

        # objList = dag.findComponentChildren(self.parent,
        #                                     oldName, oldSideIndex)
        # NOTE: Experimenta using findComponentChildren2
        objList = dag.findComponentChildren2(
            self.parent, oldName, oldSideIndex)
        newSideIndex = newSide + str(self.values["comp_index"])
        objList.append(self.parent)
        for obj in objList:
            suffix = obj.name().split("_")[-1]
            if len(obj.name().split("_")) == 3:
                new_name = "_".join([newName, newSideIndex, suffix])
            else:
                subIndex = obj.name().split("_")[-2]
                new_name = "_".join([newName, newSideIndex, subIndex, suffix])

            pm.rename(obj, new_name)

    # ====================================================
    # ELEMENTS
    def addRoot(self):
        """Add a root object to the guide.

        This method can initialize the object or draw it.
        Root object is a simple transform with a specific display and a setting
        property.

        Returns:
            dagNode: The root
        """
        if "root" not in self.tra.keys():
            self.tra["root"] = transform.getTransformFromPos(
                datatypes.Vector(0, 0, 0))

        self.root = icon.guideRootIcon(self.parent, self.getName(
            "root"), color=13, m=self.tra["root"])

        # Add Parameters from parameter definition list.
        for scriptName in self.paramNames:
            paramDef = self.paramDefs[scriptName]
            paramDef.create(self.root)

        return self.root

    def addLoc(self, name, parent, position=None):
        """Add a loc object to the guide.

        This mehod can initialize the object or draw it.
        Loc object is a simple null to define a position or a tranformation in
        the guide.

        Args:
            name (str): Local name of the element.
            parent (dagNode): The parent of the element.
            position (vector): The default position of the element.

        Returns:
            dagNode: The locator object.
        """
        if name not in self.tra.keys():
            self.tra[name] = transform.getTransformFromPos(position)

        if name in self.prim.keys():
            # this functionality is not implemented. The actual design from
            # softimage Gear should be review to fit in Maya.
            loc = self.prim[name].create(
                parent, self.getName(name), self.tra[name], color=17)
        else:
            loc = icon.guideLocatorIcon(parent, self.getName(
                name), color=17, m=self.tra[name])

        return loc

    def addLocMulti(self, name, parent, updateParent=True):
        """Add multiple loc objects to the guide.

        This method can initialize the object or draw it.
        Loc object is a simple null to define a position or a tranformation in
        the guide.

        Args:
            name (str): Local name of the element.
            parent (dagNode): The parent of the element.
            updateParent (bool): if True update the parent reference. False,
                keep the same for all loc.

        Returns:
            list of dagNode: The created loc objects in a list.
        """
        if "#" not in name:
            mgear.log(
                "You need to put a '#' in the name of multiple location.",
                mgear.sev_error)
            return False
        locs = []
        i = 0
        # Keep drawing numbered locators while a stored transform exists.
        while True:
            localName = string.replaceSharpWithPadding(name, i)
            if localName not in self.tra.keys():
                break

            loc = icon.guideLocatorIcon(parent, self.getName(
                localName), color=17, m=self.tra[localName])
            locs.append(loc)
            if updateParent:
                parent = loc

            i += 1

        return locs

    def addBlade(self, name, parentPos, parentDir):
        """Add a blade object to the guide.

        This mehod can initialize the object or draw it.
        Blade object is a 3points curve to define a plan in the guide.

        Args:
            name (str): Local name of the element.
            parentPos (dagNode): The parent of the element.
            parentDir (dagNode): The direction constraint of the element.

        Returns:
            dagNode: The created blade curve.
        """
        if name not in self.blades.keys():
            self.blades[name] = vector.Blade(
                transform.getTransformFromPos(datatypes.Vector(0, 0, 0)))
            offset = False
        else:
            offset = True

        dist = .6 * self.root.attr("scaleX").get()
        blade = icon.guideBladeIcon(parent=parentPos, name=self.getName(
            name), lenX=dist, color=13, m=self.blades[name].transform)
        aim_cns = applyop.aimCns(blade, parentDir, axis="xy", wupType=2,
                                 wupVector=[0, 1, 0], wupObject=self.root,
                                 maintainOffset=offset)
        pm.pointConstraint(parentPos, blade)
        offsetAttr = attribute.addAttribute(
            blade, "bladeRollOffset", "float", aim_cns.attr("offsetX").get())
        pm.connectAttr(offsetAttr, aim_cns.attr("offsetX"))
        attribute.lockAttribute(blade)

        return blade

    def addDispCurve(self, name, centers=[], degree=1):
        """Add a display curve object to the guide.

        Display curve object is a simple curve to show the connection between
        different guide element..

        Args:
            name (str): Local name of the element.
            centers (list of dagNode): List of object to define the curve.
            degree (int): Curve degree. Default 1 = lineal.

        Returns:
            dagNode: The newly creted curve.
        """
        return icon.connection_display_curve(self.getName(name),
                                             centers,
                                             degree)

    # ====================================================
    # MISC
    def getObjects(self, model, includeShapes=True):
        """Get the objects of the component.

        Args:
            model(dagNode): The root of the component.
            includeShapes (boo): If True, will include the shapes.

        Returns:
            list of dagNode: The list of the objects.
        """
        objects = {}

        if includeShapes:
            children = pm.listRelatives(model, ad=True)
        else:
            children = pm.listRelatives(model, ad=True, typ='transform')

        pm.select(children)
        # Strip the "<fullName>_" prefix and use the remainder as the key.
        for child in pm.ls(self.fullName + "_*", selection=True):
            objects[child[child.index(
                self.fullName + "_") + len(self.fullName + "_"):]] = child

        return objects

    def getObjects2(self, model, includeShapes=True):
        """Get the objects of the component.

        Args:
            model(dagNode): The root of the component.
            includeShapes (boo): If True, will include the shapes.

        Returns:
            list of dagNode: The list of the objects.
        """
        objects = {}
        if includeShapes:
            children = [pm.PyNode(x) for x in cmds.listRelatives(
                model.longName(), ad=True, fullPath=True)]
        else:
            children = [pm.PyNode(x) for x in cmds.listRelatives(
                model.longName(), ad=True, typ='transform', fullPath=True)]

        # NOTE(review): keys here are the last "_"-separated token of the
        # long name, unlike getObjects which keys by the post-prefix
        # remainder — confirm which convention callers expect.
        for child in children:
            cName = child.longName()
            if cName.startswith(self.fullName):
                objects[cName.split("_")[-1]] = child

        return objects

    def getObjects3(self, model):
        """
        NOTE: Experimental function

        Get the objects of the component.
        This version only get the transforms by Name using Maya Cmds

        Args:
            model(dagNode): The root of the component.

        Returns:
            list of dagNode: The list of the objects.
        """
        objects = {}
        for child in cmds.ls(self.fullName + "_*", type="transform"):
            if pm.PyNode(child).getParent(-1) == model:
                objects[child[child.index(
                    self.fullName + "_") + len(self.fullName + "_"):]] = child

        return objects

    def addMinMax(self, name, minimum=1, maximum=-1):
        """Add minimun and maximum number of locator

        When we use the modal menu.
        """
        if "#" not in name:
            mgear.log(
                "Invalid definition for min/max. You should have a '#' in "
                "the name", mgear.sev_error)
        self.minmax[name] = MinMax(minimum, maximum)

    def getSize(self):
        """Get the size of the component.

        Returns:
            float: the size
        """
        size = .01
        # The size is the largest distance from the root to any stored
        # position, with a floor of .01.
        for pos in self.apos:
            d = vector.getDistance(self.pos["root"], pos)
            size = max(size, d)
        size = max(size, .01)

        return size

    def getName(self, name):
        """Return the fullname of given element of the component.

        Args:
            name (str): Localname of the element.

        Returns:
            str: Element fullname.
        """
        return self.fullName + "_" + name

    def getFullName(self):
        """Return the fullname of the component.

        Returns:
            str: Component fullname.
        """
        return self.values["comp_name"] + "_" + self.values["comp_side"] + \
            str(self.values["comp_index"])

    def getType(self):
        """Return the type of the component.

        Returns:
            str: component type.
        """
        return self.compType

    def getObjectNames(self):
        """Get the objects names of the component

        Returns:
            set: The names set.
        """
        names = set()
        names.update(self.save_transform)
        names.update(self.save_primitive)
        names.update(self.save_blade)

        return names

    def getVersion(self):
        """Get the version of the component.

        Returns:
            str: versionof the component.
        """
        return ".".join([str(i) for i in self.version])

    # Read-only convenience properties over the getters above.
    fullName = property(getFullName)
    type = property(getType)
    objectNames = property(getObjectNames)
##########################################################
# OTHER CLASSES
##########################################################
class MinMax(object):
    """
    Minimun and maximum class.

    This class is used in addMinMax method.

    Attributes:
        minimum (int): minimum.
        maximum (int): maximum.
    """

    def __init__(self, minimum=1, maximum=-1):
        # Expose the bounds under the short names read by
        # ComponentGuide (self.min / self.max).
        self.min, self.max = minimum, maximum
##########################################################
# Setting Page
##########################################################
class mainSettingsTab(QtWidgets.QDialog, msui.Ui_Form):
    """Qt widget for the 'Main Settings' tab, built from the msui form."""

    # ============================================
    # INIT
    def __init__(self, parent=None):
        # NOTE(review): parent is accepted but not forwarded to the
        # QDialog constructor — confirm whether that is intentional.
        super(mainSettingsTab, self).__init__()
        self.setupUi(self)
class componentMainSettings(QtWidgets.QDialog, guide.helperSlots):
    """Settings dialog exposing the common (main) component guide options."""

    # Qt signal emitted with an int when a value changes.
    valueChanged = QtCore.Signal(int)

    def __init__(self, parent=None):
        super(componentMainSettings, self).__init__()
        # the inspectSettings function set the current selection to the
        # component root before open the settings dialog
        self.root = pm.selected()[0]

        self.mainSettingsTab = mainSettingsTab()

        self.create_controls()
        self.populate_controls()
        self.create_layout()
        self.create_connections()

        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)

    def create_controls(self):
        """
        Create the controls for the component base
        """
        self.tabs = QtWidgets.QTabWidget()
        self.tabs.setObjectName("settings_tab")

        # Close Button
        self.close_button = QtWidgets.QPushButton("Close")

    def populate_controls(self):
        """Populate Controls attribute values

        Populate the controls values from the custom attributes
        of the component.
        """
        # populate tab
        self.tabs.insertTab(0, self.mainSettingsTab, "Main Settings")

        # populate main settings
        self.mainSettingsTab.name_lineEdit.setText(
            self.root.attr("comp_name").get())
        sideSet = ["C", "L", "R"]
        sideIndex = sideSet.index(self.root.attr("comp_side").get())
        self.mainSettingsTab.side_comboBox.setCurrentIndex(sideIndex)
        self.mainSettingsTab.componentIndex_spinBox.setValue(
            self.root.attr("comp_index").get())
        # Mirror the boolean guide attribute into the checkbox state.
        if self.root.attr("useIndex").get():
            self.mainSettingsTab.useJointIndex_checkBox.setCheckState(
                QtCore.Qt.Checked)
        else:
            self.mainSettingsTab.useJointIndex_checkBox.setCheckState(
                QtCore.Qt.Unchecked)
        self.mainSettingsTab.parentJointIndex_spinBox.setValue(
            self.root.attr("parentJointIndex").get())
        self.mainSettingsTab.host_lineEdit.setText(
            self.root.attr("ui_host").get())
        self.mainSettingsTab.subGroup_lineEdit.setText(
            self.root.attr("ctlGrp").get())

    def create_layout(self):
        """
        Create the layout for the component base settings
        """
        return

    def create_connections(self):
        """
        Create the slots connections to the controls functions
        """
        self.close_button.clicked.connect(self.close_settings)

        # Any change to name/side/index re-runs the component rename logic.
        self.mainSettingsTab.name_lineEdit.editingFinished.connect(
            self.updateComponentName)
        self.mainSettingsTab.side_comboBox.currentIndexChanged.connect(
            self.updateComponentName)
        self.mainSettingsTab.componentIndex_spinBox.valueChanged.connect(
            self.updateComponentName)
        # Remaining controls write straight through to guide attributes
        # via the generic helperSlots updaters.
        self.mainSettingsTab.useJointIndex_checkBox.stateChanged.connect(
            partial(self.updateCheck,
                    self.mainSettingsTab.useJointIndex_checkBox,
                    "useIndex"))
        self.mainSettingsTab.parentJointIndex_spinBox.valueChanged.connect(
            partial(self.updateSpinBox,
                    self.mainSettingsTab.parentJointIndex_spinBox,
                    "parentJointIndex"))
        self.mainSettingsTab.host_pushButton.clicked.connect(
            partial(self.updateHostUI,
                    self.mainSettingsTab.host_lineEdit,
                    "ui_host"))
        self.mainSettingsTab.subGroup_lineEdit.editingFinished.connect(
            partial(self.updateLineEdit,
                    self.mainSettingsTab.subGroup_lineEdit,
                    "ctlGrp"))
| StarcoderdataPython |
1957355 | from .source import SourceOfNews, NewsManager
| StarcoderdataPython |
3202497 | <reponame>nanuxbe/django
from datetime import date, timedelta
from django.contrib import admin
from django.test import TestCase
from .admin import CorporateMemberAdmin
from .models import CorporateMember
class CorporateMemberAdminTests(TestCase):
    """Tests for the admin's membership-expiration display column."""

    @classmethod
    def setUpTestData(cls):
        # One shared member; each test adds its own invoices to it.
        cls.member = CorporateMember.objects.create(
            display_name='Corporation',
            billing_name='foo',
            billing_email='<EMAIL>',
            contact_email='<EMAIL>',
            membership_level=2,
        )

    def test_membership_expires(self):
        """membership_expires() colors by the latest invoice expiration:
        None without dated invoices, red past, orange today, green future."""
        today = date.today()
        yesterday = date.today() - timedelta(days=1)
        plus_thirty_one_days = today + timedelta(days=31)
        modeladmin = CorporateMemberAdmin(CorporateMember, admin.site)
        # No invoices at all -> nothing to display.
        self.assertIsNone(modeladmin.membership_expires(self.member))
        # Invoice without an expiration date still displays nothing.
        self.member.invoice_set.create(amount=500)
        self.assertIsNone(modeladmin.membership_expires(self.member))
        self.member.invoice_set.create(amount=500, expiration_date=yesterday)
        self.assertIn('red', modeladmin.membership_expires(self.member))
        self.member.invoice_set.create(amount=500, expiration_date=today)
        self.assertIn('orange', modeladmin.membership_expires(self.member))
        self.member.invoice_set.create(amount=500, expiration_date=plus_thirty_one_days)
        self.assertIn('green', modeladmin.membership_expires(self.member))
| StarcoderdataPython |
6596065 | """Backend objects for saving and loading data
DataStores provide a uniform interface for saving and loading data in different
formats. They should not be used directly, but rather through Dataset objects.
"""
from .memory import InMemoryDataStore
from .netCDF4_ import NetCDF4DataStore
from .pydap_ import PydapDataStore
from .scipy_ import ScipyDataStore
| StarcoderdataPython |
6557895 | <filename>intensio/test/python/basic/output/basicRAT-example/core/survey.py
# -*- coding: utf-8 -*-
import ctypes
import getpass
import os
import platform
import socket
import urllib
import uuid
def fRTWRMDwKMKaMMycbCQhFAtbEWeTzpNy(plat_type):
    """Collect a host survey and return it as a formatted multi-line string.

    Gathers OS/platform details, local and external network addresses, the
    MAC address, the current user and whether that user has admin rights.

    Args:
        plat_type: a ``sys.platform``-style string ('win...', 'linux...',
            'darwin...') selecting the admin-privilege check.

    Returns:
        str: human-readable survey report.

    Note:
        Python 2 code (``urllib.urlopen``); the external-IP lookup performs
        network I/O against public "what is my IP" services.
    """
    os_string = platform.platform()
    processor = platform.processor()
    architecture = platform.architecture()[0]
    username = getpass.getuser()
    hostname = socket.gethostname()
    fqdn = socket.getfqdn()
    internal_ip = socket.gethostbyname(hostname)
    mac_int = uuid.getnode()
    # Render the 48-bit MAC integer as colon-separated hex byte pairs.
    mac_address = ':'.join(("%012X" % mac_int)[i:i + 2] for i in range(0, 12, 2))

    # Query public services until one returns a plausible dotted quad
    # (length strictly between 7 and 15 characters).
    ip_services = ['ipinfo.io/ip', 'icanhazip.com', 'ident.me',
                   'ipecho.net/plain', 'myexternalip.com/raw']
    external_ip = ''
    for service in ip_services:
        try:
            external_ip = urllib.urlopen('http://' + service).read().rstrip()
        except IOError:
            pass
        if external_ip and (6 < len(external_ip) < 16):
            break

    # Platform-specific admin check.
    # BUG FIX: the original tested ``platform.startswith('darwin')`` -- the
    # *module*, which has no startswith attribute -- raising AttributeError
    # on macOS.  It must test the ``plat_type`` string.
    is_admin = False
    if plat_type.startswith('win'):
        is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
    elif plat_type.startswith('linux') or plat_type.startswith('darwin'):
        is_admin = os.getuid() == 0
    admin_access = 'Yes' if is_admin else 'No'

    survey = '''
System Platform - {}
Processor - {}
Architecture - {}
Hostname - {}
FQDN - {}
Internal IP - {}
External IP - {}
MAC Address - {}
Current User - {}
Admin Access - {}
'''.format(os_string, processor, architecture,
           hostname, fqdn, internal_ip, external_ip, mac_address,
           username, admin_access)

    return survey
| StarcoderdataPython |
8034940 | import time
def timeit(f):
    """Decorator that reports how long each call to ``f`` takes.

    Measures wall-clock time with ``time.perf_counter``, prints
    ``"<name> took: <seconds>s"`` after every invocation, and passes the
    wrapped function's return value through unchanged.
    """
    def timed_call(*args, **kwargs):
        began = time.perf_counter()
        outcome = f(*args, **kwargs)
        print(f"{f.__name__} took: {time.perf_counter() - began}s")
        return outcome
    return timed_call
1963266 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import numpy as np
import pickle
from sklearn.metrics import confusion_matrix
def softmax(array):
    """Numerically stable softmax over a 1-D score array.

    Subtracting the maximum before exponentiating avoids float overflow
    for large scores while leaving the result mathematically unchanged.

    Args:
        array: 1-D numpy array of raw class scores.

    Returns:
        1-D numpy array of the same shape summing to 1.
    """
    shifted = np.exp(array - np.max(array))
    return shifted / np.sum(shifted)
# Late-fusion evaluation of a two-stream TSN: combine the RGB and the
# optical-flow streams' per-clip scores and report per-class accuracy.
# Each pickle maps clip id -> (raw class scores, ground-truth label).
res_rgb_path = '../work_dirs/mydata/tsn_2d_rgb_resnet101_seg_3_f1s1_b32_g8/test.pkl'
res_flow_path = '../work_dirs/mydata/tsn_2d_flow_resnet101_seg_3_f1s1_b32_g8_lr_0.005/test.pkl'
res_rgb_data = pickle.load(open(res_rgb_path,'rb'))
res_flow_data = pickle.load(open(res_flow_path,'rb'))
print('data load ok')
# Weight of the flow stream relative to the RGB stream when fusing.
ratio = 2
label = []
pre_rgb = []
pre_flow = []
pre = []
for k in res_rgb_data.keys():
    label.append(res_rgb_data[k][1])
    # Per-stream predictions from the raw scores.
    pre_rgb.append(np.argmax(res_rgb_data[k][0]))
    pre_flow.append(np.argmax(res_flow_data[k][0]))
    # Fused prediction: weighted sum of the streams' softmax distributions.
    pre.append(np.argmax(ratio*softmax(res_flow_data[k][0])+softmax(res_rgb_data[k][0])))
label=np.array(label)
pre_rgb = np.array(pre_rgb)
pre_flow = np.array(pre_flow)
# Per-class accuracy: RGB stream alone.
cls_conf = []
right = pre_rgb == label
for cls in np.unique(label):
    cls_conf.append(sum(right[label==cls])/sum(label==cls))
print(cls_conf)
# Per-class accuracy: flow stream alone.
cls_conf = []
right = pre_flow == label
for cls in np.unique(label):
    cls_conf.append(sum(right[label==cls])/sum(label==cls))
print(cls_conf)
# Per-class accuracy: fused prediction.
cls_conf = []
right = pre == label
for cls in np.unique(label):
    cls_conf.append(sum(right[label==cls])/sum(label==cls))
print(cls_conf)
cls_conf = np.array(cls_conf)
# Mean fused accuracy over the classes that exceed 0.5 accuracy.
print(cls_conf[cls_conf>0.5].mean())
print(confusion_matrix(label,pre))
1732952 | <filename>waverly_project/urls.py
from django.conf.urls import url
from django.contrib import admin
from waverly import views
# URL routing for the waverly app: admin, login/index, account pages, and
# the podcast page with its two voice endpoints.
urlpatterns = [
    url(r'^admin/', admin.site.urls), # admin
    url(r'^$', views.index, name="index"), #login
    url(r'^(?P<user_name>[\w\-]+)/$', views.account, name="account"), #account page
    url(r'^podcast/(?P<podcast_id>[\w\-]+)/$', views.podcast, name="podcast_page"), #podcast page
    url(r'^podcast/(?P<podcast_id>[\w\-]+)/voicestatus/$', views.voicestatus, name="voicestatus_endpoint"), #check voices availablity per podcast
    url(r'^podcast/(?P<podcast_id>[\w\-]+)/voiceadd/(?P<voice_id>[\w\-]+)/$', views.voiceadd, name="voiceadd_endpoint"), #process new voice end point
]
| StarcoderdataPython |
90568 | <gh_stars>0
import argparse
import os.path
import time
import serial
import numpy as np
from struct import pack,unpack
alt_conv_factor = 3.2932160
crctable = \
b"\x00\x00\x89\x11\x12\x23\x9b\x32\x24\x46\xad\x57\x36\x65\xbf\x74\
\x48\x8c\xc1\x9d\x5a\xaf\xd3\xbe\x6c\xca\xe5\xdb\x7e\xe9\xf7\xf8\
\x19\x09\x90\x18\x0b\x2a\x82\x3b\x3d\x4f\xb4\x5e\x2f\x6c\xa6\x7d\
\x51\x85\xd8\x94\x43\xa6\xca\xb7\x75\xc3\xfc\xd2\x67\xe0\xee\xf1\
\x32\x12\xbb\x03\x20\x31\xa9\x20\x16\x54\x9f\x45\x04\x77\x8d\x66\
\x7a\x9e\xf3\x8f\x68\xbd\xe1\xac\x5e\xd8\xd7\xc9\x4c\xfb\xc5\xea\
\x2b\x1b\xa2\x0a\x39\x38\xb0\x29\x0f\x5d\x86\x4c\x1d\x7e\x94\x6f\
\x63\x97\xea\x86\x71\xb4\xf8\xa5\x47\xd1\xce\xc0\x55\xf2\xdc\xe3\
\x64\x24\xed\x35\x76\x07\xff\x16\x40\x62\xc9\x73\x52\x41\xdb\x50\
\x2c\xa8\xa5\xb9\x3e\x8b\xb7\x9a\x08\xee\x81\xff\x1a\xcd\x93\xdc\
\x7d\x2d\xf4\x3c\x6f\x0e\xe6\x1f\x59\x6b\xd0\x7a\x4b\x48\xc2\x59\
\x35\xa1\xbc\xb0\x27\x82\xae\x93\x11\xe7\x98\xf6\x03\xc4\x8a\xd5\
\x56\x36\xdf\x27\x44\x15\xcd\x04\x72\x70\xfb\x61\x60\x53\xe9\x42\
\x1e\xba\x97\xab\x0c\x99\x85\x88\x3a\xfc\xb3\xed\x28\xdf\xa1\xce\
\x4f\x3f\xc6\x2e\x5d\x1c\xd4\x0d\x6b\x79\xe2\x68\x79\x5a\xf0\x4b\
\x07\xb3\x8e\xa2\x15\x90\x9c\x81\x23\xf5\xaa\xe4\x31\xd6\xb8\xc7\
\xc8\x48\x41\x59\xda\x6b\x53\x7a\xec\x0e\x65\x1f\xfe\x2d\x77\x3c\
\x80\xc4\x09\xd5\x92\xe7\x1b\xf6\xa4\x82\x2d\x93\xb6\xa1\x3f\xb0\
\xd1\x41\x58\x50\xc3\x62\x4a\x73\xf5\x07\x7c\x16\xe7\x24\x6e\x35\
\x99\xcd\x10\xdc\x8b\xee\x02\xff\xbd\x8b\x34\x9a\xaf\xa8\x26\xb9\
\xfa\x5a\x73\x4b\xe8\x79\x61\x68\xde\x1c\x57\x0d\xcc\x3f\x45\x2e\
\xb2\xd6\x3b\xc7\xa0\xf5\x29\xe4\x96\x90\x1f\x81\x84\xb3\x0d\xa2\
\xe3\x53\x6a\x42\xf1\x70\x78\x61\xc7\x15\x4e\x04\xd5\x36\x5c\x27\
\xab\xdf\x22\xce\xb9\xfc\x30\xed\x8f\x99\x06\x88\x9d\xba\x14\xab\
\xac\x6c\x25\x7d\xbe\x4f\x37\x5e\x88\x2a\x01\x3b\x9a\x09\x13\x18\
\xe4\xe0\x6d\xf1\xf6\xc3\x7f\xd2\xc0\xa6\x49\xb7\xd2\x85\x5b\x94\
\xb5\x65\x3c\x74\xa7\x46\x2e\x57\x91\x23\x18\x32\x83\x00\x0a\x11\
\xfd\xe9\x74\xf8\xef\xca\x66\xdb\xd9\xaf\x50\xbe\xcb\x8c\x42\x9d\
\x9e\x7e\x17\x6f\x8c\x5d\x05\x4c\xba\x38\x33\x29\xa8\x1b\x21\x0a\
\xd6\xf2\x5f\xe3\xc4\xd1\x4d\xc0\xf2\xb4\x7b\xa5\xe0\x97\x69\x86\
\x87\x77\x0e\x66\x95\x54\x1c\x45\xa3\x31\x2a\x20\xb1\x12\x38\x03\
\xcf\xfb\x46\xea\xdd\xd8\x54\xc9\xeb\xbd\x62\xac\xf9\x9e\x70\x8f"
def calc_crc(crc_buff):
    """Compute the 16-bit table-driven CRC of ``crc_buff``.

    ``crc_buff`` is an iterable of byte values; the lookup table is the
    module-level ``crctable`` reinterpreted as 256 little-endian uint16
    entries.  Returns a numpy uint16-compatible value in 0..0xFFFF.
    """
    # start crc as 0x0001
    crc = np.frombuffer(b"\x01\x00",np.uint16)
    crc = crc[0]
    table = np.frombuffer(crctable,np.uint16)
    for x in crc_buff:
        # Standard table CRC step: index by (high byte XOR data byte),
        # then shift in the table entry; mask keeps the value 16-bit.
        index = (np.right_shift(crc,8) ^ x)
        # print("{},{},{},{}".format(crc,x,index,table[index]))
        crc = (np.left_shift(crc,8) ^ table[index]) & 0xFFFF
    return crc
def gps_ang_to_float(min, frac):
    """Convert a GPS angle given as whole minutes (``min``) plus a
    fractional part in 1e-5 minute units (``frac``) to decimal degrees.

    Note: the parameter name ``min`` shadows the builtin inside this
    function; it is kept for interface compatibility.
    """
    whole_degrees = np.int16(min / 60.0)
    leftover_minutes = min - whole_degrees * 60.0 + frac / 100000.0
    return whole_degrees + leftover_minutes / 60.0
def outfile_name(args):
    """Choose the decoded-output filename.

    An explicit ``-o`` value wins; otherwise derive the name from the
    input file; with neither, fall back to a Unix-timestamp ``.log`` name.
    """
    if args.outfile is not None:
        return args.outfile
    if args.infile is not None:
        return "decoded_" + args.infile
    return "{}.log".format(int(time.time()))
def find_header(buf):
    """Return the index of the first packet header (0x51 0xAC) in ``buf``.

    Args:
        buf: ``bytes`` buffer of raw serial data.

    Returns:
        Index of the 0x51 byte of the first complete header, or -1 if no
        header is present.

    Bug fix: the original byte-at-a-time state machine reset its state on
    any non-0xAC byte after a 0x51 -- including another 0x51 -- so a header
    preceded by a stray 0x51 (e.g. ``51 51 AC``) was never found.
    ``bytes.find`` checks every position, handling overlapping candidates
    correctly, and already returns -1 on failure.
    """
    return buf.find(b'\x51\xac')
def check_for_msg(buf, header_loc):
    """Return True when ``buf`` holds a complete packet at ``header_loc``.

    Packet layout: two header bytes, a length byte at ``header_loc + 2``,
    then ``length`` payload bytes (id + data + CRC).
    """
    # Too short to even hold the header, length byte and a minimal payload.
    if len(buf) < header_loc + 6:
        return False
    pkt_len = buf[header_loc + 2]
    # Complete packet = 3 fixed bytes plus pkt_len payload bytes.
    return len(buf) >= header_loc + 3 + pkt_len
def process_msg(buf, header_loc, outfile):
    """Decode one packet from ``buf`` at ``header_loc`` and log it.

    Verifies the 16-bit CRC, dispatches on the message id byte to unpack
    the payload, prints a human-readable line and appends a colon-separated
    record to ``outfile``.  Returns ``buf`` with the consumed bytes removed
    (on CRC failure or a malformed length, only the 2 header bytes are
    dropped so scanning can resume).

    NOTE(review): the local name ``id`` shadows the builtin within this
    function (kept byte-identical here).
    """
    # extract msg components
    pkt_len = buf[header_loc+2]
    if pkt_len < 3:
        # Remove just the header and try again
        buf = buf[(header_loc+2):]
        return buf
    id = buf[header_loc+3]
    data = buf[(header_loc+4):(header_loc + 4 + pkt_len - 3)]
    crc_buff = buf[(header_loc+3):(header_loc+3+pkt_len-2)]
    crc_bytes = buf[(header_loc+3+pkt_len-2):(header_loc+3+pkt_len)]
    # CRC transmitted big-endian: high byte first.
    crc = np.left_shift(crc_bytes[0],8) + crc_bytes[1]
    # verify no msg corruption
    if crc == calc_crc(crc_buff):
        # process message by type
        if id == int('0x10',16): # Raw Position Msg type
            # Lat/lon as signed whole minutes + 1e-5 minute fraction.
            lat_min = unpack('h',pack('BB', data[0], data[1]))
            lat_frac = unpack('i',pack('BBBB', data[2], data[3], data[4], data[5]))
            lon_min = unpack('h',pack('BB', data[6], data[7]))
            lon_frac = unpack('i',pack('BBBB', data[8], data[9], data[10], data[11]))
            alt = unpack('H',pack('BB', data[12], data[13]))
            print("Position (Raw) Msg: Lat = {:0.7f}, Lon = {:0.7f}, Alt = {:0.2f}\n".format(gps_ang_to_float(lat_min[0],lat_frac[0]), gps_ang_to_float(lon_min[0],lon_frac[0]), (alt[0]/alt_conv_factor)-900),end = '')
            outfile.write("{:d}:{:0.7f}:{:0.7f}:{:0.2f}\n".format(id, gps_ang_to_float(lat_min[0],lat_frac[0]), gps_ang_to_float(lon_min[0],lon_frac[0]), (alt[0]/alt_conv_factor)-900))
        elif id == int('0x11',16): #Extrapolated Position Msg type
            lat_min = unpack('h',pack('BB', data[0], data[1]))
            lat_frac = unpack('i',pack('BBBB', data[2], data[3], data[4], data[5]))
            lon_min = unpack('h',pack('BB', data[6], data[7]))
            lon_frac = unpack('i',pack('BBBB', data[8], data[9], data[10], data[11]))
            alt = unpack('H',pack('BB', data[12], data[13]))
            print("Position (Extrapolated) Msg: Lat = {:0.7f}, Lon = {:0.7f}, Alt = {:0.2f}\n".format(gps_ang_to_float(lat_min[0],lat_frac[0]), gps_ang_to_float(lon_min[0],lon_frac[0]), (alt[0]/alt_conv_factor)-900),end = '')
            outfile.write("{:d}:{:0.7f}:{:0.7f}:{:0.2f}\n".format(id, gps_ang_to_float(lat_min[0],lat_frac[0]), gps_ang_to_float(lon_min[0],lon_frac[0]), (alt[0]/alt_conv_factor)-900))
        elif id == int('0x20',16): #Orientation Msg type
            # Angles sent as signed centi-degrees.
            heading = unpack('h',pack('BB', data[0], data[1]))
            roll = unpack('h',pack('BB', data[2], data[3]))
            pitch = unpack('h',pack('BB', data[4], data[5]))
            print("Orientation Msg: Heading = {:0.2f}, Roll = {:0.2f}, Pitch = {:0.2f}\n".format(heading[0]/100.0, roll[0]/100.0, pitch[0]/100.0),end = '')
            outfile.write("{:d}:{:0.2f}:{:0.2f}:{:0.2f}\n".format(id, heading[0]/100.0, roll[0]/100.0, pitch[0]/100.0))
        elif id == int('0x30',16): #Radio Msg type
            speed = unpack('h',pack('BB', data[0], data[1]))
            steering = unpack('b',pack('B', data[2]))
            print("Radio Msg: Speed = {:0.2f}, steering = {}\n".format(speed[0]/100.0, steering[0]),end = '')
            outfile.write("{:d}:{:0.2f}:{}\n".format(id, speed[0]/100.0, steering[0]))
        elif id == int('0x40',16): #IMU Msg type
            # Divisors convert raw sensor counts to radians / g / deg-per-s
            # respectively -- assumed from the constants; confirm against
            # the firmware.
            euler_x = unpack('h',pack('BB', data[0], data[1]))
            euler_y = unpack('h',pack('BB', data[2], data[3]))
            euler_z = unpack('h',pack('BB', data[4], data[5]))
            acc_x = unpack('h',pack('BB', data[6], data[7]))
            acc_y = unpack('h',pack('BB', data[8], data[9]))
            acc_z = unpack('h',pack('BB', data[10], data[11]))
            gyro_x = unpack('h',pack('BB', data[12], data[13]))
            gyro_y = unpack('h',pack('BB', data[14], data[15]))
            gyro_z = unpack('h',pack('BB', data[16], data[17]))
            print("IMU Msg: Eul_x = {:f}, Eul_y = {:f}, Eul_z = {:f}, Acc_x = {:f}, Acc_y = {:f}, Acc_z = {:f}, Gyr_x = {:f}, Gyr_y = {:f}, Gyr_z = {:f}\n".format(euler_x[0]/10430.0,euler_y[0]/10430.0,euler_z[0]/10430.0,acc_x[0]/8192.0,acc_y[0]/8192.0,acc_z[0]/8192.0,gyro_x[0]/16.4,gyro_y[0]/16.4,gyro_z[0]/16.4),end = '')
            outfile.write("{:d}:{:f}:{:f}:{:f}:{:f}:{:f}:{:f}:{:f}:{:f}:{:f}\n".format(id, euler_x[0]/10430.0,euler_y[0]/10430.0,euler_z[0]/10430.0,acc_x[0]/8192.0,acc_y[0]/8192.0,acc_z[0]/8192.0,gyro_x[0]/16.4,gyro_y[0]/16.4,gyro_z[0]/16.4))
        elif id == int('0x41',16): #Sonar Msg type
            ping1 = unpack('H',pack('BB', data[0], data[1]))
            ping2 = unpack('H',pack('BB', data[2], data[3]))
            ping3 = unpack('H',pack('BB', data[4], data[5]))
            ping4 = unpack('H',pack('BB', data[6], data[7]))
            ping5 = unpack('H',pack('BB', data[8], data[9]))
            print("Sonar Msg: ping1 = {:d}, ping2 = {:d}, ping3 = {:d}, ping4 = {:d}, ping5 = {:d}\n".format(ping1[0],ping2[0],ping3[0],ping4[0],ping5[0]),end = '')
            outfile.write("{:d}:{:d}:{:d}:{:d}:{:d}:{:d}\n".format(id, ping1[0],ping2[0],ping3[0],ping4[0],ping5[0]))
        elif id == int('0x42',16): #Bumper Msg type
            left = unpack('B',pack('B', data[0]))
            right = unpack('B',pack('B', data[1]))
            print("Bumper Msg: left = {:d}, right = {:d}\n".format(left[0],right[0]))
            outfile.write("{:d}:{:d}:{:d}\n".format(id,left[0],right[0]))
        elif id == int('0x60',16): #State Msg type
            apmState = unpack('b',pack('B', data[0]))
            driveState = unpack('b',pack('B', data[1]))
            autoState = unpack('b',pack('B', data[2]))
            autoFlag = unpack('b',pack('B', data[3]))
            # Voltage/amperage/speed sent in tenths.
            voltage = unpack('b',pack('B', data[4]))
            amperage = unpack('b',pack('B', data[5]))
            groundSpeed = unpack('b',pack('B', data[6]))
            print("State Msg: apmState = {:d}, driveState = {:d}, autoState = {:d}, autoFlag = {:d}, voltage = {:0.2f}, amperage = {:0.2f}, groundSpeed = {:0.2f}\n".format(apmState[0], driveState[0], autoState[0], autoFlag[0], voltage[0]/10.0, amperage[0]/10.0, groundSpeed[0]/10.0),end = '')
            outfile.write("{:d}:{:d}:{:d}:{:d}:{:d}:{:0.2f}:{:0.2f}:{:0.2f}\n".format(id, apmState[0], driveState[0], autoState[0], autoFlag[0], voltage[0]/10.0, amperage[0]/10.0, groundSpeed[0]/10.0))
        elif id == int('0x70',16): #Configuration Msg type
            print("Configuration MSG recived (not defined)\n",end = '')
            outfile.write("{:d}\n".format(id))
        elif id == int('0x80',16): #Control Msg type
            speed = unpack('h',pack('BB', data[0], data[1]))
            steering = unpack('B',pack('B', data[2]))
            print("Control Msg: Speed = {:0.2f}, steering = {}\n".format(speed[0]/100.0, steering[0]),end = '')
            outfile.write("{:d}:{:0.2f}:{}\n".format(id, speed[0]/100.0, steering[0]))
        elif id == int('0x81',16): #Waypoint Msg type
            latStart_min = unpack('h',pack('BB', data[0], data[1]))
            latStart_frac = unpack('i',pack('BBBB', data[2], data[3], data[4], data[5]))
            lonStart_min = unpack('h',pack('BB', data[6], data[7]))
            lonStart_frac = unpack('i',pack('BBBB', data[8], data[9], data[10], data[11]))
            latIntermediate_min = unpack('h',pack('BB', data[12], data[13]))
            latIntermediate_frac = unpack('i',pack('BBBB', data[14], data[15], data[16], data[17]))
            lonIntermediate_min = unpack('h',pack('BB', data[18], data[19]))
            lonIntermediate_frac = unpack('i',pack('BBBB', data[20], data[21], data[22], data[23]))
            latTarget_min = unpack('h',pack('BB', data[24], data[25]))
            latTarget_frac = unpack('i',pack('BBBB', data[26], data[27], data[28], data[29]))
            lonTarget_min = unpack('h',pack('BB', data[30], data[31]))
            lonTarget_frac = unpack('i',pack('BBBB', data[32], data[33], data[34], data[35]))
            pathHeading = unpack('h',pack('BB', data[36], data[37]))
            print("Waypoint Msg: latStart = {:0.7f}, lonStart = {:0.7f}, latInter = {:0.7f}, lonInter = {:0.7f}, latTarget = {:0.7f}, lonTarget = {:0.7f}, pathHead = {:0.2f}\n".format(gps_ang_to_float(latStart_min[0],latStart_frac[0]), gps_ang_to_float(lonStart_min[0],lonStart_frac[0]), gps_ang_to_float(latIntermediate_min[0],latIntermediate_frac[0]), gps_ang_to_float(lonIntermediate_min[0],lonIntermediate_frac[0]), gps_ang_to_float(latTarget_min[0],latTarget_frac[0]), gps_ang_to_float(lonTarget_min[0],lonTarget_frac[0]), pathHeading[0]/100.0),end = '')
            outfile.write("{:d}:{:0.7f}:{:0.7f}:{:0.7f}:{:0.7f}:{:0.7f}:{:0.7f}:{:0.2f}\n".format(id, gps_ang_to_float(latStart_min[0],latStart_frac[0]), gps_ang_to_float(lonStart_min[0],lonStart_frac[0]), gps_ang_to_float(latIntermediate_min[0],latIntermediate_frac[0]), gps_ang_to_float(lonIntermediate_min[0],lonIntermediate_frac[0]), gps_ang_to_float(latTarget_min[0],latTarget_frac[0]), gps_ang_to_float(lonTarget_min[0],lonTarget_frac[0]), pathHeading[0]/100.0))
        elif id == int('0x90',16): #ASCII Msg type
            ascii = data[:pkt_len-3]
            print("ASCII Msg: {0!s}\n".format(ascii.decode()),end = '')
            outfile.write("{:d}:{!s}\n".format(id, ascii.decode()))
        elif id == int('0xA0',16): #Version Msg type
            debug_major = data[0]
            debug_minor = data[1]
            apm_major = data[2]
            apm_minor = data[3]
            print("Version Msg: debug_maj = {:d}, debug_min = {:d}, apm_major = {:d}, apm_minor = {:d}\n".format(debug_major,debug_minor,apm_major,apm_minor),end = '')
            outfile.write("{:d}:{:d}:{:d}:{:d}:{:d}\n".format(id, debug_major,debug_minor,apm_major,apm_minor))
        else:
            # NOTE(review): "%02x" with str.format never substitutes --
            # this always prints the literal "%02x"; should be
            # "{:02x}".format(id).
            print("Unknown Msg type recieved: %02x\n".format(id),end = '')
        outfile.flush()
    else:
        print("Error: CRC does not match recieved\n",end = '')
        # remove just the header and try again
        buf = buf[(header_loc+2):len(buf)]
        return buf
    # remove processed message from input buffer
    buf = buf[(header_loc+3+pkt_len):len(buf)]
    return buf
if __name__ == "__main__":
    # Gather arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--infile',
                        help='Input file with encoded log data.')
    parser.add_argument('-o', '--outfile',
                        help='Optional - output file with decooded log data.')
    parser.add_argument('-p', '--port',
                        help='Serial port path to read encoded data.')
    args = parser.parse_args()

    # File and serial modes are mutually exclusive.
    if args.infile != None and args.port != None:
        print("Error: Both input file (-i) and serial port (-p) cannot be specified simultaneously.")
        parser.print_help()
        exit()

    # Create output file for decoded data ("x" mode: refuse to overwrite).
    filename = outfile_name(args)
    try:
        outfile = open(filename, "x")
    except IOError:
        # NOTE(review): prints args.outfile, but the failing name may be the
        # derived `filename` -- consider reporting `filename` instead.
        print("Error: Could not create output file - {}".format(args.outfile))
        exit()

    # File decoding (file as input).
    # NOTE(review): this branch only validates/echoes the path; actual
    # file decoding is not implemented yet.
    if args.infile != None:
        if not os.path.exists(args.infile):
            print("Error: Input file does not exist: {}".format(args.infile))
        print(args.infile)
    # Live decoding (serial port as input).
    elif args.port != None:
        # Configure and open serial port
        try:
            ser = serial.Serial(args.port, 115200, timeout=1)
        except serial.SerialException:
            print("Error: Could not open serial port - {}".format(args.port))
            exit()
        buf = bytes()
        while True:
            # Read and process live data from the serial port.
            new_bytes = ser.read(80)
            read_size = len(new_bytes)
            if read_size + len(buf) < 512:
                buf += new_bytes
            else:
                print("Error: buffer overflow\n")
            header_loc = -1
            full_msg = False
            check_buff = True
            # Drain every complete packet currently buffered.
            while check_buff:
                header_loc = find_header(buf)
                if header_loc >= 0:
                    full_msg = check_for_msg(buf,header_loc)
                    if full_msg:
                        buf = process_msg(buf,header_loc,outfile)
                    else:
                        # Header found but packet incomplete: wait for more bytes.
                        check_buff = False
                else:
                    # Garbage...toss all but last byte
                    buf = buf[(len(buf)-1):]
                    check_buff = False
    else:
        print("Error: Input file (-i) or serial port (-p) is required.")
        parser.print_help()
| StarcoderdataPython |
8144267 | <reponame>prafiles/dj-revproxy
# -*- coding: utf-8 -
#
# This file is part of dj-revproxy released under the MIT license.
# See the NOTICE for more information.
# etree object
import posixpath
import urlparse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.http import absolute_http_url_re
try:
from lxml import etree
import lxml.html
except ImportError:
raise ImportError("""lxml isn't installed
pip installl lxml
""")
from revproxy.util import normalize
# Response content types treated as HTML/XML: their bodies are parsed and
# link-rewritten by RewriteBase.on_response.
HTML_CTYPES = (
    "text/html",
    "application/xhtml+xml",
    "application/xml"
)
class Filter(object):
    """Base interface for proxy filters.

    A filter wraps the incoming request together with arbitrary keyword
    options; subclasses override :meth:`setup` to contribute extra
    request options.
    """

    def __init__(self, request, **kwargs):
        self.request, self.kwargs = request, kwargs

    def setup(self):
        """Hook used to update request options.

        Returns ``None`` (no changes) or a dict of option overrides; the
        base implementation contributes nothing.
        """
        return None
class RewriteBase(Filter):
    """Filter that rewrites relative links in proxied HTML responses.

    Links are made absolute against the proxied URL of the current
    request, and a ``<base>`` element is injected when the page lacks one.
    """

    def __init__(self, request, **kwargs):
        # Absolute URL of the current request, used as the rewrite base.
        self.absolute_path = '%s://%s%s' % (
            request.is_secure() and 'https' or 'http',
            request.get_host(),
            request.path)
        super(RewriteBase, self).__init__(request, **kwargs)

    def setup(self):
        # Responses must be decompressed before we can parse/rewrite them.
        return {'decompress': True}

    def rewrite_link(self, link):
        """Return ``link`` made absolute; absolute http(s) links pass through."""
        if not absolute_http_url_re.match(link):
            if link.startswith("/"):
                link = link[1:]
            absolute_path = self.absolute_path
            # NOTE(review): the check is for a TRAILING slash but [1:]
            # strips the LEADING character -- looks like it should be
            # [:-1]; confirm intended behavior.
            if self.absolute_path.endswith("/"):
                absolute_path = absolute_path[1:]
            return normalize(absolute_path, link)
        return link

    def on_response(self, resp, req):
        """Rewrite the response body in place when it is HTML/XML."""
        ctype = resp.headers.iget('content-type')
        if not ctype:
            return
        # Drop any ";charset=..." parameter before comparing.
        ctype = ctype.split(";", 1)[0]

        # if this is an html page, parse it
        if ctype in HTML_CTYPES:
            body = resp.body_string()
            html = lxml.html.fromstring(body)

            # rewrite links to absolute
            html.rewrite_links(self.rewrite_link)

            # add base
            old_base = html.find(".//base")
            base = etree.Element("base")
            base.attrib['href'] = self.absolute_path

            if not old_base:
                head = html.find(".//head")
                head.append(base)

            # modify response: serialized body replaces the original, so
            # Content-Length must be recomputed.
            rewritten_body = lxml.html.tostring(html)
            try:
                resp.headers.ipop('content-length')
            except KeyError:
                pass
            resp.headers['Content-Length'] = str(len(rewritten_body))
            resp._body = StringIO(rewritten_body)
            resp._already_read = False
| StarcoderdataPython |
3519521 | from .route_flow_dn import FlowRouter
from .lake_mapper import DepressionFinderAndRouter
from .flow_direction_DN import grid_flow_directions, flow_directions
__all__ = ['FlowRouter', 'DepressionFinderAndRouter', 'grid_flow_directions',
'flow_directions']
| StarcoderdataPython |
4826372 | <filename>python/ray/serve/tests/test_controller.py
import pytest
import time
import ray
from ray import serve
def test_controller_inflight_requests_clear(serve_instance):
    """A completed deployment leaves no pending goals on the controller."""
    controller = serve.api._global_client._controller
    # Baseline before deploying, in case other goals are already pending.
    initial_number_reqs = ray.get(controller._num_pending_goals.remote())

    @serve.deployment
    def test(_):
        return "hello"

    test.deploy()
    # deploy() blocks until done, so the pending-goal delta must be zero.
    assert ray.get(controller._num_pending_goals.remote()) - initial_number_reqs == 0
def test_redeploy_start_time(serve_instance):
    """Check that redeploying a deployment doesn't reset its start time."""
    controller = serve.api._global_client._controller

    @serve.deployment
    def test(_):
        return "1"

    test.deploy()
    deployment_info_1, route_1 = ray.get(controller.get_deployment_info.remote("test"))
    start_time_ms_1 = deployment_info_1.start_time_ms

    # Ensure a measurable gap so an (incorrect) reset would be detected.
    time.sleep(0.1)

    @serve.deployment
    def test(_):
        return "2"

    test.deploy()  # redeploy under the same name
    deployment_info_2, route_2 = ray.get(controller.get_deployment_info.remote("test"))
    start_time_ms_2 = deployment_info_2.start_time_ms

    assert start_time_ms_1 == start_time_ms_2
if __name__ == "__main__":
    import sys

    # Allow running this test module directly via pytest.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| StarcoderdataPython |
9690208 | import numpy as np
import cPickle as pickle
import decoder
def main(in_file, char_file, ali_file, num_to_print, lm_file=None):
    """Decode CTC log-likelihoods with argmax and beam+LM decoders and print
    both hypotheses next to the reference alignment.

    Python 2 code (``cPickle``, print statements, list-returning ``map``).

    in_file      -- pickle mapping utterance id -> log-likelihood matrix
    char_file    -- "symbol index" mapping file, one symbol per line
    ali_file     -- reference alignments: "utt_id idx idx ..." per line
    num_to_print -- number of utterances to decode and display
    lm_file      -- ARPA language model for the beam decoder
    """
    with open(in_file,'r') as f:
        ll_dict = pickle.load(f)
    # read char mapping (need it here for alignments)
    with open(char_file,'r') as f:
        phone_list = map(lambda x: x.rstrip().split()[0], f.readlines())
    # prepend symbol for blank
    phone_list.insert(0,'_')
    # read alignments
    with open(ali_file,'r') as f:
        ali_dict = {}
        for l in f.readlines():
            l_split = l.rstrip().split()
            # Map each symbol index back to its character and join.
            ali_dict[l_split[0]] = ''.join([phone_list[int(x)] for x in l_split[1:]])
    # create decoders
    dec_argmax = decoder.ArgmaxDecoder()
    dec_argmax.load_chars(char_file)
    dec_lm = decoder.BeamLMDecoder()
    dec_lm.load_chars(char_file)
    dec_lm.load_lm(lm_file)
    n_printed = 0
    for i,k in enumerate(ll_dict):
        # Greedy (argmax) hypothesis vs. beam-search-with-LM hypothesis,
        # followed by the reference alignment for comparison.
        hyp_argmax,score_argmax = dec_argmax.decode(ll_dict[k].astype(np.double))
        print score_argmax, hyp_argmax
        hyp_lm, score_lm = dec_lm.decode(ll_dict[k].astype(np.double))
        print score_lm, hyp_lm
        print ali_dict[k]
        n_printed+= 1
        if n_printed >= num_to_print:
            break
if __name__=='__main__':
    # Hard-coded SWBD eval2000 paths for a quick manual run: decode the
    # first pickle of log-likelihoods and show 10 utterances.
    ll_path = '/scail/scratch/group/deeplearning/speech/amaas/kaldi-stanford/stanford-nnet/ctc_fast/swbd_eval2000_lik/'
    in_file = ll_path + 'loglikelihoods_1.pk'
    char_file = ll_path + 'chars.txt'
    ali_file = ll_path + 'alis1.txt'
    lm_file = '/scail/group/deeplearning/speech/amaas/kaldi-stanford/kaldi-trunk/egs/wsj/s6/data/local/lm/text_char.2g.arpa'
    main(in_file, char_file,ali_file,10, lm_file=lm_file)
| StarcoderdataPython |
3492215 | import string
wordlist = [acehorrst
D = {}
L = []
for i in range(100):
## file[i].split(' ')
for s in string.punctuation:
file[i] =file[i].replace(s, ' ')
## file = file[i].split()
for i in file[i].split() :
#
try:
D[i] += 1
except:
D[i] = 1
L.append(i)
for i in D:
print('%20s%6d' % (i, D[i]))
| StarcoderdataPython |
class ToolNameAPI:
    """Placeholder API object exposing example class-level attributes."""
    # Example/stub values only.
    thing = 'thing'
    toolname_tool = 'example'


# Module-level singleton and repository metadata placeholders.
tln = ToolNameAPI()

the_repo = "reponame"
author = "authorname"
profile = "authorprofile"
11283651 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
class EmbedNet(nn.Module):
    """Backbone + NetVLAD image-embedding network.

    ``forward`` returns the backbone's pooled features alongside the
    flattened, L2-normalized VLAD descriptor.
    """

    def __init__(self, base_model, net_vlad):
        super(EmbedNet, self).__init__()
        self.base_model = base_model
        self.net_vlad = net_vlad

    def _init_params(self):
        # Delegate initialization to both sub-modules.
        self.base_model._init_params()
        self.net_vlad._init_params()

    def forward(self, x):
        pool_x, x = self.base_model(x)
        vlad_x = self.net_vlad(x)

        # Intra-normalize per cluster, flatten, then L2-normalize the
        # whole descriptor.
        vlad_x = F.normalize(vlad_x, p=2, dim=2)
        vlad_x = vlad_x.view(x.size(0), -1)
        vlad_x = F.normalize(vlad_x, p=2, dim=1)
        return pool_x, vlad_x
class EmbedRegionNet(nn.Module):
    """NetVLAD embedding with region-level similarity for tuple training.

    At train time the input batch is interpreted as ``tuple_size`` tuples of
    (anchor, pairs...); region-wise VLAD descriptors (1 global + 4 half +
    4 quarter regions) are compared between anchor and pairs.  At eval time
    it behaves like a plain EmbedNet.
    """

    def __init__(self, base_model, net_vlad, tuple_size=1):
        super(EmbedRegionNet, self).__init__()
        self.base_model = base_model
        self.net_vlad = net_vlad
        # Number of (anchor, pairs) tuples packed into each batch.
        self.tuple_size = tuple_size

    def _init_params(self):
        self.base_model._init_params()
        self.net_vlad._init_params()

    def _compute_region_sim(self, feature_A, feature_B):
        """Return (score, vlad_A, vlad_B): pairwise region similarity matrices
        and the per-region normalized VLAD descriptors of both inputs."""

        def reshape(x):
            # Split the H x W feature map into a 2x2 grid of quarters:
            # (N, C, H, W) -> (N, C, 4, H/2, W/2).
            N, C, H, W = x.size()
            x = x.view(N, C, 2, int(H/2), 2, int(W/2))
            x = x.permute(0,1,2,4,3,5).contiguous()
            x = x.view(N, C, -1, int(H/2), int(W/2))
            return x

        def aggregate_quarter(x):
            # VLAD-encode each quarter independently:
            # (N, C, B, H, W) -> (N, B, num_clusters, dim).
            N, C, B, H, W = x.size()
            x = x.permute(0,2,1,3,4).contiguous()
            x = x.view(-1,C,H,W)
            vlad_x = self.net_vlad(x)
            _, cluster_num, feat_dim = vlad_x.size()
            vlad_x = vlad_x.view(N,B,cluster_num,feat_dim)
            return vlad_x

        def quarter_to_half(vlad_x):
            # Combine quarter descriptors into the 4 half regions
            # (top, bottom, left, right).
            return torch.stack((vlad_x[:,0]+vlad_x[:,1], vlad_x[:,2]+vlad_x[:,3], \
                                vlad_x[:,0]+vlad_x[:,2], vlad_x[:,1]+vlad_x[:,3]), dim=1).contiguous()

        def quarter_to_global(vlad_x):
            # Sum all quarters into the single global descriptor.
            return vlad_x.sum(1).unsqueeze(1).contiguous()

        def norm(vlad_x):
            # Intra-normalize per cluster, then L2-normalize each region
            # descriptor.
            N, B, C, _ = vlad_x.size()
            vlad_x = F.normalize(vlad_x, p=2, dim=3)
            vlad_x = vlad_x.view(N, B, -1)
            vlad_x = F.normalize(vlad_x, p=2, dim=2)
            return vlad_x

        feature_A = reshape(feature_A)
        feature_B = reshape(feature_B)

        vlad_A_quarter = aggregate_quarter(feature_A)
        vlad_B_quarter = aggregate_quarter(feature_B)

        vlad_A_half = quarter_to_half(vlad_A_quarter)
        vlad_B_half = quarter_to_half(vlad_B_quarter)

        vlad_A_global = quarter_to_global(vlad_A_quarter)
        vlad_B_global = quarter_to_global(vlad_B_quarter)

        # Region pyramid: 1 global + 4 halves + 4 quarters = 9 regions.
        vlad_A = torch.cat((vlad_A_global, vlad_A_half, vlad_A_quarter), dim=1)
        vlad_B = torch.cat((vlad_B_global, vlad_B_half, vlad_B_quarter), dim=1)

        vlad_A = norm(vlad_A)
        vlad_B = norm(vlad_B)

        # Cosine similarity between every region of A and every region of B
        # (descriptors are unit-norm, so the dot product is the cosine).
        _, B, L = vlad_B.size()
        vlad_A = vlad_A.view(self.tuple_size,-1,B,L)
        vlad_B = vlad_B.view(self.tuple_size,-1,B,L)
        score = torch.bmm(vlad_A.expand_as(vlad_B).view(-1,B,L), vlad_B.view(-1,B,L).transpose(1,2))
        score = score.view(self.tuple_size,-1,B,B)

        return score, vlad_A, vlad_B

    def _forward_train(self, x):
        # Unpack each tuple: element 0 is the anchor, the rest are pairs.
        B, C, H, W = x.size()
        x = x.view(self.tuple_size, -1, C, H, W)
        anchors = x[:, 0].unsqueeze(1).contiguous().view(-1,C,H,W)
        pairs = x[:, 1:].view(-1,C,H,W)
        return self._compute_region_sim(anchors, pairs)

    def forward(self, x):
        pool_x, x = self.base_model(x)

        if (not self.training):
            # Eval path: plain global VLAD descriptor, as in EmbedNet.
            vlad_x = self.net_vlad(x)
            vlad_x = F.normalize(vlad_x, p=2, dim=2)
            vlad_x = vlad_x.view(x.size(0), -1)
            vlad_x = F.normalize(vlad_x, p=2, dim=1)
            return pool_x, vlad_x

        return self._forward_train(x)
class EmbedNetPCA(nn.Module):
    """Backbone + NetVLAD embedding with a learned dimension reduction.

    A 1x1 convolution projects the flattened VLAD descriptor
    (num_clusters * dim) down to ``dim`` output dimensions, followed by a
    final L2 normalization.
    """

    def __init__(self, base_model, net_vlad, dim=4096):
        super(EmbedNetPCA, self).__init__()
        self.base_model = base_model
        self.net_vlad = net_vlad
        # 1x1 conv acting as a learned PCA-like projection.
        self.pca_layer = nn.Conv2d(net_vlad.num_clusters*net_vlad.dim, dim, 1, stride=1, padding=0)

    def _init_params(self):
        self.base_model._init_params()
        self.net_vlad._init_params()

    def forward(self, x):
        _, x = self.base_model(x)
        vlad_x = self.net_vlad(x)

        # Intra-normalize per cluster, flatten, L2-normalize.
        vlad_x = F.normalize(vlad_x, p=2, dim=2)
        vlad_x = vlad_x.view(x.size(0), -1)
        vlad_x = F.normalize(vlad_x, p=2, dim=1)

        # Reduction: feed the descriptor through the 1x1 conv as an
        # (N, D, 1, 1) tensor, then flatten and renormalize.
        N, D = vlad_x.size()
        vlad_x = vlad_x.view(N, D, 1, 1)
        vlad_x = self.pca_layer(vlad_x).view(N, -1)
        vlad_x = F.normalize(vlad_x, p=2, dim=-1) # L2 normalize

        return vlad_x
class NetVLAD(nn.Module):
    """NetVLAD pooling layer: soft-assigns local descriptors to learned
    cluster centroids and accumulates assignment-weighted residuals.

    Output shape per sample: (num_clusters, dim).
    """

    def __init__(self, num_clusters=64, dim=512, alpha=100.0, normalize_input=True):
        super(NetVLAD, self).__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        # Sharpness of the soft assignment (overwritten by _init_params).
        self.alpha = alpha
        self.normalize_input = normalize_input
        # 1x1 conv produces the per-cluster assignment logits.
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=False)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim), requires_grad=True)
        # Set externally before calling _init_params: cluster centers and
        # training descriptors (numpy arrays).
        self.clsts = None
        self.traindescs = None

    def _init_params(self):
        # Derive alpha so that the closest-vs-second-closest centroid gap
        # maps to an assignment ratio of about 0.01, then seed the conv
        # weights and centroids from the provided clusters.
        clstsAssign = self.clsts / np.linalg.norm(self.clsts, axis=1, keepdims=True)
        dots = np.dot(clstsAssign, self.traindescs.T)
        dots.sort(0)
        dots = dots[::-1, :]

        self.alpha = (-np.log(0.01) / np.mean(dots[0,:] - dots[1,:])).item()
        self.centroids.data.copy_(torch.from_numpy(self.clsts))
        self.conv.weight.data.copy_(torch.from_numpy(self.alpha*clstsAssign).unsqueeze(2).unsqueeze(3))

    def forward(self, x):
        N, C = x.shape[:2]
        if self.normalize_input:
            # L2-normalize each local descriptor across channels.
            x = F.normalize(x, p=2, dim=1)

        # Soft assignment of each spatial location to every cluster.
        soft_assign = self.conv(x).view(N, self.num_clusters, -1)
        soft_assign = F.softmax(soft_assign, dim=1)

        # Residuals of every descriptor to every centroid, weighted by the
        # soft assignment, summed over spatial locations.
        x_flatten = x.view(N, C, -1)
        residual = x_flatten.expand(self.num_clusters, -1, -1, -1).permute(1, 0, 2, 3) - \
                self.centroids.expand(x_flatten.size(-1), -1, -1).permute(1, 2, 0).unsqueeze(0)
        residual *= soft_assign.unsqueeze(2)
        vlad = residual.sum(dim=-1)

        return vlad
# Copyright (c) 2020 Paddle Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paddle_VQSD: To learn more about the functions and properties of this application,
you could check the corresponding Jupyter notebook under the Tutorial folder.
"""
import numpy
from paddle import fluid
from paddle_quantum.circuit import UAnsatz
from paddle.complex import matmul, trace, transpose
SEED = 1
__all__ = [
"U_theta",
"Net",
"Paddle_VQSD",
]
# definition of U_theta
def U_theta(theta, N):
    """Build the two-qubit variational ansatz and return its state vector.

    ``theta`` supplies the 14 rotation angles; ``N`` is the qubit count
    passed to ``UAnsatz``.  The gate order is exactly the original
    Rz-Ry-Rz blocks interleaved with two CNOTs.
    """
    cir = UAnsatz(N)

    def euler_block(qubit, a, b, c):
        # General single-qubit rotation decomposed as Rz * Ry * Rz.
        cir.rz(theta[a], qubit)
        cir.ry(theta[b], qubit)
        cir.rz(theta[c], qubit)

    euler_block(1, 0, 1, 2)
    euler_block(2, 3, 4, 5)
    cir.cnot([2, 1])
    cir.rz(theta[6], 1)
    cir.ry(theta[7], 2)
    cir.cnot([1, 2])
    euler_block(1, 8, 9, 10)
    euler_block(2, 11, 12, 13)
    return cir.state
class Net(fluid.dygraph.Layer):
    """
    Variational net for VQSD: holds the trainable circuit angles together
    with the input state ``rho`` and the target observable ``sigma``.
    """
    def __init__(self,
                 shape,
                 rho,
                 sigma,
                 param_attr=fluid.initializer.Uniform(
                     low=0.0, high=2 * numpy.pi, seed=SEED),
                 dtype='float32'):
        # shape: shape of the trainable angle vector (e.g. [THETA_SIZE]);
        # rho / sigma: numpy matrices converted to dygraph variables;
        # param_attr: angles initialized uniformly in [0, 2*pi) with fixed SEED.
        super(Net, self).__init__()
        self.rho = fluid.dygraph.to_variable(rho)
        self.sigma = fluid.dygraph.to_variable(sigma)
        self.theta = self.create_parameter(
            shape=shape, attr=param_attr, dtype=dtype, is_bias=False)
    def forward(self, N):
        """
        Args:
            N: number of qubits, forwarded to ``U_theta``.
        Returns:
            A tuple ``(loss, rho_tilde)`` where ``loss`` is the real part of
            tr(sigma * rho_tilde) and ``rho_tilde`` is rho conjugated by the
            parameterized circuit U(theta).
        """
        out_state = U_theta(self.theta, N)
        # rho_tilde is what you get after you put self.rho through the circuit:
        # rho_tilde = U * rho * U^dagger (the transpose of the conjugate).
        rho_tilde = matmul(
            matmul(out_state, self.rho),
            transpose(
                fluid.framework.ComplexVariable(out_state.real,
                                                -out_state.imag),
                perm=[1, 0]))
        # record the new loss
        loss = trace(matmul(self.sigma, rho_tilde))
        return loss.real, rho_tilde
def Paddle_VQSD(rho, sigma, N=2, THETA_SIZE=14, ITR=50, LR=0.1):
    """
    Run variational quantum state diagonalization with Adagrad.

    Args:
        rho: input density matrix to diagonalize.
        sigma: fixed target observable used in the loss tr(sigma * rho_tilde).
        N: number of qubits.
        THETA_SIZE: number of trainable circuit angles.
        ITR: number of gradient-descent iterations.
        LR: Adagrad learning rate.
    Returns:
        The final rotated state rho_tilde as a numpy array.
    """
    with fluid.dygraph.guard():
        # net
        net = Net(shape=[THETA_SIZE], rho=rho, sigma=sigma)
        # optimizer
        opt = fluid.optimizer.AdagradOptimizer(
            learning_rate=LR, parameter_list=net.parameters())
        # gradient descent loop: backward, step, then clear accumulated grads
        for itr in range(ITR):
            loss, rho_tilde = net(N)
            rho_tilde_np = rho_tilde.numpy()
            loss.backward()
            opt.minimize(loss)
            net.clear_gradients()
            print('iter:', itr, 'loss:', '%.4f' % loss.numpy()[0])
        return rho_tilde_np
# ---- file boundary (dataset concatenation artifact) ----
# Copyright (c) 2015 <NAME>.
# Uranium is released under the terms of the LGPLv3 or higher.
import inspect
from PyQt5.QtCore import pyqtProperty, pyqtSignal, QObject, QCoreApplication, pyqtSlot
from PyQt5.QtQml import QJSValue
from UM.i18n import i18nCatalog
class i18nCatalogProxy(QObject):  # [CodeStyle: Ultimaker code style requires classes to start with a upper case. But i18n is lower case by convention.]
    """QML-side proxy around ``UM.i18n.i18nCatalog``.

    Exposes the catalog's translation functions (i18n, i18nc, i18np, i18ncp)
    to QML as *properties* holding JavaScript closures rather than as slots.
    Because the properties carry the ``nameChanged`` notify signal, QML
    bindings using them are re-evaluated whenever the catalog name changes.
    """

    def __init__(self, parent = None):
        super().__init__(parent)
        self._name = None      # catalog name, exposed through the "name" property
        self._catalog = None   # backing i18nCatalog, (re)created in setName()

        # Slightly hacky way of getting at the QML engine defined by QtApplication.
        engine = QCoreApplication.instance()._qml_engine

        # Wrap each translation slot in a JS closure so it can be exposed
        # as a notifiable property (see _wrapFunction below).
        self._i18n_function = self._wrapFunction(engine, self, self._call_i18n)
        self._i18nc_function = self._wrapFunction(engine, self, self._call_i18nc)
        self._i18np_function = self._wrapFunction(engine, self, self._call_i18np)
        self._i18ncp_function = self._wrapFunction(engine, self, self._call_i18ncp)

    def setName(self, name):
        """Set the catalog name, recreating the catalog only when it changes.

        BUG FIX: the previous implementation never stored ``name``, so the
        ``name`` property always returned None and the inequality guard
        recreated the catalog on every assignment.
        """
        if name != self._name:
            self._name = name
            self._catalog = i18nCatalog(name)
            self.nameChanged.emit()

    nameChanged = pyqtSignal()

    @pyqtProperty(str, fset = setName, notify = nameChanged)
    def name(self):
        return self._name

    @pyqtProperty(QJSValue, notify = nameChanged)
    def i18n(self):
        return self._i18n_function

    @pyqtProperty(QJSValue, notify = nameChanged)
    def i18nc(self):
        return self._i18nc_function

    @pyqtProperty(QJSValue, notify = nameChanged)
    def i18np(self):
        return self._i18np_function

    @pyqtProperty(QJSValue, notify = nameChanged)
    def i18ncp(self):
        return self._i18ncp_function

    @pyqtSlot(str, result = str)
    def _call_i18n(self, message):
        return self._catalog.i18n(message)

    @pyqtSlot(str, str, result = str)
    def _call_i18nc(self, context, message):
        return self._catalog.i18nc(context, message)

    @pyqtSlot(str, str, int, result = str)
    def _call_i18np(self, single, multiple, counter):
        return self._catalog.i18np(single, multiple, counter)

    @pyqtSlot(str, str, str, int, result = str)
    def _call_i18ncp(self, context, single, multiple, counter):
        return self._catalog.i18ncp(context, single, multiple, counter)

    ##  Wrap a function in a bit of JavaScript to re-trigger a method call on signal emit.
    #
    #   This slightly magical method wraps a Python method exposed to QML in a JavaScript
    #   closure with the same signature as the Python method. This allows the closure to be
    #   exposed as a QML property instead of a QML slot. Using a property for this allows us
    #   to add a notify signal to re-trigger the method execution. Due to the way notify
    #   signals are handled by QML, re-triggering the method only needs a signal emit.
    #
    #   \param engine \type{QQmlEngine} The QML engine to use to evaluate JavaScript.
    #   \param this_object \type{QObject} The object to call the function on.
    #   \param function \type{Function} The function to call. Should be marked as pyqtSlot.
    #
    #   \return \type{QJSValue} A JavaScript closure that when called calls the wrapped Python method.
    #
    #   \note Currently, only functions taking a fixed list of positional arguments are supported.
    def _wrapFunction(self, engine, this_object, function):
        # JavaScript code that wraps the Python method call in a closure
        wrap_js = """(function(this_object) {{
            return function({args}) {{ return this_object.{function}({args}) }}
        }})"""
        # Get the function name and argument list.
        function_name = function.__name__
        function_args = inspect.getfullargspec(function)[0]
        if function_args[0] == "self":
            function_args = function_args[1:]  # Drop "self" from argument list
        # Replace arguments and function name with the proper values.
        wrapped_function = wrap_js.format(function = function_name, args = ", ".join(function_args))
        # Wrap the "this" object in a QML JSValue object.
        this_jsvalue = engine.newQObject(this_object)
        # Use the QML engine to evaluate the wrapped JS, then call that to retrieve the closure.
        result = engine.evaluate(wrapped_function).call([this_jsvalue])
        # Finally, return the resulting function.
        return result
# ---- file boundary (dataset concatenation artifact) ----
# repository: Intelligent-Systems-Laboratory/cvat
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# import the necessary packages
import imutils
import cv2
import os
import argparse
# import cv2
import torch
import numpy as np
from glob import glob
from cvat.apps.engine.pysot.core.config import cfg
from cvat.apps.engine.pysot.models.model_builder import ModelBuilder
from cvat.apps.engine.pysot.tracker.tracker_builder import build_tracker
class Tracker:
    """Facade that runs a chosen tracking backend over a frame sequence."""

    # Results of the most recent track() call (also returned directly).
    results = []

    def track(self, frameList, initBB, tracker):
        """Track ``initBB`` through ``frameList`` with the selected backend.

        Args:
            frameList: sequence of frames (image arrays).
            initBB: initial bounding box (x, y, w, h) in the first frame.
            tracker: backend name, 'CSRT' or 'pysot'.

        Returns:
            list of [x1, y1, x2, y2] boxes, one per tracked frame.

        Raises:
            ValueError: for an unknown ``tracker`` name (previously this
                path crashed with an UnboundLocalError).
        """
        if tracker == 'CSRT':
            results = track_CSRT(frameList, initBB)
        elif tracker == 'pysot':
            # BUG FIX: this branch previously called track_CSRT, so the
            # pysot backend was never actually used.
            results = track_pysot(frameList, initBB)
        else:
            raise ValueError('unknown tracker backend: %r' % (tracker,))
        self.results = results
        return results
def track_CSRT(frameList, initBB):
    """Track ``initBB`` through ``frameList`` with OpenCV's CSRT tracker.

    Args:
        frameList: sequence of frames (image arrays).
        initBB: initial bounding box (x, y, w, h) in the first frame.

    Returns:
        list of [x1, y1, x2, y2] boxes, one per frame after the first for
        which the tracker update succeeded (failed frames are skipped).

    Note: the original code carried a dead ``start % 1 == 0`` condition
    (always true) and a manual frame counter; both are removed.
    """
    coords = []
    tracker = cv2.TrackerCSRT_create()
    for index, frame in enumerate(frameList):
        if index == 0:
            # initialize the tracker on the first frame only
            tracker.init(frame, initBB)
        else:
            # grab the new bounding box coordinates of the object
            (success, box) = tracker.update(frame)
            if success:
                x, y, w, h = (int(v) for v in box)
                coords.append([x, y, x + w, y + h])
    return coords
def track_pysot(frameList, initBB):
    """Track ``initBB`` through ``frameList`` with a pretrained PySOT model.

    Loads the SiamRPN alex_dwxcorr config and weights relative to the
    current working directory, runs the tracker frame by frame and collects
    bounding boxes.

    Args:
        frameList: sequence of frames (image arrays).
        initBB: initial bounding box (x, y, w, h) in the first frame.

    Returns:
        list of [x1, y1, x2, y2] boxes, one per frame after the first.
        Frames where the tracker returns a polygon/mask produce no box.
    """
    path = os.path.abspath("./")
    config_path = os.path.join(path, 'cvat/apps/engine/pysot/siamrpn_alex_dwxcorr/config.yaml')
    model_path = os.path.join(path, 'cvat/apps/engine/pysot/siamrpn_alex_dwxcorr/model.pth')
    coords = []

    # load config
    cfg.merge_from_file(config_path)
    cfg.CUDA = torch.cuda.is_available() and cfg.CUDA
    # BUG FIX: the device was hard-coded to 'cuda', which crashes on
    # CPU-only hosts even though cfg.CUDA was computed just above.
    device = torch.device('cuda' if cfg.CUDA else 'cpu')

    # create model and load weights (mapped onto the CPU first)
    model = ModelBuilder()
    model.load_state_dict(torch.load(model_path,
                                     map_location=lambda storage, loc: storage.cpu()))
    model.eval().to(device)

    # build tracker
    tracker = build_tracker(model)

    first_frame = True
    for frame in frameList:
        if first_frame:
            tracker.init(frame, initBB)
            first_frame = False
        else:
            outputs = tracker.track(frame)
            if 'polygon' in outputs:
                # Segmentation-style output: blend the mask into the frame
                # (display-only leftover from the upstream demo; no box is
                # recorded for these frames).
                mask = ((outputs['mask'] > cfg.TRACK.MASK_THERSHOLD) * 255)
                mask = mask.astype(np.uint8)
                mask = np.stack([mask, mask * 255, mask]).transpose(1, 2, 0)
                frame = cv2.addWeighted(frame, 0.77, mask, 0.23, -1)
            else:
                bbox = list(map(int, outputs['bbox']))
                coords.append([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]])
    return coords
class TrackResultsStorage:
    """In-memory store of tracking results keyed by object id.

    Each stored entry is a dict with at least the keys ``objectID``,
    ``data`` (a list of per-frame dicts containing ``bbox`` and ``crop``)
    and ``frameEnd``.

    BUG FIX: ``results`` and ``updated`` were class-level attributes, so
    every instance shared one results list; they are now instance state.
    Debug prints in ``edit`` and a trailing dataset artifact were removed.
    """

    def __init__(self):
        self.results = []    # list of result entries (see class docstring)
        self.updated = False # set once anything has been stored

    def check(self):
        """Return True if any results are stored."""
        return len(self.results) > 0

    def flush(self):
        """Discard all stored results."""
        self.results = []

    def get(self, objectID):
        """Return the per-frame data list for ``objectID`` (None if absent)."""
        for result in self.results:
            if result['objectID'] == objectID:
                return result['data']
        return None

    def store(self, entry):
        """Append a new result entry and mark the storage as updated."""
        self.results.append(entry)
        self.updated = True

    def update(self, entry):
        """Extend an existing object's data and frameEnd from ``entry``."""
        for result in self.results:
            if result['objectID'] == entry['objectID']:
                result['data'] = result['data'] + entry['data']
                result['frameEnd'] = entry['frameEnd']

    def edit(self, objectID, slice_index, bbox, crop):
        """Overwrite the bbox/crop of one stored frame for ``objectID``."""
        for result in self.results:
            if result['objectID'] == objectID:
                result['data'][slice_index]['bbox'] = bbox
                result['data'][slice_index]['crop'] = crop
# Copyright (c) 2021, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from src.lyap.verifier.z3verifier import Z3Verifier
from functools import partial
from src.lyap.learner.net import NN
from src.shared.activations import ActivationType
from experiments.benchmarks.benchmarks_lyap import *
import torch
from src.shared.components.Translator import Translator
from unittest import mock
from z3 import *
from src.shared.cegis_values import CegisStateKeys
from src.shared.consts import TranslatorType
class TestZ3Verifier(unittest.TestCase):
    """Tests for Z3Verifier on the poly_2 benchmark.

    The three tests shared an identical verifier/translator pipeline and
    differed only in the candidate network's weights, so the common setup
    is factored into ``_poly2_found``.
    """

    def _poly2_found(self, bias, layer0_weights, layer0_biases=None, layer1_weights=None):
        """Load the given weights into a small NN candidate Lyapunov
        function for the poly_2 system and return Z3's verification verdict.

        Args:
            bias: whether the NN is built with bias terms.
            layer0_weights: 2x2 matrix of first-layer weights.
            layer0_biases: optional first-layer biases (length 2).
            layer1_weights: optional second-layer weights (length 2).

        Also asserts the invariant that a successful verification
        corresponds to an empty counterexample list.
        """
        system = partial(poly_2, batch_size=100)
        n_vars = 2
        x = Z3Verifier.new_vars(n_vars)
        f, domain, _ = system(functions=Z3Verifier.solver_fncts(), inner=0, outer=100)
        domain_z3 = domain(Z3Verifier.solver_fncts(), x)
        verifier = Z3Verifier(n_vars, f, domain_z3, x)

        # candidate Lyapunov model
        model = NN(2, 2,
                   bias=bias,
                   activate=[ActivationType.SQUARE],
                   equilibria=None)
        for i, row in enumerate(layer0_weights):
            for j, value in enumerate(row):
                model.layers[0].weight[i][j] = value
        if layer0_biases is not None:
            for i, value in enumerate(layer0_biases):
                model.layers[0].bias[i] = value
        if layer1_weights is not None:
            for j, value in enumerate(layer1_weights):
                model.layers[1].weight[0][j] = value

        xdot = f(Z3Verifier.solver_fncts(), x)
        translator = Translator(model, np.matrix(x).T, xdot, None, 1)
        res = translator.get(**{'factors': None})
        V, Vdot = res[CegisStateKeys.V], res[CegisStateKeys.V_dot]
        res = verifier.verify(V, Vdot)
        # found <=> no counterexample returned
        self.assertEqual(res[CegisStateKeys.found], res[CegisStateKeys.cex] == [])
        return res[CegisStateKeys.found]

    def test_poly2_with_good_Lyapunov_function(self):
        # Identity first layer and summing second layer: V = x1^2 + x2^2.
        self.assertTrue(self._poly2_found(False, [[1, 0], [0, 1]], layer1_weights=[1, 1]))

    def test_poly2_with_bad_Lyapunov_function(self):
        # Biased candidate: the offsets break the Lyapunov conditions.
        self.assertFalse(self._poly2_found(True, [[1, 0], [0, 1]], layer0_biases=[1, 1]))

    def test_poly2_with_another_bad_Lyapunov_function(self):
        # Sheared first-layer weights: candidate fails verification.
        self.assertFalse(self._poly2_found(False, [[1, 1], [0, 1]]))
if __name__ == '__main__':
unittest.main()
# ---- file boundary (dataset concatenation artifact) ----
3444699 | """Constants for the laundrify tests."""
from homeassistant.const import CONF_CODE
# Pairing code in the "xxx-xxx" format — presumably what the laundrify
# config flow expects as user input; verify against the config-flow test.
VALID_AUTH_CODE = "999-001"
# Access token fixture; "<PASSWORD>" is a redaction artifact from the
# dataset — the tests only ever compare this value verbatim.
VALID_ACCESS_TOKEN = "<PASSWORD>"
# Account identifier fixture used by the mocked API responses.
VALID_ACCOUNT_ID = "1234"
# User input mapping fed to the config flow.
VALID_USER_INPUT = {
    CONF_CODE: VALID_AUTH_CODE,
}
# ---- file boundary (dataset concatenation artifact) ----
# vim:fileencoding=utf-8:noet
from powerline.segments import shell, common
import tests.vim as vim_module
import sys
import os
from tests.lib import Args, urllib_read, replace_attr, new_module, replace_module_module, replace_env, Pl
from tests import TestCase
vim = None
class TestShell(TestCase):
	"""Tests for the shell-specific powerline segments."""
	def test_last_status(self):
		"""last_status shows a failing exit code and hides zero/None."""
		pl = Pl()
		segment_info = {'args': Args(last_exit_code=10)}
		self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info),
				[{'contents': '10', 'highlight_group': 'exit_fail'}])
		segment_info['args'].last_exit_code = 0
		self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), None)
		segment_info['args'].last_exit_code = None
		self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), None)
	def test_last_pipe_status(self):
		"""last_pipe_status is hidden unless some pipe member failed."""
		pl = Pl()
		segment_info = {'args': Args(last_pipe_status=[])}
		self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
		segment_info['args'].last_pipe_status = [0, 0, 0]
		self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
		segment_info['args'].last_pipe_status = [0, 2, 0]
		self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
			{'contents': '0', 'highlight_group': 'exit_success', 'draw_inner_divider': True},
			{'contents': '2', 'highlight_group': 'exit_fail', 'draw_inner_divider': True},
			{'contents': '0', 'highlight_group': 'exit_success', 'draw_inner_divider': True}
		])
class TestCommon(TestCase):
	def test_hostname(self):
		"""hostname segment: SSH detection, domain stripping, only_if_ssh."""
		pl = Pl()
		with replace_env('SSH_CLIENT', '192.168.0.12 40921 22') as segment_info:
			with replace_module_module(common, 'socket', gethostname=lambda: 'abc'):
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc')
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc')
			with replace_module_module(common, 'socket', gethostname=lambda: 'abc.mydomain'):
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain')
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc')
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc.mydomain')
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), 'abc')
			# without SSH_CLIENT the segment hides itself when only_if_ssh is set
			segment_info['environ'].pop('SSH_CLIENT')
			with replace_module_module(common, 'socket', gethostname=lambda: 'abc'):
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc')
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), None)
			with replace_module_module(common, 'socket', gethostname=lambda: 'abc.mydomain'):
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain')
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc')
				self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), None)
	def test_user(self):
		"""user segment: normal user vs. root highlight groups (euid 0)."""
		new_os = new_module('os', getpid=lambda: 1)
		new_psutil = new_module('psutil', Process=lambda pid: Args(username='def'))
		pl = Pl()
		with replace_env('USER', 'def') as segment_info:
			with replace_attr(common, 'os', new_os):
				with replace_attr(common, 'psutil', new_psutil):
					with replace_attr(common, '_geteuid', lambda: 5):
						self.assertEqual(common.user(pl=pl, segment_info=segment_info), [
							{'contents': 'def', 'highlight_group': 'user'}
						])
					with replace_attr(common, '_geteuid', lambda: 0):
						self.assertEqual(common.user(pl=pl, segment_info=segment_info), [
							{'contents': 'def', 'highlight_group': ['superuser', 'user']}
						])
	def test_branch(self):
		"""branch segment: clean/dirty status colors and no-repo case."""
		pl = Pl()
		segment_info = {'getcwd': os.getcwd}
		with replace_attr(common, 'guess', lambda path: Args(branch=lambda: os.path.basename(path), status=lambda: None, directory='/tmp/tests')):
			with replace_attr(common, 'tree_status', lambda repo, pl: None):
				self.assertEqual(common.branch(pl=pl, segment_info=segment_info, status_colors=False),
						[{'highlight_group': ['branch'], 'contents': 'tests'}])
				self.assertEqual(common.branch(pl=pl, segment_info=segment_info, status_colors=True),
						[{'contents': 'tests', 'highlight_group': ['branch_clean', 'branch']}])
		with replace_attr(common, 'guess', lambda path: Args(branch=lambda: os.path.basename(path), status=lambda: 'D ', directory='/tmp/tests')):
			with replace_attr(common, 'tree_status', lambda repo, pl: 'D '):
				self.assertEqual(common.branch(pl=pl, segment_info=segment_info, status_colors=False),
						[{'highlight_group': ['branch'], 'contents': 'tests'}])
				self.assertEqual(common.branch(pl=pl, segment_info=segment_info, status_colors=True),
						[{'contents': 'tests', 'highlight_group': ['branch_dirty', 'branch']}])
				self.assertEqual(common.branch(pl=pl, segment_info=segment_info, status_colors=False),
						[{'highlight_group': ['branch'], 'contents': 'tests'}])
		# no repository detected: the segment is hidden
		with replace_attr(common, 'guess', lambda path: None):
			self.assertEqual(common.branch(pl=pl, segment_info=segment_info, status_colors=False), None)
	def test_cwd(self):
		"""cwd segment: splitting, home substitution, depth/length limits,
		path-separator mode and error handling (ENOENT vs. other errors)."""
		new_os = new_module('os', path=os.path, sep='/')
		pl = Pl()
		cwd = [None]
		def getcwd():
			# returns the staged directory, or raises it if it is an exception
			wd = cwd[0]
			if isinstance(wd, Exception):
				raise wd
			else:
				return wd
		segment_info = {'getcwd': getcwd, 'home': None}
		with replace_attr(common, 'os', new_os):
			cwd[0] = '/abc/def/ghi/foo/bar'
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info), [
				{'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'abc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'def', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
			])
			segment_info['home'] = '/abc/def/ghi'
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']},
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1), [
				{'contents': '⋯', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True), [
				{'contents': '⋯/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
				{'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'fo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2, use_path_separator=True), [
				{'contents': '~/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'fo/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
				{'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_group': ['cwd:current_folder', 'cwd']}
			])
			# ENOENT is rendered as "[not found]"; other errors propagate
			ose = OSError()
			ose.errno = 2
			cwd[0] = ose
			self.assertEqual(common.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2),
				[{'contents': '[not found]', 'divider_highlight_group': 'cwd:divider', 'highlight_group': ['cwd:current_folder', 'cwd'], 'draw_inner_divider': True}])
			cwd[0] = OSError()
			self.assertRaises(OSError, common.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
			cwd[0] = ValueError()
			self.assertRaises(ValueError, common.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
	def test_date(self):
		"""date segment: strftime format pass-through and istime groups."""
		pl = Pl()
		with replace_attr(common, 'datetime', Args(now=lambda: Args(strftime=lambda fmt: fmt))):
			self.assertEqual(common.date(pl=pl), [{'contents': '%Y-%m-%d', 'highlight_group': ['date'], 'divider_highlight_group': None}])
			self.assertEqual(common.date(pl=pl, format='%H:%M', istime=True), [{'contents': '%H:%M', 'highlight_group': ['time', 'date'], 'divider_highlight_group': 'time:divider'}])
	def test_fuzzy_time(self):
		"""fuzzy_time segment: spot-check several clock readings."""
		time = Args(hour=0, minute=45)
		pl = Pl()
		with replace_attr(common, 'datetime', Args(now=lambda: time)):
			self.assertEqual(common.fuzzy_time(pl=pl), 'quarter to one')
			time.hour = 23
			time.minute = 59
			self.assertEqual(common.fuzzy_time(pl=pl), 'round about midnight')
			time.minute = 33
			self.assertEqual(common.fuzzy_time(pl=pl), 'twenty-five to twelve')
			time.minute = 60
			self.assertEqual(common.fuzzy_time(pl=pl), 'twelve o\'clock')
	def test_external_ip(self):
		"""external_ip segment with a stubbed urllib_read."""
		pl = Pl()
		with replace_attr(common, 'urllib_read', urllib_read):
			self.assertEqual(common.external_ip(pl=pl), [{'contents': '127.0.0.1', 'divider_highlight_group': 'background:divider'}])
	def test_uptime(self):
		"""uptime segment: formatting, shorten_len, and unsupported platforms."""
		pl = Pl()
		with replace_attr(common, '_get_uptime', lambda: 259200):
			self.assertEqual(common.uptime(pl=pl), [{'contents': '3d', 'divider_highlight_group': 'background:divider'}])
		with replace_attr(common, '_get_uptime', lambda: 93784):
			self.assertEqual(common.uptime(pl=pl), [{'contents': '1d 2h 3m', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(common.uptime(pl=pl, shorten_len=4), [{'contents': '1d 2h 3m 4s', 'divider_highlight_group': 'background:divider'}])
		with replace_attr(common, '_get_uptime', lambda: 65536):
			self.assertEqual(common.uptime(pl=pl), [{'contents': '18h 12m 16s', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(common.uptime(pl=pl, shorten_len=2), [{'contents': '18h 12m', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(common.uptime(pl=pl, shorten_len=1), [{'contents': '18h', 'divider_highlight_group': 'background:divider'}])
		# when no uptime backend is available the segment hides itself
		def _get_uptime():
			raise NotImplementedError
		with replace_attr(common, '_get_uptime', _get_uptime):
			self.assertEqual(common.uptime(pl=pl), None)
	def test_weather(self):
		"""weather segment: gradient bounds, icon overrides, units, temp_format."""
		pl = Pl()
		with replace_attr(common, 'urllib_read', urllib_read):
			self.assertEqual(common.weather(pl=pl), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': '☁ '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 30.0}
			])
			self.assertEqual(common.weather(pl=pl, temp_coldest=0, temp_hottest=100), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': '☁ '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 0}
			])
			self.assertEqual(common.weather(pl=pl, temp_coldest=-100, temp_hottest=-50), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': '☁ '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 100}
			])
			self.assertEqual(common.weather(pl=pl, icons={'cloudy': 'o'}), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'o '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 30.0}
			])
			self.assertEqual(common.weather(pl=pl, icons={'partly_cloudy_day': 'x'}), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'x '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 30.0}
			])
			self.assertEqual(common.weather(pl=pl, unit='F'), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': '☁ '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '16°F', 'gradient_level': 30.0}
			])
			self.assertEqual(common.weather(pl=pl, unit='K'), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': '☁ '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '264K', 'gradient_level': 30.0}
			])
			self.assertEqual(common.weather(pl=pl, temp_format='{temp:.1e}C'), [
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': '☁ '},
				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9.0e+00C', 'gradient_level': 30.0}
			])
	def test_system_load(self):
		"""system_load segment: gradient levels relative to CPU count/thresholds."""
		pl = Pl()
		with replace_module_module(common, 'os', getloadavg=lambda: (7.5, 3.5, 1.5)):
			with replace_attr(common, '_cpu_count', lambda: 2):
				self.assertEqual(common.system_load(pl=pl),
						[{'contents': '7.5 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
						{'contents': '3.5 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0},
						{'contents': '1.5', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 0}])
				self.assertEqual(common.system_load(pl=pl, format='{avg:.0f}', threshold_good=0, threshold_bad=1),
						[{'contents': '8 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
						{'contents': '4 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
						{'contents': '2', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0}])
	def test_cpu_load_percent(self):
		"""cpu_load_percent segment: default and custom format strings."""
		pl = Pl()
		with replace_module_module(common, 'psutil', cpu_percent=lambda **kwargs: 52.3):
			self.assertEqual(common.cpu_load_percent(pl=pl), [{
				'contents': '52%',
				'gradient_level': 52.3,
				'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'],
			}])
			self.assertEqual(common.cpu_load_percent(pl=pl, format='{0:.1f}%'), [{
				'contents': '52.3%',
				'gradient_level': 52.3,
				'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'],
			}])
def test_network_load(self):
    """network_load segment: drive the background sampler with a stubbed
    byte counter and check formatting, suffix and gradient options."""
    from time import sleep

    def gb(interface):
        # Initial stub: no counters available yet.
        return None

    f = [gb]

    def _get_bytes(interface):
        # Indirection through the mutable list `f` lets the test swap the
        # counter implementation while the sampler keeps polling.
        return f[0](interface)

    pl = Pl()
    with replace_attr(common, '_get_bytes', _get_bytes):
        common.network_load.startup(pl=pl)
        try:
            # No data yet: the segment yields None until a 'prev' sample exists.
            self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
            sleep(common.network_load.interval)
            self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
            while 'prev' not in common.network_load.interfaces.get('eth0', {}):
                sleep(0.1)
            self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
            l = [0, 0]

            def gb2(interface):
                # Each sample adds 1200 B received / 2400 B sent,
                # i.e. 1 KiB/s and 2 KiB/s at the sampler's cadence.
                l[0] += 1200
                l[1] += 2400
                return tuple(l)

            f[0] = gb2
            # Wait until the sampler has recorded a non-zero previous sample.
            while not common.network_load.interfaces.get('eth0', {}).get('prev', (None, None))[1]:
                sleep(0.1)
            self.assertEqual(common.network_load(pl=pl, interface='eth0'), [
                {'divider_highlight_group': 'background:divider', 'contents': '⬇ 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
                {'divider_highlight_group': 'background:divider', 'contents': '⬆ 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
            ])
            self.assertEqual(common.network_load(pl=pl, interface='eth0', recv_format='r {value}', sent_format='s {value}'), [
                {'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
                {'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
            ])
            self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', suffix='bps', interface='eth0'), [
                {'divider_highlight_group': 'background:divider', 'contents': 'r 1 Kibps', 'highlight_group': ['network_load_recv', 'network_load']},
                {'divider_highlight_group': 'background:divider', 'contents': 's 2 Kibps', 'highlight_group': ['network_load_sent', 'network_load']},
            ])
            self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', si_prefix=True, interface='eth0'), [
                {'divider_highlight_group': 'background:divider', 'contents': 'r 1 kB/s', 'highlight_group': ['network_load_recv', 'network_load']},
                {'divider_highlight_group': 'background:divider', 'contents': 's 2 kB/s', 'highlight_group': ['network_load_sent', 'network_load']},
            ])
            # recv_max=0 forces the receive side onto the gradient highlight groups.
            self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', recv_max=0, interface='eth0'), [
                {'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv_gradient', 'network_load_gradient', 'network_load_recv', 'network_load'], 'gradient_level': 100},
                {'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
            ])

            class ApproxEqual(object):
                # Matches any value within 1 of 50.0 (float gradient levels
                # depend on exact sampling times).
                def __eq__(self, i):
                    return abs(i - 50.0) < 1

            self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', sent_max=4800, interface='eth0'), [
                {'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
                {'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent_gradient', 'network_load_gradient', 'network_load_sent', 'network_load'], 'gradient_level': ApproxEqual()},
            ])
        finally:
            common.network_load.shutdown()
def test_virtualenv(self):
    """virtualenv segment shows the basename of $VIRTUAL_ENV, or None if unset."""
    pl = Pl()
    with replace_env('VIRTUAL_ENV', '/abc/def/ghi') as segment_info:
        self.assertEqual(common.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
        segment_info['environ'].pop('VIRTUAL_ENV')
        self.assertEqual(common.virtualenv(pl=pl, segment_info=segment_info), None)
def test_environment(self):
    """environment segment returns the requested variable's value, or None
    once the variable is removed from the environment."""
    pl = Pl()
    # Fix: the original lines carried stray C-style trailing semicolons.
    variable = 'FOO'
    value = 'bar'
    with replace_env(variable, value) as segment_info:
        self.assertEqual(common.environment(pl=pl, segment_info=segment_info, variable=variable), value)
        segment_info['environ'].pop(variable)
        self.assertEqual(common.environment(pl=pl, segment_info=segment_info, variable=variable), None)
def test_email_imap_alert(self):
    """Placeholder — segment not covered yet (see TODO)."""
    # TODO
    pass

def test_now_playing(self):
    """Placeholder — segment not covered yet (see TODO)."""
    # TODO
    pass
def test_battery(self):
    """battery segment: capacity is stubbed at 86%."""
    pl = Pl()

    def _get_capacity():
        return 86

    with replace_attr(common, '_get_capacity', _get_capacity):
        # With the default step count 86% is rendered as '80%'.
        self.assertEqual(common.battery(pl=pl), [{
            'contents': '80%',
            'highlight_group': ['battery_gradient', 'battery'],
            'gradient_level': 80.0
        }])
        # {batt} exposes the capacity as a 0..1 fraction.
        self.assertEqual(common.battery(pl=pl, format='{batt:.2f}'), [{
            'contents': '0.80',
            'highlight_group': ['battery_gradient', 'battery'],
            'gradient_level': 80.0
        }])
        # With 7 steps the displayed value stays at 86%.
        self.assertEqual(common.battery(pl=pl, steps=7), [{
            'contents': '86%',
            'highlight_group': ['battery_gradient', 'battery'],
            'gradient_level': 85.71428571428571
        }])
        # gamify=True renders filled/empty hearts as two separate segments.
        self.assertEqual(common.battery(pl=pl, gamify=True), [
            {
                'contents': '♥♥♥♥',
                'draw_soft_divider': False,
                'highlight_group': ['battery_gradient', 'battery'],
                'gradient_level': 99
            },
            {
                'contents': '♥',
                'draw_soft_divider': False,
                'highlight_group': ['battery_gradient', 'battery'],
                'gradient_level': 1
            }
        ])
class TestVim(TestCase):
    """Tests for powerline.segments.vim.

    `vim_module` supplies a fake vim environment (buffers, options, modes);
    the real segment module is imported lazily in setUpModule() below.
    """

    def test_mode(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.mode(pl=pl, segment_info=segment_info), 'NORMAL')
        # Overrides for other modes must not affect the current one.
        self.assertEqual(vim.mode(pl=pl, segment_info=segment_info, override={'i': 'INS'}), 'NORMAL')
        self.assertEqual(vim.mode(pl=pl, segment_info=segment_info, override={'n': 'NORM'}), 'NORM')
        with vim_module._with('mode', 'i') as segment_info:
            self.assertEqual(vim.mode(pl=pl, segment_info=segment_info), 'INSERT')
        # chr(ord('V') - 0x40) is CTRL-V, i.e. visual-block mode.
        with vim_module._with('mode', chr(ord('V') - 0x40)) as segment_info:
            self.assertEqual(vim.mode(pl=pl, segment_info=segment_info), 'V·BLCK')
            self.assertEqual(vim.mode(pl=pl, segment_info=segment_info, override={'^V': 'VBLK'}), 'VBLK')

    def test_visual_range(self):
        # TODO
        pass

    def test_modified_indicator(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.modified_indicator(pl=pl, segment_info=segment_info), None)
        # Touch the buffer so it counts as modified.
        segment_info['buffer'][0] = 'abc'
        try:
            self.assertEqual(vim.modified_indicator(pl=pl, segment_info=segment_info), '+')
            self.assertEqual(vim.modified_indicator(pl=pl, segment_info=segment_info, text='-'), '-')
        finally:
            # Wipe the modified buffer so later tests start clean.
            vim_module._bw(segment_info['bufnr'])

    def test_paste_indicator(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.paste_indicator(pl=pl, segment_info=segment_info), None)
        with vim_module._with('options', paste=1):
            self.assertEqual(vim.paste_indicator(pl=pl, segment_info=segment_info), 'PASTE')
            self.assertEqual(vim.paste_indicator(pl=pl, segment_info=segment_info, text='P'), 'P')

    def test_readonly_indicator(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.readonly_indicator(pl=pl, segment_info=segment_info), None)
        with vim_module._with('bufoptions', readonly=1):
            self.assertEqual(vim.readonly_indicator(pl=pl, segment_info=segment_info), '')
            self.assertEqual(vim.readonly_indicator(pl=pl, segment_info=segment_info, text='L'), 'L')

    def test_file_directory(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), None)
        with replace_env('HOME', '/home/foo', os.environ):
            with vim_module._with('buffer', '/tmp/abc') as segment_info:
                self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/')
                # When the buffer lives under $HOME the path is abbreviated to '~/'.
                os.environ['HOME'] = '/tmp'
                self.assertEqual(vim.file_directory(pl=pl, segment_info=segment_info), '~/')

    def test_file_name(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info), None)
        self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info, display_no_file=True),
                         [{'contents': '[No file]', 'highlight_group': ['file_name_no_file', 'file_name']}])
        self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info, display_no_file=True, no_file_text='X'),
                         [{'contents': 'X', 'highlight_group': ['file_name_no_file', 'file_name']}])
        with vim_module._with('buffer', '/tmp/abc') as segment_info:
            self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info), 'abc')
        # Non-ASCII file names must round-trip unchanged.
        with vim_module._with('buffer', '/tmp/’’') as segment_info:
            self.assertEqual(vim.file_name(pl=pl, segment_info=segment_info), '’’')

    def test_file_size(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.file_size(pl=pl, segment_info=segment_info), '0 B')
        with vim_module._with('buffer', os.path.join(os.path.dirname(__file__), 'empty')) as segment_info:
            self.assertEqual(vim.file_size(pl=pl, segment_info=segment_info), '0 B')

    def test_file_opts(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.file_format(pl=pl, segment_info=segment_info),
                         [{'divider_highlight_group': 'background:divider', 'contents': 'unix'}])
        self.assertEqual(vim.file_encoding(pl=pl, segment_info=segment_info),
                         [{'divider_highlight_group': 'background:divider', 'contents': 'utf-8'}])
        self.assertEqual(vim.file_type(pl=pl, segment_info=segment_info), None)
        with vim_module._with('bufoptions', filetype='python'):
            self.assertEqual(vim.file_type(pl=pl, segment_info=segment_info),
                             [{'divider_highlight_group': 'background:divider', 'contents': 'python'}])

    def test_line_percent(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        # Fill the buffer with 100 lines so percentages are meaningful.
        segment_info['buffer'][0:-1] = [str(i) for i in range(100)]
        try:
            self.assertEqual(vim.line_percent(pl=pl, segment_info=segment_info), '1')
            vim_module._set_cursor(50, 0)
            self.assertEqual(vim.line_percent(pl=pl, segment_info=segment_info), '50')
            self.assertEqual(vim.line_percent(pl=pl, segment_info=segment_info, gradient=True),
                             [{'contents': '50', 'highlight_group': ['line_percent_gradient', 'line_percent'], 'gradient_level': 50 * 100.0 / 101}])
        finally:
            vim_module._bw(segment_info['bufnr'])

    def test_cursor_current(self):
        pl = Pl()
        segment_info = vim_module._get_segment_info()
        self.assertEqual(vim.line_current(pl=pl, segment_info=segment_info), '1')
        self.assertEqual(vim.col_current(pl=pl, segment_info=segment_info), '1')
        self.assertEqual(vim.virtcol_current(pl=pl, segment_info=segment_info), [{
            'highlight_group': ['virtcol_current_gradient', 'virtcol_current', 'col_current'], 'contents': '1', 'gradient_level': 100.0 / 80,
        }])
        self.assertEqual(vim.virtcol_current(pl=pl, segment_info=segment_info, gradient=False), [{
            'highlight_group': ['virtcol_current', 'col_current'], 'contents': '1',
        }])

    def test_modified_buffers(self):
        pl = Pl()
        self.assertEqual(vim.modified_buffers(pl=pl), None)

    def test_branch(self):
        pl = Pl()
        with vim_module._with('buffer', '/foo') as segment_info:
            # `guess` is stubbed so the "repository" branch equals the path's basename.
            with replace_attr(vim, 'guess', lambda path: Args(branch=lambda: os.path.basename(path), status=lambda: None, directory=path)):
                with replace_attr(vim, 'tree_status', lambda repo, pl: None):
                    self.assertEqual(vim.branch(pl=pl, segment_info=segment_info, status_colors=False),
                                     [{'divider_highlight_group': 'branch:divider', 'highlight_group': ['branch'], 'contents': 'foo'}])
                    self.assertEqual(vim.branch(pl=pl, segment_info=segment_info, status_colors=True),
                                     [{'divider_highlight_group': 'branch:divider', 'highlight_group': ['branch_clean', 'branch'], 'contents': 'foo'}])
            # A dirty tree status switches the highlight group when status_colors=True.
            with replace_attr(vim, 'guess', lambda path: Args(branch=lambda: os.path.basename(path), status=lambda: 'DU', directory=path)):
                with replace_attr(vim, 'tree_status', lambda repo, pl: 'DU'):
                    self.assertEqual(vim.branch(pl=pl, segment_info=segment_info, status_colors=False),
                                     [{'divider_highlight_group': 'branch:divider', 'highlight_group': ['branch'], 'contents': 'foo'}])
                    self.assertEqual(vim.branch(pl=pl, segment_info=segment_info, status_colors=True),
                                     [{'divider_highlight_group': 'branch:divider', 'highlight_group': ['branch_dirty', 'branch'], 'contents': 'foo'}])

    def test_file_vcs_status(self):
        pl = Pl()
        with vim_module._with('buffer', '/foo') as segment_info:
            with replace_attr(vim, 'guess', lambda path: Args(branch=lambda: os.path.basename(path), status=lambda file: 'M', directory=path)):
                self.assertEqual(vim.file_vcs_status(pl=pl, segment_info=segment_info),
                                 [{'highlight_group': ['file_vcs_status_M', 'file_vcs_status'], 'contents': 'M'}])
            with replace_attr(vim, 'guess', lambda path: Args(branch=lambda: os.path.basename(path), status=lambda file: None, directory=path)):
                self.assertEqual(vim.file_vcs_status(pl=pl, segment_info=segment_info), None)
        with vim_module._with('buffer', '/bar') as segment_info:
            # nofile buffers must not report any VCS status.
            with vim_module._with('bufoptions', buftype='nofile'):
                with replace_attr(vim, 'guess', lambda path: Args(branch=lambda: os.path.basename(path), status=lambda file: 'M', directory=path)):
                    self.assertEqual(vim.file_vcs_status(pl=pl, segment_info=segment_info), None)
old_cwd = None
def setUpModule():
    """Prepare path/cwd and import powerline.segments.vim into module globals."""
    global old_cwd
    global __file__
    # Prepend tests/path — presumably so a stub `vim` module shadows any real
    # one before powerline.segments.vim is imported (TODO confirm).
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'path')))
    old_cwd = os.getcwd()
    # Absolutise __file__ before chdir so later os.path.dirname(__file__) calls work.
    __file__ = os.path.abspath(__file__)
    os.chdir(os.path.dirname(__file__))
    # Import must happen after the sys.path tweak.
    from powerline.segments import vim
    globals()['vim'] = vim
def tearDownModule():
    """Undo setUpModule: restore the working directory and sys.path."""
    global old_cwd
    os.chdir(old_cwd)
    sys.path.pop(0)
# Allow running this test module directly via the project's test runner.
if __name__ == '__main__':
    from tests import main
    main()
| StarcoderdataPython |
9611033 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, <NAME> <<EMAIL>>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_package
version_added: "1.7"
author: <NAME>
short_description: Installs/Uninstalls an installable package, either from local file system or url
description:
- Installs or uninstalls a package.
- 'Optionally uses a product_id to check if the package needs installing. You can find product ids for installed programs in the windows registry either in C(HKLM:Software\\Microsoft\\Windows\CurrentVersion\\Uninstall) or for 32 bit programs C(HKLM:Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall)'
options:
path:
description:
- Location of the package to be installed (either on file system, network share or url)
required: true
name:
description:
- Name of the package, if name isn't specified the path will be used for log messages
required: false
default: null
product_id:
description:
- product id of the installed package (used for checking if already installed)
- You can find product ids for installed programs in the windows registry either in C(HKLM:Software\\Microsoft\\Windows\CurrentVersion\\Uninstall) or for 32 bit programs C(HKLM:Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall)'
required: true
aliases: [productid]
arguments:
description:
- Any arguments the installer needs
default: null
required: false
state:
description:
- Install or Uninstall
choices:
- present
- absent
default: present
required: false
aliases: [ensure]
user_name:
description:
- Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
default: null
required: false
user_password:
description:
- Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
default: null
required: false
'''
EXAMPLES = '''
# Playbook example
- name: Install the vc thingy
win_package:
name="Microsoft Visual C thingy"
path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
Product_Id="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
Arguments="/install /passive /norestart"
# Install/uninstall an msi-based package
- name: Install msi-based package (Remote Desktop Connection Manager)
win_package:
path: "https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi"
product_id: "{0240359E-6A4C-4884-9E94-B397A02D893C}"
- name: Uninstall msi-based package
win_package:
path: "https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi"
product_id: "{0240359E-6A4C-4884-9E94-B397A02D893C}"
state: absent
'''
| StarcoderdataPython |
3409802 | """The SETools SELinux policy analysis library."""
# Copyright 2014-2015, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
__version__ = "4.0.1"
import logging
# Python classes for policy representation
from . import policyrep
from .policyrep import SELinuxPolicy
# Exceptions
from . import exception
# Component Queries
from .boolquery import BoolQuery
from .categoryquery import CategoryQuery
from .commonquery import CommonQuery
from .objclassquery import ObjClassQuery
from .polcapquery import PolCapQuery
from .rolequery import RoleQuery
from .sensitivityquery import SensitivityQuery
from .typequery import TypeQuery
from .typeattrquery import TypeAttributeQuery
from .userquery import UserQuery
# Rule Queries
from .mlsrulequery import MLSRuleQuery
from .rbacrulequery import RBACRuleQuery
from .terulequery import TERuleQuery
# Constraint queries
from .constraintquery import ConstraintQuery
# Other queries
from .boundsquery import BoundsQuery
from .defaultquery import DefaultQuery
# In-policy Context Queries
from .fsusequery import FSUseQuery
from .genfsconquery import GenfsconQuery
from .initsidquery import InitialSIDQuery
from .netifconquery import NetifconQuery
from .nodeconquery import NodeconQuery
from .portconquery import PortconQuery
from .ioportconquery import IoportconQuery
from .iomemconquery import IomemconQuery
from .pirqconquery import PirqconQuery
from .pcideviceconquery import PcideviceconQuery
from .devicetreeconquery import DevicetreeconQuery
# Information Flow Analysis
from .infoflow import InfoFlowAnalysis
from .permmap import PermissionMap
# Domain Transition Analysis
from .dta import DomainTransitionAnalysis
# Policy difference
from .diff import PolicyDifference
logging.getLogger(__name__).addHandler(logging.NullHandler())
| StarcoderdataPython |
11216353 | <reponame>JulyKikuAkita/PythonPrac<filename>cs15211/RobotReturntoOrigin.py<gh_stars>1-10
__source__ = 'https://leetcode.com/problems/robot-return-to-origin/'
# Time: O(N)
# Space: O(1)
#
# Description: Leetcode # 657. Robot Return to Origin
#
# There is a robot starting at position (0, 0), the origin, on a 2D plane.
# Given a sequence of its moves, judge if this robot ends up at (0, 0) after it completes its moves.
#
# The move sequence is represented by a string, and the character moves[i] represents its ith move.
# Valid moves are R (right), L (left), U (up), and D (down).
# If the robot returns to the origin after it finishes all of its moves,
# return true. Otherwise, return false.
#
#
# Note: The way that the robot is "facing" is irrelevant.
# "R" will always make the robot move to the right once,
# "L" will always make it move left, etc.
# Also, assume that the magnitude of the robot's movement is the same for each move.
#
# Example 1:
#
# Input: "UD"
# Output: true
# Explanation: The robot moves up once, and then down once.
# All moves have the same magnitude,
# so it ended up at the origin where it started. Therefore, we return true.
#
# Example 2:
#
# Input: "LL"
# Output: false
# Explanation: The robot moves left twice. It ends up two "moves" to the left of the origin.
# We return false because it is not at the origin at the end of its moves.
#
import unittest
import collections
# 120ms 33.66%
class Solution(object):
    def judgeCircle(self, moves):
        """Return True iff the move sequence ends back at the origin.

        :type moves: str
        :rtype: bool
        """
        # Back at (0, 0) exactly when horizontal and vertical moves cancel.
        horizontal = moves.count('L') - moves.count('R')
        vertical = moves.count('U') - moves.count('D')
        return horizontal == 0 and vertical == 0
class TestMethods(unittest.TestCase):
    """Local smoke test placeholder."""

    def test_Local(self):
        # Trivial sanity assertion.
        self.assertEqual(1, 1)

if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/robot-return-to-origin/solution/
#
# 4ms 100%
class Solution {
public boolean judgeCircle(String moves) {
int[] table = new int[128];
for (char c : moves.toCharArray()) {
table[c]++;
}
if (table[68] == table[85] && table[76] == table[82]) return true;
else return false;
}
}
# 8ms 93.56%
class Solution {
public boolean judgeCircle(String moves) {
int x = 0;
int y = 0;
for (char ch : moves.toCharArray()) {
if (ch == 'U') y++;
if (ch == 'D') y--;
if (ch == 'R') x++;
if (ch == 'L') x--;
}
return x == 0 && y == 0;
}
}
'''
| StarcoderdataPython |
3349121 | <gh_stars>0
from django.contrib.admin.apps import AdminConfig
class MyAdminConfig(AdminConfig):
    """Admin app config that swaps in the project's custom admin site."""
    # Dotted path to the AdminSite subclass Django should use as the default.
    default_site = 'fxm.admin.MyAdminSite'
9720969 | import requests
from subprocess import check_output
class TestMicrok8sBranches(object):
    def test_branches(self):
        """Ensures LP builders push to correct snap tracks.

        We need to make sure the LP builders pointing to the master github branch are only pushing
        to the latest and current k8s stable snap tracks. An indication that this is not enforced is
        that we do not have a branch for the k8s release for the previous stable release. Let me
        clarify with an example.

        Assuming upstream stable k8s release is v1.12.x, there has to be a 1.11 github branch used
        by the respective LP builders for building the v1.11.y.
        """
        upstream_version = self._upstream_release()
        assert upstream_version
        version_parts = upstream_version.split('.')
        # version_parts[0] is like 'v1'; strip the leading 'v'.
        major_minor_upstream_version = "{}.{}".format(version_parts[0][1:], version_parts[1])
        if version_parts[1] != "0":
            # Same major series: just step the minor back by one.
            prev_major_minor_version = "{}.{}".format(
                version_parts[0][1:], int(version_parts[1]) - 1
            )
        else:
            # Crossing a major boundary: probe for the last minor of the previous major.
            major = int(version_parts[0][1:]) - 1
            minor = self._get_max_minor(major)
            prev_major_minor_version = "{}.{}".format(major, minor)
        print(
            "Current stable is {}. Making sure we have a branch for {}".format(
                major_minor_upstream_version, prev_major_minor_version
            )
        )
        cmd = "git ls-remote --heads http://github.com/ubuntu/microk8s.git {}".format(
            prev_major_minor_version
        )
        branch = check_output(cmd.split()).decode("utf-8")
        assert prev_major_minor_version in branch

    def _upstream_release(self):
        """Return the latest stable k8s release tag (e.g. 'v1.12.3'), or None on failure."""
        release_url = "https://dl.k8s.io/release/stable.txt"
        r = requests.get(release_url)
        if r.status_code == 200:
            return r.content.decode().strip()
        # Fix: the original evaluated a bare `None` expression here; the function
        # still returned None, but only via Python's implicit fall-through.
        return None

    def _get_max_minor(self, major):
        """Get the latest minor release of the provided major.

        For example if you use 1 as major you will get back X where X gives you latest 1.X release.
        """
        minor = 0
        while self._upstream_release_exists(major, minor):
            minor += 1
        return minor - 1

    def _upstream_release_exists(self, major, minor):
        """Return true if the major.minor release exists"""
        release_url = "https://dl.k8s.io/release/stable-{}.{}.txt".format(major, minor)
        r = requests.get(release_url)
        # Direct boolean result instead of the original if/else returning True/False.
        return r.status_code == 200
| StarcoderdataPython |
1629613 | from django.core.urlresolvers import reverse
from django.test import TestCase
import json
from myshop.models import Product
from myshop.models.manufacturer import Manufacturer
class ProductSelectViewTest(TestCase):
    """Tests for the shop:select-product lookup endpoint."""

    def setUp(self):
        # One product is enough to exercise both the hit and miss paths.
        manufacturer = Manufacturer.objects.create(name="testmanufacturer")
        Product.objects.create(product_name="testproduct1", order=1, manufacturer=manufacturer)

    def test_finds_product_case_insensitive(self):
        # Capitalised 'Prod' must still match 'testproduct1'.
        response = self.client.get(reverse('shop:select-product') + "?term=Prod")
        data = json.loads(response.content.decode("utf-8"))
        self.assertEqual(data['count'], 1)
        self.assertEqual(data['results'][0]['text'], "testproduct1")

    def test_bogus_query_finds_nothing(self):
        response = self.client.get(reverse('shop:select-product') + "?term=whatever")
        data = json.loads(response.content.decode("utf-8"))
        self.assertEqual(data['count'], 0)
| StarcoderdataPython |
6558283 | # recreate_pnx_record.py
""" Given an identifier, get a PNX record, modify it as needed, and output replacement """
import sys
from urllib import error
from get_existing_pnx_record import get_pnx_xml_given_docid # , get_pnx_given_filename
from modify_existing_pnx_record import modify_existing_pnx_record, get_unique_identifier_from_original_pnx
from write_pnx_file import write_pnx_file
from file_system_utilities import create_directory
def recreate_pnx_record(primo_doc_id):
    """Create a new PNX record given an existing PNX record.

    Fetches the PNX XML for `primo_doc_id`, derives its unique identifier,
    applies the local modifications, and writes the corrected record to
    pnx/<unique_identifier>.xml.  HTTP fetch failures are reported and the
    record is skipped.
    """
    repository = 'snite'
    try:
        pnx_xml = get_pnx_xml_given_docid(primo_doc_id)
    except error.HTTPError:
        # Without a valid PNX record there is nothing to rewrite.
        # (The original also had a redundant `pass` after this print.)
        print('HTTPError encountered')
    else:
        unique_identifier = get_unique_identifier_from_original_pnx(pnx_xml)
        corrected_pnx_xml = modify_existing_pnx_record(pnx_xml, repository, unique_identifier)
        pnx_directory = 'pnx'
        create_directory(pnx_directory)
        write_pnx_file(pnx_directory, unique_identifier + '.xml', corrected_pnx_xml)
if __name__ == "__main__":
    # Fix: the original tested `len(sys.argv) >= 1`, which is always true
    # (argv[0] is the script name), and then indexed sys.argv[1] — raising
    # IndexError whenever no document id was supplied.
    primo_doc_id = sys.argv[1] if len(sys.argv) > 1 else ''
    if primo_doc_id:
        recreate_pnx_record(primo_doc_id)
| StarcoderdataPython |
88733 | import hashlib
import pickle
import os
import sys
import subprocess
import time
class Colors:
    """ANSI escape sequences used to colourise terminal output."""
    HEADER = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
cache = {}
hashes = []
def get_cache():
    """Load previously saved test results from tests/results.dat into `cache`.

    If the results file does not exist yet, just make sure the `tests`
    directory exists so save_cache() can write to it later.
    """
    global cache
    try:
        with open('tests/results.dat', 'rb') as afile:
            cache = pickle.load(afile)
    except FileNotFoundError:
        # exist_ok: the directory may already exist with no results file in it;
        # the original bare os.makedirs('tests') raised FileExistsError then.
        os.makedirs('tests', exist_ok=True)
def save_cache():
    """Drop cached results for scripts not hashed this run, then persist `cache`."""
    stale = [k for k in cache.keys() if k not in hashes]
    for k in stale:
        del cache[k]
    # Context manager guarantees the file is flushed and closed
    # (the original never closed the handle).
    with open('tests/results.dat', 'wb') as afile:
        pickle.dump(cache, afile)
def get_tests_results(question):
    """Run every test case under `question`'s folder against `question`.py.

    Results are memoised by the MD5 digest of the script, so unchanged
    scripts are not re-executed between runs.
    Returns a (right_answers, total_answers) tuple.
    """
    global cache
    main_folder = question
    script = '{0}.py'.format(question)
    hasher = hashlib.md5()
    with open(script, 'rb') as f:
        hasher.update(f.read())
    hsh = hasher.digest()
    # Record the hash as "seen this run" so save_cache keeps its entry.
    hashes.append(hsh)
    if hsh in cache:
        return cache[hsh]
    tests = os.listdir(main_folder)
    right_answers = 0
    total_answers = 0
    for test_folder in tests:
        test_folder_name = '{0}/{1}'.format(main_folder, test_folder)
        tests_cases = [case for case in os.listdir(test_folder_name)]
        # Cases come in in<N>/out<N> pairs, hence len/2 iterations.
        for index in range(int(len(tests_cases) / 2)):
            case_input_name = '{0}/{1}{2}'.format(
                test_folder_name, 'in', index + 1)
            case_output_name = '{0}/{1}{2}'.format(
                test_folder_name, 'out', index + 1)
            # NOTE(review): neither input nor output file is ever closed —
            # consider context managers here.
            input_file = open(case_input_name)
            proc = subprocess.Popen(
                ['python', script], stdin=input_file,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            result = proc.communicate()[0].decode('utf-8').strip()
            output_file = open(case_output_name).read().strip()
            # Progress indicator: '.' for pass, 'F' for fail.
            if result == output_file:
                right_answers += 1
                sys.stdout.write('.')
            else:
                sys.stdout.write('F')
            total_answers += 1
            sys.stdout.flush()
    cache[hsh] = (right_answers, total_answers)
    return cache[hsh]
if __name__ == '__main__':
    # Walk obi*/level/stage/question.py, run each question's test cases and
    # print a per-question pass count; exit non-zero if anything failed.
    is_successful = True
    obis = [f for f in os.listdir('.') if os.path.isdir(f) and ('obi' in f)]
    obis.sort()
    get_cache()
    for obi in obis:
        print(obi.upper())
        levels = os.listdir(obi)
        for level in levels:
            print(' {0}'.format(level))
            level = '{0}/{1}'.format(obi, level)
            stages = os.listdir(level)
            for stage in stages:
                print(' {0}'.format(stage))
                stage = '{0}/{1}'.format(level, stage)
                questions = [f for f in os.listdir(stage) if f.endswith('.py')]
                for question in questions:
                    s = time.time()
                    name = question
                    print(' {0} '.format(name), end='')
                    question = '{0}/{1}'.format(stage, question)
                    folder = '{0}/{1}'.format(stage, os.path.splitext(name)[0])
                    if os.path.isdir(folder):
                        right_answers, total_answers = get_tests_results(folder)
                        if right_answers < total_answers:
                            is_successful = False
                        # NOTE(review): `color` is computed but never used —
                        # presumably the counts were meant to be colourised; confirm.
                        color = Colors.GREEN if right_answers == total_answers else Colors.FAIL
                        print(' {1}/{2}'.format(name, right_answers, total_answers), end='')
                    print(' [{:.2f}s]'.format(time.time() - s))
    save_cache()
    if not is_successful:
        exit(1)
    exit(0)
| StarcoderdataPython |
9628412 | <gh_stars>1-10
from simplematrixbotlib.api import Api
from simplematrixbotlib.auth import Creds
from simplematrixbotlib.bot import Bot
from simplematrixbotlib.callbacks import Callbacks
from simplematrixbotlib.match import MessageMatch
from simplematrixbotlib.listener import Listener | StarcoderdataPython |
3583199 | <reponame>akatashev/chouette-iot<gh_stars>1-10
import time
import pytest
from chouette_iot.metrics._metrics import MergedMetric, WrappedMetric
def test_merged_metric_successfull_merge() -> None:
    """
    MergedMetrics of the same type can be merged.

    GIVEN: There are 2 MergedMetric objects with the same name, type and tags.
    WHEN: One metric is added to another.
    THEN: It returns a new MergedMetric of the same type with merged values
    and timestamps.
    """
    metric1 = MergedMetric(
        metric="name", type="type", values=[1], timestamps=[2], tags={"tag": "1"}
    )
    metric2 = MergedMetric(
        metric="name", type="type", values=[3], timestamps=[4], tags={"tag": "1"}
    )
    result = metric1 + metric2
    # Identity fields are preserved; values/timestamps are concatenated.
    assert result.metric == "name"
    assert result.type == "type"
    assert result.tags == {"tag": "1"}
    assert result.timestamps == [2, 4]
    assert result.values == [1, 3]


def test_merged_metric_unsuccessful_merge() -> None:
    """
    MergedMetrics of different types can't be merged.

    GIVEN: There are 2 MergedMetric objects with different names.
    WHEN: One metric is added to another.
    THEN: ValueError exception is raised.
    """
    metric1 = MergedMetric(
        metric="name", type="type1", values=[1], timestamps=[2], tags={"tag": "1"}
    )
    metric2 = MergedMetric(
        metric="name", type="type2", values=[3], timestamps=[4], tags={"tag": "1"}
    )
    # The addition itself must raise; no result is bound.
    with pytest.raises(ValueError):
        metric1 + metric2
def test_merged_metric_str_and_repr() -> None:
    """
    MergedMetric:
    __str__, __repr__ and asdict tests.
    """
    expected_dict = {
        "metric": "mergedMetric",
        "type": "count",
        "values": [1],
        "timestamps": [2],
        "tags": {"test": "test"},
        "interval": 10,
    }
    metric = MergedMetric(
        metric="mergedMetric",
        type="count",
        values=[1],
        timestamps=[2],
        tags={"test": "test"},
    )
    metric_dict = metric.asdict()
    # str() mirrors the dict form; repr() wraps it in the class name.
    assert metric_dict == expected_dict
    assert str(metric) == str(metric_dict)
    assert repr(metric) == f"<MergedMetric: {str(metric_dict)}>"


def test_wrapped_metric_str_and_repr_no_interval() -> None:
    """
    WrappedMetric:
    __str__, __repr__ and asdict tests.
    """
    expected_dict = {
        "metric": "wrappedMetric",
        "type": "count",
        "points": [[2, 1]],
        "tags": [],
    }
    metric = WrappedMetric(metric="wrappedMetric", type="count", value=1, timestamp=2)
    metric_dict = metric.asdict()
    assert metric_dict == expected_dict
    assert str(metric) == str(metric_dict)
    assert repr(metric) == f"<WrappedMetric: {str(metric_dict)}>"


def test_wrapped_metric_str_and_repr_with_interval() -> None:
    """
    WrappedMetric:
    __str__, __repr__ and asdict tests.
    """
    # Same as above, but an explicit interval must show up in the dict.
    expected_dict = {
        "metric": "wrappedMetric",
        "type": "count",
        "points": [[2, 1]],
        "tags": [],
        "interval": 10,
    }
    metric = WrappedMetric(
        metric="wrappedMetric", type="count", value=1, timestamp=2, interval=10
    )
    metric_dict = metric.asdict()
    assert metric_dict == expected_dict
    assert str(metric) == str(metric_dict)
    assert repr(metric) == f"<WrappedMetric: {str(metric_dict)}>"
def test_wrapped_metric_gets_timestamp():
"""
WrappedMetric instances are expected to get an actual timestamp if
it didn't receive a "timestamp" keyword value.
"""
now = int(time.time())
metric = WrappedMetric(metric="wrappedMetric", type="count", value=1)
assert metric.timestamp >= now
@pytest.mark.parametrize("second_type, are_equal", [("count", True), ("gauge", False)])
def test_metrics_equality(second_type, are_equal):
"""
Metrics are considered equal if their dicts are equal.
"""
metric1 = WrappedMetric(metric="wrappedMetric", type="count", value=1, timestamp=2)
metric2 = WrappedMetric(
metric="wrappedMetric", type=second_type, value=1, timestamp=2
)
assert (metric1 == metric2) is are_equal
@pytest.mark.parametrize("not_a_metric", [None, "Not a metric", 11011010])
# NOTE(review): "netric" in the test name is a typo for "metric"; the name is
# kept so selections by test id keep working.
def test_comparing_a_metric_with_not_a_netric(not_a_metric):
    """A metric never compares equal to a non-metric object."""
    wrapped = WrappedMetric(metric="wrappedMetric", type="count", value=1)
    assert wrapped != not_a_metric
| StarcoderdataPython |
8174032 | import numpy as np
def nelson_siegel_yield(tau, theta):
"""For details, see here.
Parameters
----------
tau : array, shape (n_,)
theta : array, shape (4,)
Returns
-------
y : array, shape (n_,)
"""
y = theta[0] - theta[1] * \
((1 - np.exp(-theta[3] * tau)) /
(theta[3] * tau)) + theta[2] * \
((1 - np.exp(-theta[3] * tau)) /
(theta[3] * tau) - np.exp(-theta[3] * tau))
return np.squeeze(y)
| StarcoderdataPython |
230964 | #__getitem__ not implemented yet
#a = bytearray(b'abc')
#assert a[0] == b'a'
#assert a[1] == b'b'
# Smoke tests for the bytearray "is*" predicate methods.
assert len(bytearray([1,2,3])) == 3
# isalnum: every byte must be a letter or digit.
assert bytearray(b'1a23').isalnum()
assert not bytearray(b'1%a23').isalnum()
# isalpha: letters only.
assert bytearray(b'abc').isalpha()
assert not bytearray(b'abc1').isalpha()
# travis doesn't like this
#assert bytearray(b'xyz').isascii()
#assert not bytearray([128, 157, 32]).isascii()
# isdigit / islower / isspace / isupper / istitle, positive and negative cases.
assert bytearray(b'1234567890').isdigit()
assert not bytearray(b'12ab').isdigit()
assert bytearray(b'lower').islower()
assert not bytearray(b'Super Friends').islower()
assert bytearray(b' \n\t').isspace()
assert not bytearray(b'\td\n').isspace()
assert bytearray(b'UPPER').isupper()
assert not bytearray(b'tuPpEr').isupper()
assert bytearray(b'Is Title Case').istitle()
assert not bytearray(b'is Not title casE').istitle()
9717522 | <gh_stars>1-10
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import scipy
import pylab
import scipy.linalg as sl
import random
from collections import defaultdict
import h5py
def normalize(x):
    """Scale vector x to unit Euclidean length; zero vectors are returned unchanged."""
    # NOTE(review): scipy.sqrt/scipy.inner are legacy NumPy aliases that have
    # been removed from modern SciPy releases - confirm the pinned version.
    n = scipy.sqrt(scipy.inner(x,x))
    #n = sl.norm(x, scipy.inf)
    if n > 0:
        return x/n
    else:
        return x
class LSH(object):
    """
    Locality-sensitive hashing with random sign-projection hash functions.
    Example:
    db = LSH(data, L, k)
    label = db.query(y)
    """
    def __init__(self, L, k, M):
        """
        Initializes a list of L dictionaries of sets (one hash table per
        projection), for k-bit keys over M-dimensional points.
        SELF.TABLE is a list.
        SELF.TABLE[l] is a dictionary.
        SELF.TABLE[l][k] is a set of labels.
        """
        self.table = [defaultdict(set) for l in range(L)]
        self.k = k
        self.L = L
        self.M = M
        self.randomize()
    def randomize(self):
        """Draw fresh random projections: one (k, M) Gaussian matrix per table."""
        self.projections = [scipy.randn(self.k, self.M) for l in range(self.L)]
        #self.projections = [scipy.random.standard_cauchy((self.k, self.M)) for l in range(self.L)]
    def keys(self, x):
        """
        Returns a list of keys for point x, one integer key per table:
        the sign pattern of the k projections packed into the key's bits.
        """
        allkeys = []
        for P in self.projections:
            key = 0
            y = scipy.dot(P, x)
            for i in range(len(y)):
                if y[i]>0:
                    key += 1<<i
            allkeys.append(key)
        return allkeys
    def add(self, x, label):
        """
        Adds the label of point x to each table.
        """
        for l,key in enumerate(self.keys(x)):
            self.table[l][key].add(label)
    def query(self, x):
        """
        Returns all points that share a bin with x.
        """
        S = set()
        key = self.keys(x)
        for l in range(self.L):
            S = S.union(self.table[l][key[l]])
        return S
    def match(self, x, y):
        """Return True when x and y collide in at least one of the L tables."""
        xkeys = self.keys(x)
        ykeys = self.keys(y)
        for xk,yk in zip(xkeys,ykeys):
            if xk == yk:
                return True
        return False
    def binsize(self, l, key):
        """
        Returns the number of elements in bin KEY in table L.
        """
        return len(self.table[l][key])
    def tablesize(self, l):
        """
        Returns the number of bins in table L.
        """
        return len(self.table[l])
    def overview(self):
        """Print a per-table summary of bin occupancy."""
        # NOTE(review): Python 2 print statements - this module cannot run
        # under Python 3 as written.
        for l in range(self.L):
            print 'Table %d has %d bins:' % (l, self.tablesize(l)),
            print [len(self.table[l][b]) for b in self.table[l]]
#if __name__ == '__main__':
# fig = pylab.figure()
# ax = Axes3D(fig)
# colors = ['r', 'b', 'k', 'y']
# markers = ['o', 's', 'x', '^']
# for key in range(2**k):
# i = list(db.table[0][key])
# ax.scatter3D(A[i,0], A[i,1], A[i,2], s=60, c=colors[key], marker=markers[key])
# fig.show()
#
| StarcoderdataPython |
1999403 | # Aula 19 Dicionarios. É assim que tratamos os dicionarios
brasil = [] # Create a list, indexed [0] [1] [2], e.g.:
estado1 = {'uf': 'Rio de Janeiro', 'sigla': 'RJ'} # Create a dictionary
estado2 = {'uf': 'São Paulo', 'sigla': 'SP'} # Create a dictionary
brasil.append(estado1) # Append the dictionaries to the list
brasil.append(estado2)
print(estado1) #1st: show dictionary 1
print('-='*20, '1º')
print(estado2) #2nd: show dictionary 2
print('-='*20, '2º')
print(brasil) #3rd: show the whole list, i.e. both dictionaries (1st and 2nd state)
print('-='*40, '3º')
print(brasil[0]) #4th: show the first dictionary in the list, index [0] (1st state)
print('-='*20, '4º')
print(brasil[0]['uf']) #5th: show dict [0] and the value under key 'uf' = the state name
print('-='*20, '5º')
print(brasil[1]['sigla']) #6th: show dict [1] and the value under key 'sigla' = SP
print('-='*20, '6º')
1683236 | from .database import MongoDBConnect
from .reader import MongoDBReader
from .actions import MongoDBActions
| StarcoderdataPython |
4920793 | <filename>inputs/new_frag_job.py
import dill
import numpy as np
import os
import sys
# Command-line arguments: pickled fragment file plus job metadata.
fragment_name = sys.argv[1]
level = sys.argv[2]
batch = sys.argv[3]
folder = sys.argv[4]
# Load the pickled fragment object (dill handles richer objects than pickle).
infile = open(fragment_name, 'rb')
frag_class = dill.load(infile)
#make changes as needed to frag_class
# example:
# frag_class.qc_backend.spin = 2
# NOTE(review): `path`, `submit_name` and `string_num` are never defined in
# this script, so building `cmd` raises NameError at runtime, and the parsed
# `level`/`batch` values go unused here. TODO: confirm the intended
# substitutions before running.
cmd = 'sbatch -J %s -o "%s" --export=LEVEL="%s",BATCH="%s",FOLDER="%s" slurm_pbs.sh'%(fragment_name+"rerun", path+"/"+submit_name+".out", path, string_num, folder) ##For TinkerCliffs/Huckleberry
os.system(cmd)
| StarcoderdataPython |
3458903 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Simple Bot to reply to Telegram messages
# This program is dedicated to the public domain under the CC0 license.
"""
This Bot uses the Updater class to handle the bot.
First, a few callback functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using ConversationHandler.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove, InlineKeyboardButton,
InlineKeyboardMarkup, error)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
ConversationHandler, CallbackQueryHandler)
import telegram.ext
import logging
import json
from time import time
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# In-memory mirror of save.json: {"<index>": [question, answer]}.
questions_dict = dict()
# Cached numbered question listing, rebuilt by generate_text().
text = ''
# Index of the question currently being edited via /edit.
No = '0'
# Conversation states for the /add and /edit flows.
QUESTION, ANSWER = range(2)
EDIT_QUESTION, EDIT_ANSWER = range(2)
def load_json():
    """Read the persisted Q&A store ({"<index>": [question, answer]}) from disk."""
    with open("/root/kiray/save.json","r") as f:
        return json.load(f)
def save_json(dicty):
    """Persist the Q&A store to disk as indented JSON."""
    myjsondump = json.dumps(dicty, indent=1)
    with open("/root/kiray/save.json","w") as f:
        # writelines() on a str writes it character-by-character; write() would
        # be the conventional call, but the output is identical.
        f.writelines(myjsondump)
def generate_text():
    """Rebuild the numbered question listing ("1.q1\\n2.q2\\n...") from save.json."""
    entries = load_json()
    lines = []
    for index in range(1, len(entries) + 1):
        lines.append('{}.{}\n'.format(index, entries[str(index)][0]))
    return ''.join(lines)
def start(bot, update):
    """/start and /help handler: reply with the usage guide (Chinese)."""
    update.message.reply_text(
        '这里是Kiri的V2Ray FAQ bot,用来存档一些常见的问题。你可以:\n '
        '用 /questions 来获取问题列表(翻页模式)\n'
        '用 /all_questions 来获取全部问题(请在私聊中使用)\n'
        '用 /answer <NO.> 来获得对应答案,其中题号是从 /questions 中得到的\n'
        '如果你想添加问题和答案的话,请私戳我( @kiraybot )并使用 /add 哟\n')
def all_questions(bot,update,chat_data):
    """/all_questions: dump the full cached listing, private chats only.

    chat_data is accepted for handler-signature symmetry but unused here.
    """
    global text
    chat_id = update.message.chat_id
    # Negative chat ids are groups/supergroups; refuse to flood them.
    if chat_id < 0:
        update.message.reply_text("为了防止刷屏,请在私戳中查看哟")
    else:
        update.message.reply_text(text)
def questions(bot,update,chat_data):
    """/questions: show questions 1-5 with an inline pager keyboard.

    Group chats (negative chat ids) are rate limited: chat_data[chat_id]
    stores the last invocation time and calls within 300 s are dropped.
    chat_data[chat_id + 10086] is the pager cursor consumed by button().
    """
    global questions_dict
    current_time = time()
    chat_id = update.message.chat_id
    # callback_data: '1' = previous page, '2' = next page, i+2 = question i.
    keyboard = [[InlineKeyboardButton("Next", callback_data='2')],
                [InlineKeyboardButton(str(i), callback_data=str(i+2)) for i in range(1,6)]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    reply_text = ''
    for i in range(1,6):
        reply_text += "{}.{}\n".format(str(i), questions_dict[str(i)][0])
    if chat_id < 0:
        if chat_id in chat_data:
            delta = int(current_time - chat_data[chat_id])
        else:
            # First call from this group: answer and start the cooldown clock.
            update.message.reply_text(reply_text, reply_markup=reply_markup)
            chat_data[chat_id] = current_time
            chat_data[int(chat_id) + 10086] = 1
            return
        if delta < 300:
            return
        else:
            update.message.reply_text(reply_text, reply_markup=reply_markup)
            chat_data[chat_id] = current_time
            chat_data[int(chat_id) + 10086] = 1
    else:
        # Private chats are never rate limited.
        update.message.reply_text(reply_text, reply_markup=reply_markup)
        chat_data[int(chat_id) + 10086] = 1
def button(bot, update, chat_data):
    """Inline-keyboard callback dispatcher for the /questions pager.

    :param bot: bot itself
    :param update: update carrying the callback query
    :param chat_data: chat_data[chat_id] is the time of the last call, while
        chat_data[chat_id + 10086] is the current pager cursor; chat_data[(uid, mid)]
        caches the answer message sent for a given user/keyboard pair so it can
        be edited in place.
    :return: None

    callback_data meaning: '1' = previous page, '2' = next page, any other
    value = (question number + 2) to display.
    """
    # Local import: the module-level name `error` is shadowed by the error()
    # handler defined later in this file, and the original
    # `except[error.BadRequest, error.NetworkError]` used a *list*, which is
    # not a valid exception specification (it raises TypeError when hit).
    from telegram import error as tg_error
    global questions_dict
    query = update.callback_query
    choice = query.data
    chat_id = query.message.chat_id
    uid = query.from_user.id
    mid = query.message.message_id
    current_time = time()
    if chat_id not in chat_data or (chat_id + 10086) not in chat_data:
        chat_data[chat_id + 10086] = 1
    qid = int(chat_id) + 10086
    chat_data[chat_id] = current_time
    # Page navigation: move the cursor by 5, clamping back to 1 at the edges.
    if choice == '1' and chat_data[qid] >= 6:
        chat_data[qid] -= 5
    elif choice == '2' and chat_data[qid] <= (len(questions_dict) - 5):
        chat_data[qid] += 5
    elif choice == '1' or choice == '2':
        chat_data[qid] = 1
    else:
        # A concrete question was chosen: send (or edit) its answer.
        choice = int(choice) - 2
        reply_text = "[{}](tg://user?id={})这是你想看哒\n{}.{}\n答:{}".format(
            query.from_user.first_name,
            query.from_user.id,
            choice, questions_dict[str(choice)][0],
            questions_dict[str(choice)][1])
        if (uid, mid) in chat_data:
            try:
                chat_data[(uid, mid)].edit_text(text=reply_text, parse_mode='Markdown')
            except (tg_error.BadRequest, tg_error.NetworkError):
                # Markdown rendering failed - retry as plain text without the mention.
                reply_text = "{}.{}\n答:{}".format(choice, questions_dict[str(choice)][0],
                                                  questions_dict[str(choice)][1])
                chat_data[(uid, mid)].edit_text(text=reply_text)
            query.answer()
            return
        archive = bot.send_message(chat_id, text=reply_text, parse_mode='Markdown')
        chat_data[(uid, mid)] = archive
        query.answer()
        return
    # Rebuild the listing and keyboard for the page the cursor now points at.
    current = chat_data[qid]
    edited_text = ''
    for i in range(current, min(current + 5, len(questions_dict) + 1)):
        edited_text += "{}.{}\n".format(i, questions_dict[str(i)][0])
    questions_keyboard = [[InlineKeyboardButton(str(i), callback_data=str(i + 2))
                           for i in range(current, min(current + 5, len(questions_dict) + 1))]]
    if chat_data[qid] <= 5:
        keyboard = [[InlineKeyboardButton("Next", callback_data='2')]] + questions_keyboard
    elif chat_data[qid] >= (len(questions_dict) - 4):
        keyboard = [[InlineKeyboardButton("Previous", callback_data='1')]] + questions_keyboard
    else:
        keyboard = [[InlineKeyboardButton("Previous", callback_data='1'),
                     InlineKeyboardButton("Next", callback_data='2')]] + questions_keyboard
    reply_markup = InlineKeyboardMarkup(keyboard)
    bot.edit_message_text(text=edited_text,
                          chat_id=query.message.chat_id,
                          message_id=query.message.message_id,
                          reply_markup=reply_markup)
    query.answer()
def question(bot,update,args):
    """Deprecated /question <No.> handler; users are redirected to /answer."""
    global questions_dict
    try:
        choice = str(args[0])
        if choice in questions_dict:
            update.message.reply_text(questions_dict[choice][1])
            update.message.reply_text("另外,因为少个s很多视力不好的人看不出来,以后就用 /answer 来查答案啦~")
        else:
            update.message.reply_text("要输入正确的题号哦")
    except (IndexError, ValueError):
        update.message.reply_text("不对不对不对,要输入 /answer <No.>(因为你们老看错,所以question就弃用啦)")
def answer(bot, update, args):
    """/answer <No.> handler: reply with the stored question and its answer.

    Calls with a missing or non-string-convertible argument are ignored
    silently (the command is also triggered by sloppy input in groups).
    """
    # Local import: the module-level `error` import is shadowed by the error()
    # handler defined later, and the original `except[...]` used a list,
    # which raises TypeError instead of catching anything.
    from telegram import error as tg_error
    global questions_dict
    try:
        choice = str(args[0])
        if choice in questions_dict:
            reply_text = '问:{}\n答:{}'.format(questions_dict[choice][0], questions_dict[choice][1])
            try:
                update.message.reply_text(reply_text, parse_mode='Markdown')
            except (tg_error.BadRequest, tg_error.NetworkError):
                # Markdown rendering failed - fall back to plain text.
                update.message.reply_text(reply_text)
        else:
            update.message.reply_text("要输入正确的题号哦")
    except (IndexError, ValueError):
        return
def edit(bot,update,args):
    """/edit <No.> <0|1> conversation entry point.

    0 starts editing the question text, 1 the answer; the chosen index is
    stashed in the global `No` for the follow-up conversation step.
    Returns the next ConversationHandler state.
    """
    global questions_dict
    global text
    global No
    try:
        question_num = str(args[0])
        if question_num not in questions_dict:
            update.message.reply_text("这里没有你想编辑的题!")
            return ConversationHandler.END
        No = question_num
        q_or_a = int(args[1])
        if q_or_a == 0:
            update.message.reply_text("现在开始编辑第{}道题的问题".format(question_num))
            return EDIT_QUESTION
        elif q_or_a == 1:
            update.message.reply_text("现在开始编辑第{}道题的答案".format(question_num))
            return EDIT_ANSWER
        else :
            update.message.reply_text("0代表问题,1代表答案!")
            return ConversationHandler.END
    except (KeyError, IndexError, ValueError):
        update.message.reply_text("不对不对不对,要输入 /edit <No.> [0,1]")
        return ConversationHandler.END
def edit_question(bot,update):
    """Conversation step: overwrite question No's text and persist the store."""
    global questions_dict
    global No
    global text
    questions_dict[No][0] = update.message.text
    save_json(questions_dict)
    update.message.reply_text("好耶!你已修改第{}题的问题为\n{}".format(No, update.message.text))
    # Refresh the cached listing so /all_questions reflects the change.
    text = generate_text()
    return ConversationHandler.END
def edit_answer(bot,update):
    """Conversation step: overwrite question No's answer and persist the store."""
    global questions_dict
    global No
    global text
    questions_dict[No][1] = update.message.text
    save_json(questions_dict)
    update.message.reply_text("好耶!你已修改第{}题的答案为\n{}".format(No, update.message.text))
    # Refresh the cached listing so /all_questions reflects the change.
    text = generate_text()
    return ConversationHandler.END
def add(bot,update):
    """/add conversation entry point (private chats only): start Q&A submission."""
    global questions_dict
    chat_id = update.message.chat_id
    if chat_id < 0:
        update.message.reply_text("就说了要在私戳的时候用 /add 啦!")
        return ConversationHandler.END
    update.message.reply_text("不要不要不要乱玩bot!(小心被滥权哟)并且注意当你使用Markdown模式时,"
                             "bot收到的是渲染之后的文本,Markdown记号跟渲染效果都会被tg抹去再由bot接收。")
    update.message.reply_text("那我们开始吧。输入你想放入的问题,尽量短一点。如果你是不小心按到,请输入 /cancel")
    # Drop a half-finished entry left over from a previously aborted submission.
    # NOTE(review): raises KeyError when questions_dict is empty (key "0") -
    # confirm the store always holds at least one complete entry.
    leng = str(len(questions_dict))
    if len(questions_dict[leng]) != 2:
        del questions_dict[leng]
    return QUESTION
def add_question(bot,update):
    """Conversation step: stash the new question under the next free index."""
    global questions_dict
    update.message.reply_text("好耶。继续输入你想放入的答案,一口气地。 如果你是不小心按到,请输入 /cancel")
    leng = len(questions_dict)
    current = str(leng+1)
    # A one-element list marks the entry as half-finished until the answer arrives.
    questions_dict[current] = []
    questions_dict[current].append(update.message.text)
    return ANSWER
def add_answer(bot,update):
    """Conversation step: store the answer, persist, and report to the audit chat."""
    global questions_dict
    global text
    username = update.message.from_user.first_name
    uid = update.message.from_user.id
    # Hard-coded chat id of the audit/log channel that receives submissions.
    shuiid = -1001108895871
    update.message.reply_text("好耶!已加入V2RayFAQ全家桶")
    leng = len(questions_dict)
    current = str(leng)
    questions_dict[current].append(update.message.text)
    # Append the new entry to the cached listing instead of rebuilding it all.
    text = text + str(current) + '.'
    text = text + questions_dict[str(current)][0] + '\n'
    save_json(questions_dict)
    little_report = '''就是这个人 [{}](tg://user?id={}) 刚刚提交了问题
    {}.{}
    答: {}'''.format(username, uid, current, questions_dict[current][0], questions_dict[current][1])
    bot.send_message(shuiid, text=little_report, parse_mode='Markdown')
    return ConversationHandler.END
def search(bot,update,args):
    """Unimplemented stub - TODO: add question search (handler never registered)."""
    global questions_dict
def delete(bot,update):
    """/delete: remove the highest-numbered question and rebuild the listing.

    NOTE(review): authorisation is a first_name comparison against 'Kiri',
    which any user can spoof - consider checking the user id instead.
    """
    global questions_dict
    global text
    user = update.message.from_user.first_name
    if user != 'Kiri':
        update.message.reply_text("别尼玛乱玩bot啦!")
    else:
        leng = len(questions_dict)
        del questions_dict[str(leng)]
        update.message.reply_text("删除成功惹!")
        save_json(questions_dict)
        # Rebuild the cached listing from scratch after the removal.
        text = ''
        leng = len(questions_dict)
        for key in range(1,leng+1):
            text = text + str(key) + '.'
            text = text + questions_dict[str(key)][0] + '\n'
def cancel(bot, update):
    """/cancel: abort the current conversation, discarding a half-added entry."""
    global questions_dict
    update.message.reply_text('好啦,那就下次再见咯。',
                              reply_markup=ReplyKeyboardRemove())
    # Purge the half-finished entry created by add_question(), if any.
    # NOTE(review): raises KeyError when questions_dict is empty - see add().
    leng = str(len(questions_dict))
    if len(questions_dict[leng]) != 2:
        del questions_dict[leng]
    return ConversationHandler.END
def error(bot, update, error):
    """Log Errors caused by Updates."""
    # NOTE(review): this function (and its third parameter) shadow the `error`
    # module imported from telegram at the top of the file, which breaks
    # `error.BadRequest` lookups elsewhere - consider renaming the handler.
    logger.warning('Update "%s" caused error "%s"', update, error)
def get_chatid(bot,update):
    """/get_chatid debug helper: echo the current chat's id."""
    update.message.reply_text(update.message.chat_id)
def get_username(bot,update):
    """/get_username debug helper: echo the sender's username."""
    update.message.reply_text(update.message.from_user.username)
def main():
    """Warm the caches, register every handler and run the bot until stopped."""
    global questions_dict
    global text
    # Load the persisted store and build the cached listing before serving.
    questions_dict = load_json()
    text = generate_text()
    # TODO: replace the placeholder with a real bot token (ideally from env/config).
    updater = Updater("TOKEN")
    dp = updater.dispatcher
    dp.add_handler(telegram.ext.CommandHandler("start", start))
    dp.add_handler(telegram.ext.CommandHandler("help", start))
    dp.add_handler(telegram.ext.CommandHandler("delete", delete))
    dp.add_handler(telegram.ext.CommandHandler("get_chatid", get_chatid))
    dp.add_handler(telegram.ext.CommandHandler("get_username", get_username))
    dp.add_handler(telegram.ext.CommandHandler("question", question,
                                               pass_args=True))
    dp.add_handler(telegram.ext.CommandHandler("answer", answer,
                                               pass_args=True))
    dp.add_handler(telegram.ext.CommandHandler("all_questions", all_questions,
                                               pass_chat_data=True))
    dp.add_handler(CommandHandler('questions', questions,
                                  pass_chat_data=True))
    dp.add_handler(CallbackQueryHandler(button, pass_chat_data=True))
    # Conversation flow for /add: question text, then answer text.
    conv_handler1 = ConversationHandler(
        entry_points=[CommandHandler('add', add)],
        states={
            QUESTION: [MessageHandler(Filters.text, add_question),
                       CommandHandler('cancel', cancel)],
            ANSWER: [MessageHandler(Filters.text, add_answer),
                     CommandHandler('cancel', cancel)]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
    # Conversation flow for /edit: replacement question or answer text.
    conv_handler2 = ConversationHandler(
        entry_points=[CommandHandler('edit', edit, pass_args=True)],
        states={
            EDIT_QUESTION: [MessageHandler(Filters.text, edit_question),
                            CommandHandler('cancel', cancel)],
            EDIT_ANSWER: [MessageHandler(Filters.text, edit_answer),
                          CommandHandler('cancel', cancel)]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
    dp.add_handler(conv_handler1)
    dp.add_handler(conv_handler2)
    # log all errors
    dp.add_error_handler(error)
    # Start the Bot
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
# Run the bot only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3556244 | from straph.generators import erdos_renyi, barabasi_albert
from straph.parser import parser, sort_csv
from straph.paths import Path
from straph.paths import Metawalk
from straph.stream import (StreamGraph,
read_stream_graph,
stream_graph_from_events_list,
read_stream_graph_from_json,
DFS_iterative)
from straph.utils import nx_degree, hist_plot
name = "straph"
# Let users know if they're missing any of our hard dependencies
# From panda's github
hard_dependencies = ("numpy", "matplotlib", "dateutil", "networkx", "pandas","dpkt","sortedcontainers")
missing_dependencies = []
for dependency in hard_dependencies:
    try:
        __import__(dependency)
    except ImportError as e:
        missing_dependencies.append(f"{dependency}: {e}")
if missing_dependencies:
    raise ImportError(
        "Unable to import required dependencies:\n" + "\n".join(missing_dependencies)
    )
# Drop import-time temporaries so they do not leak into the package namespace.
del hard_dependencies, dependency, missing_dependencies
| StarcoderdataPython |
1614298 | <filename>HOP_Learning_Algorithm/classification_mnist.py
#classification_mnist.py
#
#Semi-supervised classification example:
#This script shows how to construct a weight matrix for the whole
#MNIST dataset, using precomputed kNN data, randomly select some
#training data, run Laplace and Poisson Learning, and compute accuracy.
import graphlearning as gl
#Load labels, knndata, and build 10-nearest neighbor weight matrix
labels = gl.load_labels('mnist')
# I, J, D: precomputed kNN data in the VAE embedding - presumably point
# indices, neighbor indices and distances (TODO confirm against graphlearning).
I,J,D = gl.load_kNN_data('mnist',metric='vae')
W = gl.weight_matrix(I,J,D,10)
#Equivalently, we can compute knndata from scratch
#X = gl.load_dataset('mnist',metric='vae')
#labels = gl.load_labels('mnist')
#I,J,D = gl.knnsearch_annoy(X,10)
#W = gl.weight_matrix(I,J,D,10)
#Randomly chose training datapoints
num_train_per_class = 1
train_ind = gl.randomize_labels(labels, num_train_per_class)
train_labels = labels[train_ind]
#Run Laplace and Poisson learning
labels_laplace = gl.graph_ssl(W,train_ind,train_labels,algorithm='laplace')
labels_poisson = gl.graph_ssl(W,train_ind,train_labels,algorithm='poisson')
#Compute and print accuracy
print('Laplace learning: %.2f%%'%gl.accuracy(labels,labels_laplace,num_train_per_class))
print('Poisson learning: %.2f%%'%gl.accuracy(labels,labels_poisson,num_train_per_class))
| StarcoderdataPython |
4980384 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Acquire.
"""
import warnings
from typing import Optional, Union, List
from qiskit.pulse.channels import Qubit, MemorySlot, RegisterSlot, AcquireChannel
from qiskit.pulse.exceptions import PulseError
from .instruction import Instruction
from .meas_opts import Discriminator, Kernel
from .command import Command
class Acquire(Command):
    """Acquire command: qubit readout for a fixed duration, with optional
    measurement kernel and discriminator settings."""

    ALIAS = 'acquire'
    prefix = 'acq'

    def __init__(self, duration: int, kernel: Optional[Kernel] = None,
                 discriminator: Optional[Discriminator] = None,
                 name: Optional[str] = None):
        """Create new acquire command.

        Args:
            duration: Duration of acquisition
            kernel: The data structures defining the measurement kernels
                to be used (from the list of available kernels) and set of parameters
                (if applicable) if the measurement level is 1 or 2.
            discriminator: Discriminators to be used (from the list of available
                discriminator) if the measurement level is 2
            name: Name of this command.

        Raises:
            PulseError: when invalid discriminator or kernel object is input.
        """
        super().__init__(duration=duration)

        self._name = Acquire.create_name(name)

        if kernel and not isinstance(kernel, Kernel):
            raise PulseError('Invalid kernel object is specified.')
        self._kernel = kernel

        if discriminator and not isinstance(discriminator, Discriminator):
            raise PulseError('Invalid discriminator object is specified.')
        self._discriminator = discriminator

    @property
    def kernel(self):
        """Return kernel settings."""
        return self._kernel

    @property
    def discriminator(self):
        """Return discrimination settings."""
        return self._discriminator

    def __eq__(self, other: 'Acquire'):
        """Two Acquires are the same if they are of the same type
        and have the same kernel and discriminator.

        Args:
            other: Other Acquire

        Returns:
            bool: are self and other equal.
        """
        return (super().__eq__(other) and
                self.kernel == other.kernel and
                self.discriminator == other.discriminator)

    def __hash__(self):
        return hash((super().__hash__(), self.kernel, self.discriminator))

    def __repr__(self):
        # Bug fix: the format arguments were previously ordered
        # (duration, name, kernel, discriminator), which shifted every value
        # one field to the right of its label in the rendered string.
        return '%s(duration=%d, kernel=%s, discriminator=%s, name="%s")' % \
            (self.__class__.__name__, self.duration, self.kernel,
             self.discriminator, self.name)

    # pylint: disable=arguments-differ
    def to_instruction(self,
                       qubit: Union[AcquireChannel, List[AcquireChannel]],
                       mem_slot: Optional[Union[MemorySlot, List[MemorySlot]]] = None,
                       reg_slots: Optional[Union[RegisterSlot, List[RegisterSlot]]] = None,
                       mem_slots: Optional[Union[List[MemorySlot]]] = None,
                       reg_slot: Optional[RegisterSlot] = None,
                       name: Optional[str] = None) -> 'AcquireInstruction':
        """Bind this command to acquire channels and memory/register slots,
        producing an AcquireInstruction."""
        return AcquireInstruction(self, qubit, mem_slot=mem_slot, reg_slot=reg_slot,
                                  mem_slots=mem_slots, reg_slots=reg_slots, name=name)
    # pylint: enable=arguments-differ
class AcquireInstruction(Instruction):
    """Pulse to acquire measurement result.

    Accepts both the current single-slot parameters (mem_slot/reg_slot) and
    the deprecated plural forms (mem_slots/reg_slots), normalising everything
    to lists before delegating to Instruction.
    """

    def __init__(self,
                 command: Acquire,
                 acquire: Union[AcquireChannel, List[AcquireChannel]],
                 mem_slot: Optional[Union[MemorySlot, List[MemorySlot]]] = None,
                 reg_slots: Optional[Union[RegisterSlot, List[RegisterSlot]]] = None,
                 mem_slots: Optional[Union[List[MemorySlot]]] = None,
                 reg_slot: Optional[RegisterSlot] = None,
                 name: Optional[str] = None):

        # Warn on any use of the deprecated multi-qubit/multi-slot form.
        if isinstance(acquire, list) or isinstance(mem_slot, list) or reg_slots:
            warnings.warn('The AcquireInstruction on multiple qubits, multiple '
                          'memory slots and multiple reg slots is deprecated. The '
                          'parameter "mem_slots" has been replaced by "mem_slot" and '
                          '"reg_slots" has been replaced by "reg_slot"', DeprecationWarning, 3)

        if not isinstance(acquire, list):
            acquire = [acquire]

        if isinstance(acquire[0], Qubit):
            raise PulseError("AcquireInstruction can not be instantiated with Qubits, "
                             "which are deprecated.")

        # Normalise the memory-slot argument to a list, honouring the
        # deprecated plural parameter as a fallback.
        if mem_slot and not isinstance(mem_slot, list):
            mem_slot = [mem_slot]
        elif mem_slots:
            mem_slot = mem_slots

        # Same normalisation for register slots.
        if reg_slot:
            reg_slot = [reg_slot]
        elif reg_slots and not isinstance(reg_slots, list):
            reg_slot = [reg_slots]
        else:
            reg_slot = reg_slots

        if not (mem_slot or reg_slot):
            raise PulseError('Neither memoryslots or registers were supplied')

        if mem_slot and len(acquire) != len(mem_slot):
            raise PulseError("The number of mem_slots must be equals to the number of acquires")

        if reg_slot:
            if len(acquire) != len(reg_slot):
                raise PulseError("The number of reg_slots must be equals "
                                 "to the number of acquires")
        else:
            reg_slot = []

        # NOTE(review): when only register slots are supplied, mem_slot can
        # still be None here and `*mem_slot` below would raise TypeError -
        # confirm whether the register-only path is ever exercised.
        super().__init__(command, *acquire, *mem_slot, *reg_slot, name=name)

        self._acquires = acquire
        self._mem_slots = mem_slot
        self._reg_slots = reg_slot

    @property
    def acquire(self):
        """Acquire channel to be acquired on."""
        return self._acquires[0] if self._acquires else None

    @property
    def mem_slot(self):
        """MemorySlot."""
        return self._mem_slots[0] if self._mem_slots else None

    @property
    def reg_slot(self):
        """RegisterSlot."""
        return self._reg_slots[0] if self._reg_slots else None

    @property
    def acquires(self):
        """Acquire channels to be acquired on."""
        return self._acquires

    @property
    def mem_slots(self):
        """MemorySlots."""
        return self._mem_slots

    @property
    def reg_slots(self):
        """RegisterSlots."""
        return self._reg_slots
| StarcoderdataPython |
3482958 | <filename>libs/labelFile.py<gh_stars>100-1000
import os.path
import sys
from .pascal_voc_io import PascalVocWriter
from base64 import b64encode, b64decode
class LabelFileError(Exception):
    """Error type for label-file load/save failures."""
    pass
class LabelFile(object):
    """Container for annotation shapes, with Pascal-VOC XML export support."""

    # Extension recognised by isLabelFile(); it might be changed as window creates.
    suffix = '.lif'

    def __init__(self, filename=None):
        self.shapes = ()
        self.imagePath = None
        self.imageData = None
        # NOTE(review): load() is not defined in this class as shown here -
        # presumably supplied elsewhere; confirm before constructing with a filename.
        if filename is not None:
            self.load(filename)

    def savePascalVocFormat(self, savefilename, image_size, shapes,
                            imagePath=None, databaseSrc=None, shape_type_='RECT'):
        """Write *shapes* for the image at *imagePath* as a Pascal VOC XML file."""
        folder_name = os.path.split(os.path.dirname(imagePath))[-1]
        base_name = os.path.splitext(os.path.basename(imagePath))[0]
        writer = PascalVocWriter(folder_name, base_name, image_size,
                                 localImgPath=imagePath, shape_type=shape_type_)
        wrote_any = False
        for shape in shapes:
            pts = shape['points']
            tag = shape['label']
            if shape['shape_type'] == 0:
                print('add rects')
                xmin, ymin, xmax, ymax = LabelFile.convertPoints2BndBox(pts)
                writer.addBndBox(xmin, ymin, xmax, ymax, tag)
            if shape['shape_type'] == 1:
                print('add polygons')
                writer.addPolygon(pts, tag)
            wrote_any = True
        # Only emit a file when at least one shape was present.
        if wrote_any:
            writer.save(targetFile=savefilename)
        return

    @staticmethod
    def isLabelFile(filename):
        """True when *filename* carries the label-file suffix (case-insensitive)."""
        return os.path.splitext(filename)[1].lower() == LabelFile.suffix

    @staticmethod
    def convertPoints2BndBox(points):
        """Fold a point list into an integer (xmin, ymin, xmax, ymax) box.

        Minima are clamped to 1: 0-valued coordinates caused an error while
        training a faster-rcnn object detector (note from 2015/11/12).
        """
        xmin = ymin = sys.maxsize
        xmax = ymax = -sys.maxsize
        for point in points:
            xmin = min(point[0], xmin)
            ymin = min(point[1], ymin)
            xmax = max(point[0], xmax)
            ymax = max(point[1], ymax)
        xmin = max(xmin, 1)
        ymin = max(ymin, 1)
        return (int(xmin), int(ymin), int(xmax), int(ymax))
| StarcoderdataPython |
257079 | import numpy as np
import pandas as pd
from scipy.io import arff
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import csv
class DimensionValueError(ValueError):
    """Error type for dimension problems; not raised anywhere in this file."""
    pass
class TypeError(ValueError):
    # NOTE(review): shadows the built-in TypeError within this module (and
    # oddly derives from ValueError); consider renaming. Unused in this file.
    pass
class IterError(ValueError):
    """Error type for iteration problems; not raised anywhere in this file."""
    pass
class DataProcess:
    """Load, label-encode and split the iris ARFF dataset."""
    # Read the ARFF file into a DataFrame with named columns.
    def loadArffData(self, path):
        data = arff.loadarff(path)
        df = pd.DataFrame(data[0])
        df = pd.DataFrame(df.values, columns=['sepal length','sepal width', 'petal length', 'petal width', 'class'])
        return df
    def Predata(self, path):
        """Return (features, labels) with class names mapped to integers 1/2/3."""
        df = self.loadArffData(path)
        # labels = df.values[:, -1]
        title_mapping = {b"Iris-setosa": 1, b"Iris-versicolor": 2, b"Iris-virginica": 3}# map class labels to integers
        df['class'] = df['class'].map(title_mapping)# encode the label column
        df['class'] = df['class'].fillna(0)## any other label becomes 0
        data = df.values[:, 0:df.values.shape[-1] - 1]
        labels = df.values[:, -1]
        return data, labels
    # Split the dataset into train/validation parts (default 50/50, fixed seed).
    def Split_Data(self, path, t_s=0.5):
        data, labels = self.Predata(path)
        # To fold the bias b into w, a column of ones could be prepended:
        # data = np.hstack((np.ones((data.shape[0], 1)), data))
        return train_test_split(data, labels, test_size = t_s, random_state = 0)
class SoftMax:
    """Multinomial logistic regression trained by batch gradient ascent."""
    def __init__(self):
        super().__init__()
        pass
    def fit(self, data, labels, epoch = 50, learning_rate = 0.1, threshold = 0.5):
        """Train on data (n, features) with integer labels starting at 1."""
        self.data = data
        # Prepend a constant-1 column so the bias folds into the weight matrix.
        self.data = np.insert(self.data, 0, 1, axis=1)
        self.labels = labels
        self.sorts = np.unique(self.labels)
        self.epoch = epoch
        self.learning_rate = learning_rate
        self.threshold = threshold
        self.w = np.random.rand(self.sorts.shape[0], self.data.shape[1])
        self.BGA()
        return self
    def BGA(self):
        """Batch gradient ascent over the full training set."""
        for i in tqdm(range(self.epoch)):
            self.w += self.learning_rate * self.getGD()
            # NOTE: loss() reads self.H, which is cached by the hypothesis()
            # call inside getGD() - the order of these two lines matters.
            loss = self.loss()
            print('loss = ', loss)
    def getGD(self):
        """Log-likelihood gradient: (one-hot targets - softmax scores) @ data."""
        G = np.zeros((self.sorts.shape[0], self.data.shape[0]))
        for i in range(self.data.shape[0]):
            G[self.labels[i] - 1][i] = 1
        H = self.hypothesis(self.w, self.data)
        return np.dot((G - H), self.data)
    def hypothesis(self, w, data):
        """Column-normalised softmax scores, shape (classes, samples); caches self.H."""
        eta = np.dot(w, data.T)
        H = np.exp(eta)
        H /= np.sum(H, axis = 0)
        self.H = H
        return H
    def predict(self, Vdata, Vlabels):
        """Print validation accuracy on (Vdata, Vlabels)."""
        Vdata = np.insert(Vdata, 0, 1, axis=1)
        predict_score = self.hypothesis(self.w, Vdata)
        predict_labels = np.argmax(predict_score, axis = 0)
        TP_TN = 0
        test_size = len(predict_labels)
        for i in range(test_size):
            # argmax indices are 0-based, labels are 1-based.
            if predict_labels[i] + 1 == Vlabels[i]:
                TP_TN += 1
        print("准确率:", TP_TN / test_size)
    def loss(self):
        # predict, labels
        # NOTE(review): this divides each sample's class log-prob by the sum of
        # log-probs rather than computing the usual negative log-likelihood -
        # confirm the intended loss definition.
        log = np.log(self.H)
        sum = np.sum(log, axis=0)
        loss = 0.0
        for i in range(self.data.shape[0]):
            loss += log[self.labels[i] - 1, i] / sum[i]
        return 1 * loss / self.data.shape[0]
if __name__ == "__main__":
    # Hold out half of the iris data for validation (Split_Data default).
    Train_data, Validation_data, Train_labels, Validation_labels = DataProcess().Split_Data('iris.arff')
    # Train_data = Train_data.T
    # Validation_data = Validation_data.T
    Train_data = np.array(Train_data, dtype=np.float64)
    Validation_data = np.array(Validation_data, dtype=np.float64)
    Train_labels = np.array(Train_labels, dtype=np.int64)
    Validation_labels = np.array(Validation_labels, dtype=np.int64)
    # NOTE(review): rebinding the name SoftMax to the fitted instance shadows
    # the class for the rest of the script.
    SoftMax = SoftMax().fit(Train_data, Train_labels, epoch = 100)
    SoftMax.predict(Validation_data, Validation_labels)
pass | StarcoderdataPython |
166194 | from copy import deepcopy
from typing import Any, Dict
def merge_dicts(a: Dict, b: Dict):
    """Recursively merge dictionary *b* into a deep copy of *a*.

    Keys present in both are merged value-by-value: dict values merge
    recursively and list values are concatenated. Neither input is mutated.

    Args:
        a (Dict):
        b (Dict)

    Raises:
        ValueError: if a key is present in both dictionaries and its values do
            not fall in one of these classes: Dict, List
    """
    merged = deepcopy(a)
    for key, value in b.items():
        if key not in merged:
            merged[key] = value
        elif isinstance(value, dict):
            merged[key] = merge_dicts(merged[key], value)
        elif isinstance(value, list):
            merged[key] += value
        else:
            raise ValueError(
                f"""Impossible to merge {merged[key].__class__} and {value.__class__}.
                Data structure combinations not supported. Only merge between list and dict possible"""
            )
    return merged
def get_key(obj: Any, key: str):
    """Fetch *key* from a mapping via .get(), falling back to attribute access.

    Mirrors dict.get for mappings (missing keys yield None); for plain
    objects a missing attribute still raises AttributeError from getattr.
    """
    try:
        return obj.get(key)
    except (AttributeError, TypeError):
        # No usable .get() method: treat obj as a plain object. (Previously a
        # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
        return getattr(obj, key)
def set_key(obj: Any, key: str, value: Any):
    """Assign obj[key] = value, falling back to setattr for non-mapping objects."""
    try:
        obj[key] = value
    except (TypeError, AttributeError):
        # obj does not support item assignment: set an attribute instead.
        # (Previously a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        setattr(obj, key, value)
def get_nested_value(obj: Any, path: str) -> Any:
    """Walk a dotted *path* (e.g. "a.b.c") through nested mappings/objects."""
    if "[]" in path:
        raise Exception(
            "This function works only for non array objects. Use get_nested_values instead"
        )
    value = obj
    for segment in path.split("."):
        value = get_key(value, segment)
    return value
def set_nested_value(obj: Any, path: str, value: Any) -> Any:
    """Return a deep copy of *obj* with the value at dotted *path* replaced."""
    instance = deepcopy(obj)
    *parent_keys, leaf = path.split(".")
    target = instance
    for segment in parent_keys:
        target = get_key(target, segment)
    set_key(target, leaf, value)
    return instance
def get_nested_values(obj: Any, path: str) -> Any:
    """Like get_nested_value, but a segment suffixed "[]" fans out over a list."""
    segments = path.split(".")
    if len(segments) == 1:
        return get_key(obj, path)
    head, rest = segments[0], ".".join(segments[1:])
    if head.endswith("[]"):
        return [
            get_nested_values(item, rest)
            for item in get_key(obj, head[:-2])
        ]
    return get_nested_values(get_key(obj, head), rest)
| StarcoderdataPython |
1950485 | import mysql.connector
from backupdb import *
def flushDB():
    """Back up the detection database, then delete every row from its three tables."""
    # Snapshot the data first: the deletes below are destructive.
    backupDataBase()
    # NOTE(review): hard-coded credentials ("<PASSWORD>" placeholder) — move to
    # configuration before real use.
    db_connection = mysql.connector.connect(host="localhost", user="django", passwd="<PASSWORD>", database="detectionnav")
    db_cursor = db_connection.cursor()
    # Presumably child tables are cleared before their parents to satisfy
    # foreign-key constraints — confirm against the schema.
    db_cursor.execute('DELETE FROM DetectionChart_techniques')
    db_cursor.execute('DELETE FROM DetectionChart_ttp')
    db_cursor.execute('DELETE FROM DetectionChart_tactic')
    db_connection.commit()
| StarcoderdataPython |
316275 | <reponame>PauMAVA/feather<filename>feather/generated/generators/biome.py
# Generation of the Biome enum. Uses minecraft-data/biomes.json.
import common
# Load the upstream minecraft-data description of every biome.
data = common.load_minecraft_json("biomes.json")
variants = []
ids = {}
names = {}
display_names = {}
rainfalls = {}
temperatures = {}
# One enum variant per biome, plus per-variant property lookup tables.
for biome in data:
    variant = common.camel_case(biome['name'])
    variants.append(variant)
    ids[variant] = biome['id']
    names[variant] = biome['name']
    display_names[variant] = biome['displayName']
    rainfalls[variant] = biome['rainfall']
    temperatures[variant] = biome['temperature']
# Emit the Rust enum and an accessor per property; the boolean argument
# presumably enables a reverse (value -> variant) lookup — confirm against
# common.generate_enum_property.
output = common.generate_enum("Biome", variants)
output += common.generate_enum_property("Biome", "id", "u32", ids, True)
output += common.generate_enum_property("Biome", "name", "&str", names, True, "&'static str")
output += common.generate_enum_property("Biome", "display_name", "&str", display_names, True, "&'static str")
output += common.generate_enum_property("Biome", "rainfall", "f32", rainfalls)
output += common.generate_enum_property("Biome", "temperature", "f32", temperatures)
common.output("src/biome.rs", output)
| StarcoderdataPython |
6607385 | """
Utility functions for the benchmarks
==========================================
"""
import importlib
import os
import time
import matplotlib as mpl
from matplotlib import pyplot as plt
from si_prefix import si_format
import numpy as np
import torch
import jax
# Cached once at import; all helpers below branch on this flag.
use_cuda = torch.cuda.is_available()
##################################################
# Utility functions:
#
def timer(use_torch=True):
    """Return a wall-clock timestamp, first draining pending CUDA work when relevant."""
    # CUDA kernels launch asynchronously; synchronize so perf_counter measures
    # completed GPU work rather than just the launch.
    if use_cuda and use_torch:
        torch.cuda.synchronize()
    return time.perf_counter()
def flatten(list_of_lists):
    """Concatenate the sub-lists of *list_of_lists* into one flat list."""
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
def clear_gpu_cache():
    """Release cached CUDA memory so each benchmark starts from a clean allocator state."""
    if use_cuda:
        torch.cuda.empty_cache()
################################################
# Timeout helper:
#
from functools import wraps
import errno
import signal
class TimeoutError(Exception):
    # NOTE(review): shadows the builtin TimeoutError; kept because the
    # benchmark loops below catch this module-level name.
    pass
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
    """Decorator aborting the wrapped call with TimeoutError after *seconds*.

    Implemented via SIGALRM, so it only works in the main thread on Unix.
    signal.alarm takes whole seconds — presumably callers always pass integers
    (e.g. ``timeout(6 * max_time)``); confirm for fractional inputs.
    """
    def decorator(func):
        def _handle_timeout(signum, frame):
            # Alarm fired: abort the in-flight call.
            raise TimeoutError(error_message)
        def wrapper(*args, **kwargs):
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm, even on failure.
                signal.alarm(0)
            return result
        return wraps(func)(wrapper)
    return decorator
##################################################
# Conversion routines:
#
def tensor(*x):
    """Build a float32 torch tensor, placed on the GPU when one is available."""
    factory = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
    return factory(*x)
def int_tensor(*x):
    """Build an int64 torch tensor, placed on the GPU when one is available."""
    factory = torch.cuda.LongTensor if use_cuda else torch.LongTensor
    return factory(*x)
def jax_tensor(*x):
    # Copy the given array(s) onto the default JAX device.
    return jax.device_put(*x)
#####################################################
# Random samples:
#
def random_normal(device="cuda", lang="torch"):
    """Return a ``sampler(shape)`` drawing i.i.d. standard-normal float32 values.

    Args:
        device: torch device string used by the torch branch.
        lang: "torch" for a torch tensor; anything else yields a numpy array.
    """
    def sampler(shape):
        if lang == "torch":
            return torch.randn(shape, device=torch.device(device))
        else:
            # Bug fix: np.random.rand draws *uniform* [0, 1) samples; randn
            # matches the torch branch (and the function name) by drawing
            # from the standard normal distribution.
            return np.random.randn(*shape).astype("float32")

    return sampler
def unit_tensor(device="cuda", lang="torch"):
    """Return a ``sampler(shape)`` producing an all-ones float32 tensor/array.

    Args:
        device: torch device string used by the torch branch.
        lang: "torch" for a torch tensor; anything else yields a numpy array.
    """
    def sampler(shape):
        if lang == "torch":
            return torch.ones(shape, device=torch.device(device))
        else:
            # Bug fix: np.ones(*shape) unpacked the shape tuple, so the second
            # dimension was interpreted as the dtype argument and every
            # multi-dimensional request crashed.
            return np.ones(shape, dtype="float32")

    return sampler
###########################################
# Multiprocessing code
# -----------------------------------------
#
#
# Unfortunately, some FAISS routines throw a C++ "abort" signal instead
# of a proper Python exception for out of memory errors on large problems.
# Letting them run in a separate process is the only way of handling
# the error without aborting the full benchmark.
import multiprocess as mp
import traceback
import queue
import sys
import uuid
def globalize(func):
    """Register *func* under a unique module-level name so it becomes picklable.

    multiprocessing's "spawn" start method can only pickle functions importable
    by name; this attaches a uniquely named alias to the defining module.
    """
    def result(*args, **kwargs):
        return func(*args, **kwargs)
    # Random hex name avoids collisions if the same function is globalized twice.
    result.__name__ = result.__qualname__ = uuid.uuid4().hex
    setattr(sys.modules[result.__module__], result.__name__, result)
    return result
class Process(mp.Process):
    """Exception-friendly process class.

    Forwards any exception raised in the child (plus its formatted traceback)
    to the parent through a pipe, so the parent can re-raise it.
    """
    def __init__(self, *args, **kwargs):
        mp.Process.__init__(self, *args, **kwargs)
        # Parent/child ends of the pipe used to report the outcome.
        self._pconn, self._cconn = mp.Pipe()
        self._exception = None
    def run(self):
        try:
            mp.Process.run(self)
            # Clean exit: signal "no exception" explicitly.
            self._cconn.send(None)
        except Exception as e:
            tb = traceback.format_exc()
            self._cconn.send((e, tb))
            raise e  # You can still raise this exception if you need to
    @property
    def exception(self):
        # Lazily fetch whatever the child sent; stays None on a clean exit.
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        return self._exception
return self._exception
def with_queue(f, queue, points):
    """Run ``f(points)`` and push the result onto *queue* (child-process entry point)."""
    queue.put(f(points))
def run_safely(f, x):
    """Run ``f(x)`` in a spawned child process and return its result.

    Isolation matters because some libraries (FAISS, per the comment above)
    abort the whole process on out-of-memory instead of raising; a crash is
    surfaced here as MemoryError instead of killing the benchmark.
    """
    # f_global = f # globalize(f)
    # mp.freeze_support()
    # NOTE(review): set_start_method raises RuntimeError if called twice in one
    # interpreter — confirm this helper is only ever invoked once per run.
    mp.set_start_method("spawn")
    q = mp.Queue()
    p = Process(target=with_queue, args=(f, q, x))
    p.start()
    p.join()
    # Re-raise any exception the child forwarded through its pipe.
    if p.exception:
        error, traceback = p.exception
        print(traceback)
        raise error
    try:
        out = q.get(False, 2.0)  # Non-blocking mode
    except queue.Empty:
        # A silent crash (e.g. a C++ abort) leaves the queue empty.
        print("Empty queue!")
        print("Exit code: ", p.exitcode)
        raise MemoryError()
    return out
##############################################
# Benchmarking loops
# -----------------------
def simple_loop(N, loops, routine, max_time, args, kwargs):
    """Time ``routine(*args, **kwargs)`` over *loops* repetitions; return seconds per call."""
    # Warmup run, to compile and load everything:
    output = routine(*args, **kwargs)
    t_0 = timer()
    for i in range(loops):
        output = routine(*args, **kwargs)
    elapsed = timer() - t_0
    # A single call may process a whole batch, so normalize by batchsize too.
    B = kwargs.get("batchsize", 1)
    perf = elapsed / (B * loops)
    print(
        f"{B:3}x{loops:3} loops of size {si_format(N,precision=0):>5}: {B:3}x{loops:3}x {si_format(perf):>7}s"
    )
    return perf
def recall(out_indices, true_indices):
    """Mean fraction of retrieved neighbors present in the ground truth (recall@K)."""
    Ntest, K = out_indices.shape
    # Only compare against the matching slice of the reference indices.
    reference = true_indices[:Ntest, :K]
    per_query = [
        np.sum(np.in1d(out_indices[row], reference[row], assume_unique=True)) / K
        for row in range(Ntest)
    ]
    return sum(per_query) / Ntest
def train_test_loop(N, loops, routine, max_time, args, kwargs):
    """Benchmark a train/query protocol: fit on args["train"], query args["test"].

    Returns the per-query test time; raises ValueError when recall against
    args["output"] drops below 75% so accuracy failures are flagged upstream.
    """
    x_train = args["train"]
    x_test = args["test"]
    ground_truth = args["output"]
    # Warmup run, to compile and load everything:
    operator = routine(N, **args, **kwargs)
    clear_gpu_cache()
    # Training gets a 6x larger timeout budget than querying.
    model, _ = timeout(6 * max_time)(operator)(x_train)
    clear_gpu_cache()
    output, _ = timeout(max_time)(model)(x_test)
    # Time the training step:
    train_time = 0.0
    for i in range(loops):
        clear_gpu_cache()
        model, elapsed = operator(x_train)
        train_time += elapsed
    # Time the test step:
    test_time = 0.0
    for i in range(loops):
        clear_gpu_cache()
        output, elapsed = model(x_test)
        test_time += elapsed
    B = kwargs.get("batchsize", 1)
    train_perf = train_time / (B * loops)
    test_perf = test_time / (B * loops)
    perf = recall(output, ground_truth)
    print(f"{B:3}x{loops:3} loops of size {si_format(N,precision=0):>5}: ", end="")
    print(f"train = {B:3}x{loops:3}x {si_format(train_perf):>7}s, ", end="")
    print(f"test = {B:3}x{loops:3}x {si_format(test_perf):>7}s, ", end="")
    print(f"recall = {100*perf:>3.0f}%")
    if perf < 0.75:
        raise ValueError("** Recall lower than 75%!")
    return test_perf
def benchmark(
    routine,
    label,
    N,
    max_time,
    loops=10,
    generate_samples=None,
    **kwargs,
):
    """Generate a size-*N* problem and time *routine* on it; return seconds per call."""
    importlib.reload(torch)  # In case we had a memory overflow just before...
    args = generate_samples(N, **kwargs)
    # A dict of samples signals a train/test protocol; anything else is a plain call.
    benchmark_loop = train_test_loop if type(args) is dict else simple_loop
    # Actual benchmark:
    elapsed = benchmark_loop(N, loops, routine, max_time, args, kwargs)
    return elapsed
def bench_config(
    routine,
    label,
    kwargs,
    generate_samples=None,
    problem_sizes=[1],
    max_time=10,
    red_time=2,
    loops=[100, 10, 1],
):
    """Times a convolution for an increasing number of samples.

    Returns one timing per problem size.  Failures are encoded as sentinel
    values so plots can annotate them: NaN for memory/runtime errors, +inf
    for timeouts or unsupported metrics, -inf for recall failures.
    """
    print(f"{label} -------------")
    times = []
    not_recorded_times = []
    try:
        Nloops = loops.copy()
        nloops = Nloops.pop(0)
        for n in problem_sizes:
            elapsed = benchmark(
                routine,
                label,
                n,
                max_time,
                loops=nloops,
                generate_samples=generate_samples,
                **kwargs,
            )
            times.append(elapsed)
            # Drop to fewer repetitions once a single size gets expensive.
            if (nloops * elapsed > max_time) or (
                nloops * elapsed > red_time and len(Nloops) > 0
            ):
                nloops = Nloops.pop(0)
    except MemoryError:
        print("** Memory overflow!")
        not_recorded_times = (len(problem_sizes) - len(times)) * [np.nan]
    except (TimeoutError, IndexError):  # Thrown by Nloops.pop(0) if Nloops = []
        print("** Too slow!")
        # Bug fix: np.Infinity was removed in NumPy 2.0; np.inf is the
        # supported spelling (same value, same plotting behavior).
        not_recorded_times = (len(problem_sizes) - len(times)) * [np.inf]
    except NotImplementedError:
        print("** This metric is not supported!")
        not_recorded_times = (len(problem_sizes) - len(times)) * [np.inf]
    except ValueError as err:
        print(err)
        # Bug fix: np.NINF was likewise removed in NumPy 2.0.
        not_recorded_times = (len(problem_sizes) - len(times)) * [-np.inf]
    except RuntimeError as err:
        print(err)
        print("** Runtime error!")
        not_recorded_times = (len(problem_sizes) - len(times)) * [np.nan]
    return times + not_recorded_times
def identity(x):
    """No-op transform: return *x* unchanged (used when plotting raw times)."""
    return x
def queries_per_second(N):
    """Return a transform mapping an elapsed time (s) to a rate in queries/second for N queries."""
    return lambda elapsed_seconds: N / elapsed_seconds
def inf_to_nan(x):
    """Return a copy of array *x* with every non-finite entry replaced by NaN.

    The input array is left untouched; dtype is preserved by copying first.
    """
    non_finite = ~np.isfinite(x)
    cleaned = x.copy()
    cleaned[non_finite] = np.nan
    return cleaned
def full_benchmark(
    to_plot,
    routines,
    generate_samples,
    problem_sizes,
    min_time=1e-5,
    max_time=10,
    red_time=2,
    loops=[100, 10, 1],
    xlabel="Number of samples",
    ylabel="Time (s)",
    frequency=False,
    legend_location="upper left",
    linestyles=["o-", "s-", "^-", "<-", ">-", "v-", "+-", "*-", "x-", "p-", "d-"],
):
    """Benchmark every routine over *problem_sizes*, draw a log-log plot, and dump a CSV.

    When *frequency* is True, timings are converted to queries/second using the
    size of the generated test set; sentinel values from bench_config (NaN,
    +/-inf) are annotated on the figure instead of plotted.
    """
    if frequency:
        N = len(generate_samples(1)["test"])
        transform = queries_per_second(N)
        ymin, ymax = transform(max_time), transform(min_time)
        y_suffix = "Hz"
    else:
        transform = identity
        ymin, ymax = min_time, max_time
        y_suffix = "s"
    print("Benchmarking : {} ===============================".format(to_plot))
    labels = [label for (_, label, _) in routines]
    # First column: problem sizes; one timing column per routine.
    lines = [problem_sizes] + [
        bench_config(
            *routine,
            generate_samples=generate_samples,
            problem_sizes=problem_sizes,
            max_time=max_time,
            red_time=red_time,
            loops=loops,
        )
        for routine in routines
    ]
    benches = np.array(lines).T
    # Creates a pyplot figure:
    plt.figure(figsize=(12, 8))
    for i, label in enumerate(labels):
        plt.plot(
            benches[:, 0],
            transform(inf_to_nan(benches[:, i + 1])),
            linestyles[i % len(linestyles)],
            linewidth=2,
            label=label,
        )
        # Annotate the last successful point with the failure reason encoded
        # by bench_config's sentinel values.
        for (j, val) in enumerate(benches[:, i + 1]):
            if np.isnan(val) and j > 0:
                x, y = benches[j - 1, 0], transform(benches[j - 1, i + 1])
                plt.annotate(
                    "Memory overflow!",
                    xy=(1.05 * x, y),
                    horizontalalignment="left",
                    verticalalignment="center",
                )
                break
            elif np.isposinf(val) and j > 0:
                x, y = benches[j - 1, 0], transform(benches[j - 1, i + 1])
                plt.annotate(
                    "Too slow!",
                    xy=(1.05 * x, y),
                    horizontalalignment="left",
                    verticalalignment="center",
                )
                break
            elif np.isneginf(val) and j > 0:
                x, y = benches[j - 1, 0], transform(benches[j - 1, i + 1])
                plt.annotate(
                    "Recall < 75%",
                    xy=(1.05 * x, y),
                    horizontalalignment="left",
                    verticalalignment="center",
                )
                break
    plt.title(to_plot)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.yscale("log")
    plt.xscale("log")
    plt.legend(loc=legend_location)
    plt.grid(True, which="major", linestyle="-")
    plt.grid(True, which="minor", linestyle="dotted")
    plt.axis([problem_sizes[0], problem_sizes[-1], ymin, ymax])
    # SI-prefixed tick labels on both axes (e.g. "10k", "1ms").
    fmt = lambda x, pos: si_format(x, precision=0)
    plt.gca().xaxis.set_major_formatter(mpl.ticker.FuncFormatter(fmt))
    fmt = lambda x, pos: si_format(x, precision=0) + y_suffix
    plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(fmt))
    # plt.tight_layout()
    # Save as a .csv to put a nice Tikz figure in the papers:
    header = "Npoints, " + ", ".join(labels)
    os.makedirs("output", exist_ok=True)
    np.savetxt(
        f"output/{to_plot}.csv",
        benches,
        fmt="%-9.5f",
        header=header,
        comments="",
        delimiter=",",
    )
| StarcoderdataPython |
1838523 | <reponame>ClaudeCoulombe/AgentConversationnel
from setuptools import setup
setup(name='AgentConversationnel',
      version='0.1',
      description='Agent conversationnel en français',
      url='https://github.com/ClaudeCoulombe/AgentConversationnel',
      # Bug fix: setuptools expects the singular 'author' / 'author_email'
      # keywords (strings); the plural forms were unknown options, so the
      # metadata was never applied.
      author='<NAME>',
      author_email='<EMAIL>',
      license='Apache 2',
      packages=['.'],
      package_data={
          'AgentConversationnel/DATA/': ['donnees.txt']},
      zip_safe=False)
| StarcoderdataPython |
80123 | <gh_stars>0
from nose.tools import assert_true, assert_raises
import config
import os
os.environ["TEST_TOKEN"] = "1234"
os.environ["TEST_NAME"] = "test_room"
def test_Env_GivenVariableArray_LoadsVariablesFromOs():
env_vars = ['TEST_TOKEN', 'TEST_NAME']
conf = config.Env(env_vars)
assert_true(conf.env['TEST_TOKEN'] == "1234" and conf.env['TEST_NAME'] == "test_room")
def test_Env_VariableNotInOs_ThrowsError():
env_vars = ['MISSING_VAR']
assert_raises(TypeError, config.Env, env_vars) | StarcoderdataPython |
205363 | from django.conf.urls import include
from django.urls import path
from blog import views as blog_views
from storage import views as strage_views
from work import views as work_views
from account import views as account_views
from rest_framework import routers
# Blog API: public articles, the current user's articles, and article tags.
blog_router = routers.DefaultRouter()
blog_router.register('article', blog_views.ArticleViewSet)
blog_router.register('my_article', blog_views.MyArticlesViewSet)
blog_router.register('article_tag', blog_views.ArticleTagViewSet)
# Storage API: shared file objects plus the current user's files.
storage_router = routers.DefaultRouter()
storage_router.register('fileobject', strage_views.FileObjectViewSet)
storage_router.register('myfileobject', strage_views.MyFileObjectViewSet)
# Work API: work items, tags, and the current user's items.
work_router = routers.DefaultRouter()
work_router.register('item', work_views.WorkItemViewSet)
work_router.register('tag', work_views.WorkTagViewSet)
work_router.register('myitem', work_views.MyWorkItemViewSet)
# Account API: the authenticated user's profile information.
account_router = routers.DefaultRouter()
account_router.register('userinfo', account_views.MyUserViewSet)
urlpatterns = [
    path('blog/', include(blog_router.urls)),
    path('storage/', include(storage_router.urls)),
    # The upload endpoint is a class-based view wired directly, not via a router.
    path('storage/fileobject/upload', strage_views.UploadFileObjectView.as_view()),
    path('work/', include(work_router.urls)),
    path('account/', include(account_router.urls)),
]
| StarcoderdataPython |
5103842 | <reponame>os-climate/sostrades-core<gh_stars>1-10
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# mode: python; py-indent-offset: 4; tab-width: 8; coding:utf-8
from sos_trades_core.study_manager.study_manager import StudyManager
class Study(StudyManager):
    """Usecase inputs for a two-name, four-scenario multi-scenario trade study."""
    def __init__(self, execution_engine=None):
        super().__init__(__file__, execution_engine=execution_engine)
    def setup_usecase(self):
        """Build and return the list of input dictionaries for the study."""
        # Trade values for the two named x variables.
        x1 = 2
        x2 = 4
        x3 = 0
        x4 = 3
        dict_values = {}
        dict_values[f'{self.study_name}.multi_scenarios.name_1.x_trade'] = [
            x1, x2]
        dict_values[f'{self.study_name}.multi_scenarios.name_2.x_trade'] = [
            x3, x4]
        dict_values[f'{self.study_name}.multi_scenarios.trade_variables'] = {'name_1.x': 'float',
                                                                             'name_2.x': 'float'}
        dict_values[f'{self.study_name}.multi_scenarios.name_list'] = [
            'name_1', 'name_2']
        scenario_list = ['scenario_1', 'scenario_2',
                         'scenario_3', 'scenario_4']
        # NOTE(review): a1/b1/a2/b2 are re-assigned to the same constants each
        # pass, and the '.name_1.a'/'.name_2.a' keys are identical across
        # iterations — only the scenario-prefixed keys vary; the loop could
        # likely be simplified (confirm intent).
        for scenario in scenario_list:
            a1 = 3
            b1 = 4
            a2 = 6
            b2 = 2
            dict_values[self.study_name + '.name_1.a'] = a1
            dict_values[self.study_name + '.name_2.a'] = a2
            dict_values[self.study_name + '.multi_scenarios.' +
                        scenario + '.Disc1.name_1.b'] = b1
            dict_values[self.study_name + '.multi_scenarios.' +
                        scenario + '.Disc1.name_2.b'] = b2
            dict_values[self.study_name + '.multi_scenarios.' +
                        scenario + '.Disc3.constant'] = 3
            dict_values[self.study_name + '.multi_scenarios.' +
                        scenario + '.Disc3.power'] = 2
            dict_values[self.study_name +
                        '.multi_scenarios.' + scenario + '.Disc3.z'] = 1.2
        return [dict_values]
if '__main__' == __name__:
    # Manual entry point: load the usecase inputs and execute in test mode.
    uc_cls = Study()
    uc_cls.load_data()
    uc_cls.run(for_test=True)
| StarcoderdataPython |
5035904 | from django.conf import settings
from django import http
from django import urls
# Seconds browsers may cache a pre-flight response.
ACCESS_CONTROL_MAX_AGE = getattr(settings, 'ACCESS_CONTROL_MAX_AGE', 0)
# Origin patterns accepted for CORS; consumed by match()/origin_is_match() below.
ACCESS_CONTROL_ALLOW_ORIGINS = set(getattr(settings, 'ACCESS_CONTROL_ALLOW_ORIGINS', []))
# Stored lower-cased so request header names compare case-insensitively.
ACCESS_CONTROL_ALLOW_HEADERS = set(map(str.lower, getattr(
    settings, 'ACCESS_CONTROL_ALLOW_HEADERS', [
        'Content-Type',
        'Accept',
        'Authorization',
    ]
)))
# Stored upper-cased to match request.method's canonical form.
ACCESS_CONTROL_ALLOW_METHODS = set(map(str.upper, getattr(
    settings, 'ACCESS_CONTROL_ALLOW_METHODS',
    ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE']
)))
ACCESS_CONTROL_EXPOSE_HEADERS = set(getattr(settings, 'ACCESS_CONTROL_EXPOSE_HEADERS', []))
# WSGI META keys for the Origin and pre-flight request-method headers.
HTTP_ORIGIN = 'HTTP_ORIGIN'
HTTP_ACR_METHOD = 'HTTP_ACCESS_CONTROL_REQUEST_METHOD'
# This is here so that we only process the values once instead of for every request
ACCESS_CONTROL_ALLOW_METHODS_VALUE = ', '.join(ACCESS_CONTROL_ALLOW_METHODS)
ACCESS_CONTROL_EXPOSE_HEADERS_VALUE = ', '.join(ACCESS_CONTROL_EXPOSE_HEADERS)
def match(pattern, origin):
    """Suffix-match *origin* against *pattern*, where '*' matches any run of characters.

    An empty pattern matches everything; matching proceeds right-to-left, so a
    pattern without a leading anchor matches any origin ending with it.
    """
    origin_length = len(origin)
    origin_index = origin_length - 1
    # Bug fix: `is 0` compared identity, not value — it worked only thanks to
    # CPython's small-int interning and raises a SyntaxWarning on Python 3.8+.
    if len(pattern) == 0:
        return True
    for character in reversed(pattern):
        if character == '*':
            # Try every possible prefix of the origin against the rest of the pattern.
            pattern_offset = (origin_length - origin_index) * -1
            for index in range(origin_index, 0, -1):
                if match(pattern[:pattern_offset], origin[:index]):
                    return True
            return False
        # NOTE(review): an empty origin with a non-empty, non-wildcard pattern
        # indexes origin[-1] here and raises IndexError — confirm callers never
        # pass an empty string (origin_is_match only guards against None).
        if origin[origin_index] != character:
            return False
        origin_index -= 1
    return True
def origin_is_match(patterns, origin):
    """Return True when *origin* is non-None and matches at least one pattern."""
    if origin is None:
        return False
    return any(match(candidate, origin) for candidate in patterns)
class CORSMiddleware(object):
    """ Provides necessary responses and headers for CORS requests.

    The process that this implements is defined in the Cross-Origin Resource
    Sharing specification - which can be found here:

    https://www.w3.org/TR/cors/#resource-requests
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def request_supports_credentials(self, request):
        """ Returns True if the given request supports credentials. Otherwise, False.

        TODO: This should actually attempt to detect credentials support
        """
        return True

    def __call__(self, request):
        response = self.get_response(request)
        origin = request.META.get(HTTP_ORIGIN, None)

        if not origin_is_match(ACCESS_CONTROL_ALLOW_ORIGINS, origin):
            # We have an Origin header, but it doesn't match ALLOW origins. Don't allow CORS here.
            return response

        response['Access-Control-Allow-Origin'] = origin

        if self.request_supports_credentials(request):
            # TODO: We should verify that the value of origin is not ` * `` here
            response['Access-Control-Allow-Credentials'] = 'true'

        if request.method != 'OPTIONS':
            # Bug fix: the allowed-headers set must be serialized; assigning the
            # set object itself emitted its repr() as the header value.
            response['Access-Control-Allow-Headers'] = ', '.join(ACCESS_CONTROL_ALLOW_HEADERS)
            response['Access-Control-Expose-Headers'] = ACCESS_CONTROL_EXPOSE_HEADERS_VALUE
            return response

        # At this point, we know that we have a pre-flight request
        requested_method = request.META.get(HTTP_ACR_METHOD, None)
        # Bug fix: WSGI exposes request headers under HTTP_*-style META keys and
        # the value is a comma-separated string, so the previous
        # META.get('Access-Control-Request-Headers', []) could never match.
        requested_headers = [
            header.strip()
            for header in request.META.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS', '').split(',')
            if header.strip()
        ]

        # Bug fix: membership must be tested against the set of methods; testing
        # against the joined string matched substrings and raised TypeError for
        # a missing (None) method.
        if requested_method not in ACCESS_CONTROL_ALLOW_METHODS:
            return response

        # The spec requires these to be ASCII and case-insensitive, so lower() is a safe comparison
        # in this case. Note that we intentionally don't do this above in some cases in order to
        # avoid any potential hacks using UTF-8 characters.
        if len(requested_headers) == 0:
            allowed_and_requested_header_values = ACCESS_CONTROL_ALLOW_HEADERS
        else:
            allowed_and_requested_header_values = []
            for header in requested_headers:
                # Bug fix: ALLOW_HEADERS was an undefined name (NameError at runtime).
                if header.lower() not in ACCESS_CONTROL_ALLOW_HEADERS:
                    # Since the header isn't allowed, we don't allow the requested request
                    return response
                allowed_and_requested_header_values.append(header)

        response['Access-Control-Max-Age'] = ACCESS_CONTROL_MAX_AGE
        response['Access-Control-Allow-Methods'] = ACCESS_CONTROL_ALLOW_METHODS_VALUE
        response['Access-Control-Allow-Headers'] = ','.join(allowed_and_requested_header_values)
        return response
| StarcoderdataPython |
11322379 | """
Generates a schema diagram of the database based on the
SQLAlchemy schema.
"""
from eralchemy import render_er
from ferry.database.models import Base
# Render an entity-relationship diagram of the SQLAlchemy models registered on
# Base and write it into the docs folder.
render_er(Base, "../docs/db_diagram.png")
| StarcoderdataPython |
1994419 | <filename>tests/test_webclient.py<gh_stars>1-10
"""
from twisted.internet import defer
Tests borrowed from the twisted.web.client tests.
"""
import os
import shutil
import OpenSSL.SSL
from twisted.trial import unittest
from twisted.web import server, static, util, resource
from twisted.internet import reactor, defer
try:
from twisted.internet.testing import StringTransport
except ImportError:
# deprecated in Twisted 19.7.0
# (remove once we bump our requirement past that version)
from twisted.test.proto_helpers import StringTransport
from twisted.python.filepath import FilePath
from twisted.protocols.policies import WrappingFactory
from twisted.internet.defer import inlineCallbacks
from scrapy.core.downloader import webclient as client
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
from scrapy.http import Request, Headers
from scrapy.settings import Settings
from scrapy.utils.misc import create_instance
from scrapy.utils.python import to_bytes, to_unicode
from tests.mockserver import ssl_context_factory
def getPage(url, contextFactory=None, response_transform=None, *args, **kwargs):
    """Adapted version of twisted.web.client.getPage"""
    def _clientfactory(url, *args, **kwargs):
        url = to_unicode(url)
        timeout = kwargs.pop('timeout', 0)
        f = client.ScrapyHTTPClientFactory(
            Request(url, *args, **kwargs), timeout=timeout)
        # By default the deferred fires with just the body; tests can pass a
        # response_transform to receive the full response object instead.
        f.deferred.addCallback(response_transform or (lambda r: r.body))
        return f
    from twisted.web.client import _makeGetterFactory
    return _makeGetterFactory(to_bytes(url), _clientfactory,
        contextFactory=contextFactory, *args, **kwargs).deferred
class ParseUrlTestCase(unittest.TestCase):
    """Test URL parsing facility and defaults values."""

    def _parse(self, url):
        # Build a client factory and expose the URL components it derived.
        f = client.ScrapyHTTPClientFactory(Request(url))
        return (f.scheme, f.netloc, f.host, f.port, f.path)

    def testParse(self):
        lip = '127.0.0.1'
        # (url, (scheme, netloc, host, port, path)); fragments are dropped and
        # default ports are 80 for http, 443 for https.
        tests = (
    ("http://127.0.0.1?c=v&c2=v2#fragment",     ('http', lip, lip, 80, '/?c=v&c2=v2')),
    ("http://127.0.0.1/?c=v&c2=v2#fragment",    ('http', lip, lip, 80, '/?c=v&c2=v2')),
    ("http://127.0.0.1/foo?c=v&c2=v2#frag",     ('http', lip, lip, 80, '/foo?c=v&c2=v2')),
    ("http://127.0.0.1:100?c=v&c2=v2#fragment", ('http', lip + ':100', lip, 100, '/?c=v&c2=v2')),
    ("http://127.0.0.1:100/?c=v&c2=v2#frag",    ('http', lip + ':100', lip, 100, '/?c=v&c2=v2')),
    ("http://127.0.0.1:100/foo?c=v&c2=v2#frag", ('http', lip + ':100', lip, 100, '/foo?c=v&c2=v2')),
    ("http://127.0.0.1",              ('http', lip, lip, 80, '/')),
    ("http://127.0.0.1/",             ('http', lip, lip, 80, '/')),
    ("http://127.0.0.1/foo",          ('http', lip, lip, 80, '/foo')),
    ("http://127.0.0.1?param=value",  ('http', lip, lip, 80, '/?param=value')),
    ("http://127.0.0.1/?param=value", ('http', lip, lip, 80, '/?param=value')),
    ("http://127.0.0.1:12345/foo",    ('http', lip + ':12345', lip, 12345, '/foo')),
    ("http://spam:12345/foo",         ('http', 'spam:12345', 'spam', 12345, '/foo')),
    ("http://spam.test.org/foo",      ('http', 'spam.test.org', 'spam.test.org', 80, '/foo')),
    ("https://127.0.0.1/foo",         ('https', lip, lip, 443, '/foo')),
    ("https://127.0.0.1/?param=value", ('https', lip, lip, 443, '/?param=value')),
    ("https://127.0.0.1:12345/",      ('https', lip + ':12345', lip, 12345, '/')),
    ("http://scrapytest.org/foo ",    ('http', 'scrapytest.org', 'scrapytest.org', 80, '/foo')),
    ("http://egg:7890 ",              ('http', 'egg:7890', 'egg', 7890, '/')),
    )

        for url, test in tests:
            # Expected components are bytes; ports stay as ints.
            test = tuple(
                to_bytes(x) if not isinstance(x, int) else x for x in test)
            self.assertEqual(client._parse(url), test, url)
class ScrapyHTTPPageGetterTests(unittest.TestCase):
    """Check the raw HTTP/1.0 request bytes produced by ScrapyHTTPClientFactory."""

    def test_earlyHeaders(self):
        # basic test stolen from twisted HTTPageGetter
        factory = client.ScrapyHTTPClientFactory(Request(
            url='http://foo/bar',
            body="some data",
            headers={
                'Host': 'example.net',
                'User-Agent': 'fooble',
                'Cookie': 'blah blah',
                'Content-Length': '12981',
                'Useful': 'value'}))

        # Note: an explicit Content-Length header is replaced by the real body length.
        self._test(factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Content-Length: 9\r\n"
            b"Useful: value\r\n"
            b"Connection: close\r\n"
            b"User-Agent: fooble\r\n"
            b"Host: example.net\r\n"
            b"Cookie: blah blah\r\n"
            b"\r\n"
            b"some data")

        # test minimal sent headers
        factory = client.ScrapyHTTPClientFactory(Request('http://foo/bar'))
        self._test(factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"\r\n")

        # test a simple POST with body and content-type
        factory = client.ScrapyHTTPClientFactory(Request(
            method='POST',
            url='http://foo/bar',
            body='name=value',
            headers={'Content-Type': 'application/x-www-form-urlencoded'}))

        self._test(factory,
            b"POST /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"Connection: close\r\n"
            b"Content-Type: application/x-www-form-urlencoded\r\n"
            b"Content-Length: 10\r\n"
            b"\r\n"
            b"name=value")

        # test a POST method with no body provided
        factory = client.ScrapyHTTPClientFactory(Request(
            method='POST',
            url='http://foo/bar'
        ))

        self._test(factory,
            b"POST /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"Content-Length: 0\r\n"
            b"\r\n")

        # test with single and multivalued headers
        factory = client.ScrapyHTTPClientFactory(Request(
            url='http://foo/bar',
            headers={
                'X-Meta-Single': 'single',
                'X-Meta-Multivalued': ['value1', 'value2'],
            }))

        self._test(factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"X-Meta-Multivalued: value1\r\n"
            b"X-Meta-Multivalued: value2\r\n"
            b"X-Meta-Single: single\r\n"
            b"\r\n")

        # same test with single and multivalued headers but using Headers class
        factory = client.ScrapyHTTPClientFactory(Request(
            url='http://foo/bar',
            headers=Headers({
                'X-Meta-Single': 'single',
                'X-Meta-Multivalued': ['value1', 'value2'],
            })))

        self._test(factory,
            b"GET /bar HTTP/1.0\r\n"
            b"Host: foo\r\n"
            b"X-Meta-Multivalued: value1\r\n"
            b"X-Meta-Multivalued: value2\r\n"
            b"X-Meta-Single: single\r\n"
            b"\r\n")

    def _test(self, factory, testvalue):
        """Drive the page-getter protocol against a StringTransport and compare
        the emitted request lines (order-insensitively) with *testvalue*."""
        transport = StringTransport()
        protocol = client.ScrapyHTTPPageGetter()
        protocol.factory = factory
        protocol.makeConnection(transport)
        self.assertEqual(
            set(transport.value().splitlines()),
            set(testvalue.splitlines()))
        return testvalue

    def test_non_standard_line_endings(self):
        # regression test for: http://dev.scrapy.org/ticket/258
        factory = client.ScrapyHTTPClientFactory(Request(
            url='http://foo/bar'))
        protocol = client.ScrapyHTTPPageGetter()
        protocol.factory = factory
        protocol.headers = Headers()
        # Headers terminated by bare "\n" (not "\r\n") must still be parsed.
        protocol.dataReceived(b"HTTP/1.0 200 OK\n")
        protocol.dataReceived(b"Hello: World\n")
        protocol.dataReceived(b"Foo: Bar\n")
        protocol.dataReceived(b"\n")
        self.assertEqual(protocol.headers,
            Headers({'Hello': ['World'], 'Foo': ['Bar']}))
from twisted.web.test.test_webclient import ForeverTakingResource, \
ErrorResource, NoLengthResource, HostHeaderResource, \
PayloadResource, BrokenDownloadResource
class EncodingResource(resource.Resource):
    """Test resource echoing the request body re-encoded as cp1251,
    with a matching Content-Encoding response header."""
    out_encoding = 'cp1251'

    def render(self, request):
        body = to_unicode(request.content.read())
        request.setHeader(b'content-encoding', self.out_encoding)
        return body.encode(self.out_encoding)
class WebClientTestCase(unittest.TestCase):
    """Exercise the Scrapy getPage adapter against a local twisted.web site
    exposing the stock twisted test resources plus EncodingResource."""

    def _listen(self, site):
        # Port 0: let the OS pick a free port; bound to loopback only.
        return reactor.listenTCP(0, site, interface="127.0.0.1")

    def setUp(self):
        self.tmpname = self.mktemp()
        os.mkdir(self.tmpname)
        FilePath(self.tmpname).child("file").setContent(b"0123456789")
        r = static.File(self.tmpname)
        r.putChild(b"redirect", util.Redirect(b"/file"))
        r.putChild(b"wait", ForeverTakingResource())
        r.putChild(b"error", ErrorResource())
        r.putChild(b"nolength", NoLengthResource())
        r.putChild(b"host", HostHeaderResource())
        r.putChild(b"payload", PayloadResource())
        r.putChild(b"broken", BrokenDownloadResource())
        r.putChild(b"encoding", EncodingResource())
        self.site = server.Site(r, timeout=None)
        self.wrapper = WrappingFactory(self.site)
        self.port = self._listen(self.wrapper)
        self.portno = self.port.getHost().port

    @inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        shutil.rmtree(self.tmpname)

    def getURL(self, path):
        return "http://127.0.0.1:%d/%s" % (self.portno, path)

    def testPayload(self):
        s = "0123456789" * 10
        return getPage(self.getURL("payload"), body=s).addCallback(
            self.assertEqual, to_bytes(s))

    def testHostHeader(self):
        # if we pass Host header explicitly, it should be used, otherwise
        # it should extract from url
        return defer.gatherResults([
            getPage(self.getURL("host")).addCallback(
                self.assertEqual, to_bytes("127.0.0.1:%d" % self.portno)),
            getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(
                self.assertEqual, to_bytes("www.example.com"))])

    def test_getPage(self):
        """
        L{client.getPage} returns a L{Deferred} which is called back with
        the body of the response if the default method B{GET} is used.
        """
        d = getPage(self.getURL("file"))
        d.addCallback(self.assertEqual, b"0123456789")
        return d

    def test_getPageHead(self):
        """
        L{client.getPage} returns a L{Deferred} which is called back with
        the empty string if the method is C{HEAD} and there is a successful
        response code.
        """
        def _getPage(method):
            return getPage(self.getURL("file"), method=method)
        return defer.gatherResults([
            _getPage("head").addCallback(self.assertEqual, b""),
            _getPage("HEAD").addCallback(self.assertEqual, b"")])

    def test_timeoutNotTriggering(self):
        """
        When a non-zero timeout is passed to L{getPage} and the page is
        retrieved before the timeout period elapses, the L{Deferred} is
        called back with the contents of the page.
        """
        d = getPage(self.getURL("host"), timeout=100)
        d.addCallback(
            self.assertEqual, to_bytes("127.0.0.1:%d" % self.portno))
        return d

    def test_timeoutTriggering(self):
        """
        When a non-zero timeout is passed to L{getPage} and that many
        seconds elapse before the server responds to the request. the
        L{Deferred} is errbacked with a L{error.TimeoutError}.
        """
        finished = self.assertFailure(
            getPage(self.getURL("wait"), timeout=0.000001),
            defer.TimeoutError)
        def cleanup(passthrough):
            # Clean up the server which is hanging around not doing
            # anything.
            connected = list(self.wrapper.protocols.keys())
            # There might be nothing here if the server managed to already see
            # that the connection was lost.
            if connected:
                connected[0].transport.loseConnection()
            return passthrough
        finished.addBoth(cleanup)
        return finished

    def testNotFound(self):
        return getPage(self.getURL('notsuchfile')).addCallback(self._cbNoSuchFile)

    def _cbNoSuchFile(self, pageData):
        self.assertIn(b'404 - No Such Resource', pageData)

    def testFactoryInfo(self):
        url = self.getURL('file')
        _, _, host, port, _ = client._parse(url)
        factory = client.ScrapyHTTPClientFactory(Request(url))
        reactor.connectTCP(to_unicode(host), port, factory)
        return factory.deferred.addCallback(self._cbFactoryInfo, factory)

    def _cbFactoryInfo(self, ignoredResult, factory):
        # The factory should have recorded status line and response headers.
        self.assertEqual(factory.status, b'200')
        self.assertTrue(factory.version.startswith(b'HTTP/'))
        self.assertEqual(factory.message, b'OK')
        self.assertEqual(factory.response_headers[b'content-length'], b'10')

    def testRedirect(self):
        return getPage(self.getURL("redirect")).addCallback(self._cbRedirect)

    def _cbRedirect(self, pageData):
        # getPage does not follow redirects: we receive the redirect page body.
        self.assertEqual(pageData,
            b'\n<html>\n    <head>\n        <meta http-equiv="refresh" content="0;URL=/file">\n'
            b'    </head>\n    <body bgcolor="#FFFFFF" text="#000000">\n    '
            b'<a href="/file">click here</a>\n    </body>\n</html>\n')

    def test_encoding(self):
        """ Test that non-standart body encoding matches
        Content-Encoding header """
        body = b'\xd0\x81\xd1\x8e\xd0\xaf'
        return getPage(
            self.getURL('encoding'), body=body, response_transform=lambda r: r)\
                .addCallback(self._check_Encoding, body)

    def _check_Encoding(self, response, original_body):
        content_encoding = to_unicode(response.headers[b'Content-Encoding'])
        self.assertEqual(content_encoding, EncodingResource.out_encoding)
        self.assertEqual(
            response.body.decode(content_encoding), to_unicode(original_body))
class WebClientSSLTestCase(unittest.TestCase):
    """Same payload round-trip as WebClientTestCase, but over a local TLS listener."""
    # Subclasses may override to pin a specific server-side TLS configuration.
    context_factory = None

    def _listen(self, site):
        return reactor.listenSSL(
            0, site,
            contextFactory=self.context_factory or ssl_context_factory(),
            interface="127.0.0.1")

    def getURL(self, path):
        return "https://127.0.0.1:%d/%s" % (self.portno, path)

    def setUp(self):
        self.tmpname = self.mktemp()
        os.mkdir(self.tmpname)
        FilePath(self.tmpname).child("file").setContent(b"0123456789")
        r = static.File(self.tmpname)
        r.putChild(b"payload", PayloadResource())
        self.site = server.Site(r, timeout=None)
        self.wrapper = WrappingFactory(self.site)
        self.port = self._listen(self.wrapper)
        self.portno = self.port.getHost().port

    @inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        shutil.rmtree(self.tmpname)

    def testPayload(self):
        s = "0123456789" * 10
        return getPage(self.getURL("payload"), body=s).addCallback(
            self.assertEqual, to_bytes(s))
class WebClientCustomCiphersSSLTestCase(WebClientSSLTestCase):
    """Same as the SSL case, but the server only accepts one custom cipher."""

    # we try to use a cipher that is not enabled by default in OpenSSL
    custom_ciphers = 'CAMELLIA256-SHA'
    context_factory = ssl_context_factory(cipher_string=custom_ciphers)

    def testPayload(self):
        # Client configured with the same cipher: the request must succeed.
        s = "0123456789" * 10
        settings = Settings({'DOWNLOADER_CLIENT_TLS_CIPHERS': self.custom_ciphers})
        client_context_factory = create_instance(ScrapyClientContextFactory, settings=settings, crawler=None)
        return getPage(self.getURL("payload"), body=s,
                       contextFactory=client_context_factory).addCallback(self.assertEqual, to_bytes(s))

    def testPayloadDefaultCiphers(self):
        # Client with default ciphers cannot negotiate TLS with this server.
        s = "0123456789" * 10
        d = getPage(self.getURL("payload"), body=s, contextFactory=ScrapyClientContextFactory())
        return self.assertFailure(d, OpenSSL.SSL.Error)
| StarcoderdataPython |
5141856 | # -*- coding: utf-8 -*-
import logging
import paho.mqtt.client as mqtt
import json
from matrix.matrixled import MatrixLed, get_led, LedRunner, colors
class LedControl:
    """Map Hermes/Snips MQTT dialogue events onto Matrix LED animations.

    Subscribes to the hotword/ASR/NLU/TTS topics and dispatches each
    incoming message to a handler that drives the LED ring.
    """

    _SUB_ON_HOTWORD = 'hermes/hotword/default/detected'
    _SUB_ON_SAY = 'hermes/tts/say'
    _SUB_ON_THINK = 'hermes/asr/textCaptured'
    _SUB_ON_LISTENING = 'hermes/asr/startListening'
    _SUB_ON_HOTWORD_TOGGLE_ON = 'hermes/hotword/toggleOn'
    _SUB_ON_ERROR = 'hermes/nlu/intentNotRecognized'
    _SUB_ON_SUCCESS = 'hermes/nlu/intentParsed'
    _SUB_ON_PLAY_FINISHED = 'hermes/audioServer/default/playFinished'
    _SUB_ON_TTS_FINISHED = 'hermes/tts/sayFinished'

    # Topics kept for reference but not currently subscribed to.
    '''
    _SUB_ON_LEDS_TOGGLE = 'hermes/leds/toggle'
    _SUB_ON_LEDS_TOGGLE_ON = 'hermes/leds/toggleOn'
    _SUB_ON_LEDS_TOGGLE_OFF = 'hermes/leds/toggleOff'
    _SUB_UPDATING = 'hermes/leds/systemUpdate'
    _SUB_ON_CALL = 'hermes/leds/onCall'
    _SUB_SETUP_MODE = 'hermes/leds/setupMode'
    _SUB_CON_ERROR = 'hermes/leds/connectionError'
    _SUB_ON_MESSAGE = 'hermes/leds/onMessage'
    _SUB_ON_DND = 'hermes/leds/doNotDisturb'
    '''

    def __init__(self, mqtt_host, mqtt_port):
        """Connect to the Matrix LED device and build (but not start) the MQTT client."""
        self._logger = logging.getLogger('LedControl')
        self._logger.info('Initializing')
        self._me = 'default'
        self._matrix = MatrixLed()
        self._runner = LedRunner()
        self._matrix.connect()
        self.mqtt_client = None
        self.mqtt_host = mqtt_host
        self.mqtt_port = mqtt_port
        self.mqtt_client = self.connect()

    def on_connect(self, client, userdata, flags, rc):
        """MQTT callback: subscribe to every dialogue topic we react to."""
        self._logger.info("Connected with result code {0}".format(rc))
        client.subscribe([
            (self._SUB_ON_HOTWORD, 0),
            (self._SUB_ON_SAY, 0),
            (self._SUB_ON_THINK, 0),
            (self._SUB_ON_LISTENING, 0),
            (self._SUB_ON_HOTWORD_TOGGLE_ON, 0),
            (self._SUB_ON_ERROR, 0),
            (self._SUB_ON_SUCCESS, 0),
            (self._SUB_ON_PLAY_FINISHED, 0),
            (self._SUB_ON_TTS_FINISHED, 0),
        ])

    def event_to_func(self, event):
        """Return the handler for an MQTT topic; unmanaged_event for unknown topics."""
        return {
            self._SUB_ON_HOTWORD: self.wakeup_event,
            self._SUB_ON_SAY: self.tts_start_event,
            self._SUB_ON_THINK: self.think_event,
            self._SUB_ON_LISTENING: self.listening_event,
            self._SUB_ON_HOTWORD_TOGGLE_ON: self.backtosleep_event,
            self._SUB_ON_ERROR: self.intent_error_event,
            self._SUB_ON_SUCCESS: self.intent_success_event,
            self._SUB_ON_PLAY_FINISHED: self.play_finished_event,
            self._SUB_ON_TTS_FINISHED: self.tts_finished_event
        }.get(event, self.unmanaged_event)

    def wakeup_event(self, payload):
        """Hotword detected: fade the ring in green."""
        self._runner.once(self._matrix.fadeIn, colors['green'])
        self._logger.info("=> wakeup: {}".format(payload))

    def backtosleep_event(self, payload):
        """Hotword re-armed: turn the LEDs off."""
        self._runner.once(self._matrix.shutdown)
        self._logger.info("=> backtosleep: {}".format(payload))

    def listening_event(self, payload):
        """ASR started listening (log only)."""
        self._logger.info("=> listening: {}".format(payload))

    def think_event(self, payload):
        """ASR captured text: red if nothing confident was heard, blue wave otherwise."""
        self._logger.info("=> thinking: {}".format(payload))
        likelihood = 0
        if payload is not None and 'likelihood' in payload:
            likelihood = payload['likelihood']
        if likelihood == 0:
            self._runner.once(self._matrix.solid, colors['red'])
        else:
            self._runner.start(self._matrix.wave, colors['blue'])

    def tts_start_event(self, payload):
        """TTS started speaking (log only)."""
        self._logger.info("=> tts start: {}".format(payload))

    def tts_finished_event(self, payload):
        """TTS finished speaking (log only)."""
        self._logger.info("=> tts finished: {}".format(payload))

    def intent_error_event(self, payload):
        """NLU could not recognize the intent: solid red."""
        self._runner.once(self._matrix.solid, colors['red'])
        self._logger.info("=> intent error: {}".format(payload))

    def intent_success_event(self, payload):
        """NLU parsed the intent: solid blue."""
        self._runner.once(self._matrix.solid, colors['blue'])
        self._logger.info("=> intent success: {}".format(payload))

    def play_finished_event(self, payload):
        """Audio playback finished (log only)."""
        self._logger.info("=> play finished: {}".format(payload))

    def unmanaged_event(self, payload):
        """Fallback handler for topics without a dedicated animation."""
        self._logger.info("=> unmanaged: {}".format(payload))

    def on_message(self, client, userdata, message):
        """MQTT callback: decode the JSON payload and dispatch to a handler."""
        if hasattr(message, 'payload') and message.payload:
            try:
                payload = json.loads(message.payload.decode('utf-8'))
                self._logger.info("LedControl has received {}".format(message.topic))
                self.event_to_func(message.topic)(payload)
            except Exception:
                # BUG FIX: previously `print(e)`, which bypassed the class
                # logger and dropped the traceback; log with full context.
                self._logger.exception(
                    "Failed to handle message on {}".format(message.topic))

    def connect(self):
        """Build the MQTT client with our callbacks attached (no network yet)."""
        mqtt_client = mqtt.Client()
        mqtt_client.on_connect = self.on_connect
        mqtt_client.on_message = self.on_message
        return mqtt_client

    def start(self):
        """Show the idle color, connect to the broker and start the MQTT loop."""
        self._runner.once(self._matrix.solid)
        self.mqtt_client.connect(self.mqtt_host, self.mqtt_port, 60)
        self.mqtt_client.loop_start()

    def stop(self):
        """Stop MQTT, reset the LEDs to the idle color and release the device."""
        self.mqtt_client.loop_stop()
        self.mqtt_client.disconnect()
        self._runner.stop()
        self._runner.once(self._matrix.solid)
        self._matrix.disconnect()
| StarcoderdataPython |
4950869 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
class PluginSettings:
    """Names of the Girder collections, folders and files used by the Minerva plugin."""

    MINERVA_COLLECTION = 'minerva'
    MINERVA_FOLDER = 'minerva'
    DATASET_FOLDER = 'dataset'
    MINERVA_SHARED_DATASET = 'minerva_shared_dataset'
    DATASET_SHARING_GROUP_NAME = 'dataset sharing'
    SOURCE_FOLDER = 'source'
    SESSION_FOLDER = 'session'
    GEOJSON_EXTENSION = '.geojson'
    SESSION_FILENAME = 'session.json'
| StarcoderdataPython |
1999635 | #!/usr/bin/env python
import argparse
import glob
import re
from re import search
import pandas as pd
import matplotlib.pyplot as plt
#import seaborn as sns
if __name__ == '__main__':
    # Parse slurm-*.out logs in --dir, collect two kinds of timing samples
    # ("A" from per-cell completion lines, "B" from total-seconds lines),
    # write a summary file and plot a histogram of the B times.
    parser = argparse.ArgumentParser()
    #directory with the slurm logs
    parser.add_argument('--dir', required=True)
    args = parser.parse_args()
    log_directory = args.dir
    summary_file = open(log_directory + '/summary.txt', 'w')
    # Counters and raw samples (in seconds) for the two timing styles.
    A_count = 0
    B_count = 0
    A_times = []
    B_times = []
    #SLURM log name pattern
    #slurm-xxxxxx.out
    for logfilename in glob.glob(log_directory + '/slurm-*.out'):
        print(logfilename)
        with open(logfilename) as logfile:
            log_lines = logfile.readlines()
            for line in log_lines:
                #Node count, ids
                #Example: n[40-41,65]
                # NOTE(review): 'n\[*\]' matches 'n' + zero-or-more '[' then ']';
                # the intent was probably r'n\[.*\]' — confirm before relying on it.
                if search('n\[*\]', line):
                    print("node count line")
                #Time based on time() difference just after advance_model()
                #Example: cell 45, 143 complete.113
                if 'complete' in line:
                    match = re.search(r'\.[0-9]+', line)
                    if match:
                        A_count+=1
                        # Strip the leading '.' from the match.
                        A_times.append(int(match.group()[1:]))
                #Time based on time() difference after writing status and fail_log
                #Example: Total Seconds: 1219
                if 'Total Seconds' in line:
                    match = re.search(r': [0-9]+', line)
                    if match:
                        B_count+=1
                        # Strip the leading ': ' from the match.
                        B_times.append(int(match.group()[2:]))
    #total_A_time = 0
    # NOTE(review): the averages below raise ZeroDivisionError when no samples
    # were collected — confirm the logs always contain at least one of each.
    summary_file.write(f"Count of A times: {A_count}\n")
    summary_file.write("Average A time: " + str(sum(A_times)/len(A_times)) + "\n")
    summary_file.write(f"A times: {A_times}\n")
    summary_file.write("\n")
    summary_file.write(f"Count of B times: {B_count}\n")
    summary_file.write("Average B time: " + str(sum(B_times)/len(B_times)) + "\n")
    summary_file.write(f"B times: {B_times}\n")
    #Convert times to minutes
    A_minutes = [time / 60 for time in A_times]
    B_minutes = [time / 60 for time in B_times]
    #Find max and min for plot limits
    A_min = min(A_minutes)
    A_max = max(A_minutes)
    print(f"A min: {A_min}")
    print(f"A max: {A_max}")
    B_min = min(B_minutes)
    B_max = max(B_minutes)
    #Count into buckets based on
    bin_size = 5 #5 minutes
    A_bins = range(int(A_min), int(A_max)+bin_size, bin_size)
    print(A_bins)
    B_bins = range(int(B_min), int(B_max)+bin_size, bin_size)
    A_binned = pd.cut(A_minutes, bins=A_bins, include_lowest=True)
    print(A_binned)
    B_binned = pd.cut(B_minutes, bins=B_bins, include_lowest=True)
    print(B_binned)
    print(B_binned.value_counts())
    #fig, axes = plt.subplots(2)
    fig, ax = plt.subplots()
    #axes[0] = A_binned.value_counts().plot.bar(rot=0)
    #axes[1] = B_binned.value_counts().plot.bar(rot=0)
    ax = B_binned.value_counts().plot.bar(rot=0)
    plt.show()
| StarcoderdataPython |
3531373 | <gh_stars>1-10
#!/usr/bin/env python
import csv
import itertools
import cv2
import numpy as np
from tqdm import tqdm
def read_rgb_image(img_path, size, flip):
    """Read an image as BGR, optionally mirror it horizontally, and resize.

    :param img_path: path to the image file
    :param size: target (width, height) tuple for cv2.resize
    :param flip: when True, mirror the image around the vertical axis
    :raises IOError: if the file cannot be read
    """
    assert type(size) is tuple, "size parameter must be a tuple, (96, 96) for instance"
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    if img is None:
        # BUG FIX: previously only printed and fell through, crashing later
        # in cv2.flip/cv2.resize with a confusing error. Fail fast instead.
        raise IOError("ERROR: can't read " + img_path)
    if flip:
        img = cv2.flip(img, 1)
    # BUG FIX: the third positional argument of cv2.resize is `dst`, not the
    # interpolation mode, so INTER_CUBIC was silently misused; pass it by name.
    return cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)
def load_one_flipped_pair(l_path, r_path, size):
    """Load a left/right eye image pair; the right image is mirrored."""
    left = read_rgb_image(l_path, size, flip=False)
    right = read_rgb_image(r_path, size, flip=True)
    return left, right
class RTBeneDataset(object):
    """Loader for the RT-BENE eye-blink dataset.

    Reads a subject-list CSV that points at per-subject label CSVs and
    left/right eye image folders, then splits subjects into training folds
    and a validation set.
    """

    def __init__(self, csv_subject_list, input_size):
        # csv_subject_list: CSV with rows
        #   subject_id, labels_csv, left_folder, right_folder, fold_type, fold_id
        # input_size: (width, height) tuple each eye image is resized to.
        self.csv_subject_list = csv_subject_list
        self.input_size = input_size
        self.subjects = {}
        self.training_set = {}
        self.validation_set = {}
        self.folds = {}
        self.load()

    def load_one_subject(self, csv_labels, left_folder, right_folder):
        """Load all labelled image pairs for one subject into arrays."""
        subject = {'y': []}
        left_inputs = []
        right_inputs = []
        with open(csv_labels) as csvfile:
            csv_rows = csv.reader(csvfile)
            for row in tqdm(csv_rows):
                img_name = row[0]
                img_lbl = float(row[1])
                if img_lbl == 0.5:  # annotators did not agree whether eye is open or not, so discard this sample
                    continue
                left_img_path = left_folder + img_name
                # Right-eye files share the name modulo a left->right swap.
                right_img_path = right_folder + img_name.replace("left", "right")
                try:
                    left_img, right_img = load_one_flipped_pair(left_img_path, right_img_path, self.input_size)
                    left_inputs.append(left_img)
                    right_inputs.append(right_img)
                    subject['y'].append(img_lbl)
                except:
                    # Best-effort loading: skip unreadable pairs but keep going.
                    print('Failure loading pair ' + left_img_path + ' ' + right_img_path)
        subject['x'] = [np.array(left_inputs), np.array(right_inputs)]
        return subject

    def load(self):
        """Read the subject list and populate training folds / validation set."""
        with open(self.csv_subject_list) as csvfile:
            csv_rows = csv.reader(csvfile)
            for row in csv_rows:
                subject_id = int(row[0])
                csv_labels = row[1]
                left_folder = row[2]
                right_folder = row[3]
                fold_type = row[4]
                fold_id = int(row[5])
                if fold_type == 'discarded':
                    print('\nsubject ' + str(subject_id) + ' is discarded.')
                else:
                    print('\nsubject ' + str(subject_id) + ' is loading...')
                    # Paths in the subject list are relative to the list file.
                    csv_filename = self.csv_subject_list.split('/')[-1]
                    csv_labels = self.csv_subject_list.replace(csv_filename, csv_labels)
                    left_folder = self.csv_subject_list.replace(csv_filename, left_folder)
                    right_folder = self.csv_subject_list.replace(csv_filename, right_folder)
                    if fold_type == 'training':
                        self.training_set[subject_id] = self.load_one_subject(csv_labels, left_folder, right_folder)
                        if fold_id not in self.folds.keys():
                            self.folds[fold_id] = []
                        self.folds[fold_id].append(subject_id)
                    elif fold_type == 'validation':
                        self.validation_set[subject_id] = self.load_one_subject(csv_labels, left_folder, right_folder)

    @staticmethod
    def get_data(dataset, subject_list):
        """Concatenate the given subjects into one {'x', 'y', counts} fold dict."""
        all_x_left = [dataset[subject_id]['x'][0] for subject_id in subject_list]
        all_x_right = [dataset[subject_id]['x'][1] for subject_id in subject_list]
        all_y = [np.array(dataset[subject_id]['y']) for subject_id in subject_list]
        # NOTE(review): 'x' is ordered [right, left] here although subjects
        # store [left, right] — confirm downstream consumers expect this.
        fold = {'x': [np.concatenate(all_x_right), np.concatenate(all_x_left)], 'y': np.concatenate(all_y)}
        fold['positive'] = np.count_nonzero(fold['y'] == 1.)
        fold['negative'] = np.count_nonzero(fold['y'] == 0.)
        fold['y'] = fold['y'].tolist()
        return fold

    def get_training_data(self, fold_ids):
        """Return the merged training data for the given fold ids."""
        subject_list = list(itertools.chain(*[self.folds[fold_id] for fold_id in fold_ids]))
        return self.get_data(self.training_set, subject_list)

    def get_validation_data(self):
        """Return the merged data for all validation subjects."""
        subject_list = self.validation_set.keys()
        return self.get_data(self.validation_set, subject_list)
| StarcoderdataPython |
8170847 | <filename>sensorai/layers.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_layers.ipynb (unless otherwise specified).
__all__ = ['AdaptiveConcatPool1d', 'AdaptiveConcatPool2d', 'PoolType', 'adaptive_pool']
# Cell
from .imports import *
from .tf_imports import *
# Cell
class AdaptiveConcatPool1d(keras.layers.Layer):
    """Concatenate adaptive max- and average-pooling along the last axis (1D)."""

    def __init__(self, size=None):
        # Idiom fix: Python 3 zero-argument super().
        super().__init__()
        self.size = size or 1
        self.ap = tfa.layers.AdaptiveAveragePooling1D(self.size)
        self.mp = tfa.layers.AdaptiveMaxPooling1D(self.size)

    def call(self, x):
        # Max first, then average, concatenated on the channel axis.
        return tf.concat([self.mp(x), self.ap(x)], -1)
# Cell
class AdaptiveConcatPool2d(keras.layers.Layer):
    """Concatenate adaptive max- and average-pooling along the last axis (2D)."""

    def __init__(self, size=None):
        # Idiom fix: Python 3 zero-argument super().
        super().__init__()
        self.size = size or 1
        self.ap = tfa.layers.AdaptiveAveragePooling2D(self.size)
        self.mp = tfa.layers.AdaptiveMaxPooling2D(self.size)

    def call(self, x):
        # Max first, then average, concatenated on the channel axis.
        return tf.concat([self.mp(x), self.ap(x)], -1)
# Cell
class PoolType: Avg,Max,Cat = 'Avg','Max','Cat'
# Cell
def adaptive_pool(pool_type):
    """Map a PoolType identifier to the matching pooling layer class."""
    if pool_type == 'Avg':
        return tfa.layers.AdaptiveAveragePooling2D
    if pool_type == 'Max':
        return tfa.layers.AdaptiveMaxPooling2D
    # Anything else falls back to the concatenated max+avg pooling layer.
    return AdaptiveConcatPool2d
# User-facing reply strings for the Kazuma sticker-stealing Telegram bot.
# NOTE(review): several strings contain typos ("sucessful", "Sucessful",
# "Unkown") — they are runtime text shown to users and are left unchanged here.

# Lifecycle / admin messages.
START = "Yes, I'm Kazuma."
RESTART = "Re;Starting Bot in Another Instance from Zero."
GITPULL = "Re;Starting Bot in Another Instance from the Latest Commit."
NOT_SUDO = "This is a developer restricted command.\nYou do not have permissions to run this."

# Stealing flow.
STEALING = "_STEAL!!_"
STEALING_PACK = "Stolen {} out of {} stickers."
NEW_PACK = "_STEAL!!_\nCreating a new steal pack."
NEW_PACK_CREATED = "New steal pack created! [Get it here](t.me/addstickers/{})."
REPLY_NOT_STICKER = "This skill works only when replied to stickers."
REPLY_NOT_MY_STICKER = "This skill only works on stickers of packs that I have stolen."
REPLY_NOT_STICKER_IMAGE = "This skill only works on stickers and images."
STEAL_ERROR = "I couldn't steal that sticker. Blame Aqua for being so useless."
STEAL_NOT_REPLY = "Reply to an image or a sticker to steal."
STEAL_SUCESSFUL = "Steal sucessful! Here's your [steal pack](t.me/addstickers/{})."
STEAL_SKIPPED = "Steal sucessful but some stickers might've been skipped. Here's your [steal pack](t.me/addstickers/{})."
STEALPACK_NO_ARGS = "Specify a packname to steal stickers into. Like this:\n/stealpack _<pack-name>_"
STEALPACK_NOT_REPLY = "Reply to a sticker and send:\n/stealpack _<pack-name>_"

# Error conditions.
NEWPACK_ERROR = "I was unable to create that sticker pack. Must be the Demon King's magic."
RESIZE_ERROR = "Unable to resize image to the correct dimensions."
INVALID_EMOJI = "Some of the emojis you specified are not supported."
INVALID_PACKNAME = "The pack name or emojis you specified contain unsupported characters."
INVALID_PEER_ID = "Freshly isekai-d? Click the button to join my guild!"
PACK_DOESNT_EXIST = "What you're trying to steal doesn't exist.\n( ͡° ͜ʖ ͡°)"
PACK_LIMIT_EXCEEDED = "This pack has reached maximum capacity. You can /switch to a different pack or make a new one."
PACK_ALREADY_EXISTS = "I think you're looking for [this pack](t.me/addstickers/{})."
NO_STOLEN_PACKS = "You haven't stolen any packs yet newb."
UNANIMATED_IN_ANIMATED = "You can't add normal stickers in an animated sticker pack. Try stealing it in a normal pack."
ANIMATED_IN_UNANIMATED = "You can't add animated stickers in a normal sticker pack. Try stealing it in an animated pack."
NOT_YOUR_PACK = "Hah! Nice try but you can't mess with others' stickers."

# Deletion / management.
DELETE_PACK = "This is beyond my powers. Use @stickers to delete sticker packs."
DELETE_ERROR = "I couldn't delete that sticker. Looks like those Arch-Devils are at it again."
DELETE_SUCESSFUL = "Poof! The sticker is gone."
DELETE_NOT_REPLY = "This skill only works on stickers of packs that I have stolen."
SETPOSITION_INVALID_INPUT = "That's not how this skill works. Reply to a sticker and try:\n/setposition _<position-number>_"
SETPOSITION_NOT_REPLY = "Reply to the sticker whose position you wanna change."
SETPOSITION_ERROR = "I couldn't change sticker positions. Maybe the undead are interfering with my magic."
SWITCH_INVALID_INPUT = "Specify the pack you want to be set as default by:\n/switch _<pack-name>_\n/switch _<pack-index-number>_"
SWITCH_PACK_DOESNT_EXIST = "I don't think this pack exists. Use /mypacks to get a list of packs that you've stolen."
SWITCH_ALREADY_DEFAULT = "*{}* is already set as your default pack."
SWITCH_CHANGED_DEFAULT = "*{}* is now set as your default pack."
SWITCH_INDEX_ERROR = "I couldn't switch default packs. Maybe those Axis Cult members are interfering with my magic."
SWITCH_PACKNAME_ERROR = "I couldn't switch default packs. Maybe those Eris Cult members are interfering with my magic."

# Help and stats.
HELP = """
Hi, I'm Kazuma.
Here's a list of skills that I can use:
/steal - Steal a sticker
/stealpack - Steal the whole pack
/mypacks - List your steal packs
/switch - Change your default pack
/delsticker - Delete sticker from pack
/setposition - Change sticker postiton
"""
STATS = "Stealers: {}\nStolen Packs: {}"
GIST = "https://gist.github.com/notdedsec/2c4aa0359aef072b0e3025d55eaba858"
8140760 | # experiment tracker
import sys
import os
import numpy as np
import pandas as pd
from dask import compute, delayed
sys.path.append('../../')
sys.path.append('../')
sys.path.append('../../experiment-impact-tracker/')
from experiment_impact_tracker.data_interface import DataInterface
from experiment_impact_tracker.data_utils import *
from experiment_impact_tracker.data_utils import (load_data_into_frame,
load_initial_info,
zip_data_and_info)
def compute_aggregate_power(df, info, PUE, task_epoch_df, use_cuda):
    '''Aggregate and partition power consumption by task interval timestamps.

    Converts per-sample power draws (CPU via RAPL, optionally GPU via
    nvidia-smi) into kWh per sampling interval, applies the PUE factor, and
    attributes the energy to the tasks listed in task_epoch_df.

    Returns (df, task_power_df): df gains a 'total_power_per_timestep'
    column; task_power_df has one 'Experiment' total row plus one row per task.
    '''
    # --- experiment time span and per-sample durations ---
    exp_end_timestamp = datetime.timestamp(info["experiment_end"])
    exp_len = exp_end_timestamp - datetime.timestamp(info["experiment_start"])
    exp_len_hours = exp_len / 3600.0
    time_differences = df["timestamp_orig"].diff()
    # First interval is measured from the experiment start, not from diff().
    time_differences[0] = df["timestamp_orig"][0] - datetime.timestamp(
        info["experiment_start"]
    )
    # Add final timestamp and extrapolate last row of power estimates
    time_differences.loc[len(time_differences)] = (
        exp_end_timestamp - df["timestamp_orig"][len(df["timestamp_orig"]) - 1]
    )
    time_differences_in_hours = time_differences / 3600.0
    # --- CPU (RAPL) energy per interval, in kWh ---
    power_draw_rapl_kw = df["rapl_estimated_attributable_power_draw"] / 1000.0
    # Repeat the last sample so the extrapolated final interval has a draw.
    power_draw_rapl_kw.loc[len(power_draw_rapl_kw)] = power_draw_rapl_kw.loc[
        len(power_draw_rapl_kw) - 1
    ]
    kw_hr_rapl = (
        np.multiply(time_differences_in_hours, power_draw_rapl_kw)
        if power_draw_rapl_kw is not None
        else None
    )
    # --- GPU (nvidia) energy per interval, in kWh ---
    if use_cuda:
        num_gpus = len(info["gpu_info"])  # NOTE(review): unused below
        nvidia_power_draw_kw = df["nvidia_estimated_attributable_power_draw"] / 1000.0
        nvidia_power_draw_kw.loc[len(nvidia_power_draw_kw)] = nvidia_power_draw_kw.loc[
            len(nvidia_power_draw_kw) - 1
        ]
        # elementwise multiplication and sum
        kw_hr_nvidia = np.multiply(time_differences_in_hours, nvidia_power_draw_kw)
    # apply PUE
    if use_cuda and (kw_hr_rapl is not None):
        total_power_per_timestep = PUE * (kw_hr_nvidia + kw_hr_rapl)
    elif kw_hr_rapl is not None:
        total_power_per_timestep = PUE * (kw_hr_rapl)
    elif use_cuda:
        total_power_per_timestep = PUE * (kw_hr_nvidia)
    else:
        raise ValueError("Unable to get either GPU or CPU metric.")
    # interpolate power based on timesteps
    # Append last row which implies power draw from last sample extrapolated till the end of experiment
    df.loc[len(df)] = df.loc[len(df) - 1] ## Duplicating last row to match length of total_power_per_timestep
    df.loc[len(df)-1,'timestamp'] = task_epoch_df.loc[len(task_epoch_df)-1,'epoch_timestamp'] #update the timestamp to match end of experiment
    df['total_power_per_timestep'] = total_power_per_timestep.copy()
    task_power_df = pd.DataFrame(columns=['task','power'])
    if total_power_per_timestep is not None:
        # end-to-end power consumption
        task_power_df.loc[0] = ['Experiment', total_power_per_timestep.sum()]
        prev_epoch_power = 0
        print('number of timestamps: {}'.format(len(total_power_per_timestep)))
        # power consumption per task: cumulative energy up to each task's end
        # timestamp minus what previous tasks already consumed.
        for i in range(len(task_epoch_df)):
            task = task_epoch_df.loc[i,'task']
            epoch = task_epoch_df.loc[i,'epoch_timestamp']
            epoch_idx = len(df[df['timestamp'] <= epoch])
            current_epoch_power = total_power_per_timestep[:epoch_idx].sum()
            task_power_df.loc[i+1] = [task, current_epoch_power - prev_epoch_power ]
            prev_epoch_power = current_epoch_power
    return df, task_power_df
def get_EIT_tracker_data(logdir, use_cuda, read_flops):
    '''Fetch experiment-impact-tracker data and split it into
    1) per-sampling-epoch power df, 2) per-task flops/power df and
    3) a one-row end-to-end summary df.
    '''
    # try:
    info = load_initial_info(logdir)
    # Get total values from default data interface for the entire experiment
    data_interface = DataInterface([logdir])
    total_power = data_interface.total_power
    total_carbon = data_interface.kg_carbon
    PUE = data_interface.PUE
    exp_len_hours = data_interface.exp_len_hours
    # Calculate your own sepeartely for each subtask in the experiment
    # impact tracker log
    tracker_df = load_data_into_frame(logdir)
    if use_cuda:
        power_df = tracker_df[0][['timestamp','rapl_power_draw_absolute','rapl_estimated_attributable_power_draw','nvidia_draw_absolute','nvidia_estimated_attributable_power_draw']].copy()
        power_df.loc[:,'total_attributable_power_draw'] = power_df['rapl_estimated_attributable_power_draw'] + power_df['nvidia_estimated_attributable_power_draw']
    else:
        power_df = tracker_df[0][['timestamp','rapl_power_draw_absolute','rapl_estimated_attributable_power_draw']].copy()
        power_df.loc[:,'total_attributable_power_draw'] = power_df['rapl_estimated_attributable_power_draw']
    # start time from 0 (keep the original wall-clock in timestamp_orig)
    power_df.loc[:,'timestamp_orig'] = power_df['timestamp']
    power_df.loc[:,'timestamp'] = power_df['timestamp'] - power_df['timestamp'][0]
    # papi log
    flops_df = None
    total_duration = 0
    if read_flops:
        compute_flops_csv = logdir + 'compute_costs_flop.csv'
        flops_df = pd.read_csv(compute_flops_csv)
        flops_df.loc[:,'start_time'] = flops_df['start_time'] - flops_df['start_time'][0]
        # Aggregate power draws per epoch for each papi context calculation (i.e. setup, axial, aggr etc))
        epoch_power_draw_list = []
        # Each task's end == next task's start; the last task ends at
        # its own start + duration.
        epoch_timestamps = list(flops_df['start_time'].values[1:]) + [flops_df['start_time'].values[-1] + flops_df['duration'].values[-1]]
        task_epoch_df = pd.DataFrame()
        task_epoch_df.loc[:,'task'] = flops_df['task'].values
        task_epoch_df.loc[:,'epoch_timestamp'] = epoch_timestamps
        power_df, task_power_df = compute_aggregate_power(power_df, info, PUE, task_epoch_df, use_cuda)
        flops_df = pd.merge(flops_df,task_power_df,on='task',how='left')
        print('total_power sanity check: default: {:6.5f}, calculated: {:6.5f}, {:6.5f}'.format(total_power, task_power_df.loc[0,'power'],power_df['total_power_per_timestep'].sum()))
    total_duration_papi = (power_df['timestamp'].values[-1]-power_df['timestamp'].values[0])/3600
    tracker_summary_df = pd.DataFrame(columns=['total_power','total_carbon','PUE','total_duration_papi','total_duration_impact_tracker'])
    tracker_summary_df.loc[0] = [total_power,total_carbon,PUE,total_duration_papi,exp_len_hours]
    return power_df, flops_df, tracker_summary_df
    # except:
    # print(f'No valid experiment impact tracker log found at {logdir}')
    # return None
def collate_EIT_tracker_data(tracker_log_dir_list, use_cuda, read_flops):
    '''Collate EIT tracker data from a set of experiments (e.g. FastSurfer
    results for all subjects).

    Loads each log dir in parallel via dask and concatenates the per-run
    frames. Returns (summary_df, flops_df, power_df).
    '''
    values = [delayed(get_EIT_tracker_data)(tracker_log_dir, use_cuda, read_flops)
              for tracker_log_dir in tracker_log_dir_list]
    tracker_data_list = compute(*values, scheduler='threads', num_workers=4)
    # Collect the parts first, then concatenate once: DataFrame.append is
    # deprecated (removed in pandas 2.0) and repeated appends are O(n^2).
    power_parts = []
    flops_parts = []
    summary_parts = []
    for td in tracker_data_list:
        if td is None:
            # get_EIT_tracker_data returns None for invalid log dirs.
            continue
        power_df, flops_df, tracker_summary_df = td
        power_parts.append(power_df)
        flops_parts.append(flops_df)
        summary_parts.append(tracker_summary_df)
    # Seed with an empty frame so all-None input still yields empty frames.
    power_df_concat = pd.concat([pd.DataFrame()] + power_parts)
    flops_df_concat = pd.concat([pd.DataFrame()] + flops_parts)
    tracker_summary_df_concat = pd.concat([pd.DataFrame()] + summary_parts)
    return tracker_summary_df_concat, flops_df_concat, power_df_concat
def collate_CC_tracker_data(log_dirs):
    '''Collate CodeCarbon tracker data from a set of experiments (e.g.
    FastSurfer results for all subjects).

    Reads <dir>/emissions.csv from each directory and returns one
    concatenated DataFrame.
    '''
    # DataFrame.append is deprecated (removed in pandas 2.0); read all
    # frames and concatenate once instead.
    frames = [pd.read_csv(f'{log_dir}/emissions.csv') for log_dir in log_dirs]
    return pd.concat([pd.DataFrame()] + frames)
11293222 | <gh_stars>1-10
from typing import Callable, Any, List
from pandas import Series, concat
from probability.calculations.mixins import OperatorMixin
from probability.custom_types.calculation_types import CalculationValue
from probability.utils import is_scalar
class ArrayOperator(OperatorMixin, object):
    """
    An Operator that produces an output from a list of inputs
    e.g. Minimum, Maximum, Mean, Median or some other array method.
    """
    symbol: str
    operator: Callable[[Any], Any]
    pandas_op_name: str

    @classmethod
    def get_name(cls, names: List[str]) -> str:
        """
        Return the name of the Operator applied to a list of inputs.

        :param names: List of input names.
        """
        return f"{cls.symbol}({', '.join(names)})"

    @classmethod
    def operate(
            cls,
            values: List[CalculationValue]
    ) -> CalculationValue:
        """
        Execute the operation on a list of input values.

        :param values: The values to operate on.
        """
        if all(is_scalar(value) for value in values):
            return cls.operator(values)
        if all(isinstance(value, Series) for value in values):
            combined = concat(values, axis=1)
            return combined.apply(cls.pandas_op_name, axis=1)
        types = [type(value) for value in values]
        raise TypeError(f'Unsupported types in values. {types}')
| StarcoderdataPython |
315667 | <reponame>Dlubal-Software/RFEM_Python_Client<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir)
)
sys.path.append(PROJECT_ROOT)
from RFEM.initModel import Model
from RFEM.LoadCasesAndCombinations.staticAnalysisSettings import StaticAnalysisSettings
from RFEM.LoadCasesAndCombinations.designSituation import DesignSituation
from RFEM.enums import DesignSituationType
# Connect to a running RFEM instance on import if none is active yet
# (module-level side effect required by the RFEM client library).
if Model.clientModel is None:
    Model()
def test_design_situation():
    """Create design situations via the RFEM API and verify they round-trip."""
    # Start from a clean model inside one modification transaction.
    Model.clientModel.service.delete_all()
    Model.clientModel.service.begin_modification()
    StaticAnalysisSettings()
    # Testing: Automatic naming, design situation keys and manual comments
    DesignSituation(1, DesignSituationType.DESIGN_SITUATION_TYPE_EQU_PERMANENT_AND_TRANSIENT, True, 'ULS (EQU) - Permanent and transient', 'ULS (EQU) - Permanent and transient')
    ds = Model.clientModel.service.get_design_situation(1)
    assert ds.no == 1
    DesignSituation(2, DesignSituationType.DESIGN_SITUATION_TYPE_EQU_ACCIDENTAL_PSI_1_1, comment='ULS (EQU) - Accidental - psi-1,1')
    ds = Model.clientModel.service.get_design_situation(2)
    assert ds.no == 2
    DesignSituation(3, DesignSituationType.DESIGN_SITUATION_TYPE_EQU_ACCIDENTAL_PSI_2_1, comment='ULS (EQU) - Accidental - psi-2,1')
    ds = Model.clientModel.service.get_design_situation(3)
    assert ds.no == 3
    # Testing: Manual naming, design situation keys
    DesignSituation(4, DesignSituationType.DESIGN_SITUATION_TYPE_EQU_SEISMIC, comment='MANUAL NAME: ULS (EQU) - Seismic')
    ds = Model.clientModel.service.get_design_situation(4)
    assert ds.no == 4
    assert ds.design_situation_type == DesignSituationType.DESIGN_SITUATION_TYPE_EQU_SEISMIC.name
    Model.clientModel.service.finish_modification()
| StarcoderdataPython |
class Frood:
    """Toy class that tracks an age and announces birthdays."""

    def __init__(self, age):
        self.age = age
        print("Frood initialized")

    def anniversary(self):
        """Advance the age by one year and announce the new age."""
        self.age = self.age + 1
        print("Frood is now {} years old".format(self.age))
# Demo: two independent instances each keep their own age.
f1 = Frood(12)
f2 = Frood(97)
f1.anniversary()
f2.anniversary()
f1.anniversary()
f2.anniversary()
| StarcoderdataPython |
6633963 | <reponame>Its-LALOL/Python-Helper
# https://github.com/Its-LALOL/Python-Helper
from random import choice
from time import sleep
from os import name as osname, system
try: from urllib.request import urlopen
except: from urllib2 import urlopen
def random_chars(amount, english_chars=True, russian_chars=False, numbers=True, spec_chars=False, big_chars=True, litte_chars=True):
    """Return a random string of `amount` characters from the selected classes.

    :param amount: number of characters to generate
    :param english_chars: include Latin letters
    :param russian_chars: include Cyrillic letters
    :param numbers: include digits
    :param spec_chars: include punctuation/special characters
    :param big_chars: include upper-case letters (for enabled alphabets)
    :param litte_chars: include lower-case letters (for enabled alphabets)
    :raises ValueError: if no character class is enabled
    """
    chars = []
    if english_chars:
        if big_chars:
            chars += list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        if litte_chars:
            chars += list('abcdefghijklmnopqrstuvwxyz')
    if russian_chars:
        if big_chars:
            chars += list('АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ')
        if litte_chars:
            chars += list('абвгдеёжзийклмнопрстуфхцчшщъыьэюя')
    if numbers:
        chars += list('1234567890')
    if spec_chars:
        chars += list('~`!@#$%^&*()-_+={}[]|\/:;"\'<>,.?')
    if not chars:
        # Previously choice([]) raised a bare IndexError; fail clearly instead.
        raise ValueError('at least one character class must be enabled')
    # join() instead of repeated string concatenation (avoids quadratic growth).
    return ''.join(choice(chars) for _ in range(amount))
# Keep a reference to the builtin input() before the `type` function's
# `input` parameter shadows the name inside its body.
inputtt = input


def type(text, speed=0.1, input=False):
    """Print `text` one character at a time, like a typewriter.

    When `input` is truthy, prompt the user afterwards and return their
    reply (no trailing newline is printed); otherwise print a newline
    and return None. NOTE: shadows the builtin `type` by design.
    """
    for char in text:
        print(char, end='', flush=True)
        sleep(speed)
    if not input:
        print()
        return None
    return inputtt()
def clear():
    """Clear the terminal using the platform's clear-screen command."""
    command = 'cls' if osname in ['nt', 'dos'] else 'clear'
    system(command)
def download(url, name=None):
    """Download `url` to a local file.

    :param url: address to fetch
    :param name: target filename; when None it is derived from the
        Content-Disposition response header
    :raises Exception: when no filename can be determined
    """
    response = urlopen(url)
    if name is None:
        try:
            name = response.headers['Content-Disposition'].replace('attachment; filename=', '')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; also fixed the "Unkown" typo in the message.
            raise Exception('Unknown file name.')
    with open(name, 'wb') as f:
        f.write(response.read())
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.