id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1659993 | <filename>lightbus_examples/ex01_quickstart/another_service/check_password.py
# File: ./another_service/check_password.py
# Import our service's bus client.
from bus import bus

# Call the check_password() procedure exposed by our auth API over the bus.
valid = bus.auth.check_password(username="admin", password="<PASSWORD>")

# Report the outcome to the user.
print("Password valid!" if valid else "Oops, bad username or password")
| StarcoderdataPython |
1662661 | import rsa
def getIntSize(i):
    """Return the minimum number of bytes required to represent integer *i*."""
    bits = i.bit_length()
    return -(-bits // 8)  # ceiling division: ceil(bits / 8)
def pubkeyToBin(key):
    """Serialize an RSA public key as little-endian bytes: size(n) (4 bytes), n, e."""
    n_size = getIntSize(key.n)
    blob = bytearray()
    blob.extend(n_size.to_bytes(4, "little"))
    blob.extend(key.n.to_bytes(n_size, "little"))
    blob.extend(key.e.to_bytes(getIntSize(key.e), "little"))
    return blob
def pubkeyFromBin(data):
    """Deserialize a (size(n), n, e) little-endian blob back into an rsa.PublicKey."""
    # First 4 bytes hold the byte length of n; n ends at offset size + 4.
    n_end = int.from_bytes(data[:4], "little") + 4
    modulus = int.from_bytes(data[4:n_end], "little")
    exponent = int.from_bytes(data[n_end:], "little")
    return rsa.PublicKey(modulus, exponent)
| StarcoderdataPython |
177651 | <reponame>ruijis/buildsimhub_python_api
from .model_action import ModelAction
class HeatingEfficiency(ModelAction):
    """Model action that sets the thermal efficiency of all heating equipment."""

    def __init__(self):
        # Register this action under the 'heating_all_equip' topic.
        ModelAction.__init__(self, 'heating_all_equip')
        self._measure_name = 'HeatingEff'
        # Efficiency is a dimensionless fraction, so valid values lie in [0, 1].
        self._lower_limit = 0
        self._upper_limit = 1
        # Help text surfaced to users of the measure (keep content unchanged).
        self._measure_help = '''
measure name: HeatingEff
Unit: Not required
Minimum: 0
Maximum: 1
Type: numeric
This measure will update all the thermal efficiency heating equipment, including:
Coil:Heating:Fuel, Coil:Heating:Gas:MultiStage,
Boiler:HotWater, Boiler:Steam, HeatPump:WaterToWater:ParameterEstimation:Heating
'''
154823 |
from .views import yaml_to_html

try:
    # Django >= 2.0 exposes the modern path() API.
    from django.urls import path

    urlpatterns = [
        path('api-doc/', yaml_to_html, name="api-doc"),
    ]
# Only an ImportError means we are on an older Django; the original bare
# `except:` also swallowed SystemExit/KeyboardInterrupt and any real bug.
except ImportError:
    # Fall back to the legacy regex-based URL API (Django < 2.0).
    from django.conf.urls import url

    urlpatterns = [
        url(r'^api-doc/', yaml_to_html, name="api-doc"),
    ]
| StarcoderdataPython |
4809658 | """Base class for source collector unit tests."""
import io
import json
import logging
import pathlib
import unittest
import sys
import zipfile
from typing import Union
from unittest.mock import AsyncMock, PropertyMock, patch
import aiohttp
from base_collectors import MetricCollector
# Locate the repository's server/src directory relative to this file and put
# it on sys.path so the server's data model can be imported by the tests.
MODULE_DIR = pathlib.Path(__file__).resolve().parent
SERVER_SRC_PATH = MODULE_DIR.parent.parent.parent / "server" / "src"
sys.path.insert(0, str(SERVER_SRC_PATH))
from data_model import DATA_MODEL_JSON  # pylint: disable=import-error,wrong-import-order,wrong-import-position

# Parse the data model once at import time; shared by all test cases below.
DATA_MODEL = json.loads(DATA_MODEL_JSON)
class SourceCollectorTestCase(unittest.IsolatedAsyncioTestCase):  # skipcq: PTC-W0046
    """Base class for source collector unit tests."""

    # Subclasses must override these with a concrete metric and source type.
    METRIC_TYPE = SOURCE_TYPE = "Subclass responsibility"
    METRIC_ADDITION = "sum"

    @classmethod
    def setUpClass(cls) -> None:  # pylint: disable=invalid-name
        """Override to disable logging and load the data model so it is available for all unit tests."""
        logging.disable(logging.CRITICAL)
        cls.data_model = DATA_MODEL

    @classmethod
    def tearDownClass(cls) -> None:  # pylint: disable=invalid-name
        """Override to reset logging."""
        logging.disable(logging.NOTSET)

    def setUp(self) -> None:  # pylint: disable=invalid-name
        """Extend to set up the source and metric under test."""
        self.sources = dict(source_id=dict(type=self.SOURCE_TYPE, parameters=dict(url=f"https://{self.SOURCE_TYPE}")))
        self.metric = dict(type=self.METRIC_TYPE, sources=self.sources, addition=self.METRIC_ADDITION)

    async def collect(
        self,
        *,
        get_request_json_return_value=None,
        get_request_json_side_effect=None,
        get_request_content="",
        get_request_text="",
        get_request_headers=None,
        get_request_links=None,
        post_request_side_effect=None,
        post_request_json_return_value=None,
    ):
        """Collect the metric."""
        # Build mocked GET/POST responses and patch aiohttp's session methods
        # so the collector never performs real network I/O.
        get_request = self.__mock_get_request(
            self.__mock_get_request_json(get_request_json_return_value, get_request_json_side_effect),
            get_request_content,
            get_request_text,
            get_request_headers,
            get_request_links,
        )
        post_request = self.__mock_post_request(post_request_json_return_value)
        mocked_get = AsyncMock(return_value=get_request)
        mocked_post = AsyncMock(return_value=post_request, side_effect=post_request_side_effect)
        with patch("aiohttp.ClientSession.get", mocked_get), patch("aiohttp.ClientSession.post", mocked_post):
            async with aiohttp.ClientSession() as session:
                collector = MetricCollector(session, self.metric, self.data_model)
                return await collector.collect()

    @staticmethod
    def __mock_get_request(get_request_json, content, text, headers, links) -> AsyncMock:
        """Create the mock get request."""
        get_request = AsyncMock()
        get_request.json = get_request_json
        get_request.read.return_value = content
        get_request.text.return_value = text
        type(get_request).headers = PropertyMock(return_value=headers or {})
        # When links are given, return them on the first access only so that
        # a collector following pagination links stops after one extra page.
        type(get_request).links = PropertyMock(return_value={}, side_effect=[links, {}] if links else None)
        type(get_request).filename = PropertyMock(return_value="")
        return get_request

    @staticmethod
    def __mock_get_request_json(json_return_value, json_side_effect) -> AsyncMock:
        """Create the mock JSON."""
        get_request_json = AsyncMock()
        get_request_json.side_effect = json_side_effect
        get_request_json.return_value = json_return_value
        return get_request_json

    @staticmethod
    def __mock_post_request(json_return_value) -> AsyncMock:
        """Create the mock post request."""
        post_request = AsyncMock()
        post_request.json.return_value = json_return_value
        return post_request

    def assert_measurement(self, measurement, *, source_index: int = 0, **attributes) -> None:
        """Assert that the measurement has the expected attributes."""
        # Error attributes: substring match when an error is expected,
        # otherwise the attribute must be None.
        for attribute_key in ("connection_error", "parse_error"):
            if (attribute_value := attributes.get(attribute_key)) is not None:
                self.assertIn(attribute_value, getattr(measurement.sources[source_index], attribute_key))
            else:
                self.assertIsNone(getattr(measurement.sources[source_index], attribute_key))
        # Value attributes are only checked when explicitly passed; the string
        # sentinel distinguishes "not passed" from an expected value of None.
        for attribute_key in ("value", "total", "entities", "api_url", "landing_url"):
            if (attribute_value := attributes.get(attribute_key, "value not specified")) != "value not specified":
                self.__assert_measurement_source_attribute(attribute_key, attribute_value, measurement, source_index)

    def __assert_measurement_source_attribute(self, attribute_key, attribute_value, measurement, source_index):
        """Assert that the measurement source attribute has the expected value."""
        if isinstance(attribute_value, list):
            # Compare element-wise; dicts get the richer assertDictEqual diff.
            for pair in zip(attribute_value, getattr(measurement.sources[source_index], attribute_key)):
                assert_equal = self.assertDictEqual if isinstance(pair[0], dict) else self.assertEqual
                assert_equal(pair[0], pair[1])
        else:
            self.assertEqual(attribute_value, getattr(measurement.sources[source_index], attribute_key))

    @staticmethod
    def zipped_report(*filenames_and_contents: tuple[str, str]) -> bytes:
        """Return a zipped report."""
        bytes_io = io.BytesIO()
        with zipfile.ZipFile(bytes_io, mode="w") as zipped_report:
            for filename, content in filenames_and_contents:
                zipped_report.writestr(filename, content)
        return bytes_io.getvalue()

    def set_source_parameter(self, key: str, value: Union[str, list[str]]) -> None:
        """Set a source parameter."""
        self.sources["source_id"]["parameters"][key] = value
| StarcoderdataPython |
3375214 | <reponame>linbinbin92/data_driven_microstructure_paper_material<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
"""
import os
import glob
import subprocess
import time
# bashCommand = "sbatch job.sh"
def CreaJobFileCluster(dest):
    """Write a SLURM job.sh next to every MOOSE input file (*.i) under *dest*.

    Walks every subdirectory of *dest*; for each directory containing a
    ``*.i`` input file, (re)creates a ``job.sh`` batch script that runs the
    ppr-opt executable on that input with 24 MPI tasks, logging to moose.log.

    Note: changes the process working directory while walking.
    """
    os.chdir(dest)
    cdir_origin = os.getcwd()
    print(cdir_origin)
    for root, dirs, files in os.walk(cdir_origin):
        for name in dirs:
            job_dir = os.path.join(root, name)
            os.chdir(job_dir)
            input_files = glob.glob("*.i")
            for file in input_files:
                # Remove a stale job.sh if present; ignore it when missing.
                # (Was a bare `except:` that hid every other error.)
                try:
                    os.remove("job.sh")
                except FileNotFoundError:
                    pass
                # Context manager guarantees the script is flushed and closed;
                # the original called `data.close` without parentheses, so the
                # file handle was never actually closed.
                with open('job.sh', 'w') as data:
                    # Job name: input file name with its 3-character prefix dropped.
                    data.write("#!/bin/bash\n#SBATCH -J" + ' ' + file[3:])
                    data.write(
                        "\n#SBATCH -e ./MATID.err.%J"
                        "\n#SBATCH -o ./MATID.out.%J"
                        "\n# Please use the complete path details :"
                        "\n#SBATCH --exclusive"
                        "\n#SBATCH -n 24 # Number of MPI processes"
                        "\n###SBATCH -c 24 # Number of CPU cores (OpenMP-Threads) per MPI process"
                        "\n#SBATCH --mem-per-cpu=2400 # Main memory in MByte per MPI task"
                        "\n#SBATCH -t 5:58:00 # Hours, minutes and seconds, or '#SBATCH -t 10' - only minutes"
                        "\n#SBATCH --exclusive"
                        "\n#SBATCH -C avx2"
                        "\n"
                        "\n# -------------------------------"
                        "\n# Afterwards you write your own commands, e.g."
                        "\n"
                        "\nulimit -c unlimited"
                        "\nulimit -s unlimited"
                        "\n"
                        "\nsource ~/.moose-profile"
                        "\n"
                        "\nsrun /home/ac01asac/moose_applications/ppr_x64/ppr-opt -i" + " " + file + " " + "> moose.log")
def Submit(dest):
    """Submit the job.sh found in every subdirectory of *dest* via sbatch."""
    for root, dirs, files in os.walk(dest):
        for name in dirs:
            job_dir = os.path.join(root, name)
            os.chdir(job_dir)
            print('Current Folder is', job_dir)
            # Launch sbatch and wait for it, capturing stdout.
            proc = subprocess.Popen(["sbatch", "job.sh"], stdout=subprocess.PIPE)
            output, error = proc.communicate()
            print("Job successfully submitted")
| StarcoderdataPython |
4832462 | <reponame>z-yin/CityLink
#!/usr/bin/env python
# coding: utf-8
import os
import csv
import time
import jieba
import pprint
import re, string
import numpy as np
from gensim import corpora
# from threading import Semaphore, Thread, Event # for multi-processing
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from collections import defaultdict
from gensim.models.keyedvectors import KeyedVectors
from smart_open import open # for transparently opening compressed files
import multiprocessing as mp # pool, pipe, process for multi-processing
from itertools import repeat, islice # itertools.islice, for slicing generators
# ## Number of Cores
# Worker processes to spawn (also used for jieba's parallel segmentation).
NCORE = 8 #input("Enter Number of Processes")
# How many raw web-data part files to load from ../webdata.
NUM_FILES = 8 #input("Enter Number of Files")
# ## Parallel Process Function
def calc_given_keywords(words, expanded_nclass, expanded_keywords):
    """Count, per keyword category, how many tokens in *words* belong to it.

    Returns a numpy int array of length *expanded_nclass* (easy to add up).
    """
    counts = np.zeros(expanded_nclass, dtype=int)
    for token in words:
        for idx, category in enumerate(expanded_keywords):
            if token in category:
                counts[idx] += 1
    return counts
def parallel_process(document, city_link, expanded_keywords):
    """Add *document*'s keyword-category frequencies to every city pair it mentions.

    *document* is a pair (words, cities). *city_link* maps canonical
    (city_a, city_b) tuples to per-category frequency arrays; it is mutated
    in place and also returned so pool workers can ship it back.
    """
    words = document[0]
    cities = document[1]
    # frequency in the format: array([0, 0, 0, 0, 0, 0, 0]), easy to add up
    _freq = calc_given_keywords(words, len(expanded_keywords), expanded_keywords)
    # combine "A-B" and "B-A" city pairs
    # NOTE(review): the nested loops visit both orderings (i, j) and (j, i),
    # and each ordering adds _freq to whichever canonical key exists, so every
    # unordered pair appears to be counted TWICE per document — confirm this
    # doubling is intended before relying on absolute frequencies.
    for i in range(len(cities)):
        for j in range(len(cities)):
            if i != j:
                if (cities[i], cities[j]) in city_link:
                    city_link[(cities[i], cities[j])] += _freq
                if (cities[j], cities[i]) in city_link:
                    city_link[(cities[j], cities[i])] += _freq
    return city_link
# # Main
# #### File List
# Raw web-data shards named part-*, produced upstream.
file_list = [f for f in os.listdir('../webdata') if f.startswith('part-')]
# #### Dictionary
print('loading dictionary...')
if 'dict.dict' in os.listdir('../dict'):
    dictionary = corpora.Dictionary().load('../dict/dict.dict')  # already processed from embedding file
else:
    # Build the dictionary from the first token of each embedding-file line
    # (skipping the word2vec header line), then persist it for next runs.
    texts = []
    with open('../embedding/Tencent_AILab_ChineseEmbedding.txt') as f:
        skip_head = True
        for line in f:
            if skip_head:
                skip_head = False
                continue
            else:
                texts.append(line.split(' ')[0])
    dictionary = corpora.Dictionary([texts])
    dictionary.save('../dict/dict.dict')
print(dictionary)
# #### Stop List
# One stopword per line; trailing newline stripped via line[:-1].
stop_list = []
with open('resources/stopwords_zh.txt') as f:
    for line in f:
        stop_list.append(line[:-1])
stop_list = set(stop_list)
# #### City List
# First CSV column holds the city name; header row skipped, duplicates removed.
city_list = []
with open('resources/China_Cities_Coordinates_CHN_ENG.csv') as f:
    skip_head = True
    for line in f:
        if skip_head:
            skip_head = False
            continue
        else:
            city_list.append(line.split(',')[0])
city_list = list(set(city_list))
# #### Save 'Bin_Tencent_AILab_ChineseEmbedding.bin' in '../embedding'
# One-time conversion of the text embeddings to a fast binary format.
if 'Bin_Tencent_AILab_ChineseEmbedding.bin' not in os.listdir('../embedding'):
    print('saving word embeddings...')
    embedding_file = '../embedding/Tencent_AILab_ChineseEmbedding.txt'
    wv = KeyedVectors.load_word2vec_format(embedding_file, binary=False)
    wv.init_sims(replace=True)
    wv.save('../embedding/Bin_Tencent_AILab_ChineseEmbedding.bin')
# #### Load Word Embeddings
print('loading word embeddings...')
# mmap='r' shares the vectors between processes without copying.
wv = KeyedVectors.load('../embedding/Bin_Tencent_AILab_ChineseEmbedding.bin', mmap='r')
wv.vectors_norm = wv.vectors  # prevent recalc of normed vectors
# #### Save 'expanded_keywords.csv' in 'resources'
if 'expanded_keywords.csv' not in os.listdir('resources'):
    print('saving expanded keywords...')
    # Expand the existing keywords by finding words in the embedding file that are above the threshold
    '''load keywords'''
    # keywords.csv has one category per CSV column; keep only words that
    # actually exist in the embedding vocabulary.
    nclass = 7
    keywords = [[] for _ in range(nclass)]
    with open('resources/keywords.csv') as f:
        for line in f:
            line = line.replace('\n', '')
            for i, category in enumerate(line.split(',')):
                if category != '' and category in wv:
                    keywords[i].append(category)
    '''save expanded keywords'''
    expanded_nclass = 7
    expanded_keywords = [[] for _ in range(expanded_nclass)]
    for i, category in enumerate(keywords, 1):
        for key in category:
            # get most similar words to keys whose similarity > threshold
            expanded_keywords[i-1].append(key)
            closest = wv.most_similar(key)
            # most_similar is sorted descending, so stop at the first miss.
            for sim_pair in closest:
                if sim_pair[1] > 0.8:
                    expanded_keywords[i-1].append(sim_pair[0])
                else:
                    break
    with open('resources/expanded_keywords.csv', "w") as f:
        writer = csv.writer(f, delimiter=',')
        for category in expanded_keywords:
            writer.writerow(category)
# #### Load Expanded Keywords
print('loading expanded keywords...')
if 'expanded_keywords.csv' in os.listdir('resources'):  # already expanded, load from saved
    expanded_nclass = 7
    expanded_keywords = [[] for _ in range(expanded_nclass)]
    with open('resources/expanded_keywords.csv') as f:
        # Row i of the CSV corresponds to keyword category i.
        for i, line in enumerate(f):
            line = line.replace('\n', '')
            line = line.split(',')
            for keyword in line:
                expanded_keywords[i].append(keyword)
else:
    print('Error: Expanded keywords not found')
# #### Get Documents
print('loading {} files...'.format(NUM_FILES))
start = time.time()
documents = []  # [document[[words],[cities]]]
# BUGFIX: all_doc was incremented below without ever being initialized,
# which raised NameError on the first accepted document.
all_doc = 0
jieba.enable_parallel(NCORE)
for filename in file_list[:NUM_FILES]:
    with open('../webdata/' + filename, encoding='utf-8') as f:
        for line in f:
            document = []
            # drop meta-info (WARC record headers and blank lines)
            if line == '' or line.startswith('\r') or line.startswith('WARC') or line.startswith('Content'):
                continue
            # drop alphabetic characters
            line = re.sub(r'[a-zA-Z]', '', line)
            # drop digits and punctuations
            line = re.sub('[%s]' % (string.punctuation + string.digits), '', line)
            # drop empty line
            if line == '\r':
                continue
            # segment the sentence using jieba
            words = ' '.join(jieba.cut(line, cut_all=False)).split(' ')
            # drop stopwords
            words = [word for word in words if word not in stop_list]
            if not words or len(words) < 2:  # less than 2 words won't contain 2 cities
                continue
            # Collect city mentions and their positions so they can be
            # removed from the word list afterwards.
            cities = []
            indices = []
            for idx, word in enumerate(words):
                if word in city_list:
                    cities.append(word)
                    indices.append(idx)
            # remove cities from the document (reverse order keeps indices valid)
            for idx in indices[::-1]:
                del words[idx]
            cities = list(set(cities))  # get unique cities
            if len(cities) < 2:  # less than 2 cities won't composite a link
                continue
            all_doc += 1
            document.append(words)
            document.append(cities)
            documents.append(document)
jieba.disable_parallel()
print('Get {} websites from {} files after {} seconds'.format(len(documents), NUM_FILES, time.time()-start))
# ### Main Run Part
# Initialise City Link
# One canonical key per unordered city pair; each value is a per-category
# frequency accumulator.
city_link_multi = {}
expanded_nclass = len(expanded_keywords)
for i in range(len(city_list)-1):
    for j in range(i+1,len(city_list)):
        city_link_multi[(city_list[i], city_list[j])] = np.array([0 for _ in range(expanded_nclass)])  # easy to add up
start = time.time()
NDOC = len(documents)//NCORE  # num_documents_per_process
print('Start {} processes...'.format(NCORE))
# Instantiate the pool here
pool = mp.Pool(processes=NCORE)
# Each worker gets its own copy of city_link_multi; results are merged below.
result = pool.starmap_async(parallel_process, zip(documents, repeat(city_link_multi), repeat(expanded_keywords)), NDOC)
pool.close()
pool.join()
result = result.get()
print('Get individual result from {} process after {} seconds elapsed.'.format(NCORE, time.time() - start))
city_link_multi = result[0]
# Grab an arbitrary key used to detect duplicate per-chunk dicts below.
for key in city_link_multi.keys():
    key_used_to_remove_duplicate_dict = key
    break
saved_previous = city_link_multi[key_used_to_remove_duplicate_dict]
# NOTE(review): this merge skips a chunk whenever its sentinel value equals
# the previously merged one, assuming identical sentinels mean a duplicate
# dict — distinct chunks with coincidentally equal counts would be dropped;
# confirm this aggregation is correct.
for res in result:
    if not np.array_equal(saved_previous, res[key_used_to_remove_duplicate_dict]):
        saved_previous = res[key_used_to_remove_duplicate_dict]
        for key in city_link_multi.keys():
            city_link_multi[key] += res[key]
print('Get final results after {} seconds elapsed.'.format(time.time() - start))
print('Saving...')
with open('results/city_link_frequency_multiThreads.csv', "w") as f:
    writer = csv.writer(f, delimiter=',')
    writer.writerow(('City1','City2','经济','科技','法律','文学','娱乐','第二产业','农业'))  # first row as header
    for key, value in city_link_multi.items():
        writer.writerow((key[0], key[1], value[0], value[1], value[2], value[3], value[4], value[5], value[6]))
print('Done.')
| StarcoderdataPython |
167003 | # Code based on https://github.com/yaringal/ConcreteDropout
# License:
# MIT License
#
# Copyright (c) 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import numpy as np
from torch import nn
from models.base_nn import BaseNN
class ConcreteDropout(nn.Module):
    """Concrete Dropout wrapper: learns the dropout probability of *layer*
    through a continuous relaxation of the Bernoulli mask, and returns the
    matching weight/dropout regularization term alongside the output.
    """

    def __init__(self, weight_regularizer=1e-6,
                 dropout_regularizer=1e-5, init_min=0.1, init_max=0.1):
        super(ConcreteDropout, self).__init__()
        self.weight_regularizer = weight_regularizer
        self.dropout_regularizer = dropout_regularizer
        # Parameterize p in logit space so sigmoid keeps it inside (0, 1).
        logit_min = np.log(init_min) - np.log(1. - init_min)
        logit_max = np.log(init_max) - np.log(1. - init_max)
        self.p_logit = nn.Parameter(torch.empty(1).uniform_(logit_min, logit_max))

    def forward(self, x, layer):
        """Apply concrete dropout to *x*, feed it through *layer*, and return
        (output, regularization)."""
        p = torch.sigmoid(self.p_logit)
        out = layer(self._concrete_dropout(x, p))
        # L2 penalty on the wrapped layer's parameters, scaled by 1/(1 - p).
        squared_norm = 0
        for param in layer.parameters():
            squared_norm = squared_norm + torch.sum(torch.pow(param, 2))
        weights_reg = self.weight_regularizer * squared_norm / (1 - p)
        # Entropy-style penalty on the dropout probability, scaled by the
        # input dimensionality (number of elements of one batch item).
        dropout_reg = p * torch.log(p) + (1. - p) * torch.log(1. - p)
        dropout_reg = dropout_reg * self.dropout_regularizer * x[0].numel()
        return out, weights_reg + dropout_reg

    def _concrete_dropout(self, x, p):
        """Apply a relaxed (differentiable) Bernoulli dropout mask to *x*."""
        eps = 1e-7
        temp = 0.1
        noise = torch.rand_like(x)
        mask_logit = (torch.log(p + eps)
                      - torch.log(1 - p + eps)
                      + torch.log(noise + eps)
                      - torch.log(1 - noise + eps))
        drop_prob = torch.sigmoid(mask_logit / temp)
        keep_mask = 1 - drop_prob
        # Rescale by the retain probability to preserve expected magnitude.
        return torch.mul(x, keep_mask) / (1 - p)
class ConcreteDropoutNN(BaseNN):
    """Fully-connected network whose every layer is wrapped in ConcreteDropout.

    forward() returns both the network output and the summed regularization
    term, which the training loss should include.
    """

    def __init__(self, weight_regularizer, dropout_regularizer, input_size, output_size, **kwargs):
        super(ConcreteDropoutNN, self).__init__(**kwargs)
        # NOTE(review): self.hidden_size is presumably populated by BaseNN
        # from **kwargs — confirm; the output layer size is appended here so
        # the loop below also builds the final layer.
        self.hidden_size.append(output_size)
        self.linear1 = nn.Linear(input_size, self.hidden_size[0])
        # One linear layer per consecutive pair of hidden sizes.
        self.linears = nn.ModuleList([nn.Linear(self.hidden_size[i], self.hidden_size[i + 1]) for i in range(len(self.hidden_size) - 1)])
        # One ConcreteDropout wrapper per layer (input layer included).
        self.conc_drops = nn.ModuleList([ConcreteDropout(weight_regularizer=weight_regularizer,
                                                         dropout_regularizer=dropout_regularizer)
                                         for i in range(len(self.hidden_size))])
        self.act = nn.ReLU()

    def forward(self, x):
        # One regularization scalar per dropout-wrapped layer.
        regularization = torch.empty(len(self.hidden_size), device=x.device)
        out_arr = []
        out, regularization[0] = self.conc_drops[0](x, nn.Sequential(self.linear1, self.act))
        out_arr.append(out)
        for i in range(len(self.hidden_size) - 1):
            # The final (output) layer gets no activation.
            if i == len(self.hidden_size) - 2:
                act = nn.Identity()
            else:
                act = self.act
            out, regularization[i + 1] = self.conc_drops[i + 1](out, nn.Sequential(self.linears[i], act))
            out_arr.append(out)
        return out, regularization.sum()
| StarcoderdataPython |
1721273 | <filename>flirt/hrv/features/nl_features.py
import numpy as np
from flirt.hrv.features.data_utils import DomainFeatures
class NonLinearFeatures(DomainFeatures):
    """Domain-feature provider for non-linear (Poincaré-based) HRV features."""

    def __init__(self, emb_dim: int = 2):
        # Minimum number of samples required before features are computed.
        self.emb_dim = emb_dim

    def __get_type__(self) -> str:
        return "Non-Linear"

    def __generate__(self, data: np.array) -> dict:
        """Return the non-linear HRV features for *data* (NaN when too short)."""
        samples = np.asarray(data)
        # Start from all-NaN defaults so every key is present even when the
        # input is too short to compute anything.
        feature_names = ('hrv_SD1', 'hrv_SD2', 'hrv_SD2SD1',
                         'hrv_CSI', 'hrv_CVI', 'hrv_CSI_Modified')
        results = {name: np.nan for name in feature_names}
        if len(samples) > self.emb_dim:
            results.update(_nonlinear(samples))
        # TODO: Raise one error in case DFA features are not possible to compute not everytime the function is called...
        return results
def _nonlinear(rri):
diff_rri = np.diff(rri)
out = {} # Initialize empty container for results
# Poincaré
sd_rri = np.std(rri, ddof=1) ** 2
sd_heart_period = np.std(diff_rri, ddof=1) ** 2
out["hrv_SD1"] = np.sqrt(sd_heart_period * 0.5)
out["hrv_SD2"] = np.sqrt(2 * sd_rri - 0.5 * sd_heart_period)
out["hrv_SD2SD1"] = out["hrv_SD2"] / out["hrv_SD1"]
# CSI / CVI
T = 4 * out["hrv_SD1"]
L = 4 * out["hrv_SD2"]
out["hrv_CSI"] = L / T
out["hrv_CVI"] = np.log10(L * T)
out["hrv_CSI_Modified"] = L ** 2 / T
return out
| StarcoderdataPython |
3247859 | """
Project Module.
"""
import os
from optparse import Values
from pathlib import Path
from typing import List, Optional, Any, Dict, Callable, TYPE_CHECKING
# from pineboolib.fllegacy.flaccesscontrollists import FLAccessControlLists # FIXME: Not allowed yet
from PyQt5 import QtWidgets
from pineboolib.core.utils import logging, utils_base
from pineboolib.core.utils.struct import AreaStruct
from pineboolib.core import exceptions, settings, message_manager
from pineboolib.application.database import pnconnectionmanager
from pineboolib.application.utils import path, xpm
from pineboolib.application import module, file
if TYPE_CHECKING:
from pineboolib.interfaces.dgi_schema import dgi_schema
from pineboolib.application.database import pnconnection
from pineboolib.core.utils.struct import ActionStruct # noqa: F401
LOGGER = logging.getLogger(__name__)
class Project(object):
    """
    Singleton for the whole application.
    Can be accessed with pineboolib.project from anywhere.
    """

    # Manager owning every database connection used by the project.
    _conn_manager: "pnconnectionmanager.PNConnectionManager"
    # The Qt application; set via set_app(), read via the `app` property.
    _app: Optional[QtWidgets.QApplication] = None
    # _conn: Optional["PNConnection"] = None  # Stores the main database connection
    debug_level = 100
    options: Values
    # _initModules = None
    main_form: Any = None  # FIXME: How is this used? Which type?
    main_window: Any = None
    acl_ = None
    # Graphical interface abstraction layer; attached in init_dgi().
    dgi: Optional["dgi_schema"] = None
    delete_cache: bool = False
    parse_project: bool = False
    path = None
    _splash = None
    sql_drivers_manager = None
    timer_ = None
    no_python_cache = False  # TODO: Fill this one instead
    _msg_mng = None
    alternative_folder: Optional[str]
    _session_func_: Optional[Callable]
    # In-memory registries filled by run(): areas, files, tables, actions,
    # translators and modules loaded from the database.
    areas: Dict[str, AreaStruct]
    files: Dict[Any, Any]
    tables: Dict[Any, Any]
    actions: Dict[Any, "ActionStruct"]
    translator_: List[Any]
    modules: Dict[str, "module.Module"]
    # QS files queued for conversion to Python.
    pending_conversion_list: List[str]
    def __init__(self) -> None:
        """Constructor."""
        # self._conn = None
        self.dgi = None
        self.tree = None
        self.root = None
        self.alternative_folder = None
        self.apppath = ""
        # Temporary-files directory from settings; defaulted below when unset.
        self.tmpdir = settings.config.value("ebcomportamiento/temp_dir")
        self.parser = None
        # self.main_form_name: Optional[str] = None
        self.delete_cache = False
        self.parse_project = False
        self.translator_ = []  # FIXME: Add proper type
        self.actions = {}  # FIXME: Add proper type
        self.tables = {}  # FIXME: Add proper type
        self.files = {}  # FIXME: Add proper type
        self.areas = {}
        self.modules = {}
        self.options = Values()
        if self.tmpdir is None:
            # First run: create and persist a default temp directory.
            self.tmpdir = utils_base.filedir("%s/Pineboo/tempdata" % Path.home())
            settings.config.set_value("ebcomportamiento/temp_dir", self.tmpdir)
        if not os.path.exists(self.tmpdir):
            Path(self.tmpdir).mkdir(parents=True, exist_ok=True)
        self._session_func_ = None
        self._conn_manager = pnconnectionmanager.PNConnectionManager()
        self.pending_conversion_list = []
@property
def app(self) -> QtWidgets.QApplication:
"""Retrieve current Qt Application or throw error."""
if self._app is None:
raise Exception("No application set")
return self._app
    def set_app(self, app: QtWidgets.QApplication) -> None:
        """Set Qt Application."""
        # Stored privately; retrieved through the `app` property.
        self._app = app
@property
def conn_manager(self) -> "pnconnectionmanager.PNConnectionManager":
"""Retrieve current connection or throw."""
if self._conn_manager is None:
raise Exception("Project is not initialized")
return self._conn_manager
@property
def DGI(self) -> "dgi_schema":
"""Retrieve current DGI or throw."""
if self.dgi is None:
raise Exception("Project is not initialized")
return self.dgi
def init_conn(self, connection: "pnconnection.PNConnection") -> bool:
"""Initialize project with a connection."""
# if self._conn is not None:
# del self._conn
# self._conn = None
result = self._conn_manager.setMainConn(connection)
if result:
self.apppath = utils_base.filedir("..")
self.delete_cache = settings.config.value("ebcomportamiento/deleteCache", False)
self.parse_project = settings.config.value("ebcomportamiento/parseProject", False)
return result
    def init_dgi(self, dgi: "dgi_schema") -> None:
        """Load and associate the defined DGI onto this project."""
        # FIXME: Actually, DGI should be loaded here, or kind of.
        self.dgi = dgi
        # The message manager routes UI messages through the DGI.
        self._msg_mng = message_manager.Manager(dgi)
        self.dgi.extraProjectInit()
def load_modules(self) -> None:
"""Load all modules."""
for module_name, mod_obj in self.modules.items():
mod_obj.load()
self.tables.update(mod_obj.tables)
    def setDebugLevel(self, level: int) -> None:
        """
        Set debug level for application.

        @param level Number with the specified debug level.
        ***DEPRECATED***
        """
        self.debug_level = level
        # self.dgi.pnqt3ui.Options.DEBUG_LEVEL = q
# def acl(self) -> Optional[FLAccessControlLists]:
# """
# Retorna si hay o no acls cargados
# @return Objeto acl_
# """
# return self.acl_
    def acl(self):
        """Return loaded ACL."""
        # Kept only to fail loudly: ACL handling was moved out of Project.
        raise exceptions.CodeDoesNotBelongHereException("ACL Does not belong to PROJECT. Go away.")
    def run(self) -> bool:
        """Run project. Connects to DB and loads data.

        Prepares the on-disk cache, ensures the system tables exist, loads
        areas, modules and files from the database into memory, dumps file
        contents to the cache, optionally queues QS scripts for conversion to
        Python and loads ORM models. Returns True on success.
        """
        self.pending_conversion_list = []
        # Reset the in-memory registries before reloading from the database.
        if self.actions:
            del self.actions
        if self.tables:
            del self.tables
        self.actions = {}
        self.tables = {}
        if self.dgi is None:
            raise Exception("DGI not loaded")
        if not self.conn_manager or "main_conn" not in self.conn_manager.connections_dict.keys():
            raise exceptions.NotConnectedError(
                "Cannot execute Pineboo Project without a connection in place"
            )
        conn = self.conn_manager.mainConn()
        db_name = conn.DBName()
        # TODO: Refactor this function into smaller, simpler ones.
        # Prepare the temporary/cache directory.
        if self.delete_cache and os.path.exists(path._dir("cache/%s" % db_name)):
            self.message_manager().send("splash", "showMessage", ["Borrando caché ..."])
            LOGGER.debug(
                "DEVELOP: delete_cache Activado\nBorrando %s", path._dir("cache/%s" % db_name)
            )
            # Bottom-up walk so files are removed before their directories.
            for root, dirs, files in os.walk(path._dir("cache/%s" % db_name), topdown=False):
                for name in files:
                    os.remove(os.path.join(root, name))
                for name in dirs:
                    os.rmdir(os.path.join(root, name))
        else:
            keep_images = settings.config.value("ebcomportamiento/keep_general_cache", False)
            if keep_images is False:
                # Clear loose temp files, but never the sqlite3 databases.
                for file_name in os.listdir(self.tmpdir):
                    if file_name.find(".") > -1 and not file_name.endswith("sqlite3"):
                        file_path = os.path.join(self.tmpdir, file_name)
                        try:
                            os.remove(file_path)
                        except Exception:
                            LOGGER.warning(
                                "No se ha podido borrar %s al limpiar la cache", file_path
                            )
                            pass
        if not os.path.exists(path._dir("cache")):
            os.makedirs(path._dir("cache"))
        if not os.path.exists(path._dir("cache/%s" % db_name)):
            os.makedirs(path._dir("cache/%s" % db_name))
        # Connect: verify that all system tables exist, creating any missing one.
        for table in (
            "flareas",
            "flmodules",
            "flfiles",
            "flgroups",
            "fllarge",
            "flserial",
            "flusers",
            "flvar",
            "flmetadata",
            "flsettings",
            "flupdates",
            "flmetadata",
            "flseqs",
            "flsettings",
        ):
            if not self.conn_manager.manager().existsTable(table):
                self.conn_manager.manager().createSystemTable(table)
        cursor_ = self.conn_manager.dbAux().cursor()
        # Load areas, always adding the built-in system area.
        self.areas = {}
        cursor_.execute(""" SELECT idarea, descripcion FROM flareas WHERE 1 = 1""")
        for idarea, descripcion in list(cursor_):
            self.areas[idarea] = AreaStruct(idarea=idarea, descripcion=descripcion)
        self.areas["sys"] = AreaStruct(idarea="sys", descripcion="Area de Sistema")
        # Fetch the active modules.
        cursor_.execute(
            """ SELECT idarea, idmodulo, descripcion, icono FROM flmodules WHERE bloqueo = %s """
            % conn.driver().formatValue("bool", "True", False)
        )
        self.modules = {}
        for idarea, idmodulo, descripcion, icono in cursor_:
            icono = xpm.cache_xpm(icono)
            self.modules[idmodulo] = module.Module(idarea, idmodulo, descripcion, icono)
        # The system module's icon is read from disk, not from the database.
        file_object = open(
            utils_base.filedir(utils_base.get_base_dir(), "system_module", "sys.xpm"), "r"
        )
        icono = file_object.read()
        file_object.close()
        # icono = clearXPM(icono)
        self.modules["sys"] = module.Module("sys", "sys", "Administración", icono)
        cursor_.execute(
            """ SELECT idmodulo, nombre, sha FROM flfiles WHERE NOT sha = '' ORDER BY idmodulo, nombre """
        )
        file_1 = open(path._dir("project.txt"), "w")
        self.files = {}
        count = 0
        list_files: List[str] = []
        for idmodulo, nombre, sha in list(cursor_):
            if not self.dgi.accept_file(nombre):
                continue
            count += 1
            if idmodulo not in self.modules:
                continue  # I
            fileobj = file.File(idmodulo, nombre, sha, db_name=db_name)
            if nombre in self.files:
                LOGGER.warning("run: file %s already loaded, overwritting..." % nombre)
            self.files[nombre] = fileobj
            self.modules[idmodulo].add_project_file(fileobj)
            file_1.write(fileobj.filekey + "\n")
            fileobjdir = os.path.dirname(path._dir("cache", fileobj.filekey))
            file_name = path._dir("cache", fileobj.filekey)
            if not os.path.exists(fileobjdir):
                os.makedirs(fileobjdir)
            if os.path.exists(file_name):
                if file_name.endswith(".qs"):
                    folder_path = os.path.dirname(file_name)
                    static_flag = "%s/STATIC" % folder_path
                    file_name_py = "%s.py" % file_name[:-3]
                    # NOTE(review): nesting below reconstructed from context —
                    # a STATIC flag forces the cached .qs/.py to be purged and
                    # regenerated; otherwise an existing .py means the file is
                    # already converted. Confirm against upstream sources.
                    if os.path.exists(static_flag):
                        os.remove(static_flag)
                        if os.path.exists(file_name):
                            os.remove(file_name)
                        if os.path.exists(file_name_py):
                            os.remove(file_name_py)
                    elif os.path.exists(file_name_py):
                        continue
                elif file_name.endswith(".mtd"):
                    if settings.config.value(
                        "ebcomportamiento/orm_enabled", False
                    ) and not settings.config.value("ebcomportamiento/orm_parser_disabled", False):
                        # Skip when the generated ORM model already exists.
                        if os.path.exists("%s_model.py" % path._dir("cache", fileobj.filekey[:-4])):
                            continue
                    else:
                        continue
            cur2 = self.conn_manager.useConn("dbAux").cursor()
            sql = (
                "SELECT contenido FROM flfiles WHERE idmodulo = %s AND nombre = %s AND sha = %s"
                % (
                    conn.driver().formatValue("string", idmodulo, False),
                    conn.driver().formatValue("string", nombre, False),
                    conn.driver().formatValue("string", sha, False),
                )
            )
            cur2.execute(sql)
            for (contenido,) in list(cur2):
                encode_ = "utf-8" if str(nombre).endswith((".kut", ".ts", ".py")) else "ISO-8859-15"
                folder = path._dir(
                    "cache",
                    "/".join(fileobj.filekey.split("/")[: len(fileobj.filekey.split("/")) - 1]),
                )
                if os.path.exists(folder) and not os.path.exists(
                    file_name
                ):  # Remove the folder contents when the target file is missing
                    for root, dirs, files in os.walk(folder):
                        for file_item in files:
                            os.remove(os.path.join(root, file_item))
                if contenido and not os.path.exists(file_name):
                    self.message_manager().send(
                        "splash", "showMessage", ["Volcando a caché %s..." % nombre]
                    )
                    file_2 = open(file_name, "wb")
                    txt = contenido.encode(encode_, "replace")
                    file_2.write(txt)
                    file_2.close()
            # Queue QS scripts for conversion to Python below.
            if self.parse_project and nombre.endswith(".qs"):
                if os.path.exists(file_name):
                    list_files.append(file_name)
        file_1.close()
        self.message_manager().send("splash", "showMessage", ["Convirtiendo a Python ..."])
        if list_files:
            self.parse_script_list(list_files)
        # Load the project's common core (system-module files shipped on disk).
        for root, dirs, files in os.walk(
            utils_base.filedir(utils_base.get_base_dir(), "system_module")
        ):
            # list_files = []
            for nombre in files:
                if root.find("modulos") == -1:
                    fileobj = file.File("sys", nombre, basedir=root, db_name=db_name)
                    self.files[nombre] = fileobj
                    self.modules["sys"].add_project_file(fileobj)
                    # if self.parse_project and nombre.endswith(".qs"):
                    #     self.parseScript(path._dir(root, nombre))
                    #     list_files.append(path._dir(root, nombre))
                    # self.parse_script_lists(list_files)
        if settings.config.value(
            "ebcomportamiento/orm_enabled", False
        ) and not settings.config.value("ebcomportamiento/orm_load_disabled", False):
            self.message_manager().send("splash", "showMessage", ["Cargando objetos ..."])
            from pineboolib.application.parsers.mtdparser import pnormmodelsfactory

            pnormmodelsfactory.load_models()
        # FIXME: ACLs needed at this level?
        # self.acl_ = FLAccessControlLists()
        # self.acl_.init()
        return True
def call(
    self,
    function: str,
    args: List[Any],
    object_context: Any = None,
    show_exceptions: bool = True,
) -> Optional[Any]:
    """
    Call a QS project function by its dotted name.

    @param function. Name of the function to call, e.g.
        "flfactalma.iface.beforeCommit_articulos" (a trailing "()" is tolerated).
    @param args. Positional arguments forwarded to the resolved function.
    @param object_context. Object on which the function is resolved; when falsy it
        is derived from the action named by the first path segment.
    @param show_exceptions. Whether resolution failures are logged as errors.
    @return The function's return value; None when the action/script cannot be
        resolved; True when the attribute does not exist on the context.
    """
    # FIXME: We should not be using this method. Python has better ways to do this.
    LOGGER.trace(
        "JS.CALL: fn:%s args:%s ctx:%s", function, args, object_context, stack_info=True
    )

    # Typically flfactalma.iface.beforeCommit_articulos()
    if function[-2:] == "()":
        function = function[:-2]

    array_fun = function.split(".")

    if not object_context:
        if not array_fun[0] in self.actions:
            if len(array_fun) > 1:
                if show_exceptions:
                    LOGGER.error(
                        "No existe la acción %s en el módulo %s", array_fun[1], array_fun[0]
                    )
            else:
                if show_exceptions:
                    LOGGER.error("No existe la acción %s", array_fun[0])
            return None

        fun_action = self.actions[array_fun[0]]
        # NOTE(review): a single-segment name with no object_context reaches the
        # array_fun[1] accesses below and raises IndexError — confirm callers
        # always pass either a dotted name or a context.
        if array_fun[1] == "iface" or len(array_fun) == 2:
            main_window = fun_action.load()
            if len(array_fun) == 2:
                object_context = None
                if hasattr(main_window.widget, array_fun[1]):
                    object_context = main_window.widget
                if hasattr(main_window.iface, array_fun[1]):
                    object_context = main_window.iface

                if not object_context:
                    object_context = main_window
            else:
                object_context = main_window.iface
        elif array_fun[1] == "widget":
            script = fun_action.load_script(array_fun[0], None)
            object_context = script.iface
        else:
            return False

        if not object_context:
            if show_exceptions:
                LOGGER.error(
                    "No existe el script para la acción %s en el módulo %s",
                    array_fun[0],
                    array_fun[0],
                )
            return None

    # Pre-bind so the fallback branch below can never hit an unbound local
    # (previously function_name could be referenced before assignment).
    function_name = ""
    function_name_object = None
    if len(array_fun) == 1:  # no dots in the call
        function_name = array_fun[0]
    elif len(array_fun) > 2:  # e.g. self.iface.fn
        function_name = array_fun[2]
    elif len(array_fun) == 2:
        function_name = array_fun[1]  # no iface segment
    else:
        if len(array_fun) == 0:
            function_name_object = object_context

    if not function_name_object:
        function_name_object = getattr(object_context, function_name, None)

    if function_name_object is None:
        if show_exceptions:
            LOGGER.error("No existe la función %s en %s", function_name, array_fun[0])
        return True
        # FIXME: debería ser false, pero igual se usa por el motor para detectar propiedades

    try:
        return function_name_object(*args)
    except Exception:
        # Supply the %s argument (it was missing, which broke log formatting).
        LOGGER.exception("JSCALL: Error executing function %s", function, stack_info=True)
        return None
def parse_script(self, scriptname: str, txt_: str = "") -> None:
    """
    Convert a QS script into Python and store it in the same folder.

    @param scriptname. Absolute path of the .qs script to convert.
    @param txt_. Extra text appended to the progress log message.
    @raises IOError when *scriptname* does not exist.
    """
    from pineboolib.application.parsers.qsaparser import postparse

    # Try to convert it to Python first with flscriptparser2.
    if not os.path.isfile(scriptname):
        raise IOError
    python_script_path = (scriptname + ".xml.py").replace(".qs.xml.py", ".qs.py")
    if not os.path.isfile(python_script_path) or self.no_python_cache:
        # Name of the folder containing the script (used only for the log line).
        # os.path instead of the former split(os.sep) indexing, which the old
        # FIXME already flagged as fragile.
        file_name = os.path.basename(os.path.dirname(scriptname))
        msg = "Convirtiendo a Python . . . %s.qs %s" % (file_name, txt_)
        LOGGER.info(msg)

        # clean_no_python = self.dgi.clean_no_python()  # FIXME: No longer needed. Applied on the go.

        try:
            postparse.pythonify([scriptname], ["--strict"])
        except Exception:
            LOGGER.exception("El fichero %s no se ha podido convertir", scriptname)
def parse_script_list(self, path_list: List[str]) -> None:
    """Convert a list of QS scripts into Python files stored next to the sources.

    Conversion may be parallelised via multiprocessing; a shared pending list
    prevents two concurrent conversions of the same destination file.

    @param path_list. Paths of the .qs files to convert.
    @raises IOError when any listed file does not exist.
    """
    from multiprocessing import Pool
    from pineboolib.application.parsers import qsaparser
    from pineboolib.application.parsers.qsaparser import pytnyzer, pyconvert

    if not path_list:
        return
    for file_path in path_list:
        if not os.path.isfile(file_path):
            raise IOError
    pytnyzer.STRICT_MODE = True

    itemlist = []
    for num, path_file in enumerate(path_list):
        dest_file_name = "%s.py" % path_file[:-3]
        if dest_file_name in self.pending_conversion_list:
            LOGGER.warning("The file %s is already being converted. Waiting", dest_file_name)
            while dest_file_name in self.pending_conversion_list:
                # Wait for the other conversion to finish, keeping the Qt UI alive.
                QtWidgets.QApplication.processEvents()
            # NOTE(review): after waiting, the file is NOT queued for conversion
            # by this call — it relies on the other conversion's result. Confirm
            # that is the intended behaviour.
        else:
            self.pending_conversion_list.append(dest_file_name)
            itemlist.append(
                pyconvert.PythonifyItem(
                    src=path_file, dst=dest_file_name, number=num, len=len(path_list), known={}
                )
            )
    # itemlist = [
    #     pyconvert.PythonifyItem(
    #         src=path_file, dst="%s.py" % path_file[:-3], n=n, len=len(path_list), known={}
    #     )
    #     for n, path_file in enumerate(path_list)
    # ]
    msg = "Convirtiendo a Python . . ."
    LOGGER.info(msg)
    # Cap the pool size at the number of items so no idle workers are spawned.
    threads_num = pyconvert.CPU_COUNT
    if len(itemlist) < threads_num:
        threads_num = len(itemlist)

    pycode_list: List[bool] = []
    if qsaparser.USE_THREADS:
        with Pool(threads_num) as thread:
            # TODO: Add proper signatures to Python files to avoid reparsing
            pycode_list = thread.map(pyconvert.pythonify_item, itemlist, chunksize=2)
    else:
        for item in itemlist:
            pycode_list.append(pyconvert.pythonify_item(item))

    # Release the per-file locks taken above, whether conversion succeeded or not.
    for item in itemlist:
        self.pending_conversion_list.remove(item.dst_path)

    if not all(pycode_list):
        LOGGER.warning("Conversion failed for some files")
def get_temp_dir(self) -> str:
    """
    Return temporary folder defined for pineboo.

    ***DEPRECATED*** — always raises; use ``project.tmpdir`` instead.

    @return never returns.
    @raises CodeDoesNotBelongHereException unconditionally.
    """
    # FIXME: anti-pattern in Python. Getters for plain variables are wrong.
    raise exceptions.CodeDoesNotBelongHereException("Use project.tmpdir instead, please.")
    # return self.tmpdir
def load_version(self) -> str:
    """Compute, store and return the application's version string.

    The prefix depends on whether the DB-admin behaviour flag is enabled.
    """
    base_version = "0.68.36"
    prefix = (
        "DBAdmin"
        if settings.config.value("application/dbadmin_enabled", False)
        else "Quick"
    )
    self.version = "%s v%s" % (prefix, base_version)
    return self.version
def message_manager(self):
    """Return the message manager used for splash and progress output.

    @return the manager instance stored in self._msg_mng.
    """
    return self._msg_mng
def set_session_function(self, fun_: Callable) -> None:
    """Register the callable later used by session_id() to obtain the session id.

    @param fun_. Zero-argument callable returning the current session identifier.
    """
    self._session_func_ = fun_
def session_id(self) -> str:
    """Return the current session id, or "auto" when no session function is set."""
    session_func = self._session_func_
    if session_func is None:
        return "auto"
    return str(session_func())
| StarcoderdataPython |
119512 | <reponame>manish1822510059/Python-1000-program
# Print a 5-row centred pyramid of repeated odd digits (1, 3, 555, ...):
# each row has a shrinking margin of spaces followed by the digit repeated
# an increasing (odd) number of times.
inc = 1
num = 1
for x in range(5, 0, -1):
    print(" " * x + str(num) * inc)
    num += 2
    inc += 2
3340890 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" a robot
"""
import gevent
# from gevent import socket
import struct
import time
from net import baseclient
from dao.stock_def import EtfInfo
from pb import base_pb2
from pb import error_pb2
from pb import pq_quota_pb2
from pb import quotation_def_pb2
from pb import quota_pb2
from pb import structured_fund_pb2
import quotation
__author__ = 'qinjing'
RED = '\033[31m'
GREEN = '\033[32m'
BLUE = '\033[33m'
CLEAR = '\033[0m'
# -----------------------------------------------------------------------------
# class PreQuotation(object):
# def __init__(self, app, addr):
# self.addr = addr
# # self.app = app
# self.pre_quo_svr = utils.connect_server(addr)
# self.subscibe()
# self.etfs = {}
#
# def subscibe(self):
# print('subscibe')
# submsg = pq_quota_pb2.PreQuoSubMsg()
# submsg.sub_flag = 1
# submsg.sub_type.append(quotation_def_pb2.LYMK_ETF_BASE_INFO)
# # submsg.sub_type.append(quotation_def_pb2.LYMK_MARKET_OVERVIEW)
# # submsg.sub_type.append(quotation_def_pb2.LYMK_ETF_DIFF_INFO)
# # submsg.sub_type.append(quotation_def_pb2.LYMK_FJJJ_INFO)
# # submsg.sub_type.append(quotation_def_pb2.LYMK_STRUCTURED_FUND_BASE)
# # submsg.sub_type.append(quotation_def_pb2.LYMK_TRADEDAY_INFO)
# fmt = ">iHHiiii"
#
# headbuffer = struct.pack(fmt, submsg.ByteSize(), 0x8001,
# quotation_def_pb2.LYMK_PRE_QUO_SUB, 0, 0, 0,
# 0)
# sentdata = headbuffer + submsg.SerializeToString()
# self.pre_quo_svr.send(sentdata)
#
# def recv_data(self):
# MAX_PACK = 1024 * 1024
# fmt = ">iHHiiii"
# seq = 0
# maxpack = 1
# while 1:
# status, packlen, pi = utils.recv_header2(
# self.pre_quo_svr, base_pb2.HEADER_SIZE)
# if error_pb2.SUCCESS != status:
# print('\033[31mreconnect..\033[0m')
# self.pre_quo_svr = utils.connect_server(self.addr)
# self.subscibe()
# continue
#
# if packlen < 0 or packlen > MAX_PACK:
# print 'close connect error length', packlen
# self.pre_quo_svr = utils.connect_server(self.addr)
# self.subscibe()
# continue
#
# if maxpack < packlen:
# maxpack = packlen
# # 获取包体 收全包体
# body = ''
# status, body = utils.recv_body(self.pre_quo_svr, packlen)
# if error_pb2.SUCCESS != status:
# print('\033[31mRecv %d %d\033[0m' % (packlen, len(body)))
# print('reconnect...')
# self.pre_quo_svr = utils.connect_server(self.addr)
# self.subscibe()
# continue
#
# if quotation_def_pb2.LYMK_ETF_BASE_INFO == pi.cmd:
# etfbase = pq_quota_pb2.EtfBasketInfo()
# etfbase.ParseFromString(body)
# pi.pack = etfbase
# print etfbase
# self.etf_base_info_handle(pi)
# elif quotation_def_pb2.LYMK_HEARTBEAT_REQ == pi.cmd:
# headbuffer = struct.pack(
# fmt, 0, 0x8001, quotation_def_pb2.LYMK_HEARTBEAT,
# 1, 0, 0, 0)
# seq += 1
# self.pre_quo_svr.send(headbuffer)
# print('%s etfs %d maxpack %d' % (time.ctime(time.time()),
# len(self.etfs), maxpack))
# else:
# print('unknow cmd 0x%x' % (pi.cmd))
#
# def etf_base_info_handle(self, pi):
# self.etfs[pi.pack.etf_code] = pi.pack
# if pi.pack.etf_code == '159919':
# pi.pack
# # print('%s pi.pack.creation_limit %s pi.redemption_limit %s'
# # % (pi.pack.etf_code, pi.pack.creation_limit,
# # pi.pack.redemption_limit))
#
# # print pi
# # print pi.pack
# -----------------------------------------------------------------------------
class PreQuotation(baseclient.BaseClient):
    """Client for the pre-quotation server.

    Subscribes to ETF / structured-fund base-data feeds, caches the received
    ETF baskets and forwards market-data subscriptions for each constituent
    stock to the main quotation client.
    """

    def __init__(self, app, addr):
        super(PreQuotation, self).__init__(app, addr)
        self.etfs = {}      # etf code -> EtfInfo cache
        self.sf_info = {}   # structured fund: stock id -> pb message
        self.quo = self.app.quo  # main quotation client (may still be None here)
        self.seq = 0        # outgoing packet sequence number
        self.sublist = []   # EtfInfo objects whose constituent subscription is deferred
        # NOTE(review): this debug log handle is never closed — leaks a file descriptor.
        self.log = open('etf.code', 'w+')

    def after_connect(self, err):
        """Send the pre-quotation subscription request once connected."""
        submsg = pq_quota_pb2.PreQuoSubMsg()
        submsg.sub_flag = 1
        submsg.sub_type.append(quotation_def_pb2.LYMK_ETF_BASE_INFO)
        submsg.sub_type.append(quotation_def_pb2.LYMK_MARKET_OVERVIEW)
        submsg.sub_type.append(quotation_def_pb2.LYMK_ETF_DIFF_INFO)
        submsg.sub_type.append(quotation_def_pb2.LYMK_FJJJ_INFO)
        submsg.sub_type.append(quotation_def_pb2.LYMK_STRUCTURED_FUND_BASE)
        # submsg.sub_type.append(quotation_def_pb2.LYMK_TRADEDAY_INFO)
        # Wire header: big-endian length / source / cmd / 4 ints (see try_send).
        fmt = ">iHHiiii"
        headbuffer = struct.pack(fmt, submsg.ByteSize(), 0x8001,
                                 quotation_def_pb2.LYMK_PRE_QUO_SUB, 0, 0, 0,
                                 0)
        sentdata = headbuffer + submsg.SerializeToString()
        self.svr_sock.sendall(sentdata)

    def package_handle(self, packlen, pi, body):
        """Dispatch one received packet by command id and deserialize its body."""
        # print('pre quo cmd 0x%x' % (pi.cmd))
        if quotation_def_pb2.LYMK_ETF_BASE_INFO == pi.cmd:
            # print('LYMK_ETF_BASE_INFO')
            etfbase = pq_quota_pb2.EtfBasketInfo()
            etfbase.ParseFromString(body)
            pi.pack = etfbase
            self.etf_base_info_handle(pi)
        elif ((base_pb2.CMD_HEARTBEAT_REQ == pi.cmd or
               base_pb2.CMD_HEARTBEAT_RESP == pi.cmd)):
            # print('LYMK_HEARTBEAT_REQ ')
            self.send_heartbeat(base_pb2.SYS_QUOTATION)
            # fmt = ">iHHiiii"
            # headbuffer = struct.pack(
            #     fmt, 0, 0x8001, base_pb2.CMD_HEARTBEAT_RESP, 1,
            #     0, 0, 0)
            # self.svr_sock.sendall(headbuffer)
        elif quotation_def_pb2.LYMK_STRUCTURED_FUND_BASE == pi.cmd:
            # print('LYMK_STRUCTURED_FUND_BASE')
            pack = structured_fund_pb2.structured_fund()
            pack.ParseFromString(body)
            pi.pack = pack
            self.sf_info[pack.m_stk_id] = pack
        elif quotation_def_pb2.LYMK_FJJJ_INFO == pi.cmd:
            # print('LYMK_FJJJ_INFO')
            pack = pq_quota_pb2.StructuredFundInfo()
            pack.ParseFromString(body)
            pi.pack = pack
        elif quotation_def_pb2.LYMK_MARKET_OVERVIEW == pi.cmd:
            # print('LYMK_MARKET_OVERVIEW')
            pack = pq_quota_pb2.MarketDaily()
            pack.ParseFromString(body)
            pi.pack = pack
            # print('quotation_def_pb2.LYMK_STRUCTURED_FUND_BASE == pi.cmd')
            # print pack
        elif quotation_def_pb2.LYMK_ETF_DIFF_INFO == pi.cmd:
            # Diff packets are currently ignored.
            pass
        else:
            print('pre quo unknow cmd 0x%x' % (pi.cmd))

    def etf_base_info_handle(self, pi):
        """Cache the received ETF basket and (re)try subscribing its constituents."""
        etfinfo = EtfInfo(pi.pack.etf_code)
        self.etfs[pi.pack.etf_code] = etfinfo
        etfinfo.etf_base_info = pi.pack
        # print('etfcode %s' % (etfinfo.etfcode))
        # if '159919' == pi.pack.etf_code:
        #     print('%s type(pi.pack.etf_code) %s %s' %
        #           (RED, type(pi.pack.etf_code), CLEAR))
        # print('%s len(pro_quo.etfs) %d' %
        #       (time.ctime(time.time()), len(self.etfs)))
        # print pi.pack
        # print pi.pack.etf_code, pi.pack.creation_redemption_unit
        if '510050' == pi.pack.etf_code:
            # print pi.pack
            pass
        ret = self.sub_constituent_stock(etfinfo)
        if ret != 0:
            # Quotation side not ready yet: defer this ETF for a later retry.
            self.sublist.append(etfinfo)
        else:
            # NOTE(review): successfully retried entries are never removed from
            # sublist, so they are re-subscribed on every subsequent success —
            # confirm whether that is intended.
            for etfinf in self.sublist:
                ret = self.sub_constituent_stock(etfinf)
                if ret != 0:
                    print('err ret %d' % (ret))
                    break
            else:
                pass
                # print('sub ret %d' % (ret))

    def get_etf(self, etfcode):
        """Return the cached EtfInfo for *etfcode*, or None when unknown."""
        return self.etfs.get(etfcode)

    def sub_constituent_stock(self, etfinfo):
        """Subscribe market data for every constituent stock of *etfinfo*.

        Returns 0 on success, 1 when the quotation client is missing,
        2 when its code table (c2id) has not been loaded yet.
        """
        if self.quo is None:
            self.quo = self.app.quo
            print('pre quotation re set quo')
        if self.quo is None:
            print('pre quotation re set quo 1 %s' % (etfinfo.etfcode))
            return 1
        if len(self.quo.c2id) < 1:
            # print('pre quotation re set quo 2 %s' % (etfinfo.etfcode))
            return 2
        # if '159919' == etfinfo.etfcode:
        #     print('%spre %s sub stock %s' % (RED, etfinfo.etfcode, CLEAR))
        # ecode == etf_code
        # isinstance(etf, EtfInfo)
        # print ecode, etf
        # etfinfo.stcks[base_pb2.MKT_SZ] = []
        # etfinfo.stcks[base_pb2.MKT_SH] = []
        # etfinfo.stcks[base_pb2.MKT_CF] = []
        for stk_etf in etfinfo.etf_base_info.etf_list:
            # if '300251' == stk_etf.stock_id:
            #     print('%ssub 300251 %s' % (RED, CLEAR))
            self.log.write('%s %s\n' % (etfinfo.etfcode, stk_etf.stock_id))
            if stk_etf.stock_id in self.quo.c2id:
                lst = self.quo.c2id[stk_etf.stock_id]
                lst[1].qty = stk_etf.execute_qty
                etfinfo.stcks[lst[1].mkid].append(lst[1])  # list version
                # dict ver
                # etf.stcks[pack.source][stk_etf.stock_id] = lst[1]
                # sid = lst[0]
                md = quota_pb2.MarketDataReqByIdnum()
                md.sub_type = 1
                md.idnum.append(lst[0])
                # if stk_etf.stock_id == '000001':
                #     print '000001 in', ecode, lst[1], lst[0]
                self.quo.try_send(
                    md, quotation_def_pb2.LYMK_MARKETDATA_REQ_BY_IDNUM)
        self.log.write('\n')
        return 0

    def try_send(self, pack, cmd):
        """Frame *pack* with the wire header, bump the sequence and send it.

        @return number of bytes sent on the socket.
        """
        fmt = ">iHHiiii"
        headbuffer = struct.pack(fmt, pack.ByteSize(), base_pb2.SYS_QUOTATION,
                                 cmd, base_pb2.SYS_QUOTATION, 0, 0, self.seq)
        self.seq += 1
        sentdata = headbuffer + pack.SerializeToString()
        return self.svr_sock.send(sentdata)
# -----------------------------------------------------------------------------
class RobotApp(object):
'''
this is a robot
'''
rid = '100001'
# map robotapp.seq <==> (client, req_seq)
cfg = None # config
quo = None # quotation stock server
# q_f_sock = None # quotation future server
pre_quo = None # quotation pre stock server
# q_p_f_sock = None # quotation pre future server
def __init__(self, conf):
with open('robot.yaml') as f:
import yaml
self.cfg = yaml.load(f)
def timer_proc(self):
# one = '000001'
while 1:
gevent.sleep(5)
for key, etf in self.pre_quo.etfs.items():
assert isinstance(etf, EtfInfo)
if len(etf.stcks[base_pb2.MKT_SZ]) > 0:
print etf.stcks[base_pb2.MKT_SZ][0]
print etf.stcks[base_pb2.MKT_SZ][0].md
print('pre quo %s' % (time.ctime(time.time())))
# -----------------------------------------------------------------------------
def timer_proc(rapp):
    """Module-level debug loop (free function variant of RobotApp.timer_proc).

    Every 5 seconds, prints the first Shenzhen-market constituent of each ETF
    cached by *rapp*'s pre-quotation client. Never returns.
    """
    # one = '000001'
    while 1:
        gevent.sleep(5)
        for key, etf in rapp.pre_quo.etfs.items():
            assert isinstance(etf, EtfInfo)
            if len(etf.stcks[base_pb2.MKT_SZ]) > 0:
                print etf.stcks[base_pb2.MKT_SZ][0]
                print etf.stcks[base_pb2.MKT_SZ][0].md
            # Historic debug variants kept for reference:
            # if ((one in etf.stcks[base_pb2.MKT_SZ] or
            #      one in etf.stcks[base_pb2.MKT_SH] or
            #      one in etf.stcks[base_pb2.MKT_CF])):
            #     stkinfo = etf.stcks[base_pb2.MKT_SZ][one]
            #     print(stkinfo)
            #     print stkinfo.md
            # print('%d %s %s' % (len(rapp.pre_quo.etfs),
            #                     key, type(etf.stcks[base_pb2.MKT_SZ])))
            # print('etf.stcks[base_pb2.MKT_SZ] %d' %
            #       (len(etf.stcks[base_pb2.MKT_SZ])))
            # print('etf.stcks[base_pb2.MKT_SH] %d' %
            #       (len(etf.stcks[base_pb2.MKT_SH])))
            # print('etf.stcks[base_pb2.MKT_CF] %d' %
            #       (len(etf.stcks[base_pb2.MKT_CF])))
# -----------------------------------------------------------------------------
def main():
    """Wire up the pre-quotation and quotation clients and run their receive loops."""
    print('begin %s ' % (time.ctime(time.time())))
    rapp = RobotApp('robot.yaml')
    jobs = []
    # Pre-quotation client (ETF base data).
    pq_ip = rapp.cfg['pre_quo']['pqip']
    pq_port = rapp.cfg['pre_quo']['port']
    print pq_ip, pq_port
    pre_quo = PreQuotation(rapp, (pq_ip, pq_port))
    rapp.pre_quo = pre_quo
    jobs.append(gevent.spawn(pre_quo.recv_data))
    # gevent.sleep(3)
    # Main quotation client (market data).
    q_ip = rapp.cfg['quo_server']['qip']
    q_port = rapp.cfg['quo_server']['port']
    print q_ip, q_port
    quota = quotation.Quotation(rapp, (q_ip, q_port))
    rapp.quo = quota
    jobs.append(gevent.spawn(quota.recv_data))
    gevent.sleep(3)
    jobs.append(gevent.spawn(rapp.timer_proc))
    gevent.joinall(jobs)
    # NOTE(review): joinall() only returns once all greenlets finish, and the
    # spawned loops run forever, so the loop below is effectively unreachable —
    # confirm whether it can be removed.
    while 1:
        print 's' * 60
        gevent.sleep(10)
| StarcoderdataPython |
1668721 | import zlib
from rdkit import Chem
from rdkit.Chem.PropertyMol import PropertyMol
import torch
import numpy as np
from modules.mol import conformation_generation, get_mol_coordinate, get_mol_type_one_hot
from modules.surface import MoleculeAtomsToPointNormal
def generate_and_encode(smi):
    """Generate 3D conformations for a SMILES string and return them compressed.

    :param smi: SMILES string of the molecule.
    :return: zlib-compressed bytes of the newline-joined MOL blocks,
        one block per generated conformer.
    """
    mol, idxs = conformation_generation(smi, RmsThresh=1)

    blocks = []
    for conf_id in idxs:
        clone = PropertyMol(mol)
        clone.AddConformer(mol.GetConformer(conf_id))
        clone.SetProp('_Name', smi)
        blocks.append(Chem.MolToMolBlock(clone))

    joined = '\n'.join(blocks)
    return zlib.compress(joined.encode())
def decode(block):
    """Decompress a block produced by generate_and_encode back into RDKit mols.

    :param block: zlib-compressed bytes of newline-joined MOL blocks.
    :return: list of mol objects, one per 'END'-delimited MOL block.
    """
    text = zlib.decompress(block).decode()
    # Re-append the delimiter consumed by split() so each piece is a full MOL block.
    return [
        Chem.MolFromMolBlock(piece + 'END\n\n')
        for piece in text.split('END\n\n')
    ]
def to_point_cloud(mol, B=500, theta_distance=1.0, r=2.05, smoothness=0.1, variance=0.2, ite=100):
    """
    Sample a surface point cloud from a molecule's 3D conformer.

    :param mol: rdkit.mol. The mol object to process the point cloud.
    :param B: int, the number of the sampling points.
    :param theta_distance: float, the variance distance (A) of the normal sampling of the neighborhood points.
    :param r: float, the radius of the level set surface.
    :param smoothness: float, the smooth constant for SDF calculation.
    :param variance: float, sampling variance forwarded to MoleculeAtomsToPointNormal.
    :param ite: int, The number of the iterations of the descend step.
    :return: detached tensor of sampled surface points (on the CUDA device).
    """
    conformer = mol.GetConformer()
    atoms = get_mol_coordinate(conformer)
    atomtype = get_mol_type_one_hot(mol)
    # NOTE(review): .cuda() hard-requires a GPU; this fails on CPU-only hosts —
    # confirm whether a device parameter is needed.
    atomtype = torch.from_numpy(np.array(atomtype)).cuda()
    atoms = torch.from_numpy(atoms).cuda()
    point_processer = MoleculeAtomsToPointNormal(atoms=atoms, atomtype=atomtype, B=B, r=r,
                                                 smoothness=smoothness, variance=variance,
                                                 theta_distance=theta_distance)
    # Sample candidate points, project them onto the level-set surface,
    # then prune invalid points and subsample to the final cloud.
    atoms, z = point_processer.sampling()
    z = point_processer.descend(atoms, z, ite=ite)
    z = point_processer.cleaning(atoms, z)
    z = point_processer.sub_sampling(z)
    return z.detach()
| StarcoderdataPython |
80991 | import yaml
import pytest
from unittest import mock
import kubernetes
from kubernetes.config.config_exception import ConfigException
from mlflow.projects import kubernetes as kb
from mlflow.exceptions import ExecutionException
from mlflow.entities import RunStatus
def test_run_command_creation():  # pylint: disable=unused-argument
    """
    Tests command creation: arguments are split on whitespace while quoted
    arguments are kept intact (single-quoted in the result).
    """
    raw_command = [
        "python train.py --alpha 0.5 --l1-ratio 0.1",
        "--comment 'foo bar'",
        '--comment-bis "bar foo"',
    ]

    command = kb._get_run_command(raw_command)

    expected = [
        "python",
        "train.py",
        "--alpha",
        "0.5",
        "--l1-ratio",
        "0.1",
        "--comment",
        "'foo bar'",
        "--comment-bis",
        "'bar foo'",
    ]
    assert command == expected
def test_valid_kubernetes_job_spec():  # pylint: disable=unused-argument
    """
    Tests job specification for Kubernetes: the user template's first container
    gets MLflow's name/image/command, and MLflow's env vars are appended after
    the template's own.
    """
    custom_template = yaml.safe_load(
        "apiVersion: batch/v1\n"
        "kind: Job\n"
        "metadata:\n"
        "  name: pi-with-ttl\n"
        "spec:\n"
        "  ttlSecondsAfterFinished: 100\n"
        "  template:\n"
        "    spec:\n"
        "      containers:\n"
        "      - name: pi\n"
        "        image: perl\n"
        "        command: ['perl', '-Mbignum=bpi', '-wle']\n"
        "        env: \n"
        "        - name: DUMMY\n"
        '          value: "test_var"\n'
        "      restartPolicy: Never\n"
    )
    project_name = "mlflow-docker-example"
    image_tag = "image_tag"
    image_digest = "5e74a5a"
    command = ["mlflow", "run", ".", "--no-conda", "-P", "alpha=0.5"]
    env_vars = {"RUN_ID": "1"}
    job_definition = kb._get_kubernetes_job_definition(
        project_name=project_name,
        image_tag=image_tag,
        image_digest=image_digest,
        command=command,
        env_vars=env_vars,
        job_template=custom_template,
    )
    container_spec = job_definition["spec"]["template"]["spec"]["containers"][0]
    assert container_spec["name"] == project_name
    # Image reference is pinned by digest: "<tag>@<digest>".
    assert container_spec["image"] == image_tag + "@" + image_digest
    assert container_spec["command"] == command
    # Template env var first, then the MLflow-provided one.
    assert 2 == len(container_spec["env"])
    assert container_spec["env"][0]["name"] == "DUMMY"
    assert container_spec["env"][0]["value"] == "test_var"
    assert container_spec["env"][1]["name"] == "RUN_ID"
    assert container_spec["env"][1]["value"] == "1"
def test_run_kubernetes_job():
    """run_kubernetes_job() loads the requested kube context and submits one namespaced job."""
    active_run = mock.Mock()
    project_name = "mlflow-docker-example"
    image_tag = "image_tag"
    image_digest = "5e74a5a"
    command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
    env_vars = {"RUN_ID": "1"}
    kube_context = "docker-for-desktop"
    job_template = yaml.safe_load(
        "apiVersion: batch/v1\n"
        "kind: Job\n"
        "metadata:\n"
        "  name: pi-with-ttl\n"
        "  namespace: mlflow\n"
        "spec:\n"
        "  ttlSecondsAfterFinished: 100\n"
        "  template:\n"
        "    spec:\n"
        "      containers:\n"
        "      - name: pi\n"
        "        image: perl\n"
        "        command: ['perl', '-Mbignum=bpi', '-wle']\n"
        "      restartPolicy: Never\n"
    )
    with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
        with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
            submitted_run_obj = kb.run_kubernetes_job(
                project_name=project_name,
                active_run=active_run,
                image_tag=image_tag,
                image_digest=image_digest,
                command=command,
                env_vars=env_vars,
                job_template=job_template,
                kube_context=kube_context,
            )

            # Run metadata mirrors the MLflow run and the template's namespace.
            assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
            assert submitted_run_obj._job_name.startswith(project_name)
            assert submitted_run_obj._job_namespace == "mlflow"
            assert kube_api_mock.call_count == 1
            # The named context must be forwarded to load_kube_config.
            args = kube_config_mock.call_args_list
            assert args[0][1]["context"] == kube_context
def test_run_kubernetes_job_current_kubecontext():
    """With kube_context=None, the current kube config is used (no in-cluster fallback)."""
    active_run = mock.Mock()
    project_name = "mlflow-docker-example"
    image_tag = "image_tag"
    image_digest = "5e74a5a"
    command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
    env_vars = {"RUN_ID": "1"}
    kube_context = None
    job_template = yaml.safe_load(
        "apiVersion: batch/v1\n"
        "kind: Job\n"
        "metadata:\n"
        "  name: pi-with-ttl\n"
        "  namespace: mlflow\n"
        "spec:\n"
        "  ttlSecondsAfterFinished: 100\n"
        "  template:\n"
        "    spec:\n"
        "      containers:\n"
        "      - name: pi\n"
        "        image: perl\n"
        "        command: ['perl', '-Mbignum=bpi', '-wle']\n"
        "      restartPolicy: Never\n"
    )
    with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
        with mock.patch("kubernetes.config.load_incluster_config") as incluster_kube_config_mock:
            with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
                submitted_run_obj = kb.run_kubernetes_job(
                    project_name=project_name,
                    active_run=active_run,
                    image_tag=image_tag,
                    image_digest=image_digest,
                    command=command,
                    env_vars=env_vars,
                    job_template=job_template,
                    kube_context=kube_context,
                )

                assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
                assert submitted_run_obj._job_name.startswith(project_name)
                assert submitted_run_obj._job_namespace == "mlflow"
                assert kube_api_mock.call_count == 1
                # Only the regular kube config path is taken.
                assert kube_config_mock.call_count == 1
                assert incluster_kube_config_mock.call_count == 0
def test_run_kubernetes_job_in_cluster():
    """When load_kube_config raises ConfigException, the in-cluster config is used instead."""
    active_run = mock.Mock()
    project_name = "mlflow-docker-example"
    image_tag = "image_tag"
    image_digest = "5e74a5a"
    command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
    env_vars = {"RUN_ID": "1"}
    kube_context = None
    job_template = yaml.safe_load(
        "apiVersion: batch/v1\n"
        "kind: Job\n"
        "metadata:\n"
        "  name: pi-with-ttl\n"
        "  namespace: mlflow\n"
        "spec:\n"
        "  ttlSecondsAfterFinished: 100\n"
        "  template:\n"
        "    spec:\n"
        "      containers:\n"
        "      - name: pi\n"
        "        image: perl\n"
        "        command: ['perl', '-Mbignum=bpi', '-wle']\n"
        "      restartPolicy: Never\n"
    )
    with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
        # Simulate running inside a pod: the regular config load fails.
        kube_config_mock.side_effect = ConfigException()
        with mock.patch("kubernetes.config.load_incluster_config") as incluster_kube_config_mock:
            with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
                submitted_run_obj = kb.run_kubernetes_job(
                    project_name=project_name,
                    active_run=active_run,
                    image_tag=image_tag,
                    image_digest=image_digest,
                    command=command,
                    env_vars=env_vars,
                    job_template=job_template,
                    kube_context=kube_context,
                )

                assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
                assert submitted_run_obj._job_name.startswith(project_name)
                assert submitted_run_obj._job_namespace == "mlflow"
                assert kube_api_mock.call_count == 1
                # Both paths were attempted: regular config first, then in-cluster.
                assert kube_config_mock.call_count == 1
                assert incluster_kube_config_mock.call_count == 1
def test_push_image_to_registry():
    """Pushing an image delegates to the Docker client with the right repository."""
    image_uri = "dockerhub_account/mlflow-kubernetes-example"
    with mock.patch("docker.from_env") as from_env_mock:
        docker_client = mock.MagicMock()
        from_env_mock.return_value = docker_client

        kb.push_image_to_registry(image_uri)

        push_mock = docker_client.images.push
        assert push_mock.call_count == 1
        assert push_mock.call_args_list[0][1]["repository"] == image_uri
def test_push_image_to_registry_handling_errors():
    """A failing push must surface as ExecutionException."""
    image_uri = "dockerhub_account/mlflow-kubernetes-example"
    # NOTE(review): docker is deliberately left unmocked here so the push fails;
    # confirm the failure mode this exercises matches push_image_to_registry's
    # error wrapping.
    with pytest.raises(ExecutionException):
        kb.push_image_to_registry(image_uri)
def test_submitted_run_get_status_killed():
    """Cancelling a run deletes the namespaced job and reports KILLED."""
    with mock.patch("kubernetes.client.BatchV1Api.delete_namespaced_job") as delete_job_mock:
        submitted_run = kb.KubernetesSubmittedRun(1, "job-name", "job-namespace")
        submitted_run.cancel()

        assert submitted_run.get_status() == RunStatus.KILLED
        assert delete_job_mock.call_count == 1
        delete_kwargs = delete_job_mock.call_args_list[0][1]
        assert delete_kwargs["name"] == "job-name"
        assert delete_kwargs["namespace"] == "job-namespace"
def test_submitted_run_get_status_failed():
    """A job status carrying a Failed condition must map to RunStatus.FAILED."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    condition = kubernetes.client.models.V1JobCondition(type="Failed", status="True")
    job_status = kubernetes.client.models.V1JobStatus(
        active=1,
        completion_time=None,
        conditions=[condition],
        failed=1,
        start_time=1,
        succeeded=None,
    )
    job = kubernetes.client.models.V1Job(status=job_status)
    with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
        kube_api_mock.return_value = job
        submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
        # (leftover debug print removed)
        assert RunStatus.FAILED == submitted_run.get_status()
        assert kube_api_mock.call_count == 1
        args = kube_api_mock.call_args_list
        assert args[0][1]["name"] == job_name
        assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_succeeded():
    """A job status carrying a Complete condition must map to RunStatus.FINISHED."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True")
    job_status = kubernetes.client.models.V1JobStatus(
        active=None,
        completion_time=None,
        conditions=[condition],
        failed=None,
        start_time=None,
        succeeded=1,
    )
    job = kubernetes.client.models.V1Job(status=job_status)
    with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
        kube_api_mock.return_value = job
        submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
        # (leftover debug print removed)
        assert RunStatus.FINISHED == submitted_run.get_status()
        assert kube_api_mock.call_count == 1
        args = kube_api_mock.call_args_list
        assert args[0][1]["name"] == job_name
        assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_running():
    """An active job without terminal conditions must map to RunStatus.RUNNING."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    job_status = kubernetes.client.models.V1JobStatus(
        active=1, completion_time=None, conditions=None, failed=1, start_time=1, succeeded=1
    )
    job = kubernetes.client.models.V1Job(status=job_status)
    with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
        kube_api_mock.return_value = job
        submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
        assert RunStatus.RUNNING == submitted_run.get_status()
        assert kube_api_mock.call_count == 1
        args = kube_api_mock.call_args_list
        # (leftover debug print removed)
        assert args[0][1]["name"] == job_name
        assert args[0][1]["namespace"] == job_namespace
def test_state_transitions():
    """get_status() must map raw V1JobStatus field combinations onto the RunStatus lifecycle."""
    mlflow_run_id = 1
    job_name = "job-name"
    job_namespace = "job-namespace"
    submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
    with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:

        def set_return_value(**kwargs):
            # Rebuild the mocked job status for each scenario below.
            job_status = kubernetes.client.models.V1JobStatus(**kwargs)
            kube_api_mock.return_value = kubernetes.client.models.V1Job(status=job_status)

        # No start time yet -> still scheduled.
        set_return_value()
        assert RunStatus.SCHEDULED == submitted_run.get_status()
        # Started, and none of the intermediate field combinations below is terminal.
        set_return_value(start_time=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1, active=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1, succeeded=1)
        assert RunStatus.RUNNING == submitted_run.get_status()
        set_return_value(start_time=1, failed=1, succeeded=1, completion_time=2)
        assert RunStatus.RUNNING == submitted_run.get_status()
        # Only an explicit Complete condition flips the run to FINISHED.
        condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True")
        set_return_value(
            conditions=[condition], failed=1, start_time=1, completion_time=2, succeeded=1
        )
        assert RunStatus.FINISHED == submitted_run.get_status()
| StarcoderdataPython |
1757735 | import logging
import sys
LOGGER_NAME = 'aqueduct'
log = logging.getLogger(LOGGER_NAME)
log.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] %(levelname)s [Flow] [pid:%(process)d] %(message)s')
ch = logging.StreamHandler(sys.stderr)
ch.setFormatter(formatter)
log.addHandler(ch)
| StarcoderdataPython |
93665 | <reponame>gecko17/project-sailor
from unittest.mock import patch, Mock, call
import pytest
from sailor.assetcentral.equipment import Equipment
from sailor.assetcentral.location import Location, LocationSet
from sailor.assetcentral import constants
@pytest.fixture
def mock_url():
    """Patch the AssetCentral application-url helper to a fixed base url."""
    with patch('sailor.assetcentral.equipment._ac_application_url') as url_mock:
        url_mock.return_value = 'base_url'
        yield url_mock
class TestEquipment:
@pytest.fixture()
def eq_obj(self, make_equipment):
    # Canonical equipment fixture shared by the tests in this class.
    return make_equipment(equipmentId='D2602147691E463DA91EA2B4C3998C4B', name='testEquipment', location='USA')
@patch('sailor._base.apply_filters_post_request')
@patch('sailor.assetcentral.equipment._ac_fetch_data')
def test_find_equipment_indicators_fetch_and_apply(self, mock_request, mock_apply, mock_url,
                                                   eq_obj, make_indicator_set):
    """find_equipment_indicators() fetches raw rows, post-filters them and wraps the result."""
    object_list = Mock(name='raw_object_list')
    mock_request.return_value = object_list
    mock_apply.return_value = [{'propertyId': 'indicator_1', 'pstid': 'group_id', 'categoryID': 'template_id'},
                               {'propertyId': 'indicator_2', 'pstid': 'group_id', 'categoryID': 'template_id'}]
    filter_kwargs = {'param1': 'one'}
    extended_filters = ['other_param > 22']

    expected_result = make_indicator_set(propertyId=['indicator_1', 'indicator_2'])
    actual = eq_obj.find_equipment_indicators(**filter_kwargs, extended_filters=extended_filters)

    # The equipment view endpoint is queried and filters are forwarded verbatim.
    assert constants.VIEW_EQUIPMENT in mock_request.call_args.args[0]
    assert mock_apply.call_args.args[:-1] == (object_list, filter_kwargs, extended_filters)
    assert actual == expected_result
@patch('sailor.assetcentral.equipment._ac_fetch_data')
def test_find_failure_modes(self, mock_request, mock_config, eq_obj):
    """find_failure_modes() delegates with the failure-mode ids fetched for this equipment."""
    mock_request.return_value = [{'ID': 'fm_id1'}, {'ID': 'fm_id2'}]
    expected = 'expected return value is the value returned by the delegate function "find_failure_modes"'
    with patch('sailor.assetcentral.equipment.find_failure_modes', return_value=expected) as mock_delegate:
        actual = eq_obj.find_failure_modes(extended_filters=['some_param > some_value'], param='123')

    # Caller filters plus the fetched ids are forwarded to the module-level finder.
    mock_delegate.assert_called_once_with(extended_filters=['some_param > some_value'],
                                          id=['fm_id1', 'fm_id2'], param='123')
    assert actual == expected
@pytest.mark.parametrize('function_name', [
'find_notifications', 'find_workorders'
])
def test_delegate_called_with_filters(self, eq_obj, function_name):
expected = f'expected return value is the value returned by the delegate function "{function_name}"'
function_under_test = getattr(eq_obj, function_name)
with patch(f'sailor.assetcentral.equipment.{function_name}', return_value=expected) as mock_delegate:
actual = function_under_test(extended_filters=['some_param > some_value'], param='123')
mock_delegate.assert_called_once_with(extended_filters=['some_param > some_value'], param='123',
equipment_id=eq_obj.id)
assert actual == expected
@patch('sailor.assetcentral.location._ac_fetch_data')
def test_location_returns_location(self, mock_request, mock_config, make_equipment):
equipment = make_equipment(equipment_id='123', location='Walldorf')
mock_request.return_value = [{'locationId': '456', 'name': 'Walldorf'}]
expected_result = Location({'locationId': '456', 'name': 'Walldorf'})
actual = equipment.location
assert type(actual) == Location
assert actual == expected_result
@patch('sailor.assetcentral.equipment.find_locations')
def test_location_fetches_only_once(self, mock_find, make_equipment):
mock_find.return_value = LocationSet([Location({'locationId': '456', 'name': 'Walldorf'})])
equipment = make_equipment(equipment_id='123', location='Walldorf')
equipment.location
equipment.location
mock_find.assert_called_once()
@patch('sailor.assetcentral.equipment.find_locations')
def test_location_different_instances_always_fetch(self, mock_find, make_equipment):
mock_find.return_value = LocationSet([Location({'locationId': '456', 'name': 'Walldorf'})])
equipment1 = make_equipment(equipment_id='123', location='Walldorf')
equipment2 = make_equipment(equipment_id='123', location='Walldorf')
expected_calls = [call(name='Walldorf'), call(name='Walldorf')]
equipment1.location
equipment2.location
mock_find.assert_has_calls(expected_calls)
@patch('sailor.assetcentral.equipment._create_or_update_notification')
def test_create_notification_builds_request(self, create_update_mock, make_equipment):
equipment = make_equipment(equipmentId='123', location='Walldorf')
equipment._location = Location({'locationId': '456', 'name': 'Walldorf'})
create_kwargs = {'notification_type': 'M2', 'short_description': 'test', 'priority': 15, 'status': 'NEW'}
expected_request_dict = {
'equipmentID': '123', 'locationID': '456', 'type': 'M2', 'description': {'shortDescription': 'test'},
'priority': 15, 'status': ['NEW']}
equipment.create_notification(**create_kwargs)
create_update_mock.assert_called_once_with(expected_request_dict, 'POST')
@pytest.mark.parametrize('create_kwargs', [
({'id': 123}),
({'notificationID': 123}),
({'equipment_id': 123}),
({'equipmentID': 123})
])
def test_create_notification_forbidden_fields_raises(self, create_kwargs, make_equipment):
equipment = make_equipment()
equipment._location = Location({'locationId': '456', 'name': 'Walldorf'})
expected_offender = list(create_kwargs.keys())[0]
with pytest.raises(RuntimeError, match=f"You cannot set '{expected_offender}' in this request."):
equipment.create_notification(**create_kwargs)
class TestEquipmentSet:
    """Unit tests for EquipmentSet (bulk delegation, common indicators, exposed attributes)."""
    @pytest.mark.parametrize('function_name', [
        'find_notifications', 'find_workorders',
    ])
    def test_delegate_called_with_filters(self, make_equipment_set, function_name):
        """Set-level finders forward filters plus the ids of ALL contained equipment."""
        eq_set = make_equipment_set(equipmentId=['equipment_id_1', 'equipment_id_2'])
        expected = f'expected return value is the value returned by the delegate function "{function_name}"'
        function_under_test = getattr(eq_set, function_name)
        with patch(f'sailor.assetcentral.equipment.{function_name}', return_value=expected) as mock_delegate:
            actual = function_under_test(extended_filters=['some_param > some_value'], param='123')
        mock_delegate.assert_called_once_with(extended_filters=['some_param > some_value'], param='123',
                                              equipment_id=[equipment.id for equipment in eq_set])
        assert actual == expected
    def test_find_common_indicators(self, make_indicator, make_indicator_set, make_equipment_set):
        """find_common_indicators returns only indicators present on every equipment in the set."""
        equipment_set = make_equipment_set(equipmentId=['equipment_id_1', 'equipment_id_2', 'equipment_id_3'])
        indicator_ids = [['1', '2', '3'], ['1', '3'], ['3', '1']]
        for i, equipment in enumerate(equipment_set):
            equipment.find_equipment_indicators = Mock()
            equipment.find_equipment_indicators.return_value = make_indicator_set(propertyId=indicator_ids[i])
        # '1' and '3' are the only ids shared by all three pieces of equipment
        expected_result = make_indicator_set(propertyId=['3', '1'])
        actual_result = equipment_set.find_common_indicators()
        assert expected_result == actual_result
    def test_expected_public_attributes_are_present(self):
        """Guard test: the public attributes exposed via Equipment._field_map stay stable."""
        expected_attributes = [
            'name', 'model_name', 'location_name', 'status_text', 'short_description', 'manufacturer',
            'operator', 'installation_date', 'build_date', 'criticality_description', 'id', 'model_id',
            'template_id', 'serial_number', 'batch_number',
        ]
        fieldmap_public_attributes = [
            field.our_name for field in Equipment._field_map.values() if field.is_exposed
        ]
        assert expected_attributes == fieldmap_public_attributes
# --- (end of snippet) ---
# File: pymodaq_plugins/hardware/picoquant/histomode.py
# Demo for access to TimeHarp 260 Hardware via TH260LIB.DLL v 3.1.
# The program performs a measurement based on hard coded settings.
# The resulting histogram is stored in an ASCII output file.
#
# <NAME>, PicoQuant GmbH, February 2018
import time
import ctypes as ct
from ctypes import byref
# Constants mirrored from th260defin.h (keep in sync with the installed DLL).
# From th260defin.h
LIB_VERSION = "3.1"
MAXDEVNUM = 4
MODE_HIST = 0
MAXLENCODE = 5
MAXINPCHAN = 2
MAXHISTLEN = 32768
FLAG_OVERFLOW = 0x0001
# Measurement parameters, these are hardcoded since this is just a demo
binning = 0 # you can change this
offset = 0
tacq = 1000 # Measurement time in millisec, you can change this
syncDivider = 1 # you can change this
### For TimeHarp 260 P
syncCFDZeroCross = -10 # you can change this
syncCFDLevel = -50 # you can change this
inputCFDZeroCross = -10 # you can change this
inputCFDLevel = -50 # you can change this
### For TimeHarp 260 N
syncTriggerEdge = 0 # you can change this
syncTriggerLevel = -50 # you can change this
inputTriggerEdge = 0 # you can change this
inputTriggerLevel = -50 # you can change this
# Variables to store information read from DLLs
# (ctypes buffers are passed by reference into the TH260 library calls)
counts = [(ct.c_uint * MAXHISTLEN)() for i in range(0, MAXINPCHAN)]
dev = []
libVersion = ct.create_string_buffer(b"", 8)
hwSerial = ct.create_string_buffer(b"", 8)
hwPartno = ct.create_string_buffer(b"", 8)
hwVersion = ct.create_string_buffer(b"", 16)
hwModel = ct.create_string_buffer(b"", 16)
errorString = ct.create_string_buffer(b"", 40)
numChannels = ct.c_int()
histLen = ct.c_int()
resolution = ct.c_double()
syncRate = ct.c_int()
countRate = ct.c_int()
flags = ct.c_int()
warnings = ct.c_int()
warningstext = ct.create_string_buffer(b"", 16384)
cmd = 0
# Load the vendor DLL (64-bit); requires the PicoQuant driver installation.
th260lib = ct.CDLL("th260lib64.dll")
def closeDevices():
    """Close every possible TH260 device slot and terminate the program."""
    # Closing an index that was never opened is harmless, so sweep all slots.
    for devidx in range(MAXDEVNUM):
        th260lib.TH260_CloseDevice(ct.c_int(devidx))
    exit(0)
def tryfunc(retcode, funcName):
    """Abort the demo (printing the library error text) when a TH260 call failed."""
    # Non-negative return codes indicate success -- nothing to do.
    if retcode >= 0:
        return
    th260lib.TH260_GetErrorString(errorString, ct.c_int(retcode))
    print("TH260_%s error %d (%s). Aborted." % (funcName, retcode,
          errorString.value.decode("utf-8")))
    closeDevices()
# --- Library / device discovery ---------------------------------------------
th260lib.TH260_GetLibraryVersion(libVersion)
print("Library version is %s" % libVersion.value.decode("utf-8"))
if libVersion.value.decode("utf-8") != LIB_VERSION:
    print("Warning: The application was built for version %s" % LIB_VERSION)
outputfile = open("histomode.out", "w+")
print("\nSearching for TimeHarp devices...")
print("Devidx Status")
for i in range(0, MAXDEVNUM):
    retcode = th260lib.TH260_OpenDevice(ct.c_int(i), hwSerial)
    if retcode == 0:
        print(" %1d S/N %s" % (i, hwSerial.value.decode("utf-8")))
        dev.append(i)
    else:
        if retcode == -1: # TH260_ERROR_DEVICE_OPEN_FAIL
            print(" %1d no device" % i)
        else:
            th260lib.TH260_GetErrorString(errorString, ct.c_int(retcode))
            print(" %1d %s" % (i, errorString.value.decode("utf8")))
# In this demo we will use the first TimeHarp device we find, i.e. dev[0].
# You can also use multiple devices in parallel.
# You can also check for specific serial numbers, so that you always know
# which physical device you are talking to.
if len(dev) < 1:
    print("No device available.")
    closeDevices()
print("Using device #%1d" % dev[0])
outputfile.write("Binning : %d\n" % binning)
outputfile.write("Offset : %d\n" % offset)
outputfile.write("AcquisitionTime : %d\n" % tacq)
outputfile.write("SyncDivider : %d\n" % syncDivider)
print("\nInitializing the device...")
# Histo mode with internal clock
tryfunc(th260lib.TH260_Initialize(ct.c_int(dev[0]), ct.c_int(MODE_HIST)),\
    "Initialize")
tryfunc(th260lib.TH260_GetHardwareInfo(dev[0], hwModel, hwPartno, hwVersion),\
    "GetHardwareInfo")
print("Found Model %s Part no %s Version %s" % (hwModel.value.decode("utf-8"),\
    hwPartno.value.decode("utf-8"), hwVersion.value.decode("utf-8")))
# The P model uses CFD inputs, the N model uses edge triggers; record whichever applies.
if hwModel.value.decode("utf-8") == "TimeHarp 260 P":
    outputfile.write("SyncCFDZeroCross : %d\n" % syncCFDZeroCross)
    outputfile.write("SyncCFDLevel : %d\n" % syncCFDLevel)
    outputfile.write("InputCFDZeroCross : %d\n" % inputCFDZeroCross)
    outputfile.write("InputCFDLevel : %d\n" % inputCFDLevel)
elif hwModel.value.decode("utf-8") == "TimeHarp 260 N":
    outputfile.write("SyncTriggerEdge : %d\n" % syncTriggerEdge)
    outputfile.write("SyncTriggerLevel : %d\n" % syncTriggerLevel)
    outputfile.write("InputTriggerEdge : %d\n" % inputTriggerEdge)
    outputfile.write("InputTriggerLevel : %d\n" % inputTriggerLevel)
else:
    print("Unknown hardware model %s. Aborted." % hwModel.value.decode("utf-8"))
    closeDevices()
# --- Channel configuration ---------------------------------------------------
tryfunc(th260lib.TH260_GetNumOfInputChannels(ct.c_int(dev[0]), byref(numChannels)),\
    "GetNumOfInputChannels")
print("Device has %i input channels." % numChannels.value)
tryfunc(th260lib.TH260_SetSyncDiv(ct.c_int(dev[0]), ct.c_int(syncDivider)),\
    "SetSyncDiv")
if hwModel.value.decode("utf-8") == "TimeHarp 260 P":
    tryfunc(
        th260lib.TH260_SetSyncCFD(ct.c_int(dev[0]), ct.c_int(syncCFDLevel),\
        ct.c_int(syncCFDZeroCross)),\
        "SetSyncCFD"
    )
    # we use the same input settings for all channels, you can change this
    for i in range(0, numChannels.value):
        tryfunc(
            th260lib.TH260_SetInputCFD(ct.c_int(dev[0]), ct.c_int(i),\
            ct.c_int(inputCFDLevel),\
            ct.c_int(inputCFDZeroCross)),\
            "SetInputCFD"
        )
if hwModel.value.decode("utf-8") == "TimeHarp 260 N":
    tryfunc(
        th260lib.TH260_SetSyncEdgeTrg(ct.c_int(dev[0]), ct.c_int(syncTriggerLevel),\
        ct.c_int(syncTriggerEdge)),\
        "SetSyncEdgeTrg"
    )
    # we use the same input settings for all channels, you can change this
    for i in range(0, numChannels.value):
        tryfunc(
            th260lib.TH260_SetInputEdgeTrg(ct.c_int(dev[0]), ct.c_int(i),\
            ct.c_int(inputTriggerLevel),\
            ct.c_int(inputTriggerEdge)),\
            "SetInputEdgeTrg"
        )
tryfunc(th260lib.TH260_SetSyncChannelOffset(ct.c_int(dev[0]), ct.c_int(0)),\
    "SetSyncChannelOffset")
for i in range(0, numChannels.value):
    tryfunc(
        th260lib.TH260_SetInputChannelOffset(ct.c_int(dev[0]), ct.c_int(i),\
        ct.c_int(0)),\
        "SetInputChannelOffset"
    )
tryfunc(
    th260lib.TH260_SetHistoLen(ct.c_int(dev[0]), ct.c_int(MAXLENCODE), byref(histLen)),\
    "SetHistoLen"
)
print("Histogram length is %d" % histLen.value)
tryfunc(th260lib.TH260_SetBinning(ct.c_int(dev[0]), ct.c_int(binning)), "SetBinning")
tryfunc(th260lib.TH260_SetOffset(ct.c_int(dev[0]), ct.c_int(offset)), "SetOffset")
tryfunc(th260lib.TH260_GetResolution(ct.c_int(dev[0]), byref(resolution)),\
    "GetResolution")
print("Resolution is %1.1lfps" % resolution.value)
# Note: after Init or SetSyncDiv allow 150 ms for valid count rate readings
# Otherwise you get new values after every 100ms
time.sleep(0.15)
tryfunc(th260lib.TH260_GetSyncRate(ct.c_int(dev[0]), byref(syncRate)), "GetSyncRate")
print("\nSyncrate=%1d/s" % syncRate.value)
for i in range(0, numChannels.value):
    tryfunc(th260lib.TH260_GetCountRate(ct.c_int(dev[0]), ct.c_int(i), byref(countRate)),\
        "GetCountRate")
    print("Countrate[%1d]=%1d/s" % (i, countRate.value))
# after getting the count rates you can check for warnings
tryfunc(th260lib.TH260_GetWarnings(ct.c_int(dev[0]), byref(warnings)), "GetWarnings")
if warnings.value != 0:
    th260lib.TH260_GetWarningsText(ct.c_int(dev[0]), warningstext, warnings)
    print("\n\n%s" % warningstext.value.decode("utf-8"))
tryfunc(th260lib.TH260_SetStopOverflow(ct.c_int(dev[0]), ct.c_int(0), ct.c_int(10000)),\
    "SetStopOverflow") # for example only
# --- Interactive measurement loop: repeat until the user enters 'q' ----------
while cmd != "q":
    tryfunc(th260lib.TH260_ClearHistMem(ct.c_int(dev[0])), "ClearHistMem")
    print("press RETURN to start measurement")
    input()
    tryfunc(th260lib.TH260_GetSyncRate(ct.c_int(dev[0]), byref(syncRate)),\
        "GetSyncRate")
    print("Syncrate=%1d/s" % syncRate.value)
    for i in range(0, numChannels.value):
        tryfunc(
            th260lib.TH260_GetCountRate(ct.c_int(dev[0]), ct.c_int(i),\
            byref(countRate)),\
            "GetCountRate"
        )
        print("Countrate[%1d]=%1d/s" % (i, countRate.value))
    # here you could check for warnings again
    tryfunc(th260lib.TH260_StartMeas(ct.c_int(dev[0]), ct.c_int(tacq)), "StartMeas")
    print("\nMeasuring for %1d milliseconds..." % tacq)
    # Poll until the acquisition time (CTC) has elapsed.
    ctcstatus = ct.c_int(0)
    while ctcstatus.value == 0:
        tryfunc(th260lib.TH260_CTCStatus(ct.c_int(dev[0]), byref(ctcstatus)),\
            "CTCStatus")
    tryfunc(th260lib.TH260_StopMeas(ct.c_int(dev[0])), "StopMeas")
    for i in range(0, numChannels.value):
        tryfunc(
            th260lib.TH260_GetHistogram(ct.c_int(dev[0]), byref(counts[i]),\
            ct.c_int(i), ct.c_int(0)),\
            "GetHistogram"
        )
        integralCount = 0
        for j in range(0, histLen.value):
            integralCount += counts[i][j]
        print(" Integralcount[%1d]=%1.0lf" % (i,integralCount))
    tryfunc(th260lib.TH260_GetFlags(ct.c_int(dev[0]), byref(flags)), "GetFlags")
    if flags.value & FLAG_OVERFLOW > 0:
        print(" Overflow.")
    print("Enter c to continue or q to quit and save the count data.")
    cmd = input()
# Dump the histograms, one column per channel.
for j in range(0, histLen.value):
    for i in range(0, numChannels.value):
        outputfile.write("%5d " % counts[i][j])
    outputfile.write("\n")
# NOTE(review): closeDevices() calls exit(0), so the outputfile.close() that
# follows it never runs; the file is only closed by interpreter shutdown.
closeDevices()
outputfile.close()
164127 | ## interaction / scripts / create_translation_repository.py
'''
This script will pre-calculate the translation operators for a given bounding
box, max level, and frequency steps for a multi-level fast multipole algorithm.
This can take hours to days depending on the number of threads available, size
of bounding box, number of levels etc. The output is stored in a single H5 file.
To use, run with a corresponding yaml config file for setting the input
parameters.
python create_translation_repository.py <path to config file>
Author: <NAME> (<EMAIL>)
'''
import numpy as np
import pandas as pd
import sqlite3 as sql
import multiprocessing
from itertools import repeat
from contextlib import closing
import os
from tqdm import tqdm
from interaction3.bem.core import fma_functions as fma
from interaction3.bem.core.db_functions import get_order
# Register adapters so sqlite3 can bind numpy scalar types directly; without
# these, executemany() over numpy arrays raises sqlite3.InterfaceError.
sql.register_adapter(np.float64, float)
sql.register_adapter(np.float32, float)
sql.register_adapter(np.int64, int)
sql.register_adapter(np.int32, int)
## PROCESS FUNCTIONS ##
def generate_translations(file, f, k, dims, levels, orders_db):
    """Compute and store translation operators for one frequency across all levels.

    For each tree level the quadrature order is looked up in the orders
    database, then one modified far-to-near-field operator is computed for
    every unique neighbour offset and written to the sqlite file under the
    process-shared write_lock.
    """
    xdim, ydim = dims
    minlevel, maxlevel = levels
    for l in range(minlevel, maxlevel + 1):
        order = get_order(orders_db, f, l)
        qrule = fma.fft_quadrule(order, order)
        # group size halves at each level of the quadtree
        group_xdim, group_ydim = xdim / (2 ** l), ydim / (2 ** l)
        kcoordT = qrule['kcoordT']
        theta = qrule['theta']
        phi = qrule['phi']
        unique_coords = fma.get_unique_coords()
        for coords in unique_coords:
            # translation vector between group centers for this offset
            r = coords * np.array([group_xdim, group_ydim, 1])
            rhat = r / fma.mag(r)
            cos_angle = rhat.dot(kcoordT)
            translation = np.ascontiguousarray(fma.mod_ff2nf_op(fma.mag(r), cos_angle, k, order))
            # serialize writes: sqlite allows only one writer at a time
            with write_lock:
                with closing(sql.connect(file)) as conn:
                    update_translations_table(conn, f, k, l, order, tuple(coords), theta, phi, translation)
def init_process(_write_lock):
    """Pool initializer: expose the shared write lock as a global in each worker."""
    global write_lock
    write_lock = _write_lock
def process(proc_args):
    """Worker entry point: build all translations for one frequency, then mark it done."""
    file, f, k, dims, levels, orders_db = proc_args
    generate_translations(file, f, k, dims, levels, orders_db)
    # flag the frequency as complete so interrupted runs can be resumed
    with write_lock:
        with closing(sql.connect(file)) as conn:
            update_progress(conn, f)
## ENTRY POINT ##
def main(**kwargs):
    """Create (or resume) the translation repository and run the worker pool.

    Expects keys: threads, freqs (start, stop, step), levels (min, max),
    dims (x, y), sound_speed, file, orders_db. If the database file already
    exists the user is prompted to continue, overwrite, or abort.
    """
    threads = kwargs['threads']
    freqs = kwargs['freqs']
    levels = kwargs['levels']
    dims = kwargs['dims']
    c = kwargs['sound_speed']
    file = kwargs['file']
    orders_db = kwargs['orders_db']
    # set default threads to logical core count
    if threads is None:
        threads = multiprocessing.cpu_count()
        kwargs['threads'] = threads
    # path to this module's directory
    module_dir = os.path.dirname(os.path.realpath(__file__))
    # set default file name for database
    if file is None:
        file = os.path.join(module_dir, 'translations_dims_{:0.4f}_{:0.4f}.db'.format(*dims))
        kwargs['file'] = file
    # set default file name of orders database to use
    if orders_db is None:
        orders_db = os.path.join(module_dir, 'orders_dims_{:0.4f}_{:0.4f}.db'.format(*dims))
        kwargs['orders_db'] = orders_db
    # read orders database and form interpolating functions
    # orders_interp_funcs = get_orders_interp_funcs(orders_db, levels)
    # check for existing file
    if os.path.isfile(file):
        # conn = sql.connect(file)
        response = input('Database ' + str(file) + ' already exists. \nContinue (c), Overwrite (o), or Do nothing ('
                         'any other key)?')
        if response.lower() in ['o', 'overwrite']:
            os.remove(file)
            # determine frequencies and wavenumbers
            f_start, f_stop, f_step = freqs
            fs = np.arange(f_start, f_stop + f_step, f_step)
            ks = 2 * np.pi * fs / c
            # create database
            with closing(sql.connect(file)) as conn:
                # create database tables
                create_metadata_table(conn, **kwargs)
                create_frequencies_table(conn, fs, ks)
                create_levels_table(conn, levels)
                create_coordinates_table(conn)
                create_translations_table(conn)
        elif response.lower() in ['c', 'continue']:
            # resume: only process frequencies not yet marked complete
            with closing(sql.connect(file)) as conn:
                query = '''
                        SELECT frequency, wavenumber FROM frequencies
                        WHERE is_complete=0
                        '''
                table = pd.read_sql(query, conn)
                fs = np.array(table['frequency'])
                ks = np.array(table['wavenumber'])
        else:
            raise Exception('Database already exists')
    else:
        # Make directories if they do not exist
        file_dir = os.path.dirname(file)
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
        # determine frequencies and wavenumbers
        f_start, f_stop, f_step = freqs
        fs = np.arange(f_start, f_stop + f_step, f_step)
        ks = 2 * np.pi * fs / c
        # create database
        with closing(sql.connect(file)) as conn:
            # create database tables
            create_metadata_table(conn, **kwargs)
            create_frequencies_table(conn, fs, ks)
            create_levels_table(conn, levels)
            create_coordinates_table(conn)
            create_translations_table(conn)
    try:
        # start multiprocessing pool and run process
        write_lock = multiprocessing.Lock()
        pool = multiprocessing.Pool(threads, initializer=init_process, initargs=(write_lock,))
        proc_args = [(file, f, k, dims, levels, orders_db) for f, k in zip(fs, ks)]
        result = pool.imap_unordered(process, proc_args)
        # tqdm drives the lazy iterator and shows progress, one tick per frequency
        for r in tqdm(result, desc='Building', total=len(fs)):
            pass
    except Exception as e:
        print(e)
    finally:
        pool.terminate()
        pool.close()
## DATABASE FUNCTIONS ##
def create_metadata_table(conn, **kwargs):
    """Write the run configuration into a single-row 'metadata' table.

    Column names are the keyword names; every value is stringified. An
    existing 'metadata' table is replaced.
    """
    # One row whose cells are the stringified config values.
    # (fix: dropped the redundant list() wrapper around kwargs.values())
    table = [[str(v) for v in kwargs.values()]]
    columns = list(kwargs.keys())
    pd.DataFrame(table, columns=columns, dtype=str).to_sql('metadata', conn, if_exists='replace', index=False)
def create_frequencies_table(conn, fs, ks):
    """Create the 'frequencies' table and seed one row per (frequency, wavenumber).

    Both columns get unique indexes; every row starts with is_complete = 0 so
    interrupted runs can be resumed later.
    """
    conn.execute(
        'CREATE TABLE frequencies ('
        ' frequency float,'
        ' wavenumber float,'
        ' is_complete boolean'
        ')'
    )
    # frequency and wavenumber are both used as lookup keys elsewhere
    conn.execute('CREATE UNIQUE INDEX frequency_index ON frequencies (frequency)')
    conn.execute('CREATE UNIQUE INDEX wavenumber_index ON frequencies (wavenumber)')
    rows = [(f, k, False) for f, k in zip(fs, ks)]
    conn.executemany(
        'INSERT INTO frequencies (frequency, wavenumber, is_complete) VALUES (?, ?, ?)',
        rows
    )
    conn.commit()
def update_progress(conn, f):
    """Flag the row for frequency *f* as finished (is_complete = 1)."""
    conn.execute(
        'UPDATE frequencies SET is_complete=1 WHERE frequency=?',
        (f,)
    )
    conn.commit()
def create_levels_table(conn, levels):
    """Create and fill the 'levels' table with minlevel..maxlevel inclusive."""
    lo, hi = levels
    conn.execute('CREATE TABLE levels (level int)')
    conn.execute('CREATE UNIQUE INDEX level_index ON levels (level)')
    conn.executemany('INSERT INTO levels (level) VALUES (?)',
                     [(lvl,) for lvl in range(lo, hi + 1)])
    conn.commit()
def create_coordinates_table(conn):
    """Store the unique (x, y, z) neighbour offsets reported by the FMA core."""
    offsets = fma.get_unique_coords()
    conn.execute('CREATE TABLE coordinates (x int, y int, z int)')
    conn.execute('CREATE UNIQUE INDEX coordinates_index ON coordinates (x, y, z)')
    conn.executemany('INSERT INTO coordinates VALUES (?, ?, ?)', offsets)
    conn.commit()
def create_translations_table(conn):
    """Create the main 'translations' table that stores operator samples.

    Real and imaginary parts are stored in separate float columns because
    sqlite has no native complex type.
    """
    conn.execute('''
        CREATE TABLE translations (
        id INTEGER PRIMARY KEY,
        frequency float,
        wavenumber float,
        level int,
        x int,
        y int,
        z int,
        theta float,
        phi float,
        ntheta int,
        nphi int,
        translation_order int,
        translation_real float,
        translation_imag float,
        FOREIGN KEY (frequency) REFERENCES frequencies (frequency),
        FOREIGN KEY (wavenumber) REFERENCES frequencies (wavenumber),
        FOREIGN KEY (level) REFERENCES levels (level),
        FOREIGN KEY (x, y, z) REFERENCES coordinates (x, y, z)
        )
    ''')
    # lookups are always by (frequency, level, offset), so index that tuple
    conn.execute('CREATE INDEX translation_index ON translations (frequency, level, x, y, z)')
    conn.commit()
def update_translations_table(conn, f, k, l, order, coord, thetas, phis, translations):
    """Insert one row per (theta, phi) sample of a translation operator.

    The complex operator array is split into real/imag columns; the angular
    grid is flattened in 'ij' order to match the operator's memory layout.
    """
    x, y, z = coord
    ntheta, nphi = len(thetas), len(phis)
    thetav, phiv = np.meshgrid(thetas, phis, indexing='ij')
    flat_theta = thetav.ravel()
    flat_phi = phiv.ravel()
    flat_real = np.real(translations.ravel())
    flat_imag = np.imag(translations.ravel())
    rows = [
        (f, k, l, x, y, z, ntheta, nphi,
         flat_theta[i], flat_phi[i], order, flat_real[i], flat_imag[i])
        for i in range(flat_theta.size)
    ]
    conn.executemany(
        'INSERT INTO translations (frequency, wavenumber, level, x, y, z, ntheta, nphi, theta, phi, '
        'translation_order, translation_real, translation_imag) '
        'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
        rows
    )
    conn.commit()
## COMMAND LINE INTERFACE ##
if __name__ == '__main__':
    import argparse
    # Command-line entry point: defaults mirror the 4 mm x 4 mm bounding box
    # used elsewhere in the project.
    # default arguments
    nthreads = None
    freqs = 50e3, 50e6, 50e3
    levels = 2, 6
    dims = 4e-3, 4e-3
    sound_speed = 1500
    file = None
    orders_db = None
    # define and parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('file', nargs='?', default=file)
    parser.add_argument('-t', '--threads', type=int, default=nthreads)
    parser.add_argument('-f', '--freqs', nargs=3, type=float, default=freqs)
    parser.add_argument('-l', '--levels', nargs=2, type=int, default=levels)
    parser.add_argument('-d', '--dims', nargs=2, type=float, default=dims)
    parser.add_argument('-o', '--orders-db', type=str, default=orders_db)
    parser.add_argument('--sound-speed', type=float, default=sound_speed)
    args = vars(parser.parse_args())
    main(**args)
# --- (end of snippet) ---
from flask import request, render_template_string, session, abort, Response
import pathlib
from io import StringIO
class Watch_HTML():
    """fHDHR web endpoint that renders the in-browser channel player page ('/webwatch')."""
    endpoints = ["/webwatch"]
    endpoint_name = "page_webwatch_html"
    endpoint_access_level = 0

    def __init__(self, fhdhr, plugin_utils):
        self.fhdhr = fhdhr
        self.template_file = pathlib.Path(plugin_utils.config.dict["plugin_web_paths"][plugin_utils.namespace]["path"]).joinpath('webwatch.html')
        self.template = StringIO()
        # Read the template once at startup.
        # fix: use a context manager -- the original left the file handle open.
        with open(self.template_file) as template_handle:
            self.template.write(template_handle.read())

    def __call__(self, *args):
        return self.get(*args)

    def get(self, *args):
        """Resolve the requested channel and render the player template.

        Aborts with 404 for an unknown channel and 503 for a disabled one.
        """
        watch_url = None
        # fix: channel_dict was previously undefined when no valid origins
        # exist, causing a NameError at the final render call.
        channel_dict = {}
        origin_methods = self.fhdhr.origins.valid_origins
        if len(self.fhdhr.origins.valid_origins):
            channel_number = request.args.get('channel', None, type=str)
            if not channel_number:
                return "Missing Channel"
            origin = request.args.get('origin', default=origin_methods[0], type=str)
            if origin:
                # look the channel up by number first, then by id, within the origin
                if str(channel_number) in [str(x) for x in self.fhdhr.device.channels.get_channel_list("number", origin)]:
                    chan_obj = self.fhdhr.device.channels.get_channel_obj("number", channel_number, origin)
                elif str(channel_number) in [str(x) for x in self.fhdhr.device.channels.get_channel_list("id", origin)]:
                    chan_obj = self.fhdhr.device.channels.get_channel_obj("id", channel_number, origin)
                else:
                    response = Response("Not Found", status=404)
                    response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
                    self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
                    abort(response)
            else:
                # no origin given: only an id lookup is possible
                if str(channel_number) in [str(x) for x in self.fhdhr.device.channels.get_channel_list("id")]:
                    chan_obj = self.fhdhr.device.channels.get_channel_obj("id", channel_number)
                else:
                    response = Response("Not Found", status=404)
                    response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
                    self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
                    abort(response)
            if not chan_obj.dict["enabled"]:
                response = Response("Service Unavailable", status=503)
                response.headers["X-fHDHR-Error"] = str("806 - Tune Failed")
                self.fhdhr.logger.error(response.headers["X-fHDHR-Error"])
                abort(response)
            origin = chan_obj.origin
            channel_number = chan_obj.number
            channel_id = chan_obj.dict["id"]
            watch_url = '/api/webwatch?method=stream&channel=%s&origin=%s' % (channel_id, origin)
            # enrich the template context with what's currently airing
            whatson = self.fhdhr.device.epg.whats_on_now(chan_obj.number, origin, chan_obj=chan_obj)
            channel_dict = chan_obj.dict.copy()
            channel_dict["number"] = chan_obj.number
            channel_dict["chan_thumbnail"] = chan_obj.thumbnail
            current_listing = whatson["listing"][0]
            channel_dict["listing_title"] = current_listing["title"]
            channel_dict["listing_thumbnail"] = current_listing["thumbnail"]
            channel_dict["listing_description"] = current_listing["description"]
        return render_template_string(self.template.getvalue(), request=request, session=session, fhdhr=self.fhdhr, watch_url=watch_url, channel_dict=channel_dict)
# --- (end of snippet) ---
# File: filter_graphml.py (repository: alexmanuele/Indizio)
import networkx as nx
import argparse
# CLI definition: all arguments are required; thresholds select which edges
# survive before the neighborhood search.
argparser = argparse.ArgumentParser(description='Filter GraphML file to explore relationships.')
requiredNamed = argparser.add_argument_group('required named arguments')
requiredNamed.add_argument('-i', help='Input GraphML file.', required=True)
requiredNamed.add_argument('-n', help='Node of interest. Must be an exact match with a node in the graph.', required=True)
requiredNamed.add_argument('-d', help='Degree of neighborhood from node of interest to include.', required=True, type=int)
requiredNamed.add_argument('-lr', help='Likelihood ratio threshold. Edges below this value will be excluded.', required=True, type=float)
requiredNamed.add_argument('-p', help='P-value ratio threshold. Edges above this value will be excluded.', required=True, type=float)
requiredNamed.add_argument('-o', help='Path for output file.', required=True)
#Function uses dijkstra to calculate paths through the graph.
#Given a node and a degree, returns the nodes within degree n
def neighborhood(G, node, n):
    """Return every node of G whose shortest-path distance from *node* is at most n."""
    distances = nx.single_source_dijkstra_path_length(G, node)
    return [candidate for candidate, dist in distances.items() if dist <= n]
if __name__=='__main__':
    # Parse CLI arguments, filter edges by thresholds, then write the
    # n-degree neighborhood of the chosen node to a new GraphML file.
    args = argparser.parse_args()
    inpath = args.i
    outpath = args.o
    node = args.n
    degree = args.d
    lr_threshold = args.lr
    p_threshold = args.p
    G = nx.graphml.read_graphml(inpath)
    # fix: validate explicitly instead of abusing assert (stripped under -O)
    # wrapped in a bare except; also interpolate the node name into the
    # message (the original printed a literal '{}').
    if node not in G.nodes:
        print("Node {} was not found in the graph. Please double check spelling of the node and file path.".format(node))
        exit()
    # keep only edges that pass both thresholds
    edges = []
    for u, v, e in G.edges(data=True):
        if e['lr'] >= lr_threshold and e['p'] <= p_threshold:
            edges.append((u, v))
    H = G.edge_subgraph(edges)
    try:
        selected = neighborhood(H, node, degree)
    # fix: catch the specific networkx error instead of a bare except
    except nx.NodeNotFound:
        print("The node was not found in the filtered graph.")
        print("Try specifying a different node or different thresholds.")
        exit()
    nx.readwrite.graphml.write_graphml(H.subgraph(selected), outpath)
# --- (end of snippet) ---
# -*- coding:utf-8 -*-
import os
import traceback
import time
import numpy as np
#os.environ['KERAS_BACKEND'] = "theano"
#os.environ['THEANO_FLAGS'] = "device=cpu"
from utils.preprocess import *
from utils.load_data import *
import csv
from utils.Extract_Features import Extract_Features
from keras.models import model_from_json
from models.clf_mlp import clf_model
import tushare as ts
# import myglobal
import time
# Shared state for the per-stock training runs below.
dates = []
oneDayLine = []
thirtyDayLine = []
month_dates = []
acc_result = []
from multiprocessing import Pool, Array, Process, Manager
# manager = Manager()
# acc_result = []
# Manager list so worker processes can append results safely.
acc_result = Manager().list()
models_path = './data/models_22_test/'
# Delete the previous model directory -- CAUTION: destructive (disabled below).
import shutil
# shutil.rmtree(models_path,True)
# os.remove('./models/*') # clear out old models
if os.path.isdir(models_path) is not True:
    os.mkdir(models_path)
stock_data_path = './data/stock_data/'
if os.path.isdir(stock_data_path) is not True:
    os.mkdir(stock_data_path)
# Result file name: adjust the suffix when switching between sz and sh runs.
choose_stock_results = './data/sort_results_22_test.csv'
# Shanghai (sh) code range: 600004 .. 603999
stock_code_start_sh = 600004
stock_code_end_sh = 600009 #603999
# Shenzhen (sz) code range: 000002 .. 002815
stock_code_start_sz = 2
stock_code_end_sz = 9 #2815
download_economy()
stock_codes = [code for code in range(stock_code_start_sh, stock_code_end_sh)] #603996
stock_codes += [code for code in range(stock_code_start_sz, stock_code_end_sz)]
# Shanghai composite index features (open/close/volume/MA5/VMA5 per date).
open_index_sh, close_index_sh, volume_index_sh, ma5_index_sh, vma5_index_sh, dates_index_sh = load_index_open_close_volume_ma5_vma5_from_tushare(
    stock_data_path + '../sh.csv')
# Shenzhen component index features.
open_index_sz, close_index_sz, volume_index_sz, ma5_index_sz, vma5_index_sz, dates_index_sz = load_index_open_close_volume_ma5_vma5_from_tushare(
    stock_data_path + '../sz.csv')
def compute_code(code):
    """Download data for one stock code, train an MLP up/down classifier, record its test accuracy.

    Appends [code, accuracy] to the shared acc_result list; returns early on
    download failure or when fewer than 400 trading days are available.
    (Python 2 code -- note the print statements.)
    """
    time.sleep(0.1)  # throttle requests to the tushare API
    # Shenzhen codes are zero-padded to six digits (e.g. 2 -> '000002').
    if len(str(code))<6:
        code = ''.join('0' for _ in range(6-len(str(code))))+str(code)
    try:
        # print "this is code ", code
        if download_fq_data_from_tushare(code):
            print code, "download over ~ "
        else:
            print('failed to download %s' % code)
            return
        download_from_tushare(code)
        # oneDayLine, dates = load_data_from_tushare(stock_data_path + str(code) + '.csv')
        # volume, volume_dates = load_volume_from_tushare(stock_data_path + str(code) + '.csv')
        open_price, oneDayLine, volume, ma5, vma5, turnover, dates = load_fq_open_close_volume_ma5_vma5_turnover_from_tushare(stock_data_path + str(code) + '_fq.csv')
        if (str(code)[0] == '6'):
            # codes starting with '6' trade on Shanghai -- use the sh index features
            open_index, close_index, volume_index, ma5_index, vma5_index, dates_index = open_index_sh, close_index_sh, volume_index_sh, ma5_index_sh, vma5_index_sh, dates_index_sh
        else:
            # otherwise use the Shenzhen index features
            open_index, close_index, volume_index, ma5_index, vma5_index, dates_index = open_index_sz, close_index_sz, volume_index_sz, ma5_index_sz, vma5_index_sz, dates_index_sz
        # thirtyDayLine, month_dates = load_data_from_tushare(stock_data_path + str(code) + '_month.csv')
        # require a reasonable history before training
        if len(oneDayLine) < 400:
            return
        ef = Extract_Features()
        daynum = 5
        '''
        ~~~~~ for classification ~~~~~~ X is delta close price, y is 10 for increase while 01 for decrease
        '''
        X_clf = []
        y_clf = []
        for i in range(daynum, len(oneDayLine)-1):
            # large-trade volume for this stock on this date
            big_deals = get_big_deal_volume(code, dates[i])
            '''
            '''
            p = dates_index.index(dates[i])
            # feature vector: 5-day price/volume deltas, turnover, MAs, index features, calendar features
            X_delta = [oneDayLine[k] - oneDayLine[k - 1] for k in range(i - daynum, i)] + \
                      [volume[k] - volume[k-1] for k in range(i - daynum, i)] + \
                      [turnover[k] for k in range(i - daynum, i)] + \
                      [ma5[i]] + \
                      [vma5[i]] + \
                      [open_index[p]] + [close_index[p]] + [volume_index[p]] + [ma5_index[p]] + [vma5_index[p]] + \
                      [big_deals] + \
                      [ef.parse_weekday(dates[i])] + \
                      [ef.lunar_month(dates[i])] + \
                      [ef.MoneySupply(dates[i])]
            # [ef.rrr(dates[i - 1])] + \
            X_clf.append(X_delta)
            y_clf.append([1, 0] if oneDayLine[i + 1] - oneDayLine[i] > 0 else [0, 1])
            # y_clf.append([1, 0] if ((oneDayLine[i] - oneDayLine[i - 1])/oneDayLine[i - 1]) > 0.01 else [0, 1])
        # X_clf = preprocessing.MinMaxScaler().fit_transform(X_clf)
        y_clf = preprocessing.MinMaxScaler().fit_transform(y_clf)
        #!
        X_clf_train, X_clf_test, y_clf_train, y_clf_test = create_Xt_Yt(X_clf, y_clf, 0.86)#0.8
        input_dime = len(X_clf[0])
        # out = input_dime * 2 + 1
        if True:#not os.path.isfile('./data/model_'+str(code)+'.h5'):
            model = clf_model(input_dime)
            model.fit(np.array(X_clf_train),
                      np.array(y_clf_train),
                      nb_epoch=700,
                      batch_size=50,
                      verbose=0,
                      # validation_split=0.12
                      )
            # serialize model to JSON
            model_json = model.to_json()
            with open(models_path + "model_" + str(code) + ".json", "w") as json_file:
                json_file.write(model_json)
            # serialize weights to HDF5
            model.save_weights(models_path +"model_" + str(code) + ".h5")
            print("Saved model to disk")
        else:
            json_file = open(models_path + 'model_' + str(code) + '.json', 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            model = model_from_json(loaded_model_json)
            # load weights into new model
            model.load_weights(models_path + "model_" + str(code) + ".h5")
            print("Loaded model from disk")
            print "model" + str(code) + "loaded!"
        score = model.evaluate(np.array(X_clf_test), np.array(y_clf_test), batch_size=10)
        print "****************************************"
        print 'code =', code
        print "****************************************"
        print ""
        print "test : ", model.metrics_names[0], score[0], model.metrics_names[1], score[1]
        print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
        acc_result.append([code, score[1]])
        # return [code, score[1]]
    except Exception as e:
        traceback.print_exc()
        # any download/shape problem for this code is logged and skipped
        print code, "is non type or is too less data!"
        return
# for stock in stock_codes:
#     compute_code(stock)
"""
#并行化
from multiprocessing import Pool
# Make the Pool of workers
pool = Pool(1)
# Open the urls in their own threads
# and return the results
results = pool.map(compute_code, stock_codes)
#close the pool and wait for the work to finish
pool.close()
pool.join()
"""
# Debug run for a single stock; NOTE(review): everything after exit(-1)
# below is unreachable while this early exit remains in place.
compute_code(600082)
exit(-1)
# Sort results by accuracy, best first.
sort = sorted(acc_result, key=lambda x: (x[1]), reverse=True)
print sort
# Persist the ranked (code, accuracy) pairs.
with open(choose_stock_results, 'wb') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(sort) # in order of list
print 'acc_result', len(acc_result)
# Mean test accuracy over all processed stocks.
print "*******************************"
print sum([acc_result[i][1] for i in range(0, len(acc_result))])*1.0/len(acc_result)
print "*******************************"
| StarcoderdataPython |
1701871 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from base.AwsService import AwsService
class S3(AwsService):
    """Scraper for the S3 section of the AWS Simple Monthly Calculator form.

    Reads and writes the calculator's form fields through the CSS-selector
    helpers inherited from AwsService (get_element / get_value /
    get_val_and_type and the matching setters).
    """
    def set_serviceConfig(self, config):
        # Generic framework entry point; delegates to the S3-specific setter.
        self.set_s3Service(config)
    def get_serviceConfig(self):
        # Generic framework entry point; delegates to the S3-specific getter.
        ret=self.get_s3Service()
        return ret
    def get_s3Service(self):
        """Read every S3 form field and return it as a nested dict.

        The returned structure mirrors what set_s3Service() accepts.
        """
        ## Standard Storage:
        section = self.get_element('table.service.S3Service table.subSection:nth-child(1)')
        # Standard Storage size (value + unit, e.g. GB/TB):
        s3_size ,s3_size_unit = self.get_val_and_type("table.SF_S3_STORAGE", section)
        # PUT/COPY/POST/LIST requests
        s3_req_put = int(self.get_value("table.SF_S3_PUT_COPY_POST_LIST_REQUESTS input", section))
        # GET and all other requests
        s3_req_get =int(self.get_value("table.SF_S3_GET_OTHER_REQUESTS input", section))
        ## Standard - Infrequent Access Storage:
        section = self.get_element('table.service.S3Service table.subSection:nth-child(2)')
        # Infrequent Access Storage size:
        ia_size ,ia_size_unit = self.get_val_and_type("table.SF_S3_IA_STORAGE", section)
        # PUT/COPY/POST/LIST requests
        ia_req_put = int(self.get_value("table.SF_S3_PUT_COPY_POST_LIST_REQUESTS input", section))
        # GET and all other requests
        ia_req_get =int(self.get_value("table.SF_S3_GET_OTHER_REQUESTS input", section))
        # Lifecycle Transitions
        ia_transitions = int(self.get_value("table.SF_S3_LIFECYCLE_TRANSITION_REQUESTS input", section))
        # Data Retrieval (value + unit)
        ia_retrieval ,ia_retrieval_unit = self.get_val_and_type("table.SF_S3_DATA_RETRIEVALS", section)
        ## Reduced Redundancy Storage:
        section = self.get_element('table.service.S3Service table.subSection:nth-child(3)')
        # Reduced redundancy storage size:
        rr_size ,rr_size_unit = self.get_val_and_type("table.SF_S3_RR_STORAGE", section)
        # PUT/COPY/POST/LIST requests
        rr_req_put = int(self.get_value("table.SF_S3_PUT_COPY_POST_LIST_REQUESTS input", section))
        # GET and all other requests
        rr_req_get =int(self.get_value("table.SF_S3_GET_OTHER_REQUESTS input", section))
        # Data transfer:
        section = self.get_element('table.service.S3Service table.subSection:nth-child(4)')
        # Inter-region data transfer out:
        inter_region , inter_region_type = self.get_val_and_type("div.subContent > table:nth-child(1)", section)
        # Data transfer out (to the internet):
        internet_out , internet_out_type = self.get_val_and_type("div.subContent > table:nth-child(2)", section)
        # Data transfer in:
        internet_in , internet_in_type = self.get_val_and_type("div.subContent > table:nth-child(3)", section)
        return {
            'StandardStorage': {
                'Size' : {
                    'Value' : s3_size,
                    'Type' : s3_size_unit
                },
                'PutCopyPostListRequests' : s3_req_put,
                'GetOtherRequests' : s3_req_get,
            },
            'InfrequentAccessStorage': {
                'Size' : {
                    'Value' : ia_size,
                    'Type' : ia_size_unit
                },
                'PutCopyPostListRequests' : ia_req_put,
                'GetOtherRequests' : ia_req_get,
                'LifecycleTransitions' : ia_transitions,
                'DataRetrieval' : {
                    'Value' : ia_retrieval,
                    'Type' : ia_retrieval_unit
                }
            },
            'ReducedRedundancy': {
                'Size' : {
                    'Value' : rr_size,
                    'Type' : rr_size_unit
                },
                'PutCopyPostListRequests' : rr_req_put,
                'GetOtherRequests' : rr_req_get
            },
            "InterRegion" : {
                "Value" : inter_region,
                "Type" : inter_region_type
            },
            "InternetSend" : {
                "Value" : internet_out,
                "Type" : internet_out_type
            },
            "InternetReceive" : {
                "Value" : internet_in,
                "Type" : internet_in_type
            }
        }
    def set_s3Service(self, conf):
        """Write *conf* (same shape as get_s3Service's result) into the form.

        Every top-level key of *conf* is optional; absent keys leave the
        corresponding form section untouched.
        """
        table = self.get_element('table.service.S3Service')
        ## Standard Storage:
        section = self.get_element('table.service.S3Service table.subSection:nth-child(1)')
        if 'StandardStorage' in conf:
            s = conf['StandardStorage']
            # Standard Storage size:
            self.set_val_and_type('table.SF_S3_STORAGE', s['Size'], section)
            # PUT/COPY/POST/LIST requests
            self.set_value('table.SF_S3_PUT_COPY_POST_LIST_REQUESTS input', s['PutCopyPostListRequests'], section, int)
            # GET and all other requests
            self.set_value('table.SF_S3_GET_OTHER_REQUESTS input', s['GetOtherRequests'], section, int)
        ## Standard - Infrequent Access Storage:
        section = self.get_element('table.service.S3Service table.subSection:nth-child(2)')
        if 'InfrequentAccessStorage' in conf:
            s = conf['InfrequentAccessStorage']
            # Infrequent Access Storage size:
            self.set_val_and_type('table.SF_S3_IA_STORAGE', s['Size'], section)
            # PUT/COPY/POST/LIST requests
            self.set_value('table.SF_S3_PUT_COPY_POST_LIST_REQUESTS input', s['PutCopyPostListRequests'], section, int)
            # GET and all other requests
            self.set_value('table.SF_S3_GET_OTHER_REQUESTS input', s['GetOtherRequests'], section, int)
            # Lifecycle Transitions
            self.set_value('table.SF_S3_LIFECYCLE_TRANSITION_REQUESTS input', s['LifecycleTransitions'], section, int)
            # Data Retrieval
            self.set_val_and_type('table.SF_S3_DATA_RETRIEVALS', s['DataRetrieval'], section)
        ## Reduced Redundancy Storage
        section = self.get_element('table.service.S3Service table.subSection:nth-child(3)')
        if 'ReducedRedundancy' in conf:
            s = conf['ReducedRedundancy']
            # Reduced redundancy storage size:
            self.set_val_and_type('table.SF_S3_RR_STORAGE', s['Size'], section)
            # PUT/COPY/POST/LIST requests
            self.set_value('table.SF_S3_PUT_COPY_POST_LIST_REQUESTS input', s['PutCopyPostListRequests'], section, int)
            # GET and all other requests
            self.set_value('table.SF_S3_GET_OTHER_REQUESTS input', s['GetOtherRequests'], section, int)
        # Data transfer:
        section = self.get_element('table.service.S3Service table.subSection:nth-child(4)')
        # Inter-region data transfer out:
        if 'InterRegion' in conf:
            self.set_val_and_type('div.subContent > table:nth-child(1)', conf['InterRegion'], section)
        # Data transfer out (to the internet):
        if 'InternetSend' in conf:
            self.set_val_and_type('div.subContent > table:nth-child(2)', conf['InternetSend'], section)
        # Data transfer in:
        if 'InternetReceive' in conf:
            self.set_val_and_type('div.subContent > table:nth-child(3)', conf['InternetReceive'], section)
| StarcoderdataPython |
3358770 | import pandas as pd
import itertools
from functools import partial
from fastai.callbacks import CSVLogger
def get_config_df(config):
    """Build a DataFrame with one row per combination of the config values.

    Keys of *config* become columns; rows are the cartesian product of the
    value lists, indexed 'model_1', 'model_2', ...
    """
    combos = itertools.product(*config.values())
    frame = pd.DataFrame(list(combos), columns=config.keys())
    frame.index = ['model_%d' % (row + 1) for row in range(len(frame))]
    return frame
def create_experiment(nm, path, folder='results'):
    """Ensure the directory <path>/<folder>/<nm> exists.

    Returns the experiment name together with its directory path.
    """
    target = path / folder / nm
    target.mkdir(exist_ok=True)
    return nm, target
def record_experiment(learn, fn, exp_path):
    """Attach a CSVLogger callback to *learn* that writes to exp_path/fn."""
    logger_factory = partial(CSVLogger, filename=exp_path / fn)
    learn.callback_fns.append(logger_factory)
def load_results(exp_path):
    """Load an experiment folder written by create_experiment/record_experiment.

    Returns (config_df, recorder_df, param_names, metric_names) where
    recorder_df concatenates every per-model/per-fold CSV and is joined with
    the hyper-parameter table from config.csv.

    NOTE(review): exp_path.ls() is fastai's monkey-patched Path.ls — this
    function assumes fastai has been imported; confirm before reuse.
    """
    config_df = pd.read_csv(exp_path/'config.csv', index_col=0)
    param_names = config_df.columns.values
    recorder_df=[]
    for p in exp_path.ls():
        # Only pick up files named after a known model index (e.g. "model_3-fold_2.csv").
        if p.name.startswith(tuple(config_df.index.values)):
            df = pd.read_csv(p)
            # File stem is "<model index>-<fold tag>"; recover both parts.
            ind_name, fold_name = p.stem.split('-')
            df['index']=ind_name
            # Fold tag looks like "fold_2"; keep the trailing integer.
            df['fold']=int(fold_name.split('_')[-1].split('.')[0])
            recorder_df.append(df)
    recorder_df = pd.concat(recorder_df)
    # Metrics are whatever columns are not bookkeeping columns.
    metric_names = list(set(recorder_df.columns).symmetric_difference(['index', 'epoch', 'train_loss', 'fold']))
    # Attach the hyper-parameters of each model to its recorded rows.
    recorder_df = recorder_df.merge(config_df.reset_index())
    return config_df, recorder_df, param_names, metric_names
def summarise_results(recorder_df, param_names, metric_names):
    """Aggregate mean and std of each metric per (index, params, epoch) group."""
    group_cols = ['index', *param_names, 'epoch']
    agg_spec = {metric: ['mean', 'std'] for metric in metric_names}
    return recorder_df.groupby(group_cols, as_index=False).agg(agg_spec)
131765 | <gh_stars>1-10
import graphene
from ...tests.utils import assert_no_permission, get_graphql_content
QUERY_CHANNELS = """
query {
channels {
name
slug
currencyCode
}
}
"""
def test_query_channels_as_staff_user(staff_api_client, channel_USD, channel_PLN):
    """Staff users may list every channel with its basic fields."""
    response = staff_api_client.post_graphql(QUERY_CHANNELS, {})
    content = get_graphql_content(response)

    channels = content["data"]["channels"]
    assert len(channels) == 2
    expected = [
        {
            "slug": channel.slug,
            "name": channel.name,
            "currencyCode": channel.currency_code,
        }
        for channel in (channel_PLN, channel_USD)
    ]
    for entry in expected:
        assert entry in channels
def test_query_channels_as_app(app_api_client, channel_USD, channel_PLN):
    """Apps may list every channel with its basic fields."""
    response = app_api_client.post_graphql(QUERY_CHANNELS, {})
    content = get_graphql_content(response)

    channels = content["data"]["channels"]
    assert len(channels) == 2
    expected = [
        {
            "slug": channel.slug,
            "name": channel.name,
            "currencyCode": channel.currency_code,
        }
        for channel in (channel_PLN, channel_USD)
    ]
    for entry in expected:
        assert entry in channels
def test_query_channels_as_customer(user_api_client, channel_USD, channel_PLN):
    """Listing channels requires staff/app permissions; customers are denied."""
    assert_no_permission(user_api_client.post_graphql(QUERY_CHANNELS, {}))
def test_query_channels_as_anonymous(api_client, channel_USD, channel_PLN):
    """Anonymous requests to list channels are rejected."""
    assert_no_permission(api_client.post_graphql(QUERY_CHANNELS, {}))
QUERY_CHANNEL = """
query getChannel($id: ID!){
channel(id: $id){
id
name
slug
currencyCode
}
}
"""
def test_query_channel_as_staff_user(staff_api_client, channel_USD):
    """Staff users can fetch a single channel by its global ID."""
    channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)

    response = staff_api_client.post_graphql(QUERY_CHANNEL, {"id": channel_id})
    content = get_graphql_content(response)

    data = content["data"]["channel"]
    expected = {
        "id": channel_id,
        "name": channel_USD.name,
        "slug": channel_USD.slug,
        "currencyCode": channel_USD.currency_code,
    }
    for key, value in expected.items():
        assert data[key] == value
def test_query_channel_as_app(app_api_client, channel_USD):
    """Apps can fetch a single channel by its global ID."""
    channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)

    response = app_api_client.post_graphql(QUERY_CHANNEL, {"id": channel_id})
    content = get_graphql_content(response)

    data = content["data"]["channel"]
    expected = {
        "id": channel_id,
        "name": channel_USD.name,
        "slug": channel_USD.slug,
        "currencyCode": channel_USD.currency_code,
    }
    for key, value in expected.items():
        assert data[key] == value
def test_query_channel_as_customer(user_api_client, channel_USD):
    """Fetching a single channel is denied for regular customers."""
    channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
    assert_no_permission(user_api_client.post_graphql(QUERY_CHANNEL, {"id": channel_id}))
def test_query_channel_as_anonymous(api_client, channel_USD):
    """Fetching a single channel is denied for anonymous requests."""
    channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
    assert_no_permission(api_client.post_graphql(QUERY_CHANNEL, {"id": channel_id}))
| StarcoderdataPython |
1717365 | <reponame>ninasm/M242_Uebungsprojekte
import os, pathlib
def RewriteCsv(fname, outName):
    """Convert the hex sample table in *fname* to decimal and write *outName*.

    The input may start with arbitrary preamble lines; the table begins at the
    first line whose first comma-separated field is 'Sample-Nr'.  That header
    line is copied verbatim; every following non-blank line must carry two
    hexadecimal fields, which are rewritten as decimal integers.

    Raises:
        ValueError: if no 'Sample-Nr' header line is present (the original
            implementation crashed with IndexError here).
    """
    with open(fname, "r") as infile:  # was named 'input', shadowing the builtin
        lines = infile.readlines()
    # Locate the header line that marks the start of the data table.
    for header_idx, line in enumerate(lines):
        if line.split(',')[0] == 'Sample-Nr':
            break
    else:
        raise ValueError("no 'Sample-Nr' header found in %s" % fname)
    rewritten = [lines[header_idx]]
    for line in lines[header_idx + 1:]:
        if not line.strip():
            continue  # tolerate a trailing blank line instead of crashing
        sample, value = line.split(',')[:2]
        # int(..., 16) ignores surrounding whitespace, so the newline on the
        # last field is harmless.
        rewritten.append("{0},{1}\n".format(int(sample, 16), int(value, 16)))
    with open(outName, "w") as outfile:
        outfile.writelines(rewritten)
if __name__ == "__main__":
RewriteCsv("Intensity_0.csv", "Intensity_0_dec.csv" )
RewriteCsv("Intensity_4.csv", "Intensity_4_dec.csv" )
RewriteCsv("Intensity_8.csv", "Intensity_8_dec.csv" )
RewriteCsv("Intensity_12.csv", "Intensity_12_dec.csv" )
RewriteCsv("Intensity_16.csv", "Intensity_16_dec.csv" )
RewriteCsv("Intensity_72.csv", "Intensity_72_dec.csv" )
RewriteCsv("Intensity_100.csv", "Intensity_100_dec.csv" )
RewriteCsv("Intensity_108.csv", "Intensity_108_dec.csv" ) | StarcoderdataPython |
1625615 | #------------------------------------------------------------------------------
# Get the status of the color trend report.
# GET /v1/color_trends/{report_name}/trend_report
#------------------------------------------------------------------------------
import os
import json
import requests
from urlparse import urljoin
from pprint import pprint
from props import *
# --- Fetch the color-trend report status (Python 2 script) ----------------
# Replace this with the custom url generated for you.
api_gateway_url = props['api_gateway_url']
# Pass the api key into the header
# Replace 'your_api_key' with your API key.
headers = {'X-Api-Key': props['X-Api-Key']}
report_name = 'vogue-autumn-winter'
# GET /v1/color_trends/{report_name}/trend_report
api_endpoint = '/v1/color_trends/%s/trend_report'%(report_name)
url = urljoin(api_gateway_url,api_endpoint)
response = requests.get(url,headers=headers)
# Print the HTTP status followed by the JSON payload of the report status.
print response.status_code
pprint(response.json())
| StarcoderdataPython |
109303 | """ Use this module for imports to extend the Admin classes
"""
from django.conf import settings
# Re-export the admin class of every optional facebook sub-app that is
# actually installed, so downstream code can import them all from this one
# module without checking INSTALLED_APPS itself.
if 'facebook.modules.profile.page' in settings.INSTALLED_APPS:
    from facebook.modules.profile.page.admin import PageAdmin
if 'facebook.modules.profile.user' in settings.INSTALLED_APPS:
    from facebook.modules.profile.user.admin import UserAdmin
if 'facebook.modules.profile.event' in settings.INSTALLED_APPS:
    from facebook.modules.profile.event.admin import EventAdmin
if 'facebook.modules.profile.application' in settings.INSTALLED_APPS:
    from facebook.modules.profile.application.admin import RequestAdmin
if 'facebook.modules.media' in settings.INSTALLED_APPS:
    from facebook.modules.media.admin import PhotoAdmin
if 'facebook.modules.connections.post' in settings.INSTALLED_APPS:
    from facebook.modules.connections.post.admin import PostAdmin
if 'facebook.modules.connections.game' in settings.INSTALLED_APPS:
    from facebook.modules.connections.game.admin import ScoreAdmin, AchievementAdmin
| StarcoderdataPython |
1661872 | <filename>logbunker/apps/backoffice/backend/dependencies/BackofficeContainer.py
from dependency_injector import containers, providers
from logbunker.apps.backoffice.backend.controllers.LogGetController import LogGetController
from logbunker.apps.bunker.controllers.StatusGetController import StatusGetController
from logbunker.contexts.backoffice.logs.application.findall.FindLogsByCriteriaQueryHandler import \
FindLogsByCriteriaQueryHandler
from logbunker.contexts.backoffice.logs.application.findall.LogsByCriteriaFinder import LogsByCriteriaFinder
from logbunker.contexts.backoffice.logs.infrastructure.persistence.PyMongoBackofficeLogRepository import \
PyMongoBackofficeLogRepository
from logbunker.contexts.bunker.logs.infrastructure.persistence.mongo.config.PyMongoLogConfigFactory import PyMongoLogConfigFactory
from logbunker.contexts.shared.Infrastructure.persistence.mongo.PyMongoClientFactory import PyMongoClientFactory
from logbunker.contexts.shared.Infrastructure.querybus.InMemoryQueryBus import InMemoryQueryBus
class BackofficeContainer(containers.DeclarativeContainer):
    """Dependency-injection wiring for the backoffice app.

    Each provider is a Singleton, so the whole object graph (Mongo client,
    repository, finder, query handler, query bus, controllers) is built once
    and shared.
    """
    # Mongo connection settings for the 'bunker' database.
    db_config = providers.Singleton(PyMongoLogConfigFactory.create)
    db_client = providers.Singleton(PyMongoClientFactory.create_instance, 'bunker', db_config)
    # Persistence layer and the domain service built on top of it.
    log_repository = providers.Singleton(PyMongoBackofficeLogRepository, db_client)
    log_by_criteria_finder = providers.Singleton(LogsByCriteriaFinder, log_repository)
    find_logs_by_criteria_command_handler = providers.Singleton(
        FindLogsByCriteriaQueryHandler,
        log_by_criteria_finder,
    )
    # Query bus registered with every query handler above.
    query_bus = providers.Singleton(
        InMemoryQueryBus,
        find_logs_by_criteria_command_handler,
    )
    # HTTP controllers exposed by the backoffice backend.
    status_get_controller = providers.Singleton(StatusGetController)
    log_get_controller = providers.Singleton(LogGetController, query_bus)
# Module-level singleton instance used by the application entry points.
backoffice_container: BackofficeContainer = BackofficeContainer()
| StarcoderdataPython |
167836 | <reponame>pollen-robotics/reachy_pyluos_hal<filename>reachy_pyluos_hal/reachy.py
"""Reachy wrapper around serial LUOS GateClients which handle the communication with the hardware."""
import sys
import time
import numpy as np
from collections import OrderedDict, defaultdict
from glob import glob
from logging import Logger
from operator import attrgetter
from threading import Lock
from typing import Dict, List, Tuple
from .config import load_config
from .device import Device
from .discovery import find_gate
from .dynamixel import AX18, DynamixelMotor
from .fan import DxlFan, Fan, OrbitaFan
from .force_sensor import ForceSensor
from .joint import Joint
from .orbita import OrbitaActuator, OrbitaRegister
from .pycore import GateClient, GateProtocol
class Reachy(GateProtocol):
    """Reachy wrapper around serial GateClients which handle the communication with the hardware."""
    # Glob pattern used to discover the serial gates for the current OS.
    if sys.platform == 'linux':
        port_template: str = '/dev/ttyUSB*'
    elif sys.platform == 'darwin':
        port_template: str = '/dev/tty.usbserial*'
    elif sys.platform == 'win32':
        port_template: str = 'COM*'
    else:
        raise OSError('Unsupported platform')
    def __init__(self, config_name: str, logger: Logger) -> None:
        """Create all GateClient defined in the devices class variable.

        Discovers one serial gate per config section, connects to it and
        builds the name->device and id->device lookup tables for dynamixels,
        force sensors, orbitas and fans.
        """
        self.logger = logger
        self.config = load_config(config_name)
        # Delegate that serializes every hardware callback through one lock
        # and forwards it to this Reachy instance (captured via closure).
        class GateProtocolDelegate(GateProtocol):
            lock = Lock()
            def handle_dxl_pub_data(_self, register, ids, errors, values):
                with _self.lock:
                    return self.handle_dxl_pub_data(register, ids, errors, values)
            def handle_load_pub_data(_self, ids: List[int], values: List[bytes]):
                with _self.lock:
                    return self.handle_load_pub_data(ids, values)
            def handle_orbita_pub_data(_self, id: int, register: OrbitaRegister, values: bytes):
                with _self.lock:
                    return self.handle_orbita_pub_data(id, register, values)
            def handle_fan_pub_data(_self, fan_ids: List[int], states: List[int]):
                with _self.lock:
                    return self.handle_fan_pub_data(fan_ids, states)
            def handle_assert(_self, msg: bytes):
                with _self.lock:
                    return self.handle_assert(msg)
        self.gates: List[GateClient] = []
        self.gate4name: Dict[str, GateClient] = {}
        self.dxls: Dict[str, Joint] = OrderedDict({})
        self.dxl4id: Dict[int, DynamixelMotor] = {}
        self.fans: Dict[str, Fan] = OrderedDict({})
        self.fan4id: Dict[int, Fan] = {}
        self.orbita4id: Dict[int, OrbitaActuator] = {}
        self.orbitas: Dict[str, OrbitaActuator] = OrderedDict({})
        self.force_sensors: Dict[str, ForceSensor] = OrderedDict({})
        self.force4id: Dict[int, ForceSensor] = {}
        # Candidate serial ports for this platform.
        self.ports = glob(self.port_template)
        if len(self.ports) == 0:
            raise IOError(f'No Gate found on "{self.port_template}"')
        missing_parts_cards = {}
        # Maps the first device of each config section to the robot part name
        # used for error reporting.
        first_piece_to_part = {
            "l_shoulder_pitch": "left_arm",
            "r_shoulder_pitch": "right_arm",
            "neck": "head",
        }
        for devices in self.config:
            self.logger.info(f'Looking for {list(devices.keys())} on {self.ports}.')
            port, matching, missing = find_gate(devices, self.ports, self.logger)
            missing_containers = [{container.__module__: container.id} for container in missing]
            first_piece = list(devices.keys())[0]
            missing_parts_cards[first_piece_to_part[first_piece]] = missing_containers
            if len(missing) > 0:
                # Skip the whole part when any of its devices is absent.
                self.logger.warning(f'Could not find {missing} on {port}')
                continue
            self.logger.info(f'Found devices on="{port}", connecting...')
            gate = GateClient(port=port, protocol_factory=GateProtocolDelegate)
            self.gates.append(gate)
            for name, dev in devices.items():
                self.gate4name[name] = gate
                if isinstance(dev, DynamixelMotor):
                    self.dxls[name] = dev
                    if dev.id in self.dxl4id:
                        raise ValueError(f'All dynamixels id should be unique ({dev})!')
                    self.dxl4id[dev.id] = dev
                if isinstance(dev, ForceSensor):
                    self.force_sensors[name] = dev
                    if dev.id in self.force4id:
                        raise ValueError(f'All force sensors id should be unique ({dev})!')
                    self.force4id[dev.id] = dev
                if isinstance(dev, OrbitaActuator):
                    if dev.id in self.orbita4id:
                        raise ValueError(f'All orbitas id should be unique ({dev})!')
                    self.orbita4id[dev.id] = dev
                    self.orbitas[name] = dev
                if isinstance(dev, Fan):
                    self.fans[name] = dev
                    if dev.id in self.fan4id:
                        raise ValueError(f'All fans id should be unique ({dev})!')
                    self.fan4id[dev.id] = dev
                dev.logger = self.logger
        # Fail hard if any part reported missing containers.
        # NOTE(review): MissingContainerError is not imported in the visible
        # header — confirm it is defined elsewhere in this module.
        if not np.array_equal(np.asarray(list(missing_parts_cards.values())).flatten(), np.array([])):
            raise MissingContainerError(missing_parts_cards)
    def __enter__(self):
        """Enter context handler: start hardware communication, return self."""
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit context handler: stop communication and close the gates."""
        self.stop()
def start(self):
"""Start all GateClients (start sending/receiving data with hardware)."""
for gate in self.gates:
gate.start()
gate.protocol.logger = self.logger
self.setup()
def stop(self):
"""Stop all GateClients (start sending/receiving data with hardware)."""
for gate in self.gates:
gate.stop()
    def setup(self):
        """Set up everything before actually using (eg. offset for instance).

        For each orbita: read its factory zero and current absolute position,
        register them as the offset, then trigger a recalibration.
        """
        for name, orbita in self.orbitas.items():
            zero = [int(x) for x in self.get_orbita_values('zero', name, clear_value=True, retry=10)]
            pos = [int(x) for x in self.get_orbita_values('absolute_position', name, clear_value=True, retry=10)]
            orbita.set_offset(zero, pos)
            self.set_orbita_values('recalibrate', name, {'disk_top': True})
def get_all_joints_names(self) -> List[str]:
"""Return the names of all joints."""
dxl_names = list(self.dxls.keys())
orbita_disk_names = []
for name, orbita in self.orbitas.items():
for disk_name in orbita.get_joints_name():
orbita_disk_names.append(f'{name}_{disk_name}')
return dxl_names + orbita_disk_names
    def get_joints_value(self, register: str, joint_names: List[str], retry: int = 10) -> List[float]:
        """Return the value of *register* for each joint, in the given order.

        Dynamixel joints are fetched in one batch; orbita disks are fetched
        per actuator.  'moving_speed' is not reported by orbitas, so it is
        filled with 0.0 for their joints.
        """
        # TODO: both get (dxl and orbita) should run in parallel (via asyncio?)
        # present_position/temperature are pushed periodically by the modules,
        # so their cached values must not be cleared before reading.
        clear_value = False if register in ('present_position', 'temperature') else True
        dxl_names = [name for name in joint_names if name in self.dxls]
        dxl_values = dict(zip(dxl_names, self.get_dxls_value(register, dxl_names, clear_value, retry)))
        orbitas_values = {}
        if register in ['moving_speed']:
            # Orbita has no moving_speed register; report 0.0 for every disk.
            for name in joint_names:
                orbita_name = name.partition('_')[0]
                if orbita_name in self.orbitas:
                    orbita = self.orbitas[orbita_name]
                    for disk in orbita.get_joints_name():
                        orbitas_values[f'{orbita_name}_{disk}'] = 0.0
        else:
            for name in joint_names:
                orbita_name = name.partition('_')[0]
                if orbita_name in self.orbitas:
                    disk_values = self.get_orbita_values(register, orbita_name, clear_value, retry)
                    for disk, val in zip(self.orbitas[orbita_name].get_disks_name(), disk_values):
                        orbitas_values[f'{orbita_name}_{disk}'] = val
                    # Roll/pitch/yaw pseudo-joints have no hardware register.
                    orbitas_values[f'{orbita_name}_roll'] = 0.0
                    orbitas_values[f'{orbita_name}_pitch'] = 0.0
                    orbitas_values[f'{orbita_name}_yaw'] = 0.0
        values = {}
        values.update(dxl_values)
        values.update(orbitas_values)
        # Re-order to match the caller's joint_names order.
        return [values[joint] for joint in joint_names]
    def get_joints_pid(self, joint_names: List[str], retry: int = 10) -> List[Tuple[float, float, float]]:
        """Return the gains of the specified joints, in the given order.

        Most dynamixels report (P, I, D); AX18 motors have no PID registers
        and report (cw_margin, ccw_margin, cw_slope, ccw_slope) instead.
        Orbita disks report their own (P, I, D); orbita roll/pitch/yaw
        pseudo-joints report zeros.
        """
        pids: Dict[str, Tuple[float, float, float]] = {}
        dxl_names = [name for name in joint_names if name in self.dxls]
        # AX18 is the only model using compliance margins/slopes instead of PID.
        ax_names = [name for name in dxl_names if isinstance(self.dxls[name], AX18)]
        dxl_names_with_pids = [name for name in dxl_names if name not in ax_names]
        if dxl_names_with_pids:
            dxl_p = self.get_dxls_value('p_gain', dxl_names_with_pids, clear_value=True, retry=retry)
            dxl_i = self.get_dxls_value('i_gain', dxl_names_with_pids, clear_value=True, retry=retry)
            dxl_d = self.get_dxls_value('d_gain', dxl_names_with_pids, clear_value=True, retry=retry)
            for name, p, i, d in zip(dxl_names_with_pids, dxl_p, dxl_i, dxl_d):
                pids[name] = [float(gain) for gain in (p, i, d)]
        if ax_names:
            cw_margin = self.get_dxls_value('cw_compliance_margin', ax_names, clear_value=True, retry=retry)
            ccw_margin = self.get_dxls_value('ccw_compliance_margin', ax_names, clear_value=True, retry=retry)
            cw_slope = self.get_dxls_value('cw_compliance_slope', ax_names, clear_value=True, retry=retry)
            ccw_slope = self.get_dxls_value('ccw_compliance_slope', ax_names, clear_value=True, retry=retry)
            for name, cwm, ccwm, cws, ccws in zip(ax_names, cw_margin, ccw_margin, cw_slope, ccw_slope):
                pids[name] = [float(gain) for gain in (cwm, ccwm, cws, ccws)]
        for name in joint_names:
            orbita_name = name.partition('_')[0]
            if orbita_name in self.orbitas:
                orbita_pids = self.get_orbita_values('pid', orbita_name, clear_value=True, retry=retry)
                for disk, val in zip(self.orbitas[orbita_name].get_disks_name(), orbita_pids):
                    pids[f'{orbita_name}_{disk}'] = list(val)
                # Pseudo-joints have no gains of their own.
                pids[f'{orbita_name}_roll'] = [0.0, 0.0, 0.0]
                pids[f'{orbita_name}_pitch'] = [0.0, 0.0, 0.0]
                pids[f'{orbita_name}_yaw'] = [0.0, 0.0, 0.0]
        return [pids[name] for name in joint_names]
    def set_joints_value(self, register: str, value_for_joint: Dict[str, float]):
        """Set *register* for the given joints, dispatching dxl vs orbita.

        Unknown joint names are logged and ignored; 'moving_speed' is not
        supported by orbitas and is silently dropped for them.
        """
        dxl_values: Dict[str, float] = {}
        orbita_values: Dict[str, Dict[str, float]] = {}
        for name, value in value_for_joint.items():
            # Orbita joints are named '<actuator>_<disk>'.
            orbita_name, _, disk_name = name.partition('_')
            if name in self.dxls:
                dxl_values[name] = value
            elif orbita_name in self.orbitas:
                if disk_name not in self.orbitas[orbita_name].get_disks_name():
                    # roll/pitch/yaw pseudo-joints cannot be written directly.
                    continue
                if orbita_name not in orbita_values:
                    orbita_values[orbita_name] = {}
                orbita_values[orbita_name][disk_name] = value
            else:
                self.logger.warning(f'"{name}" is an unknown joints!')
        if dxl_values:
            self.set_dxls_value(register, dxl_values)
        if orbita_values:
            if register == 'moving_speed':
                if self.logger is not None:
                    self.logger.debug('Speed for orbita not handled!')
                return
            for orbita, values in orbita_values.items():
                self.set_orbita_values(register, orbita, values)
    def set_joints_pid(self, goal_pids: Dict[str, Tuple[float, float, float]]) -> None:
        """Set the gains for the specified joints.

        AX18 dynamixels take a 4-tuple of compliance margins/slopes; every
        other dynamixel takes (P, I, D); orbita disks take a strict (P, I, D)
        triplet.  Raises ValueError for unknown joint names or malformed
        orbita triplets.
        """
        dxl_pids: Dict[str, Tuple[float, float, float]] = {}
        orbita_pids: Dict[str, Dict[str, Tuple[float, float, float]]] = {}
        for name, value in goal_pids.items():
            orbita_name, _, disk_name = name.partition('_')
            if name in self.dxls:
                # Dynamixel gain registers are integers.
                dxl_pids[name] = [int(gain) for gain in value]
            elif orbita_name in self.orbitas:
                if disk_name not in self.orbitas[orbita_name].get_disks_name():
                    # roll/pitch/yaw pseudo-joints have no gains to set.
                    continue
                if orbita_name not in orbita_pids:
                    orbita_pids[orbita_name] = {}
                if len(value) != 3:
                    raise ValueError(f'Orbita PIDs should be a triplet ({value})')
                orbita_pids[orbita_name][disk_name] = value
            else:
                raise ValueError(f'"{name}" is an unknown joints!')
        if dxl_pids:
            # Split the dynamixels into AX18 (compliance regs) and the rest (PID regs).
            ax, other = {}, {}
            for name, value in dxl_pids.items():
                dxl = self.dxls[name]
                if isinstance(dxl, AX18):
                    ax[name] = value
                else:
                    other[name] = value
            if ax:
                cwm, ccwm, cws, ccws = zip(*ax.values())
                self.set_joints_value('cw_compliance_margin', dict(zip(ax.keys(), cwm)))
                self.set_joints_value('ccw_compliance_margin', dict(zip(ax.keys(), ccwm)))
                self.set_joints_value('cw_compliance_slope', dict(zip(ax.keys(), cws)))
                self.set_joints_value('ccw_compliance_slope', dict(zip(ax.keys(), ccws)))
            if other:
                p, i, d = zip(*other.values())
                self.set_joints_value('p_gain', dict(zip(other.keys(), p)))
                self.set_joints_value('i_gain', dict(zip(other.keys(), i)))
                self.set_joints_value('d_gain', dict(zip(other.keys(), d)))
        if orbita_pids:
            for orbita, values in orbita_pids.items():
                self.set_orbita_values('pid', orbita, values)
    def get_dxls_value(self, register: str, dxl_names: List[str], clear_value: bool, retry: int) -> List[float]:
        """Retrieve register value on the specified dynamixels.

        The process is done as follows.
        First, clear any cached value for the register, we want to make sure we get an updated one.
        Then, split joints among their respective gate and send a single get request per gate (multiple ids per request).
        Finally, wait for all joints to receive the updated value, convert it and return it.
        On timeout the whole request is retried up to *retry* times.
        """
        dxl_ids_per_gate: Dict[GateClient, List[int]] = defaultdict(list)
        dxl_reg_per_gate: Dict[GateClient, Tuple[int, int]] = {}
        for name in dxl_names:
            dxl = self.dxls[name]
            if clear_value:
                dxl.clear_value(register)
            # Only actually query motors whose cached value is stale/unset.
            if clear_value or (not dxl.is_value_set(register)):
                if isinstance(dxl, DynamixelMotor):
                    gate = self.gate4name[name]
                    dxl_ids_per_gate[gate].append(dxl.id)
                    dxl_reg_per_gate[gate] = dxl.get_register_config(register)
        # One GET request per gate, batching all its motor ids.
        for gate, ids in dxl_ids_per_gate.items():
            addr, num_bytes = dxl_reg_per_gate[gate]
            gate.protocol.send_dxl_get(addr, num_bytes, ids)
        try:
            # get_value_as_usi blocks until the answer arrives (or times out).
            return [
                self.dxls[name].get_value_as_usi(register)
                for name in dxl_names
            ]
        except TimeoutError as e:
            missing_dxls = [
                name for name in dxl_names
                if not self.dxls[name].is_value_set(register)
            ]
            if self.logger is not None:
                self.logger.warning(f'Timeout occurs after GET cmd: dev="{missing_dxls}" reg="{register}"!')
            if retry == 0:
                raise e
            if register in ('present_position', 'temperature'):
                # We are waiting for the module to push us the data,
                # so wait before retrying.
                time.sleep(1)
            return self.get_dxls_value(register, dxl_names, clear_value, retry - 1)
    def set_dxls_value(self, register: str, values_for_dxls: Dict[str, float]):
        """Set new value for register on the specified dynamixels.

        The values are split among the gates corresponding to the joints.
        One set request per gate is sent (with possible multiple ids).
        Goal position/speed writes are only transmitted when torque is on;
        the local cache is updated either way so the value can be replayed
        when torque is later enabled.
        """
        dxl_data_per_gate: Dict[GateClient, Dict[int, bytes]] = defaultdict(dict)
        dxl_reg_per_gate: Dict[GateClient, Tuple[int, int]] = {}
        for name, dxl_value in values_for_dxls.items():
            dxl = self.dxls[name]
            if isinstance(dxl, DynamixelMotor):
                self.dxl4id[dxl.id].update_value_using_usi(register, dxl_value)
                # Skip sending goal_position/moving_speed to compliant motors.
                if self._is_torque_enable(name) or register not in ['goal_position', 'moving_speed']:
                    gate = self.gate4name[name]
                    dxl_data_per_gate[gate][dxl.id] = self.dxl4id[dxl.id].get_value(register)
                    dxl_reg_per_gate[gate] = self.dxl4id[dxl.id].get_register_config(register)
        for gate, value_for_id in dxl_data_per_gate.items():
            addr, num_bytes = dxl_reg_per_gate[gate]
            gate.protocol.send_dxl_set(addr, num_bytes, value_for_id)
        if register == 'torque_enable':
            # When enabling torque, replay the cached speed and re-sync the
            # goal position so the motor does not jump.
            names = [name for name, value in values_for_dxls.items() if value == 1]
            cached_speed = dict(zip(names, self.get_dxls_value('moving_speed', names, clear_value=False, retry=10)))
            self.set_dxls_value('moving_speed', cached_speed)
            self.get_dxls_value('goal_position', names, clear_value=True, retry=10)
    def get_orbita_values(self, register_name: str, orbita_name: str, clear_value: bool, retry: int) -> List[float]:
        """Retrieve register value on the specified orbita actuator.

        Sends one GET to the orbita's gate, blocks until the answer arrives
        and retries up to *retry* times on timeout.
        """
        orbita = self.orbitas[orbita_name]
        register = OrbitaActuator.register_address[register_name]
        gate = self.gate4name[orbita_name]
        if clear_value:
            orbita.clear_value(register)
        gate.protocol.send_orbita_get(
            orbita_id=orbita.id,
            register=register.value,
        )
        try:
            # Blocks until handle_orbita_pub_data fills the value (or timeout).
            return orbita.get_value_as_usi(register)
        except TimeoutError as e:
            if self.logger is not None:
                self.logger.warning(f'Timeout occurs after GET cmd: dev="{orbita_name}" reg="{register_name}"!')
            if retry == 0:
                raise e
            if register_name in ('present_position', 'temperature'):
                # We are waiting for the module to push us the data,
                # so wait before retrying.
                time.sleep(1)
            return self.get_orbita_values(register_name, orbita_name, clear_value, retry - 1)
    def set_orbita_values(self, register_name: str, orbita_name: str, value_for_disks: Dict[str, float]):
        """Set new value for register on the specified disks of one orbita."""
        orbita = self.orbitas[orbita_name]
        register = OrbitaActuator.register_address[register_name]
        gate = self.gate4name[orbita_name]
        # Update each disk's local register cache (USI -> raw conversion).
        for disk_name, value in value_for_disks.items():
            attrgetter(f'{disk_name}.{register_name}')(orbita).update_using_usi(value)
        # Gather the raw values keyed by disk id and send one SET request.
        value_for_id = {
            orbita.get_id_for_disk(disk_name): attrgetter(f'{disk_name}.{register_name}')(orbita).get()
            for disk_name in value_for_disks.keys()
        }
        gate.protocol.send_orbita_set(orbita.id, register.value, value_for_id)
    def get_fans_state(self, fan_names: List[str], retry=10) -> List[float]:
        """Retrieve state for the specified fans, in the given order.

        Dynamixel fans are queried in one batch per gate; orbita fans are
        read through the actuator's 'fan_state' register.  Retries the whole
        request on timeout.
        """
        dxl_fans_per_gate: Dict[GateClient, List[int]] = defaultdict(list)
        dxl_fans: List[str] = []
        orbita_fans: List[Tuple[str, str]] = []
        for name in fan_names:
            fan = self.fans[name]
            if isinstance(fan, DxlFan):
                # Reset the cached state so we block on the fresh answer below.
                fan.state.reset()
                dxl_fans_per_gate[self.gate4name[name]].append(fan.id)
                dxl_fans.append(name)
            elif isinstance(fan, OrbitaFan):
                orbita_fans.append((name, fan.orbita))
        for gate, ids in dxl_fans_per_gate.items():
            gate.protocol.send_dxl_fan_get(ids)
        try:
            fans_state = {}
            for name in dxl_fans:
                fans_state[name] = self.fans[name].state.get_as_usi()
            for fan_name, orbita_name in orbita_fans:
                fans_state[fan_name] = self.get_orbita_values('fan_state', orbita_name, clear_value=True, retry=retry)[0]
            return [fans_state[name] for name in fan_names]
        except TimeoutError:
            if retry > 0:
                return self.get_fans_state(fan_names, retry - 1)
            raise
def set_fans_state(self, state_for_fan: Dict[str, float]):
"""Set state for the specified fans."""
fans_per_gate: Dict[GateClient, Dict[int, float]] = defaultdict(dict)
for name, state in state_for_fan.items():
fan = self.fans[name]
if isinstance(fan, DxlFan):
fan.state.update_using_usi(state)
fans_per_gate[self.gate4name[name]][fan.id] = state
for gate, values in fans_per_gate.items():
gate.protocol.send_dxl_fan_set(values)
def _is_torque_enable(self, name: str) -> bool:
return self.get_dxls_value('torque_enable', [name], clear_value=False, retry=10)[0] == 1
def handle_dxl_pub_data(self, addr: int, ids: List[int], errors: List[int], values: List[bytes]):
"""Handle dxl update received on a gate client."""
for id, err, val in zip(ids, errors, values):
if (err != 0) and self.logger is not None:
self.logger.warning(f'Dynamixel error {err} on motor id={id}!')
if id not in self.dxl4id and self.logger is not None:
self.logger.debug(f'Dynamixel id={id} not in config!')
continue
m = self.dxl4id[id]
m.update_value(m.find_register_by_addr(addr), val)
def handle_load_pub_data(self, ids: List[int], values: List[bytes]):
"""Handle load update received on a gate client."""
for id, val in zip(ids, values):
if id not in self.force4id and self.logger is not None:
self.logger.info(f'Force sensor id={id} not in config!')
continue
self.force4id[id].update_force(val)
def handle_orbita_pub_data(self, orbita_id: int, reg_type: OrbitaRegister, values: bytes):
"""Handle orbita update received on a gate client."""
if orbita_id not in self.orbita4id and self.logger is not None:
self.logger.info(f'Orbita id={orbita_id} not in config!')
return
self.orbita4id[orbita_id].update_value(reg_type, values)
def handle_fan_pub_data(self, fan_ids: List[int], states: List[int]):
"""Handle fan state update received on a gate client."""
for id, state in zip(fan_ids, states):
self.fan4id[id].state.update_using_usi(state)
    def handle_assert(self, msg: bytes):
        """Handle an assertion received on a gate client.

        Always raises AssertionError carrying the raw message bytes.
        """
        raise AssertionError(msg)
class MissingContainerError(Exception):
    """Raised when one or more configured devices cannot be found."""
    def __init__(self, missing: List[Device]):
        """Build the error message from the list of missing devices."""
        message = f'Could not find given devices {missing}!'
        super().__init__(message)
| StarcoderdataPython |
91547 | import time
import gex
# pwm frequency sweep
with gex.Client(gex.TrxRawUSB()) as client:
    pwm = gex.PWMDim(client, 'dim')
    pwm.start()
    # Set duty on channel 1 (500 — presumably per-mille, i.e. 50%; confirm
    # against the gex PWMDim documentation).
    pwm.set_duty_single(1, 500)
    # Sweep frequency down from 2 kHz toward ~215 Hz in 15 Hz steps,
    # pausing 50 ms per step.
    for i in range(2000, 200, -15):
        pwm.set_frequency(i)
        time.sleep(0.05)
    pwm.stop()
| StarcoderdataPython |
3228811 | <filename>clock/db.py
'''
闹钟表
- ID (key)
- USER #id
- CONTENT #闹钟备注
- TYPE #private or group
- C_TIME #默认 时/分
-ONES #一次性闹钟 #默认true
-
CREATE TABLE clocks(
id INTEGER NOT NULL primary key autoincrement,
type CHAR(10),
user INTEGER NOT NULL,
content VARCHAR(20),
c_time TIME ,
ones INTEGER NOT NULL
);
'''
import os
import sqlite3
# SQLite database file lives next to this module.
path = os.path.dirname(__file__)
db = path + '/data.db'
def execute(sql: str):
    """Run *sql* against the clock database and return all rows as a list.

    The connection is always committed on success and closed in every case,
    even if the statement raises (the original leaked the connection on
    error).
    """
    conn = sqlite3.connect(db)
    try:
        # Connection.execute creates a cursor internally.
        rows = list(conn.execute(sql))
        conn.commit()
    finally:
        conn.close()
    return rows
def add_clock_db(user: int, content: str, time: str, ones=1, type='private'):
    """Insert a new alarm row into the clocks table.

    SECURITY NOTE(review): values are interpolated straight into the SQL
    string, so a quote in *content* breaks the statement (SQL injection).
    Should migrate to parameterized queries — confirm with callers first.
    """
    sql = f'''INSERT INTO CLOCKS (type, user, content, c_time, ones)
    values ("{type}", {user}, "{content}", "{time}", {ones});'''
    execute(sql)
def del_clock_db(id):
    """Delete the alarm with the given primary key.

    SECURITY NOTE(review): *id* is interpolated into the SQL string; only
    safe while callers always pass an int. Prefer a parameterized query.
    """
    execute(f"DELETE from clocks where id = {id}")
def select_all():
    """Return every alarm row.

    Each row is a tuple: (id, type, user, content, c_time, ones).
    """
    return execute("SELECT * FROM clocks;")
def new_id():
    """Return the highest alarm id currently in the table (None when empty)."""
    rows = execute("SELECT max(id) FROM clocks;")
    first_row = rows[0]
    return first_row[0]
| StarcoderdataPython |
3362449 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 26 17:29:54 2020
@author: simon
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial.transform import Rotation as R
def normalise(vector):
    """Return *vector* scaled to unit length.

    Generalized from the original hard-coded three-component formula: works
    for a vector of any dimension via the Euclidean norm (identical result
    for 3D inputs). Note: a zero vector still divides by zero, unchanged
    from the original behaviour.
    """
    return vector / np.linalg.norm(vector)
def plot_points(xs, ys, zs):
    """Scatter-plot the given coordinate arrays in a fixed +/-20 3D box.

    Side-effect helper only: opens a blocking matplotlib window via
    plt.show() and returns nothing.
    """
    fig = plt.figure(figsize=(10,8), dpi=100)
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xs, ys, zs)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # Fixed axis limits so successive plots share the same scale.
    ax.set_xlim(-20, 20)
    ax.set_ylim(-20, 20)
    ax.set_zlim(-20, 20)
    plt.show()
def rotate(points, normal_vector, angle):
    """Rotate a (3, N) point array by *angle* degrees about *normal_vector*.

    The axis is normalised first, so its magnitude does not affect the
    rotation. (Removed a stray ``r.as_matrix()`` call whose result was
    discarded — dead code in the original.)
    """
    rotation = R.from_rotvec(np.radians(angle) * np.array(normalise(normal_vector)))
    return rotation.apply(points.T).T
def create_circle(centre, start_angle, end_angle, radius, point_count):
    """Return a (3, point_count) array of points on a circle in the z-plane.

    Angles are sampled from start_angle (inclusive) to end_angle (exclusive).
    """
    cx, cy, cz = centre[0], centre[1], centre[2]
    thetas = np.linspace(start_angle, end_angle, point_count, endpoint=False)
    xs = cx + radius * np.cos(thetas)
    ys = cy + radius * np.sin(thetas)
    zs = cz + np.zeros(point_count)
    return np.array([xs, ys, zs])
def create_spiral(centre, start_angle, end_angle, radius, length, revolutions, points_per_rev):
    """Return a (3, revolutions * points_per_rev) helix along the z axis.

    The helix winds `revolutions` times while z runs from -length/2 to
    +length/2 (endpoint excluded in both samplings).
    """
    total_points = points_per_rev * revolutions
    thetas = np.linspace(start_angle, revolutions * end_angle, total_points, endpoint=False)
    heights = np.linspace(-length / 2, length / 2, total_points, endpoint=False)
    xs = centre[0] + radius * np.cos(thetas)
    ys = centre[1] + radius * np.sin(thetas)
    zs = centre[2] + heights
    return np.array([xs, ys, zs])
class shape_factory():
    """Compose step -> sweep -> path callables into 3D point sets.

    Each stage is a callable; custom stages may be injected through the
    constructor, otherwise the defaults below are used.
    """
    def __init__(self, step_func=None, path_func=None, sweep_func=None):
        self.step_generator = self.default_step() if step_func is None else step_func
        self.sweep_generator = self.default_sweep() if sweep_func is None else sweep_func
        self.path_generator = self.default_path() if path_func is None else path_func
    class default_path():
        """Identity path: returns the sweep coordinates unchanged."""
        def __call__(self, coords):
            return coords
    class default_sweep():
        """Map each step value s to the point (0, s, 0); result is (3, N)."""
        def __call__(self, steps):
            return np.array([np.array([0., i, 0.]) for i in steps]).T
    class default_step():
        """Default sampling: 50 evenly spaced values in [0, step_count]."""
        def __call__(self, step_count):
            return np.linspace(0, step_count)
    def generate(self, point_count):
        """Run the full pipeline and return the generated coordinates.

        Bug fix: the configured step_generator is now actually used.
        Previously generate() hard-coded np.linspace(0, point_count) and
        silently ignored any step_func passed to the constructor.
        """
        steps = self.step_generator(point_count)
        sweep_coords = self.sweep_generator(steps)
        return self.path_generator(sweep_coords)
def main():
    """Demo driver: build a rotated circle and spiral (plot calls are
    commented out), then plot a custom path through a shape_factory."""
    # circle
    start = 0
    end = 2 * np.pi
    point_count = 100
    radius = 10
    centre = 0.,0.,0.
    normal = 0.,1.,0.
    rotation_angle = 10
    coords = create_circle(centre, start, end, radius, point_count)
    coords = rotate(coords, normal, rotation_angle)
    #plot_points(*coords)
    # spiral
    start = 0
    end = 2 * np.pi
    points_per_rev = 100
    radius = 10
    length = 40
    revolutions = 3
    centre = 0.,0.,0.
    normal = 1.,1.,1.
    rotation_angle = 10
    coords = create_spiral(centre, start, end, radius, length, revolutions, points_per_rev)
    coords = rotate(coords, normal, rotation_angle)
    #plot_points(*coords)
    def test_path(coords):
        # Wrap x/y through cos/sin while leaving z untouched.
        x_coords = np.cos(coords[0])
        y_coords = np.sin(coords[1])
        z_coords = coords[2]
        return np.array([x_coords, y_coords, z_coords])
    test = shape_factory()
    # NOTE(review): assigns the bare function as path_generator, replacing
    # the default_path instance set in __init__ — works because generate()
    # simply calls whatever callable is stored.
    test.path_generator = test_path
    points = test.generate(50)
    plot_points(*points)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
179617 | TIMEOUT = 2
| StarcoderdataPython |
3299650 | # Source: https://leetcode.com/problems/house-robber/
# Type: Dynamic Programming
# Time Complexity: O(n)
# Space Complexity: O(n)
## I really struggle with DP. Leetcode has marked this as easy — is it me, or does Leetcode need to change its criteria?
class Solution:
    """LeetCode 198 (House Robber): max sum over non-adjacent elements."""
    def rob(self, nums: List[int]) -> int:
        """Return the maximum loot without robbing two adjacent houses.

        Rolling-pair DP: `prev`/`curr` replace the O(n) table of the
        original version, giving O(n) time and O(1) extra space, and the
        empty/short-input special cases disappear naturally.
        """
        prev, curr = 0, 0
        for value in nums:
            # Either skip this house (curr) or rob it on top of prev.
            prev, curr = curr, max(curr, prev + value)
        return curr
| StarcoderdataPython |
1731146 | import os
from enum import IntEnum
import random
import re
import time
from study_tool.card import Card, CardSide, SourceLocation
from study_tool.card_attributes import CardAttributes
from study_tool.external import googledocs
from study_tool.russian.types import *
from study_tool.russian.word import *
from study_tool.config import Config
class CardSetType(IntEnum):
"""
Types of card set groupings.
"""
Other = 0
Categorical = 1 # Has a common category of meaning or usage
Grammatical = 2 # Has common grammatical structures or usages.
Media = 3 # Found in some media: story, video, cartoon, etc.
class CardGroupMetrics:
def __init__(self):
self.history_score = 0.0
self.proficiency_counts = []
for level in range(0, Config.proficiency_levels + 1):
self.proficiency_counts.append(0)
def get_total_count(self):
return sum(self.proficiency_counts)
def get_proficiency_percent(self):
score = 0
potential_score = 0
for level, count in enumerate(self.proficiency_counts):
score += count * Config.proficiency_level_score_multiplier[level]
potential_score += count
return score / potential_score
def get_proficiency_count(self):
return self.get_proficiency_percent() * self.get_total_count()
def serialize(self):
return {"history_score": self.history_score,
"proficiency_counts": self.proficiency_counts}
def deserialize(self, state):
self.history_score = state["history_score"]
self.proficiency_counts = list(state["proficiency_counts"])
class StudySet:
def __init__(self, name="", cards=()):
self.name = AccentedText(name)
self.cards = list(cards)
def get_name(self) -> AccentedText:
return self.name
def get_cards(self) -> list:
return self.cards
def get_card_count(self) -> int:
return len(self.cards)
def has_card(self, card: Card) -> bool:
return card in self.cards
def set_name(self, name: AccentedText):
self.name = AccentedText(name)
def add_card(self, card: Card):
self.cards.append(card)
def remove_card(self, card: Card):
self.cards.remove(card)
def set_cards(self, cards: list):
self.cards = list(cards)
def clear(self):
self.cards = []
def get_study_metrics(self):
metrics = CardGroupMetrics()
metrics.history_score = sum(c.get_history_score() for c in self.cards)
for level in range(0, Config.proficiency_levels + 1):
metrics.proficiency_counts[level] = len(
[c for c in self.cards if c.proficiency_level == level])
return metrics
def get_problem_cards(self):
cs = sorted(self.cards, key=lambda c: c.get_history_score())
for c in reversed(cs):
history_preview_len = 16
history_preview = ""
for i in range(min(history_preview_len, len(c.history))):
history_preview += "." if c.history[i] else "x"
history_preview += " " * \
(history_preview_len - len(history_preview))
print("{:.4f} : {:3} [{}] {}".format(c.get_history_score(),
len(c.history),
history_preview,
c.russian,
c.english))
return StudySet(name=self.name,
cards=[c for c in self.cards
if len(c.history) < 5
or c.get_history_score() < 0.9])
def __repr__(self):
return "StudySet<{} cards>".format(len(self.cards))
class CardSet(StudySet):
def __init__(self, cards=(), fixed_card_set=False, name="Untitled", path=None,
card_set_type=CardSetType.Other):
StudySet.__init__(self, name=name, cards=cards)
self.key = None
self.path = ""
self.info = ""
self.side = CardSide.English
self.source = None
self.__card_set_type = card_set_type
self.__package = None
self.__file_path = None
self.__is_fixed_card_set = fixed_card_set
def get_package(self):
return self.__package
def get_card_set_type(self) -> CardSetType:
return self.__card_set_type
def get_file_path(self) -> str:
return self.__file_path
def set_card_set_type(self, card_set_type: CardSetType):
self.__card_set_type = card_set_type
def set_package(self, package):
self.__package = package
def set_file_path(self, path: str):
self.__file_path = path
def set_fixed_card_set(self, fixed: bool):
self.__is_fixed_card_set = fixed
for card in self.cards:
if fixed:
card.set_fixed_card_set(self)
else:
card.set_fixed_card_set(None)
def is_fixed_card_set(self) -> bool:
return self.__is_fixed_card_set
def next(self, seen, unseen):
index = random.randint(0, len(unseen) - 1)
card = unseen[index]
del unseen[index]
return card
def serialize(self) -> dict:
state = {
"name": repr(self.name),
"version": 1,
"cards": [],
"type": self.__card_set_type.name,
}
for card in self.cards:
state["cards"].append(
[card.get_word_type().name.lower(),
card.get_russian().text,
card.get_english().text])
return {"card_set": state}
def __repr__(self):
return "CardSet(\"{}\")".format(self.get_name())
class CardSetPackage(StudySet):
def __init__(self, name, path: str, parent=None):
# NOTE: Purposefully avoiding super __init__ here
self.name = AccentedText(name)
self.path = path
self.__dirname = os.path.basename(path)
self.parent = parent
self.card_sets = []
self.packages = []
def get_path(self) -> str:
return self.path
def get_directory_name(self) -> str:
return self.__dirname
def get_parent(self):
return self.parent
def get_root(self):
root = self
while root.get_parent():
root = root.get_parent()
return root
def get_packages(self) -> list:
return self.packages
def get_card_sets(self) -> list:
return self.card_sets
def all_card_sets(self):
for package in self.packages:
for card_set in package.all_card_sets():
yield card_set
for card_set in self.card_sets:
yield card_set
@property
def cards(self) -> list:
unique_cards = set()
for card_set in self.all_card_sets():
for card in card_set.cards:
unique_cards.add(card)
return list(unique_cards)
def add_card_set(self, card_set: CardSet):
"""Adds a new card set to the package."""
card_set.set_package(self)
self.card_sets.append(card_set)
def __getitem__(self, name):
for package in self.packages:
if package.name == name:
return package
for card_set in self.card_sets:
if card_set.key == name:
return card_set
raise KeyError(name)
def __repr__(self):
return "CardSetPackage(\"{}\")".format(self.get_name())
| StarcoderdataPython |
3707 | #!/usr/bin/env python
import os
import os.path
import yaml
import time
import random
import multiprocessing
import RPi.GPIO as GPIO
from talk import say
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)  # use Broadcom pin numbering
from adafruit_servokit import ServoKit
# BCM pin assignments for the two DC drive motors (EN = PWM enable pin).
Motor1 = {'EN': 27, 'input1': 19, 'input2': 16}
Motor2 = {'EN': 22, 'input1': 26, 'input2': 20}
for x in Motor1:
    GPIO.setup(Motor1[x], GPIO.OUT)
    GPIO.setup(Motor2[x], GPIO.OUT)
# 100 Hz PWM on each motor's enable pin, started at 0% duty (stopped).
EN1 = GPIO.PWM(Motor1['EN'], 100)
EN2 = GPIO.PWM(Motor2['EN'], 100)
EN1.start(0)
EN2.start(0)
# 16-channel I2C servo controller (Adafruit ServoKit).
hand = ServoKit(channels=16)
# Project root: two directory levels above this file.
ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))
def readYaml():
    """Load and return the servo configuration from configuration.yaml."""
    config_file = '{}/src/configuration.yaml'.format(ROOT_PATH)
    with open(config_file, 'r+', encoding='utf8') as conf:
        return yaml.load(conf, Loader=yaml.FullLoader)
def writeYaml(s=None):
    """Persist a configuration dict to configuration.yaml.

    Writes the module-level `servo` dict when *s* is None, otherwise *s*.
    (Idiom fix: compare against None with `is`, not `==`.)
    """
    with open('{}/src/configuration.yaml'.format(ROOT_PATH), 'w', encoding='utf8') as conf:
        yaml.dump(servo if s is None else s, conf)
# Load the servo configuration. If the primary file is empty/corrupt,
# restore it from the backup copy and retry; abort the program if that
# also fails.
servo = readYaml()
if servo == None:
    with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
        servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)
    writeYaml(servoBackUp)
    servo = readYaml()
    if servo == None:
        print('close')
        exit()
# Cached views into the config: angles for the I2C (ServoKit) channels,
# angles for the direct-GPIO servos, and the GPIO pin numbers.
Initial = servo['Initial_Position']['I2C']
Current = servo['Current_Position']['I2C']
InitialGpio = servo['Initial_Position']['Gpio']
CurrentGpio = servo['Current_Position']['Gpio']
GpioPin = servo['Pin']['Gpio']
for i in range(0,6):
    GPIO.setup(GpioPin[i], GPIO.OUT)
# One 50 Hz software-PWM handle per GPIO servo, started at 0% duty.
Servo = []
for i in range(0,6):
    Servo.append(GPIO.PWM(GpioPin[i],50))
    Servo[i].start(0)
def changeDegree(pin,newDegree,time1=0.05,update=5):
    """Sweep the given I2C servo channels toward their target angles.

    pin       -- list of ServoKit channel indices
    newDegree -- matching list of target angles (degrees)
    time1     -- delay between incremental steps, in seconds
    update    -- step size in degrees per iteration

    All listed servos move in parallel, `update` degrees at a time; each
    intermediate position is written back to the YAML config so the pose
    survives a restart.

    NOTE(review): when the remaining distance is smaller than `update`, the
    position can overshoot and oscillate around the target — confirm
    whether this is intended.
    """
    maxChange = 0
    pinSize = len(pin)
    # The largest remaining travel decides how many iterations we need.
    for i in range(0,pinSize):
        maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange)
    for deg in range(0,maxChange,update):
        for i in range(0,pinSize):
            if Current[pin[i]]<newDegree[i]:
                Current[pin[i]] += update
            elif Current[pin[i]]>newDegree[i]:
                Current[pin[i]] -= update
        for i in range(0,pinSize):
            hand.servo[pin[i]].angle = Current[pin[i]]
            servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]]
        writeYaml()
        time.sleep(time1)
def takePosition():
    """Return all I2C servos to the robot's neutral pose (hard-coded angles)."""
    changeDegree([7,8],[180,0])
    changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0])
def changeDegreeGpio(pin,degree,update,duration):
    """Sweep direct-GPIO servos to their target angles, one pin at a time.

    pin      -- list of indices into the Servo/GpioPin tables
    degree   -- matching list of target angles (degrees)
    update   -- step size in degrees (callers pass it positive)
    duration -- delay between steps, in seconds

    Bug fix: the step direction is now computed per pin. Previously
    `update` itself was negated and stayed negated for every following pin,
    so a later pin that needed an increasing sweep produced an empty range
    and silently did not move.
    """
    pinSize = len(pin)
    for i in range(0,pinSize):
        p = pin[i]
        step = -update if CurrentGpio[p] > degree[i] else update
        for deg in range(CurrentGpio[p], degree[i], step):
            # Map degrees (0-180) onto a 2%-12% duty cycle for 50 Hz servo PWM.
            duty = deg/18
            duty += 2
            Servo[p].ChangeDutyCycle(duty)
            time.sleep(duration)
        CurrentGpio[p] = degree[i]
        writeYaml()
def Run(a, b, c, d, x):
    """Drive the two DC motors.

    a/b -- motor 1 input1/input2 (1 = set HIGH), c/d -- motor 2 inputs,
    x   -- PWM duty cycle (0-100) applied to both enable pins.
    """
    # Drop every input low first, then raise only the requested ones.
    GPIO.output(Motor1['input1'], GPIO.LOW)
    GPIO.output(Motor1['input2'], GPIO.LOW)
    GPIO.output(Motor2['input1'], GPIO.LOW)
    GPIO.output(Motor2['input2'], GPIO.LOW)
    requested = (
        (a, Motor1['input1']),
        (b, Motor1['input2']),
        (c, Motor2['input1']),
        (d, Motor2['input2']),
    )
    for flag, channel in requested:
        if flag == 1:
            GPIO.output(channel, GPIO.HIGH)
    EN2.ChangeDutyCycle(x)
    EN1.ChangeDutyCycle(x)
def Stop():
    """Stop both DC motors (all inputs low, 0% duty)."""
    Run(0,0,0,0,0)
def Start_Slow(a, b, c, d):
    """Ramp the motors up in 20% duty steps with 0.5 s per step.

    NOTE(review): range(0,100,20) stops at 80% duty and never reaches full
    power — confirm whether 100% was intended.
    """
    for i in range(0,100,20):
        Run(a,b,c,d,i)
        time.sleep(0.5)
def Stop_Slow(a,b,c,d):
    """Ramp the motors down in 20% duty steps with 0.5 s per step.

    NOTE(review): the ramp ends at 20% duty and never issues 0, so callers
    must still call Stop() to fully halt — confirm intended.
    """
    for i in range(100,0,-20):
        Run(a,b,c,d,i)
        time.sleep(0.5)
def yes(times=3):
    """Nod the head (I2C servo channel 0) up and down *times* times."""
    for i in range(0,times):
        changeDegree([0],[30])
        time.sleep(0.08)
        changeDegree([0],[0])
        time.sleep(0.08)
def no(times=3):
    """Shake the head side to side *times* times, then recentre at 90 deg."""
    for i in range(0,times):
        changeDegree([15],[70],5,0.05)
        time.sleep(0.2)
        changeDegree([15],[110],5,0.05)
        time.sleep(0.2)
    changeDegree([15],[90],5,0.05)
def move_head(times=3):
    """Combined nod + turn gesture, repeated *times* times, then recentre."""
    for i in range(0,times):
        changeDegree([0],[20])
        changeDegreeGpio([0],[80],5,0.05)
        changeDegree([0],[0])
        changeDegreeGpio([0],[100],5,0.05)
    changeDegreeGpio([0],[90],10,0.01)
def random0():
    """Small random idle gesture: nod, or glance one way then recentre."""
    # randrange(...) % 3 yields 0, 1 or 2 with near-uniform probability.
    r = random.randrange(1,10000000)%3
    if(r==1):
        changeDegree([0],[20])
        changeDegree([0],[0])
    elif(r==2):
        changeDegreeGpio([0],[120],5,0.05)
        changeDegreeGpio([0],[90],5,0.05)
    else:
        changeDegreeGpio([0],[60],5,0.05)
        changeDegreeGpio([0],[90],5,0.05)
def random1():
    """Random idle gesture using one arm or both arms.

    Bug fix: the original used random.randrange(1,3), which only yields 1
    or 2, so the both-arms branch below was unreachable dead code.
    randrange(1,4) makes all three gestures possible.
    """
    r = random.randrange(1,4)
    if(r==1):
        changeDegree([0],[20])
        changeDegree([0],[0])
        changeDegree([3],[50])
        changeDegree([9],[100])
        changeDegree([9],[60])
        changeDegree([3],[0])
    elif(r==2):
        changeDegree([0],[20])
        changeDegree([0],[0])
        changeDegree([4],[120])
        changeDegree([10],[140])
        changeDegree([10],[180])
        changeDegree([4],[170])
    else:
        changeDegree([3,4],[50,120])
        changeDegree([9,10],[100,140])
        changeDegree([9,10],[60,180])
        changeDegree([3,4],[0,180])
def random2():
    """Scripted 15-move arm dance following a fixed selection sequence."""
    changeDegree([3,4],[20,150])
    pin = [7,8,9,10]
    # Two alternative target angles per pin; `ok` toggles between them.
    deg = [[160,0,60,100],[180,20,100,140]]
    ok = [0,0,0,0]
    select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1]
    for i in range(0,15):
        r = select[i%len(select)]%4
        print (' move ',r)
        changeDegree([pin[r]],[deg[ok[r]][r]])
        ok[r]^=1
    takePosition()
def random3():
    """Random 15-move arm dance driven by random pin selection."""
    changeDegree([3,4],[20,150])
    pin = [7,8,9,10]
    deg = [[160,0,60,100],[180,20,100,140]]
    ok = [0,0,0,0]
    for i in range(0,15):
        r = random.randrange(1,1000000)%4
        print (' move ',r)
        # NOTE(review): unlike random2/randomCall, `ok[r]` is never toggled
        # here, so each pin always moves to the same deg[0] angle — confirm
        # whether the toggle was accidentally dropped.
        changeDegree([pin[r]],[deg[ok[r]][r]])
    takePosition()
def randomCall(t):
    """Gesture routine scaled by *t*: performs t*3 pseudo-random arm moves."""
    changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20])
    pin = [5,6,7,8]
    # Two alternative target angles per pin; `ok` toggles between them.
    deg = [[80,50,100,70],[110,90,110,90]]
    select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973]
    ok = [0,0,0,0]
    # NOTE(review): `ln` is unused, and `i%16` ignores the rest of `select`.
    ln = len(select)
    for i in range(0,t*3):
        r = select[i%16]%4
        changeDegree([pin[r]],[deg[ok[r]][r]])
        ok[r]^=1
    takePosition()
def expression(t):
    """Dispatch a gesture routine by index; any other t runs randomCall(t)."""
    print (' i got value of t is : ',t)
    fixed_gestures = {0: random0, 1: random1, 2: random2, 3: random3}
    gesture = fixed_gestures.get(t)
    if gesture is not None:
        gesture()
    else:
        randomCall(t)
def speakOnline(t):
    """Run the gesture routine for an online speech event of size *t*."""
    expression(t)
def speakOffline(speech):
    """Speak *speech* via offline TTS while gesturing in parallel.

    Gesture intensity scales with text length (one unit per ~15 chars); the
    gestures run in a separate process so they overlap the blocking say().
    """
    t = int(len(speech)/15)
    print ('Offline t value is : ',t)
    p1 = multiprocessing.Process(target=expression,args=[t])
    p1.start()
    say(speech)
| StarcoderdataPython |
1738056 | <reponame>mbeckem/tiro
# This module can be used to auto generate tagged unions types for C++.
import cog
from .codegen import camel_to_snake, avoid_keyword, ENV
class Tag:
    """Descriptor for a tagged-union discriminator enum.

    `union` starts unset and is back-filled by Union.__init__ when the tag
    is attached to a union.
    """
    def __init__(self, name, underlying_type, start_value=None, doc=None):
        self.kind = "tag"
        self.name = name
        self.underlying_type = underlying_type
        self.start_value = start_value
        self.doc = doc
        # Not attached to any union yet.
        self.union = None
def __init__(self, name, tag, members=None, doc=None):
self.name = name
self.kind = "union"
self.tag = tag
self.doc = doc
self.members = [] if members is None else members
self.format = None
self.equality = None
self.hash = None
self.doc_mode = "member"
self.storage_mode = "trivial"
self.is_final = True
self.accessors = "const"
if tag.union:
raise RuntimeError("Tag already belongs to a different union")
tag.union = self
# declare: only declare, but do not define the format(FormatStream&) function.
# define: do both
def set_format_mode(self, which):
if which not in [None, "declare", "define"]:
raise RuntimeError(f"Invalid value for 'which': {which}.")
self.format = which
return self
# define: declare and implement equality operators.
def set_equality_mode(self, which):
if which not in [None, "define"]:
raise RuntimeError(f"Invalid value for 'which': {which}.")
self.equality = which
return self
# define: declare and implement build_hash function.
def set_hash_mode(self, which):
if which not in [None, "define"]:
raise RuntimeError(f"Invalid value for 'which': {which}.")
self.hash = which
return self
# member: Use doc for the doc comment of the member type
# tag: Document the type tag instead
def set_doc_mode(self, which):
if which not in ["member", "tag"]:
raise RuntimeError(f"Invalid value for 'which': {which}")
self.doc_mode = which
return self
# trivial: do not declare any special member functions
# movable: declare and implement destroy and move
def set_storage_mode(self, which):
if which not in ["trivial", "movable"]:
raise RuntimeError(f"Invalid value for 'which': {which}")
self.storage_mode = which
return self
def set_final(self, which):
if which not in [True, False]:
raise RuntimeError(f"Invalid value for 'which': {which}")
self.is_final = which
return self
# Implement only constant ("const") accessors or "all".
def set_accessors(self, which):
if which not in ["const", "all"]:
raise RuntimeError(f"Invalid value for 'which': {which}")
self.accessors = which
return self
class UnionMember:
    """Base class for one alternative of a tagged union.

    Derives the generated-code identifiers (accessor, factory, field and
    visitor names) from the member's CamelCase name.
    """
    def __init__(self, name, kind, doc=None):
        self.name = name
        snake = camel_to_snake(name)
        self.argument_name = avoid_keyword(snake)
        self.accessor_name = f"as_{snake}"
        self.factory_name = f"make_{snake}"
        self.field_name = f"{snake}_"
        self.visit_name = f"visit_{snake}"
        self.kind = kind
        # Some libraries are de-facto noexcept-movable without declaring it;
        # setting this flag forces noexcept in the generated code.
        self.force_noexcept = False
        self.doc = doc
    def set_name(self, name):
        """Override the display name; returns self for chaining."""
        self.name = name
        return self
    def set_argument_name(self, argument_name):
        """Override the derived argument name; returns self for chaining."""
        self.argument_name = argument_name
        return self
    def set_accessor_name(self, accessor_name):
        """Override the derived accessor name; returns self for chaining."""
        self.accessor_name = accessor_name
        return self
    def set_force_noexcept(self, force_noexcept):
        """Force noexcept on generated move operations; returns self."""
        self.force_noexcept = force_noexcept
        return self
class Struct(UnionMember):
    """Union member implemented as a generated struct with named fields."""
    def __init__(self, name, members=None, doc=None):
        super().__init__(name, "struct", doc)
        # Keep the caller's list object when one is provided.
        self.members = members if members is not None else []
class Alias(UnionMember):
    """Union member that wraps an existing type rather than a new struct."""
    def __init__(self, name, target, pass_as="copy", doc=None):
        super().__init__(name, "alias", doc)
        self.target = target
        self.pass_as = pass_as
class Field:
    """A named, typed field inside a Struct member."""
    def __init__(self, name, type, pass_as="copy", doc=None):
        # Name is passed through avoid_keyword to dodge reserved words.
        self.name = avoid_keyword(name)
        self.type = type
        self.pass_as = pass_as
        self.doc = doc
def _declare(T):
    """Emit the declaration for a Tag or Union through cog."""
    if T.kind == "tag":
        templ = ENV.get_template("union_tag.jinja2")
        cog.outl(templ.module.tag_decl(T))
    elif T.kind == "union":
        templ = ENV.get_template("unions.jinja2")
        cog.outl(templ.module.union_decl(T))
    else:
        raise RuntimeError("Invalid type.")
def _define(T):
    """Emit the definition for a Tag or Union through cog."""
    if T.kind == "tag":
        templ = ENV.get_template("union_tag.jinja2")
        cog.outl(templ.module.tag_def(T))
    elif T.kind == "union":
        templ = ENV.get_template("unions.jinja2")
        cog.outl(templ.module.union_def(T))
    else:
        raise RuntimeError("Invalid type.")
def _implement_inlines(T):
    """Emit the inline implementation for a Union; tags have none."""
    if T.kind == "tag":
        pass
    elif T.kind == "union":
        templ = ENV.get_template("unions.jinja2")
        cog.outl(templ.module.union_inline_impl(T))
    else:
        raise RuntimeError("Invalid type.")
def _implement(T):
    """Emit the out-of-line implementation for a Tag or Union through cog."""
    if T.kind == "tag":
        templ = ENV.get_template("union_tag.jinja2")
        cog.outl(templ.module.tag_impl(T))
    elif T.kind == "union":
        templ = ENV.get_template("unions.jinja2")
        cog.outl(templ.module.union_impl(T))
    else:
        raise RuntimeError("Invalid type.")
def _joined(Ts, each):
for (index, T) in enumerate(Ts):
if index != 0:
cog.outl()
each(T)
def declare(*Ts):
    """Emit declarations for each given type, separated by blank lines.

    (Dropped the redundant `lambda T: _declare(T)` wrapper.)
    """
    _joined(Ts, _declare)
def define(*Ts):
    """Emit definitions for each given type, separated by blank lines.

    (Dropped the redundant `lambda T: _define(T)` wrapper.)
    """
    _joined(Ts, _define)
def implement_inlines(*Ts):
    """Emit inline implementations for each type, blank-line separated.

    (Dropped the redundant lambda wrapper.)
    """
    _joined(Ts, _implement_inlines)
def implement(*Ts):
    """Emit implementations for each type, blank-line separated.

    (Dropped the redundant lambda wrapper.)
    """
    _joined(Ts, _implement)
| StarcoderdataPython |
def cut_rod(prices, rod_size):
    """Rod-cutting dynamic program (CLRS 15.1).

    prices[k] is the price of a piece of length k (index 0 unused); pieces
    longer than len(prices)-1 are worth 0. Returns (r, s) where r[j] is the
    best revenue for a rod of length j and s[j] is the size of the first
    cut in an optimal solution for length j.

    Bug fix: the original wrote `r = s = [0 ...]`, binding both names to the
    *same* list, so the revenue writes clobbered the recorded cut choices
    (and vice versa). The two tables are now independent.
    """
    r = [0 for _ in range(rod_size + 1)]
    s = [0 for _ in range(rod_size + 1)]
    for j in range(1, rod_size + 1):
        max_price = -1
        for k in range(1, j + 1):
            # Price of the first piece; lengths beyond the table sell for 0.
            piece = prices[k] if k < len(prices) else 0
            new_price = piece + r[j - k]
            if new_price > max_price:
                max_price = new_price
                s[j] = k
        r[j] = max_price
    # Debug output retained from the original version.
    print(r)
    print(s)
    return r, s
def main():
    """Interactive driver: read a rod size, solve, and print the cut list."""
    # define the price per length of a rod
    prices = [0, 1, 5, 8, 9, 10, 17, 17, 20]
    # prompt the user to enter the size of the rod to be cut
    rod_size = int(input("Enter the rod's size: "))
    print()
    # get the optimal price for cutting a rod of size rod_size
    r, s = cut_rod(prices, rod_size)
    # print the optimal solution
    print(f"Optimal price = {r[rod_size]}")
    # print the cuts of the rod, walking the first-cut table
    while(rod_size > 0):
        print(s[rod_size])
        rod_size -= s[rod_size]
main() | StarcoderdataPython |
21029 | <reponame>beng92/Statify
'''
x Total plays
x Total artists
x Total unique songs
Average song per artist
x Favourite track
Favourite artist (by plays or time)
Favourite album (req. api)
Average/total/unique plays per range
Average/total/unique artists per range
Average/total time listened per range
Favourite genre (req. api) (by plays or time)
% songs skipped before end (req. api)
Most skipped song/artist (req. api)
Graph of time of day listening
Graph of day of the week listening
Listening habits by Spotify values e.g. accousticness (req. api)
Search listening history
https://developer.spotify.com/web-api/
https://github.com/plamere/spotipy
http://spotipy.readthedocs.org/en/latest/
http://cgbystrom.com/articles/deconstructing-spotifys-builtin-http-server/
https://github.com/cgbystrom/spotify-local-http-api/issues/2
https://github.com/cgbystrom/spotify-local-http-api
http://effbot.org/zone/wcklib-calendar.htm
http://svn.python.org/projects/sandbox/trunk/ttk-gsoc/samples/ttkcalendar.py
'''
import time, datetime, StatifyCache, logging
# Songs read in order (date, Song)
class StatifyStats:
    """Compute listening statistics from the play log written by StatifyTracking."""
    def __init__(self):
        self.allSongs = []     # [(datetime, Song)] with consecutive repeats removed
        self.allItems = []     # [(datetime, Song or "Spotify")] raw log entries
        self.firstdate = None  # timestamp of the first log line
        self.enddate = None    # timestamp of the last log line
        logging.basicConfig(filename="debug.log", filemode='w', level=logging.DEBUG, format='%(asctime)s %(levelname)s > %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
        self.length = 0        # number of log lines seen by the previous load()
        self.sc = StatifyCache.StatifyCache()
    def most_common(self, list):
        """Return (item, count) for the most frequent entry (first wins ties).

        NOTE: the parameter shadows the builtin `list`; the name is kept for
        interface compatibility.
        """
        counts = {}
        for item in list:
            counts[item] = counts.get(item, 0) + 1
        best_name = ""
        best_count = 0
        for item in counts:
            if counts[item] > best_count:
                best_count = counts[item]
                best_name = item
        return (best_name, best_count)
    def most_common_artist_plays(self, list):
        """Most common artist in a [(date, Song)] list, with its play count."""
        return self.most_common([s.artist for d, s in list])
    def most_common_artist_link(self, artist):
        """Spotify URL for *artist*, or None when the cache has no entry."""
        song = self.sc.getName(artist, "artist")
        return song.artisturl if song is not None else None
    def most_common_song_plays(self, list):
        """Most common song name in a [(date, Song)] list, with play count."""
        return self.most_common([s.name for d, s in list])
    def most_common_song_link(self, song):
        """Spotify URL for *song*, or None when the cache has no entry."""
        song = self.sc.getName(song, "song")
        return song.songurl if song is not None else None
    def listening_time(self, list):
        """Total listening time for a [(date, item)] list (self.allItems form).

        A "Spotify" entry marks the player going idle and closes the current
        listening session; an unterminated session is counted up to now.
        """
        timer = datetime.timedelta()
        start = None
        for d, s in list:
            if start is None:
                start = d
            if s == "Spotify":
                timer = timer + (d - start)
                start = None
        if start is not None:
            timer = timer + (datetime.datetime.now() - start)
        return timer
    def daysInRange(self, list):
        """Days between the first and last entries (signed, first - last)."""
        startDate = list[0][0]
        endDate = list[len(list) - 1][0]
        return (startDate - endDate).days
    def load(self, start, end):
        """Load data/data.txt created by StatifyTracking.pyw.

        Returns True when new lines appeared since the previous load. When
        both *start* and *end* are given, restricts the loaded entries to
        that datetime window.
        """
        with open("data/data.txt") as file:
            lines = file.read().splitlines()
        ret = len(lines) > self.length
        self.length = len(lines)
        self.allItems = []
        self.firstdate = None
        for line in lines:
            dateLine = line.split('>', 1)[0]
            date = datetime.datetime.strptime(dateLine, "%a %b %d %H:%M:%S %Y")
            if self.firstdate is None:
                self.firstdate = date
            self.enddate = date
            song = line.split('>', 1)[1]
            if song != "Spotify" and song != "":
                artistName = song.split(" - ", 1)[0]
                songName = song.split(" - ", 1)[1]
                songObj = self.sc.getSong(songName, artistName)
                self.allItems.append((date, songObj))
            elif song == "Spotify":
                self.allItems.append((date, "Spotify"))
        if start is not None and end is not None:
            self.allItems = [(d, s) for d, s in self.allItems if d >= start and d <= end]
        # Keep only song entries and drop immediate repeats of the same song.
        # (Bug fix: the original called list.delete(), which does not exist,
        # and mutated the list while iterating it.)
        songs = [(d, s) for d, s in self.allItems if not isinstance(s, str)]
        deduped = []
        previous = ""
        for date, song in songs:
            if song != previous:
                deduped.append((date, song))
            previous = song
        self.allSongs = deduped
        return ret
    def plays(self):
        """Return total number of plays for the currently loaded list."""
        return str(len(self.allSongs))
    def artists(self):
        """Return number of distinct artists in the currently loaded list."""
        return str(len(set([s.artist for d, s in self.allSongs])))
    def uniquePlays(self):
        """Return number of distinct song names in the loaded list."""
        return str(len(set([s.name for d, s in self.allSongs])))
    def playsPerDay(self):
        """Average plays per day over the loaded range.

        Bug fix: a range shorter than one day previously raised
        ZeroDivisionError; it is now treated as a single day.
        """
        days = self.daysInRange(self.allSongs)
        if days == 0:
            days = 1
        return abs(int(len(self.allSongs) / days))
    def mcSong(self):
        """Return (display string, Spotify link) for the most played song."""
        name, count = self.most_common_song_plays(self.allSongs)
        song = self.sc.getName(name, "song")
        return (name + " - " + song.artist + " (" + str(count) + ")", self.most_common_song_link(name))
    def mcArtist(self):
        """Return (display string, Spotify link) for the most played artist."""
        artist, count = self.most_common_artist_plays(self.allSongs)
        return (artist + " (" + str(count) + ")", self.most_common_artist_link(artist))
    def listenTime(self):
        """Return the total listening time, formatted as days/hours/minutes."""
        result = self.listening_time(self.allItems)
        days = int(result.days)
        hours = int(result.seconds / 3600)
        minutes = int(result.seconds / 60) - (hours * 60)
        return str(days) + (" day, " if result.days == 1 else " days, ") + str(hours) + (" hour, " if hours == 1 else " hours, ") + str(minutes) + (" minute " if minutes == 1 else " minutes ")
return ret | StarcoderdataPython |
3353511 | <gh_stars>0
import asyncio
import gzip
import json
import os
import random
import re
import string
import tempfile
import logging
import aioboto3
import aiofiles
from tenacity import RetryError, AsyncRetrying, stop_after_attempt, retry_if_exception_type
class AlttprDoor():
def __init__(self, settings=None, spoilers=True):
self.settings = settings
self.spoilers = spoilers
async def generate_game(self):
with tempfile.TemporaryDirectory() as tmp:
settings_file_path = os.path.join(tmp, "settings.json")
self.hash = ''.join(random.choices(string.ascii_letters + string.digits, k=12))
self.settings['outputpath'] = tmp
self.settings['outputname'] = self.hash
self.settings['create_rom'] = True
self.settings['create_spoiler'] = True
self.settings['calc_playthrough'] = False
self.settings['rom'] = os.environ.get('ALTTP_ROM')
self.settings['enemizercli'] = os.path.join(os.environ.get('ENEMIZER_HOME'), 'EnemizerCLI.Core')
# set some defaults we do NOT want to change ever
self.settings['count'] = 1
self.settings['multi'] = 1
self.settings['names'] = ""
self.settings['race'] = False
with open(settings_file_path, "w") as f:
json.dump(self.settings, f)
attempts = 0
try:
async for attempt in AsyncRetrying(stop=stop_after_attempt(20), retry=retry_if_exception_type(Exception)):
with attempt:
attempts += 1
proc = await asyncio.create_subprocess_exec(
'python3',
'DungeonRandomizer.py',
'--settingsfile', settings_file_path,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=os.environ.get('DOOR_RANDO_HOME'))
stdout, stderr = await proc.communicate()
logging.info(stdout.decode())
if proc.returncode > 0:
raise Exception(f'Exception while generating game: {stderr.decode()}')
except RetryError as e:
raise e.last_attempt._exception from e
self.attempts = attempts
self.patch_name = "DR_" + self.settings['outputname'] + ".bps"
self.rom_name = "DR_" + self.settings['outputname'] + ".sfc"
self.spoiler_name = "DR_" + self.settings['outputname'] + "_Spoiler.txt"
rom_path = os.path.join(tmp, self.rom_name)
patch_path = os.path.join(tmp, self.patch_name)
spoiler_path = os.path.join(tmp, self.spoiler_name)
proc = await asyncio.create_subprocess_exec(
os.path.join('utils', 'flips'),
'--create',
'--bps-delta',
os.environ.get("ALTTP_ROM"),
rom_path,
patch_path,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
logging.info(stdout.decode())
if proc.returncode > 0:
raise Exception(f'Exception while while creating patch: {stderr.decode()}')
async with aiofiles.open(patch_path, "rb") as f:
patchfile = await f.read()
async with aioboto3.client('s3') as s3:
await s3.put_object(
Bucket=os.environ.get('SAHASRAHBOT_BUCKET'),
Key=f"patch/{self.patch_name}",
Body=patchfile,
ACL='public-read'
)
async with aiofiles.open(spoiler_path, "rb") as f:
self.spoilerfile = await f.read()
async with aioboto3.client('s3') as s3:
await s3.put_object(
Bucket=os.environ.get('SAHASRAHBOT_BUCKET'),
Key=f"spoiler/{self.spoiler_name}",
Body=gzip.compress(self.spoilerfile),
ACL='public-read' if self.spoilers else 'private',
ContentEncoding='gzip',
ContentDisposition='attachment'
)
    @classmethod
    async def create(
        cls,
        settings,
        spoilers=True
    ):
        """Async factory: build a seed and immediately generate the game.

        Runs the full randomizer/patch/upload pipeline (``generate_game``)
        before returning, so the returned instance is ready to serve URLs.
        """
        seed = cls(settings=settings, spoilers=spoilers)
        await seed.generate_game()
        return seed
    @property
    def url(self):
        # Web patcher front-end that applies the uploaded BPS patch in-browser.
        return f"https://alttprpatch.synack.live/patcher.html?patch={self.patch_url}"
    @property
    def spoiler_url(self):
        # Public S3 URL of the (gzip-compressed) spoiler log uploaded earlier.
        return f"https://{os.environ.get('SAHASRAHBOT_BUCKET')}.s3.amazonaws.com/spoiler/{self.spoiler_name}"
    @property
    def patch_url(self):
        # Public S3 URL of the BPS patch uploaded earlier.
        return f"https://{os.environ.get('SAHASRAHBOT_BUCKET')}.s3.amazonaws.com/patch/{self.patch_name}"
# Pull the code from the spoiler file, and translate it to what SahasrahBot expects
@property
def code(self):
file_select_code = re.search("Hash:*\s(.*,.*,.*,.*,.*)", self.spoilerfile.decode()).groups()[0]
code = list(file_select_code.split(', '))
code_map = {
'Bomb': 'Bombs',
'Powder': 'Magic Powder',
'Rod': 'Ice Rod',
'Ocarina': 'Flute',
'Bug Net': 'Bugnet',
'Bottle': 'Empty Bottle',
'Potion': 'Green Potion',
'Cane': 'Somaria',
'Pearl': 'Moon Pearl',
'Key': 'Big Key'
}
p = list(map(lambda x: code_map.get(x, x), code))
return [p[0], p[1], p[2], p[3], p[4]]
    @property
    def version(self):
        # First capture group is the randomizer version string; the second
        # (the seed number) is captured but unused here.
        return re.search("ALttP Entrance Randomizer Version (.*) - Seed: ([0-9]*)", self.spoilerfile.decode()).groups()[0]
    @property
    def doors(self):
        # This seed type always comes from the door randomizer.
        return True
| StarcoderdataPython |
3320182 | <filename>setup.py
"""Setup for the ezkey package."""
import setuptools
with open('README.md') as f:
README = f.read()
setuptools.setup(
author="<NAME>",
author_email="<EMAIL>",
name='ezkey',
license="Apache 2.0",
description='ezkey is a package that allows developers to easily manage locally stored API keys.',
version='v0.0.1',
long_description=README,
long_description_content_type="text/markdown",
url='https://github.com/espressoAndCode/ezkey',
packages=setuptools.find_packages(),
python_requires=">=2.7",
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
],
)
| StarcoderdataPython |
1661014 | import asyncio
import nextcord
from nextcord.ext import commands
from ..tools.Embeds import embeds
import uuid
import datetime
import os
from ..tools.Athena import Athena
class team_builder(commands.Cog, embeds):
    """Cog that scaffolds a complete team: roles, a private category and
    its text/voice channels."""

    # Metadata read by the cog loader.
    LOAD = True
    NAME = "Team Builder"

    def __init__(self, client: Athena):
        self.client = client

    @commands.command(name="addteam")
    async def add_team(self, ctx, *name):
        """Create roles, a hidden category and standard channels for a team.

        Usage: ``addteam <team name>`` (multi-word names are joined with
        spaces).
        """
        name = " ".join(name)
        category = await ctx.guild.create_category_channel(name=name)

        # One role per membership level; each gets access to the category.
        team_role = await ctx.guild.create_role(name=name)
        tryout_role = await ctx.guild.create_role(name=f"{name} Tryout")
        ringer_role = await ctx.guild.create_role(name=f"{name} Ringer")
        staff_role = await ctx.guild.create_role(name=f"{name} Staff")
        everyone = ctx.guild.default_role

        # Hide the category from @everyone, then grant the team roles access.
        await category.set_permissions(everyone, view_channel=False)
        await category.set_permissions(team_role, view_channel=True, connect=True)
        await category.set_permissions(tryout_role, view_channel=True, connect=True)
        await category.set_permissions(ringer_role, view_channel=True, connect=True)
        await category.set_permissions(staff_role, view_channel=True, connect=True)
        # Moderator access is currently disabled; to restore it:
        # moderator = nextcord.utils.get(ctx.guild.roles, id=951245208287334410)
        # await category.set_permissions(moderator, view_channel=True, connect=True)
        print("roles and cat permissions set")

        await category.create_text_channel(name="announcements")
        office_channel = await category.create_text_channel(name="office")
        await category.create_text_channel(name="replays")
        await category.create_text_channel(name="team-chat")
        await category.create_voice_channel(name="Scrim")
        await category.create_voice_channel(name="<NAME>")

        # The office is staff-only: hide it from the non-staff team roles.
        await office_channel.set_permissions(team_role, view_channel=False)
        await office_channel.set_permissions(tryout_role, view_channel=False)
        await office_channel.set_permissions(ringer_role, view_channel=False)
        await ctx.send("Team has been created")
| StarcoderdataPython |
1642785 | <reponame>trinamic/PyTrinamicMicro
'''
Example using the MAX14912PMB.
This script switches all the outputs to high and then back to low.
Created on 15.02.2021
@author: JH
'''
from pyb import Pin
from PyTrinamicMicro.platforms.motionpy2.modules.max.max14912 import MAX14912
import time
import logging

logger = logging.getLogger(__name__)
logger.info("MAX14912PMB example running")

# Pin/SPI wiring for each PMOD connector on the board.
pmod0 = {
    "pin_cs": Pin.cpu.A4,
    "pin_fltr": Pin.cpu.C6,
    "pin_cmd": Pin.cpu.C13,
    "spi": 1,
}
pmod1 = {
    "pin_cs": Pin.cpu.B12,
    "pin_fltr": Pin.cpu.C2,
    "pin_cmd": Pin.cpu.C7,
    "spi": 2,
}

# Change pmod connector here (pmod0 or pmod1).
connector = pmod0

max14912 = MAX14912(connector["pin_cs"], connector["spi"], connector["pin_fltr"], connector["pin_cmd"])

# Toggle all eight outputs forever: each output HIGH, then each LOW,
# with a short pause between switches.
while True:
    logger.info("Switching everything to HIGH")
    for y in range(0, 8):
        max14912.set_output(y, 1)
        time.sleep(0.25)
    logger.info("Switching everything to LOW")
    for y in range(0, 8):
        max14912.set_output(y, 0)
        time.sleep(0.25)
| StarcoderdataPython |
86229 | <gh_stars>0
from typing import ClassVar, List, Optional
from ...constants import AclOperation, AclPermissionType, ApiKey, ErrorCode, ResourceType
from ..base import ResponseData
class MatchingAcl:
    """One ACL entry matched by a deletion filter, together with the
    per-entry deletion result returned by the broker."""

    error_code: ErrorCode
    error_message: Optional[str]
    resource_type: ResourceType
    resource_name: str
    resource_pattern_type: int
    principal: str
    host: str
    operation: AclOperation
    permission_type: AclPermissionType

    def __init__(
        self,
        error_code: ErrorCode,
        error_message: Optional[str],
        resource_type: ResourceType,
        resource_name: str,
        resource_pattern_type: int,
        principal: str,
        host: str,
        operation: AclOperation,
        permission_type: AclPermissionType,
    ):
        """
        :param error_code: Response error code
        :type error_code: ErrorCode
        :param error_message: Response error message
        :type error_message: Optional[str]
        :param resource_type: The resource type
        :type resource_type: ResourceType
        :param resource_name: The resource name
        :type resource_name: str
        :param resource_pattern_type: The resource pattern type
        :type resource_pattern_type: int
        :param principal: The ACL principal
        :type principal: str
        :param host: The ACL host
        :type host: str
        :param operation: The ACL operation
        :type operation: AclOperation
        :param permission_type: The ACL permission type
        :type permission_type: AclPermissionType
        """
        self.error_code = error_code
        self.error_message = error_message
        self.resource_type = resource_type
        self.resource_name = resource_name
        self.resource_pattern_type = resource_pattern_type
        self.principal = principal
        self.host = host
        self.operation = operation
        self.permission_type = permission_type
class FilterResponse:
    """Result for a single deletion filter: an overall error plus the list
    of ACLs the filter matched (and attempted to delete)."""

    error_code: ErrorCode
    error_message: Optional[str]
    matching_acls: List[MatchingAcl]

    def __init__(self, error_code: ErrorCode, error_message: Optional[str], matching_acls: List[MatchingAcl]):
        """
        :param error_code: Response error code
        :type error_code: ErrorCode
        :param error_message: Response error message
        :type error_message: Optional[str]
        :param matching_acls: The matching ACLs
        :type matching_acls: List[MatchingAcl]
        """
        self.error_code = error_code
        self.error_message = error_message
        self.matching_acls = matching_acls
class DeleteAclsResponseData(ResponseData):
    """Response body for the Kafka DeleteAcls API (api_key DELETE_ACLS)."""

    throttle_time_ms: int
    filter_responses: List[FilterResponse]
    api_key: ClassVar[ApiKey] = ApiKey.DELETE_ACLS

    def __init__(self, throttle_time_ms: int, filter_responses: List[FilterResponse]):
        """
        :param throttle_time_ms: Duration in milliseconds for which the request was throttled due to quota violation
                                 (Zero if the request did not violate any quota)
        :type throttle_time_ms: int
        :param filter_responses: One result per deletion filter in the request
        :type filter_responses: List[FilterResponse]
        """
        self.throttle_time_ms = throttle_time_ms
        self.filter_responses = filter_responses
| StarcoderdataPython |
131945 | #!/usr/bin/env python
from __future__ import division
from past.utils import old_div
import unittest
import os.path
import sys
import numpy
import anuga
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.abstract_2d_finite_volumes.util import file_function
from anuga.utilities.system_tools import get_pathname_from_package
from anuga.structures.inlet_operator import Inlet_operator
class Test_inlet_operator(unittest.TestCase):
    """
    Test the boyd box operator, in particular the discharge_routine!
    """

    def setUp(self):
        pass

    def tearDown(self):
        # Remove the sww output produced by a test run.  Only OSError
        # (file missing, permission problems) is swallowed; the previous
        # bare except hid every other failure during cleanup.
        try:
            os.remove('Test_Outlet_Inlet.sww')
        except OSError:
            pass

    def _create_domain(self, d_length,
                       d_width,
                       dx,
                       dy,
                       elevation_0,
                       elevation_1,
                       stage_0,
                       stage_1):
        """Build a rectangular domain split into two halves at x = d_length/2,
        each half with its own elevation and initial stage, and reflective
        boundaries on all four sides."""

        points, vertices, boundary = rectangular_cross(int(old_div(d_length, dx)), int(old_div(d_width, dy)),
                                                       len1=d_length, len2=d_width)
        domain = Domain(points, vertices, boundary)
        domain.set_name('Test_Outlet_Inlet')  # Output name
        domain.set_store()
        domain.set_default_order(2)
        domain.H0 = 0.01
        domain.tight_slope_limiters = 1

        #----------------------------------------------------------------------
        # Setup initial conditions
        #----------------------------------------------------------------------
        def elevation(x, y):
            """Elevation: elevation_0 on the left half, elevation_1 on the right."""
            z = numpy.zeros(x.shape, dtype='d')
            z[:] = elevation_0
            numpy.putmask(z, x > old_div(d_length, 2), elevation_1)
            return z

        def stage(x, y):
            """Stage: stage_0 on the left half, stage_1 on the right."""
            z = numpy.zeros(x.shape, dtype='d')
            z[:] = stage_0
            numpy.putmask(z, x > old_div(d_length, 2), stage_1)
            return z

        domain.set_quantity('elevation', elevation)  # Use function for elevation
        domain.set_quantity('stage', stage)          # Use function for stage

        Br = anuga.Reflective_boundary(domain)
        domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})

        return domain

    def test_inlet_constant_Q(self):
        """test_inlet_Q

        This tests that the inlet operator adds the correct amount of water
        when both inlets have a constant rate.
        """
        stage_0 = 11.0
        stage_1 = 10.0
        elevation_0 = 10.0
        elevation_1 = 10.0

        domain_length = 200.0
        domain_width = 200.0

        domain = self._create_domain(d_length=domain_length,
                                     d_width=domain_width,
                                     dx=10.0,
                                     dy=10.0,
                                     elevation_0=elevation_0,
                                     elevation_1=elevation_1,
                                     stage_0=stage_0,
                                     stage_1=stage_1)

        vol0 = domain.compute_total_volume()

        finaltime = 3.0
        line1 = [[95.0, 10.0], [105.0, 10.0]]
        Q1 = 5.00
        line2 = [[10.0, 90.0], [20.0, 90.0]]
        Q2 = 10.0

        Inlet_operator(domain, line1, Q1, logging=False)
        Inlet_operator(domain, line2, Q2)

        for t in domain.evolve(yieldstep=1.0, finaltime=finaltime):
            pass

        vol1 = domain.compute_total_volume()

        # Both inlets ran for the whole simulation: added volume is
        # (Q1 + Q2) * finaltime.
        assert numpy.allclose((Q1 + Q2) * finaltime, vol1 - vol0, rtol=1.0e-8)
        assert numpy.allclose((Q1 + Q2) * finaltime, domain.fractional_step_volume_integral, rtol=1.0e-8)

    def test_inlet_constant_Q_polygon(self):
        """test_inlet_Q

        Same as test_inlet_constant_Q but the inlet region is a polygon
        rather than a line.
        """
        stage_0 = 11.0
        stage_1 = 10.0
        elevation_0 = 10.0
        elevation_1 = 10.0

        domain_length = 200.0
        domain_width = 200.0

        domain = self._create_domain(d_length=domain_length,
                                     d_width=domain_width,
                                     dx=10.0,
                                     dy=10.0,
                                     elevation_0=elevation_0,
                                     elevation_1=elevation_1,
                                     stage_0=stage_0,
                                     stage_1=stage_1)

        vol0 = domain.compute_total_volume()

        finaltime = 3.0
        poly1 = [[95.0, 10.0], [105.0, 10.0], [105, 20.0], [95.0, 20.0]]
        Q1 = 5.00

        Inlet_operator(domain, poly1, Q1, logging=False)

        for t in domain.evolve(yieldstep=1.0, finaltime=finaltime):
            pass

        vol1 = domain.compute_total_volume()

        assert numpy.allclose((Q1) * finaltime, vol1 - vol0, rtol=1.0e-8)
        assert numpy.allclose((Q1) * finaltime, domain.fractional_step_volume_integral, rtol=1.0e-8)

    def test_inlet_variable_Q(self):
        """test_inlet_Q

        This tests that the inlet operator adds the correct amount of water
        when the rates come from time-series (.tms) files.
        """
        stage_0 = 11.0
        stage_1 = 10.0
        elevation_0 = 10.0
        elevation_1 = 10.0

        domain_length = 200.0
        domain_width = 200.0

        domain = self._create_domain(d_length=domain_length,
                                     d_width=domain_width,
                                     dx=10.0,
                                     dy=10.0,
                                     elevation_0=elevation_0,
                                     elevation_1=elevation_1,
                                     stage_0=stage_0,
                                     stage_1=stage_1)

        vol0 = domain.compute_total_volume()

        finaltime = 3.0

        # Locate the inlet time-series data shipped with anuga.structures.
        path = get_pathname_from_package('anuga.structures')
        filename1 = os.path.join(path, 'tests', 'data', 'inlet_operator_test1.tms')
        filename2 = os.path.join(path, 'tests', 'data', 'inlet_operator_test2.tms')

        line1 = [[95.0, 10.0], [105.0, 10.0]]
        Q1 = file_function(filename=filename1, quantities=['hydrograph'])
        line2 = [[10.0, 90.0], [20.0, 90.0]]
        Q2 = file_function(filename=filename2, quantities=['hydrograph'])

        Inlet_operator(domain, line1, Q1)
        Inlet_operator(domain, line2, Q2)

        for t in domain.evolve(yieldstep=1.0, finaltime=finaltime):
            pass

        vol1 = domain.compute_total_volume()

        # 13.5 is the integral of the two hydrographs over the run.
        assert numpy.allclose(13.5, vol1 - vol0, rtol=1.0e-8)
        assert numpy.allclose(vol1 - vol0, domain.fractional_step_volume_integral, rtol=1.0e-8)

    def test_inlet_variable_Q_default(self):
        """test_inlet_Q

        As test_inlet_variable_Q, but the run outlasts the time series so the
        operators fall back to their `default` rates (6 and 3).
        """
        stage_0 = 11.0
        stage_1 = 10.0
        elevation_0 = 10.0
        elevation_1 = 10.0

        domain_length = 200.0
        domain_width = 200.0

        domain = self._create_domain(d_length=domain_length,
                                     d_width=domain_width,
                                     dx=10.0,
                                     dy=10.0,
                                     elevation_0=elevation_0,
                                     elevation_1=elevation_1,
                                     stage_0=stage_0,
                                     stage_1=stage_1)

        vol0 = domain.compute_total_volume()

        finaltime = 5.0

        # Locate the inlet time-series data; restore the cwd afterwards in
        # case path discovery changed it.
        baseDir = os.getcwd()
        path = get_pathname_from_package('anuga.structures')
        filename1 = os.path.join(path, 'tests', 'data', 'inlet_operator_test1.tms')
        filename2 = os.path.join(path, 'tests', 'data', 'inlet_operator_test2.tms')

        line1 = [[95.0, 10.0], [105.0, 10.0]]
        Q1 = file_function(filename=filename1, quantities=['hydrograph'])
        line2 = [[10.0, 90.0], [20.0, 90.0]]
        Q2 = file_function(filename=filename2, quantities=['hydrograph'])
        os.chdir(baseDir)

        # The fall-back to default rates emits warnings; silence them for
        # the duration of the evolve loop only.
        import warnings
        warnings.simplefilter("ignore")

        Inlet_operator(domain, line1, Q1, default=6)
        Inlet_operator(domain, line2, Q2, default=3)

        for t in domain.evolve(yieldstep=1.0, finaltime=finaltime):
            pass

        warnings.simplefilter("default")

        vol1 = domain.compute_total_volume()

        # 31.5 = hydrograph integrals plus the default rates over the
        # remaining simulation time.
        assert numpy.allclose(31.5, vol1 - vol0, rtol=1.0e-8)
        assert numpy.allclose(vol1 - vol0, domain.fractional_step_volume_integral, rtol=1.0e-8)
# =========================================================================
if __name__ == "__main__":
    # Run the whole suite when this file is executed directly.
    suite = unittest.makeSuite(Test_inlet_operator, 'test')
    runner = unittest.TextTestRunner()
    runner.run(suite)
| StarcoderdataPython |
3376726 | # Generated by Django 2.1.1 on 2018-11-22 03:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Must run after the previous home-app migration so the fields exist.
    dependencies = [
        ('home', '0003_auto_20181121_2041'),
    ]

    # Set both contact phone fields to max_length=14.
    operations = [
        migrations.AlterField(
            model_name='contact',
            name='cell_phone',
            field=models.CharField(max_length=14),
        ),
        migrations.AlterField(
            model_name='contact',
            name='work_phone',
            field=models.CharField(max_length=14),
        ),
    ]
| StarcoderdataPython |
1778983 | """
This file controls all DB setup and session logic.
"""
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from utils import CONFIG, uuid
# Connection settings come from the [database] section of the config file.
# NOTE: the password lines were corrupted by automated secret scrubbing
# ("<PASSWORD>" placeholders); reconstructed to follow the same CONFIG.get
# pattern as the other three settings.
db_url = CONFIG.get('database', 'url')
db_name = CONFIG.get('database', 'name')
db_user = CONFIG.get('database', 'username')
db_pass = CONFIG.get('database', 'password')

connection_fmt = '://{username}:{password}@{url}/{name}'
DRIVER = 'postgresql+psycopg2'
FULL_DB_URL = DRIVER + connection_fmt.format(username=db_user, password=db_pass,
                                             url=db_url, name=db_name)

# SERIALIZABLE is the strictest isolation level.  Sessions are thread-scoped
# and never auto-commit/auto-flush, so callers must commit explicitly
# (see safe_commit below in this module).
engine = create_engine(FULL_DB_URL, convert_unicode=True,
                       isolation_level="SERIALIZABLE")
session = scoped_session(sessionmaker(autocommit=False,
                                      autoflush=False,
                                      bind=engine))
Base = declarative_base()
class MyBase(object):
    """Mixin for declarative table models.

    Centralises behaviour shared by every SQLAlchemy model in the project:
    keyword-based construction, session insertion and dict serialisation.
    """

    def __init__(self, **kwargs):
        """Assign every keyword argument as an attribute; if the mapper has
        an ``id`` column that was not supplied, generate a fresh uuid."""
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
        if hasattr(self, 'id') and not self.id:
            self.id = uuid()

    def insert(self):
        """Stage this model in the session so it is inserted on commit."""
        session.add(self)
        return self.id

    def to_dict(self):
        '''
        Convenience method to generate a dict from a model instance.
        '''
        return {column.name: getattr(self, column.name)
                for column in self.__table__.columns}
def safe_commit():
    """ This commit function will rollback the transaction if
    committing goes awry."""
    # NOTE(review): local import, presumably to avoid an import cycle or to
    # keep the module import cheap — confirm before moving it to the top.
    from sqlalchemy.exc import InvalidRequestError
    try:
        session.commit()
    except InvalidRequestError as exc:
        # InvalidRequestError is only logged and swallowed (no rollback,
        # no re-raise) — apparently deliberate; other errors below do both.
        # NOTE(review): Python 2 print statement — this module targets py2.
        print exc # to the app log
    except (StandardError, SQLAlchemyError):
        # Any other application or database error: undo the transaction and
        # let the caller see the original exception.
        session.rollback()
        raise
| StarcoderdataPython |
3272437 | <reponame>Yoooi0/chaturbate-osr
import asyncio
import json
import logging
import math
import random
import re
import requests
import serial
import string
import sys
import time
import websockets
# Built-in device configuration; merged into the user settings below under
# the 'device' key (user settings files cannot override it).
deviceSettings = {
    'port': 'COM4',
    'interval': 1 / 60,
    'range': {
        'L0': [0.0, 1.0],
        'L1': [0.0, 1.0],
        'L2': [0.0, 1.0],
        'R0': [0.0, 1.0],
        'R1': [0.0, 1.0],
        'R2': [0.0, 1.0],
        'V0': [0.0, 1.0],
        'V1': [0.0, 1.0]
    }
}

if len(sys.argv) != 2:
    print('Usage: {} settings-file.json'.format(sys.argv[0]))
    # Exit with a non-zero status so callers/scripts can detect misuse
    # (the previous bare exit() returned status 0).
    sys.exit(1)

logging.basicConfig(format='[%(asctime)s][%(levelname)s] %(message)s', datefmt='%H:%M:%S')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

logger.info('loading settings: %s', sys.argv[1])
with open(sys.argv[1]) as f:
    settings = json.load(f)
settings['device'] = deviceSettings
def clamp(v, a, b):
    """Clamp *v* into the closed interval [a, b] (assumes a <= b)."""
    lower, upper = a, b
    return max(lower, min(v, upper))
def clamp01(v):
    """Clamp *v* into [0, 1]."""
    return clamp(v, 0, 1)
def lerp(a, b, t):
    """Linearly interpolate from *a* to *b* by factor *t* (t=0 -> a, t=1 -> b)."""
    # Same two products as the classic a*(1-t) + b*t form, summed in the
    # commuted order — bit-identical for floats.
    return b * t + a * (1 - t)
class AbstractDevice():
    """Base class for the serial device driver: owns the serial port, the
    current per-axis positions and command generation/transmission."""

    def __init__(self, loop, queue):
        self.loop = loop
        self.queue = queue  # tip amounts are consumed from this queue
        try:
            self.device = serial.Serial(settings['device']['port'], 115200)
        except Exception as e:
            # Keep running without hardware; commands are still logged.
            self.device = None
            logger.fatal(e)
        # Defaults: 0.5 for the L*/R* axes, 0.0 for the V* axes.
        self.defaultPositions = {
            'L0': 0.5,
            'L1': 0.5,
            'L2': 0.5,
            'R0': 0.5,
            'R1': 0.5,
            'R2': 0.5,
            'V0': 0.0,
            'V1': 0.0
        }
        self.positions = self.defaultPositions.copy()
        self.updateDevice()

    async def run(self):
        # Subclasses implement the tip-processing loop.
        pass

    def getCommand(self, axis, value):
        """Format one axis command: map the normalised value into the
        user-configured range, scale to 0..999 and append the interval."""
        range = settings['device']['range'][axis]
        value = lerp(range[0], range[1], value)
        value = clamp(int(value * 1000), 0, 999)
        interval = int(settings['device']['interval'] * 1000)
        return '{}{:03d}I{}'.format(axis, value, interval)

    def updateDevice(self):
        # Send one combined line covering every axis.
        commands = [self.getCommand(k, v) for k, v in self.positions.items()]
        logger.debug('devc: %s', ' '.join(commands))
        if self.device:
            self.device.write('{}\n'.format(' '.join(commands)).encode())
class TipMenuDevice(AbstractDevice):
    """Maps tip amounts to configured tip-menu actions and plays them."""

    async def run(self):
        # Consume tips one at a time; once the queue drains, ease every
        # axis back to its default position over one second.
        while True:
            amount = await self.queue.get()
            await self.process(amount)
            self.queue.task_done()
            if self.queue.empty():
                await self.reset(1)

    async def process(self, amount):
        """Look up and execute the tip-menu entry matching *amount*."""
        actions, duration = self.getActions(amount)
        if not actions:
            logger.warning('devc: could not find actions for %d tip!', amount)
            return

        s = ', '.join(['[{}, {}, {}]'.format(a['axis'], a['motion'], a['frequency']) for a in actions])
        logger.info('devc: excecuting %d tip: "%s" for %ds', amount, s, duration)
        await self.execute(actions, duration)

    async def execute(self, actions, duration):
        """Drive the actions' axes for *duration* seconds while easing all
        other (idle) axes back to their defaults."""
        def update(t):
            for action in actions:
                # Respect an optional per-action start delay.
                if t < action.get('delay', 0):
                    continue
                # Phase within the action's period, with optional offset.
                actionT = ((t + action.get('offset', 0)) % action['frequency']) / action['frequency']
                self.positions[action['axis']] = self.getValue(action, actionT)
            resetT = clamp01(t / min(duration, 1))
            for axis in idleAxes:
                self.positions[axis] = lerp(positionsCopy[axis], self.defaultPositions[axis], resetT)
            self.updateDevice()

        # Axes not used by any action are eased back to defaults above.
        idleAxes = [axis for axis in self.positions.keys() if not axis in [action['axis'] for action in actions]]
        positionsCopy = self.positions.copy()

        start = time.perf_counter()
        while((time.perf_counter() - start) <= duration):
            update(time.perf_counter() - start)
            await asyncio.sleep(settings['device']['interval'])
        # Final update at exactly t == duration so the motion ends cleanly.
        update(duration)

    async def reset(self, duration):
        """Smoothly move every axis back to its default over *duration* s."""
        def update(t):
            for k, v in self.positions.items():
                self.positions[k] = lerp(positionsCopy[k], self.defaultPositions[k], t)
            self.updateDevice()

        logger.info('devc: resetting positions for {}s'.format(duration))
        positionsCopy = self.positions.copy()
        start = time.perf_counter()
        while((time.perf_counter() - start) <= duration):
            update((time.perf_counter() - start) / duration)
            await asyncio.sleep(settings['device']['interval'])
        update(1)

    def getActions(self, amount):
        """Return (actions, duration) for the tip-menu entry whose amount
        (exact value or [min, max] range) matches, else (None, None)."""
        for option in settings['tipmenu']:
            range = option['amount']
            if (len(range) == 1 and range[0] == amount) or (len(range) == 2 and amount >= range[0] and amount <= range[1]):
                return option['actions'], option['duration']
        return None, None

    def getValue(self, action, t):
        """Evaluate the action's motion waveform at phase t in [0, 1),
        returning a position in [0, 1] (default position if the motion
        name is unknown)."""
        value = self.defaultPositions[action['axis']]
        if action['motion'] == 'triangle':
            value = abs(abs(t * 2 - 1.5) - 1)
        elif action['motion'] == 'sine':
            value = -math.sin(t * math.pi * 2) / 2 + 0.5
        elif action['motion'] == 'bounce':
            x = t * math.pi * 2 - math.pi / 4
            value = -(math.sin(x)**5 + math.cos(x)**5) / 2 + 0.5
        elif action['motion'] == 'sharp':
            x = (t + 0.4195) * math.pi / 2
            s = math.sin(x)**2
            c = math.cos(x)**2
            # sqrt(|cos^2 - sin^2|) gives a sharp-cusped waveform.
            value = math.sqrt(max(c - s, s - c))
        return clamp01(value)
class ExcitementDevice(AbstractDevice):
    """Drives the L0 axis with a sine motion whose speed and amplitude
    follow an 'excitement' level that rises with tips and decays over time."""

    def __init__(self, loop, queue):
        AbstractDevice.__init__(self, loop, queue)
        self.excitiment = 0.0                    # current excitement in [0, 1]
        self.lastTipTime = time.perf_counter()   # used to speed up decay over time
        self.dt = 0.01                           # current phase step per frame
        self.tick = 0                            # accumulated phase for the sine

    async def run(self):
        # Poll for tips without blocking so execute() keeps running the
        # motion every frame even when no tips arrive.
        while True:
            if not self.queue.empty():
                amount = await self.queue.get()
                self.process(amount)
                self.queue.task_done()
            await self.execute()

    def process(self, amount):
        self.lastTipTime = time.perf_counter()
        # Each 500 tokens adds one full unit of excitement (capped at 1).
        self.excitiment = clamp01(self.excitiment + amount / 500)
        logger.info('devc: excecuting %d tip', amount)

    async def execute(self):
        """Advance one frame: decay excitement, update speed/amplitude and
        move L0 along its sine trajectory."""
        # Decay accelerates as time since the last tip grows (over ~12 s).
        alpha = lerp(1.5, 0.5, (time.perf_counter() - self.lastTipTime) / 12)
        decay = math.pow(self.excitiment, math.pow(math.e, alpha)) / 8
        self.excitiment = clamp01(self.excitiment - decay * settings['device']['interval'])

        # Excitement maps to motion speed (targetScale) and amplitude (spread).
        targetScale = lerp(0.005, 0.5, self.excitiment)
        spread = lerp(0.25, 1, self.excitiment)
        self.dt = lerp(self.dt, targetScale, 0.05)  # smooth speed changes
        self.tick += self.dt

        self.positions['L0'] = lerp(0.5 - spread / 2, 0.5 + spread / 2, (math.sin(self.tick) + 1) / 2)
        self.updateDevice()

        logger.info('devc: excitiment: %f decay: %f dt: %f spread: %f L0: %f', self.excitiment, decay, self.dt, spread, self.positions['L0'])
        await asyncio.sleep(settings['device']['interval'])
class Chaturbate():
    """Connects to a Chaturbate room's chat websocket and pushes tip amounts
    onto the shared queue (after the configured delay)."""

    def __init__(self, loop, queue):
        self.loop = loop
        self.queue = queue

        response = requests.get('https://chaturbate.com/{}/'.format(settings['room']), headers={'User-Agent': 'Mozilla/5.0'})
        # The room page embeds a JSON "dossier" with chat server/credentials.
        dossier = re.search(r'window\.initialRoomDossier\s?=\s?"(.+?)";', response.text, re.UNICODE | re.MULTILINE).group(1)
        dossier = dossier.encode().decode("unicode-escape")
        dossier = json.loads(dossier)

        if dossier['room_status'] == 'offline':
            logger.info('Room is offline!')
            return

        # SockJS-style endpoint path: random server id + random session id.
        id0 = random.randint(0, 1000)
        id1 = ''.join(random.choice(string.ascii_lowercase) for x in range(8))
        self.wschat = dossier['wschat_host']
        self.wschat = self.wschat.replace('https://', 'wss://')
        self.wschat = '{}/{}/{}/websocket'.format(self.wschat, id0, id1)

        self.username = dossier['chat_username']
        # NOTE(review): this assignment was corrupted by automated secret
        # scrubbing; the dossier key name is reconstructed from the
        # chat_username pattern — verify against a live dossier payload.
        self.password = dossier['chat_password']
        self.roomPassword = dossier['room_pass']

    async def run(self):
        """Connect and process chat messages forever."""
        logger.info('connecting to websocket: %s', self.wschat)
        async with websockets.connect(self.wschat) as ws:
            self.connectedTime = time.perf_counter()
            while True:
                message = await ws.recv()
                logger.debug('received: %s', message)
                if not message:
                    continue
                await self.process(ws, message)

    async def process(self, ws, message):
        """Handle one SockJS frame: 'o' open, 'h' heartbeat, 'a' data."""
        if message[0] == 'o':
            logger.info('sending connect')
            await ws.send(self.createConnectedMessage())
        elif message[0] == 'h':
            pass  # heartbeat, nothing to do
        elif message[0] == 'a':
            data = message[1:]
            data = json.loads(data)
            data = json.loads(data[0])

            if data['method'] == 'onAuthResponse':
                result = int(data['args'][0])
                logger.info('received onAuthResponse %d', result)
                if result != 1:
                    logger.critical('Failed to authenticate!')
                    return
                await ws.send(self.createAuthResponseMessage(result))
            elif data['method'] == 'onNotify':
                args = json.loads(data['args'][0])
                if args['type'] == 'tip_alert':
                    # Tips received right after (re)connecting are replays
                    # of old alerts; ignore the first two seconds.
                    ignored = time.perf_counter() - self.connectedTime < 2
                    logger.info('received tip_alert %s %d %s', args['from_username'], args['amount'], 'ignored' if ignored else '')
                    if not ignored:
                        loop.call_later(settings['delay'], self.pushTip, args['amount'])
            elif data['method'] == 'onRoomMsg':
                pass  # chat messages are not used

    def createMessage(self, method, data):
        """Wrap a method/data payload in the double-encoded SockJS format."""
        message = json.dumps({
            'method': method,
            'data': data
        })
        message = json.dumps([message])
        logger.debug('sending: %s', message)
        return message

    def createConnectedMessage(self):
        # NOTE(review): the credential values here were also corrupted by
        # secret scrubbing; reconstructed to send the credentials parsed
        # from the dossier in __init__.
        return self.createMessage('connect', {
            'user': self.username,
            'password': self.password,
            'room': settings['room'],
            'room_password': self.roomPassword
        })

    def createAuthResponseMessage(self, result):
        return self.createMessage('joinRoom', {
            'room': settings['room']
        })

    def pushTip(self, amount):
        self.queue.put_nowait(amount)
# Wire everything together: one event loop, one shared tip queue, one chat
# listener feeding one device driver.
loop = asyncio.new_event_loop()
q = asyncio.Queue(loop=loop)
chaturbate = Chaturbate(loop, q)
device = TipMenuDevice(loop, q)

try:
    loop.create_task(chaturbate.run())
    loop.create_task(device.run())
    loop.run_forever()
finally:
    # Always release the loop's resources, even on Ctrl+C.
    loop.close()
161240 | <reponame>lendingblock/lb-py<filename>lendingblock/orders.py
from .component import CrudComponent
class Orders(CrudComponent):
pass
| StarcoderdataPython |
3311804 | <filename>projects/simple_2d_game/main.py
# -*- coding: utf-8-sig -*-
#
# $ brew install hg sdl sdl_image sdl_ttf sdl_mixer portmidi
import sys
import os
import pygame
from pygame.sprite import Group
from settings import Settings
from ship import Ship
from alien import Alien
import game_functions as gf
def run_game():
    """Initialise pygame, create the game objects and run the main loop.

    Never returns; the loop runs until gf.check_events exits the process.
    """
    pygame.init()
    defaultSettings = Settings()
    screen = pygame.display.set_mode(
        (defaultSettings.screen_width, defaultSettings.screen_height))
    pygame.display.set_caption("Alien Invasion")

    # create a ship
    ship = Ship(defaultSettings, screen)

    # create an alien fleet
    aliens = Group()
    gf.create_fleet(defaultSettings, screen, ship, aliens)

    # create a container to save bullets
    bullets = Group()

    # Main loop: handle input, advance state, redraw.
    while True:
        gf.check_events(defaultSettings, screen, ship, bullets)
        ship.update()
        gf.update_bullets(bullets)
        gf.update_aliens(defaultSettings, aliens)
        gf.update_screen(defaultSettings, screen, ship, aliens, bullets)
# Run from the directory containing this file so relative asset paths
# resolve regardless of the caller's working directory.
rootPath = os.path.dirname(os.path.abspath(__file__))
originalPath = os.getcwd()
os.chdir(rootPath)
workingPath = os.getcwd()

startInfo = """
===== Start Info =====
* Root Path:
- {root_path}
* Original Working Path:
- {original_path}
* Changed Working Path:
- {working_path}
======================
""".format(root_path = rootPath, original_path = originalPath, working_path = workingPath)

print(startInfo)
run_game()
1645065 | """Platform for sensor integration."""
from __future__ import annotations
import logging
from typing import Final
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (ELECTRIC_POTENTIAL_VOLT, ELECTRIC_CURRENT_AMPERE, POWER_WATT, POWER_KILO_WATT,
FREQUENCY_HERTZ, ENERGY_WATT_HOUR, ENERGY_KILO_WATT_HOUR, TEMP_CELSIUS,
SIGNAL_STRENGTH_DECIBELS, DEVICE_CLASS_CURRENT, DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER_FACTOR, DEVICE_CLASS_POWER, DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_VOLTAGE)
from homeassistant.helpers.update_coordinator import CoordinatorEntity, DataUpdateCoordinator
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING
from homeassistant.components.sensor import SensorEntity
from .const import DOMAIN
POWER_FACTOR: Final = "%"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities) -> None:
    """Set up one sensor entity per value reported by the go-e Charger.

    Pulls the integration's shared DataUpdateCoordinator out of ``hass.data``
    and registers sensors for voltages, currents, powers, power factors,
    energy counters, temperatures, Wi-Fi signal strength and a few decoded
    status fields.  All entities share the coordinator, so the charger is
    polled only once per update cycle.
    """
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    # The charger's serial number namespaces the entity unique_ids and the
    # device registry entry.
    serial = config_entry.data["serial"]

    def car_state_data(data):
        """Map the numeric ``car`` status code to a human-readable label."""
        car_state_texts = {
            0: "Unknown",
            1: "Idle",
            2: "Charging",
            3: "WaitCar",
            4: "Complete",
            5: "Error"
        }
        if data["car"] in car_state_texts:
            return car_state_texts[data["car"]]
        # Fall back to showing the raw code for values we do not know.
        return "Unknown (" + str(data["car"]) + ")"

    def error_data(data):
        """Map the numeric ``err`` error code to a human-readable label."""
        error_texts = {
            0: "None",
            1: "FiAc",
            2: "FiDc",
            3: "Phase",
            4: "Overvolt",
            5: "Overamp",
            6: "Diode",
            7: "PpInvalid",
            8: "GndInvalid",
            9: "ContactorStuck",
            10: "ContactorMiss",
            11: "FiUnknown",
            12: "Unknown",
            13: "Overtemp",
            14: "NoComm",
            15: "StatusLockStuckOpen",
            16: "StatusLockStuckLocked",
            17: "Reserved20",
            18: "Reserved21",
            19: "Reserved22",
            20: "Reserved23",
            21: "Reserved24"
        }
        if data["err"] in error_texts:
            return error_texts[data["err"]]
        return "Unknown (" + str(data["err"]) + ")"

    def model_status_data(data):
        """Map the numeric ``modelStatus`` code to a human-readable label."""
        model_status_texts = {
            0: "NotChargingBecauseNoChargeCtrlData",
            1: "NotChargingBecauseOvertemperature",
            2: "NotChargingBecauseAccessControlWait",
            3: "ChargingBecauseForceStateOn",
            4: "NotChargingBecauseForceStateOff",
            5: "NotChargingBecauseScheduler",
            6: "NotChargingBecauseEnergyLimit",
            7: "ChargingBecauseAwattarPriceLow",
            8: "ChargingBecauseAutomaticStopTestLadung",
            9: "ChargingBecauseAutomaticStopNotEnoughTime",
            10: "ChargingBecauseAutomaticStop",
            11: "ChargingBecauseAutomaticStopNoClock",
            12: "ChargingBecausePvSurplus",
            13: "ChargingBecauseFallbackGoEDefault",
            14: "ChargingBecauseFallbackGoEScheduler",
            15: "ChargingBecauseFallbackDefault",
            16: "NotChargingBecauseFallbackGoEAwattar",
            17: "NotChargingBecauseFallbackAwattar",
            18: "NotChargingBecauseFallbackAutomaticStop",
            19: "ChargingBecauseCarCompatibilityKeepAlive",
            20: "ChargingBecauseChargePauseNotAllowed",
            22: "NotChargingBecauseSimulateUnplugging",
            23: "NotChargingBecausePhaseSwitch",
            24: "NotChargingBecauseMinPauseDuration"
        }
        if data["modelStatus"] in model_status_texts:
            return model_status_texts[data["modelStatus"]]
        return "Unknown (" + str(data["modelStatus"]) + ")"

    # One entity per measurement.  The ``nrg`` array indexes follow the go-e
    # status API layout (0-3 voltages, 4-6 currents, 7-11 powers, 12-15
    # power factors).
    # NOTE(review): the GoeChargerSensorNative entities below pass
    # POWER_KILO_WATT as the *native* unit while their native value callbacks
    # return the raw W / Wh readings -- this looks inconsistent; confirm
    # against the Home Assistant sensor documentation before changing.
    async_add_entities([
        GoeChargerSensor(coordinator, "Voltage L1", serial, "voltage_l1", ELECTRIC_POTENTIAL_VOLT, DEVICE_CLASS_VOLTAGE,
                         STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][0]),
        GoeChargerSensor(coordinator, "Voltage L2", serial, "voltage_l2", ELECTRIC_POTENTIAL_VOLT, DEVICE_CLASS_VOLTAGE,
                         STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][1]),
        GoeChargerSensor(coordinator, "Voltage L3", serial, "voltage_l3", ELECTRIC_POTENTIAL_VOLT, DEVICE_CLASS_VOLTAGE,
                         STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][2]),
        GoeChargerSensor(coordinator, "Voltage N", serial, "voltage_n", ELECTRIC_POTENTIAL_VOLT, DEVICE_CLASS_VOLTAGE,
                         STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][3]),
        GoeChargerSensor(coordinator, "Current L1", serial, "current_l1", ELECTRIC_CURRENT_AMPERE, DEVICE_CLASS_CURRENT,
                         STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][4]),
        GoeChargerSensor(coordinator, "Current L2", serial, "current_l2", ELECTRIC_CURRENT_AMPERE, DEVICE_CLASS_CURRENT,
                         STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][5]),
        GoeChargerSensor(coordinator, "Current L3", serial, "current_l3", ELECTRIC_CURRENT_AMPERE, DEVICE_CLASS_CURRENT,
                         STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][6]),
        GoeChargerSensorNative(coordinator, "Power L1", serial, "power_l1", POWER_KILO_WATT, DEVICE_CLASS_POWER,
                               STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][7] / 1000), POWER_KILO_WATT,
                               lambda data: data["nrg"][7]),
        GoeChargerSensorNative(coordinator, "Power L2", serial, "power_l2", POWER_KILO_WATT, DEVICE_CLASS_POWER,
                               STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][8] / 1000), POWER_KILO_WATT,
                               lambda data: data["nrg"][8]),
        GoeChargerSensorNative(coordinator, "Power L3", serial, "power_l3", POWER_KILO_WATT, DEVICE_CLASS_POWER,
                               STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][9] / 1000), POWER_KILO_WATT,
                               lambda data: data["nrg"][9]),
        GoeChargerSensorNative(coordinator, "Power N", serial, "power_n", POWER_KILO_WATT, DEVICE_CLASS_POWER,
                               STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][10] / 1000), POWER_KILO_WATT,
                               lambda data: data["nrg"][10]),
        GoeChargerSensorNative(coordinator, "Power Total", serial, "power_total", POWER_KILO_WATT, DEVICE_CLASS_POWER,
                               STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][11] / 1000), POWER_KILO_WATT,
                               lambda data: data["nrg"][11]),
        GoeChargerSensor(coordinator, "Powerfactor L1", serial, "powerfactor_l1", POWER_FACTOR,
                         DEVICE_CLASS_POWER_FACTOR, STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][12]),
        GoeChargerSensor(coordinator, "Powerfactor L2", serial, "powerfactor_l2", POWER_FACTOR,
                         DEVICE_CLASS_POWER_FACTOR, STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][13]),
        GoeChargerSensor(coordinator, "Powerfactor L3", serial, "powerfactor_l3", POWER_FACTOR,
                         DEVICE_CLASS_POWER_FACTOR, STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][14]),
        GoeChargerSensor(coordinator, "Powerfactor N", serial, "powerfactor_n", POWER_FACTOR, DEVICE_CLASS_POWER_FACTOR,
                         STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][15]),
        GoeChargerSensor(coordinator, "Frequency", serial, "frequency", FREQUENCY_HERTZ, None, STATE_CLASS_MEASUREMENT,
                         "fhz", lambda data: data["fhz"]),
        GoeChargerSensorNative(coordinator, "Charged", serial, "charged", ENERGY_KILO_WATT_HOUR, DEVICE_CLASS_ENERGY,
                               STATE_CLASS_TOTAL_INCREASING, "wh", (lambda data: data["wh"] / 1000), POWER_KILO_WATT,
                               lambda data: data["wh"]),
        GoeChargerSensorNative(coordinator, "Charged total", serial, "charged_total", ENERGY_KILO_WATT_HOUR,
                               DEVICE_CLASS_ENERGY, STATE_CLASS_TOTAL_INCREASING, "eto",
                               (lambda data: data["eto"] / 1000), POWER_KILO_WATT, lambda data: data["eto"]),
        GoeChargerSensor(coordinator, "Temperature 1", serial, "temperature_1", TEMP_CELSIUS, DEVICE_CLASS_TEMPERATURE,
                         STATE_CLASS_MEASUREMENT, "tma", lambda data: data["tma"][0]),
        GoeChargerSensor(coordinator, "Temperature 2", serial, "temperature_2", TEMP_CELSIUS, DEVICE_CLASS_TEMPERATURE,
                         STATE_CLASS_MEASUREMENT, "tma", lambda data: data["tma"][1]),
        GoeChargerSensor(coordinator, "WiFi RSSI", serial, "wifi_rssi", SIGNAL_STRENGTH_DECIBELS,
                         DEVICE_CLASS_SIGNAL_STRENGTH, STATE_CLASS_MEASUREMENT, "rssi", lambda data: data["rssi"]),
        GoeChargerSensor(coordinator, "Cable current limit", serial, "cable_current_limit", ELECTRIC_CURRENT_AMPERE,
                         DEVICE_CLASS_CURRENT, None, "cbl", lambda data: data["cbl"]),
        GoeChargerSensor(coordinator, "Allowed current", serial, "allowed_current", ELECTRIC_CURRENT_AMPERE,
                         DEVICE_CLASS_CURRENT, None, "acu", lambda data: "" if data["acu"] is None else data["acu"]),
        GoeChargerSensor(coordinator, "Car state", serial, "car_state", None, None, None, "car", car_state_data),
        GoeChargerSensor(coordinator, "Error", serial, "error", None, None, None, "err", error_data),
        GoeChargerSensor(coordinator, "Model status", serial, "model_status", None, None, None, "modelStatus",
                         model_status_data),
    ])
class GoeChargerSensor(CoordinatorEntity, SensorEntity):
    """Representation of a Sensor."""

    def __init__(self, coordinator: DataUpdateCoordinator, name: str, serial: str, unique_id: str,
                 unit_of_measurement: str | None, device_class: str | None, state_class: str | None, key: str,
                 state_cb):
        """Pass coordinator to CoordinatorEntity.

        Args:
            coordinator: shared coordinator that polls the charger API.
            name: friendly entity name.
            serial: charger serial number; namespaces the unique id and
                ties the entity to its device registry entry.
            unique_id: entity-specific suffix of the unique id.
            unit_of_measurement: unit shown in Home Assistant, or None.
            device_class: Home Assistant device class, or None.
            state_class: Home Assistant state class, or None.
            key: key in the coordinator data dict this sensor reads.
            state_cb: callable mapping the coordinator data dict to the
                sensor state.
        """
        super().__init__(coordinator)
        self._name = name
        self._serial = serial
        self._unique_id = unique_id
        self._unit_of_measurement = unit_of_measurement
        self._device_class = device_class
        self._state_class = state_class
        self._key = key
        self._state_cb = state_cb

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique id of the device."""
        return "goe_charger_" + self._serial + "_" + self._unique_id

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        # Available only when the last coordinator update produced a payload
        # that contains a non-null value under this sensor's key.
        return (self.coordinator.data is not None and
                self._key in self.coordinator.data and
                self.coordinator.data[self._key] is not None)

    @property
    def state(self):
        """Return the state of the sensor."""
        return None if not self.available else self._state_cb(self.coordinator.data)

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement

    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class

    @property
    def state_class(self):
        """Return the state class."""
        return self._state_class

    async def async_update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        await self.coordinator.async_request_refresh()

    @property
    def device_info(self):
        """Get attributes about the device."""
        # All sensors of one charger share the serial-keyed device entry.
        return {
            "identifiers": {(DOMAIN, self._serial)}
        }
class GoeChargerSensorNative(GoeChargerSensor):
    """Sensor that additionally exposes a separate native value/unit pair.

    Used where the state shown to the user is scaled (e.g. kW) while the
    charger reports a raw reading, so Home Assistant also receives the
    unconverted native value.
    """

    def __init__(self, coordinator: DataUpdateCoordinator, name: str, serial: str, unique_id: str,
                 unit_of_measurement: str | None, device_class: str | None, state_class: str | None, key: str, state_cb,
                 native_unit_of_measurement: str | None, native_state_cb):
        """Delegate the common setup to GoeChargerSensor and keep the native callbacks."""
        super().__init__(coordinator, name, serial, unique_id, unit_of_measurement, device_class, state_class, key,
                         state_cb)
        # Callback and unit used exclusively by the native_* properties below.
        self._native_state_cb = native_state_cb
        self._native_unit_of_measurement = native_unit_of_measurement

    @property
    def native_value(self):
        """Return the value reported by the sensor."""
        if not self.available:
            return None
        return self._native_state_cb(self.coordinator.data)

    @property
    def native_unit_of_measurement(self) -> str | None:
        """Return the native unit of measurement."""
        return self._native_unit_of_measurement
| StarcoderdataPython |
1658273 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module-level singleton holding the proxies configuration; stays None until
# ``set_proxies_config`` is called.
PROXIES_CONFIG = None


def set_proxies_config():
    """Load the proxies configuration from the environment into ``PROXIES_CONFIG``."""
    # NOTE(review): imported inside the function, presumably to defer the
    # dependency until first use -- confirm whether an import cycle forces this.
    from polyaxon_deploy.managers.proxies import ProxiesManager

    global PROXIES_CONFIG
    PROXIES_CONFIG = ProxiesManager.get_config_from_env()
| StarcoderdataPython |
3261503 | <gh_stars>1-10
from rply.词 import 词, 字符位置, BaseBox
# English aliases for the Chinese-named rply classes, so callers can keep
# using the conventional upstream rply names.
Token = 词
SourcePosition = 字符位置
3326396 | import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import *
# This is our first testing exploration of the data and possible implementations
# Configure spark session
# NOTE(review): 'local[2]' limits Spark to two local cores; the MongoDB Spark
# connector is pulled in via spark.jars.packages, and both input and output
# URIs point at the local ``Accident.us_accidents`` collection.
spark = SparkSession\
    .builder\
    .master('local[2]')\
    .appName('accidents_etl')\
    .config("spark.mongodb.input.uri", 'mongodb://127.0.0.1/Accident.us_accidents?readPreference=primaryPreferred')\
    .config('spark.mongodb.output.uri', 'mongodb://127.0.0.1/Accident.us_accidents')\
    .config('spark.jars.packages', 'org.mongodb.spark:mongo-spark-connector_2.12:3.0.1')\
    .getOrCreate()
# Explicit input schema for the accidents CSV -- one StructField per column --
# so ingestion is deterministic instead of relying on schema inference.
# NOTE(review): the last field reads 'Astronomical_Twiligh' (sic), presumably
# truncated from 'Astronomical_Twilight'; left as-is here.
accidents_schema = StructType([
    StructField('ID', StringType()),
    StructField('Severity', DoubleType()),
    StructField('Start_Time', StringType()),
    StructField('End_Time', StringType()),
    StructField('Start_Lat', DoubleType()),
    StructField('Start_Lng', DoubleType()),
    StructField('End_Lat', DoubleType()),
    StructField('End_Lng', DoubleType()),
    StructField('Distance(mi)', DoubleType()),
    StructField('Description', StringType()),
    StructField('Number', DoubleType()),
    StructField('Street', StringType()),
    StructField('Side', StringType()),
    StructField('City', StringType()),
    StructField('County', StringType()),
    StructField('State', StringType()),
    StructField('Zipcode', StringType()),
    StructField('Country', StringType()),
    StructField('Timezone', StringType()),
    StructField('Airport_Code', StringType()),
    StructField('Weather_Timestamp', StringType()),
    StructField('Temperature(F)', DoubleType()),
    StructField('Wind_Chill(F)', DoubleType()),
    StructField('Humidity(%)', DoubleType()),
    StructField('Pressure(in)', DoubleType()),
    StructField('Visibility(mi)', DoubleType()),
    StructField('Wind_Direction', StringType()),
    StructField('Wind_Speed(mph)', DoubleType()),
    StructField('Precipitation(in)', DoubleType()),
    StructField('Weather_Condition', StringType()),
    StructField('Amenity', StringType()),
    StructField('Bump', StringType()),
    StructField('Crossing', StringType()),
    StructField('Give_Way', StringType()),
    StructField('Junction', StringType()),
    StructField('No_Exit', StringType()),
    StructField('Railway', StringType()),
    StructField('Roundabout', StringType()),
    StructField('Station', StringType()),
    StructField('Stop', StringType()),
    StructField('Traffic_Calming', StringType()),
    StructField('Traffic_Signal', StringType()),
    StructField('Turning_Loop', StringType()),
    StructField('Sunrise_Sunset', StringType()),
    StructField('Civil_Twilight', StringType()),
    StructField('Nautical_Twilight', StringType()),
    StructField('Astronomical_Twiligh', StringType()),
])
# Load the dataset using the explicit schema above; the path is relative to
# the working directory.
df_load = spark.read.csv(r"Accident_No_NA.csv", schema=accidents_schema)
# Drop fields we don't need from df_load
lst_dropped_columns = ['ID','Description','Turning_Loop','Country','Weather_Timestamp','Number','Wind_Chill(F)']
df_load = df_load.drop(*lst_dropped_columns).cache()
# Preview df_load
df_load.show(5)
# Collapse the 16-point compass readings into coarse direction categories.
df_load = df_load.withColumn('Wind_Direction', when((df_load['Wind_Direction'] == 'WSW') | (df_load['Wind_Direction'] == 'WNW') | (df_load['Wind_Direction'] == 'W'), 'West')
                             .when((df_load['Wind_Direction'] == 'SSW') | (df_load['Wind_Direction'] == 'SSE') | (df_load['Wind_Direction'] == 'SW') | (df_load['Wind_Direction'] == 'S') | (df_load['Wind_Direction'] == 'SE'), 'South')
                             .when((df_load['Wind_Direction'] == 'NNW') | (df_load['Wind_Direction'] == 'NNE') | (df_load['Wind_Direction'] == 'NW') | (df_load['Wind_Direction'] == 'NE') | (df_load['Wind_Direction'] == 'N'), 'North')
                             .when((df_load['Wind_Direction'] == 'ESE') | (df_load['Wind_Direction'] == 'ENE') | (df_load['Wind_Direction'] == 'E'), 'East')
                             # BUGFIX: the original mapped 'CALM' to 'Clam'; the intended label is 'Calm'.
                             .when(df_load['Wind_Direction'] == 'CALM', 'Calm')
                             .when(df_load['Wind_Direction'] == 'VAR', 'Variable')
                             .otherwise(df_load['Wind_Direction']))
# Bucket the free-text weather descriptions into broad categories.
df_load = df_load.withColumn('Weather_Condition', when(df_load['Weather_Condition'].rlike('Fog|Overcast|Haze|Mist|Smoke'), 'Fog')
                             .when(df_load['Weather_Condition'].rlike('Clear|Fair'), 'Clear')
                             .when(df_load['Weather_Condition'].rlike('Rain|Showers|Drizzle|Thunder'), 'Rain')
                             .when(df_load['Weather_Condition'].rlike('Ice|Snow|Sleet|Hail'), 'Snow')
                             .when(df_load['Weather_Condition'].rlike('Storm|storm|Tornado'), 'Storm')
                             # BUGFIX: the original pattern 'Stand|Dust' was almost certainly a typo
                             # for 'Sand|Dust' -- the bucket label itself is 'Sand'.
                             .when(df_load['Weather_Condition'].rlike('Sand|Dust'), 'Sand')
                             .when(df_load['Weather_Condition'].rlike('Cloudy|Clouds|Cloud'), 'Cloudy')
                             .otherwise('Other'))
# Create a year field and add it to the dataframe
df_load = df_load.withColumn('Year', year(to_timestamp('Start_Time')))
df_load.show(5)
# Build the accidents frequency dataframe using the year field and counts for each year
df_accidents_freq = df_load.groupBy('Year').count().withColumnRenamed('count', 'Counts').sort('Year')
df_accidents_freq.show(5)
# Write the per-year counts to mongodb.
# BUGFIX: the connection string was missing the '/' between the port and the
# database name ('...27017Accident...'), which is not a valid MongoDB URI.
df_accidents_freq.write.format('mongo')\
    .mode('overwrite')\
    .option('spark.mongodb.output.uri', 'mongodb://127.0.0.1:27017/Accident.us_accidents').save()
"""
Section: Data visulization
"""
import pandas as pd
from bokeh.io import output_notebook, output_file
from bokeh.plotting import figure, show, ColumnDataSource
from bokeh.models.tools import HoverTool
import math
from math import pi
from bokeh.palettes import Category20c
from bokeh.transform import cumsum
from bokeh.tile_providers import CARTODBPOSITRON
from bokeh.themes import built_in_themes
from bokeh.io import curdoc
from pymongo import MongoClient
# Create a custom read function to read data from mongodb into a dataframe
def read_mongo(host='127.0.0.1', port=27017, username=None, password=None, db='Quake', collection='pred_results'):
    """Load a whole MongoDB collection into a pandas DataFrame.

    Args:
        host: MongoDB host name or address.
        port: MongoDB port.
        username: optional user name for authenticated connections.
        password: optional password, used only when ``username`` is also set.
            (BUGFIX: the default was the literal ``<PASSWORD>`` placeholder,
            which is not valid Python.)
        db: database name.
        collection: collection name.

    Returns:
        DataFrame with one row per document and the ``_id`` field removed.
    """
    # BUGFIX: credentials were accepted but never used; include them in the
    # URI when both are provided.
    if username and password:
        mongo_uri = 'mongodb://{}:{}@{}:{}/{}.{}'.format(username, password, host, port, db, collection)
    else:
        mongo_uri = 'mongodb://{}:{}/{}.{}'.format(host, port, db, collection)
    # Connect to mongodb
    conn = MongoClient(mongo_uri)
    database = conn[db]
    # Select all records from the collection
    cursor = database[collection].find()
    # Create the dataframe
    df = pd.DataFrame(list(cursor))
    # Delete the _id field (guard: an empty collection yields no columns at all)
    if '_id' in df.columns:
        del df['_id']
    return df
# Load the datasets from mongodb
df_quakes = read_mongo(collection='quakes')
df_quake_freq = read_mongo(collection='quake_freq')
df_quake_pred = read_mongo(collection='pred_results')
# Keep only the 2016 earthquakes for the map overlay.
df_quakes_2016 = df_quakes[df_quakes['Year'] == 2016]
# Preview df_quakes_2016 (return value is discarded; only useful in a notebook)
df_quakes_2016.head()
# Show plots embedded in jupyter notebook
output_notebook()
# Create custom style function to style our plots
def style(p):
    """Apply the shared typography settings to a bokeh figure and return it."""
    # Title
    p.title.align = 'center'
    p.title.text_font_size = '20pt'
    p.title.text_font = 'serif'

    # Identical label/tick typography on both axes.
    for axis in (p.xaxis, p.yaxis):
        axis.axis_label_text_font_size = '14pt'
        axis.axis_label_text_font_style = 'bold'
        axis.major_label_text_font_size = '12pt'

    # Plot the legend in the top left corner
    p.legend.location = 'top_left'
    return p
# Create the Geo Map plot
def plotMap():
    """Plot the 2016 earthquakes and the 2017 predictions on a world map.

    Latitude/longitude pairs are projected to Web-Mercator metres so they
    line up with the CARTODBPOSITRON tile layer; circle size encodes
    magnitude.  Reads the module-level ``df_quakes_2016`` and
    ``df_quake_pred`` dataframes and mutates them by adding the projected
    ``coords_x``/``coords_y`` and scaled ``Mag_Size`` columns.

    Returns:
        the configured bokeh figure.
    """
    def _to_mercator(lat_degrees, lon_degrees):
        """Project paired lat/lon degree lists to Web-Mercator (x, y) metres."""
        r_major = 6378137.000  # WGS84 equatorial radius in metres
        merc_x = []
        merc_y = []
        for lat, lon in zip(lat_degrees, lon_degrees):
            x = r_major * math.radians(lon)
            # NOTE(review): inherits the original scaling trick; lon == 0
            # would divide by zero here exactly as in the original code.
            scale = x / lon
            y = 180.0 / math.pi * math.log(math.tan(math.pi / 4.0 +
                                                    lat * (math.pi / 180.0) / 2.0)) * scale
            merc_x.append(x)
            merc_y.append(y)
        return merc_x, merc_y

    # Convert Lat and Long values into merc_projection format.  The two
    # duplicated hand-written loops (with their no-op ``i += 1`` / ``j += 1``
    # statements) were folded into the single helper above.
    lst_lon, lst_lat = _to_mercator(df_quakes_2016['Latitude'].values.tolist(),
                                    df_quakes_2016['Longitude'].values.tolist())
    lst_pred_lon, lst_pred_lat = _to_mercator(df_quake_pred['Latitude'].values.tolist(),
                                              df_quake_pred['Longitude'].values.tolist())

    # coords_x holds the Mercator y (northing) and coords_y the Mercator x
    # (easting) -- kept as-is to match the column usage below.
    df_quakes_2016['coords_x'] = lst_lat
    df_quakes_2016['coords_y'] = lst_lon
    df_quake_pred['coords_x'] = lst_pred_lat
    df_quake_pred['coords_y'] = lst_pred_lon

    # Scale the circles
    df_quakes_2016['Mag_Size'] = df_quakes_2016['Magnitude'] * 4
    df_quake_pred['Mag_Size'] = df_quake_pred['Pred_Magnitude'] * 4

    # create datasources for our ColumnDataSource object
    lats = df_quakes_2016['coords_x'].tolist()
    longs = df_quakes_2016['coords_y'].tolist()
    mags = df_quakes_2016['Magnitude'].tolist()
    years = df_quakes_2016['Year'].tolist()
    mag_size = df_quakes_2016['Mag_Size'].tolist()

    pred_lats = df_quake_pred['coords_x'].tolist()
    pred_longs = df_quake_pred['coords_y'].tolist()
    pred_mags = df_quake_pred['Pred_Magnitude'].tolist()
    pred_year = df_quake_pred['Year'].tolist()
    pred_mag_size = df_quake_pred['Mag_Size'].tolist()

    # Create column datasource
    cds = ColumnDataSource(
        data=dict(
            lat=lats,
            lon=longs,
            mag=mags,
            year=years,
            mag_s=mag_size
        )
    )
    pred_cds = ColumnDataSource(
        data=dict(
            pred_lat=pred_lats,
            pred_long=pred_longs,
            pred_mag=pred_mags,
            year=pred_year,
            pred_mag_s=pred_mag_size
        )
    )

    # Tooltips
    TOOLTIPS = [
        ("Year", " @year"),
        ("Magnitude", " @mag"),
        ("Predicted Magnitude", " @pred_mag")
    ]

    # Create figure
    p = figure(title='Earthquake Map',
               plot_width=2300, plot_height=450,
               x_range=(-2000000, 6000000),
               y_range=(-1000000, 7000000),
               tooltips=TOOLTIPS)
    p.circle(x='lon', y='lat', size='mag_s', fill_color='#cc0000', fill_alpha=0.7,
             source=cds, legend='Quakes 2016')
    # Add circles for our predicted earthquakes
    # BUGFIX: fill_alpha must lie in [0, 1]; the original passed 7.0 where
    # 0.7 was clearly intended (matching the circles above).
    p.circle(x='pred_long', y='pred_lat', size='pred_mag_s', fill_color='#ccff33', fill_alpha=0.7,
             source=pred_cds, legend='Predicted Quakes 2017')
    p.add_tile(CARTODBPOSITRON)

    # Style the map plot
    # Title
    p.title.align = 'center'
    p.title.text_font_size = '20pt'
    p.title.text_font = 'serif'
    # Legend
    p.legend.location = 'bottom_right'
    p.legend.background_fill_color = 'black'
    p.legend.background_fill_alpha = 0.8
    p.legend.click_policy = 'hide'
    p.legend.label_text_color = 'white'
    # Hide axes and grid: the tile layer provides all spatial context.
    p.xaxis.visible = False
    p.yaxis.visible = False
    p.axis.axis_label = None
    p.axis.visible = False
    p.grid.grid_line_color = None
    return p
# plotMap()
# Create the Bar Chart
def plotBar():
    """Vertical bar chart of earthquake counts per year.

    Reads the module-level ``df_quake_freq`` dataframe; the returned figure
    is styled through the shared ``style`` helper.
    """
    years = df_quake_freq['Year'].values.tolist()
    counts = df_quake_freq['Counts'].values.tolist()

    # Hover text shown when the mouse is over a bar.
    TOOLTIPS = [
        ('Year', ' @yrs'),
        ('Number of earthquakes', ' @numQuakes')
    ]

    source = ColumnDataSource(data=dict(yrs=years, numQuakes=counts))

    barChart = figure(title='Frequency of Earthquakes by Year',
                      plot_height=400,
                      plot_width=1150,
                      x_axis_label='Years',
                      y_axis_label='Number of Occurances',
                      x_minor_ticks=2,
                      y_range=(0, df_quake_freq['Counts'].max() + 100),
                      toolbar_location=None,
                      tooltips=TOOLTIPS)

    barChart.vbar(x='yrs', bottom=0, top='numQuakes',
                  color='#cc0000', width=0.75,
                  legend='Year', source=source)

    return style(barChart)
# plotBar()
# Create a magnitude plot
def plotMagnitude():
    """Line chart of the maximum and average quake magnitude per year.

    Reads the module-level ``df_quake_freq`` dataframe and returns the
    figure styled through the shared ``style`` helper.
    """
    # Load the datasource
    cds = ColumnDataSource(data=dict(
        yrs=df_quake_freq['Year'].values.tolist(),
        avg_mag=df_quake_freq['Avg_Magnitude'].round(1).values.tolist(),
        max_mag=df_quake_freq['Max_Magnitude'].values.tolist()
    ))
    # Tooltip
    TOOLTIPS = [
        ('Year', ' @yrs'),
        ('Average Magnitude', ' @avg_mag'),
        ('Maximum Magnitude', ' @max_mag')
    ]
    # Create the figure
    mp = figure(title='Maximum and Average Magnitude by Year',
                plot_width=1150, plot_height=400,
                x_axis_label='Years',
                y_axis_label='Magnitude',
                x_minor_ticks=2,
                y_range=(5, df_quake_freq['Max_Magnitude'].max() + 1),
                toolbar_location=None,
                tooltips=TOOLTIPS)
    # Max Magnitude: red line with circle markers at each year.
    mp.line(x='yrs', y='max_mag', color='#cc0000', line_width=2, legend='Max Magnitude', source=cds)
    mp.circle(x='yrs', y='max_mag', color='#cc0000', size=8, fill_color='#cc0000', source=cds)
    # Average Magnitude: yellow line with circle markers.
    mp.line(x='yrs', y='avg_mag', color='yellow', line_width=2, legend='Avg Magnitude', source=cds)
    mp.circle(x='yrs', y='avg_mag', color='yellow', size=8, fill_color='yellow', source=cds)
    mp = style(mp)
    return mp
# plotMagnitude()
# Display the visuals directly in the browser
output_file('dashboard.html')
# Change to a dark theme
curdoc().theme = 'dark_minimal'
# Build the grid plot
from bokeh.layouts import gridplot
# Layout: map across the full top row; bar chart and magnitude chart side by
# side underneath.
grid = gridplot([[plotMap()], [plotBar(), plotMagnitude()]])
# Show the grid
show(grid)
3347631 | <reponame>jarredthejellyfish/lcoin
from datetime import datetime
import imp
from lcoin_wallet import db, login_manager
from flask_login import UserMixin
from itsdangerous import URLSafeTimedSerializer as Serializer
from flask import current_app
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: map the id stored in the session back to a User row."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """An account holding a wallet balance, usable with Flask-Login."""

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Filename of the avatar stored on disk, not the image itself.
    image_file = db.Column(db.String(20), nullable=False,
                           default='default.jpeg')
    # NOTE(review): presumably stores a password hash (60 chars matches
    # bcrypt output) -- confirm against the registration code.
    password = db.Column(db.String(60), nullable=False)
    balance = db.Column(db.Float, nullable=False)

    def get_reset_token(self):
        """Return a signed, timestamped token embedding this user's id."""
        s = Serializer(current_app.config['SECRET_KEY'])
        return s.dumps({'user_id': self.id})

    @staticmethod
    def verify_reset_token(token, expires_sec=1800):
        """Return the User encoded in ``token``, or None if invalid or expired.

        Args:
            token: token produced by :meth:`get_reset_token`.
            expires_sec: maximum accepted token age in seconds.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            # Pass the age limit by keyword: positionally it is easy to
            # confuse with other loads() arguments across itsdangerous
            # versions.
            user_id = s.loads(token, max_age=expires_sec)['user_id']
        # BUGFIX: narrowed the bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) to Exception; this still covers
        # BadSignature, SignatureExpired and malformed payloads.
        except Exception:
            return None
        return User.query.get(user_id)

    def __repr__(self):
        return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Transaction(db.Model):
    """A completed transfer of funds between two accounts."""

    id = db.Column(db.Integer, primary_key=True)
    # Sender ('by') and recipient ('to') identifiers -- presumably usernames
    # (String(20) matches User.username); verify against the callers.
    by = db.Column(db.String(20), nullable=False)
    to = db.Column(db.String(20), nullable=False)
    amount = db.Column(db.Float, nullable=False)
    date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # Optional free-text note attached to the transfer.
    concept = db.Column(db.String(240), nullable=True)

    def __repr__(self):
        return f"Transaction('{self.date}', '{self.by}', '{self.to}', '{self.amount}', '{self.concept}')"
class Request(db.Model):
    """A request for funds from one account to another."""

    id = db.Column(db.Integer, primary_key=True)
    # Requester ('by') and requested party ('to') identifiers -- presumably
    # usernames, as in Transaction; verify against the callers.
    by = db.Column(db.String(20), nullable=False)
    to = db.Column(db.String(20), nullable=False)
    amount = db.Column(db.Float, nullable=False)
    date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # Optional free-text note attached to the request.
    concept = db.Column(db.String(240), nullable=True)
    # Whether the request is still open -- presumably cleared once it is
    # paid or cancelled; confirm against the view code.
    active = db.Column(db.Boolean(), nullable=False)

    def __repr__(self):
        return f"Request('{self.date}', '{self.by}', '{self.to}', '{self.amount}', '{self.concept}')"
class EmailWhitelist(db.Model):
    """An email address allowed to register, with its starting balance."""

    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Balance granted to the account created with this email.
    initial_balance = db.Column(db.Integer, nullable=False)

    def __repr__(self):
        return f"EmailWhitelist('{self.id}', '{self.email}', '{self.initial_balance}')"
1766577 | <reponame>t141/scikit-robot
import numpy as np
class Interpolator(object):
    """Base class for trajectory interpolators.

    Tracks the total elapsed time, the active segment between two control
    points and whether interpolation is running.  Subclasses implement
    :meth:`interpolation`, returning the position for the current segment
    time.
    """

    def __init__(self):
        self.time = 0.0            # total elapsed time since reset [s]
        self.segment_time = 0.0    # elapsed time within the current segment [s]
        self.segment = 0           # index of the segment being interpolated
        self.segment_num = 0       # number of segments (= control points - 1)
        self.is_interpolating = False

    def reset(self, position_list=None,
              time_list=None):
        """Initialize interpolator.

        Args:
            position_list:
                list of control points.
            time_list:
                list of times from start for each control point; the time
                at the first control point is zero, so the length of this
                list is the length of the control-point list minus 1.
        """
        # ``None`` means "keep the previously stored list" (used by
        # pass_time() when it restarts after the last segment).
        if position_list is None:
            position_list = self.position_list
        else:
            self.position_list = position_list
        if time_list is None:
            time_list = self.time_list
        else:
            self.time_list = time_list
        if len(position_list) != len(time_list) + 1:
            raise ValueError(
                'length of position_list must be length of time_list + 1')
        self.time = 0.0
        self.segment_time = 0.0
        self.segment = 0
        self.segment_num = len(position_list) - 1
        self.stop_interpolation()

    def start_interpolation(self):
        self.is_interpolating = True

    def stop_interpolation(self):
        self.is_interpolating = False

    def interpolation(self):
        # Subclasses compute the position for the current segment time.
        raise NotImplementedError

    def pass_time(self, dt):
        """Advance the interpolation by ``dt`` seconds and return the position.

        Args:
            dt (float):
                time step in seconds.

        Returns:
            the interpolated position for the current time.
            NOTE(review): ``self.position`` is only assigned while
            interpolating, so calling this before ``start_interpolation``
            raises AttributeError -- confirm that is intended.
        """
        if self.is_interpolating:
            self.position = self.interpolation()
            self.time += dt
            self.segment_time += dt
            # Crossed the end of the current segment: carry the overshoot
            # into the next segment, and reset once all segments are done.
            if self.time > self.time_list[self.segment]:
                self.segment_time = (self.time - self.time_list[self.segment])
                self.segment += 1
                if self.segment >= self.segment_num:
                    self.reset()
        return self.position
class LinearInterpolator(Interpolator):
    """Piecewise-linear interpolation between consecutive control points."""

    def __init__(self):
        Interpolator.__init__(self)

    def interpolation(self):
        """Linearly blend the two control points bounding the current segment."""
        start = self.position_list[self.segment]
        end = self.position_list[self.segment + 1]
        # Duration of the active segment: difference of cumulative times,
        # except for the first segment whose start time is zero.
        if self.segment > 0:
            total_time = self.time_list[self.segment] - \
                self.time_list[self.segment - 1]
        else:
            total_time = self.time_list[self.segment]
        elapsed = self.segment_time
        remaining = total_time - elapsed
        # Weighted average: the start point fades out as the end fades in.
        return start * (remaining / total_time) + end * (elapsed / total_time)
class MinjerkInterpolator(Interpolator):
    """Minimum-jerk (quintic polynomial) trajectory interpolator."""

    def __init__(self):
        Interpolator.__init__(self)

    def reset(self,
              velocity_list=None,
              acceleration_list=None,
              **kwargs):
        """Initialize interpolator.

        Args:
            velocity_list:
                list of velocities at each control point; defaults to
                zero vectors when omitted.
            acceleration_list:
                list of accelerations at each control point; defaults to
                zero vectors when omitted.
            **kwargs:
                ``position_list`` and ``time_list``, forwarded to
                :meth:`Interpolator.reset`.
        """
        Interpolator.reset(self, **kwargs)
        if velocity_list is None:
            self.velocity_list = [
                np.zeros(len(self.position_list[0]))
                for _ in range(self.segment_num + 1)]
        else:
            self.velocity_list = velocity_list
        if acceleration_list is None:
            self.acceleration_list = [
                np.zeros(len(self.position_list[0]))
                for _ in range(self.segment_num + 1)]
        else:
            self.acceleration_list = acceleration_list

    def interpolation(self):
        """Minjerk interpolator, a.k.a Hoff & Arbib.

        Evaluates a quintic polynomial whose boundary conditions match the
        position, velocity and acceleration at both ends of the current
        segment, and updates ``self.position``/``velocity``/``acceleration``.
        """
        # Boundary conditions at segment start (i) and end (f).
        xi = self.position_list[self.segment]
        xf = self.position_list[self.segment + 1]
        vi = self.velocity_list[self.segment]
        vf = self.velocity_list[self.segment + 1]
        ai = self.acceleration_list[self.segment]
        af = self.acceleration_list[self.segment + 1]
        if self.segment > 0:
            total_time = self.time_list[self.segment] - \
                self.time_list[self.segment - 1]
        else:
            total_time = self.time_list[self.segment]
        # Residuals between the desired end state and a constant-acceleration
        # extrapolation of the start state; these determine the quintic's
        # three highest-order coefficients.
        # A=(gx-(x+v*t+(a/2.0)*t*t))/(t*t*t)
        # B=(gv-(v+a*t))/(t*t)
        # C=(ga-a)/toi
        A = (xf - (xi + total_time * vi + (total_time ** 2)
                   * 0.5 * ai)) / (total_time ** 3)
        B = (vf - (vi + total_time * ai)) / (total_time ** 2)
        C = (af - ai) / total_time
        # Quintic coefficients a0..a5 of the minimum-jerk polynomial.
        # a0=x
        # a1=v
        # a2=a/2.0
        # a3=10*A-4*B+0.5*C
        # a4=(-15*A+7*B-C)/t
        # a5=(6*A-3*B+0.5*C)/(t*t)
        a0 = xi
        a1 = vi
        a2 = 0.5 * ai
        a3 = 10 * A - 4 * B + 0.5 * C
        a4 = (-15 * A + 7 * B - C) / total_time
        a5 = (6 * A - 3 * B + 0.5 * C) / (total_time * total_time)
        # Evaluate the polynomial and its first two derivatives at the
        # current segment time.
        # x=a0+a1*t+a2*t*t+a3*t*t*t+a4*t*t*t*t+a5*t*t*t*t*t
        # v=a1+2*a2*t+3*a3*t*t+4*a4*t*t*t+5*a5*t*t*t*t
        # a=2*a2+6*a3*t+12*a4*t*t+20*a5*t*t*t
        self.position = a0 + \
            self.segment_time ** 1 * a1 + \
            self.segment_time ** 2 * a2 + \
            self.segment_time ** 3 * a3 + \
            self.segment_time ** 4 * a4 + \
            self.segment_time ** 5 * a5
        self.velocity = a1 + \
            self.segment_time ** 1 * a2 + \
            self.segment_time ** 2 * a3 + \
            self.segment_time ** 3 * a4 + \
            self.segment_time ** 4 * a5
        self.acceleration = a2 + \
            self.segment_time ** 1 * a3 + \
            self.segment_time ** 2 * a4 + \
            self.segment_time ** 3 * a5
        return self.position
def position_list_interpolation(
        position_list, time_list, dt,
        interpolator=None,
        initial_time=0.0,
        neglect_first=False,
        vel_vector_list=None,
        acc_vector_list=None):
    """Sample an interpolated trajectory through ``position_list``.

    Args:
        position_list: list of control points.
        time_list: per-segment durations (not cumulative); one entry per
            segment, i.e. ``len(position_list) - 1`` entries.
        dt: sampling period in seconds.
        interpolator: interpolator instance to drive; a fresh
            ``MinjerkInterpolator`` is created when ``None``.
        initial_time: offset added to every entry of the returned ``time``.
        neglect_first: drop the first sample from the returned lists.
        vel_vector_list: optional velocities at the control points (used
            only if the interpolator exposes a ``velocity`` attribute).
        acc_vector_list: optional accelerations at the control points.

    Returns:
        dict with ``position`` and ``time`` lists, plus ``velocity`` /
        ``acceleration`` lists when the interpolator exposes them.
    """
    if interpolator is None:
        # BUGFIX: the parameter previously defaulted to a single shared
        # ``MinjerkInterpolator()`` instance (mutable default argument), so
        # state such as the ``velocity`` attribute leaked between calls and
        # a second call could crash on an empty velocity list.  Create a
        # fresh instance per call instead.
        interpolator = MinjerkInterpolator()
    data_list = []
    tm_list = []
    vel_data_list = []
    acc_data_list = []
    if vel_vector_list is None:
        vel_vector_list = []
    if acc_vector_list is None:
        acc_vector_list = []
    # Convert per-segment durations into cumulative times from the start.
    r = []
    for n in time_list:
        if len(r):
            r.append(n + r[-1])
        else:
            r.append(n)
    kwargs = dict(position_list=position_list,
                  time_list=r)
    # NOTE(review): these hasattr checks only succeed once the interpolator
    # has produced a sample (MinjerkInterpolator sets ``velocity`` inside
    # interpolation()), so user-supplied vectors are ignored on a fresh
    # instance -- preserved from the original; confirm intended.
    if hasattr(interpolator, 'velocity'):
        # Pass ``None`` for an empty list so the interpolator falls back to
        # its zero-velocity default instead of indexing an empty list.
        kwargs['velocity_list'] = vel_vector_list or None
    if hasattr(interpolator, 'acceleration'):
        kwargs['acceleration_list'] = acc_vector_list or None
    interpolator.reset(**kwargs)
    interpolator.start_interpolation()
    while interpolator.is_interpolating:
        # (The original re-tested is_interpolating here; the check is always
        # true immediately after the loop condition, so it was removed.)
        tm_list.append(initial_time + interpolator.time)
        interpolator.pass_time(dt)
        data_list.append(interpolator.position)
        if hasattr(interpolator, 'velocity'):
            vel_data_list.append(interpolator.velocity)
        if hasattr(interpolator, 'acceleration'):
            acc_data_list.append(interpolator.acceleration)
    if neglect_first:
        data_list = data_list[1:]
        tm_list = tm_list[1:]
    result_dict = dict(position=data_list,
                       time=tm_list)
    if hasattr(interpolator, 'velocity'):
        result_dict['velocity'] = vel_data_list[1:] if neglect_first else vel_data_list
    if hasattr(interpolator, 'acceleration'):
        result_dict['acceleration'] = acc_data_list[1:] if neglect_first else acc_data_list
    return result_dict
150333 | <filename>problem1.py<gh_stars>0
### PROBLEM 1
def main():
    """Print a short personal profile, one attribute per line."""
    for line in (
        "Name: <NAME>",
        "Favorite Food: Brie Cheese",
        "Favorite Color: Red",
        "Favorite Hobby: Traveling",
    ):
        print(line)
if __name__ == "__main__":
main()
#Name: <NAME>
#Favorite Food: Brie Cheese
#Favorite Color: Red
#Favorite Hobby: Traveling
| StarcoderdataPython |
36410 | from __future__ import annotations
import os
import string
import random
import logging
import vapoursynth as vs
from pathlib import Path
from requests import Session
from functools import partial
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from typing import Any, Mapping, Callable, Dict, Final, List, NamedTuple, Optional, Set, cast
from PyQt5.QtGui import QImage
from PyQt5.QtCore import QObject, QThread, pyqtSignal
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar
from ...utils import set_qobject_names
from ...widgets import ComboBox, FrameEdit
from ...models import PictureTypes, VideoOutputs
from ...core import AbstractMainWindow, AbstractToolbar, PictureType, try_load, main_window
from .settings import CompSettings
# Upper bound on retries when searching for a frame of a requested picture
# type before giving up -- presumably consumed by the random-frame picker
# below this chunk; confirm against the rest of the module.
_MAX_ATTEMPTS_PER_PICTURE_TYPE: Final[int] = 50


def select_frames(clip: vs.VideoNode, indices: List[int]) -> vs.VideoNode:
    """Return a clip whose n-th frame is ``clip[indices[n]]`` (one frame per index)."""
    return clip.std.BlankClip(length=len(indices)).std.FrameEval(lambda n: clip[indices[n]])
class WorkerConfiguration(NamedTuple):
    """Immutable bundle of settings for one comparison-upload worker run."""

    outputs: VideoOutputs          # video outputs to extract frames from
    collection_name: str           # title of the slow.pics collection
    public: bool                   # whether the collection is publicly listed
    nsfw: bool                     # forwarded as the slow.pics 'hentai' flag
    optimise: bool                 # ask slow.pics to optimise uploaded images
    remove_after: Optional[int]    # auto-removal delay, if any -- TODO confirm units
    frames: List[int]              # frame numbers to extract from each output
    compression: int               # PNG quality/compression passed to QImage.save
    path: Path                     # working directory for the extracted images
class Worker(QObject):
    """Background worker that extracts comparison frames as PNGs and uploads
    them to slow.pics.

    Runs on its own QThread; communicates with the toolbar exclusively through
    the three signals below.  Setting ``is_finished`` from the GUI thread asks
    the worker to abort at the next checkpoint.
    """
    # BUGFIX(review): `finished` is declared with no arguments, but several
    # call sites previously did `self.finished.emit('')`, which raises
    # TypeError at runtime in PyQt5.  All emits are now argument-free.
    finished = pyqtSignal()
    progress_bar = pyqtSignal(int)            # 0-100 percentage
    progress_status = pyqtSignal(str, int, int)  # (phase-or-URL, current, total)
    outputs: VideoOutputs
    is_finished = False

    def _progress_update_func(self, value: int, endvalue: int) -> None:
        """Forward vapoursynth output progress to the progress bar signal."""
        if value == 0:
            self.progress_bar.emit(0)
        else:
            self.progress_bar.emit(int(100 * value / endvalue))

    def run(self, conf: WorkerConfiguration) -> None:
        """Extract the configured frames of every output and upload them."""
        self.conf = conf
        all_images: List[List[Path]] = []
        try:
            for i, output in enumerate(conf.outputs):
                if self.is_finished:
                    raise StopIteration
                self.progress_status.emit('extract', i + 1, len(conf.outputs))
                path_name = conf.path / output.name
                path_name.mkdir(parents=True)
                # Zero-pad frame numbers so files sort in frame order.
                max_num = max(conf.frames)
                path_images = [
                    path_name / (f'{output.name}_' + f'{f}'.zfill(len("%i" % max_num)) + '.png')
                    for f in conf.frames
                ]

                def _save(n: int, f: vs.VideoFrame) -> vs.VideoFrame:
                    # Called per rendered frame; writes the nth PNG.
                    if self.is_finished:
                        raise StopIteration
                    QImage(cast(bytes, f[0]), f.width, f.height, QImage.Format_RGB32).save(
                        str(path_images[n]), 'PNG', conf.compression
                    )
                    return f

                decimated = select_frames(output.prepared.clip, conf.frames)
                clip = decimated.std.ModifyFrame(decimated, _save)
                # Render to the null device purely for the _save side effect.
                with open(os.devnull, 'wb') as devnull:
                    clip.output(devnull, y4m=False, progress_update=self._progress_update_func)
                if self.is_finished:
                    raise StopIteration
                all_images.append(sorted(path_images))
        except StopIteration:
            # User abort: report completion and bail out.
            return self.finished.emit()
        # Build the multipart form fields expected by the slow.pics API.
        fields: Dict[str, Any] = {}
        for i, (output, images) in enumerate(zip(conf.outputs, all_images)):
            if self.is_finished:
                return self.finished.emit()
            for j, (image, frame) in enumerate(zip(images, conf.frames)):
                if self.is_finished:
                    return self.finished.emit()
                fields[f'comparisons[{j}].name'] = str(frame)
                fields[f'comparisons[{j}].images[{i}].name'] = output.name
                fields[f'comparisons[{j}].images[{i}].file'] = (image.name, image.read_bytes(), 'image/png')
        self.progress_status.emit('upload', 0, 0)
        with Session() as sess:
            # Initial GET obtains the XSRF cookie required by the POST below.
            sess.get('https://slow.pics/api/comparison')
            if self.is_finished:
                return self.finished.emit()
            head_conf = {
                'collectionName': conf.collection_name,
                'public': str(conf.public).lower(),
                'optimizeImages': str(conf.optimise).lower(),
                'hentai': str(conf.nsfw).lower(),
            }
            if conf.remove_after is not None:
                head_conf |= {'removeAfter': str(conf.remove_after)}

            def _monitor_cb(monitor: MultipartEncoderMonitor) -> None:
                self._progress_update_func(monitor.bytes_read, monitor.len)
            files = MultipartEncoder(head_conf | fields)
            monitor = MultipartEncoderMonitor(files, _monitor_cb)
            # NOTE(review): to_string() reads the whole encoder into memory
            # (progress fires during that read); fine for a handful of PNGs.
            response = sess.post(
                'https://slow.pics/api/comparison',
                monitor.to_string(),  # type: ignore
                headers={
                    "Accept": "*/*",
                    "Accept-Encoding": "gzip, deflate, br",
                    "Accept-Language": "en-US,en;q=0.5",
                    "Content-Length": str(files.len),
                    "Content-Type": files.content_type,
                    "Origin": "https://slow.pics/",
                    "Referer": "https://slow.pics/comparison",
                    "Sec-Fetch-Mode": "cors",
                    "Sec-Fetch-Site": "same-origin",
                    "User-Agent": (
                        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
                    ),
                    "X-XSRF-TOKEN": sess.cookies.get_dict()["XSRF-TOKEN"]  # noqa
                }
            )
            # The response body is the collection slug; surface the final URL.
            self.progress_status.emit(f'https://slow.pics/c/{response.text}', 0, 0)
        self.finished.emit()
class CompToolbar(AbstractToolbar):
    """Toolbar for uploading frame comparisons to slow.pics.

    Owns the UI controls (frame selection, picture-type filter, public/NSFW
    flags) and drives a `Worker` on a background QThread for the actual
    extraction/upload.  Kept byte-identical to the original except for
    comments: the widget/signal wiring below is order-sensitive.
    """
    _storable_attrs = ('settings', 'visibility')
    _thread_running = False
    __slots__ = (
        *_storable_attrs, 'random_frames_control', 'manual_frames_lineedit',
        'current_frame_checkbox', 'is_public_checkbox', 'is_nsfw_checkbox',
        'output_url_lineedit', 'output_url_copy_button', 'start_upload_button', 'stop_upload_button',
        'upload_progressbar', 'upload_status_label', 'upload_status_elements'
    )
    def __init__(self, main: AbstractMainWindow) -> None:
        super().__init__(main, CompSettings())
        self.setup_ui()
        set_qobject_names(self)
    def setup_ui(self) -> None:
        """Build the toolbar's widget row.  Creation order == visual order."""
        layout = QHBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        random_frames_label = QLabel('Num Random Frames:', self)
        layout.addWidget(random_frames_label)
        self.random_frames_control = FrameEdit(self)
        layout.addWidget(self.random_frames_control)
        manual_frames_label = QLabel('Additional Frames:', self)
        layout.addWidget(manual_frames_label)
        self.manual_frames_lineedit = QLineEdit(self)
        self.manual_frames_lineedit.setPlaceholderText('frame,frame,frame')
        layout.addWidget(self.manual_frames_lineedit)
        self.current_frame_checkbox = QCheckBox('Current Frame', self)
        self.current_frame_checkbox.setChecked(True)
        layout.addWidget(self.current_frame_checkbox)
        layout.addWidget(self.get_separator())
        picture_type_label = QLabel('Filter per Picture Type:', self)
        layout.addWidget(picture_type_label)
        self.pic_type_combox = ComboBox[PictureType](self)
        self.pic_type_combox.setModel(PictureTypes())
        self.pic_type_combox.setEditable(True)
        self.pic_type_combox.setInsertPolicy(QComboBox.InsertAtCurrent)
        self.pic_type_combox.setDuplicatesEnabled(True)
        self.pic_type_combox.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.pic_type_combox.view().setMinimumWidth(self.pic_type_combox.minimumSizeHint().width())
        # Widen by 10% so the longest entry isn't clipped.
        temp_width = self.pic_type_combox.minimumSizeHint().width()
        self.pic_type_combox.setMinimumWidth(temp_width + temp_width // 10)
        self.pic_type_combox.setCurrentIndex(0)
        layout.addWidget(self.pic_type_combox)
        layout.addWidget(self.get_separator())
        self.is_public_checkbox = QCheckBox('Public', self)
        self.is_public_checkbox.setChecked(True)
        layout.addWidget(self.is_public_checkbox)
        self.is_nsfw_checkbox = QCheckBox('NSFW', self)
        self.is_nsfw_checkbox.setChecked(False)
        layout.addWidget(self.is_nsfw_checkbox)
        layout.addWidget(self.get_separator())
        self.output_url_lineedit = QLineEdit('https://slow.pics/c/', self)
        self.output_url_lineedit.setEnabled(False)
        layout.addWidget(self.output_url_lineedit)
        self.output_url_copy_button = QPushButton(self)
        self.output_url_copy_button.clicked.connect(self.on_copy_output_url_clicked)
        self.output_url_copy_button.setText('⎘')
        layout.addWidget(self.output_url_copy_button)
        self.start_upload_button = QPushButton('Upload to slow.pics', self)
        self.start_upload_button.clicked.connect(self.on_start_upload)
        layout.addWidget(self.start_upload_button)
        self.stop_upload_button = QPushButton('Stop Uploading', self)
        self.stop_upload_button.clicked.connect(self.on_stop_upload)
        self.stop_upload_button.setVisible(False)
        layout.addWidget(self.stop_upload_button)
        upload_separator = self.get_separator()
        layout.addWidget(upload_separator)
        self.upload_progressbar = QProgressBar(self)
        self.upload_progressbar.setGeometry(200, 80, 250, 20)
        self.upload_progressbar.setValue(0)
        layout.addWidget(self.upload_progressbar)
        self.upload_status_label = QLabel(self)
        layout.addWidget(self.upload_status_label)
        self.update_status_label('extract')
        # Progress widgets toggled together while an upload is in flight.
        self.upload_status_elements = (
            upload_separator, self.upload_progressbar,
            self.upload_status_label
        )
        self.update_upload_status_visibility(False)
        # NOTE(review): addStretch is called twice — presumably intentional to
        # double the trailing stretch weight; confirm before "fixing".
        layout.addStretch()
        layout.addStretch()
    def on_copy_output_url_clicked(self, checked: bool | None = None) -> None:
        """Copy the last slow.pics URL to the system clipboard."""
        self.main.clipboard.setText(self.output_url_lineedit.text())
        self.main.show_message('Slow.pics URL copied to clipboard')
    def update_upload_status_visibility(self, visible: bool) -> None:
        """Show/hide the progress bar, status label and their separator."""
        for element in self.upload_status_elements:
            element.setVisible(visible)
    def on_start_upload(self) -> None:
        """Kick off an upload unless one is already running."""
        if self._thread_running:
            return
        self.start_upload_button.setVisible(False)
        self.stop_upload_button.setVisible(True)
        self.upload_to_slowpics()
    def on_end_upload(self, forced: bool = False) -> None:
        """Restore button state after the worker thread ends (or is aborted)."""
        self.start_upload_button.setVisible(True)
        self.stop_upload_button.setVisible(False)
        self._thread_running = False
        self.upload_thread.deleteLater()
        if forced:
            self.upload_status_label.setText("Stopped!")
        else:
            self.upload_status_label.setText("Finished!")
    def on_stop_upload(self) -> None:
        """Request a cooperative abort of the running worker."""
        self.upload_worker.is_finished = True
        self.on_end_upload(forced=True)
    def update_status_label(self, kind: str, curr: int | None = None, total: int | None = None) -> None:
        """Render progress text; any unknown `kind` is treated as the final URL."""
        message = ''
        moreinfo = f" {curr or '?'}/{total or '?'} " if curr or total else ''
        if kind == 'extract':
            message = 'Extracting'
        elif kind == 'upload':
            message = 'Uploading'
        elif kind == 'search':
            message = 'Searching'
        else:
            # `kind` carries the slow.pics URL emitted by the worker.
            return self.output_url_lineedit.setText(kind)
        self.upload_status_label.setText(f'{message}{moreinfo}...')
    def _rand_num_frames(self, checked: Set[int], rand_func: Callable[[], int]) -> int:
        """Draw random numbers until one not already in `checked` is found."""
        rnum = rand_func()
        while rnum in checked:
            rnum = rand_func()
        return rnum
    def _select_samples_ptypes(self, num_frames: int, k: int, picture_type: PictureType) -> List[int]:
        """Pick `k` frame numbers whose picture type matches in ALL outputs.

        Bounded by _MAX_ATTEMPTS_PER_PICTURE_TYPE draws per sample and
        _MAX_ATTEMPTS_PER_PICTURE_TYPE * k draws overall.
        """
        samples: Set[int] = set()
        _max_attempts = 0
        _rnum_checked: Set[int] = set()
        while len(samples) < k:
            _attempts = 0
            while True:
                self.update_status_label('search', _attempts, _MAX_ATTEMPTS_PER_PICTURE_TYPE)
                if len(_rnum_checked) >= num_frames:
                    raise ValueError(f'There aren\'t enough of {picture_type} in these clips')
                rnum = self._rand_num_frames(_rnum_checked, partial(random.randrange, start=0, stop=num_frames))
                _rnum_checked.add(rnum)
                # Accept only if every output reports the requested pict type.
                if all(
                    f.props['_PictType'].decode('utf-8') == str(picture_type)[0]
                    for f in vs.core.std.Splice(
                        [select_frames(out.prepared.clip, [rnum]) for out in self.main.outputs], True
                    ).frames()
                ):
                    break
                _attempts += 1
                _max_attempts += 1
                if _attempts > _MAX_ATTEMPTS_PER_PICTURE_TYPE:
                    logging.warning(
                        f'{_MAX_ATTEMPTS_PER_PICTURE_TYPE} attempts were made for sample {len(samples)} '
                        f'and no match found for {picture_type}; stopping iteration...')
                    break
            if _max_attempts > (curr_max_att := _MAX_ATTEMPTS_PER_PICTURE_TYPE * k):
                raise RecursionError(f'Comp: attempts max of {curr_max_att} has been reached!')
            if _attempts < _MAX_ATTEMPTS_PER_PICTURE_TYPE:
                samples.add(rnum)
                self.upload_progressbar.setValue(int())
                self.upload_progressbar.setValue(int(100 * len(samples) / k))
        return list(samples)
    def get_slowpics_conf(self) -> WorkerConfiguration:
        """Collect UI state into a WorkerConfiguration for the worker thread."""
        self.update_upload_status_visibility(True)
        # NOTE(review): bare annotation below is unused dead code — candidate
        # for removal in a behavioral change.
        clips: Dict[str, vs.VideoNode]
        num = int(self.random_frames_control.value())
        frames: List[int] = list(
            map(int, filter(None, [x.strip() for x in self.manual_frames_lineedit.text().split(',')]))
        )
        picture_type = self.pic_type_combox.currentData()
        lens = set(out.prepared.clip.num_frames for out in self.main.outputs)
        if len(lens) != 1:
            logging.warning('Outputted clips don\'t all have the same length!')
        lens_n = min(lens)
        # Random scratch directory under the config dir for extracted PNGs.
        path = Path(main_window().config_dir) / ''.join(random.choices(string.ascii_uppercase + string.digits, k=16))
        path.mkdir(parents=True)
        if num:
            if picture_type is PictureType.UNSET:
                samples = random.sample(range(lens_n), num)
            else:
                logging.info('Making samples according to specified picture types...')
                samples = self._select_samples_ptypes(lens_n, num, picture_type)
        else:
            samples = []
        if len(frames):
            samples.extend(frames)
        if self.current_frame_checkbox.isChecked():
            samples.append(int(self.main.current_frame))
        # NOTE(review): collection name is hard-coded to 'Function Test'.
        return WorkerConfiguration(
            self.main.outputs, 'Function Test',
            self.is_public_checkbox.isChecked(), self.is_nsfw_checkbox.isChecked(),
            True, None, sorted(set(samples)), -1, path
        )
    def upload_to_slowpics(self) -> None:
        """Spin up the worker QThread and wire its signals to the UI."""
        self.upload_thread = QThread()
        self.upload_worker = Worker()
        self.upload_worker.moveToThread(self.upload_thread)
        self.upload_thread.started.connect(
            partial(self.upload_worker.run, conf=self.get_slowpics_conf())
        )
        self.upload_worker.finished.connect(self.upload_thread.quit)
        self.upload_worker.finished.connect(self.upload_worker.deleteLater)
        self.upload_thread.finished.connect(self.on_end_upload)
        self.upload_worker.progress_bar.connect(self.upload_progressbar.setValue)
        self.upload_worker.progress_status.connect(self.update_status_label)
        self.upload_thread.start()
        self._thread_running = True
    def __getstate__(self) -> Mapping[str, Any]:
        # Persist only the attrs listed in _storable_attrs.
        return {
            attr_name: getattr(self, attr_name)
            for attr_name in self._storable_attrs
        }
    def __setstate__(self, state: Mapping[str, Any]) -> None:
        try_load(state, 'visibility', bool, self.on_toggle)
        try_load(state, 'settings', CompSettings, self.__setattr__)
| StarcoderdataPython |
1603801 | <gh_stars>1000+
def recite(start, take=1):
    """Unimplemented exercise stub.

    NOTE(review): presumably recites song verses beginning at *start*,
    returning *take* verses — semantics not determinable from this file;
    confirm against the exercise's test suite before implementing.
    """
    pass
| StarcoderdataPython |
3244028 | <reponame>stumpr/py-aws-tools
#!/usr/bin/env python
import boto3, json, argparse
from botocore.exceptions import ProfileNotFound, ClientError
# CLI setup: --arn names the IAM policy to delete; --assume_role is the role
# assumed inside the policy's own account (default: CloudCoreAdmin).
# Note: parse_args() runs at import time, so this module is script-only.
parser = argparse.ArgumentParser(description="Delete policy with specific resource ARN")
parser.add_argument(
    '--arn',
    type=str,
)
parser.add_argument(
    '--assume_role',
    type=str,
    default="CloudCoreAdmin",
)
pargs = parser.parse_args()
def getSessionWithAssumeRole(AccountId=None, policyName=None):
    """Assume role *policyName* in account *AccountId* via STS and return a
    boto3 Session built from the temporary credentials."""
    role_arn = "arn:aws:iam::{0}:role/{1}".format(AccountId, policyName)
    creds = boto3.client('sts').assume_role(
        RoleArn=role_arn,
        RoleSessionName="DeleteRole",
    )['Credentials']
    return boto3.Session(
        aws_access_key_id=creds['AccessKeyId'],
        aws_secret_access_key=creds['SecretAccessKey'],
        aws_session_token=creds['SessionToken'],
    )
def deletePolicyWithArn(Arn=None):
    """Delete the customer-managed IAM policy identified by *Arn*.

    Skips AWS-managed policies, missing policies, and policies that are still
    attached to principals.  Non-default versions are deleted first (required
    by the IAM API before delete_policy).
    """
    policyArnParts = Arn.split(":", 5)
    policyName = policyArnParts[5].split("/", 1)[1]
    accountId = policyArnParts[4]
    if accountId == 'aws':
        # AWS-managed policy (arn:aws:iam::aws:policy/...): never deletable.
        # BUGFIX(review): was `policyArn`, an undefined name (NameError).
        print("[{}] policy={} is a managed policy and cannot be deleted".format("AmazonPolicy", Arn))
        return
    session = getSessionWithAssumeRole(AccountId=accountId, policyName=pargs.assume_role)
    client = session.client('iam')
    try:
        policyResponse = client.get_policy(PolicyArn=Arn)
    except ClientError:
        # Narrowed from a bare except: only "not found"/API failures expected.
        print("[{}] policy={} not found".format(accountId, policyName))
        return
    attachment_count = policyResponse['Policy']['AttachmentCount']
    if attachment_count > 0:
        # BUGFIX(review): original format string had three placeholders but
        # two arguments (IndexError), and was missing the `return`, so the
        # policy was deleted despite the "cannot be deleted" message.
        print("[{}] policy={} has {} attachments and cannot be deleted".format(
            accountId, policyName, attachment_count))
        return
    print("[{}] deleting policy={}...".format(accountId, policyName))
    ''' Delete Policy Versions '''
    pager = client.get_paginator('list_policy_versions')
    pages = pager.paginate(**{"PolicyArn": Arn})
    for page in pages:
        for version in page['Versions']:
            # The default version is removed implicitly by delete_policy.
            if version['IsDefaultVersion'] == False:
                print("[{}] deleting version={} from policy={}".format(
                    accountId, version['VersionId'], policyName,
                ))
                client.delete_policy_version(PolicyArn=Arn, VersionId=version['VersionId'])
    ''' Delete Policy '''
    client.delete_policy(PolicyArn=Arn)
    print("[{}] policy={} deleted".format(accountId, policyName))
# Script entry point: delete the policy named on the command line.
if __name__ == '__main__':
    deletePolicyWithArn(Arn=pargs.arn)
| StarcoderdataPython |
138472 | <gh_stars>1000+
# NOTE(review): this is a jedi completion/inference test fixture.  The `#?`
# comment lines are machine-parsed expected-completion assertions, and the
# "syntax errors" section contains a DELIBERATE syntax error — the code must
# stay byte-for-byte identical; only this header note was added.
# -----------------
# list comprehensions
# -----------------
# basics:
a = ['' for a in [1]]
#? str()
a[0]
#? ['insert']
a.insert
a = [a for a in [1]]
#? int()
a[0]
y = 1.0
# Should not leak.
[y for y in [3]]
#? float()
y
a = [a for a in (1, 2)]
#? int()
a[0]
a = [a for a,b in [(1,'')]]
#? int()
a[0]
a = [a for (a,b) in [(1,'')]]
#? int()
a[0]
arr = [1,'']
a = [a for a in arr]
#? int()
a[0]
#? str()
a[1]
#? int() str()
a[2]
a = [a if 1.0 else '' for a in [1] if [1.0]]
#? int() str()
a[0]
# name resolve should be correct
left, right = 'a', 'b'
left, right = [x for x in (left, right)]
#? str()
left
# with a dict literal
#? int()
[a for a in {1:'x'}][0]
# list comprehensions should also work in combination with functions
def _listen(arg):
    for x in arg:
        #? str()
        x
_listen(['' for x in [1]])
#?
([str for x in []])[0]
# -----------------
# nested list comprehensions
# -----------------
b = [a for arr in [[1, 1.0]] for a in arr]
#? int()
b[0]
#? float()
b[1]
b = [arr for arr in [[1, 1.0]] for a in arr]
#? int()
b[0][0]
#? float()
b[1][1]
b = [a for arr in [[1]] if '' for a in arr if '']
#? int()
b[0]
b = [b for arr in [[[1.0]]] for a in arr for b in a]
#? float()
b[0]
#? str()
[x for x in 'chr'][0]
# From GitHub #26
#? list()
a = [[int(v) for v in line.strip().split() if v] for line in ["123", str(), "123"] if line]
#? list()
a[0]
#? int()
a[0][0]
# From GitHub #1524
#?
[nothing for nothing, _ in [1]][0]
# -----------------
# generator comprehensions
# -----------------
left, right = (i for i in (1, ''))
#? int()
left
#? str()
right
gen = (i for i in (1,))
#? int()
next(gen)
#?
gen[0]
gen = (a for arr in [[1.0]] for a in arr)
#? float()
next(gen)
#? int()
(i for i in (1,)).send()
# issues with different formats
left, right = (i for i in
               ('1', 2))
#? str()
left
#? int()
right
# -----------------
# name resolution in comprehensions.
# -----------------
def x():
    """Should not try to resolve to the if hio, which was a bug."""
    #? 22
    [a for a in h if hio]
    if hio: pass
# -----------------
# slices
# -----------------
#? list()
foo = [x for x in [1, '']][:1]
#? int()
foo[0]
#? str()
foo[1]
# -----------------
# In class
# -----------------
class X():
    def __init__(self, bar):
        self.bar = bar
    def foo(self):
        x = [a for a in self.bar][0]
        #? int()
        x
        return x
#? int()
X([1]).foo()
# -----------------
# dict comprehensions
# -----------------
#? int()
list({a - 1: 3 for a in [1]})[0]
d = {a - 1: b for a, b in {1: 'a', 3: 1.0}.items()}
#? int()
list(d)[0]
#? str() float()
d.values()[0]
#? str()
d[0]
#? float() str()
d[1]
#? float()
d[2]
# -----------------
# set comprehensions
# -----------------
#? set()
{a - 1 for a in [1]}
#? set()
{a for a in range(10)}
#? int()
[x for x in {a for a in range(10)}][0]
#? int()
{a for a in range(10)}.pop()
#? float() str()
{b for a in [[3.0], ['']] for b in a}.pop()
#? int()
next(iter({a for a in range(10)}))
#? int()
[a for a in {1, 2, 3}][0]
# -----------------
# syntax errors
# -----------------
# Issue #1146
#? ['list']
[int(str(x.value) for x in list
def reset_missing_bracket(): pass
# -----------------
# function calls
# -----------------
def foo(arg):
    return arg
x = foo(x for x in [1])
#? int()
next(x)
#?
x[0]
# While it's illegal to have more than one argument, when a generator
# expression is involved, it's still a valid parse tree and Jedi should still
# work (and especially not raise Exceptions). It's debatable wheter inferring
# values for invalid statements is a good idea, but not failing is a must.
#? int()
next(foo(x for x in [1], 1))
def bar(x, y):
    return y
#? str()
next(bar(x for x in [1], x for x in ['']))
| StarcoderdataPython |
1773037 | <reponame>VyomUnadkat/finding-clip-in-movie-using-feature
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 11:28:24 2018
@author: vyomunadkat
"""
import numpy as np
from scipy import spatial
#calculating the difference between the f.v.
def mse(imageA, imageB):
    """Mean squared error between two equal-shaped arrays, normalised by the
    product of the first two dimensions of *imageA*."""
    diff = imageA.astype("float") - imageB.astype("float")
    total = np.sum(diff ** 2)
    return total / float(imageA.shape[0] * imageA.shape[1])
def similar(imageA, imageB):
    """Return True when two feature vectors are considered the same clip.

    Combines an MSE-based score (1 - mse) and cosine similarity; both must
    clear their thresholds.  Prints the two scores for debugging, matching
    the original behavior.
    """
    m = 1 - mse(imageA, imageB)
    s = 1 - spatial.distance.cosine(imageA, imageB)
    # BUGFIX(review): removed the euclidean distance computation — it was
    # assigned to a local and never used (pure dead work).
    print(m)
    print(s)
    return bool(s >= 0.97 and m >= 0.99)
# Load precomputed feature vectors for three encodes of the same clip.
data = np.load('bohemain_360.npy')
data1 = np.load('bohemain_720_60.npy')
data2 = np.load('bohemain_720_60_blur.npy')
# NOTE(review): return values discarded — presumably exploratory REPL calls.
mse(data, data1)
mse(data, data2)
# NOTE(review): `pd` is never imported in this file, and files_fr/duration_fr/
# fv_fr/true_or_false are undefined here — this reads like a notebook paste
# and will raise NameError if run as-is.
reference_list_fr = pd.DataFrame(
    {'video': files_fr,
     'duration': duration_fr,
     'feature_vector' : fv_fr,
     't/f' : true_or_false
    })
# JUST TESTING
#euclidian
# NOTE(review): `stats` is imported but never used below.
import scipy.stats as stats
# NOTE(review): `reference_list_cll` is never defined in this file — NameError
# if executed; looks like a second DataFrame from a missing notebook cell.
a= reference_list_cll.iloc[1]['feature_vector']
b= reference_list_fr.iloc[8]['feature_vector']
dist = np.linalg.norm(a-b)
print(dist)
#chi squared
def chiSquared(p,q):
    """Symmetric chi-squared distance between two vectors/histograms.

    The 1e-6 term guards against division by zero for empty bins.
    """
    numerator = (p - q) ** 2
    denominator = p + q + 1e-6
    return 0.5 * np.sum(numerator / denominator)
# NOTE(review): same undefined names as above (reference_list_cll) — this
# fragment cannot run standalone.
a= reference_list_cll.iloc[0]['feature_vector']
b= reference_list_fr.iloc[3]['feature_vector']
print(chiSquared(a,b)) | StarcoderdataPython |
1696328 | <gh_stars>0
from typing import *
from electronics_model.PinAssignmentUtil import leaf_circuit_ports, PinName, ConcretePinName
from electronics_model import *
@non_library
class AssignablePinBlock(GeneratorBlock):
    """Provides two features common to microcontrollers:
    new_io(tpe): returns a fresh IO port of type tpe
    suggest_pin(port, str): suggests a pinmap
    TODO should these be separate?"""
    def __init__(self):
        super().__init__()
        # All ports ever registered via _add_assignable_io, in order.
        self._all_assignable_ios: List[Port] = []
        # Unclaimed ports, grouped by concrete port class; new_io pops these.
        self._remaining_assignable_ios: Dict[Type[Port], List[Port]] = {}
        # User-supplied assignment string, e.g. "name=pin;name2=NC".
        self.pin_assigns = self.Parameter(StringExpr())
    # TODO type signature could be enhanced to only allow iterable pin with Bundle type
    PortType = TypeVar('PortType', bound=Union[CircuitPort, Bundle])
    def new_io(self, tpe: Type[PortType]) -> PortType:
        """Claim and return the next unused IO port of exactly type *tpe*.

        Asserts if the block has no (remaining) ports of that type.
        """
        # TODO maybe tpe should be a connectable type? or should this be an assign-and-connect op?
        assert tpe in self._remaining_assignable_ios, f"{type(self)} has no IOs of type {tpe}"
        remaining_list = self._remaining_assignable_ios[tpe]
        assert remaining_list, f"{type(self)} has no more IOs of type {tpe}"
        port = remaining_list.pop(0)
        return port # type: ignore
    def _add_assignable_io(self, port: Port):
        """Register *port* as assignable (keyed by its exact runtime class)."""
        self._all_assignable_ios.append(port)
        self._remaining_assignable_ios.setdefault(type(port), []).append(port)
    def _get_suggested_pin_maps(self, assigns_str: str) -> IdentityDict[CircuitPort, PinName]:
        """Parse "name=pin;..." into a leaf-port -> pin-name map.

        'NC' maps a leaf to NotConnectedPin.  Asserts if any assignment names
        a port that is not connected/found.  Only connected top-level ports
        are considered — presumably by design; confirm if NC-only assigns on
        unconnected ports should be allowed.
        """
        assigns_per_pin = [pin_str.split('=')
                           for pin_str in assigns_str.split(';')
                           if pin_str]
        assigns_by_pin = {pin_str[0]: pin_str[1]
                          for pin_str in assigns_per_pin}
        assigned_pins: Set[str] = set()
        pinmap: IdentityDict[CircuitPort, PinName] = IdentityDict()
        for top_port in self._all_assignable_ios:
            if self.get(top_port.is_connected()):
                port_name = self.get(top_port.link().name())
                for leaf_postfix, leaf_port in leaf_circuit_ports("", top_port):
                    leaf_name = port_name + leaf_postfix
                    if leaf_name in assigns_by_pin:
                        assign_target_str = assigns_by_pin[leaf_name]
                        if assign_target_str == 'NC':
                            pinmap[leaf_port] = NotConnectedPin
                        else:
                            pinmap[leaf_port] = assign_target_str
                        assigned_pins.add(leaf_name)
        # Every user-specified name must have matched some leaf port.
        unassigned_pins = set(assigns_by_pin.keys()).difference(assigned_pins)
        assert not unassigned_pins, f"specified pin assigns with invalid names: {', '.join(unassigned_pins)}"
        return pinmap
| StarcoderdataPython |
44966 | import json
import os
from eg import config
from eg import substitute
from eg import util
from mock import Mock
from mock import patch
# Fixture files: the same 'pwd' example page before and after whitespace
# squeezing, used by the formatting tests.
PATH_UNSQUEEZED_FILE = os.path.join(
    'test',
    'assets',
    'pwd_unsqueezed.md'
)
PATH_SQUEEZED_FILE = os.path.join(
    'test',
    'assets',
    'pwd_squeezed.md'
)
def _create_config(
    examples_dir=None,
    custom_dir=None,
    color_config=None,
    use_color=True,
    pager_cmd=None,
    editor_cmd=None,
    squeeze=False,
    subs=None
):
    """
    Create a config.Config object with default values for expediency in
    testing.
    """
    # All parameters are forwarded to config.Config by keyword, one-to-one;
    # capture them from the local namespace instead of repeating each name.
    forwarded = dict(locals())
    return config.Config(**forwarded)
# Tests for util.get_file_paths_for_program: os.walk is mocked, so no disk IO.
@patch('os.walk')
def test_get_file_paths_for_program_with_single(mock_walk):
    # One matching file directly in the examples dir.
    program = 'cp'
    examples_dir = '/Users/tyrion'
    program_file = program + util.EXAMPLE_FILE_SUFFIX
    expected = ['/Users/tyrion/cp.md']
    mock_walk.return_value = [
        [examples_dir, [], [program_file, 'cp.txt', 'other_file.md']],
    ]
    actual = util.get_file_paths_for_program(program, examples_dir)
    assert actual == expected
    mock_walk.assert_called_once_with(examples_dir)
@patch('os.walk')
def test_get_file_paths_for_program_with_nested(mock_walk):
    # Matching files at several directory depths are all collected, in order.
    program = 'cp'
    examples_dir = '/Users/tyrion'
    program_file = 'cp.md'
    mock_walk.return_value = [
        [
            examples_dir,
            ['dirA', 'dirB'],
            [program_file, 'cp.txt', 'other_file.md'],
        ],
        [
            examples_dir + '/dirA',
            ['dirA-child'],
            [program_file, 'bad.md'],
        ],
        [
            examples_dir + '/dirA/dirA-child',
            [],
            ['bad.md', program_file, 'wtf.md'],
        ],
        [
            examples_dir + '/dirB',
            [],
            ['foo.md', program_file],
        ],
    ]
    expected = [
        '/Users/tyrion/cp.md',
        '/Users/tyrion/dirA/cp.md',
        '/Users/tyrion/dirA/dirA-child/cp.md',
        '/Users/tyrion/dirB/cp.md',
    ]
    actual = util.get_file_paths_for_program(program, examples_dir)
    assert actual == expected
    mock_walk.assert_called_once_with(examples_dir)
@patch('os.walk')
def test_get_file_paths_for_program_with_none(mock_walk):
    # No files anywhere -> empty list.
    expected = []
    mock_walk.return_value = []
    actual = util.get_file_paths_for_program('cp', '/Users/tyrion')
    assert actual == expected
    mock_walk.assert_called_once_with('/Users/tyrion')
@patch('os.walk')
def test_get_file_paths_for_program_with_no_dir(mock_walk):
    # A None directory short-circuits to an empty list.
    assert util.get_file_paths_for_program('cp', None) == []
# handle_program must abort early (no formatting/paging) when no files exist.
@patch('eg.util.page_string')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_resolved_program')
def test_handle_program_no_entries(
    mock_resolve_program,
    mock_get_contents,
    mock_format,
    mock_page_string,
):
    """
    We should do the right thing if there are no entries for a given program.
    """
    program = 'cp'
    test_config = _create_config()
    mock_resolve_program.return_value = program
    util.handle_program(program, test_config)
    mock_resolve_program.assert_called_once_with(
        program,
        test_config
    )
    # We should have aborted and not called any of the
    # other methods.
    assert mock_get_contents.call_count == 0
    assert mock_format.call_count == 0
    assert mock_page_string.call_count == 0
# Full happy-path flow (no alias): resolve -> paths -> contents -> format ->
# page.  The side_effect helper verifies which directory each lookup used.
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_no_alias(
    mock_page,
    mock_format,
    mock_get_paths,
    mock_get_contents,
    mock_resolve,
):
    """
    If there are entries for the program, handle_program needs to get the
    paths, get the contents, format the contents, and page the resulting
    string.
    """
    program = 'mv'
    examples_dir = 'test-eg-dir'
    custom_dir = 'test-custom-dir'
    color_config = None
    use_color = False
    pager_cmd = 'foo bar'
    squeeze = False
    subs = ['foo', 'bar']
    file_contents = 'I am the contents of mv.md.'
    formatted_contents = 'and I am the formatted contents of mv.md.'
    test_config = _create_config(
        examples_dir=examples_dir,
        custom_dir=custom_dir,
        color_config=color_config,
        use_color=use_color,
        pager_cmd=pager_cmd,
        squeeze=squeeze,
        subs=subs
    )
    default_paths = ['test-eg-dir/mv.md', 'test-eg-dir/foo/mv.md']
    custom_paths = ['test-custom-dir/mv.md', 'test-custom-dir/bar.md']
    def return_correct_path(*args, **kwargs):
        # Dispatch on the directory argument so each dir gets its own paths.
        program_param = args[0]
        dir_param = args[1]
        if program_param != program:
            raise NameError('expected ' + program + ', got ' + program_param)
        if dir_param == examples_dir:
            return default_paths
        elif dir_param == custom_dir:
            return custom_paths
        else:
            raise NameError(
                'got ' +
                dir_param +
                ', expected ' +
                examples_dir +
                ' or ' +
                custom_dir)
    mock_format.return_value = formatted_contents
    mock_get_paths.side_effect=return_correct_path
    mock_get_contents.return_value = file_contents
    mock_resolve.return_value = program
    util.handle_program(program, test_config)
    mock_resolve.assert_called_once_with(
        program,
        test_config
    )
    mock_get_paths.assert_any_call(
        program,
        examples_dir
    )
    mock_get_paths.assert_any_call(
        program,
        custom_dir,
    )
    # Custom paths must come before default paths (custom overrides default).
    mock_get_contents.assert_called_once_with(
        custom_paths[0],
        custom_paths[1],
        default_paths[0],
        default_paths[1],
    )
    mock_format.assert_called_once_with(
        file_contents,
        use_color=test_config.use_color,
        color_config=test_config.color_config,
        squeeze=test_config.squeeze,
        subs=test_config.subs
    )
    mock_page.assert_called_once_with(
        formatted_contents,
        test_config.pager_cmd
    )
# Same flow as above, but the requested name is an alias ('link') that must
# resolve to the real program ('ln') before any path lookups happen.
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_with_alias(
    mock_page,
    mock_format,
    mock_get_paths,
    mock_get_contents,
    mock_resolve,
):
    """
    If there are entries for the program, handle_program needs to get the
    paths, get the contents, format the contents, and page the resulting
    string.
    """
    alias_for_program = 'link'
    resolved_program = 'ln'
    examples_dir = 'test-eg-dir'
    custom_dir = 'test-custom-dir'
    color_config = None
    use_color = False
    pager_cmd = 'foo bar'
    squeeze = False
    subs = ['foo', 'bar']
    file_contents = 'I am the contents of ln.md.'
    formatted_contents = 'and I am the formatted contents of ln.md.'
    test_config = _create_config(
        examples_dir=examples_dir,
        custom_dir=custom_dir,
        color_config=color_config,
        use_color=use_color,
        pager_cmd=pager_cmd,
        squeeze=squeeze,
        subs=subs
    )
    default_paths = ['test-eg-dir/ln.md']
    custom_paths = ['test-custom-dir/ln.md']
    def return_correct_path(*args, **kwargs):
        # Lookups must use the RESOLVED program name, never the alias.
        program_param = args[0]
        dir_param = args[1]
        if program_param != resolved_program:
            raise NameError(
                'expected ' +
                resolved_program +
                ', got ' +
                program_param
            )
        if dir_param == examples_dir:
            return default_paths
        elif dir_param == custom_dir:
            return custom_paths
        else:
            raise NameError(
                'got ' +
                dir_param +
                ', expected ' +
                examples_dir +
                ' or ' +
                custom_dir)
    mock_format.return_value = formatted_contents
    mock_get_paths.side_effect = return_correct_path
    mock_get_contents.return_value = file_contents
    mock_resolve.return_value = resolved_program
    util.handle_program(
        alias_for_program,
        test_config
    )
    mock_resolve.assert_called_once_with(
        alias_for_program,
        test_config
    )
    mock_get_paths.assert_any_call(
        resolved_program,
        examples_dir
    )
    mock_get_paths.assert_any_call(
        resolved_program,
        custom_dir,
    )
    mock_get_contents.assert_called_once_with(
        custom_paths[0],
        default_paths[0]
    )
    mock_format.assert_called_once_with(
        file_contents,
        use_color=test_config.use_color,
        color_config=test_config.color_config,
        squeeze=test_config.squeeze,
        subs=test_config.subs
    )
    mock_page.assert_called_once_with(
        formatted_contents,
        test_config.pager_cmd
    )
# Builds a real on-disk fixture with pytest's tmpdir: '*' marks programs
# present in both dirs, '+' custom-only, '->' alias entries.
def test_get_list_of_all_supported_commands(tmpdir):
    dir_example = tmpdir.mkdir('examples')
    dir_custom = tmpdir.mkdir('custom')
    config = _create_config(
        examples_dir=str(dir_example),
        custom_dir=str(dir_custom),
    )
    expected = [
        'a-only-default',
        'b-both *',
        'c-only-custom +',
        'd-only-custom-nested +',
        'e-only-default-nested',
        'f-default-custom-nested',
        'g-both-different-levels *',
        't-a-only-default-alias -> a-only-default',
        'u-b-both-alias -> b-both *',
        'v-c-only-custom-alias -> c-only-custom +'
    ]
    aliases = {
        't-a-only-default-alias': 'a-only-default',
        'u-b-both-alias': 'b-both',
        'v-c-only-custom-alias': 'c-only-custom'
    }
    # Make the directory structure we expect.
    dir_example_nested = dir_example.mkdir('default-nested')
    dir_custom_nested = dir_custom.mkdir('custom-nested')
    dir_example.join('a-only-default.md').write('foo')
    dir_example.join('b-both.md').write('foo')
    dir_custom.join('b-both.md').write('foo')
    dir_custom.join('c-only-custom.md').write('foo')
    dir_custom_nested.join('d-only-custom-nested.md').write('foo')
    dir_example_nested.join('e-only-default-nested.md').write('foo')
    dir_example_nested.join('f-default-custom-nested.md').write('foo')
    dir_example.join('g-both-different-levels.md').write('foo')
    dir_custom_nested.join('g-both-different-levels.md').write('foo')
    # Use the 'with' context manager rather than the @decorator, because the
    # tmpdir fixture doesn't play nice with the decorator.
    with patch('eg.util.get_alias_dict') as mock_get_alias:
        mock_get_alias.return_value = aliases
        actual = util.get_list_of_all_supported_commands(config)
    assert actual == expected
    mock_get_alias.assert_called_once_with(config)
def test_list_supported_programs_fails_gracefully_if_no_dirs():
    # No example/custom dirs configured -> empty list, no exception.
    test_config = _create_config()
    actual = util.get_list_of_all_supported_commands(test_config)
    target = []
    assert actual == target
# Four thin scenarios over _helper_assert_about_pager (defined below):
# (str_to_page, pager_cmd, use_fallback).
def test_calls_pipepager_if_not_less():
    """
    We're special casing less a bit, as it is the default value, so if a custom
    command has been set that is NOT less, we should call pipepager straight
    away.
    """
    _helper_assert_about_pager('page me plz', 'cat', False)
def test_calls_fallback_pager_if_none():
    """
    If pager_cmd is None, we should just use the fallback pager.
    """
    _helper_assert_about_pager('page me plz', None, True)
def test_calls_pipepager_if_less():
    """
    We should call pipepager if we ask to use less and less is installed on the
    machine.
    """
    _helper_assert_about_pager('a fancy value to page', 'less -R', False)
def test_calls_fallback_if_cmd_is_flag_string():
    """
    We are using a flag string to indicate if we should use the fallback pager.
    """
    _helper_assert_about_pager(
        'page via fallback',
        util.FLAG_FALLBACK,
        True
    )
# Decorators apply bottom-up: the pydoc.pipepager mock binds to the
# `pipepager` parameter, pydoc.pager to `default_pager`.
@patch('pydoc.pager')
@patch('pydoc.pipepager')
def _helper_assert_about_pager(
    str_to_page,
    pager_cmd,
    use_fallback,
    pipepager,
    default_pager,
):
    """
    Help with asserting about pager.
    str_to_page: what you're paging
    pager_cmd: the string you're passing to pipepager (or None)
    use_fallback: false if we should actually use pydoc.pipepager, true if we
    instead are going to fallback to pydoc.pager
    """
    util.page_string(str_to_page, pager_cmd)
    if use_fallback:
        default_pager.assert_called_once_with(str_to_page)
        assert pipepager.call_count == 0
    else:
        assert default_pager.call_count == 0
        pipepager.assert_called_once_with(
            str_to_page,
            cmd=pager_cmd
        )
@patch('eg.util.pydoc.pipepager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_not_less(pipepager_mock):
    """
    Do not fail when user hits ctrl-c while in pager.
    """
    # KeyboardInterrupt raised by the pager must be swallowed by page_string.
    try:
        util.page_string('page me plz', 'cat')
    except KeyboardInterrupt:
        raise AssertionError('Should not have got this far')
    pipepager_mock.assert_called_once_with('page me plz', cmd='cat')
@patch('eg.util.pydoc.pager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_none(pager_mock):
    """
    Do not fail when user hits ctrl-c while in pipepager.
    """
    try:
        util.page_string('page me plz', None)
    except KeyboardInterrupt:
        raise AssertionError('Should not have got this far')
    pager_mock.assert_called_once_with('page me plz')
def test_get_contents_from_files_handles_none():
    """
    Empty string if no files.
    """
    _helper_assert_file_contents(
        [],
        ''
    )
def test_get_contents_from_files_handles_one():
    """A single file's contents come back unchanged."""
    file_infos = [
        {
            'path': 'test/path',
            'contents': 'contents of file'
        }
    ]
    combined_contents = 'contents of file'
    _helper_assert_file_contents(
        file_infos,
        combined_contents
    )
def test_get_contents_from_files_handles_multiple():
    """Multiple files are concatenated in order, with no separator added."""
    file_infos = [
        {
            'path': 'path/1',
            'contents': 'foo\n'
        },
        {
            'path': 'path/2/foo',
            'contents': 'bar\n'
        },
        {
            'path': 'another/path',
            'contents': 'baz'
        }
    ]
    combined_contents = 'foo\nbar\nbaz'
    _helper_assert_file_contents(
        file_infos,
        combined_contents
    )
@patch('eg.util._get_contents_of_file')
def _helper_assert_file_contents(
    file_infos,
    target_contents,
    get_contents_mock,  # injected by @patch
):
    """
    Helper method to assert things about the get_contents_from_files method.
    Does not actually hit the disk.
    file_infos: array of { path, contents } dicts representing files. Array so
        that we can assert proper order calling
    target_contents: the final combined contents that should be returned by the
        get_contents_from_files method.
    """
    # This method will be used by the mock framework to return the right file
    # contents based on the file name.
    def return_file_contents(*args, **kwargs):
        for file_info in file_infos:
            if file_info['path'] == args[0]:
                return file_info['contents']
        raise TypeError('did not find path in test obj')
    get_contents_mock.side_effect = return_file_contents
    paths = [el['path'] for el in file_infos]
    actual = util.get_contents_from_files(*paths)
    assert actual == target_contents
@patch('eg.util.get_colorized_contents')
@patch('eg.util.get_squeezed_contents')
@patch('eg.util.get_substituted_contents')
def _helper_assert_formatted_contents(
    starting_contents,
    use_color,
    color_config,
    squeeze,
    subs,
    colorized_contents,
    squeezed_contents,
    subbed_contents,
    formatted_result,
    sub_method,  # mocks below are injected bottom-up by the @patch stack
    squeeze_method,
    color_method,
):
    """
    Helper method to assist in asserting things about the
    get_formatted_contents method.
    starting_contents: the starting string that we are working with
    use_color: True if we should use color
    color_config: the color config to be passed to get_colorized_contents
    squeeze: True if we should squeeze
    subs: the list of Substitutions that we should pass to
        get_substituted_contents
    colorized_contents: the result of get_colorized_contents
    squeezed_contents: the result of get_squeezed_contents
    subbed_contents: the result of get_substituted_contents
    formatted_result: the final, formatted string that should be returned
    """
    sub_method.return_value = subbed_contents
    squeeze_method.return_value = squeezed_contents
    color_method.return_value = colorized_contents
    actual = util.get_formatted_contents(
        starting_contents,
        use_color,
        color_config,
        squeeze,
        subs
    )
    # We'll update the contents as they get formatted to make sure
    # we pass the right thing to the various methods.
    contents_thus_far = starting_contents
    if use_color:
        color_method.assert_called_once_with(
            contents_thus_far,
            color_config
        )
        contents_thus_far = colorized_contents
    else:
        assert color_method.call_count == 0
    if squeeze:
        squeeze_method.assert_called_once_with(contents_thus_far)
        contents_thus_far = squeezed_contents
    else:
        assert squeeze_method.call_count == 0
    if subs:
        sub_method.assert_called_once_with(
            contents_thus_far,
            subs
        )
        contents_thus_far = subbed_contents
    else:
        assert sub_method.call_count == 0
    assert actual == formatted_result
def test_get_formatted_contents_does_not_format_methods_if_all_falsey():
    """
    We should invoke none of the formatter methods if the flags are false and
    subs is not truthy.
    """
    starting_contents = 'this is where we start'
    # Positional args: (start, use_color, color_config, squeeze, subs,
    # colorized, squeezed, subbed, expected final result).
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        False,
        None,
        'this was colored',
        'this was squeezed',
        'these contents were subbed',
        starting_contents
    )
def test_get_formatted_contents_calls_colorize_if_use_color():
    """
    Colorize the contents if use_color = True.
    """
    starting_contents = 'this is where we start'
    colorized_contents = 'COLORIZED: this is where we start'
    _helper_assert_formatted_contents(
        starting_contents,
        True,
        'some color config',
        False,
        None,
        colorized_contents,
        'this was squeezed',
        'these contents were subbed',
        colorized_contents
    )
def test_get_formatted_contents_squeezes():
    """If squeeze, we need to squeeze."""
    starting_contents = 'this is where we start'
    squeezed_contents = 'this is the result of a squeezing'
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        True,
        None,
        'this was colored',
        squeezed_contents,
        'these contents were subbed',
        squeezed_contents
    )
def test_get_formatted_contents_subsitutes():
    """If subs is truthy, get_substituted_contents should be called."""
    # NOTE(review): the function name misspells "substitutes"; kept so the
    # test id stays stable.
    starting_contents = 'this is where we start'
    subbed_contents = 'substituted like a teacher'
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        False,
        ['truthy', 'list'],
        'this was colored',
        'this was squeezed',
        subbed_contents,
        subbed_contents
    )
def test_perform_all_formatting():
    """
    When use_color, squeeze, and subs are all truthy, all the formatting
    should be applied in that order.
    """
    starting_contents = 'the starting point for grand formatting'
    subbed_contents = 'subbed is the last thing called so should be the result'
    _helper_assert_formatted_contents(
        starting_contents,
        True,
        'some color config',
        True,
        ['truthy', 'list'],
        'this was colored',
        'this was squeezed',
        subbed_contents,
        subbed_contents
    )
def _get_file_as_string(path):
    """Read the file at ``path`` and return its entire contents."""
    with open(path, 'r') as source:
        return source.read()
def test_get_squeezed_contents_correctly_squeezes():
    """
    Our squeeze method should follow our convention, which is to remove the
    blank line between a description and an example, to keep two blank lines
    between sections, and otherwise have only single blank lines.
    """
    unsqueezed = _get_file_as_string(PATH_UNSQUEEZED_FILE)
    # the target squeezed output is a reference implementation in
    # pwd_squeezed.md.
    target = _get_file_as_string(PATH_SQUEEZED_FILE)
    actual = util.get_squeezed_contents(unsqueezed)
    assert actual == target
def test_get_substituted_contents_handles_empty_subs():
    """Nothing should be formatted if there are no substitutions."""
    contents = 'this should not be subbed'
    assert util.get_substituted_contents(contents, []) == contents
def test_get_substituted_contents_substitutes_calls_correct_methods():
    """
    The get_substituted_contents method calls things in the correct order.
    """
    # NOTE(review): Mock(auto_spec=...) is not the autospec API — it just sets
    # an unused ``auto_spec`` attribute, so these mocks are not spec'd.
    # Probably ``spec=substitute.Substitution`` was intended; confirm.
    sub_one = Mock(auto_spec=substitute.Substitution)
    sub_one_result = 'result of sub one'
    sub_one.apply_and_get_result.return_value = sub_one_result
    sub_two = Mock(auto_spec=substitute.Substitution)
    sub_two_result = 'result of sub two'
    sub_two.apply_and_get_result.return_value = sub_two_result
    starting_contents = 'the string we should be substituting into'
    target = sub_two_result
    subs = [sub_one, sub_two]
    actual = util.get_substituted_contents(starting_contents, subs)
    # Substitutions must be chained: each one receives the previous result.
    sub_one.apply_and_get_result.assert_called_once_with(starting_contents)
    sub_two.apply_and_get_result.assert_called_once_with(sub_one_result)
    assert actual == target
def test_get_substituted_contents_substitutes_correctly():
    """
    Basic test to make sure Substitutions can get applied correctly.
    """
    sub_one = substitute.Substitution('foo', 'bar', False)
    sub_two = substitute.Substitution('bar\n\n', 'baz\n', True)
    start = 'foo\n\n something else\n\n bar\n\n'
    target = 'baz\n something else\n\n baz\n'
    subs = [sub_one, sub_two]
    actual = util.get_substituted_contents(start, subs)
    assert actual == target
@patch('eg.color.EgColorizer')
def test_get_colorized_contents_calls_methods(patched_colorizer_class):
    """
    We should call the correct methods on the EgColorizer objects when we color
    a file.
    """
    raw_contents = 'these are uncolored contents'
    colored_contents = 'COLORED: ' + raw_contents
    color_config = 'some color config'
    # The actual instance created by these calls is stored at return_value.
    colorizer_instance = patched_colorizer_class.return_value
    colorizer_instance.colorize_text.return_value = colored_contents
    actual = util.get_colorized_contents(raw_contents, color_config)
    assert actual == colored_contents
    colorizer_instance.colorize_text.assert_called_once_with(raw_contents)
@patch('eg.util.get_alias_dict')
def _helper_assert_get_resolved_program(
    program,
    resolved_program,
    config_obj,
    alias_dict,
    mock_dict,  # injected by @patch
):
    """
    program: the program to resolve as an alias
    resolved_program: the expected result of the resolution.
    config_obj: the config_obj to use to resolve the alias path
    alias_dict: the dict of aliases to be returned
    """
    mock_dict.return_value = alias_dict
    actual = util.get_resolved_program(program, config_obj)
    assert actual == resolved_program
    mock_dict.assert_called_once_with(config_obj)
def test_get_resolved_program_no_alias():
    """
    A program that is not an alias should return itself.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = 'a config'
    # Bug fix: this test previously exercised ('link', 'ln'), which is the
    # *alias* case and contradicted the docstring. 'cp' is not in alias_dict,
    # so it must resolve to itself.
    _helper_assert_get_resolved_program('cp', 'cp', config_obj, alias_dict)
def test_get_resolved_program_is_alias():
    """
    A program that is an alias should return the resolved value.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = 'some new config'
    # Bug fix: this test previously exercised ('cp', 'cp'), the non-alias
    # case, contradicting the docstring. 'link' is aliased to 'ln', so
    # resolution must return 'ln'.
    _helper_assert_get_resolved_program('link', 'ln', config_obj, alias_dict)
def test_get_alias_dict_returns_contents_of_correct_file():
    """
    get_alias_dict should read data from the file at the default path.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = _create_config(
        examples_dir='path/to/examples/dir',
    )
    alias_file_path = 'path/to/alias/file'
    alias_dict_str = json.dumps(alias_dict)
    _helper_assert_get_alias_dict(
        alias_dict_str,
        alias_dict,
        config_obj,
        alias_file_path,
        True  # the alias file exists on disk
    )
def test_get_alias_dict_fails_gracefully_if_not_file():
    """
    Since users can specify a directory for examples that might not contain the
    aliases file, we want to fail gracefully if the file doesn't exist.
    """
    contents_of_alias_dict_file = 'should never be reached'
    config_obj = _create_config(
        examples_dir='path/to/examples/dir',
    )
    alias_file_path = 'path/to/the/alias/file'
    _helper_assert_get_alias_dict(
        contents_of_alias_dict_file,
        {},
        config_obj,
        alias_file_path,
        False  # the alias file does NOT exist, so expect an empty dict
    )
@patch('eg.util._get_contents_of_file')
@patch('eg.util._get_alias_file_path')
@patch('os.path.isfile')
def _helper_assert_get_alias_dict(
    contents_of_alias_dict_file,
    target_alias_dict,
    config_obj,
    alias_file_path,
    alias_file_path_is_file,
    mock_is_file,  # mocks injected bottom-up by the @patch stack
    mock_get_alias_file_path,
    mock_get_contents,
):
    """
    contents_of_alias_dict_file: the string contents of the file storing the
        dictionary of aliases
    target_alias_dict: the target result of get_alias_dict
    config_obj: the Config object
    alias_file_path: the path to be returned by _get_alias_file_path
    alias_file_path_is_file: True if the alias path is a file, else False
    """
    mock_is_file.return_value = alias_file_path_is_file
    mock_get_alias_file_path.return_value = alias_file_path
    mock_get_contents.return_value = contents_of_alias_dict_file
    actual = util.get_alias_dict(config_obj)
    assert actual == target_alias_dict
    mock_get_alias_file_path.assert_called_once_with(config_obj)
    mock_is_file.assert_called_once_with(alias_file_path)
    if alias_file_path_is_file:
        mock_get_contents.assert_called_once_with(alias_file_path)
    else:
        assert mock_get_contents.call_count == 0
@patch('os.path.join')
def test_get_alias_file_path(mock_join):
    """
    _get_alias_file_path should just join the example dir and the alias file
    name, to make sure we look in the right place for the file.
    """
    config_obj = _create_config(
        examples_dir='handy/dandy/examples/dir',
    )
    join_result = 'joined path'
    mock_join.return_value = join_result
    actual = util._get_alias_file_path(config_obj)
    assert actual == join_result
    mock_join.assert_called_once_with(
        config_obj.examples_dir,
        util.ALIAS_FILE_NAME
    )
def test_is_example_file_true_if_has_suffix():
    """
    Should be true if ends in EXAMPLE_FILE_SUFFIX.
    """
    file_name = 'find.md'
    actual = util._is_example_file(file_name)
    # Identity check rather than `== True` (PEP 8 / E712).
    assert actual is True
def test_is_example_file_true_if_not_suffix():
    """
    Should be false if the file does not end in EXAMPLE_FILE_SUFFIX.
    """
    # NOTE(review): the name says "true_if_not_suffix" but the expectation is
    # False; the misleading name is kept so the test id stays stable.
    file_name = 'aliases.json'
    actual = util._is_example_file(file_name)
    # Identity check rather than `== False` (PEP 8 / E712).
    assert actual is False
def test_can_parse_alias_file():
    """
    Make sure aliases.json file can be parsed.
    This is to make sure an edit doesn't accidentally corrupt it.
    """
    # We'll have to hardcode this.
    alias_file_path = os.path.join(
        config.DEFAULT_EXAMPLES_DIR,
        util.ALIAS_FILE_NAME
    )
    alias_file_contents = util._get_contents_of_file(alias_file_path)
    alias_dict = json.loads(alias_file_contents)
    # We'll check that link goes to ln, as we know that one will be present.
    assert alias_dict['link'] == 'ln'
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_correct_with_custom_dir(
    mock_call,  # mocks injected bottom-up by the @patch stack
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """
    We should resolve aliases, get the custom file path, and call subprocess.
    """
    program = 'du'
    resolved_program = 'alias for du'
    config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
    paths = ['path/to/custom/du.md', 'foo.md']
    mock_get_program.return_value = resolved_program
    mock_get_paths.return_value = paths
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    mock_get_program.assert_called_once_with(program, config)
    mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
    # Only the first existing path should be opened in the editor.
    mock_call.assert_called_once_with([config.editor_cmd, paths[0]])
    assert mock_inform.call_count == 0
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_creates_file_if_none_exist(
    mock_call,  # mocks injected bottom-up by the @patch stack
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """With no existing example files, the editor opens a new default path."""
    program = 'du'
    resolved_program = 'alias-for-du'
    config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
    paths = []
    mock_get_program.return_value = resolved_program
    mock_get_paths.return_value = paths
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    mock_get_program.assert_called_once_with(program, config)
    mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
    mock_call.assert_called_once_with(
        [config.editor_cmd, 'path/to/custom/alias-for-du.md'])
    assert mock_inform.call_count == 0
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_informs_if_no_custom_dir(
    mock_call,  # mocks injected bottom-up by the @patch stack
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """
    We should inform the user if they are trying to edit with no custom dir.
    This should be true if it is not set and if the path does not exist.
    """
    program = 'awk'
    # First with no custom dir set.
    config = _create_config(editor_cmd='vi -e')
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    assert mock_inform.call_count == 1
    # And now with it set but a nonexistent path.
    config = _create_config(custom_dir='/path/to/custom', editor_cmd='vi -e')
    mock_exists.return_value = False
    util.edit_custom_examples(program, config)
    assert mock_inform.call_count == 2
    # In neither case should we have touched the editor or resolved anything.
    assert mock_call.call_count == 0
    assert mock_get_paths.call_count == 0
    assert mock_get_program.call_count == 0
| StarcoderdataPython |
1797239 | <reponame>MerinRose123/celery_caching_example
# Importing LRUcache from celery
from celery.utils.functional import LRUCache
class Config:
    """Base configuration shared by every environment."""
    DEBUG = False
    SERVICE_NAME = 'celery_caching'
    REDIS_HOST = "0.0.0.0"
    REDIS_PORT = 6379
    # Broker and result backend both point at Redis database 0.
    BROKER_URL = "redis://%s:%s/0" % (REDIS_HOST, REDIS_PORT)
    CELERY_RESULT_BACKEND = BROKER_URL
    # LRU cache that starts evicting once more than 10 keys are stored.
    RESOURCE_CACHE = LRUCache(limit=10)
class LocalConfig(Config):
    """Configuration selected by the "local" key (debugging enabled)."""
    DEBUG = True
class DevelopmentConfig(Config):
    """Configuration selected by the "dev" key."""
    # Development environment
    DEBUG = False
# Lookup table mapping an environment name to its configuration class.
config_by_name = {
    "local": LocalConfig,
    "dev": DevelopmentConfig,
}
109448 | from django.shortcuts import render, get_object_or_404
from django.views.generic import (TemplateView, DetailView,
ListView, CreateView,
UpdateView,DeleteView,FormView,)
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
from .models import Department
def dept_list(request):
    """Render the list of published departments, paginated 10 per page."""
    departments = Department.published.all()
    paginator = Paginator(departments, 10)  # 10 departments in each page
    page = request.GET.get('page')
    try:
        departments = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer deliver the first page
        departments = paginator.page(1)
    except EmptyPage:
        # If page is out of range deliver last page of results
        departments = paginator.page(paginator.num_pages)
    # Bug fix: the context previously used the *value* of ``page`` as a dict
    # key ({..., page: 'pages'}), so templates never received the page number
    # under a 'page' key. Pass it as {'page': page} instead.
    return render(
        request,
        'home/dept_list.html',
        {'departments': departments, 'page': page},
    )
def dept_detail(request, department):
    """Render the detail page for a single published department (by slug)."""
    dept = get_object_or_404(Department, slug=department, status='published')
    return render(request, 'home/dept_detail.html', {'department': dept})
| StarcoderdataPython |
54798 | <filename>testing/resources/test_pmatrixprint.py
# Construct a PMatrix3D (Processing built-in; starts as the identity matrix).
a = PMatrix3D()
# Dump the matrix contents to the console, then end the sketch.
a.print()
exit()
| StarcoderdataPython |
139359 | <gh_stars>10-100
class LoginBase:
    """Minimal base class for login handlers; subclasses do the real work."""

    # Class-level default: -1 means "no free-time expiry recorded".
    freeTimeExpires = -1

    def __init__(self, cr):
        # Hold a reference to the client object passed in by the caller.
        self.cr = cr

    def getErrorCode(self):
        # Base implementation reports "no error".
        return 0

    def needToSetParentPassword(self):
        # Base implementation: no parent password required.
        return 0

    def sendLoginMsg(self, loginName, password, createFlag):
        # Intentionally a no-op; subclasses override to send the message.
        pass
3256044 | """
Copyright (c) 2018 <NAME>
Utility functions for reading and writing data files.
For simplicity, each file stores a single JSON object. Admittedly this isn't
great if the files get large, since we need to read the whole thing to get at
any part of it (or to modify any part of it). But in that case, you should
probably just create a bunch of smaller files anyway.
Each script is supposed to have its own subdirectory where it keeps data files.
Therefore, the read/write functions explicitly take in a parameter for the
subdirectory, so you can't accidentally write your files in the base data/
directory.
"""
import errno
import json
import os
# Root directory under which every script's data subdirectory lives.
DATAFILE_DIR="data"
###############################################################################
# Public API
def readFile(subdir, name, errorOnNonexistent=False, default=None):
    """
    Read the data structure stored in the specified file.
    If the file does not exist (or isn't readable)...
    If errorOnNonexistent then throw an error.
    Else return default.
    If the file exists but can't be parsed, throw an error. If you want to
    catch this case, see tryReadFile.
    """
    fname = _getFilename(subdir, name)
    # Note: this would probably look more natural as:
    #     if not os.path.exists(fname):
    #         # ... file does not exist ...
    #     else:
    #         try:
    #             with open(fname) as f:
    #                 # ... read file ...
    #         except:
    #             # ... assume file was malformed ...
    # but there's technically a race condition in the above: the file could be
    # removed after the os.path.exists() check and before the open(fname). This
    # isn't going to matter in practice, but on principle I've coded it in a
    # different way which I _think_ avoids that race condition.
    #
    # Technically fileExists is really more like "file is a regular file and we
    # have permission to read it", but the point is that if we can't read it
    # and errorOnNonexistent is False, then we want to return the default value
    # rather than error.
    fileExists = False
    try:
        with open(fname, "r") as f:
            # open() succeeded, so any later failure is a parse error, not a
            # missing file; the flag below distinguishes the two cases.
            fileExists = True
            return json.load(f)
    except:
        if not fileExists and not errorOnNonexistent:
            return default
        else:
            raise
def tryReadFile(*args, **kwargs):
    """
    Same as readFile, except that this function returns a pair:
        (success, value)
    Where readFile would succeed, success is True and value is the value
    readFile would return. Where readFile would fail, success is False.
    """
    try:
        return (True, readFile(*args, **kwargs))
    except Exception:
        # Catch Exception rather than a bare ``except:`` so that
        # KeyboardInterrupt / SystemExit still propagate to the caller.
        return (False, None)
def writeFile(subdir, name, value):
    """
    Write the specified value into the specified file. This overwrites the file
    if it already exists. Can fail if we are unable to encode value or if for
    some reason we don't have permission to write the file.
    """
    target = _getFilename(subdir, name)
    # Make sure the subdirectory exists before opening the file for writing.
    _createDirsAbove(target)
    with open(target, "w") as out:
        json.dump(value, out, indent=4, separators=(",", ": "))
def tryWriteFile(*args, **kwargs):
    """
    Same as writeFile, except that this function returns True on success and
    False on failure, rather than throwing an error.
    """
    try:
        writeFile(*args, **kwargs)
        return True
    except Exception:
        # Catch Exception rather than a bare ``except:`` so that
        # KeyboardInterrupt / SystemExit still propagate to the caller.
        return False
def fileExists(subdir, name):
    """Return True if the data file exists as a regular file."""
    return os.path.isfile(_getFilename(subdir, name))
###############################################################################
# Internal helper
def _getFilename(subdir, name):
    """Build the on-disk path for a data file: DATAFILE_DIR/subdir/name."""
    return os.path.join(DATAFILE_DIR, subdir, name)
def _createDirsAbove(fname):
    """
    Create all directories above a file. For example, if fname is
    "a/b/c/d.txt", then this call is (intended to be) equivalent to
    `mkdir -p a/b/c`.
    """
    dirToCreate = os.path.dirname(fname)
    # https://stackoverflow.com/a/600612
    # The errno dance below predates os.makedirs(..., exist_ok=True) and is
    # kept for compatibility: tolerate an already-existing directory, but
    # re-raise if the path exists as something that is not a directory.
    try:
        os.makedirs(dirToCreate)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(dirToCreate):
            pass
        else:
            raise
| StarcoderdataPython |
41455 | <filename>bokeh-app/main.py
import pandas as pd
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import layout
from bokeh.models import (Button, CategoricalColorMapper, ColumnDataSource,
HoverTool, Label, SingleIntervalTicker, Slider)
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
all_df_dict = np.load('./renta/my_file.npy',allow_pickle='TRUE').item()
data = {}
source = ColumnDataSource(data=all_df_dict['0'])
TOOLS = 'save,pan,box_zoom,reset,wheel_zoom'
p = figure(x_range=(1, 16), y_range=(0, 30),title="Kernel de distribución de renta", y_axis_type="linear", plot_height = 400,
tools = TOOLS, plot_width = 800)
p.vbar(x = 'x', top = 'y', color = 'grey', width = np.min(np.abs(np.array(source.data['x'])[0:-2] - np.array(source.data['x'])[1:-1])) , visible = True, source = source)
p.add_tools(HoverTool(tooltips=[("Renta", "@x"), ("Densidad", "@y")]))
#p.xaxis.ticker = SingleIntervalTicker(interval=0)
p.xaxis.axis_label = 'Renta'
#p.yaxis.ticker = SingleIntervalTicker(interval=0)
p.yaxis.axis_label = 'Densidad'
def slider_update(attrname, old, new):
    """Swap the plotted frame when the slider moves (Bokeh on_change hook)."""
    year = slider.value
    # label.text = str(year)
    # Frames are keyed by the step number as a string.
    source.data = all_df_dict[str(year)]
# One slider step per stored frame; valid values are 0 .. len(all_df_dict)-1.
slider = Slider(start=0, end=len(all_df_dict) - 1, value=0, step=1, title="Date")
slider.on_change('value', slider_update)
# Handle of the periodic animation callback; None while not playing.
callback_id = None
def animate_update():
    """Advance the slider by one step, wrapping around after the last frame."""
    year = slider.value + 1
    # Bug fix: the slider runs from 0 to len(all_df_dict) - 1, but the old
    # check (`year > len(all_df_dict)`) let the value reach len(all_df_dict),
    # an index with no entry in the dict. Wrap as soon as we pass the last
    # valid step.
    if year > len(all_df_dict) - 1:
        year = 0
    slider.value = year
def animate():
    """Toggle play/pause: start or stop the periodic frame-advance callback."""
    global callback_id
    if button.label == '► Play':
        button.label = '❚❚ Pause'
        # Advance one frame every 200 ms while playing.
        callback_id = curdoc().add_periodic_callback(animate_update, 200)
    else:
        button.label = '► Play'
        curdoc().remove_periodic_callback(callback_id)
button = Button(label='► Play', width=60)
button.on_click(animate)
# Assemble the document: plot on top, slider and play/pause button below.
# A distinct name avoids shadowing the imported ``layout`` helper.
root = layout([[p], [slider, button]], sizing_mode='fixed')
curdoc().add_root(root)
curdoc().title = "renta"
"""
in terminal use: bokeh serve --show myapp.py
"""
1656561 | <gh_stars>100-1000
__all__ = []
import torch
from torch import nn
from onnx2torch.node_converters.registry import add_converter
from onnx2torch.onnx_graph import OnnxGraph
from onnx2torch.onnx_node import OnnxNode
from onnx2torch.utils.common import OnnxMapping
from onnx2torch.utils.common import OperationConverterResult
from onnx2torch.utils.common import get_shape_from_value_info
# Map spatial rank (input rank minus batch and channel dims) to the matching
# torch BatchNorm class; ranks 0 and 1 both use BatchNorm1d.
_BN_CLASS_FROM_SPATIAL_RANK = {
    0: nn.BatchNorm1d,
    1: nn.BatchNorm1d,
    2: nn.BatchNorm2d,
    3: nn.BatchNorm3d,
}
@add_converter(operation_type='BatchNormalization', version=15)
@add_converter(operation_type='BatchNormalization', version=14)
@add_converter(operation_type='BatchNormalization', version=9)
def _(node: OnnxNode, graph: OnnxGraph) -> OperationConverterResult:
    """Convert an ONNX BatchNormalization node to the matching torch module."""
    # ONNX input order is (X, scale, B, input_mean, input_var); inputs 1-4 are
    # initializers that become the torch module's parameters/buffers.
    scale_value_name = node.input_values[1]
    scale = graph.initializers[scale_value_name]
    scale = scale.to_torch()
    bias_value_name = node.input_values[2]
    bias = graph.initializers[bias_value_name]
    bias = bias.to_torch()
    mean_value_name = node.input_values[3]
    mean = graph.initializers[mean_value_name]
    mean = mean.to_torch()
    var_value_name = node.input_values[4]
    var = graph.initializers[var_value_name]
    var = var.to_torch()
    input_value_info = graph.value_info[node.input_values[0]]
    input_shape = get_shape_from_value_info(input_value_info)
    # Spatial rank = input rank minus the batch and channel dimensions.
    spatial_rank = len(input_shape) - 2
    bn_class = _BN_CLASS_FROM_SPATIAL_RANK.get(spatial_rank, None)
    if bn_class is None:
        raise NotImplementedError(f'BatchNorm operation with spatial rank == {spatial_rank} is not implemented')
    node_attributes = node.attributes
    training_mode = node_attributes.get('training_mode', 0)
    epsilon = node_attributes.get('epsilon', 1e-5)
    momentum = node_attributes.get('momentum', 0.9)
    if training_mode != 0:
        raise NotImplementedError('BatchNorm nodes in training mode are not supported.')
    torch_module = bn_class(
        num_features=scale.size()[0],
        eps=epsilon,
        # ONNX and PyTorch define momentum as complements of each other.
        momentum=1 - momentum,  # See PyTorch documentation for batch norm.
    )
    # Inference mode only: freeze running statistics and copy the weights in.
    torch_module.eval()
    with torch.no_grad():
        torch_module.running_mean.data = mean
        torch_module.running_var.data = var
        torch_module.weight.data = scale
        torch_module.bias.data = bias
    return OperationConverterResult(
        torch_module=torch_module,
        onnx_mapping=OnnxMapping(
            inputs=(node.input_values[0],),
            outputs=node.output_values,
        ),
    )
4823085 | """
Module to initiate sight words, place them into managable lists, and call each word at random
"""
import random
sight_list_1 = ['the', 'of', 'and', 'a', 'to', 'in', 'is', 'you', 'that', 'it', 'he', 'was', 'for', 'on', 'are', 'as', 'with', 'his', 'they', 'I', 'at', 'be', 'this', 'have', 'from', 'or', 'one', 'had', 'by', 'word', 'but', 'not', 'what', 'all', 'we\'re', 'we', 'when', 'your', 'can', 'said', 'there', 'use', 'an', 'each', 'which', 'she', 'do', 'how', 'their', 'if']
sight_list_2 = ['will', 'up', 'other', 'about', 'out', 'many', 'then', 'them', 'these', 'so', 'some', 'her', 'would', 'make', 'like', 'him', 'into', 'time', 'has', 'look', 'two', 'more', 'write', 'go', 'see', 'number', 'no', 'way', 'could', 'people', 'my', 'than', 'first', 'water', 'been', 'call', 'who', 'oil', 'now', 'find', 'long', 'down', 'day', 'did', 'get', 'come','made', 'may', 'part', 'over']
sight_list_3 = ['new', 'sound', 'take', 'only', 'little', 'work', 'know', 'place', 'year', 'live', 'me', 'back','give', 'most', 'very', 'after', 'thing', 'our', 'just', 'name', 'good', 'sentence', 'man', 'think', 'say', 'great', 'where', 'help', 'through', 'much', 'before', 'line', 'right', 'too', 'mean', 'old', 'any', 'same', 'tell', 'boy', 'follow', 'came', 'want', 'show', 'also', 'around', 'farm', 'three', 'small', 'set']
sight_list_4 = ['put', 'end', 'does', 'another', 'well', 'large', 'must', 'big', 'even', 'such', 'because', 'turn', 'here', 'why', 'ask', 'went', 'men', 'read', 'need', 'land', 'different', 'home', 'us', 'move', 'try', 'kind', 'hand', 'picture', 'again', 'change', 'off', 'play', 'spell', 'air', 'away', 'animal', 'house', 'point', 'page', 'letter', 'mother', 'answer', 'found', 'study', 'still', 'learn', 'should', 'America', 'world', 'high']
sight_list_5 = ['every', 'near', 'add', 'food', 'between', 'own', 'below', 'country', 'plant', 'last', 'school', 'father', 'keep', 'tree', 'never', 'start', 'city', 'earth', 'eye', 'light', 'thought', 'head', 'under', 'story', 'saw', 'left', 'don\'t', 'few', 'while', 'along', 'might', 'close', 'something', 'seem', 'next', 'hard', 'open', 'example', 'begin', 'life', 'always', 'those', 'both', 'paper', 'together', 'got', 'group', 'often', 'run', 'important']
sight_list_6 = ['until', 'children', 'side', 'feet', 'car', 'mile', 'night', 'walk', 'white', 'sea', 'began', 'grow', 'took', 'river', 'four', 'carry', 'state', 'once', 'book', 'hear', 'stop', 'without', 'second', 'late', 'miss', 'idea', 'enough', 'eat', 'face', 'watch', 'far', 'country', 'real', 'almost', 'let', 'above', 'girl', 'sometimes', 'mountain', 'cut', 'young', 'talk', 'soon', 'list', 'song', 'being', 'leave', 'family', 'it\'s', 'afternoon']
# All six sight-word lists collected for indexed access by word_choose.
master = [sight_list_1, sight_list_2, sight_list_3, sight_list_4, sight_list_5, sight_list_6]
# Module-level scratch variables; word_choose uses same-named locals instead.
pick_word = ''
word = []
def word_choose(array, indexes, subindex1, subindex2):
    """Pick a random word from one slice of one sub-list.

    array: a list of word lists (normally ``master``)
    indexes: which sub-list to draw from
    subindex1, subindex2: slice bounds applied to that sub-list
    """
    candidates = array[indexes][subindex1:subindex2]
    return random.choice(candidates)
def correct(word):
    """
    Remove *word* from whichever list in ``master`` contains it (presumably so
    a correctly-answered word is not drawn again — TODO confirm intent).

    Returns True if the word was found and removed, False otherwise.

    Bug fix: the previous body called ``master.index(word_choose.word)`` —
    ``word_choose`` has no ``word`` attribute — and then ``.pop(pick_word)``,
    which expects an integer index, so every call raised.
    """
    for sight_list in master:
        if word in sight_list:
            sight_list.remove(word)
            return True
    return False
1693852 | import siliconcompiler
# TODO: find pytest-based way to do mocking
import unittest.mock
def test_cli_multi_source():
''' Regression test for bug where CLI parser wasn't handling multiple
source files properly.
'''
chip = siliconcompiler.Chip()
# I think it doesn't matter if these files actually exist, since we're just
# checking that the CLI app parses them correctly
args = ['sc', 'examples/ibex/ibex_alu.v', 'examples/ibex/ibex_branch_predict.v',
'-target', 'asicflow_freepdk45']
with unittest.mock.patch('sys.argv', args):
chip.create_cmdline('sc')
assert chip.get('source') == ['examples/ibex/ibex_alu.v',
'examples/ibex/ibex_branch_predict.v']
assert chip.get('target') == 'asicflow_freepdk45'
| StarcoderdataPython |
1746979 | #!/usr/bin/env python3
import os
from setuptools import find_packages, setup, Extension
CYTHONIZE = True
# We'll cythonize by default since we can rely on Cython being installed at
# install-time: it is a build-requirement, defined in pyproject.toml. If you
# don't want to cythonize, but compile using the .c sources included in the
# package source distribution, set the CYTHONIZE environment variable, e.g.
#
#   CYTHONIZE=0 pip install ...
#
if "CYTHONIZE" in os.environ:
    print("CYTHONIZE = %s" % os.environ["CYTHONIZE"])
    # Only the exact value '0' disables cythonization.
    if os.environ["CYTHONIZE"] == '0':
        CYTHONIZE = False
try:
    from Cython.Build import cythonize
except ImportError:
    # Fall back to the pre-generated .c sources when Cython is unavailable.
    print("WARNING: Cython not available")
    CYTHONIZE = False
def no_cythonize(extensions, **_ignore):
    """Transform extensions to use packaged pre-cythonized sources.

    This function replaces :func:`Cython.Build.cythonize` when running
    ``setup.py`` from a source distribution that packages the ``.c`` files
    that result from cythonization of ``.pyx`` files.

    Adapted from https://tinyurl.com/y4aavzq5.
    """
    # https://tinyurl.com/y4aavzq5 ->
    # https://cython.readthedocs.io/en/latest/src/userguide/
    # source_files_and_compilation.html#distributing-cython-modules
    for extension in extensions:
        sources = []
        for sfile in extension.sources:
            path, ext = os.path.splitext(sfile)
            if ext in (".pyx", ".py"):
                # C++ extensions get .cpp sources; plain C extensions get .c.
                # (The original computed this mapping twice, assigning a dead
                # ``ext`` value that was never used.)
                sfile = path + (".cpp" if extension.language == "c++" else ".c")
            sources.append(sfile)
        # Replace in place so the Extension object keeps its own list.
        extension.sources[:] = sources
    return extensions
# The compiled extension modules shipped by this package; ``cypack.sub.wrong``
# additionally links a plain C helper.
EXTENSIONS = [
    Extension("cypack.utils", ["src/cypack/utils.pyx"]),
    Extension("cypack.answer", ["src/cypack/answer.pyx"]),
    Extension("cypack.fibonacci", ["src/cypack/fibonacci.pyx"]),
    Extension(
        "cypack.sub.wrong",
        ["src/cypack/sub/wrong.pyx", "src/cypack/sub/helper.c"],
    ),
]
if CYTHONIZE:
    # Compiler directives reference:
    # https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#compiler-directives
    EXTENSIONS = cythonize(
        EXTENSIONS,
        compiler_directives={"language_level": 3, "embedsignature": True},
    )
else:
    print("WARNING: Not cythonizating")
    EXTENSIONS = no_cythonize(EXTENSIONS)
# Runtime and development requirements are kept in plain-text files.
with open("requirements.txt") as fp:
    INSTALL_REQUIRES = fp.read().strip().split("\n")
with open("requirements-dev.txt") as fp:
    DEV_REQUIRES = fp.read().strip().split("\n")
with open('README.md', encoding='utf8') as readme_file:
    README = readme_file.read()
def get_version(filename):
    """Return the version string assigned to ``__version__`` in *filename*.

    Raises ``ValueError`` when no ``__version__`` line is found.
    """
    with open(filename, encoding='utf8') as fh:
        for line in fh:
            if not line.startswith('__version__'):
                continue
            # Take the text between the first two '=' signs and drop the quotes.
            rhs = line.split('=')[1]
            return rhs.strip()[1:-1]
    raise ValueError("Cannot extract version from %s" % filename)
# Package metadata and build configuration; extensions and requirement lists
# come from the statements above.
setup(
    ext_modules=EXTENSIONS,
    install_requires=INSTALL_REQUIRES,
    extras_require={
        "dev": DEV_REQUIRES,
        "docs": ["sphinx", "sphinx-rtd-theme"],
    },
    author="<NAME>",
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Education',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: C',
        'Programming Language :: Cython',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
        'License :: OSI Approved :: MIT License',
    ],
    description="Example of a package with Cython extensions",
    python_requires='>=3.7',
    long_description=README,
    long_description_content_type='text/markdown',
    name='mg-cython-package-example',
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    # Ship .pxd/.h so dependents can cimport; plus the package's data files.
    package_data={'*': ['*.pxd', '*.h'], 'cypack': ['data/*']},
    url='https://github.com/goerz-testing/cython-package-example',
    version=get_version('./src/cypack/__init__.py'),
    zip_safe=False,
)
| StarcoderdataPython |
35375 | <filename>lc0415_add_strings.py
"""Leetcode 415. Add Strings
Easy
URL: https://leetcode.com/problems/add-strings/
Given two non-negative integers num1 and num2 represented as string,
return the sum of num1 and num2.
Note:
- The length of both num1 and num2 is < 5100.
- Both num1 and num2 contains only digits 0-9.
- Both num1 and num2 does not contain any leading zero.
- You must not use any built-in BigInteger library or convert the inputs to
integer directly.
"""
class SolutionPaddingAddBackwardIter(object):
    """Add two non-negative decimal numbers given as strings (LC 415)."""

    def _padding(self, num1, num2):
        """Left-pad the shorter string with zeros so both have equal length."""
        n1, n2 = len(num1), len(num2)
        if n1 < n2:
            num1 = '0' * (n2 - n1) + num1
        elif n1 > n2:
            num2 = '0' * (n1 - n2) + num2
        return num1, num2

    def addStrings(self, num1, num2):
        """
        :type num1: str
        :type num2: str
        :rtype: str

        Time complexity: O(n).
        Space complexity: O(n) for the output digits.
        """
        from collections import deque

        # Pad shorter num with leading zeros to strings of equal length.
        num1, num2 = self._padding(num1, num2)
        # Add digit pairs from the right, carrying into the next column;
        # appendleft keeps the digits in most-significant-first order.
        sum_arr = deque()
        carry = 0
        i = len(num1) - 1
        while i >= 0 or carry > 0:
            val = carry + (int(num1[i]) + int(num2[i]) if i >= 0 else 0)
            carry, digit = divmod(val, 10)  # idiomatic carry/digit split
            sum_arr.appendleft(str(digit))
            i -= 1
        # str.join accepts any iterable; no need to materialize a list first.
        return ''.join(sum_arr)
def main():
    """Demo driver: exercise addStrings() on two sample inputs.

    Fixed: the original used Python-2-only ``print`` statements, which are a
    SyntaxError under Python 3; ``print()`` calls work on both.
    """
    # Output: 807.
    num1 = '342'
    num2 = '465'
    print(SolutionPaddingAddBackwardIter().addStrings(num1, num2))

    # Output: 10110.
    num1 = '9999'
    num2 = '111'
    print(SolutionPaddingAddBackwardIter().addStrings(num1, num2))
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3311045 | <reponame>cobrab11/black1-bot<gh_stars>1-10
# BS mark.1-55
# /* coding: utf-8 */
# BlackSmith plugin
# acclist_plugin.py
# Coded by: WitcherGeralt (<EMAIL>)
# http://witcher-team.ucoz.ru/
def global_acclist(type, source, body):
    """Send the caller a private summary of all global access levels.

    Partitions the module-global GLOBACCESS mapping (jid -> level) into
    administrators (level >= 80), ignored users (level < 10) and everything
    else, then replies with the three lists via the bot's reply() helper.
    NOTE(review): the parameter `type` shadows the builtin of the same name.
    """
    adminslist = u'Список администраторов:'
    ignorelist = u'Список игнора:'
    acclist = u'Остальные доступы:'
    col_1 = 0  # admin counter
    col_2 = 0  # ignore-list counter
    col_3 = 0  # other-access counter
    for jid in GLOBACCESS:
        access = GLOBACCESS[jid]
        if access >= 80:
            # Administrators: 100 = BOSS, 80 = CHIEF, otherwise show the number.
            col_1 = col_1 + 1
            if access == 100:
                comment = ' - BOSS'
            elif access == 80:
                comment = ' - CHIEF'
            else:
                comment = ': '+str(access)
            adminslist += '\n'+str(col_1)+'. '+jid+comment
        elif access < 10:
            # Ignored users: per the Russian labels, -100 = full ignore,
            # -5 = blocked; otherwise show the numeric level.
            col_2 = col_2 + 1
            if access == -100:
                comment = u' - полный игнор'
            elif access == -5:
                comment = u' - заблокирован'
            else:
                comment = ': '+str(access)
            ignorelist += '\n'+str(col_2)+'. '+jid+comment
        else:
            # Non-standard levels in [10, 80).
            col_3 = col_3 + 1
            acclist += '\n'+str(col_3)+'. '+jid+': '+str(access)
    # Replace empty sections with "list is empty" placeholders.
    if col_1 == 0:
        adminslist = u'Список администраторов пуст'
    if col_2 == 0:
        ignorelist = u'Список игнора пуст'
    if col_3 == 0:
        acclist = u'Нестандартных доступов нет'
    # Public invocations get a "check your private messages" notice; the
    # actual listing always goes to the private chat.
    if type == 'public':
        reply(type, source, u'глянь в приват')
    reply('private', source, adminslist+'\n\n'+ignorelist+'\n\n'+acclist)
def local_acclist(type, source, body):
    """Send the caller a private summary of per-conference access levels.

    Works only inside group chats: looks up source[1] (presumably the
    conference jid -- confirm against the dispatcher) in CONFACCESS and
    splits its entries into access list (level >= 10) and ignore list.
    NOTE(review): the parameter `type` shadows the builtin of the same name.
    """
    if source[1] in GROUPCHATS:
        acclist = u'Список доступов:'
        ignorelist = u'Список игнора:'
        col_1 = 0  # access-list counter
        col_2 = 0  # ignore-list counter
        if source[1] in CONFACCESS:
            for jid in CONFACCESS[source[1]]:
                access = CONFACCESS[source[1]][jid]
                if access < 10:
                    # Ignored: -100 = full ignore, -5 = blocked (see labels).
                    col_2 = col_2 + 1
                    if access == -100:
                        comment = u' - полный игнор'
                    elif access == -5:
                        comment = u' - заблокирован'
                    else:
                        comment = ': '+str(access)
                    ignorelist += '\n'+str(col_2)+'. '+jid+comment
                else:
                    # Privileged: 100 = BOSS, 80 = CHIEF, otherwise numeric.
                    col_1 = col_1 + 1
                    if access == 100:
                        comment = ' - BOSS'
                    elif access == 80:
                        comment = ' - CHIEF'
                    else:
                        comment = ': '+str(access)
                    acclist += '\n'+str(col_1)+'. '+jid+comment
            # Replace empty sections with "list is empty" placeholders.
            if col_1 == 0:
                acclist = u'Список доступов пуст'
            if col_2 == 0:
                ignorelist = u'Список игнора пуст'
            # The listing always goes to private chat; public calls get a hint.
            if type == 'public':
                reply(type, source, u'глянь в приват')
            reply('private', source, acclist+'\n\n'+ignorelist)
        else:
            reply(type, source, u'нет локальных доступов')
    else:
        reply(type, source, u'какие ещё локальные доступы в ростере?')
# Register both handlers under the "acclist" command; the numeric argument is
# presumably the minimum access level required (confirm in the bot core):
# the local list needs 20, the global list 80.
command_handler(local_acclist, 20, "acclist")
command_handler(global_acclist, 80, "acclist")
| StarcoderdataPython |
1771551 | <reponame>Timothy-Wangwe/To-do-List-App-Django-
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from accounts.forms import RegistrationForm, LoginForm, ProfileForm
# Create your views here.
def register(request):
    """Create a new user account, then redirect to the login page.

    Any existing session is discarded first so the registration form is
    always presented to an anonymous user.  On validation failure the form
    is re-rendered with its errors.
    """
    logout(request)
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            # NOTE(review): values are read from raw request.POST instead of
            # form.cleaned_data -- confirm the form applies no normalisation.
            # create_user() hashes the password before storing it.
            User.objects.create_user(
                first_name=request.POST['first_name'],
                last_name=request.POST['last_name'],
                username=request.POST['username'],
                email=request.POST['email'],
                password=request.POST['password'],
            )
            messages.success(request, 'Account has been created! Please login below...')
            return redirect('accounts:login')
        else:
            return render(request, 'accounts/register.html', {'form':form})
    else:
        return render(request, 'accounts/register.html', {'form': RegistrationForm()})
def login_view(request):
    """Authenticate the user and redirect to the task list on success.

    The current session is cleared first.  On GET, renders an empty form.
    """
    logout(request)
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            form.cleaned_data
            user = authenticate(
                username=request.POST['username'],
                password=request.POST['password'],
            )
            if user is not None:
                if user.is_active:
                    login(request, user)
                    messages.success(request, f'Welcome, {user.first_name}!')
                    return redirect('task:task_list')
            # NOTE(review): failed authentication and inactive users both fall
            # through to the final redirect below with no error message shown.
        else:
            return render(request, 'accounts/login.html', {'form':form})
    else:
        return render(request, 'accounts/login.html', {'form':LoginForm()})
    return redirect('accounts:login')
def logout_view(request):
    """End the current session and return to the landing page."""
    logout(request)
    return redirect('task:index')
@login_required(login_url='/auth/login/')
def profile(request):
    """Let the logged-in user view and edit their own profile.

    POST saves the bound form against request.user; GET shows the form
    pre-filled with the current values.
    """
    if request.method == 'POST':
        form = ProfileForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            messages.success(request, 'Your Profile has been Updated!')
            # Redirect-after-POST avoids duplicate submission on refresh.
            return redirect('accounts:profile')
    else:
        form = ProfileForm(instance=request.user)
    return render(request, 'accounts/profile.html', {'form':form})
| StarcoderdataPython |
179958 | <gh_stars>0
# Legacy hook (pre Django 3.2): point Django at this app's custom AppConfig.
default_app_config = 'rdmo.projects.app_config.ProjectsConfig'
| StarcoderdataPython |
45523 | print(int(input()) - 543)
| StarcoderdataPython |
4806577 | #!/usr/bin/env python
# coding: utf-8
# Convert the tab-separated taxon table into a pipe-delimited taxonomy.tsv
# (uid | parent_uid | name | rank | sourceinfo |).
# Fixed: the original opened both files without ever closing them; `with`
# guarantees the handles are closed (and the output flushed) even on error.
with open('taxon_3.tab', 'r') as in_file, open('taxonomy.tsv', 'w') as out_file_t:
    out_file_t.write('uid | parent_uid | name | rank | sourceinfo | ' + '\n')
    next(in_file)  # skip the input header row
    for line in in_file:
        line = line.strip('\n')
        row = line.split('\t')
        # Column layout of taxon_3.tab: 0 = taxon id, 2 = parent id,
        # 3 = name, 5 = rank.
        parent_id = row[2]
        name = row[3]
        taxon_id = row[0]
        rank = row[5]
        source = ''  # no source info available in the input table
        out_file_t.write(taxon_id + '\t|\t' + parent_id + '\t|\t' + name + '\t|\t' + rank + '\t|\t' + source + '\t|\t' + '\n')
| StarcoderdataPython |
4824808 | from rest_framework import serializers
from .models import PingbackNotification
from .models import PingbackNotificationDismissed
class PingbackNotificationSerializer(serializers.ModelSerializer):
    """Serializes PingbackNotification rows; `i18n` is a JSON blob that
    defaults to the empty object."""
    i18n = serializers.JSONField(default='{}')

    class Meta:
        model = PingbackNotification
        fields = ('id', 'version_range', 'timestamp', 'link_url', 'i18n')
class PingbackNotificationDismissedSerializer(serializers.ModelSerializer):
    """Serializes the (user, notification) dismissal records."""
    class Meta:
        model = PingbackNotificationDismissed
        fields = ('user', 'notification')
| StarcoderdataPython |
3303086 | import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.stats import mode
from .libcudawrapper import deskewGPU as deskew
from .util import imread
def threshold_li(image):
    """Return threshold value based on adaptation of Li's Minimum Cross
    Entropy method.

    Ported from ``skimage.filters.threshold_li``.

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    References
    ----------
    Li & Lee (1993), Pattern Recognition 26(4):617-625;
    Li & Tam (1998), Pattern Recognition Letters 18(8):771-776;
    Sezgin & Sankur (2004), J. Electronic Imaging 13(1):146-165;
    ImageJ AutoThresholder, http://fiji.sc/wiki/index.php/Auto_Threshold
    """
    # A constant image has no meaningful threshold.
    if np.all(image == image.flat[0]):
        raise ValueError(
            "threshold_li is expected to work with images "
            "having more than one value. The input image seems "
            "to have just one value {}.".format(image.flat[0])
        )

    # Work on a shifted copy: the input stays untouched and the shift to a
    # non-negative range keeps log(mean) well-defined.
    work = image.copy()
    lo = np.min(work)
    work -= lo
    span = np.max(work)
    tol = 0.5 * span / 256

    t_cur = np.mean(work)       # initial estimate: the global mean gray-level
    t_prev = t_cur + 2 * tol    # forces at least one refinement pass

    # Li & Tam's fixed-point iteration: refine until the update is below tol.
    while abs(t_cur - t_prev) > tol:
        t_prev = t_cur
        cutoff = t_prev + tol
        # Means of the background and object populations at the current cut.
        mean_back = work[work <= cutoff].mean()
        mean_obj = work[work > cutoff].mean()

        candidate = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj))

        t_cur = candidate - tol if candidate < 0 else candidate + tol

    # Undo the shift applied above.
    return cutoff + lo
def trimedges(im, trim, ninterleaved=1):
    """Crop a 3-D (z, y, x) stack by the per-axis margins in *trim*.

    *trim* is a 3x2 sequence of (low, high) pixel margins for (z, y, x);
    the z margins are multiplied by *ninterleaved*.
    """
    nz, ny, nx = im.shape
    (zlo, zhi), (ylo, yhi), (xlo, xhi) = trim
    return im[
        zlo * ninterleaved : nz - zhi * ninterleaved,
        ylo : ny - yhi,
        xlo : nx - xhi,
    ]
def cropX(im, width=0, shift=0):
    """Crop a 3-D (z, y, x) stack along x around a shifted centre.

    ``width == 0`` means "as wide as possible after shifting"; the window
    is clamped to the array bounds.
    """
    nz, ny, nx = im.shape
    if not width:
        width = nx - abs(shift)
    center = np.ceil(nx / 2 + shift)
    lo = int(max(np.ceil(center - width / 2), 0))
    hi = int(min(np.ceil(center + width / 2), nx))
    return im[:, :, lo:hi]
def imcontentbounds(im, sigma=2):
    """Get image content bounding box via gaussian filter and threshold.

    Returns ``[left, right, fullwidth]``: the first and last x columns that
    contain above-threshold signal, plus the image's full x extent.

    Fixed: ``astype(np.float)`` -- the ``np.float`` alias was removed in
    NumPy 1.24, so the original raised AttributeError on modern NumPy;
    the builtin ``float`` is the documented replacement.
    """
    # get rid of the first two planes in case of high dark noise
    if im.ndim == 3:
        im = np.squeeze(np.max(im[2:], 0))
    im = im.astype(float)
    fullwidth = im.shape[-1]
    # Smooth, threshold (Li's method), then find the x extent of the mask.
    imgaus = gaussian_filter(im, sigma)
    mask = imgaus > threshold_li(imgaus)
    linesum = np.sum(mask, 0)
    abovethresh = np.where(linesum > 0)[0]
    right = abovethresh[-1]
    left = abovethresh[0]
    return [left, right, fullwidth]
def feature_width(E, background=None, pad=50, t=0):
    """automated detection of post-deskew image content width.

    the width can be used during deskewing to crop the final image to
    reasonable bounds

    Returns a dict with the padded content ``width``, the x ``offset`` of
    its centre relative to the deskewed-frame centre, and ``deskewed_nx``
    (the full deskewed width).
    NOTE(review): the parameter ``t`` is unused.
    """
    # first deskew just first and last timepoints of each channel
    P = E.parameters
    # first and last timepoint
    maxT = max(P.tset)
    minT = min(P.tset)
    raw_stacks = [imread(f) for f in E.get_files(t=(minT, maxT))]
    raw_stacks = [sub_background(f, background) for f in raw_stacks]
    if P.samplescan:
        deskewed_stacks = [deskew(s, P.dz, P.dx, P.angle) for s in raw_stacks]
    else:
        deskewed_stacks = raw_stacks
    # then get minimum bounding box of features
    # each row of `bounds` is [left, right, fullwidth] from imcontentbounds().
    bounds = np.array([imcontentbounds(d) for d in deskewed_stacks])
    rightbound = np.max(bounds[:, 1])
    leftbound = np.min(bounds[:, 0])
    deskewedWidth = bounds[0, 2]
    width = int(rightbound - leftbound + pad)  # pad adds breathing room
    middle = np.floor((rightbound + leftbound) / 2)
    offset = int(np.floor(middle - (deskewedWidth / 2)))
    return {"width": width, "offset": offset, "deskewed_nx": deskewedWidth}
def detect_background(im):
    """Estimate the background level as the most frequent pixel value.

    For 4-D input the plane ``im[0][2]`` is used; for 3-D input the second
    plane ``im[1]`` (avoids noise in the first plane on lattice data).

    Fixed: ``mode(im.flatten())[0][0]`` breaks on SciPy >= 1.11, where
    ``scipy.stats.mode`` returns a scalar mode by default.  The NumPy
    equivalent below has identical tie-breaking: ``np.unique`` sorts the
    values, so ``argmax`` on the counts selects the smallest modal value,
    just like ``scipy.stats.mode``.
    """
    if im.ndim == 4:
        im = im[0][2]
    if im.ndim == 3:
        im = im[1]  # second plane; first plane is noisy on lattice data
    values, counts = np.unique(im.flatten(), return_counts=True)
    return values[np.argmax(counts)]
def sub_background(im, background=None):
    """Subtract *background* from *im*, clamping negatives to zero.

    When *background* is None it is auto-detected via ``detect_background``
    (the mode of a representative plane).

    Fixed: ``astype(np.float)`` -- the ``np.float`` alias was removed in
    NumPy 1.24; the builtin ``float`` (i.e. float64) is equivalent.
    """
    if background is None:
        background = detect_background(im)
    out = im.astype(float) - background
    out[out < 0] = 0  # camera counts cannot be negative
    return out
def deskew_gputools(rawdata, dz=0.5, dx=0.102, angle=31.5, filler=0):
    """Deskew a raw stage-scan stack via gputools' GPU affine transform.

    Returns None (after printing a warning) when gputools is unavailable.

    Fixed: ``np.int`` -- the alias was removed in NumPy 1.24; the builtin
    ``int`` is equivalent here.
    """
    try:
        from gputools.transforms import affine
    except ImportError:
        # sys.stdout = sys.__stdout__
        print("could not import gputools")
        return
    # Shear factor relating the z step to the x pixel size.
    deskewFactor = np.cos(angle * np.pi / 180) * dz / dx
    T = np.array([[1, 0, deskewFactor, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
    (nz, ny, nx) = rawdata.shape
    # Francois' method:
    # nxOut = math.ceil((nz - 1) * deskewFactor) + nx
    nxOut = int(np.floor((nz - 1) * dz * abs(np.cos(angle * np.pi / 180)) / dx) + nx)
    # +1 to pad left side with 1 column of filler pixels
    # otherwise, edge pixel values are smeared across the image
    paddedData = np.ones((nz, ny, nxOut), rawdata.dtype) * filler
    paddedData[..., :nx] = rawdata
    return affine(paddedData, T, interpolation="linear", mode="wrap")
| StarcoderdataPython |
103585 | #!/usr/bin/env python
from pwn import *
def numbers_from_to(a=0, b=None):
    """Return the integers from a to b (inclusive) as a space-separated
    string with a trailing space; with one argument, just ``"a "``.

    Fixed: ``b == None`` -- identity should be tested with ``is None``
    (PEP 8); also builds the result with a generator fed to ``join``.
    """
    if b is None:
        b = a
    return ''.join(str(n) + " " for n in range(a, b + 1))
def binary_search(io, c, nmax, nmin = 0):
    """Locate the odd-weight coin in [nmin, nmax] using at most c weighings.

    *io* is the pwntools tube to the judge.  Each weighing queries the
    lower half; presumably every genuine coin weighs a multiple of 10, so
    a non-multiple-of-10 total means the target is in the queried half --
    confirm against the challenge protocol.
    """
    if nmin == nmax:
        # Found it: burn the remaining weighings on the answer index, then
        # submit it once more as the final guess.
        for _ in range(c):
            io.sendline(str(nmin))
            io.recvline()
        io.sendline(str(nmin))
        return
    middle = (nmin + nmax) // 2
    payload = numbers_from_to(nmin, middle)
    io.sendline(payload)
    weight = int(io.recvline())
    if weight % 10:
        # Odd coin is inside the queried (lower) half.
        binary_search(io, c - 1, middle, nmin)
    else:
        binary_search(io, c - 1, nmax, middle + 1)
    return
# Connect to the challenge service and solve 100 rounds: each round's banner
# provides n (number of coins) and c (allowed weighings).
io = remote('localhost', 9007)
io.recvuntil('... -')
for _ in range(100):
    io.recvuntil('=')
    n = int(io.recvuntil(' '))
    io.recvuntil('=')
    c = int(io.recvuntil('\n'))
    binary_search(io, c, n - 1)
    print(io.recvline(False))  # per-round verdict
io.recvline()
print(io.recvline())  # final flag / message
io.close()
| StarcoderdataPython |
111873 | from ccgetusers.generator import GenerateToken
from ccgetusers.customerid import CustomerId
from ccgetusers.users import Users
import sys
def main():
    """CLI entry point: authenticate against CloudCheckr CMx and return users.

    Expects three positional arguments (auth endpoint, client id, access
    key); exits with status -1 and a usage message when any is missing.
    """
    if len(sys.argv) < 4:
        print('Must provide CloudCheckr CMx auth endpoint, client id and access key')
        print('ccgetusers <cloudcheckr endpoint> <client id> <access key>')
        sys.exit(-1)
    else:
        cc_endpoint = sys.argv[1]
        client_id = sys.argv[2]
        client_secret = sys.argv[3]

    # Token -> customer id -> users: each API call feeds the next one.
    token = GenerateToken(cc_endpoint=cc_endpoint, client_id=client_id, client_secret=client_secret)
    token = token.token
    customer_id = CustomerId(cc_endpoint=cc_endpoint, token=token)
    customer_id = customer_id.customer_id
    users = Users(cc_endpoint=cc_endpoint, token=token, customer_id=customer_id)
    return users.users
| StarcoderdataPython |
3369533 | <reponame>aarcro/django-report-builder<filename>report_builder_demo/urls.py
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
# Register every app's ModelAdmin classes with the admin site.
admin.autodiscover()

urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^report_builder/', include('report_builder.urls'))
]

# Serve uploaded media through Django itself, but only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
188914 | frase = "Curso em Video Python"
print(len(frase))# para contar a quatidade de caracteres em frase.
print(frase[3:12])#para separar letras de frase apartir da casa 3 (lembrando que começa de 0 então a posicao e 4) ate a numero 12 sendo que a 12 será excluída.
print(frase[3::2])#Inicia o print da 3 casa da frase até a ultima posicão, mas irá saltar de 2 em 2 casa printando as letras da casa selecionada.
print(frase.upper())# todas as letras de frase passarao para MAIUSCULAS.
print(frase.lower())# todas as letras de frase passarão a ser minúsculas.
print(frase.count('o'))# para contar determinada letra dentro de frase.
print(frase.strip()) # ira remover os espaços (se houver) da frente e do final.
print(frase.replace("Python","Android"))# replace substitui a frase escolhida por outra.
dividido = frase.split() # para dividir os valores contidos dentro da variavel frase
print(dividido[1]) #imprimir o que estiver na posicao um da frase dividida
print(dividido[1][1]) #imprimir a letra contida na posicao 1 (0 e,1 m) da frase contida na posicao 1 (em).
print("Video" in frase) # função IN para procurar algo dentro da variavel
print()
| StarcoderdataPython |
1676163 | <filename>abc158_d.py
S=list(input())
Q=int(input())
T=[]
for _ in range(Q):
q=input()
if q[0]=='1':
S,T=T,S
else:
if q[2]=='1':
T.append(q[4])
else:
S.append(q[4])
print("".join(T[::-1]+S))
| StarcoderdataPython |
1632387 | import sys, os
from socket import *
if(len(sys.argv)>2):
host=sys.argv[1]
port=int(sys.argv[2])
else:
print("Unable to create connection, required parameters 'Host' and/or 'Port' where not provided")
sys.exit(1)
server_address=gethostbyname(host)
connection_socket=socket(AF_INET,SOCK_STREAM)
connection_socket.connect((server_address,port))
pid=os.fork()
if pid!=0:
incoming_stream=connection_socket.makefile("r")
print("Client - Client is accepting server messages")
while True:
msg=incoming_stream.readline()
print(msg)
if msg=="salir\n":
break
incoming_stream.close()
connection_socket.close()
print("Server disconnected, if you are not disconnected type 'salir'")
os.waitpid(pid,0)
else:
outgoing_stream=connection_socket.makefile("w")
print("Client - Server is accepting client messages")
while True:
msg=input()
outgoing_stream.write(msg+"\n")
outgoing_stream.flush()
if msg=="salir\n":
break
outgoing_stream.close()
connection_socket.close()
sys.exit(0)
| StarcoderdataPython |
1640904 | <reponame>henriktao/pulumi-azure
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SubscriptionCostManagementExportArgs', 'SubscriptionCostManagementExport']
@pulumi.input_type
class SubscriptionCostManagementExportArgs:
    """Constructor argument bag for :class:`SubscriptionCostManagementExport`.

    Note: this file is generated by the Pulumi Terraform Bridge (see the
    header); edit the generator inputs rather than this class by hand.
    """
    def __init__(__self__, *,
                 export_data_options: pulumi.Input['SubscriptionCostManagementExportExportDataOptionsArgs'],
                 export_data_storage_location: pulumi.Input['SubscriptionCostManagementExportExportDataStorageLocationArgs'],
                 recurrence_period_end_date: pulumi.Input[str],
                 recurrence_period_start_date: pulumi.Input[str],
                 recurrence_type: pulumi.Input[str],
                 subscription_id: pulumi.Input[str],
                 active: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a SubscriptionCostManagementExport resource.
        :param pulumi.Input['SubscriptionCostManagementExportExportDataOptionsArgs'] export_data_options: A `export_data_options` block as defined below.
        :param pulumi.Input['SubscriptionCostManagementExportExportDataStorageLocationArgs'] export_data_storage_location: A `export_data_storage_location` block as defined below.
        :param pulumi.Input[str] recurrence_period_end_date: End date of the export's recurrence period (counterpart to `recurrence_period_start_date`).
        :param pulumi.Input[str] recurrence_period_start_date: The date the export will start capturing information.
        :param pulumi.Input[str] recurrence_type: How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
        :param pulumi.Input[str] subscription_id: The id of the subscription on which to create an export.
        :param pulumi.Input[bool] active: Is the cost management export active? Default is `true`.
        :param pulumi.Input[str] name: Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
        """
        pulumi.set(__self__, "export_data_options", export_data_options)
        pulumi.set(__self__, "export_data_storage_location", export_data_storage_location)
        pulumi.set(__self__, "recurrence_period_end_date", recurrence_period_end_date)
        pulumi.set(__self__, "recurrence_period_start_date", recurrence_period_start_date)
        pulumi.set(__self__, "recurrence_type", recurrence_type)
        pulumi.set(__self__, "subscription_id", subscription_id)
        if active is not None:
            pulumi.set(__self__, "active", active)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="exportDataOptions")
    def export_data_options(self) -> pulumi.Input['SubscriptionCostManagementExportExportDataOptionsArgs']:
        """
        A `export_data_options` block as defined below.
        """
        return pulumi.get(self, "export_data_options")

    @export_data_options.setter
    def export_data_options(self, value: pulumi.Input['SubscriptionCostManagementExportExportDataOptionsArgs']):
        pulumi.set(self, "export_data_options", value)

    @property
    @pulumi.getter(name="exportDataStorageLocation")
    def export_data_storage_location(self) -> pulumi.Input['SubscriptionCostManagementExportExportDataStorageLocationArgs']:
        """
        A `export_data_storage_location` block as defined below.
        """
        return pulumi.get(self, "export_data_storage_location")

    @export_data_storage_location.setter
    def export_data_storage_location(self, value: pulumi.Input['SubscriptionCostManagementExportExportDataStorageLocationArgs']):
        pulumi.set(self, "export_data_storage_location", value)

    @property
    @pulumi.getter(name="recurrencePeriodEndDate")
    def recurrence_period_end_date(self) -> pulumi.Input[str]:
        """
        End date of the export's recurrence period (counterpart to `recurrence_period_start_date`).
        """
        return pulumi.get(self, "recurrence_period_end_date")

    @recurrence_period_end_date.setter
    def recurrence_period_end_date(self, value: pulumi.Input[str]):
        pulumi.set(self, "recurrence_period_end_date", value)

    @property
    @pulumi.getter(name="recurrencePeriodStartDate")
    def recurrence_period_start_date(self) -> pulumi.Input[str]:
        """
        The date the export will start capturing information.
        """
        return pulumi.get(self, "recurrence_period_start_date")

    @recurrence_period_start_date.setter
    def recurrence_period_start_date(self, value: pulumi.Input[str]):
        pulumi.set(self, "recurrence_period_start_date", value)

    @property
    @pulumi.getter(name="recurrenceType")
    def recurrence_type(self) -> pulumi.Input[str]:
        """
        How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
        """
        return pulumi.get(self, "recurrence_type")

    @recurrence_type.setter
    def recurrence_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "recurrence_type", value)

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> pulumi.Input[str]:
        """
        The id of the subscription on which to create an export.
        """
        return pulumi.get(self, "subscription_id")

    @subscription_id.setter
    def subscription_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "subscription_id", value)

    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the cost management export active? Default is `true`.
        """
        return pulumi.get(self, "active")

    @active.setter
    def active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "active", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _SubscriptionCostManagementExportState:
    """State bag used when looking up / filtering existing resources; every
    field is optional, unlike the constructor args class above.

    Note: this file is generated by the Pulumi Terraform Bridge (see the
    header); edit the generator inputs rather than this class by hand.
    """
    def __init__(__self__, *,
                 active: Optional[pulumi.Input[bool]] = None,
                 export_data_options: Optional[pulumi.Input['SubscriptionCostManagementExportExportDataOptionsArgs']] = None,
                 export_data_storage_location: Optional[pulumi.Input['SubscriptionCostManagementExportExportDataStorageLocationArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 recurrence_period_end_date: Optional[pulumi.Input[str]] = None,
                 recurrence_period_start_date: Optional[pulumi.Input[str]] = None,
                 recurrence_type: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering SubscriptionCostManagementExport resources.
        :param pulumi.Input[bool] active: Is the cost management export active? Default is `true`.
        :param pulumi.Input['SubscriptionCostManagementExportExportDataOptionsArgs'] export_data_options: A `export_data_options` block as defined below.
        :param pulumi.Input['SubscriptionCostManagementExportExportDataStorageLocationArgs'] export_data_storage_location: A `export_data_storage_location` block as defined below.
        :param pulumi.Input[str] name: Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
        :param pulumi.Input[str] recurrence_period_end_date: End date of the export's recurrence period (counterpart to `recurrence_period_start_date`).
        :param pulumi.Input[str] recurrence_period_start_date: The date the export will start capturing information.
        :param pulumi.Input[str] recurrence_type: How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
        :param pulumi.Input[str] subscription_id: The id of the subscription on which to create an export.
        """
        if active is not None:
            pulumi.set(__self__, "active", active)
        if export_data_options is not None:
            pulumi.set(__self__, "export_data_options", export_data_options)
        if export_data_storage_location is not None:
            pulumi.set(__self__, "export_data_storage_location", export_data_storage_location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if recurrence_period_end_date is not None:
            pulumi.set(__self__, "recurrence_period_end_date", recurrence_period_end_date)
        if recurrence_period_start_date is not None:
            pulumi.set(__self__, "recurrence_period_start_date", recurrence_period_start_date)
        if recurrence_type is not None:
            pulumi.set(__self__, "recurrence_type", recurrence_type)
        if subscription_id is not None:
            pulumi.set(__self__, "subscription_id", subscription_id)

    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the cost management export active? Default is `true`.
        """
        return pulumi.get(self, "active")

    @active.setter
    def active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "active", value)

    @property
    @pulumi.getter(name="exportDataOptions")
    def export_data_options(self) -> Optional[pulumi.Input['SubscriptionCostManagementExportExportDataOptionsArgs']]:
        """
        A `export_data_options` block as defined below.
        """
        return pulumi.get(self, "export_data_options")

    @export_data_options.setter
    def export_data_options(self, value: Optional[pulumi.Input['SubscriptionCostManagementExportExportDataOptionsArgs']]):
        pulumi.set(self, "export_data_options", value)

    @property
    @pulumi.getter(name="exportDataStorageLocation")
    def export_data_storage_location(self) -> Optional[pulumi.Input['SubscriptionCostManagementExportExportDataStorageLocationArgs']]:
        """
        A `export_data_storage_location` block as defined below.
        """
        return pulumi.get(self, "export_data_storage_location")

    @export_data_storage_location.setter
    def export_data_storage_location(self, value: Optional[pulumi.Input['SubscriptionCostManagementExportExportDataStorageLocationArgs']]):
        pulumi.set(self, "export_data_storage_location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="recurrencePeriodEndDate")
    def recurrence_period_end_date(self) -> Optional[pulumi.Input[str]]:
        """
        End date of the export's recurrence period (counterpart to `recurrence_period_start_date`).
        """
        return pulumi.get(self, "recurrence_period_end_date")

    @recurrence_period_end_date.setter
    def recurrence_period_end_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "recurrence_period_end_date", value)

    @property
    @pulumi.getter(name="recurrencePeriodStartDate")
    def recurrence_period_start_date(self) -> Optional[pulumi.Input[str]]:
        """
        The date the export will start capturing information.
        """
        return pulumi.get(self, "recurrence_period_start_date")

    @recurrence_period_start_date.setter
    def recurrence_period_start_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "recurrence_period_start_date", value)

    @property
    @pulumi.getter(name="recurrenceType")
    def recurrence_type(self) -> Optional[pulumi.Input[str]]:
        """
        How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
        """
        return pulumi.get(self, "recurrence_type")

    @recurrence_type.setter
    def recurrence_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "recurrence_type", value)

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the subscription on which to create an export.
        """
        return pulumi.get(self, "subscription_id")

    @subscription_id.setter
    def subscription_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subscription_id", value)
class SubscriptionCostManagementExport(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
export_data_options: Optional[pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataOptionsArgs']]] = None,
export_data_storage_location: Optional[pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataStorageLocationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
recurrence_period_end_date: Optional[pulumi.Input[str]] = None,
recurrence_period_start_date: Optional[pulumi.Input[str]] = None,
recurrence_type: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Cost Management Export for a Subscription.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_subscription = azure.core.get_subscription()
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_container = azure.storage.Container("exampleContainer", storage_account_name=azurerm_storage_account["test"]["name"])
example_subscription_cost_management_export = azure.core.SubscriptionCostManagementExport("exampleSubscriptionCostManagementExport",
subscription_id=azurerm_subscription["example"]["id"],
recurrence_type="Monthly",
recurrence_period_start_date="2020-08-18T00:00:00Z",
recurrence_period_end_date="2020-09-18T00:00:00Z",
export_data_storage_location=azure.core.SubscriptionCostManagementExportExportDataStorageLocationArgs(
container_id=example_container.resource_manager_id,
root_folder_path="/root/updated",
),
export_data_options=azure.core.SubscriptionCostManagementExportExportDataOptionsArgs(
type="Usage",
time_frame="WeekToDate",
))
```
## Import
Subscription Cost Management Exports can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:core/subscriptionCostManagementExport:SubscriptionCostManagementExport example /subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.CostManagement/exports/export1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] active: Is the cost management export active? Default is `true`.
:param pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataOptionsArgs']] export_data_options: A `export_data_options` block as defined below.
:param pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataStorageLocationArgs']] export_data_storage_location: A `export_data_storage_location` block as defined below.
:param pulumi.Input[str] name: Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
:param pulumi.Input[str] recurrence_period_start_date: The date the export will start capturing information.
:param pulumi.Input[str] recurrence_type: How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
:param pulumi.Input[str] subscription_id: The id of the subscription on which to create an export.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SubscriptionCostManagementExportArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Cost Management Export for a Subscription.

        Typing overload: accepts all resource properties bundled in a single
        ``SubscriptionCostManagementExportArgs`` object instead of individual
        keyword arguments.  The body is a stub; the shared implementation
        lives in ``_internal_init``.

        An existing export can be imported using the resource id::

            $ pulumi import azure:core/subscriptionCostManagementExport:SubscriptionCostManagementExport example /subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.CostManagement/exports/export1

        :param str resource_name: The name of the resource.
        :param SubscriptionCostManagementExportArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...  # overload stub; never executed
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SubscriptionCostManagementExportArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
export_data_options: Optional[pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataOptionsArgs']]] = None,
export_data_storage_location: Optional[pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataStorageLocationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
recurrence_period_end_date: Optional[pulumi.Input[str]] = None,
recurrence_period_start_date: Optional[pulumi.Input[str]] = None,
recurrence_type: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SubscriptionCostManagementExportArgs.__new__(SubscriptionCostManagementExportArgs)
__props__.__dict__["active"] = active
if export_data_options is None and not opts.urn:
raise TypeError("Missing required property 'export_data_options'")
__props__.__dict__["export_data_options"] = export_data_options
if export_data_storage_location is None and not opts.urn:
raise TypeError("Missing required property 'export_data_storage_location'")
__props__.__dict__["export_data_storage_location"] = export_data_storage_location
__props__.__dict__["name"] = name
if recurrence_period_end_date is None and not opts.urn:
raise TypeError("Missing required property 'recurrence_period_end_date'")
__props__.__dict__["recurrence_period_end_date"] = recurrence_period_end_date
if recurrence_period_start_date is None and not opts.urn:
raise TypeError("Missing required property 'recurrence_period_start_date'")
__props__.__dict__["recurrence_period_start_date"] = recurrence_period_start_date
if recurrence_type is None and not opts.urn:
raise TypeError("Missing required property 'recurrence_type'")
__props__.__dict__["recurrence_type"] = recurrence_type
if subscription_id is None and not opts.urn:
raise TypeError("Missing required property 'subscription_id'")
__props__.__dict__["subscription_id"] = subscription_id
super(SubscriptionCostManagementExport, __self__).__init__(
'azure:core/subscriptionCostManagementExport:SubscriptionCostManagementExport',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
export_data_options: Optional[pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataOptionsArgs']]] = None,
export_data_storage_location: Optional[pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataStorageLocationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
recurrence_period_end_date: Optional[pulumi.Input[str]] = None,
recurrence_period_start_date: Optional[pulumi.Input[str]] = None,
recurrence_type: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None) -> 'SubscriptionCostManagementExport':
"""
Get an existing SubscriptionCostManagementExport resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] active: Is the cost management export active? Default is `true`.
:param pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataOptionsArgs']] export_data_options: A `export_data_options` block as defined below.
:param pulumi.Input[pulumi.InputType['SubscriptionCostManagementExportExportDataStorageLocationArgs']] export_data_storage_location: A `export_data_storage_location` block as defined below.
:param pulumi.Input[str] name: Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
:param pulumi.Input[str] recurrence_period_start_date: The date the export will start capturing information.
:param pulumi.Input[str] recurrence_type: How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
:param pulumi.Input[str] subscription_id: The id of the subscription on which to create an export.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SubscriptionCostManagementExportState.__new__(_SubscriptionCostManagementExportState)
__props__.__dict__["active"] = active
__props__.__dict__["export_data_options"] = export_data_options
__props__.__dict__["export_data_storage_location"] = export_data_storage_location
__props__.__dict__["name"] = name
__props__.__dict__["recurrence_period_end_date"] = recurrence_period_end_date
__props__.__dict__["recurrence_period_start_date"] = recurrence_period_start_date
__props__.__dict__["recurrence_type"] = recurrence_type
__props__.__dict__["subscription_id"] = subscription_id
return SubscriptionCostManagementExport(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def active(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether the cost management export is active. Defaults to `true`.
        """
        return pulumi.get(self, "active")
    @property
    @pulumi.getter(name="exportDataOptions")
    def export_data_options(self) -> pulumi.Output['outputs.SubscriptionCostManagementExportExportDataOptions']:
        """
        An `export_data_options` block as defined below.
        """
        return pulumi.get(self, "export_data_options")
    @property
    @pulumi.getter(name="exportDataStorageLocation")
    def export_data_storage_location(self) -> pulumi.Output['outputs.SubscriptionCostManagementExportExportDataStorageLocation']:
        """
        An `export_data_storage_location` block as defined below.
        """
        return pulumi.get(self, "export_data_storage_location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the Cost Management Export. Changing this forces a new
        resource to be created.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="recurrencePeriodEndDate")
    def recurrence_period_end_date(self) -> pulumi.Output[str]:
        """
        The date the export will stop capturing information (counterpart of
        `recurrence_period_start_date`).
        """
        return pulumi.get(self, "recurrence_period_end_date")
    @property
    @pulumi.getter(name="recurrencePeriodStartDate")
    def recurrence_period_start_date(self) -> pulumi.Output[str]:
        """
        The date the export will start capturing information
        (ISO 8601 timestamp, e.g. `2020-08-18T00:00:00Z`).
        """
        return pulumi.get(self, "recurrence_period_start_date")
    @property
    @pulumi.getter(name="recurrenceType")
    def recurrence_type(self) -> pulumi.Output[str]:
        """
        How often the requested information will be exported. Valid values
        include `Annually`, `Daily`, `Monthly` and `Weekly`.
        """
        return pulumi.get(self, "recurrence_type")
    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> pulumi.Output[str]:
        """
        The ID of the subscription on which to create the export.
        """
        return pulumi.get(self, "subscription_id")
| StarcoderdataPython |
1679596 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import logging
import os.path
import textwrap
import yaml
from reno import defaults
LOG = logging.getLogger(__name__)
Opt = collections.namedtuple('Opt', 'name default help')
_OPTIONS = [
Opt('notesdir', defaults.NOTES_SUBDIR,
textwrap.dedent("""\
The notes subdirectory within the relnotesdir where the
notes live.
""")),
Opt('collapse_pre_releases', True,
textwrap.dedent("""\
Should pre-release versions be merged into the final release
of the same number (1.0.0.0a1 notes appear under 1.0.0).
""")),
Opt('stop_at_branch_base', True,
textwrap.dedent("""\
Should the scanner stop at the base of a branch (True) or go
ahead and scan the entire history (False)?
""")),
Opt('branch', None,
textwrap.dedent("""\
The git branch to scan. Defaults to the "current" branch
checked out. If a stable branch is specified but does not
exist, reno attempts to automatically convert that to an
"end-of-life" tag. For example, ``origin/stable/liberty``
would be converted to ``liberty-eol``.
""")),
Opt('earliest_version', None,
textwrap.dedent("""\
The earliest version to be included. This is usually the
lowest version number, and is meant to be the oldest
version. If unset, all versions will be scanned.
""")),
Opt('template', defaults.TEMPLATE.format(defaults.PRELUDE_SECTION_NAME),
textwrap.dedent("""\
The template used by reno new to create a note.
""")),
Opt('release_tag_re',
textwrap.dedent('''\
((?:v?[\\d.ab]|rc)+) # digits, a, b, and rc cover regular and
# pre-releases
'''),
textwrap.dedent("""\
The regex pattern used to match the repo tags representing a
valid release version. The pattern is compiled with the
verbose and unicode flags enabled.
""")),
Opt('pre_release_tag_re',
textwrap.dedent('''\
(?P<pre_release>\\.v?\\d+(?:[ab]|rc)+\\d*)$
'''),
textwrap.dedent("""\
The regex pattern used to check if a valid release version tag
is also a valid pre-release version. The pattern is compiled
with the verbose and unicode flags enabled. The pattern must
define a group called 'pre_release' that matches the
pre-release part of the tag and any separator, e.g for
pre-release version '12.0.0.0rc1' the default pattern will
identify '.0rc1' as the value of the group 'pre_release'.
""")),
Opt('branch_name_re', 'stable/.+',
textwrap.dedent("""\
The pattern for names for branches that are relevant when
scanning history to determine where to stop, to find the
"base" of a branch. Other branches are ignored.
""")),
Opt('closed_branch_tag_re', '(.+)-eol',
textwrap.dedent("""\
The pattern for names for tags that replace closed
branches that are relevant when scanning history to
determine where to stop, to find the "base" of a
branch. Other tags are ignored.
""")),
Opt('branch_name_prefix', 'stable/',
textwrap.dedent("""\
The prefix to add to tags for closed branches
to restore the old branch name to allow sorting
to place the tag in the proper place in history.
For example, OpenStack turns "mitaka-eol" into
"stable/mitaka" by removing the "-eol" suffix
via closed_branch_tag_re and setting the prefix
to "stable/".
""")),
Opt('sections',
[
['features', 'New Features'],
['issues', 'Known Issues'],
['upgrade', 'Upgrade Notes'],
['deprecations', 'Deprecation Notes'],
['critical', 'Critical Issues'],
['security', 'Security Issues'],
['fixes', 'Bug Fixes'],
['other', 'Other Notes'],
],
textwrap.dedent("""\
The identifiers and names of permitted sections in the
release notes, in the order in which the final report will
be generated. A prelude section will always be automatically
inserted before the first element of this list.
""")),
Opt('prelude_section_name', defaults.PRELUDE_SECTION_NAME,
textwrap.dedent("""\
The name of the prelude section in the note template. This
allows users to rename the section to, for example,
'release_summary' or 'project_wide_general_announcements',
which is displayed in titlecase in the report after
replacing underscores with spaces.
""")),
Opt('ignore_null_merges', True,
textwrap.dedent("""\
When this option is set to True, any merge commits with no
changes and in which the second or later parent is tagged
are considered "null-merges" that bring the tag information
into the current branch but nothing else.
OpenStack used to use null-merges to bring final release
tags from stable branches back into the master branch. This
confuses the regular traversal because it makes that stable
branch appear to be part of master and/or the later stable
branch. This option allows us to ignore those.
""")),
Opt('ignore_notes', [],
textwrap.dedent("""\
Note files to be ignored. It's useful to be able to ignore a
file if it is edited on the wrong branch. Notes should be
specified by their filename or UID.
Setting the option in the main configuration file makes it
apply to all branches. To ignore a note in the HTML build, use
the ``ignore-notes`` parameter to the ``release-notes`` sphinx
directive.
""")),
Opt('unreleased_version_title', '',
textwrap.dedent("""\
The title to use for any notes that do not appear in a
released version. If this option is unset, the development
version number is used (for example, ``3.0.0-3``).
""")),
]
class Config(object):
    """reno configuration.

    Option values are initialized from the defaults declared in
    ``_OPTIONS``, then overridden by an optional YAML configuration file
    and finally by any parsed command-line arguments.
    """

    # Map of option name -> Opt tuple, used for default and help lookups.
    _OPTS = {o.name: o for o in _OPTIONS}

    @classmethod
    def get_default(cls, opt):
        "Return the default for an option."
        try:
            return cls._OPTS[opt].default
        except KeyError:
            raise ValueError('unknown option name %r' % (opt,))

    def __init__(self, reporoot, relnotesdir=None):
        """Instantiate a Config object

        :param str reporoot:
            The root directory of the repository.
        :param str relnotesdir:
            The directory containing release notes. Defaults to
            'releasenotes'.
        """
        self.reporoot = reporoot
        if relnotesdir is None:
            relnotesdir = defaults.RELEASE_NOTES_SUBDIR
        self.relnotesdir = relnotesdir
        # Initialize attributes from the defaults.
        self.override(**{o.name: o.default for o in _OPTIONS})

        self._contents = {}
        self._load_file()

    def _load_file(self):
        # Look for a configuration file in the release notes directory
        # first, falling back to reno.yaml at the repository root.
        filenames = [
            os.path.join(self.reporoot, self.relnotesdir, 'config.yaml'),
            os.path.join(self.reporoot, 'reno.yaml')]
        for filename in filenames:
            LOG.debug('looking for configuration file %s', filename)
            if os.path.isfile(filename):
                break
        else:
            self._report_missing_config_files(filenames)
            return

        try:
            with open(filename, 'r') as fd:
                # yaml.safe_load() returns None for an empty document;
                # normalize to an empty dict so the override(**...) call
                # below always receives a mapping instead of crashing.
                self._contents = yaml.safe_load(fd) or {}
            LOG.info('loaded configuration file %s', filename)
        except IOError as err:
            self._report_failure_config_file(filename, err)
        else:
            self.override(**self._contents)

    def _report_missing_config_files(self, filenames):
        # NOTE(dhellmann): This is extracted so we can mock it for
        # testing.
        LOG.info('no configuration file in: %s', ', '.join(filenames))

    def _report_failure_config_file(self, filename, err):
        # NOTE(dhellmann): This is extracted so we can mock it for
        # testing.
        LOG.warning('did not load config file %s: %s', filename, err)

    def _rename_prelude_section(self, **kwargs):
        # If the user renamed the prelude section, regenerate the note
        # template so new notes use the new section name.
        key = 'prelude_section_name'
        if key in kwargs and kwargs[key] != self._OPTS[key].default:
            new_prelude_name = kwargs[key]

            self.template = defaults.TEMPLATE.format(new_prelude_name)

    def override(self, **kwds):
        """Set the values of the named configuration options.

        Take the values of the keyword arguments as the current value
        of the same option, regardless of whether a value is already
        present.

        """
        # Replace prelude section name if it has been changed.
        self._rename_prelude_section(**kwds)

        for n, v in kwds.items():
            if n not in self._OPTS:
                LOG.warning('ignoring unknown configuration value %r = %r',
                            n, v)
            else:
                setattr(self, n, v)

    def override_from_parsed_args(self, parsed_args):
        """Set the values of the configuration options from parsed CLI args.

        This method assumes that the DEST values for the command line
        arguments are named the same as the configuration options.

        """
        # Only pick up arguments that were actually provided, so unset
        # CLI flags do not clobber file- or default-provided values.
        arg_values = {
            o.name: getattr(parsed_args, o.name)
            for o in _OPTIONS
            if getattr(parsed_args, o.name, None) is not None
        }
        if arg_values:
            LOG.info('[config] updating from command line options')
        self.override(**arg_values)

    @property
    def reporoot(self):
        return self._reporoot

    # Ensure that the 'reporoot' value always only ends in one '/'.
    @reporoot.setter
    def reporoot(self, value):
        self._reporoot = value.rstrip('/') + '/'

    @property
    def notespath(self):
        """The path in the repo where notes are kept.

        .. important::

           This does not take ``reporoot`` into account. You need to add this
           manually if required.
        """
        return os.path.join(self.relnotesdir, self.notesdir)

    @property
    def options(self):
        """Get all configuration options as a dict.

        Returns the actual configuration options after overrides.
        """
        options = {
            o.name: getattr(self, o.name)
            for o in _OPTIONS
        }
        return options
# def parse_config_into(parsed_arguments):
# """Parse the user config onto the namespace arguments.
# :param parsed_arguments:
# The result of calling :meth:`argparse.ArgumentParser.parse_args`.
# :type parsed_arguments:
# argparse.Namespace
# """
# config_path = get_config_path(parsed_arguments.relnotesdir)
# config_values = read_config(config_path)
# for key in config_values.keys():
# try:
# getattr(parsed_arguments, key)
# except AttributeError:
# LOG.info('Option "%s" does not apply to this particular command.'
# '. Ignoring...', key)
# continue
# setattr(parsed_arguments, key, config_values[key])
# parsed_arguments._config = config_values
| StarcoderdataPython |
1705569 | # Create your first MLP in Keras
from keras.models import Sequential
from keras.layers import Dense
import numpy, json
# Fix the random seed so training runs are reproducible.
numpy.random.seed(7)

# Load training inputs and targets from JSON files; context managers
# ensure the file handles are closed (the old code leaked both handles).
with open("matlav", "r") as f:
    inp = json.loads(f.read())
with open("matlov", "r") as f:
    outp = json.loads(f.read())

# Split into input (X) and output (Y) variables.
X = numpy.array(inp)
Y = numpy.array(outp)

# Define the model: two hidden ReLU layers and a linear output
# (a regression network trained with mean squared error).
model = Sequential()
model.add(Dense(128, input_dim=254, activation='relu'))  # TODO: derive input_dim from X.shape[1]
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='linear'))

# Compile and fit the model.
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X, Y, epochs=1500, batch_size=10)

# Predict once and reuse the result instead of running inference twice.
predictions = model.predict(X)
print(predictions)

# Persist the trained weights and the architecture description.
model.save_weights("weights.hdf5")
with open("model.json", "w") as f:
    f.write(model.to_json())
# Recover model:
# from keras.models import model_from_json
# model = model_from_json(open("model.json", "r").read())
# model.load_weights("weights.hdf5", by_name=False)
| StarcoderdataPython |
1667934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db.models import AutoField
from django.contrib import admin
from .models import Project, School
class AllFieldsAdmin(admin.ModelAdmin):
    """Admin base class that lists every concrete model field.

    ``list_display`` is computed from the model's meta, so newly added
    fields show up automatically; auto-generated primary keys are skipped.
    """

    # Page size for the admin changelist.
    list_per_page = 20

    @property
    def list_display(self):
        """Return the names of all non-auto fields of ``self.model``."""
        names = []
        for model_field in self.model._meta.fields:
            if not isinstance(model_field, AutoField):
                names.append(model_field.name)
        return names
class ProjectAdmin(AllFieldsAdmin):
    """Admin configuration for Project, with full-text search."""

    model = Project
    # Fields searched by the admin changelist search box.
    search_fields = ['title', 'description']
class SchoolAdmin(AllFieldsAdmin):
    """Admin configuration for School."""

    model = School
# Register each model with its dedicated admin class.
for model, model_admin in ((Project, ProjectAdmin), (School, SchoolAdmin)):
    admin.site.register(model, model_admin)
| StarcoderdataPython |
172755 | <reponame>Jawayria/estore_project<gh_stars>0
from django.urls import path
from rest_framework_simplejwt.views import TokenRefreshView
from .views import LoginView, Signup
# URL namespace for reverse() lookups, e.g. reverse("users:login").
app_name = "users"

urlpatterns = [
    path("signup/", view=Signup.as_view(), name="signup"),
    path("login/", LoginView.as_view(), name="login"),
    # SimpleJWT view that exchanges a refresh token for a new access token.
    path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
]
| StarcoderdataPython |
3343622 | import unittest
from aioprometheus.negotiator import negotiate
from aioprometheus.formats import text, binary
class TestNegotiate(unittest.TestCase):
    """Formatter negotiation based on the parts of an Accept header."""

    @staticmethod
    def _negotiated(accept):
        # Split the raw header string on ';' and negotiate on the parts.
        return negotiate(set(accept.split(";")))

    def test_protobuffer(self):
        """ check that a protobuf formatter is returned """
        protobuf_headers = (
            "proto=io.prometheus.client.MetricFamily;application/vnd.google.protobuf;encoding=delimited",
            "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
            "encoding=delimited;application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily",
        )
        for header in protobuf_headers:
            self.assertEqual(binary.BinaryFormatter, self._negotiated(header))

    def test_text_004(self):
        """ check that a text formatter is returned for version 0.0.4 """
        for header in (
            "text/plain; version=0.0.4",
            "text/plain;version=0.0.4",
            "version=0.0.4; text/plain",
        ):
            self.assertEqual(text.TextFormatter, self._negotiated(header))

    def test_text_default(self):
        """ check that a text formatter is returned for plain text """
        self.assertEqual(text.TextFormatter, self._negotiated("text/plain;"))

    def test_default(self):
        """ check that a text formatter is returned if no matches """
        for header in ("application/json", "*/*", "application/nothing"):
            self.assertEqual(text.TextFormatter, self._negotiated(header))

    def test_no_accept_header(self):
        """ check request with no accept header works """
        self.assertEqual(text.TextFormatter, negotiate(set()))
        self.assertEqual(text.TextFormatter, negotiate({""}))
198480 | <filename>MORSEProject/classes/kalman3D.py
import numpy as np
class Kalman3D:
    """
    Linear Kalman filter estimating a 3-D position from noisy position
    measurements and acceleration inputs (constant-velocity model driven
    by acceleration).  Call :meth:`update` once per sensor sample, then
    read the estimate via the getter methods.
    """

    def __init__(self, dt=1/60.0):
        """
        Initialize all used matrices.

        :param dt: 1 / frequency at which sensors work, default value is 60Hz
        """
        self.gain = np.eye(6) * 0.25  # g = Eest/(Eset+Emea) <0,1>
        self.dt = dt  # time between samples
        # State vector [x, y, z, vx, vy, vz], initially at rest at the origin.
        self.state = np.array([0, 0, 0, 0, 0, 0])
        # Control matrix: accelerations integrate into the velocity components.
        self.B = np.array([[0, 0, 0],
                           [0, 0, 0],
                           [0, 0, 0],
                           [self.dt, 0, 0],
                           [0, self.dt, 0],
                           [0, 0, self.dt]])
        # Maps per-axis process noise onto the velocity components.
        self.G = np.array([[0, 0, 0],
                           [0, 0, 0],
                           [0, 0, 0],
                           [1, 0, 0],
                           [0, 1, 0],
                           [0, 0, 1]])
        # State-transition matrix: positions advance by velocity * dt.
        self.A = np.array([[1, 0, 0, self.dt, 0, 0],
                           [0, 1, 0, 0, self.dt, 0],
                           [0, 0, 1, 0, 0, self.dt],
                           [0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 1]])
        # Estimate covariance; the large initial uncertainty shrinks as the
        # filter converges over the first iterations.
        self.P = np.eye(6) * 50
        # Process noise standard deviation (tuning parameter) and covariance.
        self.s_d_q = 0.05
        self.Q = np.eye(3) * self.s_d_q ** 2
        # Measurement noise standard deviation (tuning parameter) and covariance.
        self.s_d_r = 0.25
        self.R = np.eye(3) * self.s_d_r ** 2
        # Observation matrix: only the position components are measured.
        self.H = np.array([[1, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0]])
        self.I = np.eye(6)
        self.prediction = np.array([0, 0, 0])

    def get_predicted_position(self):
        """
        :return: [x,y,z] last predicted position
        """
        return self.prediction

    def get_estimated_position(self):
        """
        :return: [x,y,z] estimated current position based on last given observation to update
        """
        return np.matmul(self.H, self.state)

    def update(self, measured_position, accelerations):
        """
        Run one predict/correct cycle of the filter.

        :param measured_position: [x,y,z] position from odometry or other position sensor
        :param accelerations: [ax,ay,az] in m/s^2; the caller's array is NOT modified
        """
        # Work on a copy so the caller's list/array is never mutated
        # (the previous implementation clobbered accelerations[2] in place).
        acc = np.array(accelerations, dtype=float)
        acc[2] = 0.0  # just because gravity sucks

        # Predict the new state from the motion model and the control input.
        self.state = np.matmul(self.A, self.state) + np.matmul(self.B, acc)
        self.P = np.matmul(np.matmul(self.A, self.P), np.transpose(self.A)) \
            + np.matmul(np.matmul(self.G, self.Q), np.transpose(self.G))

        # Only the position components are observable.
        self.prediction = np.matmul(self.H, self.state)
        innovation = measured_position - self.prediction

        # Refresh the Kalman gain from the innovation covariance.
        innovation_covariance = np.matmul(np.matmul(self.H, self.P), np.transpose(self.H)) + self.R
        self.gain = np.matmul(np.matmul(self.P, np.transpose(self.H)), np.linalg.inv(innovation_covariance))

        # Correct the predicted state with the measurement.
        self.state = self.state + np.matmul(self.gain, innovation)

        # Update the estimate covariance.
        self.P = np.matmul(self.I - np.matmul(self.gain, self.H), self.P)
| StarcoderdataPython |
130104 | # Copyright (c) Case Western Reserve University 2015
# This software is distributed under Apache License 2.0
# Consult the file LICENSE.txt
# Author: <NAME> <EMAIL>
# Dec 14 2016
# PYTHON 2
import os
import numpy as np
import subprocess as sp
import sys
import time
import datetime as dt
# Date to process, formatted YYYYMMDD; split into year/month/day ints.
fdate = "20161215"
yr = int(fdate[:4])
mo = int(fdate[4:6])
dy = int(fdate[6:])
os.chdir('/home/augta/web_monitor/tmpevt')
# Expected layout in cwd
# tmpevt/
#   t2/
# Ensure t2/ is clean before proceeding
files_in_t2 = os.listdir('t2/')
if len(files_in_t2) > 0:
    for y in files_in_t2:
        sp.call(['rm','t2/'+y])
else:
    print "Directory empty, no files to remove."
# Copy the day's event list into the working dir and load the filenames.
sp.call(['cp','/home/augta/data/north/Events/%s/evt_list.txt' %fdate,'.'])
evt_dat_list = np.loadtxt('evt_list.txt',dtype='S100')
os.chdir('t2')
# Gather the day's .tsp timestamp files and copy them into t2/.
dirlist = os.listdir('/home/augta/data/north/Events/%s' %fdate,)
tsp_list = [j for j in dirlist if '.tsp' in j]
tsp_list.sort()
for m in tsp_list:
    sp.call(['cp','/home/augta/data/north/Events/%s/%s' %(fdate,m),'.'])
# Rename .tsp file to match .dat file so the correct
#file name is given for the event retrieval script
tsp_k = 0
for k in tsp_list:
    tsp_time = k.split('.')[0]
    tsp_time = dt.datetime.strptime(tsp_time,'%Y%m%d_%H%M%S')
    for j in evt_dat_list:
        evt_time = j.split('.')[0]
        evt_time = dt.datetime.strptime(evt_time,'%Y%m%d_%H%M%S')
        tdiff = evt_time - tsp_time
        # Timestamps within 5 s are assumed to refer to the same file.
        if abs(tdiff.total_seconds()) < 5:
            new_tsp = j.replace('.dat','.tsp')
            tsp_list[tsp_k] = new_tsp
            tsp_k += 1
            break
# Build the day's merged T2 trigger list with the helper script.
today = fdate
sp.call(['cp','../make_t2.sh','.'])
sp.call(['cp','../testtsp','.'])
sp.call(['./make_t2.sh','%s' %today])
# Load T2 timestamps: integer GPS seconds and the fractional part
# (stored after the '.'; scaled down by 1e8 to a fraction of a second).
big_t2_sec = np.loadtxt('%s.T2' %today,delimiter='.',dtype=int,usecols=(0,))
big_t2_micro = np.loadtxt('%s.T2' %today,delimiter='.',dtype=float,usecols=(1,))
big_t2_micro = big_t2_micro / 1e8
# Fetch and unpack the local-coincidence (CTAL) list for the day.
sp.call(['cp','/home/augta/data/coincidence/%i_%02d_%02d.CTAL.gz' %(yr,mo,dy),'.'])
sp.call(['gunzip','%i_%02d_%02d.CTAL.gz' %(yr,mo,dy)])
tal_list = np.loadtxt('%i_%02d_%02d.CTAL'%(yr,mo,dy),usecols=(1,),dtype=str)
evt_counter = 0
denom = 20001
old_evt_str = ''
evt_str_counter = 0
with open('get_events_local.sh','w') as F:
F.write('#!/bin/bash\n\n')
if tal_list.size > 0:
if tal_list.size == 1:
new_tal = np.zeros(1,dtype='S500')
new_tal[0] = tal_list
tal_list = new_tal
for x in tal_list:
lsec = int(x.split('.')[0])
micro = float(x.split('.')[1])
micro = micro / 1e6
roi = np.where(big_t2_sec == lsec - 1)[0]
if len(roi) > 0:
roi_micro = abs(big_t2_micro[roi] - micro)
ind = roi[roi_micro.argmin()]
fileind = ind / denom
ind_final = ind - fileind * denom
# If new data file, must transfer from the LSC
evt_dat_str = tsp_list[fileind][:-3]+'dat'
if evt_dat_str != old_evt_str:
if evt_str_counter > 0:
F.write('rm %s\n' %old_evt_str)
F.write('scp -c arcfour root@192.168.3.101:/data/Events/%i%02d%02d/%s .\n' %(yr,mo,dy,evt_dat_str))
evt_str_counter += 1
F.write('./getevt -i %i -o %02d_local.evt %s\n' %(ind_final,evt_counter,evt_dat_str))
evt_counter += 1
old_evt_str = evt_dat_str
else:
print "**** MISSING T2 DATA FOR GPS SECOND %i" %lsec
evt_counter += 1
F.write('rm %s' %evt_dat_str)
sp.call(['cp','/home/augta/data/coincidence/%i_%02d_%02d.CTAG.gz' %(yr,mo,dy),'.'])
sp.call(['gunzip','%i_%02d_%02d.CTAG.gz' %(yr,mo,dy)])
tag_list = np.loadtxt('%i_%02d_%02d.CTAG'%(yr,mo,dy),usecols=(6,),dtype='S500',
comments=None)
evt_counter = 0
denom = 20001
old_evt_str = ''
evt_str_counter = 0
with open('get_events_global.sh','w') as F:
F.write('#!/bin/bash\n\n')
if tag_list.size > 0:
if tag_list.size == 1:
new_tag = np.zeros(1,dtype='S500')
new_tag[0] = tag_list
tag_list = new_tag
for x in tag_list:
lsec = int(x.split('.')[0])
micro = float(x.split('.')[1])
micro = micro / 1e6
roi = np.where(big_t2_sec == lsec - 1)[0]
if len(roi) > 0:
roi_micro = abs(big_t2_micro[roi] - micro)
ind = roi[roi_micro.argmin()]
fileind = ind / denom
ind_final = ind - fileind * denom
evt_dat_str = tsp_list[fileind][:-3]+'dat'
if evt_dat_str != old_evt_str:
if evt_str_counter > 0:
F.write('rm %s\n' %old_evt_str)
F.write('scp -c arcfour root@192.168.3.101:/data/Events/%i%02d%02d/%s .\n' %(yr,mo,dy,evt_dat_str))
evt_str_counter += 1
F.write('./getevt -i %i -o %02d_global.evt %s\n' %(ind_final,evt_counter,evt_dat_str))
evt_counter += 1
old_evt_str = evt_dat_str
else:
print "**** MISSING T2 DATA FOR GPS SECOND %i" %lsec
evt_counter += 1
F.write('rm %s' %evt_dat_str)
# Publish the generated retrieval scripts for the web monitor to use.
sp.call(['cp','get_events_global.sh','/home/augta/data/north'])
sp.call(['cp','get_events_local.sh','/home/augta/data/north'])
# Temp folder cleanup
newlist = os.listdir('/home/augta/web_monitor/tmpevt/t2/')
for x in newlist:
sp.call(['rm','/home/augta/web_monitor/tmpevt/t2/'+x]) | StarcoderdataPython |
3350715 | <filename>bash_workbench/misc.py<gh_stars>0
import math
def convert_size(size_bytes):
    """Format a byte count as a human-readable string (e.g. ``1.5 KB``).

    Adapted from
    https://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
    """
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest power of 1024 that fits, then scale down to it.
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return f"{scaled} {units[exponent]}"
1686026 | import pytest
import torch
from dreamer.models.action import ActionDecoder
@pytest.mark.parametrize('dist', ['tanh_normal', 'one_hot', 'not_implemented_dist'])
def test_action_decoder(dist):
    """Smoke-test ActionDecoder output distributions for each `dist` type."""
    batch_size = 4
    action_size = 10
    feature_size = 20
    hidden_size = 40
    layers = 5
    try:
        action_decoder = ActionDecoder(action_size, feature_size, hidden_size, layers, dist)
    except NotImplementedError:
        # Unsupported distribution names must be rejected at construction
        # time; nothing else to verify in that case.
        return
    features = torch.randn(batch_size, feature_size)
    action_dist = action_decoder(features)
    if dist == 'tanh_normal':
        # NOTE(review): mean()/mode()/entropy() are called as methods here,
        # while the 'one_hot' branch below reads .mean as an attribute --
        # presumably matching dreamer's distribution wrapper APIs; confirm.
        action_mean = action_dist.mean()
        action_mode = action_dist.mode()
        action_ent = action_dist.entropy()
        assert isinstance(action_mean, torch.Tensor)
        assert action_mean.shape == (batch_size, action_size)
        assert isinstance(action_mode, torch.Tensor)
        assert action_mode.shape == (batch_size, action_size)
        assert isinstance(action_ent, torch.Tensor)
        assert action_ent.shape == (batch_size,)
        true_action = torch.randn(batch_size, action_size)
        # make sure gradients can propagate backwards
        loss = torch.sum((action_mean - true_action) ** 2)
        loss += torch.sum((action_mode - true_action) ** 2)
        loss += - torch.sum(action_ent)
        loss.backward()
    elif dist == 'one_hot':
        action_mean = action_dist.mean
        action_ent = action_dist.entropy()
        assert isinstance(action_mean, torch.Tensor)
        assert action_mean.shape == (batch_size, action_size)
        assert isinstance(action_ent, torch.Tensor)
        assert action_ent.shape == (batch_size,)
82233 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2020/9/25 21:50
# Author: <NAME>
# @Email : <EMAIL>
# Description:
from uti import ReducedEpsilon
from uti import Status
from solution import Solution
class ColumnGeneration:
    """Delayed column-generation solver for a branch-and-price node's RMP.

    Repeatedly solves the restricted master problem (RMP), prices out a
    candidate column via the pricing subproblem, and adds it to the RMP
    until no column with negative reduced cost remains.
    """

    def __init__(self, node):
        # node: the branch-and-price node owning the RMP; keep a direct
        # handle to its RMP model for convenience.
        self.node = node
        self.rmp = node.rmp

    def solve(self):
        """Run column generation to optimality on this node's RMP.

        Returns:
            Solution wrapping the optimal RMP objective value and variables.
        """
        while True:
            self.rmp.optimize()  # solve the current RMP (simplex method)
            # The RMP is expected to remain feasible throughout the loop.
            assert self.rmp.get_status() != Status.INFEASIBLE
            # Check whether a column with negative reduced cost exists:
            # 1. fetch the dual variables of the two constraint families
            ex_dual, sr_dual = self.rmp.get_dual()
            # 2. solve the corresponding pricing subproblem
            self.rmp.optimize_pricing(ex_dual, sr_dual)
            # 3. inspect the resulting reduced cost
            reduced_cost = self.rmp.get_reduced_cost()
            if reduced_cost + ReducedEpsilon >= 0:  # no improving column left
                assert self.node.rmp is self.rmp
                # Return the optimal solution of the current RMP.
                return Solution(self.rmp.get_objVal(), self.rmp.getVars())
            # 4. an improving column (reduced cost < 0) exists: fetch its
            #    coefficients and append it to the RMP.
            coe = self.rmp.get_pricing_coe()  # pair of coefficient lists: [[], []]
            self.node.update_param(coe)
            self.rmp.add_col(coe)


if __name__ == '__main__':
    pass
| StarcoderdataPython |
3203676 | import os
import collections
import itertools
from . import helpers
__all__ = ('Display',)
class Graphic:
    """Low-level renderer that keeps an ordered list of "visuals" (text
    fragments) in sync with a terminal-like device via an io sink and a
    cursor controller.

    Each visual is kept in three forms:
      - ``dirty``: the raw value exactly as supplied by the caller,
      - ``ready``: the value with newlines injected so no rendered line
        exceeds the configured width,
      - ``clean``: ``ready`` stripped of escape sequences / non-printable
        runes (newlines kept), used to measure on-screen extent.
    """

    _Visual = collections.namedtuple('Visual', 'dirty ready clean')

    __slots__ = ('_io', '_cursor', '_visuals', '_origin', '_width')

    def __init__(self, io, cursor):
        # io: sink exposing ``send(text)`` for emitting output.
        # cursor: controller exposing locate/last/right/clear operations.
        self._io = io
        self._cursor = cursor
        self._visuals = []
        # 0-based column at which drawing starts; set by locate().
        self._origin = None
        # Maximum rendered line width; set by resize().
        self._width = None

    @property
    def visuals(self):
        """Currently tracked visuals, in draw order."""
        return self._visuals

    def reset(self):
        """Forget all visuals without touching the screen."""
        self._visuals.clear()

    def resize(self, width):
        """Set the wrapping width used when formatting visuals."""
        self._width = width

    def _locate(self):
        # Cursor columns appear to be 1-based, hence the - 1 to store a
        # 0-based origin. NOTE(review): confirm against the cursor API.
        (cy, cx) = self._cursor.locate()
        self._origin = cx - 1

    def locate(self):
        """Capture the current cursor column as the drawing origin."""
        self._locate()

    def _originate(self, index):
        # Column at which the visual AFTER ``index`` starts: the length of
        # the last rendered line of visuals[index], plus (recursively) the
        # preceding visuals' contribution when that visual spans no newline.
        if index < 0:
            return self._origin
        visual = self._visuals[index]
        lines = visual.clean.rsplit(os.linesep, 1)
        origin = len(lines.pop())  # removes (and measures) the last line
        if not lines:  # empty => no newline in this visual; keep walking left
            origin += self._originate(index - 1)
        return origin

    def _draw(self, index):
        # Re-emit every visual from ``index`` onwards.
        visuals = self._visuals[index:]
        for visual in visuals:
            self._io.send(visual.ready)

    def _clear(self, index):
        # Move the cursor back to where visuals[index] begins, then wipe
        # the screen from that point onwards.
        visuals = self._visuals[index:]
        ysize = 0
        for visual in visuals:
            ysize += visual.clean.count(os.linesep)
        self._cursor.last(ysize)
        xsize = self._originate(index - 1)
        self._cursor.right(xsize)
        self._cursor.clear()

    @staticmethod
    def _clean(value):
        # Strip escape sequences and non-printable runes (newlines kept) so
        # the result reflects the actual on-screen extent of ``value``.
        value = helpers.seq.clean(value)
        runes = helpers.clean(value, ignore = {os.linesep})
        value = ''.join(runes)
        return value

    def _format(self, index, value):
        # Prepare ``value`` for rendering at position ``index`` by injecting
        # newlines so no line exceeds the width, accounting for the column
        # at which this visual starts (the end of the previous visual).
        clean = self._clean(value)
        lines = clean.split(os.linesep)
        current = self._originate(index - 1)
        # injects \n whenever part of each
        # line is about to exceed the width
        step = self._width
        # NOTE(review): ``index`` is deliberately reused below as the break
        # column within each line, shadowing the (no longer needed) parameter.
        for (state, line) in enumerate(lines):
            index = step
            if not state:
                # The first line starts at ``current``, so it wraps earlier.
                index -= current
            for cycle in itertools.count():
                if not index < len(line):
                    break
                # ``cycle`` compensates for newlines already injected.
                value = helpers.seq.inject(value, index + cycle, os.linesep)
                index += step
        return value

    def _build(self, index, dirty):
        # Assemble the (dirty, ready, clean) triple for placement at index.
        ready = self._format(index, dirty)
        clean = self._clean(ready)
        visual = self._Visual(dirty, ready, clean)
        return visual

    def _insert(self, index, value):
        # Insert a new visual and redraw; every visual after it is rebuilt
        # because its starting column (and thus its wrapping) may change.
        visual = self._build(index, value)
        self._visuals.insert(index, visual)
        after = index + 1
        values = []
        while True:
            try:
                visual = self._visuals.pop(after)
            except IndexError:
                break
            values.append(visual.dirty)
        for (subindex, value) in enumerate(values, start = after):
            visual = self._build(subindex, value)
            self._visuals.insert(subindex, visual)
        self._draw(index)
        # NOTE(review): this is the last visual rebuilt, which equals the
        # newly inserted one only when nothing followed it.
        return visual

    def _create(self, index, value):
        visual = self._insert(index, value)
        return visual

    def create(self, value, index = None):
        """Insert ``value`` at ``index`` (append when None) and draw it."""
        if index is None:
            index = len(self._visuals)
        return self._create(index, value)

    def _remove(self, index):
        # Wipe the screen from visuals[index] onwards, then drop the entry.
        self._clear(index)
        visual = self._visuals.pop(index)
        return visual

    def _delete(self, index):
        visual = self._remove(index)
        self._draw(index)
        return visual

    def delete(self, index):
        """Remove visuals[index] and redraw whatever followed it."""
        return self._delete(index)

    def _update(self, index, value):
        self._remove(index)
        visual = self._insert(index, value)
        return visual

    def update(self, index, value):
        """Replace visuals[index] with ``value`` and redraw."""
        return self._update(index, value)
class Display:
    """High-level facade over :class:`Graphic` for a multi-line display.

    Visual 0 is treated as a fixed header: ``update`` addresses visuals
    starting at index 1, and ``finish`` keeps visual 0 unless asked for a
    full teardown.
    """

    __slots__ = ('_graphic',)

    def __init__(self, io, cursor):
        self._graphic = Graphic(io, cursor)

    @property
    def _size(self):
        """Number of visuals currently tracked."""
        return len(self._graphic.visuals)

    def locate(self, width):
        """Capture the cursor origin and set the wrapping width."""
        self._graphic.locate()
        self._graphic.resize(width)

    def create(self, value, *rest, fall = 0):
        """Append one visual per argument, plus ``fall`` trailing newlines.

        ``None`` arguments become empty visuals.
        """
        trailer = fall * os.linesep
        for item in (value, *rest, trailer):
            self._graphic.create('' if item is None else item)

    def update(self, *values):
        """Replace visuals 1..len(values); ``None`` entries are left as-is."""
        if len(values) > self._size - 1:
            raise ValueError('too many values')
        # Walk back-to-front so earlier redraws don't disturb later indices.
        for position in range(len(values), 0, -1):
            replacement = values[position - 1]
            if replacement is not None:
                self._graphic.update(position, replacement)

    def finish(self, value, full = False):
        """Tear down visuals (all of them when ``full``), optionally draw a
        final ``value``, and reset the underlying graphic state."""
        first = 0 if full else 1
        for position in range(self._size - 1, first - 1, -1):
            self._graphic.delete(position)
        if value is not None:
            self._graphic.create(value)
        self._graphic.reset()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.