id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5087027 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 19 08:14:34 2018
@author: uber-abdul
"""
# Polynomial vs. linear regression on the Position_Salaries dataset.
# The dataset is too small to warrant a train/test split; a plain linear
# regression is fitted alongside the polynomial one purely for comparison.
import numpy as np  # numerical helpers (kept from the original file)
import matplotlib.pyplot as plt  # for plotting the fitted curves
import pandas as pd  # CSV loading

dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values  # the 1:2 slice keeps X 2-D: shape (n_samples, 1)
y = dataset.iloc[:, 2].values

# fitting linear regression to dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)

# fitting polynomial regression (degree 4) to dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)  # fits the feature expander and maps X in one call
# NOTE: the redundant ``poly_reg.fit(X_poly, y)`` was removed — it re-fitted the
# expander on the already-expanded matrix, corrupting its notion of n_features.
lin_reg2 = LinearRegression()
lin_reg2.fit(X_poly, y)

# Visualising the linear regression results
plt.scatter(X, y, color='red')
plt.plot(X, lin_reg.predict(X), color='blue')
plt.title('Truth or bluff linear regression')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Visualising the polynomial regression results
plt.scatter(X, y, color='red')
# transform (not fit_transform): the expander is already fitted above.
plt.plot(X, lin_reg2.predict(poly_reg.transform(X)), color='blue')
plt.title('Truth or bluff polynomial regression')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Predicting a new result with linear regression.
# predict() requires a 2-D array, so the scalar is wrapped as [[6.5]]
# (a bare scalar raises a ValueError in modern scikit-learn).
lin_reg.predict([[6.5]])

# Predicting a new result with polynomial regression.
lin_reg2.predict(poly_reg.transform([[6.5]]))
| StarcoderdataPython |
196289 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import docutils
import os
import re
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'OpenROAD'
copyright = 'The OpenROAD Project, 2020'
author = 'OpenROAD Team'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.ifconfig',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
# None lets the theme supply its own default highlighting style.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# The theme to use for HTML and HTML Help pages.
html_theme = "sphinx_symbiflow_theme"

html_theme_options = {
    # Specify a list of menu entries shown in the header.
    # Tuple form:
    # ('Name', 'external url or path of pages in the document', boolean, 'icon name')
    #
    # Third argument:
    # True indicates an external link.
    # False indicates a path of pages in the document.
    #
    # Fourth argument:
    # Specify the icon name.
    # For details see https://material.io/icons/
    'header_links' : [
        ('Home', 'index', False, 'home'),
        ("The OpenROAD Project", "https://theopenroadproject.org", True, 'launch'),
        ("GitHub", "https://github.com/The-OpenROAD-Project/OpenROAD", True, 'link')
    ],
    # Customize css colors.
    # For details see https://getmdl.io/customize/index.html
    #
    # Values: amber, blue, brown, cyan, deep_orange, deep_purple, green, grey,
    # indigo, light_blue, light_green, lime, orange, pink, purple, red, teal,
    # yellow (Default: indigo)
    'primary_color': 'indigo',
    # Values: same as primary_color. (Default: pink)
    'accent_color': 'blue',
    # Customize layout.
    # For details see https://getmdl.io/components/index.html#layout-section
    'fixed_drawer': True,
    'fixed_header': True,
    'header_waterfall': True,
    'header_scroll': False,
    # Render title in header.
    # Values: True, False (Default: False)
    'show_header_title': False,
    # Render title in drawer.
    # Values: True, False (Default: True)
    'show_drawer_title': True,
    # Render footer.
    # Values: True, False (Default: True)
    'show_footer': True,
    # Hide the SymbiFlow links in the footer.
    'hide_symbiflow_links': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
| StarcoderdataPython |
8161859 | <reponame>Ketsia-a/djangoporto
from django.shortcuts import render
from django.http import HttpResponse
from .models import Image,Location,Category
# Create your views here.
def homepage(request):
    """Render the landing page with every image, location and category."""
    context = {
        'images': Image.objects.all(),
        'locations': Location.get_locations(),
        'category': Category.objects.all(),
    }
    return render(request, 'homepage.html', context)
def location(request, location):
    """Render all images belonging to *location*."""
    context = {
        'images': Image.filter_by_location(location),
        'locations': Location.get_locations(),
    }
    return render(request, 'location.html', context)
def search_image(request):
    """Render search results for the ``searchimage`` query parameter.

    An absent or empty query falls back to a flash message.
    """
    locations = Location.get_locations()
    query = request.GET.get("searchimage")
    if query:
        return render(request, 'search.html',
                      {'images': Image.search_by_category(query),
                       'locations': locations})
    return render(request, 'search.html',
                  {'locations': locations, 'flash': 'Your search is empty'})
| StarcoderdataPython |
3466467 | <reponame>loonghao/rayvision_sync<gh_stars>0
"""Download models.
Including download, automatic download (the task is fully rendered,
one frame is downloaded after downloading one frame), download so
the un-downloaded task is recorded (the task is rendered).
"""
# Import built-in modules
import time
import os
# Import local modules
from rayvision_sync.transfer import RayvisionTransfer
from rayvision_sync.manage import RayvisionManageTask
from rayvision_sync.utils import run_cmd
from rayvision_sync.utils import str2unicode
from rayvision_sync.utils import create_transfer_params
class RayvisionDownload(object):
    """Downloader.

    Download all the passed tasks by calling the cmd transfer tool.
    """

    def __init__(self, api):
        """Initialize instance.

        Args:
            api: Authenticated Rayvision API object; supplies the transfer
                parameters, the task-query interface and the logger.
        """
        params = create_transfer_params(api)
        self.api = api
        self.trans = RayvisionTransfer(**params)
        # Reuse the transfer's task manager when it provides one; otherwise
        # build a fresh one on top of the API's query interface.
        self.manage_task = self.trans.manage_task or RayvisionManageTask(api.query)
        self.logger = self.trans.logger

    def _download_log(self, task_id_list, local_path):
        """Log the download input parameters.

        Args:
            task_id_list (list): List of task ids that need to be downloaded.
            local_path (str): Local directory the files will be saved into.
        """
        self.logger.info('INPUT:')
        self.logger.info('=' * 20)
        self.logger.info('task_id_list: %s', task_id_list)
        self.logger.info('local_path: %s', local_path)
        self.logger.info('=' * 20)

    def _check_local_path(self, local_path):
        """Return a usable local download directory.

        When *local_path* is falsy, fall back to ``renderfarm_sdk`` under the
        user's home (``USERPROFILE`` on Windows, ``HOME`` elsewhere).

        Args:
            local_path (str): Local path to download to; may be empty.

        Returns:
            str: The directory to download into.
        """
        if not local_path:
            if self.api.user_info['local_os'] == "windows":
                local_path = os.path.join(os.environ["USERPROFILE"],
                                          "renderfarm_sdk")
            else:
                local_path = os.path.join(os.environ["HOME"], "renderfarm_sdk")
        return local_path

    @staticmethod
    def check_params(task_id_list, custom_server_output_path):
        """Check the parameters.

        ``task_id_list`` and ``custom_server_output_path`` must not both be
        empty.

        Raises:
            ValueError: If both parameters are missing/empty.  (ValueError is
                an Exception subclass, so existing broad handlers still match.)
        """
        if not task_id_list and not custom_server_output_path:
            raise ValueError("One of the task_id_list and custom_server_output_path"
                             " must exist")

    def download(self, task_id_list=None,
                 max_speed=None, print_log=True,
                 download_filename_format="true",
                 local_path=None, server_path=None):
        """Download and update the undownloaded record.

        Args:
            task_id_list (list of int): List of tasks ids that need to be
                downloaded.
            max_speed (str, optional): Download speed limit in KB/s;
                default "1048576" (1 GB/s).
            print_log (bool, optional): Whether to print the transfer log.
            download_filename_format (str): "true" prefixes downloaded files
                with the task id and scene name; "false" downloads them
                without processing.
            local_path (str): Local save path; defaults to
                ``<home>/renderfarm_sdk`` (see ``_check_local_path``).
            server_path (str or list): Optional server-side file structure to
                download instead of everything,
                e.g. ``"18164087_test/l_layer"``.

        Returns:
            bool: True on success.
        """
        self.check_params(task_id_list, server_path)
        local_path = self._check_local_path(local_path)
        self.logger.info("[Rayvision_sync start download .....]")
        self._download_log(task_id_list, local_path)
        self._run_download(task_id_list, local_path, max_speed, print_log,
                           download_filename_format, server_path)
        self.logger.info("[Rayvision_sync end download.....]")
        return True

    def auto_download(self, task_id_list=None, max_speed=None,
                      print_log=False, sleep_time=10,
                      download_filename_format="true",
                      local_path=None):
        """Automatic download: poll tasks and fetch frames as they render.

        Args:
            task_id_list (list of int): List of tasks ids that need to be
                downloaded.
            max_speed (str, optional): Download speed limit in KB/s;
                default "1048576" (1 GB/s).
            print_log (bool, optional): Whether to print the transfer log.
            sleep_time (int, optional): Seconds to sleep between polls.
            download_filename_format (str): "true" prefixes files with the
                task id and scene name; "false" downloads them unprocessed.
            local_path (str): Local save path; defaults to
                ``<home>/renderfarm_sdk``.

        Returns:
            bool: True on success.
        """
        local_path = self._check_local_path(local_path)
        self.logger.info("[Rayvision_sync start auto_download.....]")
        self._download_log(task_id_list, local_path)
        self._auto_download_tool(task_id_list, sleep_time,
                                 max_speed, print_log, local_path,
                                 download_filename_format)
        self.logger.info("[Rayvision_sync end auto_download.....]")
        return True

    def _auto_download_tool(self, task_id_list, sleep_time,
                            max_speed, print_log, local_path,
                            download_filename_format="true"):
        """Polling loop behind ``auto_download``.

        Downloads every listed task each cycle and drops a task from the
        list once it has finished rendering; returns when the list is empty.
        """
        while True:
            if task_id_list:
                time.sleep(float(sleep_time))
                # Iterate over a copy: the original iterated the list while
                # removing from it, silently skipping the task after each
                # removal for one cycle.
                for task_id in list(task_id_list):
                    # Query the end state *before* downloading so frames
                    # produced since the last poll are fetched one final time.
                    is_task_end = self.manage_task.is_task_end(task_id)
                    self._run_download([task_id], local_path, max_speed,
                                       print_log, download_filename_format)
                    if is_task_end is True:
                        self.logger.info('The tasks end: %s', task_id)
                        task_id_list.remove(task_id)
            else:
                break

    def auto_download_after_task_completed(self, task_id_list=None,
                                           max_speed=None, print_log=True,
                                           sleep_time=10,
                                           download_filename_format="true",
                                           local_path=None):
        """Download each task only after it has finished rendering.

        Args:
            task_id_list (list of int): List of tasks ids that need to be
                downloaded.
            max_speed (str, optional): Download speed limit in KB/s;
                default "1048576" (1 GB/s).
            print_log (bool, optional): Whether to print the transfer log.
            sleep_time (int, optional): Seconds to sleep between polls.
            download_filename_format (str): "true" prefixes files with the
                task id and scene name; "false" downloads them unprocessed.
            local_path (str): Local save path; defaults to
                ``<home>/renderfarm_sdk``.

        Returns:
            bool: True on success.
        """
        local_path = self._check_local_path(local_path)
        self.logger.info("[Rayvision_sync start"
                         "auto_download_after_task_completed .....]")
        self._download_log(task_id_list, local_path)
        while True:
            if task_id_list:
                time.sleep(float(sleep_time))
                # Iterate over a copy (see _auto_download_tool).
                for task_id in list(task_id_list):
                    is_task_end = self.manage_task.is_task_end(task_id)
                    if is_task_end is True:
                        # Small grace period so the server finishes writing
                        # the final output files.
                        time.sleep(5)
                        self.logger.info('The tasks end: %s', task_id)
                        self._run_download([task_id], local_path,
                                           max_speed, print_log,
                                           download_filename_format,
                                           )
                        task_id_list.remove(task_id)
            else:
                break
        self.logger.info("[Rayvision_sync end -- "
                         "auto_download_after_task_completed......]")
        return True

    def _run_download(self, task_id_list, local_path, max_speed=None,
                      print_log=True, download_filename_format="true",
                      server_path=None):
        """Execute the transfer cmd tool once per server output directory.

        Args:
            task_id_list (list of int): Task id list.
            local_path (str): Local download destination.
            max_speed (str): Maximum transmission speed in KB/s;
                default "1048576" (1 GB/s).
            print_log (bool): Whether to print the transfer output.
            download_filename_format (str): "true" prefixes files with the
                task id and scene name; "false" downloads them unprocessed.
            server_path (str or list): Optional explicit server-side file
                structure(s) to download; by default the output directories
                reported for the tasks are used.

        Raises:
            TypeError: If *server_path* is neither a str nor a list.
        """
        transmit_type = 'download_files'
        local_path = str2unicode(local_path)
        # The unit of 'max_speed' is KB/S; default 1048576 KB/S means 1 GB/S.
        max_speed = max_speed if max_speed is not None else "1048576"
        if not server_path:
            task_status_list = self.manage_task.get_task_status(task_id_list)
            output_file_names = (self.manage_task.
                                 output_file_names(task_status_list))
        elif isinstance(server_path, str):
            output_file_names = [server_path]
        elif isinstance(server_path, list):
            # (a stray no-op ``isinstance(server_path, list)`` statement was
            # removed from this branch)
            output_file_names = server_path
        else:
            raise TypeError("custom_server_output_path must be a list or str.")
        for output_file_name in output_file_names:
            cmd_params = [transmit_type, local_path, output_file_name,
                          max_speed, download_filename_format, 'output_bid']
            cmd = self.trans.create_cmd(cmd_params)
            run_cmd(cmd, print_log=print_log, logger=self.logger)
| StarcoderdataPython |
3217261 | <gh_stars>10-100
from tests.fields.subclass_models import RaceParticipant, RacePlacingEnum
from tortoise.contrib import test
class TestCustomFieldFilters(test.IsolatedTestCase):
    """Filter operators (`__not`, `__in`, `__isnull`, ...) on enum-backed fields."""

    tortoise_test_modules = ["tests.fields.subclass_models"]

    async def asyncSetUp(self):
        await super().asyncSetUp()
        await RaceParticipant.create(
            first_name="George", place=RacePlacingEnum.FIRST, predicted_place=RacePlacingEnum.SECOND
        )
        await RaceParticipant.create(
            first_name="John", place=RacePlacingEnum.SECOND, predicted_place=RacePlacingEnum.THIRD
        )
        await RaceParticipant.create(first_name="Paul", place=RacePlacingEnum.THIRD)
        await RaceParticipant.create(first_name="Ringo", place=RacePlacingEnum.RUNNER_UP)
        await RaceParticipant.create(first_name="Stuart", predicted_place=RacePlacingEnum.FIRST)

    async def _field_set(self, field, **filters):
        """Return the set of *field* values of participants matching *filters*."""
        return set(
            await RaceParticipant.filter(**filters).values_list(field, flat=True)
        )

    async def test_equal(self):
        self.assertEqual(
            await self._field_set("place", place=RacePlacingEnum.FIRST),
            {RacePlacingEnum.FIRST},
        )

    async def test_not(self):
        self.assertEqual(
            await self._field_set("place", place__not=RacePlacingEnum.FIRST),
            {
                RacePlacingEnum.SECOND,
                RacePlacingEnum.THIRD,
                RacePlacingEnum.RUNNER_UP,
                RacePlacingEnum.DNF,
            },
        )

    async def test_in(self):
        self.assertSetEqual(
            await self._field_set(
                "place", place__in=[RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP]
            ),
            {RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP},
        )

    async def test_not_in(self):
        self.assertSetEqual(
            await self._field_set(
                "place", place__not_in=[RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP]
            ),
            {RacePlacingEnum.FIRST, RacePlacingEnum.SECOND, RacePlacingEnum.THIRD},
        )

    async def test_isnull(self):
        self.assertSetEqual(
            await self._field_set("first_name", predicted_place__isnull=True),
            {"Paul", "Ringo"},
        )
        self.assertSetEqual(
            await self._field_set("first_name", predicted_place__isnull=False),
            {"George", "John", "Stuart"},
        )

    async def test_not_isnull(self):
        self.assertSetEqual(
            await self._field_set("first_name", predicted_place__not_isnull=False),
            {"Paul", "Ringo"},
        )
        self.assertSetEqual(
            await self._field_set("first_name", predicted_place__not_isnull=True),
            {"George", "John", "Stuart"},
        )
| StarcoderdataPython |
231759 | <gh_stars>0
import json
import os
import tempfile
from time import time
from softframe.classification import lib
from softframe.classification.app import classificar_documento_para_tipo
from softframe.processing.utils import Parser
from softframe.database import DatabaseConnector
from lxml import etree
from selenium import webdriver
def read_and_convert(path):
    """Convert every PDF under *path* to HTML and persist it to the database.

    For each file: the PDF is converted to an HTML string, stored in the
    ``documents`` collection, split into paragraphs (stored in ``paragraphs``)
    and further into sentences (stored in ``sentences``), each child row
    keeping the id of its parent.  Failures for a single file are printed and
    the loop moves on.
    """
    parser = Parser()
    con_ = DatabaseConnector()
    files = parser.find_files(path)
    for i, f in enumerate(files):
        print("Reading HTML document...")
        print("File %d: %s" % (i, f))
        pdf2html = parser.convert_pdf_to_html(f)
        print("Done... ")
        if pdf2html is not None:
            try:
                print("Parsing HTML")
                html_ = parser.read_html(html_string=pdf2html)
                db_doc = {"value": pdf2html}
                print("Done... ")
                print("Inserting document at DB...")
                con_.set_collection("documents")
                doc_id = con_.insert_item(db_doc)
                print("Done...")
                print("Extracting paragraphs...")
                pars = parser.find_paragraphs(html_)
                if len(pars) > 0:
                    # Keep the paragraph's position inside the document so
                    # ordering can be reconstructed later.
                    objs = [{"doc_pos": idx, "value": el, "doc_id": doc_id} for idx, el in enumerate(pars)]
                    print("Done...")
                    print("Inserting paragraphs at DB...")
                    con_.set_collection("paragraphs")
                    pars_ids = con_.bulk_insert_items(objs)
                    print("Done...")
                    print("Extracting sentences...")
                    sen_insert_list = []
                    # bulk_insert_items is assumed to return ids in insertion
                    # order, pairing each paragraph with its new id.
                    tuples = zip(pars_ids, pars)
                    for par_id, par in tuples:
                        sentences = parser.sentence_extractor(par)
                        sen_objs = [{"doc_id": doc_id, "par_id": par_id, "par_pos": idx, "value": el} for idx, el in enumerate(sentences)]
                        sen_insert_list.extend(sen_objs)
                    print("Done...")
                    print("Inserting sentences at DB...")
                    con_.set_collection("sentences")
                    con_.bulk_insert_items(sen_insert_list)
                    print("Done...")
            except Exception as e:
                # Best effort: report and continue with the next file.
                print(repr(e))
        else:
            print("Failed to parse pdf.")
    print("Closing connection...")
    con_.close_connection()
    print("Done...")
def classify_paragraphs(html, use_paragraph=True):
    """Classify each paragraph (or each sentence) of an HTML document.

    Returns a list of dicts with the classifier ``prediction`` and, in
    paragraph mode, the element's ``xpath`` and ``text``.  The selenium /
    PhantomJS location code is currently disabled (commented out), so in
    sentence mode ``path`` is always None.
    """
    parser = Parser()
    # 'phantomjs.exe' executable needs to be in PATH
    # driver = webdriver.PhantomJS("../misc/resources/files/phantomjs.exe") # Headless browser used by selenium
    # driver = webdriver.PhantomJS("src/softframe/misc/resources/files/phantomjs.exe") # Headless browser
    element_tree = parser.read_html(html_string=html.strip())  # Convert html string to LXML HtmlElement
    paragraphs = element_tree.xpath(".//p")
    tree = etree.ElementTree(element_tree)
    # ``__`` is the OS-level file descriptor; it is closed at the end.
    __, temp_path = tempfile.mkstemp(suffix=".html")  # Should point to the system temp directory
    with open(temp_path, encoding='utf8', mode='w') as f:
        f.write(html)
    response = []  # Every classified sentence/paragraph and its location
    # Build a file:// URL for the (currently disabled) headless browser.
    if os.name == "nt":
        path = "file:///" + temp_path.replace("\\", "/")
    else:
        path = "file:///" + temp_path
    for paragraph in paragraphs:
        raw_text = paragraph.text_content()
        if use_paragraph is False:
            # Sentence mode: classify each sentence of the paragraph.
            sentences = parser.sentence_extractor(raw_text)
            for sentence in sentences:
                # driver.get(path)
                javascript_string = ''
                try:
                    text_to_find = sentence.strip()
                    classification = classificar_documento_para_tipo(text_to_find, lib.CLASSIFICADOR_INICIAL)  # Classify
                    javascript_string = \
                        json.dumps("window.find('{}'); return window.getSelection().getRangeAt(0);".format(text_to_find),
                                   ensure_ascii=False)
                    # path_object = driver.execute_script(javascript_string[1:-1]) # Remove leading and trailing quotes
                    path_object = None
                    response.append({"prediction": classification, "path": path_object})
                except Exception as e:
                    print(javascript_string)
        else:
            # Paragraph mode: classify the whole paragraph and record its xpath.
            # driver.get(path)
            try:
                text_to_find = raw_text.strip()
                # xpath = tree.getpath(paragraph)
                xpath = parser.find_xpath(paragraph, tree)
                classification = classificar_documento_para_tipo(text_to_find, lib.CLASSIFICADOR_INICIAL)  # Classify
                response.append({"prediction": classification, "xpath": dict(xpath), "text": text_to_find})
            except Exception as e:
                print(repr(e))
    os.close(__)
    os.remove(temp_path)
    return response
if __name__ == '__main__':
    # Ad-hoc driver: time the classification of one local HTML file.
    # NOTE(review): the absolute Windows path below is developer-specific.
    t0 = time()
    with open("C:/Users/pedro.castanha/Downloads/file_1.html", encoding="utf8", mode='r') as f:
        html_string = f.read()
    locations = classify_paragraphs(html_string)
    #locations = None
    t0 = time() - t0
    print("Done in {}".format(t0))
    print(len(locations))
    print(locations)
| StarcoderdataPython |
8093592 | <reponame>peiss/ant-learn-python-100P
def read_file():
    """Read student grade records from ``student_grade_input.txt``.

    Each line is comma-separated (e.g. ``name,subject,score``); returns a
    list of field lists, one per line.
    """
    result = []
    with open("./student_grade_input.txt") as fin:
        for line in fin:
            # rstrip("\n") instead of the original line[:-1], which sliced
            # off the last character unconditionally and so corrupted a
            # final line that has no trailing newline.
            result.append(line.rstrip("\n").split(","))
    return result
def sort_grades(datas):
    """Return *datas* ordered by the numeric grade in column 2, highest first."""
    def grade_of(record):
        return int(record[2])
    return sorted(datas, key=grade_of, reverse=True)
def write_file(datas):
    """Write each record of *datas* as a comma-separated line to the output file."""
    lines = [",".join(record) + "\n" for record in datas]
    with open("./student_grade_output.txt", "w") as fout:
        fout.writelines(lines)
# Read the raw grade records from disk.
datas = read_file()
print("read_file datas:", datas)
# Sort the records by grade, highest first.
datas = sort_grades(datas)
print("sort_grades datas:", datas)
# Write the sorted records back out.
write_file(datas)
| StarcoderdataPython |
1946906 | import setuptools
# The long description comes straight from the project README.
with open("README.md", "r") as f:
    long_description = f.read()

# Runtime dependencies are read from requirements.txt, one per line.
with open("requirements.txt", "r") as f:
    install_requires = [line.strip('\n') for line in f.readlines()]

setuptools.setup(
    name="visepoch",
    version='0.2.1',
    author="<NAME>",  # NOTE(review): placeholder left by dataset anonymisation
    description="A package for visualising EPOCH results",
    long_description=long_description,
    license="MIT",
    packages=["visepoch"],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.8"
    ],
    install_requires=install_requires
) | StarcoderdataPython |
1892956 | <gh_stars>10-100
import serial, os, math, argparse
from time import sleep
BLOCK_SIZE = 64  # 128 for parts with >8k flash; must match the bootloader's chunk size
REQ_ENTER = (0xde, 0xad, 0xbe, 0xef)  # magic request that switches the MCU into update mode
ACK = (0xaa, 0xbb)   # bootloader reply: firmware accepted (CRC matched)
NACK = (0xde, 0xad)  # bootloader reply: CRC mismatch
FILE = None  # path of the firmware binary; assigned from the CLI in __main__
def crc8_update(data, crc):
    """Fold one byte into a CRC-8 accumulator (polynomial 0x07, MSB-first)."""
    acc = crc ^ data
    for _ in range(8):
        acc = ((acc << 1) ^ 0x07) if acc & 0x80 else (acc << 1)
    return acc & 0xFF
def get_crc():
    """Return the CRC-8 of FILE, padded to a multiple of BLOCK_SIZE.

    The (possibly short) last chunk is padded with 0xFF — the erased-flash
    value — so host and bootloader checksum identical data.
    """
    crc = 0
    data = open(FILE, 'rb')
    with data as f:
        chunk = bytearray(f.read(BLOCK_SIZE))
        while chunk:
            chunk.extend([0xFF] * (BLOCK_SIZE - len(chunk)))
            for i in chunk:
                crc = crc8_update(i, crc)
            chunk = bytearray(f.read(BLOCK_SIZE))
    return crc
def bootloader_enter(ser):
    """Reset the target MCU and send the bootloader-entry request.

    The payload is REQ_ENTER followed by the chunk count and the firmware
    CRC (sent twice).  Returns the still-open serial port.
    """
    # toggle reset via DTR
    ser.setDTR(True)
    sleep(0.1)
    ser.setDTR(False)
    # send payload: magic bytes + chunk count (rounded up) + CRC twice
    req = bytearray(REQ_ENTER)
    chunks = os.path.getsize(FILE)
    chunks = int(math.ceil(float(chunks) / BLOCK_SIZE))
    print('Need to send %s chunks' % chunks)
    crc = get_crc()
    req.extend([chunks, crc, crc])
    ser.write(req)
    ser.flushOutput()
    return ser
def bootloader_exec(port, baud):
    """Flash FILE to the target attached to *port* at *baud*.

    Streams the firmware in BLOCK_SIZE chunks (last chunk padded with 0xFF),
    waiting for the bootloader's 2-byte ready marker before each chunk, then
    reports the final ACK/NACK verdict.
    """
    # Bug fix: the baud rate was hard-coded to 115200, silently ignoring
    # the ``baud`` argument (and therefore the --baud CLI option).
    ser = serial.Serial(port, baud, timeout=1.0)
    bootloader_enter(ser)
    data = open(FILE, 'rb')
    total = 0
    with data as f:
        chunk = bytearray(f.read(BLOCK_SIZE))
        while chunk:
            # The bootloader sends 2 bytes when it is ready for a chunk.
            rx = ser.read(2)
            if len(rx) != 2:
                print('Timeout')
                return
            total += len(chunk)
            print(total)
            # Pad the last chunk with 0xFF so the on-target CRC matches.
            chunk.extend([0xFF] * (BLOCK_SIZE - len(chunk)))
            ser.write(chunk)
            ser.flushOutput()
            chunk = bytearray(f.read(BLOCK_SIZE))
    ack = ser.read(2)
    if ack == bytearray(ACK):
        print('Done')
    elif ack == bytearray(NACK):
        print('CRC mismatch')
    else:
        print('Invalid response')
    ser.close()
if __name__ == "__main__":
    # CLI entry point for the stm8-bootloader firmware updater.
    parser = argparse.ArgumentParser(description='stm8-bootloader update utility')
    parser.add_argument('--port', '-p', default='/dev/ttyUSB0')
    parser.add_argument('--baud', '-b', default=115200)
    parser.add_argument('file', help='firmware in binary format')
    args = parser.parse_args()
    # Module-level path consumed by get_crc()/bootloader_enter().
    FILE = args.file
    bootloader_exec(args.port, args.baud)
| StarcoderdataPython |
1773959 | <filename>api/__init__.py
import os
import sys
import pymysql
import logging
from chatterbot import ChatBot
from chatterbot import languages
from chatterbot.response_selection import get_random_response, get_first_response, get_most_frequent_response
from chatterbot.trainers import ChatterBotCorpusTrainer, ListTrainer
from chatterbot import comparisons
from flask import Flask, Response, request, jsonify
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager, jwt_required
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
logging.basicConfig(level=logging.INFO)

# Module-level chatbot state shared by the route handlers.
# NOTE(review): ``global`` at module scope is a no-op; these two statements
# have no effect.
global chatbot
global default_response
global minimum_confidence
default_response = 'Desculpe, não entendi a sua pergunta.'
minimum_confidence = float(os.getenv('MINIMUM_CONFIDENCE', 0.6))

# Drop ChatterBot's own tables so the bot constructed below starts clean.
connection = pymysql.connect(
    host=os.environ['MYSQL_HOST'], user=os.environ['MYSQL_USER'], password=os.environ['MYSQL_PASSWORD'],
    database=os.environ['MYSQL_DATABASE'], port=int(os.environ['MYSQL_PORT'])
)
with connection:
    with connection.cursor() as cursor:
        try:
            cursor.execute("DROP TABLE IF EXISTS {}".format("tag_association"))
            cursor.execute("DROP TABLE IF EXISTS {}".format("tag"))
            cursor.execute("DROP TABLE IF EXISTS {}".format("statement"))
        except:
            print('Tables not found', file=sys.stderr)
# Build the ChatterBot instance backed by the MySQL storage adapter.
# NOTE(review): the port is hard-coded to 3306 here, while the raw pymysql
# connections elsewhere in this module honour MYSQL_PORT — confirm intended.
chatbot = ChatBot(
    "Nome do bot",
    preprocessors=[
        'chatterbot.preprocessors.clean_whitespace'
    ],
    storage_adapter={
        'tagger_language': languages.POR,  # Portuguese tagger
        'import_path': 'chatterbot.storage.SQLStorageAdapter',
        'database_uri': "mysql+pymysql://"+os.environ['MYSQL_USER']+":"+os.environ['MYSQL_PASSWORD'] + \
            "@"+os.environ['MYSQL_HOST']+":"+"3306"+"/"+ \
            os.environ['MYSQL_DATABASE']+"?charset=utf8mb4",
    },
    logic_adapters=[
        {
            'import_path': 'chatterbot.logic.BestMatch',
            # Alternatives: LevenshteinDistance, SpacySimilarity, JaccardSimilarity
            'statement_comparison_function': comparisons.LevenshteinDistance,
            'response_selection_method': get_random_response,
            'default_response': default_response,
            'maximum_similarity_threshold': 0.95,
            'threshold': 0.75
        }
    ],
    read_only=True  # serve from trained data only; do not learn from conversations
)
api = Flask(__name__)

# CORS configuration.
origins=os.environ['DOMAIN_WHITELIST'].split(',')
cors = CORS(api, resources={r"/*": {"origins": origins}})
# NOTE(review): this second call replaces the whitelist above with a
# wildcard, effectively disabling the DOMAIN_WHITELIST restriction.
cors = CORS(api, resources={r"/*": {"origins": "*"}})

bcrypt = Bcrypt(api)
jwt = JWTManager(api)
api.config.from_object('config')
ma = Marshmallow(api)
db = SQLAlchemy(api)

# Model/schema imports must run after db/ma are initialised above.
from .models.user import UserModel
from .models.training import TrainingModel
from .models.rating_final import RatingFinalModel
from .models.rating_response import RatingResponseModel
from .schemas.user import UserSchema
from .schemas.training import TrainingSchema
from .schemas.rating_final import RatingFinalSchema
from .schemas.rating_response import RatingResponseSchema

# Create any missing tables for the models imported above.
db.create_all()
db.session.commit()
@api.route("/restart")
@jwt_required()
def restart_chatbot():
global chatbot
connection = pymysql.connect(
host=os.environ['MYSQL_HOST'], user=os.environ['MYSQL_USER'], password=os.environ['MYSQL_PASSWORD'],
database=os.environ['MYSQL_DATABASE'], port=int(os.environ['MYSQL_PORT'])
)
with connection:
with connection.cursor() as cursor:
try:
cursor.execute("SET FOREIGN_KEY_CHECKS = 0")
cursor.execute("TRUNCATE TABLE {}".format("tag"))
cursor.execute(
"TRUNCATE TABLE {}".format("statement"))
cursor.execute(
"TRUNCATE TABLE {}".format("tag_association"))
cursor.execute("SET FOREIGN_KEY_CHECKS = 1")
except error:
print('Tables not found', file=sys.stderr)
# Create a new trainer for the chatbot
trainer = ChatterBotCorpusTrainer(chatbot, show_training_progress=True)
trainer.train("./api/chatterbot-custom/greetings.yml")
trainer.train("./api/chatterbot-custom/farewells.yml")
trainer.train("./api/chatterbot-custom/thanks.yml")
trainerList = ListTrainer(chatbot, show_training_progress=True)
print('========================================================', file=sys.stderr)
print('Selected language: ' + str(chatbot.storage.tagger.language.ENGLISH_NAME), file=sys.stderr)
print('========================================================', file=sys.stderr)
connection = pymysql.connect(
host=os.environ['MYSQL_HOST'], user=os.environ['MYSQL_USER'], password=os.environ['<PASSWORD>'],
database=os.environ['MYSQL_DATABASE'], port=int(os.environ['MYSQL_PORT'])
)
with connection:
with connection.cursor() as cursor:
result = cursor.execute("SELECT * FROM training")
rows = cursor.fetchall()
for row in rows:
trainerList.train([row[1], row[2]])
print('========================================================', file=sys.stderr)
print('== Restart finished ====================================', file=sys.stderr)
print('========================================================', file=sys.stderr)
return jsonify(message="Chatbot API restarted"), 200
from .routes import auth
from .routes import rating
from .routes import admin
from .routes import chatterbot
from .resources.administration import Administration
# Create the initial admin account from environment credentials at import time.
Administration.create_admin_user(
    os.environ['ADMIN_NAME'], os.environ['ADMIN_USERNAME'], os.environ['ADMIN_PASSWORD'])
| StarcoderdataPython |
3427573 | import csv
import requests
import pandas as pd
# Quandl CSV endpoint for the FRED 30-year treasury constant maturity rate.
# NOTE(review): the api_key placeholder (<KEY>) was redacted; a real key is
# required, and secrets should come from the environment, not source code.
FRED_DGS30 = 'https://www.quandl.com/api/v3/datasets/FRED/DGS30/data.csv?api_key=<KEY>'

with requests.Session() as s:
    download = s.get(FRED_DGS30)
    decoded_content = download.content.decode('utf-8')
    cr = csv.reader(decoded_content.splitlines(), delimiter = ',')
    DGS30_list = list(cr)

# Echo every parsed row (including the CSV header row).
for row in DGS30_list:
    print(row)

# NOTE(review): the header row is kept as an ordinary data row here, so the
# resulting DataFrame has numeric column labels.
DGS30_list = pd.DataFrame(DGS30_list)
DGS30_list.to_csv('a15.csv', encoding = 'utf-8')
| StarcoderdataPython |
8178767 | import inspect
import facebook
from . import FacebookTestCase
class FacebookAllConnectionsMethodTestCase(FacebookTestCase):
    """Tests for ``GraphAPI.get_all_connections`` (runs against the live API)."""

    def test_function_with_zero_connections(self):
        """A freshly created test user yields a friends generator that is empty."""
        app_token = facebook.GraphAPI().get_app_access_token(
            self.app_id, self.secret, True
        )
        graph = facebook.GraphAPI(app_token)
        self.create_test_users(self.app_id, graph, 1)
        user_id = self.test_users[0]["id"]
        friends = graph.get_all_connections(user_id, "friends")
        self.assertTrue(inspect.isgenerator(friends))
        self.assertTrue(len(list(friends)) == 0)

    # def test_function_returns_correct_connections(self):
    #     token = facebook.GraphAPI().get_app_access_token(
    #         self.app_id, self.secret, True
    #     )
    #     graph = facebook.GraphAPI(token)
    #     self.create_test_users(self.app_id, graph, 3)
    #     self.create_friend_connections(self.test_users[0], self.test_users)
    #     friends = graph.get_all_connections(
    #         self.test_users[0]["id"], "friends"
    #     )
    #     self.assertTrue(inspect.isgenerator(friends))
    #     friends_list = list(friends)
    #     self.assertTrue(len(friends_list) == 2)
    #     for f in friends:
    #         self.assertTrue(isinstance(f, dict))
    #         self.assertTrue("name" in f)
    #         self.assertTrue("id" in f)
| StarcoderdataPython |
8087944 | <filename>tests/Modem/test_Modem.py
import sys
import pytest
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from Hologram.Network.Modem.MockModem import MockModem
class TestModem(object):
    """Checks that the abstract Modem base (via MockModem) refuses direct use."""

    def test_invalid_is_connected(self):
        modem = MockModem()
        # Bug fix: pytest.raises() no longer accepts a ``message`` kwarg
        # (removed in pytest 4.0); on modern pytest the old calls raised
        # TypeError before ever running the assertion.
        with pytest.raises(Exception):
            modem.isConnected()

    def test_invalid_connect(self):
        modem = MockModem()
        with pytest.raises(Exception):
            modem.connect()

    def test_invalid_disconnect(self):
        modem = MockModem()
        # Bug fix: the original called ``modem.diconnect()`` (typo), so the
        # test passed via AttributeError without exercising disconnect().
        with pytest.raises(Exception):
            modem.disconnect()

    def test_get_result_string(self):
        """Result codes map to their human-readable descriptions."""
        modem = MockModem()
        assert modem.getResultString(0) == 'Modem returned OK'
        assert modem.getResultString(-1) == 'Modem timeout'
        assert modem.getResultString(-2) == 'Modem error'
        assert modem.getResultString(-3) == 'Modem response doesn\'t match expected return value'
        assert modem.getResultString(-99) == 'Unknown response code'
| StarcoderdataPython |
6637825 | from operator import xor
import numpy as np
import scipy.special
from dataclasses import dataclass, field
from pb_bss_eval.distribution.complex_angular_central_gaussian import (
ComplexAngularCentralGaussian,
ComplexAngularCentralGaussianTrainer,
normalize_observation,
)
from pb_bss_eval.distribution.mixture_model_utils import (
apply_inline_permutation_alignment,
estimate_mixture_weight,
log_pdf_to_affiliation,
)
from pb_bss_eval.distribution.utils import _ProbabilisticModel
from pb_bss_eval.permutation_alignment import _PermutationAlignment
__all__ = [
'CACGMM',
'CACGMMTrainer',
'sample_cacgmm',
'normalize_observation',
]
def sample_cacgmm(
        size,
        weight,
        covariance,
        return_label=False
):
    """Draw ``size`` samples from a cACG mixture.

    Args:
        size: Number of samples to draw (int).
        weight: Mixture weights, shape (K,); must sum to 1.
        covariance: Per-class covariances, shape (K, D, D).
        return_label: When True, also return the class label of each sample.

    Returns:
        Samples of shape (size, D), complex128; optionally with labels.
    """
    assert weight.ndim == 1, weight
    assert isinstance(size, int), size
    assert covariance.ndim == 3, covariance.shape

    num_classes, = weight.shape
    dim = covariance.shape[-1]
    assert covariance.shape == (num_classes, dim, dim), (covariance.shape, num_classes, dim)  # noqa

    # Choose a mixture component per sample according to the weights.
    labels = np.random.choice(range(num_classes), size=size, p=weight)

    samples = np.zeros((size, dim), dtype=np.complex128)
    for component in range(num_classes):
        selected = labels == component
        cacg = ComplexAngularCentralGaussian.from_covariance(
            covariance=covariance[component, :, :]
        )
        samples[selected, :] = cacg.sample(size=(np.sum(selected),))

    if return_label:
        return samples, labels
    return samples
@dataclass
class CACGMM(_ProbabilisticModel):
    """Mixture model of complex angular central Gaussian distributions."""

    # Mixture weights, e.g. shape (..., K, 1) for weight_constant_axis==(-1,)
    weight: np.array = None
    cacg: ComplexAngularCentralGaussian = field(
        default_factory=ComplexAngularCentralGaussian)

    def predict(self, y, return_quadratic_form=False):
        """Return class affiliations for complex observations ``y`` (..., N, D)."""
        assert np.iscomplexobj(y), y.dtype
        normalized = normalize_observation(y)  # swap D and N dim
        affiliation, quadratic_form, _ = self._predict(normalized)
        if return_quadratic_form:
            return affiliation, quadratic_form
        return affiliation

    def _predict(self, y, source_activity_mask=None, affiliation_eps=0.):
        """E-step on already normalized observations.

        Note: y shape is (..., D, N) and not (..., N, D) like in predict

        Args:
            y: Normalized observations with shape (..., D, N).
        Returns: Affiliations with shape (..., K, N), the quadratic form of
            the same shape, and the per-class log pdf.
        """
        *_batch_dims, _, _num_frames = y.shape

        # Per-class log density; the inserted axis broadcasts over classes.
        log_pdf, quadratic_form = self.cacg._log_pdf(y[..., None, :, :])

        affiliation = log_pdf_to_affiliation(
            self.weight,
            log_pdf,
            source_activity_mask=source_activity_mask,
            affiliation_eps=affiliation_eps,
        )

        return affiliation, quadratic_form, log_pdf

    def log_likelihood(self, y):
        """Return the scalar log-likelihood of observations ``y`` (..., N, D)."""
        assert np.iscomplexobj(y), y.dtype
        normalized = normalize_observation(y)  # swap D and N dim
        _, _, log_pdf = self._predict(normalized)
        return self._log_likelihood(normalized, log_pdf)

    def _log_likelihood(self, y, log_pdf):
        """Reduce the per-class log pdf to a scalar log-likelihood.

        Note: y shape is (..., D, N) and not (..., N, D) like in log_likelihood

        Args:
            y: Normalized observations with shape (..., D, N).
            log_pdf: shape (..., K, N)
        Returns:
            log_likelihood, scalar
        """
        *_batch_dims, _channels, _num_frames = y.shape

        # logsumexp over the class axis (-2), then a plain sum over every
        # remaining (e.g. time/frequency) position, all in the log domain.
        return np.sum(scipy.special.logsumexp(log_pdf, axis=-2))
class CACGMMTrainer:
    """Expectation-maximization trainer for :class:`CACGMM` models."""

    def fit(
            self,
            y,
            initialization=None,
            num_classes=None,
            iterations=100,
            *,
            saliency=None,
            source_activity_mask=None,
            weight_constant_axis=(-1,),
            hermitize=True,
            covariance_norm='eigenvalue',
            affiliation_eps=1e-10,
            eigenvalue_floor=1e-10,
            inline_permutation_aligner: _PermutationAlignment = None,
    ):
        """Fit a cACGMM to ``y`` with EM and return the trained model.

        Args:
            y: Shape (..., N, D)
            initialization:
                Affiliations between 0 and 1. Shape (..., K, N)
                or CACGMM instance
            num_classes: Scalar >0
            iterations: Scalar >0
            saliency:
                Importance weighting for each observation, shape (..., N)
                Should be pre-calculated externally, not just a string.
            source_activity_mask: Boolean mask that says for each time point
                for each source if it is active or not.
                Shape (..., K, N)
            weight_constant_axis: The axis that is used to calculate the mean
                over the affiliations. The affiliations have the
                shape (..., K, N), so the default value means averaging over
                the sample dimension. Note that averaging over an independent
                axis is supported.
            hermitize: Enforce hermitian covariance estimates in the M-step.
            covariance_norm: 'eigenvalue', 'trace' or False
            affiliation_eps: Lower bound applied to the affiliations.
            eigenvalue_floor: Relative flooring of the covariance eigenvalues
            inline_permutation_aligner: In rare cases you may want to run a
                permutation alignment solver after each E-step. You can
                instantiate a permutation alignment solver outside of the
                fit function and pass it to this function.

        Returns:
            The trained ``CACGMM``.
        """
        # Exactly one of ``initialization``/``num_classes`` may be given.
        assert xor(initialization is None, num_classes is None), (
            "Incompatible input combination. "
            "Exactly one of the two inputs has to be None: "
            f"{initialization is None} xor {num_classes is None}"
        )

        assert np.iscomplexobj(y), y.dtype
        assert y.shape[-1] > 1, y.shape
        y = normalize_observation(y)  # swap D and N dim

        assert iterations > 0, iterations

        model = None

        *independent, D, num_observations = y.shape
        if initialization is None:
            # Cold start: random affiliations, normalized over the classes.
            assert num_classes is not None, num_classes
            affiliation_shape = (*independent, num_classes, num_observations)
            affiliation = np.random.uniform(size=affiliation_shape)
            affiliation /= np.einsum("...kn->...n", affiliation)[..., None, :]
            quadratic_form = np.ones(affiliation_shape, dtype=y.real.dtype)
        elif isinstance(initialization, np.ndarray):
            # Warm start from user-supplied affiliations.
            num_classes = initialization.shape[-2]
            assert num_classes > 1, num_classes
            affiliation_shape = (*independent, num_classes, num_observations)

            # Force same number of dims (Prevent wrong input)
            assert initialization.ndim == len(affiliation_shape), (
                initialization.shape, affiliation_shape
            )

            # Allow singleton dimensions to be broadcasted
            assert initialization.shape[-2:] == affiliation_shape[-2:], (
                initialization.shape, affiliation_shape
            )

            affiliation = np.broadcast_to(initialization, affiliation_shape)
            quadratic_form = np.ones(affiliation_shape, dtype=y.real.dtype)
        elif isinstance(initialization, CACGMM):
            # Continue training an existing model; the first loop iteration
            # then starts with an E-step instead of an M-step.
            # weight[-2] may be 1, when weight is fixed to 1/K
            # num_classes = initialization.weight.shape[-2]
            num_classes = initialization.cacg.covariance_eigenvectors.shape[-3]
            model = initialization
        else:
            raise TypeError('No sufficient initialization.')

        if isinstance(weight_constant_axis, list):
            # List does not work in numpy 1.16.0 as axis
            weight_constant_axis = tuple(weight_constant_axis)

        if source_activity_mask is not None:
            assert source_activity_mask.dtype == np.bool, source_activity_mask.dtype  # noqa
            assert source_activity_mask.shape[-2:] == (num_classes, num_observations), (source_activity_mask.shape, independent, num_classes, num_observations)  # noqa

            if isinstance(initialization, np.ndarray):
                assert source_activity_mask.shape == initialization.shape, (source_activity_mask.shape, initialization.shape)  # noqa

        # Sanity guards against accidentally transposed inputs.
        assert num_classes < 20, f'num_classes: {num_classes}, sure?'
        assert D < 35, f'Channels: {D}, sure?'

        for iteration in range(iterations):
            if model is not None:
                # E-step: recompute affiliations from the current model.
                affiliation, quadratic_form, _ = model._predict(
                    y,
                    source_activity_mask=source_activity_mask,
                    affiliation_eps=affiliation_eps,
                )

                if inline_permutation_aligner is not None:
                    affiliation, quadratic_form \
                        = apply_inline_permutation_alignment(
                            affiliation=affiliation,
                            quadratic_form=quadratic_form,
                            weight_constant_axis=weight_constant_axis,
                            aligner=inline_permutation_aligner,
                        )

            # M-step: re-estimate the weights and cACG parameters.
            model = self._m_step(
                y,
                quadratic_form,
                affiliation=affiliation,
                saliency=saliency,
                hermitize=hermitize,
                covariance_norm=covariance_norm,
                eigenvalue_floor=eigenvalue_floor,
                weight_constant_axis=weight_constant_axis,
            )

        return model

    def fit_predict(
            self,
            y,
            initialization=None,
            num_classes=None,
            iterations=100,
            *,
            saliency=None,
            source_activity_mask=None,
            weight_constant_axis=(-1,),
            hermitize=True,
            covariance_norm='eigenvalue',
            affiliation_eps=1e-10,
            eigenvalue_floor=1e-10,
            inline_permutation_aligner: _PermutationAlignment = None,
    ):
        """Fit a model. Then just return the posterior affiliations."""
        model = self.fit(
            y=y,
            initialization=initialization,
            num_classes=num_classes,
            iterations=iterations,
            saliency=saliency,
            source_activity_mask=source_activity_mask,
            weight_constant_axis=weight_constant_axis,
            hermitize=hermitize,
            covariance_norm=covariance_norm,
            affiliation_eps=affiliation_eps,
            eigenvalue_floor=eigenvalue_floor,
            inline_permutation_aligner=inline_permutation_aligner,
        )
        return model.predict(y)

    def _m_step(
            self,
            x,
            quadratic_form,
            affiliation,
            saliency,
            hermitize,
            covariance_norm,
            eigenvalue_floor,
            weight_constant_axis,
    ):
        """Single M-step: estimate mixture weights and refit the cACG."""
        weight = estimate_mixture_weight(
            affiliation=affiliation,
            saliency=saliency,
            weight_constant_axis=weight_constant_axis,
        )

        if saliency is None:
            masked_affiliation = affiliation
        else:
            # Down-weight each observation according to its saliency.
            masked_affiliation = affiliation * saliency[..., None, :]

        cacg = ComplexAngularCentralGaussianTrainer()._fit(
            y=x[..., None, :, :],
            saliency=masked_affiliation,
            quadratic_form=quadratic_form,
            hermitize=hermitize,
            covariance_norm=covariance_norm,
            eigenvalue_floor=eigenvalue_floor,
        )
        return CACGMM(weight=weight, cacg=cacg)
| StarcoderdataPython |
1868614 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
from html.parser import HTMLParser
import requests
import progressbar
class GenreScapper(HTMLParser):
    """Scrape the genre list from a LibriVox book page.

    The genre is expected inside ``<p class="book-page-genre">``, where a
    ``<span>`` containing the word "Genre" is followed by the genre text.
    A small state machine tracks whether parsing is inside that paragraph
    (``inGoodTag``), whether the next data chunk should be inspected
    (``getTitle``) and whether the genre text may be captured (``takeData``).
    """

    def __init__(self):
        super(GenreScapper, self).__init__()
        self.genre = None
        self.inGoodTag = False
        self.takeData = False
        self.getTitle = False

    def handle_starttag(self, tag, attr):
        # Enter capture mode when the genre paragraph opens; any span
        # inside it arms data inspection.
        if tag == "p" and ('class', 'book-page-genre') in attr:
            self.inGoodTag = True
        if tag == "span" and self.inGoodTag:
            self.getTitle = True

    def handle_endtag(self, tag):
        # Only the closing </p> resets the state machine.
        if tag != "p":
            return
        self.inGoodTag = False
        self.getTitle = False

    def handle_data(self, data):
        if not self.getTitle:
            return
        if "Genre" in data:
            # The "Genre" label itself: arm capture for the next chunk.
            self.takeData = True
            self.getTitle = False
        elif self.takeData and self.genre is None:
            # First armed data chunk is the raw genre string.
            self.genre = data

    def getGenre(self):
        """Return the scraped genres as a list of strings, or None."""
        if self.genre is None:
            return None
        raw_parts = self.genre.replace('\\', '').split(',')
        output = []
        for part in raw_parts:
            if part in ('', ' '):
                continue
            output.append(part.strip())
        if not output:
            return None
        return output
def getGenreFromMetadata(metadata):
    """Download the LibriVox page referenced by *metadata* and scrape genres.

    Raises KeyError when the metadata has no 'url_librivox' entry.
    """
    parser = GenreScapper()
    # NOTE(review): reads the private ``_content`` attribute of the
    # requests response and stringifies the raw bytes — confirm whether
    # ``response.text`` was intended.
    page = requests.get(metadata["url_librivox"])
    parser.feed(str(page._content))
    return parser.getGenre()
def gather_all_genres(pathDIR, metadataList):
    """Scrape genres for every metadata JSON file in *metadataList*.

    Returns a list of (fileName, genres-or-None) pairs; a Ctrl-C stops the
    loop early and returns what was collected so far.
    """
    results = []
    print("Retrieving all books' genres...")
    bar = progressbar.ProgressBar(maxval=len(metadataList))
    bar.start()

    for index, fileName in enumerate(metadataList):
        bar.update(index)
        with open(os.path.join(pathDIR, fileName), 'rb') as file:
            metadata = json.load(file)
        try:
            genre = getGenreFromMetadata(metadata)
        except KeyboardInterrupt:
            break
        except:
            # Best-effort: any scraping/network failure yields None.
            genre = None
        results.append((fileName, genre))

    bar.finish()
    return results
| StarcoderdataPython |
9625001 | # OpenCV program to detect face in real time
# import libraries of python OpenCV
# where its functionality resides
import cv2
import sys
import numpy as np
def process_vid(url_path):
    """Track a face through the video at *url_path* and write a mask-sized,
    face-aligned crop of every frame to 'video-out.mp4'.

    Uses the OpenCV Haar cascade for detection and exponentially smooths
    the detected rectangle to reduce jitter. Frames where no face has been
    seen yet are passed to the writer at their original size (the
    fixed-size VideoWriter silently drops them).
    """
    print('url is ' + url_path)

    # Haar cascade face detector shipped with OpenCV (file expected in cwd).
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cap = cv2.VideoCapture(url_path)  # Start the video source

    # Geometry of the overlay mask image and where the face sits inside it.
    mask_width = 214
    mask_height = 295
    mask_face_x = 60
    mask_face_y = 65
    mask_face_height = 95

    # Grayscale mask image; only referenced by the commented-out
    # bitwise_and below — kept for when masking is re-enabled.
    mask = cv2.imread('mask-sm.png', 0)

    last_face = None  # smoothed face rectangle from the previous frame

    def get_crop(face, img):
        # Map a detected face rect to the mask-aligned crop rectangle,
        # scaled so the face spans mask_face_height pixels, clamped to
        # the image bounds on the right/bottom.
        (fx, fy, fw, fh) = face
        ih, iw, _ = img.shape
        scale = fh / mask_face_height
        minx = int(fx - (mask_face_x * scale))
        miny = int(fy - (mask_face_y * scale))
        remw = iw - minx
        remh = ih - miny
        width = min(int(mask_width * scale), remw)
        height = min(int(mask_height * scale), remh)
        return (minx, miny, width, height)

    # Bug fix: the original assigned an 'mp4v' fourcc and immediately
    # overwrote it with 'vp80'; only the VP8 fourcc was ever used, so the
    # dead assignment is removed. (An unused ``should_show`` helper was
    # removed as well.)
    # NOTE(review): VP8 inside an .mp4 container is unusual — confirm the
    # codec/container pairing is intentional.
    fourcc = cv2.VideoWriter_fourcc(*'vp80')
    video_out = cv2.VideoWriter(
        'video-out.mp4', fourcc, 20.0, (mask_width, mask_height))

    while True:
        # Wait for Esc key to stop
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

        ret, img = cap.read()
        if not ret:
            break  # end of stream

        try:
            # convert to gray scale of each frames
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        except:
            continue  # unreadable frame; skip it

        # Detects faces of different sizes in the input image; keep the
        # first detection, if any.
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        face = None
        if len(faces) > 0:
            face = faces[0]

        # Exponential smoothing (80% previous, 20% current) to reduce jitter.
        if face is not None and last_face is not None:
            (fx, fy, fw, fh) = face
            (lfx, lfy, lfw, lfh) = last_face
            face = (int((fx * 0.2 + lfx * 0.8)), int((fy * 0.2 + lfy * 0.8)),
                    int((fw * 0.2 + lfw * 0.8)), int((fh * 0.2 + lfh * 0.8)))
        if face is None:
            face = last_face  # fall back to the last known position
        last_face = face

        if face is not None:
            try:
                (cx, cy, cw, ch) = get_crop(face, img)
                img = img[cy:cy + ch, cx:cx + cw]
                img = cv2.resize(img, (mask_width, mask_height))
                # img = cv2.bitwise_and(img,img,mask = mask)
            except:
                continue  # crop/resize failed (e.g. empty region); skip frame

        # Display an image in a window
        # cv2.imshow('img',img)
        video_out.write(img)

    # Close the window
    cap.release()
    video_out.release()
    # De-allocate any associated memory usage
    cv2.destroyAllWindows()
# Minimal CLI: a single positional argument is the input video path/URL.
# NOTE(review): the usage text claims that a blank input_file "defaults to
# live camera capture", but with no argument this script does nothing —
# there is no camera-capture fallback implemented.
if len(sys.argv) > 1:
    if ('-h' == sys.argv[1]) or ('--help' == sys.argv[1]):
        print("usage: greenscreen.py [-h,--help] [input_file]")
        print(" If input_file is blank, defaults to live camera capture")
        sys.exit()
    source = sys.argv[1]
    process_vid(source)
| StarcoderdataPython |
3394053 | <reponame>intel/Theano-dev<gh_stars>10-100
from __future__ import absolute_import, print_function, division
from itertools import product
import unittest
from nose.plugins.skip import SkipTest
import six.moves.builtins as builtins
from six import integer_types
import numpy
import math
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
from theano import function
from theano.sandbox import mkl
from theano.sandbox.mkl.mkl_pool import Pool
from theano.sandbox.mkl.basic_ops import (U2IPool, I2U)
# Skip this whole test module when Theano was built without MKL support.
if not mkl.mkl_available:
    raise SkipTest('Optional package MKL disabled')
class TestMKLPool(unittest.TestCase):
    """Compare Theano's MKL pooling Ops (U2IPool -> Pool -> I2U) against
    pure-numpy reference implementations for plain, strided and padded
    pooling, in both the forward pass and the gradient (verify_grad)."""

    @staticmethod
    def numpy_pool_2d(input, ds, ignore_border=False, mode='max'):
        '''Helper function, implementing pool_2d in pure numpy.'''
        if len(input.shape) < 2:
            raise NotImplementedError('input should have at least 2 dim,'
                                      ' shape is %s'
                                      % str(input.shape))
        # When borders are kept, a partial window at the bottom/right edge
        # adds one extra output row/column.
        xi = 0
        yi = 0
        if not ignore_border:
            if input.shape[-2] % ds[0]:
                xi += 1
            if input.shape[-1] % ds[1]:
                yi += 1
        out_shp = list(input.shape[:-2])
        out_shp.append(input.shape[-2] // ds[0] + xi)
        out_shp.append(input.shape[-1] // ds[1] + yi)
        output_val = numpy.zeros(out_shp)
        # Reduction applied to every pooling window.
        func = numpy.max
        if mode == 'sum':
            func = numpy.sum
        elif mode != 'max':
            func = numpy.average
        for k in numpy.ndindex(*input.shape[:-2]):
            for i in range(output_val.shape[-2]):
                ii = i * ds[0]
                for j in range(output_val.shape[-1]):
                    jj = j * ds[1]
                    patch = input[k][ii:ii + ds[0], jj:jj + ds[1]]
                    output_val[k][i, j] = func(patch)
        return output_val

    @staticmethod
    def numpy_pool_2d_stride(input, ds, ignore_border=False, st=None,
                             mode='max'):
        '''Helper function, implementing pool_2d in pure numpy.

        ``st`` gives the stride between pooling regions; when omitted the
        stride defaults to the pooling shape ``ds``.
        '''
        if len(input.shape) < 2:
            raise NotImplementedError('input should have at least 2 dim,'
                                      ' shape is %s'
                                      % str(input.shape))
        if st is None:
            st = ds
        img_rows = input.shape[-2]
        img_cols = input.shape[-1]

        # Number of fully contained windows along each axis.
        out_r = 0
        out_c = 0
        if img_rows - ds[0] >= 0:
            out_r = (img_rows - ds[0]) // st[0] + 1
        if img_cols - ds[1] >= 0:
            out_c = (img_cols - ds[1]) // st[1] + 1

        if not ignore_border:
            # Keep a trailing partial window when any input remains.
            if out_r > 0:
                if img_rows - ((out_r - 1) * st[0] + ds[0]) > 0:
                    rr = img_rows - out_r * st[0]
                    if rr > 0:
                        out_r += 1
            else:
                if img_rows > 0:
                    out_r += 1
            if out_c > 0:
                if img_cols - ((out_c - 1) * st[1] + ds[1]) > 0:
                    cr = img_cols - out_c * st[1]
                    if cr > 0:
                        out_c += 1
            else:
                if img_cols > 0:
                    out_c += 1

        out_shp = list(input.shape[:-2])
        out_shp.append(out_r)
        out_shp.append(out_c)

        # Reduction applied to every pooling window.
        func = numpy.max
        if mode == 'sum':
            func = numpy.sum
        elif mode != 'max':
            func = numpy.average

        output_val = numpy.zeros(out_shp)
        for k in numpy.ndindex(*input.shape[:-2]):
            for i in range(output_val.shape[-2]):
                ii_st = i * st[0]
                # Clamp the window to the image so partial windows work.
                ii_end = builtins.min(ii_st + ds[0], img_rows)
                for j in range(output_val.shape[-1]):
                    jj_st = j * st[1]
                    jj_end = builtins.min(jj_st + ds[1], img_cols)
                    patch = input[k][ii_st:ii_end, jj_st:jj_end]
                    output_val[k][i, j] = func(patch)
        return output_val

    @staticmethod
    def numpy_pool_2d_stride_padding(
            x, ds, ignore_border=True, st=None, padding=(0, 0), mode='max'):
        '''Reference pooling with stride and symmetric zero padding.

        NOTE(review): despite the ``ignore_border=True`` default, the
        assert below only permits ``ignore_border=False`` — callers must
        pass it explicitly.
        '''
        assert (ignore_border is False)
        in_h = x.shape[-2]
        in_w = x.shape[-1]
        kernel_h = ds[0]
        kernel_w = ds[1]
        stride_h = st[0]
        stride_w = st[1]
        pad_h = padding[0]
        pad_w = padding[1]
        # Padding must be strictly smaller than the pooling window.
        assert ds[0] > pad_h
        assert ds[1] > pad_w

        def pad_img(x):
            # Zero-pad the two trailing (spatial) dims by (pad_h, pad_w).
            y = numpy.zeros(
                (x.shape[0], x.shape[1],
                 x.shape[2] + pad_h * 2, x.shape[3] + pad_w * 2),
                dtype=x.dtype)
            y[:, :, pad_h:(x.shape[2] + pad_h), pad_w:(x.shape[3] + pad_w)] = x
            return y
        # Padded spatial extents and resulting output shape.
        h = in_h + 2 * pad_h
        w = in_w + 2 * pad_w

        out_h = int(math.ceil((float)(h - kernel_h) / stride_h)) + 1
        out_w = int(math.ceil((float)(w - kernel_w) / stride_w)) + 1

        out_shp = list(x.shape[:-2])
        out_shp.extend([out_h, out_w])

        output_val = numpy.zeros(out_shp)
        y = pad_img(x)

        # Reduction applied to every pooling window.
        func = numpy.max
        if mode == 'sum':
            func = numpy.sum
        elif mode != 'max':
            func = numpy.average
        inc_pad = mode == 'average_inc_pad'

        for k in numpy.ndindex(*x.shape[:-2]):
            for i in range(output_val.shape[-2]):
                ii_st = i * st[0]
                if ii_st > h:
                    print ('ii_st > h!!!')
                    continue
                ii_end = builtins.min(ii_st + ds[0], h)
                if not inc_pad:
                    # Exclude the zero padding from the window bounds.
                    ii_st = builtins.max(ii_st, pad_h)
                    ii_end = builtins.min(ii_end, in_h + pad_h)
                for j in range(output_val.shape[-1]):
                    jj_st = j * st[1]
                    if jj_st > w:
                        print ('jj_st > w!!!')
                        continue
                    jj_end = builtins.min(jj_st + ds[1], w)
                    if not inc_pad:
                        jj_st = builtins.max(jj_st, pad_w)
                        jj_end = builtins.min(jj_end, in_w + pad_w)
                    patch = y[k][ii_st:ii_end, jj_st:jj_end]
                    output_val[k][i, j] = func(patch)
        return output_val

    def mkl_pool_func(*inputs):
        """Build the MKL pooling graph ``U2IPool -> Pool -> I2U``.

        NOTE(review): declared without an explicit ``self``; it is always
        invoked as ``self.mkl_pool_func(...)``, so ``inputs[0]`` is the
        test instance and is discarded by the ``_`` in each unpacking.

        Accepted layouts (after the implicit self):
        (images, ignore_border, mode, ds[, st[, pad]]).
        """
        mkl_ver = theano.sandbox.mkl.mkl_version()
        # Older MKL releases cannot honour ignore_border=True.
        if inputs[2] and isinstance(mkl_ver, integer_types) and (mkl_ver < 20170206):
            raise SkipTest("Need newer MKL to support 'ignore_border=True'.")

        if len(inputs) == 5:
            # self, images, ignore_border, mode, ds
            _, images, ignore_border, mode, ds, = inputs
            x_internal = U2IPool(ignore_border=ignore_border,
                                 mode=mode)(images, ds)
            poolOut = Pool(ignore_border=ignore_border,
                           mode=mode)(x_internal, ds)
            output = I2U()(poolOut)
        elif len(inputs) == 6:
            # self, images, ignore_border, mode, ds, st,
            _, images, ignore_border, mode, ds, st, = inputs
            x_internal = U2IPool(ignore_border=ignore_border,
                                 mode=mode)(images, ds, st)
            poolOut = Pool(ignore_border=ignore_border,
                           mode=mode)(x_internal, ds, st)
            output = I2U()(poolOut)
        elif len(inputs) == 7:
            # self, images, ignore_border, mode, ds, st, pad
            _, images, ignore_border, mode, ds, st, pad = inputs
            x_internal = U2IPool(ignore_border=ignore_border,
                                 mode=mode)(images, ds, st, pad)
            poolOut = Pool(ignore_border=ignore_border,
                           mode=mode)(x_internal, ds, st, pad)
            output = I2U()(poolOut)
        else:
            raise ValueError("incorrect inputs list, should be 4 ~ 6 parameters!")
        return output

    def test_pool(self):
        """Forward pooling (implicit stride) matches the numpy reference."""
        rng = numpy.random.RandomState(utt.fetch_seed())

        images = T.dtensor4()
        ds_list = ((1, 1), (2, 2), (3, 3), (2, 3))

        # generate random images
        imval = rng.rand(4, 2, 16, 16)

        for ds, ignore_border, mode in product(ds_list,
                                               [False, True],
                                               ['max',
                                                'average_exc_pad']):
            # Pure Numpy computation
            numpy_output_val = self.numpy_pool_2d(imval, ds,
                                                  ignore_border,
                                                  mode=mode)
            # MKL Ops
            output = self.mkl_pool_func(images, ignore_border, mode, ds)
            f = function([images, ], [output, ])
            output_val = f(imval)
            utt.assert_allclose(output_val, numpy_output_val)

    def test_pool_stride(self):
        """Forward pooling with explicit strides matches the numpy reference."""
        rng = numpy.random.RandomState(utt.fetch_seed())

        # generate random images
        ds_list = ((1, 1), (2, 2), (3, 3), (2, 3), (5, 3))
        st_list = ((1, 1), (3, 3), (5, 3))
        imval = rng.rand(4, 2, 16, 16)
        images = T.dtensor4()

        for ds, st, ignore_border, mode in product(ds_list,
                                                   st_list,
                                                   [False, True],
                                                   ['max',
                                                    'average_exc_pad']):
            # Pure Numpy computation
            numpy_output_val = self.numpy_pool_2d_stride(imval, ds,
                                                         ignore_border, st, mode)
            # MKL Ops
            output = self.mkl_pool_func(images, ignore_border, mode, ds, st)
            f = function([images, ], [output, ])
            output_val = f(imval)
            utt.assert_allclose(output_val, numpy_output_val)

    def test_pool_stride_padding(self):
        """Forward pooling with stride and padding matches the numpy
        reference (only ignore_border=False is exercised)."""
        rng = numpy.random.RandomState(utt.fetch_seed())

        # generate random images
        ds_list = ((3, 3), (4, 4), (3, 4), (5, 5))
        st_list = ((1, 1), (2, 2), (3, 3), (1, 2))
        pad_list = ((1, 1), (0, 0), (1, 1), (1, 1))
        imgsize_list = ((5, 5), (6, 6), (6, 6), (8, 8))
        n = 4
        c = 2
        images = T.dtensor4()

        for idx, ignore_border, mode in product(numpy.arange(len(ds_list)),
                                                [False],
                                                ['max',
                                                 'average_exc_pad']):
            imgsize = imgsize_list[idx]
            imval = rng.rand(n, c, imgsize[0], imgsize[1])
            ds = ds_list[idx]
            st = st_list[idx]
            pad = pad_list[idx]

            # Pure Numpy computation
            numpy_output_val = self.numpy_pool_2d_stride_padding(imval, ds,
                                                                 ignore_border, st,
                                                                 pad, mode)
            # MKL Ops
            output = self.mkl_pool_func(images, ignore_border, mode, ds, st, pad)
            f = function([images, ], [output, ])
            output_val = f(imval)
            utt.assert_allclose(output_val, numpy_output_val)

    def test_pool_grad(self):
        """Gradient of plain pooling passes verify_grad."""
        rng = numpy.random.RandomState(utt.fetch_seed())
        ds_list = ((1, 1), (3, 2), (2, 3))
        imval = rng.rand(2, 3, 3, 4) * 10.0

        for ds, ignore_border, mode in product(ds_list,
                                               [False, True],
                                               ['max',
                                                'average_exc_pad']):
            def mp(input):
                return self.mkl_pool_func(input, ignore_border, mode, ds)
            utt.verify_grad(mp, [imval])

    def test_pool_stride_grad(self):
        """Gradient of strided pooling passes verify_grad."""
        rng = numpy.random.RandomState(utt.fetch_seed())
        ds_list = ((1, 1), (2, 2), (3, 3), (2, 3), (5, 3))
        st_list = ((1, 1), (3, 3), (5, 3))
        imval = rng.rand(4, 2, 16, 16)

        for ds, st, ignore_border, mode in product(ds_list,
                                                   st_list,
                                                   [False, True],
                                                   ['max',
                                                    'average_exc_pad']):
            def mp(input):
                return self.mkl_pool_func(input, ignore_border, mode, ds, st)
            utt.verify_grad(mp, [imval])

    def test_pool_stride_pad_grad(self):
        """Gradient of strided, padded pooling passes verify_grad."""
        rng = numpy.random.RandomState(utt.fetch_seed())
        ds_list = ((3, 3), (4, 4), (3, 4), (5, 5))
        st_list = ((1, 1), (2, 2), (3, 3), (1, 2))
        pad_list = ((1, 1), (0, 0), (1, 1), (1, 1))
        imgsize_list = ((5, 5), (6, 6), (6, 6), (8, 8))
        n = 4
        c = 3

        for idx, ignore_border, mode in product(numpy.arange(len(ds_list)),
                                                [False],
                                                ['max',
                                                 'average_exc_pad']):
            imgsize = imgsize_list[idx]
            imval = rng.rand(n, c, imgsize[0], imgsize[1])
            ds = ds_list[idx]
            st = st_list[idx]
            pad = pad_list[idx]

            def mp(input):
                return self.mkl_pool_func(input, ignore_border, mode, ds, st, pad)

            utt.verify_grad(mp, [imval])
# Allow running this test module directly (python test_pool.py).
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3214019 | import csv
from dataclasses import dataclass
from datetime import date
from pathlib import Path
from typing import Optional, Iterable
@dataclass
class ReportEntry:
    """One row of the register-of-interests report extracted from a pdf."""

    pdf_path: Path  # path on disk to the pdf file
    pdf_hash_type: str  # hash algorithm used to produce pdf_hash_value
    pdf_hash_value: str  # pdf file hash digest
    pdf_page: str  # page number in the pdf file
    pdf_line: str  # line number in the pdf page
    pdf_created_date: Optional[date]  # created date from the pdf metadata
    pdf_modified_date: Optional[date]  # modified date from the pdf metadata
    pdf_downloaded_date: date  # date the pdf file was downloaded
    pdf_url: str  # absolute url for the pdf file
    referrer_url: Optional[str]  # referrer url that led to the pdf url
    website_modified_date: Optional[date]  # last-updated date from the website
    processed_date: Optional[date]  # processed date from the pdf file
    signed_date: Optional[date]  # signed date from the pdf file
    assembly: str  # 'member' (house of reps) or 'senator' (senate)
    last_name: str  # person's last name
    first_name: str  # person's first name
    state_or_territory: str  # Australian state/territory represented
    electorate: Optional[str]  # electorate name (house of representatives)
    register_section: str  # section title in the register form
    change_type: str  # type of change: addition or deletion
    # --- form table headings ---
    form_who: str  # who the entry concerns: 'self', 'partner', 'dependent'
    # Name of the thing (company, trust, creditor, bank, organisation, ...)
    # for sections 1, 2.i, 2.ii, 4, 5, 6, 7, 8 and 13 of the register form.
    form_name: Optional[str] = None
    # What the thing does (nature/activities of the trust, company or
    # partnership) for sections 2.i, 2.ii, 4 and 5.
    form_activity: Optional[str] = None
    # The person's interest/participation in the thing (beneficial interest,
    # purpose of ownership, nature of liability/account/income, gift or
    # travel details, ...) for sections 2, 3, 5-12 and 14.
    form_participation: Optional[str] = None
    # Where the thing is physically located (section 3, real estate).
    form_location: Optional[str] = None

    @property
    def is_valid(self):
        """True when every mandatory field and at least one form field is set."""
        required = (
            self.pdf_url,
            self.assembly,
            self.last_name,
            self.state_or_territory,
            self.register_section,
            self.change_type,
        )
        form_fields = (
            self.form_location,
            self.form_who,
            self.form_name,
            self.form_activity,
            self.form_participation,
        )
        return all(required) and any(form_fields)

    @property
    def short_hash(self):
        """A short (15 character) prefix of the pdf hash for display."""
        return self.pdf_hash_value[0:15]

    @classmethod
    def save(cls, path: Path, items: Iterable["ReportEntry"]):
        """Write *items* to *path* as an excel-dialect csv, creating parent dirs."""
        # Column order of the output file; every name matches an attribute.
        fields = [
            "assembly",
            "state_or_territory",
            "electorate",
            "last_name",
            "first_name",
            "register_section",
            "change_type",
            "form_who",
            "form_name",
            "form_activity",
            "form_participation",
            "form_location",
            "pdf_created_date",
            "pdf_modified_date",
            "pdf_downloaded_date",
            "website_modified_date",
            "processed_date",
            "signed_date",
            "pdf_page",
            "pdf_line",
            "pdf_url",
            "referrer_url",
            "pdf_hash_type",
            "pdf_hash_value",
        ]
        # Attributes that must be serialized through fmt_date.
        date_fields = {
            "pdf_created_date",
            "pdf_modified_date",
            "pdf_downloaded_date",
            "website_modified_date",
            "processed_date",
            "signed_date",
        }
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "wt", newline="", encoding="utf8") as f:
            writer = csv.DictWriter(f, fields, dialect="excel")
            writer.writeheader()
            for item in items:
                writer.writerow({
                    name: cls.fmt_date(getattr(item, name))
                    if name in date_fields
                    else getattr(item, name)
                    for name in fields
                })

    @classmethod
    def fmt_date(cls, value: Optional[date]) -> str:
        """ISO-format *value*, or the empty string when it is falsy."""
        return value.isoformat() if value else ""

    def __str__(self):
        parts = (
            ("doc", self.short_hash),
            ("page", self.pdf_page),
            ("line", self.pdf_line),
            ("person", self.last_name),
            ("group", self.register_section),
            ("location", self.form_location),
            ("who", self.form_who),
            ("name", self.form_name),
            ("activity", self.form_activity),
            ("participation", self.form_participation),
        )
        # Only non-empty values are shown.
        return "; ".join(f"{label}={value}" for label, value in parts if value)
| StarcoderdataPython |
6701599 | import pytest
from rest_api.controller.request import Question
from rest_api.controller.response import Answer, AnswersToIndividualQuestion
def test_query_dsl_with_without_valid_query_field():
    """A bool query with only must/filter clauses must be rejected."""
    dsl = {
        "query": {
            "bool": {
                "must": [
                    {"match": {"title": "Search"}},
                    {"match": {"content": "Elasticsearch"}},
                ],
                "filter": [
                    {"term": {"status": "published"}},
                    {"range": {"publish_date": {"gte": "2015-01-01"}}},
                ],
            }
        }
    }
    with pytest.raises(Exception):
        Question.from_elastic_query_dsl(dsl)
def test_query_dsl_with_without_multiple_query_field():
    """Multiple query fields inside ``should`` must be rejected."""
    dsl = {
        "query": {
            "bool": {
                "should": [
                    {"match": {"name.first": {"query": "shay", "_name": "first"}}},
                    {"match": {"name.last": {"query": "banon", "_name": "last"}}},
                ],
                "filter": {
                    "terms": {
                        "name.last": ["banon", "kimchy"],
                        "_name": "test",
                    }
                },
            }
        }
    }
    with pytest.raises(Exception):
        Question.from_elastic_query_dsl(dsl)
def test_query_dsl_with_single_query():
    """A plain match query yields exactly one question and no filters."""
    dsl = {
        "query": {
            "match": {
                "message": {
                    "query": "this is a test"
                }
            }
        }
    }
    question = Question.from_elastic_query_dsl(dsl)

    assert len(question.questions) == 1
    assert "this is a test" in question.questions
    assert question.filters is None
def test_query_dsl_with_filter():
    """A should query plus a terms filter yields one question and one filter."""
    dsl = {
        "query": {
            "bool": {
                "should": [
                    {"match": {"name.first": {"query": "shay", "_name": "first"}}}
                ],
                "filter": {
                    "terms": {
                        "name.last": ["banon", "kimchy"],
                        "_name": "test",
                    }
                },
            }
        }
    }
    question = Question.from_elastic_query_dsl(dsl)

    assert len(question.questions) == 1
    assert "shay" in question.questions
    assert len(question.filters) == 1
    assert question.filters["_name"] == "test"
def test_query_dsl_with_complex_query():
    """``size`` maps to top_k_retriever; terms filters and the multi_match
    query text are all extracted."""
    dsl = {
        "size": 17,
        "query": {
            "bool": {
                "should": [
                    {
                        "multi_match": {
                            "query": "I am test1",
                            "type": "most_fields",
                            "fields": ["text", "title"],
                        }
                    }
                ],
                "filter": [
                    {"terms": {"year": "2020"}},
                    {"terms": {"quarter": "1"}},
                    {"range": {"date": {"gte": "12-12-12"}}},
                ],
            }
        }
    }
    question = Question.from_elastic_query_dsl(dsl, 7)

    assert len(question.questions) == 1
    assert "I am test1" in question.questions
    # Only the terms filters survive; the range filter is dropped.
    assert len(question.filters) == 2
    assert question.filters["year"] == "2020"
    assert question.filters["quarter"] == "1"
    assert question.top_k_retriever == 17
    assert question.top_k_reader == 7
def test_response_dsl_with_empty_answers():
    """An answer set with no answers maps to an empty Elasticsearch hit list."""
    empty_result = AnswersToIndividualQuestion(question="test question", answers=[])
    response = AnswersToIndividualQuestion.to_elastic_response_dsl(empty_result.__dict__)
    assert response["hits"]["total"]["value"] == 0
    assert len(response["hits"]["hits"]) == 0
def test_response_dsl_with_answers():
    """Fully-populated and all-None answers both convert into Elasticsearch hits."""
    full_answer = Answer(
        answer="answer",
        question="question",
        score=0.1234,
        probability=0.5678,
        context="context",
        offset_start=200,
        offset_end=300,
        offset_start_in_doc=2000,
        offset_end_in_doc=2100,
        document_id="id_1",
        meta={
            "meta1": "meta_value"
        }
    )
    empty_answer = Answer(
        answer=None,
        question=None,
        score=None,
        probability=None,
        context=None,
        offset_start=250,
        offset_end=350,
        offset_start_in_doc=None,
        offset_end_in_doc=None,
        document_id=None,
        meta=None
    )
    sample_answer = AnswersToIndividualQuestion(question="test question", answers=[full_answer, empty_answer])
    response = AnswersToIndividualQuestion.to_elastic_response_dsl(sample_answer.__dict__)
    # Test number of returned answers
    assert response['hits']['total']['value'] == 2
    # Test converted answers
    hits = response['hits']['hits']
    assert len(hits) == 2
    # Test full answer record: score -> _score, document_id -> _id, rest -> _source.
    assert hits[0]["_score"] == 0.1234
    assert hits[0]["_id"] == "id_1"
    assert hits[0]["_source"]["answer"] == "answer"
    assert hits[0]["_source"]["question"] == "question"
    assert hits[0]["_source"]["context"] == "context"
    assert hits[0]["_source"]["probability"] == 0.5678
    assert hits[0]["_source"]["offset_start"] == 200
    assert hits[0]["_source"]["offset_end"] == 300
    assert hits[0]["_source"]["offset_start_in_doc"] == 2000
    assert hits[0]["_source"]["offset_end_in_doc"] == 2100
    assert hits[0]["_source"]["meta"] == {"meta1": "meta_value"}
    # Test empty answer record: None values pass through untouched.
    assert hits[1]["_score"] is None
    assert hits[1]["_id"] is None
    assert hits[1]["_source"]["answer"] is None
    assert hits[1]["_source"]["question"] is None
    assert hits[1]["_source"]["context"] is None
    assert hits[1]["_source"]["probability"] is None
    assert hits[1]["_source"]["offset_start"] == 250
    assert hits[1]["_source"]["offset_end"] == 350
    assert hits[1]["_source"]["offset_start_in_doc"] is None
    assert hits[1]["_source"]["offset_end_in_doc"] is None
    assert hits[1]["_source"]["meta"] is None
| StarcoderdataPython |
6579324 | <gh_stars>0
from setuptools import setup
# Packaging metadata for the grow-ext-kintaro extension.
setup(
    name='grow-ext-kintaro',
    version='1.0.4',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    include_package_data=False,
    packages=[
        'kintaro',
    ],
    # NOTE(review): dependencies are pinned to exact versions; relaxing them
    # would require re-testing against newer google-api-python-client releases.
    install_requires=[
        'google-api-python-client==1.6.2',
        'python-slugify==3.0.2',
    ],
)
| StarcoderdataPython |
1858585 | <filename>Genotypes.py
from os import listdir
import re
# Collect every bgzipped VCF placed by the user in the files/ directory.
files = listdir('./../files')
vcfs = [i for i in filter(lambda x: x.endswith('.vcf.gz'), files)]
# Sample prefixes: the VCF file names with the extension stripped.
prefix = [re.sub('.vcf.gz', '', vcf) for vcf in vcfs]
if len(prefix) == 0:
    print("There are no vcf.gz files in the files/ directory.")
    print("Bye!")
    exit()
else:
    from multiprocessing import Pool
    from os import system
    from Functions import ExtractRsID
    import time
    print("Please stand by. Processing the VCF files might take a while, depending on their size.")
    start_time = time.time()
    # Extract rsIDs from every VCF in parallel (one worker per file).
    pool = Pool()
    pool.map(ExtractRsID, prefix)
    print("---Processing {} VCFs took {} minutes ---".format(len(prefix), str((time.time() - start_time)/60)))
from Functions import importRaw
# Outer-merge all per-sample genotype tables on the family ID column (FID).
A = importRaw(prefix[0])
for i in range(1,len(prefix)):
    B = importRaw(prefix[i])
    A = A.merge(B, how = 'outer', on = 'FID')
A.to_csv("./../Outputs/Genotypes.csv", sep=",")
from os import system
# Remove intermediate PLINK artifacts left behind by the extraction step.
system('rm *.raw *.ped *.map *.nosex *.log')
print("The rsID-specific genotypes of your cohort are now saved at Outputs/Genotypes.csv")
| StarcoderdataPython |
11328380 | <gh_stars>1-10
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from typing import Optional
from pydantic import BaseModel
# Schemas
class Password(BaseModel):
    """Schema describing a password policy."""

    # Desired password length in characters.
    # BUG FIX: a default is required so that `Password()` below validates;
    # previously the required field made module import fail.
    length: int = 8

    # BUG FIX: this method was previously named `length`, shadowing the
    # pydantic field of the same name and breaking the model entirely.
    def set_length(self, length):
        """Update the stored length and return it."""
        self.length = length
        return self.length


password = Password()
| StarcoderdataPython |
3567326 | <reponame>connectthefuture/tensorflow<gh_stars>10-100
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enum for metric keys."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class MetricKey(object):
    """Metric key strings.

    Keys containing ``%d`` are templates to be filled with a class index;
    keys containing ``%f`` are templates filled with a threshold value.
    """
    LOSS = "loss"
    AUC = "auc"
    CLASS_AUC = "auc/class%d"
    PREDICTION_MEAN = "labels/prediction_mean"
    CLASS_PREDICTION_MEAN = "labels/prediction_mean/class%d"
    CLASS_LOGITS_MEAN = "labels/logits_mean/class%d"
    CLASS_PROBABILITY_MEAN = "labels/probability_mean/class%d"
    LABEL_MEAN = "labels/actual_label_mean"
    CLASS_LABEL_MEAN = "labels/actual_label_mean/class%d"
    ACCURACY = "accuracy"
    ACCURACY_BASELINE = "accuracy/baseline_label_mean"
    ACCURACY_MEAN = "accuracy/threshold_%f_mean"
    PRECISION_MEAN = "precision/positive_threshold_%f_mean"
    RECALL_MEAN = "recall/positive_threshold_%f_mean"
| StarcoderdataPython |
1766466 | <reponame>tordans/osmcha-django<filename>osmchadjango/users/migrations/0011_auto_20190206_1257.py
# Generated by Django 2.0.10 on 2019-02-06 12:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make MappingTeam.name unique on its own and
    drop the previous unique_together constraint. Do not edit by hand."""

    dependencies = [
        ('users', '0010_auto_20190123_1129'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mappingteam',
            name='name',
            field=models.CharField(db_index=True, max_length=255, unique=True),
        ),
        migrations.AlterUniqueTogether(
            name='mappingteam',
            unique_together=set(),
        ),
    ]
| StarcoderdataPython |
333187 | <reponame>SantiagoMille/micom<filename>micom/workflows/db_media.py
"""Test growth media for a model database."""
import pandas as pd
from cobra.medium import find_external_compartment
from micom.annotation import annotate_metabolites_from_exchanges
from micom.db import load_zip_model_db, load_manifest
from micom.workflows.core import workflow
from micom.workflows.media import process_medium
import micom.media as mm
from micom.logger import logger
from micom.solution import OptimizationError
from micom.util import load_model
from micom.qiime_formats import load_qiime_model_db
import re
from tempfile import TemporaryDirectory
def _grow(args):
    """Get the maximum growth rate of a single model under a given medium.

    Parameters
    ----------
    args : tuple of (str, pandas.Series)
        Model file path and the medium fluxes indexed by exchange reaction id.

    Returns
    -------
    float
        The model's maximum growth rate (may be NaN on solver failure).
    """
    file, med = args
    mod = load_model(file)
    good = med[med.index.isin([r.id for r in mod.exchanges])]
    if len(good) == 0:
        # BUG FIX: the `%s` placeholder was previously never filled because
        # the `file` argument was not passed to the logger call.
        logger.warning(
            "Could not find any reactions from the medium in `%s`. "
            "Maybe a mismatch in IDs?",
            file,
        )
    # Reuse the already-filtered medium instead of recomputing the same filter.
    mod.medium = good
    return mod.slim_optimize()
def _try_complete(args):
    """Try to complete the medium for a model.

    ``args`` is a tuple of (model file, medium fluxes, growth target,
    maximum added import, minimize-components flag, weights).

    Returns a tuple ``(can_grow, n_added, fixed)`` where ``fixed`` is the
    completed import-flux Series (all-NaN when completion failed).
    """
    file, med, growth, max_import, mip, w = args
    mod = load_model(file)
    exc = find_external_compartment(mod)
    try:
        fixed = mm.complete_medium(
            mod, med, growth, max_import=max_import, minimize_components=mip, weights=w
        )
        # Count imports that had to be added on top of the supplied medium.
        added = sum(i not in med.index for i in fixed.index)
        can_grow = True
        logger.info("Could grow `%s` by adding %d import." % (file, added))
    except OptimizationError:
        fixed = pd.Series(float("nan"), index=med.index)
        added = float("nan")
        can_grow = False
        logger.info("Could not grow `%s`." % file)
    # Normalize reaction ids: replace the model-specific external-compartment
    # suffix (plain or bracketed/delimited) with the common "_m" suffix.
    fixed.index = [
        re.sub(
            "(_{}$)|([^a-zA-Z0-9 :]{}[^a-zA-Z0-9 :]$)".format(exc, exc),
            "_m",
            rid,
        )
        for rid in fixed.index
    ]
    return (can_grow, added, fixed)
def check_db_medium(model_db, medium, threads=1):
    """Complete a growth medium for all models in a database.

    Arguments
    ---------
    model_db : str
        A pre-built model database. If ending in `.qza` must be a Qiime 2
        artifact of type `MetabolicModels[JSON]`. Can also be a folder,
        zip (must end in `.zip`) file or None if the taxonomy contains a
        column `file`.
    medium : pd.DataFrame
        A growth medium. Must have columns "reaction" and "flux" denoting
        exchange reactions and their respective maximum flux. Can not be sample
        specific.
    threads : int >=1
        The number of parallel workers to use when building models. As a
        rule of thumb you will need around 1GB of RAM for each thread.

    Returns
    -------
    pd.DataFrame
        Returns an annotated manifest file with a column `can_grow` that tells you
        whether the model can grow on the (fixed) medium, and a column `growth_rate`
        that gives the growth rate.
    """
    medium = process_medium(medium, ["dummy"])
    medium.index = medium.global_id
    # Compressed databases are unpacked into a temporary directory first.
    compressed = model_db.endswith(".qza") or model_db.endswith(".zip")
    if compressed:
        tdir = TemporaryDirectory(prefix="micom_")
    if model_db.endswith(".qza"):
        manifest = load_qiime_model_db(model_db, tdir.name)
    elif model_db.endswith(".zip"):
        manifest = load_zip_model_db(model_db, tdir.name)
    else:
        manifest = load_manifest(model_db)
    rank = manifest["summary_rank"][0]
    logger.info(
        "Checking %d %s-level models on a medium with %d components."
        % (manifest.shape[0], rank, len(medium))
    )
    # Evaluate each model's maximum growth rate in parallel workers.
    args = [(f, medium.flux) for f in manifest.file]
    results = workflow(_grow, args, threads)
    manifest["growth_rate"] = results
    # Growth below the solver tolerance (1e-6) is treated as "no growth".
    manifest["can_grow"] = manifest.growth_rate.notna() & (manifest.growth_rate > 1e-6)
    if compressed:
        tdir.cleanup()
    return manifest
def complete_db_medium(
    model_db,
    medium,
    growth=0.001,
    max_added_import=1,
    minimize_components=False,
    weights=None,
    threads=1,
):
    """Complete a growth medium for all models in a database.

    Arguments
    ---------
    model_db : str
        A pre-built model database. If ending in `.qza` must be a Qiime 2
        artifact of type `MetabolicModels[JSON]`. Can also be a folder,
        zip (must end in `.zip`) file or None if the taxonomy contains a
        column `file`.
    medium : pd.DataFrame
        A growth medium. Must have columns "reaction" and "flux" denoting
        exchange reactions and their respective maximum flux. Can not be sample
        specific.
    growth : positive float or pandas.Series
        The minimum growth rate the model has to achieve with the (fixed) medium. If
        a Series will have a minimum growth rate for each id/taxon in the model db.
    max_added_import : positive float
        Maximum import flux for each added additional import not included in the growth
        medium. If positive will expand the medium with additional imports in order to
        fulfill the growth objective.
    minimize_components : boolean
        Whether to minimize the number of components instead of the total
        import flux. Might be more intuitive if set to True but may also be
        slow to calculate.
    weights : str
        Will scale the fluxes by a weight factor. Can either be "mass" which will
        scale by molecular mass, a single element which will scale by
        the elemental content (for instance "C" to scale by carbon content).
        If None every metabolite will receive the same weight.
        Will be ignored if `minimize_components` is True.
    threads : int >=1
        The number of parallel workers to use when building models. As a
        rule of thumb you will need around 1GB of RAM for each thread.

    Returns
    -------
    tuple of (manifest, import fluxes)
        Returns an annotated manifest file with a column `can_grow` that tells you
        whether the model can grow on the (fixed) medium, and a column `added` that
        gives the number of added imports apart from the ones in the medium.
    """
    medium = process_medium(medium, ["dummy"])
    medium.index = medium.global_id
    # Compressed databases are unpacked into a temporary directory first.
    compressed = model_db.endswith(".qza") or model_db.endswith(".zip")
    if compressed:
        tdir = TemporaryDirectory(prefix="micom_")
    if model_db.endswith(".qza"):
        manifest = load_qiime_model_db(model_db, tdir.name)
    elif model_db.endswith(".zip"):
        manifest = load_zip_model_db(model_db, tdir.name)
    else:
        manifest = load_manifest(model_db)
    rank = manifest["summary_rank"][0]
    logger.info(
        "Checking %d %s-level models on a medium with %d components."
        % (manifest.shape[0], rank, len(medium))
    )
    # A scalar growth target is broadcast to every model id in the manifest.
    if not isinstance(growth, pd.Series):
        growth = pd.Series(growth, index=manifest.id)
    manifest.index = manifest.id
    args = [
        (
            manifest.loc[i, "file"],
            medium.flux,
            growth[i],
            max_added_import,
            minimize_components,
            weights,
        )
        for i in manifest.index
    ]
    results = workflow(_try_complete, args, threads)
    manifest["can_grow"] = [r[0] for r in results]
    manifest["added"] = [r[1] for r in results]
    # Missing fluxes (failed completions / absent reactions) are set to zero.
    imports = pd.DataFrame.from_records([r[2] for r in results]).fillna(0.0)
    imports.index = manifest.id
    if compressed:
        tdir.cleanup()
    return (manifest, imports)
def _annotate(f):
    """Load the model stored at *f* and return its exchange-metabolite annotations."""
    return annotate_metabolites_from_exchanges(load_model(f))
def db_annotations(
    model_db,
    threads=1,
):
    """Get metabolite annotations from a model DB.

    Arguments
    ---------
    model_db : str
        A pre-built model database. If ending in `.qza` must be a Qiime 2
        artifact of type `MetabolicModels[JSON]`. Can also be a folder,
        zip (must end in `.zip`) file or None if the taxonomy contains a
        column `file`.
    threads : int >=1
        The number of parallel workers to use when building models. As a
        rule of thumb you will need around 1GB of RAM for each thread.

    Returns
    -------
    pd.DataFrame
        Annotations for all exchanged metabolites.
    """
    # Compressed databases are unpacked into a temporary directory first.
    compressed = model_db.endswith(".qza") or model_db.endswith(".zip")
    if compressed:
        tdir = TemporaryDirectory(prefix="micom_")
    if model_db.endswith(".qza"):
        manifest = load_qiime_model_db(model_db, tdir.name)
    elif model_db.endswith(".zip"):
        manifest = load_zip_model_db(model_db, tdir.name)
    else:
        manifest = load_manifest(model_db)
    rank = manifest["summary_rank"][0]
    logger.info(
        "Getting annotations from %d %s-level models ." % (manifest.shape[0], rank)
    )
    args = manifest.file.tolist()
    results = workflow(_annotate, args, threads)
    # Models may share metabolites, so drop duplicated annotation rows.
    anns = pd.concat(results).drop_duplicates()
    if compressed:
        tdir.cleanup()
    return anns
| StarcoderdataPython |
12820358 | <gh_stars>0
#!/usr/bin/env python
"""
Script for testing kernel code before
running the experimental code.
"""
import numpy as np
from data import get_dataset
import kernel
def is_pos_def(K):
    """Return True if the symmetric kernel matrix *K* is positive definite.

    Uses ``eigvalsh``, which is the correct routine for symmetric/Hermitian
    Gram matrices: it returns real eigenvalues, avoiding the spurious tiny
    complex parts that ``eigvals`` can produce from floating-point asymmetry.
    """
    return bool(np.all(np.linalg.eigvalsh(K) > 0))
def test_nsk(B):
    """Build the normalized set kernel on bags *B*, print its Gram matrix and
    whether it is positive definite."""
    k = kernel.by_name('nsk', base_kernel='rbf', gamma=1e-1, normalization='averaging')
    K = k(B, B)
    # BUG FIX: Python 2 `print K` statements are a SyntaxError under Python 3;
    # print() calls behave the same under both interpreters for a single arg.
    print(K)
    print(is_pos_def(K))
def main():
    """Entry point: load the musk1 dataset and smoke-test the NSK kernel on its bags."""
    data = get_dataset('musk1')
    test_nsk(data.bags)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3477754 | <reponame>tokuma09/MLproject_template
import os
import pickle
import tempfile
from google.cloud import storage as gcs
class GCSOperator():
    """Google Cloud Storage wrapper.

    Provides upload/download/delete helpers, direct pickle loading without an
    intermediate local file, and GCS path utilities. All operations target the
    default bucket unless ``bucket_name`` is passed explicitly.
    """

    def __init__(self, project_id, bucket_name, credentials=None):
        """Create the GCS client and resolve the default bucket.

        Parameters
        ----------
        project_id : str
            GoogleCloudPlatform Project ID
        bucket_name : str
            Default GoogleCloudStorage bucket name
        credentials : optional
            GoogleCloudPlatform credential information, by default None
        """
        self._client = gcs.Client(project_id, credentials=credentials)
        self._bucket_name = bucket_name
        self._bucket = self._client.get_bucket(bucket_name)
        self._suffix = 'gs://'

    def _get_blob(self, gcs_path, bucket_name=None):
        """Return a blob handle for *gcs_path* in the given or default bucket.

        Consolidates the bucket-resolution logic that was previously
        duplicated in every public method.
        """
        if bucket_name is None:
            return self._bucket.blob(gcs_path)
        return self._client.get_bucket(bucket_name).blob(gcs_path)

    def upload_file(self,
                    gcs_path,
                    local_path,
                    bucket_name=None,
                    delete_local=False):
        """Upload a local file to GCS, optionally deleting the local copy.

        Parameters
        ----------
        gcs_path : str
            GCS file path
        local_path : str
            local file path
        bucket_name : str, optional
            bucket to use instead of the default, by default None
        delete_local : bool, optional
            delete local file after upload, by default False
        """
        blob = self._get_blob(gcs_path, bucket_name)
        blob.upload_from_filename(local_path)
        print(f'Upload {local_path} to {gcs_path}')
        if delete_local:
            os.remove(local_path)
            print(f'Delete {local_path}')

    def delete_file(self, gcs_path, bucket_name=None):
        """Delete a GCS file."""
        self._get_blob(gcs_path, bucket_name).delete()
        print(f'Delete {gcs_path} in the GCS')

    def download_file(self, gcs_path, local_path, bucket_name=None):
        """Download a GCS file to *local_path*."""
        self._get_blob(gcs_path, bucket_name).download_to_filename(local_path)
        print(f'Download {gcs_path} to {local_path}')

    def is_exist(self, gcs_path, bucket_name=None):
        """Return True when *gcs_path* exists in the bucket."""
        return self._get_blob(gcs_path, bucket_name).exists()

    def load_pickle(self, gcs_path, bucket_name=None):
        """Unpickle a GCS object without keeping a local copy.

        Intended for ML models: the blob is streamed into a temporary file
        which is discarded after loading.

        Returns
        -------
        model
            The unpickled object (e.g. a trained scikit-learn model).
        """
        blob = self._get_blob(gcs_path, bucket_name)
        with tempfile.TemporaryFile() as fp:
            # download blob into temp file, then rewind and unpickle
            blob.download_to_file(fp)
            fp.seek(0)
            model = pickle.load(fp)
            print('load model')
        return model

    def get_fullpath(self, gcs_path, bucket_name=None):
        """Return the full ``gs://`` path for *gcs_path*.

        Parameters
        ----------
        gcs_path : str
            GCS path, deeper than bucket_name
        bucket_name : str, optional
            GCS bucket name, by default None

        Returns
        -------
        str
            GCS full path
        """
        bucket = self._bucket_name if bucket_name is None else bucket_name
        return os.path.join(self._suffix, bucket, gcs_path)

    def show_bucket_names(self):
        """Print the name of every bucket in the project."""
        for bucket in self._client.list_buckets():
            print(bucket.name)

    def show_file_names(self):
        """Print the name of every object in the default bucket."""
        for file in self._client.list_blobs(self._bucket):
            print(file.name)
| StarcoderdataPython |
1745425 | <reponame>namuan/print-rider-py
from http import HTTPStatus
from flask import request, make_response, render_template, current_app, redirect
from printrider import db_config
from printrider.prints import prints_blueprint
from printrider.prints.service import save_document, find_document
@prints_blueprint.route('/')
def index():
    """Redirect the root URL to the HttpRider documentation site."""
    return redirect("https://deskriders.dev/http-rider-docs", code=302)
@prints_blueprint.route('/prints', methods=["POST"])
def save_print():
    """Persist the posted JSON document; respond 201 with its Location header."""
    doc_json = request.get_json()
    doc_id = save_document(db_config, doc_json)
    resp = make_response('', HTTPStatus.CREATED)
    # Absolute URL of the newly created resource.
    resp.headers['Location'] = current_app.config['HOST_NAME'] + "/prints/" + doc_id
    return resp
@prints_blueprint.route("/prints/<print_id>", methods=["GET"])
def get_print(print_id):
    """Render the stored document identified by *print_id*."""
    document_code = find_document(db_config, print_id)
    return render_template('print.html', code=document_code)
@prints_blueprint.errorhandler(ValueError)
def handle_bad_request(e):
    """Render the error page for ValueError raised inside view functions."""
    return render_template('error.html', error_msg=e)
@prints_blueprint.errorhandler(HTTPStatus.BAD_REQUEST)
@prints_blueprint.errorhandler(HTTPStatus.INTERNAL_SERVER_ERROR)
def handle_all_errors(e):
    """Render the error page for 400 and 500 HTTP errors."""
    return render_template('error.html', error_msg=e.description)
| StarcoderdataPython |
9679748 | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def ndim(a):
    """
    Return the number of dimensions of a tensor.

    Parameters
    ----------
    a : array_like
        Input tensor. If it is not already a tensor, a conversion is
        attempted.

    Returns
    -------
    number_of_dimensions : int
        The number of dimensions in `a`. Scalars are zero-dimensional.

    See Also
    --------
    ndarray.ndim : equivalent method
    shape : dimensions of tensor
    Tensor.shape : dimensions of tensor

    Examples
    --------
    >>> import mars.tensor as mt

    >>> mt.ndim([[1,2,3],[4,5,6]])
    2
    >>> mt.ndim(mt.array([[1,2,3],[4,5,6]]))
    2
    >>> mt.ndim(1)
    0

    """
    from ..datasource import asarray

    # Fast path: tensors (and ndarray-likes) expose .ndim directly;
    # anything else is converted first.
    try:
        return a.ndim
    except AttributeError:
        return asarray(a).ndim
| StarcoderdataPython |
5053367 | <filename>app/stack_widgets/__init__.py
from .playlist_widget import PlayListWidget
from .kugou_api_widget import KugouApiWidget
from .metadata_widget import MetadataWidget
| StarcoderdataPython |
# Defines the following shortcuts:
#   signal.name -> return the short name of the signal
#   signal -> display the content of the signal nicely
#   signal(3) -> recompute the signal at time 3, and display it nicely
#   signal +1 -> increment the signal time by 1, recompute, and display.
#   signal.deps -> display the dependency graph up to the default depth (2)
#   signal.deps(6) -> same, but with depth = 6.
#   entity -> same as print(entity)
# Changes the prompt to '%'.
from dynamic_graph.signal_base import *
from dynamic_graph.entity import *
from matlab import matlab
# Enables shortcut "name"
def sig_short_name(self):
    """Short signal name: the text after the last ':' in the full name."""
    return self.getName().rsplit(':', 1)[-1]
# Expose the short name as the read-only property `SignalBase.name`.
setattr(SignalBase,'name',property(sig_short_name))
# Enables shortcuts "m"
# This code implements a pseudo function 'm' in the class signal_base,
# with no args, or optional args. Three calls can be made:
# - sig.m : print the current value.
# - sig.m(time): recompute at given <time>, and display the current value
# - sig.m +time: recompute at <time> after current time, and display.
class PrettySignalPrint:
    """Matlab-style pretty printer for a signal, exposed as ``sig.m``.

    - ``sig.m``        : display the current value
    - ``sig.m(t)``     : recompute at time t, then display
    - ``sig.m + dt``   : recompute at current time + dt, then display
    """
    sig = None
    def __init__(self, sig):
        self.sig = sig
    def __str__(self):
        # "<short name> = <matlab-formatted value>"
        return self.sig.name + " = " + str(matlab(self.sig.value))
    def __repr__(self):
        return str(self)
    def __call__(self, iter):
        # sig.m(t): recompute the signal at time t and return self for display.
        self.sig.recompute(iter)
        return self
    def __add__(self, iter):
        # sig.m + dt: recompute at current time + dt and return self for display.
        self.sig.recompute(self.sig.time + iter)
        return self
def sigMatPrint(sig):
    """Wrap *sig* in its Matlab-style pretty printer."""
    return PrettySignalPrint(sig)
# Register the pretty printer as the read-only property `SignalBase.m`.
setattr(SignalBase, 'm', property(PrettySignalPrint))
#print('Pretty matlab print set')
# Enable the same as 'm', but directly on the signal object.
def sigRepr(self):
    """Matlab-style text for a signal: "<short name> = <value>"."""
    return self.name + ' = ' + str(matlab(self.value))
def sigCall(sig, iter):
    """sig(t): recompute the signal at time t and print it."""
    sig.recompute(iter)
    print(sigRepr(sig))
def sigTimeIncr(sig, iter):
    """sig + dt: recompute the signal at current time + dt and print it."""
    sig.recompute(sig.time + iter)
    print(sigRepr(sig))
# Install the shortcuts directly on SignalBase (same effect as `.m`, but
# available on the signal object itself).
setattr(SignalBase, '__repr__', sigRepr)
setattr(SignalBase, '__call__', sigCall)
setattr(SignalBase, '__add__', sigTimeIncr)
# Enables shortcut "deps"
# Implements the peudo function 'deps', that can be called without arg,
# or specifying a specific depth to be printed.
class SignalDepPrint:
    """Pseudo-function ``sig.deps``: print the dependency graph of a signal.

    ``sig.deps`` prints with the default depth (2); ``sig.deps(n)`` sets the
    default depth to n and prints with it.
    """
    defaultDepth = 2
    sig = None
    def __init__(self, sig):
        self.sig = sig
    def __repr__(self):
        return self.sig.displayDependencies(self.defaultDepth)
    def __call__(self, depth):
        # Note: this mutates the default depth for subsequent displays.
        self.defaultDepth = depth
        return self
setattr(SignalBase, 'deps', property(SignalDepPrint))
# `entity.sigs` displays the entity's signals; `repr(entity)` behaves like print(entity).
setattr(Entity, 'sigs', property(Entity.displaySignals))
setattr(Entity, '__repr__', Entity.__str__)
# Change the interactive interpreter prompt to a matlab-like '%'.
import sys
sys.ps1 = '% '
# Enable functions that can be called without parentheses.
def optionalparentheses(f):
    """Decorator making *f* callable with or without parentheses.

    Evaluating the decorated name (its repr) calls *f* and shows its result
    when that result is a string; calling it with arguments forwards them.
    """
    class _Wrapper:
        def __init__(self, functor):
            self._functor = functor

        def __repr__(self):
            result = self._functor()
            return result if isinstance(result, str) else ''

        def __call__(self, *args):
            return self._functor(*args)

    return _Wrapper(f)
| StarcoderdataPython |
4840469 | # Maximum path sum II
# Problem 67
# By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
# 3
# 7 4
# 2 4 6
# 8 5 9 3
# That is, 3 + 7 + 4 + 9 = 23.
# Find the maximum total from top to bottom in triangle.txt (right click and 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows.
# NOTE: This is a much more difficult version of Problem 18. It is not possible to try every route to solve this problem, as there are 299 altogether! If you could check one trillion (1012) routes every second it would take over twenty billion years to check them all. There is an efficient algorithm to solve it. ;o)
def solve(problem):
    """Return the maximum top-to-bottom path sum of a number triangle.

    Parameters
    ----------
    problem : iterable of rows
        Each row is an iterable of numeric strings (or ints); row k has k+1
        entries and each node connects to the two adjacent nodes below it.

    Returns
    -------
    int
        The maximum path sum (dynamic programming, O(number of nodes)).
    """
    # Each node becomes [value, best-path-sum-ending-here].
    traversal = [
        tuple([int(node), int(node)] for node in row) for row in problem
    ]
    size = len(traversal)
    for row, nodes in enumerate(traversal):
        next_row = row + 1
        if next_row == size:
            break
        # Relax both children of every node in the current row.
        for col, node in enumerate(nodes):
            for child_col in (col, col + 1):
                child = traversal[next_row][child_col]
                distance = node[1] + child[0]
                if distance > child[1]:
                    child[1] = distance
    return max(node[1] for node in traversal[-1])
def get_triangle():
    """Read triangle.txt into a tuple of rows, each a tuple of numeric strings."""
    with open('triangle.txt', 'r') as f:
        triangle = tuple(tuple(line.split()) for line in f)
    return triangle
if __name__ == '__main__':
    print(__file__, ': ', solve(get_triangle()))
| StarcoderdataPython |
6478263 | import pytest
@pytest.fixture
def testcases():
    """Two snapshot test sources: one whose snapshot stays used, one that will
    later be orphaned when its test is removed."""
    return {
        "used": (
            """
            def test_used(snapshot):
                assert snapshot == 'used'
            """
        ),
        "unused": (
            """
            def test_unused(snapshot):
                assert snapshot == 'unused'
            """
        ),
    }
@pytest.fixture
def extra_testcases():
    """Additional snapshot test sources used to populate a second test file."""
    return {
        "extra_a": (
            """
            def test_extra_a(snapshot):
                assert snapshot == 'extra_a'
            """
        ),
        "extra_b": (
            """
            def test_extra_b(snapshot):
                assert snapshot == 'extra_b'
            """
        ),
    }
@pytest.fixture
def run_testfiles_with_update(testdir):
    """Factory fixture: write the given test files, then run pytest with
    --snapshot-update so that every snapshot gets generated."""
    def run_testfiles_with_update_impl(**testfiles):
        testdir.makepyfile(
            **{
                filename: "\n\n".join(cases.values())
                for (filename, cases) in testfiles.items()
            }
        )
        result = testdir.runpytest("-v", "--snapshot-update")
        # All snapshots should be (re)generated on this first run.
        result.stdout.re_match_lines((r"[0-9]+ snapshots generated\."))
        return testdir
    return run_testfiles_with_update_impl
@pytest.mark.parametrize(
    (
        "options",
        "expected_status_code",
    ),
    (
        # Unused snapshots fail the run by default...
        (("-v", "--snapshot-details"), 1),
        # ...but --snapshot-warn-unused downgrades them to a warning.
        (("-v", "--snapshot-details", "--snapshot-warn-unused"), 0),
    ),
)
def test_unused_snapshots_details(
    options, expected_status_code, run_testfiles_with_update, testcases
):
    """Unused snapshots are reported with details and affect the exit code."""
    testdir = run_testfiles_with_update(test_file=testcases)
    # Re-write the test file with only the 'used' case, orphaning one snapshot.
    testdir.makepyfile(test_file=testcases["used"])
    result = testdir.runpytest(*options)
    result.stdout.re_match_lines(
        (
            r"1 snapshot passed\. 1 snapshot unused\.",
            r"Unused test_unused \(__snapshots__[\\/]test_file.ambr\)",
            r"Re-run pytest with --snapshot-update to delete unused snapshots\.",
        )
    )
    assert result.ret == expected_status_code
def test_unused_snapshots_details_multiple_tests(
    run_testfiles_with_update, testcases, extra_testcases
):
    """Several unused snapshots from one file are listed together in the report."""
    testdir = run_testfiles_with_update(
        test_file=testcases, test_second_file=extra_testcases
    )
    # Empty the second file so both of its snapshots become unused.
    testdir.makepyfile(
        test_file="\n\n".join(testcases.values()),
        test_second_file="",
    )
    result = testdir.runpytest("-v", "--snapshot-details")
    result.stdout.re_match_lines(
        (
            r"2 snapshots passed\. 2 snapshots unused\.",
            r"Unused test_extra_a, test_extra_b "
            r"\(__snapshots__[\\/]test_second_file.ambr\)",
            r"Re-run pytest with --snapshot-update to delete unused snapshots\.",
        )
    )
    assert result.ret == 1
def test_unused_snapshots_details_multiple_locations(
    run_testfiles_with_update, testcases, extra_testcases
):
    """Unused snapshots spread over several files are each reported per file."""
    testdir = run_testfiles_with_update(
        test_file=testcases, test_second_file=extra_testcases
    )
    # Keep only one test in each file, orphaning one snapshot per file.
    testdir.makepyfile(
        test_file=testcases["used"],
        test_second_file=extra_testcases["extra_a"],
    )
    result = testdir.runpytest("-v", "--snapshot-details")
    result.stdout.re_match_lines_random(
        (
            r"2 snapshots passed\. 2 snapshots unused\.",
            r"Unused test_extra_b \(__snapshots__[\\/]test_second_file.ambr\)",
            r"Unused test_unused \(__snapshots__[\\/]test_file.ambr\)",
            r"Re-run pytest with --snapshot-update to delete unused snapshots\.",
        )
    )
    assert result.ret == 1
def test_unused_snapshots_details_no_details_on_deletion(
    run_testfiles_with_update, testcases
):
    """With --snapshot-update, unused snapshots are deleted (and reported as such)."""
    testdir = run_testfiles_with_update(test_file=testcases)
    # Orphan one snapshot, then run with the update flag so it gets deleted.
    testdir.makepyfile(test_file=testcases["used"])
    result = testdir.runpytest("-v", "--snapshot-details", "--snapshot-update")
    result.stdout.re_match_lines(
        (
            r"1 snapshot passed\. 1 unused snapshot deleted\.",
            r"Deleted test_unused \(__snapshots__[\\/]test_file.ambr\)",
        )
    )
    assert result.ret == 0
| StarcoderdataPython |
4906824 | <gh_stars>1-10
##
## Copyright (C) 2017, <NAME>, all rights reserved.
##
## This file is part of Camera Network
## (see https://bitbucket.org/amitibo/cameranetwork_git).
##
## Redistribution and use in source and binary forms, with or without modification,
## are permitted provided that the following conditions are met:
##
## 1) The software is provided under the terms of this license strictly for
## academic, non-commercial, not-for-profit purposes.
## 2) Redistributions of source code must retain the above copyright notice, this
## list of conditions (license) and the following disclaimer.
## 3) Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions (license) and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## 4) The name of the author may not be used to endorse or promote products derived
## from this software without specific prior written permission.
## 5) As this software depends on other libraries, the user must adhere to and keep
## in place any licensing terms of those libraries.
## 6) Any publications arising from the use of this software, including but not
## limited to academic journal and conference publications, technical reports and
## manuals, must cite the following works:
## <NAME>, <NAME>, <NAME> and <NAME>, "Clouds in The Cloud" Proc. ACCV, pp. 659-674 (2014).
##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
## EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
## INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
## OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.##
"""
Utilities for handling mercurial repository.
Based on code by <NAME>.
"""
import os
from tornado import gen
from utils import sbp_run
class MercurialException(Exception):
    """Raised when an ``hg`` command writes anything to stderr."""
    pass
class Repository():
    """Wrapper around a local Mercurial repository.

    Each public method shells out to the corresponding ``hg`` sub-command
    inside the repository's working directory.
    """

    def __init__(self, repo_path):
        """Create a wrapper for the repository located at *repo_path*."""
        self.path = repo_path
        self.name = os.path.basename(repo_path)
        # argv prefixes for the supported hg sub-commands.
        self.commands = {
            'update': ['hg', 'update'],
            'log': ['hg', 'log'],
            'push': ['hg', 'push'],
            'pull': ['hg', 'pull'],
            'incoming': ['hg', 'incoming'],
            'outgoing': ['hg', 'outgoing'],
        }

    def run_repo_command(self, command, *params):
        """Execute *command* (an argv list) plus *params* against the repository.

        Returns the command's stdout. Raises MercurialException when the
        command writes anything to stderr.
        """
        command = command + list(params)
        stdout, stderr = sbp_run(
            command,
            shell=False,
            working_directory=self.path
        )
        if stderr:
            err_msg = '{command} returned the following error:\n{err}\n'.format(
                command=command,
                err=stderr
            )
            raise MercurialException(err_msg)
        return stdout

    def log(self, *params):
        """Execute the hg log command on the repository."""
        return self.run_repo_command(self.commands['log'], *params)

    @gen.coroutine
    def update(self, *params):
        """Execute the hg update command on the repository."""
        return self.run_repo_command(self.commands['update'], *params)

    def push(self, *params):
        """Execute the hg push command on the repository."""
        return self.run_repo_command(self.commands['push'], *params)

    @gen.coroutine
    def pull(self, *params):
        """Execute the hg pull command on the repository."""
        return self.run_repo_command(self.commands['pull'], *params)

    def incoming(self, *params):
        """Execute the hg incoming command on the repository."""
        # BUG FIX: *params were previously accepted but silently dropped.
        return self.run_repo_command(self.commands['incoming'], *params)

    def outgoing(self, *params):
        """Execute the hg outgoing command on the repository."""
        # BUG FIX: *params were previously accepted but silently dropped.
        return self.run_repo_command(self.commands['outgoing'], *params)
| StarcoderdataPython |
1945449 | # SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
import sys
from functools import lru_cache
__version__ = "2.1.0"
def __dir__():
    """Names reported by dir() on this module, lazy attributes included."""
    public = (
        "auto_refreshing",
        "ProxyItem",
        "correct_canvas_resolution",
        "import_module",
        "timed",
        "wait_for_writeback",
    )
    return list(public)
def import_module(name, package=None):
    """Import a module.
    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.
    """
    # Count leading dots to determine the relative-import level.
    level = 0
    if name.startswith("."):
        if not package:
            msg = (
                "the 'package' argument is required to perform a relative "
                "import for {!r}"
            )
            raise TypeError(msg.format(name))
        for character in name:
            if character != ".":
                break
            level += 1
        if package not in sys.modules:
            # make sure the package exists
            __import__(package, {"__package__": None})
        name = name[level:]
    # NOTE(review): passing a plain dict as the globals argument of
    # __import__ is unusual; presumably required by Anvil's runtime -- confirm.
    mod = __import__(name, {"__package__": package}, level=level)
    # __import__ returns the top-level package; walk down to the target module.
    attrs = name.split(".")[1:]
    for attr in attrs:
        mod = getattr(mod, attr)
    return mod
# Map of lazily-exposed attribute names to the relative module defining
# each one; consumed by the module-level __getattr__ below (PEP 562).
_imports = {
    "auto_refreshing": "._auto_refreshing",
    "BindingRefreshDict": "._auto_refreshing",
    "ProxyItem": "._auto_refreshing",
    "correct_canvas_resolution": "._canvas_helpers",
    "timed": "._timed",
    "wait_for_writeback": "._writeback_waiter",
}
@lru_cache(maxsize=None)
def __getattr__(name):
    """Lazily import and cache module attributes (PEP 562 module getattr)."""
    try:
        rel_import = _imports[name]
    except KeyError:
        # Suppress the KeyError context so callers see a clean AttributeError.
        raise AttributeError(name) from None
    module = import_module(rel_import, __package__)
    return getattr(module, name)
| StarcoderdataPython |
3365535 | <filename>third-party/xed/examples/mfile.py
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import sys
import os
def _find_dir(d):
    """Walk upward from the CWD looking for a directory entry named *d*.

    Returns the path of the first match, or None once the filesystem
    root is reached without finding it.
    """
    current = os.getcwd()
    previous = ''
    while current != previous:
        candidate = os.path.join(current, d)
        if os.path.exists(candidate):
            return candidate
        previous = current
        (current, _tail) = os.path.split(current)
    return None
def _fatal(m):
    """Report a fatal XED build error on stderr and exit with status 1."""
    message = "\n\nXED build error: %s\n\n" % (m)
    sys.stderr.write(message)
    sys.exit(1)
def _try_mbuild_import():
    """Return True if the mbuild package is already importable."""
    try:
        import mbuild  # noqa: F401 -- imported only to probe availability
        return True
    except ImportError:
        # Narrowed from a bare except: only a missing package means
        # "not importable"; other errors should surface.
        return False
def _find_add_import(d):
    """Locate directory *d* above the CWD and prepend it to sys.path.

    Dies via _fatal() if the directory cannot be found.
    """
    found = _find_dir(d)
    if found and os.path.exists(found):
        sys.path = [found] + sys.path
        return
    _fatal("Could not find {} directory".format(d))
def _find_mbuild_import():
    """Ensure mbuild is importable, searching the source tree if needed."""
    if not _try_mbuild_import():
        _find_add_import('mbuild')
def _find_common():
    """Locate xed_build_common.py above the CWD and add its directory to sys.path.

    Dies via _fatal() if the file cannot be found.
    """
    found = _find_dir('xed_build_common.py')
    # Guard the None case: os.path.dirname(None) would raise a TypeError
    # before the intended fatal diagnostic could run.
    if found:
        p = os.path.dirname(found)
        if os.path.exists(p):
            sys.path = [p] + sys.path
            return
    _fatal("Could not find xed_build_common.py")
def setup():
    """Validate the interpreter version and set up build import paths."""
    major = sys.version_info[0]
    minor = sys.version_info[1]
    if major >= 3:
        _fatal("Python version 3.x not supported.")
    if major == 2 and minor < 7:
        _fatal("Need python version 2.7 or later.")
    _find_mbuild_import()
    # When building in the source tree, xed_build_common.py lives in the
    # parent of the examples directory; in a kit it sits alongside the
    # example sources.
    _find_common()
def work():
    """Run the example build, dying with a readable diagnostic on failure."""
    import xed_build_common
    import xed_examples_mbuild
    try:
        retval = xed_examples_mbuild.execute()
    except Exception as e:
        # 'as' form replaces the legacy 'except Exception, e:' spelling;
        # it parses under both Python 2.7 (required above) and 3.x tooling.
        xed_build_common.handle_exception_and_die(e)
    return retval
# Script entry point: configure import paths, run the build, propagate status.
if __name__ == "__main__":
    setup()
    retval = work()
    sys.exit(retval)
| StarcoderdataPython |
11221730 | <reponame>GeniALE/SiteWebGeniALE
from django.apps import AppConfig
class OrchesterCmsIntegrationConfig(AppConfig):
    # Django application config; 'name' must match the package path.
    name = 'orchester_cms_integration'
| StarcoderdataPython |
11245408 | import psutil
from django.db import models
class Stats(models.Model):
    """Read-only host statistics sourced from psutil."""

    @staticmethod
    def sizeof_fmt(num, suffix='B'):
        """Render a byte count as a human-readable string with binary prefixes."""
        units = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']
        for unit in units:
            if abs(num) < 1024.0:
                return "%3.1f%s%s" % (num, unit, suffix)
            num /= 1024.0
        return "%.1f%s%s" % (num, 'Yi', suffix)

    @staticmethod
    def get_disk_usage():
        """Percent of the current working directory's disk that is in use."""
        return psutil.disk_usage(".").percent

    @staticmethod
    def get_disk_free_space():
        """Free space on the current disk, human-readable."""
        usage = psutil.disk_usage(".")
        return Stats.sizeof_fmt(usage.free)

    @staticmethod
    def get_disk_full_space():
        """Total capacity of the current disk, human-readable."""
        usage = psutil.disk_usage(".")
        return Stats.sizeof_fmt(usage.total)

    @staticmethod
    def get_cpu_usage():
        """CPU utilisation (psutil.cpu_percent with interval=0, percpu=True)."""
        return psutil.cpu_percent(0, True)
8189486 | <reponame>42cc/dashr-gw<filename>apps/core/tests/test_utils.py
from decimal import Decimal
from django.test import TestCase
from apps.core.models import GatewaySettings
from apps.core.utils import (
get_minimal_transaction_amount,
get_received_amount,
)
class UtilsTest(TestCase):
    """Regression tests for the gateway fee helpers in apps.core.utils."""

    def setUp(self):
        # Fixture: 0.5% gateway fee and a 0.001 DASH miner-fee cap.
        GatewaySettings.objects.create(
            gateway_fee_percent=Decimal('0.5'),
            max_dash_miner_fee=Decimal('0.001'),
        )

    def test_get_received_amount_deposit(self):
        # Deposits deduct the 0.5% gateway fee; results round to 8 places.
        self.assertEqual(
            get_received_amount('1', 'deposit'),
            Decimal('0.995'),
        )
        self.assertEqual(
            get_received_amount('1.1', 'deposit'),
            Decimal('1.0945'),
        )
        self.assertEqual(
            get_received_amount('0', 'deposit'),
            Decimal('0'),
        )
        self.assertEqual(
            get_received_amount('1.123456789', 'deposit'),
            Decimal('1.11783949'),
        )
        self.assertEqual(
            get_received_amount('1.123456784', 'deposit'),
            Decimal('1.11783949'),
        )

    def test_get_received_amount_withdrawal(self):
        # Withdrawals additionally deduct the miner fee (0.994 on 1 DASH).
        self.assertEqual(
            get_received_amount('1', 'withdrawal'),
            Decimal('0.994'),
        )
        self.assertEqual(
            get_received_amount('1.1', 'withdrawal'),
            Decimal('1.0935'),
        )
        self.assertEqual(
            get_received_amount('0', 'withdrawal'),
            Decimal('0'),
        )
        self.assertEqual(
            get_received_amount('1.123456789', 'withdrawal'),
            Decimal('1.11683949'),
        )
        self.assertEqual(
            get_received_amount('1.123456784', 'withdrawal'),
            Decimal('1.11683949'),
        )

    def test_get_minimal_withdrawal_amount(self):
        # Minimal amounts differ per direction (withdrawal covers fees).
        self.assertEqual(
            get_minimal_transaction_amount('deposit'),
            Decimal('0.00000002'),
        )
        self.assertEqual(
            get_minimal_transaction_amount('withdrawal'),
            Decimal('0.00100504'),
        )
3356482 | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
"""
#############
Visualization
#############
"""
# Optional dependency: print_figure only exists inside IPython.  When the
# import fails, print_figure stays undefined and the repr_* helpers below
# would raise NameError if actually called.
try:
    from IPython.core.pylabtools import print_figure
except Exception as e:
    pass
from matplotlib.cm import get_cmap
import numpy as np
from itertools import cycle, product, groupby
from .segment import Segment
from .timeline import Timeline
from .annotation import Annotation
from .scores import Scores
class Notebook(object):
    """Stateful helper that renders pyannote objects with matplotlib.

    Keeps a per-label line-style mapping plus display settings (temporal
    crop and figure width) shared by the module-level repr_* helpers.
    """

    def __init__(self):
        super(Notebook, self).__init__()
        self.reset()

    def reset(self):
        """Reset label styles, crop and width to their defaults."""
        linewidth = [3, 1]
        linestyle = ['solid', 'dashed', 'dotted']
        cm = get_cmap('Set1')
        colors = [cm(1. * i / 8) for i in range(9)]
        # Infinite generator of (linestyle, linewidth, color) combinations,
        # assigned lazily to labels on first use (see __getitem__).
        self._style_generator = cycle(product(linestyle, linewidth, colors))
        self._style = {None: ('solid', 1, (0.0, 0.0, 0.0))}
        del self.crop
        del self.width

    @property
    def crop(self):
        """Temporal extent to display (None = derive from the resource)."""
        return self._crop

    @crop.setter
    def crop(self, segment):
        self._crop = segment

    @crop.deleter
    def crop(self):
        self._crop = None

    @property
    def width(self):
        """Figure width used by the repr_* helpers."""
        return self._width

    @width.setter
    def width(self, value):
        self._width = value

    @width.deleter
    def width(self):
        self._width = 20

    def __getitem__(self, label):
        """Return the (linestyle, linewidth, color) triple for *label*."""
        if label not in self._style:
            self._style[label] = next(self._style_generator)
        return self._style[label]

    def setup(self, ax=None, ylim=(0, 1), yaxis=False, time=True):
        """Prepare a matplotlib axis (crop, limits, labels) and return it."""
        import matplotlib.pyplot as plt
        if ax is None:
            ax = plt.gca()
        ax.set_xlim(self.crop)
        if time:
            ax.set_xlabel('Time')
        else:
            ax.set_xticklabels([])
        ax.set_ylim(ylim)
        ax.axes.get_yaxis().set_visible(yaxis)
        return ax

    def draw_segment(self, ax, segment, y, label=None, boundaries=True):
        """Draw one segment as a horizontal line with optional boundary ticks."""
        # do nothing if segment is empty
        if not segment:
            return
        linestyle, linewidth, color = self[label]
        # draw segment
        ax.hlines(y, segment.start, segment.end, color,
                  linewidth=linewidth, linestyle=linestyle, label=label)
        if boundaries:
            ax.vlines(segment.start, y + 0.05, y - 0.05,
                      color, linewidth=1, linestyle='solid')
            ax.vlines(segment.end, y + 0.05, y - 0.05,
                      color, linewidth=1, linestyle='solid')

    def get_y(self, segments):
        """
        Parameters
        ----------
        segments : iterator
            `Segment` iterator (sorted)

        Returns
        -------
        y : np.array
            y coordinates of each segment
        """
        # up_to stores the largest end time
        # displayed in each line (at the current iteration)
        # (at the beginning, there is only one empty line)
        up_to = [-np.inf]
        # y[k] indicates on which line to display kth segment
        y = []
        for segment in segments:
            # so far, we do not know which line to use
            found = False
            # try each line until we find one that is ok
            for i, u in enumerate(up_to):
                # if segment starts after the previous one
                # on the same line, then we add it to the line
                if segment.start >= u:
                    found = True
                    y.append(i)
                    up_to[i] = segment.end
                    break
            # in case we went out of lines, create a new one
            if not found:
                y.append(len(up_to))
                up_to.append(segment.end)
        # from line numbers to actual y coordinates
        y = 1. - 1. / (len(up_to) + 1) * (1 + np.array(y))
        return y

    def __call__(self, resource, time=True, legend=True):
        """Dispatch to the plot_* method matching the resource type."""
        if isinstance(resource, Segment):
            self.plot_segment(resource, time=time)
        elif isinstance(resource, Timeline):
            self.plot_timeline(resource, time=time)
        elif isinstance(resource, Annotation):
            self.plot_annotation(resource, time=time, legend=legend)
        elif isinstance(resource, Scores):
            self.plot_scores(resource, time=time, legend=legend)

    def plot_segment(self, segment, ax=None, time=True):
        """Plot a single Segment at mid-height."""
        if not self.crop:
            self.crop = segment
        ax = self.setup(ax=ax, time=time)
        self.draw_segment(ax, segment, 0.5)

    def plot_timeline(self, timeline, ax=None, time=True):
        """Plot a Timeline, packing overlapping segments on separate rows."""
        if not self.crop and timeline:
            self.crop = timeline.extent()
        cropped = timeline.crop(self.crop, mode='loose')
        ax = self.setup(ax=ax, time=time)
        for segment, y in zip(cropped, self.get_y(cropped)):
            self.draw_segment(ax, segment, y)

    def plot_annotation(self, annotation, ax=None, time=True, legend=True):
        """Plot an Annotation with one style per label, optional legend."""
        if not self.crop:
            self.crop = annotation.get_timeline(copy=False).extent()
        cropped = annotation.crop(self.crop, mode='intersection')
        segments = [s for s, _ in cropped.itertracks()]
        ax = self.setup(ax=ax, time=time)
        for (segment, track, label), y in zip(
                cropped.itertracks(yield_label=True),
                self.get_y(segments)):
            self.draw_segment(ax, segment, y, label=label)
        if legend:
            # this gets exactly one legend handle and one legend label per
            # label (avoids repeated legends for repeated tracks with same label)
            H, L = ax.get_legend_handles_labels()
            HL = groupby(sorted(zip(H, L), key=lambda h_l: h_l[1]),
                         key=lambda h_l: h_l[1])
            H, L = zip(*list((next(h_l)[0], l) for l, h_l in HL))
            ax.legend(H, L, bbox_to_anchor=(0, 1), loc=3,
                      ncol=5, borderaxespad=0., frameon=False)

    def plot_scores(self, scores, ax=None, time=True, legend=True):
        """Plot per-label confidence scores as line segments."""
        if not self.crop:
            self.crop = scores.to_annotation().get_timeline(copy=False).extent()
        # Fixed: previously referenced the global `notebook` singleton's
        # crop instead of self.crop, breaking independent instances.
        cropped = scores.crop(self.crop, mode='loose')
        # y-limits come from the *full* score range, not the crop;
        # presumably intentional so limits stay stable across crops -- confirm.
        data = scores.dataframe_.values
        m = np.nanmin(data)
        M = np.nanmax(data)
        ylim = (m - 0.1 * (M - m), M + 0.1 * (M - m))
        ax = self.setup(ax=ax, yaxis=True, ylim=ylim, time=time)
        for segment, track, label, value in cropped.itervalues():
            y = value
            self.draw_segment(ax, segment, y, label=label, boundaries=False)
        if legend:
            # one legend handle/label pair per label (see plot_annotation)
            H, L = ax.get_legend_handles_labels()
            HL = groupby(sorted(zip(H, L), key=lambda h_l: h_l[1]),
                         key=lambda h_l: h_l[1])
            H, L = zip(*list((next(h_l)[0], l) for l, h_l in HL))
            ax.legend(H, L, bbox_to_anchor=(0, 1), loc=3,
                      ncol=5, borderaxespad=0., frameon=False)

    def plot_feature(self, feature, ax=None, time=True, ylim=None):
        """Plot a sliding-window feature as curves over time."""
        if not self.crop:
            self.crop = feature.getExtent()
        window = feature.sliding_window
        indices = window.crop(self.crop, mode='loose')
        t = [window[i].middle for i in indices]
        data = np.take(feature.data, indices, axis=0, mode='clip')
        for i, index in enumerate(indices):
            # mask out-of-range frames instead of clipping to the edge value
            # (np.nan replaces np.NAN, which was removed in NumPy 2.0)
            if index < 0:
                data[i] = np.nan
            if index >= len(feature.data):
                data[i] = np.nan
        if ylim is None:
            m = np.nanmin(data)
            M = np.nanmax(data)
            ylim = (m - 0.1 * (M - m), M + 0.1 * (M - m))
        ax = self.setup(ax=ax, yaxis=False, ylim=ylim, time=time)
        ax.plot(t, data)
notebook = Notebook()
def repr_segment(segment):
    """Get `png` data for `segment`"""
    import matplotlib.pyplot as plt
    figsize = plt.rcParams['figure.figsize']
    plt.rcParams['figure.figsize'] = (notebook.width, 1)
    fig, ax = plt.subplots()
    try:
        notebook.plot_segment(segment, ax=ax)
        data = print_figure(fig, 'png')
    finally:
        # Restore global matplotlib state even if plotting fails.
        plt.close(fig)
        plt.rcParams['figure.figsize'] = figsize
    return data
def repr_timeline(timeline):
    """Get `png` data for `timeline`"""
    import matplotlib.pyplot as plt
    figsize = plt.rcParams['figure.figsize']
    plt.rcParams['figure.figsize'] = (notebook.width, 1)
    fig, ax = plt.subplots()
    try:
        notebook.plot_timeline(timeline, ax=ax)
        data = print_figure(fig, 'png')
    finally:
        # Restore global matplotlib state even if plotting fails.
        plt.close(fig)
        plt.rcParams['figure.figsize'] = figsize
    return data
def repr_annotation(annotation):
    """Get `png` data for `annotation`"""
    import matplotlib.pyplot as plt
    figsize = plt.rcParams['figure.figsize']
    plt.rcParams['figure.figsize'] = (notebook.width, 2)
    fig, ax = plt.subplots()
    try:
        notebook.plot_annotation(annotation, ax=ax)
        data = print_figure(fig, 'png')
    finally:
        # Restore global matplotlib state even if plotting fails.
        plt.close(fig)
        plt.rcParams['figure.figsize'] = figsize
    return data
def repr_scores(scores):
    """Get `png` data for `scores`"""
    import matplotlib.pyplot as plt
    figsize = plt.rcParams['figure.figsize']
    plt.rcParams['figure.figsize'] = (notebook.width, 2)
    fig, ax = plt.subplots()
    try:
        notebook.plot_scores(scores, ax=ax)
        data = print_figure(fig, 'png')
    finally:
        # Restore global matplotlib state even if plotting fails.
        plt.close(fig)
        plt.rcParams['figure.figsize'] = figsize
    return data
def repr_feature(feature):
    """Get `png` data for `feature`"""
    import matplotlib.pyplot as plt
    figsize = plt.rcParams['figure.figsize']
    plt.rcParams['figure.figsize'] = (notebook.width, 2)
    fig, ax = plt.subplots()
    try:
        notebook.plot_feature(feature, ax=ax)
        data = print_figure(fig, 'png')
    finally:
        # Restore global matplotlib state even if plotting fails.
        plt.close(fig)
        plt.rcParams['figure.figsize'] = figsize
    return data
| StarcoderdataPython |
8119991 | import os
import network
import time
import ntptime
import ubinascii
import machine
import micropython
import gc
from umqtt.simple import MQTTClient
class pir:
    """MicroPython PIR motion-sensor node reporting over MQTT.

    Publishes Tasmota-style topics (tele/<topic>/STATE, stat/<topic>/POWER,
    ...) and accepts cmnd/<topic>/{MP,POWER} commands.  Indentation of this
    class was reconstructed from a whitespace-stripped source; ambiguous
    nesting points are flagged with NOTE(review) comments.
    """

    # JSON payload templates filled in by publish_state / publish_hass_state.
    state_msg = "{\"Time\":\"%s\",\"Uptime\":\"%s\",\"UptimeSec\":%s,\"MemFree\":%s,\"MemAlloc\":%s,\"Stack\":%s,\"Sleep\":%s,\"MqttCount\":%s,\"POWER\":\"%s\",\"Wifi\":{\"AP\":1,\"SSId\":\"%s\",\"MAC\":\"%s\",\"RSSI\":%s,\"Signal\":%s,\"LinkCount\":%s,\"Downtime\":\"%s\"}}"
    hass_state_msg = "{\"Version\":\"%s\",\"BuildDateTime\":\"%s\",\"Module or Template\":\"Sonoff Basic\",\"RestartReason\":\"%s\",\"Uptime\":\"%s\",\"Hostname\":\"%s-%s\",\"IPAddress\":\"%s\",\"RSSI\":\"%s\",\"Signal (dBm)\":\"%s\",\"WiFi LinkCount\":%s,\"WiFi Downtime\":\"%s\",\"MqttCount\":%s}"

    import app_cfg as cf  # device configuration (MQTT host, topics, ids)

    # Hardware timers for the 1 s / 30 s / 5 min cron flags.
    tim1 = machine.Timer(-1)
    tim2 = machine.Timer(-1)
    tim3 = machine.Timer(-1)
    sta_if = network.WLAN(network.STA_IF)

    def __init__(self):
        """Initialise state, wifi metadata, MQTT client and GPIO pins."""
        self.debug_f = True
        rstc = {0:'Power ON', 1:'Watchdog Reset', 2:'Hard Reset', 4:'Soft Reset', 5:'Deep Sleep Reset', 6:'Hard Reset'}
        self.reset_cause = rstc[machine.reset_cause()]
        del rstc
        # Build an upper-case colon-separated MAC string from the raw hex.
        mac = ubinascii.hexlify(self.sta_if.config('mac'))
        mqtt_mac = bytearray('00:00:00:00:00:00', 'ascii')
        j = 0
        for i in range(17):
            if mqtt_mac[i] == 58:  # skip the ':' separators
                continue
            mqtt_mac[i] = mac[j]
            if mac[j] > 96:  # lower-case ascii letter -> upper-case
                mqtt_mac[i] = mac[j] - 32
            j += 1
        self.mqtt_mac = mqtt_mac.decode()
        self.mqtt_essid = self.sta_if.config('essid').encode()
        self.ipaddr = self.sta_if.ifconfig()[0].encode()
        self.deactivate_ap()
        # Connection/cron state flags and counters.
        self.wifi_conn_f = False
        self.mqtt_conn_f = False
        self.irh_1s_f = False
        self.irh_30s_f = False
        self.irh_5m_f = False
        self.reboot = False
        self.network_fail_c = 0
        self.wifi_link_c = 0
        self.mqtt_link_c = 0
        self.wifi_dwnt = 0
        self.wifi_dwnt_start = 0
        if self.sta_if.isconnected():
            self.log(self.sta_if.ifconfig(), 'Network config')
            self.wifi_conn_f = True
            self.wifi_link_c = 1
            try:
                ntptime.settime()
            except OSError as e:
                self.log(e, 'NTP Timeout')
                self.network_fail_c += 1
        self.mqtt_client = MQTTClient(self.cf.mqtt_client_id, self.cf.mqtt_server, self.cf.mqtt_port, self.cf.mqtt_user, self.cf.mqtt_pass)
        # GPIO: 12 = heartbeat LED, 13 = motion LED, 14 = PIR input.
        self.led_heartbeat = machine.Pin(12, machine.Pin.OUT)
        self.led_motion = machine.Pin(13, machine.Pin.OUT)
        self.led_motion.value(0)
        self.pir = machine.Pin(14, machine.Pin.IN)
        self.pir.irq(trigger=machine.Pin.IRQ_RISING, handler=self.handle_interrupt)
        self.mqtt_client.set_callback(self.sub_cb)
        self.ci = 1   # main-loop iteration counter
        self.mt = 0   # motion hold-off counter (see run())
        self.pir_flag = False
        self.runable = True
        self.start = time.time()
        gc.collect()
        gc.threshold(gc.mem_free() // 4 + gc.mem_alloc())

    def log(self, data, desc = ''):
        """Print a timestamped debug line when debugging is enabled."""
        if self.debug_f:
            print("%s: [%s] %s - %r" % (self.cf.mqtt_name, self.time(), desc, data))

    def irh_1s(self,t):
        # Timer ISR: only set a flag; real work happens in run().
        self.irh_1s_f = True

    def irh_30s(self,t):
        # Timer ISR: only set a flag; real work happens in run().
        self.irh_30s_f = True

    def irh_5m(self,t):
        # Timer ISR: only set a flag; real work happens in run().
        self.irh_5m_f = True

    def deactivate_ap(self):
        """Disable the access-point interface (station mode only)."""
        ap_if = network.WLAN(network.AP_IF)
        self.log(ap_if.ifconfig(), 'Deactivating AP')
        ap_if.active(False)

    def sub_cb(self, topic, msg):
        """MQTT subscription callback handling the supported commands."""
        msg = msg.decode()
        self.log(str(msg), 'MQTT cmnd')
        if msg == 'stop':
            self.runable = False
        if msg == 'ON':
            self.pir_flag = True
            self.mt = 0
        if msg == 'OFF':
            self.pir_flag = False
        if msg == 'debugon':
            self.debug_f = True
        if msg == 'debugoff':
            self.debug_f = False
        if msg == 'reboot':
            self.reboot = True

    def handle_interrupt(self, pin):
        """PIR rising-edge ISR: flag motion and restart the hold-off timer."""
        self.pir_flag = True
        self.mt = 0

    def fmt_num(self, n):
        """Zero-pad a number below ten to two characters."""
        if n < 10:
            return('0'+str(n))
        return(str(n))

    def uptime(self, delta):
        """Format a duration in seconds as 'DThh:mm:ss'."""
        d = delta // 86400
        h = (delta % 86400) // 3600
        m = ((delta % 86400) // 60) % 60
        s = (delta % 86400) % 60
        return(str(d)+"T"+self.fmt_num(h)+":"+self.fmt_num(m)+":"+self.fmt_num(s))

    def uptimesec(self):
        """Seconds elapsed since construction, as a string."""
        return(str(time.time() - self.start))

    def time(self, v = 0):
        """Return an ISO-like timestamp for now (v == 0) or for epoch v."""
        if v == 0:
            t = time.localtime()
        else:
            t = time.localtime(v)
        return(str(t[0]) + "-" + self.fmt_num(t[1]) + "-" +self.fmt_num(t[2]) + "T" + self.fmt_num(t[3]) + ":" + self.fmt_num(t[4]) + ":" + self.fmt_num(t[5]))

    def publish_state(self, lp_sleep):
        """Publish the tele/.../STATE JSON payload."""
        pwr = 'OFF'
        if self.pir_flag:
            pwr = 'ON'
        self.mqtt_publish("tele/%s/STATE".encode() % (self.cf.mqtt_topic), self.state_msg.encode() % (self.time(), self.uptime(time.time() - self.start), self.uptimesec(), str(gc.mem_free()), str(gc.mem_alloc()), str(micropython.stack_use()), lp_sleep, str(self.mqtt_link_c), pwr, self.mqtt_essid, self.mqtt_mac, (self.sta_if.status('rssi') + 100) * 2, str(self.sta_if.status('rssi')), str(self.wifi_link_c), self.uptime(self.wifi_dwnt)))

    def publish_hass_state(self):
        """Publish the tele/.../HASS_STATE JSON payload."""
        st = os.stat('pir.py')  # st[7] = mtime, used as "build date"
        self.mqtt_publish("tele/%s/HASS_STATE".encode() % (self.cf.mqtt_topic), self.hass_state_msg.encode() % (self.cf.version, self.time(st[7]), self.reset_cause, self.uptime(time.time() - self.start), self.cf.mqtt_name, self.cf.hass_id, self.ipaddr, str((self.sta_if.status('rssi') + 100) * 2), str(self.sta_if.status('rssi')), str(self.wifi_link_c), self.uptime(self.wifi_dwnt), str(self.mqtt_link_c)))

    def publish_lwt(self, status):
        """Publish the Online/Offline last-will topic."""
        self.mqtt_publish("tele/%s/LWT".encode() % (self.cf.mqtt_topic), status.encode())

    def publish_motion(self, status):
        """Publish motion ON/OFF on the stat/... topics."""
        self.mqtt_publish("stat/%s/POWER".encode() % (self.cf.mqtt_topic), status.encode())
        time.sleep_ms(100)
        self.mqtt_publish("stat/%s/RESULT".encode() % (self.cf.mqtt_topic), "{\"POWER\":\"%s\"}".encode() % (status))
        time.sleep_ms(100)
        self.mqtt_publish("stat/%s/SWITCH1T".encode() % (self.cf.mqtt_topic), b"{\"TRIG\":\"SINGLE\"}")

    def mqtt_publish(self, topic, msg):
        """Publish if wifi and MQTT are up; drop the connection flag on error."""
        if self.wifi_chk_conn() and self.mqtt_conn_f:
            try:
                self.mqtt_client.publish(topic, msg)
            except OSError as e:
                self.log(e, 'MQTT Pub fail')
                self.mqtt_conn_f = False

    def mqtt_chk_msg(self):
        """Poll for pending MQTT messages (drives sub_cb)."""
        if self.wifi_chk_conn() and self.mqtt_conn_f:
            try:
                self.mqtt_client.check_msg()
            except OSError as e:
                self.log(e, 'MQTT Chk Msg fail')
                self.mqtt_conn_f = False

    def wifi_chk_conn(self):
        """Return wifi status, tracking link count and downtime transitions."""
        if self.wifi_conn_f:
            if self.sta_if.isconnected():
                return(True)
            else:
                self.wifi_conn_f = False
                self.network_fail_c += 1
                self.log('Wifi Disconnected')
                self.wifi_dwnt_start = time.time()
                return(False)
        else:
            if self.sta_if.isconnected():
                self.wifi_conn_f = True
                self.wifi_link_c += 1
                self.wifi_dwnt += (time.time() - self.wifi_dwnt_start)
                return(True)
            else:
                return(False)

    def mqtt_connect(self):
        """Connect to the broker and subscribe to the command topics."""
        try:
            self.mqtt_client.connect()
        except OSError as e:
            self.log(e, 'MQTT Conn fail')
            self.mqtt_conn_f = False
            self.network_fail_c += 1
        else:
            self.mqtt_conn_f = True
            self.mqtt_link_c += 1
            self.network_fail_c = 0
        time.sleep_ms(100)
        if self.mqtt_conn_f:
            try:
                self.mqtt_client.subscribe(b"cmnd/" + self.cf.mqtt_topic + b"/MP")
            except OSError as e:
                self.log(e, 'MQTT Sub. fail')
                self.mqtt_conn_f = False
        time.sleep_ms(100)
        if self.mqtt_conn_f:
            try:
                self.mqtt_client.subscribe(b"cmnd/" + self.cf.mqtt_topic + b"/POWER")
            except OSError as e:
                self.log(e, 'MQTT Sub fail')
                self.mqtt_conn_f = False

    def mqtt_chk_conn(self):
        """Ping the broker to validate the MQTT connection."""
        if self.mqtt_conn_f:
            try:
                self.mqtt_client.ping()
            except OSError as e:
                self.log(e, 'MQTT Conn fail')
                self.mqtt_conn_f = False
                self.network_fail_c += 1
            else:
                self.mqtt_conn_f = True
                self.network_fail_c = 0

    def cron_1s(self):
        """1-second task: toggle the heartbeat LED."""
        self.irh_1s_f = False
        self.led_heartbeat.value(self.led_heartbeat.value() ^ 1)

    def cron_30s(self):
        """30-second task: verify connectivity, reconnect, reset if hopeless."""
        self.irh_30s_f = False
        self.mqtt_chk_conn()
        if not self.mqtt_conn_f:
            if self.wifi_chk_conn():
                self.mqtt_connect()
        if self.network_fail_c > 20:
            self.log('Machine Reset', '>20 net. failures')
            machine.reset()

    def cron_5m(self):
        """5-minute task: publish telemetry and collect garbage."""
        self.irh_5m_f = False
        self.publish_state('10')
        time.sleep_ms(100)
        self.publish_hass_state()
        time.sleep_ms(100)
        gc.collect()

    def run(self):
        """Main loop: service MQTT, motion events and periodic cron flags."""
        self.tim1.init(period=1000, mode=machine.Timer.PERIODIC, callback=self.irh_1s)
        self.tim2.init(period=30000, mode=machine.Timer.PERIODIC, callback=self.irh_30s)
        self.tim3.init(period=300000, mode=machine.Timer.PERIODIC, callback=self.irh_5m)
        if self.wifi_chk_conn():
            self.mqtt_connect()
        # NOTE(review): indentation reconstructed -- these initial publishes
        # are assumed to run unconditionally (mqtt_publish guards internally).
        self.publish_lwt('Online')
        time.sleep_ms(100)
        self.publish_state('10')
        time.sleep_ms(100)
        self.publish_hass_state()
        time.sleep_ms(100)
        while self.runable:
            self.mqtt_chk_msg()
            self.ci += 1
            if self.ci > 17280000: #roll over approx every 2 days at 10 ms sleep
                self.ci = 1
                if self.wifi_chk_conn():
                    try:
                        ntptime.settime()
                    except OSError as e:
                        self.log(e, 'NTP Timeout')
            if self.pir_flag:
                if self.mt == 0:
                    # motion just started (or re-triggered): announce ON
                    self.led_motion.value(1)
                    self.publish_state('10')
                    time.sleep_ms(100)
                    self.publish_motion('ON')
                self.mt += 1
                if self.mt == 2000:
                    # ~20 s without a re-trigger: announce OFF
                    self.pir_flag = False
                    self.led_motion.value(0)
                    self.publish_state('10')
                    time.sleep_ms(100)
                    self.publish_motion('OFF')
            if self.irh_1s_f:
                self.cron_1s()
            if self.irh_30s_f:
                self.cron_30s()
            if self.irh_5m_f:
                self.cron_5m()
            time.sleep_ms(10)
        self.tim1.deinit()
        self.tim2.deinit()
        self.tim3.deinit()
        self.publish_lwt('Offline')
        time.sleep_ms(100)
        self.runable = True
        if self.reboot:
            machine.reset()
| StarcoderdataPython |
1833903 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import hashlib
import logging
import time
from bisect import bisect_right
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import List
import numpy as np
import torch
from fairseq import distributed_utils
from fairseq.data import FairseqDataset, data_utils
from fairseq.data import SampledMultiDataset
def get_time_gap(s, e):
    """Return the human-readable duration between two POSIX timestamps."""
    start = datetime.datetime.fromtimestamp(s)
    end = datetime.datetime.fromtimestamp(e)
    return str(end - start)
logger = logging.getLogger(__name__)
class SampledMultiLangDataset(SampledMultiDataset):
    """Samples from multiple sub-datasets according to given sampling ratios.
    Args:
        datasets (
            List[~torch.utils.data.Dataset]
            or OrderedDict[str, ~torch.utils.data.Dataset]
        ): datasets
        sampling_ratios (List[float]): list of probability of each dataset to be sampled
            (default: None, which corresponds to concatenating all dataset together).
        seed (int): RNG seed to use (default: 2).
        epoch (int): starting epoch number (default: 1).
        eval_key (str, optional): a key used at evaluation time that causes
            this instance to pass-through batches from *datasets[eval_key]*.
        collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
            CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
            the collater to output batches of data mixed from all sub-datasets,
            and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
            of sub-datasets.
            Note that not all sub-datasets will present in a single batch in both formats.
        virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
        split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
        shared_collater (bool): whether or not to all sub-datasets have the same collater.
        shuffle (bool): whether or not to shuffle data (default: True).
    """

    def ordered_indices(self):
        """Return indices sorted by length *within* each language partition.

        The flat index space is split at self.cumulated_sizes boundaries so
        that batching never mixes samples across language partitions.
        """
        multi_lang_sizes = self.sizes
        # Slice the flat sizes array into one sub-array per sub-dataset.
        multi_lang_sizes = [
            multi_lang_sizes[
                0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i]
            ]
            for i in range(len(self.datasets))
        ]
        multi_lang_sort_indices = []
        for i, sizes in enumerate(multi_lang_sizes):
            if self.shuffle:
                indices = np.random.permutation(len(sizes))
            else:
                indices = np.arange(len(sizes))
            # sizes columns are assumed to be [src_len, tgt_len] when 2-D.
            tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
            src_sizes = (
                sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
            )
            # mergesort keeps the shuffled order stable among equal lengths.
            if tgt_sizes is not None:
                sort_indices = indices[np.argsort(np.maximum(src_sizes[indices], tgt_sizes[indices]), kind="mergesort")]
            else:
                sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")]
            # Shift local indices back into the flat (global) index space.
            multi_lang_sort_indices.append(sort_indices + (0 if i==0 else self.cumulated_sizes[i - 1]))
        multi_lang_sort_indices = np.concatenate(multi_lang_sort_indices)
        return multi_lang_sort_indices

    def batch_by_size(
        self,
        indices,
        max_tokens=None,
        max_sentences=None,
        required_batch_size_multiple=1,
    ):
        """Batch each language partition independently, then concatenate.

        Assumes *indices* is laid out per-partition as produced by
        ordered_indices() above.
        """
        multi_lang_sort_indices = [
            indices[
                0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i]
            ]
            for i in range(len(self.datasets))
        ]
        batches = []
        for single_lang_sort_indices in multi_lang_sort_indices:
            batches += super().batch_by_size(
                single_lang_sort_indices,
                max_tokens,
                max_sentences,
                required_batch_size_multiple
            )
        return batches

    def collater(self, samples, **extra_args):
        """Merge a list of samples to form a mini-batch."""
        if len(samples) == 0:
            return None
        if self.collate_format == "ordered_dict":
            # One sub-batch per sub-dataset, keyed by dataset name.
            collect_samples = [[] for _ in range(len(self.datasets))]
            for (i, sample) in samples:
                collect_samples[i].append(sample)
            batch = OrderedDict(
                [
                    (self.keys[i], dataset.collater(collect_samples[i]))
                    for i, (key, dataset) in enumerate(zip(self.keys, self.datasets))
                    if len(collect_samples[i]) > 0
                ]
            )
        elif self.shared_collater:
            batch = self.datasets[0].collater([s for _, s in samples])
        else:
            # Mixed single-batch path: collate per dataset with a shared
            # pad_to_length, then merge and sort by source length.
            samples_dict = defaultdict(list)
            pad_to_length = (
                defaultdict(int)
                if "pad_to_length" not in extra_args
                else extra_args["pad_to_length"]
            )
            for ds_idx, s in samples:
                pad_to_length["source"] = max(
                    pad_to_length["source"], s["source"].size(0)
                )
                if s["target"] is not None:
                    pad_to_length["target"] = max(
                        pad_to_length["target"], s["target"].size(0)
                    )
                    # prepend_target = [first source token] + target
                    s['prepend_target'] = torch.cat([s['source'][:1], s['target']])
                    pad_to_length["prepend_target"] = pad_to_length["target"] + 1
                samples_dict[ds_idx].append(s)
            batches = [
                self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length)
                for i in range(len(self.datasets))
                if len(samples_dict[i]) > 0
            ]

            def straight_data(tensors):
                # Concatenate per-dataset tensors along the batch dim.
                batch = torch.cat(tensors, dim=0)
                return batch

            src_lengths = straight_data(
                [b["net_input"]["src_lengths"] for b in batches]
            )
            src_lengths, sort_order = src_lengths.sort(descending=True)

            def straight_order(tensors):
                # Concatenate, then reorder by descending source length.
                batch = straight_data(tensors)
                return batch.index_select(0, sort_order)

            batch = {
                "id": straight_order([b["id"] for b in batches]),
                "nsentences": sum(b["nsentences"] for b in batches),
                "ntokens": sum(b["ntokens"] for b in batches),
                "net_input": {
                    "src_tokens": straight_order(
                        [b["net_input"]["src_tokens"] for b in batches]
                    ),
                    "src_lengths": src_lengths,
                    "prepend_target": straight_order([b["net_input"]["prepend_target"] for b in batches]),
                },
                "target": straight_order([b["target"] for b in batches])
                if batches[0]["target"] is not None
                else None,
                "tgt_lengths": straight_order([b["tgt_lengths"] for b in batches])
                if batches[0]["tgt_lengths"] is not None
                else None
            }

            def check_alignment(alignment, src_len, tgt_len):
                # Reject empty alignments or ones pointing past either length.
                if alignment is None or len(alignment) == 0:
                    return False
                if (
                    alignment[:, 0].max().item() >= src_len - 1
                    or alignment[:, 1].max().item() >= tgt_len - 1
                ):
                    logger.warning("alignment size mismatch found, skipping alignment!")
                    return False
                return True

            if "alignments" in batches[0].keys() and batches[0]["alignments"] is not None:
                # Flatten per-dataset alignments, reorder to match sort_order,
                # then offset each pair into the flattened padded batch.
                alignments = [b["alignments"][align_index] for b in batches for align_index in range(len(b["alignments"]))]
                alignments = [alignments[align_index] for align_index in sort_order]
                bsz, src_sz = batch["net_input"]["src_tokens"].shape
                _, tgt_sz = batch["net_input"]["prepend_target"].shape
                tgt_lengths=batch["tgt_lengths"] + 1 #prepend_target length
                offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
                offsets[:, 0] += torch.arange(len(sort_order), dtype=torch.long) * src_sz + 1
                offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz + 1
                offsets[:, 0] += src_sz - src_lengths #left pad source
                offsets[:, 1] += tgt_sz - tgt_lengths #left pad prepend_target
                alignments = [
                    alignment + offset
                    for align_idx, (offset, src_len, tgt_len) in enumerate(
                        zip(offsets, src_lengths, tgt_lengths)
                    )
                    for alignment in [alignments[align_idx].view(-1, 2)]
                    if check_alignment(alignment + 1, src_len, tgt_len)
                ]
                batch["net_input"]["alignments"] = torch.cat(alignments)
            if "prev_output_tokens" in batches[0]["net_input"]:
                batch["net_input"]["prev_output_tokens"] = straight_order(
                    [b["net_input"]["prev_output_tokens"] for b in batches]
                )
            if "src_lang_id" in batches[0]["net_input"]:
                batch["net_input"]["src_lang_id"] = straight_order(
                    [b["net_input"]["src_lang_id"] for b in batches]
                )
            if "tgt_lang_id" in batches[0]:
                batch["net_input"]["tgt_lang_id"] = straight_order(
                    [b["tgt_lang_id"] for b in batches]
                )
                batch["tgt_lang_id"] = straight_order(
                    [b["tgt_lang_id"] for b in batches]
                )
        return batch
11358689 | import os
import re
import sys
from py._io.terminalwriter import get_terminal_width
from . import __version__ as testmynb__version__
from ._ansi import green, red, orange, strip_ansi
class TestHandler:
    """Runs every given notebook's test cells and prints a pytest-style report.

    Exits the process with status 1 when any test fails or raises an error.
    """

    def __init__(self, *notebooks):
        # Notebook objects; each is callable (runs its cells) and exposes
        # extract_codes(), trusted, name, result, get_error_stack() and
        # get_fail_stack().
        self.notebooks = notebooks

    @property
    def _summary(self):
        """Report header: banner, platform, Python version, cwd and counts."""
        notebook_count = len(self.notebooks)
        test_count = sum([len(nb.extract_codes()) for nb in self.notebooks])
        # sys.version starts with the bare version number; strip the rest.
        py_ver = re.sub(r"\s.*", "", sys.version)
        header = self._h1_message("Test My Notebook ({})".format(testmynb__version__))
        return "{}".format(header) + "\n".join(
            [
                "Platform {}".format(sys.platform),
                "Python {}".format(py_ver),
                "Working Directory: {}".format(os.getcwd()),
                "",
                "{0} test cells across {1} notebook(s) detected.".format(
                    test_count, notebook_count
                ),
                "",
            ]
        )

    @staticmethod
    def _h1_message(message):
        """Centre *message* in a terminal-wide line of '=' characters."""
        col = get_terminal_width()
        no_formats = strip_ansi(message)
        # Remove the ANSI escape codes to check the message length
        num_equals = (col - len(no_formats) - 3) // 2
        equals_sign = num_equals * "="
        return "{1} {0} {1}\n".format(message, equals_sign)

    @property
    def _notebook_summary_section(self):
        """Per-notebook line: trust status, name and result string."""
        section = ["Notebooks:\n"]
        for nb in self.notebooks:
            trust = green("Trusted") if nb.trusted else red("Untrusted")
            string = "{} {}: {}\n".format(trust, nb.name, nb.result)
            section.append(string)
        section.append("\n")
        return "".join(section)

    def __call__(self):
        """Run all notebooks, print the assembled report, exit 1 on fail/error."""
        failed_or_error = False
        output_message = list()
        for nb in self.notebooks:
            nb()
        output_message.append(self._summary)
        output_message.append(self._notebook_summary_section)
        errors = self.collect_errors()
        fails = self.collect_fails()
        if fails:
            failed_or_error = True
            head_message = red(self._h1_message("Failed Test(s)"))
            output_message.append(head_message)
            for cell, err in fails.items():
                string = "---- {}: {} ----\n".format(cell.notebook, cell.name)
                output_message.append(string)
                output_message.append(str(cell))
                output_message.append(
                    red("\n-----------------------------------------\n")
                )
                output_message.append(err)
                output_message.append("\n\n")
        if errors:
            failed_or_error = True
            head_message = orange(self._h1_message("Errored Test(s)"))
            output_message.append(head_message)
            for cell, err in errors.items():
                string = "---- {}: {} ----\n".format(cell.notebook, cell.name)
                output_message.append(string)
                output_message.append(str(cell))
                output_message.append(
                    red("\n-----------------------------------------\n")
                )
                output_message.append(err)
                output_message.append("\n\n")
        output_message.append(self._final_remarks)
        output_message = "".join(output_message)
        print(output_message)
        if failed_or_error:
            sys.exit(1)

    @property
    def _final_remarks(self):
        """Footer line with total passed/failed/errored counts.

        Result strings use pytest-style characters: '.' pass, 'F' fail,
        'E' error.
        """
        all_tests = "".join([nb.result for nb in self.notebooks])
        passed_test_count = all_tests.count(".")
        failed_test_count = all_tests.count("F")
        errored_test_count = all_tests.count("E")
        passed_text = green("{} test(s) passed".format(passed_test_count))
        failed_text = red("{} failed".format(failed_test_count))
        error_text = orange(" and {} raised an error".format(errored_test_count))
        return self._h1_message(
            "{}, {},{}".format(passed_text, failed_text, error_text)
        )

    def collect_errors(self):
        """Merge every notebook's error stack into one dict (cell -> trace)."""
        errors = dict()
        for nb in self.notebooks:
            errors.update(nb.get_error_stack())
        return errors

    def collect_fails(self):
        """Merge every notebook's failure stack into one dict (cell -> trace)."""
        fails = dict()
        for nb in self.notebooks:
            fails.update(nb.get_fail_stack())
        return fails
def find_notebooks(*args):
    """Resolve notebook paths from the given files/directories.

    Each argument that is a file is taken as-is; each directory is searched
    recursively.  With no arguments, the current working directory is
    searched recursively.  Non-existent paths are silently ignored.
    """
    if not args:
        return _recursive_find_notebooks(os.getcwd())
    found = []
    for candidate in args:
        if os.path.isfile(candidate):
            found.append(candidate)
        elif os.path.isdir(candidate):
            found.extend(_recursive_find_notebooks(candidate))
    return found
def _recursive_find_notebooks(path):
notebooks = list()
for root, dirs, files in os.walk(path):
for file in files:
if ".ipynb_checkpoints" in root:
continue
if re.match(r"^test_.+\.ipynb", file):
notebooks.append(os.path.join(root, file))
return notebooks
| StarcoderdataPython |
8157965 | <reponame>sdruskat/toolbox-scripts
#!/usr/bin/python
# -*- coding: utf-8 -*-
#############################################################################
# Copyright 2018ff. <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributors:
# <NAME> - initial API and implementation
#############################################################################
import re
import os
from collections import defaultdict
import csv
def main():
    """Interactively collect settings, scan for Toolbox files, and write the
    per-language marker matrix to a CSV file.
    """
    s = "Directory containing Toolbox text files (recursively): "
    dir_str = input(s)
    s = "Name of directories that may contain Toolbox files [toolbox]: "
    toolbox_dir = input(s)
    # Bug fix: the defaults were applied with `toolbox_dir is ""`, which
    # tests object identity, not equality -- CPython emits a SyntaxWarning
    # and the comparison is not guaranteed to be True for an empty string
    # returned by input().  Use equality instead.
    if toolbox_dir == "":
        toolbox_dir = "toolbox"
    toolbox_ext = input("Toolbox extension [txt]: ")
    if toolbox_ext == "":
        toolbox_ext = "txt"
    candidates = retrieve_candidates(dir_str, toolbox_ext, toolbox_dir)
    marker_tuple = retrieve_markers(dir_str, candidates)
    language_markers = marker_tuple[0]
    all_markers = marker_tuple[1]
    write_csv(language_markers, all_markers)
    # Report the total number of distinct markers found.
    print(len(all_markers))
    print("Goodbye.")
def retrieve_candidates(dir_str, toolbox_ext, toolbox_dir):
    """Recursively collect candidate Toolbox files under *dir_str*.

    A candidate must live below a directory named *toolbox_dir*, must not
    sit under a "/v1/" directory (only "/v2/" versions are used when
    versions exist), and must carry the *toolbox_ext* extension.
    """
    matches = []
    for root, _dirs, files in os.walk(dir_str):
        for fname in files:
            full_path = os.path.join(root, fname)
            if "/" + toolbox_dir + "/" not in full_path:
                continue
            if "/v1/" in full_path:
                continue
            if fname.endswith("." + toolbox_ext):
                matches.append(full_path)
    return matches
def retrieve_markers(dir_str, candidates):
    """Scan candidate Toolbox files for their backslash markers.

    Returns a tuple ``(marker_dict, all_markers)``: *marker_dict* maps a
    corpus-language directory name to the set of markers found in that
    language's files, and *all_markers* is the union over all languages.
    The language name is taken from the first directory level below
    *dir_str* (paths are assumed to use '/' separators).
    """
    marker_dict = defaultdict(set)
    all_markers = set()
    # A marker is a leading backslash followed by word characters, square
    # brackets, '_' or '-', terminated by whitespace.  Fix: use a raw string
    # instead of the old double-escaped literal ("^\\\\[\w...") -- same
    # pattern, but without the invalid-escape DeprecationWarning for \w etc.
    marker = re.compile(r"^\\[\w\[\]_-]+\s")
    # Name of the scan-root directory (last path component).
    root_split = dir_str.rsplit("/")
    if dir_str.endswith("/"):
        parent_name = root_split[-2]
    else:
        parent_name = root_split[-1]
    for c in candidates:
        # The first directory level under the scan root is assumed to be the
        # corpus language; slice it out of the candidate's path.
        root_i = c.find(parent_name + "/") + len(parent_name) + 1
        post_lang_slash = c.find("/", root_i)
        language_str = c[root_i:post_lang_slash]
        # Read the file and record every marker into the per-language set.
        with open(c, 'r') as in_file:
            buf = in_file.readlines()
            for line in buf:
                matches = re.match(marker, line)
                if matches:
                    # Drop the trailing whitespace character matched by \s.
                    match = matches.group(0)[:-1]
                    all_markers.add(match)
                    marker_dict[language_str].add(match)
    return (marker_dict, all_markers)
def write_csv(markers, all_markers):
    """Write 'markers.csv': one row per language listing its markers.

    Each row starts with the language name, followed by one column per
    marker in *all_markers* (sorted); the cell contains the marker when the
    language uses it, and an empty string otherwise, so columns line up
    across languages.  The output directory is asked for interactively.
    """
    sorted_markers = sorted(list(all_markers))
    csv_dir = input("Directory to save the CSV file to: ")
    if not csv_dir.endswith("/"):
        csv_dir = csv_dir + "/"
    with open(os.path.join(csv_dir, 'markers.csv'), 'w') as f:
        w = csv.writer(f)
        keys = list(markers)
        for key in keys:
            # Construct a list of markers that are actually in the
            # set used for the language
            val_list = []
            for all_key in sorted_markers:
                if all_key in markers[key]:
                    val_list.append(all_key)
                else:
                    val_list.append("")
            val_list.insert(0, key)
            w.writerow(val_list)
if __name__ == '__main__':
"""
Call main() when run as a script.
"""
main()
| StarcoderdataPython |
5176992 | <filename>pytd62/common.py
import math
import numpy as np
import pandas as pd
import sys, clr
# Read the OpenTD install location from path.txt so pythonnet can locate the
# OpenTDv62 assembly.  Fix: use a context manager so the file handle is
# closed promptly (the original left it open for the life of the process).
with open("path.txt", "r", encoding="utf_8") as fileobj:
    sys.path.append(fileobj.readline().strip())
clr.AddReference("OpenTDv62")
import OpenTDv62
clr.AddReference('System')
from System.Drawing.Imaging import ImageFormat
from System.Drawing import RectangleF
from System.Collections.Generic import List
def rotx(theta: float) -> np.ndarray:
    """Rotation matrix for an angle *theta* (radians) about the x axis."""
    c, s = math.cos(theta), math.sin(theta)
    return np.array([
        [1.0, 0.0, 0.0],
        [0.0, c, -s],
        [0.0, s, c],
    ])
def roty(theta: float) -> np.ndarray:
    """Rotation matrix for an angle *theta* (radians) about the y axis."""
    c, s = math.cos(theta), math.sin(theta)
    return np.array([
        [c, 0.0, s],
        [0.0, 1.0, 0.0],
        [-s, 0.0, c],
    ])
def rotz(theta: float) -> np.ndarray:
    """Rotation matrix for an angle *theta* (radians) about the z axis."""
    c, s = math.cos(theta), math.sin(theta)
    return np.array([
        [c, -s, 0.0],
        [s, c, 0.0],
        [0.0, 0.0, 1.0],
    ])
def rotate_base_euler(ang1, ang2, ang3, order='ZYX', base=np.eye(3)):
    """Apply three successive axis rotations to *base*.

    *order* names the rotation axes (one of the twelve proper Euler /
    Tait-Bryan sequences); the i-th character pairs with the i-th angle.
    Mathematically this returns ``base @ R1 @ R2 @ R3``, which is identical
    to transposing, left-multiplying the transposed rotations, and
    transposing back.
    """
    valid_orders = {'XYX', 'XYZ', 'XZX', 'XZY',
                    'YXY', 'YXZ', 'YZX', 'YZY',
                    'ZXY', 'ZXZ', 'ZYX', 'ZYZ'}
    if order not in valid_orders:
        raise ValueError('unexpected rotation order')

    def _axis_rotation(axis, ang):
        # Single-axis rotation matrix (inlined from rotx/roty/rotz).
        c, s = math.cos(ang), math.sin(ang)
        if axis == 'X':
            return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])
        if axis == 'Y':
            return np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])
        return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

    out = base
    for axis, ang in zip(order, (ang1, ang2, ang3)):
        out = out @ _axis_rotation(axis, ang)
    return out
def get_number_of_nodes(td: "OpenTDv62.ThermalDesktop", submodel_name: str='') -> int:
    """Return the node count, restricted to *submodel_name* when given.

    With the default empty name, every node in the model is counted.
    Bug fix: the filter previously ran only when submodel_name was EMPTY
    (``if submodel_name == '':``), so asking for a specific submodel
    returned the size of the whole model instead of the submodel's count.
    """
    nodes = td.GetNodes()
    if submodel_name != '':
        nodes = [inode for inode in nodes if submodel_name == inode.Submodel.Name]
    return len(nodes)
def get_node_handle(td, submodel_name, node_id, nodes: list=[]):
    """Return the Handle of the node identified by submodel name and id.

    *nodes* may be passed to avoid re-fetching the node list on repeated
    lookups; when empty, td.GetNodes() is called.  Raises ValueError when
    the node does not exist or the id matches more than one node.

    NOTE(review): the mutable default ``nodes=[]`` is only read, never
    mutated, so it is harmless here -- but ``None`` would be safer.
    """
    if nodes == []:
        nodes = td.GetNodes()
    node_extracted = [inode for inode in nodes if submodel_name == inode.Submodel.Name and node_id == inode.Id]
    if len(node_extracted) == 1:
        return node_extracted[0].Handle
    elif len(node_extracted) == 0:
        error_message = submodel_name + '.' + str(node_id) + ' does not exist.'
        raise ValueError(error_message)
    elif len(node_extracted) > 1:
        error_message = submodel_name + '.' + str(node_id) + ' is assigned to multiple nodes.'
        raise ValueError(error_message)
def get_node(td: OpenTDv62.ThermalDesktop, submodel_name: str, node_id: int, nodes: list=[]):
    """Return the node object identified by submodel name and id.

    Same lookup as get_node_handle(), but returns the node itself rather
    than its Handle.  Raises ValueError when the node does not exist or the
    id matches more than one node.
    """
    if nodes == []:
        nodes = td.GetNodes()
    node_extracted = [inode for inode in nodes if submodel_name == inode.Submodel.Name and node_id == inode.Id]
    if len(node_extracted) == 1:
        return node_extracted[0]
    elif len(node_extracted) == 0:
        error_message = submodel_name + '.' + str(node_id) + ' does not exist.'
        raise ValueError(error_message)
    else:
        error_message = submodel_name + '.' + str(node_id) + ' is assigned to multiple nodes.'
        raise ValueError(error_message)
def get_submodel_nodes(td: OpenTDv62.ThermalDesktop, submodel_name: str) -> list:
    """Return all nodes whose submodel name equals *submodel_name*.

    Nodes are returned in the order GetNodes() yields them (not sorted by
    id -- see the commented-out sort below).
    """
    nodes = td.GetNodes()
    submodel_nodes = [inode for inode in nodes if submodel_name == inode.Submodel.Name]
    # sorted_nodes = sorted(submodel_nodes, key=lambda node: node.id)
    return submodel_nodes
def renumber_submodel_nodes(td: OpenTDv62.ThermalDesktop, submodel_name: str):
    """Re-assign sequential ids (1, 2, ...) to all nodes of a submodel.

    Ids follow the enumeration order of GetNodes(); each node is updated
    in the Thermal Desktop model immediately.
    """
    submodel_nodes = get_submodel_nodes(td, submodel_name)
    for i, inode in enumerate(submodel_nodes):
        inode.Id = i+1
        inode.Update()
def merge_submodel_nodes(td: OpenTDv62.ThermalDesktop, submodel_name: str, tolerance: float=0.1e-3, node_nums: list=[]):
    """Merge coincident nodes of a submodel within *tolerance* metres.

    When *node_nums* is empty, every node of the submodel is considered;
    otherwise only the listed node ids are merged.  The first-selected node
    of each coincident group is kept.  Raises ValueError when a requested
    id is missing or ambiguous.
    """
    merge = OpenTDv62.MergeNodesOptionsData()
    merge.KeepMethod = OpenTDv62.MergeNodesOptionsData.KeepMethods.FIRST_SELECTED
    # .NET list of node handles to feed to the merge operation.
    nodes = List[str]()
    submodel_nodes = get_submodel_nodes(td, submodel_name)
    if node_nums == []:
        for node in submodel_nodes:
            nodes.Add(node.Handle)
    else:
        for num in node_nums:
            node_extract = [inode for inode in submodel_nodes if num == inode.Id]
            if len(node_extract) == 1:
                nodes.Add(node_extract[0].Handle)
            else:
                raise ValueError('The given nodeID does not exist or assigned to multiple nodes')
    merge.NodeHandles = nodes
    merge.Tolerance = OpenTDv62.Dimension.Dimensional[OpenTDv62.Dimension.ModelLength](tolerance)
    td.MergeNodes(merge)
def create_node(td: OpenTDv62.ThermalDesktop, data: pd.Series):
    """Create one Thermal Desktop node from a CSV row.

    Expected columns: 'Submodel name', 'ID', 'Initial temperature [K]',
    'X [mm]', 'Y [mm]', 'Z [mm]', 'Mass volume [J/K]', 'Node type'
    (DIFFUSION / ARITHMETIC / BOUNDARY / CLONE) and 'Comment'.
    Raises ValueError on an unknown node type.
    """
    node = td.CreateNode()
    node.Submodel = OpenTDv62.SubmodelNameData(data['Submodel name'])
    node.Id = data['ID']
    node.InitialTemp = OpenTDv62.Dimension.Dimensional[OpenTDv62.Dimension.Temp](data['Initial temperature [K]']*1.0)
    # Coordinates are given in mm; Thermal Desktop wants metres.
    node.Origin = OpenTDv62.Point3d(data['X [mm]']*0.001, data['Y [mm]']*0.001, data['Z [mm]']*0.001)
    node.MassVol = data['Mass volume [J/K]']*1.0
    if data['Node type'] == 'DIFFUSION':
        node.NodeType = OpenTDv62.RcNodeData.NodeTypes.DIFFUSION
    elif data['Node type'] == 'ARITHMETIC':
        node.NodeType = OpenTDv62.RcNodeData.NodeTypes.ARITHMETIC
    elif data['Node type'] == 'BOUNDARY':
        node.NodeType = OpenTDv62.RcNodeData.NodeTypes.BOUNDARY
    elif data['Node type'] == 'CLONE':
        node.NodeType = OpenTDv62.RcNodeData.NodeTypes.CLONE
    else:
        raise ValueError('Unexpected node type')
    node.Comment = str(data['Comment'])
    node.Update()
def create_nodes(td: OpenTDv62.ThermalDesktop, file_name: str):
    """Create one node per row of the CSV file *file_name* (see create_node)."""
    df = pd.read_csv(file_name)
    for index, data in df.iterrows():
        create_node(td, data)
def create_conductor(td: OpenTDv62.ThermalDesktop, data: pd.Series, nodes: list=[]):
    """Create one conductor between two existing nodes from a CSV row.

    Expected columns: 'From submodel', 'From ID', 'To submodel', 'To ID',
    'Submodel', 'Conductance [W/K]' and 'Comment'.  *nodes* can be passed
    to avoid re-fetching the node list for every row.
    """
    if nodes == []:
        nodes = td.GetNodes()
    fr_handle = OpenTDv62.Connection(get_node_handle(td, data['From submodel'], data['From ID'], nodes))
    to_handle = OpenTDv62.Connection(get_node_handle(td, data['To submodel'], data['To ID'], nodes))
    cond = td.CreateConductor(fr_handle, to_handle)
    cond.Submodel = OpenTDv62.SubmodelNameData(data['Submodel'])
    cond.Value = data['Conductance [W/K]']*1.0
    cond.Comment = str(data['Comment'])
    cond.Update()
def create_conductors(td: OpenTDv62.ThermalDesktop, file_name):
    """Create one conductor per row of the CSV file (see create_conductor).

    The node list is fetched once and shared across rows for speed.
    """
    df = pd.read_csv(file_name)
    nodes = td.GetNodes()
    for index, data in df.iterrows():
        create_conductor(td, data, nodes)
def delete_contactors(td: OpenTDv62.ThermalDesktop):
    """Delete every contactor in the model."""
    contactors = td.GetContactors()
    for contactor in contactors:
        entity = OpenTDv62.TdDbEntityData(contactor.Handle)
        td.DeleteEntity(entity)
def delete_conductors(td: "OpenTDv62.ThermalDesktop"):
    """Delete every conductor in the model.

    Fix: local variables were copy-pasted from delete_contactors() and
    misleadingly named 'contactor' while iterating conductors; renamed for
    clarity (behavior unchanged).
    """
    conductors = td.GetConductors()
    for conductor in conductors:
        entity = OpenTDv62.TdDbEntityData(conductor.Handle)
        td.DeleteEntity(entity)
def delete_heaters(td: OpenTDv62.ThermalDesktop):
    """Delete every heater in the model."""
    heaters = td.GetHeaters()
    for heater in heaters:
        entity = OpenTDv62.TdDbEntityData(heater.Handle)
        td.DeleteEntity(entity)
def delete_heatloads(td: OpenTDv62.ThermalDesktop):
    """Delete every heat load in the model."""
    heatloads = td.GetHeatLoads()
    for heatload in heatloads:
        entity = OpenTDv62.TdDbEntityData(heatload.Handle)
        td.DeleteEntity(entity)
def import_thermo_properties(td: OpenTDv62.ThermalDesktop, file_name: str):
    """Create/update one thermophysical property per row of the CSV file."""
    df = pd.read_csv(file_name)
    for index, data in df.iterrows():
        add_thermo_property(td, data)
def add_thermo_property(td: OpenTDv62.ThermalDesktop, data: pd.Series):
    """Create a thermophysical property from a CSV row, or update it if a
    property with the same 'Name' already exists.

    Expected columns: 'Name', 'Anisotropic', 'Comment', 'Density [kg/m3]',
    'Conductivity [W/mK]', 'ConductivityY [W/mK]', 'ConductivityZ [W/mK]'
    and 'Specific heat [J/kgK]'.
    """
    try:
        thermo = td.CreateThermoProps(data['Name'])
    except OpenTDv62.OpenTDException:
        # Creation fails when the name already exists; fall back to editing.
        thermo = td.GetThermoProps(data['Name'])
    thermo.Anisotropic = int(data['Anisotropic'])
    thermo.Comment = str(data['Comment'])
    thermo.Density = OpenTDv62.Dimension.Dimensional[OpenTDv62.Dimension.Density](float(data['Density [kg/m3]'])) # [kg/m3]
    thermo.Conductivity = OpenTDv62.Dimension.Dimensional[OpenTDv62.Dimension.CondPerLength](float(data['Conductivity [W/mK]'])) # [W/mK]
    thermo.ConductivityY = OpenTDv62.Dimension.Dimensional[OpenTDv62.Dimension.CondPerLength](float(data['ConductivityY [W/mK]'])) # [W/mK]
    thermo.ConductivityZ = OpenTDv62.Dimension.Dimensional[OpenTDv62.Dimension.CondPerLength](float(data['ConductivityZ [W/mK]'])) # [W/mK]
    thermo.SpecificHeat = OpenTDv62.Dimension.Dimensional[OpenTDv62.Dimension.SpecificHeat](float(data['Specific heat [J/kgK]'])) # [J/kgK]
    thermo.Update()
def import_optical_properties(td: OpenTDv62.ThermalDesktop, file_name: str):
    """Create/update one optical property per row of the CSV file."""
    df = pd.read_csv(file_name)
    for index, data in df.iterrows():
        add_optical_property(td, data)
def add_optical_property(td: OpenTDv62.ThermalDesktop, data: pd.Series):
    """Create an optical property from a CSV row, or update it if a property
    with the same 'Name' already exists.

    Expected columns: 'Name', 'Comment', 'Absorptivity', 'Emissivity'.
    """
    try:
        optical = td.CreateOpticalProps(data['Name'])
    except OpenTDv62.OpenTDException:
        # Creation fails when the name already exists; fall back to editing.
        optical = td.GetOpticalProps(data['Name'])
    optical.Comment = str(data['Comment'])
    optical.Alph = float(data['Absorptivity'])
    optical.Emis = float(data['Emissivity'])
    optical.Update()
def rotate_all(td: "OpenTDv62.ThermalDesktop", rotate: np.ndarray):
    """Rotate every surface and solid element in the model by the 3x3
    rotation matrix *rotate*.

    Bug fix: the helpers were called without their required arguments
    (``rotate_polygon(elem)`` / ``rotate_element(elem)``), which raised
    TypeError on the first element.  Pass *td* and *rotate* through as the
    helpers' signatures require.
    """
    elements = get_elements(td)
    for key in elements:
        for elem in elements[key]:
            if key == 'Polygon':
                # Polygons are re-created at the new position (their vertex
                # list is read-only), hence the extra td argument.
                rotate_polygon(td, elem, rotate)
            else:
                rotate_element(elem, rotate)
def get_elements(td: OpenTDv62.ThermalDesktop):
    """Return all geometric elements of the model keyed by element kind.

    Keys are the surface primitives ('Disk' ... 'Polygon') and the solid
    primitives ('SolidCylinder', 'SolidBrick', 'SolidSphere').
    """
    # create empty dictionary object
    elements = {}
    elements['Disk'] = td.GetDisks()
    elements['Cylinder'] = td.GetCylinders()
    elements['Rectangle'] = td.GetRectangles()
    elements['Cone'] = td.GetCones()
    elements['Sphere'] = td.GetSpheres()
    elements['Torus'] = td.GetToruses()
    elements['ScarfedCylinder'] = td.GetScarfedCylinders()
    elements['Polygon'] = td.GetPolygons()
    elements['SolidCylinder'] = td.GetSolidCylinders()
    elements['SolidBrick'] = td.GetSolidBricks()
    elements['SolidSphere'] = td.GetSolidSpheres()
    return elements
def rotate_element(element, rotate: np.ndarray):
    """Rotate one element in place by the 3x3 rotation matrix *rotate*.

    BaseTrans holds a [R | t] transform: entries [i][0..2] are the rotation
    part, entries [i][3] the origin.  Both are premultiplied by *rotate*
    and written back entry by entry (the CLR matrix type exposes no bulk
    assignment from numpy).
    """
    # Extract the current 3x3 rotation part ...
    base = np.array([[element.BaseTrans.entry[0][0], element.BaseTrans.entry[0][1], element.BaseTrans.entry[0][2]],
                     [element.BaseTrans.entry[1][0], element.BaseTrans.entry[1][1], element.BaseTrans.entry[1][2]],
                     [element.BaseTrans.entry[2][0], element.BaseTrans.entry[2][1], element.BaseTrans.entry[2][2]]])
    # ... and the current origin (translation column).
    origin = np.array([element.BaseTrans.entry[0][3], element.BaseTrans.entry[1][3], element.BaseTrans.entry[2][3]])
    new_origin = rotate @ origin
    new_base = rotate @ base
    # Write the rotated transform back element-wise.
    element.BaseTrans.entry[0][0] = new_base[0,0]
    element.BaseTrans.entry[1][0] = new_base[1,0]
    element.BaseTrans.entry[2][0] = new_base[2,0]
    element.BaseTrans.entry[0][1] = new_base[0,1]
    element.BaseTrans.entry[1][1] = new_base[1,1]
    element.BaseTrans.entry[2][1] = new_base[2,1]
    element.BaseTrans.entry[0][2] = new_base[0,2]
    element.BaseTrans.entry[1][2] = new_base[1,2]
    element.BaseTrans.entry[2][2] = new_base[2,2]
    element.BaseTrans.entry[0][3] = new_origin[0]
    element.BaseTrans.entry[1][3] = new_origin[1]
    element.BaseTrans.entry[2][3] = new_origin[2]
    element.Update()
def rotate_polygon(td: OpenTDv62.ThermalDesktop, polygon: OpenTDv62.RadCAD.Polygon, rotate: np.ndarray):
    """
    The polygon property "Vertices" is not settable. Therefore, it is not possible to move the existing polygon.
    With this function, a new polygon is created at a new position with the
    original's attributes copied over, and the original polygon is deleted.
    """
    new_edges = List[OpenTDv62.Point3d]()
    # NOTE(review): only the first half of Vertices is used -- presumably
    # the collection stores each vertex twice (top/bottom face); confirm
    # against the OpenTD documentation.
    for i in range(len(polygon.Vertices)//2):
        poly = polygon.Vertices[i]
        point = np.array([poly.X.GetValueSI(),poly.Y.GetValueSI(),poly.Z.GetValueSI()])
        new_point = rotate @ point
        new_edges.Add(OpenTDv62.Point3d(new_point[0], new_point[1], new_point[2]))
    new_polygon = td.CreatePolygon(new_edges)
    # Carry over numbering, meshing, optics, material and bookkeeping.
    new_polygon.TopStartSubmodel = polygon.TopStartSubmodel
    new_polygon.TopStartId = polygon.TopStartId
    new_polygon.BreakdownU.Num = polygon.BreakdownU.Num
    new_polygon.BreakdownV.Num = polygon.BreakdownV.Num
    new_polygon.TopOpticalProp = polygon.TopOpticalProp
    new_polygon.BotOpticalProp = polygon.BotOpticalProp
    new_polygon.TopMaterial = polygon.TopMaterial
    new_polygon.TopThickness = polygon.TopThickness
    new_polygon.AnalysisGroups = polygon.AnalysisGroups
    new_polygon.CondSubmodel = polygon.CondSubmodel
    new_polygon.ColorIndex = polygon.ColorIndex
    new_polygon.Comment = polygon.Comment
    new_polygon.Update()
    # Remove the original polygon now that the rotated copy exists.
    td.DeleteEntity(OpenTDv62.TdDbEntityData(polygon.Handle))
def issurface(element):
    """Return True if *element* is one of the RadCAD surface primitives.

    Idiom fix: collapsed the if/elif ladder into a single isinstance()
    call with a tuple of types (behavior identical).
    """
    surface_types = (
        OpenTDv62.RadCAD.Disk,
        OpenTDv62.RadCAD.Cylinder,
        OpenTDv62.RadCAD.Rectangle,
        OpenTDv62.RadCAD.Cone,
        OpenTDv62.RadCAD.Sphere,
        OpenTDv62.RadCAD.Torus,
        OpenTDv62.RadCAD.ScarfedCylinder,
        OpenTDv62.RadCAD.Polygon,
    )
    return isinstance(element, surface_types)
def issolid(element):
    """Return True if *element* is one of the RadCAD solid primitives.

    Idiom fix: collapsed the if/elif ladder into a single isinstance()
    call with a tuple of types (behavior identical).
    """
    solid_types = (
        OpenTDv62.RadCAD.FdSolid.SolidCylinder,
        OpenTDv62.RadCAD.FdSolid.SolidBrick,
        OpenTDv62.RadCAD.FdSolid.SolidSphere,
    )
    return isinstance(element, solid_types)
def get_element(td: OpenTDv62.ThermalDesktop, submodel_name: str, element_type: str, comment: str, elements: dict={}):
    """Return the unique element matching submodel name, type and comment.

    Surfaces are matched on TopStartSubmodel, solids on StartSubmodel; the
    comment acts as the element's identifying label.  *elements* may be
    passed (as returned by get_elements) to avoid re-querying the model.
    Raises ValueError for an unknown type, no match, or multiple matches.
    """
    if elements == {}:
        elements = get_elements(td)
    if element_type in ['Disk', 'Cylinder', 'Rectangle', 'Cone', 'Sphere', 'Torus', 'ScarfedCylinder', 'Polygon']:
        element = [ielement for ielement in elements[element_type] if submodel_name == ielement.TopStartSubmodel.Name and comment == ielement.Comment]
    elif element_type in ['SolidCylinder', 'SolidBrick', 'SolidSphere']:
        element = [ielement for ielement in elements[element_type] if submodel_name == ielement.StartSubmodel.Name and comment == ielement.Comment]
    else:
        raise ValueError('Unexpected element type')
    num = len(element)
    if num == 1:
        return element[0]
    elif num == 0:
        raise ValueError('the requested element does not exist')
    else:
        raise ValueError('multiple elements are found with the same designation')
def create_contactors(td: OpenTDv62.ThermalDesktop, file_name: str):
    """Create one contactor per row of the CSV file (see create_contactor)."""
    df = pd.read_csv(file_name)
    for index, data in df.iterrows():
        create_contactor(td, data)
def create_contactor(td: OpenTDv62.ThermalDesktop, data: pd.Series):
    """Create a contactor from a CSV row that may list several from/to sides.

    The first from/to element comes from the 'FromSubmodel'/'FromElement'/
    'FromComment'/'Marker' and 'ToSubmodel'/'ToElement'/'ToComment'
    columns; additional sides use pandas' duplicated-column suffixes
    ('FromSubmodel.1', '.2', ...) and are read until a suffix is missing
    or its cells are NaN.
    """
    elements = get_elements(td)
    contfrom = List[OpenTDv62.Connection]()
    contfrom.Add(OpenTDv62.Connection(get_element(td, data['FromSubmodel'], data['FromElement'], str(data['FromComment']), elements), int(data['Marker'])))
    i = int(1)
    while True:
        try:
            submodel = str(data[f'FromSubmodel.{i}'])
            element = str(data[f'FromElement.{i}'])
            comment = str(data[f'FromComment.{i}'])
            marker = int(data[f'Marker.{i}'])
            if submodel=='nan' or element=='nan' or comment=='nan': raise ValueError
        # NOTE(review): bare except doubles as the loop exit for both a
        # missing column (KeyError) and the NaN sentinel raised above; a
        # narrower `except (KeyError, ValueError):` would be safer.
        except:
            break
        else:
            contfrom.Add(OpenTDv62.Connection(get_element(td, submodel, element, comment, elements), marker))
            i = i + 1
    contto = List[OpenTDv62.Connection]()
    contto.Add(OpenTDv62.Connection(get_element(td, data['ToSubmodel'], data['ToElement'], str(data['ToComment']), elements)))
    j = int(1)
    while True:
        try:
            submodel = str(data[f'ToSubmodel.{j}'])
            element = str(data[f'ToElement.{j}'])
            comment = str(data[f'ToComment.{j}'])
            if submodel=='nan' or element=='nan' or comment=='nan': raise ValueError
        except:
            break
        else:
            contto.Add(OpenTDv62.Connection(get_element(td, submodel, element, comment, elements)))
            j = j + 1
    cont = td.CreateContactor(contfrom, contto)
    cont.ContactCond = data['Conductance [W/K]'] # [W/K]
    if data['UseFace'] == 'Face':
        cont.UseFace = 1 # 1:Face
    elif data['UseFace'] == 'Edges':
        cont.UseFace = 0 # 0:Edges
    cont.CondSubmodel = OpenTDv62.SubmodelNameData(data['FromSubmodel'])
    if data['InputValueType'] == 'PER_AREA_OR_LENGTH':
        cont.InputValueType = OpenTDv62.RcConnData.ContactorInputValueTypes.PER_AREA_OR_LENGTH
    elif data['InputValueType'] == 'ABSOLUTE_COND_REDUCED_BY_UNCONNECTED':
        cont.InputValueType = OpenTDv62.RcConnData.ContactorInputValueTypes.ABSOLUTE_COND_REDUCED_BY_UNCONNECTED
    elif data['InputValueType'] == 'ABSOLUTE_ADJUST_FOR_UNCONNECTED':
        cont.InputValueType = OpenTDv62.RcConnData.ContactorInputValueTypes.ABSOLUTE_ADJUST_FOR_UNCONNECTED
    cont.Name = str(data['Name'])
    cont.Update()
def set_variable_nodetemp(filename: str, td: OpenTDv62.ThermalDesktop, submodel: str, nodeid: int, columnname1: str='Time [s]', columnname2: str='Data [K]'):
    """Attach a time-varying boundary temperature (from a CSV file) to a node.

    The CSV must contain a time column (*columnname1*, seconds) and a
    temperature column (*columnname2*, Kelvin).  The node's steady-state
    behavior is set to use the initial temperature, which is taken from the
    first CSV row.  Raises ValueError when the node is missing or ambiguous.
    """
    bc = pd.read_csv(filename)
    nodes = td.GetNodes()
    node_list = [inode for inode in nodes if inode.Submodel.Name==submodel and inode.Id==nodeid]
    if len(node_list) == 1:
        node = node_list[0]
    elif len(node_list) == 0:
        raise ValueError('The requested node does not exist')
    else:
        raise ValueError('The given node_id is assigned to multiple nodes')
    # Build .NET lists of the time/temperature samples.
    addlist_time = List[float]()
    addlist_temp = List[float]()
    for itime in bc[columnname1]:
        addlist_time.Add(itime)
    for itemp in bc[columnname2]:
        addlist_temp.Add(itemp)
    timelist = OpenTDv62.Dimension.DimensionalList[OpenTDv62.Dimension.Time]()
    timelist.AddRange(addlist_time)
    node.TimeArray = timelist
    templist = OpenTDv62.Dimension.DimensionalList[OpenTDv62.Dimension.Temp]()
    templist.AddRange(addlist_temp)
    node.ValueArray = templist
    node.SteadyStateBoundaryType = OpenTDv62.RcNodeData.SteadyStateBoundaryTypes.INITIAL_TEMP
    node.InitialTemp = OpenTDv62.Dimension.Dimensional[OpenTDv62.Dimension.Temp](bc[columnname2][0])
    node.UseVersusTime = 1
    node.Update()
def set_variable_heatload(filename: str, td: OpenTDv62.ThermalDesktop, submodel: str, comment: str, columnname1: str='Time [s]', columnname2: str='Data [W]'):
    """Attach a time-varying heat load (from a CSV file) to an existing load.

    The heat load is looked up by submodel name and its Name field
    (*comment*).  The CSV must contain a time column (*columnname1*,
    seconds) and a power column (*columnname2*, Watts).  Raises ValueError
    when the heat load is missing or ambiguous.
    """
    bc = pd.read_csv(filename)
    heats = td.GetHeatLoads()
    heat_list = [iheat for iheat in heats if iheat.Submodel.Name==submodel and iheat.Name==comment]
    if len(heat_list) == 1:
        heat = heat_list[0]
    elif len(heat_list) == 0:
        raise ValueError('The requested heatload does not exist')
    else:
        raise ValueError('The given name is assigned to multiple heatloads')
    # Configure the load as a node-applied, time-interpolated transient.
    heat.TempVaryType = OpenTDv62.RcHeatLoadData.HeatLoadTypes.LOAD
    heat.AppliedType = OpenTDv62.RcHeatLoadData.AppliedTypeBoundaryConds.NODE
    heat.HeatLoadTransientType = OpenTDv62.RcHeatLoadData.HeatLoadTransientTypes.TIME_VARY_HEAT_LOAD
    heat.TimeDependentSteadyStateType = OpenTDv62.RcHeatLoadData.TimeDependentSteadyStateTypes.TIME_INTERP
    addlist_time = List[float]()
    addlist_heat = OpenTDv62.ListSI()
    for itime in bc[columnname1]:
        addlist_time.Add(itime)
    for iheat in bc[columnname2]:
        addlist_heat.Add(iheat)
    timelist = OpenTDv62.Dimension.DimensionalList[OpenTDv62.Dimension.Time]()
    timelist.AddRange(addlist_time)
    heat.TimeArray = timelist
    heat.ValueArraySI = addlist_heat
    heat.Update()
def screenshot(td: "OpenTDv62.ThermalDesktop", name: str, view: str='', style: str='XRAY', imageformat: str='png', width: int=0, height: int=0):
    """Capture the Thermal Desktop graphics area to an image file.

    *view* selects an isometric ('SW'/'SE'/'NE'/'NW') or orthographic
    ('TOP'/'BOTTOM'/'FRONT'/'BACK'/'LEFT'/'RIGHT') camera (anything else
    leaves the current view).  *style* names a visual style (default XRAY).
    A positive *width*/*height* centre-crops the capture on that axis.
    The file is saved as ``name`` + an extension derived from
    *imageformat*; ValueError is raised for an unknown format.
    """
    td.SendCommand('CLEANSCREENON ')
    # IsoViews enumeration
    iso_dict = {'SW': 0, 'SE': 1, 'NE': 2, 'NW': 3}
    # OrthoViews enumeration
    ortho_dict = {'TOP': 0, 'BOTTOM': 1,
                  'FRONT': 2, 'BACK': 3,
                  'LEFT': 4, 'RIGHT': 5}
    if view in iso_dict:
        td.RestoreIsoView(iso_dict[view])
        td.ZoomExtents()
    elif view in ortho_dict:
        td.RestoreOrthoView(ortho_dict[view])
        td.ZoomExtents()
    # VisualStyles enumeration
    style_dict = {'WIRE_2D': 0, 'WIRE': 1, 'CONCEPTUAL': 2,
                  'HIDDEN': 3, 'REALISTIC': 4, 'SHADED': 5,
                  'SHADED_W_EDGES': 6, 'SHADES_OF_GREY': 7, 'SKETCHY': 8,
                  'THERMAL': 9, 'THERMAL_PP': 10, 'XRAY': 11}
    if style in style_dict:
        td.SetVisualStyle(style_dict[style])
    else:
        td.SetVisualStyle(OpenTDv62.VisualStyles.XRAY)
    # screenshot
    bmp = td.CaptureGraphicsArea()
    # Centre-crop to the requested size.
    # Bug fix: two branches called RectangleF with a single tuple argument
    # (RectangleF((x, y, w, h))), which does not match the .NET
    # RectangleF(Single, Single, Single, Single) constructor; all branches
    # now pass the four floats positionally.
    if width <= 0 and height <= 0:
        # no trimming requested
        pass
    elif width <= 0 and height > 0:
        if bmp.Height > height:
            rect = RectangleF(0.0, (bmp.Height - height) / 2, bmp.Width * 1.0, height * 1.0)
            bmp = bmp.Clone(rect, bmp.PixelFormat)
    elif width > 0 and height <= 0:
        if bmp.Width > width:
            rect = RectangleF((bmp.Width - width) / 2, 0.0, width * 1.0, bmp.Height * 1.0)
            bmp = bmp.Clone(rect, bmp.PixelFormat)
    else:
        # NOTE(review): unlike the single-axis branches, this branch does
        # not guard against the bitmap being smaller than the requested
        # crop (preserved from the original).
        rect = RectangleF((bmp.Width - width) / 2, (bmp.Height - height) / 2, width * 1.0, height * 1.0)
        bmp = bmp.Clone(rect, bmp.PixelFormat)
    # Save in the requested format; extension follows the format name.
    format_map = {
        'png': ('.png', ImageFormat.Png),
        'jpeg': ('.jpeg', ImageFormat.Jpeg),
        'jpg': ('.jpeg', ImageFormat.Jpeg),
        'bmp': ('.bmp', ImageFormat.Bmp),
        'emf': ('.emf', ImageFormat.Emf),
        'wmf': ('.wmf', ImageFormat.Wmf),
        'tiff': ('.tiff', ImageFormat.Tiff),
        'gif': ('.gif', ImageFormat.Gif),
        'exif': ('.exif', ImageFormat.Exif),
        'icon': ('.icon', ImageFormat.Icon),
    }
    if imageformat not in format_map:
        # Matches the original behavior: the screen is left in clean-screen
        # mode when the format is rejected.
        raise ValueError('unexpected imageformat')
    ext, fmt = format_map[imageformat]
    bmp.Save(name + ext, fmt)
    td.SendCommand('CLEANSCREENOFF ')
54602 | <reponame>dsroche/obliv<filename>tests/test_fstore.py
#!/usr/bin/env python3
"""Test program for the fstore class."""
import unittest
import tempfile
import random
from obliv import fstore
def randbytes(s):
    """Return *s* pseudo-random bytes, one getrandbits(8) call per byte.

    Uses the module-level `random` state, so results are reproducible
    after random.seed(...).
    """
    out = bytearray()
    for _ in range(s):
        out.append(random.getrandbits(8))
    return bytes(out)
class TestFstore(unittest.TestCase):
    """Exercises obliv.fstore against a temporary directory backing store."""

    def setUp(self):
        # Fresh temporary directory per test, removed automatically afterwards.
        tdobj = tempfile.TemporaryDirectory()
        self.addCleanup(tdobj.cleanup)
        self.dirname = tdobj.name
        # Fixed seed keeps the pseudo-random payloads reproducible.
        random.seed(0xf00dface)

    def test_fstore_small(self):
        """Insert, overwrite, reopen, delete and append on a small store."""
        n = 21 # number of files
        s = 213 # size of each file in bytes
        check = [randbytes(s) for _ in range(n)]
        with fstore.fstore(self.dirname) as fs:
            # insert everything
            for dat in check:
                fs.append(dat)
            # change 2 values
            i1, i2 = random.sample(range(n), 2)
            check[i1] = randbytes(s)
            check[i2] = randbytes(s)
            fs[i1] = check[i1]
            fs[i2] = check[i2]
            # check everything in random order
            for i in random.sample(range(n), n):
                self.assertEqual(check[i], fs[i])
        # re-open to verify the data survived a close/open cycle
        with fstore.fstore(self.dirname) as fs:
            # check everything in random order
            for i in random.sample(range(n), n):
                self.assertEqual(check[i], fs[i])
            # delete some things
            for _ in range(3):
                del check[-1]
                del fs[-1]
                n -= 1
            self.assertEqual(len(fs), n)
            # insert some new things
            for _ in range(5):
                check.append(randbytes(s))
                fs.append(check[-1])
                n += 1
            self.assertEqual(len(fs), n)
            # check everything in random order
            for i in random.sample(range(n), n):
                self.assertEqual(check[i], fs[i])

    def test_fstore_sizes(self):
        """Repeatedly rebuild stores of random sizes and spot-check them."""
        maxn = 100 # max number of files
        s = 51 # size of each file in bytes
        check = [randbytes(s) for _ in range(maxn)]
        for _ in range(100):
            n = random.randrange(maxn)
            with fstore.fstore(self.dirname) as fs:
                # insert everything
                fs.extend(check[:n])
            with fstore.fstore(self.dirname) as fs:
                # check len, first and last
                self.assertEqual(n, len(fs))
                if n:
                    self.assertEqual(check[0], fs[0])
                    self.assertEqual(check[n-1], fs[-1])
                # delete everything
                while fs:
                    del fs[-1]
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1701008 |
GLOBAL_DISCOUNT_PRODUCT = ('E', 'B', 'N', 'M', 'R', 'Q', 'S','T','X','Y','Z')
class Discount(object):
    """A purchase promotion: buy `discount_purchase` items for the price
    `discount_receive` (or get them free when `free` is True).

    `ref_skus` and `occurence` are carried through for bundle/occurrence
    promotions.  (NOTE(review): getters are kept for interface
    compatibility; plain attribute access would be more Pythonic.)
    """

    def __init__(self, discount_purchase=None, discount_receive=None, ref_skus="", occurence=0, free=False):
        self.discount_purchase = discount_purchase
        self.discount_receive = discount_receive
        self.ref_skus = ref_skus
        self.free = free
        self.occurence = occurence

    def get_discount_purchase(self):
        """Number of items that must be bought to trigger the discount."""
        return self.discount_purchase

    def get_discount_receive(self):
        """Price charged for a full discount bundle."""
        return self.discount_receive

    def __str__(self):
        # Bug fix: __str__ previously *printed* the message and implicitly
        # returned None, so str(discount) raised
        # "TypeError: __str__ returned non-string".  Return the string.
        return 'discount_purchase: {}, discount_receive: {}'.format(self.discount_purchase, self.discount_receive)
class Skus(object):
    def __init__(self, product_name, price):
        """One checkout line: a product, its unit price, and any discounts.

        `number_of_items` starts at 1 and is incremented per scanned unit;
        `discounts` holds Discount objects applicable to this product.
        """
        self.product_name = product_name
        self.price = price
        self.discounts = []
        self.number_of_items = 1
return self.product_name
def get_price(self):
total = -1
result = -1
for discount in self.discounts:
if discount.discount_purchase and not(discount.free):
total = self.get_discount(discount)
if total != -1:
if result == -1 or result > total:
print('ICI1111: {}'.format(total))
result = total
if len(self.discounts) > 1:
res = self.compute_discount()
print('compute_discount :res {} result {}'.format(res, result))
if result == -1 or result > res:
result = res
if result != -1:
return result
else:
return self.number_of_items * self.price
def get_discount(self, discount):
total = -1
if self.number_of_items >= discount.discount_purchase:
if (self.number_of_items % discount.discount_purchase) == 0:
total = (discount.discount_receive * (self.number_of_items / discount.discount_purchase))
else:
total = (discount.discount_receive * (self.number_of_items // discount.discount_purchase) +
((self.number_of_items % discount.discount_purchase) * self.price))
return total
def get_exact_match_discount(self, discount, reste):
total = 0
rescount = reste
if self.number_of_items >= discount.discount_purchase:
rescount = reste % discount.discount_purchase
if (rescount) == 0:
total = (discount.discount_receive * (reste / discount.discount_purchase))
else:
total = (discount.discount_receive * (reste // discount.discount_purchase))
return total, rescount
def compute_discount(self):
best_discount = self.discounts[1]
total1 = 0
total2 = 0
(total1, reste) = self.get_exact_match_discount(best_discount, self.number_of_items)
if reste > 0:
best_discount = self.discounts[0]
(total2, reste) = self.get_exact_match_discount(best_discount, reste)
if reste > 0:
total2 += reste * self.price
return total1 + total2
# def get_price(self):
# result = -1
# total = -1
# for discount in self.discounts:
# if discount.discount_purchase and not(discount.free):
# if self.number_of_items >= discount.discount_purchase:
# if (self.number_of_items % discount.discount_purchase) == 0:
# total = (discount.discount_receive * (self.number_of_items / discount.discount_purchase))
# else:
# total = (discount.discount_receive * (self.number_of_items // discount.discount_purchase) + ((self.number_of_items % discount.discount_purchase) * self.price))
# if total != -1:
# if result == -1 or result > total:
# result = total
# if result != -1:
# return result
# else:
# return self.number_of_items * self.price
def get_number_of_items(self):
return self.number_of_items
def add_discount(self, discount):
self.discounts.append(discount)
def get_discounts(self):
return self.discounts
def increment_number_of_items(self):
self.number_of_items += 1
class Basket():
    """A shopping basket that aggregates Skus line items and computes the
    checkout total, including cross-product and group offers."""

    def __init__(self):
        self.items_list = list()
        # Unit counters for the cross-product offers involving E and B.
        self.e_number = 0
        self.b_number = 0

    def add_item(self, item):
        """Add one unit of `item`, merging with the existing line item
        for the same product when one is present."""
        current_item = item.get_product_name()
        existing = None
        for el in self.items_list:
            if el.get_product_name() == current_item:
                existing = el
                break
        if existing is not None:
            existing.increment_number_of_items()
        else:
            self.items_list.append(item)
        if current_item == 'E':
            self.e_number += 1
        elif current_item == 'B':
            self.b_number += 1

    def get_items(self):
        return self.items_list

    def get_total(self):
        """Total of all line items priced individually; products covered
        by basket-level offers are excluded and priced elsewhere."""
        item_list = [item for item in self.items_list if item.product_name not in GLOBAL_DISCOUNT_PRODUCT]
        return sum(item.get_price() for item in item_list)

    def get_global_discount(self):
        """Total for the product pairs involved in 'buy X get Y free'
        offers: E/B, N/M and R/Q."""
        total = 0
        item_e = [sku for sku in self.items_list if sku.product_name == 'E']
        item_b = [sku for sku in self.items_list if sku.product_name == 'B']
        item_n = [sku for sku in self.items_list if sku.product_name == 'N']
        item_m = [sku for sku in self.items_list if sku.product_name == 'M']
        item_r = [sku for sku in self.items_list if sku.product_name == 'R']
        item_q = [sku for sku in self.items_list if sku.product_name == 'Q']
        total += self.get_linked_discounts(item_e, item_b)
        total += self.get_linked_discounts(item_n, item_m)
        total += self.get_linked_discounts(item_r, item_q)
        return total

    def get_linked_discounts(self, item_1, item_2):
        """Price `item_1` at full price and give away one `item_2` unit
        per qualifying group of `item_1` (e.g. "2E get one B free").

        NOTE: mutates item_2's quantities to account for the free units.
        """
        total = 0
        free = 0
        for sku in item_1:
            for discount in sku.get_discounts():
                total += sku.number_of_items * sku.price
                if sku.number_of_items >= discount.discount_purchase:
                    # BUG FIX: integer division keeps the free-item count
                    # an int; the original used true division on exact
                    # multiples and produced float quantities downstream.
                    free += discount.occurence * (sku.number_of_items // discount.discount_purchase)
        for sku in item_2:
            sku.number_of_items -= free
            if sku.number_of_items > 0:
                total += sku.get_price()
        return total

    def checkout(self, skus_string):
        """Price every SKU letter in `skus_string`.

        Returns -1 as soon as an unknown SKU is encountered, otherwise
        the grand total across individual, linked and group offers.
        """
        stock = build_stocks()
        for skus_name in skus_string:
            skus_objects = [item for item in stock if item.product_name == skus_name]
            if not skus_objects:
                return -1
            self.add_item(skus_objects[0])
        total = self.get_total()
        total_discount = self.get_global_discount()
        group_discount = self.group_discount()
        return total + total_discount + group_discount

    def get_price_reste(self, item, nb_item):
        """Price up to `nb_item` units out of line item `item`.

        Returns (price, units_still_to_price).  NOTE: shrinks
        item.number_of_items when the line is only partially consumed.
        """
        if nb_item >= item.number_of_items:
            total = item.get_price()
            nb_item -= item.number_of_items
        else:
            item.number_of_items = nb_item
            total = item.get_price()
            nb_item = 0
        return total, nb_item

    def group_discount(self):
        """Price the S/T/X/Y/Z group: one 'any 3 for 45' bundle, then the
        leftovers priced cheapest-first (X first, S/T/Y next, Z last)."""
        items = [item for item in self.items_list if item.product_name in ('S', 'T', 'X', 'Y', 'Z')]
        nb_item = 0
        total = 0
        for item in items:
            nb_item += item.number_of_items
        # NOTE(review): the bundle is applied at most once; confirm
        # whether it should repeat while nb_item >= 3.
        if nb_item >= 3:
            total += 45
            nb_item -= 3
        if nb_item > 0:
            items_x = [item for item in items if item.product_name == 'X']
            if len(items_x) > 0:
                res, nb_item = self.get_price_reste(items_x[0], nb_item)
                total += res
        if nb_item > 0:
            items_sty = [item for item in items if item.product_name in ('S', 'T', 'Y')]
            for it in items_sty:
                res, nb_item = self.get_price_reste(it, nb_item)
                total += res
                if nb_item <= 0:
                    break
        if nb_item > 0:
            items_z = [item for item in items if item.product_name == 'Z']
            for it in items_z:
                res, nb_item = self.get_price_reste(it, nb_item)
                total += res
                if nb_item <= 0:
                    break
        return total
# noinspection PyUnusedLocal
# skus = unicode string
def checkout(skus):
    """Price the basket described by `skus` (one letter per purchased
    item); returns -1 when an unknown SKU letter is present."""
    return Basket().checkout(skus)
def build_stocks():
    """Build the full catalogue of Skus with their attached discounts.

    Multi-buy offers price `discount_purchase` units at `discount_receive`;
    free-item offers (discount4/9/11) are resolved at basket level, and
    discount15 backs the "any 3 of S/T/X/Y/Z for 45" group offer.
    """
    stock = []
    # Multi-buy offers.
    discount1 = Discount(discount_purchase=3, discount_receive=130)
    discount2 = Discount(discount_purchase=2, discount_receive=45)
    discount3 = Discount(discount_purchase=5, discount_receive=200)
    # Free-item offer: buy 2 E, get one B free.
    discount4 = Discount(discount_purchase=2, ref_skus="B", free=True, occurence=1)
    discount5 = Discount(discount_purchase=3, discount_receive=20)
    discount6 = Discount(discount_purchase=5, discount_receive=45)
    discount7 = Discount(discount_purchase=10, discount_receive=80)
    discount8 = Discount(discount_purchase=2, discount_receive=150)
    # Free-item offer: buy 3 N, get one M free.
    discount9 = Discount(discount_purchase=3, ref_skus="M", free=True, occurence=1)
    discount10 = Discount(discount_purchase=3, discount_receive=80)
    # Free-item offer: buy 3 R, get one Q free.
    discount11 = Discount(discount_purchase=3, ref_skus="Q", free=True, occurence=1)
    discount12 = Discount(discount_purchase=4, discount_receive=120)
    discount13 = Discount(discount_purchase=2, discount_receive=90)
    discount14 = Discount(discount_purchase=3, discount_receive=130)
    # Group offer backing S, T, X, Y and Z.
    discount15 = Discount(discount_purchase=3, discount_receive=45)
    skus_a = Skus(product_name="A", price=50)
    skus_a.add_discount(discount1)
    skus_a.add_discount(discount3)
    skus_b = Skus(product_name="B", price=30)
    skus_b.add_discount(discount2)
    stock.append(skus_a)
    stock.append(skus_b)
    skus_c = Skus(product_name="C", price=20)
    skus_d = Skus(product_name="D", price=15)
    skus_e = Skus(product_name="E", price=40)
    skus_e.add_discount(discount4)
    skus_f = Skus(product_name="F", price=10)
    skus_f.add_discount(discount5)
    skus_g = Skus(product_name='G', price=20)
    skus_h = Skus(product_name='H', price=10)
    skus_h.add_discount(discount6)
    skus_h.add_discount(discount7)
    skus_i = Skus(product_name='I', price=35)
    skus_j = Skus(product_name='J', price=60)
    skus_k = Skus(product_name='K', price=70)
    skus_k.add_discount(discount8)
    skus_l = Skus(product_name='L', price=90)
    skus_m = Skus(product_name='M', price=15)
    skus_n = Skus(product_name='N', price=40)
    skus_n.add_discount(discount9)
    skus_o = Skus(product_name='O', price=10)
    skus_p = Skus(product_name='P', price=50)
    skus_p.add_discount(discount3)
    skus_q = Skus(product_name='Q', price=30)
    skus_q.add_discount(discount10)
    skus_r = Skus(product_name='R', price=50)
    skus_r.add_discount(discount11)
    skus_s = Skus(product_name='S', price=20)
    skus_s.add_discount(discount15)
    skus_t = Skus(product_name='T', price=20)
    skus_t.add_discount(discount15)
    skus_u = Skus(product_name='U', price=40)
    skus_u.add_discount(discount12)
    skus_v = Skus(product_name='V', price=50)
    skus_v.add_discount(discount13)
    skus_v.add_discount(discount14)
    skus_w = Skus(product_name='W', price=20)
    skus_x = Skus(product_name='X', price=17)
    skus_x.add_discount(discount15)
    skus_y = Skus(product_name='Y', price=20)
    skus_y.add_discount(discount15)
    skus_z = Skus(product_name='Z', price=21)
    skus_z.add_discount(discount15)
    stock.append(skus_c)
    stock.append(skus_d)
    stock.append(skus_e)
    stock.append(skus_f)
    stock.append(skus_g)
    stock.append(skus_h)
    stock.append(skus_i)
    stock.append(skus_j)
    stock.append(skus_k)
    stock.append(skus_l)
    stock.append(skus_m)
    stock.append(skus_n)
    stock.append(skus_o)
    stock.append(skus_p)
    stock.append(skus_q)
    stock.append(skus_r)
    stock.append(skus_s)
    stock.append(skus_t)
    stock.append(skus_u)
    stock.append(skus_v)
    stock.append(skus_w)
    stock.append(skus_x)
    stock.append(skus_y)
    stock.append(skus_z)
    return stock
6691998 | <gh_stars>10-100
"""empty message
Revision ID: 0029_fix_email_from
Revises: 0028_fix_reg_template_history
Create Date: 2016-06-13 15:15:34.035984
"""
# revision identifiers, used by Alembic.
revision = "0029_fix_email_from"
down_revision = "0028_fix_reg_template_history"
import sqlalchemy as sa
from alembic import op
# The single service whose email_from value is being corrected.
service_id = "d6aa2c68-a2d9-4437-ab19-3ae8eb202553"
def upgrade():
    """Set email_from to 'gov.uk.notify' for one service and its history row."""
    op.get_bind()
    op.execute("update services set email_from = 'gov.uk.notify' where id = '{}'".format(service_id))
    op.execute("update services_history set email_from = 'gov.uk.notify' where id = '{}'".format(service_id))
def downgrade():
    """No-op: the previous email_from value is not recoverable."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
| StarcoderdataPython |
3213974 | <reponame>deepshig/mastermind
import copy
from kripke_model import get_relations, generate_worlds, get_proposition
from codemaker import LENGTH_OF_CODE
from github_com.erohkohl.mlsolver.kripke import KripkeStructure
class KnowledgeManager:
    """
    Maintains the knowledge structure for the game.
    Initialises with a Kripke Model for the game.
    Keeps track of the real world, according to the secret code.
    Processes every move to update the knowledge structure accrodingly.
    """
    def __init__(self):
        # Full model: every possible code is a world, all mutually related.
        worlds = generate_worlds()
        relations = get_relations(worlds)
        self.model = KripkeStructure(worlds, relations)
    def get_real_world(self, code):
        """
        Initialises the real world in the Kripke Model
        from the secret code generated for the game.
        """
        real_world_assignment = get_assignment(code)
        for world in self.model.worlds:
            if world.assignment == real_world_assignment:
                self.real_world = world
                return
    def handle_move(self, move, feedback):
        """
        Handles each move and its feedback as a public
        announcement, and restricts the available model
        accordingly.
        """
        # feedback[i]: 1 = right colour in right position, 0 = right colour
        # in wrong position, -1 = colour absent from the code.
        for i in range(0, LENGTH_OF_CODE):
            if feedback[i] == 1:
                assignment = get_proposition(i+1, move[i])
                self.__handle_perfectly_correct_element(assignment)
            if feedback[i] == 0:
                assignment = get_proposition(i+1, move[i])
                self.__handle_wrongly_positioned_element(
                    assignment)
            if feedback[i] == -1:
                self.__handle_incorrect_element(move[i])
        return
    def __handle_perfectly_correct_element(self, assignment):
        """
        For correctly guessed color, remove the worlds
        which have some other color at this position
        and derive the reduced model.
        """
        # Iterate over a deep copy because removal mutates self.model.worlds.
        worlds = copy.deepcopy(self.model.worlds)
        for w in worlds:
            if not (assignment in w.assignment):
                self.model.remove_node_by_name(w.name)
        return
    def __handle_wrongly_positioned_element(self, assignment):
        """
        For wrongly positioned color, remove the worlds
        which have this color at this position
        and derive the reduced model.
        """
        worlds = copy.deepcopy(self.model.worlds)
        for w in worlds:
            if assignment in w.assignment:
                self.model.remove_node_by_name(w.name)
        return
    def __handle_incorrect_element(self, color_number):
        """
        For incorrectly guessed color, remove the worlds
        which have this color at any position
        and derive the reduced model.
        """
        worlds = copy.deepcopy(self.model.worlds)
        for w in worlds:
            for i in range(1, LENGTH_OF_CODE+1):
                assignment = get_proposition(i, color_number)
                # NOTE(review): a world containing the colour at several
                # positions would be removed more than once -- confirm
                # remove_node_by_name tolerates repeated removal.
                if assignment in w.assignment:
                    self.model.remove_node_by_name(w.name)
        return
def get_assignment(code):
    """
    Get world assignment from the code
    [1 2 3 4] -> {'1:yellow', '2:violet', '3:red', '4:green'}
    """
    return {get_proposition(position + 1, code[position]): True
            for position in range(LENGTH_OF_CODE)}
| StarcoderdataPython |
3269341 | #!/usr/bin/env python
from setuptools import setup, find_packages

# Package metadata for the BornRaytrace weak-lensing library.
# BUG FIX: removed the duplicated `import sys` / `import sys, os` pair and
# the stray debug print of the interpreter path that ran on every build.
setup(name='bornraytrace',
      version='0.2',
      description='Weak gravitational lensing: born raytrace maps, noise and intrinsic alignments',
      author='<NAME>',
      url='https://github.com/NiallJeffrey/BornRaytrace',
      packages=find_packages(),
      install_requires=[
        "numpy",
        "astropy",
        "healpy",
        "scipy",
        ])
| StarcoderdataPython |
3226523 | <reponame>zxteloiv/TrialBot<filename>trialbot/data/datasets/tsv_dataset.py
from trialbot.data.datasets.file_dataset import FileDataset
class TSVDataset(FileDataset):
    """A FileDataset whose lines are `sep`-separated records, optionally
    preceded by a title row of field names."""

    def __init__(self, filename, sep="\t", has_title=True, lazy=True):
        super().__init__(filename, lazy)
        self._sep = sep
        self._has_title = has_title
        # Field names from the title row, parsed lazily when needed.
        self._fields = None
        if (not self.lazy) and len(self._data) > 0:
            self._fields = self._data[0].split(self._sep)

    def get_example(self, i):
        """Return example `i` as a tuple of values (no title row) or as a
        field-name -> value dict (title row present)."""
        # BUG FIX: only skip the first row when it actually is a title
        # row.  The original always added 1, silently dropping the first
        # record of headerless files even though __len__ counted it.
        offset = 1 if self._has_title else 0
        example = super().get_example(i + offset)
        example_tuple = example.split(self._sep)
        if not self._has_title:
            return example_tuple
        if self._fields is None:
            self._fields = self._data[0].split(self._sep)
        return dict(zip(self._fields, example_tuple))

    def __len__(self):
        # The title row, when present, is not an example.
        length = super().__len__()
        if self._has_title:
            length -= 1
        return length
| StarcoderdataPython |
3263235 | <reponame>Arcensoth/mecha
from pathlib import Path
import pytest
from beet import Context, Function, run_beet
from pytest_insta import SnapshotFixture
from mecha import AstNode, CompilationDatabase, CompilationUnit, DiagnosticError, Mecha
from mecha.contrib.annotate_diagnostics import annotate_diagnostics
from mecha.contrib.bolt import Runtime
# Examples in bolt_examples.mcfunction are separated by "###" lines; wrap
# each chunk in a Function so it can be fed straight to Mecha.parse.
BOLT_EXAMPLES = [
    Function(source)
    for source in Path("tests/resources/bolt_examples.mcfunction")
    .read_text()
    .split("###\n")
]
@pytest.fixture(scope="session")
def ctx_bolt():
    # One beet context with the bolt plugin, shared by every example.
    with run_beet({"require": ["mecha.contrib.bolt"]}) as ctx:
        yield ctx
@pytest.mark.parametrize(
    "source",
    params := BOLT_EXAMPLES,
    ids=range(len(params)),
)
def test_parse(snapshot: SnapshotFixture, ctx_bolt: Context, source: Function):
    """Snapshot-test parsing (and codegen on success) of every bolt example."""
    mc = ctx_bolt.inject(Mecha)
    runtime = ctx_bolt.inject(Runtime)
    ast = None
    diagnostics = None
    try:
        ast = mc.parse(source)
    except DiagnosticError as exc:
        diagnostics = exc.diagnostics
    if ast:
        # Successful parse: snapshot the AST dump and the generated code.
        assert snapshot() == f"{source.text}---\n{ast.dump()}\n"
        text, output, refs = runtime.codegen(ast)
        text = text or "# Nothing\n"
        assert snapshot() == f"{text}---\noutput = {output}\n---\n" + "".join(
            f"_mecha_refs[{i}]\n{obj.dump(shallow=True) if isinstance(obj, AstNode) else repr(obj)}\n"
            for i, obj in enumerate(refs)
        )
    elif diagnostics:
        # Parse failure: snapshot the source annotated with diagnostics.
        database = CompilationDatabase()
        database[source] = CompilationUnit(source=source.text)
        annotate_diagnostics(database, diagnostics)
        assert snapshot() == source.text
| StarcoderdataPython |
class CrabNavy:
    """Crab submarine positions and the fuel needed to align them
    (Advent of Code 2021 day 7, part 1: fuel is linear in distance)."""

    def __init__(self, positions):
        self.positions = positions

    @classmethod
    def from_str(cls, positions_str):
        """Build a navy from a comma-separated string such as "16,1,2"."""
        return cls([int(c) for c in positions_str.split(",")])

    def calculate_consumption(self, alignment):
        """Total fuel to move every crab to position `alignment`."""
        return sum(abs(alignment - position) for position in self.positions)

    @property
    def ideal_alignment(self):
        """The cheapest alignment position.

        BUG FIX: the search range now includes max(positions); the
        original used range(min, max), which excluded the highest
        position from consideration and crashed (min_idx stayed None)
        when all crabs shared one position.
        """
        # min() returns the first position achieving the minimum,
        # preserving the original tie-breaking behaviour.
        return min(range(min(self.positions), max(self.positions) + 1),
                   key=self.calculate_consumption)

    @property
    def ideal_alignment_consumption(self):
        """Fuel consumed when aligning at the ideal position."""
        return self.calculate_consumption(self.ideal_alignment)
def main():
    # Solve part 1 for the hard-coded sample and for the puzzle input,
    # which is expected in a file named "input" (first line only).
    with open("input", "r") as f:
        lines_raw = f.read().splitlines()
    sample_navy = CrabNavy([16, 1, 2, 0, 4, 2, 7, 1, 2, 14])
    navy = CrabNavy.from_str(lines_raw[0])
    print(sample_navy.ideal_alignment)
    print(sample_navy.ideal_alignment_consumption)
    print(navy.ideal_alignment_consumption)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
# http test
import sys
from urllib.request import Request, urlopen
from datetime import datetime

try:
    url = 'http://www.naver.com'
    request = Request(url)
    # Close the connection deterministically once the body is read
    # (the original leaked the response object).
    with urlopen(request) as resp:
        resp_body = resp.read().decode('utf-8')  # responses arrive as bytes
    print(resp_body)  # this program is a browser, minus rendering of images/scripts
except Exception as e:
    # BUG FIX: error diagnostics belong on stderr; the original passed
    # sys.stdout explicitly, which is a no-op (stdout is the default).
    print('%s %s' % (e, datetime.now()), file=sys.stderr)
| StarcoderdataPython |
12802812 | from django.conf.urls import url
from django.contrib import admin
from .views import (
PostCreateAPIView,
PostDetailAPIView,
PostUpdateAPIView,
PostDeleteAPIView,
PostListAPIView
)
# Post API routes: list at the collection root, create under /create/,
# and detail/update/delete keyed by the post's slug.
urlpatterns = [
    url(r'^$', PostListAPIView.as_view(), name='list'),
    url(r'^create/$', PostCreateAPIView.as_view(), name='create'),
    url(r'^(?P<slug>[\w-]+)/$', PostDetailAPIView.as_view(), name='detail'),
    url(r'^(?P<slug>[\w-]+)/edit/$', PostUpdateAPIView.as_view(), name='update'),
    url(r'^(?P<slug>[\w-]+)/delete/$', PostDeleteAPIView.as_view(), name='delete'),
    # url(r'^posts/$', "<appname>.views.<function_name>"),
]
| StarcoderdataPython |
5097302 |
from .PacketFieldType import PacketFieldType
class ComplexFieldType(PacketFieldType):
    """A field type whose payload is itself a structured PacketFieldType."""

    @classmethod
    def _CreateInstance(cls):
        """
        Default is ComplexFieldType(PacketFieldType)
        """
        return cls(PacketFieldType)

    def __init__(self, dataType, attributes=None):
        super().__init__(attributes)
        self._dataType = dataType

    def dataType(self):
        return self._dataType

    def initializeData(self):
        # Lazily create a default payload only when none exists yet.
        if not self._data:
            self._data = self._dataType()
        # TODO: Warnings, Errors, etc?

    def _setTypedData(self, data):
        # Reject payloads that are not instances of the declared data type.
        if not isinstance(data, self._dataType):
            raise ValueError("Invalid data {} for ComplexFieldType. Must be of type {}.".format(data, self._dataType))
        super()._setTypedData(data)

    def __call__(self, newAttributes=None):
        # Clone this field type, overlaying newAttributes on the current ones.
        mergedAttributes = {}
        mergedAttributes.update(self._attributes)
        if newAttributes:
            mergedAttributes.update(newAttributes)
        return self.__class__(self._dataType, mergedAttributes)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self._dataType)
| StarcoderdataPython |
4958054 | <reponame>Aparna-Sakshi/sktime<filename>sktime/classification/compose/tests/test_column_ensemble.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Column ensemble test code."""
__author__ = ["TonyBagnall"]
import numpy as np
from numpy import testing
from sktime.classification.compose import ColumnEnsembleClassifier
from sktime.classification.dictionary_based import TemporalDictionaryEnsemble
from sktime.classification.feature_based import FreshPRINCE
from sktime.classification.interval_based import DrCIF
from sktime.datasets import load_basic_motions, load_unit_test
def test_col_ens_on_basic_motions():
    """Test of ColumnEnsembleClassifier on basic motions data."""
    # load basic motions data
    X_train, y_train = load_basic_motions(split="train")
    X_test, y_test = load_basic_motions(split="test")
    indices = np.random.RandomState(4).choice(len(y_train), 10, replace=False)
    fp = FreshPRINCE(
        random_state=0,
        default_fc_parameters="minimal",
        n_estimators=10,
    )
    tde = TemporalDictionaryEnsemble(
        n_parameter_samples=10,
        max_ensemble_size=5,
        randomly_selected_params=5,
        random_state=0,
    )
    drcif = DrCIF(n_estimators=10, random_state=0, save_transformed_data=True)
    # Each estimator handles a disjoint subset of the six data columns.
    estimators = [
        ("FreshPrince", fp, [0, 1, 2]),
        ("TDE", tde, [3, 4]),
        ("DrCIF", drcif, [5]),
    ]
    # train column ensemble
    col_ens = ColumnEnsembleClassifier(estimators=estimators)
    col_ens.fit(X_train, y_train)
    # preds = col_ens.predict(X_test.iloc[indices])
    # assert preds[0] == 2
    # assert probabilities are the same
    # Fixed seeds above make the probabilities reproducible to 2 decimals.
    probas = col_ens.predict_proba(X_test.iloc[indices])
    testing.assert_array_almost_equal(probas, col_ens_basic_motions_probas, decimal=2)
def test_col_ens_on_unit_test_data():
    """Test of ColumnEnsembleClassifier on unit test data."""
    # load unit test data
    X_train, y_train = load_unit_test(split="train")
    X_test, y_test = load_unit_test(split="test")
    indices = np.random.RandomState(0).choice(len(y_train), 10, replace=False)
    # train Column ensemble with a single
    fp = FreshPRINCE(
        random_state=0,
        default_fc_parameters="minimal",
        n_estimators=10,
    )
    estimators = [("FreshPrince", fp, [0])]
    col_ens = ColumnEnsembleClassifier(estimators=estimators)
    col_ens.fit(X_train, y_train)
    # preds = col_ens.predict(X_test.iloc[indices])
    # assert preds[0] == 2
    # assert probabilities are the same
    # Fixed random_state makes the probabilities reproducible to 2 decimals.
    probas = col_ens.predict_proba(X_test.iloc[indices])
    testing.assert_array_almost_equal(probas, col_ens_unit_test_probas, decimal=2)
# Expected predict_proba output for the unit-test dataset (10 cases, 2
# classes), used as a regression target by test_col_ens_on_unit_test_data.
col_ens_unit_test_probas = np.array(
    [
        [0.2, 0.8],
        [1.0, 0.0],
        [0.1, 0.9],
        [1.0, 0.0],
        [0.9, 0.1],
        [1.0, 0.0],
        [0.9, 0.1],
        [0.8, 0.2],
        [0.9, 0.1],
        [1.0, 0.0],
    ]
)
# Expected predict_proba output on basic_motions (10 cases, 4 classes),
# used as a regression target by test_col_ens_on_basic_motions.
col_ens_basic_motions_probas = np.array(
    [
        [0.00000, 0.07885, 0.03333, 0.88781],
        [0.90585, 0.00000, 0.00000, 0.09415],
        [0.00000, 0.09681, 0.82433, 0.07885],
        [0.06667, 0.67615, 0.22385, 0.03333],
        [0.00000, 0.00000, 0.06397, 0.93603],
        [0.00000, 0.03063, 0.26667, 0.70270],
        [0.72748, 0.03333, 0.06352, 0.17567],
        [0.00000, 0.00000, 0.88781, 0.11219],
        [0.00000, 0.76082, 0.16033, 0.07885],
        [0.00000, 0.82429, 0.06352, 0.11219],
    ]
)
| StarcoderdataPython |
352354 | <filename>spatial_lda/primal_dual.py
import logging
import numpy as np
from scipy.special import gammaln, digamma, polygamma
import scipy.sparse
import scipy.sparse.linalg
# Line-search parameters
ALPHA = 0.1  # sufficient-decrease fraction for backtracking
BETA = 0.5  # backtracking shrink factor
MAXLSITER = 50  # max backtracking iterations per Newton step
# Primal-dual iteration parameters
MU = 1e-3  # barrier-parameter update aggressiveness
MAXITER = 500  # max outer Newton iterations
TOL = 1e-2  # convergence tolerance on residual norm and duality gap
def make_gamma(xi, chi):
    """Stack xi then chi, flattened row-major, into one primal column vector."""
    xi_col = np.reshape(xi, (-1, 1))
    chi_col = np.reshape(chi, (-1, 1))
    return np.vstack((xi_col, chi_col))
def split_gamma(gamma, n, k, l):
    """Inverse of make_gamma: recover xi as (n, k) and chi as (l, k)."""
    cut = n * k
    xi = np.reshape(gamma[:cut], (n, k))
    chi = np.reshape(gamma[cut:cut + l * k], (l, k))
    return xi, chi
def make_A(D, k):
    """Expand the sample-level differencing matrix D to topic level
    (Kronecker product with the k x k identity)."""
    return scipy.sparse.kron(D, np.identity(k))
def make_C(D, k, l):
    """Given differencing matrix on samples D construct the constraint
    matrix C = [[A, -I], [-A, -I]] in CSR form."""
    A = make_A(D, k)
    I = scipy.sparse.eye(l * k)
    return scipy.sparse.bmat([[A, -I], [-A, -I]]).tocsr()
def f0(gamma, c, s):
    """Compute f0 for primal vars xi and chi, counts c, and edge weights s"""
    # n documents, k topics, l ties between documents
    n, k = np.shape(c)
    l = np.shape(s)[0]
    xi, chi = split_gamma(gamma, n, k, l)
    # Dirichlet-parameter terms of the (negative) variational objective.
    objective = 1 / n * np.sum(gammaln(xi))
    objective -= 1 / n * np.sum(gammaln(np.sum(xi, axis=1)))
    objective -= 1 / n * np.sum(np.multiply(xi, c))
    # Edge-weighted penalty on the slack variables chi.
    objective += np.sum(scipy.sparse.diags(s, 0).dot(chi))
    return objective
def gradient_f0(gamma, c, s):
    """Compute gradient of objective given variable gamma, counts c, and edge weights s."""
    n, k = np.shape(c)
    l = np.shape(s)[0]
    xi, chi = split_gamma(gamma, n, k, l)
    # d/dxi of the Dirichlet terms: digamma differences minus the counts.
    gxi = 1 / n * \
        (digamma(xi) - digamma(np.sum(xi, axis=1, keepdims=True)) - c)
    # The chi part is linear, so its gradient is the constant edge weights.
    gchi = scipy.sparse.diags(s, 0).dot(np.ones((l, k)))
    # Return a single stacked column vector matching make_gamma's layout.
    gxi = np.reshape(gxi, (n * k, 1))
    gchi = np.reshape(gchi, (l * k, 1))
    return np.vstack((gxi, gchi))
def assemble_block_diag(mats):
    """Assemble square dense blocks into one sparse block-diagonal matrix.

    Equivalent to scipy.sparse.block_diag for same-shaped square blocks,
    built directly in COO form.

    BUG FIX: the original paired a row-major flattened block with
    transposed meshgrid index arrays, so every block was written
    transposed.  That was invisible for the symmetric Hessian blocks
    used in this module but wrong for any non-symmetric input;
    indexing="ij" writes a[i, j] at position (i, j).
    """
    row = []
    col = []
    data = []
    offset = 0
    for a in mats:
        nrows, ncols = a.shape
        row_idx, col_idx = np.meshgrid(range(nrows), range(ncols), indexing="ij")
        row.append((row_idx + offset).flatten())
        col.append((col_idx + offset).flatten())
        data.append(a.flatten())
        offset += nrows
    data = np.hstack(data)
    row = np.hstack(row)
    col = np.hstack(col)
    return scipy.sparse.coo_matrix((data, (row, col)))
def hessian_f0(gamma, n, k, l):
    """Compute Hessian of objective given xi and count of edges"""
    xi, chi = split_gamma(gamma, n, k, l)
    blocks = []
    for i in range(n):
        # Per-document k x k block: diag(trigamma(xi_i)) - trigamma(sum(xi_i)),
        # which is symmetric.
        block = np.diag(polygamma(1, xi[i, :])) - \
            polygamma(1, np.sum(xi[i, :]))
        blocks.append(block)
    # nabla2_xi = 1/n*scipy.sparse.block_diag(blocks)
    nabla2_xi = 1 / n * assemble_block_diag(blocks)
    # The chi part of the objective is linear, so its Hessian blocks are zero.
    zeros_nk_lk = scipy.sparse.coo_matrix((n * k, l * k))
    zeros_lk_lk = scipy.sparse.coo_matrix((l * k, l * k))
    H = scipy.sparse.vstack((scipy.sparse.hstack((nabla2_xi, zeros_nk_lk)),
                             scipy.sparse.hstack((zeros_nk_lk.T, zeros_lk_lk))))
    return H
def r_dual(gamma, u, C, cs, s):
    """Dual (stationarity) residual: grad f0(gamma) + C^T u."""
    g = gradient_f0(gamma, cs, s)
    r = np.squeeze(g) + np.squeeze((C.T.dot(u)))
    return r
def r_cent(gamma, u, C, t):
    """Centrality residual: -diag(u) (C gamma) - (1/t) 1."""
    constraint_vals = C.dot(gamma)
    scaled = scipy.sparse.diags(u, 0).dot(constraint_vals)
    return -np.squeeze(scaled) - 1.0 / t
def compute_r(gamma, u, C, cs, s, t):
    """Stacked, negated KKT residual [r_dual; r_cent] for barrier parameter t."""
    r1 = r_dual(gamma, u, C, cs, s)
    r2 = r_cent(gamma, u, C, t)
    r = -np.hstack((r1, r2))
    return r
def build_linear_system(gamma, u, C, cs, s, t):
    """Assemble the primal-dual Newton system M [dgamma; du] = r."""
    n, k = cs.shape
    # Each of the l ties contributes 2k inequality constraints (rows of C).
    l = u.shape[0] // (2 * k)
    H = hessian_f0(gamma, n, k, l)
    uC = scipy.sparse.diags(np.squeeze(u), 0).dot(C)
    Cg = scipy.sparse.diags(np.squeeze(C.dot(gamma)))
    # KKT matrix: [[H, C^T], [-diag(u) C, -diag(C gamma)]].
    M = scipy.sparse.vstack((scipy.sparse.hstack((H, C.T)),
                             scipy.sparse.hstack((-uC, -Cg)))).tocsr()
    r = compute_r(gamma, u, C, cs, s, t)
    return M, r
def split_primal_dual_vars(z, n, k, l):
    """Split the stacked Newton step into primal (gamma) and dual (u) parts."""
    cut = n * k + l * k
    return np.squeeze(z[:cut]), np.squeeze(z[cut:])
def gap(gamma, C, u):
    """Surrogate duality gap -u^T (C gamma); positive while strictly feasible."""
    constraint_vals = C.dot(np.squeeze(gamma))
    return -np.sum(constraint_vals * np.squeeze(u))
def line_search(gamma, u, C, cs, s, t, n, l, k):
    """Take one damped Newton step of the primal-dual method.

    Solves the KKT system for the search direction, caps the step so the
    dual variables u and primal variables gamma stay positive, then
    backtracks (factor BETA) until the new point is feasible
    (C gamma <= 0) and the residual norm has decreased sufficiently
    (factor 1 - ALPHA * step).  Returns the updated (gamma, u, step).
    """
    M, r = build_linear_system(gamma, u, C, cs, s, t)
    delta = scipy.sparse.linalg.spsolve(M, r)
    dgamma, du = split_primal_dual_vars(delta, n, k, l)
    # Largest step keeping u > 0 and gamma > 0 along the direction.
    step_max = 1.0
    neg_du = du < 0
    if np.any(neg_du):
        step_max = np.min((step_max, np.min(u[neg_du] / (-du[neg_du]))))
    neg_dgamma = dgamma < 0
    if np.any(neg_dgamma):
        step_max = np.min(
            (step_max, np.min(gamma[neg_dgamma] / (-dgamma[neg_dgamma]))))
    step = step_max * 0.99  # stay strictly inside the boundary
    r = compute_r(gamma, u, C, cs, s, t)
    for lsit in range(MAXLSITER):
        new_gamma = gamma + step * dgamma
        new_u = u + step * du
        new_r = compute_r(new_gamma, new_u, C, cs, s, t)
        if (np.any(C.dot(new_gamma) > 0) or
                np.linalg.norm(new_r) > (1 - ALPHA * step) * np.linalg.norm(r)):
            step = step * BETA
        else:
            u = new_u
            gamma = new_gamma
            r = new_r
            break
    # NOTE(review): this also warns when the final backtracking iteration
    # succeeds; a success flag would be more precise.
    if lsit == MAXLSITER - 1:
        # BUG FIX: logging.warn has been deprecated since Python 3.3;
        # logging.warning is the supported spelling.
        logging.warning('Line search failed.')
    return gamma, u, step
def primal_dual(cs, D, s, init_gamma=None,
                init_u=None, verbose=False, tol=TOL):
    """Solve the constrained subproblem with a primal-dual interior-point
    loop.

    cs is (n, k), D is (l, n), s has length l.  Iterates Newton steps
    (via line_search) while increasing the barrier parameter t until both
    the dual residual and the surrogate duality gap drop below tol.
    Returns the final (gamma, u).
    """
    l, n = D.shape
    _, k = cs.shape
    assert cs.shape[0] == D.shape[1]
    # gamma = np.hstack((np.ones(n*k), np.ones(l*k)))
    # Start xi at the row-normalized counts and the slacks chi at 1.
    init = (cs / np.sum(cs, axis=1, keepdims=True)).ravel()
    gamma = np.hstack((init, np.ones(l * k)))
    u = 0.01 * np.ones(2 * l * k)
    if init_gamma is not None:
        gamma = init_gamma
    if init_u is not None:
        u = init_u
    C = make_C(D, k, l)
    t = 1.0
    for it in range(MAXITER):
        nu = gap(gamma, C, u)
        # Grow the barrier parameter as the duality gap shrinks.
        t = np.max((2 * MU * l * k * k / nu, t * 1.2))
        gamma, u, step = line_search(gamma, u, C, cs, s, t, n, l, k)
        r = np.linalg.norm(r_dual(gamma, u, C, cs, s))
        xis, chis = split_gamma(gamma, n, k, l)
        if (np.linalg.norm(r) < tol and nu < tol):
            break
        if verbose:
            logging.info('it: {0}, gap: {1}, t: {2}, step: {3}, res: {4}'.format(
                it, nu, t, step, np.linalg.norm(r)))
    return gamma, u
| StarcoderdataPython |
28892 | <gh_stars>0
import argparse
import logging
import sys
import json
import plotly.offline
import plotly.graph_objs as go
sys.path.append(sys.path[0] + "/..")
from utils.fileprovider import FileProvider
from preprocessing.reader import EvalitaDatasetReader
from nltk.tokenize import TweetTokenizer
logging.getLogger().setLevel(logging.INFO)
def plot_distribution(dictionary, title, x_axis_title, y_axis_title, min_frequency, dtick, color, output_path):
    """Write an offline Plotly horizontal bar chart to `output_path`.

    Plots every (element, count) pair from `dictionary` whose count is
    strictly greater than `min_frequency`, sorted by ascending count so
    the most frequent entries end up at the top of the chart.
    """
    logging.info("Plotting {}".format(title))
    # BUG FIX: dropped the original's pointless self-assignment
    # `min_frequency = min_frequency`.
    X = []
    Y = []
    for element, element_count in sorted(dictionary.items(), key=lambda kv: kv[1], reverse=False):
        if element_count > min_frequency:
            X.append(element_count)
            Y.append(element)
    plotly.offline.plot({"data": [go.Bar(orientation="h",
                                         x=X,
                                         y=Y,
                                         marker=dict(color=color))],
                         "layout": go.Layout(title="<b>{}</b>".format(title),
                                             xaxis=dict(title="<b>{}</b>".format(x_axis_title),
                                                        titlefont=dict(color=color)),
                                             yaxis=dict(title="<b>{}</b>".format(y_axis_title), dtick=dtick,
                                                        titlefont=dict(color=color)),
                                             margin=go.layout.Margin(l=250, r=250)
                                             )
                         },
                        filename=output_path,
                        auto_open=False)
if __name__ == '__main__':
    """##### Parameter parsing"""
    parser = argparse.ArgumentParser(description='Data analysis for the ITAmoji task')
    parser.add_argument('--workdir', required=True, help='Work path')
    args = parser.parse_args()
    files = FileProvider(args.workdir)
    # Mapping files are expected in ./data_analysis relative to the cwd.
    logging.info("Loading txt_2_emoji.json file")
    with open("{}/{}".format("data_analysis", "txt_2_emoji.json"), 'r') as txt_2_emoji_file:
        txt_2_emoji = json.load(txt_2_emoji_file)
    logging.info("Loading idx_2_emoji.json file")
    with open("{}/{}".format("data_analysis", "idx_2_emoji.json"), 'r') as idx_2_emoji_file:
        idx_2_emoji = json.load(idx_2_emoji_file)
    logging.info("Starting data analysis with parameters: {0}".format(vars(args)))
    raw_train = EvalitaDatasetReader(files.evalita)
    # Frequency dictionaries accumulated over the training tweets.
    train_token_dict = dict()
    train_hashtag_dict = dict()
    train_mention_dict = dict()
    train_url_dict = dict()
    train_label_dict = dict()
    tweet_tokenizer = TweetTokenizer()
    logging.info("Computing counts for train set")
    for train_tweet_text, train_tweet_label in zip(raw_train.X, raw_train.Y):
        # tokens
        for token in tweet_tokenizer.tokenize(train_tweet_text.lower()):
            # general token
            train_token_dict[token] = train_token_dict[token] + 1 if train_token_dict.get(token) else 1
            if token.startswith("#"): # hashtag token
                train_hashtag_dict[token] = train_hashtag_dict[token] + 1 if train_hashtag_dict.get(token) else 1
            if token.startswith("@"): # mention token
                train_mention_dict[token] = train_mention_dict[token] + 1 if train_mention_dict.get(token) else 1
            if token.startswith("http"): # url token
                train_url_dict[token] = train_url_dict[token] + 1 if train_url_dict.get(token) else 1
        # labels
        train_label_dict[train_tweet_label] = train_label_dict[train_tweet_label] + 1 if train_label_dict.get(train_tweet_label) else 1
    # Dump overall/unique counts both to the log and to a summary file.
    with open("data_analysis/data_analysis.txt", 'w') as data_analysis_output:
        total_number_of_tokens = sum([count for token, count in train_token_dict.items()])
        total_number_of_unique_tokens = len(train_token_dict)
        logging.info("Total number of tokens: {}".format(total_number_of_tokens))
        data_analysis_output.write("Total number of tokens: {}\n".format(total_number_of_tokens))
        logging.info("Total number of unique tokens: {}".format(total_number_of_unique_tokens))
        data_analysis_output.write("Total number of unique tokens: {}\n".format(total_number_of_unique_tokens))
        total_number_of_hashtags = sum([count for token, count in train_hashtag_dict.items()])
        total_number_of_unique_hashtags = len(train_hashtag_dict)
        logging.info("Total number of hashtags: {}".format(total_number_of_hashtags))
        data_analysis_output.write("Total number of hashtags: {}\n".format(total_number_of_hashtags))
        logging.info("Total number of unique hashtags: {}".format(total_number_of_unique_hashtags))
        data_analysis_output.write("Total number of unique hashtags: {}\n".format(total_number_of_unique_hashtags))
        total_number_of_mentions = sum([count for token, count in train_mention_dict.items()])
        total_number_of_unique_mentions = len(train_mention_dict)
        logging.info("Total number of mentions: {}".format(total_number_of_mentions))
        data_analysis_output.write("Total number of mentions: {}\n".format(total_number_of_mentions))
        logging.info("Total number of unique mentions: {}".format(total_number_of_unique_mentions))
        data_analysis_output.write("Total number of unique mentions: {}\n".format(total_number_of_unique_mentions))
        total_number_of_urls = sum([count for token, count in train_url_dict.items()])
        total_number_of_unique_urls = len(train_url_dict)
        logging.info("Total number of URLs: {}".format(total_number_of_urls))
        data_analysis_output.write("Total number of URLs: {}\n".format(total_number_of_urls))
        logging.info("Total number of unique URLs: {}".format(total_number_of_unique_urls))
        data_analysis_output.write("Total number of unique URLs: {}\n".format(total_number_of_unique_urls))
        total_number_of_labels = sum([count for token, count in train_label_dict.items()])
        total_number_of_unique_labels = len(train_label_dict)
        logging.info("Total number of labels: {}".format(total_number_of_labels))
        data_analysis_output.write("Total number of labels: {}\n".format(total_number_of_labels))
        logging.info("Total number of unique labels: {}".format(total_number_of_unique_labels))
        data_analysis_output.write("Total number of unique labels: {}\n".format(total_number_of_unique_labels))
    # NOTE(review): "#<PASSWORD>" below looks like a dataset redaction
    # artifact; the other plots use "#3498db" -- confirm the intended color.
    plot_distribution(train_token_dict, "token distribution", "frequency", "token", 2000, 2, "#<PASSWORD>", "data_analysis/token_distribution.html")
    plot_distribution(train_hashtag_dict, "hashtag distribution", "frequency", "hashtag", 250, 2, "#3498db", "data_analysis/hashtag_distribution.html")
    plot_distribution(train_mention_dict, "mention distribution", "frequency", "mention", 150, 2, "#3498db", "data_analysis/mention_distribution.html")
    plot_distribution(train_url_dict, "URL distribution", "frequency", "URL", 5, 2, "#3498db", "data_analysis/url_distribution.html")
    logging.info("Plotting label distribution")
    min_frequency = 0
    X_label = []
    Y_label = []
    for label, label_count in sorted(train_label_dict.items(), key=lambda kv: kv[1], reverse=True):
        if label_count > min_frequency:
            X_label.append(label_count)
            Y_label.append(idx_2_emoji[str(label)])
    # NOTE(review): the axis titles below look swapped relative to
    # plot_distribution -- x holds counts but is titled "label"; verify.
    plotly.offline.plot({"data": [go.Bar(orientation="h",
                                         x=X_label,
                                         y=Y_label,
                                         marker=dict(color="#3498db"))],
                         "layout": go.Layout(title="<b>label distribution</b>",
                                             xaxis=dict(title="<b>label</b>",
                                                        titlefont=dict(color="#3498db")),
                                             yaxis=dict(title="<b>frequency</b>", dtick=1,
                                                        titlefont=dict(color="#3498db")),
                                             margin=go.layout.Margin(l=250, r=250)
                                             )
                         },
                         filename="data_analysis/label_distribution.html",
                         auto_open=False)
317321 | <filename>awwward/migrations/0002_auto_20210403_1439.py<gh_stars>0
# Generated by Django 3.1.7 on 2021-04-03 14:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add related_name to Rating's FKs.

    Lets reverse lookups use ``user.rater`` and ``project.ratings`` instead
    of the default ``rating_set``.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('awwward', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='rating',
            name='creator',
            # reverse accessor: user.rater
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rater', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='rating',
            name='project',
            # reverse accessor: project.ratings
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='awwward.project'),
        ),
    ]
| StarcoderdataPython |
3421899 | <reponame>toaomatis/greenchoice-variable-tariffs
# Home Assistant integration domain.
DOMAIN = "greenchoice_variable_tariffs"
# Configuration option keys (as they appear in the integration's config).
CONF_POSTAL_CODE = 'postal_code'
CONF_USE_ELECTRICITY = 'use_electricity'
CONF_USE_LOW_TARIFF = 'use_low_tariff'
CONF_USE_GAS = 'use_gas'
# Defaults for the options above.
DEFAULT_POSTAL_CODE = '9999ZZ'
DEFAULT_USE_ELECTRICITY = True
DEFAULT_USE_LOW_TARIFF = True
DEFAULT_USE_GAS = False
DEFAULT_NAME = 'Energiekosten'
DEFAULT_DATE_FORMAT = "%y-%m-%dT%H:%M:%S"
# Entity attribute names.
ATTR_NAME = 'name'
ATTR_UPDATE_CYCLE = 'update_cycle'
ATTR_ICON = 'icon'
ATTR_MEASUREMENT_DATE = 'date'
# Human-readable sensor names.
FRIENDLY_NAME_NORMAL_TARIFF = 'Greenchoice Electricity Normal Tariff'
FRIENDLY_NAME_LOW_TARIFF = 'Greenchoice Electricity Low Tariff'
FRIENDLY_NAME_GAS_TARIFF = 'Greenchoice Gas Tariff'
# Internal sensor type identifiers.
SENSOR_TYPE_NORMAL_TARIFF = 'Normal Tariff'
SENSOR_TYPE_LOW_TARIFF = 'Low Tariff'
SENSOR_TYPE_GAS_TARIFF = 'Gas Tariff'
SENSOR_MEASUREMENT_DATE = 'Measurement Date'
| StarcoderdataPython |
4860953 | from ..model_tests_utils import (
status_codes,
DELETE,
PUT,
POST,
GET,
ERROR,
random_model_dict,
check_status_code,
compare_data
)
from core.models import (
Organization
)
# Fixture payloads for Organization API tests.  Values like "org0__url" /
# "org1__url" appear to be placeholders resolved by the test harness to the
# URL of a previously created object -- TODO confirm against
# model_tests_utils.
org_test_data = {
    'org_test_0':{
        'org0': {
            "description": "Test",
            "full_name": "Test",
            "short_name": "Test",
            "address1": "Test",
            "address2": "Test",
            "city": "Test",
            "state_province": "TT",
            "zip": "21345",
            "country": "Test",
            "website_url": "www.test.com",
            "phone": "1231231",
            "parent": None
        },
        'org1': {
            "description": "Test12",
            "full_name": "Test12",
            "short_name": "Test12",
            "address1": "Test",
            "address2": "Test",
            "city": "Test",
            "state_province": "TT",
            "zip": "21345",
            "country": "Test",
            "website_url": "www.test.com",
            "phone": "1231231",
            # child of org0 (reference resolved by the harness)
            "parent": "org0__url"
        },
        'org0_update_0': {
            "description": "test_update",
            "full_name": "test_update",
            "short_name": "test_update",
            "address1": "test_update",
            "address2": "test_update",
            "city": "test_update",
            "state_province": "TF",
            "zip": "213453",
            "country": "test_update",
            "website_url": "www.test_update.com",
            "phone": "12312313",
            "parent": "org1__url"
        },
    }
}
# Declarative test script: each inner list is one scenario, each dict one HTTP
# step (method, endpoint, body, and a validator for the response).
org_tests = [

    ##----TEST 0----##
    #creates an organization
    #creates an organization that is the child of the previous org
    #updates the first organization to be a child of the second (both are parent orgs)
    #gets the first org
    #deletes the first org
    #gets the first org (should return error)
    [
        {
            'name': 'org0',
            'method': POST,
            'endpoint': 'organization-list',
            'body': random_model_dict(Organization),
            'args': [],
            'query_params': [],
            'is_valid_response': {
                'function': check_status_code,
                'args': [],
                'kwargs': {
                    'status_code': POST
                }
            }
        },
        {
            'name': 'org1',
            'method': POST,
            'endpoint': 'organization-list',
            # walrus keeps the generated payload so the validator can compare
            # the response body against exactly what was sent
            'body': (request_body := random_model_dict(Organization, parent='org0__url')),
            'args': [],
            'query_params': [],
            'is_valid_response': {
                'function': compare_data,
                'args': [],
                'kwargs': {
                    'status_code': POST,
                    'request_body': request_body
                }
            }
        },
        {
            'name': 'org0_update_0',
            'method': PUT,
            'endpoint': 'organization-detail',
            'body': (request_body := random_model_dict(Organization, parent='org1__url')),
            'args': [
                'org0__uuid'
            ],
            'query_params': [],
            'is_valid_response': {
                'function': compare_data,
                'args': [],
                'kwargs': {
                    'status_code': PUT,
                    'request_body': request_body
                }
            }
        },
        {
            'name': 'org0_get_0',
            'method': GET,
            'endpoint': 'organization-detail',
            'body': {},
            'args': [
                'org0__uuid'
            ],
            'query_params': [],
            'is_valid_response': {
                'function': check_status_code,
                'args': [],
                'kwargs': {
                    'status_code': GET
                }
            }
        },
        {
            'name': 'org0_delete_0',
            'method': DELETE,
            'endpoint': 'organization-detail',
            'body': {},
            'args': [
                'org0__uuid'
            ],
            'query_params': [],
            'is_valid_response': {
                'function': check_status_code,
                'args': [],
                'kwargs': {
                    'status_code': DELETE
                }
            }
        },
        {
            # fetching the deleted org must fail
            'name': 'org0_get_1',
            'method': GET,
            'endpoint': 'organization-detail',
            'body': {},
            'args': [
                'org0__uuid'
            ],
            'query_params': [],
            'is_valid_response': {
                'function': check_status_code,
                'args': [],
                'kwargs': {
                    'status_code': ERROR
                }
            }
        },
    ],
]
108527 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def import_data(file_name, time_column_index=None, mode='csv', header=True, room_name=None, tz=0):
    """
    Load raw data from the disk.

    :type file_name: str
    :param file_name: the name of the raw data file
    :type time_column_index: int
    :param time_column_index: the column index for the timestamp in given raw data file
    :type mode: str
    :param mode: the format for raw data. Currently only support ``csv``
    :type header: bool
    :param header: indicate whether the raw data contains a header on the first row. If ``False``, then assign unique
        index for each column
    :type room_name: str or None
    :param room_name: the name of the room. If ``None``, then assign unique number for the room
    :type tz: int
    :param tz: the time zone offset, in hours, applied to every parsed timestamp
    :rtype: core.data.dataset.Dataset
    :return: The structured data set with one raw input data
    :raises ValueError: if ``mode`` is not a supported format
    """
    from csv import reader
    from dateutil.parser import parse
    from numpy import nan, asarray
    from .dataset import Dataset
    if mode != 'csv':
        # The original silently fell through and returned None for any other
        # mode; fail loudly instead so callers notice the bad argument.
        raise ValueError("unsupported data format: %r (only 'csv' is supported)" % (mode,))
    with open(file_name, 'r') as input_file:
        csv_reader = reader(input_file, delimiter=',')
        feature_name = []
        data = []
        if header:
            # The last column holds the occupancy label, so its name is dropped.
            feature_name = next(csv_reader)[:-1]
        for line in csv_reader:
            if not len(line):
                continue  # skip blank lines
            for i in range(len(line)):
                if i == time_column_index:
                    # timestamp text -> epoch seconds, shifted by the tz offset
                    line[i] = parse(line[i]).timestamp() + tz * 60 * 60
                elif not len(line[i]):
                    line[i] = nan  # empty cell -> missing value
                else:
                    try:
                        line[i] = float(line[i])
                    except ValueError:
                        line[i] = nan  # non-numeric cell -> missing value
            data.append(line)
        data = asarray(data, dtype=float)
        if not len(feature_name):
            # NOTE(review): this fallback names *all* columns, including the
            # label column, while the header path drops the last name --
            # confirm which length Dataset.set_feature_name expects.
            feature_name = list(range(data.shape[1]))
        dataset = Dataset()
        # Last column is the occupancy label; the rest are sensor features.
        dataset.add_room(data[:, :-1], occupancy=data[:, -1], header=False, room_name=room_name)
        dataset.set_feature_name(feature_name)
        dataset.time_column_index = time_column_index
        return dataset
| StarcoderdataPython |
3351826 | <reponame>mahmoud/clastic<filename>clastic/tests/test_meta.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from clastic import Application, render_basic, MetaApplication
from clastic.middleware.cookie import SignedCookieMiddleware
from clastic.tests.common import cookie_hello_world
def test_meta_basic():
    """Smoke test: a clastic app with MetaApplication mounted at /meta serves
    both the root route and /meta/ with HTTP 200."""
    app = Application([('/meta', MetaApplication()),
                       ('/<name?>', cookie_hello_world, render_basic)],
                      middlewares=[SignedCookieMiddleware()])
    cl = app.get_local_client()
    assert cl.get('/').status_code == 200
    assert cl.get('/meta/').status_code == 200
| StarcoderdataPython |
9754205 | import os
import uuid
from .. import BASE_URL, test_csv_file, test_tif_file
from ..user import test_token
# Name used when creating the upload fixture in the test environment.
test_upload_name = "pytest_upload_csv"
# UUID of a CM layer used by the export tests -- presumably pre-provisioned
# in the test environment; verify it still exists before relying on it.
test_export_cm_layer_uuid = "3ef057c3-b65a-448f-9718-d46c33b9ec3b"
| StarcoderdataPython |
277471 | <gh_stars>0
import random
from datetime import datetime, time, timedelta
from faker import Faker
from ..tournament import models as TModels
from ..users.forms import UserCreationForm
from ..users.models import User
# setjum testsorp í database-ið
def generate_initial_data():
    """Seed the database with fake users, categories and tournaments for testing."""
    print("Generating initial data...")
    fake = Faker()
    # create 100 users
    for i in range(100):
        create_test_user()
    # create top-level categories
    sports = TModels.SuperCategory.objects.create(name="Sports")
    gaming = TModels.SuperCategory.objects.create(name="Gaming")
    # create sub-categories
    foosball = TModels.Category.objects.create(name="Foosball", super_category=sports)
    table_tennis = TModels.Category.objects.create(
        name="Table Tennis", super_category=sports
    )
    competetive_eating = TModels.Category.objects.create(
        name="Competetive Eating", super_category=sports
    )
    cs_go = TModels.Category.objects.create(name="CS:GO", super_category=gaming)
    dota = TModels.Category.objects.create(name="DotA", super_category=gaming)
    lol = TModels.Category.objects.create(name="LoL", super_category=gaming)
    flokkar = [foosball, table_tennis, competetive_eating, cs_go, dota, lol]
    # tournaments
    users = list(User.objects.all())
    # superuser = User.objects.get(is_superuser=True)
    slots = [8, 16, 32, 64, 128]
    dates = [datetime.now(), datetime.now() + timedelta(weeks=1)]
    times = [time(hour=17, minute=0), time(hour=19, minute=30), time(hour=20, minute=0)]
    locations = ["Nörd", "Hallgrímskirkja"]
    for f in flokkar:
        # 10 tournaments per category
        for i in range(10):
            creator = random.choice(users)
            slots_curr = random.choice(slots)
            t = TModels.Tournament.objects.create(
                creator=creator,
                name=" ".join(fake.words()) + " tournament",
                category=f,
                slots=slots_curr,
                date=random.choice(dates),
                time=random.choice(times),
                location=random.choice(locations),
            )
            # cannot register more users than exist or than there are slots
            users_max = slots_curr if len(users) >= slots_curr else len(users)
            random_users = random.sample(users, random.randint(0, users_max))
            t.registered_users.set(random_users)
    print("Generating initial data done")
def create_test_user():
    """Create one user with Faker-generated fields via UserCreationForm.

    Returns the string "User created" on success, or the form's error dict.
    NOTE(review): the literal ``<PASSWORD>`` tokens below are anonymization
    artifacts from the published source -- as written the dict values are not
    valid Python; the original presumably used ``test_pw`` for both password
    fields.  Restore real values before using this function.
    """
    fake = Faker()
    test_pw = "<PASSWORD>"
    form = UserCreationForm(
        {
            "username": fake.user_name(),
            "password1": <PASSWORD>,
            "password2": <PASSWORD>,
            "email": fake.email(),
            "name": fake.name(),
        }
    )
    if form.is_valid():
        form.save()
        return "User created"
    return form.errors
def drop_all_data():
    """Intended to remove the generated test data; not implemented yet."""
    pass
| StarcoderdataPython |
6547804 | """
entradas
valora-->A-->float
valorb-->B-->float
valorc-->C-->float
salidas
valorx1-->x1-->float
valorx2-->x2-->float
"""
#entradas
A=float(input("Ingrese el valor de a "))
B=float(input("Ingrese el valor de b "))
C=float(input("Ingrese el valor de c "))
#cajanegra
D=B**2-4*A*C
if D==0:
x1=-B/(2*A)
print("X1 = X2 = ",x1) #salidas
elif D>0:
x1=(-B+(B**2-4*A*C)**1/2)/(2*A)
x2=(-B-(B**2-4*A*C)**1/2)/(2*A)
print("X1= ",x1) #salidas
print("X2= ",x2) #salidas
elif D<0:
print("No tiene solucion en los reales") #salidas | StarcoderdataPython |
6484997 | <reponame>Rockysed/PSC_classification
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 14:08:03 2019
@author: rocco
"""
import h5py
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.decomposition import PCA
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
import os
import pandas as pd
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.preprocessing import MinMaxScaler
import pickle
"""
#load csdb dataset
new_file = h5py.File("../data/csdb_new/csdb_complete.h5", "r")
btd_csdb = new_file["btd_complete"][:]
labels = new_file["labels"][:]
new_file.close()
#load htang
new_file = h5py.File("../data/csdb_new/csdb_complete.h5", "r")
htang = new_file["htang"][:]
new_file.close()
#option: merge all STS mixes in one label
#labels[((labels == 4) | (labels == 5))] = 3
#instantiate RF classifier
forest = RFC(n_jobs=-1, n_estimators=100)
#fit data
forest.fit(btd_csdb, labels.ravel())
#retrieve feature importance
importances = forest.feature_importances_
f_n = normalize(importances.reshape(1,-1), norm='max')
#indices_rf = importances.argsort()[-50:]
#extract selected btds
#btd_selected_rf = btd_complete[:, indices_rf]
#select features with importance above 10% of the maximum
indices = np.where(f_n>0.1)[1]
#btd_sel = btd_csdb_scaled[:, indices]
btd_sel = btd_csdb[:, indices]
#clear not used var
del btd_csdb
#fit PCA
pca = PCA(n_components=10)
pc = pca.fit_transform(btd_sel)
#clear not used var
del btd_sel
scaler_1 = MinMaxScaler()
pc = scaler_1.fit_transform(pc)
#save principal components
f = open('pc_csdb.pckl', 'wb')
pickle.dump(pc, f)
f.close()
"""
#grid search
# Hyper-parameter grid for an RBF-kernel SVM (linear/poly grids left disabled).
parameters = [
            #{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
            {'C': [1, 10, 100, 1000, 10000], 'gamma': [0.0001, 0.001, 0.01, 0.1, 1, 10, "auto", "scale"], 'kernel': ['rbf']},
            #{'C': [1, 10, 100, 1000], 'gamma': [0.0001, 0.001, 0.01, 0.1, 1], 'kernel': ['poly']}
            ]
svc = svm.SVC()
clf = GridSearchCV(svc, parameters, cv=5)
# NOTE(review): `pc` and `labels` are only defined inside the commented-out
# block above, so running this file as-is raises NameError -- presumably `pc`
# should be loaded from pc_csdb.pckl first; confirm the intended workflow.
clf.fit(pc, labels.ravel())
"""
f = open('grid_svm_rbf.pckl', 'wb')
pickle.dump(clf, f)
f.close()
""" | StarcoderdataPython |
3311732 | <reponame>SusannaMichael/OCN506A<gh_stars>0
"""
Code to try to plot data better
"""
#imports
import numpy as np
import sys, os
import matplotlib.pyplot as plt
import pickle
import argparse
import matplotlib.pyplot as plt
#local imports
sys.path.append(os.path.abspath('../shared'))
import my_module as mymod
from importlib import reload
reload(mymod)
# make sure the output directory exists
this_dir = os.path.abspath('.').split('/')[-1]
this_parent = os.path.abspath('.').split('/')[-2]
out_dir = '../' + this_parent + '_output/'
print('Creating ' + out_dir + ', if needed')
mymod.make_dir(out_dir)
myplace = 'OCN506A' # Change this to fit your own computer
# input directory
in_dir = '../' + myplace + '_data/'
# define the input filename
in_fn = in_dir + 'goa_surface_simple.txt'
# create empty lists to put our data in
dist = []
sal = []
DAl = []   # presumably dissolved Al -- TODO confirm against data file header
TDAl = []  # presumably total dissolvable Al
DMn = []   # presumably dissolved Mn
TDMn = []  # presumably total dissolvable Mn
count = 0  # header-skip flag: first line of the file is not parsed
month = []
# per-month series filled later by the partitioning loop
april_dist = []
april_sal = []
april_DAl = []
april_TDAl = []
april_DMn = []
april_TDMn = []
may_dist = []
may_sal = []
may_DAl = []
may_TDAl = []
may_DMn = []
may_TDMn = []
july_dist = []
july_sal = []
july_DAl = []
july_TDAl = []
july_DMn = []
july_TDMn = []
# Parse the whitespace-delimited file; column order assumed to be
# month, salinity, distance, DAl, TDAl, DMn, TDMn -- verify against the file.
with open(in_fn, 'r', errors='ignore') as f:
    for line in f:
        if count > 0:
            count = count + 1
            lls = line.split()
            month.append(str(lls[0]))
            dist.append(float(lls[2]))
            sal.append(float(lls[1]))
            DAl.append(float(lls[3]))
            TDAl.append(float(lls[4]))
            DMn.append(float(lls[5]))
            TDMn.append(float(lls[6]))
        else:
            count = count + 1  # skip the header row
#print(month)
#print(dist)
#print(sal)
#print(DAl)
#print(TDAl)
#print(DMn)
#print(TDMn)
# Partition the parsed rows into per-month series (April / May / July).
# The original indexed the seven parallel lists by hand and carried a dead
# `i = i + 1` inside the for-loop; zip() removes both.
for m, d, s, al, tal, mn, tmn in zip(month, dist, sal, DAl, TDAl, DMn, TDMn):
    if m == 'April':
        april_dist.append(d)
        april_sal.append(s)
        april_DAl.append(al)
        april_TDAl.append(tal)
        april_DMn.append(mn)
        april_TDMn.append(tmn)
    elif m == 'May':
        may_dist.append(d)
        may_sal.append(s)
        may_DAl.append(al)
        may_TDAl.append(tal)
        may_DMn.append(mn)
        may_TDMn.append(tmn)
    elif m == 'July':
        july_dist.append(d)
        july_sal.append(s)
        july_DAl.append(al)
        july_TDAl.append(tal)
        july_DMn.append(mn)
        july_TDMn.append(tmn)
#print(april_sal)
#print(may_sal)
#print(july_sal)
# PLOTTING
plt.close('all')  # always start by cleaning up

# Distance from shore on the x axis with two y-axes: TDAl (log scale, left)
# and salinity (right); one colour per cruise month.
fs = 16
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
# make a dict that associates letters with lines - tried to do this for a legend, but don't feel like it worked.
abc_list = list('abc')  # a quick way to make the list ['a', 'b', 'c']
month_list = ['April', 'May', 'July']
month_dict = dict(zip(abc_list, month_list))
ax.plot(april_dist, april_TDAl, '#014F94', marker='o', linewidth=0, markersize='8', label='April')
ax.plot(may_dist, may_TDAl, '#00877B', marker='o', linewidth=0, markersize='8', label='May')
ax.plot(july_dist, july_TDAl, '#CB7941', marker='o', linewidth=0, markersize='8', label='July')
ax.set_ylabel('TDAl(nM)', color='black', size=fs)
ax.set_xlabel('distance from shore(m)')
ax.set_yscale('log')
ax2 = ax.twinx()
ax2.plot(april_dist, april_sal, '#014F94', marker='+', linewidth=1, label='salinity')
ax2.plot(may_dist, may_sal, '#00877B', marker='+', linewidth=1)
# BUGFIX: the July salinity line used '#CB7841', a near-miss of the July
# marker colour '#CB7941'; both July series now share the same colour.
ax2.plot(july_dist, july_sal, '#CB7941', marker='+', linewidth=1)
ax2.set_ylabel('Salinity', color='black', size=fs)
ax.set_title('TDAl', fontsize=24)
ax.legend(fontsize=20, ncol=1, loc=7)
ax2.legend(fontsize=20, ncol=1, loc=8)
plt.show()
fig.savefig(out_dir + 'GoA_TDAl.png')
12826569 | <filename>formatter_for_output.py<gh_stars>1-10
"""
Support classes for coloring the console output
"""
import logging
import sys
from config import FrameworkConfiguration
def format_console_output():
    """
    Format console with a common format and if selected with a colored output.
    """
    # logging.basicConfig is a no-op after the first effective call, so the
    # original's four calls (one per level) only ever applied the first.  A
    # single DEBUG-threshold call with the shared format covers all levels.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
                        format='[%(levelname)s]\t(%(threadName)s) %(message)s', )

    # Set colored output for console
    if FrameworkConfiguration.use_colored_output and FrameworkConfiguration.DEBUG is False:
        LOG = logging.getLogger()
        LOG.setLevel(logging.DEBUG)
        # Iterate over a copy: removing handlers while iterating the live
        # list skips entries (the original mutated LOG.handlers in-place).
        for handler in list(LOG.handlers):
            LOG.removeHandler(handler)
        LOG.addHandler(ColorHandler())
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37)
def __init__(self, stream):
self.stream = stream
@classmethod
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
raise
# guess false in case of error
return False
def write(self, text, color):
"""
Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
class ColorHandler(logging.StreamHandler):
    """Stream handler that colour-codes each record by severity using
    _AnsiColorizer (unknown levels fall back to green)."""

    # severity -> colour label understood by _AnsiColorizer.write
    _LEVEL_COLORS = {
        logging.DEBUG: "green",
        logging.INFO: "blue",
        logging.WARNING: "yellow",
        logging.ERROR: "red",
    }

    def __init__(self, stream=sys.stderr):
        super(ColorHandler, self).__init__(_AnsiColorizer(stream))

    def emit(self, record):
        """Write the record's raw message (plus newline) in its level colour."""
        chosen = self._LEVEL_COLORS.get(record.levelno, "green")
        self.stream.write(record.msg + "\n", chosen)
| StarcoderdataPython |
1873485 | # Copyright (c) 2013, ac and contributors
# For license information, please see license.txt
import datetime
import frappe
from accounting.accounting.utils import get_fiscal_date_range
from frappe import _
def execute(filters=None):
    """Frappe report entry point: return (columns, data) for a balance sheet.

    Resolves a posting-date range from the report filters ('Date Range' or
    'Fiscal Year'), falling back to the fiscal year containing today, then
    builds the indented Asset/Liability account list with cumulative balances
    plus a provisional profit/loss row.
    """
    filters = filters or {}  # tolerate filters=None (default) without crashing
    filter_type = filters.get('filter_type')
    from_date = to_date = None
    if filter_type == 'Date Range':
        from_date = filters.get('from_date')
        to_date = filters.get('to_date')
    elif filter_type == 'Fiscal Year':
        try:
            fiscal_year = filters.get('fiscal_year')
            fiscal_year = frappe.get_all('Fiscal Year', fields=['*'], filters={'year_name': fiscal_year})
            from_date = fiscal_year[0].start_date
            to_date = fiscal_year[0].end_date
        except Exception as e:
            print(e)
    if not from_date or not to_date:
        # Fall back to the current fiscal year.  (The original only did this
        # inside the 'Fiscal Year' exception handler, so any other unknown
        # filter_type left from_date/to_date unbound -> NameError below.)
        now = datetime.datetime.now()
        from_date, to_date = get_fiscal_date_range(now)
    date_range = [from_date, to_date]
    columns = [
        {
            'fieldname': 'account',
            'label': _('Account'),
            'fieldtype': 'Link',
            'options': 'Account',
            'width': '200px',
        },
        {
            'fieldname': 'balance',
            'label': _('Balance'),
            'fieldtype': 'Currency',
            'width': '100px',
        }
    ]
    accounts = []
    accounts.extend(get_accounts(account_type='Asset'))
    accounts.extend(get_accounts(account_type='Liability'))
    for account in accounts:
        account['account'] = account['name']  # report column expects 'account'
    accounts = indent_accounts(accounts)
    accounts = get_balances(accounts, date_range)
    accounts = propagate_balances(accounts)
    accounts.append(get_profit_and_loss(accounts))
    data = accounts
    return columns, data
def get_profit_and_loss(accounts):
    """Return a pseudo-account row holding Assets minus Liabilities.

    The root Asset and Liability accounts are re-read from the database (the
    `accounts` argument is not consulted); a missing balance counts as 0.0.
    """
    root_balances = {}
    for root_type in ('Asset', 'Liability'):
        root = frappe.get_list('Account', filters={'root_type': root_type}, order_by='lft')[0]
        root_balances[root_type] = root.get('balance') or 0.0
    return {
        'account': 'Provisional Profit/Loss',
        'balance': root_balances['Asset'] - root_balances['Liability'],
    }
def get_children(accounts, account):
    """Return the entries of `accounts` whose parent_account is `account`'s name,
    preserving input order."""
    parent_name = account['name']
    return [child for child in accounts if child['parent_account'] == parent_name]
def propagate_balances(accounts):
    """Fill in each missing (None) balance by summing the account's children.

    Mutates the account dicts in place and returns the same list.
    """
    for acct in accounts:
        if acct['balance'] is None:
            acct['balance'] = get_cumulative_balance(accounts, acct)
    return accounts
def get_accounts(account_type=None):
    """Fetch Account rows ordered by lft (nested-set order), optionally
    restricted to one root_type."""
    selected_filters = {'root_type': ['=', account_type]} if account_type else {}
    # NOTE(review): 'root_type' appears twice in the original field list;
    # kept as-is since frappe tolerates it, but it looks accidental.
    return frappe.get_all(
        'Account',
        fields=['name', 'parent_account', 'lft', 'root_type', 'root_type', 'is_group'],
        filters=selected_filters,
        order_by='lft',
    )
def get_account_balance(account, date_range):
    """Return SUM(debit) - SUM(credit) for `account` over the inclusive
    posting-date range, ignoring cancelled entries.

    Returns None when the account has no GL entries in the range (SQL SUM of
    an empty set).
    """
    # Parameterized query: the original interpolated `account` and the dates
    # straight into the SQL string, which is a SQL-injection risk.
    query = '''
        SELECT
            SUM(debit) - SUM(credit)
        FROM
            `tabGL Entry`
        WHERE
            account = %s AND is_cancelled = 0 AND
            posting_date >= %s AND posting_date <= %s
        '''
    result = frappe.db.sql(query, (account, date_range[0], date_range[1]))
    return result[0][0]
def get_cumulative_balance(accounts, account):
    """Recursively total an account's subtree: a leaf returns its own balance,
    an inner node returns the sum of its children's (truthy) balances.

    Child balances that are None are computed and written back in place.
    """
    children = get_children(accounts, account)
    if not children:
        return account['balance']
    total = 0
    for child in children:
        if child['balance'] is None:
            child['balance'] = get_cumulative_balance(accounts, child)
        if child['balance']:
            total += child['balance']
    return total
def get_balances(accounts, date_range):
    """Attach each account's GL balance for `date_range` as its magnitude.

    Negative sums are stored as absolute values; None (no entries) is kept.
    Mutates the dicts in place and returns the same list.
    """
    for acct in accounts:
        raw = get_account_balance(acct['name'], date_range)
        acct['balance'] = abs(raw) if raw and raw < 0 else raw
    return accounts
def indent_accounts(accounts):
    """Set each account's `indent` attribute to its depth in the account tree.

    Climbs parent links with one frappe.get_list query per ancestor, so the
    cost is one DB round-trip per level of depth per account -- acceptable
    for small charts of accounts.
    """
    for account in accounts:
        depth = 0
        current_account = account
        parent = current_account['parent_account']
        while parent is not None:
            depth += 1
            # fetch the parent row to continue climbing toward the root
            current_account = frappe.get_list(
                'Account', fields=['name', 'parent_account'],
                filters={'name': parent}
            )
            parent = current_account[0]['parent_account']
        account.indent = depth  # frappe result rows support attribute assignment
    return accounts
| StarcoderdataPython |
1605570 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write a sinusoidal waveform to a text file."""
import sys
import gflags
import numpy as np
gflags.DEFINE_float('sample_rate', 100.0, 'Sample rate [Hz]')
gflags.DEFINE_float('frequency', None, 'Frequency [Hz]')
gflags.DEFINE_float('mean', 0.0, 'Mean value of time series')
gflags.DEFINE_float('amplitude', 1.0, 'Amplitude of sine wave.')
gflags.DEFINE_float('duration', 0.0, 'Time series duration [s]. '
'By default one period of the waveform will be generated.')
gflags.DEFINE_integer('columns', 8, 'Number of time series data columns')
gflags.DEFINE_string('output_file', '', 'Output filename.')
gflags.MarkFlagAsRequired('frequency')
FLAGS = gflags.FLAGS
def main(argv):
  """Parse flags, synthesize one sine wave, and write (t, y) pairs to a file."""
  # Python 2 except/print syntax throughout -- this file targets Python 2.
  try:
    argv = FLAGS(argv)
  except gflags.FlagsError, e:
    print '\nError: %s\n\nUsage: %s\n%s' % (e, sys.argv[0], FLAGS)
    sys.exit(1)
  duration = FLAGS.duration
  if duration == 0.0:
    # Default: exactly one period of the waveform.
    duration = 1.0 / FLAGS.frequency
  t = np.arange(0.0, duration, 1.0 / FLAGS.sample_rate)
  # NOTE(review): the --mean flag is declared above but never applied here;
  # confirm whether the offset should be added to y.
  y = FLAGS.amplitude * np.sin(2.0 * np.pi * FLAGS.frequency * t)
  filename = FLAGS.output_file
  if not filename:
    filename = 'sine_%0.2f_Hz.txt' % FLAGS.frequency
  print "Writing to '%s'..." % filename
  # Two columns, tab-free format string "time value" per row.
  np.savetxt(filename, np.vstack((t, y)).T, fmt='%0.02f %e', delimiter='\t')
if __name__ == '__main__':
main(sys.argv)
| StarcoderdataPython |
291732 | import os
import sys
import time
import subprocess
from rich import print
env = os.environ.copy()
def main():
    """Run the wget download script, forwarding the current environment
    (including CMEMS_DIRECTORY, read by the __main__ handler below)."""
    subprocess.run(["sh", "wget_cmems.sh"], env=env)
if __name__ == "__main__":
    start = time.time()
    try:
        main()
    except KeyboardInterrupt:
        # On Ctrl-C, summarize download progress and estimate remaining time.
        elapsed = round(time.time() - start, 1)
        AVG_BYTES = 58086740  # presumed average bytes per source file -- TODO confirm
        NSOURCES = 8901       # total number of source files in the target range
        SOURCE_GBS = round((AVG_BYTES*NSOURCES)/1e9, 2)
        TARGET = env["CMEMS_DIRECTORY"]
        # Directory layout assumed to be TARGET/<year>/<month>/<files>.
        years = sorted(os.listdir(TARGET))
        months = sorted(os.listdir(TARGET + f"/{years[-1]}"))
        files = os.listdir(TARGET + f"/{years[-1]}/{months[-1]}")
        # chars 30:32 presumably hold the day-of-month in the fixed-width
        # filenames -- verify against the actual naming scheme.
        files = sorted([f[30:32] for f in files])
        final_day = f"{years[-1]}-{months[-1]}-{files[-1]}"
        nfiles = sum([len(files) for _, _, files in os.walk(TARGET)])
        nbytes = round((AVG_BYTES*nfiles)/1e9, 2)
        # Linear extrapolation from the observed per-file rate.
        remaining = round(((elapsed/60)/nfiles) * (NSOURCES-nfiles), ndigits=2)
        r, m, c = "[red]", "[bold magenta]", "[/]"  # rich markup tags
        print(
            "\n"
            f"\n  {r}Download interrupted after {c}{m}{round(elapsed/60, 2)} minutes{c}"
            f"{r} ({round(elapsed, 1)} seconds).{c}"
            f"\n  {r}Of target range (1993-01-01 to 2017-05-15),{c} {m}{final_day}{c}"
            f"{r} was last day reached.{c}"
            f"\n  {r}Downloaded {c}{m}{nfiles} {c}{r}of {c}{m}{NSOURCES}{c}{r} files{c}"
            f" {r}(~{nbytes} GBs of ~{SOURCE_GBS} GBs)."
            f"\n  {r}Full download would take an additional{c}"
            f"{m} ~{round(remaining/60, 2)} hours{c}{r} (~{remaining} minutes).{c}"
            "\n"
        )
        sys.exit(0)
| StarcoderdataPython |
def sum_of_cubes(n):
    """Return 1**3 + 2**3 + ... + n**3 (0 for n < 1)."""
    return sum(i ** 3 for i in range(1, n + 1))


def main():
    # prompt and output kept identical to the original script
    num = int(input("Enter the number :"))
    print("sum =", sum_of_cubes(num))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3346420 | # Standard Library packages
import os
import platform
import sys
from time import time
# 3rd party packages
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QEasingCurve, QPoint, QPropertyAnimation, QSettings, QSize, Qt, QTimer)
from PyQt5.QtGui import (QIcon, QPixmap, QColor, QPainter, QBrush)
from PyQt5.QtWidgets import (QApplication, QDesktopWidget, QDialog, QFileDialog, QFormLayout, QGroupBox, QHBoxLayout,
QLabel, QLineEdit, QMessageBox, QPushButton, QRadioButton, QVBoxLayout, QWidget,
QGridLayout)
import send2trash
# Local Imports
from custom_widgets import Label
from spotlight import Spotlight
import style
from UI_main_window import MainWindow, app_name
from _version import __version__
print('System: ', platform.system(), platform.release())
# PyInstaller function to help create exe file - (NB: NOT IN USE AS I NOW USE CX_FREEZE)
# def resource_path(relative_path):
# """ Get absolute path to resource, works for dev and for PyInstaller """
# try:
# # PyInstaller creates a temp folder and stores path in _MEIPASS
# base_path = sys._MEIPASS
# except Exception:
# base_path = os.path.abspath(".")
#
# return os.path.join(base_path, relative_path)
#
# resource_path('UI_main_window.py')
# resource_path('custom_widgets.py')
# resource_path('spotlight.py')
# resource_path('style.py')
# resource_path('icons_rc.py')
# GLOBAL VARIABLES ---------------------------------------------
settings_registry_key_name = 'CHR-onicles'
settings_registry_subkey_name = app_name + ' v' + __version__
class RenameDialogBox(QDialog):
    """
    Modal dialog for renaming a photo: a read-only default prefix plus a
    user-entered short name.  Emits ``signal_new_name(prefix, name)`` on a
    valid submit; remembers its screen position via QSettings.
    """
    # emitted on successful submit with (prefix, new_name)
    signal_new_name = pyqtSignal(str, str)

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Rename')
        self.setObjectName('RenameDialogBox')
        self.setWindowIcon(QIcon(':/icons/cat'))
        self.setStyleSheet(style.rename_dialog_style())
        self.setModal(True)  # deactivates other windows till this window is interacted with
        self.DIALOG_WIDTH, self.DIALOG_HEIGHT = 400, 220
        self.resize(self.DIALOG_WIDTH, self.DIALOG_HEIGHT)

        # Positioning at center of screen
        self.D_WIDTH, self.D_HEIGHT = main_.DESKTOP_WIDTH, main_.DESKTOP_HEIGHT
        self.xpos = int((self.D_WIDTH / 2) - (self.DIALOG_WIDTH / 2))
        self.ypos = int((self.D_HEIGHT / 2) - (self.DIALOG_HEIGHT / 2))
        # self.setGeometry(int(self.xpos), int(self.ypos), self.DIALOG_WIDTH, self.DIALOG_HEIGHT)
        self.setFixedSize(self.size())

        # RENAME DIALOG SETTINGS ---------------------------------------------------------------
        # self.default_prefix = ''
        self.settings = QSettings(settings_registry_key_name, settings_registry_subkey_name)
        self.default_prefix = self.settings.value('default prefix')
        try:
            # restore last saved dialog position, defaulting to screen center
            self.move(self.settings.value('rename dialog position', QPoint(self.xpos, self.ypos), type=QPoint))
        except:
            # NOTE(review): bare except silently ignores *any* failure here;
            # narrowing to Exception would be safer.
            pass

        self.ui()

    def closeEvent(self, event):
        """Persist the dialog position when the dialog closes."""
        self.settings.setValue('rename dialog position', self.pos())

    def ui(self):
        """Build widgets then arrange them into layouts."""
        self.widgets()
        self.layouts()

    def widgets(self):
        """Create the dialog's buttons, labels and line edits; hook up signals."""
        # BUTTONS ----------------------------------------------------------------------------------------
        self.btn_submit = QPushButton('Submit')
        self.btn_submit.clicked.connect(self.submit_new_name)
        self.btn_submit.setObjectName('btn_submit')
        self.btn_cancel = QPushButton('Cancel')
        self.btn_cancel.clicked.connect(self.close)

        # LABELS -----------------------------------------------------------------------------------------
        self.lbl_rename = QLabel('')
        self.lbl_rename.setObjectName('lbl_rename')

        # ENTRIES ---------------------------------------------------------------------------------------
        self.entry_prefix = QLineEdit(self.default_prefix)
        self.entry_prefix.setToolTip('<b>Default</b> prefix for all photos')
        self.entry_prefix.setReadOnly(True)
        self.entry_prefix.setFocusPolicy(Qt.NoFocus)
        self.entry_new_name = QLineEdit()
        self.entry_new_name.setToolTip('<b>Short</b> description about photo')
        self.entry_new_name.setFocus()

        # SIGNALS --------------------------------------------------------------------------------------
        # main window tells us which photo is being renamed
        main_.signal_photo_name.connect(self.get_photo_name)

    def layouts(self):
        """Arrange the widgets: label on top, name fields in a group box, buttons below."""
        # DEFINING LAYOUTS ------------------------------------------------------------------------------
        self.main_layout = QVBoxLayout()
        self.top_layout = QHBoxLayout()
        self.bottom_layout = QFormLayout()
        self.button_layout = QHBoxLayout()
        self.name_group_box = QGroupBox('Name options')
        self.name_group_box.setLayout(self.bottom_layout)

        # Adding Widgets to Top Layout -----------------------------------------------------------------
        self.top_layout.addWidget(self.lbl_rename)
        self.top_layout.setContentsMargins(0, 0, 0, 15)

        # Adding Buttons to Button Layout --------------------------------------------------------------
        self.button_layout.addWidget(self.btn_submit)
        self.button_layout.addWidget(self.btn_cancel)
        self.button_layout.setContentsMargins(110, 0, 0, 0)

        # Adding Widgets to Bottom Layout ---------------------------------------------------------------
        self.bottom_layout.setContentsMargins(10, 10, 0, 0)
        self.bottom_layout.addRow(QLabel('Prefix:'), self.entry_prefix)
        self.bottom_layout.addRow(QLabel('New Name:'), self.entry_new_name)

        # Adding Layouts and Widgets to Main Layout ----------------------------------------------------
        self.main_layout.addLayout(self.top_layout, 20)
        self.main_layout.addWidget(self.name_group_box, 70)
        self.main_layout.addLayout(self.button_layout, 10)
        self.setLayout(self.main_layout)

    @pyqtSlot(str)
    def get_photo_name(self, pic):
        """Slot: remember the current photo name and show it (elided if long)."""
        # print('pic', pic)
        self.photoname = pic
        if len(self.photoname) > 21:
            # elide long names: first 4 chars + '...' + last 15 chars
            new_photoname = self.photoname[0:4] + '...' + self.photoname[-15:]
            self.lbl_rename.setText(f'Renaming photo \'<i>{new_photoname}</i>\' to: ')
        else:
            self.lbl_rename.setText(f'Renaming photo \'<i>{self.photoname}</i>\' to: ')

    def submit_new_name(self):
        """Validate the entered name and emit signal_new_name on success."""
        prefix = self.entry_prefix.text()
        name = self.entry_new_name.text()
        if name in ['', ' ']:
            QMessageBox.critical(self, 'Rename Failed', 'Name provided is invalid!')
        elif (prefix + name + '.png') in main_.images:
            # assumes all managed images are .png -- confirm in MainWindow
            QMessageBox.critical(self, 'Rename Failed', 'Name provided already exists!')
        else:
            self.signal_new_name.emit(prefix, name)
            QMessageBox.information(self, 'Rename success', 'Image renamed successfully.')
            self.close()
class SettingsDialog(QDialog):
    """
    Class for Settings Dialog Box.

    Lets the user choose the image-name prefix, the temporary and target
    folders, and the export mode (all / favourites / one at a time).
    Values are persisted via QSettings and read back by MainApp.
    """

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Settings')
        self.setObjectName('SettingsDialogBox')
        self.setWindowIcon(QIcon(':/icons/cat'))
        self.setStyleSheet(style.settings_dialog_style())
        self.setModal(True)  # deactivates other windows till this window is interacted with
        self.DIALOG_WIDTH, self.DIALOG_HEIGHT = 450, 300
        self.resize(self.DIALOG_WIDTH, self.DIALOG_HEIGHT)
        self.setMaximumSize(self.DIALOG_WIDTH, self.DIALOG_HEIGHT)
        # Positioning at center of screen
        self.D_WIDTH, self.D_HEIGHT = main_.DESKTOP_WIDTH, main_.DESKTOP_HEIGHT
        self.xpos = int((self.D_WIDTH / 2) - (self.DIALOG_WIDTH / 2))
        self.ypos = int((self.D_HEIGHT / 2) - ((self.DIALOG_HEIGHT + 308) / 2))
        # SETTINGS DIALOG SETTINGS lol --------------------------------------------------------------------
        self.settings = QSettings(settings_registry_key_name, settings_registry_subkey_name)
        try:
            self.move(self.settings.value('settings dialog location', QPoint(self.xpos, self.ypos), type=QPoint))
            self.default_prefix_text = self.settings.value('default prefix', 'SP_', type=str)
            self.temp_dir = str(self.settings.value('temporary directory', '', type=str))
            self.target_dir = str(self.settings.value('target directory', '', type=str))
            self.rbtn_fav_state = (self.settings.value('fav button checked', False, type=bool))
            self.rbtn_all_state = (self.settings.value('all button checked', False, type=bool))
            self.rbtn_one_state = (self.settings.value('one button checked', False, type=bool))
        except Exception as e:
            print(f'There was an exception: \"{e}\" while trying to read from QSettings.')
        # DIALOG ANIMATION SETTINGS ----------------------------------------------------------------------
        # self.openingAnimation(self.DIALOG_WIDTH, self.DIALOG_HEIGHT + 308)
        self.ui()

    def closeEvent(self, event):
        """Persist the dialog's on-screen position when it closes."""
        self.settings.setValue('settings dialog location', self.pos())

    def ui(self):
        """Build the dialog: create the widgets, then lay them out."""
        self.widgets()
        self.layouts()

    def widgets(self):
        """Create all child widgets and wire their signals."""
        # TOP LAYOUT WIDGETS ------------------------------------------------------------------------------
        self.lbl_default_prefix = QLabel('Default Prefix')
        self.lbl_custom_prefix = QLabel('Custom Prefix')
        self.entry_default_prefix = QLineEdit(self.default_prefix_text)
        self.entry_default_prefix.setReadOnly(True)
        self.entry_default_prefix.setToolTip('<b>Current</b> default prefix for images')
        self.entry_default_prefix.setObjectName('default_prefix')
        self.entry_default_prefix.setFocusPolicy(Qt.NoFocus)
        self.entry_custom_prefix = QLineEdit()
        self.entry_custom_prefix.setFocus()
        # self.entry_custom_prefix.textEdited.connect(self.showHint)
        self.lbl_custom_prefix_hint = QLabel('')
        self.lbl_custom_prefix_hint.setWordWrap(True)
        # MIDDLE LAYOUT WIDGETS ----------------------------------------------------------------------------
        self.lbl_temp_dir = QLabel('Temp. Folder')
        self.lbl_target_dir = QLabel('Target Folder')
        self.entry_temp_dir = QLineEdit(self.temp_dir)
        self.entry_temp_dir.setReadOnly(True)
        self.entry_temp_dir.setObjectName('entry_dir')
        self.entry_temp_dir.setToolTip('Folder path to keep retrieved Spotlight Photos for processing')
        self.entry_target_dir = QLineEdit(self.target_dir)
        self.entry_target_dir.setReadOnly(True)
        self.entry_target_dir.setObjectName('entry_dir')
        self.entry_target_dir.setToolTip('Folder path to <b>export</b> images to')
        self.btn_temp_dir_browse = QPushButton('Browse')
        self.btn_temp_dir_browse.setObjectName('btn_browse')
        self.btn_temp_dir_browse.setToolTip('Select folder for <b>temporary</b> storage of spotlight photos')
        self.btn_temp_dir_browse.clicked.connect(self.browse_temp_directory)
        self.btn_target_dir_browse = QPushButton('Browse')
        self.btn_target_dir_browse.setObjectName('btn_browse')
        self.btn_target_dir_browse.setToolTip('Select folder to <b>export</b> images to')
        self.btn_target_dir_browse.clicked.connect(self.browse_target_directory)
        # BOTTOM LAYOUT WIDGETS ----------------------------------------------------------------------------
        self.rbtn_fav = QRadioButton('Favorite images only')
        self.rbtn_fav.setChecked(self.rbtn_fav_state)
        self.rbtn_all = QRadioButton('All images')
        self.rbtn_all.setChecked(self.rbtn_all_state)
        self.rbtn_one = QRadioButton('One at a time')
        self.rbtn_one.setChecked(self.rbtn_one_state)
        self.btn_ok = QPushButton('OK')
        self.btn_ok.setObjectName('btn_ok')
        self.btn_ok.clicked.connect(self.submit_settings)
        self.btn_cancel = QPushButton('Cancel')
        self.btn_cancel.clicked.connect(self.close)

    def layouts(self):
        """Assemble the widgets into group boxes and the main layout."""
        # DEFINING LAYOUTS ---------------------------------------------------------------------------------
        self.main_layout = QVBoxLayout()
        self.top_layout = QVBoxLayout()
        self.middle_layout = QVBoxLayout()
        # self.bottom_layout = QHBoxLayout()
        self.bottom_layout = QGridLayout()
        self.btn_ok_cancel_layout = QHBoxLayout()
        self.top_group_box = QGroupBox('Prefix options')
        self.top_group_box.setLayout(self.top_layout)
        self.middle_group_box = QGroupBox('Folder options')
        self.middle_group_box.setLayout(self.middle_layout)
        self.bottom_group_box = QGroupBox('Export options')
        self.bottom_group_box.setLayout(self.bottom_layout)
        self.top_form_layout = QFormLayout()
        self.middle_form_layout = QFormLayout()
        self.bottom_rbtn_layout = QHBoxLayout()
        self.bottom_form_layout = QFormLayout()
        self.temp_dir_row_layout = QHBoxLayout()
        self.target_dir_row_layout = QHBoxLayout()
        # TOP LAYOUT --------------------------------------------------------------------------------------
        self.top_form_layout.addRow(self.lbl_default_prefix, self.entry_default_prefix)
        self.top_form_layout.addRow(self.lbl_custom_prefix, self.entry_custom_prefix)
        self.top_form_layout.setContentsMargins(10, 0, 0, 0)
        self.top_layout.addLayout(self.top_form_layout)
        # MIDDLE LAYOUT -----------------------------------------------------------------------------------
        self.temp_dir_row_layout.addWidget(self.entry_temp_dir)
        self.temp_dir_row_layout.addWidget(self.btn_temp_dir_browse)
        self.target_dir_row_layout.addWidget(self.entry_target_dir)
        self.target_dir_row_layout.addWidget(self.btn_target_dir_browse)
        self.middle_form_layout.addRow(self.lbl_temp_dir, self.temp_dir_row_layout)
        self.middle_form_layout.addRow(self.lbl_target_dir, self.target_dir_row_layout)
        self.middle_form_layout.setContentsMargins(10, 0, 0, 0)
        self.middle_layout.addLayout(self.middle_form_layout)
        # BOTTOM LAYOUT -----------------------------------------------------------------------------------
        self.bottom_layout.addWidget(self.rbtn_all, 0, 0)
        self.bottom_layout.addWidget(self.rbtn_fav, 0, 1)
        self.bottom_layout.addWidget(self.rbtn_one, 0, 2)
        self.bottom_group_box.setContentsMargins(10, 0, 0, 0)  # or use hlayout rather
        # BUTTONS LAYOUT -----------------------------------------------------------------------------------
        self.btn_ok_cancel_layout.addWidget(self.btn_ok)
        self.btn_ok_cancel_layout.addWidget(self.btn_cancel)
        self.btn_ok_cancel_layout.setContentsMargins(170, 0, 8, 0)
        # CONFIGURING MAIN LAYOUT ----------------------------------------------------------------------------
        self.main_layout.addWidget(self.top_group_box, 38)
        self.main_layout.addWidget(self.middle_group_box, 30)
        self.main_layout.addWidget(self.bottom_group_box, 20)
        self.main_layout.addLayout(self.btn_ok_cancel_layout, 12)
        self.setLayout(self.main_layout)

    # def showHint(self):
    #     if self.entry_custom_prefix.text() != '':
    #         # print('Typed something in custom prefix')
    #         self.lbl_custom_prefix_hint.setText(
    #             'This prefix will be used as the default prefix for all images from now on.')
    #         self.lbl_custom_prefix_hint.setStyleSheet('font: 8pt segoe UI; color: #3db7ff;')
    #         self.top_form_layout.addRow('', self.lbl_custom_prefix_hint)
    #     else:
    #         self.lbl_custom_prefix_hint.clear()

    def submit_settings(self):
        """Validate the form, persist all settings, and close on success."""
        # Fix: was ``if self.entry_temp_dir.text() and self.entry_target_dir.text() != '':``
        # -- behaviourally the same via truthiness, but misleading; both fields
        # are now compared explicitly.
        if self.entry_temp_dir.text() != '' and self.entry_target_dir.text() != '':
            if self.entry_custom_prefix.text() == '':
                # No custom prefix entered: keep the current default.
                self.custom_prefix = self.entry_default_prefix.text()
                print('set custom prefix to default entry', self.entry_default_prefix.text())
            else:
                self.custom_prefix = self.entry_custom_prefix.text()
                print('set custom prefix to custom entry')
            self.temp_dir = self.entry_temp_dir.text()
            self.target_dir = self.entry_target_dir.text()
            self.settings.setValue('default prefix', self.custom_prefix)
            self.settings.setValue('temporary directory', self.temp_dir)
            self.settings.setValue('target directory', self.target_dir)
            self.settings.setValue('fav button checked', self.rbtn_fav.isChecked())
            self.settings.setValue('all button checked', self.rbtn_all.isChecked())
            self.settings.setValue('one button checked', self.rbtn_one.isChecked())
            QMessageBox.information(self, 'Settings saved', 'Settings have been updated!')
            self.close()
        else:
            QMessageBox.warning(self, 'Settings Warning', 'Please fill <b>all</b> required fields!')

    def browse_temp_directory(self):
        """Pick the temporary folder; it must differ from the target folder."""
        self.temp_dir = QFileDialog.getExistingDirectory(self, 'Select Temporary Folder for Images')
        if self.temp_dir != '' and self.temp_dir == self.target_dir:
            # Fix: closing </b> tag was written as a second opening <b>.
            QMessageBox.critical(self, 'Invalid Folder Error',
                                 'You <b>cannot</b> use the same folder as the <b>Target Folder</b>!')
            return
        if self.temp_dir != '':
            print('temp dir: ', self.temp_dir)
            # if len(self.temp_dir) > 26:
            #     new_temp_dir = self.temp_dir[0:4] + '...' + self.temp_dir[-20:]
            #     self.entry_temp_dir.setText(new_temp_dir)
            # else:  # need to introduce new var to prevent this from affecting the real path used for processing
            self.entry_temp_dir.setText(self.temp_dir)

    def browse_target_directory(self):
        """Pick the export target folder; it must differ from the temp folder."""
        self.target_dir = QFileDialog.getExistingDirectory(self, 'Select Target Folder for Favorite/All Images')
        if self.target_dir != '' and self.target_dir == self.temp_dir:
            # Fix: closing </b> tag was written as a second opening <b>.
            QMessageBox.critical(self, 'Invalid Folder Error',
                                 'You <b>cannot</b> use the same folder as the <b>Temporary Folder</b>!')
            return
        if self.target_dir != '':
            print('target dir: ', self.target_dir)
            self.entry_target_dir.setText(self.target_dir)

    # def openingAnimation(self, width, height):
    #     self.open_animation = QPropertyAnimation(self, b'size')
    #     self.open_animation.setDuration(1000)
    #     self.open_animation.setEndValue(QSize(width, height))
    #     self.open_animation.setEasingCurve(QEasingCurve.Linear)
    #     self.open_animation.start()
    #     self.setMaximumSize(QSize(width, height))
class MainApp(MainWindow, QWidget):
    """
    Class for main app which makes use of the main window UI.

    Drives the whole workflow: retrieving Windows Spotlight photos into a
    temporary folder, browsing them, renaming/favouriting, deleting, and
    exporting to a target folder. Persistent options live in QSettings.
    """

    signal_photo_name = pyqtSignal(str)  # carries the current filename to the rename dialog

    def __init__(self):
        super().__init__()
        # Spotlight photos only exist on Windows 10; bail out early elsewhere.
        if (platform.system() != 'Windows') or (platform.release() != '10'):
            mbox = QMessageBox(QMessageBox.Critical, 'App Error', 'Your platform does not support Windows Spotlight!')
            mbox.setWindowIcon(QIcon(':/icons/cat'))
            mbox.setInformativeText('Windows Spotlight Photos is only supported on Windows 10.')
            mbox.exec_()
            sys.exit()
        # Positioning window at center of screen
        d = QDesktopWidget().screenGeometry()
        self.DESKTOP_WIDTH, self.DESKTOP_HEIGHT = d.width(), d.height()
        self.APP_WIDTH, self.APP_HEIGHT = 1200, 800
        self.app_x_pos = int((self.DESKTOP_WIDTH / 2) - (self.APP_WIDTH / 2))
        self.app_y_pos = int((self.DESKTOP_HEIGHT / 2) - (self.APP_HEIGHT / 2))
        self.setMinimumSize(600, 555)
        self.load_in_button_clicked = 0  # how many times 'load in' has been used
        # Object Attributes
        self.images = []      # filenames currently loaded from temp storage
        self.image_index = 0  # index of the image being displayed
        # APP SETTINGS -------------------------------------------------------------------------------
        self.setts = QSettings(settings_registry_key_name, settings_registry_subkey_name)
        print('App data already exists:', self.setts.contains('default prefix'))
        try:
            self.resize(self.setts.value('window size', QSize(self.APP_WIDTH, self.APP_HEIGHT), type=QSize))
            self.move(self.setts.value('window position', QPoint(self.app_x_pos, self.app_y_pos), type=QPoint))
        except Exception:
            # Fix: was a bare ``except: pass``. Kept best-effort (first run has
            # no saved geometry) but narrowed so SystemExit etc. still propagate.
            pass
        if not self.setts.contains('default prefix'):
            # First run: pop the settings dialog shortly after startup.
            self.timer = QTimer()
            self.timer.singleShot(500, self.open_settings)
            # self.open_settings()  # can't open it instantly as the main app's objects would not have been created yet
        self.ui()

    def closeEvent(self, event):
        """Persist window geometry on close."""
        self.setts.setValue('window size', self.size())
        self.setts.setValue('window position', self.pos())

    def ui(self):
        """Finish building the UI on top of MainWindow's widgets."""
        self.app_widgets()

    def app_widgets(self):
        """Wire up the buttons (created by MainWindow) and their shortcuts."""
        # BUTTONS ---------------------------------------------------------------------------
        self.btn_load_in.clicked.connect(self.retrieve_spotlight_photos)
        self.btn_load_in.setShortcut('Ctrl+D')
        self.btn_next.clicked.connect(self.next_image)
        self.btn_next.setShortcut('Right')
        self.btn_previous.clicked.connect(self.previous_image)
        self.btn_previous.setShortcut('Left')
        self.btn_delete.clicked.connect(self.delete_image)
        self.btn_delete.setShortcut('Del')
        self.btn_save.clicked.connect(self.save_image)
        self.btn_save.setShortcut('Return')
        self.btn_export.clicked.connect(self.export_images)
        self.btn_export.setShortcut('Ctrl+E')
        self.btn_settings.clicked.connect(self.open_settings)

    def retrieve_spotlight_photos(self):
        """Copy new Spotlight photos into temp storage and show the first one."""
        self.image_index = 0
        prefix = self.setts.value('default prefix')
        temp_dir = self.setts.value('temporary directory')
        target_dir = self.setts.value('target directory')
        if ((temp_dir is None or target_dir is None) or (
                temp_dir in ['none', 'None'] or target_dir in ['none', 'None'])):
            QMessageBox.critical(self, 'Directory Error', 'Folder(s) NOT chosen in <b>Settings</b>')
        else:
            if self.load_in_button_clicked == 0 or (self.load_in_button_clicked != 0 and self.images == []):
                # First time it's clicked, or clicked after the user deleted all pictures.
                self.t1 = time()
                self.spotlight = Spotlight(prefix=prefix, temp_storage=temp_dir)
                print(self.spotlight.selected_new_win_files)
                self.setup_first_pic_after_retrieval()
            else:  # Clicked while user is still viewing pictures.
                mbox = QMessageBox.warning(self, 'Spotlight Photos', 'Previous images could be lost!',
                                           # todo: change message here to something about having duplicate images
                                           QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Cancel)
                if mbox == QMessageBox.Cancel:
                    pass
                else:
                    self.t1 = time()
                    self.spotlight = Spotlight(prefix=prefix, temp_storage=temp_dir)
                    print(self.spotlight.selected_new_win_files)
                    self.setup_first_pic_after_retrieval()
            # Fix: guard added -- retrieval may find no new photos, in which
            # case self.images is empty and indexing would raise IndexError.
            if self.images:
                if self.setts.value('default prefix') in self.images[self.image_index]:
                    self.set_fav_icon_visible()
                else:
                    self.lbl_fav_icon.clear()

    def next_image(self):
        """Advance to the next image and refresh buttons/title/fav icon."""
        if self.image_index == (len(self.images) - 1):
            # Already at the last image: step back so the +1 below is a no-op.
            self.image_index -= 1
            self.btn_next.setEnabled(False)
        self.image_index += 1
        self.lbl_image.setPixmap(self.make_label_rounded(os.path.join(self.spotlight.temp_storage,
                                                                      self.images[self.image_index])))
        self.btn_previous.setEnabled(True)
        self.setWindowTitle(self.title + ' - ' + self.images[self.image_index])
        if self.image_index == (len(self.images) - 1):
            self.btn_next.setEnabled(False)
        # Favourited images carry the default prefix in their filename.
        if self.setts.value('default prefix') in self.images[self.image_index]:
            self.set_fav_icon_visible()
        else:
            self.lbl_fav_icon.clear()

    def previous_image(self):
        """Step back to the previous image and refresh buttons/title/fav icon."""
        # Check before executing button function
        if self.image_index == 0:
            self.image_index += 1
            self.btn_previous.setEnabled(False)
        self.image_index -= 1
        self.lbl_image.setPixmap(self.make_label_rounded(os.path.join(self.spotlight.temp_storage,
                                                                      self.images[self.image_index])))
        self.btn_next.setEnabled(True)
        self.setWindowTitle(self.title + ' - ' + self.images[self.image_index])
        # Check after executing button function
        if self.image_index == 0:
            self.btn_previous.setEnabled(False)
        if self.setts.value('default prefix') in self.images[self.image_index]:
            self.set_fav_icon_visible()
        else:
            self.lbl_fav_icon.clear()

    def delete_image(self):
        """Send the current image to the recycle bin and resync the UI.

        NOTE(review): send2trash is given the bare filename, which resolves
        against the current working directory -- this assumes the CWD is the
        temp storage folder; confirm.
        """
        if len(self.images) == 1:  # Deleting the very last image
            send2trash.send2trash(self.images[self.image_index])
            self.images.remove(self.images[self.image_index])
            print(self.images)
            self.lbl_image.close()
            self.lbl_image = QLabel()
            self.lbl_image.setPixmap(QPixmap(':/icons/no_image'))  # No need to make rounded as borders are not visible
            self.lbl_image.setAlignment(Qt.AlignCenter)
            self.top_layout.addWidget(self.lbl_image)
            self.lbl_counter.setText('')
            self.setWindowTitle(self.title)
            # Disable buttons to prevent crash
            self.btn_next.setEnabled(False)
            self.btn_previous.setEnabled(False)
            self.btn_save.setEnabled(False)
            self.btn_delete.setEnabled(False)
            self.btn_export.setEnabled(False)
            self.lbl_fav_icon.clear()
            return
        if self.image_index == len(self.images) - 1:
            if len(self.images) == 2:
                self.btn_next.setEnabled(False)  # Don't know whether this works or not
            print('deleting last image in list')
            send2trash.send2trash(self.images[self.image_index])
            self.images.remove(self.images[self.image_index])
            self.image_index -= 1
            self.btn_next.setEnabled(False)
            if len(self.images) == 1:
                self.btn_previous.setEnabled(False)
            print('remaining images:', self.images)
        elif self.image_index <= 0:
            print('deleting first image in list')
            send2trash.send2trash(self.images[self.image_index])
            self.images.remove(self.images[self.image_index])
            if len(self.images) == 1:
                self.btn_previous.setEnabled(False)
                self.btn_next.setEnabled(False)
            else:
                self.image_index += 1
            print('remaining images', self.images)
        else:
            print('deleting image in the middle of list')
            send2trash.send2trash(self.images[self.image_index])
            self.images.remove(self.images[self.image_index])
            self.image_index -= 1
            if self.image_index == 0:
                self.btn_previous.setEnabled(False)
            elif len(self.images) == 2 and self.image_index == 1:
                self.btn_next.setEnabled(False)
            print('remaining images:', self.images)
        self.lbl_image.setPixmap(self.make_label_rounded(os.path.join(self.spotlight.temp_storage,
                                                                      self.images[self.image_index])))
        self.setWindowTitle(self.title + ' - ' + self.images[self.image_index])
        if self.setts.value('default prefix') in self.images[self.image_index]:
            self.set_fav_icon_visible()
        else:
            self.lbl_fav_icon.clear()
        if len(self.images) > 1:
            self.lbl_counter.setText(str(len(self.images)) + ' items')
        else:
            self.lbl_counter.setText(str(len(self.images)) + ' item')

    def save_image(self):
        """Open the rename dialog for the current image and listen for the result."""
        self.save_dialog = RenameDialogBox()
        self.signal_photo_name.emit(self.images[self.image_index])
        self.save_dialog.show()
        self.save_dialog.signal_new_name.connect(self.get_new_name)

    @pyqtSlot(str, str)
    def get_new_name(self, prefix, name):
        """Slot: rename the current image on disk and resync the UI state.

        NOTE(review): os.rename and os.listdir below use paths relative to
        the current working directory -- assumes the CWD is the temp storage
        folder; confirm against the Spotlight helper.
        """
        self.new_prefix = prefix
        self.new_name = name
        print('\nOld name: ' + self.images[self.image_index])
        print(self.new_prefix + self.new_name)
        old_file = self.images[self.image_index]
        os.rename(old_file, self.new_prefix + self.new_name + '.png')
        self.images.remove(old_file)
        self.images = [x for x in os.listdir() if '.png' in x]
        # print(self.images.index('.png')) # Doesn't work...have no idea why
        # Relocate the renamed file in the freshly sorted directory listing.
        for count, item in enumerate(self.images):
            if self.new_name in item:
                print('Renamed image at:', count)
                break
        self.image_index = count
        print('index: ', self.image_index)
        if self.image_index == len(self.images) - 1:
            print('last image')
            if len(self.images) == 1:
                self.btn_next.setEnabled(False)
                self.btn_previous.setEnabled(False)
            else:
                self.btn_next.setEnabled(False)
                self.btn_previous.setEnabled(True)
        elif self.image_index == 0:
            print('first image')
            self.btn_previous.setEnabled(False)
            self.btn_next.setEnabled(True)
        self.setWindowTitle(self.title + ' - ' + self.new_prefix + self.new_name + '.png')
        if self.setts.value('default prefix') in self.images[self.image_index]:
            self.set_fav_icon_visible()
        else:
            self.lbl_fav_icon.clear()
        print('New Images:', self.images)

    def export_images(self):
        """Export images to the target folder per the mode chosen in Settings."""
        print('cur directory: ', os.getcwd())
        print('Dir chosen:', self.setts.value('target directory'))
        if self.setts.value('fav button checked', False, type=bool):
            print('fav button checked:', self.setts.value('fav button checked'))
            selected_pics = self.spotlight.move_favorites_to_specific_folder(
                prefix=self.setts.value('default prefix'), target_folder=self.setts.value('target directory'))
            print('from main app, selected pics:', selected_pics)
            if selected_pics is None:
                QMessageBox.critical(self, 'Export Failed', '<b>NO</b> Favorite images to Export!')
            elif selected_pics[0] == 'FileExistsError':
                QMessageBox.critical(self, 'Image already exists', f'Image with the name \'<i>{selected_pics[1]}</i>\''
                                                                   f' already exists at <b>target folder</b>!')
                self.conditions_for_what_to_do_after_export(extra_condition='FileExistsError')
            else:
                for item in selected_pics:
                    self.images.remove(item)
                self.conditions_for_what_to_do_after_export()
        elif self.setts.value('all button checked', False, type=bool):
            print('all button checked:', self.setts.value('all button checked'))
            all_pics = self.spotlight.move_all_to_specific_folder(target_folder=self.setts.value('target directory'))
            print('from main app, all pics:', all_pics)
            if all_pics[0] == 'FileExistsError':
                QMessageBox.critical(self, 'Image already exists', f'Image with the name \'<i>{all_pics[1]}</i>\''
                                                                   f' already exists at <b>target folder</b>!')
                return
            else:
                self.images.clear()
                self.conditions_for_what_to_do_after_export()
        elif self.setts.value('one button checked', False, type=bool):
            print('one button checked:', self.setts.value('one button checked'))
            single_pic = self.spotlight.move_one_to_specific_folder(
                single_pic=self.images[self.image_index], target_folder=self.setts.value('target directory'))
            print('from main app, single pic:', single_pic)
            if single_pic[0] == 'FileExistsError':
                QMessageBox.critical(self, 'Image already exists', f'Image with the name \'<i>{single_pic[1]}</i>\''
                                                                   f' already exists at <b>target folder</b>!')
                return
            else:
                self.images.remove(single_pic)
                self.conditions_for_what_to_do_after_export()
        else:
            QMessageBox.critical(self, 'Export Choice', 'No <b>Export Option</b> was selected in Settings!')
            # TODO: Add informative text here to: 'go to settings'

    def open_settings(self):
        """Show the (modal) settings dialog."""
        self.settings_dialog = SettingsDialog()
        self.settings_dialog.show()

    # CLASS HELPER FUNCTIONS (to reduce repetition) ------------------------------------------------------
    def conditions_for_what_to_do_after_export(self, extra_condition=None):
        """Re-list the remaining images after an export and reset the viewer.

        :param extra_condition: 'FileExistsError' suppresses the success popup.
        NOTE(review): os.listdir() uses the current working directory --
        assumes it is the temp storage folder; confirm.
        """
        self.images = [x for x in os.listdir() if '.png' in x]
        print(self.images, len(self.images))
        self.image_index = 0
        if len(self.images) != 0:
            self.lbl_image.setPixmap(self.make_label_rounded(self.images[self.image_index]))
            if len(self.images) == 1:
                self.lbl_counter.setText(str(len(self.images)) + ' item')
                self.btn_next.setEnabled(False)
                self.btn_previous.setEnabled(False)
            elif len(self.images) > 1:
                self.lbl_counter.setText(str(len(self.images)) + ' items')
                self.btn_previous.setEnabled(False)
                self.btn_next.setEnabled(True)
            if extra_condition is None:
                QMessageBox.information(self, 'Export Success', 'Image(s) exported successfully.')
                self.setWindowTitle(self.title + ' - ' + self.images[self.image_index])
            else:
                self.setWindowTitle(self.title + ' - ' + self.images[self.image_index])
            if self.setts.value('default prefix') in self.images[self.image_index]:
                self.set_fav_icon_visible()
            else:
                self.lbl_fav_icon.clear()
        else:
            # Nothing left: show the placeholder and disable all actions.
            self.lbl_fav_icon.clear()
            self.lbl_image.close()
            self.lbl_image = QLabel()
            self.lbl_image.setPixmap(QPixmap(':/icons/no_image'))
            self.lbl_image.setAlignment(Qt.AlignCenter)
            self.top_layout.addWidget(self.lbl_image)
            self.lbl_counter.setText('')
            self.setWindowTitle(self.title)
            # Disable buttons to prevent crash
            self.btn_next.setEnabled(False)
            self.btn_previous.setEnabled(False)
            self.btn_save.setEnabled(False)
            self.btn_delete.setEnabled(False)
            self.btn_export.setEnabled(False)
            QMessageBox.information(self, 'Export Success', 'Image(s) exported successfully.')

    def set_fav_icon_visible(self):
        """Show the 'favourite' badge next to the current image."""
        self.lbl_fav_icon.setPixmap(
            QPixmap(':/icons/save_icon').scaledToHeight(self.fav_icon_size_y, Qt.SmoothTransformation))
        self.left_bottom_layout.addWidget(self.lbl_fav_icon)

    def setup_first_pic_after_retrieval(self):
        """Display the first retrieved photo and enable the relevant buttons."""
        if not self.spotlight.selected_new_win_files:
            QMessageBox.critical(self, 'Spotlight Photos', 'No New Spotlight Photos Found!')
            # For debugging
            self.t2 = time()
            print('Stopped abruptly - Time elapsed :', self.t2 - self.t1)
            # with open('log.txt', 'a') as f:
            #     f.write('Stopped abruptly - Time elapsed :' + str(self.t2 - self.t1) + '\n')
            return
        else:
            self.lbl_counter.setText(str(len(self.spotlight.selected_new_win_files)) + ' items')
            # self.lbl_counter.setToolTip('Number of <b>selected</b> img')
            self.images = self.spotlight.selected_new_win_files
            self.lbl_image.close()
            self.lbl_image = Label()
            self.top_layout.addWidget(self.lbl_image)
            self.lbl_image.setPixmap(self.make_label_rounded(os.path.join(self.spotlight.temp_storage,
                                                                          self.images[self.image_index])))
            # self.lbl_image.setPixmap(
            #     QPixmap(os.path.join(self.spotlight.temp_storage, self.images[self.image_index])))
            self.setWindowTitle(self.title + ' - ' + self.images[self.image_index])
            # Enable buttons except previous button since we'll be at first image
            self.btn_delete.setEnabled(True)
            self.btn_next.setEnabled(True)
            self.btn_previous.setEnabled(False)
            self.btn_save.setEnabled(True)
            self.btn_export.setEnabled(True)
            self.load_in_button_clicked += 1
            # For debugging
            self.t2 = time()
            print('Time elapsed :', self.t2 - self.t1)
            # print(os.getcwd())
            # with open('log.txt', 'a') as f:
            #     f.write('Time elapsed :' + str(self.t2 - self.t1) + '\n')

    def make_label_rounded(self, image_path):
        """Return *image_path* as a QPixmap with rounded corners.

        "Here is one way to do this. This works by drawing a rounded rect
        on an initially transparent pixmap using the original pixmap as
        the brush for the painter."
        ~ Heike, StackOverflow.
        (https://stackoverflow.com/questions/63656328/rounding-a-qlabels-corners-in-pyqt5)
        """
        # Fix: use locals instead of instance attributes (the painter and
        # pixmaps are purely transient) and explicitly end the painter before
        # the pixmap is returned for display.
        pixmap = QPixmap(image_path)
        radius = 30
        rounded = QPixmap(pixmap.size())
        rounded.fill(QColor('transparent'))
        painter = QPainter(rounded)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setBrush(QBrush(pixmap))
        painter.setPen(Qt.NoPen)
        painter.drawRoundedRect(pixmap.rect(), radius, radius)
        painter.end()
        return rounded
if __name__ == '__main__':
    # Qt bootstrap: create the application, build the main window, and block
    # in the event loop until the user quits. Note that ``main_`` is read as
    # a module-level global by the dialog classes above.
    app = QApplication(sys.argv)
    app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
    main_ = MainApp()
    sys.exit(app.exec_())
# TODO: [High Priority]:
# - Animate download button to get user attention or some other form of in-app tutorial
# - Animating loading in of pictures with a round progress bar kinda style
# - Add to README or settings that 'temp storage' should not be used as 'permanent wallpaper folder' as it will affect performance of the app
# - Option to favorite without necessarily renaming
# - Add checkbox to allow user to remove prefix and deactivate it in the rename dialog box
# - Option to open previous pics or load new ones (Use 'more icon' and put some buttons there) [Have to use new naming scheme: SP-210107-1319-abcde] where a-zA-Z0-9a for abcde
# - Lookup context menus [for the 'More' icon]
# - Add text to buttons on main window and draw attention to 'download' button.
#
# TODO: [Moderate Priority]:
# - Add validators to entries [optional]
# - Check if spotlight images is enabled
# - Refactor repeating code into helper functions across board
# - Informative text with Messagebox for 'No fav image selected', and possibly all messageboxes
#
# TODO: [Optional]:
# - Edit the no_image icon to show the text more and reduce opacity of the circle
# - Change 'CHR-onicles' to a global variable to prevent redundancy
# TODO: FOR SETTINGS OPTIONS (this version would not have these...so delete this later)
# - Option to disable prefix with checkbox [MEDIUM PRIORITY]
# - Option for user to delete temp storage when done [optional]
| StarcoderdataPython |
3201523 | <reponame>BrianCarela/google-python-exercises<gh_stars>0
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
# trying my own function
def repeat(n, exclaim):
"""
This is what large comments look like. Triple quotes
Anyways, this just repeats the input. Or adds it, depending on type
"""
# the * operator means repeat. Multiplies numbers, repeats strings
result = n * 3
if exclaim:
result = result + '!!!'
print result
# Define a main() function that prints a little greeting.
def main():
# Get the name from the command line, using 'World' as a fallback.
if len(sys.argv) >= 2:
name = sys.argv[1]
else:
name = 'World'
# booleans begin with capital letters in Python
repeat(name, True)
print 'Whats gooooooood', name
# print 'YAS BITCH'
# print len(sys.argv)
# my name is hard coded here, and booleans need cap letters
# repeat('Brian', True)
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
    # Run only when executed as a script, not when imported as a module.
    main()
| StarcoderdataPython |
6643690 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DatasetSnapshotDto(Model):
    """DatasetSnapshotDto.

    Auto-generated (AutoRest) transfer object describing a dataset snapshot;
    per the file header, hand edits are lost when the client is regenerated.

    :param dataset_snapshot_name:
    :type dataset_snapshot_name: str
    :param time_stamp:
    :type time_stamp: datetime
    :param dataset_id:
    :type dataset_id: str
    :param definition_version:
    :type definition_version: str
    :param profile_action_id:
    :type profile_action_id: str
    :param data_snapshot_path:
    :type data_snapshot_path: ~_restclient.models.DataPathDto
    :param created_time:
    :type created_time: datetime
    """

    # Maps each Python attribute to its wire-format key and msrest type;
    # consumed by msrest's Model (de)serialization machinery.
    _attribute_map = {
        'dataset_snapshot_name': {'key': 'datasetSnapshotName', 'type': 'str'},
        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
        'dataset_id': {'key': 'datasetId', 'type': 'str'},
        'definition_version': {'key': 'definitionVersion', 'type': 'str'},
        'profile_action_id': {'key': 'profileActionId', 'type': 'str'},
        'data_snapshot_path': {'key': 'dataSnapshotPath', 'type': 'DataPathDto'},
        'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
    }

    def __init__(self, dataset_snapshot_name=None, time_stamp=None, dataset_id=None, definition_version=None, profile_action_id=None, data_snapshot_path=None, created_time=None):
        # Plain value holder: every field defaults to None and is stored as-is.
        super(DatasetSnapshotDto, self).__init__()
        self.dataset_snapshot_name = dataset_snapshot_name
        self.time_stamp = time_stamp
        self.dataset_id = dataset_id
        self.definition_version = definition_version
        self.profile_action_id = profile_action_id
        self.data_snapshot_path = data_snapshot_path
        self.created_time = created_time
| StarcoderdataPython |
1940398 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Persistent identifier fetchers.
A proper fetcher is defined as a function that return a
:data:`invenio_pidstore.fetchers.FetchedPID` instance.
E.g.
.. code-block:: python
def my_fetcher(record_uuid, data):
return FetchedPID(
provider=MyRecordIdProvider,
pid_type=MyRecordIdProvider.pid_type,
pid_value=extract_pid_value(data),
)
To see more about providers see :mod:`invenio_pidstore.providers`.
"""
from __future__ import absolute_import, print_function
from collections import namedtuple
from flask import current_app
from .providers.recordid import RecordIdProvider
FetchedPID = namedtuple('FetchedPID', ['provider', 'pid_type', 'pid_value'])
"""A pid fetcher."""
def recid_fetcher(record_uuid, data):
    """Fetch a record's identifiers.

    :param record_uuid: The record UUID (unused; part of the fetcher signature).
    :param data: The record metadata.
    :returns: A :data:`invenio_pidstore.fetchers.FetchedPID` instance.
    """
    # The field holding the record id is configurable via the Flask app config.
    recid_field = current_app.config['PIDSTORE_RECID_FIELD']
    pid_value = str(data[recid_field])
    return FetchedPID(provider=RecordIdProvider,
                      pid_type=RecordIdProvider.pid_type,
                      pid_value=pid_value)
| StarcoderdataPython |
1849855 |
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
"""
Populates user manual with automatic documentation.
"""
import os

import collect_tex

# <build> directory of the manual, resolved relative to this script's location.
build_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'build'))
# LaTeX \input paths must be expressed relative to the build directory.
source_path = os.path.relpath(os.path.join(build_path, '..', 'src'), start=build_path)

print("Populating TEAL_user_manual.tex...")

# One \input line per generated section file.
section_inputs = ''.join(
    '\\input{{{name}}}\n'.format(name=name) for name in collect_tex.specs_to_load.keys()
)

# Splice the generated section list into the manual template in place.
manual = os.path.join(build_path, 'TEAL_user_manual.tex')
with open(manual, 'r') as tex_file:
    document = tex_file.read()
document = document.replace('%INSERT_SECTIONS_HERE', section_inputs + '\n\\clearpage')
with open(manual, 'w') as tex_file:
    tex_file.write(document)

print("... TEAL_user_manual.tex populated")
| StarcoderdataPython |
8113787 | import ast, os
from app.extinsions import db
from sqlalchemy.event import listens_for
basedir = os.path.abspath(os.path.dirname(__file__))
# Association table for the many-to-many "user likes post" relationship.
likes = db.Table('likes',
                 db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
                 db.Column('post_id', db.Integer, db.ForeignKey('post.id'))
                 )
class Comment(db.Model):
    """A comment left by a user on a post."""
    user_id = db.Column('user_id', db.Integer, db.ForeignKey('user.id'))
    # NOTE(review): post_id alone is the primary key, which limits the table to
    # one comment row per post -- confirm this is intentional.
    post_id = db.Column('post_id', db.Integer, db.ForeignKey('post.id'), primary_key=True)
    body = db.Column(db.String(50))
class Post(db.Model):
    """A user post holding a caption, image filenames, and its comments."""
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    caption = db.Column(db.String(50))
    # Image filenames are stored as the repr of a Python list; parse with get_images().
    images = db.Column(db.String(100), nullable=False)
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    # PEP 8 (E731): a lambda bound to a name was replaced by a proper method;
    # callers still invoke it the same way (post.get_images()).
    def get_images(self):
        """Return the list of image filenames parsed from the stored repr."""
        return ast.literal_eval(self.images)
@listens_for(Post, "after_delete")
def delete_images(mapper, connection, target):
    """SQLAlchemy hook: remove a deleted post's image files from disk.

    Deletion is best-effort; files that are already missing are ignored.
    """
    image_dir = os.path.join(basedir, 'static/images/')
    for filename in target.get_images():
        try:
            os.remove(os.path.join(image_dir, filename))
        except OSError:
            # File already gone (or not removable) -- nothing to do.
            pass
| StarcoderdataPython |
4895062 | <reponame>specter119/py4cytoscape<filename>py4cytoscape/filters.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""Functions for working with FILTERS for the selection of nodes and edges in
networks, including operations to import and export filters. In the Cytoscape
user interface, filters are managed in the Select tab of the Control Panel.
"""
"""Copyright 2020 The Cytoscape Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Internal module convenience imports
import time
import json
import warnings
# Internal module imports
from . import commands
from . import networks
from . import network_selection
from . import tables
from . import style_bypasses
from . import sandbox
# External library imports
from .exceptions import CyError
from .py4cytoscape_utils import *
from .py4cytoscape_logger import cy_log, show_error
from .py4cytoscape_tuning import CATCHUP_FILTER_SECS
from .py4cytoscape_sandbox import get_abs_sandbox_path
from .py4cytoscape_notebook import running_remote
@cy_log
def apply_filter(filter_name='Default filter', hide=False, network=None, base_url=DEFAULT_BASE_URL):
    """Run an existing filter by name and return the resulting selection.

    Args:
        filter_name (str): Name of the filter to apply. Default is "Default filter".
        hide (bool): Whether to hide filtered-out nodes and edges (ignored if
            everything is filtered out). Default is False.
        network (SUID or str or None): Name or SUID of the network. Default is
            the "current" network active in Cytoscape.
        base_url (str): CyREST base URL. Default is http://localhost:1234.

    Returns:
        dict: {'nodes': <node list>, 'edges': <edge list>} selected after the filter executes.

    Raises:
        CyError: if the filter doesn't exist.
        requests.exceptions.RequestException: if Cytoscape can't be reached or returns an error.

    Examples:
        >>> apply_filter('degree filter 1x')
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}

    See Also:
        :meth:`unhide_all`
    """
    if filter_name not in get_filter_list(base_url=base_url):
        raise CyError(f'Filter "{filter_name}" does not exist.')
    net_suid = networks.get_network_suid(network, base_url=base_url)
    networks.set_current_network(net_suid, base_url=base_url)
    # NOTE: filter names with embedded blanks may not round-trip cleanly through
    # the command API in older clients -- TODO confirm.
    commands.commands_post(f'filter apply container="filter" name="{filter_name}" network=SUID:"{net_suid}"',
                           base_url=base_url)
    return _check_selected(hide, net_suid, base_url)
@cy_log
def create_column_filter(filter_name, column, criterion, predicate, caseSensitive=False, anyMatch=True, type='nodes',
                         hide=False, network=None, base_url=DEFAULT_BASE_URL, *, apply=True):
    """Create a Column Filter to control node or edge selection.

    Works on boolean, string, numeric and list columns. Criterion/predicate
    combinations depend on the column type.

    Args:
        filter_name (str): Name for the new filter.
        column (str): Table column to base the filter upon.
        criterion (list, bool, str, int or float): Boolean columns: True/False.
            String columns: a string value (a Java regex when predicate is REGEX).
            Numeric columns: a two-element list for BETWEEN/IS_NOT_BETWEEN,
            otherwise a single number.
        predicate (str): Boolean: IS, IS_NOT. String: IS, IS_NOT, CONTAINS,
            DOES_NOT_CONTAIN, REGEX. Numeric: IS, IS_NOT, GREATER_THAN,
            GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, BETWEEN,
            IS_NOT_BETWEEN.
        caseSensitive (bool): Whether string matching is case sensitive. Default False.
        anyMatch (bool): List columns only: True if one element passing suffices,
            False if all must pass. Default True.
        type (str): Apply filter to "nodes" (default) or "edges".
        hide (bool): Whether to hide filtered-out nodes and edges. Default False.
        network (SUID or str or None): Name or SUID of the network. Default is
            the "current" network active in Cytoscape.
        base_url (str): CyREST base URL. Default is http://localhost:1234.
        apply (bool): True to execute the filter immediately; False to only define it.

    Returns:
        dict: {'nodes': <node list>, 'edges': <edge list>} selected after the filter
        executes; None if the filter wasn't applied.

    Raises:
        CyError: if the column doesn't exist in the ``type`` table or the filter
            couldn't be applied.
        requests.exceptions.RequestException: if Cytoscape can't be reached or returns an error.

    Examples:
        >>> create_column_filter('myFilter', 'log2FC', [-1,1], "IS_NOT_BETWEEN")
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
        >>> create_column_filter('myFilter', 'name', "^Y.*C$", "REGEX")
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
    """
    networks.set_current_network(network, base_url=base_url)
    # type[:4] maps "nodes"/"edges" to the "node"/"edge" table name.
    if column not in tables.get_table_column_names(type[:4], base_url=base_url):
        raise CyError(f'Column "{column}" does not exist in the "{type[:4]}" table')
    if predicate == "REGEX" and check_supported_versions(cytoscape='3.9'):
        show_error('Warning -- Cytoscape version pre-3.9 in use ... REGEX filter may hang forever')
    elif predicate in ['BETWEEN', 'IS_NOT_BETWEEN']:
        if not isinstance(criterion, list) or len(criterion) != 2:
            # Bug fix: the f prefix was missing, so the message printed "{criterion}" literally.
            raise CyError(f'Criterion "{criterion}" must be a list of two numeric values, e.g., [0.5, 2.0]')
    elif predicate in ['GREATER_THAN', 'GREATER_THAN_OR_EQUAL']:
        # Manually feed the column maximum as the upper bound so the UI (which only
        # shows BETWEEN and doesn't distinguish > from >=) renders correctly.
        # TODO: Recommend limiting this to GREATER_THAN_OR_EQUAL, which is what the UI supports.
        col_vals = tables.get_table_columns(type[:4], column, base_url=base_url)
        crit_max = col_vals[column].max()
        criterion = [criterion, crit_max]
        # The same trick does not work for the LESS_THAN cases.
        # TODO: Find out what range criterion should be for GREATER_THAN ... different than GREATER_THAN_OR_EQUAL
    elif isinstance(criterion, bool):
        if predicate == "IS_NOT":
            criterion = not criterion
    elif isinstance(criterion, int) or isinstance(criterion, float):
        # Single-number IS / IS_NOT are expressed as degenerate BETWEEN ranges.
        if predicate == 'IS':
            criterion = [criterion, criterion]
            predicate = 'BETWEEN'
        elif predicate == 'IS_NOT':
            criterion = [criterion, criterion]
            predicate = 'IS_NOT_BETWEEN'

    # Actually create the filter
    cmd_json = {'id': 'ColumnFilter',
                'parameters': {'criterion': criterion, 'columnName': column, 'predicate': predicate,
                               'caseSensitive': caseSensitive, 'anyMatch': anyMatch, 'type': type}}
    cmd_body = {'name': filter_name, 'json': json.dumps(cmd_json)}
    return _create_filter_and_finish('commands/filter/create', cmd_body, hide, apply, network, base_url)
@cy_log
def create_degree_filter(filter_name, criterion, predicate='BETWEEN', edge_type='ANY', hide=False, network=None,
                         base_url=DEFAULT_BASE_URL, *, apply=True):
    """Create a Degree Filter controlling node selection by in/out degree.

    Args:
        filter_name (str): Name for the new filter.
        criterion (list): A two-element list of numbers, e.g. [1, 5].
        predicate (str): BETWEEN (default) or IS_NOT_BETWEEN.
        edge_type (str): Edges counted toward the degree: ANY (default),
            UNDIRECTED, INCOMING, OUTGOING, DIRECTED.
        hide (bool): Whether to hide filtered-out nodes and edges. Default False.
        network (SUID or str or None): Name or SUID of the network. Default is
            the "current" network active in Cytoscape.
        base_url (str): CyREST base URL. Default is http://localhost:1234.
        apply (bool): True to execute the filter immediately; False to only define it.

    Returns:
        dict: {'nodes': <node list>, 'edges': <edge list>} selected after the filter
        executes; None if the filter wasn't applied.

    Raises:
        CyError: if criterion is not a two-element list or the filter couldn't be applied.
        requests.exceptions.RequestException: if Cytoscape can't be reached or returns an error.

    Examples:
        >>> create_degree_filter('myFilter', [2, 5])
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
    """
    networks.set_current_network(network, base_url=base_url)
    if not (isinstance(criterion, list) and len(criterion) == 2):
        raise CyError(f'Criterion "{criterion}" must be a list of two numeric values, e.g., [0.5, 2.0]')

    parameters = {'criterion': criterion, 'predicate': predicate, 'edgeType': edge_type}
    cmd_body = {'name': filter_name,
                'json': json.dumps({'id': 'DegreeFilter', 'parameters': parameters})}
    return _create_filter_and_finish('commands/filter/create', cmd_body, hide, apply, network, base_url)
@cy_log
def create_composite_filter(filter_name, filter_list, type='ALL', hide=False, network=None, base_url=DEFAULT_BASE_URL, *, apply=True):
    """Combine previously created filters into one composite filter.

    Args:
        filter_name (str): Name for the new filter.
        filter_list (list): Names of the filters to combine (two or more).
        type (str): Composition type: ALL (default) or ANY filters must pass.
        hide (bool): Whether to hide filtered-out nodes and edges. Default False.
        network (SUID or str or None): Name or SUID of the network. Default is
            the "current" network active in Cytoscape.
        base_url (str): CyREST base URL. Default is http://localhost:1234.
        apply (bool): True to execute the filter immediately; False to only define it.

    Returns:
        dict: {'nodes': <node list>, 'edges': <edge list>} selected after the filter
        executes; None if the filter wasn't applied.

    Raises:
        CyError: if fewer than two filters are given, a named filter doesn't exist,
            or the filter couldn't be applied.
        requests.exceptions.RequestException: if Cytoscape can't be reached or returns an error.

    Examples:
        >>> create_composite_filter('New Filter', ['degree filter 1x', 'degree filter 2x'])
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
    """
    networks.set_current_network(network, base_url=base_url)
    if len(filter_list) < 2:
        raise CyError(f'Filter list "{filter_list}" is invalid. Must provide a list of two or more filter names, e.g., ["filter1", "filter2"]')

    def _first_transformer(name):
        # 'filter get' returns a list of filter definitions, or [] for an unknown name.
        res = commands.commands_post('filter get name="' + name + '"', base_url=base_url)
        return res[0]['transformers'][0] if res else None

    trans_list = [_first_transformer(name) for name in filter_list]
    if None in trans_list:
        # Consistency fix: use an f-string like the rest of this module.
        raise CyError(f'Filter name "{filter_list[trans_list.index(None)]}" does not exist')

    cmd_json = {'id': 'CompositeFilter', 'parameters': {'type': type}, 'transformers': trans_list}
    cmd_body = {'name': filter_name, 'json': json.dumps(cmd_json)}
    return _create_filter_and_finish('commands/filter/create', cmd_body, hide, apply, network, base_url)
@cy_log
def get_filter_list(base_url=DEFAULT_BASE_URL):
    """Return the names of all filters defined in the current session.

    Args:
        base_url (str): CyREST base URL. Default is http://localhost:1234.

    Returns:
        list: available filter names.

    Raises:
        requests.exceptions.RequestException: if Cytoscape can't be reached or returns an error.

    Examples:
        >>> get_filter_list()
        ['degree filter 1x', 'degree filter 2x']
    """
    return commands.commands_post('filter list', base_url=base_url)
@cy_log
def export_filters(filename='filters.json', base_url=DEFAULT_BASE_URL, *, overwrite_file=True):
    """Save all filters to a file in JSON format.

    Args:
        filename (str): Full or working-directory-relative path plus file name.
            A ".json" extension is appended if missing. Default is "filters.json".
        base_url (str): CyREST base URL. Default is http://localhost:1234.
        overwrite_file (bool): False raises an error if the file already exists;
            True lets Cytoscape overwrite it without asking.

    Returns:
        list: []

    Raises:
        CyError: if the file exists and overwrite_file is False.
        requests.exceptions.RequestException: if Cytoscape can't be reached or returns an error.

    Examples:
        >>> export_filters()            # Saves all filters in file 'filters.json'
        []
        >>> export_filters('test')      # Saves all filters in file 'test.json'
        []
    """
    ext = '.json'
    if re.search(ext + '$', filename) is None:
        filename += ext
    file_info = sandbox.sandbox_get_file_info(filename, base_url=base_url)
    if len(file_info['modifiedTime']) and file_info['isFile']:
        if overwrite_file:
            narrate('This file has been overwritten.')
        else:
            # Bug fix: the message previously reported the literal "(unknown)"
            # instead of the offending file name.
            raise CyError(f'File "{filename}" already exists ... filters not saved.')
    full_filename = file_info['filePath']
    res = commands.commands_get(f'filter export file="{full_filename}"', base_url=base_url)
    return res
@cy_log
def import_filters(filename, base_url=DEFAULT_BASE_URL):
    """Load filters from a JSON file, renaming on collision, and execute each one.

    Note:
        To load a filter file from cloud storage, download it into a sandbox with
        ``sandbox_url_to`` first, then call ``import_filters`` on the local copy.

    Args:
        filename (str): Path and name of the filters file to load.
        base_url (str): CyREST base URL. Default is http://localhost:1234.

    Returns:
        list: []

    Raises:
        requests.exceptions.RequestException: if Cytoscape can't be reached or returns an error.

    Examples:
        >>> import_filters('test.json')
        []
    """
    abs_filename = get_abs_sandbox_path(filename)
    res = commands.commands_get(f'filter import file="{abs_filename}"', base_url=base_url)
    # Cytoscape bug: imported filters execute asynchronously, so give them time to settle.
    time.sleep(CATCHUP_FILTER_SECS)
    return res
def _create_filter_and_finish(cmd, cmd_body, hide, apply, network, base_url):
    # Create a filter via CyREST, apply it according to the Cytoscape version's
    # semantics, and report the resulting node/edge selection.
    AUTO_APPLY_THRESHOLD = 100000  # pre-3.9 auto-applies only below this node+edge total
    if check_supported_versions(cytoscape='3.9') is None:
        # Cytoscape 3.9+: creation and application are controlled by the 'apply' flag.
        cmd_body['apply'] = apply
        res = commands.cyrest_post(cmd, body=cmd_body, base_url=base_url)
    else:
        # Before Cytoscape 3.9, the filter was automatically applied when it was created unless
        # the total of nodes and edges was 100,000 or more. So, we create the filter and then
        # consider applying it if it wasn't automatically applied already.
        res = commands.cyrest_post(cmd, body=cmd_body, base_url=base_url)
        if networks.get_node_count(network=network, base_url=base_url) \
                + networks.get_edge_count(network=network, base_url=base_url) > AUTO_APPLY_THRESHOLD:
            if apply:
                show_error('Warning -- Cytoscape version pre-3.9 in use ... explicitly applying filter')
                res = commands.commands_post(
                    f'filter apply container="filter" name="{cmd_body["name"]}" network="{network}"',
                    base_url=base_url)
        elif not apply:
            # Small networks on pre-3.9 were already auto-applied, so "create only" is impossible.
            raise CyError('Attempt to create but not apply filter in Cytoscape version pre-3.9 is not supported')
    return _check_selected(hide, network, base_url)
def _check_selected(hide, network, base_url):
    # Gather the node/edge selection a filter produced; when hide=True, also hide
    # everything that was NOT selected (after first unhiding the whole network).
    if check_supported_versions(cytoscape='3.9'):
        # This delay became unnecessary in Cytoscape 3.9
        show_error('Warning -- Cytoscape version pre-3.9 in use ... settling delay inserted after filter execution')
        time.sleep(CATCHUP_FILTER_SECS)  # Yikes! Have to wait a second for selection to settle!
    sel_nodes = network_selection.get_selected_nodes(network=network, base_url=base_url)
    sel_edges = network_selection.get_selected_edges(network=network, base_url=base_url)
    if hide:
        res = style_bypasses.unhide_all(network=network, base_url=base_url)
        # TODO: Ignore return result res??
        if sel_nodes is not None and len(sel_nodes) != 0:
            # Hide the complement of the selection rather than the selection itself.
            res = style_bypasses.hide_nodes(network_selection.invert_node_selection(network=network, base_url=base_url)['nodes'])
        if sel_edges is not None and len(sel_edges) != 0:
            res = style_bypasses.hide_edges(network_selection.invert_edge_selection(network=network, base_url=base_url)['edges'])
    return {'nodes': sel_nodes, 'edges': sel_edges}
# TODO: Need to add Topological filter, too.
# TODO: Need to add rename/remove filter
# TODO: Need to add filter chaining
# TODO: Need to fetch existing filter???
| StarcoderdataPython |
1791394 | <reponame>dcopm999/pharmcrm2-goods
# Generated by Django 3.2.7 on 2021-10-08 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add slug fields to catalog/dosagepacking and widen quantity columns."""
    # NOTE: auto-generated by Django 3.2.7 (makemigrations); avoid editing an
    # applied migration by hand.

    dependencies = [
        ('goods', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='catalog',
            name='slug',
            field=models.SlugField(blank=True, editable=False, max_length=250, verbose_name='slug'),
        ),
        migrations.AddField(
            model_name='dosagepacking',
            name='slug',
            field=models.SlugField(blank=True, editable=False, max_length=250, verbose_name='slug'),
        ),
        migrations.AlterField(
            model_name='dosagepacking',
            name='quantity',
            field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Quantity'),
        ),
        migrations.AlterField(
            model_name='originalpacking',
            name='quantity',
            field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Quantity'),
        ),
    ]
| StarcoderdataPython |
3265596 | <gh_stars>0
import sys
sys.path.append('../modules/')
import matplotlib.pyplot as plt
import numpy as np
import sounddevice as sd
import time
import fsk
import microphone
from sinais import specgram, bandpass
fc = 44100
Bd = 1200
carrier = 1200
N = 500
bandwidth = carrier/2 + carrier*0.1
print('### BAUD {} CARRIER {}Hz ###'.format(str(Bd), str(carrier)))
msg = 'Hello world'
bmsg = fsk.encode_ascii(msg)
X = fsk.binary_signal(bmsg, fc, Bd)
print('Bitstream da mensagem original \n{}\n'.format(bmsg))
# byte = ''
# bytearray = []
#
# for k, bit in enumerate(bmsg, start=1):
# if k % 8 == 0:
# byte += bit
# bytearray.append(byte)
# byte = ''
# else:
# byte += bit
#
# for byte in bytearray:
# s = fsk.generate_tones(byte, fc, Bd, carrier)
# tone = s * (2**15 - 1) / np.max(np.abs(s))
# tone = tone.astype(np.int16)
# sd.play(tone, fc)
# status = sd.wait()
# C, encoded_msg = fsk.demodulate(s, fc, Bd, carrier, 20, bandwidth, N)
# print(fsk.decode_ascii(encoded_msg), end='', flush=True)
# s = fsk.generate_tones(bmsg, fc, Bd, carrier)
# white_noise = np.random.normal(0, 0.5, size=len(s))*0
# s = s + white_noise
mic = microphone.Microphone()
s = np.array(mic.get_mic_data())
C, encoded_msg = fsk.demodulate(s, fc, Bd, carrier, 500, bandwidth, N)
# self.MESSAGE = fsk.decode_ascii(encoded_msg)
# print(self.MESSAGE, flush=True, end='')
# C, encoded_msg = fsk.demodulate(s, fc, Bd, carrier, 5, bandwidth, N)
string = ''.join([chr(int(encoded_msg[i:i+8],2)) for i in range(0,len(encoded_msg),8)])
# print('Mensagem original: {}\n'.format(msg))
print('Mensagem decodificada: {}\n'.format(string))
print('Tamanho do sinal transmitido: {}Mb'.format(str(s.nbytes/1e6)))
plt.plot(C)
plt.show()
| StarcoderdataPython |
44738 | <reponame>Sajaki/intellij-community<gh_stars>1-10
from django import apps
import requests | StarcoderdataPython |
8193048 | <gh_stars>0
#########################################
# File name: Tetris.py #
# Author: <NAME> #
# Course: ICS3U #
# Instructor: <NAME> #
# --------------------------------------#
# Last Modified: 11/12/2017 @ 21:02 #
#########################################
import sys
from random import randint, choice
from Classes import *
pygame.init()

# Window geometry: 24 rows tall, each grid cell HEIGHT // 24 = 25 px square.
HEIGHT = 600
WIDTH = 575
GRIDSIZE = HEIGHT // 24

screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Tetris - David Gurevich and microcode plus funny friends changed.")

# Frames between automatic downward moves per level (smaller = faster game).
LVL_1, LVL_2, LVL_3, LVL_4, LVL_5, LVL_6, LVL_7, LVL_8, LVL_9 = 30, 18, 16, 7, 5, 4, 3, 2, 1
LEVELS = [LVL_1, LVL_2, LVL_3, LVL_4, LVL_5, LVL_6, LVL_7, LVL_8, LVL_9, LVL_9]
SCORE = 0
HighSchore = 1000  # NOTE(review): misspelled and not referenced in this chunk -- confirm usage before renaming
# ---------------------------------------#
# Board coordinates (in grid cells).
COLUMNS = 14
ROWS = 24
LEFT = 0
RIGHT = LEFT + COLUMNS
MIDDLE = LEFT + COLUMNS // 2
TOP = 1
FLOOR = TOP + ROWS
# -------------IMAGES and MUSIC--------------------#
pygame.mixer.set_num_channels(6)
# Channel 0: Background Music
# Channel 1: Block Rotation
# Channel 2: Force Hit
# Channel 3: Line Remove
# Channel 4: Slow Hit
# Channel 5: Tetris Remove
# ---- BACKGROUND IMAGES ---- #
tetris_img = pygame.image.load('images/Tetris.jpg')
grid_img = pygame.image.load('images/gridBG.jpg')
intro_screen = pygame.image.load('images/Intro.jpg')
outro_screen = pygame.image.load('images/Outro.jpg')
# --------------------------- #
# ---- SOUND EFFECTS ---- #
block_rotate = pygame.mixer.Sound('Sounds/block-rotate.ogg')
force_hit = pygame.mixer.Sound('Sounds/force-hit.ogg')
line_remove = pygame.mixer.Sound('Sounds/line-remove.ogg')
slow_hit = pygame.mixer.Sound('Sounds/slow-hit.ogg')
tetris_remove = pygame.mixer.Sound('Sounds/tetris-remove.ogg')
# ----------------------- #
# ---- BACKGROUND MUSIC ---- #
kalinka = pygame.mixer.Sound('Music/kalinka.ogg')
katyusha = pygame.mixer.Sound('Music/katyusha.ogg')
korobushka = pygame.mixer.Sound('Music/korobushka.ogg')
smuglianka = pygame.mixer.Sound('Music/smuglianka.ogg')
# -------------------------- #
# ---- BLOCK PREVIEWS ---- #
cube_block = pygame.image.load('Previews/cube-block.png').convert_alpha()
i_block = pygame.image.load('Previews/i-block.png').convert_alpha()
j_block = pygame.image.load('Previews/j-block.png').convert_alpha()
L_block = pygame.image.load('Previews/L-block.png').convert_alpha()
r_s_block = pygame.image.load('Previews/r-s-block.png').convert_alpha()
s_block = pygame.image.load('Previews/s-block.png').convert_alpha()
t_block = pygame.image.load('Previews/t-block.png').convert_alpha()
block_img_lst = [r_s_block, s_block, L_block, j_block, i_block, t_block, cube_block]  # MUST MATCH LIST IN CLASSES.PY
# ------------------------ #
# ---- FAVICON ---- #
favicon = pygame.image.load('images/favicon.png').convert_alpha()
pygame.display.set_icon(favicon)
# ----------------- #
# ---- FONTS ---- #
pygame.font.init()
my_font = pygame.font.SysFont('Arial Black', 21)
# --------------- #
def draw_grid():
    """Draw horizontal and vertical grid lines over the play field.

    Lines are spaced GRIDSIZE pixels apart.
    """
    for col in range(15):
        x = col * GRIDSIZE
        pygame.draw.line(screen, BLACK, (x, 0), (x, HEIGHT), 1)
    for row in range(24):
        y = row * GRIDSIZE
        pygame.draw.line(screen, BLACK, (0, y), (GRIDSIZE * 24, y), 1)
def redraw_screen():
    """Redraw the full frame: board, pieces, sidebar stats, next-shape preview."""
    # Board layers, back to front.
    screen.blit(grid_img, (0, 0))
    draw_grid()
    screen.blit(tetris_img, (GRIDSIZE * 14, 0))
    shape.draw(screen, GRIDSIZE)
    shadow.draw(screen, GRIDSIZE, True)
    obstacles.draw(screen, GRIDSIZE)
    # Sidebar: score, elapsed seconds, level, and the upcoming shape.
    sidebar_x = GRIDSIZE * 14
    screen.blit(my_font.render(str(SCORE), True, WHITE), (sidebar_x + 90, 460))
    screen.blit(my_font.render(str(round(pygame.time.get_ticks() / 1000, 2)), True, WHITE), (sidebar_x + 85, 538))
    screen.blit(my_font.render(str(level + 1), True, WHITE), (sidebar_x + 100, 380))
    screen.blit(block_img_lst[nextShapeNo - 1], (sidebar_x + 72, 240))
    pygame.display.flip()
def drop(my_shape):
    """Move my_shape straight down until it rests on the floor or on obstacles.

    Plays the hard-drop sound unless the shape is the translucent shadow piece.
    """
    while True:
        my_shape.move_down()
        if my_shape.collides(floor) or my_shape.collides(obstacles):
            # Overshot by one row - step back up and stop.
            my_shape.move_up()
            if not my_shape.shadow:
                pygame.mixer.Channel(2).play(force_hit)
            break
# -------------------------------------------- #
# ------------- MAIN PROGRAM -------------------- #
# Outer loop: one iteration per complete game (intro -> play -> game over screen).
while True:
    counter = 0
    shapeNo = randint(1, 7)
    nextShapeNo = randint(1, 7)
    shape = Shape(MIDDLE, TOP, shapeNo)
    floor = Floor(LEFT, ROWS, COLUMNS)
    leftWall = Wall(LEFT - 1, 0, ROWS)
    rightWall = Wall(RIGHT, 0, ROWS)
    # Re-initialise state so the game can be replayed
    obstacles = Obstacles(LEFT, FLOOR)
    inPlay = False
    hasPlayed = False
    level = 0
    SCORE = 0
    PREV_TETRIS = False
    # Level override (debug)
    # SCORE = 1800
    previous_key = 0
    # Lock-delay grace period after the piece touches down [ms]
    delayTime = 700
    # True while the falling piece is resting on the floor or on another block
    fitflag = False
    # Deadline for the lock delay; reset together with fitflag whenever the piece drops a row
    downtime = 0
    # Pick a background track at random
    bg_music = choice([kalinka, katyusha, korobushka, smuglianka])
    pygame.mixer.Channel(0).play(bg_music, -1)
    start_timer = 0
    # ---- INTRO SCREEN ---- #
    while not inPlay and not hasPlayed:
        screen.blit(intro_screen, (0, 0))
        pygame.display.flip()
        screen.blit(intro_screen, (0, 0))
        pygame.display.flip()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit(0)
            if event.type == pygame.KEYDOWN:
                # if event.key == pygame.K_SPACE:
                if (event.key == pygame.K_SPACE) or (event.key == pygame.K_9):
                    inPlay = True
                    hasPlayed = True
    # ---------------------- #
    while inPlay:
        shadow = Shape(shape.col, shape.row, shape.clr, shape._rot, True)
        drop(shadow)
        if counter % LEVELS[level] == 0:
            # When the piece rests on the floor or on a block, start a countdown
            # and lock it once the grace period has elapsed. If it is moved off
            # its support in the meantime, it continues to fall.
            # Probe: move the piece's logical position down one row
            shape.move_down()
            # Is the piece touching the floor or sitting on top of another block?
            if shape.collides(floor) or shape.collides(obstacles):
                # Lock-delay bookkeeping
                if fitflag == False:
                    # Not flagged yet: arm the lock alarm at now + delayTime
                    fitflag = True
                    downtime = pygame.time.get_ticks() + delayTime
                    # Not locked yet, so undo the probing move
                    shape.move_up()
                # Past the deadline: the piece is considered landed
                elif ( pygame.time.get_ticks() >= downtime ):
                    # Lock the piece: first undo the probing move
                    shape.move_up()
                    obstacles.append(shape)
                    pygame.mixer.Channel(5).play(slow_hit)
                    fullRows = obstacles.findFullRows(TOP, FLOOR, COLUMNS)
                    # --------- SCORING --------- #
                    if 4 > len(fullRows) > 0:
                        SCORE += 100 * len(fullRows)
                        pygame.mixer.Channel(3).play(line_remove)
                    elif len(fullRows) >= 4:
                        SCORE += 800 + (100 * (len(fullRows) - 4))
                        pygame.mixer.Channel(4).play(tetris_remove)
                        PREV_TETRIS = True
                    # NOTE(review): unreachable - the branch above already catches
                    # len(fullRows) >= 4, so this back-to-back tetris bonus never fires.
                    elif len(fullRows) >= 4 and PREV_TETRIS:
                        SCORE += 1200 + (100 * (len(fullRows) - 4))
                        PREV_TETRIS = True
                        pygame.mixer.Channel(4).play(tetris_remove)
                    else:
                        SCORE += 30 + ( level * 5 )
                    # ------------------------ #
                    # A non-empty fullRows means lines were cleared; pause 0.5 s
                    if ( len(fullRows) != 0 ):
                        pygame.time.delay( 500 )  # 500 ms = 0.5 s
                    obstacles.removeFullRows(fullRows)
                    shapeNo = nextShapeNo
                    nextShapeNo = randint(1, 7)
                    if not shape.row <= 1:
                        shape = Shape(MIDDLE, TOP, shapeNo)
                    else:
                        # Game over: the locked piece reached the top row
                        pygame.time.delay(2500)
                        inPlay = False
                else:
                    # Lock delay still running: undo the probing move
                    shape.move_up()
            else:
                # The piece is airborne: clear the lock flag
                fitflag = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                inPlay = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_8:
                    # Require two pulses to act, to suppress jitter from the rotary dial input
                    # if ( previous_key == pygame.K_8 ):
                    if True:
                        previous_key = 0
                        shape.rotateClkwise()
                        shape._rotate()
                        if shape.collides(leftWall) or shape.collides(rightWall) or shape.collides(floor) or shape.collides(
                                obstacles):
                            shape.rotateCntclkwise()
                            shape._rotate()
                        else:
                            pygame.mixer.Channel(1).play(block_rotate)
                    else:
                        previous_key = pygame.K_8
                if event.key == pygame.K_7:
                    # Require two pulses to act, to suppress jitter from the rotary dial input
                    # if ( previous_key == pygame.K_7 ):
                    if ( True ):
                        previous_key = 0
                        shape.rotateCntclkwise()
                        shape._rotate()
                        if shape.collides(leftWall) or shape.collides(rightWall) or shape.collides(floor) or shape.collides(
                                obstacles):
                            shape.rotateClkwise()
                            shape._rotate()
                        else:
                            pygame.mixer.Channel(1).play(block_rotate)
                    else:
                        previous_key = pygame.K_7
                if event.key == pygame.K_LEFT:
                    shape.move_left()
                    if shape.collides(leftWall):
                        shape.move_right()
                    elif shape.collides(obstacles):
                        shape.move_right()
                if event.key == pygame.K_RIGHT:
                    shape.move_right()
                    if shape.collides(rightWall):
                        shape.move_left()
                    elif shape.collides(obstacles):
                        shape.move_left()
                # if event.key == pygame.K_DOWN:
                # if event.key == pygame.K_SPACE:
                if event.key == pygame.K_9:
                    # Soft drop: move one row down, running the same lock-delay logic
                    shape.move_down()
                    if shape.collides(floor) or shape.collides(obstacles):
                        # Lock-delay bookkeeping
                        if fitflag == False:
                            # Not flagged yet: arm the lock alarm at now + delayTime
                            fitflag = True
                            downtime = pygame.time.get_ticks() + delayTime
                            # Not locked yet, so undo the probing move
                            shape.move_up()
                        elif ( pygame.time.get_ticks() >= downtime ):
                            shape.move_up()
                            obstacles.append(shape)
                            fullRows = obstacles.findFullRows(TOP, FLOOR, COLUMNS)
                            # --------- SCORING --------- #
                            if 4 > len(fullRows) > 0:
                                SCORE += 100 * len(fullRows)
                                pygame.mixer.Channel(3).play(line_remove)
                            elif len(fullRows) >= 4:
                                SCORE += 800 + (100 * (len(fullRows) - 4))
                                pygame.mixer.Channel(4).play(tetris_remove)
                                PREV_TETRIS = True
                            # NOTE(review): unreachable - shadowed by the branch above.
                            elif len(fullRows) >= 4 and PREV_TETRIS:
                                SCORE += 1200 + (100 * (len(fullRows) - 4))
                                PREV_TETRIS = True
                                pygame.mixer.Channel(4).play(tetris_remove)
                            else:
                                SCORE += 80
                            # ------------------------- #
                            obstacles.removeFullRows(fullRows)
                            shapeNo = nextShapeNo
                            nextShapeNo = randint(1, 7)
                            shape = Shape(MIDDLE, TOP, shapeNo)
                            # shape = Shape(MIDDLE, TOP, shapeNo)
                        else:
                            # Lock delay still running: undo the probing move
                            shape.move_up()
                    else:
                        # Airborne again: reset the lock flag
                        fitflag = False
                        SCORE += (level+1) * 2
                # if event.key == pygame.K_SPACE:
                if event.key == pygame.K_DOWN:
                    # Hard drop: slam the piece to the bottom and lock immediately
                    drop(shape)
                    obstacles.append(shape)
                    shapeNo = nextShapeNo
                    nextShapeNo = randint(1, 7)
                    shape = Shape(MIDDLE, TOP, shapeNo)
                    fullRows = obstacles.findFullRows(TOP, FLOOR, COLUMNS)
                    # --------- SCORING --------- #
                    if 4 > len(fullRows) > 0:
                        SCORE += 100 * len(fullRows)
                        pygame.mixer.Channel(3).play(line_remove)
                    elif len(fullRows) >= 4:
                        SCORE += 800 + (100 * (len(fullRows) - 4))
                        pygame.mixer.Channel(4).play(tetris_remove)
                        PREV_TETRIS = True
                    # NOTE(review): unreachable - shadowed by the branch above.
                    elif len(fullRows) >= 4 and PREV_TETRIS:
                        SCORE += 1200 + (100 * (len(fullRows) - 4))
                        PREV_TETRIS = True
                        pygame.mixer.Channel(4).play(tetris_remove)
                    # ------------------------- #
                    obstacles.removeFullRows(fullRows)
        # Level thresholds driven by the running score.
        if 1000 >= SCORE >= 500:
            level = 1
        elif 2000 >= SCORE > 1000:
            level = 2
        elif 3000 >= SCORE > 2000:
            level = 3
        elif 4500 >= SCORE > 3000:
            level = 4
        elif 6000 >= SCORE > 4500:
            level = 5
        # NOTE(review): 10000 >= SCORE > 15000 can never be true - the bounds look
        # inverted (perhaps 15000 >= SCORE > 10000 was intended). Confirm intent.
        elif 10000 >= SCORE > 15000:
            level = 6
        elif 22500 >= SCORE > 15000:
            level = 7
        # NOTE(review): 35000 >= SCORE > 50000 can never be true - the bounds look
        # inverted (perhaps 50000 >= SCORE > 35000 was intended). Confirm intent.
        elif 35000 >= SCORE > 50000:
            level = 8
        elif SCORE >= 50000:
            level = 9
            # NOTE(review): PREV_TETRIS is reset only in this branch - confirm intended.
            PREV_TETRIS = False
        counter += 1
        redraw_screen()
    while not inPlay and hasPlayed:
        if start_timer == 0:
            start_timer = pygame.time.get_ticks() + 8000
        screen.blit(outro_screen, (0, 0))
        pygame.display.flip()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit(0)
            if event.type == pygame.KEYDOWN:
                # if event.key == pygame.K_SPACE:
                if (event.key == pygame.K_SPACE) or (event.key == pygame.K_o):
                    # On key press, restart the main program from the top
                    hasPlayed = False
                    pygame.time.delay(500)
                    pygame.event.clear()
                    break
                    # pygame.quit()
                    # sys.exit(0)
        # After 8 seconds fall back to the title screen
        if pygame.time.get_ticks() >= start_timer:
            break
            # pygame.quit()
            # sys.exit(0)
# ----------------------------------------------- #
pygame.quit()
sys.exit("Exited Final")
| StarcoderdataPython |
237729 | import sys
import argparse
import statistics as stat
from config import *
import os
# Measurement delay baked into the summary file names.
delay = 30
parser = argparse.ArgumentParser('Analysis Plots')
parser.add_argument('--topo',
        type=str,
        required=True,
        help='what topology to generate summary for')
parser.add_argument('--payment-graph-type',
        type=str,
        help='what graph type topology to generate summary for', default="circ")
parser.add_argument('--credit-list',
        nargs="+",
        required=True,
        help='Credits to collect stats for')
parser.add_argument('--demand',
        type=int,
        help='Single number denoting the demand to collect data for', default="30")
parser.add_argument('--path-type-list',
        nargs="*",
        help='types of paths to collect data for', default=["shortest"])
parser.add_argument('--scheduling-alg-list',
        nargs="*",
        help='scheduling algorithms to collect info for', default=[None])
parser.add_argument('--queue-threshold-list',
        nargs="*",
        help='queue thresholds to collect info for', default=[None])
parser.add_argument('--dag-percent-list',
        nargs="*",
        help='dag percents to collect info for', default=[None])
parser.add_argument('--path-num-list',
        nargs="*",
        help='number of paths to collect data for', default=[4])
parser.add_argument('--scheme-list',
        nargs="*",
        help='set of schemes to aggregate results for', default=["priceSchemeWindow"])
parser.add_argument('--save',
        type=str,
        required=True,
        help='file name to save data in')
parser.add_argument('--num-max',
        type=int,
        help='Single number denoting the maximum number of runs to aggregate data over', default="5")
# collect all arguments
args = parser.parse_args()
topo = args.topo
credit_list = args.credit_list
demand = args.demand
path_type_list = args.path_type_list
scheme_list = args.scheme_list
path_num_list = args.path_num_list
queue_threshold_list = args.queue_threshold_list
dag_percent_list = args.dag_percent_list
scheduling_algorithms = args.scheduling_alg_list
output_file = open(GGPLOT_DATA_DIR + args.save, "w+")
# CSV header: circulation runs omit the DAGAmt column.
if args.payment_graph_type == "circ":
    output_file.write("Scheme,Credit,")
else:
    output_file.write("Scheme,Credit,DAGAmt,")
output_file.write("Topo,CreditType,NumPaths,PathType,SchedulingAlg," + \
        "Threshold,SuccRatio,SuccRatioMin,SuccRatioMax,SuccVolume," + \
        "SuccVolumeMin," +\
        "SuccVolumeMax,CompTime,CompTimeMin,CompTimeMax\n")
# determine topology and credit type (topo_type comes from the save-file name prefix)
if "sw" in args.topo or "sf" in args.topo:
    topo_type = args.save[:2]
else:
    topo_type = args.save[:3]
# NOTE(review): the first branch is redundant - both it and the final else
# assign "uniform" - behaviour is unchanged either way.
if "lnd_uniform" in args.topo:
    credit_type = "uniform"
elif "lnd_july15" in args.topo or "lndCap" in args.topo:
    credit_type = "lnd"
else:
    credit_type = "uniform"
# go through all relevant files and aggregate info
for credit in credit_list:
    for scheme in scheme_list:
        for path_type in path_type_list:
            # widest paths are only collected for the waterfilling-style schemes
            if path_type == "widest" and scheme not in ["waterfilling", "DCTCPQ"]:
                continue
            if path_type == "shortest" and len(scheme_list) > 1 and scheme in ["waterfilling", "DCTCPQ"] and \
                    credit_type == "lnd":
                continue
            for queue_threshold in queue_threshold_list:
                for num_paths in path_num_list:
                    for percent in dag_percent_list:
                        for alg in scheduling_algorithms:
                            succ_ratios, succ_vols,comp_times = [], [], []
                            # NOTE(review): iterates num_max + 1 runs (0..num_max inclusive) - confirm intended.
                            for run_num in range(0, args.num_max + 1):
                                # Reconstruct the summary file name for this configuration.
                                if args.payment_graph_type == "circ" or percent == '0':
                                    file_name = topo + str(credit) + "_circ" + str(run_num)
                                else:
                                    file_name = topo + "_dag" + str(percent) + "_" + str(credit) + "_num" + \
                                            str(run_num)
                                file_name += "_delay" + str(delay) + "_demand" + str(demand) + "_" + scheme + \
                                        "_" + path_type
                                if scheme != "shortestPath":
                                    file_name += "_" + str(num_paths)
                                if alg is not None:
                                    file_name += "_" + alg
                                elif scheme == "celer":
                                    file_name += "_FIFO"
                                else:
                                    file_name += "_LIFO"
                                if queue_threshold is not None and percent != '0' and scheme == "DCTCPQ":
                                    file_name += "_qd" + str(queue_threshold)
                                file_name += "_summary.txt"
                                try:
                                    with open(SUMMARY_DIR + file_name) as f:
                                        for line in f:
                                            if line.startswith("Success ratio"):
                                                succ_ratio = float(line.split(" ")[4])
                                            elif line.startswith("Success volume"):
                                                succ_volume = float(line.split(" ")[5])
                                            elif line.startswith("Avg completion time"):
                                                comp_time = float(line.split(" ")[3][:-1])
                                    succ_ratios.append(succ_ratio * 100)
                                    succ_vols.append(succ_volume * 100)
                                    comp_times.append(comp_time)
                                except IOError:
                                    print("error with " , file_name)
                                    continue
                            # Scale credit into absolute capacity for the LND topologies.
                            # NOTE(review): 650 / 422 are unexplained magic factors - confirm
                            # against the topology generation scripts.
                            if "lndtopo" in args.save and "lnd_credit" in args.save:
                                capacity = int(credit) * 650
                            elif "lndnewtopo" in args.save and "lnd_credit" in args.save:
                                capacity = int(credit) * 422
                            else:
                                capacity = int(credit)
                            if len(succ_ratios) > 0:
                                if args.payment_graph_type == "circ":
                                    output_file.write(SCHEME_CODE[scheme] + "," + str(capacity) + ",")
                                else:
                                    output_file.write(SCHEME_CODE[scheme] + "," + str(capacity) + "," + \
                                            str(PERCENT_MAPPING[percent]) + ",")
                                output_file.write(topo_type + "," + credit_type + "," \
                                        + str(num_paths) + "," \
                                        + str(path_type) + "," \
                                        + str(alg) + "," \
                                        + str(queue_threshold) + "," \
                                        + ("%f,%f,%f,%f,%f,%f,%f,%f,%f\n" % (stat.mean(succ_ratios), min(succ_ratios), \
                                        max(succ_ratios), stat.mean(succ_vols), min(succ_vols), max(succ_vols), \
                                        stat.mean(comp_times), min(comp_times), max(comp_times))))
    # For DAG workloads also emit the ideal (circulation-only) reference rows.
    if args.payment_graph_type == 'dag':
        for percent in dag_percent_list:
            if "lndtopo" in args.save and "lnd_credit" in args.save:
                capacity = int(credit) * 650
            else:
                capacity = int(credit)
            output_file.write("Circ," + str(capacity) + "," + \
                    str(PERCENT_MAPPING[percent]) + ",")
            ideal = 100 - PERCENT_MAPPING[percent]
            output_file.write(topo_type + "," + credit_type + ",4,ideal,0," \
                    + ("%f,%f,%f,%f,%f,%f,0,0,0\n" % (ideal, ideal, ideal, ideal, ideal, ideal)))
output_file.close()
| StarcoderdataPython |
4804062 | <filename>stu_grade_prediction.py
# -*- coding: utf-8 -*-
"""
@author: Emmanuel
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import warnings
import time
from sklearn.linear_model import ElasticNet, Lasso, LinearRegression, OrthogonalMatchingPursuitCV, Ridge
import joblib
from sklearn.metrics import mean_squared_error, r2_score
warnings.filterwarnings('ignore')
# function to load and run saved models
# To easily run saved models
# function to run our regression models
def run_reg_models(regressor_names, regressors, X_train, X_test, y_train, y_test):
    """Fit each regressor on the training split and print its test performance.

    For every (name, regressor) pair the model is fitted in place on
    (X_train, y_train) and its R2 and RMSE on (X_test, y_test) are printed.
    """
    # Fix: removed the unused `counter` variable and the unused `result`
    # binding (fit() mutates the estimator in place; its return value was never used).
    for name, clf in zip(regressor_names, regressors):
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        model_performance = pd.DataFrame(
            data=[r2_score(y_test, y_pred), np.sqrt(mean_squared_error(y_test, y_pred))],
            index=["R2", "RMSE"])
        print(name + ' performance: ')
        print(model_performance)
# Load the raw student data; the path is machine-specific.
df_raw = pd.read_csv(
    r'C:/Users/Emmanuel/Documents/Projects/Python/Student Grade Prediction/Dataset/student.csv')
# check for null values
print(df_raw.isnull().sum())
# There is only one empty value per column, so the simplest fix is to
# drop the rows that contain them (dropna defaults to axis=0, i.e. rows).
df_raw.dropna(inplace=True)
# Our target column is G3. Before handling the categorical data we look
# for the features most correlated with G3 via a correlation heatmap.
sns.heatmap(df_raw.corr(), annot=True)
plt.show()
# now we handle the categorical data
# from our heat map we can see that the most correlation with G3 is with:
# G1, G2, failures and Medu
def handle_cat_data(cat_feats, data):
    """One-hot encode the columns in cat_feats and drop the originals.

    Each categorical column is expanded with pd.get_dummies (first level
    dropped) and joined onto the frame; the source columns are then removed.
    Returns the transformed DataFrame.
    """
    for feat in cat_feats:
        dummies = pd.get_dummies(data[feat], prefix=feat, drop_first=True)
        # get_dummies prefixes guarantee unique names, so the suffixes rarely apply.
        data = data.join(dummies, how='left', lsuffix='_left', rsuffix='_right')
    # Drop the now-encoded categorical source columns.
    data.drop(cat_feats, axis=1, inplace=True)
    return data
#----------- End of Handle cat data function ------------#
# Features chosen from the heatmap: most correlated with the target G3.
student_df = df_raw[['G1', 'G2', 'failures', 'Medu']]
cat_features = ['Medu']
# NOTE(review): this disabled call passes arguments in the opposite order to
# the signature handle_cat_data(cat_feats, data) - fix before re-enabling.
# handle_cat_data(student_df, cat_features)
# divide dataset into training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(
    student_df, df_raw['G3'], test_size=0.2, random_state=0)
regressor_names = ['Linear Regression', 'Ridge Regression', 'Lasso Regression',
                   'Elastic Net Regression', 'Orthongonal Matching Pursuit CV']
regressors = [
    LinearRegression(normalize=True),
    Ridge(alpha=0, normalize=True),
    Lasso(alpha=0.01, normalize=True),
    ElasticNet(random_state=0),
    OrthogonalMatchingPursuitCV(cv=8, normalize=True)
]
# Fits every regressor in place and prints R2 / RMSE for each.
run_reg_models(regressor_names, regressors, X_train, X_test, y_train, y_test)
# Predict the score of one student from our dataset
# print(student_df.head(1))
# print(df_raw.head(1))
# Index 4 selects the OrthogonalMatchingPursuitCV model fitted above.
sel_reg = regressors[4]
predicted_val = sel_reg.predict(student_df.head(1))
print('Predicted Final Grade:' + str(round(predicted_val[0])))
print('Actual Final Grade: ' + str(df_raw.head(1)['G3'].values[0]))
| StarcoderdataPython |
9758189 | <gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from Array import Array
def generalSum(lhs, rhs):
    """Return lhs + rhs; used as the reduction step for Array.reduce."""
    total = lhs + rhs
    return total
#
if __name__ == "__main__":
    # Read a single space-separated line of integers from the fixture file,
    # then demonstrate Array's chained map/reduce and join.
    with open("../rsc/test_val", "r") as fic:
        line = fic.readline()
    arr = Array(line.split(" "))
    # Sum of all values via map(int) followed by a reduce with generalSum.
    processedSum = arr.map(int).reduce(generalSum)
    joined = arr.join("+")
    print("%s = %s" % (joined, processedSum))
'''
arr.map(int).filter(isNotZero).reduce(makeSum)
VS
reduce(makeSum, filter(isNotZero, map(int, arr)))
or
reduce(
makeSum,
filter(
isNotZero,
map(int, arr)
)
)
'''
# | StarcoderdataPython |
8144896 | <reponame>eyalev/gcloud
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Delete command."""
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.app import flags
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.Command):
  """Delete a specific version of the given modules.
  This command is deprecated. Please use
  `gcloud preview app versions delete` instead.
  This command deletes the specified version of the given modules from the
  App Engine server.
  """
  # The class docstring doubles as the CLI help text, so it is kept verbatim.
  detailed_help = {
      'DESCRIPTION': '{description}',
      'EXAMPLES': """\
          To delete a version from a single module, run:
            $ {command} default --version=1
          To delete a single version from multiple modules, run:
            $ {command} module1 module2 --version=1
          """,
  }
  @staticmethod
  def Args(parser):
    # Register the shared flags; order determines help output order.
    flags.SERVER_FLAG.AddToParser(parser)
    flags.VERSION_FLAG.AddToParser(parser)
    flags.MODULES_ARG.AddToParser(parser)
  def Run(self, args):
    log.warn('This command is deprecated. '
             'Please use `gcloud preview app versions delete` instead.')
    api_client = appengine_api_client.GetApiClient()
    # Confirm with the user, listing every project/module/version to remove.
    version_paths = ['{0}/{1}/{2}'.format(api_client.project, module, args.version)
                     for module in args.modules]
    message = 'You are about to delete the following module versions:\n\t'
    message += '\n\t'.join(version_paths)
    console_io.PromptContinue(message=message, cancel_on_no=True)
    # Attempt every deletion even if some fail, then report a single error.
    results = [api_client.DeleteVersion(module, args.version)
               for module in args.modules]
    if not all(results):
      raise exceptions.ToolException('Not all deletions succeeded.')
| StarcoderdataPython |
1952419 | import sys
import argparse
import pandas
import fasttext
import pymorphy2
from sentencepiece import SentencePieceProcessor as sp_processor
from razdel import tokenize
from razdel.substring import Substring
from grammar_ngram_lm import load_grams, make_ngram_correction, make_hypotheses_neni
grams = load_grams()
tokens_fixes = {
"бес": "без",
"вши": "ваши",
"веером": "вечером",
"длинны": "длины",
"длинна": "длина",
}
substrings_fixes = {
"белее или менее": "более или менее",
"белее чем скромные": "более чем скромные",
"без везти": "без вести",
"в пошлом веке": "в прошлом веке",
# "в течении года": "в течение года",
"несколько не изменился": "нисколько не изменился",
"не кто не может": "никто не может",
"ни кому": "никому",
"одно и тоже": "одно и то же",
"как то так же": "как-то так же",
"бес толку": "бестолку"
}
_ALLOWED_POS_TAGS_FOR_TAKI = {'ADVB', 'VERB', 'INFN', 'PRCL'}
_ALLOWED_POS_TAGS_FOR_TO = {'ADVB', 'NPRO'}
_HARD_PRONOUNS = [
('ввиду', 'в виду'),
('вместо', 'в место'),
('вследствие', 'в следствии'),
('вследствие', 'в следствие'),
('навстречу', 'на встречу'),
('наподобие', 'на подобие'),
('наподобие', 'на подобии'),
('насчёт', 'на счёт'),
('насчет', 'на счет'),
("вслед", "в след"),
("в виде", "ввиде"),
("в течение", "в течении"),
("в продолжение", "в продолжении"),
("в заключение", "в заключении"),
("в завершение", "в завершение"),
("в отличие от", "в отличии от"),
("в сравнении с", "в сравнение с"),
("в связи с", "в связе с"),
("по окончании", "по окончание"),
("по прибытии", "по прибытие")
]
_MORPH = pymorphy2.MorphAnalyzer()
def _fix_dictionary(original_sentences):
    """Apply the hard-coded phrase and token dictionaries to each sentence.

    First replaces whole phrases from substrings_fixes (both as-is and
    capitalised, mutating original_sentences in place), then tokenises each
    sentence and replaces individual tokens from tokens_fixes, shifting later
    token offsets to account for length changes. Returns the fixed sentences.
    """
    for i, sentence in enumerate(original_sentences):
        for key, value in substrings_fixes.items():
            if key in sentence or key.capitalize() in sentence:
                original_sentences[i] = sentence.replace(key, value)
                original_sentences[i] = original_sentences[i].replace(key.capitalize(), value.capitalize())
    tokenized_sentences = [(sentence, list(tokenize(sentence))) for sentence in original_sentences]
    fixed_sentences = []
    for sentence, tokens in tokenized_sentences:
        fixed_sentence = ""
        offset = 0
        for i, token in enumerate(tokens):
            # Shift this token's span by the accumulated length change so far.
            tokens[i].start += offset
            tokens[i].stop += offset
            token_text = token.text
            fixed_token_text = tokens_fixes.get(token_text, None)
            if fixed_token_text is not None:
                tokens[i].text = fixed_token_text
                # Subsequent tokens move by the size difference of this replacement.
                offset += len(fixed_token_text) - len(token_text)
        # Rebuild the sentence by splicing every (possibly updated) token span.
        fixed_sentence = sentence
        for token in tokens:
            fixed_sentence = fixed_sentence[:token.start] + token.text + fixed_sentence[token.stop:]
        fixed_sentences.append(fixed_sentence)
    return fixed_sentences
def _fix_tsya(fixed_sentences,
              tsya_border=0.55,
              tsya_model_path="models/tsya_predictor.bin",
              bpe_model_path="models/grammar_bpe.model"):
    """Fix тся/ться confusion in place using a fastText classifier.

    A sentence is only touched when it contains exactly one candidate
    occurrence and the classifier flags it as wrong with probability above
    tsya_border.
    """
    predictor = fasttext.load_model(tsya_model_path)
    bpe = sp_processor()
    bpe.load(bpe_model_path)
    for idx, text in enumerate(fixed_sentences):
        # NOTE: "ться" also contains "тся", so both counters see a soft-sign form.
        n_tsya = text.count("тся")
        n_tsjya = text.count("ться")
        if n_tsya + n_tsjya != 1:
            continue
        pieces = " ".join(bpe.EncodeAsPieces(text.lower()))
        labels, probas = predictor.predict(pieces)
        if int(labels[0][-1]) != 0 or float(probas[0]) <= tsya_border:
            continue
        if n_tsya >= 1 and n_tsjya == 0:
            fixed_sentences[idx] = text.replace("тся", "ться")
        elif n_tsjya >= 1 and n_tsya == 0:
            fixed_sentences[idx] = text.replace("ться", "тся")
def _fix_nn(fixed_sentences,
            nn_border=0.6,
            nn_model_path="models/nn_predictor.bin",
            bpe_model_path="models/opencorpora_bpe.model"):
    """Fix double-н spellings ("нн" -> "н") in place via a fastText classifier.

    Only sentences with exactly one "нн" occurrence are considered, and the
    replacement happens only when the classifier is confident (> nn_border).
    """
    predictor = fasttext.load_model(nn_model_path)
    bpe = sp_processor()
    bpe.load(bpe_model_path)
    for idx, text in enumerate(fixed_sentences):
        if text.count("нн") != 1:
            continue
        pieces = " ".join(bpe.EncodeAsPieces(text.lower()))
        labels, probas = predictor.predict(pieces)
        if int(labels[0][-1]) == 0 and float(probas[0]) > nn_border:
            fixed_sentences[idx] = text.replace("нн", "н")
def _fix_merge_to(fixed_sentences,
                  border=0.6,
                  model_path="models/to_merge_predictor.bin",
                  bpe_model_path="models/opencorpora_bpe.model"):
    """Merge split particles (что бы/то же/так же) in place when the classifier agrees.

    A sentence qualifies only when exactly one of the three split forms occurs;
    the merge is applied only when the classifier predicts class 0 with
    probability >= border.
    """
    predictor = fasttext.load_model(model_path)
    bpe = sp_processor()
    bpe.load(bpe_model_path)
    # Ordered (split form, merged form) candidates.
    replacements = (("что бы", "чтобы"), ("то же", "тоже"), ("так же", "также"))
    for idx, text in enumerate(fixed_sentences):
        counts = {src: text.count(src) for src, _ in replacements}
        if sum(counts.values()) != 1:
            continue
        pieces = " ".join(bpe.EncodeAsPieces(text.lower()))
        labels, probas = predictor.predict(pieces)
        if int(labels[0][-1]) == 1 or float(probas[0]) < border:
            continue
        for src, dst in replacements:
            if counts[src] == 1:
                fixed_sentences[idx] = text.replace(src, dst)
                break
def _fix_izza_on_text(text):
    """Merge split prepositions 'из за' -> 'из-за' and 'из под' -> 'из-под' in text."""
    tokens = list(tokenize(text))
    result_tokens = []
    i = 0
    # Scan adjacent token pairs; a matching pair is replaced by one merged token.
    while i < len(tokens) - 1:
        if tokens[i].text.lower() == 'из' and (tokens[i+1].text.lower() == 'за' or tokens[i+1].text.lower() == 'под'):
            result_tokens.append(
                Substring(tokens[i].start, tokens[i+1].stop, tokens[i].text + '-' + tokens[i+1].text)
            )
            i += 2
        else:
            result_tokens.append(tokens[i])
            i += 1
    # NOTE(review): the final token is never appended when the last pair is not
    # merged; harmless here because unmerged tokens rewrite their own span with
    # identical text.
    # NOTE(review): if the gap between 'из' and 'за'/'под' is not exactly one
    # character, the merged replacement changes the string length and later
    # token offsets become stale - confirm single-space separation is guaranteed.
    fixed_sentence = text
    for token in result_tokens:
        fixed_sentence = fixed_sentence[:token.start] + token.text + fixed_sentence[token.stop:]
    return fixed_sentence
def _fix_izza(fixed_sentences):
    """Apply the из-за / из-под hyphenation fix to every sentence."""
    return list(map(_fix_izza_on_text, fixed_sentences))
def _is_good_for_particle(prev_token, allowed_pos_tags):
    """Return True if any morphological parse of prev_token carries one of allowed_pos_tags."""
    return any(
        any(tag in parse.tag for tag in allowed_pos_tags)
        for parse in _MORPH.parse(prev_token)
    )
def _fix_particles_on_text(text):
    """Hyphenate the particles то/либо/нибудь/таки after a suitable preceding word."""
    tokens = list(tokenize(text))
    result_text = ''
    prev_end = 0
    for i, token in enumerate(tokens):
        if token.text not in {'то', 'либо', 'нибудь', 'таки'}:
            # Ordinary token: copy the gap since the previous token, then the token.
            result_text += text[prev_end: token.start] + token.text
        elif token.text == 'таки':
            # 'таки' attaches with a hyphen after the POS classes in _ALLOWED_POS_TAGS_FOR_TAKI.
            if i > 0 and _is_good_for_particle(tokens[i - 1].text, _ALLOWED_POS_TAGS_FOR_TAKI):
                # Attaching drops the whitespace gap and inserts '-'.
                result_text += '-' + token.text
            else:
                result_text += text[prev_end: token.start] + token.text
        else:
            # то/либо/нибудь attach after the POS classes in _ALLOWED_POS_TAGS_FOR_TO.
            if i > 0 and _is_good_for_particle(tokens[i - 1].text, _ALLOWED_POS_TAGS_FOR_TO):
                result_text += '-' + token.text
            else:
                result_text += text[prev_end: token.start] + token.text
        prev_end = token.stop
    # Append any trailing text after the last token.
    if tokens:
        result_text += text[tokens[-1].stop:]
    return result_text
def _fix_particles(fixed_sentences):
    """Apply the particle-hyphenation fix to every sentence."""
    return list(map(_fix_particles_on_text, fixed_sentences))
def _fix_pronouns(fixed_sentences,
                  nn_border=0.65,
                  nn_model_path="models/pronoun_model.bin",
                  bpe_model_path="models/opencorpora_bpe.model"):
    """Swap merged/split pronoun-preposition forms in place using a fastText model.

    For each sentence containing either side of a _HARD_PRONOUNS pair (plain or
    capitalised), the classifier decides (class 0 with probability > nn_border)
    whether to flip the spelling to the other form.
    """
    nn_predictor = fasttext.load_model(nn_model_path)
    bpe_model = sp_processor()
    bpe_model.load(bpe_model_path)
    for i, sentence in enumerate(fixed_sentences):
        for from_text, to_text in _HARD_PRONOUNS:
            if (from_text in sentence or to_text in sentence
                    or from_text.capitalize() in sentence
                    or to_text.capitalize() in sentence
                ):
                processed_sentence = " ".join(bpe_model.EncodeAsPieces(sentence.lower()))
                nn_predictions = nn_predictor.predict(processed_sentence)
                nn_proba = float(nn_predictions[1][0])
                nn_label = int(nn_predictions[0][0][-1])
                if nn_label == 0 and nn_proba > nn_border:
                    if from_text in sentence:
                        fixed_sentences[i] = sentence.replace(from_text, to_text)
                    elif from_text.capitalize() in sentence:
                        # Fix: capitalised matches used to be replaced with the
                        # lowercase form, mangling sentence-initial capitals.
                        fixed_sentences[i] = sentence.replace(from_text.capitalize(),
                                                              to_text.capitalize())
                    elif to_text in sentence:
                        fixed_sentences[i] = sentence.replace(to_text, from_text)
                    else:
                        # Same capitalisation fix for the reverse direction.
                        fixed_sentences[i] = sentence.replace(to_text.capitalize(),
                                                              from_text.capitalize())
def _fix_neni(sentences):
    """Apply the не/ни n-gram language-model correction to every sentence."""
    def _correct(sentence):
        return make_ngram_correction(
            text=sentence,
            hypo_makers=[make_hypotheses_neni],
            grams=grams,
        )
    return [_correct(sentence) for sentence in sentences]
def fix_mistakes(input_csv, output_csv):
    """Run the full correction pipeline over a CSV of sentences.

    Reads 'sentence_with_a_mistake' from input_csv (indexed by 'id'), applies
    every fixer in order, and writes the result to the 'correct_sentence'
    column of output_csv. The pipeline order matters: dictionary fixes run
    first, statistical fixers after.
    """
    frame = pd.read_csv(input_csv, index_col='id')
    corrected = _fix_dictionary(frame['sentence_with_a_mistake'].tolist())
    _fix_tsya(corrected)          # in-place
    _fix_nn(corrected)            # in-place
    corrected = _fix_izza(corrected)
    corrected = _fix_particles(corrected)
    _fix_merge_to(corrected)      # in-place
    corrected = _fix_neni(corrected)
    _fix_pronouns(corrected)      # in-place
    frame['correct_sentence'] = corrected
    frame.to_csv(output_csv)
if __name__ == '__main__':
    # CLI entry point: python <script> input.csv output.csv
    parser = argparse.ArgumentParser()
    parser.add_argument('input_csv', help="path to input file")
    parser.add_argument('output_csv', help="path to output file")
    args = parser.parse_args()
    fix_mistakes(**vars(args))
| StarcoderdataPython |
4978117 | <gh_stars>10-100
import os.path
from setuptools import setup, find_packages
# Packaging metadata for the "eastern" Kubernetes deployment CLI.
setup(
    name="eastern",
    description="Simple Kubernetes Deployment",
    # The README (Markdown) becomes the PyPI long description.
    long_description=open(os.path.join(os.path.dirname(__file__), "README.md")).read(),
    long_description_content_type="text/markdown",
    version="4.5.1",
    packages=find_packages(),
    url="https://github.com/wongnai/eastern",
    install_requires=["Click~=6.7", "click-log~=0.3.2", "PyYAML~=4.2b4", "stevedore~=1.29.0", "pre-commit~=1.18.3"],
    setup_requires=["pytest-runner"],
    tests_require=["pytest", "pytest-asyncio"],
    entry_points={
        # Console command plus stevedore plugin hooks for commands/formatters.
        "console_scripts": ["eastern = eastern.cli:cli"],
        "eastern.command": [
            "load? = eastern.yaml_formatter.overrides:load",
            "load! = eastern.yaml_formatter.overrides:load_strict",
        ],
        "eastern.formatter": ["yaml = eastern.yaml_formatter:Formatter"],
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3 :: Only",
        "Topic :: System :: Systems Administration",
    ],
    license="MIT",
)
| StarcoderdataPython |
1986839 | <filename>AART_project/LSTM/DNN.py
from __future__ import print_function
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from IPython.display import display, HTML
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn import preprocessing
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Reshape
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
# Set some standard parameters upfront
pd.options.display.float_format = '{:.1f}'.format
sns.set() # Default seaborn look and feel
plt.style.use('ggplot')
print('keras version ', keras.__version__)
# Activity class labels: shooting, layup, dribble.
LABELS = ['shooting',
          'layup',
          'dribble']
# The number of steps within one time segment
# FPS20 * 3 second = 60 => 60 value for one step
TIME_PERIODS = 60
# The steps to take from one segment to the next; if this value is equal to
# TIME_PERIODS, then there is no overlap between the segments
STEP_DISTANCE = 20
def read_data(file_path):
    """Load the headerless pose-keypoint CSV at file_path into a DataFrame.

    Columns are three metadata fields followed by (x, y) coordinate pairs
    for each tracked keypoint. Rows containing any NaN are dropped.
    """
    column_names = ['user_id', 'type', 'photo number',
                    'nose_x', 'nose_y',
                    'neck_x', 'neck_y',
                    'shoulderR_x', 'shoulderR_y',
                    'elbowR_x', 'elbowR_y',
                    'handR_x', 'handR_y',
                    'shoulderL_x', 'shoulderL_y',
                    'elbowL_x', 'elbowL_y',
                    'handL_x', 'handL_y',
                    'ass_x', 'ass_y',
                    'legR_x', 'legR_y',
                    'kneeR_x', 'kneeR_y',
                    'feetR_x', 'feetR_y',
                    'legL_x', 'legL_y',
                    'kneeL_x', 'kneeL_y',
                    'feetL_x', 'feetL_y',
                    'eyeR_x', 'eyeR_y',
                    'eyeL_x', 'eyeL_y',
                    'earR_x', 'earR_y',
                    'earL_x', 'earL_y',
                    'footBoardR1_x', 'footBoardR1_y',
                    'footBoardR2_x', 'footBoardR2_y',
                    'footBoardR3_x', 'footBoardR3_y',
                    'footBoardL1_x', 'footBoardL1_y',
                    'footBoardL2_x', 'footBoardL2_y',
                    'footBoardL3_x', 'footBoardL3_y']
    df = pd.read_csv(file_path,
                     header=None,
                     names=column_names)
    # Last column has a ";" character which must be removed ...
    df['footBoardL3_y'].replace(regex=True, inplace=True, to_replace=r';', value=r'')
    # ... and then this column must be transformed to float explicitly
    # df['footBoardL3_y'] = df['footBoardL3_y'].apply(convert_to_float)
    # This is very important otherwise the model will not fit and loss
    # will show up as NAN
    df.dropna(axis=0, how='any', inplace=True)
    return df
def convert_to_float(x):
    """Convert x to float, returning NaN for values that cannot be parsed.

    Fix: the original called np.float(x), an alias removed in NumPy 1.24, and
    the over-broad `except BaseException` swallowed the resulting
    AttributeError - so on modern NumPy every input came back as NaN. Use the
    builtin float and catch only genuine conversion failures.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        return np.nan
def show_basic_dataframe_info(dataframe):
    """Print the column and row counts of dataframe."""
    rows, cols = dataframe.shape
    print('Number of columns in the dataframe: %i' % cols)
    print('Number of rows in the dataframe: %i\n' % rows)
# Load data set containing all the data from csv
df = read_data('/home/louisme/PycharmProjects/LSTM/LSTMDataset.txt')
show_basic_dataframe_info(df)
df.head(20)
# Show how many training examples exist for each activity type
df['type'].value_counts().plot(kind='bar', title='Training Examples by Activity Type')
# plt.show()
# Better understand how the recordings are spread across the different
# users who participated in the study
df['user_id'].value_counts().plot(kind='bar', title='Training Examples by User')
# plt.show()
# Define column name of the label vector
LABEL = 'TypeEncoded'
# Transform the labels from String to Integer via LabelEncoder
le = preprocessing.LabelEncoder()
# Add a new column to the existing DataFrame with the encoded values
df[LABEL] = le.fit_transform(df['type'].values.ravel())
# NOTE(review): fit_transform is re-fitted separately on each split; the
# integer mapping is only consistent if every split contains the same label
# set - confirm, or fit once and use transform() on train/test.
df_train = read_data('/home/louisme/PycharmProjects/LSTM/LSTMDataset_train.txt')
df_train[LABEL] = le.fit_transform(df_train['type'].values.ravel())
df_test = read_data('/home/louisme/PycharmProjects/LSTM/LSTMDataset_test.txt')
df_test[LABEL] = le.fit_transform(df_test['type'].values.ravel())
# Ordered joint names: each joint contributes an ``_x`` and ``_y`` column,
# giving 2 * 25 = 50 features per time step.  The order must match the order
# previously hard-coded in create_segments_and_labels (and hence the data the
# model was trained on).
SEGMENT_JOINTS = ['nose', 'neck', 'shoulderR', 'elbowR', 'handR',
                  'shoulderL', 'elbowL', 'handL', 'ass',
                  'legR', 'kneeR', 'feetR', 'legL', 'kneeL', 'feetL',
                  'eyeR', 'eyeL', 'earR', 'earL',
                  'footBoardR1', 'footBoardR2', 'footBoardR3',
                  'footBoardL1', 'footBoardL2', 'footBoardL3']
SEGMENT_COLUMNS = [joint + '_' + axis
                   for joint in SEGMENT_JOINTS for axis in ('x', 'y')]


def create_segments_and_labels(df, time_steps, step, label_name):
    """Slice *df* into (possibly overlapping) windows of keypoint features.

    Args:
        df: DataFrame holding the 50 keypoint columns plus the label column.
        time_steps: number of rows per window.
        step: stride between window starts (== time_steps for no overlap).
        label_name: column whose most frequent value labels each window.

    Returns:
        (segments, labels): a float32 array of shape
        (n_windows, time_steps, 50) and a 1-D array of window labels.
    """
    nfeatures = len(SEGMENT_COLUMNS)  # 50
    segments = []
    labels = []
    for i in range(0, len(df) - time_steps, step):
        # One (nfeatures, time_steps) window, replacing the previous 50
        # copy-pasted per-column extractions.
        window = [df[col].values[i: i + time_steps] for col in SEGMENT_COLUMNS]
        # Most frequent label in the window; ties resolve to the smallest
        # value, matching the tie-breaking of scipy.stats.mode used before
        # (whose ``[0][0]`` indexing broke on scipy >= 1.11).
        label = df[label_name].iloc[i: i + time_steps].mode().iloc[0]
        segments.append(window)
        labels.append(label)
    # NOTE(review): the reshape below reinterprets the (nfeatures, time_steps)
    # windows as (time_steps, nfeatures) WITHOUT transposing, exactly as the
    # original code did — preserved for compatibility with existing models,
    # but a ``transpose`` would be the natural layout; confirm before changing.
    reshaped_segments = np.asarray(segments, dtype=np.float32).reshape(-1, time_steps, nfeatures)
    labels = np.asarray(labels)
    return reshaped_segments, labels
# Build the windowed training tensors.  TIME_PERIODS and STEP_DISTANCE are
# defined earlier in the file (not shown here) — presumably the window length
# and the stride between windows; confirm upstream.
x_train, y_train = create_segments_and_labels(df_train, TIME_PERIODS, STEP_DISTANCE, LABEL)
print('x_train shape: ', x_train.shape)
print(x_train.shape[0], 'training samples')
print('y_train shape: ', y_train.shape)
print(y_train)
# Set input & output dimensions
num_time_periods, num_sensors = x_train.shape[1], x_train.shape[2]
num_classes = le.classes_.size
print(list(le.classes_))
# keras can only support a one dimension data, reshape to 60*30=3000
# (flatten each window into num_time_periods * num_sensors values).
input_shape = (num_time_periods * num_sensors)
x_train = x_train.reshape(x_train.shape[0], input_shape)
print('x_train shape:', x_train.shape)
print('input_shape:', input_shape)
# convert to keras accept datatype
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
# One-hot encode the integer labels for categorical_crossentropy.
y_train_hot = np_utils.to_categorical(y_train, num_classes)
print('New y_train shape: ', y_train_hot.shape)
model_m = Sequential()
# Remark: since coreml cannot accept vector shapes of complex shape like
# [80,3] this workaround is used in order to reshape the vector internally
# prior feeding it into the network
model_m.add(Reshape((TIME_PERIODS, 50), input_shape=(input_shape,)))
model_m.add(Dense(100, activation='relu'))
model_m.add(Dense(100, activation='relu'))
model_m.add(Dense(100, activation='relu'))
model_m.add(Flatten())
model_m.add(Dense(num_classes, activation='softmax'))
print(model_m.summary())
# Save the best model by validation loss and stop early when training accuracy
# stalls.  NOTE(review): the 'acc'/'val_acc' metric keys match older Keras;
# newer versions report 'accuracy'/'val_accuracy' — confirm against the
# installed version.
callbacks_list = [
    keras.callbacks.ModelCheckpoint(
        filepath='best_model.{epoch:02d}-{val_loss:.2f}.h5',
        monitor='val_loss', save_best_only=True),
    keras.callbacks.EarlyStopping(monitor='acc', patience=1)
]
model_m.compile(loss='categorical_crossentropy',
                optimizer='adam', metrics=['accuracy'])
# Hyper-parameters
BATCH_SIZE = 400
EPOCHS = 50
# Enable validation to use ModelCheckpoint and EarlyStopping callbacks.
history = model_m.fit(x_train,
                      y_train_hot,
                      batch_size=BATCH_SIZE,
                      epochs=EPOCHS,
                      callbacks=callbacks_list,
                      validation_split=0.2,
                      verbose=1)
# Plot accuracy and loss curves for the training and validation splits.
plt.figure(figsize=(6, 4))
plt.plot(history.history['acc'], 'r', label='Accuracy of training data')
plt.plot(history.history['val_acc'], 'b', label='Accuracy of validation data')
plt.plot(history.history['loss'], 'r--', label='Loss of training data')
plt.plot(history.history['val_loss'], 'b--', label='Loss of validation data')
plt.title('Model Accuracy and Loss')
plt.ylabel('Accuracy and Loss')
plt.xlabel('Training Epoch')
plt.ylim(0)
plt.legend()
plt.show()
# Print confusion matrix for training data
y_pred_train = model_m.predict(x_train)
# Take the class with the highest probability from the train predictions
max_y_pred_train = np.argmax(y_pred_train, axis=1)
print(classification_report(y_train, max_y_pred_train))
| StarcoderdataPython |
3298803 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-02-23 00:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the optional ``signup_link``
    # CharField to the ``programme`` model.

    dependencies = [
        ('programme', '0054_auto_20170212_2334'),
    ]

    operations = [
        migrations.AddField(
            model_name='programme',
            name='signup_link',
            field=models.CharField(blank=True, default='', help_text='If the programme requires signing up in advance, put a link here and it will be shown as a button in the schedule.', max_length=255, verbose_name='Signup link'),
        ),
    ]
| StarcoderdataPython |
9715687 | """
Hard-coded
"""
import asyncio
import traceback
from sentry_sdk import capture_exception
import aioredis
from blob import Context
from config import Config
from helpers import userHelper, new_utils
from lib import logger
from objects.constants.BanchoRanks import BanchoRanks
from objects.constants.IdleStatuses import Action
from packets.Builder.index import PacketBuilder
async def disconnect_handler(ch: aioredis.Channel) -> bool:
    """Kick the player named in a ``peppy:disconnect`` pubsub message.

    Returns True on success; False when the payload is invalid or the kick fails.
    """
    try:
        payload = await ch.get_json()
        user_id = payload.get('userID', 0)
        if not user_id:
            raise ValueError("userID must be integer")
        player = Context.players.get_token(uid=user_id)
        if player:
            await player.kick(reason=payload.get('reason', ''))
        return True
    except Exception as exc:
        capture_exception(exc)
        traceback.print_exc()
        return False
async def notification(ch: aioredis.Channel) -> bool:
    """Forward a ``peppy:notification`` message to the targeted online player."""
    try:
        payload = await ch.get_json()
        user_id = payload.get('userID', 0)
        if not user_id:
            raise ValueError("userID must be integer")
        player = Context.players.get_token(uid=user_id)
        if player:
            player.enqueue(await PacketBuilder.Notification(payload.get('message', '')))
        return True
    except Exception as exc:
        capture_exception(exc)
        traceback.print_exc()
        return False
async def change_username(ch: aioredis.Channel) -> bool:
    """Apply a username change immediately, or queue it if unsafe right now.

    The change is deferred (stored under ``ripple:change_username_pending:<id>``)
    when the player is offline or mid-play, and applied on the spot otherwise.
    Returns True on success, False on any error.
    """
    try:
        data = await ch.get_json()
        user_id = data.get('userID', 0)
        if not user_id:
            raise ValueError("userID must be integer")
        new_name = data.get('newUsername')
        token = Context.players.get_token(uid=user_id)
        # Renaming during a play would desync the client, so it is deferred
        # just like for offline users (de-duplicating the two identical
        # redis.set branches the previous version had).
        in_play = token is not None and token.pr_status.action in (
            Action.Playing, Action.Multiplayer_play
        )
        if token and not in_play:
            await userHelper.handle_username_change(user_id, new_name, token)
        else:
            await Context.redis.set(
                f"ripple:change_username_pending:{user_id}", new_name
            )
    except Exception as e:
        capture_exception(e)
        traceback.print_exc()
        return False
    return True
async def reload_settings(ch: aioredis.Channel) -> bool:
    # Only the exact payload b"reload" triggers a reload; short-circuit
    # evaluation skips new_utils.reload_settings() for any other message.
    return await ch.get() == b"reload" and await new_utils.reload_settings()
async def update_cached_stats(ch: aioredis.Channel) -> bool:
    """Refresh the cached stats of an online player; no-op when offline."""
    raw = await ch.get()
    if not raw.isdigit():
        # Malformed payload: the message must be a bare numeric user id.
        return False
    player = Context.players.get_token(uid=int(raw))
    if player:
        await player.update_stats()
    return True
async def silence(ch: aioredis.Channel) -> bool:
    """Apply a silence to the player whose id arrives on the channel."""
    raw = await ch.get()
    if not raw.isdigit():
        return False
    player = Context.players.get_token(uid=int(raw))
    if player:
        await player.silence()
    return True
async def ban(ch: aioredis.Channel) -> bool:
    """Ban a player and, if they are online, kick them with an explanation."""
    raw = await ch.get()
    if not raw.isdigit():
        return False
    player = Context.players.get_token(uid=int(raw))
    if player:
        await userHelper.ban(player.id)
        await player.kick("You are banned. Join our discord for additional information.")
    return True
async def killHQUser(ch: aioredis.Channel) -> bool:
    """Force-disconnect a flagged client after flashing bogus privilege packets."""
    raw = await ch.get()
    if not raw.isdigit():
        return False
    player = Context.players.get_token(uid=int(raw))
    if player:
        # NOTE(review): the packet order (notice, two fake privilege flips,
        # kill ping) looks intentional — preserved exactly as before.
        player.enqueue(await PacketBuilder.Notification("Bye-bye! See ya!"))
        player.enqueue(await PacketBuilder.BanchoPrivileges(BanchoRanks(BanchoRanks.SUPPORTER + BanchoRanks.PLAYER)))
        player.enqueue(await PacketBuilder.BanchoPrivileges(BanchoRanks(BanchoRanks.BAT + BanchoRanks.PLAYER)))
        player.enqueue(await PacketBuilder.KillPing())
    return True
# Routing table: redis pubsub channel name (bytes) -> coroutine handler.
# sub_reader() dispatches through this dict, and init() subscribes to its keys.
MAPPED_FUNCTIONS = {
    b"peppy:disconnect": disconnect_handler,
    b"peppy:change_username": change_username,
    b"peppy:reload_settings": reload_settings,
    b"peppy:update_cached_stats": update_cached_stats,
    b"peppy:silence": silence,
    b"peppy:ban": ban,
    b"peppy:notification": notification,
    b"kotrik:hqosu": killHQUser
}
async def sub_reader(ch: aioredis.Channel):
    """Consume messages from one subscribed channel and dispatch them forever."""
    while await ch.wait_message():
        handler = MAPPED_FUNCTIONS.get(ch.name)
        if handler is not None:
            logger.klog(f"[Redis/Pubsub] Received event in {ch.name}")
            await handler(ch)
async def init() -> bool:
    """Connect to redis, subscribe to every handled channel, start the readers.

    Returns True once one reader task per channel has been scheduled on the
    running event loop.
    """
    redis_values = dict(
        db=Config.config['redis']['db']
    )
    if Config.config['redis']['password']:
        redis_values['password'] = Config.config['redis']['password']
    subscriber = await aioredis.create_redis(
        f"redis://{Config.config['redis']['host']}",
        **redis_values
    )
    # Subscribe to exactly the channels that have handlers registered.
    subscribed_channels = await subscriber.subscribe(*MAPPED_FUNCTIONS.keys())
    Context.redis_sub = subscriber
    loop = asyncio.get_event_loop()
    # Plain loop instead of a throwaway list comprehension used only for its
    # side effects.
    for ch in subscribed_channels:
        loop.create_task(sub_reader(ch))
    return True
| StarcoderdataPython |
6451148 | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Logging API."""
import logging
import os
import sys
import google.api_core.client_options
from google.cloud.client import ClientWithProject
from google.cloud.environment_vars import DISABLE_GRPC
from google.cloud.logging_v2._helpers import _add_defaults_to_filter
from google.cloud.logging_v2._http import Connection
from google.cloud.logging_v2._http import _LoggingAPI as JSONLoggingAPI
from google.cloud.logging_v2._http import _MetricsAPI as JSONMetricsAPI
from google.cloud.logging_v2._http import _SinksAPI as JSONSinksAPI
from google.cloud.logging_v2.handlers import CloudLoggingHandler
from google.cloud.logging_v2.handlers import StructuredLogHandler
from google.cloud.logging_v2.handlers import setup_logging
from google.cloud.logging_v2.handlers.handlers import EXCLUDED_LOGGER_DEFAULTS
from google.cloud.logging_v2.resource import Resource
from google.cloud.logging_v2.handlers._monitored_resources import detect_resource
from google.cloud.logging_v2.logger import Logger
from google.cloud.logging_v2.metric import Metric
from google.cloud.logging_v2.sink import Sink
# Transport selection: gRPC is used when available and not explicitly disabled
# via the GOOGLE_CLOUD_DISABLE_GRPC environment variable; otherwise the client
# falls back to the JSON-over-HTTP APIs.
_DISABLE_GRPC = os.getenv(DISABLE_GRPC, False)
_HAVE_GRPC = False

try:
    if not _DISABLE_GRPC:
        # only import if DISABLE_GRPC is not set
        from google.cloud.logging_v2 import _gapic

        _HAVE_GRPC = True
except ImportError:  # pragma: NO COVER
    # could not import gapic library. Fall back to HTTP mode
    _HAVE_GRPC = False
    _gapic = None

_USE_GRPC = _HAVE_GRPC and not _DISABLE_GRPC

# Monitored-resource type names used to pick an appropriate log handler.
_GAE_RESOURCE_TYPE = "gae_app"
_GKE_RESOURCE_TYPE = "k8s_container"
_GCF_RESOURCE_TYPE = "cloud_function"
_RUN_RESOURCE_TYPE = "cloud_run_revision"
class Client(ClientWithProject):
    """Client to bundle configuration needed for API requests."""

    _logging_api = None
    _sinks_api = None
    _metrics_api = None

    SCOPE = (
        "https://www.googleapis.com/auth/logging.read",
        "https://www.googleapis.com/auth/logging.write",
        "https://www.googleapis.com/auth/logging.admin",
        "https://www.googleapis.com/auth/cloud-platform",
    )
    """The scopes required for authenticating as a Logging consumer."""

    def __init__(
        self,
        *,
        project=None,
        credentials=None,
        _http=None,
        _use_grpc=None,
        client_info=None,
        client_options=None,
    ):
        """
        Args:
            project (Optional[str]): the project which the client acts on behalf of.
                If not passed, falls back to the default inferred
                from the environment.
            credentials (Optional[google.auth.credentials.Credentials]):
                The OAuth2 Credentials to use for this
                client. If not passed (and if no ``_http`` object is
                passed), falls back to the default inferred from the
                environment.
            _http (Optional[requests.Session]):  HTTP object to make requests.
                Can be any object that defines ``request()`` with the same interface as
                :meth:`requests.Session.request`. If not passed, an
                ``_http`` object is created that is bound to the
                ``credentials`` for the current object.
                This parameter should be considered private, and could
                change in the future.
            _use_grpc (Optional[bool]): Explicitly specifies whether
                to use the gRPC transport or HTTP. If unset,
                falls back to the ``GOOGLE_CLOUD_DISABLE_GRPC``
                environment variable
                This parameter should be considered private, and could
                change in the future.
            client_info (Optional[Union[google.api_core.client_info.ClientInfo, google.api_core.gapic_v1.client_info.ClientInfo]]):
                The client info used to send a user-agent string along with API
                requests. If ``None``, then default info will be used. Generally,
                you only need to set this if you're developing your own library
                or partner tool.
            client_options (Optional[Union[dict, google.api_core.client_options.ClientOptions]]):
                Client options used to set user options
                on the client. API Endpoint should be set through client_options.
        """
        super(Client, self).__init__(
            project=project,
            credentials=credentials,
            _http=_http,
            client_options=client_options,
        )

        kw_args = {"client_info": client_info}
        if client_options:
            # Accept either a plain dict or a ClientOptions instance;
            # normalize to ClientOptions before reading api_endpoint.
            # (isinstance instead of ``type(...) == dict`` also accepts
            # dict subclasses.)
            if isinstance(client_options, dict):
                client_options = google.api_core.client_options.from_dict(
                    client_options
                )
            if client_options.api_endpoint:
                api_endpoint = client_options.api_endpoint
                kw_args["api_endpoint"] = api_endpoint

        self._connection = Connection(self, **kw_args)

        self._client_info = client_info
        self._client_options = client_options
        if _use_grpc is None:
            self._use_grpc = _USE_GRPC
        else:
            self._use_grpc = _use_grpc

    @property
    def logging_api(self):
        """Helper for logging-related API calls.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs
        """
        if self._logging_api is None:
            if self._use_grpc:
                self._logging_api = _gapic.make_logging_api(self)
            else:
                self._logging_api = JSONLoggingAPI(self)
        return self._logging_api

    @property
    def sinks_api(self):
        """Helper for log sink-related API calls.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks
        """
        if self._sinks_api is None:
            if self._use_grpc:
                self._sinks_api = _gapic.make_sinks_api(self)
            else:
                self._sinks_api = JSONSinksAPI(self)
        return self._sinks_api

    @property
    def metrics_api(self):
        """Helper for log metric-related API calls.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics
        """
        if self._metrics_api is None:
            if self._use_grpc:
                self._metrics_api = _gapic.make_metrics_api(self)
            else:
                self._metrics_api = JSONMetricsAPI(self)
        return self._metrics_api

    def logger(self, name, *, labels=None, resource=None):
        """Creates a logger bound to the current client.

        Args:
            name (str): The name of the logger to be constructed.
            resource (Optional[~logging_v2.Resource]): a monitored resource object
                representing the resource the code was run on. If not given, will
                be inferred from the environment.
            labels (Optional[dict]): Mapping of default labels for entries written
                via this logger.

        Returns:
            ~logging_v2.logger.Logger: Logger created with the current client.
        """
        return Logger(name, client=self, labels=labels, resource=resource)

    def list_entries(
        self,
        *,
        resource_names=None,
        filter_=None,
        order_by=None,
        max_results=None,
        page_size=None,
        page_token=None,
    ):
        """Return a generator of log entry resources.

        Args:
            resource_names (Sequence[str]): Names of one or more parent resources
                from which to retrieve log entries:

                ::

                    "projects/[PROJECT_ID]"
                    "organizations/[ORGANIZATION_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]"
                    "folders/[FOLDER_ID]"

                If not passed, defaults to the project bound to the API's client.
            filter_ (str): a filter expression. See
                https://cloud.google.com/logging/docs/view/advanced_filters
            order_by (str): One of :data:`~logging_v2.ASCENDING`
                or :data:`~logging_v2.DESCENDING`.
            max_results (Optional[int]):
                Optional. The maximum number of entries to return.
                Non-positive values are treated as 0. If None, uses API defaults.
            page_size (int): number of entries to fetch in each API call. Although
                requests are paged internally, logs are returned by the generator
                one at a time. If not passed, defaults to a value set by the API.
            page_token (str): opaque marker for the starting "page" of entries. If not
                passed, the API will return the first page of entries.

        Returns:
            Generator[~logging_v2.LogEntry]
        """
        if resource_names is None:
            resource_names = [f"projects/{self.project}"]
        filter_ = _add_defaults_to_filter(filter_)

        return self.logging_api.list_entries(
            resource_names=resource_names,
            filter_=filter_,
            order_by=order_by,
            max_results=max_results,
            page_size=page_size,
            page_token=page_token,
        )

    def sink(self, name, *, filter_=None, destination=None):
        """Creates a sink bound to the current client.

        Args:
            name (str): the name of the sink to be constructed.
            filter_ (Optional[str]): the advanced logs filter expression
                defining the entries exported by the sink.  If not
                passed, the instance should already exist, to be
                refreshed via :meth:`Sink.reload`.
            destination (str): destination URI for the entries exported by
                the sink.  If not passed, the instance should
                already exist, to be refreshed via
                :meth:`Sink.reload`.

        Returns:
            ~logging_v2.sink.Sink: Sink created with the current client.
        """
        return Sink(name, filter_=filter_, destination=destination, client=self)

    def list_sinks(
        self, *, parent=None, max_results=None, page_size=None, page_token=None
    ):
        """List sinks for the a parent resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list

        Args:
            parent (Optional[str]): The parent resource whose sinks are to be listed:

                ::

                    "projects/[PROJECT_ID]"
                    "organizations/[ORGANIZATION_ID]"
                    "billingAccounts/[BILLING_ACCOUNT_ID]"
                    "folders/[FOLDER_ID]".

                If not passed, defaults to the project bound to the API's client.
            max_results (Optional[int]):
                Optional. The maximum number of entries to return.
                Non-positive values are treated as 0. If None, uses API defaults.
            page_size (int): number of entries to fetch in each API call. Although
                requests are paged internally, logs are returned by the generator
                one at a time. If not passed, defaults to a value set by the API.
            page_token (str): opaque marker for the starting "page" of entries. If not
                passed, the API will return the first page of entries.

        Returns:
            Generator[~logging_v2.Sink]
        """
        if parent is None:
            parent = f"projects/{self.project}"
        return self.sinks_api.list_sinks(
            parent=parent,
            max_results=max_results,
            page_size=page_size,
            page_token=page_token,
        )

    def metric(self, name, *, filter_=None, description=""):
        """Creates a metric bound to the current client.

        Args:
            name (str): The name of the metric to be constructed.
            filter_(Optional[str]): The advanced logs filter expression defining the
                entries tracked by the metric.  If not
                passed, the instance should already exist, to be
                refreshed via :meth:`Metric.reload`.
            description (Optional[str]): The description of the metric to be constructed.
                If not passed, the instance should already exist,
                to be refreshed via :meth:`Metric.reload`.

        Returns:
            ~logging_v2.metric.Metric: Metric created with the current client.
        """
        return Metric(name, filter_=filter_, client=self, description=description)

    def list_metrics(self, *, max_results=None, page_size=None, page_token=None):
        """List metrics for the project associated with this client.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list

        Args:
            max_results (Optional[int]):
                Optional. The maximum number of entries to return.
                Non-positive values are treated as 0. If None, uses API defaults.
            page_size (int): number of entries to fetch in each API call. Although
                requests are paged internally, logs are returned by the generator
                one at a time. If not passed, defaults to a value set by the API.
            page_token (str): opaque marker for the starting "page" of entries. If not
                passed, the API will return the first page of entries.

        Returns:
            Generator[logging_v2.Metric]
        """
        return self.metrics_api.list_metrics(
            self.project,
            max_results=max_results,
            page_size=page_size,
            page_token=page_token,
        )

    def get_default_handler(self, **kw):
        """Return the default logging handler based on the local environment.

        Args:
            kw (dict): keyword args passed to handler constructor

        Returns:
            logging.Handler: The default log handler based on the environment
        """
        monitored_resource = kw.pop("resource", detect_resource(self.project))

        if isinstance(monitored_resource, Resource):
            if monitored_resource.type == _GAE_RESOURCE_TYPE:
                return CloudLoggingHandler(self, resource=monitored_resource, **kw)
            elif monitored_resource.type == _GKE_RESOURCE_TYPE:
                return StructuredLogHandler(**kw, project_id=self.project)
            elif monitored_resource.type == _GCF_RESOURCE_TYPE:
                # __stdout__ stream required to support structured logging on Python 3.7
                kw["stream"] = kw.get("stream", sys.__stdout__)
                return StructuredLogHandler(**kw, project_id=self.project)
            elif monitored_resource.type == _RUN_RESOURCE_TYPE:
                return StructuredLogHandler(**kw, project_id=self.project)
        return CloudLoggingHandler(self, resource=monitored_resource, **kw)

    def setup_logging(
        self, *, log_level=logging.INFO, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS, **kw
    ):
        """Attach default Cloud Logging handler to the root logger.

        This method uses the default log handler, obtained by
        :meth:`~get_default_handler`, and attaches it to the root Python
        logger, so that a call such as ``logging.warn``, as well as all child
        loggers, will report to Cloud Logging.

        Args:
            log_level (Optional[int]): Python logging log level. Defaults to
                :const:`logging.INFO`.
            excluded_loggers (Optional[Tuple[str]]): The loggers to not attach the
                handler to. This will always include the
                loggers in the path of the logging client
                itself.
        """
        # (The previous docstring claimed a dict return value; this method
        # returns None — remaining kwargs are forwarded to the handler.)
        handler = self.get_default_handler(**kw)
        setup_logging(handler, log_level=log_level, excluded_loggers=excluded_loggers)
| StarcoderdataPython |
5097146 | <gh_stars>10-100
#!/usr/bin/env/python3
import json
import datetime
from collections import namedtuple
QuietTime = namedtuple('QuietTime', 'start end')
def get_quiet_times(filename):
    """Load a JSON list of quiet-time records and convert each to a QuietTime.

    The file holds a list of objects of the form::

        {"start": {"hour": 17, "minute": 17, "second": 0},
         "end": {"hour": 17, "minute": 20, "second": 0}}

    :param filename: a string, e.g. './data/quiet_times.json'
    :return: a list of QuietTime
    """
    with open(filename, 'r') as json_file:
        raw_entries = json.load(json_file)
    return [quiet_time_from_dict(entry) for entry in raw_entries]
def quiet_time_from_dict(quiet_time_dict):
    """Build a QuietTime from one record with "start" and "end" sub-dicts.

    :param quiet_time_dict: a dictionary of the form
        {"start": {"hour": 17, "minute": 27, "second": 0},
         "end": {"hour": 17, "minute": 31, "second": 20}}
    :return: a QuietTime
    """
    return QuietTime(
        time_from_dict(quiet_time_dict.get("start")),
        time_from_dict(quiet_time_dict.get("end")),
    )
def time_from_dict(time_dict):
    """Convert ``{"hour": h, "minute": m, "second": s}`` into a datetime.time.

    :return: a datetime.time (an idealized clock time, independent of any
        particular day)
    """
    return datetime.time(
        hour=time_dict.get("hour"),
        minute=time_dict.get("minute"),
        second=time_dict.get("second"),
    )
| StarcoderdataPython |
9758347 | """"
usage-
./manage.py builddata load_knowledgebase_csv ~/Documents/Scratch/knowledgebase.csv
Creates derived dataset of constants used by JS frontend. Data is sourced from cla_common.
you can then load the fixture with-
./manage.py loaddata cla_backend/apps/knowledgebase/fixtures/kb_from_spreadsheet.json
"""
from django.core.management.base import BaseCommand
import os
import sys
from ._csv_2_fixture import KnowledgebaseCsvParse
class Command(BaseCommand):
    """``builddata`` management command: build derived fixture datasets."""

    args = "load_knowledgebase_csv CSV_FILE.csv"
    help = (
        "Create a derived dataset. At present, just load_knowledgebase_csv "
        "is implemented. It loads a CSV spreadsheet into a fixture ready "
        "to be loaddata'ed into DB"
    )

    # Destination of the generated fixture, relative to the project root.
    KNOWLEDGEBASE_FIXTURE = "cla_backend/apps/knowledgebase/fixtures/kb_from_spreadsheet.json"

    def handle(self, *args, **options):
        """Validate the arguments, parse the CSV and write the JSON fixture."""
        if args[0] != "load_knowledgebase_csv":
            # Unknown sub-command: silently do nothing (unchanged behaviour).
            return
        if len(args) != 2:
            self.stdout.write("Last argument needs to be path to CSV file")
            sys.exit(-1)
        if not os.access(args[1], os.R_OK):
            self.stdout.write("File '%s' couldn't be read" % args[1])
            sys.exit(-1)
        # read in CSV and feed to fixture builder.  Mode "rU" was removed in
        # Python 3.11; plain "r" already applies universal-newline handling.
        # Context managers close the files even if parsing/writing raises.
        with open(args[1], "r") as f_in:
            fixture_json = KnowledgebaseCsvParse(f_in).fixture_as_json()
        # write json doc to fixture file (local renamed from ``json`` to avoid
        # masking the common module name)
        with open(self.KNOWLEDGEBASE_FIXTURE, "w") as f_out:
            f_out.write(fixture_json)
        self.stdout.write("Fixture written to %s" % self.KNOWLEDGEBASE_FIXTURE)
| StarcoderdataPython |
8035945 | <filename>src/mypy_structured_data/__init__.py
from mypy.plugin import Plugin
__version__ = "0.1.1"
class StructuredDataPlugin(Plugin):
    """Mypy plugin class; currently adds no hooks beyond the base Plugin."""
    pass
def plugin(version: str):
    """Mypy entry point: return the plugin class for the running mypy *version*."""
    # ignore version argument if the plugin works with all mypy versions.
    return StructuredDataPlugin
| StarcoderdataPython |
3358002 | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
class Reference(models.Model):
    """A link shared by a user: title, short description and URL."""

    title = models.CharField(max_length = 250)
    description = models.CharField(max_length=250)
    ## description = models.TextField()
    link = models.URLField(max_length=200)
    # Deleting the user cascades to their references; reverse accessor is
    # ``user.desc_posts``.
    author = models.ForeignKey(User,
                               on_delete = models.CASCADE,
                               related_name = 'desc_posts' )
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    publish = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return self.title
| StarcoderdataPython |
6433961 | """
Work command
~~~~~~~~~~~~~~~~~
You get moni
:copyright: (c) 2021-2021 M2rsho
:license: MIT, see LICENSE for more details.
"""
from discord.ext import commands
import discord
from discord.ext.commands.core import is_owner
import support
from discord.ext.commands import cooldown, BucketType
import random
from cogs import checks
class work(commands.Cog):
    """Economy cog: the ``work`` command pays out a random wage once per hour."""

    def __init__(self, client):
        self.client = client

    @checks.default()
    @cooldown(1, 3600, BucketType.user)
    @commands.command(description=support.getDescription("en.json", "work"))
    async def work(self, ctx):
        """Pay the invoker a random wage, plus a bonus for high social credit."""
        support.getLanguageFileG(ctx.guild)
        money = random.randint(2000, 20000)
        texts = support.getLanguageFile(support.getLanguage(ctx.guild))
        texts = texts["commands"]["work"]["messages"]
        # Fetch the social-credit score once instead of awaiting it three
        # times (also avoids the score changing between the reads).
        credit = await support.globalData.getSocialCredit(ctx.message.author)
        # The bonus is the surplus of scaling the wage by credit/1000; it is
        # only paid (and shown) when the score is at least 1000.
        bonus = (money * (credit / 1000)) - money
        has_bonus = credit >= 1000
        await support.globalData.addBalance(ctx.message.author, money + (bonus if has_bonus else 0))
        await ctx.reply(
            mention_author=False,
            embed=discord.Embed(
                description=random.choice(texts)
                .format(ammount=money, bitcoin=support.convertToBitcoin(money, "USD")),
                colour=support.colours.default)
            .set_footer(text=(f"{bonus}$. Social Credit Bonus | {money+bonus}$ in total." if has_bonus else '')))
def setup(bot):
    # discord.py extension hook: registers the cog when the extension loads.
    bot.add_cog(work(bot))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.