id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
173243 | import os
import re
from django.conf import settings
from statsd.defaults.django import statsd
# Module-level Datadog state: disabled unless an API key is configured in settings.
DATADOG_METRICS = False
DATADOG_TAGS = None

# Datadog reporting is optional; only initialize the client when a key exists.
if settings.DATADOG_API_KEY:
    from datadog import initialize

    options = {
        'api_key': settings.DATADOG_API_KEY,
        'app_key': settings.DATADOG_APP_KEY
    }
    initialize(**options)

    from datadog import statsd as datadog_statsd

    DATADOG_METRICS = True
    # Tag every Datadog metric with the deployment environment.
    DATADOG_TAGS = [f"env:{os.environ.get('ENVIRONMENT')}"]

__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Helium Edu"
__version__ = "1.4.46"
def increment(metric, request=None, ignore_staff=True, ignore_anonymous=False):
    """Bump a counter under the ``platform.`` prefix on statsd (and Datadog when enabled).

    When ``request`` is supplied, the increment can be skipped for staff users
    (``ignore_staff``) and/or unauthenticated users (``ignore_anonymous``).
    """
    if request:
        if ignore_staff and request.user.is_authenticated and request.user.is_staff:
            return
        if ignore_anonymous and not request.user.is_authenticated:
            return
    counter_name = f"platform.{metric}"
    statsd.incr(counter_name)
    if DATADOG_METRICS:
        datadog_statsd.increment(counter_name, tags=DATADOG_TAGS)
def request_start(request):
    """Begin timing a request; return the metrics dict carried through the middleware."""
    path_token = re.sub('[^a-zA-Z]+', '', request.path)
    metric_id = f"platform.request.{path_token}.{request.method}"
    timer = statsd.timer(metric_id, rate=1)
    timer.start()
    # NOTE(review): _start_time is a private attribute of the statsd timer --
    # confirm it still exists after statsd client upgrades.
    started_ms = int(round(timer._start_time * 1000))
    return {
        'Request-Timer': timer,
        'Request-Metric-ID': metric_id,
        'Request-Metric-Start': started_ms,
    }
def request_stop(metrics, response):
    """Stop the request timer, emit counters/timing, and copy metrics onto response headers.

    Args:
        metrics: the dict produced by ``request_start()``.
        response: the Django ``HttpResponse`` being returned to the client.
    """
    metrics['Request-Timer'].stop()
    metrics['Request-Metric-Millis'] = metrics['Request-Timer'].ms
    statsd.incr(metrics['Request-Metric-ID'])
    statsd.incr(f"{metrics['Request-Metric-ID']}.{response.status_code}")
    if DATADOG_METRICS:
        datadog_statsd.increment(metrics['Request-Metric-ID'], tags=DATADOG_TAGS)
        datadog_statsd.increment(f"{metrics['Request-Metric-ID']}.{response.status_code}", tags=DATADOG_TAGS)
        datadog_statsd.timing(metrics['Request-Metric-ID'], metrics['Request-Timer'].ms, tags=DATADOG_TAGS)
    metrics.pop('Request-Timer')
    for name, value in metrics.items():
        # Fix: use the public header-setting API instead of poking the private
        # HttpResponse._headers dict, which was removed in Django 3.2.
        response[name] = str(value)
| StarcoderdataPython |
3339451 | # -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import unittest
from unittest import TestCase
class DataTest(TestCase):
    """Template test case -- keep tests.yml in sync when renaming tests."""

    def test_something(self):
        """Trivial sanity check used as a scaffold for real tests."""
        expected = True
        actual = True
        self.assertEqual(actual, expected)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
class Node:
    """A single linked-list cell holding a payload and a forward pointer."""

    def __init__(self, data):
        self.data = data  # payload
        self.next = None  # successor node, or None at the tail
class LinkedList:
    """Singly linked list with prepend-style add and in-place recursive reversal."""

    def __init__(self):
        self.head = None

    def add(self, data):
        """Insert `data` at the head of the list (prepend)."""
        node = Node(data)
        node.next = self.head
        self.head = node

    def reverse(self, node):
        """Reverse the list in place; call with the current head node.

        Recursion depth is O(length of list), so this is only suitable for
        short lists.
        """
        # Fix: an empty list previously crashed with AttributeError on
        # `node.next` when called as reverse(llist.head) with head == None.
        if node is None:
            return
        if node.next is None:
            # The old tail becomes the new head.
            self.head = node
            return
        self.reverse(node.next)
        # After the recursion, node.next is the tail of the reversed suffix;
        # hook this node behind it and terminate the list at this node.
        node.next.next = node
        node.next = None

    def printList(self):
        """Print each element on its own line, head first."""
        if self.head is None:
            print("LinkedList is empty")
        else:
            temp = self.head
            while temp:
                print(temp.data)
                temp = temp.next
# Demo: add() prepends, so the list holds 4,3,2,1,0; reversing yields 0..4.
llist = LinkedList()
for value in range(5):
    llist.add(value)
llist.reverse(llist.head)
llist.printList()
| StarcoderdataPython |
1787440 | """Unit tests for agent factory.
"""
import unittest
from parameterized import parameterized
from stock_trading_backend.agent import create_agent, FollowingFeatureAgent
from stock_trading_backend.util import read_config_file
class TestAgentFactory(unittest.TestCase):
    """Unit tests for agent factory.
    """
    # NOTE(review): config paths are repository-relative -- these tests assume
    # the suite is run from the project root; confirm against CI setup.
    @parameterized.expand([
        ("agent/following_feature_agent_1.yaml", FollowingFeatureAgent),
    ])
    def test_creates_agent(self, config_filename, expected_class):
        """Checks if created agent class is of the right class.
        Args:
            config_filename: the filename for the config file.
            expected_class: the expected class created from config file.
        """
        data_collection_config = read_config_file("data/default.yaml")
        agent = create_agent(read_config_file(config_filename), data_collection_config, None)
        self.assertIsInstance(agent, expected_class)

    def test_lookup_error(self):
        """Checks if create agent raises lookup error.
        """
        # An unknown agent name must raise rather than silently return None.
        with self.assertRaises(LookupError):
            _ = create_agent({"name": "not_the_right_name"}, None, None)
| StarcoderdataPython |
3361716 | <filename>lib/p5/pvector.py
class PVector(object):
    """Minimal 3-component vector in the spirit of Processing's PVector."""

    def __init__(self, x, y, z=0):
        self.x = x
        self.y = y
        self.z = z

    def add(self, *args):
        """In-place add; accepts another PVector or (x, y, z) components."""
        if len(args) == 1:
            other = args[0]
            dx, dy, dz = other.x, other.y, other.z
        else:
            dx, dy, dz = args
        self.x = self.x + dx
        self.y = self.y + dy
        self.z = self.z + dz

    def sub(self, *args):
        """In-place subtract; accepts another PVector or (x, y, z) components."""
        if len(args) == 1:
            other = args[0]
            dx, dy, dz = other.x, other.y, other.z
        else:
            dx, dy, dz = args
        self.x = self.x - dx
        self.y = self.y - dy
        self.z = self.z - dz

    def __add__(self, v):
        """Return a new vector equal to self + v (self is unchanged)."""
        result = self.copy()
        result.add(v)
        return result

    def __sub__(self, v):
        """Return a new vector equal to self - v (self is unchanged)."""
        result = self.copy()
        result.sub(v)
        return result

    def copy(self):
        """Return an independent duplicate of this vector."""
        return PVector(self.x, self.y, self.z)

    def get(self, target=None):
        """Copy components into `target` (a mutable sequence), or return a copy."""
        if target is None:
            return self.copy()
        if len(target) >= 2:
            target[0] = self.x
            target[1] = self.y
        if len(target) >= 3:
            target[2] = self.z
        return target
| StarcoderdataPython |
198786 | <filename>src/common.py
import os
import sys
import logging
import logging.handlers
import configparser
import hashlib
import subprocess
# Marker value stored in config to indicate a pipeline step has completed.
DONE = "done"
# System-wide settings file, written next to the executable.
SYSTEM_CONF_FILE="DisNOTE.ini"
# Config key: temp audio segment length used during silence detection.
SEG_TMP_AUDIO_LENGTH="seg_tmp_audio_length"
# Config key: whether noisy-sounding audio should also be recognized.
IS_RECOGNIZE_NOIZE="is_recognize_noize"
def getVersion():
    """Return the DisNOTE release version string."""
    return "v2.1.1"
# 無音検出時に作るテンポラリファイルの音声の長さ(ミリ秒)
def getSegTmpAudioLength():
    """Return the temp-audio segment length for silence detection, in milliseconds.

    Reads the value (minutes) from the system config, clamping to a 10-minute
    minimum; on first run or an unparsable value, persists the 30-minute default.
    """
    minutes = 30  # default: split every 30 minutes
    try:
        config = readSysConfig()
        raw = config['DEFAULT'].get(SEG_TMP_AUDIO_LENGTH)
        minutes = int(raw)
        if minutes < 10:  # enforce the 10-minute lower bound
            minutes = 10
    except:  # first run, unreadable config, or bad value: save the default
        minutes = 30
        config.set('DEFAULT', SEG_TMP_AUDIO_LENGTH, str(minutes))
        writeSysConfig(config)
    return minutes * 60 * 1000
# ノイズっぽい音声を認識するかどうか
def isRecognizeNoize():
    """Return True when noisy-sounding segments should also be recognized.

    Reads an integer flag from the system config; on first run or an
    unparsable value, persists the default (off).
    """
    flag = 0
    try:
        config = readSysConfig()
        raw = config['DEFAULT'].get(IS_RECOGNIZE_NOIZE)
        flag = int(raw)
    except:  # first run, unreadable config, or bad value: save the default
        config.set('DEFAULT', IS_RECOGNIZE_NOIZE, str(flag))
        writeSysConfig(config)
    return flag != 0
# システムconfig読み込み
def readSysConfig():
    """Load and return the system-wide config file (UTF-8)."""
    parser = configparser.ConfigParser()
    parser.read(SYSTEM_CONF_FILE, "utf-8")
    return parser
# システムconfig書き込み
def writeSysConfig(config):
    """Persist the given parser to the system-wide config file (UTF-8)."""
    with open(SYSTEM_CONF_FILE, "w", encoding="utf-8") as fh:
        config.write(fh)
# configファイルのpath
def getConfigFile(input_file):
    """Return the per-input config path: <dir>/<base>/_<base>.ini."""
    base = getFileNameWithoutExtension(input_file)
    # Outputs live in a directory named after the input, next to the input.
    outputdir = os.path.join(os.path.dirname(input_file), base)
    return os.path.join(outputdir, "_{}.ini".format(base))
# config読み込み
def readConfig(input_file):
    """Load the per-input config, resetting all progress when the audio changed.

    The input file's hash is stored in the config; a mismatch means the source
    audio was replaced, so a fresh config is returned (with the new hash).
    """
    config = configparser.ConfigParser()
    config.read(getConfigFile(input_file), "utf-8")
    digest = inputFileHash(input_file)
    if config['DEFAULT'].get('hash') != digest:
        # Hash mismatch: discard previous state and start over.
        config = configparser.ConfigParser()
        config.set('DEFAULT', 'hash', digest)
        config.set('DEFAULT', 'input_file', input_file)
    return config
# config書き込み
def writeConfig(input_file, config):
    """Persist the per-input config next to the input file (UTF-8)."""
    with open(getConfigFile(input_file), "w", encoding="utf-8") as fh:
        config.write(fh)
# 元になる音声のhash値
def inputFileHash(input_file):
    """Return the SHA3-256 hex digest of the file's contents."""
    with open(input_file, 'rb') as fh:
        payload = fh.read()
    return hashlib.sha3_256(payload).hexdigest()
# 拡張子を省いたファイル名を返す(これをフォルダ名などにする)
def getFileNameWithoutExtension(input_file):
    """Return the base filename with its final extension removed."""
    root, _ = os.path.splitext(os.path.basename(input_file))
    return root
# 分析結果ファイル
def getSegResultFile(input_file, index):
    """Return the analysis-result path for segment `index`, creating the output dir.

    index 0 maps to _<base>.txt; index N>0 maps to _<base>_<N+1>.txt.
    """
    base = getFileNameWithoutExtension(input_file)
    outputdir = os.path.join(os.path.dirname(input_file), base)
    if index > 0:
        output_file = "_{}_{}.txt".format(base, index + 1)
    else:
        output_file = "_{}.txt".format(base)
    # Ensure the output directory exists (ignore if already present).
    try:
        os.mkdir(outputdir)
    except FileExistsError:
        pass
    return os.path.join(outputdir, output_file)
# 分割音声ファイルのprefix
def getSplitAudioFilePrefix(input_file):
    """Return the filename prefix used for split audio chunks: <dir>/<base>/<base>_."""
    base = getFileNameWithoutExtension(input_file)
    outputdir = os.path.join(os.path.dirname(input_file), base)
    return os.path.join(outputdir, "{}_".format(base))
# 分割結果ファイル
def getSplitResultFile(input_file):
    """Return the split-result path: <dir>/<base>/_<base>_split.txt."""
    base = getFileNameWithoutExtension(input_file)
    outputdir = os.path.join(os.path.dirname(input_file), base)
    return os.path.join(outputdir, "_{}_split.txt".format(base))
# 認識結果ファイル
def getRecognizeResultFile(input_file):
    """Return the recognition-result path: <dir>/<base>/_<base>.csv."""
    base = getFileNameWithoutExtension(input_file)
    outputdir = os.path.join(os.path.dirname(input_file), base)
    return os.path.join(outputdir, "_{}.csv".format(base))
# logger
def getLogger(srcfile):
    """Return a logger named after `srcfile` that writes to stdout and log/speechrec.log.

    Args:
        srcfile: source-file path; its basename (sans extension) becomes the
            logger name.
    """
    name = os.path.splitext(os.path.basename(srcfile))[0]
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    # Fix: getLogger() may be called repeatedly for the same source file;
    # unconditionally re-adding handlers duplicated every log line.
    if logger.handlers:
        return logger

    # Create the log directory if it does not exist yet.
    try:
        os.mkdir("log")
    except FileExistsError:
        pass

    # Console output.
    handler1 = logging.StreamHandler(sys.stdout)
    handler1.setLevel(logging.INFO)
    handler1.setFormatter(logging.Formatter("%(asctime)s [%(name)s] %(message)s"))

    # Rotating log file (10 MB x 3 backups).
    handler2 = logging.handlers.RotatingFileHandler(filename="log/speechrec.log", maxBytes=1024 * 1024 * 10, backupCount=3)
    handler2.setLevel(logging.INFO)
    handler2.setFormatter(logging.Formatter("%(asctime)s %(process)8d [%(levelname)s] %(name)s %(message)s"))

    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger
# サブプロセス実行(returncodeが非0の場合は標準エラー出力をログに吐いて例外を投げる。正常終了時、res.stdoutに標準出力)
def runSubprocess(args):
    """Run a subprocess, capturing stdout/stderr as UTF-8 text.

    On a non-zero exit code the stderr text is logged and a RuntimeError is
    raised; on success the CompletedProcess (with .stdout) is returned.
    """
    result = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        getLogger(__file__).error(result.stderr)
        raise RuntimeError(result.stderr)
    return result
# メディアファイルのフォーマットを返す
def getFileFormat(input_file):
    """Return ffprobe's JSON stream description for a media file, or None on failure."""
    try:
        # Fix: pass argv as a list so the path needs no manual quoting and the
        # call works with shell=False on every platform (the previous single
        # command string only parsed correctly on Windows).
        res = runSubprocess(["ffprobe.exe", "-v", "error", "-show_streams",
                             "-print_format", "json", input_file])
        return res.stdout
    except Exception:
        logger = getLogger(__file__)
        logger.error("フォーマット確認失敗。{} は音声ファイルではないようです。".format(input_file))
        return None
| StarcoderdataPython |
from setuptools import setup

# Packaging metadata for the beets-mpdadd plugin; the plugin module lives in
# the shared `beetsplug` namespace package, per beets plugin convention.
setup(
    name='beets-mpdadd',
    version='0.2',
    description='beets plugin that adds query results to the current MPD playlist',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    platforms='ALL',
    packages=['beetsplug'],
    install_requires=['beets', 'python-mpd2'],
)
| StarcoderdataPython |
1733154 | <gh_stars>1-10
import re
class DigestConfig:
    """Settings for in-silico protein digestion.

    Assigning to ``digest_pattern`` recompiles the cleavage-site regex.
    """

    def __init__(self):
        # Trypsin/P = "([KR](?=[^P]))", AspN = "\w(?=D)"
        self._digest_pattern = "[KR]"
        self.digest_pattern = self._digest_pattern
        self.min_len = 7          # minimum peptide length
        self.max_len = 30         # maximum peptide length
        self.max_miss_cleave = 2  # maximum missed cleavages per peptide
        self.cleave_type = "full"

    @property
    def digest_pattern(self):
        return self._digest_pattern

    @digest_pattern.setter
    def digest_pattern(self, _pattern):
        # Keep the stored pattern and its compiled form in sync.
        self._digest_pattern = _pattern
        self.regex = re.compile(_pattern)
def digest(protein, pepset, digest_config):
    """Digest `protein` into `pepset` according to `digest_config`.

    Only "full" cleavage is implemented; any other cleave_type returns the
    peptide set unchanged.
    """
    if digest_config.cleave_type != "full":
        return pepset
    return digest_full(protein, pepset, digest_config.regex, digest_config.min_len,
                       digest_config.max_len, digest_config.max_miss_cleave)
def digest_full(protein, pepset, regex, min_len=7, max_len=30, max_miss_cleave=2):
    """Fully digest `protein.seq` at every regex match and collect peptides in `pepset`."""
    seq = protein.seq
    # Cleavage happens just after each matched residue; always include the
    # sequence start, and force a cleavage at the very end of the sequence.
    sites = [0] + [m.start() + 1 for m in regex.finditer(seq)]
    if sites[-1] != len(seq) - 1:
        sites.append(len(seq))
    return cleave_full(seq, pepset, sites, min_len, max_len, max_miss_cleave)
def cleave_full(seq, seq_set, sites, min_len=7, max_len=30, max_miss_cleave=2):
    """Enumerate fully-cleaved peptides between the given cleavage `sites`.

    Adds to `seq_set` every peptide spanning up to `max_miss_cleave` missed
    cleavages whose length is within [min_len, max_len]. For peptides at the
    protein N-terminus that start with methionine, the Met-loss variant is
    also added when its length is in range. Returns the (mutated) set.
    """
    n_sites = len(sites)
    for start_idx in range(n_sites):
        for missed in range(max_miss_cleave + 1):
            end_idx = start_idx + missed + 1
            if end_idx >= n_sites:
                break
            peptide = seq[sites[start_idx]:sites[end_idx]]
            if not (min_len <= len(peptide) <= max_len):
                continue
            if start_idx == 0 and peptide.startswith("M") and min_len <= len(peptide) - 1 <= max_len:
                # N-terminal methionine loss variant.
                seq_set.add(peptide[1:])
            seq_set.add(peptide)
    return seq_set
| StarcoderdataPython |
4840497 | <reponame>iBalag/claimant
from datetime import datetime, timedelta
from typing import List, Optional
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.types import ReplyKeyboardMarkup, ReplyKeyboardRemove, InlineKeyboardButton, \
InlineKeyboardMarkup
from common import calc_oof_profit, calc_payoff_profit
from common.oof_profit_calculator import OOFCalculation
from common.payoff_profit_calculator import PayOffCalculation
from keyboards import get_next_actions_kb, example_btn, get_claim_parts_kb, emojis
from repository import Repository
# Maps internal claim-part identifiers to their Russian display names shown
# to the user when a section is completed.
TERM_DISPLAY_NAME_MAP: dict = {
    "essence": "суть нарушения",
    "proofs": "доказательства",
    "claims": "требования",
    "additions": "приложения"
}
async def process_manual_enter(message: types.Message, state: FSMContext, state_groups):
    """Append the user's manually typed option to the FSM's chosen_options list.

    After saving, returns the conversation to the waiting_for_user_action state
    and re-shows the next-actions keyboard.
    """
    manual_entered_value: str = message.text
    user_data = await state.get_data()
    chosen_options: List[str]
    if "chosen_options" in user_data.keys():
        chosen_options = user_data["chosen_options"]
        chosen_options.append(manual_entered_value)
    else:
        # First option for this claim part.
        chosen_options = [manual_entered_value]
    await state.update_data(chosen_options=chosen_options)
    await message.answer("Свой вариант добавлен.")
    await state_groups.waiting_for_user_action.set()
    next_actions_kb: ReplyKeyboardMarkup = get_next_actions_kb()
    await message.answer("Введите свой вариант самостоятельно. "
                         "Или выберите дальнейшее действие с помощью клавиатуры",
                         reply_markup=next_actions_kb)
async def process_option_selection(message: types.Message, claim_part: str, state_groups):
    """Show the numbered template options for `claim_part` as an inline keyboard.

    When no template options exist for the current claim theme, falls back to
    asking the user for free-form input.
    """
    repository: Repository = Repository()
    claim_theme: Optional[str] = repository.get_current_claim_theme(message.from_user.id)
    options: Optional[List[str]] = repository.get_claim_tmp_options(claim_theme, claim_part)
    if options is None or len(options) == 0:
        await state_groups.waiting_for_user_action.set()
        kb = ReplyKeyboardMarkup(resize_keyboard=True)
        kb.row(example_btn)
        await message.reply("Для данного шаблона не найдено опций для выбора. "
                            "Введите свой вариант", reply_markup=kb)
        return
    # One inline button per option; callback data carries the option index.
    options_kb = InlineKeyboardMarkup()
    options_text = []
    for i, option in enumerate(options):
        options_text.append(f"{i+1}. {option}")
        option_btn = InlineKeyboardButton(f"{i+1}", callback_data=f"option {i}")
        options_kb.insert(option_btn)
    options_kb.add(InlineKeyboardButton(f"{emojis.chequered_flag} завершить выбор опций",
                                        callback_data="complete options"))
    await state_groups.waiting_for_option_chosen.set()
    await message.answer("Выберите одну из опций:")
    await message.answer("\n".join(options_text), reply_markup=options_kb)
async def claim_tmp_option_chosen(callback_query: types.CallbackQuery, state: FSMContext, claim_part: str):
    """Handle an inline-keyboard option pick: add it to the FSM's chosen options.

    The callback data has the form "option <index>"; duplicates are reported
    to the user instead of being added twice.
    """
    chosen_option_index: int = int(callback_query.data.split(" ")[1])
    repository: Repository = Repository()
    claim_theme: Optional[str] = repository.get_current_claim_theme(callback_query.from_user.id)
    options: Optional[List[str]] = repository.get_claim_tmp_options(claim_theme, claim_part)
    chosen_option: str = options[chosen_option_index]
    user_data = await state.get_data()
    chosen_options: List[str]
    if "chosen_options" in user_data.keys():
        chosen_options = user_data["chosen_options"]
        if chosen_option not in chosen_options:
            chosen_options.append(chosen_option)
            await callback_query.answer(text="Опциональный вариант успешно добавлен.", show_alert=True)
        else:
            await callback_query.answer(text="Данная опция уже была добавлена ранее.", show_alert=True)
    else:
        chosen_options = [chosen_option]
        await callback_query.answer(text="Опциональный вариант успешно добавлен.", show_alert=True)
    await state.update_data(chosen_options=chosen_options)
async def show_claim_tmp_example(message: types.Message, claim_part):
    """Reply with filled-in template examples for `claim_part`, if any exist.

    Example templates contain placeholders that are substituted with values
    computed from the user's stored claim data (see get_placeholders).
    """
    repository: Repository = Repository()
    claim_theme: Optional[str] = repository.get_current_claim_theme(message.from_user.id)
    examples: Optional[List[str]] = repository.get_claim_tmp_examples(claim_theme, claim_part)
    next_actions_kb: ReplyKeyboardMarkup = get_next_actions_kb()
    if examples is None or len(examples) == 0:
        await message.reply("Для данной части примеров не найдено.")
        await message.answer("Введите свой вариант самостоятельно. "
                             "Или выберите дальнейшее действие с помощью клавиатуры",
                             reply_markup=next_actions_kb)
        return
    claim_data = repository.get_claim_data(message.from_user.id, claim_theme)
    placeholders = get_placeholders(claim_data["claim_data"])
    for i, example in enumerate(examples):
        await message.reply(f"Пример №{i+1}:\n{example.format(**placeholders)}")
    await message.answer("Введите свой вариант самостоятельно. "
                         "Или выберите дальнейшее действие с помощью клавиатуры",
                         reply_markup=next_actions_kb)
async def process_complete_part_editing(message: types.Message, state: FSMContext, claim_part: str):
    """Persist the finished claim part into the repository and reset the FSM.

    The accumulated FSM state data is stored under claim_data.<claim_part>,
    then the claim-parts keyboard is shown again.
    """
    display_name: str = TERM_DISPLAY_NAME_MAP[claim_part]
    await message.answer(f"Данные раздела '{display_name}' успешно заполнены.", reply_markup=ReplyKeyboardRemove())
    user_id = message.from_user.id
    user_data = await state.get_data()
    repository: Repository = Repository()
    claim_data: Optional[dict] = repository.get_claim_data(user_id)
    new_claim_data: dict = {
        f"claim_data.{claim_part}": user_data
    }
    repository.update_record("claim-data", claim_data["_id"], new_claim_data)
    await state.finish()
    claim_parts_kb: ReplyKeyboardMarkup = get_claim_parts_kb(message.from_user.id)
    await message.answer("Выберите часть искового заявления для заполнения", reply_markup=claim_parts_kb)
def get_placeholders(claim_data: dict) -> dict:
    """Build the placeholder dict used to fill template examples.

    NOTE(review): this block's original indentation was lost; the nesting of
    the payoff calculation relative to the out-of-work (oof) block is
    reconstructed from the guards each part checks -- confirm against the
    upstream repository.
    """
    placeholders: dict = {
        "start_work_date": claim_data["story"]["start_work_date"].strftime("%d.%m.%Y"),
        "salary": claim_data["story"]["user_salary"],
    }
    current_date: datetime = datetime.now()
    end_work_date = claim_data["story"].get("end_work_date")
    avr_salary = claim_data["story"].get("avr_salary")
    if end_work_date is not None and avr_salary is not None:
        placeholders["current_date"] = current_date.strftime("%d.%m.%Y")
        placeholders["avr_salary"] = avr_salary
        # Out-of-work period starts the day after the last working day.
        start_oof_date: datetime = end_work_date + timedelta(days=1)
        placeholders["start_oof_date"] = start_oof_date.strftime("%d.%m.%Y")
        oof_profit_calc: OOFCalculation = calc_oof_profit(start_oof_date, current_date, avr_salary)
        placeholders["oof_days"] = oof_profit_calc.oof_days
        placeholders["oof_profit"] = oof_profit_calc.oof_profit
    payoff_date = claim_data["story"].get("payoff_date")
    if payoff_date is not None:
        payoff_profit_calc: PayOffCalculation = calc_payoff_profit(payoff_date,
                                                                   claim_data["story"]["pay_day_1"],
                                                                   claim_data["story"]["payment_1"],
                                                                   claim_data["story"]["pay_day_2"],
                                                                   claim_data["story"].get("payment_2"),
                                                                   current_date)
        placeholders["payoff_profit"] = payoff_profit_calc.payoff_profit
        placeholders["defendant"] = claim_data["head"]["chosen_employer_name"]
        placeholders["compensation"] = payoff_profit_calc.compensation
    return placeholders
| StarcoderdataPython |
3298576 | <reponame>AlexRogalskiy/DevArtifacts<filename>master/kismet-2018-08-BETA1/kismet-2018-08-BETA1/capture_freaklabs_zigbee/KismetCaptureFreaklabsZigbee/__init__.py<gh_stars>1-10
#!/usr/bin/env python2
"""
freaklabs zigbee sniffer source
accepts standard source hop options
accepts additional options:
device=/path/to/serial
baud=baudrate
band=800|900|2400
Based in part on the Sensniff code from:
https://github.com/freaklabs/sensniff-freaklabs.git
Under the following license:
Copyright (c) 2012, <NAME> (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the owner nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import argparse
from datetime import datetime
import json
import os
import serial
import struct
import subprocess
import sys
import threading
import time
import uuid
import KismetExternal
# pcap DLT values for IEEE 802.15.4 captures (without / with trailing FCS).
LINKTYPE_IEEE802_15_4_NOFCS = 230
LINKTYPE_IEEE802_15_4 = 195
NETWORK = LINKTYPE_IEEE802_15_4_NOFCS
# sensniff serial-protocol command codes.
CMD_FRAME = 0x00
CMD_CHANNEL = 0x01
CMD_GET_CHANNEL = 0x81
CMD_SET_CHANNEL = 0x82
SNIFFER_PROTO_VERSION = 1
class FreaklabException(Exception):
    """Raised for any Freaklabs capture-device I/O or protocol error."""
    pass
class SerialInputHandler(object):
    """Reads sensniff-framed 802.15.4 packets from a Freaklabs serial device.

    Frames on the wire are: 4-byte magic, 1-byte protocol version, 1-byte
    command, 1-byte length, then payload. A legacy (pre-versioned) framing is
    also tolerated.
    """
    def __init__(self, port, baudrate):
        # Frame magics: current sensniff magic and the legacy "Snif" ASCII one.
        self.__sensniff_magic_legacy = struct.pack('BBBB', 0x53, 0x6E, 0x69, 0x66)
        self.__sensniff_magic = struct.pack('BBBB', 0xC1, 0x1F, 0xFE, 0x72)
        try:
            self.port = serial.Serial(port = port,
                                      baudrate = baudrate,
                                      bytesize = serial.EIGHTBITS,
                                      parity = serial.PARITY_NONE,
                                      stopbits = serial.STOPBITS_ONE,
                                      xonxoff = False,
                                      rtscts = False,
                                      timeout = 0.1)
            self.port.flushInput()
            self.port.flushOutput()
        except (serial.SerialException, ValueError, IOError, OSError) as e:
            raise FreaklabException("Could not open freaklabs device: {}".format(e))

    def read_frame(self):
        """Return one captured frame's payload bytes, or '' when nothing usable arrived.

        Raises FreaklabException on serial I/O errors.
        """
        try:
            # Read the magic + 1 more byte
            b = self.port.read(5)
            size = len(b)
        except (IOError, OSError) as e:
            raise FreaklabException("Error reading port: {}".format(e))
        if size == 0:
            # Timeout with no data at all.
            return b
        if size < 5:
            self.port.flushInput()
            return ''
        if b[0:4] not in (self.__sensniff_magic, self.__sensniff_magic_legacy):
            # Peripheral UART output - print it
            per_out = self.port.readline().rstrip()
            return ''
        # If we reach here:
        # Next byte == 1: Proto version 1, header follows
        # Next Byte != 1 && < 128. Old proto version. Frame follows, length == the byte
        b = bytearray(b)
        if b[4] != SNIFFER_PROTO_VERSION:
            # Legacy contiki sniffer support. Will slowly fade away
            size = b[4]
            try:
                b = self.port.read(size)
            except (IOError, OSError) as e:
                raise FreaklabException("Error reading port: {}".format(e))
                return  # NOTE(review): unreachable after raise -- dead code in original
            if len(b) != size:
                # We got the magic right but subsequent bytes did not match
                # what we expected to receive
                self.port.flushInput()
                return ''
            return b
        # If we reach here, we have a packet of proto ver SNIFFER_PROTO_VERSION
        # Read CMD and LEN
        try:
            b = self.port.read(2)
        except (IOError, OSError) as e:
            raise FreaklabException("Error reading port: {}".format(e))
            return  # NOTE(review): unreachable after raise -- dead code in original
        # NOTE(review): this checks the stale `size` from the 5-byte read above;
        # it looks like it was meant to check len(b) of the 2-byte read. Confirm.
        if size < 2:
            self.port.flushInput()
            return ''
        b = bytearray(b)
        cmd = b[0]
        length = b[1]
        # Read the frame or command response
        b = self.port.read(length)
        if len(b) != length:
            # We got the magic right but subsequent bytes did not match
            # what we expected to receive
            self.port.flushInput()
            return ''
        # If we reach here, b holds a frame or a command response of length len
        if cmd == CMD_FRAME:
            return b
        # If we reach here, we have a command response
        b = bytearray(b)
        # if cmd == CMD_CHANNEL:
        #     # We'll only ever see this if the user asked for it, so we are
        #     # running interactive. Print away
        #     print 'Sniffing in channel: %d' % (b[0],)
        # else:
        #     logger.warn("Received a command response with unknown code")
        return ''

    def __write_command(self, cmd):
        # Every command is prefixed with the magic and protocol version.
        self.port.write(self.__sensniff_magic)
        self.port.write(bytearray([SNIFFER_PROTO_VERSION]))
        self.port.write(cmd)
        self.port.flush()

    def set_channel(self, channel):
        """Ask the sniffer to tune to `channel` (1-byte payload)."""
        self.__write_command(bytearray([CMD_SET_CHANNEL, 1, channel]))

    def get_channel(self):
        """Ask the sniffer to report its current channel."""
        self.__write_command(bytearray([CMD_GET_CHANNEL]))
class KismetFreaklabsZigbee(object):
    """Kismet datasource bridging a Freaklabs/sensniff Zigbee sniffer to Kismet.

    Wires the Kismet external-datasource callbacks (probe/open/configure/list)
    to a SerialInputHandler, and runs background threads for channel hopping
    and frame capture.
    """
    def __init__(self):
        # Channel number -> center frequency in MHz (802.15.4 channel plan).
        self.frequencies = {
            0: 868,
            1: 906,
            2: 908,
            3: 910,
            4: 912,
            5: 914,
            6: 916,
            7: 918,
            8: 920,
            9: 922,
            10: 924,
            11: 2405,
            12: 2410,
            13: 2415,
            14: 2420,
            15: 2425,
            16: 2430,
            17: 2435,
            18: 2440,
            19: 2445,
            20: 2450,
            21: 2455,
            22: 2460,
            23: 2465,
            24: 2470,
            25: 2475,
            26: 2480
        }

        # Band name -> list of channel numbers (as strings) selectable in Kismet.
        self.band_map = {}
        self.band_map["800"] = ["0"]
        self.band_map["900"] = []
        self.band_map["2400"] = []
        for c in range(1, 12):
            self.band_map["900"].append("{}".format(c))
        for c in range(11, 27):
            self.band_map["2400"].append("{}".format(c))

        # Default source options, overridable via the source definition.
        self.defaults = {}
        self.defaults['device'] = "/dev/ttyUSB0"
        self.defaults['baudrate'] = "57600"
        self.defaults['band'] = "900"
        self.defaults['name'] = None

        self.hop_thread = None
        self.monitor_thread = None

        # Channel-hopping state, guarded by chan_config_lock.
        self.chan_config_lock = threading.RLock()
        self.chan_config = {}
        self.chan_config['chan_pos'] = 0
        self.chan_config['hopping'] = True
        self.chan_config['channel'] = "0"
        self.chan_config['hop_channels'] = []
        self.chan_config['hop_rate'] = 5
        self.chan_config['chan_skip'] = 0
        self.chan_config['chan_offset'] = 0

        self.serialhandler = None

        parser = argparse.ArgumentParser(description='Kismet datasource to capture from Freaklabs Zigbee hardware',
                                         epilog='Requires Freaklabs hardware (or compatible SenSniff-based device)')
        parser.add_argument('--in-fd', action="store", type=int, dest="infd")
        parser.add_argument('--out-fd', action="store", type=int, dest="outfd")
        parser.add_argument('--connect', action="store", dest="connect")
        parser.add_argument("--source", action="store", dest="source")

        self.config = parser.parse_args()

        if not self.config.connect == None and self.config.source == None:
            print("You must specify a source with --source when connecting to a remote Kismet server")
            sys.exit(0)

        self.proberet = None

        # When a source definition is given on the command line, probe it
        # locally before registering with the (possibly remote) server.
        if not self.config.source == None:
            (source, options) = KismetExternal.Datasource.parse_definition(self.config.source)
            if source == None:
                print("Could not parse the --source option; this should be a standard Kismet source definition.")
                sys.exit(0)
            self.proberet = self.datasource_probesource(source, options)
            if self.proberet == None:
                print("Could not configure local source {}, check your source options and config.")
                sys.exit(0)
            if not "success" in self.proberet:
                print("Could not configure local source {}, check your source options and config.")
                if "message" in self.proberet:
                    print(self.proberet["message"])
                sys.exit(0)
            if not self.proberet["success"]:
                print("Could not configure local source {}, check your source options and config.")
                if "message" in self.proberet:
                    print(self.proberet["message"])
                sys.exit(0)
            print("Connecting to remote server {}".format(self.config.connect))

        self.kismet = KismetExternal.Datasource(self.config.infd, self.config.outfd, remote = self.config.connect)

        self.kismet.set_configsource_cb(self.datasource_configure)
        self.kismet.set_listinterfaces_cb(self.datasource_listinterfaces)
        self.kismet.set_opensource_cb(self.datasource_opensource)
        self.kismet.set_probesource_cb(self.datasource_probesource)

        # If we're connecting remote, kick a newsource
        if self.proberet:
            print("Registering remote source {} {}".format('freaklabszigbee', self.config.source))
            self.kismet.send_datasource_newsource(self.config.source, 'freaklabszigbee', self.proberet['uuid'])

        self.kismet.start()

    def is_running(self):
        """True while the Kismet datasource protocol loop is alive."""
        return self.kismet.is_running()

    def __start_hopping(self):
        """Start (once) the background thread that cycles through hop_channels."""
        def hop_func():
            while self.chan_config['hopping']:
                # NOTE(review): wait_usec is computed but never slept on, so
                # the hop loop spins as fast as the serial write allows --
                # confirm whether a time.sleep() was intended here.
                wait_usec = 1.0 / self.chan_config['hop_rate']
                try:
                    self.chan_config_lock.acquire()
                    # NOTE(review): `c` is the position modulo the list length,
                    # i.e. an index, and is passed to set_channel directly
                    # rather than hop_channels[c] -- confirm intended.
                    c = self.chan_config['chan_pos'] % len(self.chan_config['hop_channels'])
                    self.serialhandler.set_channel(c)
                except FreaklabException as e:
                    self.kismet.send_error_report(message = "Could not tune to {}: {}".format(self.chan_config['chan_pos'], e))
                    break
                finally:
                    self.chan_config_lock.release()
                self.chan_config['chan_pos'] = self.chan_config['chan_pos'] + 1
            self.hop_thread = None
        if self.hop_thread:
            return
        self.hop_thread = threading.Thread(target = hop_func)
        self.hop_thread.daemon = True
        self.hop_thread.start()

    def __start_monitor(self):
        """Start (once) the capture thread forwarding frames to Kismet."""
        def mon_func():
            while self.kismet.is_running():
                try:
                    raw = self.serialhandler.read_frame()
                except FreaklabException as e:
                    self.kismet.send_error_report(message = "Error reading from zigbee device: {}".format(e))
                    break
                if len(raw) == 0:
                    continue
                # Wrap the raw 802.15.4 frame in a Kismet data report.
                packet = KismetExternal.datasource_pb2.SubPacket()
                dt = datetime.now()
                packet.time_sec = int(time.mktime(dt.timetuple()))
                packet.time_usec = int(dt.microsecond)
                packet.dlt = LINKTYPE_IEEE802_15_4_NOFCS
                packet.size = len(raw)
                packet.data = raw
                self.kismet.send_datasource_data_report(full_packet = packet)
            self.monitor_thread = None
        if self.monitor_thread:
            return
        self.monitor_thread = threading.Thread(target = mon_func)
        self.monitor_thread.daemon = True
        self.monitor_thread.start()

    # We can't really list interfaces other than to guess about serial ports which
    # seems like a bad idea; maybe we do that, eventually
    def datasource_listinterfaces(self, seqno):
        interfaces = []
        self.kismet.send_datasource_interfaces_report(seqno, interfaces)

    def __get_uuid(self, opts):
        """Derive a stable UUID from the device/baud/band/name options."""
        uhash = KismetExternal.Datasource.adler32("{}{}{}{}".format(opts['device'], opts['baudrate'], opts['band'], opts['name']))
        uhex = "0000{:02X}".format(uhash)
        return KismetExternal.Datasource.make_uuid("kismet_cap_freaklabs_zigbee", uhex)

    # Implement the probesource callback for the datasource api
    def datasource_probesource(self, source, options):
        """Validate a 'freaklabs' source definition; returns a result dict or None."""
        ret = {}
        if not source == "freaklabs":
            return None
        opts = options
        for x in self.defaults:
            opts.setdefault(x, self.defaults[x])
        ret['uuid'] = self.__get_uuid(opts)
        # Try opening the serial device as the probe.
        try:
            SerialInputHandler(opts['device'], int(opts['baudrate']))
        except FreaklabException as e:
            ret['success'] = False
            ret['message'] = "{}".format(e)
            return ret
        if not opts['band'] in self.band_map:
            ret['success'] = False
            ret['message'] = "Unknown band {}".format(opts['band'])
            return ret
        band = self.band_map[opts['band']]
        ret['channel'] = band[0]
        ret['channels'] = band
        ret['capture_interface'] = opts['device']
        ret['hardware'] = "freaklabs-{}".format(opts['band'])
        ret['success'] = True
        return ret

    def datasource_opensource(self, source, options):
        """Open the serial device for capture and start the monitor thread."""
        ret = {}
        if not source == "freaklabs":
            return None
        opts = options
        for x in self.defaults:
            opts.setdefault(x, self.defaults[x])
        ret['uuid'] = self.__get_uuid(opts)
        try:
            self.serialhandler = SerialInputHandler(opts['device'], int(opts['baudrate']))
            self.serialhandler.get_channel()
        except FreaklabException as e:
            ret['success'] = False
            ret['message'] = "{}".format(e)
            return ret
        if not opts['band'] in self.band_map:
            ret['success'] = False
            ret['message'] = "Unknown band {}".format(opts['band'])
            return ret
        band = self.band_map[opts['band']]
        ret['phy'] = LINKTYPE_IEEE802_15_4_NOFCS
        ret['channel'] = band[0]
        ret['channels'] = band
        ret['capture_interface'] = opts['device']
        ret['hardware'] = "freaklabs-{}".format(opts['band'])
        ret['success'] = True
        self.__start_monitor()
        return ret

    def datasource_configure(self, seqno, config):
        """Handle a channel-set or hopping-config request from Kismet."""
        ret = {}
        if config.HasField('channel'):
            # Lock to a single channel; stop hopping.
            self.chan_config_lock.acquire()
            self.chan_config['hopping'] = False
            self.chan_config['channel'] = config.channel.channel
            ret['channel'] = config.channel.channel
            self.chan_config_lock.release()
        elif config.HasField('hopping'):
            self.chan_config_lock.acquire()
            if config.hopping.HasField('rate'):
                self.chan_config['hop_rate'] = config.hopping.rate
            if len(config.hopping.channels):
                self.chan_config['hop_channels'] = []
                for c in config.hopping.channels:
                    self.chan_config['hop_channels'].append(c)
            self.chan_config['hopping'] = True
            self.chan_config_lock.release()
            # Echo its config back at it
            ret['full_hopping'] = config.hopping
        ret['success'] = True
        if self.chan_config['hopping'] and not self.hop_thread:
            self.__start_hopping()
        return ret
| StarcoderdataPython |
105484 | import logging
import click
import obelisk
from pathlib import Path
logger = logging.getLogger(__name__)
@click.command()
@click.option("-c", "--convert", is_flag=True,
              help="convert file (currently the only mode)")
@click.option("-i", "--input", prompt="Input filepath: ",
              help="Input filepath. Will prompt the user if empty")
@click.option("-d", "--destination", prompt="Destination filepath: ",
              help="Output directory. Will prompt the user if empty. Will create if it doesn't exist.")
def main(convert, input, destination):
    """Command-line entry point.

    With ``--convert``, splits the file at *input* into *destination* using
    ``obelisk.FileSplitter``; any other invocation logs an error.

    Note: the parameter is named ``input`` (shadowing the builtin) because
    click derives it from the ``--input`` option name; renaming it would
    break the option mapping.
    """
    if convert:
        splitter = obelisk.FileSplitter(input, destination)
        splitter.process_input()
        logger.info("Conversion completed!")
    else:
        logger.error("Directives besides 'convert' are not implemented!")
# Invoke the click command only when executed as a script (click parses argv).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
8432 | <gh_stars>0
"""
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# from generators.common import Generator
import cv2
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path as osp
from collections import OrderedDict
import os
def _parse(value, function, fmt):
    """Convert *value* with *function*, re-wrapping conversion failures.

    Returns ``function(value)``.  If the conversion raises ``ValueError``,
    a new ``ValueError`` built from ``fmt.format(e)`` is raised instead,
    with the original traceback context suppressed via ``six.raise_from``.
    """
    try:
        result = function(value)
    except ValueError as e:
        raise_from(ValueError(fmt.format(e)), None)
    else:
        return result


def _read_classes(csv_reader):
    """Parse the class-mapping file rows produced by *csv_reader*.

    Each row must be ``class_name,class_id``.  Returns an ``OrderedDict``
    mapping class name -> integer id, preserving file order.  Raises
    ``ValueError`` on malformed rows, malformed ids, or duplicate names.
    """
    mapping = OrderedDict()
    for line_no, row in enumerate(csv_reader, 1):
        try:
            class_name, class_id = row
        except ValueError:
            raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line_no)), None)
        parsed_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line_no))

        if class_name in mapping:
            raise ValueError('line {}: duplicate class name: \'{}\''.format(line_no, class_name))
        mapping[class_name] = parsed_id
    return mapping
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
    """
    Read quadrangle (4-corner) annotations from the csv_reader.
    Args:
        csv_reader: csv reader of args.annotations_path
        classes: list[str] all the class names read from args.classes_path
        detect_text: when True, rows whose class is '###' (ignore marker) are
            skipped and every remaining row's class is forced to 'text'.
    Returns:
        result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
                                                  'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}
    Raises:
        ValueError: on malformed rows, malformed coordinates, or unknown class names.
    """
    result = OrderedDict()
    for line, row in enumerate(csv_reader, 1):
        try:
            img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]
            # An image path is registered even if its row is later skipped,
            # so images without annotations still appear (with an empty list).
            if img_file not in result:
                result[img_file] = []

            # If a row contains only an image path, it's an image without annotations.
            if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):
                continue

            x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
            y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
            x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
            y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
            x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))
            y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))
            x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))
            y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))

            # check if the current class name is correctly present
            if detect_text:
                if class_name == '###':
                    # '###' marks unreadable/ignored text regions in OCR datasets.
                    continue
                else:
                    class_name = 'text'

            if class_name not in classes:
                raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')

            result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
                                     'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
        except ValueError:
            raise_from(ValueError(
                f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,\''),
                None)

    return result
def _read_annotations(csv_reader, classes, base_dir):
    """
    Read axis-aligned box annotations from the csv_reader.
    Args:
        csv_reader: csv reader of args.annotations_path
        classes: list[str] all the class names read from args.classes_path
        base_dir: currently unused; kept for call-site compatibility.
    Returns:
        result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}
    Raises:
        ValueError: on malformed rows, malformed coordinates, or unknown class names.
    """
    result = OrderedDict()
    for line, row in enumerate(csv_reader, 1):
        try:
            img_file, x1, y1, x2, y2 = row[:5]
            # The class is derived from the first path component of the image
            # file, not from a CSV column (dataset laid out as <class>/<file>).
            class_name = img_file.split("/")[0]
            if img_file not in result:
                result[img_file] = []

            # If a row contains only an image path, it's an image without annotations.
            if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
                continue

            x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
            y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
            x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
            y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))

            if class_name not in classes:
                raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')

            result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name,
                                     'filename':img_file})
        except ValueError:
            raise_from(ValueError(
                f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),
                None)

    return result
def _open_for_csv(path):
    """Open *path* with flags suitable for ``csv.reader``.

    Python 3 wants text mode with universal newlines disabled at the file
    level (``newline=''``); Python 2 wants binary mode ``'rb'``.
    """
    if sys.version_info[0] >= 3:
        return open(path, 'r', newline='')
    return open(path, 'rb')
def load_image(path):
    """Load the image at *path* and return it as an RGB array.

    OpenCV reads images in BGR channel order, so the result is converted
    to RGB before being returned.
    """
    bgr = cv2.imread(path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
3397804 | # Symbol
# Generate "random6.txt": 20000 lines of 20 characters, cycling through a
# fixed sequence of punctuation symbols continuously across line breaks.
symbol = '!@#$%^&*()-+{}|:"<>?[]\;\',./'
line_length = 20
text_length = 20000

lines = []
index = 0
for _ in range(text_length):
    chars = []
    for _ in range(line_length):
        index %= len(symbol)
        chars.append(symbol[index])
        index += 1
    lines.append(''.join(chars))

# 'with' guarantees the handle is closed even on error (the original leaked
# it), and a single join/write replaces repeated string concatenation.
with open('random6.txt', 'w') as symbol_out:
    symbol_out.write('\n'.join(lines) + '\n')
| StarcoderdataPython |
1610010 | <filename>murano/dsl/constants.py<gh_stars>1-10
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import semantic_version
# Resource limits applied to YAQL expression evaluation.
EXPRESSION_MEMORY_QUOTA = 512 * 1024
ITERATORS_LIMIT = 2000

# Keys for values stored in the DSL execution context.
# NOTE(review): unlike the other CTX_* keys, this one has no '$' prefix —
# confirm that is intentional.
CTX_ACTIONS_ONLY = '?actionsOnly'
CTX_ALLOW_PROPERTY_WRITES = '$?allowPropertyWrites'
CTX_ARGUMENT_OWNER = '$?argumentOwner'
CTX_ATTRIBUTE_STORE = '$?attributeStore'
CTX_CALLER_CONTEXT = '$?callerContext'
CTX_CURRENT_INSTRUCTION = '$?currentInstruction'
CTX_CURRENT_EXCEPTION = '$?currentException'
CTX_CURRENT_METHOD = '$?currentMethod'
CTX_EXECUTOR = '$?executor'
CTX_EXECUTION_SESSION = '$?executionSession'
CTX_NAMES_SCOPE = '$?namesScope'
CTX_ORIGINAL_CONTEXT = '$?originalContext'
CTX_PACKAGE_LOADER = '$?packageLoader'
CTX_SKIP_FRAME = '$?skipFrame'
CTX_THIS = '$?this'
CTX_TYPE = '$?type'
CTX_VARIABLE_SCOPE = '$?variableScope'
CTX_YAQL_ENGINE = '$?yaqlEngine'

# Top-level sections of the object/deployment model.
DM_OBJECTS = 'Objects'
DM_OBJECTS_COPY = 'ObjectsCopy'
DM_ATTRIBUTES = 'Attributes'

# Metadata attribute names.
META_MURANO_METHOD = '?muranoMethod'
META_NO_TRACE = '?noTrace'
META_MPL_META = 'Meta'
META_USAGE = 'Usage'

# Fully-qualified names of the core library and its root object type.
CORE_LIBRARY = 'io.murano'
CORE_LIBRARY_OBJECT = 'io.murano.Object'

# Thread-local storage keys.
TL_CONTEXT = '__murano_context'
TL_ID = '__thread_id'
TL_SESSION = '__murano_execution_session'

# Supported MuranoPL runtime format versions.
RUNTIME_VERSION_1_0 = semantic_version.Version('1.0.0')
RUNTIME_VERSION_1_1 = semantic_version.Version('1.1.0')
RUNTIME_VERSION_1_2 = semantic_version.Version('1.2.0')
RUNTIME_VERSION_1_3 = semantic_version.Version('1.3.0')
| StarcoderdataPython |
1603058 | from django.db.models import Value, F, TextField
from django.db.models.functions import Concat
from sphinxql import indexes, fields
from .models import Document
class DocumentIndex(indexes.Index):
    """django-sphinxql full-text search index over Document records."""

    # Searchable "name": document type name and number joined with a space.
    name = fields.Text(Concat(F('type__name'), Value(' '), F('number'),
                              output_field=TextField()))
    summary = fields.Text('summary')  # indexed from Document.summary
    text = fields.Text('text')  # indexed from Document.text

    class Meta:
        model = Document
        # Series II documents are deliberately excluded from the index.
        query = Document.objects.exclude(dr_series='II')
        # Number of rows fetched per batch while building the index.
        range_step = 10000
| StarcoderdataPython |
1635849 | import random
import random

# Simple number-guessing game: pick a random number in 1..9 and prompt the
# player until they guess it.
randNum = random.randint(1, 9)
while True:
    # Robustness fix: the original crashed with ValueError on non-numeric
    # input; now the player is re-prompted instead.
    try:
        guess = int(input("Guess the number (1~9) "))
    except ValueError:
        print("Please enter a whole number.")
        continue
    if guess < randNum:
        print("Your guessed is too low.")
    elif guess > randNum:
        print("Your guessed is too high.")
    else:
        print("You've guessed it right!")
        break
| StarcoderdataPython |
1606524 | <filename>main.py<gh_stars>0
# Announce the version-control system of choice on stdout.
message = "git is my vcs of choice"
print(message)
165235 | <reponame>OrangutanGaming/Nexus-Stats-py<filename>nexus_stats/API.py
import requests
from nexus_stats.profile import Profile
from nexus_stats.exceptions import *
class Requester():
    """A connection to the Nexus Stats API.

    Wraps the base API endpoint and optionally remembers a default username
    so username-related calls can omit it.

    Parameters
    ----------
    user_name : Optional[str]
        Default username for username-related requests.  If omitted, a
        username must be supplied when calling those functions.

    Attributes
    -----------
    url : [str]
        The base url used for the API.
    user_name : Optional[str]
        The username given when the instance is created, or None.
    """

    def __init__(self, **options):
        self.url = "https://api.nexus-stats.com"
        self.user_name = options.get("user_name", None)

    def get_user_profile(self, userName=None):
        """Return a Profile for *userName*, falling back to the default.

        Raises MissingInput when neither an explicit username nor a default
        one is available.
        """
        name = userName or self.user_name
        if not name:
            raise MissingInput("get_user_profile", "userName")
        return Profile(self.url, name)
| StarcoderdataPython |
23953 | <reponame>maralla/validator.vim
# -*- coding: utf-8 -*-
from validator import Validator
class VimVint(Validator):
    """Validator backend that lints VimL files with the external ``vint`` tool."""

    __filetype__ = 'vim'

    # Checker binary and its invocation flags (-w: warnings, no ANSI color).
    checker = 'vint'
    args = '-w --no-color'

    # Parses "<file>:<line>:<col>: <message>" lines from vint output.
    regex = r"""
            .+?:
            (?P<lnum>\d+):
            (?P<col>\d+):
            \s(?P<text>.+)"""
| StarcoderdataPython |
3256954 |
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from fe import *
from upload import *
from login import *
from pathlib import Path
import sys, os, sqlite3, datetime
from shutil import rmtree
from apscheduler.schedulers.qt import QtScheduler
import datetime
class MainApp(QMainWindow):
    """Main window of the account/upload management tool.

    Wires the Qt Designer UI (``Ui_MainWindow``) to a local SQLite database
    (``books.db``) of accounts, drives Upload/Login worker threads, and uses
    an APScheduler ``QtScheduler`` for time-scheduled uploads.
    """

    def __init__(self):
        """Build the UI, connect button signals, open the DB and load accounts."""
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.ui.tabWidget.tabBar().setVisible(False)
        self.handle_button()
        self.connect()
        self.Show_all()
        self.scheduler = QtScheduler()
        # 0 until the scheduler has been started once; it must only start once.
        self.setJob = 0

    def handle_button(self):
        """Connect every UI button to its handler slot."""
        self.ui.setfolder.clicked.connect(self.setDirection)
        self.ui.setcaption.clicked.connect(self.setDirection)
        self.ui.setfolder_3.clicked.connect(self.setDirection)
        self.ui.setcaption_3.clicked.connect(self.setDirection)
        self.ui.addsave.clicked.connect(self.Add_New)
        self.ui.refresh.clicked.connect(self.Show_all)
        self.ui.add.clicked.connect(self.tabAdd)
        self.ui.backadd.clicked.connect(self.back)
        self.ui.backedit.clicked.connect(self.back)
        self.ui.backedit_2.clicked.connect(self.back)
        self.ui.editsave.clicked.connect(self.Edit_user)
        self.ui.reup.clicked.connect(self.reup)
        self.ui.login.clicked.connect(self.login)
        self.ui.pushButton.clicked.connect(self.lenLich)
        self.ui.hengio.clicked.connect(self.henGio)
        self.ui.tabdown.clicked.connect(self.chonTab)
        self.ui.tabreup.clicked.connect(self.chonTab)

    def chonTab(self):
        """Switch the stacked-widget page based on which tab button was clicked."""
        but = self.sender()
        if but.objectName() == 'tabdown':
            self.ui.stackedWidget.setCurrentIndex(1)
        if but.objectName() == 'tabreup':
            self.ui.stackedWidget.setCurrentIndex(0)

    def reup(self):
        """Run a single-account upload or all accounts, per the mode combo box."""
        chedo = self.ui.comboBox.currentIndex()
        if chedo == 0:
            self.upload()
        else:
            self.runall()

    def runall(self):
        """Start an Upload worker thread for every fully filled table row."""
        count = int(self.ui.tableWidget.rowCount())
        # Keep references so the QThreads are not garbage-collected while running.
        self.chayall = []
        # The last table row is an empty placeholder, hence count-1.
        for x in range(0, count-1):
            try:
                profile = self.ui.tableWidget.item(x, 2).text()
                folder = self.ui.tableWidget.item(x, 3).text()
                caption = self.ui.tableWidget.item(x, 4).text()
                soluong = int(self.ui.comboBox_3.currentText())
                if profile != '' and folder != '' and caption != '':
                    chayall = Upload(profile, folder, caption, soluong)
                    self.chayall.append(chayall)
                    chayall.stt.connect(self.stt)
                    chayall.start()
            except Exception as e:
                self.stt(str(e))

    def upload(self):
        """Start an Upload worker for the currently selected table row."""
        try:
            r = self.ui.tableWidget.currentRow()
            profile = self.ui.tableWidget.item(r, 2).text()
            folder = self.ui.tableWidget.item(r, 3).text()
            caption = self.ui.tableWidget.item(r, 4).text()
            soluong = int(self.ui.comboBox_3.currentText())
            if profile != '' and folder != '' and caption != '':
                self.luong = Upload(profile, folder, caption, soluong)
                self.luong.stt.connect(self.stt)
                self.luong.start()
        except Exception as e:
            # Message (Vietnamese): "The selected data is invalid".
            self.stt('Dữ liệu chọn không đúng')

    def henGio(self):
        """Open the scheduling tab and populate the account selector from the DB."""
        self.ui.tabWidget.setCurrentIndex(3)
        self.ui.dateTimeEdit.setDateTime(QDateTime.currentDateTime())
        _translate = QCoreApplication.translate
        conn = sqlite3.connect('books.db')
        cur = conn.cursor()
        cur.execute('SELECT * from user')
        data = cur.fetchall()
        for row, form in enumerate(data):
            self.ui.selectAcc.addItem(_translate("MainWindow", form[1]))
        conn.close()

    def chay(self, profile, folder, caption, soluong):
        """Scheduler callback: run the scheduled upload and drop its queue row."""
        self.uphen = Upload(profile, folder, caption, soluong)
        self.uphen.stt.connect(self.stt)
        self.uphen.start()
        self.ui.tableWidget_2.removeRow(0)

    def lenLich(self):
        """Schedule an upload for the selected account at the chosen date/time."""
        # NOTE(review): the sqlite connection opened here is never closed.
        taikhoan = self.ui.selectAcc.currentText()
        conn = sqlite3.connect('books.db')
        cur = conn.cursor()
        cur.execute('SELECT * FROM user WHERE tai_khoan = ?',(taikhoan,))
        data = cur.fetchone()
        timeup = self.ui.dateTimeEdit.dateTime()
        # Only accept times strictly in the future.
        if datetime.datetime.now() < timeup.toPyDateTime():
            tgian = timeup.toPyDateTime().strftime('%H:%M %d/%m')
            soluong = int(self.ui.comboBox_6.currentText())
            rowCount = self.ui.tableWidget_2.rowCount()
            self.ui.tableWidget_2.insertRow(rowCount)
            self.ui.tableWidget_2.setItem(rowCount, 0, QTableWidgetItem(str(data[0])))
            self.ui.tableWidget_2.setItem(rowCount, 1, QTableWidgetItem(str(data[1])))
            self.ui.tableWidget_2.setItem(rowCount, 2, QTableWidgetItem(str(data[2])))
            self.ui.tableWidget_2.setItem(rowCount, 3, QTableWidgetItem(str(data[3])))
            self.ui.tableWidget_2.setItem(rowCount, 4, QTableWidgetItem(str(data[4])))
            self.ui.tableWidget_2.setItem(rowCount, 5, QTableWidgetItem(str(tgian)))
            self.ui.tableWidget_2.setItem(rowCount, 6, QTableWidgetItem(str(soluong)))
            self.scheduler.add_job(self.chay, 'date', next_run_time=timeup.toPyDateTime(), args=[data[2], data[3], data[4], soluong])
            # scheduler.add_job(tick, 'interval')
            if self.setJob == 0:
                self.scheduler.start()
                self.setJob = 1
        else:
            # Message (Vietnamese): "You must choose a time later than now".
            print('Bạn phải chọn thời gian lớn hơn hiện tại')

    def login(self):
        """Start a Login worker for the selected account row."""
        try:
            r = self.ui.tableWidget.currentRow()
            profile = self.ui.tableWidget.item(r, 2).text()
            sltime = int(self.ui.comboBox_2.currentText())
            self.dangnhap = Login(profile, sltime)
            self.dangnhap.start()
        except:
            # Message (Vietnamese): "You have not selected an account".
            self.stt('Bạn chưa chọn tài khoản')

    def stt(self, text):
        """Show *text* in the status bar (also used as worker-signal slot)."""
        self.statusBar().showMessage(text)

    def back(self):
        """Return to the main (account list) tab."""
        self.ui.tabWidget.setCurrentIndex(0)

    def tabAdd(self):
        """Open the 'add account' tab."""
        self.ui.tabWidget.setCurrentIndex(1)

    def connect(self):
        """Create the sqlite schema if missing.

        NOTE(review): the name shadows QObject.connect-style naming — this is a
        DB-setup method, not a Qt signal connection.
        """
        conn = sqlite3.connect('books.db')
        cur = conn.cursor()
        cur.execute(
            'CREATE TABLE IF NOT EXISTS user (id INTEGER PRIMARY KEY, tai_khoan text UNIQUE, profile text UNIQUE, folder text, caption text)')
        # cur.execute(
        #     'CREATE TABLE IF NOT EXISTS update_video (id INTEGER PRIMARY KEY, tai_khoan_vn text, tai_khoan_cn text, id_post_cn text)')
        # cur.execute(
        #     'CREATE TABLE IF NOT EXISTS user_post (id INTEGER PRIMARY KEY, id_post text, view_count integer, like_count integer, tai_khoan text)')
        # cur.execute('CREATE TABLE IF NOT EXISTS setting_general (id INTEGER PRIMARY KEY, time_post text)')
        # cur.execute('CREATE TABLE IF NOT EXISTS setting (id INTEGER PRIMARY KEY, time_post text, id_user text)')
        # cur.execute('CREATE TABLE IF NOT EXISTS che_do (id INTEGER PRIMARY KEY, status text)')
        conn.commit()
        conn.close()

    def Add_New(self):
        """Validate the 'add account' form and insert a new user row."""
        conn = sqlite3.connect('books.db')
        cur = conn.cursor()
        taikhoan = self.ui.username.text()
        profile = self.ui.profile.text()
        folder = self.ui.folder.text()
        caption = self.ui.caption.text()
        # Validation messages below are Vietnamese: "<field> must not be empty".
        if taikhoan == '':
            self.statusBar().showMessage('Tài khoản không được để trống')
            return 0
        if profile == '':
            self.statusBar().showMessage('Profile không được để trống')
            return 0
        if folder == '':
            self.statusBar().showMessage('Thư mục reup không được để trống')
            return 0
        if caption == '':
            self.statusBar().showMessage('Caption không được để trống')
            return 0
        try:
            cur.execute('INSERT INTO user(tai_khoan,profile,folder,caption) VALUES (?,?,?,?)',
                        (taikhoan, profile, folder, caption))
        except Exception as e:
            # UNIQUE constraint failures name the offending column.
            if 'tai_khoan' in str(e):
                self.statusBar().showMessage('Tài khoản này đã tồn tại')
            if 'profile' in str(e):
                self.statusBar().showMessage('Tên profile đã tồn tại')
            conn.commit()
            conn.close()
            return 0
        conn.commit()
        conn.close()
        self.Show_all()
        self.statusBar().showMessage('New user Added')
        self.ui.tabWidget.setCurrentIndex(0)

    def Show_all(self):
        """Reload the account table from the DB, adding Edit/Del buttons per row."""
        self.ui.tableWidget.clearContents()
        self.ui.tableWidget.setRowCount(0)
        conn = sqlite3.connect('books.db')
        cur = conn.cursor()
        cur.execute('SELECT * from user')
        data = cur.fetchall()
        self.ui.tableWidget.insertRow(0)
        for row, form in enumerate(data):
            for column, item in enumerate(form):
                self.ui.tableWidget.setItem(row, column, QTableWidgetItem(str(item)))
                column += 1
            self.btn_run = QPushButton('Edit')
            self.btn_run.clicked.connect(self.handleButtonClicked)
            self.ui.tableWidget.setCellWidget(row, 5, self.btn_run)
            self.btn_del = QPushButton('Del')
            self.btn_del.clicked.connect(self.handleButtonClicked)
            self.ui.tableWidget.setCellWidget(row, 6, self.btn_del)
            # Keep one empty trailing row as a placeholder.
            row_positon = self.ui.tableWidget.rowCount()
            self.ui.tableWidget.insertRow(row_positon)
        conn.close()

    def Edit_user(self):
        """Persist the edit form back to the row selected in the table."""
        r = self.ui.tableWidget.currentRow()
        uid = self.ui.tableWidget.item(r, 0).text()
        taikhoan = self.ui.username_3.text()
        profile = self.ui.profile_3.text()
        folder = self.ui.folder_3.text()
        caption = self.ui.caption_3.text()
        conn = sqlite3.connect('books.db')
        cur = conn.cursor()
        cur.execute(
            'UPDATE user SET tai_khoan=?, profile=?, folder=?, caption=? WHERE id=?',
            (taikhoan, profile, folder, caption, uid))
        # self.cur.execute('UPDATE update_video SET tai_khoan_vn=?, tai_khoan_cn=? WHERE tai_khoan_vn=?',
        #                  (tai_khoan, tai_khoan_ru, tai_khoan))
        # if self.cur.rowcount == 0:
        #     self.cur.execute('INSERT INTO update_video(tai_khoan_vn,tai_khoan_cn) VALUES (?,?)',
        #                      (tai_khoan, tai_khoan_ru))
        conn.commit()
        conn.close()
        self.statusBar().showMessage('User Updated')
        self.Show_all()
        self.ui.tabWidget.setCurrentIndex(0)
        # self.Search_Books()

    def Delete_user(self, uid):
        """Delete the user row with primary key *uid* (errors are swallowed)."""
        conn = sqlite3.connect('books.db')
        cur = conn.cursor()
        try:
            cur.execute('DELETE FROM user WHERE id=?', (int(uid),))
            # rmtree('data/cookie/' + cookie)
        except:
            pass
        conn.commit()
        conn.close()
        # NOTE(review): 'Deteted' typo is preserved — it is a runtime string.
        self.statusBar().showMessage('User Deteted')

    def handleButtonClicked(self):
        """Dispatch per-row Edit (column 5) / Del (column 6) button clicks."""
        button = QApplication.focusWidget()
        but = self.sender()
        # Locate the table cell that hosts the clicked button widget.
        index = self.ui.tableWidget.indexAt(button.pos())
        if index.isValid():
            if (index.column() == 5):
                self.ui.username_3.setText(self.ui.tableWidget.item(index.row(), 1).text())
                self.ui.profile_3.setText(self.ui.tableWidget.item(index.row(), 2).text())
                self.ui.folder_3.setText(self.ui.tableWidget.item(index.row(), 3).text())
                self.ui.caption_3.setText(self.ui.tableWidget.item(index.row(), 4).text())
                self.ui.tabWidget.setCurrentIndex(2)
                # folder = self.ui.folder.text()
                # limit = int(self.ui.limit.text())
                # cookie = self.ui.tableWidget.item(index.row(), 1).text()
                # self.thread = QThread()
                # self.ui.tableWidget.setItem(index.row(), 2, QTableWidgetItem("Running"))
                # obj = Reup(profile=1, limit=limit, folder=folder, cookie=cookie, row=index.row())
                # self.obj.moveToThread(self.thread)
                # self.thread.started.connect(self.obj.upload)
                # obj.fn.connect(self.ketthuc)
                # self.thread.append(obj)
                # obj.start()
                # self.obj.tq.connect(self.thread.quit)
                # self.thread.start()
            if (index.column() == 6):
                buttonReply = QMessageBox.question(self, 'Delete the account', "Do you want to delete this account?",
                                                   QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                if buttonReply == QMessageBox.Yes:
                    name = self.ui.tableWidget.item(index.row(), 0).text()
                    self.Delete_user(name)
                    self.ui.tableWidget.clearContents()
                    self.ui.tableWidget.setRowCount(0)
                    self.Show_all()

    def setDirection(self):
        """Open a folder/file picker and write the chosen path into the matching field."""
        btn = self.sender()
        if btn.objectName() == 'setfolder':
            path = str(Path().absolute())
            dir_ = QFileDialog.getExistingDirectory(self, "Select a folder", path, QFileDialog.ShowDirsOnly)
            if dir_:
                self.ui.folder.setText(str(dir_))
        if btn.objectName() == 'setfolder_3':
            path = str(Path().absolute())
            dir_ = QFileDialog.getExistingDirectory(self, "Select a folder", path, QFileDialog.ShowDirsOnly)
            if dir_:
                self.ui.folder_3.setText(str(dir_))
        if btn.objectName() == 'setcaption':
            path = str(Path().absolute())
            options = QFileDialog.Options()
            # options |= QFileDialog.DontUseNativeDialog
            fileName, _ = QFileDialog.getOpenFileName(self, "Select Caption File", "",
                                                      "Text Files (*.txt)")
            if fileName:
                self.ui.caption.setText(str(fileName))
        if btn.objectName() == 'setcaption_3':
            path = str(Path().absolute())
            options = QFileDialog.Options()
            # options |= QFileDialog.DontUseNativeDialog
            fileName, _ = QFileDialog.getOpenFileName(self, "Select Caption File", "",
                                                      "Text Files (*.txt)")
            if fileName:
                self.ui.caption_3.setText(str(fileName))
# Build the Qt application and enter its event loop when run as a script.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MainApp()
    window.show()
    app.exec_()
| StarcoderdataPython |
3327113 | # {'posCelula':posCelula, 'dimX':quantX, 'dimY':quantY}
#regras:
# quaisquer celulas com menos de 2 vizinhos, morre OK
# quaisquer celulas com 2 ou 3 vizinhos vive na proxima geração
# quaisquer celulas com mais de 3 vizinhos, morre OK
# quaisquer casas com 3 vizinhos vivos, torna-se viva na próxima geração
#Essa função verifica se as celulas existentes morrem ou vivem e
#retorna um array de arrays que representam as coordenadas das novas células
def killer(dados):
    """Apply the death rules of Conway's Game of Life to the live cells.

    ``dados['posCelula']`` is a list of ``[row, col]`` live-cell coordinates
    (1-based; ``dimX``/``dimY`` are present in ``dados`` but — as in the
    original — never enforced, so the grid is unbounded to the right/bottom).

    Returns a dict with:
      - ``posCelulaFututo``: cells with 2 or 3 live neighbours (survivors),
      - ``vizinhos``: unique, valid, currently-empty neighbour cells
        (candidates for the birth rule, which is applied elsewhere),
      - ``posCelula``: a copy of the input live-cell list.

    Improvements over the original: removed the unused
    ``celulasQueDevemMorrer`` local, and neighbour membership tests use a
    set of tuples (O(1)) instead of repeated list scans.
    """
    live_cells = list(dados['posCelula'])
    # Tuple set for O(1) liveness tests; output still uses the list form.
    live_set = {tuple(c) for c in live_cells}

    # Neighbour offsets in the original's scan order (kept so the order of
    # 'vizinhos' is unchanged).
    offsets = [(1, 0), (-1, 0), (0, 1), (0, -1),
               (1, 1), (-1, -1), (1, -1), (-1, 1)]

    def _is_valid(coords):
        # Coordinates are 1-based; anything below 1 is off-grid.
        return coords[0] >= 1 and coords[1] >= 1

    def _scan(cell):
        # Return (valid empty neighbours, count of live neighbours) of cell.
        empty = []
        alive = 0
        for dr, dc in offsets:
            neighbour = [cell[0] + dr, cell[1] + dc]
            if not _is_valid(neighbour):
                continue
            if tuple(neighbour) in live_set:
                alive += 1
            else:
                empty.append(neighbour)
        return empty, alive

    survivors = []
    spawn_candidates = []
    seen = set()
    for cell in live_cells:
        empty, alive = _scan(cell)
        for neighbour in empty:
            key = tuple(neighbour)
            if key not in seen:
                seen.add(key)
                spawn_candidates.append(neighbour)
        # Survival rule: exactly 2 or 3 live neighbours.
        if alive == 2 or alive == 3:
            survivors.append(cell)

    return {
        'vizinhos': spawn_candidates,
        'posCelulaFututo': survivors,
        'posCelula': live_cells
    }
#pra cada celula, verificar as casas ao redor
#retirar posiçoes invalidas (coordenadas menores que 1)
#a quantidade de posicoes validas é o num de vizinhos
#decidir se a celula vive ou morre | StarcoderdataPython |
1693261 | <gh_stars>0
#!/usr/bin/python3
"""
Library for Casambi Cloud api.
Request api_key at: https://developer.casambi.com/
"""
import uuid
import json
import logging
import datetime
import socket
from pprint import pformat
from typing import Tuple
from colorsys import rgb_to_hsv
import requests
import websocket
_LOGGER = logging.getLogger(__name__)
class CasambiApiException(Exception):
    """Raised when the Casambi cloud API returns an error or invalid response."""
class ConfigException(Exception):
    """Raised when the supplied Casambi configuration is invalid."""
class Casambi:
"""
Casambi api object
"""
def __init__(self, *, api_key, email, user_password, network_password, wire_id=1):
self.sock = None
self.web_sock = None
self.connected = False
self.network_id = None
self._session_id = None
self.wire_id = wire_id
self.api_key = api_key
self.email = email
self.user_password = <PASSWORD>
self.network_password = <PASSWORD>_password
    def create_user_session(self):
        """Create a user session against the Casambi cloud API.

        Authenticates with the user email/password, stores the session id and
        the id of the first network on the instance, and returns the session id.

        Raises CasambiApiException on a non-200 response.
        """
        url = "https://door.casambi.com/v1/users/session/"

        headers = {"Content-type": "application/json", "X-Casambi-Key": self.api_key}

        payload = {"email": self.email, "password": self.user_password}

        response = requests.post(url, json=payload, headers=headers)

        if response.status_code != 200:
            # NOTE(review): error text below contains the credentials payload
            # and the grammar slip "Got a invalid" — preserved as-is.
            reason = "create_user_session: headers: {},".format(headers)
            reason += " payload: {},".format(payload)
            reason += 'message: "Got a invalid status_code",'
            reason += "status_code: {},".format(response.status_code)
            reason += "#response: {}".format(response.text)
            raise CasambiApiException(reason)

        data = response.json()

        self._session_id = data["sessionId"]
        # Only the first network in the response is used.
        self.network_id = data["networks"][list(data["networks"].keys())[0]]["id"]

        _LOGGER.debug(f"data from create_user_session: {pformat(data)}")

        return data["sessionId"]
def create_network_session(self):
"""
Function for creating a network session in Casambis cloud api
"""
url = "https://door.casambi.com/v1/networks/session/"
headers = {
"X-Casambi-Key": self.api_key,
"Content-type": "application/json",
}
payload = {"email": self.email, "password": <PASSWORD>}
response = requests.post(url, json=payload, headers=headers)
if response.status_code != 200:
reason = "create_network_session: failed with"
reason += f"status_code: {response.status_code},"
reason += f"response: {response.text}"
raise CasambiApiException(reason)
data = response.json()
self.network_id = list(data.keys())[0]
self._session_id = data[self.network_id]["sessionId"]
return data.keys()
def get_network_information(self):
"""
Function for getting the network information from Casambis cloud api
"""
# GET https://door.casambi.com/v1/networks/{id}
url = f"https://door.casambi.com/v1/networks/{self.network_id}"
if not self._session_id:
raise CasambiApiException("No session id is set. Need to login!")
headers = {
"X-Casambi-Key": self.api_key,
"X-Casambi-Session": self._session_id,
"Content-type": "application/json",
}
response = requests.get(url, headers=headers)
if response.status_code != 200:
reason = "get_network_information: url: {}".format(url)
reason += "failed with status_code: {},".format(response.status_code)
reason += "response: {}".format(response.text)
raise CasambiApiException(reason)
data = response.json()
dbg_msg = f"get_network_information: headers: {headers}"
dbg_msg += "response: {data}"
_LOGGER.debug(dbg_msg)
return data
    def get_unit_state(self, *, unit_id):
        """Fetch the current state of one unit from the Casambi cloud API.

        Returns the decoded JSON payload for
        ``/networks/{network_id}/units/{unit_id}/state``.
        Raises CasambiApiException when no session is set or on a non-200
        response.
        """
        # GET https://door.casambi.com/v1/networks/{id}
        url = "https://door.casambi.com/v1/networks/"
        url += f"{self.network_id}/units/{unit_id}/state"

        if not self._session_id:
            raise CasambiApiException("No session id is set. Need to login!")

        headers = {
            "X-Casambi-Key": self.api_key,
            "X-Casambi-Session": self._session_id,
            "Content-type": "application/json",
        }

        response = requests.get(url, headers=headers)

        if response.status_code != 200:
            reason = "get_unit_state: url: {}".format(url)
            reason += "failed with status_code: {},".format(response.status_code)
            reason += "response: {}".format(response.text)
            raise CasambiApiException(reason)

        data = response.json()

        dbg_msg = f"get_unit_state: headers: {headers} response: {data}"

        _LOGGER.debug(dbg_msg)

        return data
    def ws_open(self) -> bool:
        """Open a websocket "wire" to the Casambi bridge and return success.

        Server wireStatus values (from the Casambi API docs):

        openWireSucceed         API key authentication failed. Either given key
        was invalid or WebSocket functionality is not enabled for it.

        keyAuthenticateFailed   API key authentication failed. Given key was
        invalid.

        keyAuthorizeFailed      API key authorize failed. Given key has not been
        authorized or WebSocket functionality is not enabled for it.

        invalidSession          Either access to given network is not authorized
        by session or given session is invalid.

        invalidValueType        Received values are not in correct value type,
        for example when expecting a number but receiving string value instead.

        invalidData             Received data is invalid and cannot be
        processed, for example expected list of items is in wrong data format.
        """
        url = "wss://door.casambi.com/v1/bridge/"

        # Unique reference for correlating this open request.
        reference = "{}".format(uuid.uuid1())

        if not self._session_id:
            raise CasambiApiException("No session id is set. Need to login!")

        if not self.network_id:
            raise CasambiApiException("Network id needs to be set!")

        message = {
            "method": "open",
            "id": self.network_id,
            "session": self._session_id,
            "ref": reference,
            "wire": self.wire_id,  # wire id
            "type": 1,  # Client type, use value 1 (FRONTEND)
        }

        # The API key is carried as the websocket subprotocol.
        self.web_sock = websocket.create_connection(url, subprotocols=[self.api_key])

        self.web_sock.send(json.dumps(message))

        result = self.web_sock.recv()

        data = json.loads(result)

        _LOGGER.debug(f"ws_open response: {data}")

        # Can get what ever like:
        # {'wire': 1, 'method': 'peerChanged', 'online': True}
        #
        # if data['wireStatus'] != 'openWireSucceed':
        #    reason = "ws_open_message: url: {},".format(url)
        #    reason += "message: {},".format(message)
        #    reason += 'reason: "failed with to open wire!"'
        #    reason += "response: {}".format(data)
        #    raise CasambiApiException(reason)

        # Success is signalled either by an explicit wireStatus or by a
        # peerChanged/online frame addressed to our wire id.
        if "wireStatus" in data and data["wireStatus"] == "openWireSucceed":
            return True

        if (
            (("method" in data) and (data["method"] == "peerChanged"))
            and (("wire" in data) and (data["wire"] == self.wire_id))
            and (("online" in data) and data["online"])
        ):
            return True

        return False
def turn_unit_off(self, *, unit_id: int):
"""
Function for turning a unit of using the websocket
"""
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
"expected unit_id to be an integer, got: {}".format(unit_id)
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
target_controls = {"Dimmer": {"value": 0}}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def turn_unit_on(self, *, unit_id):
"""
Response on ok:
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
reason = "expected unit_id to be an integer,"
reason += "got: {}".format(unit_id)
raise CasambiApiException(reason)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
target_controls = {"Dimmer": {"value": 1}}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_vertical(self, *, unit_id: int, value: float):
"""
Support for setting vertical (dual led value)
"""
target_value = value
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
f"expected unit_id to be an integer, got: {unit_id}"
)
# Unit_id needs to be an integer
if isinstance(value, float):
target_value = float(value)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
if target_value < 0.0:
raise CasambiApiException("Value needs to be between 0 and 1")
if target_value > 1.0:
raise CasambiApiException("Value needs to be between 0 and 1")
target_controls = {"Vertical": {"value": target_value}}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_target_controls(self, *, unit_id, target_controls):
"""
Response on ok:
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
f"expected unit_id to be an integer, got: {unit_id}"
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_value(self, *, unit_id: int, value):
"""
Response on ok:
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
# Unit_id needs to be an integer
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
f"expected unit_id to be an integer, got: {unit_id}"
)
if not (value >= 0 and value <= 1):
raise CasambiApiException("value needs to be between 0 and 1")
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
target_controls = {"Dimmer": {"value": value}}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_rgbw_color(
self, *, unit_id: int, color_value: Tuple[int, int, int, int]
):
"""
Setter for RGB color
"""
target_controls = None
(red, green, blue, white) = color_value
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
"expected unit_id to be an integer, got: {}".format(unit_id)
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
white_value = white / 255.0
# 'name': 'white', 'type': 'White', 'value': 0.0
target_controls = {
"RGB": {"rgb": f"rgb({red}, {green}, {blue})"},
"Colorsource": {"source": "RGB"},
"White": {"value": white_value},
}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
def set_unit_rgb_color(
self, *, unit_id: int, color_value: Tuple[int, int, int], send_rgb_format=False
):
"""
Setter for RGB color
"""
target_controls = None
(red, green, blue) = color_value
(hue, sat, value) = rgb_to_hsv(red, green, blue)
if isinstance(unit_id, int):
pass
elif isinstance(unit_id, str):
unit_id = int(unit_id)
elif isinstance(unit_id, float):
unit_id = int(unit_id)
else:
raise CasambiApiException(
"expected unit_id to be an integer, got: {}".format(unit_id)
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
if not send_rgb_format:
target_controls = {
"RGB": {"hue": round(hue, 1), "sat": round(sat, 1)},
"Colorsource": {"source": "RGB"},
}
else:
target_controls = {
"RGB": {"rgb": f"rgb({red}, {green}, {blue})"},
"Colorsource": {"source": "RGB"},
}
message = {
"wire": self.wire_id,
"method": "controlUnit",
"id": unit_id,
"targetControls": target_controls,
}
self.web_sock.send(json.dumps(message))
    def set_unit_color_temperature(self, *, unit_id: int, value: int, source="TW"):
        """
        Set a unit's color temperature.

        :param unit_id: unit to control; str/float ids are coerced to int.
        :param value: target temperature, in kelvin unless ``source == "mired"``.
        :param source: "TW" (value already in kelvin) or "mired" (converted).
        :raises CasambiApiException: on a bad unit_id or missing websocket.
        """
        target_value = value

        if source == "mired":
            # Convert mired to kelvin (mired = 1e6 / kelvin).
            target_value = round(1000000 / value)

        # Round to a multiple of 50 kelvin, mirroring the GUI.
        # NOTE(review): int(x / 50) * 50 + 50 always rounds *up* to the next
        # multiple of 50, not to the nearest one -- confirm against the GUI.
        if target_value % 50 != 0:
            target_value = int(target_value / 50) * 50 + 50

            dbg_msg = "set_unit_color_temperature "
            dbg_msg += f"converting target value to {target_value}"
            dbg_msg += " (nearest 50 kelvin like GUI)"

            _LOGGER.debug(dbg_msg)

        # Clamp into the [min, max] kelvin range the unit reports.
        (cct_min, cct_max, _) = self.get_supported_color_temperature(unit_id=unit_id)
        if target_value < cct_min:
            dbg_msg = "set_unit_color_temperature "
            dbg_msg += f"target_value: {target_value}"
            dbg_msg += " smaller than min supported temperature,"
            dbg_msg += " setting to min supported color temperature:"
            dbg_msg += f" {cct_min}"

            _LOGGER.debug(dbg_msg)

            target_value = cct_min
        elif target_value > cct_max:
            dbg_msg = "set_unit_color_temperature "
            dbg_msg += f"target_value: {target_value}"
            dbg_msg += " larger than max supported temperature,"
            dbg_msg += " setting to max supported color temperature:"
            dbg_msg += f" {cct_max}"

            _LOGGER.debug(dbg_msg)

            target_value = cct_max

        # Coerce unit_id to int (the API requires an integer id).
        if isinstance(unit_id, int):
            pass
        elif isinstance(unit_id, str):
            unit_id = int(unit_id)
        elif isinstance(unit_id, float):
            unit_id = int(unit_id)
        else:
            raise CasambiApiException(
                f"expected unit_id to be an integer, got: {unit_id}"
            )

        if not self.web_sock:
            raise CasambiApiException("No websocket connection!")

        # NOTE(review): the Colorsource is always reported as "TW" here,
        # regardless of the *source* argument -- confirm this is intended.
        target_controls = {
            "ColorTemperature": {"value": target_value},
            "Colorsource": {"source": "TW"},
        }

        message = {
            "wire": self.wire_id,
            "method": "controlUnit",
            "id": unit_id,
            "targetControls": target_controls,
        }

        self.web_sock.send(json.dumps(message))
def get_supported_color_temperature(
self, *, unit_id: int
) -> Tuple[int, int, float]:
"""
Return the supported color temperatures
Returns (0, 0, 0) if nothing is supported
"""
cct_min = 0
cct_max = 0
current = 0
data = self.get_unit_state(unit_id=unit_id)
if "controls" not in data:
return (cct_min, cct_max, current)
for control in data["controls"]:
if isinstance(control, list):
for inner_control in control:
if "type" in inner_control and inner_control["type"] == "CCT":
cct_min = inner_control["min"]
cct_max = inner_control["max"]
current = inner_control["value"]
if "type" in control and control["type"] == "CCT":
cct_min = control["min"]
cct_max = control["max"]
current = control["value"]
return (cct_min, cct_max, current)
def unit_supports_rgbw(self, *, unit_id: int) -> bool:
"""
Returns true if unit supports color temperature
{
'activeSceneId': 0,
'address': 'ffffff',
'condition': 0,
'controls': [[{'name': 'dimmer0', 'type': 'Dimmer', 'value': 0.0},
{'hue': 0.9882697947214076,
'name': 'rgb',
'rgb': 'rgb(255, 21, 40)',
'sat': 0.9176470588235294,
'type': 'Color'},
{'name': 'white', 'type': 'White', 'value': 0.0}]],
'dimLevel': 0.0,
'firmwareVersion': '26.24',
'fixtureId': 4027,
'groupId': 0,
'id': 14,
'name': 'Test RGB',
'on': True,
'online': True,
'position': 10,
'priority': 3,
'status': 'ok',
'type': 'Luminaire'}
"""
data = self.get_unit_state(unit_id=unit_id)
color = False
white = False
if "controls" not in data:
return False
for control in data["controls"]:
if isinstance(control, list):
for inner_control in control:
if "type" in inner_control and inner_control["type"] == "Color":
color = True
elif "type" in inner_control and inner_control["type"] == "White":
white = True
if "type" in control and control["type"] == "Color":
color = True
elif "type" in control and control["type"] == "White":
white = True
if color and white:
return True
return False
def unit_supports_rgb(self, *, unit_id: int) -> bool:
"""
Returns true if unit supports color temperature
{
'activeSceneId': 0,
'address': 'ffffff',
'condition': 0,
'controls': [[{'name': 'dimmer0', 'type': 'Dimmer', 'value': 0.0},
{'hue': 0.9882697947214076,
'name': 'rgb',
'rgb': 'rgb(255, 21, 40)',
'sat': 0.9176470588235294,
'type': 'Color'},
{'name': 'white', 'type': 'White', 'value': 0.0}]],
'dimLevel': 0.0,
'firmwareVersion': '26.24',
'fixtureId': 4027,
'groupId': 0,
'id': 14,
'name': '<NAME>',
'on': True,
'online': True,
'position': 10,
'priority': 3,
'status': 'ok',
'type': 'Luminaire'}
"""
data = self.get_unit_state(unit_id=unit_id)
if "controls" not in data:
return False
for control in data["controls"]:
if isinstance(control, list):
for inner_control in control:
if "type" in inner_control and inner_control["type"] == "Color":
return True
if "type" in control and control["type"] == "Color":
return True
return False
def unit_supports_color_temperature(self, *, unit_id: int) -> bool:
"""
Returns true if unit supports color temperature
{
'activeSceneId': 0,
'address': '26925689c64c',
'condition': 0,
'controls': [[{'type': 'Dimmer', 'value': 0.0},
{'level': 0.49736842105263157,
'max': 6000,
'min': 2200,
'type': 'CCT',
'value': 4090.0}]],
'dimLevel': 0.0,
'firmwareVersion': '26.24',
'fixtureId': 14235,
'groupId': 0,
'id': 13,
'image': 'mbUdKbLz5g3VsVNJIgTYboHa8ce9YfSK',
'name': 'Arbetslampa',
'on': True,
'online': True,
'position': 9,
'priority': 3,
'status': 'ok',
'type': 'Luminaire'
}
"""
data = self.get_unit_state(unit_id=unit_id)
if "controls" not in data:
return False
for control in data["controls"]:
if isinstance(control, list):
for inner_control in control:
if "type" in inner_control and inner_control["type"] == "CCT":
return True
if "type" in control and control["type"] == "CCT":
return True
return False
def turn_scene_off(self, *, scene_id: int):
"""
Response on ok:
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
# Unit_id needs to be an integer
if isinstance(scene_id, int):
pass
elif isinstance(scene_id, str):
scene_id = int(scene_id)
elif isinstance(scene_id, float):
scene_id = int(scene_id)
else:
raise CasambiApiException(
f"expected scene_id to be an integer, got: {scene_id}"
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
value = 0
message = {
"wire": self.wire_id,
"method": "controlScene",
"id": scene_id,
"level": value,
}
self.web_sock.send(json.dumps(message))
def turn_scene_on(self, *, scene_id):
"""
Response on ok:
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
# Unit_id needs to be an integer
if isinstance(scene_id, int):
pass
elif isinstance(scene_id, str):
scene_id = int(scene_id)
elif isinstance(scene_id, float):
scene_id = int(scene_id)
else:
raise CasambiApiException(
f"expected scene_id to be an integer, got: {scene_id}"
)
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
value = 1
message = {
"wire": self.wire_id,
"method": "controlScene",
"id": scene_id,
"level": value,
}
self.web_sock.send(json.dumps(message))
def get_unit_list(self):
"""
Getter for unit lists
"""
if not self.network_id:
raise CasambiApiException("network_id is not set!")
url = "https://door.casambi.com/v1/networks/"
url += f"{self.network_id}/units"
headers = {
"X-Casambi-Key": self.api_key,
"X-Casambi-Session": self._session_id,
"Content-type": "application/json",
}
response = requests.get(url, headers=headers)
if response.status_code != 200:
reason = f"get_network_unit_list: headers: {headers},"
reason += 'message: "Got a invalid status_code",'
reason += f"status_code: {response.status_code},"
reason += f"response: {response.text}"
raise CasambiApiException(reason)
data = response.json()
dbg_msg = f"get_network_unit_list: headers: {headers}"
dbg_msg += f"response: {data}"
_LOGGER.debug(dbg_msg)
return data
def get_scenes_list(self):
"""
Getter for Scenes list
"""
url = "https://door.casambi.com/v1/networks/"
url += f"{self.network_id}/scenes"
if not self._session_id:
raise CasambiApiException("No session id is set. Need to login!")
headers = {
"X-Casambi-Key": self.api_key,
"X-Casambi-Session": self._session_id,
"Content-type": "application/json",
}
response = requests.get(url, headers=headers)
if response.status_code != 200:
reason = f"get_network_unit_list: headers: {headers},"
reason += 'message: "Got a invalid status_code",'
reason += f"status_code: {response.status_code},"
reason += f"response: {response.text}"
raise CasambiApiException(reason)
data = response.json()
dbg_msg = f"get_scenes_list: headers: {headers}"
dbg_msg += f" response: {data}"
_LOGGER.debug(dbg_msg)
return data
def get_fixture_information(self, *, unit_id: int):
"""
GET https://door.casambi.com/v1/fixtures/{id}
"""
url = f"https://door.casambi.com/v1/fixtures/{unit_id}"
if not self._session_id:
raise CasambiApiException("No session id is set. Need to login!")
headers = {
"X-Casambi-Key": self.api_key,
"X-Casambi-Session": self._session_id,
"Content-type": "application/json",
}
response = requests.get(url, headers=headers)
if response.status_code != 200:
reason = f"get_fixture_information: headers: {headers},"
reason += 'message: "Got a invalid status_code",'
reason += f"status_code: {response.status_code},"
reason += f"response: {response.text}"
raise CasambiApiException(reason)
data = response.json()
dbg_msg = f"get_fixture_information: headers: {headers}"
dbg_msg += f" response: {data}"
_LOGGER.debug(dbg_msg)
return data
def get_network_state(self):
"""
Getter for network state
"""
url = f"https://door.casambi.com/v1/networks/{self.network_id}/state"
if not self._session_id:
raise CasambiApiException("No session id is set. Need to login!")
headers = {
"X-Casambi-Key": self.api_key,
"X-Casambi-Session": self._session_id,
"Content-type": "application/json",
}
response = requests.get(url, headers=headers)
if response.status_code != 200:
reason = f"get_network_state: headers: {headers},"
reason += 'message: "Got a invalid status_code",'
reason += f"status_code: {response.status_code},"
reason += f"response: {response.text}"
raise CasambiApiException(reason)
data = response.json()
dbg_msg = f"get_network_state: headers: {headers}"
dbg_msg = f" response: {data}"
_LOGGER.debug(dbg_msg)
return data
def get_network_datapoints(self, *, from_time=None, to_time=None, sensor_type=0):
"""
sensorType: [0 = Casambi | 1 = Vendor]
from: yyyyMMdd[hh[mm[ss]]]
to: yyyyMMdd[hh[mm[ss]]]
"""
if not self._session_id:
raise CasambiApiException("No session id is set. Need to login!")
headers = {
"X-Casambi-Key": self.api_key,
"X-Casambi-Session": self._session_id,
"Content-type": "application/json",
}
if sensor_type not in [0, 1]:
raise CasambiApiException("invalid sentor_type")
now = datetime.datetime.now()
if not to_time:
to_time = now.strftime("%Y%m%d%H%M")
if not from_time:
from_time = (now - datetime.timedelta(days=7)).strftime("%Y%m%d%H%M")
url = (
"https://door.casambi.com/v1/networks/"
+ str(self.network_id)
+ "/datapoints?sensorType="
+ str(sensor_type)
+ "&from="
+ from_time
+ "&to="
+ to_time
)
response = requests.get(url, headers=headers)
if response.status_code != 200:
reason = f"get_network_datapoints: headers: {headers},"
reason += 'message: "Got a invalid status_code",'
reason += f"status_code: {response.status_code},"
reason += f"response: {response.text}"
raise CasambiApiException(reason)
data = response.json()
dbg_msg = f"get_network_datapoints headers: {headers}"
dbg_msg += f" response: {data}"
_LOGGER.debug(dbg_msg)
return data
def ws_recieve_message(self):
"""
Response on success?
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
result = self.web_sock.recv()
data = json.loads(result)
return data
def ws_recieve_messages(self):
"""
Response on success?
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
messages = []
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
self.web_sock.settimeout(0.1)
while True:
try:
casambi_msg = self.web_sock.recv()
data = json.loads(casambi_msg)
messages.append(data)
except websocket.WebSocketConnectionClosedException:
break
except socket.timeout:
break
except websocket.WebSocketTimeoutException:
break
return messages
def ws_close(self):
"""
Response on success?
{'wire': 1, 'method': 'peerChanged', 'online': True}
"""
if not self.web_sock:
raise CasambiApiException("No websocket connection!")
message = {"method": "close", "wire": self.wire_id}
self.web_sock.send(json.dumps(message))
| StarcoderdataPython |
1708005 | import cv2
import numpy
import math
from .cluster import Clusters, Cluster
CV_HARRIS_CORNER_THRESHOLD = 10e-03
MIN_CORNER_CLUSTER = 1
def init_harris_corners_and_cluster(monochrome_pil_img, polar_side_maximums, polar_side_minimums, origin):
    """
    Detect Harris corners in a monochrome image, cluster them around the
    polar extrema of a shape outline, and return the number of clusters
    that kept more than MIN_CORNER_CLUSTER corners.

    :param monochrome_pil_img: grayscale PIL image to run the detector on.
    :param polar_side_maximums: sequence of (angle, radius) maxima.
    :param polar_side_minimums: sequence of (angle, radius) minima.
    :param origin: (x, y) pixel the polar coordinates are relative to.
    """
    harris_img = cv2.cornerHarris(numpy.array(monochrome_pil_img), 3, 3, 0.04)

    # Vectorised thresholding: argwhere yields (row, col) pairs in the same
    # row-major order as the original O(H*W) nested Python loops.
    harris_corners = [
        (int(x), int(y))
        for x, y in numpy.argwhere(harris_img > CV_HARRIS_CORNER_THRESHOLD)
    ]

    # Seed one cluster per polar extremum, converted to pixel coordinates
    # (y axis points down, hence origin[1] - dy).
    cluster_seeds = []
    for angle, radius in list(polar_side_maximums) + list(polar_side_minimums):
        dx = int(radius * math.cos(angle))
        dy = int(radius * math.sin(angle))
        cluster_seeds.append(Cluster((origin[0] + dx, origin[1] - dy)))

    clusters = Clusters(harris_corners, cluster_seeds)
    clusters.fit_data_to_clusters(1, 0)

    # Remove clusters whose corner count is at or below the threshold.
    i = 0
    while i < len(clusters):
        if len(clusters[i]) <= MIN_CORNER_CLUSTER:
            del clusters[i]
        else:
            i += 1

    return len(clusters)
| StarcoderdataPython |
1643327 | """ShuffleNet (https://arxiv.org/abs/1707.01083)"""
import collections
import torch
from .model import Model
from .modules import Add, Conv2dAct, ChannelShuffle, Conv2dBN
class ShuffleNet(Model):
    """ShuffleNet v1 backbone (https://arxiv.org/abs/1707.01083).

    Built from grouped 1x1 convolutions with channel shuffles and depthwise
    3x3 convolutions.  Note: submodule attribute names (conv0, shuffle, ...)
    determine state_dict keys and must not be renamed.
    """

    # First-stage output channels for each supported group count (paper Table 1).
    FIRST_STAGE_CHANNELS = {1: 144, 2: 200, 3: 240, 4: 272, 8: 384}

    class BasicBlock(torch.nn.Module):
        """Stride-1 ShuffleNet unit with an additive residual shortcut."""

        def __init__(self, in_channels, out_channels, num_groups):
            super().__init__()
            assert in_channels == out_channels
            # Bottleneck width is 1/4 of the output width, as in the paper.
            bottleneck_channels = out_channels // 4
            # 1x1 grouped conv -> channel shuffle -> 3x3 depthwise -> 1x1 grouped conv.
            self.conv0 = Conv2dAct(in_channels, bottleneck_channels, kernel_size=1, groups=num_groups)
            self.shuffle = ChannelShuffle(num_groups)
            self.conv1 = Conv2dBN(bottleneck_channels, bottleneck_channels, kernel_size=3, padding=1, groups=bottleneck_channels)
            self.conv2 = Conv2dBN(bottleneck_channels, out_channels, kernel_size=1, groups=num_groups)
            self.add = Add()
            self.activation = torch.nn.ReLU()

        def forward(self, input):
            """Apply the unit; the output has the same shape as *input*."""
            x = self.conv0(input)
            x = self.shuffle(x)
            x = self.conv1(x)
            x = self.conv2(x)
            x = self.add(x, input)
            return self.activation(x)

    class DownsampleBasicBlock(torch.nn.Module):
        """Strided ShuffleNet unit whose shortcut is an average-pooled copy
        of the input concatenated on the channel axis."""

        def __init__(self, in_channels, out_channels, num_groups, stride, skip_first_group_conv=False):
            super().__init__()
            assert stride > 1
            # The concat shortcut contributes in_channels, so the conv path
            # only needs to produce the remainder.
            out_channels = out_channels - in_channels
            bottleneck_channels = out_channels // 4
            # Stage 2 skips grouping on the first 1x1 conv (input has only 24 channels).
            first_conv_num_groups = 1 if skip_first_group_conv else num_groups
            self.conv0 = Conv2dAct(in_channels, bottleneck_channels, kernel_size=1, groups=first_conv_num_groups)
            self.shuffle = ChannelShuffle(num_groups)
            self.conv1 = Conv2dBN(bottleneck_channels, bottleneck_channels, kernel_size=3, padding=1, groups=bottleneck_channels, stride=stride)
            self.conv2 = Conv2dBN(bottleneck_channels, out_channels, kernel_size=1, groups=num_groups)
            self.pool = torch.nn.AvgPool2d(kernel_size=3, padding=1, stride=stride)
            self.activation = torch.nn.ReLU()

        def forward(self, input):
            """Apply the unit; spatial size shrinks by *stride*, channels grow."""
            x = self.conv0(input)
            x = self.shuffle(x)
            x = self.conv1(x)
            x = self.conv2(x)
            shortcut = self.pool(input)
            # Channel-wise concatenation of shortcut and conv path.
            x = torch.cat((shortcut, x), 1)
            return self.activation(x)

    def __init__(self, width_multiplier=1, num_groups=3, num_blocks=[4, 8, 4]):
        assert num_groups in ShuffleNet.FIRST_STAGE_CHANNELS, "Unexpected number of groups"
        first_stage_channels = int(ShuffleNet.FIRST_STAGE_CHANNELS[num_groups] * width_multiplier)
        # Channels double per stage; the last stage's width is the feature size.
        feature_planes = first_stage_channels * (2 ** (len(num_blocks) - 1))
        super().__init__(feature_planes)

        in_planes = 24
        out_planes = first_stage_channels
        blocks = []
        for i, n in enumerate(num_blocks):
            blocks.extend(self._make_stage(in_planes, out_planes, n, num_groups, i, skip_first_group_conv=(i == 0)))
            in_planes = out_planes
            out_planes *= 2

        # Stem conv + maxpool, the shuffle stages, then global pooling.
        self.features = torch.nn.Sequential(collections.OrderedDict([('conv0', Conv2dAct(3, 24, kernel_size=3, stride=2, padding=1)),
                                                                     ('pool0', torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1))]
                                                                    + blocks
                                                                    + [('pool1', torch.nn.AdaptiveAvgPool2d(1)),
                                                                       ('flatten', torch.nn.Flatten())]))

    def _make_stage(self, in_channels, out_channels, num_blocks, num_groups, index, skip_first_group_conv=False):
        """Build one stage: a downsampling unit followed by stride-1 units."""
        blocks = [(f'block{index}_0', ShuffleNet.DownsampleBasicBlock(in_channels, out_channels, num_groups, stride=2, skip_first_group_conv=skip_first_group_conv))]
        for i in range(num_blocks - 1):
            blocks.append((f'block{index}_{i+1}', ShuffleNet.BasicBlock(out_channels, out_channels, num_groups)))
        return blocks
| StarcoderdataPython |
161878 | <filename>snoop/data/management/commands/runworkers.py
"""Entrypoint for worker process.
Starts up a variable number of worker processes with Celery, depending on settings and available CPU count.
"""
import os
import logging
import subprocess
from django.conf import settings
from django.core.management.base import BaseCommand
from snoop.profiler import Profiler
from snoop.data.collections import ALL
from ... import tasks
from ...logs import logging_for_management_command
log = logging.getLogger(__name__)
def celery_argv(queues):
    """Build the command line used to exec a `celery worker` process.

    :param queues: iterable of queue names the worker should consume.
    :returns: argv list whose first element is the celery binary path.
    """
    celery_binary = (
        subprocess.check_output(['which', 'celery'])
        .decode('latin1')
        .strip()
    )

    loglevel = 'warning' if settings.DEBUG else 'error'

    return [
        celery_binary,
        '-A', 'snoop.data',
        'worker',
        '-E',
        '--pidfile=',
        f'--loglevel={loglevel}',
        '-Ofair',
        '--max-tasks-per-child', str(settings.WORKER_TASK_LIMIT),
        '--max-memory-per-child', str(settings.WORKER_MEMORY_LIMIT * 1024),
        '--prefetch-multiplier', str(14),
        '--soft-time-limit', '190000',  # 52h
        '--time-limit', '200000',  # 55h
        '-Q', ','.join(queues),
        '-c', str(settings.WORKER_COUNT),
    ]
class Command(BaseCommand):
    "Run celery worker"

    def add_arguments(self, parser):
        """Add the flag switching between system and collection queues."""
        parser.add_argument('--system-queues', action='store_true',
                            help="Run system queues, not data queues (only one instance should exist)")

    def handle(self, *args, **options):
        """Replace this process with a celery worker for the chosen queues."""
        logging_for_management_command()

        with Profiler():
            tasks.import_snoop_tasks()
            # System workers consume the fixed system queues; data workers
            # consume one queue per collection.
            queue_names = (settings.SYSTEM_QUEUES if options['system_queues']
                           else [c.queue_name for c in ALL.values()])
            argv = celery_argv(queues=queue_names)
            log.info('+' + ' '.join(argv))
            os.execv(argv[0], argv)
| StarcoderdataPython |
1705583 | import tensorflow as tf
from utils import RNN
# one byte input, 256 possiblities for 1 hot encoding
SIZE_IN = 8
SEQ_LEN = 10
BATCH_SIZE = 10

cell = RNN(128, (SIZE_IN, 8,), (SIZE_IN, 8,))

# FIXME: the original line read "out_state, logits =" with no right-hand side
# (a syntax error).  Presumably the RNN cell should be applied to an input
# placeholder -- confirm against utils.RNN's call signature.
X = tf.placeholder(tf.float32, shape=(None, SIZE_IN, 8,), name="X")
out_state, logits = cell(X)

Y_ = tf.placeholder(tf.float32, shape=(None, SIZE_IN, 8,), name="Y_")
cost = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y_)
train_step = tf.train.AdamOptimizer().minimize(cost)

with tf.Session() as sess:
    # FIXME: feed_dict="Stuff" is placeholder text from the original source;
    # real batches must be supplied here, e.g. {X: batch_x, Y_: batch_y}.
    sess.run((train_step, out_state), feed_dict="Stuff")
| StarcoderdataPython |
1723593 | # coding: utf-8
"""
Trakerr Client API
Get your application events and errors to Trakerr via the *Trakerr API*.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __builtin__ import * #My interpreter was shirking adding this automatically on the non-generated files. Most shouldn't need this, figure out why on a second pass
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from trakerr_client import ApiClient
from trakerr_client import EventsApi
from trakerr_client.apis import events_api
from trakerr_client.models import *
from event_trace_builder import EventTraceBuilder, Trakerr_Utils
from datetime import datetime, timedelta
class Trakerr(object):
    """
    The public facing class that will log errors.

    A use case is:

    >>>from trakerr__client import Trakerr
    >>>...
    >>>l = Trakerr()
    >>>...
    >>>try:
    >>>  ...
    >>>except:
    >>>  l.log("Optional Error String")
    """

    def __init__(self):  # Add args
        # NOTE(review): construction is not implemented yet, and log() below
        # references an undefined name `client`; presumably a TrakerrClient
        # instance should be created here -- confirm intended design.
        raise NotImplementedError

    def log(self, classification="Error", error_type=None, error_message=None, exc_info=None):
        """
        Send the current (or supplied) exception to Trakerr as an app event.

        :param classification: event classification string, defaults "Error".
        :param error_type: overrides the exception type name when given.
        :param error_message: overrides the exception message when given.
        :param exc_info: sys.exc_info() tuple, None to capture the current
            exception, or False to send an event without a stacktrace.
        """
        try:
            if exc_info is None:
                exc_info = sys.exc_info()

            if exc_info is not False:
                exc_type, value = exc_info[:2]
                # BUG FIX: the original computed these fallbacks but threw
                # the results away instead of assigning them.
                if error_type is None:
                    error_type = Trakerr_Utils.format_error_name(exc_type)
                if error_message is None:
                    error_message = str(value)

            # NOTE(review): `client` is undefined in this scope (see
            # __init__); this call cannot work until a client is created.
            excevent = client.create_new_app_event(classification, error_type, error_message)

            if exc_info is not False:
                excevent.event_stacktrace = EventTraceBuilder.get_event_traces(exc_info)

            client.send_event_async(excevent)
        finally:
            # Break the traceback reference cycle.
            del exc_info
class TrakerrClient(object):
    """
    Wrapper around the generated Trakerr events API that fills in
    per-application context defaults before sending events.
    """

    # Unix epoch reference used to compute millisecond timestamps.
    EPOCH_CONSTANT = datetime(1970, 1, 1)

    def __init__(self, api_key=None, url_path=None, context_app_version=None,
                 context_env_name="development", context_env_version=None,
                 context_env_hostname=None,
                 context_appos=None, context_appos_version=None,
                 context_datacenter=None, context_datacenter_region=None):
        """
        Store per-application defaults and create the underlying events API
        client (optionally against a non-default API endpoint *url_path*).
        """
        self.api_Key = api_key
        self.context_App_Version = context_app_version
        self.context_Env_Name = context_env_name
        self.context_Env_Version = context_env_version
        self.context_Env_Hostname = context_env_hostname
        self.context_AppOS = context_appos
        self.context_AppOS_Version = context_appos_version
        self.context_DataCenter = context_datacenter
        self.context_DataCenter_Region = context_datacenter_region

        if url_path is None:
            client = ApiClient()
        else:
            client = ApiClient(url_path)
        self.events_api = EventsApi(client)

    def create_new_app_event(self, classification="Error", eventType="unknown",
                             eventMessage="unknown"):
        """Create a bare AppEvent tagged with this client's API key."""
        return AppEvent(self.api_Key, classification, eventType, eventMessage)

    def send_event(self, app_event):
        """Fill in defaults and synchronously POST *app_event*."""
        self.fill_defaults(app_event)
        self.events_api.events_post(app_event)

    def async_callback(self, response):
        """
        Callback method for the send_event_async function. Currently outputs nothing.

        :param response: message returned after the async call is completed.
        """
        #print response

    def send_event_async(self, app_event):
        """Fill in defaults and POST *app_event* asynchronously."""
        self.fill_defaults(app_event)
        self.events_api.events_post(app_event, callback=self.async_callback)

    def fill_defaults(self, app_event):
        """
        Fill any unset fields of *app_event* from this client's defaults and
        stamp the event time (milliseconds since the Unix epoch, UTC).

        :param app_event: the app event to complete (mutated in place).
        :return: the same, fully filled out, AppEvent object.
        """
        # BUG FIX: the original wrote to "app_event.apiKey" (a brand new
        # attribute) instead of the "api_key" field it had just checked,
        # leaving api_key unset on the outgoing event.
        if app_event.api_key is None: app_event.api_key = self.api_Key
        if app_event.context_app_version is None: app_event.context_app_version = self.context_App_Version

        if app_event.context_env_name is None: app_event.context_env_name = self.context_Env_Name
        if app_event.context_env_version is None: app_event.context_env_version = self.context_Env_Version
        if app_event.context_env_hostname is None: app_event.context_env_hostname = self.context_Env_Hostname

        # OS name and version are treated as a pair: both default together.
        if app_event.context_app_os is None:
            app_event.context_app_os = self.context_AppOS
            app_event.context_app_os_version = self.context_AppOS_Version

        if app_event.context_data_center is None: app_event.context_data_center = self.context_DataCenter
        if app_event.context_data_center_region is None: app_event.context_data_center_region = self.context_DataCenter_Region

        TD = datetime.utcnow() - self.EPOCH_CONSTANT  # timedelta object
        if app_event.event_time is None: app_event.event_time = int(TD.total_seconds() * 1000)
        return app_event
| StarcoderdataPython |
4807230 | <gh_stars>0
from ryanair.ryanair import Ryanair
from ryanair.Dates import departuredates, returndates | StarcoderdataPython |
3295153 | import unittest
import torch
import numpy as np
import numpy.testing as npt
from torch_points_kernels import grouping_operation
class TestGroup(unittest.TestCase):
    """Tests for torch_points_kernels.grouping_operation."""

    # input: points(b, c, n) idx(b, npoints, nsample)
    # output: out(b, c, npoints, nsample)
    def test_simple(self):
        """Gather per-point features by index on CPU and, when available, CUDA."""
        # Two batches, 3 channels (x/y/z coordinates), 3 points each.
        features = torch.tensor(
            [
                [[0, 10, 0], [1, 11, 0], [2, 12, 0]],
                [
                    [100, 110, 120],
                    [101, 111, 121],
                    [102, 112, 122],
                ],  # x-coordinates  # y-coordinates  # z-coordinates
            ]
        ).type(torch.float)
        # For each batch: 2 query points, 2 neighbour samples per point.
        idx = torch.tensor([[[1, 0], [0, 0]], [[0, 1], [1, 2]]]).type(torch.long)
        expected = np.array(
            [
                [[[10, 0], [0, 0]], [[11, 1], [1, 1]], [[12, 2], [2, 2]]],
                [  # 2nd batch
                    [  # x-coordinates
                        [100, 110],  # x-coordinates of samples for point 0
                        [110, 120],  # x-coordinates of samples for point 1
                    ],
                    [[101, 111], [111, 121]],  # y-coordinates
                    [[102, 112], [112, 122]],  # z-coordinates
                ],
            ]
        )
        cpu_output = grouping_operation(features, idx).detach().cpu().numpy()
        npt.assert_array_equal(expected, cpu_output)
        # The CUDA path must produce the same gathering as the CPU path.
        if torch.cuda.is_available():
            npt.assert_array_equal(
                grouping_operation(features.cuda(), idx.cuda()).detach().cpu().numpy(),
                expected,
            )


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3238555 | <reponame>vahtras/util
import numpy
class BlockDiagonalMatrix:
"""
Blocked matrix class based on lists of full matrices
"""
    def __init__(self, nrow, ncol):
        """
        Constructor of the class.

        :param nrow: sequence of per-block row counts
        :param ncol: sequence of per-block column counts (same length)
        """
        assert len(nrow) == len(ncol)
        from . import full
        self.nblocks = len(nrow)
        self.nrow = nrow
        self.ncol = ncol
        self.subblock = []
        self.irow = []
        self.icol = []
        # Allocate each zero block and record its row/column offset into
        # the equivalent dense (unblocked) matrix.
        for i in range(self.nblocks):
            self.subblock.append(full.matrix((nrow[i], ncol[i])))
            self.irow.append(sum(self.nrow[:i]))
            self.icol.append(sum(self.ncol[:i]))
        self.irow = tuple(self.irow)
        self.icol = tuple(self.icol)
def __str__(self):
""" Formatted output based on full matrix class """
retstr = ""
for i in range(self.nblocks):
if self.nrow[i] * self.ncol[i]:
retstr += "\nBlock %d\n" % (i + 1) + str(self.subblock[i])
return retstr
def __repr__(self):
return f'BlockDiagonalMatrix({self.nrow}, {self.ncol})'
    def __getitem__(self, n):
        """
        Return subblock *n* (a full matrix).

        :param n: zero-based block index
        """
        return self.subblock[n]
@staticmethod
def init(*arr):
from .full import init
matrices = tuple(init(a) for a in arr)
for m in matrices:
assert len(m.shape) == 2, "blocked only for two dimensions"
rdim = tuple(m.shape[0] for m in matrices)
cdim = tuple(m.shape[1] for m in matrices)
new = BlockDiagonalMatrix(rdim, cdim)
for i, a in enumerate(arr):
new.subblock[i][:, :] = matrices[i]
return new
    @staticmethod
    def init_from_array(arr, rdim, cdim):
        """
        Build a block-diagonal matrix from a flat array.

        :param arr: flat sequence holding all block elements back to back,
            each block stored column-major (Fortran order)
        :param rdim: per-block row counts
        :param cdim: per-block column counts
        """
        from . import full
        # Total element count must match the sum of block sizes.
        assert numpy.dot(rdim, cdim) == len(arr)
        new = BlockDiagonalMatrix(rdim, cdim)
        start = 0
        end = 0
        # Slice each block's elements out of the flat array and reshape
        # column-major into place.
        for i, block in enumerate(new.subblock):
            end += rdim[i] * cdim[i]
            block[:, :] = full.init(
                arr[start:end]
            ).reshape(block.shape, order="F")
            start += rdim[i] * cdim[i]
        return new
@property
def size(self):
return sum(b.size for b in self.subblock)
def ravel(self, **kwargs):
from . import full
linear = full.matrix(self.size)
start = 0
end = 0
for s in self.subblock:
end += s.size
linear[start:end] = s.ravel(**kwargs)
start += s.size
return linear
def __matmul__(self, other):
"""Multiplication blockwise """
new = BlockDiagonalMatrix(self.nrow, other.ncol)
for i in range(self.nblocks):
if self.nrow[i]:
new.subblock[i] = self.subblock[i] @ other.subblock[i]
return new
def __rmul__(self, other):
"""Scalar multiplication"""
new = BlockDiagonalMatrix(self.nrow, self.ncol)
for i in range(self.nblocks):
if self.nrow[i]:
new.subblock[i] = other * self.subblock[i]
return new
def __add__(self, other):
"""Addition blockwise"""
bdm = BlockDiagonalMatrix(self.nrow, self.ncol)
bdm.subblock = [s + o for s, o in zip(self.subblock, other.subblock)]
return bdm
def __sub__(self, other):
"""Subtraction blockwize"""
bdm = BlockDiagonalMatrix(self.nrow, self.ncol)
bdm.subblock = [s - o for s, o in zip(self.subblock, other.subblock)]
return bdm
def __neg__(self):
"""Negation blockwise"""
bdm = BlockDiagonalMatrix(self.nrow, self.ncol)
bdm.subblock = [-b for b in self.subblock]
return bdm
def __truediv__(self, other):
"Scalar division"
bdm = BlockDiagonalMatrix(self.nrow, self.ncol)
bdm.subblock = [b * (1/other) for b in self.subblock]
return bdm
def __rtruediv__(self, other):
"Reverse Scalar division"
bdm = BlockDiagonalMatrix(self.nrow, self.ncol)
bdm.subblock = [other / b for b in self.subblock]
return bdm
def solve(self, other):
"Solve linear equations blockwise" ""
bdm = BlockDiagonalMatrix(self.nrow, self.ncol)
bdm.subblock = [s.solve(o) for s, o in zip(self.subblock, other.subblock)]
return bdm
def pack(self):
for i in range(self.nblocks):
assert self.nrow[i] == self.ncol[i]
new = BlockedTriangular(self.nrow)
for i in range(self.nblocks):
new.subblock[i] = self.subblock[i].pack()
return new
def unblock(self):
from . import full
nrows = sum(self.nrow)
ncols = sum(self.ncol)
new = full.matrix((nrows, ncols))
for i in range(self.nblocks):
new[
self.irow[i]: self.irow[i] + self.nrow[i],
self.icol[i]: self.icol[i] + self.ncol[i],
] = self.subblock[i]
return new
    def T(self):
        """
        Blockwise transpose, returned as a new BlockDiagonalMatrix.

        Example:

        >>> M = BlockDiagonalMatrix([2],[2])
        >>> M.subblock[0][0, 1] = 1
        >>> M.subblock[0][1, 0] = 2
        >>> print(M, M.T())
        <BLANKLINE>
        Block 1
        <BLANKLINE>
         (2, 2)
                      Column   1    Column   2
               1      0.00000000    1.00000000
               2      2.00000000    0.00000000
        <BLANKLINE>
        Block 1
        <BLANKLINE>
         (2, 2)
                      Column   1    Column   2
               1      0.00000000    2.00000000
               2      1.00000000    0.00000000
        <BLANKLINE>
        """
        # Row/column dimensions swap on transpose.
        new = BlockDiagonalMatrix(self.ncol, self.nrow)
        for i in range(self.nblocks):
            new.subblock[i] = self.subblock[i].T
        return new
def sqrt(self):
new = BlockDiagonalMatrix(self.ncol, self.nrow)
for i in range(self.nblocks):
new.subblock[i] = self.subblock[i].sqrt()
return new
def invsqrt(self):
new = BlockDiagonalMatrix(self.ncol, self.nrow)
for i in range(self.nblocks):
new.subblock[i] = self.subblock[i].invsqrt()
return new
def func(self, f):
""" Blockwise function of matrix"""
new = BlockDiagonalMatrix(self.ncol, self.nrow)
for i in range(self.nblocks):
new.subblock[i] = self.subblock[i].func(f)
return new
def tr(self):
"""Sum blockwise traces
"""
sum = 0
for i in range(self.nblocks):
if self.nrow[i]:
sum += self.subblock[i].tr()
return sum
def eigvec(self):
u = BlockDiagonalMatrix(self.nrow, self.nblocks * [1])
v = BlockDiagonalMatrix(self.nrow, self.ncol)
for i in range(self.nblocks):
u.subblock[i], v.subblock[i] = self.subblock[i].eigvec()
return u, v
def qr(self):
q = BlockDiagonalMatrix(self.nrow, self.nrow)
r = BlockDiagonalMatrix(self.nrow, self.ncol)
for i in range(self.nblocks):
q.subblock[i], r.subblock[i] = self.subblock[i].qr()
return q, r
def GS(self, S):
new = BlockDiagonalMatrix(self.nrow, self.ncol)
for i in range(self.nblocks):
new.subblock[i] = self.subblock[i].GS(S.subblock[i])
return new
def get_columns(self, columns_per_symmetry):
new = BlockDiagonalMatrix(self.nrow, columns_per_symmetry)
for oldblock, newblock, cols in zip(self, new, columns_per_symmetry):
if cols:
newblock[:, :] = oldblock[:, :cols]
return new
def unit(nbl, factor=1):
    """Block-diagonal identity (scaled by *factor*) with block sizes ``nbl``."""
    from .full import unit
    identity = BlockDiagonalMatrix(nbl, nbl)
    for i, n in enumerate(nbl):
        if n:
            identity.subblock[i] = unit(n, factor)
    return identity
class BlockedTriangular(object):
    """Blocked collection of triangular (packed) square matrices."""

    def __init__(self, dim):
        from . import full
        self.nblocks = len(dim)
        self.dim = dim
        self.subblock = [full.Triangular((n, n)) for n in dim]

    def __str__(self):
        parts = []
        for i, block in enumerate(self.subblock):
            if self.dim[i]:
                parts.append("\nBlock %d\n" % (i + 1) + str(block))
        return "".join(parts)

    def __add__(self, other):
        result = BlockedTriangular(self.dim)
        result.subblock = [
            mine + theirs
            for mine, theirs in zip(self.subblock, other.subblock)
        ]
        return result

    def __sub__(self, other):
        result = BlockedTriangular(self.dim)
        result.subblock = [
            mine - theirs
            for mine, theirs in zip(self.subblock, other.subblock)
        ]
        return result

    def unpack(self):
        """Expand every packed block to a full square block."""
        result = BlockDiagonalMatrix(self.dim, self.dim)
        result.subblock = [block.unpack() for block in self.subblock]
        return result

    def unblock(self):
        """Return the dense triangular form of the whole blocked matrix."""
        return self.unpack().unblock().pack()

    @staticmethod
    def init(blocks):
        """Build a BlockedTriangular from raw per-block data."""
        from . import full
        triangulars = [full.Triangular.init(block) for block in blocks]
        new = BlockedTriangular([tmat.dim for tmat in triangulars])
        new.subblock = triangulars
        return new
# Keep non-standard names for back compatibility
# (older callers import ``triangular`` instead of ``BlockedTriangular``).
triangular = BlockedTriangular
| StarcoderdataPython |
72506 | # -*- coding: utf8 -*-
#
# Copyright (c) 2006-2016 <NAME>
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ur"""
Markdown output backend.
>>> from pybtex.richtext import Tag, HRef
>>> markdown = Backend()
>>> print Tag('em', '').render(markdown)
<BLANKLINE>
>>> print Tag('em', 'Non-', 'empty').render(markdown)
*Non\-empty*
>>> print HRef('/', '').render(markdown)
<BLANKLINE>
>>> print HRef('/', 'Non-', 'empty').render(markdown)
[Non\-empty](/)
"""
from xml.sax.saxutils import escape
from pybtex.backends import BaseBackend
# Characters with Markdown meaning; ``Backend.format_str`` backslash-escapes
# each of these so the text renders literally.
SPECIAL_CHARS = [
    u'\\',  # backslash
    u'`',   # backtick
    u'*',   # asterisk
    u'_',   # underscore
    u'{',   # curly braces
    u'}',   # curly braces
    u'[',   # square brackets
    u']',   # square brackets
    u'(',   # parentheses
    u')',   # parentheses
    u'#',   # hash mark
    u'+',   # plus sign
    u'-',   # minus sign (hyphen)
    u'.',   # dot
    u'!',   # exclamation mark
]
class Backend(BaseBackend):
    u""" A backend to support markdown output. It implements the same
    features as the HTML backend.

    In addition to that, you can use the keyword php_extra=True to enable
    the definition list extension of php-markdown. The default is not to use
    it, since we cannot be sure that this feature is implemented on all
    systems.

    More information:
    http://www.michelf.com/projects/php-markdown/extra/#def-list

    """

    def __init__(self, encoding=None, php_extra=False):
        super(Backend, self).__init__(encoding=encoding)
        self.php_extra = php_extra

    default_suffix = '.md'
    symbols = {
        'ndash': u'–',  # en dash (U+2013)
        'newblock': u'\n',
        'nbsp': u' '
    }
    tags = {
        'em' : u'*',    # emphasize text
        'strong': u'**',  # emphasize text even more
        'i'  : u'*',    # italicize text: be careful, i is not semantic
        'b'  : u'**',   # embolden text: be careful, b is not semantic
        'tt' : u'`',    # make text appear as code (typically typewriter text), a little hacky
    }

    def format_str(self, text):
        """Format the given string *str_*.

        XML-escapes &, < and > first, then backslash-escapes every
        Markdown control character so the text renders literally.
        """
        text = escape(text)
        for special_char in SPECIAL_CHARS:
            text = text.replace(special_char, u'\\' + special_char)
        return text

    def format_tag(self, tag_name, text):
        # Unknown tags pass the text through unchanged; empty text collapses
        # to the empty string so markers such as ** never appear alone.
        tag = self.tags.get(tag_name)
        if tag is None:
            return text
        else:
            return ur'{0}{1}{0}'.format(tag, text) if text else u''

    def format_href(self, url, text):
        # Empty link text renders as nothing rather than an empty link.
        return ur'[%s](%s)' % (text, url) if text else u''

    def write_entry(self, key, label, text):
        # Support http://www.michelf.com/projects/php-markdown/extra/#def-list
        if self.php_extra:
            self.output(u'%s\n' % label)
            self.output(u': %s\n\n' % text)
        else:
            self.output(u'[%s] ' % label)
            self.output(u'%s \n' % text)
| StarcoderdataPython |
def roman_to_int(rom):
    """Convert a Roman numeral string (e.g. 'MCMXC') to its integer value.

    Scans left to right keeping a running group value; a larger symbol
    after a smaller one turns the group subtractive (IV -> 4).
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    run = prev = values[rom[0]]
    for ch in rom[1:]:
        cur = values[ch]
        if cur == prev:
            run += cur
        elif cur > prev:
            # subtractive pair: the pending run is subtracted from cur
            run = cur - run
        else:
            total += run
            run = cur
        prev = cur
    return total + run


if __name__ == '__main__':
    # Original script behavior: read one numeral and print its value.
    print(roman_to_int(input()))
| StarcoderdataPython |
140435 | """Simple demo primarily for verifying the development environment."""
from gears import core
from gears import draw
def main():
    """Build a two-triangle scene graph with nested transforms and run it."""
    # Triangle wrapped in translation -> rotation -> scaling nodes.
    node = draw.primitives.Triangle((200, 200), (100, 400))
    node = draw.transforms.Translation(node, 300, 200)
    # also we should try to see if triangle works when we
    # put the vertices in clockwise order (instead of
    # anticlockwise)
    node = draw.transforms.Rotation(node, 0)
    node = draw.transforms.Scaling(node, 1.3)
    node2 = draw.primitives.Triangle((300, 100), (400, 50))
    # verify that we can translate twice
    node2 = draw.transforms.Translation(node2, 300, 0)
    node2 = draw.transforms.Translation(node2, 0, 200)
    # Both subtrees are attached through one composite node.
    comp = draw.primitives.CompositeNode((node, node2))
    app = core.Application([])
    #app.window.attach_node(node)
    #app.window.attach_node(node2)
    app.window.attach_node(comp)
    app.window.resize(1024, 768)
    app.run()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4841971 | <gh_stars>0
import pkgutil
import os, sys
import os.path
import glob
import roadrunner
def getData(resource):
    """Return the contents of a testing resource file as text."""
    raw = pkgutil.get_data(__name__, resource)
    return raw.decode("utf8")
def abspath(resource):
    """Return the absolute path of a readable resource next to this module."""
    module_dir = os.path.dirname(__file__)
    path = os.path.normpath(os.path.join(module_dir, resource))
    if not (os.path.isfile(path) and os.access(path, os.R_OK)):
        raise Exception("error could not open the path {} for reading".format(path))
    return path
def dir(pattern='*'):
    """List files next to this module matching the glob *pattern*."""
    here = os.path.dirname(__file__)
    return glob.glob(os.path.join(here, pattern))
def getRoadRunner(resource):
    """Return a RoadRunner instance loaded with one of the test files."""
    data = pkgutil.get_data(__name__, resource)
    runner = roadrunner.RoadRunner()
    if sys.version_info[0] < 3:
        runner.load(data)
    else:
        # Python 3: pkgutil returns bytes, the loader expects text.
        runner.load(data.decode())
    return runner
| StarcoderdataPython |
1701462 | #! python3
import sys, PyQt5
from PyQt5.QtWidgets import QMainWindow, QAction, QMenu, QApplication
class Example(QMainWindow):
    """Minimal window demonstrating a File menu with a nested submenu."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Create File > New and File > Import > Import Mail, then show."""
        file_menu = self.menuBar().addMenu('File')

        import_menu = QMenu('Import', self)
        import_menu.addAction(QAction('Import Mail', self))

        file_menu.addAction(QAction('New', self))
        file_menu.addMenu(import_menu)

        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('Submenu')
        self.show()
def main():
    """Start the Qt event loop with a single Example window."""
    app = QApplication(sys.argv)
    window = Example()  # must stay referenced while the event loop runs
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
1757872 | # Copyright (c) 2020, <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a software system element model."""
from typing import Iterable, List, Set
from pydantic import Field
from .container import Container, ContainerIO
from .location import Location
from .static_structure_element import StaticStructureElement, StaticStructureElementIO
from .tags import Tags
__all__ = ("SoftwareSystem", "SoftwareSystemIO")
class SoftwareSystemIO(StaticStructureElementIO):
    """
    Represent a software system in the C4 model.

    Pydantic serialization counterpart of ``SoftwareSystem``.

    Attributes:
        location (Location): The location of this software system.
        containers (set of Container): The containers within this software system.

    """

    location: Location = Field(
        default=Location.Unspecified,
        description="The location of this software system.",
    )
    containers: List[ContainerIO] = Field(
        default=(), description="The containers within this software system."
    )
class SoftwareSystem(StaticStructureElement):
    """
    Represent a software system in the C4 model.

    Attributes:
        location (Location): The location of this software system.
        containers (set of Container): The containers within this software system.

    """

    def __init__(self, *, location: Location = Location.Unspecified, **kwargs) -> None:
        """Initialise a new SoftwareSystem."""
        super().__init__(**kwargs)
        self.location = location
        # Private registry of child containers; exposed read-only below.
        self._containers: Set[Container] = set()

        # TODO: canonical_name
        # TODO: parent

        self.tags.add(Tags.ELEMENT)
        self.tags.add(Tags.SOFTWARE_SYSTEM)

    @property
    def containers(self) -> Iterable[Container]:
        """Return read-only list of child containers."""
        return list(self._containers)

    @property
    def child_elements(self) -> Iterable[Container]:
        """Return child elements (from `Element.children`)."""
        return self.containers

    def add_container(
        self, name: str, description: str = "", technology: str = "", **kwargs
    ) -> Container:
        """Construct a new `Container` and add to this system and its model."""
        container = Container(
            name=name, description=description, technology=technology, **kwargs
        )
        self += container
        return container

    def __iadd__(self, container: Container) -> "SoftwareSystem":
        """Add a new container to this system and register with its model.

        Raises ValueError on a name clash or if the container already
        belongs to a different system.  Adding the same container twice
        is a no-op.
        """
        # TODO: once we move past python 3.6 change to proper return type via
        # __future__.annotations
        if container in self._containers:
            return self
        if self.get_container_with_name(container.name):
            raise ValueError(
                f"Container with name {container.name} already exists for {self}."
            )

        if container.parent is None:
            container.parent = self
        elif container.parent is not self:
            raise ValueError(
                f"Container with name {container.name} already has parent "
                f"{container.parent}. Cannot add to {self}."
            )

        self._containers.add(container)
        # Also register the container with the model, when attached to one.
        if self.has_model:
            model = self.model
            model += container
        return self

    def get_container_with_name(self, name: str) -> Container:
        """Return the container with the given name, or None."""
        return next((c for c in self._containers if c.name == name), None)

    @classmethod
    def hydrate(cls, software_system_io: SoftwareSystemIO) -> "SoftwareSystem":
        """Create a new SoftwareSystem instance and hydrate it from its IO."""
        software_system = cls(
            **cls.hydrate_arguments(software_system_io),
            location=software_system_io.location,
        )
        for container_io in software_system_io.containers:
            software_system += Container.hydrate(
                container_io,
                software_system=software_system,
            )
        return software_system
| StarcoderdataPython |
8657 | #! /usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
class OdomTopicReader(object):
    """Subscribe to an Odometry topic and log every message received."""

    def __init__(self, topic_name='/odom'):
        self._topic_name = topic_name
        self._sub = rospy.Subscriber(self._topic_name, Odometry, self.topic_callback)
        # Latest message; starts as an empty Odometry until the first callback.
        self._odomdata = Odometry()

    def topic_callback(self, msg):
        """Store the newest Odometry message and log it."""
        self._odomdata = msg
        rospy.loginfo(self._odomdata)
if __name__ == "__main__":
    # Start the node and idle at 10 Hz until ROS shuts it down; the
    # subscriber callback does all the work.
    rospy.init_node('odom_topic_subscriber')
    odom_reader_object = OdomTopicReader()
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        rate.sleep()
| StarcoderdataPython |
4839274 | """Support for the GIOS service."""
from homeassistant.components.air_quality import (
ATTR_CO,
ATTR_NO2,
ATTR_OZONE,
ATTR_PM_2_5,
ATTR_PM_10,
ATTR_SO2,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import ATTR_STATION, DATA_CLIENT, DEFAULT_SCAN_INTERVAL, DOMAIN, ICONS_MAP
ATTRIBUTION = "Data provided by GIOŚ"
SCAN_INTERVAL = DEFAULT_SCAN_INTERVAL
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up GIOS air-quality entities from a config entry."""
    gios_data = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id]
    async_add_entities(
        [GiosAirQuality(gios_data, config_entry.data[CONF_NAME])], True
    )
def round_state(func):
    """Decorator: round float return values to the nearest int.

    Non-float results pass through unchanged.  Uses functools.wraps so the
    wrapped accessor keeps its __name__/__doc__ (the original dropped them,
    which confuses introspection of the decorated properties).
    """
    from functools import wraps

    @wraps(func)
    def _decorator(self):
        res = func(self)
        if isinstance(res, float):
            return round(res)
        return res

    return _decorator
class GiosAirQuality(AirQualityEntity):
    """Define an GIOS sensor."""

    def __init__(self, gios, name):
        """Initialize."""
        self.gios = gios
        self._name = name
        # Latest readings; all None until the first successful update.
        self._aqi = None
        self._co = None
        self._no2 = None
        self._o3 = None
        self._pm_2_5 = None
        self._pm_10 = None
        self._so2 = None
        # Extra state attributes (station name + per-pollutant index levels).
        self._attrs = {}

    @property
    def name(self):
        """Return the name."""
        return self._name

    @property
    def icon(self):
        """Return the icon."""
        if self._aqi in ICONS_MAP:
            return ICONS_MAP[self._aqi]
        return "mdi:blur"

    @property
    def air_quality_index(self):
        """Return the air quality index."""
        return self._aqi

    @property
    @round_state
    def particulate_matter_2_5(self):
        """Return the particulate matter 2.5 level."""
        return self._pm_2_5

    @property
    @round_state
    def particulate_matter_10(self):
        """Return the particulate matter 10 level."""
        return self._pm_10

    @property
    @round_state
    def ozone(self):
        """Return the O3 (ozone) level."""
        return self._o3

    @property
    @round_state
    def carbon_monoxide(self):
        """Return the CO (carbon monoxide) level."""
        return self._co

    @property
    @round_state
    def sulphur_dioxide(self):
        """Return the SO2 (sulphur dioxide) level."""
        return self._so2

    @property
    @round_state
    def nitrogen_dioxide(self):
        """Return the NO2 (nitrogen dioxide) level."""
        return self._no2

    @property
    def attribution(self):
        """Return the attribution."""
        return ATTRIBUTION

    @property
    def unique_id(self):
        """Return a unique_id for this entity."""
        return self.gios.station_id

    @property
    def available(self):
        """Return True if entity is available."""
        return self.gios.available

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        self._attrs[ATTR_STATION] = self.gios.station_name
        return self._attrs

    async def async_update(self):
        """Get the data from GIOS."""
        await self.gios.async_update()

        if self.gios.available:
            # Different measuring stations have different sets of sensors. We don't know
            # what data we will get.
            if "AQI" in self.gios.sensors:
                self._aqi = self.gios.sensors["AQI"]["value"]
            if "CO" in self.gios.sensors:
                self._co = self.gios.sensors["CO"]["value"]
                self._attrs[f"{ATTR_CO}_index"] = self.gios.sensors["CO"]["index"]
            if "NO2" in self.gios.sensors:
                self._no2 = self.gios.sensors["NO2"]["value"]
                self._attrs[f"{ATTR_NO2}_index"] = self.gios.sensors["NO2"]["index"]
            if "O3" in self.gios.sensors:
                self._o3 = self.gios.sensors["O3"]["value"]
                self._attrs[f"{ATTR_OZONE}_index"] = self.gios.sensors["O3"]["index"]
            if "PM2.5" in self.gios.sensors:
                self._pm_2_5 = self.gios.sensors["PM2.5"]["value"]
                self._attrs[f"{ATTR_PM_2_5}_index"] = self.gios.sensors["PM2.5"][
                    "index"
                ]
            if "PM10" in self.gios.sensors:
                self._pm_10 = self.gios.sensors["PM10"]["value"]
                self._attrs[f"{ATTR_PM_10}_index"] = self.gios.sensors["PM10"]["index"]
            if "SO2" in self.gios.sensors:
                self._so2 = self.gios.sensors["SO2"]["value"]
                self._attrs[f"{ATTR_SO2}_index"] = self.gios.sensors["SO2"]["index"]
| StarcoderdataPython |
4825641 | # encoding: utf-8
'''
@author: <NAME>
@contact: <EMAIL>
@software: nef
@file: app_recon_full.py
@date: 5/13/2019
@desc:
'''
import click
import srfnef as nef
import os
import pypandoc
import tensorflow as tf
@click.command()
@click.argument('json_path', type = click.Path(exists = True))
@click.option('--outdir', '-o', default = None)
def recon_full(json_path, outdir):
    # CLI entry: run the MLEM reconstruction described by json_path and
    # write the image plus a PDF report into outdir (default: the json's
    # directory).  Comments are used instead of a docstring so click's
    # --help output stays unchanged.
    tf.enable_eager_execution()
    if outdir is None:
        outdir = os.path.dirname(os.path.abspath(json_path))
    else:
        outdir = os.path.abspath(outdir)
    if not os.path.isdir(outdir):
        os.mkdir(outdir, mode = 0o777)
    mlem_full_without_data = nef.io.json_load(nef.functions.MlemFull, json_path)
    mlem_obj = nef.io.load_all_data(mlem_full_without_data)
    img = mlem_obj()
    from srfnef.corrections.scattering.scatter import scatter_preprocess
    if mlem_obj.scatter_corr is not None:
        # Pre-compute scatter inputs for the later scatter-corrected pass.
        scatter_preprocess(mlem_obj.scanner,mlem_obj.listmode,img,mlem_obj.atten_corr.u_map,outdir)
    nef.save(img, outdir + '/recon_image.hdf5')
    # nef.save(mlem_full_without_data, outdir + '/mlem_full.hdf5')
    nef.doc_gen(mlem_full_without_data, img, outdir, outdir + '/recon_doc.md')
    pypandoc.convert_file(outdir + '/recon_doc.md', 'pdf',
                          outputfile = outdir + '/recon_doc.md' + '.pdf')
    return img
@click.command()
@click.argument('json_path', type = click.Path(exists = True))
@click.option('--outdir', '-o', default = None)
def recon_with_scatter(json_path, outdir):
    # Re-run MLEM with a scatter-corrected listmode, starting from the
    # image produced by recon_full (expects <outdir>/recon_image.hdf5).
    tf.enable_eager_execution()
    if outdir is None:
        outdir = os.path.dirname(os.path.abspath(json_path))
    else:
        outdir = os.path.abspath(outdir)
    mlem_full_without_data = nef.io.json_load(nef.functions.MlemFull, json_path)
    mlem_obj = nef.io.load_all_data(mlem_full_without_data)
    img = nef.load(nef.Image, outdir + '/recon_image.hdf5')
    if mlem_obj.scatter_corr is not None:
        listmode = mlem_obj.scatter_corr(img, mlem_obj.atten_corr.u_map,
                                         mlem_obj.scanner, outdir)
        # BUG FIX: `scanner` was an undefined name here (NameError at
        # runtime); use the scanner belonging to the loaded object.
        listmode = nef.ListmodeCompress(mlem_obj.scanner)(listmode)
        mlem_full = nef.MlemFull(mlem_obj.n_iter, mlem_obj.image_config,
                                 mlem_obj.scanner, listmode)
        img = mlem_full()
    nef.save(img, outdir + '/recon_image_scatter.hdf5')
    nef.save(mlem_full_without_data, outdir + '/mlem_full_scatter.hdf5')
    nef.doc_gen(mlem_full_without_data, img, outdir, outdir + '/recon_doc_scatter.md')
    pypandoc.convert_file(outdir + '/recon_doc_scatter.md', 'pdf',
                          outputfile = outdir + '/recon_doc_scatter.md' + '.pdf')
    return img
if __name__ == '__main__':
    # Only the full reconstruction is the direct CLI entry point;
    # recon_with_scatter is invoked separately after it has run.
    recon_full()
| StarcoderdataPython |
3300260 | <gh_stars>0
def network_connections(data):
    """Build Connected_To relations (source ip -> destination domain).

    Connections missing either endpoint are skipped.
    """
    provider = data['vendorInformation']['provider']
    relations = []
    for connection in data['networkConnections']:
        domain = connection['destinationDomain']
        address = connection['sourceAddress']
        if domain is None or address is None:
            continue
        relations.append({
            'origin': provider,
            'related': {
                'type': 'domain',
                'value': domain
            },
            'relation': 'Connected_To',
            'source': {
                'type': 'ip',
                'value': address
            }
        })
    return relations
def file_states(data):
    """Build File_Name_Of and File_Path_Of relations keyed on sha256.

    Output order matches the original: all name relations first, then all
    path relations.  Entries missing the field or the hash are skipped.
    """
    provider = data["vendorInformation"]["provider"]

    def relations(source_type, relation, field):
        out = []
        for state in data['fileStates']:
            value = state[field]
            file_hash = state['fileHash']
            if value is None or file_hash is None or file_hash['hashValue'] is None:
                continue
            out.append({
                "origin": provider,
                "related": {
                    "type": "sha256",
                    "value": file_hash['hashValue']
                },
                "relation": relation,
                "source": {
                    "type": source_type,
                    "value": value
                }
            })
        return out

    return (relations("file_name", "File_Name_Of", 'name')
            + relations("file_path", "File_Path_Of", 'path'))
| StarcoderdataPython |
3306585 | import numpy as np
from enum import Enum
from random import randint
# Genetic-algorithm hyper-parameters.
MAX_GENERATION = 1000                       # hard stop for the evolution loop
POPULATION_SIZE = 20                        # chromosomes per generation
CHROMOSOME_LENGTH = 20                      # moves per chromosome (grows over time)
NUMBER_OF_SELECTED_CHROMOSOME = 3           # elite size kept each generation
INCREMENT_RANGE_FOR_CHROMOSOME_LENGTH = 50  # generations between growth steps
INCREMENT_SIZE_FOR_CHROMOSOME_LENGTH = 5    # moves added per growth step
# 3x3 puzzle states; the value 9 marks the blank tile.
goal = np.array([[1, 2, 3], [8, 9, 4], [7, 6, 5]])
initial = np.array([[1, 3, 2], [8, 9, 4], [5, 6, 7]])
class Direction(Enum):
    """The four slide directions for the blank tile of the 3x3 puzzle."""

    U = 1
    R = 2
    D = 3
    L = 4

    def isEqual(self, direction):
        """True when *direction* is the same member."""
        return self == direction

    def isOpposite(self, direction):
        """True for the U/D and L/R pairs (enum values differ by 2)."""
        return abs(self.value - direction.value) == 2

    def getOpposite(self):
        """Return the member on the same axis pointing the other way."""
        if self.value > 2:
            return Direction(self.value - 2)
        return Direction(self.value + 2)

    def getDifferent(self):
        """Return a uniformly random member other than *self*."""
        candidates = [member for member in Direction if member is not self]
        return candidates[randint(0, 2)]

    def getDifferentAxis(self):
        """Return a random member on the perpendicular axis."""
        opposite = self.getOpposite()
        candidates = [
            member for member in Direction
            if member is not self and member is not opposite
        ]
        return candidates[randint(0, 1)]
class Puzzle:
    """Mutable 3x3 sliding puzzle; the value 9 marks the blank tile."""

    def __init__(self):
        # Every instance starts from a copy of the module-level `initial`.
        self.puzzle = np.array(initial)

    def move(self, direction):
        """Slide the blank one step; raises IndexError at the board edge."""
        if not isinstance(direction, Direction):
            raise TypeError('direction must be an instance of Direction Enum')
        # Locate the blank (np.where returns length-1 index arrays).
        x, y = np.where(self.puzzle == 9)
        if direction == Direction.U:
            if x == 0:
                raise IndexError("the x coordinate cannot be a negative value")
            self.__swap([x, y], [x-1, y])
        elif direction == Direction.R:
            if y == 2:
                raise IndexError(
                    "the y coordinate exceeds the range of the puzzle.")
            self.__swap([x, y], [x, y+1])
        elif direction == Direction.D:
            if x == 2:
                raise IndexError(
                    "the x coordinate exceeds the range of the puzzle.")
            self.__swap([x, y], [x+1, y])
        elif direction == Direction.L:
            if y == 0:
                raise IndexError("the y coordinate cannot be a negative value")
            self.__swap([x, y], [x, y-1])

    def __swap(self, coordinate1, coordinate2):
        # Exchange two tiles in place.
        tmp = self.puzzle[coordinate1[0], coordinate1[1]]
        self.puzzle[coordinate1[0], coordinate1[1]
                    ] = self.puzzle[coordinate2[0], coordinate2[1]]
        self.puzzle[coordinate2[0], coordinate2[1]] = tmp

    def fitness(self):
        """Manhattan distance of every tile (blank excluded) from `goal`.

        0 means solved; lower is better.
        """
        mdis = 0
        for i in range(3):
            for j in range(3):
                if (goal[i, j] == 9):
                    continue
                x, y = np.where(self.puzzle == goal[i, j])
                mdis += abs(x[0]-i) + abs(y[0]-j)
        return mdis

    def __str__(self):
        return str(self.puzzle)
def createChromosome(length=CHROMOSOME_LENGTH):
    """Return a random sequence of *length* moves."""
    directions = list(Direction)
    return [directions[randint(0, 3)] for _ in range(length)]
def initializePopulation():
    """Return POPULATION_SIZE random chromosomes of the default length."""
    return [createChromosome(CHROMOSOME_LENGTH) for _ in range(POPULATION_SIZE)]
# Repairs a chromosome (a list of Direction) in place:
# - on a 3x3 board the same direction cannot usefully repeat 3 times in a row
# - a move immediately followed by its opposite is pointless
def mutation(chromosome):
    length = len(chromosome)
    if length < 2:
        return chromosome
    if length < CHROMOSOME_LENGTH:
        # Pad short chromosomes back up to the current target length.
        chromosome += createChromosome(CHROMOSOME_LENGTH - length)
    if chromosome[0].isOpposite(chromosome[1]):
        chromosome[1] = chromosome[1].getDifferent()
    # NOTE(review): `length` is the pre-padding size, so the padded tail is
    # never repaired here — confirm this is intended.
    for i in range(2, length):
        if chromosome[i].isEqual(chromosome[i-2]) and chromosome[i].isEqual(chromosome[i-1]):
            # third identical move in a row -> switch to the other axis
            chromosome[i] = chromosome[i-1].getDifferentAxis()
        elif chromosome[i].isOpposite(chromosome[i-1]):
            # undoing the previous move -> pick any other direction
            chromosome[i] = chromosome[i-1].getDifferent()
# Applies <chromosome> to the initial puzzle.  Whenever a move would step
# outside the board, it is replaced with a move on the perpendicular axis.
def applyChromosomeToPuzzle(chromosome):
    puzzle = Puzzle()
    i = 0
    while i < len(chromosome):
        try:
            if puzzle.fitness() == 0:
                # Solved early: return only the prefix actually used.
                return [chromosome[:i], puzzle]
            puzzle.move(chromosome[i])
            i += 1
        except IndexError:
            chromosome[i] = chromosome[i].getDifferentAxis()
    return [chromosome, puzzle]
def crossover(chromosomes, index=0):
    """Append pairwise crossings of the elite chromosomes, recursively."""
    if NUMBER_OF_SELECTED_CHROMOSOME == index + 1:
        return
    for other in range(index + 1, NUMBER_OF_SELECTED_CHROMOSOME):
        chromosomes += crossing(chromosomes[index], chromosomes[other])
    crossover(chromosomes, index + 1)
def crossing(chromosome1, chromosome2):
    """Return ten offspring from one- and two-point crossovers of the pair."""
    # Two cut points: one in each half of the chromosome.
    i = randint(0, CHROMOSOME_LENGTH // 2 - 1)
    j = randint(CHROMOSOME_LENGTH // 2, CHROMOSOME_LENGTH)
    return [
        chromosome1[:i] + chromosome2[i:],
        chromosome2[:i] + chromosome1[i:],
        chromosome1[:j] + chromosome2[j:],
        chromosome2[:j] + chromosome1[j:],
        chromosome1[:i] + chromosome2[i:j] + chromosome1[j:],
        chromosome2[:i] + chromosome1[i:j] + chromosome2[j:],
        chromosome1[j:] + chromosome1[:i] + chromosome2[i:j],
        chromosome2[j:] + chromosome2[:i] + chromosome1[i:j],
        chromosome2[i:j] + chromosome1[:i] + chromosome1[j:],
        chromosome1[i:j] + chromosome2[:i] + chromosome2[j:],
    ]
# Returns the NUMBER_OF_SELECTED_CHROMOSOME best chromosomes.
# return: [[chromosome, puzzle], ...]
def selection(chromosomes):
    scored = [applyChromosomeToPuzzle(chromosome) for chromosome in chromosomes]
    scored.sort(key=lambda pair: pair[1].fitness())
    return scored[:NUMBER_OF_SELECTED_CHROMOSOME]
def getStrOfChromosome(chromosome):
    """Return the chromosome as 'U, R, D, ...' using the Direction names."""
    # The f-string wrapper around the join result was a no-op; return it
    # directly.
    return ', '.join(direction.name for direction in chromosome)
def solution():
    """Evolve chromosomes until the puzzle is solved or MAX_GENERATION hits.

    Mutates the module-level CHROMOSOME_LENGTH as the search progresses and
    prints a per-generation fitness trace plus a final summary.
    """
    global CHROMOSOME_LENGTH
    # bestmdis starts above the maximum possible Manhattan distance.
    generation, numOfIncrement, bestmdis = 0, 0, 36
    bestSelection = []
    population = initializePopulation()
    while generation < MAX_GENERATION:
        generation += 1
        # mutation (in-place repair of each chromosome)
        for item in (population):
            mutation(item)
        # selection: keep only the elite chromosomes
        slct = selection(population)
        mdis = slct[0][1].fitness()
        population = [item[0] for item in slct]
        # track the best chromosome seen so far
        if (mdis < bestmdis):
            bestmdis = mdis
            bestSelection = slct[0]
        # periodically grow the chromosome length
        if (generation//INCREMENT_RANGE_FOR_CHROMOSOME_LENGTH > numOfIncrement):
            numOfIncrement += 1
            CHROMOSOME_LENGTH += INCREMENT_SIZE_FOR_CHROMOSOME_LENGTH
        print(f"generation: {generation} | fitness: {mdis}")
        # solved
        if (mdis == 0):
            break
        crossover(population)
    print("---------------------------")
    print("initial")
    print(initial)
    print()
    print("goal")
    print(goal)
    print("---------------------------")
    print(f"fitness: {bestSelection[1].fitness()}")
    print(f"best chromosome\n{getStrOfChromosome(bestSelection[0])}")
    print(f"final status\n{bestSelection[1]}")


if __name__ == "__main__":
    solution()
| StarcoderdataPython |
180640 | <reponame>Zhenye-Na/leetcode
#
# @lc app=leetcode id=25 lang=python3
#
# [25] Reverse Nodes in k-Group
#
# https://leetcode.com/problems/reverse-nodes-in-k-group/description/
#
# algorithms
# Hard (46.35%)
# Likes: 4377
# Dislikes: 423
# Total Accepted: 383.3K
# Total Submissions: 813.3K
# Testcase Example: '[1,2,3,4,5]\n2'
#
# Given a linked list, reverse the nodes of a linked list k at a time and
# return its modified list.
#
# k is a positive integer and is less than or equal to the length of the linked
# list. If the number of nodes is not a multiple of k then left-out nodes, in
# the end, should remain as it is.
#
# You may not alter the values in the list's nodes, only nodes themselves may
# be changed.
#
#
# Example 1:
#
#
# Input: head = [1,2,3,4,5], k = 2
# Output: [2,1,4,3,5]
#
#
# Example 2:
#
#
# Input: head = [1,2,3,4,5], k = 3
# Output: [3,2,1,4,5]
#
#
# Example 3:
#
#
# Input: head = [1,2,3,4,5], k = 1
# Output: [1,2,3,4,5]
#
#
# Example 4:
#
#
# Input: head = [1], k = 1
# Output: [1]
#
#
#
# Constraints:
#
#
# The number of nodes in the list is in the range sz.
# 1 <= sz <= 5000
# 0 <= Node.val <= 1000
# 1 <= k <= sz
#
#
#
# Follow-up: Can you solve the problem in O(1) extra memory space?
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def reverseKGroup(self, head: ListNode, k: int) -> ListNode:
        """Reverse the list k nodes at a time; a short tail stays as-is."""
        if not head:
            return head

        # Dummy node lets the first group be handled like any other.
        dummy = ListNode(0, head)
        head = dummy
        # Each call reverses one group and returns the node *before* the
        # next group, or None when fewer than k nodes remain.
        while head:
            head = self._reverseK(head, k)

        return dummy.next

    def _reverseK(self, node, k):
        """Reverse the k nodes after *node*; return the new group tail."""
        # n1: first node of the group; nk walks k steps to the group's end.
        n1, nk = node.next, node
        for _ in range(k):
            nk = nk.next
            if nk is None:
                # Fewer than k nodes left: leave the tail untouched.
                return nk

        nk_next = nk.next

        # reverse the group's nodes in place
        prev, curt = None, n1
        while curt != nk_next:
            tmp = curt.next
            curt.next = prev
            prev = curt
            curt = tmp

        # reconnect the reversed group into the surrounding list
        node.next = nk
        n1.next = nk_next
        return n1
# @lc code=end
| StarcoderdataPython |
1665996 | <reponame>Hardikris/moodlepy<filename>moodle/core/webservice/advanced_feature.py
from moodle.attr import dataclass
@dataclass
class AdvancedFeatures:
    """A single Moodle advanced-feature flag: its name and integer value."""
    name: str   # feature identifier
    value: int  # numeric setting / toggle value

    def __str__(self) -> str:
        # Display as the feature name only.
        return self.name
| StarcoderdataPython |
3383984 | <filename>test/tests/polymorphism_small.py
class Union(object):
    """Averages its children's scores, damped by the child count squared."""

    def __init__(self, subs):
        self.subs = subs

    def score(self):
        total = 0
        for sub in self.subs:
            total += sub.score()
        return total / len(self.subs) ** 2.0
class Simple(object):
    """Leaf node with a constant score."""

    def score(self):
        return 1.0
class Poly1(object):
    """Wrapper node that forwards scoring to its single child."""

    def __init__(self, sub):
        self.sub = sub

    def score(self):
        return self.sub.score()
d = 0.0


def rand():
    """Deterministic pseudo-random float in [0, 1)."""
    # Almost cryptographically secure?
    global d
    nxt = (d * 1.24591 + .195) % 1
    d = nxt
    return nxt
def make_random(x):
    """Recursively build a random tree of nodes; *x* is the depth budget.

    The two rand() calls happen in a fixed order, which the deterministic
    generator depends on — do not reorder them.
    """
    if rand() > x:
        return Simple()
    if rand() < 0.3:
        # Union halves (roughly) the remaining budget for each child.
        return Union([make_random(0.5 * x - 1), make_random(0.5 * x - 1)])
    return Poly1(make_random(x - 1))
| StarcoderdataPython |
6933 | <reponame>mklew/quickstart-data-lake-qubole<filename>assets/utils/config.py
from configparser import ConfigParser
# Config keys whose values must be coerced from str to int after parsing.
CONFIG_INT_KEYS = {
    'hadoop_max_nodes_count',
    'hadoop_ebs_volumes_count',
    'hadoop_ebs_volume_size',
    'spark_max_nodes_count',
    'spark_ebs_volumes_count',
    'spark_ebs_volume_size'
}


def read_config(config_path):
    """Flatten an INI file into one dict, int-coercing known keys.

    When the same key appears in more than one section, the later
    section wins.
    """
    parser = ConfigParser()
    parser.read(config_path)
    config = {}
    for section in parser.sections():
        for key, value in parser.items(section):
            config[key] = int(value) if key in CONFIG_INT_KEYS else value
    return config
| StarcoderdataPython |
#!/usr/bin/env python
"""Install script for GeoTweet.

The previous version used ``pip.req.parse_requirements``, a private pip
API that was removed in pip 10; requirements.txt is now read directly.
"""
from setuptools import setup, find_packages


def read_requirements(path='requirements.txt'):
    # One requirement per line; skip blank lines and comments.
    with open(path) as req_file:
        return [
            line.strip()
            for line in req_file
            if line.strip() and not line.strip().startswith('#')
        ]


reqs = read_requirements()

setup(name="GeoTweet",
      version="0.1",
      description="Program to search tweets, tag, hashtag, user, with locations and maps",
      license="MIT",
      author="The pirate Group",
      author_email="NULL",
      url="https://github.com/Pinperepette/GeoTweet",
      install_requires=reqs,
      keywords="twitter geo",
      classifiers=[
          'Development Status :: 1 - Beta',
          'Topic :: Forensics :: Analysis :: Hacking :: Social Engineer',
          'License :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
      ],
      zip_safe=True)
178028 | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: ExecuteTime
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# -
import numpy as np
import pandas as pd
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# -
import seaborn as sns
from tqdm.auto import tqdm
DATA_PATH = '../data'
| StarcoderdataPython |
3378371 | import argparse
import pandas as pd
import numpy as np
import re
import nltk
from sklearn.preprocessing import LabelEncoder
from sklearn import utils
from ..utils import serialize, deserialize
from .tokenization import tokenize_articles, nan_to_str, convert_tokens_to_int, get_words_freq
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.models.phrases import Phrases, Phraser
from nltk.tokenize import word_tokenize
def create_args_parser():
    """Build the CLI parser for the doc2vec preprocessing script.

    All three options are plain string paths defaulting to ''.
    """
    parser = argparse.ArgumentParser()
    options = [
        ('--input_articles_csv_path',
         'Input path of the news CSV file.'),
        ('--input_label_encoders_path',
         'Input path for a pickle with label encoders (article_id, category_id, publisher_id).'),
        ('--output_article_content_embeddings',
         ''),
    ]
    for flag, help_text in options:
        parser.add_argument(flag, default='', help=help_text)
    return parser
#############################################################################################
#Based on text cleaner used to generate Brazilian Portuguese word embeddings:
#https://github.com/nathanshartmann/portuguese_word_embeddings/blob/master/preprocessing.py
# Punctuation list
# Precompiled patterns used by clean_str() below.  They are applied in a
# fixed order there, so changes here must be checked against that pipeline.
punctuations = re.escape('!"#%\'()*+,./:;<=>?@[\\]^_`{|}~')

re_remove_brackets = re.compile(r'\{.*\}')
re_remove_html = re.compile(r'<(\/|\\)?.+?>', re.UNICODE)
re_transform_numbers = re.compile(r'\d', re.UNICODE)
re_transform_emails = re.compile(r'[^\s]+@[^\s]+', re.UNICODE)
re_transform_url = re.compile(r'(http|https)://[^\s]+', re.UNICODE)

# Different quotes are used.
re_quotes_1 = re.compile(r"(?u)(^|\W)[‘’′`']", re.UNICODE)
re_quotes_2 = re.compile(r"(?u)[‘’`′'](\W|$)", re.UNICODE)
re_quotes_3 = re.compile(r'(?u)[‘’`′“”]', re.UNICODE)
re_dots = re.compile(r'(?<!\.)\.\.(?!\.)', re.UNICODE)
re_punctuation = re.compile(r'([,";:]){2},', re.UNICODE)
re_hiphen = re.compile(r' -(?=[^\W\d_])', re.UNICODE)
re_tree_dots = re.compile(u'…', re.UNICODE)

# Differents punctuation patterns are used.
re_punkts = re.compile(r'(\w+)([%s])([ %s])' %
                       (punctuations, punctuations), re.UNICODE)
re_punkts_b = re.compile(r'([ %s])([%s])(\w+)' %
                         (punctuations, punctuations), re.UNICODE)
re_punkts_c = re.compile(r'(\w+)([%s])$' % (punctuations), re.UNICODE)
re_changehyphen = re.compile(u'–')
re_doublequotes_1 = re.compile(r'(\"\")')
re_doublequotes_2 = re.compile(r'(\'\')')
re_trim = re.compile(r' +', re.UNICODE)
def clean_str(string):
    """Apply all regex above to a given string.

    Normalizes a (Brazilian Portuguese) text fragment: lowercases,
    strips HTML/brackets, masks digits/URLs/emails, unifies quote and
    hyphen characters, spaces out punctuation, and collapses whitespace.
    The substitutions are order-sensitive.
    """
    string = string.replace('\n', ' ')
    string = string.lower()
    string = re_tree_dots.sub('...', string)
    # Ellipses produced by the line above are then dropped entirely.
    string = re.sub('\.\.\.', '', string)
    string = re_remove_brackets.sub('', string)
    string = re_changehyphen.sub('-', string)
    string = re_remove_html.sub(' ', string)
    string = re_transform_numbers.sub('0', string)
    string = re_transform_url.sub('URL', string)
    string = re_transform_emails.sub('EMAIL', string)
    string = re_quotes_1.sub(r'\1"', string)
    string = re_quotes_2.sub(r'"\1', string)
    string = re_quotes_3.sub('"', string)
    # All double quotes are ultimately removed.
    string = re.sub('"', '', string)
    string = re_dots.sub('.', string)
    string = re_punctuation.sub(r'\1', string)
    string = re_hiphen.sub(' - ', string)
    string = re_punkts.sub(r'\1 \2 \3', string)
    string = re_punkts_b.sub(r'\1 \2 \3', string)
    string = re_punkts_c.sub(r'\1 \2', string)
    string = re_doublequotes_1.sub('\"', string)
    string = re_doublequotes_2.sub('\'', string)
    string = re_trim.sub(' ', string)
    return string.strip()
# Punkt sentence tokenizer for Portuguese (requires the NLTK data package
# to be downloaded beforehand).
sent_tokenizer = nltk.data.load('tokenizers/punkt/portuguese.pickle')


def clean_and_filter_first_sentences(string, first_sentences=8):
    """Return up to *first_sentences* cleaned, well-formed sentences.

    A sentence is kept only if it has at least four words (three spaces)
    and ends with sentence-final punctuation; each kept sentence is
    normalized via clean_str().
    """
    # Tokenize sentences and remove short and malformed sentences.
    sentences = []
    for sent in sent_tokenizer.tokenize(string):
        if sent.count(' ') >= 3 and sent[-1] in ['.', '!', '?', ';']:
            sentences.append(clean_str(sent))
            if len(sentences) == first_sentences:
                break
    return ' '.join(sentences)
#############################################################################################
def load_input_csv(path):
    """Load the news CSV and add a cleaned 'full_text' column.

    'full_text' concatenates title, caption and body (NaNs mapped to ''),
    then keeps only the first well-formed sentences via
    clean_and_filter_first_sentences().
    """
    news_df = pd.read_csv(path, encoding='utf-8'
                          #,nrows=1000
                          )

    # Concatenating all available text
    news_df['full_text'] = (news_df['title'].apply(nan_to_str) + ". " + \
                            news_df['caption'].apply(nan_to_str) + ". " + \
                            news_df['body'].apply(nan_to_str)
                            ).apply(clean_and_filter_first_sentences)
    return news_df
'''
def process_cat_features(dataframe):
article_id_encoder = LabelEncoder()
dataframe['id_encoded'] = article_id_encoder.fit_transform(dataframe['id'])
#category_id_encoder = LabelEncoder()
#dataframe['categoryid_encoded'] = category_id_encoder.fit_transform(dataframe['categoryid'])
#domainid_encoder = LabelEncoder()
#dataframe['domainid_encoded'] = domainid_encoder.fit_transform(dataframe['domainid'])
return article_id_encoder#, category_id_encoder, domainid_encoder
def save_article_cat_encoders(output_path, article_id_encoder, category_id_encoder, domainid_encoder):
to_serialize = {'article_id': article_id_encoder,
'category_id': category_id_encoder,
'publisher_id': domainid_encoder}
serialize(output_path, to_serialize)
'''
def load_acr_preprocessing_assets(acr_label_encoders_path):
    """Deserialize and return the ACR label-encoders pickle.

    The result is a dict of sklearn LabelEncoders; only 'article_id' is
    inspected here, for a sanity log line.
    """
    acr_label_encoders = deserialize(acr_label_encoders_path)
    print("Read article id label encoder: {}".format(len(acr_label_encoders['article_id'].classes_)))
    return acr_label_encoders
def export_article_content_embeddings(content_article_embeddings, output_article_content_embeddings):
    """Serialize the article-embedding matrix to the given output path."""
    output_path = output_article_content_embeddings
    print('Exporting ACR Label Encoders, Article metadata and embeddings to {}'.format(output_path))
    # Only the embeddings matrix is written; encoders/metadata are exported
    # elsewhere (see the commented alternative below).
    #to_serialize = (acr_label_encoders, articles_metadata_df, content_article_embeddings)
    to_serialize = content_article_embeddings
    serialize(output_path, to_serialize)
def main():
    """End-to-end doc2vec pipeline for news articles.

    Loads the articles CSV, aligns rows with the pre-built article_id
    label encoder, tokenizes, learns bigram/trigram phrases, trains a
    Doc2Vec (DM-mean) model, and exports a matrix of document vectors
    ordered by encoded article id.
    """
    parser = create_args_parser()
    args = parser.parse_args()

    print('Loading news article CSV: {}'.format(args.input_articles_csv_path))
    news_df = load_input_csv(args.input_articles_csv_path)
    print('N. docs: {}'.format(len(news_df)))

    print('ACR label encoder: {}'.format(args.input_articles_csv_path))
    acr_label_encoders = load_acr_preprocessing_assets(args.input_label_encoders_path)
    news_df['id_encoded'] = acr_label_encoders['article_id'].transform(news_df['id'])

    # Sorting results by the encoded article Id, so that the matrix coincides and checking consistency
    news_df = news_df.sort_values('id_encoded')
    ids_encoded = news_df['id_encoded'].values.flatten()
    print('ids_encoded.shape', ids_encoded.shape)
    # Consistency checks: ids must be a dense 0..N-1 range with no NaNs.
    assert len(news_df) == len(acr_label_encoders['article_id'].classes_)
    assert news_df['id_encoded'].values[0] == 0
    assert news_df['id_encoded'].max()+1 == len(news_df)
    assert len(news_df[pd.isnull(news_df['id_encoded'])]) == 0
    del acr_label_encoders

    '''
    print('Encoding categorical features')
    article_id_encoder, category_id_encoder, domainid_encoder = process_cat_features(news_df)

    print('Exporting LabelEncoders of categorical features: {}'.format(args.output_label_encoders))
    save_article_cat_encoders(args.output_label_encoders,
                              article_id_encoder,
                              category_id_encoder,
                              domainid_encoder)
    '''

    print('Tokenizing articles...')
    tokenized_articles = tokenize_articles(news_df['full_text'])
    del news_df

    #print('Computing word frequencies...')
    #words_freq = get_words_freq(tokenized_articles)
    #print('Corpus vocabulary size: {}'.format(len(words_freq)))

    # Dicovering frequent bigrams (and then trigrams on top of them).
    phrases = Phrases(tokenized_articles)
    bigram = Phraser(phrases)
    tg_phrases = Phrases(bigram[tokenized_articles])
    trigram = Phraser(tg_phrases)

    print('Processing documents...')
    # Tag each document with its positional index; because the frame was
    # sorted above, position i corresponds to encoded article id i.
    tagged_data = [TaggedDocument(words=w, tags=[i]) for i, w in enumerate(trigram[bigram[tokenized_articles]])]

    print('Training doc2vec')
    max_epochs = 30
    vec_size = 250
    alpha = 0.025
    #cores = multiprocessing.cpu_count()

    #DMM (Distributed Memory Mean) - See https://towardsdatascience.com/another-twitter-sentiment-analysis-with-python-part-6-doc2vec-603f11832504
    model = Doc2Vec(vector_size=vec_size,
                    alpha=alpha,
                    min_alpha=alpha,
                    window=5,
                    negative=5,
                    min_count=2,
                    max_vocab_size=100000,
                    dm = 1,
                    dm_mean=1,
                    workers=6)

    model.build_vocab(tagged_data)

    # Manual epoch loop so the learning rate can be decayed by hand.
    for epoch in range(max_epochs):
        print('iteration {0}'.format(epoch))
        model.train(tagged_data,
                    #utils.shuffle([x for x in tagged_data]),
                    total_examples=model.corpus_count,
                    epochs=1)
        # decrease the learning rate
        model.alpha -= 0.0002
        # fix the learning rate, no decay
        model.min_alpha = model.alpha

    #print('Encoding categorical features')
    #article_id_encoder = process_cat_features(news_df)

    print('Concatenating article content embeddings, making sure that they are sorted by the encoded article id')
    #article_content_embeddings = np.vstack([model.docvecs[i] for i in list(news_df.index)])
    article_content_embeddings = np.vstack([model.docvecs[i] for i in ids_encoded])

    print('Exporting article content embeddings')
    export_article_content_embeddings(article_content_embeddings, args.output_article_content_embeddings)
#Ps: To experiment with these doc2vec embeddings, it is necessary to deserialize "acr_articles_metadata_embeddings.pickle", substitute the content_article_embedding and serialize for further usage by NAR module
#This is made by acr_module/notebooks/ACR_Results_Visualization_Gcom_doc2vec.ipynb
if __name__ == '__main__':
main()
'''
DATA_DIR=/media/data/projects/personal/doutorado/gcom && \
python3 -m acr.preprocessing.doc2vec_gcom \
--input_articles_csv_path ${DATA_DIR}/document_g1_exported/documents_g1_exported.csv \
--input_label_encoders_path ${DATA_DIR}/data_preprocessed/pickles_v4/acr_label_encoders_v4.pickle \
--output_article_content_embeddings ${DATA_DIR}/data_preprocessed/pickles_v4/article_content_embeddings_doc2vec_v4_trigrams_30epochs.pickle
''' | StarcoderdataPython |
126153 | <reponame>ralcabes/EventManager<filename>eventmanager/tasks/urls.py<gh_stars>1-10
from . import views
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.urls import path
# URL routes for task deletion/editing within an event.
# NOTE(review): the delete route ends with a trailing slash but the edit
# route does not — presumably unintentional; confirm before normalizing.
urlpatterns = [
    path(
        'events/<slug:slug>/task/<task>/delete/',
        views.delete_task,
        name='tasks.delete_task'),
    path(
        'events/<slug:slug>/task/<task>/edit',
        views.edit_task,
        name='tasks.edit_task'),
]
| StarcoderdataPython |
58590 | <reponame>KevinKnott/Coding-Review
# Search in Rotated Sorted Array: https://leetcode.com/problems/search-in-rotated-sorted-array/
# There is an integer array nums sorted in ascending order (with distinct values).
# Prior to being passed to your function, nums is rotated at an unknown pivot index k (0 <= k < nums.length) such that the resulting array is [nums[k], nums[k+1], ..., nums[n-1], nums[0], nums[1], ..., nums[k-1]] (0-indexed). For example, [0,1,2,4,5,6,7] might be rotated at pivot index 3 and become [4,5,6,7,0,1,2].
# Given the array nums after the rotation and an integer target, return the index of target if it is in nums, or -1 if it is not in nums.
# You must write an algorithm with O(log n) runtime complexity.
# This is a basic binary search the only difference is that we need to check if the values are sorted from high to mid so that we know wether to follow the traditional patern
# or to traverse the other way as the shift occurs on the other side
class Solution:
    """Search in a rotated, ascending array of distinct values (LC 33)."""

    def search(self, nums, target: int) -> int:
        """Return the index of *target* in *nums*, or -1 if absent.

        O(log n): at every step one half of the window is guaranteed to
        be sorted; decide which half and test whether *target* lies in it.
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            middle = left + (right - left) // 2
            if nums[middle] == target:
                return middle
            if nums[left] <= nums[middle]:
                # Left half [left, middle] is sorted.
                if nums[left] <= target <= nums[middle]:
                    right = middle - 1
                else:
                    left = middle + 1
            else:
                # Right half [middle, right] is sorted.
                if nums[middle] < target <= nums[right]:
                    left = middle + 1
                else:
                    right = middle - 1
        return -1
# So this is pretty standard the only weird hiccup is finding whether or not you have a sorted segment or not
# this runs in o(logn) and O(1)
# Score Card
# Did I need hints? N
# Did you finish within 30 min? 10
# Was the solution optimal? This is optimal
# Were there any bugs? No
# 5 5 5 5 = 5
| StarcoderdataPython |
1774609 | from functools import cached_property
from typing import Union
from wtforms import StringField
from app.forms.field_handlers.field_handler import FieldHandler
from app.forms.validators import MobileNumberCheck, ResponseRequired
MobileNumberValidatorTypes = list[Union[ResponseRequired, MobileNumberCheck]]
class MobileNumberHandler(FieldHandler):
    """Form-field handler for mobile phone number questions."""

    # Translation key for the "answer required" error message.
    MANDATORY_MESSAGE_KEY = "MANDATORY_MOBILE_NUMBER"

    @cached_property
    def validators(self) -> MobileNumberValidatorTypes:
        """Base validators plus a MobileNumberCheck (unless disabled).

        cached_property: computed once per handler instance.
        """
        validate_with: MobileNumberValidatorTypes = super().validators
        if not self.disable_validation:
            validate_with.append(MobileNumberCheck())
        return validate_with

    def get_field(self) -> StringField:
        """Build the WTForms StringField for this question."""
        return StringField(
            label=self.label, description=self.guidance, validators=self.validators
        )
| StarcoderdataPython |
3358819 | from ._base import Base
from loguru import logger
from os import path
import subprocess, os, platform
class Edit(Base):
    """Edit.

    Opens the default editor (run `echo $EDITOR`) to edit the package file.

    Usage: gitget edit [global options]

    Examples:
        gitget edit
    """

    def run(self):
        # Open the package-list file with the platform's default handler.
        filepath = self.get_package_list_filepath()
        # https://stackoverflow.com/questions/434597/open-document-with-default-os-application-in-python-both-in-windows-and-mac-os
        logger.debug("Attempting to open the text editor")
        try:
            if platform.system() == "Darwin":
                logger.debug("macOS found")
                subprocess.call(("open", filepath))
            elif platform.system() == "Windows":
                logger.debug("Windows found")
                # os.startfile is Windows-only.
                os.startfile(filepath)
            else:
                logger.debug("Assuming linux")
                subprocess.call(("xdg-open", filepath))
        except FileNotFoundError:
            # Raised when the opener binary itself is missing.
            logger.error(
                f"Could not open text editor, please edit manually: {filepath}"
            )
| StarcoderdataPython |
37679 | import requests
import json
from datetime import datetime
headers = {"Content-type": "application/json", "Accept": "text/plain"}
def addUser():
    """POST a hard-coded test user to the phone-data service.

    NOTE(review): the response is not checked; failures are silent.
    """
    url = "http://10.194.223.134:5000/add_user"
    data = {"username": "test_user"}
    requests.post(url, data=json.dumps(data), headers=headers)
def addMessage():
    """POST a sample timestamped message for test_user and print the reply.

    NOTE(review): r.json() will raise if the server replies with non-JSON.
    """
    url = "http://10.194.223.134:5000/phone_data/test_user"
    data = {"message": "My Sample Message", "timestamp": datetime.timestamp(datetime.now())}
    r = requests.post(url, data=json.dumps(data), headers=headers)
    print(r.json())
addUser()
addMessage()
| StarcoderdataPython |
1679738 | <reponame>bquirin/-Rightmove-webscraper
import requests
from bs4 import BeautifulSoup
import csv
def get_page(url):
    """Fetch *url* and return it parsed as a BeautifulSoup document.

    NOTE(review): the page is parsed (and returned) even when the HTTP
    status is an error; only a warning is printed.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    if not response.ok:
        print(f"Server responded with {response.status_code}")
    return soup
def get_data(soup):
    """Extract house listings from a Rightmove results page.

    Returns a list of dicts with 'description' and 'price' keys, one per
    search-result card found in the markup.
    """
    all_houses = []
    divs = soup.find_all("div", class_="l-searchResult is-list is-not-grid")
    for div in divs:
        all_houses.append({
            "description": div.find("h2", class_="propertyCard-title").text,
            "price": div.find("div", class_="propertyCard-priceValue").text
        })
    return all_houses
def write_csv(all_houses):
    """Write scraped house records to ``rightmoveData.csv``.

    :param all_houses: list of dicts with 'description' and 'price' keys
        (as produced by get_data()).
    :return: the string "Done", preserving the original API.

    BUG FIX: the previous version ignored *all_houses* entirely and passed
    bare strings to writer.writerows(), which the csv module splits into
    one character per column.
    """
    # newline="" per the csv module docs, to avoid blank rows on Windows.
    with open("rightmoveData.csv", "w", newline="") as file:
        writer = csv.DictWriter(file, fieldnames=["description", "price"])
        writer.writeheader()
        writer.writerows(all_houses)
    return "Done"
def main():
    """Scrape one fixed Rightmove search-results URL and save it as CSV."""
    # Hard-coded search query (Rightmove region + filters baked into the URL).
    url = ("https://www.rightmove.co.uk/property-for-sale/find.html?searchType"
           "=SALE&locationIdentifier=REGION%5E85310&insId=1&radius=0.0&min"
           "Price=&maxPrice=&minBedrooms=&maxBedrooms=&displayPropertyType="
           "&maxDaysSinceAdded=&_includeSSTC=on&sortByPriceDescending=&primary"
           "DisplayPropertyType=&secondaryDisplayPropertyType=&oldDisplayProp"
           "ertyType=&oldPrimaryDisplayPropertyType=&newHome=&auction=false")
    webpage = get_page(url)
    houses_list = get_data(webpage)
    write_csv(houses_list)
if __name__ == "__main__":
main()
print("Testing")
| StarcoderdataPython |
3258356 | <filename>threeML/utils/string_utils.py
def dash_separated_string_to_tuple(arg):
    """
    turn a dash separated string into a tuple

    :param arg: a dash separated string "a-b" (any spaces are removed first)
    :return: (a,b) — a tuple of the dash-delimited parts
    """
    # BUG FIX: str.split returns a list, but the function name and the
    # documented contract promise a tuple; convert explicitly.
    return tuple(arg.replace(" ", "").split("-"))
1693909 | <reponame>eventable/CalendarServer<filename>txdav/common/datastore/file.py
# -*- test-case-name: txdav.caldav.datastore.test.test_file -*-
##
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Common utility functions for a file based datastore.
"""
import sys
from twext.internet.decorate import memoizedKey
from twext.python.log import Logger
from txdav.xml.rfc2518 import GETContentType, HRef
from txdav.xml.rfc5842 import ResourceID
from txweb2.http_headers import generateContentType, MimeType
from txweb2.dav.resource import TwistedGETContentMD5, \
TwistedQuotaUsedProperty
from twisted.internet.defer import succeed, inlineCallbacks, returnValue
from twisted.python.util import FancyEqMixin
from twisted.python import hashlib
from twisted.python.failure import Failure
from twistedcaldav import customxml
from twistedcaldav.customxml import NotificationType
from twistedcaldav.notifications import NotificationRecord
from twistedcaldav.notifications import NotificationsDatabase as OldNotificationIndex
from txdav.caldav.icalendarstore import ICalendarStore
from txdav.common.datastore.common import HomeChildBase
from txdav.common.datastore.sql_tables import _BIND_MODE_OWN
from txdav.common.icommondatastore import HomeChildNameNotAllowedError, \
HomeChildNameAlreadyExistsError, NoSuchHomeChildError, \
InternalDataStoreError, ObjectResourceNameNotAllowedError, \
ObjectResourceNameAlreadyExistsError, NoSuchObjectResourceError, \
ECALENDARTYPE, EADDRESSBOOKTYPE
from txdav.common.idirectoryservice import IStoreDirectoryService
from txdav.common.inotifications import INotificationCollection, \
INotificationObject
from txdav.base.datastore.file import DataStoreTransaction, DataStore, writeOperation, \
hidden, isValidName, FileMetaDataMixin
from txdav.base.datastore.util import cached
from txdav.base.propertystore.base import PropertyName
from txdav.base.propertystore.none import PropertyStore as NonePropertyStore
from txdav.base.propertystore.xattr import PropertyStore as XattrPropertyStore
from errno import EEXIST, ENOENT
from zope.interface import implements, directlyProvides
import json
import uuid
from twistedcaldav.sql import AbstractSQLDatabase, db_prefix
import os
TOPPATHS = (
"calendars",
"addressbooks"
)
UIDPATH = "__uids__"
class _StubQueuer(object):
    """
    Placeholder for the SQL store's work queuer.  The file-based store has
    no queueing implementation (see the '@ivar queuer' note on
    CommonDataStore); this exists only so attribute access does not fail.
    """
    pass
class CommonDataStore(DataStore):
    """
    Shared logic for SQL-based data stores, between calendar and addressbook
    storage.

    @ivar _path: A L{CachingFilePath} referencing a directory on disk that
        stores all calendar and addressbook data for a group of UIDs.

    @ivar quota: the amount of space granted to each calendar home (in bytes)
        for storing attachments, or C{None} if quota should not be enforced.
    @type quota: C{int} or C{NoneType}

    @ivar _propertyStoreClass: The class (or callable object / factory) that
        produces an L{IPropertyStore} provider for a path.  This has the
        signature of the L{XattrPropertyStore} type: take 2 arguments
        C{(default-user-uid, path-factory)}, return an L{IPropertyStore}
        provider.

    @ivar queuer: For compatibility with SQL-based store; currently a
        non-functional implementation just for tests, but could be fixed to be
        backed by SQLite or something.
    """
    implements(ICalendarStore)

    def __init__(
        self,
        path,
        notifierFactories,
        directoryService,
        enableCalendars=True,
        enableAddressBooks=True,
        quota=(2 ** 20),
        propertyStoreClass=XattrPropertyStore
    ):
        """
        Create a store.

        @param path: a L{FilePath} pointing at a directory on disk.
        """
        # At least one of the two data types must be enabled.
        assert enableCalendars or enableAddressBooks
        super(CommonDataStore, self).__init__(path)
        self._directoryService = IStoreDirectoryService(directoryService) if directoryService is not None else None
        self.enableCalendars = enableCalendars
        self.enableAddressBooks = enableAddressBooks
        self._notifierFactories = notifierFactories if notifierFactories is not None else {}
        self._transactionClass = CommonStoreTransaction
        self._propertyStoreClass = propertyStoreClass
        self.quota = quota
        self._migrating = False
        self._enableNotifications = True
        self._newTransactionCallbacks = set()

        # FIXME: see '@ivar queuer' above.
        self.queuer = _StubQueuer()

    def directoryService(self):
        return self._directoryService

    def setDirectoryService(self, directoryService):
        self._directoryService = directoryService

    def callWithNewTransactions(self, callback):
        """
        Registers a method to be called whenever a new transaction is
        created.

        @param callback: callable taking a single argument, a transaction
        """
        self._newTransactionCallbacks.add(callback)

    def newTransaction(self, name='no name'):
        """
        Create a new transaction.

        @see: L{Transaction}
        """
        txn = self._transactionClass(
            self,
            name,
            self.enableCalendars,
            self.enableAddressBooks,
            self._notifierFactories if self._enableNotifications else None,
            self._migrating,
        )
        # Let registered observers see every transaction as it is created.
        for callback in self._newTransactionCallbacks:
            callback(txn)
        return txn

    @inlineCallbacks
    def inTransaction(self, label, operation, transactionCreator=None):
        """
        Perform the given operation in a transaction, committing or aborting as
        required.

        @param label: the label to pass to the transaction creator

        @param operation: a 1-arg callable that takes an L{IAsyncTransaction} and
            returns a value.

        @param transactionCreator: a 1-arg callable that takes a "label" arg and
            returns a transaction

        @return: a L{Deferred} that fires with C{operation}'s result or fails with
            its error, unless there is an error creating, aborting or committing
            the transaction.
        """
        if transactionCreator is None:
            transactionCreator = self.newTransaction

        txn = transactionCreator(label=label)

        try:
            result = yield operation(txn)
        except:
            # Abort on any failure; the captured Failure is returned (via
            # returnValue) so the caller's Deferred errbacks with it.
            f = Failure()
            yield txn.abort()
            returnValue(f)
        else:
            yield txn.commit()
            returnValue(result)

    @inlineCallbacks
    def _withEachHomeDo(self, enumerator, action, batchSize):
        """
        Implementation of L{ICalendarStore.withEachCalendarHomeDo} and
        L{IAddressBookStore.withEachAddressbookHomeDo}.
        """
        for txn, home in enumerator():
            try:
                yield action(txn, home)
            except:
                # Python 2 three-expression raise: re-raise with the
                # original traceback after aborting the transaction.
                a, b, c = sys.exc_info()
                yield txn.abort()
                raise a, b, c
            else:
                yield txn.commit()

    def withEachCalendarHomeDo(self, action, batchSize=None):
        """
        Implementation of L{ICalendarStore.withEachCalendarHomeDo}.
        """
        return self._withEachHomeDo(self._eachCalendarHome, action, batchSize)

    def withEachAddressbookHomeDo(self, action, batchSize=None):
        """
        Implementation of L{IAddressBookStore.withEachAddressbookHomeDo}.
        """
        return self._withEachHomeDo(self._eachAddressbookHome, action,
                                    batchSize)

    def setMigrating(self, state):
        """
        Set the "migrating" state
        """
        # While migrating, change notifications are suppressed.
        self._migrating = state
        self._enableNotifications = not state

    def setUpgrading(self, state):
        """
        Set the "upgrading" state
        """
        self._enableNotifications = not state

    def _homesOfType(self, storeType):
        """
        Common implementation of L{_eachCalendarHome} and
        L{_eachAddressbookHome}; see those for a description of the return
        type.

        @param storeType: one of L{EADDRESSBOOKTYPE} or L{ECALENDARTYPE}.
        """
        # Homes live under <root>/<topPath>/__uids__/<xx>/<yy>/<uid>,
        # a two-level prefix fan-out; skip any invalid directory names.
        top = self._path.child(TOPPATHS[storeType]).child(UIDPATH)
        if top.exists() and top.isdir():
            for firstPrefix in top.children():
                if not isValidName(firstPrefix.basename()):
                    continue
                for secondPrefix in firstPrefix.children():
                    if not isValidName(secondPrefix.basename()):
                        continue
                    for actualHome in secondPrefix.children():
                        uid = actualHome.basename()
                        if not isValidName(uid):
                            continue
                        # Each home gets its own enumeration transaction.
                        txn = self.newTransaction("enumerate home %r" % (uid,))
                        home = txn.homeWithUID(storeType, uid, False)
                        if home is not None:
                            yield (txn, home)

    def _eachCalendarHome(self):
        return self._homesOfType(ECALENDARTYPE)

    def _eachAddressbookHome(self):
        return self._homesOfType(EADDRESSBOOKTYPE)
class CommonStoreTransaction(DataStoreTransaction):
    """
    In-memory implementation of a store transaction.

    Note that this provides basic 'undo' support, but not truly transactional
    operations.
    """
    # Maps ECALENDARTYPE/EADDRESSBOOKTYPE to the concrete home class;
    # populated lazily in __init__ to avoid circular imports.
    _homeClass = {}

    def __init__(self, dataStore, name, enableCalendars, enableAddressBooks, notifierFactories, migrating=False):
        """
        Initialize a transaction; do not call this directly, instead call
        L{DataStore.newTransaction}.

        @param dataStore: The store that created this transaction.
        @type dataStore: L{CommonDataStore}
        """
        # Deferred imports: these modules import this one.
        from txdav.caldav.icalendarstore import ICalendarTransaction
        from txdav.carddav.iaddressbookstore import IAddressBookTransaction
        from txdav.caldav.datastore.file import CalendarHome
        from txdav.carddav.datastore.file import AddressBookHome

        super(CommonStoreTransaction, self).__init__(dataStore, name)
        self._calendarHomes = {}
        self._addressbookHomes = {}
        self._notificationHomes = {}
        self._notifierFactories = notifierFactories
        self._notifiedAlready = set()
        self._migrating = migrating

        extraInterfaces = []
        if enableCalendars:
            extraInterfaces.append(ICalendarTransaction)
            self._notificationHomeType = ECALENDARTYPE
        else:
            self._notificationHomeType = EADDRESSBOOKTYPE
        if enableAddressBooks:
            extraInterfaces.append(IAddressBookTransaction)
        directlyProvides(self, *extraInterfaces)

        CommonStoreTransaction._homeClass[ECALENDARTYPE] = CalendarHome
        CommonStoreTransaction._homeClass[EADDRESSBOOKTYPE] = AddressBookHome

    def calendarHomeWithUID(self, uid, status=None, create=False):
        return self.homeWithUID(ECALENDARTYPE, uid, status=status, create=create)

    def addressbookHomeWithUID(self, uid, status=None, create=False):
        return self.homeWithUID(EADDRESSBOOKTYPE, uid, status=status, create=create)

    def _determineMemo(self, storeType, uid, status=None, create=False):
        """
        Determine the memo dictionary to use for homeWithUID.
        """
        if storeType == ECALENDARTYPE:
            return self._calendarHomes
        else:
            return self._addressbookHomes

    def homes(self, storeType):
        """
        Load all calendar or addressbook homes.
        """
        # Loading each home populates the per-transaction memo dict.
        uids = self._homeClass[storeType].listHomes(self)
        for uid in uids:
            self.homeWithUID(storeType, uid, create=False)

        # Return the memoized list directly
        # NOTE(review): returnValue is used although this method is not
        # decorated with @inlineCallbacks — presumably it is only ever
        # called from within an inlineCallbacks generator; confirm.
        returnValue([kv[1] for kv in sorted(self._determineMemo(storeType, None).items(), key=lambda x: x[0])])

    @memoizedKey("uid", _determineMemo, deferredResult=False)
    def homeWithUID(self, storeType, uid, status=None, create=False):
        # Names beginning with "." are reserved/hidden on disk.
        if uid.startswith("."):
            return None

        if storeType not in (ECALENDARTYPE, EADDRESSBOOKTYPE):
            raise RuntimeError("Unknown home type.")

        return self._homeClass[storeType].homeWithUID(self, uid, create, storeType == ECALENDARTYPE)

    @memoizedKey("uid", "_notificationHomes", deferredResult=False)
    def notificationsWithUID(self, uid, home=None, create=False):
        if home is None:
            home = self.homeWithUID(self._notificationHomeType, uid, create=True)
        return NotificationCollection.notificationsFromHome(self, home)

    # File-based storage of APN subscriptions not implementated.
    # NOTE(review): these return the NotImplementedError *class* rather
    # than raising it — callers receive a class object, not an exception.
    def addAPNSubscription(self, token, key, timestamp, subscriber, userAgent, ipAddr):
        return NotImplementedError

    def removeAPNSubscription(self, token, key):
        return NotImplementedError

    def purgeOldAPNSubscriptions(self, purgeSeconds):
        return NotImplementedError

    def apnSubscriptionsByToken(self, token):
        return NotImplementedError

    def apnSubscriptionsByKey(self, key):
        return NotImplementedError

    def apnSubscriptionsBySubscriber(self, guid):
        return NotImplementedError

    def imipCreateToken(self, organizer, attendee, icaluid, token=None):
        return NotImplementedError

    def imipLookupByToken(self, token):
        return NotImplementedError

    def imipGetToken(self, organizer, attendee, icaluid):
        return NotImplementedError

    def imipRemoveToken(self, token):
        return NotImplementedError

    def purgeOldIMIPTokens(self, olderThan):
        return NotImplementedError

    def isNotifiedAlready(self, obj):
        # True if a change notification was already queued for obj in
        # this transaction (used to coalesce notifications).
        return obj in self._notifiedAlready

    def notificationAddedForObject(self, obj):
        self._notifiedAlready.add(obj)
class StubResource(object):
    """
    Just enough resource to keep the shared sql DB classes going.

    Wraps a home object and exposes only the C{fp} attribute (the home's
    on-disk path) that L{SharedCollectionsDatabase} needs.
    """

    def __init__(self, commonHome):
        self._commonHome = commonHome

    @property
    def fp(self):
        # FilePath of the wrapped home's directory.
        return self._commonHome._path
class SharedCollectionRecord(object):
    """Value object for one row of the SHARES database table."""

    def __init__(self, shareuid, sharetype, hosturl, localname, summary):
        # Attributes mirror the SHARES columns, in schema order.
        self.shareuid = shareuid
        # "I" for invite, "D" for direct (per the SHARES table comment).
        self.sharetype = sharetype
        self.hosturl = hosturl
        self.localname = localname
        self.summary = summary
class SharedCollectionsDatabase(AbstractSQLDatabase):
    """Per-home SQLite database tracking collections shared to this home."""

    log = Logger()

    db_basename = db_prefix + "shares"
    schema_version = "1"
    db_type = "shares"

    def __init__(self, resource):
        """
        @param resource: the L{CalDAVResource} resource for
            the shared collection. C{resource} must be a calendar/addressbook
            home collection.)
        """
        self.resource = resource
        db_filename = os.path.join(self.resource.fp.path, SharedCollectionsDatabase.db_basename)
        super(SharedCollectionsDatabase, self).__init__(db_filename, True, autocommit=True)

    def get_dbpath(self):
        return self.resource.fp.child(SharedCollectionsDatabase.db_basename).path

    def set_dbpath(self, newpath):
        # The path is derived from the resource; assignment is a no-op.
        pass

    dbpath = property(get_dbpath, set_dbpath)

    def create(self):
        """
        Create the index and initialize it.
        """
        self._db()

    def allRecords(self):
        # All share rows, ordered by local collection name.
        records = self._db_execute("select * from SHARES order by LOCALNAME")
        return [self._makeRecord(row) for row in (records if records is not None else ())]

    def recordForShareUID(self, shareUID):
        # SHAREUID is unique, so at most one row matches.
        row = self._db_execute("select * from SHARES where SHAREUID = :1", shareUID)
        return self._makeRecord(row[0]) if row else None

    def addOrUpdateRecord(self, record):
        # Upsert keyed on the unique SHAREUID column.
        self._db_execute(
            """insert or replace into SHARES (SHAREUID, SHARETYPE, HOSTURL, LOCALNAME, SUMMARY)
            values (:1, :2, :3, :4, :5)
            """, record.shareuid, record.sharetype, record.hosturl, record.localname, record.summary,
        )

    def removeRecordForLocalName(self, localname):
        self._db_execute("delete from SHARES where LOCALNAME = :1", localname)

    def removeRecordForShareUID(self, shareUID):
        self._db_execute("delete from SHARES where SHAREUID = :1", shareUID)

    def remove(self):
        # Close and delete the whole database file.
        self._db_close()
        os.remove(self.dbpath)

    def directShareID(self, shareeHome, sharerCollection):
        # Synthetic UID for a "direct" share between two resources.
        return "Direct-%s-%s" % (shareeHome.resourceID(), sharerCollection.resourceID(),)

    def _db_version(self):
        """
        @return: the schema version assigned to this index.
        """
        return SharedCollectionsDatabase.schema_version

    def _db_type(self):
        """
        @return: the collection type assigned to this index.
        """
        return SharedCollectionsDatabase.db_type

    def _db_init_data_tables(self, q):
        """
        Initialise the underlying database tables.

        @param q: a database cursor to use.
        """
        #
        # SHARES table is the primary table
        #   SHAREUID: UID for this share
        #   SHARETYPE: type of share: "I" for invite, "D" for direct
        #   HOSTURL: URL for data source
        #   LOCALNAME: local path name
        #   SUMMARY: Share summary
        #
        q.execute(
            """
            create table SHARES (
                SHAREUID       text unique,
                SHARETYPE      text(1),
                HOSTURL        text,
                LOCALNAME      text,
                SUMMARY        text
            )
            """
        )

        q.execute(
            """
            create index SHAREUID on SHARES (SHAREUID)
            """
        )
        q.execute(
            """
            create index HOSTURL on SHARES (HOSTURL)
            """
        )
        q.execute(
            """
            create index LOCALNAME on SHARES (LOCALNAME)
            """
        )

    def _db_upgrade_data_tables(self, q, old_version):
        """
        Upgrade the data from an older version of the DB.
        """
        # Nothing to do as we have not changed the schema
        pass

    def _makeRecord(self, row):
        # Python 2: coerce unicode columns to byte strings.
        return SharedCollectionRecord(*[str(item) if isinstance(item, unicode) else item for item in row])
class CommonHome(FileMetaDataMixin):
    """
    File-store implementation of a per-user "home": the on-disk container
    that holds one user's collections (calendars or address books).
    """
    log = Logger()

    # All these need to be initialized by derived classes for each store type
    _childClass = None
    _topPath = None
    _notifierPrefix = None

    def __init__(self, uid, path, dataStore, transaction):
        self._dataStore = dataStore
        self._uid = uid
        self._path = path
        self._transaction = transaction
        self._notifiers = None
        self._shares = SharedCollectionsDatabase(StubResource(self))
        # In-transaction caches of child collections; reconciled with disk
        # state by children()/childWithName().
        self._newChildren = {}
        self._removedChildren = set()
        self._cachedChildren = {}

    def quotaAllowedBytes(self):
        return self._transaction.store().quota

    @classmethod
    def listHomes(cls, txn):
        """
        Retrieve the owner UIDs of all existing homes.

        @return: an iterable of C{str}s.
        """
        results = []
        top = txn._dataStore._path.child(cls._topPath)
        if top.exists() and top.isdir() and top.child(UIDPATH).exists():
            # Homes live two fan-out levels deep: <top>/<UIDPATH>/ab/cd/<uid>.
            for firstPrefix in top.child(UIDPATH).children():
                if not isValidName(firstPrefix.basename()):
                    continue
                for secondPrefix in firstPrefix.children():
                    if not isValidName(secondPrefix.basename()):
                        continue
                    for actualHome in secondPrefix.children():
                        uid = actualHome.basename()
                        if not isValidName(uid):
                            continue
                        results.append(uid)

        return results

    @classmethod
    def homeWithUID(cls, txn, uid, create=False, withNotifications=False):
        """
        Look up (and optionally create) the home for C{uid}.

        @return: the home object, or C{None} if it does not exist and
            C{create} is false.
        """
        assert len(uid) >= 4

        # Build the fan-out path: <top>/<UIDPATH>/<uid[0:2]>/<uid[2:4]>/<uid>
        childPathSegments = []
        childPathSegments.append(txn._dataStore._path.child(cls._topPath))
        childPathSegments.append(childPathSegments[-1].child(UIDPATH))
        childPathSegments.append(childPathSegments[-1].child(uid[0:2]))
        childPathSegments.append(childPathSegments[-1].child(uid[2:4]))
        childPath = childPathSegments[-1].child(uid)

        def createDirectory(path):
            try:
                path.createDirectory()
            except (IOError, OSError), e:
                # EEXIST is deliberately ignored, in case someone else created
                # the directory while we were trying to as well.
                if e.errno != EEXIST:
                    raise

        creating = False
        if create:
            # Create intermediate directories
            for child in childPathSegments:
                if not child.isdir():
                    createDirectory(child)

            if childPath.isdir():
                homePath = childPath
            else:
                creating = True
                # Build under a hidden temporary sibling and rename into place
                # when the transaction commits (see do() below).
                homePath = childPath.temporarySibling()
                createDirectory(homePath)

                def do():
                    homePath.moveTo(childPath)
                    # do this _after_ all other file operations
                    home._path = childPath
                    return lambda : None
                txn.addOperation(do, "create home UID %r" % (uid,))

        elif not childPath.isdir():
            return None
        else:
            homePath = childPath

        home = cls(uid, homePath, txn._dataStore, txn)
        for factory_name, factory in txn._notifierFactories.items():
            home.addNotifier(factory_name, factory.newNotifier(home))

        if creating:
            home.createdHome()

        if withNotifications:
            txn.notificationsWithUID(uid, home)

        return home

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._path)

    def uid(self):
        return self._uid

    def transaction(self):
        return self._transaction

    def directoryService(self):
        return self._transaction.store().directoryService()

    def directoryRecord(self):
        return self.directoryService().recordWithUID(self.uid().decode("utf-8"))

    def retrieveOldShares(self):
        """
        Retrieve the old Index object.
        """
        return self._shares

    def children(self):
        """
        Return a set of the child resource objects.
        """
        return set(self._newChildren.itervalues()) | set(
            self.childWithName(name)
            for name in self._path.listdir()
            if not name.startswith(".") and
            name not in self._removedChildren
        )

    # For file store there is no efficient "bulk" load of all children so just
    # use the "iterate over each child" method.
    loadChildren = children

    def listChildren(self):
        """
        Return a sorted list of the names of the child resources.
        """
        return sorted(set(
            [child.name() for child in self._newChildren.itervalues()]
        ) | set(
            name
            for name in self._path.listdir()
            if not name.startswith(".") and
            self._path.child(name).isdir() and
            name not in self._removedChildren
        ))

    def listSharedChildren(self):
        """
        Retrieve the names of the shared children in this home.

        @return: an iterable of C{str}s.
        """
        return [share.localname for share in self._shares.allRecords()]
        # NOTE(review): everything below is unreachable (it follows an
        # unconditional return) and references attributes this class never
        # defines (_childrenLoaded, _sharedChildren); it appears to be a
        # leftover copied from the SQL-store implementation -- candidate for
        # removal.
        if self._childrenLoaded:
            return succeed(self._sharedChildren.keys())
        else:
            return self._childClass.listObjects(self, owned=False)

    def childWithName(self, name):
        # Consult the in-transaction caches before touching disk.
        child = self._newChildren.get(name)
        if child is not None:
            return child
        if name in self._removedChildren:
            return None
        if name in self._cachedChildren:
            return self._cachedChildren[name]

        if name.startswith("."):
            # Hidden names are never valid children.
            return None

        child = self._childClass.objectWithName(self, name, True)
        if child is not None:
            self._cachedChildren[name] = child
        return child

    @writeOperation
    def createChildWithName(self, name):
        if name.startswith("."):
            raise HomeChildNameNotAllowedError(name)

        childPath = self._path.child(name)

        if name not in self._removedChildren and childPath.isdir():
            raise HomeChildNameAlreadyExistsError(name)

        # Create under a hidden temporary name now; rename into place at
        # commit time (see do() below).
        temporary = hidden(childPath.temporarySibling())
        temporaryName = temporary.basename()
        temporary.createDirectory()
        # In order for the index to work (which is doing real file ops on disk
        # via SQLite) we need to create a real directory _immediately_.

        # FIXME: some way to roll this back.

        c = self._newChildren[name] = self._childClass(temporary.basename(), self, True, realName=name)
        c.retrieveOldIndex().create()

        def do():
            childPath = self._path.child(name)
            temporary = childPath.sibling(temporaryName)
            try:
                props = c.properties()
                temporary.moveTo(childPath)
                c._name = name
                # FIXME: _lots_ of duplication of work here.
                props.flush()
            except (IOError, OSError), e:
                if e.errno == EEXIST and childPath.isdir():
                    raise HomeChildNameAlreadyExistsError(name)
                raise
            # FIXME: direct tests, undo for index creation
            # Return undo
            return lambda: self._path.child(childPath.basename()).remove()

        self._transaction.addOperation(do, "create child %r" % (name,))
        self.notifyChanged()
        return c

    @writeOperation
    def removeChildWithName(self, name, useTrash=True):
        # NOTE(review): useTrash is accepted for interface compatibility but
        # unused here -- the file store has no trash (see getTrash()).
        if name.startswith(".") or name in self._removedChildren:
            raise NoSuchHomeChildError(name)

        child = self.childWithName(name)
        if child is None:
            raise NoSuchHomeChildError()

        try:
            child.remove()
        finally:
            # Keep the in-transaction caches coherent whether or not the
            # remove succeeded.
            if name in self._newChildren:
                del self._newChildren[name]
            else:
                self._removedChildren.add(name)

    def getTrash(self, create=False):
        # The file store has no trash collection.
        return succeed(None)

    @inlineCallbacks
    def syncToken(self):
        # The home token combines the home's stable resource-id UUID with the
        # highest revision of any child collection.
        maxrev = 0
        for child in self.children():
            maxrev = max(int((yield child.syncToken()).split("_")[1]), maxrev)

        try:
            urnuuid = str(self.properties()[PropertyName.fromElement(ResourceID)].children[0])
        except KeyError:
            # Lazily assign a resource-id property on first use.
            urnuuid = uuid.uuid4().urn
            self.properties()[PropertyName(*ResourceID.qname())] = ResourceID(HRef.fromString(urnuuid))
        returnValue("%s_%s" % (urnuuid[9:], maxrev))

    def resourceNamesSinceToken(self, token, depth):
        # Home-level change tracking is not implemented for the file store;
        # always report "nothing changed".
        deleted = []
        changed = []
        invalid = []
        return succeed((changed, deleted, invalid))

    # @cached
    def properties(self):
        # FIXME: needs tests for actual functionality
        # FIXME: needs to be cached
        # FIXME: transaction tests
        props = self._dataStore._propertyStoreClass(
            self.uid(), lambda : self._path
        )
        self._transaction.addOperation(props.flush, "flush home properties")
        return props

    def objectResourcesWithUID(self, uid, ignore_children=()):
        """
        Return all child object resources with the specified UID, ignoring any in the
        named child collections. The file implementation just iterates all child collections.
        """
        results = []
        for child in self.children():
            if child.name() in ignore_children:
                continue
            object = child.objectResourceWithUID(uid)
            if object:
                results.append(object)
        return results

    def objectResourceWithID(self, rid):
        """
        Return all child object resources with the specified resource-ID.
        """
        # File store does not have resource ids.
        raise NotImplementedError

    def quotaUsedBytes(self):
        try:
            return int(str(self.properties()[PropertyName.fromElement(TwistedQuotaUsedProperty)]))
        except KeyError:
            # No quota property yet means nothing has been counted.
            return 0

    def adjustQuotaUsedBytes(self, delta):
        """
        Adjust quota used. We need to get a lock on the row first so that the adjustment
        is done atomically.
        """
        old_used = self.quotaUsedBytes()
        new_used = old_used + delta
        if new_used < 0:
            # Clamp rather than store a nonsensical negative usage.
            self.log.error("Fixing quota adjusted below zero to %s by change amount %s" % (new_used, delta,))
            new_used = 0
        self.properties()[PropertyName.fromElement(TwistedQuotaUsedProperty)] = TwistedQuotaUsedProperty(str(new_used))

    def addNotifier(self, factory_name, notifier):
        if self._notifiers is None:
            self._notifiers = {}
        self._notifiers[factory_name] = notifier

    def getNotifier(self, factory_name):
        # NOTE(review): raises AttributeError if called before any notifier
        # has been added (self._notifiers is None until addNotifier runs).
        return self._notifiers.get(factory_name)

    def notifierID(self):
        return (self._notifierPrefix, self.uid(),)

    @inlineCallbacks
    def notifyChanged(self):
        """
        Trigger a notification of a change
        """

        # Only send one set of change notifications per transaction
        if self._notifiers and not self._transaction.isNotifiedAlready(self):
            # cache notifiers run in post commit
            notifier = self._notifiers.get("cache", None)
            if notifier:
                self._transaction.postCommit(notifier.notify)
            # push notifiers add their work items immediately
            notifier = self._notifiers.get("push", None)
            if notifier:
                yield notifier.notify(self._transaction)
            self._transaction.notificationAddedForObject(self)
class CommonHomeChild(FileMetaDataMixin, FancyEqMixin, HomeChildBase):
    """
    Common ancestor class of AddressBooks and Calendars.
    """
    log = Logger()

    compareAttributes = (
        "_name",
        "_home",
        "_transaction",
    )

    _objectResourceClass = None

    def __init__(self, name, home, owned, realName=None):
        """
        Initialize an home child pointing at a path on disk.

        @param name: the subdirectory of home where this child
            resides.
        @type name: C{str}

        @param home: the home containing this child.
        @type home: L{CommonHome}

        @param owned: whether this child belongs to the home, as opposed to
            being shared into it.

        @param realName: If this child was just created, the name which it
            will eventually have on disk.
        @type realName: C{str}
        """
        self._name = name
        self._home = home
        self._owned = owned
        self._transaction = home._transaction
        # In-transaction caches of object resources.
        self._newObjectResources = {}
        self._cachedObjectResources = {}
        self._removedObjectResources = set()
        self._index = None  # Derived classes need to set this
        self._invites = None  # Derived classes need to set this
        self._renamedName = realName

        if self._home._notifiers:
            self._notifiers = dict([(factory_name, notifier.clone(self),) for factory_name, notifier in self._home._notifiers.items()])
        else:
            self._notifiers = None

    @classmethod
    def objectWithName(cls, home, name, owned):
        # Only directories on disk count as existing children.
        return cls(name, home, owned) if home._path.child(name).isdir() else None

    @property
    def _path(self):
        return self._home._path.child(self._name)

    @property
    def _txn(self):
        return self._transaction

    def directoryService(self):
        return self._transaction.store().directoryService()

    def retrieveOldIndex(self):
        """
        Retrieve the old Index object.
        """
        return self._index._oldIndex

    def retrieveOldInvites(self):
        """
        Retrieve the old Invites DB object.
        """
        return self._invites._oldInvites

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._path.path)

    def name(self):
        # A pending rename (or just-created real name) takes precedence over
        # the on-disk (possibly temporary) directory name.
        if self._renamedName is not None:
            return self._renamedName
        return self._path.basename()

    def shareMode(self):
        """
        Stub implementation of L{ICalendar.shareMode}; always returns
        L{_BIND_MODE_OWN}.
        """
        return _BIND_MODE_OWN

    def effectiveShareMode(self):
        """
        Stub implementation of L{ICalendar.effectiveShareMode}; always returns
        L{_BIND_MODE_OWN}.
        """
        return _BIND_MODE_OWN

    def owned(self):
        return self._owned

    _renamedName = None

    @writeOperation
    def rename(self, name):
        oldName = self.name()
        self._renamedName = name
        self._home._newChildren[name] = self
        self._home._removedChildren.add(oldName)

        def doIt():
            self._path.moveTo(self._path.sibling(name))
            return lambda : None  # FIXME: revert

        self._transaction.addOperation(doIt, "rename home child %r -> %r" %
                                       (oldName, name))

        self.retrieveOldIndex().bumpRevision()

        self.notifyChanged()

    @writeOperation
    def remove(self):

        def do(transaction=self._transaction):
            childPath = self._path
            # Move aside to a hidden trash name first so the operation can be
            # undone if the transaction aborts; real deletion happens in
            # cleanup() at commit time.
            for i in xrange(1000):
                trash = childPath.sibling("._del_%s_%d" % (childPath.basename(), i))
                if not trash.exists():
                    break
            else:
                raise InternalDataStoreError("Unable to create trash target for child at %s" % (childPath,))

            try:
                childPath.moveTo(trash)
            except (IOError, OSError), e:
                if e.errno == ENOENT:
                    raise NoSuchHomeChildError(self._name)
                raise

            def cleanup():
                try:
                    trash.remove()
                    self.properties()._removeResource()
                except Exception, e:
                    self.log.error("Unable to delete trashed child at %s: %s" % (trash.fp, e))

            self._transaction.addOperation(cleanup, "remove child backup %r" % (self._name,))

            def undo():
                trash.moveTo(childPath)

            return undo

        # FIXME: direct tests
        self._transaction.addOperation(
            do, "prepare child remove %r" % (self._name,)
        )
        self.notifyChanged()

    def ownerHome(self):
        return self._home

    def viewerHome(self):
        return self._home

    def setSharingUID(self, uid):
        self.properties()._setPerUserUID(uid)

    def objectResources(self):
        """
        Return a list of object resource objects.
        """
        return [self.objectResourceWithName(name)
                for name in self.listObjectResources()]

    def objectResourcesWithNames(self, names):
        """
        Return a list of the specified object resource objects.
        """
        results = []
        for name in names:
            obj = self.objectResourceWithName(name)
            if obj is not None:
                results.append(obj)
        return results

    def listObjectResources(self):
        """
        Return a sorted list of object resource names.
        """
        # Note set-operator precedence: "-" binds tighter than "|", so this is
        # new-resources | (on-disk files - removed names).
        return sorted((
            name
            for name in (
                set(self._newObjectResources.iterkeys()) |
                set(p.basename() for p in self._path.children()
                    if not p.basename().startswith(".") and
                    p.isfile()) -
                set(self._removedObjectResources)
            ))
        )

    def countObjectResources(self):
        return len(self.listObjectResources())

    def objectResourceWithName(self, name):
        # Consult the in-transaction caches before touching disk.
        if name in self._removedObjectResources:
            return None
        if name in self._newObjectResources:
            return self._newObjectResources[name]
        if name in self._cachedObjectResources:
            return self._cachedObjectResources[name]

        objectResourcePath = self._path.child(name)
        if objectResourcePath.isfile():
            obj = self._objectResourceClass(name, self)
            self._cachedObjectResources[name] = obj
            return obj
        else:
            return None

    def objectResourceWithUID(self, uid):
        # The old index maps UID -> resource name.
        rname = self.retrieveOldIndex().resourceNameForUID(uid)
        if rname and rname not in self._removedObjectResources:
            return self.objectResourceWithName(rname)

        return None

    @writeOperation
    def createObjectResourceWithName(self, name, component, metadata=None):
        """
        Create a new resource with component data and optional metadata. We create the
        python object using the metadata then create the actual store object with setComponent.
        """
        if name.startswith("."):
            raise ObjectResourceNameNotAllowedError(name)

        # 255 characters is the usual filesystem name-length limit.
        if len(name) > 255:
            raise ObjectResourceNameNotAllowedError(name)

        objectResourcePath = self._path.child(name)
        if objectResourcePath.exists():
            raise ObjectResourceNameAlreadyExistsError(name)

        objectResource = self._objectResourceClass(name, self, metadata)
        objectResource.setComponent(component, inserting=True)
        self._cachedObjectResources[name] = objectResource

        # Note: setComponent triggers a notification, so we don't need to
        # call notify( ) here like we do for object removal.
        return objectResource

    def removedObjectResource(self, child):
        # Called by the object resource itself once its removal is scheduled.
        self.retrieveOldIndex().deleteResource(child.name())
        self._removedObjectResources.add(child.name())
        self.notifyChanged()

    def syncToken(self):
        try:
            urnuuid = str(self.properties()[PropertyName.fromElement(ResourceID)].children[0])
        except KeyError:
            # Lazily assign a stable resource-id on first use.
            urnuuid = uuid.uuid4().urn
            self.properties()[PropertyName(*ResourceID.qname())] = ResourceID(HRef.fromString(urnuuid))
        return succeed("%s_%s" % (urnuuid[9:], self.retrieveOldIndex().lastRevision()))

    def objectResourcesSinceToken(self, token):
        raise NotImplementedError()

    def resourceNamesSinceToken(self, token):
        return succeed(self.retrieveOldIndex().whatchanged(token))

    def objectResourcesHaveProperties(self):
        """
        So filestore objects do need to support properties.
        """
        return True

    # FIXME: property writes should be a write operation
    @cached
    def properties(self):
        # FIXME: needs direct tests - only covered by store tests
        # FIXME: transactions
        propStoreClass = self._home._dataStore._propertyStoreClass
        props = propStoreClass(self._home.uid(), lambda: self._path)
        self.initPropertyStore(props)
        self._transaction.addOperation(props.flush,
                                       "flush object resource properties")
        return props

    def initPropertyStore(self, props):
        """
        A hook for subclasses to override in order to set up their property
        store after it's been created.

        @param props: the L{PropertyStore} from C{properties()}.
        """
        pass

    def addNotifier(self, factory_name, notifier):
        if self._notifiers is None:
            self._notifiers = {}
        self._notifiers[factory_name] = notifier

    def getNotifier(self, factory_name):
        # NOTE(review): raises AttributeError when no notifier was ever added
        # (self._notifiers may still be None).
        return self._notifiers.get(factory_name)

    def notifierID(self):
        return (self.ownerHome()._notifierPrefix, "%s/%s" % (self.ownerHome().uid(), self.name(),),)

    def parentNotifierID(self):
        return self.ownerHome().notifierID()

    @inlineCallbacks
    def notifyChanged(self):
        """
        Trigger a notification of a change
        """

        # Only send one set of change notifications per transaction
        if self._notifiers and not self._transaction.isNotifiedAlready(self):
            # cache notifiers run in post commit
            notifier = self._notifiers.get("cache", None)
            if notifier:
                self._transaction.postCommit(notifier.notify)
            # push notifiers add their work items immediately
            notifier = self._notifiers.get("push", None)
            if notifier:
                yield notifier.notify(self._transaction)
            self._transaction.notificationAddedForObject(self)

    @inlineCallbacks
    def sharingInvites(self):
        """
        Stub for interface-compliance tests.
        """
        yield None
        returnValue([])
class CommonObjectResource(FileMetaDataMixin, FancyEqMixin):
    """
    Base class for individual object resources stored as single files inside
    a L{CommonHomeChild} directory.

    @ivar _path: The path of the file on disk
    @type _path: L{FilePath}
    """
    log = Logger()

    compareAttributes = (
        "_name",
        "_parentCollection",
    )

    def __init__(self, name, parent, metadata=None):
        self._name = name
        self._parentCollection = parent
        self._transaction = parent._transaction
        # Lazily-loaded text of the resource; subclasses manage this.
        self._objectText = None

    @property
    def _path(self):
        return self._parentCollection._path.child(self._name)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._path.path)

    @property
    def _txn(self):
        return self._transaction

    def transaction(self):
        return self._transaction

    def directoryService(self):
        return self._transaction.store().directoryService()

    @writeOperation
    def setComponent(self, component, inserting=False):
        # Subclasses must implement the actual data write.
        raise NotImplementedError

    def component(self):
        raise NotImplementedError

    def remove(self):
        # FIXME: test for undo
        objectResourcePath = self._path

        def do():
            # Actual deletion is deferred to transaction commit.
            objectResourcePath.remove()
            return lambda: None

        self._transaction.addOperation(do, "remove object resource object %r" % (self._name,))
        self._parentCollection.removedObjectResource(self)

    # In the file store "purge" is identical to "remove" (no trash).
    purge = remove

    def _text(self):
        raise NotImplementedError

    def uid(self):
        raise NotImplementedError

    @cached
    def properties(self):
        home = self._parentCollection._home
        uid = home.uid()
        if self._parentCollection.objectResourcesHaveProperties():
            propStoreClass = home._dataStore._propertyStoreClass
            props = propStoreClass(uid, lambda : self._path)
        else:
            # Collections without per-object properties get a null store.
            props = NonePropertyStore(uid)
        self.initPropertyStore(props)
        self._transaction.addOperation(props.flush, "object properties flush")
        return props

    def initPropertyStore(self, props):
        """
        A hook for subclasses to override in order to set up their property
        store after it's been created.

        @param props: the L{PropertyStore} from C{properties()}.
        """
        pass
class CommonStubResource(object):
    """
    Minimal adapter exposing just the attributes and sync-token hooks that
    the legacy collection SQL DB classes expect from a resource.
    """

    def __init__(self, resource):
        self.resource = resource
        # The legacy DB classes locate their files through ``fp``.
        self.fp = resource._path

    def bumpSyncToken(self, reset=False):
        # FIXME: needs direct tests
        return self.resource._updateSyncToken(reset)

    def initSyncToken(self):
        # FIXME: needs direct tests
        self.bumpSyncToken(reset=True)
class NotificationCollection(CommonHomeChild):
"""
File-based implementation of L{INotificationCollection}.
"""
implements(INotificationCollection)
def __init__(self, name, parent, realName=None):
"""
Initialize an notification collection pointing at a path on disk.
@param name: the subdirectory of parent where this notification collection
resides.
@type name: C{str}
@param parent: the home containing this notification collection.
@type parent: L{CommonHome}
"""
super(NotificationCollection, self).__init__(name, parent, realName)
self._index = NotificationIndex(self)
self._invites = None
self._objectResourceClass = NotificationObject
@classmethod
def notificationsFromHome(cls, txn, home):
notificationCollectionName = "notification"
if not home._path.child(notificationCollectionName).isdir():
notifications = cls._create(txn, home, notificationCollectionName)
else:
notifications = cls(notificationCollectionName, home)
return notifications
@classmethod
def _create(cls, txn, home, collectionName):
# FIXME: this is a near-copy of CommonHome.createChildWithName.
temporary = hidden(home._path.child(collectionName).temporarySibling())
temporary.createDirectory()
temporaryName = temporary.basename()
c = cls(temporary.basename(), home)
def do():
childPath = home._path.child(collectionName)
temporary = childPath.sibling(temporaryName)
try:
props = c.properties()
temporary.moveTo(childPath)
c._name = collectionName
# FIXME: _lots_ of duplication of work here.
props.flush()
except (IOError, OSError), e:
if e.errno == EEXIST and childPath.isdir():
raise HomeChildNameAlreadyExistsError(collectionName)
raise
# FIXME: direct tests, undo for index creation
# Return undo
return lambda: home._path.child(collectionName).remove()
txn.addOperation(do, "create notification child %r" % (collectionName,))
return c
notificationObjects = CommonHomeChild.objectResources
listNotificationObjects = CommonHomeChild.listObjectResources
notificationObjectWithName = CommonHomeChild.objectResourceWithName
def notificationObjectWithUID(self, uid):
name = uid + ".xml"
return self.notificationObjectWithName(name)
def writeNotificationObject(self, uid, notificationtype, notificationdata):
name = uid + ".xml"
if name.startswith("."):
raise ObjectResourceNameNotAllowedError(name)
objectResource = NotificationObject(name, self)
objectResource.setData(uid, notificationtype, notificationdata)
self._cachedObjectResources[name] = objectResource
# Update database
self.retrieveOldIndex().addOrUpdateRecord(NotificationRecord(uid, name, notificationtype))
self.notifyChanged()
@writeOperation
def removeNotificationObjectWithName(self, name):
if name.startswith("."):
raise NoSuchObjectResourceError(name)
self.retrieveOldIndex().removeRecordForName(name)
objectResourcePath = self._path.child(name)
if objectResourcePath.isfile():
self._removedObjectResources.add(name)
# FIXME: test for undo
def do():
objectResourcePath.remove()
return lambda: None
self._transaction.addOperation(do, "remove object resource object %r" %
(name,))
self.notifyChanged()
else:
raise NoSuchObjectResourceError(name)
@writeOperation
def removeNotificationObjectWithUID(self, uid):
name = uid + ".xml"
self.removeNotificationObjectWithName(name)
class NotificationObject(CommonObjectResource):
    """
    A single notification, stored as a JSON body in a file named
    C{<uid>.xml}.
    """
    implements(INotificationObject)

    def __init__(self, name, notifications):
        super(NotificationObject, self).__init__(name, notifications)
        # Strip the ".xml" suffix to recover the notification UID.
        self._uid = name[:-4]

    def notificationCollection(self):
        return self._parentCollection

    def created(self):
        if not self._path.exists():
            # Not yet written to disk: report "now" instead of failing.
            from twisted.internet import reactor
            return int(reactor.seconds())
        return super(NotificationObject, self).created()

    def modified(self):
        if not self._path.exists():
            # Not yet written to disk: report "now" instead of failing.
            from twisted.internet import reactor
            return int(reactor.seconds())
        return super(NotificationObject, self).modified()

    @writeOperation
    def setData(self, uid, notificationtype, notificationdata, inserting=False):
        # Record the notification in the collection's index first.
        rname = uid + ".xml"
        self._parentCollection.retrieveOldIndex().addOrUpdateRecord(
            NotificationRecord(uid, rname, notificationtype)
        )

        self._notificationdata = notificationdata
        notificationtext = json.dumps(self._notificationdata)
        md5 = hashlib.md5(notificationtext).hexdigest()

        def do():
            backup = None
            if self._path.exists():
                # Keep a hidden backup so the write can be undone on abort.
                backup = hidden(self._path.temporarySibling())
                self._path.moveTo(backup)
            fh = self._path.open("w")
            try:
                # FIXME: concurrency problem; if this write is interrupted
                # halfway through, the underlying file will be corrupt.
                fh.write(notificationtext)
            finally:
                fh.close()

            def undo():
                if backup:
                    backup.moveTo(self._path)
                else:
                    self._path.remove()

            return undo

        self._transaction.addOperation(do, "set notification data %r" % (self.name(),))

        # Mark all properties as dirty, so they will be re-added to the
        # temporary file when the main file is deleted. NOTE: if there were a
        # temporary file and a rename() as there should be, this should really
        # happen after the write but before the rename.
        self.properties().update(self.properties())

        props = self.properties()
        props[PropertyName(*GETContentType.qname())] = GETContentType.fromString(generateContentType(MimeType("text", "xml", params={"charset": "utf-8"})))
        props[PropertyName.fromElement(NotificationType)] = NotificationType(json.dumps(notificationtype))
        props[PropertyName.fromElement(TwistedGETContentMD5)] = TwistedGETContentMD5.fromString(md5)

        # FIXME: the property store's flush() method may already have been
        # added to the transaction, but we need to add it again to make sure it
        # happens _after_ the new file has been written.  we may end up doing
        # the work multiple times, and external callers to property-
        # manipulation methods won't work.
        self._transaction.addOperation(self.properties().flush, "post-update property flush")

    _notificationdata = None

    def notificationData(self):
        # Prefer the in-memory copy set by setData(); otherwise read and
        # parse the JSON body from disk.
        if self._notificationdata is not None:
            return self._notificationdata
        try:
            fh = self._path.open()
        except IOError, e:
            if e[0] == ENOENT:
                raise NoSuchObjectResourceError(self)
            else:
                raise
        try:
            text = fh.read()
        finally:
            fh.close()
        return json.loads(text)

    def uid(self):
        return self._uid

    def notificationType(self):
        # NB This is the NotificationType property element
        return self.properties()[PropertyName.fromElement(NotificationType)]

    def initPropertyStore(self, props):
        # Setup peruser special properties
        props.setSpecialProperties(
            (
            ),
            (
                PropertyName.fromElement(customxml.NotificationType),
            ),
            (),
        )
class NotificationIndex(object):
    #
    # OK, here's where we get ugly.
    # The index code needs to be rewritten also, but in the meantime...
    #
    def __init__(self, notificationCollection):
        self.notificationCollection = notificationCollection
        # Wrap the collection in a stub resource so the legacy index code,
        # which expects an old-style resource object, can operate on it.
        stubResource = CommonStubResource(notificationCollection)
        self._oldIndex = OldNotificationIndex(stubResource)
| StarcoderdataPython |
1633182 | <filename>2020/python/day2.py
import os
import sys

import common
def get_filename():
    """Return this script's base name without its extension (e.g. "day2").

    Used to locate the matching puzzle input file.
    """
    # os.path handles platform path separators and dotted directory names
    # robustly; the previous manual split("/") / split(".") broke on Windows
    # separators and on filenames containing extra dots.
    return os.path.splitext(os.path.basename(sys.argv[0]))[0]
data = common.get_file_contents("data/{}_input.txt".format(get_filename()))
def check_password(line):
    """Part-1 policy: the given letter must occur between min and max times
    (inclusive) in the password.

    ``line`` looks like ``"1-3 a: abcde"``.
    """
    policy, letter_part, password = line.split(" ")
    letter = letter_part.replace(":", "")
    lowest, highest = policy.split("-")
    # Valid exactly when the occurrence count lies within the stated range.
    return int(lowest) <= password.count(letter) <= int(highest)
def check_password_part2(line):
    """Part-2 policy: exactly one of the two (1-based) positions must hold
    the given letter.

    ``line`` looks like ``"1-3 a: abcde"``.
    """
    policy, letter_part, password = line.split(" ")
    letter = letter_part.replace(":", "")
    # Convert the 1-based positions from the policy into 0-based indices.
    first, second = (int(pos) - 1 for pos in policy.split("-"))
    # "Exactly one match" is equivalent to the two tests differing (XOR).
    return (password[first] == letter) != (password[second] == letter)
def part1():
    """Count the input lines that satisfy the part-1 password policy."""
    return sum(1 for item in data if check_password(item))
def part2():
    """Count the input lines that satisfy the part-2 password policy."""
    return sum(1 for item in data if check_password_part2(item))
def main():
    """Solve both puzzle parts and print the answers."""
    print(f"Part 1: {part1()}")
    print(f"Part 2: {part2()}")
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
153716 | <gh_stars>1-10
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from libs.structured_object import StructuredObject
class IntRange(StructuredObject):
    """Represents a generic integer range to include an upper and lower bound."""
    # These class attributes declare field *types* (not values) for
    # StructuredObject's (de)serialization machinery.
    lower = int
    upper = int
| StarcoderdataPython |
1743723 | <filename>demo/tests/unit/mock.py
from uuid import uuid4
class MockContext(object):
    """Minimal stand-in for the AWS Lambda context object, for tests.

    Mirrors the attributes handlers commonly read: function name/version,
    memory limit, invoked ARN, and a unique request id.
    """

    def __init__(self, function_name):
        self.function_name = function_name
        self.function_version = "v$LATEST"
        self.memory_limit_in_mb = 512
        self.invoked_function_arn = f"arn:aws:lambda:us-east-1:ACCOUNT:function:{self.function_name}"
        # BUG FIX: the original used str(uuid4) -- stringifying the *function*
        # object instead of calling it -- so every context got the same bogus
        # "<function uuid4 ...>" request id instead of a fresh UUID string.
        self.aws_request_id = str(uuid4())
| StarcoderdataPython |
3215935 | # encoding: utf-8
import json
import logging
import ckan.plugins as p
try:
    # CKAN 2.7 and later
    from ckan.common import config
except ImportError:
    # CKAN 2.6 and earlier
    from pylons import config

log = logging.getLogger(__name__)

# Validator dropping empty values from submitted view-form fields.
ignore_empty = p.toolkit.get_validator('ignore_empty')

# Resource formats (lowercase) for which the AGS views are offered.
DEFAULT_AGS_FORMATS = ['ags', 'esri rest']
def ags_view_default_basemap_url():
    # Configured default basemap URL for AGS views; empty string when unset.
    return config.get('ckanext.ags_view_default_basemap_url', '')
def ags_view_proxy():
    # Raw proxy configuration (a JSON object string mapping URL prefixes to
    # replacements); empty string when unset.
    return config.get('ckanext.ags_view_proxy', '')
def with_proxy(url):
    """Rewrite ``url`` through the configured proxy prefix mappings.

    ``ckanext.ags_view_proxy`` is expected to hold a JSON object mapping URL
    prefixes to their proxied replacements.

    :param url: the URL to rewrite.
    :returns: the rewritten URL (unchanged when no proxy is configured).
    """
    raw = ags_view_proxy()
    if not raw:
        # BUG FIX: json.loads('') raises ValueError, so with no proxy
        # configured (the default is '') this function used to crash.
        return url
    text = url
    proxies = json.loads(raw)
    for p in proxies:
        text = text.replace(p, proxies[p])
    return text
class AGSFSView(p.SingletonPlugin):
    '''This plugin makes views of arcgis FeatureServer services'''

    p.implements(p.IConfigurer, inherit=True)
    p.implements(p.IResourceView, inherit=True)
    p.implements(p.ITemplateHelpers, inherit=True)

    # IConfigurer

    def update_config(self, config):
        # Register this extension's templates and static assets with CKAN.
        p.toolkit.add_public_directory(config, 'public')
        p.toolkit.add_template_directory(config, 'templates')
        p.toolkit.add_resource('public', 'ckanext-agsview')

    # IResourceView

    def can_view(self, data_dict):
        # Only offer this view for resources whose format is an AGS format.
        return (data_dict['resource'].get('format', '').lower()
                in DEFAULT_AGS_FORMATS)

    def view_template(self, context, data_dict):
        return 'ags_fs_view.html'

    def form_template(self, context, data_dict):
        return 'ags_fs_form.html'

    def info(self):
        # View metadata shown in CKAN's "new view" UI, plus the form schema
        # validated when a view is created/edited.
        return {'name': 'ags_fs_view',
                'title': p.toolkit._('ArcGIS FeatureServer Service'),
                'icon': 'compass',
                'schema': {
                    'ags_url': [ignore_empty, unicode],
                    'basemap_url': [ignore_empty, unicode]
                },
                'iframed': False,
                'default_title': p.toolkit._('ArcGIS FeatureServer Service'),
                }

    # ITemplateHelpers

    def get_helpers(self):
        # Expose the module-level helper functions to templates.
        h = {'ags_view_default_basemap_url': ags_view_default_basemap_url,
             'ags_view_proxy': ags_view_proxy,
             'with_proxy': with_proxy}
        return h
class AGSMSView(p.SingletonPlugin):
    '''This plugin makes views of arcgis MapServer services'''

    p.implements(p.IConfigurer, inherit=True)
    p.implements(p.IResourceView, inherit=True)

    def update_config(self, config):
        # Register this extension's templates and static assets with CKAN.
        p.toolkit.add_public_directory(config, 'public')
        p.toolkit.add_template_directory(config, 'templates')
        p.toolkit.add_resource('public', 'ckanext-agsview')

    def info(self):
        # View metadata plus the form schema; unlike the FeatureServer view
        # this also accepts a list of layer ids to display.
        return {'name': 'ags_ms_view',
                'title': p.toolkit._('ArcGIS MapServer Service'),
                'icon': 'compass',
                'schema': {
                    'ags_url': [ignore_empty, unicode],
                    'basemap_url': [ignore_empty, unicode],
                    'layer_ids': [ignore_empty, unicode]
                },
                'iframed': False,
                'default_title': p.toolkit._('ArcGIS MapServer Service'),
                }

    def can_view(self, data_dict):
        # Only offer this view for resources whose format is an AGS format.
        return (data_dict['resource'].get('format', '').lower()
                in DEFAULT_AGS_FORMATS)

    def view_template(self, context, data_dict):
        return 'ags_ms_view.html'

    def form_template(self, context, data_dict):
        return 'ags_ms_form.html'
3332784 | <reponame>AlphaMycelium/pathfinder.vim
from heapdict import heapdict
from pathfinder.server.motions.find import FindMotionGenerator
from pathfinder.server.motions.search import SearchMotionGenerator
from pathfinder.server.motions.simple import SimpleMotionGenerator
from pathfinder.server.node import Node
class Dijkstra:
    """
    A path between a start and end point in the same window.

    Finds the cheapest sequence of Vim motions from ``from_view`` to
    ``target_view`` using Dijkstra's shortest-path algorithm, where graph
    nodes are cursor views and edges are candidate motions.

    :param from_view: View of the start point
    :param target_view: View of the target point
    :param min_line: Do not explore nodes above this line
    :param max_line: Do not explore nodes below this line
    """
    def __init__(self, from_view, target_view, min_line, max_line):
        self.from_view = from_view
        self.target_view = target_view
        self.min_line = min_line
        self.max_line = max_line
        # Set of generators producing candidate motions from a node
        # (iteration order is not significant).
        self.motion_generators = {
            SimpleMotionGenerator(self),
            FindMotionGenerator(self),
            SearchMotionGenerator(self),
        }
        self._open_queue = heapdict()  # Min-priority queue: Key -> Distance
        self._open_nodes = dict()  # Key -> Node
        self._closed_nodes = set()  # Key
        # Seed the search with the starting view at distance 0.
        start_node = Node(self, self.from_view, None)
        self._open_queue[start_node.key] = 0
        self._open_nodes[start_node.key] = start_node
    def find_path(self, client_connection):
        """
        Use Dijkstra's algorithm to find the optimal sequence of motions.

        :param client_connection: If another pathfinding request is waiting on this
            connection, exit (returning None) as soon as possible. This cancels the
            pathfinding, moving on to the new request immediately.
        """
        # Loop invariant: _open_queue holds the cheapest known distance for
        # every discovered-but-unexpanded node. Returns None implicitly if
        # cancelled or if the target is unreachable.
        while len(self._open_queue) > 0 and not client_connection.poll():
            # popitem() yields the open node with the smallest distance.
            current_node_key, current_distance = self._open_queue.popitem()
            current_node = self._open_nodes.pop(current_node_key)
            self._closed_nodes.add(current_node_key)
            if current_node.is_target():
                # First time the target is expanded its path is optimal.
                return current_node.reconstruct_path()
            for node in current_node.get_neighbours():
                if node.key in self._closed_nodes:
                    continue
                new_distance = current_distance + current_node.motion_weight(
                    node.came_by_motion
                )
                # Relax the edge: record the neighbour if unseen, or if we
                # found a cheaper route to it.
                if (
                    node.key not in self._open_nodes
                    or new_distance < self._open_queue[node.key]
                ):
                    node.set_came_from(current_node)
                    self._open_nodes[node.key] = node
                    self._open_queue[node.key] = new_distance
| StarcoderdataPython |
4837782 | import pytest
import numpy as np
import torch
import model
import test
@pytest.fixture(params=[4096, 4096*10])
def nb_timesteps(request):
    # Number of audio samples per channel (two lengths tested).
    return int(request.param)
@pytest.fixture(params=[1, 2, 3])
def nb_channels(request):
    # Mono, stereo and 3-channel layouts.
    return request.param
@pytest.fixture(params=[1, 2, 16])
def nb_samples(request):
    # Batch size.
    return request.param
@pytest.fixture(params=[1024, 2048, 4096])
def nfft(request):
    # FFT window size.
    return int(request.param)
@pytest.fixture(params=[2, 4, 8])
def hop(request, nfft):
    # Hop length as a fraction (1/2, 1/4, 1/8) of the FFT size.
    return(nfft // request.param)
@pytest.fixture
def audio(request, nb_samples, nb_channels, nb_timesteps):
    # Random audio batch in (batch, channels, time) layout.
    return torch.rand((nb_samples, nb_channels, nb_timesteps))
def test_stft(audio, nb_channels, nfft, hop):
    """Round-trip check: model STFT followed by reference ISTFT should
    reconstruct the input audio to within 1e-6 RMS error.

    NOTE(review): ``nfft`` and ``hop`` are requested (so the test is
    parametrized over them) but never passed to the model — presumably
    OpenUnmix uses its own defaults; confirm this is intentional.
    """
    unmix = model.OpenUnmix(nb_channels=nb_channels)
    unmix.stft.center = True
    X = unmix.stft(audio)
    X = X.detach().numpy()
    # Last axis holds (real, imag); fold it into complex numbers.
    X_complex_np = X[..., 0] + X[..., 1]*1j
    out = test.istft(X_complex_np)
    # RMS reconstruction error.
    assert np.sqrt(np.mean((audio.detach().numpy() - out)**2)) < 1e-6
| StarcoderdataPython |
1646153 | <filename>Game/__init__.py
import Game.main
import Game.bird
import Game.utils

# ``__all__`` must be a flat sequence of names for ``from Game import *``
# to re-export the submodules' public APIs.  The previous version built a
# tuple of tuples (one element per submodule), which star-import treats as
# three non-string "names" and fails on.  Unpack each submodule's list
# into one flat tuple instead.
__all__ = (
    *main.__all__,
    *bird.__all__,
    *utils.__all__,
)
24626 | from django.views.generic import ListView, CreateView, UpdateView
from django.utils.decorators import method_decorator
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import get_object_or_404, redirect, reverse
from django.urls import reverse_lazy
from django.contrib import messages
from django.template.loader import render_to_string
from django.http import JsonResponse
from django.db.models import Sum
from django_tables2 import RequestConfig
from .models import Order, OrderItem, CURRENCY
from .forms import OrderCreateForm, OrderEditForm
from product.models import Product, Category
from .tables import ProductTable, OrderItemTable, OrderTable
import datetime
@method_decorator(staff_member_required, name='dispatch')
class HomepageView(ListView):
    """Staff dashboard: the ten most recent orders plus sales totals."""
    template_name = 'index.html'
    model = Order
    queryset = Order.objects.all()[:10]
    def get_context_data(self, **kwargs):
        # Aggregate totals over *all* orders (not just the 10 displayed).
        context = super().get_context_data(**kwargs)
        orders = Order.objects.all()
        total_sales = orders.aggregate(Sum('final_value'))['final_value__sum'] if orders.exists() else 0
        paid_value = orders.filter(is_paid=True).aggregate(Sum('final_value'))['final_value__sum']\
            if orders.filter(is_paid=True).exists() else 0
        remaining = total_sales - paid_value
        # ``diviner`` guards the percentage maths against division by zero
        # when there are no sales yet.
        diviner = total_sales if total_sales > 0 else 1
        paid_percent, remain_percent = round((paid_value/diviner)*100, 1), round((remaining/diviner)*100, 1)
        # Format the money figures for display (e.g. "120 €").
        total_sales = f'{total_sales} {CURRENCY}'
        paid_value = f'{paid_value} {CURRENCY}'
        remaining = f'{remaining} {CURRENCY}'
        orders = OrderTable(orders)
        RequestConfig(self.request).configure(orders)
        # locals() dumps every local above into the template context.
        context.update(locals())
        return context
@staff_member_required
def auto_create_order_view(request):
    """Create an order with a placeholder title, rename it once its id is
    known, then redirect the staff user to the edit page."""
    new_order = Order.objects.create(title='Order 66',
                                     date=datetime.datetime.now())
    # The real title needs the database-assigned id, so set it afterwards.
    new_order.title = f'Order - {new_order.id}'
    new_order.save()
    return redirect(new_order.get_edit_url())
@method_decorator(staff_member_required, name='dispatch')
class OrderListView(ListView):
    """Staff-only, paginated list of orders with optional GET filtering."""
    template_name = 'list.html'
    model = Order
    paginate_by = 50
    def get_queryset(self):
        qs = Order.objects.all()
        # Any GET parameters are delegated to the model's filter helper.
        if self.request.GET:
            qs = Order.filter_data(self.request, qs)
        return qs
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Wrap the (already filtered) object list in a django-tables2 table.
        orders = OrderTable(self.object_list)
        RequestConfig(self.request).configure(orders)
        context.update(locals())
        return context
@method_decorator(staff_member_required, name='dispatch')
class CreateOrderView(CreateView):
    """Staff-only view for creating a new Order; redirects to the order's
    update page after a successful save."""
    template_name = 'form.html'
    form_class = OrderCreateForm
    model = Order

    def get_success_url(self):
        # Refresh so the redirect reflects the state stored in the DB.
        self.new_object.refresh_from_db()
        return reverse('update_order', kwargs={'pk': self.new_object.id})

    def form_valid(self, form):
        # Renamed the local from ``object`` — it shadowed the builtin.
        new_order = form.save()
        new_order.refresh_from_db()
        # Stash the saved instance so get_success_url() can use its id.
        self.new_object = new_order
        return super().form_valid(form)
@method_decorator(staff_member_required, name='dispatch')
class OrderUpdateView(UpdateView):
    """Order editing screen: the order's items plus a product picker."""
    model = Order
    template_name = 'order_update.html'
    form_class = OrderEditForm
    def get_success_url(self):
        # Stay on the same edit page after saving.
        return reverse('update_order', kwargs={'pk': self.object.id})
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        instance = self.object
        # Show at most 12 active products in the picker.
        qs_p = Product.objects.filter(active=True)[:12]
        products = ProductTable(qs_p)
        order_items = OrderItemTable(instance.order_items.all())
        RequestConfig(self.request).configure(products)
        RequestConfig(self.request).configure(order_items)
        # locals() dumps every local above into the template context.
        context.update(locals())
        return context
@staff_member_required
def delete_order(request, pk):
    """Delete the order identified by *pk* and return to the homepage."""
    order = get_object_or_404(Order, id=pk)
    order.delete()
    messages.warning(request, 'The order is deleted!')
    return redirect(reverse('homepage'))
@staff_member_required
def done_order_view(request, pk):
    """Mark the order identified by *pk* as paid, then go home."""
    order = get_object_or_404(Order, id=pk)
    order.is_paid = True
    order.save()
    return redirect(reverse('homepage'))
@staff_member_required
def ajax_add_product(request, pk, dk):
    """AJAX: add one unit of product *dk* to order *pk*.

    Creates the order line on first add (copying the product's current
    price/discount), otherwise bumps its quantity.  Returns the
    re-rendered order container as JSON.
    """
    instance = get_object_or_404(Order, id=pk)
    product = get_object_or_404(Product, id=dk)
    order_item, created = OrderItem.objects.get_or_create(order=instance, product=product)
    if created:
        order_item.qty = 1
        # Snapshot pricing at the moment the line is created.
        order_item.price = product.value
        order_item.discount_price = product.discount_value
    else:
        order_item.qty += 1
    order_item.save()
    # One unit leaves stock per add.
    # NOTE(review): there is no floor check, so product.qty can go
    # negative when stock runs out — confirm whether that is intended.
    product.qty -= 1
    product.save()
    instance.refresh_from_db()
    order_items = OrderItemTable(instance.order_items.all())
    RequestConfig(request).configure(order_items)
    data = dict()
    data['result'] = render_to_string(template_name='include/order_container.html',
                                      request=request,
                                      context={'instance': instance,
                                               'order_items': order_items
                                               }
                                      )
    return JsonResponse(data)
@staff_member_required
def ajax_modify_order_item(request, pk, action):
    """AJAX: adjust a single order line.

    *action* is one of ``'add'``, ``'remove'`` or ``'delete'``.  Product
    stock is kept in sync with the line quantity: adding a unit takes one
    from stock, removing a unit returns one, and deleting the line returns
    its whole remaining quantity (mirroring ``ajax_add_product``).

    Returns the re-rendered order container as JSON.
    """
    order_item = get_object_or_404(OrderItem, id=pk)
    product = order_item.product
    instance = order_item.order
    if action == 'remove':
        # Only restock when the line actually shrinks.  The quantity is
        # clamped at 1; the previous version incremented product.qty even
        # when the clamp kicked in, silently inflating stock.
        if order_item.qty > 1:
            order_item.qty -= 1
            product.qty += 1
    if action == 'add':
        order_item.qty += 1
        product.qty -= 1
    product.save()
    order_item.save()
    if action == 'delete':
        # Return the full line quantity to stock before dropping the line;
        # previously the stock taken by those units was lost.
        product.qty += order_item.qty
        product.save()
        order_item.delete()
    data = dict()
    instance.refresh_from_db()
    order_items = OrderItemTable(instance.order_items.all())
    RequestConfig(request).configure(order_items)
    data['result'] = render_to_string(template_name='include/order_container.html',
                                      request=request,
                                      context={
                                          'instance': instance,
                                          'order_items': order_items
                                      }
                                      )
    return JsonResponse(data)
@staff_member_required
def ajax_search_products(request, pk):
    """AJAX: live product search for the order-edit screen.

    Filters active products by title prefix (``q`` GET parameter) and
    returns at most 12 of them rendered as JSON.
    """
    instance = get_object_or_404(Order, id=pk)
    q = request.GET.get('q', None)
    # NOTE(review): ``broswer`` looks like a typo for "browser", but it
    # must match the manager name declared on the Product model — verify
    # against product/models.py before renaming anywhere.
    products = Product.broswer.active().filter(title__startswith=q) if q else Product.broswer.active()
    products = products[:12]
    products = ProductTable(products)
    RequestConfig(request).configure(products)
    data = dict()
    data['products'] = render_to_string(template_name='include/product_container.html',
                                        request=request,
                                        context={
                                            'products': products,
                                            'instance': instance
                                        })
    return JsonResponse(data)
@staff_member_required
def order_action_view(request, pk, action):
    """Apply *action* ('is_paid' or 'delete') to an order, then go home.

    Unknown actions fall through and simply redirect.
    """
    order = get_object_or_404(Order, id=pk)
    if action == 'is_paid':
        order.is_paid = True
        order.save()
    elif action == 'delete':
        order.delete()
    return redirect(reverse('homepage'))
@staff_member_required
def ajax_calculate_results_view(request):
    """AJAX: total / paid / remaining sales for the filtered order set,
    rendered into the result container and returned as JSON."""
    orders = Order.filter_data(request, Order.objects.all())
    total_value, total_paid_value, remaining_value, data = 0, 0, 0, dict()
    if orders.exists():
        total_value = orders.aggregate(Sum('final_value'))['final_value__sum']
        total_paid_value = orders.filter(is_paid=True).aggregate(Sum('final_value'))['final_value__sum'] if\
            orders.filter(is_paid=True) else 0
        remaining_value = total_value - total_paid_value
    # Re-bind the numbers as display strings, e.g. "120 €".
    total_value, total_paid_value, remaining_value = f'{total_value} {CURRENCY}',\
        f'{total_paid_value} {CURRENCY}', f'{remaining_value} {CURRENCY}'
    # locals() hands every local above to the template as context.
    data['result'] = render_to_string(template_name='include/result_container.html',
                                      request=request,
                                      context=locals())
    return JsonResponse(data)
@staff_member_required
def ajax_calculate_category_view(request):
    """AJAX: per-category quantity and income totals over the filtered
    orders, rendered into the result container and returned as JSON."""
    orders = Order.filter_data(request, Order.objects.all())
    order_items = OrderItem.objects.filter(order__in=orders)
    # Group line items by product category and aggregate per group.
    category_analysis = order_items.values_list('product__category__title').annotate(qty=Sum('qty'),
                                                                                     total_incomes=Sum('total_price')
                                                                                     )
    data = dict()
    # ``category`` flags the template's category mode; locals() below
    # passes all of these locals as the template context.
    category, currency = True, CURRENCY
    data['result'] = render_to_string(template_name='include/result_container.html',
                                      request=request,
                                      context=locals()
                                      )
    return JsonResponse(data)
| StarcoderdataPython |
1777524 | <gh_stars>1-10
import os
import pybind11
import subprocess
import sys
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
with open("parametric_plasma_source/__init__.py", "r") as f:
for line in f.readlines():
if "__version__" in line:
version = line.split()[-1].strip('"')
class CMakeExtention(Extension):
    """setuptools Extension whose build is delegated to CMake.

    No sources are given to setuptools; ``sourcedir`` records where the
    CMake project lives instead.  (Class name spelling kept as-is — it is
    part of the public interface used by setup().)
    """
    def __init__(self, name, sourcedir=""):
        Extension.__init__(self, name, sources=[])
        # Absolute path so later cwd changes don't break the build.
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """Custom ``build_ext`` command that drives the build through CMake."""

    def run(self):
        # Fail fast with a clear message when cmake is not on PATH.
        try:
            subprocess.check_output(["cmake", "--version"])
        except FileNotFoundError:
            raise RuntimeError(
                "CMake must be installed to build the "
                "following extensions: "
                # The "+" was missing here: the adjacent string literals
                # concatenated into one long string that was then used as
                # the join() *separator*, producing a garbled message.
                + ", ".join(e.name for e in self.extensions)
            )
        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        """Configure and build one extension via CMake."""
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        # CMake expects output directories with a trailing separator.
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep
        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
            "-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=" + extdir,
            "-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=" + extdir,
            "-DPYTHON_EXECUTABLE=" + sys.executable,
            "-DPYBIND11_PATH=" + pybind11.commands.DIR,
        ]
        cfg = "Debug" if self.debug else "Release"
        build_args = ["--config", cfg]
        cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
        # Everything after "--" goes to the underlying build tool (make).
        build_args += ["--", "-j2"]
        env = os.environ.copy()
        # Embed the package version into the extension's compile flags.
        env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format(
            env.get("CXXFLAGS", ""), self.distribution.get_version()
        )
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        # NOTE(review): the configure step passes ``extdir`` (the output
        # directory) as the CMake source directory; the conventional
        # recipe passes ``ext.sourcedir``.  Left unchanged — confirm
        # against this project's CMakeLists.txt layout.
        subprocess.check_call(
            ["cmake", extdir] + cmake_args, cwd=self.build_temp, env=env
        )
        subprocess.check_call(
            [
                "cmake",
                "--build",
                ".",
            ]
            + build_args,
            cwd=self.build_temp,
        )
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="parametric_plasma_source",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Parametric plasma source for fusion simulations in OpenMC",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/makeclean/parametric-plasma-source/",
packages=["parametric_plasma_source"],
ext_modules=[CMakeExtention("parametric_plasma_source/plasma_source")],
package_data={
"parametric_plasma_source": [
"src/plasma_source.cpp",
"src/plasma_source.hpp",
"src/plasma_source_pybind.cpp",
"src/source_sampling.cpp",
"src/source_generator.cpp",
"CMakeLists.txt",
]
},
cmdclass=dict(build_ext=CMakeBuild),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| StarcoderdataPython |
3390772 | import os
from flask_restful import Resource, reqparse
script_dir = os.path.dirname(__file__)
parser = reqparse.RequestParser()
class ReviewsController(Resource):
    """REST endpoint that appends restaurant reviews to a flat text file."""

    def post(self):
        """Validate and persist one review.

        Expects ``text`` and ``restaurant_name`` in the request; returns
        a success payload with HTTP 200.
        """
        # Build a request-local parser.  The previous version called
        # add_argument() on the shared module-level parser on every POST,
        # mutating global state with duplicate argument registrations.
        request_parser = reqparse.RequestParser()
        request_parser.add_argument('text', type=str, required=True, help='Error when parsing review text')
        request_parser.add_argument('restaurant_name', type=str, required=True,
                                    help='Error when parsing the name of the restaurant')
        args = request_parser.parse_args()
        self.__add_new_review__(review_text=args['text'],
                                restaurant_name=args['restaurant_name'])
        return {'status': 'Review saved successfully'}, 200

    @staticmethod
    def __add_new_review__(review_text, restaurant_name):
        # Append one "name | text" line; the with-block closes the file,
        # so the redundant explicit close() was dropped.
        with open(script_dir + '/reviews.txt', 'a', encoding='utf-8') as f:
            f.write(restaurant_name + " | " + review_text + '\n')
| StarcoderdataPython |
1646126 | <reponame>robtucker/pyspark-tooling
# import pytest
from pyspark_tooling import plan
from tests import base
# @pytest.mark.focus
class TestPlanUtils(base.BaseTest):
    """Tests for pyspark_tooling.plan.parse_plan."""
    def test_parse_plan(self):
        # Parse a captured Spark explain() output and check that all four
        # plan sections come back, in order, and non-empty.
        with open("./tests/sample_plan.txt") as f:
            txt = f.read()
        res = plan.parse_plan(txt)
        assert (len(res)) == 4
        # Relies on dict insertion order being preserved (Python 3.7+).
        keys = [i[0] for i in res.items()]
        assert keys[0] == "physical"
        assert keys[1] == "optimized"
        assert keys[2] == "analyzed"
        assert keys[3] == "logical"
        for _, v in res.items():
            assert len(v) > 0
| StarcoderdataPython |
1665496 | <filename>create_data.py
import collections
import tensorflow as tf
from prepro_utils import preprocess_text, partial, encode_ids, encode_pieces
import sentencepiece as spm
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
special_symbols = {
"<unk>": 0,
"<s>": 1,
"</s>": 2,
"<cls>": 3,
"<sep>": 4,
"<pad>": 5,
"<mask>": 6,
"<eod>": 7,
"<eop>": 8,
}
UNK_ID = special_symbols["<unk>"]
CLS_ID = special_symbols["<cls>"]
SEP_ID = special_symbols["<sep>"]
MASK_ID = special_symbols["<mask>"]
EOD_ID = special_symbols["<eod>"]
class InputFeatures(object):
    """A single set of features of data.

    Plain value container for one padded training example as built by
    ``single_example`` (token ids, masks, segment ids and the various
    label encodings used by the tagger).
    """
    def __init__(self,
                 input_ids,
                 input_mask,
                 segment_ids,
                 label_id,
                 label_x_id,
                 label_gather,
                 label_mask_x,
                 label_mask_gather,
                 label_index,
                 is_real_example=True):
        # Token ids padded to the maximum sequence length.
        self.input_ids = input_ids
        # 0 for real tokens, 1 for padding (see single_example).
        self.input_mask = input_mask
        # XLNet-style segment ids (SEG_ID_A/B/CLS/PAD).
        self.segment_ids = segment_ids
        # Per-token label ids (labels repeated over sub-word pieces).
        self.label_id = label_id
        # Per-token labels with continuation pieces marked as "X".
        self.label_x_id = label_x_id
        # Per-word labels, gathered at first-piece positions only.
        self.label_gather = label_gather
        # 1 at first-piece (and special-token) positions, else 0.
        self.label_mask_x = label_mask_x
        # Mask matching label_gather / label_index.
        self.label_mask_gather = label_mask_gather
        # Indices of the first piece of each word within the sequence.
        self.label_index = label_index
        self.is_real_example = is_real_example
def process_seq(words, labels, sp, x, lower=True):
    """Tokenize one sentence into sentencepiece ids with aligned labels.

    :param words: sequence of word strings
    :param labels: one integer label per word (same length as *words*)
    :param sp: loaded SentencePieceProcessor
    :param x: label id used to mark non-initial sub-word pieces ("X")
    :param lower: lowercase during preprocessing
    :return: (tokens, label, label_x, is_start_token) where *label*
        repeats the word label over all its pieces, *label_x* keeps the
        label only on the first piece (continuations get *x*), and
        *is_start_token* is 1 on each word's first piece else 0.
    """
    assert len(words) == len(labels)
    prepro_func = partial(preprocess_text, lower=lower)
    tokens = []
    label = []
    label_x = []
    is_start_token = []
    for i in range(len(words)):
        # One word may become several sentencepiece ids.
        t = encode_ids(sp, prepro_func(words[i]))
        tokens.extend(t)
        label.extend([int(labels[i])] * len(t))
        label_x.append(int(labels[i]))
        is_start_token.append(1)
        # Continuation pieces carry the "X" label and are not starts.
        for _ in range(len(t) - 1):
            label_x.append(x)
            is_start_token.append(0)
    return tokens, label, label_x, is_start_token
def get_data(input_file, max_seq_length, sp, encoder, lower=True):
    """Yield packed token/label chunks of at most *max_seq_length* tokens.

    Reads a CoNLL-style file (one "word<TAB>label" per line, sentences
    separated by blank lines), tokenizes each sentence with
    ``process_seq`` and greedily packs whole sentences into chunks.
    A sentence longer than the limit is split into fixed-size slices.

    :param encoder: label-name -> id map; unknown labels map to 0 and
        ``encoder["X"]`` marks sub-word continuations.
    """
    with open(input_file, 'r') as f:
        lines = f.readlines()
    seqs = []
    for line in lines:
        line = line.strip()
        if line:
            line = line.split("\t")
            # Append the (word, label-id) pair to the current sentence;
            # a blank line below starts the next one.
            if seqs:
                seqs[-1].append((line[0], encoder[line[1]] if line[1] in encoder.keys() else 0))
            else:
                seqs.append([(line[0], encoder[line[1]] if line[1] in encoder.keys() else 0)])
        else:
            seqs.append([])
    tokens = []
    label = []
    label_x = []
    is_start_token = []
    for s in seqs:
        if not s:
            continue
        ws, ls = zip(*s)
        t, l, lx, ist = process_seq(ws, ls, sp, encoder["X"], lower)
        if len(t) > max_seq_length:
            # Oversized sentence: flush whatever was accumulated, then
            # emit the sentence itself in max_seq_length-sized slices.
            # NOTE(review): the flushed chunk may be empty if this is the
            # very first sentence — confirm downstream tolerates that.
            yield tokens, label, label_x, is_start_token
            tokens = []
            label = []
            label_x = []
            is_start_token = []
            t = [t[i:i + max_seq_length] for i in range(0, len(t), max_seq_length)]
            l = [l[i:i + max_seq_length] for i in range(0, len(l), max_seq_length)]
            lx = [lx[i:i + max_seq_length] for i in range(0, len(lx), max_seq_length)]
            ist = [ist[i:i + max_seq_length] for i in range(0, len(ist), max_seq_length)]
            z = zip(t, l, lx, ist)
            for i in z:
                yield i
            continue
        if len(t) + len(tokens) > max_seq_length:
            # Current chunk is full: emit it and start a new chunk with
            # this sentence.
            yield tokens, label, label_x, is_start_token
            tokens = t
            label = l
            label_x = lx
            is_start_token = ist
        else:
            tokens.extend(t)
            label.extend(l)
            label_x.extend(lx)
            is_start_token.extend(ist)
    # Emit the final, partially filled chunk.
    if tokens:
        yield tokens, label, label_x, is_start_token
def single_example(tokens, labels, labels_x, is_start_token, max_length):
    """Pad one token chunk into a fixed-length InputFeatures example.

    Appends the XLNet ``<sep> <sep> <cls>`` tail, pads everything to
    *max_length*, and builds the gather-style label views: per-word
    labels at first-piece positions plus the index of each such position.
    """
    tokens_length = len(tokens)
    input_ids = []
    input_mask = []
    segment_ids = []
    label_id = []
    label_x_id = []
    label_gather = []
    label_mask_x = []
    label_mask_gather = []
    label_index = []
    i = 0
    # Collect, for every word start, its position and its label.
    for s in is_start_token:
        if s:
            label_index.append(i)
            label_gather.append(labels[i])
            label_mask_gather.append(1)
        i += 1
    for _ in range(max_length - len(label_gather)):
        label_gather.append(0)
    # The three trailing special tokens also count as gather positions.
    label_index.extend([i, i + 1, i + 2])
    label_mask_gather.extend([1, 1, 1])
    for _ in range(max_length - len(label_mask_gather)):
        label_mask_gather.append(0)
    label_index.append(0)
    input_ids.extend(tokens)
    # input_mask convention here: 0 = real token, 1 = padding.
    input_mask.extend([0] * tokens_length)
    segment_ids.extend([SEG_ID_A] * tokens_length)
    label_id.extend(labels)
    label_x_id.extend(labels_x)
    label_mask_x.extend(is_start_token)
    # XLNet-style tail: <sep> <sep> <cls>.
    input_ids.extend([SEP_ID, SEP_ID, CLS_ID])
    input_mask.extend([0, 0, 0])
    segment_ids.extend([SEG_ID_A, SEG_ID_B, SEG_ID_CLS])
    label_id.extend([0, 0, 0])
    label_x_id.extend([0, 0, 0])
    label_mask_x.extend([1, 1, 1])
    # Right-pad every per-token sequence up to max_length.
    for _ in range(max_length - tokens_length - 3):
        input_ids.append(0)
        input_mask.append(1)
        segment_ids.append(SEG_ID_PAD)
        label_id.append(0)
        label_x_id.append(0)
        label_mask_x.append(0)
    assert len(input_ids) == max_length
    assert len(input_mask) == max_length
    assert len(segment_ids) == max_length
    assert len(label_id) == max_length
    assert len(label_x_id) == max_length
    assert len(label_mask_gather) == max_length
    return InputFeatures(input_ids, input_mask, segment_ids, label_id, label_x_id, label_gather, label_mask_x,
                         label_mask_gather, label_index)
def file_based_convert_examples_to_features(examples, output_file):
    """Serialize a list of InputFeatures into a TFRecord file."""
    tf.logging.info("Start writing tfrecord %s.", output_file)
    writer = tf.python_io.TFRecordWriter(output_file)
    tf.logging.info("totle %d examples", len(examples))
    for ex_index, example in enumerate(examples):
        if ex_index % 100 == 0:
            tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
        # Helpers wrapping lists into TF Feature protos.
        def create_int_feature(values):
            f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
            return f
        def create_float_feature(values):
            f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
            return f
        # Field names here must match the parsing spec used at training time.
        features = collections.OrderedDict()
        features["input_ids"] = create_int_feature(example.input_ids)
        features["input_mask"] = create_float_feature(example.input_mask)
        features["segment_ids"] = create_int_feature(example.segment_ids)
        features["label_ids"] = create_int_feature(example.label_id)
        features["label_x_id"] = create_int_feature(example.label_x_id)
        features["label_gather"] = create_int_feature(example.label_gather)
        features["label_mask_x"] = create_float_feature(example.label_mask_x)
        features["label_mask_gather"] = create_float_feature(example.label_mask_gather)
        features["label_index"] = create_int_feature(example.label_index)
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(tf_example.SerializeToString())
    tf.logging.info("write finish!")
    writer.close()
def convert_tsv_to_tfrecord(input_file, output_file, max_seq_length, sp_model, encoder, lower):
    """Convert a CoNLL-style TSV file into padded TFRecord examples.

    :param sp_model: path to the sentencepiece model file
    :param encoder: label-name -> id map (must contain "X")
    :return: number of examples written
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    sp = spm.SentencePieceProcessor()
    sp.load(sp_model)
    examples = []
    # Reserve 3 positions for the <sep> <sep> <cls> tail added later.
    for data in get_data(input_file, max_seq_length - 3, sp, encoder, lower):
        examples.append(single_example(*data, max_length=max_seq_length))
    if "train" in input_file:
        # Extra pass over training data to print corpus statistics:
        # piece count, labelled-word count, and first-piece char total.
        tokens = 0
        words = 0
        first_word = 0
        prepro_func = partial(preprocess_text, lower=lower)
        with open(input_file, 'r') as f:
            lines = f.readlines()
            for line in lines:
                line = line.strip()
                if line:
                    line = line.split("\t")
                    if line[-1] != "O":
                        words += 1
                    pieces = encode_pieces(sp, prepro_func(line[0]))
                    tokens += len(pieces)
                    first_word += len(pieces[0])
        print("{} {} {} {}".format(input_file, tokens, words, first_word))
    file_based_convert_examples_to_features(examples, output_file)
    return len(examples)
if __name__ == '__main__':
m = {"O": 0, "B-Chemical": 1, "I-Chemical": 2, "E-Chemical": 3, "S-Chemical": 4, "B-Disease": 5,
"I-Disease": 6, "E-Disease": 7, "S-Disease": 8, "X": 9}
convert_tsv_to_tfrecord("data/BC5CDR-IOBES/train.tsv", "cache/train.tfrecord", 512,
"E:/tfhub-module/xlnet_cased_L-12_H-768_A-12/spiece.model", m, True)
# sp = spm.SentencePieceProcessor()
# sp.load("E:/tfhub-module/xlnet_cased_L-12_H-768_A-12/spiece.model")
# prepro_func = partial(preprocess_text, lower=True)
# s = [17, 23, 6159, 3141, 814, 17, 13, 17, 12674, 701, 9323, 11581, 23157, 25, 2133, 153, 672, 17, 26, 17, 23, 1487, 17]
# print(sp.decode_ids(s))
# for i in s:
# print(sp.decode_ids([i]))
| StarcoderdataPython |
1630183 | <gh_stars>0
import pytest
from text_emb.text_embedding import Fasttext, BOW, Tfidf
from util import ProtestDataset_txtfts_2, Lighting
import os
import torchvision.transforms as transforms
from easyocr.joint_model import Sentence_model
from torch.utils.data import Dataset, DataLoader
import torch
from train.collate_fn import CommonCollateFn
def test_sentence_model():
    """Smoke test: run one training pass of Sentence_model over the
    fasttext-embedded protest dataset.

    NOTE(review): requires CUDA and the dataset at the hard-coded
    ``data_dir`` path; there are no assertions — it only checks the
    pipeline runs without raising.
    """
    data_dir = "/home/Data/image_data/Presidential_clean_traintest"
    id_lab_trans_train = os.path.join(data_dir, "id_lab_trans_train.csv")
    id_lab_trans_eval = os.path.join(data_dir, "id_lab_trans_test.csv")
    id_lab_trans = os.path.join(data_dir, "id_lab_trans.csv")
    id_path_train = os.path.join(data_dir, "id_path_train.csv")
    id_path_eval = os.path.join(data_dir, "id_path_test.csv")
    # dataloader: ImageNet normalisation plus PCA lighting augmentation.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    eigval = torch.Tensor([0.2175, 0.0188, 0.0045])
    eigvec = torch.Tensor([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
    train_dataset = ProtestDataset_txtfts_2(
        id_label_trans_train_f=id_lab_trans_train,
        id_label_trans_f=id_lab_trans,
        id_path_f=id_path_train,
        embedding="fasttext",
        transform=transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomRotation(30),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness = 0.4,
                contrast = 0.4,
                saturation = 0.4,
            ),
            transforms.ToTensor(),
            Lighting(0.1, eigval, eigvec),
            normalize,
        ]),
    )
    train_loader = DataLoader(
        train_dataset,
        num_workers=4,
        batch_size=8,
        shuffle=True,
        collate_fn=CommonCollateFn
    )
    # model: forward every batch's text features through the network.
    model = Sentence_model()
    model = model.cuda()
    model.train()
    for data in train_loader:
        text_enc = data["text_fts"].cuda()
        out = model(text_enc)
| StarcoderdataPython |
3270637 |
"""
PROBLEM STATEMENT
design a data structure known as
a Least Recently Used (LRU) cache.
An LRU cache is a type of cache in which we remove
the least recently used entry when the cache memory
reaches its limit. For the current problem,
consider both get and set operations as an use
operation.
Your job is to use an appropriate data structure(s) to implement the cache.
* In case of a cache hit, your get() operation should return the appropriate value.
* In case of a cache miss, your get() should return -1.
* While putting an element in the cache,
your put() / set() operation must insert the element.
If the cache is full, you must write code that removes
the least recently used entry first and then insert
the element.
All operations must take O(1) time
"""
# We also use maps here: a dict can store the cached entries,
# and a queue can keep track of the least recently used cache item.
from collections import OrderedDict, deque
class LRU_Cache(object):
    """Fixed-capacity Least-Recently-Used cache.

    Both ``get`` and ``set`` count as a "use" of a key.  Entries live in
    an ``OrderedDict`` kept in least- to most-recently-used order, so
    every operation is O(1).

    Fixes over the previous implementation:
      * ``get`` no longer deletes the entry on a hit (a second ``get``
        of the same key used to return -1);
      * ``set`` on an existing key now updates the stored value and
        refreshes its recency instead of silently evicting the oldest
        history entry.
    """

    def __init__(self, capacity):
        # A capacity <= 0 means the cache can never store anything.
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        """Return the value for *key*, or -1 on a cache miss.

        Quirk preserved from the original: a zero-capacity cache
        returns ``None`` instead of -1.
        """
        if self.capacity == 0:
            return None
        if key not in self.cache:
            return -1
        # A hit makes this key the most recently used.
        self.cache.move_to_end(key)
        return self.cache[key]

    def set(self, key, value):
        """Insert or update *key*; evict the LRU entry when full."""
        if self.capacity <= 0:
            return None
        if key in self.cache:
            # Refresh both the value and the recency of the key.
            self.cache.move_to_end(key)
            self.cache[key] = value
            return
        if self.size() >= self.capacity:
            # popitem(last=False) drops the least recently used entry.
            self.cache.popitem(last=False)
        self.cache[key] = value

    def size(self):
        """Number of entries currently stored."""
        return len(self.cache)
our_cache = LRU_Cache(5)
our_cache.set(1, 1);
our_cache.set(2, 2);
our_cache.set(3, 3);
our_cache.set(4, 4);
print(our_cache.get(1)) # returns 1
print(our_cache.get(2)) # returns 2
print(our_cache.get(9) ) # returns -1 because 9 is not present in the cache
our_cache.set(5, 5)
our_cache.set(6, 6)
print(our_cache.get(2)) # returns -1 because the cache reached it's capacity and 2 was the least recently used entry
print(our_cache.get(3)) # returns -1
print(our_cache.get(1))
print(our_cache.get(5)) # returns 5
print(our_cache.get(6)) # returns 6
our_cache.set(7, 7);
our_cache.set(8, 8);
our_cache.set(9, 9);
print(our_cache.get(2)) # returns -1 because the cache reached it's capacity
print(our_cache.get(3)) # returns -1
print(our_cache.get(1)) # returns -1
print(our_cache.get(5)) # returns -1
print(our_cache.get(6)) # returns -1
print(our_cache.get(4)) # returns 4
print(our_cache.get(8)) # returns 8
print(our_cache.get(9)) # returns 9
print(our_cache.get(6)) # returns -1
# test case where capacity is null
our_cache = LRU_Cache(0)
our_cache.set(1, 1);
our_cache.set(2, 2);
our_cache.set(3, 3);
print(our_cache.get(1)) # returns None since capacity is 0
print(our_cache.get(2)) # returns None since capacity is 0
# test case where capacity is negative
our_cache = LRU_Cache(-5)
our_cache.set(1, 1);
our_cache.set(2, 2);
our_cache.set(3, 3);
print(our_cache.get(1)) # returns -1 since capacity is -1
print(our_cache.get(2)) # returns -1 since capacity is -1 | StarcoderdataPython |
1663915 | <gh_stars>1-10
# INPUT PARAMETERS *****************************************************
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy
from SSFM import SSFM_FD
from numpy.fft import fftshift, ifftshift
c = 299792.458 # speed of light, nm/ps
# Input field parameters
tfwhm = 28.4e-3 # pulse FWHM, ps
ni = 1/tfwhm # ps^-1
lamda_central = 835
fo=c/lamda_central # central pulse frequency (THz)
# Fiber parameters
gamma = 110 # nonlinear coefficient, W^-1 * km^-1
alpha = 0 # attenuation coefficient (km^-1)
L = 0.00001 # fiber length (km)
betaw =np.array ([0,0, -11.830, 8.1038e-2, -9.5205e-5, 2.0737e-7, -5.3943e-10, 1.3486e-12, -2.5495e-15, 3.0524e-18, -1.714e-21]) # dispersion (beta) coefficients (ps^n / nm)
# Numerical parameters
nt = 2**15 # number of spectral points
time = 32 # total time window, ps
dt = time/nt # time step, ps
t=np.arange(-(time / 2), (time / 2 ), dt) # time grid, ps
dz = 1e-8 # initial longitudinal step (km)
v=np.append(np.arange(0, nt/2 ), np.arange(-nt/2, 0))/(dt*nt) # FFT-ordered frequency grid (THz)
# INPUT FIELD ***********************************************************
PeakPower = 10000 # W, change here the Input Power!
# sech-shaped initial field envelope, in units of W^0.5
u0= np.sqrt(PeakPower) / np.cosh(t / tfwhm)
# PROPAGATE: find the numerical solution ********************************
#************************************************************************
print('Interaction Picture Method started')
tol = 1e-1 # photon error number, or local error, depending on the used method
u = SSFM_FD(u0,dt,L,dz,alpha,betaw,gamma,fo,tol)
1685109 | <reponame>OuyangChao/Paddle<filename>python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py<gh_stars>1-10
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
def stable_softmax(x):
    """Softmax of *x*, shifted by its max (and floored at -64) for
    numerical stability."""
    exponentials = np.exp((x - np.max(x)).clip(-64.))
    return exponentials / exponentials.sum()
def log_softmax(x, axis=-1):
    """Log of the numerically stable softmax, applied along *axis*."""
    def _row_log_input(row):
        # Same stabilisation as stable_softmax: shift by the row max and
        # floor at -64 before exponentiating.
        exps = np.exp((row - np.max(row)).clip(-64.))
        return exps / np.sum(exps)
    return np.log(np.apply_along_axis(_row_log_input, axis, x))
def cross_entropy_loss_1d(input,
                          label,
                          weight=None,
                          reduction='mean',
                          ignore_index=-100):
    """Reference (NumPy) cross-entropy for (N, C) logits and (N,) labels.

    Mirrors paddle's CrossEntropyLoss: entries whose label equals
    *ignore_index* contribute zero loss and zero weight.

    :return: for 'sum'/'mean', a (loss, total_weight) pair where
        total_weight is a 1-element float64 array; for 'none', the
        per-sample loss array.
    """
    log_softmax_out = log_softmax(input)
    input_shape = log_softmax_out.shape
    N = input_shape[0]
    C = input_shape[1]
    out = np.zeros_like(label).astype(np.float64)
    total_weight = 0
    for i in range(N):
        cur_target = label[i]
        if cur_target == ignore_index:
            out[i] = 0
            continue
        cur_weight = weight[cur_target] if weight is not None else 1
        total_weight += cur_weight
        out[i] = -log_softmax_out[i][cur_target] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        # NOTE(review): divides by total_weight, which is 0 when every
        # label is ignored — confirm that case cannot occur in the tests.
        return out.sum() / total_weight, np.array(
            [total_weight]).astype('float64')
    elif reduction == 'none':
        return out
def cross_entropy_loss_2d(input,
                          label,
                          weight=None,
                          reduction='mean',
                          ignore_index=-100):
    """Numpy reference for 2d cross entropy on channels-last logits.

    *input* is (N, H, W, C) logits and *label* is (N, H, W) class
    indices.  Returns the per-pixel loss array for reduction='none';
    otherwise a (reduced_loss, total_weight) pair.
    """
    log_probs = log_softmax(input)
    batch, height, width = log_probs.shape[:3]
    out = np.zeros_like(label).astype(np.float64)
    total_weight = 0
    for i in range(batch):
        for h in range(height):
            for w in range(width):
                target = label[i][h][w]
                if target == ignore_index:
                    # ignored pixels contribute neither loss nor weight
                    out[i][h][w] = 0
                    continue
                pixel_weight = weight[target] if weight is not None else 1
                total_weight += pixel_weight
                out[i][h][w] = -log_probs[i][h][w][target] * pixel_weight
    if reduction == 'none':
        return out
    total = np.array([total_weight]).astype('float64')
    if reduction == 'sum':
        return np.sum(out), total
    if reduction == 'mean':
        return out.sum() / total_weight, total
class CrossEntropyLoss(unittest.TestCase):
    """Cross-checks paddle's CrossEntropyLoss (layer and functional API)
    against the numpy references above, in static-graph and dygraph mode.

    Every test follows the same recipe, so graph construction and
    execution boilerplate lives in the private helpers below; each
    ``test_*`` method only chooses shapes, weights, reduction and
    ignore_index.  The dygraph layer is built with the default
    ``axis=-1``, which is equivalent to the ``axis=1`` the old code
    passed for rank-2 inputs.
    """

    @staticmethod
    def _place():
        # Run on GPU when the wheel was compiled with CUDA, else CPU.
        return fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
        ) else fluid.CPUPlace()

    def _run_static(self, input_np, label_np, weight_np=None,
                    reduction='mean', ignore_index=-100, functional=False):
        """Build and run the loss in a static program, return its value."""
        paddle.enable_static()
        prog = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(
                name='input', shape=list(input_np.shape), dtype='float64')
            label = fluid.data(
                name='label', shape=list(label_np.shape), dtype='int64')
            feed = {'input': input_np, 'label': label_np}
            weight = None
            if weight_np is not None:
                # one weight entry per class
                weight = fluid.data(
                    name='weight', shape=list(weight_np.shape),
                    dtype='float64')
                feed['weight'] = weight_np
            if functional:
                ret = paddle.nn.functional.cross_entropy(
                    input, label, weight=weight, reduction=reduction)
            else:
                cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                    weight=weight, reduction=reduction,
                    ignore_index=ignore_index)
                ret = cross_entropy_loss(input, label)
            exe = fluid.Executor(self._place())
            static_ret = exe.run(prog, feed=feed, fetch_list=[ret])
        if reduction == 'none':
            # drop the singleton dims of the elementwise output
            static_ret = np.squeeze(static_ret)
        self.assertIsNotNone(static_ret)
        return static_ret

    def _run_dygraph(self, input_np, label_np, weight_np=None,
                     reduction='mean', ignore_index=-100, functional=False):
        """Run the same loss eagerly, return its numpy value."""
        with fluid.dygraph.guard():
            weight = None
            if weight_np is not None:
                weight = fluid.dygraph.to_variable(weight_np)
            if functional:
                dy_ret = paddle.nn.functional.cross_entropy(
                    fluid.dygraph.to_variable(input_np),
                    fluid.dygraph.to_variable(label_np),
                    weight=weight, reduction=reduction)
            else:
                cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                    weight=weight, reduction=reduction,
                    ignore_index=ignore_index)
                dy_ret = cross_entropy_loss(
                    fluid.dygraph.to_variable(input_np),
                    fluid.dygraph.to_variable(label_np))
            dy_ret_value = dy_ret.numpy()
        if reduction == 'none':
            dy_ret_value = np.squeeze(dy_ret_value)
        self.assertIsNotNone(dy_ret_value)
        return dy_ret_value

    def _check(self, input_np, label_np, weight_np=None, reduction='mean',
               ignore_index=-100, functional=False):
        """Assert that static, dygraph and the numpy reference all agree."""
        static_ret = self._run_static(input_np, label_np, weight_np,
                                      reduction, ignore_index, functional)
        dy_ret_value = self._run_dygraph(input_np, label_np, weight_np,
                                         reduction, ignore_index, functional)
        reference = (cross_entropy_loss_1d
                     if label_np.ndim == 1 else cross_entropy_loss_2d)
        expected = reference(input_np, label_np, weight=weight_np,
                             reduction=reduction, ignore_index=ignore_index)
        if reduction != 'none':
            expected = expected[0]  # reference also returns total_weight
        self.assertTrue(np.allclose(static_ret, dy_ret_value))
        self.assertTrue(np.allclose(static_ret, expected))
        self.assertTrue(np.allclose(dy_ret_value, expected))

    # ---- 1d (N, C) cases -------------------------------------------------

    def test_cross_entropy_loss_1d_with_mean_ignore(self):
        input_np = np.random.random([2, 4]).astype(np.float64)
        label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
        self._check(input_np, label_np, ignore_index=0)

    def test_cross_entropy_loss_1d_with_weight_mean_ignore(self):
        input_np = np.random.random([2, 4]).astype(np.float64)
        label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
        weight_np = np.random.random([4]).astype(np.float64)  # shape: C
        self._check(input_np, label_np, weight_np, ignore_index=0)

    def test_cross_entropy_loss_1d_with_weight_mean(self):
        input_np = np.random.random([2, 4]).astype(np.float64)
        label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
        weight_np = np.random.random([4]).astype(np.float64)  # shape: C
        self._check(input_np, label_np, weight_np)

    def test_cross_entropy_loss_1d_with_weight_sum(self):
        input_np = np.random.random([100, 200]).astype(np.float64)  # N, C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)
        weight_np = np.random.random([200]).astype(np.float64)  # C
        self._check(input_np, label_np, weight_np, reduction='sum')

    def test_cross_entropy_loss_1d_with_weight_none(self):
        input_np = np.random.random([100, 200]).astype(np.float64)  # N, C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)
        weight_np = np.random.random([200]).astype(np.float64)  # C
        self._check(input_np, label_np, weight_np, reduction='none')

    def test_cross_entropy_loss_1d_with_weight_none_func(self):
        input_np = np.random.random([100, 200]).astype(np.float64)  # N, C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)
        weight_np = np.random.random([200]).astype(np.float64)  # C
        self._check(input_np, label_np, weight_np, reduction='none',
                    functional=True)

    def test_cross_entropy_loss_1d_mean(self):
        input_np = np.random.random([100, 200]).astype(np.float64)  # N, C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)
        self._check(input_np, label_np)

    def test_cross_entropy_loss_1d_sum(self):
        input_np = np.random.random([100, 200]).astype(np.float64)  # N, C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)
        self._check(input_np, label_np, reduction='sum')

    def test_cross_entropy_loss_1d_none(self):
        input_np = np.random.random([100, 200]).astype(np.float64)  # N, C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)
        self._check(input_np, label_np, reduction='none')

    # ---- 2d (N, H, W, C) channels-last cases -----------------------------

    def test_cross_entropy_loss_2d_with_weight_none(self):
        input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64)
        label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)  # C
        self._check(input_np, label_np, weight_np, reduction='none')

    def test_cross_entropy_loss_2d_with_weight_mean(self):
        input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64)
        label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)  # C
        self._check(input_np, label_np, weight_np, reduction='mean')

    def test_cross_entropy_loss_2d_with_weight_sum(self):
        input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64)
        label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype(np.int64)
        weight_np = np.random.random(size=(3, )).astype(np.float64)  # C
        self._check(input_np, label_np, weight_np, reduction='sum')

    def test_cross_entropy_loss_2d_none(self):
        input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64)
        label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype(np.int64)
        self._check(input_np, label_np, reduction='none')

    def test_cross_entropy_loss_2d_mean(self):
        input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64)
        label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype(np.int64)
        self._check(input_np, label_np, reduction='mean')

    def test_cross_entropy_loss_2d_sum(self):
        input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64)
        label_np = np.random.randint(0, 3, size=(2, 2, 2)).astype(np.int64)
        self._check(input_np, label_np, reduction='sum')


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
115524 | import copy
import torch
from corgie import constants, exceptions
from corgie.log import logger as corgie_logger
from corgie.boundingcube import BoundingCube
from corgie.layers.base import register_layer_type, BaseLayerType
from corgie import helpers
class VolumetricLayer(BaseLayerType):
    """Base class for layers holding volumetric (x, y, z) data.

    Adds bounding-cube based read/write on top of ``BaseLayerType`` and
    provides the default scheme for breaking a bounding cube into
    processing chunks.
    """

    def __init__(self, data_mip_ranges=None, **kwargs):
        # NOTE(review): ``data_mip_ranges`` is accepted but not used in this
        # class -- presumably consumed by subclasses or kept for API
        # symmetry; confirm before removing.
        super().__init__(**kwargs)
        # Bookkeeping for what has been declared writable so far.
        self.declared_write_mips = []
        self.declared_write_bcube = BoundingCube(0, 0, 0, 0, 0, 0, 0)

    def read(self, bcube, mip, **kwargs):
        """Read data for ``bcube`` at ``mip`` after applying this layer's
        indexing scheme to the cube."""
        indexed_bcube = self.indexing_scheme(bcube, mip, kwargs)
        return super().read(bcube=indexed_bcube, mip=mip, **kwargs)

    def write(self, data_tens, bcube, mip, **kwargs):
        """Write ``data_tens`` into ``bcube`` at ``mip`` after applying this
        layer's indexing scheme to the cube."""
        indexed_bcube = self.indexing_scheme(bcube, mip, kwargs)
        super().write(data_tens=data_tens, bcube=indexed_bcube, mip=mip, **kwargs)

    def indexing_scheme(self, bcube, mip, kwargs):
        """Hook for subclasses to remap a bcube before I/O.

        The default is the identity.  ``kwargs`` is the caller's keyword
        dict, passed by reference so implementations may consume entries
        from it (see SectionValueLayer).
        """
        return bcube

    def break_bcube_into_chunks(self, bcube, chunk_xy, chunk_z,
            mip, flatten=True, chunk_xy_step=None, chunk_z_step=None,
            **kwargs):
        """Default breaking up of a bcube into smaller bcubes (chunks).

        Args:
            bcube: BoundingCube for the region to be broken into chunks.
            chunk_xy: chunk extent in x and y, specified at ``mip``.
            chunk_z: chunk extent in z (number of sections).
            mip: int MIP level at which ``chunk_xy`` is specified.
            flatten: if True, return a flat list of chunks; otherwise a
                nested [z][x][y] list of lists.
            chunk_xy_step: stride between chunk starts in x and y;
                defaults to ``chunk_xy`` (non-overlapping chunks).
            chunk_z_step: stride between chunk starts in z; defaults to
                ``chunk_z``.

        Returns:
            A list of BoundingCube chunks (flat or nested per ``flatten``).
        """
        indexed_bcube = self.indexing_scheme(bcube, mip, kwargs)
        x_range = indexed_bcube.x_range(mip=mip)
        y_range = indexed_bcube.y_range(mip=mip)
        z_range = indexed_bcube.z_range()

        if chunk_xy_step is None:
            chunk_xy_step = chunk_xy
        if chunk_z_step is None:
            chunk_z_step = chunk_z

        xy_chunks = []
        flat_chunks = []
        for zs in range(z_range[0], z_range[1], chunk_z_step):
            xy_chunks.append([])
            for xs in range(x_range[0], x_range[1], chunk_xy_step):
                xy_chunks[-1].append([])
                for ys in range(y_range[0], y_range[1], chunk_xy_step):
                    chunk = BoundingCube(xs, xs + chunk_xy,
                            ys, ys + chunk_xy,
                            zs, zs + chunk_z,
                            mip=mip)
                    xy_chunks[-1][-1].append(chunk)
                    flat_chunks.append(chunk)

        if flatten:
            return flat_chunks
        else:
            return xy_chunks
@register_layer_type("img")
class ImgLayer(VolumetricLayer):
    """Image layer: bilinear resampling, single uint8 channel by default."""

    def __init__(self, *args, num_channels=1, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_channels = num_channels

    def get_downsampler(self):
        """Return a callable that halves x/y resolution bilinearly."""
        def _downsample(data_tens):
            return torch.nn.functional.interpolate(
                data_tens.float(), mode='bilinear', scale_factor=1 / 2,
                align_corners=False, recompute_scale_factor=False)
        return _downsample

    def get_upsampler(self):
        """Return a callable that doubles x/y resolution bilinearly."""
        def _upsample(data_tens):
            return torch.nn.functional.interpolate(
                data_tens.float(), mode='bilinear', scale_factor=2.0,
                align_corners=False, recompute_scale_factor=False)
        return _upsample

    def get_num_channels(self, *args, **kwargs):
        return self.num_channels

    def get_default_data_type(self):
        return 'uint8'
@register_layer_type("field")
class FieldLayer(VolumetricLayer):
    """Displacement-field layer: always two channels; resampling rescales
    the vector values to stay correct in the new pixel grid."""

    def __init__(self, *args, num_channels=2, **kwargs):
        # A field is a 2-vector per voxel; any other channel count is a
        # caller error.
        if num_channels != 2:
            raise exceptions.ArgumentError("Field layer 'num_channels'",
                    "Field layer must have 2 channels. 'num_channels' provided: {}".format(
                        num_channels
                    ))
        super().__init__(*args, **kwargs)

    def get_downsampler(self):
        """Return a callable halving resolution; doubles vector values so
        displacements keep their meaning at the coarser MIP."""
        def _downsample(data_tens):
            resized = torch.nn.functional.interpolate(
                data_tens.float(), mode='bilinear', scale_factor=1 / 2,
                align_corners=False, recompute_scale_factor=False)
            return resized * 2
        return _downsample

    def get_upsampler(self):
        """Return a callable doubling resolution; halves vector values."""
        def _upsample(data_tens):
            resized = torch.nn.functional.interpolate(
                data_tens.float(), mode='bilinear', scale_factor=2.0,
                align_corners=False, recompute_scale_factor=False)
            return resized * 0.5
        return _upsample

    def get_num_channels(self, *args, **kwargs):
        return 2

    def get_default_data_type(self):
        return 'float32'
@register_layer_type("mask")
class MaskLayer(VolumetricLayer):
    """Binary mask layer: one channel, nearest-neighbor resampling, and
    values binarized on every read."""

    def __init__(self, binarization=None,
                 num_channels=1, **kwargs):
        self.binarizer = helpers.Binarizer(binarization)
        # Masks are strictly single-channel.
        if num_channels != 1:
            raise exceptions.ArgumentError("Mask layer 'num_channels'",
                    "Mask layer must have 1 channels. 'num_channels' provided: {}".format(
                        num_channels
                    ))
        super().__init__(**kwargs)

    def read(self, **kwargs):
        """Read the underlying data and binarize it before returning."""
        raw = super().read(**kwargs)
        return self.binarizer(raw)

    def get_downsampler(self):
        """Return a callable halving resolution with nearest-neighbor."""
        def _downsample(data_tens):
            return torch.nn.functional.interpolate(
                data_tens.float(), mode='nearest', scale_factor=1 / 2,
                recompute_scale_factor=False)
        return _downsample

    def get_upsampler(self):
        """Return a callable doubling resolution with nearest-neighbor."""
        def _upsample(data_tens):
            return torch.nn.functional.interpolate(
                data_tens.float(), mode='nearest', scale_factor=2.0,
                recompute_scale_factor=False)
        return _upsample

    def get_num_channels(self, *args, **kwargs):
        return 1

    def get_default_data_type(self):
        return 'uint8'
@register_layer_type("section_value")
class SectionValueLayer(VolumetricLayer):
    """Layer holding per-section scalar values; channels are addressed
    through the x axis of the bounding cube (see ``indexing_scheme``)."""

    def __init__(self, *args, num_channels=1, **kwargs):
        super().__init__(*args, **kwargs)
        # Number of value channels stored per section.
        self.num_channels = num_channels

    # TODO: insert custom indexing here.
    def get_num_channels(self, *args, **kwargs):
        # NOTE(review): returns a constant 1 even though ``num_channels``
        # is configurable in __init__ and used by indexing_scheme below --
        # looks inconsistent; confirm intent before changing.
        return 1

    def indexing_scheme(self, bcube, mip, kwargs):
        """Map a channel selection onto the x axis of a copied bcube.

        Consumes optional ``channel_start``/``channel_end`` entries from
        the caller's ``kwargs`` (deleting them so they do not reach the
        backend); defaults to the full channel range.
        """
        new_bcube = copy.deepcopy(bcube)
        if 'channel_start' in kwargs and 'channel_end' in kwargs:
            channel_start = kwargs['channel_start']
            channel_end = kwargs['channel_end']
            del kwargs['channel_start'], kwargs['channel_end']
        else:
            channel_start = 0
            channel_end = self.num_channels
        new_bcube.reset_coords(channel_start, channel_end, 0, 1, mip=mip)
        return new_bcube

    def supports_voxel_offset(self):
        # Channel-indexed coordinates make voxel offsets meaningless here.
        return False

    def supports_chunking(self):
        return False

    def get_default_data_type(self):
        return 'float32'
| StarcoderdataPython |
3280584 | import sys
import signal
import crontab_jobs
default_app_config = 'dataloaderinterface.apps.DataloaderinterfaceConfig'
def on_dataloaderinterface_shutdown(*args):
    """Signal handler: stop scheduled crontab jobs, then exit cleanly.

    ``*args`` absorbs the (signum, frame) pair that the ``signal`` module
    passes to handlers.  The previously present ``from django.conf import
    settings`` import was unused and has been removed.
    """
    crontab_jobs.stop_jobs()
    sys.exit(0)


# Make sure background jobs are stopped on interrupt or termination.
signal.signal(signal.SIGINT, on_dataloaderinterface_shutdown)
signal.signal(signal.SIGTERM, on_dataloaderinterface_shutdown)
| StarcoderdataPython |
3217539 | <reponame>base4sistemas/pyescpos-cli
# -*- coding: utf-8 -*-
#
# escpostools/commands/cmd_test.py
#
# Copyright 2018 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from escpostools.aliases import resolve_alias
from escpostools.cli import pass_context
# 80- and 40-column measuring rulers ('....:....|' repeated).
LONG_RULER = '....:....|' * 8
SHORT_RULER = '....:....|' * 4


@click.command('test', short_help='Runs tests against implementations.')
@click.argument('aliases', type=click.STRING)
@click.option('--all', is_flag=True, help='Run all predefined test sets')
@click.option('--align', is_flag=True, help='Run predefined alignment test set')
@click.option('--modes', is_flag=True, help='Run predefined modes test set')
@click.option('--rulers', is_flag=True, help='Run predefined rulers test set')
@pass_context
def cli(ctx, aliases, all, align, modes, rulers):
    """Runs predefined tests against one or more implementations, sending sets
    of commands to the printer(s) throught associated connection method(s).

    For this command to work you must assign at least one alias with an
    implementation and connection method. See help for "assign" command. For
    example, if you want to run "modes" and "align" tests against an
    implementation aliased as "tmt20" you type:

    \b
        $ escpos test tmt20 --align --modes

    Or you can run all predefined tests against three aliased implementations:

    \b
        $ escpos test rm22,tmt20,dr700 --all

    """
    # NOTE: the parameter name 'all' shadows the builtin; it is dictated
    # by the --all option name, so it is kept as-is.
    impls = [resolve_alias(alias_id) for alias_id in aliases.split(',')]
    # --all is shorthand for turning every individual test-set flag on.
    if all:
        align = True
        modes = True
        rulers = True
    for impl in impls:
        if align:
            _run_align(impl)
        if modes:
            _run_modes(impl)
        if rulers:
            _run_rulers(impl)
def _run_align(impl):
impl.init()
impl.text('[Aligment Tests]')
impl.lf()
impl.justify_right()
impl.text('Right Aligned')
impl.justify_center()
impl.text('Centered Text')
impl.justify_left()
impl.text('Left Aligned')
impl.lf(2)
impl.text('This long text paragraph should be left aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_center()
impl.text('This long text paragraph should be centered. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_right()
impl.text('This long text paragraph should be right aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_left()
impl.lf(2)
def _run_modes(impl):
impl.init()
impl.text('[Modes]')
impl.lf()
impl.text('Just normal text.')
impl.lf()
impl.text('Entering condensed...')
impl.set_condensed(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_condensed(False)
impl.text('Condensed mode OFF')
impl.lf()
impl.text('Entering expanded...')
impl.set_expanded(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_expanded(False)
impl.text('Expanded mode OFF')
impl.lf(2)
def _run_rulers(impl):
    """Print measuring rulers in normal, condensed and expanded modes."""
    impl.init()
    impl.text('[Rulers]')
    impl.lf()
    impl.text(LONG_RULER)
    impl.lf(2)

    # Repeat the ruler with each mode toggled on and back off; the
    # expanded mode doubles glyph width, hence the shorter ruler.
    for set_mode, ruler in ((impl.set_condensed, LONG_RULER),
                            (impl.set_expanded, SHORT_RULER)):
        set_mode(True)
        impl.text(ruler)
        set_mode(False)
        impl.lf(2)
| StarcoderdataPython |
173585 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
updater handlers movie poster module.
"""
from charma.updater.decorators import updater
from charma.updater.enumerations import UpdaterCategoryEnum
from charma.updater.handlers.base import UpdaterBase
from charma.updater.handlers.mixin import ImageFetcherMixin
class MoviePosterUpdaterBase(UpdaterBase, ImageFetcherMixin):
    """
    movie poster updater base class.

    Concrete subclasses implement `_fetch` to extract a poster url from an
    imdb page; this base fixes the updater category and the target poster
    dimensions that subclasses pass to `get_resized_image_url`.
    """
    # Updater category served by this family of updaters.
    _category = UpdaterCategoryEnum.POSTER_NAME
    # Target poster dimensions, in pixels.
    IMAGE_WIDTH = 380
    IMAGE_HEIGHT = 562
@updater()
class MoviePosterUpdater(MoviePosterUpdaterBase):
    """
    movie poster updater class.

    Extracts the poster url from the classic imdb title page layout.
    """

    def _fetch(self, content, **options):
        """
        fetches data from given content.

        Walks down ``title-overview`` -> ``poster`` -> ``img`` and bails
        out early (returning None) as soon as any tag is missing.

        :param bs4.BeautifulSoup content: the html content of imdb page.

        :keyword bs4.BeautifulSoup credits: the html content of credits page.
                                            this is only needed by person updaters.

        :returns: imdb movie poster url.
        :rtype: str
        """
        overview = content.find('div', class_='title-overview')
        if overview is None:
            return None

        poster = overview.find('div', class_='poster')
        if poster is None:
            return None

        image = poster.find('img', src=True)
        if image is None:
            return None

        return self.get_resized_image_url(image.get('src'),
                                          self.IMAGE_WIDTH, self.IMAGE_HEIGHT)
@updater()
class MoviePosterUpdaterV2(MoviePosterUpdaterBase):
    """
    movie poster updater v2 class.

    Extracts the poster url from the redesigned imdb title page layout.
    """

    def _fetch(self, content, **options):
        """
        fetches data from given content.

        :param bs4.BeautifulSoup content: the html content of imdb page.

        :keyword bs4.BeautifulSoup credits: the html content of credits page.
                                            this is only needed by person updaters.

        :returns: imdb movie poster url.
        :rtype: str
        """
        container = content.find('div', {'data-testid': 'hero-media__poster'})
        if container is None:
            return None

        image = container.find('img', src=True)
        if image is None:
            return None

        return self.get_resized_image_url(image.get('src'),
                                          self.IMAGE_WIDTH, self.IMAGE_HEIGHT)
1661393 | <reponame>chanzuckerberg/aspen
import json
import requests
from botocore.client import ClientError
from aspen.database.models import CanSee, DataType
from aspen.test_infra.models.phylo_tree import phylorun_factory, phylotree_factory
from aspen.test_infra.models.sample import sample_factory
from aspen.test_infra.models.usergroup import group_factory, user_factory
def test_auspice_redirect_view(session, app, client, mock_s3_resource, test_data_dir):
    """End-to-end check of the auspice tree view endpoint.

    Builds four owner groups with different visibility relations to the
    viewer, uploads a phylo-tree JSON into a mocked S3 bucket, and checks
    that the presigned URL returned by the view serves a tree where only
    samples the viewer may see expose their private identifiers.
    """
    # Group the viewing user belongs to, plus three sample-owning groups.
    viewer_group = group_factory()
    can_see_group = group_factory("can_see")
    can_see_user = user_factory(
        can_see_group,
        name="can_see_user",
        auth0_user_id="can_see_user_auth0_id",
        email="<EMAIL>",
    )
    wrong_can_see_group = group_factory("wrong_can_see")
    wrong_can_see_user = user_factory(
        wrong_can_see_group,
        name="wrong_can_see_user",
        auth0_user_id="wrong_can_see_user_auth0_id",
        email="<EMAIL>",
    )
    no_can_see_group = group_factory("no_can_see")
    no_can_see_user = user_factory(
        no_can_see_group,
        name="no_can_see_user",
        auth0_user_id="no_can_see_user_auth0_id",
        email="<EMAIL>",
    )
    # viewer_group is granted PRIVATE_IDENTIFIERS on can_see_group ...
    can_see_group.can_be_seen_by.append(
        CanSee(
            viewer_group=viewer_group,
            owner_group=can_see_group,
            data_type=DataType.PRIVATE_IDENTIFIERS,
        )
    )
    # ... but only SEQUENCES (not private ids) on wrong_can_see_group.
    wrong_can_see_group.can_be_seen_by.append(
        CanSee(
            viewer_group=viewer_group,
            owner_group=wrong_can_see_group,
            data_type=DataType.SEQUENCES,
        )
    )
    session.add_all(
        [viewer_group, can_see_group, wrong_can_see_group, no_can_see_group]
    )
    user = user_factory(viewer_group)
    # One sample per group; all four end up on the same tree.
    local_sample = sample_factory(
        viewer_group,
        user,
        private_identifier="private_identifier_1",
        public_identifier="public_identifier_1",
    )
    can_see_sample = sample_factory(
        can_see_group,
        can_see_user,
        private_identifier="private_identifier_2",
        public_identifier="public_identifier_2",
    )
    wrong_can_see_sample = sample_factory(
        wrong_can_see_group,
        wrong_can_see_user,
        private_identifier="private_identifer_3",
        public_identifier="public_identifier_3",
    )
    no_can_see_sample = sample_factory(
        no_can_see_group,
        no_can_see_user,
        private_identifier="private_identifer_4",
        public_identifier="public_identifier_4",
    )
    phylo_tree = phylotree_factory(
        phylorun_factory(viewer_group),
        [local_sample, can_see_sample, wrong_can_see_sample, no_can_see_sample],
    )
    session.add(phylo_tree)
    session.commit()
    # We need to create the bucket since this is all in Moto's 'virtual' AWS account
    try:
        mock_s3_resource.meta.client.head_bucket(Bucket=phylo_tree.s3_bucket)
    except ClientError:
        # The bucket does not exist or you have no access.
        mock_s3_resource.create_bucket(Bucket=phylo_tree.s3_bucket)
    # Upload the tree fixture to the (mocked) bucket the view will sign for.
    json_test_file = test_data_dir / "ncov_aspen.json"
    with json_test_file.open() as fh:
        test_json = json.dumps(json.load(fh))
    mock_s3_resource.Bucket(phylo_tree.s3_bucket).Object(phylo_tree.s3_key).put(
        Body=test_json
    )
    # Authenticate as the viewer and request the presigned URL.
    with client.session_transaction() as sess:
        sess["profile"] = {"name": user.name, "user_id": user.auth0_user_id}
    res = client.get(f"/api/auspice/view/{phylo_tree.id}")
    res_presigned = res.json["url"]
    # this is a little hacky, currently when calling a get request on localstack generated presigned url it gets 404
    # i think this is due to calling get not being within the localstack test scope, so as a workaround i''m checking that
    # the key and bucket names from the phylo tree entry are in the returned presigned url and that the response code from the view is 200
    assert res.status == "200 OK"
    assert app.aspen_config.EXTERNAL_AUSPICE_BUCKET in res_presigned
    tree = requests.get(res_presigned).json()
    # Private names appear only for samples 1 and 2 (owned / can-see);
    # samples 3 and 4 fall back to their public identifiers.
    assert tree == {
        "tree": {
            "name": "private_identifier_1",
            "GISAID_ID": "public_identifier_1",
            "children": [
                {"name": "private_identifier_2", "GISAID_ID": "public_identifier_2"},
                {"name": "public_identifier_3"},
                {
                    "name": "public_identifier_4",
                    "children": [{"name": "public_identifier_5"}],
                },
            ],
        }
    }
| StarcoderdataPython |
1710948 | <reponame>FelixSchwarz/pycerberus
# -*- coding: UTF-8 -*-
# This file is a part of pycerberus.
# The source code contained in this file is licensed under the MIT license.
# See LICENSE.txt in the main project directory, for more information.
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, print_function, unicode_literals
from pythonic_testcase import *
from pycerberus.schemas import PositionalArgumentsParsingSchema
from pycerberus.test_util import ValidationTest
from pycerberus.validators import IntegerValidator, StringValidator
class TestPositionalArgumentsWithoutData(ValidationTest):
    # Bare schema with no declared parameters: only empty input is valid.
    validator_class = PositionalArgumentsParsingSchema

    def test_accept_input_without_parameters(self):
        # Both the empty string and None count as "no parameters given".
        self.init_validator(self.schema())
        self.assert_is_valid('', expected={})
        self.assert_is_valid(None, expected={})

    def test_bails_out_if_additional_parameters_are_passed(self):
        # Any input at all is surplus for a schema with no parameters.
        e = self.assert_error('foo bar')
        assert_equals(u'Unknown parameter "foo bar"', e.msg())
class TestPositionalArgumentsWithSingleParameter(ValidationTest):
    class SingleParameterSchema(PositionalArgumentsParsingSchema):
        # Exactly one positional parameter, named "foo".
        foo = StringValidator()
        parameter_order = ('foo', )

    validator_class = SingleParameterSchema

    def test_bails_out_if_no_parameter_is_passed(self):
        self.assert_error('')
        self.assert_error(None)

    def test_bails_out_if_too_many_parameters_are_passed(self):
        # Everything past the first comma-separated value is reported as surplus.
        e = self.assert_error('foo, bar, baz')
        assert_equals(u'Unknown parameter "bar, baz"', e.msg())

    def test_accepts_one_parameter(self):
        self.init_validator(self.schema())
        self.assert_is_valid('fnord', expected={'foo': 'fnord'})
class TestPositionalArgumentsWithMultipleParameters(ValidationTest):
    class MultipleParametersSchema(PositionalArgumentsParsingSchema):
        # Two positional parameters: a string followed by an integer.
        foo = StringValidator()
        bar = IntegerValidator()
        parameter_order = ('foo', 'bar')

    validator_class = MultipleParametersSchema

    def test_bails_out_if_only_one_parameter_is_passed(self):
        self.assert_error('fnord')

    def test_accepts_two_parameter(self):
        # The integer parameter is converted from its string form.
        self.init_validator(self.schema())
        self.assert_is_valid('fnord, 42', expected={'foo': 'fnord', 'bar': 42})
class TestProgrammaticSchemaConstructionForPositionalArguments(ValidationTest):
    def setUp(self):
        # Build the schema at runtime instead of declaratively; the internal
        # state freeze must be lifted while validators are being added.
        schema = PositionalArgumentsParsingSchema()
        schema.set_internal_state_freeze(False)
        schema.add('id', IntegerValidator())
        schema.set_parameter_order(['id'])
        schema.set_internal_state_freeze(True)
        # the helper methods will use this private attribute
        self._validator = schema

    def test_can_instantiate_schema_programmatically(self):
        self.assert_is_valid('42', expected={'id': 42})
        self.assert_error('foo')
        self.assert_error('foo, bar')
1764013 | <reponame>thomassutter/MoPoE<filename>celeba/flags.py
import argparse
from utils.BaseFlags import parser as parser
# DATASET NAME
parser.add_argument('--dataset', type=str, default='CelebA', help="name of the dataset")


def _str2bool(value):
    """Parse a command-line string into a real boolean.

    argparse's ``type=bool`` is a well-known trap: any non-empty string is
    truthy, so ``--flag False`` used to yield ``True`` and boolean flags
    could never be switched off from the command line.  This converter
    understands the common spellings and rejects anything else with a
    proper parser error.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', 't', 'yes', 'y', '1'):
        return True
    if value.lower() in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % (value,))


# add arguments
# -- latent space / data geometry --
parser.add_argument('--style_img_dim', type=int, default=32, help="dimension of varying factor latent space")
parser.add_argument('--style_text_dim', type=int, default=32, help="dimension of varying factor latent space")
parser.add_argument('--len_sequence', type=int, default=256, help="length of sequence")
parser.add_argument('--img_size', type=int, default=64, help="img dimension (width/height)")
parser.add_argument('--image_channels', type=int, default=3, help="number of channels in images")
parser.add_argument('--crop_size_img', type=int, default=148, help="size of the center crop applied to images")
parser.add_argument('--dir_text', type=str, default='../text', help="directory where text is stored")
parser.add_argument('--random_text_ordering', type=_str2bool, default=False,
                    help="flag to indicate if attributes are shuffled randomly")
parser.add_argument('--random_text_startindex', type=_str2bool, default=True,
                    help="flag to indicate if start index is random")
# -- network architecture --
parser.add_argument('--DIM_text', type=int, default=128, help="filter dimensions of residual layers")
parser.add_argument('--DIM_img', type=int, default=128, help="filter dimensions of residual layers")
parser.add_argument('--num_layers_text', type=int, default=7, help="number of residual layers")
parser.add_argument('--num_layers_img', type=int, default=5, help="number of residual layers")
parser.add_argument('--likelihood_m1', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m2', type=str, default='categorical', help="output distribution")
# classifier / model checkpoint names
parser.add_argument('--encoder_save_m1', type=str, default='encoderM1', help="model save for encoder")
parser.add_argument('--encoder_save_m2', type=str, default='encoderM2', help="model save for encoder")
parser.add_argument('--decoder_save_m1', type=str, default='decoderM1', help="model save for decoder")
parser.add_argument('--decoder_save_m2', type=str, default='decoderM2', help="model save for decoder")
parser.add_argument('--clf_save_m1', type=str, default='clf_m1', help="model save for clf")
parser.add_argument('--clf_save_m2', type=str, default='clf_m2', help="model save for clf")
# weighting of loss terms
parser.add_argument('--beta_m1_style', type=float, default=1.0, help="default weight divergence term style modality 1")
parser.add_argument('--beta_m2_style', type=float, default=2.0, help="default weight divergence term style modality 2")
parser.add_argument('--div_weight_m1_content', type=float, default=0.35, help="default weight divergence term content modality 1")
parser.add_argument('--div_weight_m2_content', type=float, default=0.35, help="default weight divergence term content modality 2")
parser.add_argument('--div_weight_uniform_content', type=float, default=0.3, help="default weight divergence term prior")
4801993 | #-*- coding: utf-8 -*-
from django.contrib import admin
class PrimitivePermissionAwareModelAdmin(admin.ModelAdmin):
    """ModelAdmin base that delegates edit checks to the model instance.

    Objects exposing a ``has_edit_permission(request)`` method decide for
    themselves whether they may be changed or deleted; adding objects via
    the admin is disallowed entirely.
    """

    def has_add_permission(self, request):
        # Adding is handled by special methods that bypass the admin
        # permission system, so the admin itself never allows it.
        return False

    def has_change_permission(self, request, obj=None):
        """Allow changes when the object itself grants them.

        Objects without a ``has_edit_permission`` method (including
        ``obj=None`` on the changelist view) are treated as editable.
        """
        if hasattr(obj, 'has_edit_permission'):
            return bool(obj.has_edit_permission(request))
        return True

    def has_delete_permission(self, request, obj=None):
        # No dedicated delete permission exists; mirror the change check.
        return self.has_change_permission(request, obj)
| StarcoderdataPython |
120222 | # -*- coding: utf-8 -*-
"""
Routes for the restfulapi addon.
"""
from framework.routing import Rule, json_renderer
from . import views
# Widget endpoints, mounted under /api/v1: start and cancel a download
# for a given project. Both accept POST only and render JSON.
widget_routes = {
    'rules': [
        Rule(
            [
                '/project/<pid>/restfulapi/download/'
            ],
            'post',
            views.restfulapi_download,
            json_renderer
        ),
        Rule(
            [
                '/project/<pid>/restfulapi/cancel/'
            ],
            'post',
            views.restfulapi_cancel,
            json_renderer
        )
    ],
    'prefix': '/api/v1'
}
# NOTE(review): no standalone API rules are defined yet -- presumably a
# placeholder kept so the addon exposes the conventional route dicts.
api_routes = {
    'rules': [],
    'prefix': '/api/v1'
}
1688550 | # -*- coding: utf-8 -*-
""" Adagrad learning algorithm. For a nice explanation of the algorithm, see: http://sebastianruder.com/optimizing-gradient-descent/index.html#adagrad
Adagrad [1] is an algorithm for gradient-based optimization
that adapts the learning rate to the parameters, performing
larger updates for infrequent and smaller updates for frequent parameters.
We set g_{t,i} to be the gradient of the objective function w.r.t. to the parameter θi at time step t
gt,i=∇θJ(θi)
Adagrad modifies the general learning rate η at each time step t for every parameter θi based
on the past gradients that have been computed for θi:
θ{t+1,i}=θ{t,i}−\div{η}{\sqrt{G_{t,ii}+ϵ}}*g_{t,i}
Gt∈ℝ^{d×d}: Diagonal matrix where each diagonal element i,i is
the sum of the squares of the gradients w.r.t. θi up to time step t
[1]: <NAME>., <NAME>., & <NAME>. (2011). Adaptive Subgradient Methods for Online Learning and Stochastic Optimization.
Journal of Machine Learning Research, 12, 2121–2159. (http://jmlr.org/papers/v12/duchi11a.html)
"""
__docformat__ = 'restructedtext en'
__authors__ = ("<NAME> "
"<NAME>")
__contact__ = "<NAME> <<EMAIL>>"
import numpy
import logging
import time
import theano
import theano.tensor as TT
from groundhog.utils import print_time
logger = logging.getLogger(__name__)
class AdaGrad(object):
    """AdaGrad trainer for GroundHog models (Theano, Python 2).

    Compiles two Theano functions: ``train_fn`` evaluates gradients (storing
    them in shared variables) plus the model's monitored properties, and
    ``update_fn`` applies the AdaGrad parameter update with learning rate
    ``lr``.  Calling the instance runs one training step on the next batch
    from ``data``.
    """

    def __init__(self,
                 model,
                 state,
                 data):
        # model: GroundHog model providing params, gradients and updates.
        # state: configuration dict (bs, seed, lr, cutoff, trainFreq, ...).
        # data: iterator yielding training batches (dict or tuple).
        #####################################
        # Step 0. Constructs shared variables
        #####################################
        bs = state['bs']
        self.model = model
        self.rng = numpy.random.RandomState(state['seed'])
        self.add_noise = state['weight_noise']
        self.step = 0
        self.bs = bs
        self.state = state
        self.data = data
        self.step_timer = time.time()
        # GPU-resident copies of the current batch, one per model input.
        self.gdata = [theano.shared(numpy.zeros((2,)*x.ndim,
                                                dtype=x.dtype), name=x.name) for x in model.inputs]
        # Shared storage for the most recent (possibly clipped) gradients.
        self.gs = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                             dtype=theano.config.floatX),
                                 name=p.name)
                   for p in model.params]
        # G_{i,i}
        self.accumulated_squared_gradients = [theano.shared(numpy.zeros(param.shape.eval(), dtype=param.dtype),
                                                            name = param.name + '_acc_grad')
                                              for param in model.params]
        # NOTE(review): self.eps is never used below -- the update rule
        # hard-codes 1e-8 instead.  Confirm which epsilon is intended.
        self.eps = 1e-4
        if 'profile' not in self.state:
            self.state['profile'] = 0
        ###################################
        # Step 1. Compile training function
        ###################################
        print 'Constructing grad function'
        loc_data = self.gdata
        lr = TT.scalar('lr')
        self.prop_names = [x[0] for x in model.properties]
        self.prop_exprs = [x[1] for x in model.properties]
        self.update_rules = [x[1] for x in model.updates]
        #inputs_replacement_list = zip(model.inputs, loc_data)
        # Clone the graph so that model inputs read from the GPU-resident
        # shared variables instead of symbolic inputs.
        rval = theano.clone(model.param_grads + self.update_rules + \
                            self.prop_exprs + [model.train_cost],
                            replace=zip(model.inputs, loc_data))
        nparams = len(model.params)
        nouts = len(self.prop_exprs)
        nrules = len(self.update_rules)
        gs = rval[:nparams]
        rules = rval[nparams:nparams + nrules]
        outs = rval[nparams + nrules:]
        # Squared L2 norm of all gradients subject to clipping.
        norm_gs = sum(TT.sum(x**2)
                      for x,p in zip(gs,
                                     self.model.params)
                      if p not in self.model.exclude_params_for_norm)
        # Optional gradient clipping with a non-finite fallback that pulls
        # parameters towards zero (0.1 * p) when the norm is NaN/Inf.
        if 'cutoff' in state and state['cutoff'] > 0:
            c = numpy.float32(state['cutoff'])
            if state['cutoff_rescale_length']:
                c = c * TT.cast(loc_data[0].shape[0], 'float32')
            notfinite = TT.or_(TT.isnan(norm_gs), TT.isinf(norm_gs))
            _gs = []
            for g,p in zip(gs,self.model.params):
                if p not in self.model.exclude_params_for_norm:
                    tmpg = TT.switch(TT.ge(norm_gs, c), g*c/norm_gs, g)
                    _gs.append(
                        TT.switch(notfinite, numpy.float32(.1)*p,
                                  tmpg))
                else:
                    _gs.append(g)
            gs = _gs
        store_gs = [(s,g) for s,g in zip(self.gs, gs)]
        training_updates = store_gs + [(s[0], r) for s,r in zip(model.updates, rules)]
        print 'Compiling grad function'
        self.train_fn = theano.function(
            [], outs, name='train_function',
            updates = training_updates,
            givens = zip(model.inputs, loc_data),
            profile=self.state['profile'])
        # Compute G_{i,i}
        # NOTE(review): these (shared, new_value) pairs are only consulted
        # via G_t[1] in `updates` below and are never installed as Theano
        # updates themselves, so the accumulator shared variables are never
        # advanced across steps -- confirm whether that is intended.
        accumulated_squared_gradients_update_list = [(acc_gradient, acc_gradient + gradient**2)
                                                     for acc_gradient, gradient in
                                                     zip(self.accumulated_squared_gradients, self.gs)]
        # θ{t+1,i}=θ{t,i}−\div{η}{\sqrt{G_{t,ii}+ϵ}}*g_{t,i}
        # NOTE(review): G_t[1] already contains gradient**2, and another
        # TT.pow(gradient, 2) is added inside the sqrt -- the squared
        # gradient appears twice; confirm against the AdaGrad formula.
        updates = [(weight, weight - lr * gradient / TT.sqrt(G_t[1]+TT.pow(gradient, 2)+1e-8))
                   for weight, G_t, gradient in zip(model.params, accumulated_squared_gradients_update_list, self.gs)]
        print 'Compiling update function'
        self.lr = numpy.float32(state['lr'])
        print '\t > Using a learning rate of', self.lr
        self.update_fn = theano.function(
            [lr], [], name='update_function',
            allow_input_downcast=True,updates=updates,
            profile=self.state['profile'],
        )
        self.old_cost = 1e20
        self.schedules = model.get_schedules()
        self.return_names = self.prop_names + \
                            ['cost',
                             'time_step',
                             'whole_time']

    def __call__(self, _):
        """
        Ignored parameter: hypothesis.
        """
        batch = self.data.next()
        """
        # Perturb the data (! and the model)
        if self.add_noise:
            if isinstance(batch, dict):
                batch = self.model.perturb(**batch)
            else:
                batch = self.model.perturb(*batch)
        """
        # Load the dataset into GPU
        # Note: not the most efficient approach in general, as it involves
        # each batch is copied individually on gpu
        if isinstance(batch, dict):
            for gdata in self.gdata:
                # print batch[gdata.name]
                gdata.set_value(batch[gdata.name], borrow=True)
        else:
            for gdata, data in zip(self.gdata, batch):
                gdata.set_value(data, borrow=True)
        # Evaluate gradients/properties, then apply the parameter update.
        rvals = self.train_fn()
        g_st = time.time()
        self.update_fn(self.lr)
        g_ed = time.time()
        whole_time = time.time() - self.step_timer
        self.state['lr'] = float(self.lr)
        # NOTE(review): msg/vals are assembled every trainFreq steps but
        # never printed or returned -- looks like a dropped logging call.
        if self.step % self.state['trainFreq'] == 0:
            msg = '.. iter %s'
            vals = [self.step]
            for dx, prop in enumerate(self.prop_names):
                msg += ' '+prop+' %.2e'
            msg += ' step time %s whole time %s lr %.2e'
            vals += [print_time(g_ed - g_st),
                     print_time(time.time() - self.step_timer),
                     float(self.lr)]
        self.step += 1
        ret = dict([('lr', float(self.lr)),
                    ('time_step', float(g_ed - g_st)),
                    ('whole_time', float(whole_time))]+zip(self.prop_names, rvals))
        return ret
3223434 | <reponame>daadaada/triton<filename>python/triton/ops/blocksparse/matmul.py
import triton
import triton._C.libtriton as libtriton
import torch
import os
import math
src = triton.read(os.path.join(os.path.dirname(__file__), 'matmul.c'))
##############
# MAIN API #
##############
class _matmul(torch.autograd.Function):
    """autograd.Function implementing block-sparse matmul in three modes:
    sdd (sparse = dense x dense), dsd (dense = sparse x dense) and
    dds (dense = dense x sparse).  Compiled Triton kernels and lock
    buffers are memoized per (block, device, dtype, transpose) key.

    Fix over the previous revision: ``backward`` initializes ``da``/``db``
    to None so it no longer raises NameError when only one of the two
    inputs requires a gradient.
    """
    sdd_cache = dict()
    dsd_cache = dict()
    dds_cache = dict()
    locks = dict()

    # Given an array sizes representing reduction size for each
    # column of a block-mode matrix multiplication,
    # performs load-balancing to achieve more smaller reductions
    # between `seg_size` elements
    @staticmethod
    def load_balance(sizes, block):
        # segment size
        # heuristics taken from OpenAI blocksparse code
        # https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
        max_size = sizes.max()
        min_size = sizes[sizes != 0].min()
        #if max_size > min_size * 2.0:
        #  seg_max = max(triton.cdiv(max_size, 4), min_size*2)
        #else:
        #  seg_max = max_size
        seg_max = max_size
        seg_min = max(triton.cdiv(seg_max, 4), 4)
        # split reduction into segments
        div = sizes // seg_max
        rem = sizes % seg_max
        packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
        width = packs.sum()
        segments = torch.empty(width, dtype=sizes.dtype)
        column = torch.empty_like(segments)
        lockid = torch.zeros_like(segments)
        maxid = torch.zeros_like(segments)
        nlocks = 0
        current = 0
        col_idx = 0
        for i in range(len(sizes)):
            d, r = div[i], rem[i]
            isempty = sizes[i] < seg_min
            last = current + d + (r >= seg_min) + isempty
            # column id
            column[current:last] = col_idx
            # lock id
            if d > 1 or (d == 1 and r >= seg_min):
                nlocks += 1
                lockid[current:last] = nlocks
                maxid[current:last] = last - current
            # segment size
            segments[current:current+d] = seg_max
            if r < seg_min and not isempty:
                segments[current+d-1] += r
            if r >= seg_min or isempty:
                segments[current+d] = r
            current = last
            col_idx += 1
        offsets = torch.zeros_like(segments)
        offsets[1:] = torch.cumsum(segments[:-1], dim=0)
        return segments, column, lockid, maxid, offsets

    @staticmethod
    def get_locks(size, dev):
        """Return a per-device lock buffer of at least `size` int32 entries,
        growing (and caching) it on demand."""
        if dev not in _matmul.locks or \
           size > _matmul.locks[dev].size(0):
            _matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
        return _matmul.locks[dev]

    ##########################
    # SPARSE = DENSE x DENSE #
    ##########################

    @staticmethod
    def make_sdd_lut(layout, block, dtype, device):
        """Build per-superblock look-up tables for the SDD kernel from the
        binary sparsity `layout`."""
        start_width = 64 // block
        superblocks = libtriton.superblock(layout.type(torch.int32), start_width)
        luts, widths, packs = [], [], []
        for size, nnz in superblocks:
            width = nnz.shape[0] // (size*size)
            h = nnz[:, 0]
            i = nnz[:, 1]
            j = nnz[:, 2]
            b = nnz[:, 3]
            lut = torch.stack((h, i, j, b), dim=1).view(-1).contiguous()
            luts.append(lut.type(torch.int32).to(device))
            widths.append(width)
            packs.append(size)
        # create locks
        return luts, None, widths, packs

    @staticmethod
    def _sdd_matmul(a, b, trans_a, trans_b, trans_c,
                    spdims, block, luts, num_locks, widths, packs):
        """sparse = dense x dense: compute only the nonzero output blocks."""
        if trans_c:
            a, b = b, a
            trans_a, trans_b = not trans_b, not trans_a
        AS0 = a.size(0)
        AS1 = a.size(1)
        AS2 = a.size(3 if trans_a else 2)
        AS3 = a.size(2 if trans_a else 3)
        BS0 = b.size(0)
        BS1 = b.size(1)
        BS2 = b.size(3 if trans_b else 2)
        BS3 = b.size(2 if trans_b else 3)
        dtype = a.dtype
        device = a.device
        is_16_multiple = AS3 % 16 == 0
        is_32_multiple = AS3 % 32 == 0
        is_64_multiple = AS3 % 64 == 0
        if not is_16_multiple:
            raise ValueError('Reduction size for SDD must be a multiple of 16')
        # create kernel
        total_width = sum([width*pack*pack for width,pack in zip(widths, packs)])
        c = torch.empty((AS0, total_width, block, block), dtype=dtype, device=device)
        for lut, width, pack in zip(luts, widths, packs):
            num_lock = 1
            key = (block, device, a.dtype, b.dtype, trans_a, trans_b, trans_c, pack, is_32_multiple, is_64_multiple)
            if key not in _matmul.sdd_cache:
                F32TK = [8, 16]
                #F16TK = [16]
                #F16TK += [32] if is_32_multiple else []
                #F16TK += [64] if is_64_multiple else []
                F16TK = [64]
                TK = {torch.float32: F32TK,
                      torch.float16: F16TK}[dtype]
                defines = {'TM': block*pack, 'TN': block*pack, 'TMN': block*block*pack*pack, 'BLOCK': block,
                           'TK': TK, 'TYPE': dtype,
                           'STRIDE_AM': '1' if trans_a else 'lda',
                           'STRIDE_AK': 'lda' if trans_a else '1',
                           'STRIDE_BN': 'ldb' if trans_b else '1',
                           'STRIDE_BK': '1' if trans_b else 'ldb',
                           'STRIDE_CM': 'ldc', 'STRIDE_CN': '1',
                           'SDD': True, 'TZ': 1, 'NAME': 'sdd_kernel'}
                _matmul.sdd_cache[key] = triton.kernel(src, device=device, defines=defines, num_warps=[1, 2, 4])
            kernel = _matmul.sdd_cache[key]
            # create output
            locks = _matmul.get_locks(2*width*AS0*num_lock, a.device)
            # maximum grid size is 65535
            # so operation might be decomposed into multiple
            # kernel calls
            max_width = 49152
            for off_width in range(0, width, max_width):
                kernel(a.data_ptr(), b.data_ptr(), c.data_ptr(),
                       a.stride(2), b.stride(2), block,
                       a.stride(0), b.stride(0), c.stride(0),
                       a.stride(1), b.stride(1), c.stride(0),
                       AS2, AS2, AS3, off_width, lut.data_ptr(), locks.data_ptr(), num_lock,
                       grid = lambda opt: [opt.TZ, min(max_width, width - off_width), AS0])
        # save for backward pass
        return c

    ##########################
    # DENSE = DENSE x SPARSE #
    # DENSE = SPARSE x DENSE #
    ##########################

    # Given a binary layout of 0s and 1s,
    # Construct look-up table for efficient execution on GPUs
    @staticmethod
    def make_dxx_lut(layout, block, step, trans, device, transform = lambda idx: idx):
        # load-balancing
        _empty = torch.tensor([], dtype=torch.int64, device=layout.device)
        segments = _empty.clone()
        column = _empty.clone()
        depth = _empty.clone()
        lockid = _empty.clone()
        maxid = _empty.clone()
        offsets = _empty.clone()
        current_offset = 0
        current_maxid = 0
        for z in range(layout.size(0)):
            if trans:
                sizes = torch.sum(layout[z, :, :], 1)
            else:
                sizes = torch.sum(layout[z, :, :], 0)
            z_segments, z_column, z_lockid, z_maxid, z_offsets = _matmul.load_balance(sizes, block)
            z_depth = z * torch.ones_like(z_segments)
            z_lockid[z_lockid > 0] += current_maxid
            current_maxid = z_lockid.max()
            # concatenate depth
            segments = torch.cat((segments, z_segments))
            column = torch.cat((column, z_column))
            depth = torch.cat((depth, z_depth))
            maxid = torch.cat((maxid, z_maxid))
            offsets = torch.cat((offsets, current_offset + z_offsets))
            lockid = torch.cat((lockid, z_lockid))
            current_offset += layout[z, :, :].sum()
        segments *= step
        # pointer increments
        if trans:
            nnz = layout.nonzero(as_tuple=False)
        else:
            nnz = layout.transpose(1, 2).nonzero(as_tuple=False)
        num_blocks = nnz.size(0)
        offsets = torch.min(offsets, (num_blocks - 1)*torch.ones_like(offsets))
        idx = transform(nnz[:, 2]*block)
        xincs = idx.clone()
        xincs[1:] -= idx[:-1]
        # divide block into multiple steps
        div = block // step
        xincs = xincs.view(-1, 1).repeat(1, div)
        xincs[:, 1:] = step
        xincs[:, 0 ] -= (div-1)*step
        # first increment for each reduction is actually the offset
        xincs[offsets[segments>0], 0] = idx[offsets[segments>0]]
        xincs = xincs.view(-1)
        # block-mode input increments
        if trans:
            widx = torch.arange(num_blocks)
        else:
            widx = _empty.clone()
            current_offset = 0
            for z in range(layout.size(0)):
                layoutw = layout[z, :, :].clone()
                msum = layoutw.sum()
                layoutw[layoutw > 0] = 1 + torch.arange(msum)
                widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
                current_offset += msum
        widx = widx
        wincs = widx*block*block
        wincs[1:] -= widx[:-1]*block*block
        wincs = wincs.view(-1, 1).repeat(1, div)
        if trans:
            wincs[:, 1:] = step
            wincs[:, 0] -= (div-1)*step
        else:
            wincs[:, 1:] = step*block
            wincs[:, 0] -= (div - 1)*step*block
        wincs[offsets[segments>0], 0] = widx[offsets[segments>0]]
        wincs = wincs.view(-1)
        # adjust offset and segment size
        offsets *= 2*div
        segments *= div
        # create header
        width = column.size(0)
        offsets += 6*width
        header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()
        incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
        incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
        # create lut
        lut = torch.cat((header, incs))
        lut = lut.type(torch.int32).to(device)
        # create locks
        num_locks = max(1, lockid.max())
        return lut, num_locks, width, None

    @staticmethod
    def _dds_matmul(a, b, trans_a, trans_b, trans_c,
                    spdims, block, lut, num_locks, width, packs):
        """dense = dense x sparse."""
        # shapes / dtypes
        AS0 = a.size(0)
        AS1 = a.size(1)
        AS2 = a.size(3 if trans_a else 2)
        AS3 = a.size(2 if trans_a else 3)
        BS0 = spdims[0]
        BS1 = block * spdims[2 if trans_b else 1]
        BS2 = block * spdims[1 if trans_b else 2]
        dtype = a.dtype
        # kernel
        key = (block, a.device, a.dtype, b.dtype, trans_a, trans_b, trans_c)
        if key not in _matmul.dds_cache:
            TM = [64, 128] if dtype == torch.float32 else [64, 128, 256]
            TK = [8] if dtype == torch.float32 else [16]
            defines = {'TM': TM, 'TN': block, 'TK': TK,
                       'BLOCK': block,
                       'TYPE': dtype,
                       'STRIDE_AM': 1 if trans_a else 'lda',
                       'STRIDE_AK': 'lda' if trans_a else 1,
                       'STRIDE_BN': block if trans_b else 1,
                       'STRIDE_BK': 1 if trans_b else block,
                       'STRIDE_CM': '1' if trans_c else 'ldc',
                       'STRIDE_CN': 'ldc' if trans_c else '1',
                       'NAME': 'dds_kernel',
                       'DDS': True}
            _matmul.dds_cache[key] = triton.kernel(src, device=a.device, defines=defines, num_warps=[4])
        kernel = _matmul.dds_cache[key]
        # output
        CS0 = AS0
        CS1 = AS1
        CS2 = BS2 if trans_c else AS2
        CS3 = AS2 if trans_c else BS2
        locks = _matmul.get_locks(2*AS0*AS2//32*num_locks, a.device)
        c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
        kernel(a.data_ptr(), b.data_ptr(), c.data_ptr(),
               a.stride(2), block, c.stride(2),
               a.stride(0), b.stride(0), c.stride(0),
               a.stride(1), b.stride(1), c.stride(1),
               AS2, BS2, 0, 0, lut.data_ptr(), locks.data_ptr(), num_locks,
               grid = lambda opt: [width, triton.cdiv(AS2, opt.TM), AS0])
        return c

    @staticmethod
    def _dsd_matmul(a, b, trans_a, trans_b, trans_c,
                    spdims, block, lut, num_locks, width, packs):
        """dense = sparse x dense."""
        # shapes / dtypes
        AS0 = spdims[0]
        AS1 = block * spdims[2 if trans_a else 1]
        AS2 = block * spdims[1 if trans_a else 2]
        BS0 = b.size(0)
        BS1 = b.size(1)
        BS2 = b.size(3 if trans_b else 2)
        BS3 = b.size(2 if trans_b else 3)
        dtype = a.dtype
        # kernel
        key = (block, a.device, a.dtype, b.dtype, trans_a, trans_b, trans_c)
        if key not in _matmul.dsd_cache:
            TN = [64, 128] if dtype == torch.float32 else [64, 128]
            TK = [8] if dtype == torch.float32 else [16]
            defines = {'TM': block, 'TN': TN, 'TK': TK,
                       'BLOCK': block,
                       'TYPE': dtype,
                       'STRIDE_AM': 1 if trans_a else block,
                       'STRIDE_AK': block if trans_a else 1,
                       'STRIDE_BN': 'ldb' if trans_b else '1',
                       'STRIDE_BK': '1' if trans_b else 'ldb',
                       'STRIDE_CM': '1' if trans_c else 'ldc',
                       'STRIDE_CN': 'ldc' if trans_c else '1',
                       'NAME': 'dsd_kernel',
                       'DSD': True}
            _matmul.dsd_cache[key] = triton.kernel(src, device=a.device, defines=defines, num_warps=[4])
        kernel = _matmul.dsd_cache[key]
        # output
        CS0 = BS0
        CS1 = BS1
        CS2 = BS3 if trans_c else AS1
        CS3 = AS1 if trans_c else BS3
        locks = _matmul.get_locks(2*BS0*BS3//32*num_locks, a.device)
        c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
        kernel(a.data_ptr(), b.data_ptr(), c.data_ptr(),
               block, b.stride(2), c.stride(2),
               a.stride(0), b.stride(0), c.stride(0),
               a.stride(1), b.stride(1), c.stride(1),
               BS3, AS1, 0, 0, lut.data_ptr(), locks.data_ptr(), num_locks,
               grid = lambda opt: [width, triton.cdiv(BS3, opt.TN), BS0])
        return c

    # Mode name -> unbound implementation.
    fn = {'sdd': _sdd_matmul.__get__(object),
          'dsd': _dsd_matmul.__get__(object),
          'dds': _dds_matmul.__get__(object)}

    @staticmethod
    def forward(ctx, a, b, trans_a, trans_b, trans_c,
                mode, spdims, block,
                c_lut, c_num_locks, c_width, c_packs,
                da_lut, da_num_locks, da_width, da_packs,
                db_lut, db_num_locks, db_width, db_packs):
        """Dispatch to the mode-specific kernel and stash everything the
        backward pass needs on `ctx`."""
        c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block,
                             c_lut, c_num_locks, c_width, c_packs)
        # save for backward
        ctx.save_for_backward(a, b)
        ctx.da_num_locks = da_num_locks
        ctx.da_lut = da_lut
        ctx.da_width = da_width
        ctx.da_packs = da_packs
        ctx.db_lut = db_lut
        ctx.db_num_locks = db_num_locks
        ctx.db_width = db_width
        ctx.db_packs = db_packs
        ctx.mode = mode
        ctx.spdims = spdims
        ctx.block = block
        ctx.trans_a = trans_a
        ctx.trans_b = trans_b
        return c

    @staticmethod
    def backward(ctx, dc):
        """Gradients of both inputs; the mode string is permuted to select
        the matching forward kernel for each gradient."""
        # saved for backward
        a, b = ctx.saved_tensors
        mode = ctx.mode
        # BUGFIX: previously da/db were left unbound when the corresponding
        # needs_input_grad flag was False, so the return raised NameError.
        # None is what autograd expects for inputs without gradients.
        da = None
        db = None
        # gradients w.r.t. a
        if ctx.needs_input_grad[0]:
            mode_da = mode[1] + mode[0] + mode[2]
            da = _matmul.fn[mode_da](dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block,
                                     ctx.da_lut, ctx.da_num_locks, ctx.da_width, ctx.da_packs)
        # gradients w.r.t. b
        if ctx.needs_input_grad[1]:
            mode_db = mode[2] + mode[1] + mode[0]
            db = _matmul.fn[mode_db](a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block,
                                     ctx.db_lut, ctx.db_num_locks, ctx.db_width, ctx.db_packs)
        return da, db, None, None, None,\
               None, None, None, None,\
               None, None, None, None, None, None,\
               None, None, None, None, None, None,\
               None, None, None, None, None, None
class matmul:
    """User-facing block-sparse matmul op.

    Configured with a sparsity `layout`, a `block` size and one of the
    modes 'sdd'/'dsd'/'dds'; calling the instance runs the corresponding
    `_matmul` autograd function.  Look-up tables for the forward pass and
    both gradients are built lazily per (dtype, device) and cached.
    """

    def make_lut(self, dtype, device):
        """Build (or fetch from cache) the forward/da/db look-up tables."""
        key = (dtype, device)
        if key in self.lut_cache:
            return self.lut_cache[key]
        # C look-up table
        layout, block = self.layout, self.block
        step = 8 if dtype == torch.float32 else 16
        if self.mode == 'sdd':
            c_lut, c_num_locks, c_width, c_packs = _matmul.make_sdd_lut(layout, block, dtype, device)
        elif self.mode == 'dsd':
            c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_a, device)
        elif self.mode == 'dds':
            c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_b, device)
        # DA look-up table
        if self.mode == 'sdd':
            da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, True, device)
        elif self.mode == 'dsd':
            da_lut, da_num_locks, da_width, da_packs = _matmul.make_sdd_lut(layout, block, dtype, device)
        elif self.mode == 'dds':
            da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_b, device)
        # DB look-up table
        if self.mode == 'sdd':
            db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, False, device)
        elif self.mode == 'dsd':
            db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_a, device)
        elif self.mode == 'dds':
            db_lut, db_num_locks, db_width, db_packs = _matmul.make_sdd_lut(layout, block, dtype, device)
        self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,
                               da_lut, da_num_locks, da_width, da_packs,
                               db_lut, db_num_locks, db_width, db_packs)
        return self.lut_cache[key]

    def __init__(self, layout, block, mode, trans_a = False, trans_b = False):
        if mode not in ['sdd', 'dsd', 'dds']:
            raise NotImplementedError('Supported modes are: sdd, dsd, dds')
        # look-up table cache
        self.lut_cache = dict()
        # attributes
        self.trans_a = trans_a
        self.trans_b = trans_b
        self.mode = mode
        self.spdims = layout.shape
        self.block = block
        self.layout = layout

    # pad shapes of a tensor to make it
    # compatible with kernel calls
    @staticmethod
    def _pad_shape(x, is_sparse):
        # Kernels expect 3D sparse tensors and 4D dense tensors; prepend
        # singleton dimensions until the tensor has the required rank.
        max_dim = 3 if is_sparse else 4
        for i in range(max_dim - x.dim()):
            x = x.unsqueeze(0)
        return x

    def __call__(self, a, b):
        c_lut, c_num_locks, c_width, c_packs,\
        da_lut, da_num_locks, da_width, da_packs,\
        db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
        # pad shapes with ones
        a = matmul._pad_shape(a, self.mode == 'dsd')
        b = matmul._pad_shape(b, self.mode == 'dds')
        # execute
        c = _matmul.apply(a, b, self.trans_a, self.trans_b, False,
                          self.mode, self.spdims, self.block,
                          c_lut, c_num_locks, c_width, c_packs,
                          da_lut, da_num_locks, da_width, da_packs,
                          db_lut, db_num_locks, db_width, db_packs)
        return c
1776613 | # -*- coding: utf-8 -*-
###############################################################################
# Author: (C) 2012 <NAME>
# Module: apps
# Description: Django project applications module
############################################################################### | StarcoderdataPython |
# Names of the tolerance settings recognised by this package
# (presumably consumed by a settings/validation module — TODO confirm).
catlog = [
    "coordinate_tol",
    "distance_tol",
    "angle2_tol",
    "angle_tol",
    "area_tol",
    "setting_tol",
]
| StarcoderdataPython |
120282 | import asyncio
import os
import textwrap
import urllib
from typing import Tuple
import caproto.server
from .archstats import Archstats
# Database backends this IOC knows how to talk to (checked in get_database).
SUPPORTED_DATABASE_BACKENDS = {'elastic', }
def get_archiver_url() -> str:
    """Return the archiver appliance base URL as ``scheme://netloc/``.

    Reads ``ARCHIVER_URL`` from the environment, falling back to the SLAC
    default.  A bare ``host[:port]`` value is promoted to ``http://``; any
    path component is stripped.

    Raises:
        RuntimeError: if ``ARCHIVER_URL`` is set but empty.
    """
    # Import the submodule explicitly: a bare ``import urllib`` does not
    # guarantee that ``urllib.parse`` is reachable as an attribute.
    from urllib.parse import urlsplit

    archiver_url = os.environ.get(
        'ARCHIVER_URL', 'http://pscaa02.slac.stanford.edu:17665/'
    )
    if not archiver_url:
        raise RuntimeError('ARCHIVER_URL not set')

    if not archiver_url.startswith('http'):
        archiver_url = f'http://{archiver_url}'

    # Normalize to scheme + network location with a trailing slash.
    # (The original annotated this as ``urllib.SplitResult``, which does not
    # exist; the correct type is ``urllib.parse.SplitResult``.)
    result = urlsplit(archiver_url)
    return f'{result.scheme}://{result.netloc}/'
def get_database() -> Tuple[str, str]:
    """Return the ``(backend, url)`` pair for the statistics database.

    Both values come from the environment with sensible defaults; a
    RuntimeError is raised for empty or unsupported settings.
    """
    env = os.environ.get
    chosen_backend = env('ARCHSTATS_DATABASE', 'elastic')
    chosen_url = env('ARCHSTATS_DATABASE_URL', 'http://localhost:9200/')

    # Validate in the same order as before: URL, backend name, support.
    for ok, message in (
        (chosen_url, 'ARCHSTATS_DATABASE_URL unset'),
        (chosen_backend, 'ARCHSTATS_DATABASE unset'),
        (chosen_backend in SUPPORTED_DATABASE_BACKENDS,
         f'Unsupported database backend: {chosen_backend}'),
    ):
        if not ok:
            raise RuntimeError(message)

    return chosen_backend, chosen_url
def main():
    """Entry point: build the Archstats IOC from CLI/env settings and run it."""
    ioc_options, run_options = caproto.server.ioc_arg_parser(
        default_prefix='ARCH:',
        desc=textwrap.dedent(Archstats.__doc__),
        supported_async_libs=('asyncio', ),
    )

    backend, backend_url = get_database()
    ioc = Archstats(
        appliance_url=get_archiver_url(),
        database_backend=backend,
        database_url=backend_url,
        **ioc_options
    )

    # Some IOC classes need an async initialization step before serving.
    if hasattr(ioc, '__ainit__'):
        event_loop = asyncio.get_event_loop()
        event_loop.run_until_complete(event_loop.create_task(ioc.__ainit__()))

    caproto.server.run(ioc.pvdb, **run_options)
    return ioc
# Allow running the module directly as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3200725 | import luigi as luigi
import pandas as pd
import numpy as np
from visualization_tasks.PlotColumnEqualsData import PlotColumnEqualsData
from visualization_tasks.Visualizer import Visualizer
from data_tasks.MergeRetroSheetData import MergeRetroSheetData
class GroupBy(luigi.Task):
    """Luigi task: per-value strike/ball distribution magnitudes for one column.

    For a given season (``year``) and pitch-data column (``column``), groups the
    merged Retrosheet data by that column and, for each value, records its
    relative frequency and the magnitude of its strike/ball 2D-histogram
    differential (computed by the project's ``Visualizer``).
    """
    year = luigi.YearParameter()
    column = luigi.Parameter()

    def requires(self):
        # Upstream task producing the merged season CSV.
        return MergeRetroSheetData(self.year)

    def output(self):
        # One CSV per (year, column) pair.
        return luigi.LocalTarget(
            '{year:%Y}/images/{column}/groupby.csv'.format(year=self.year, column=self.column))

    def run(self):
        with self.input().open('r') as yearData:
            year_dataframe = pd.read_csv(yearData)
            # Season-wide strike/ball subsets used as the comparison baseline.
            year_strikes = year_dataframe.where((year_dataframe["pitch_type"] == "S"))
            year_balls = year_dataframe.where((year_dataframe["pitch_type"] == "B"))
            value_groups = year_dataframe.groupby(self.column)
            v = Visualizer()
            output_dataframe = pd.DataFrame(columns=["value", "count", "magnitude"])
            total_length = len(year_dataframe)
            for value, group in value_groups:
                value_strikes = group.where(
                    (group["pitch_type"] == "S"))
                value_balls = group.where(
                    (group["pitch_type"] == "B"))
                # Only the scalar magnitude of the differential histogram is kept.
                _, _, _, magnitude = v.get_differential_2dhist(value_strikes, value_balls, year_strikes, year_balls)
                # NOTE(review): DataFrame.append is deprecated in modern pandas;
                # kept as-is to preserve behavior.
                output_dataframe = output_dataframe.append({"value": value, "count": len(group) / total_length, "magnitude": magnitude}, sort=False, ignore_index=True)
            with self.output().open("w") as outputfile:
                output_dataframe.to_csv(outputfile)
| StarcoderdataPython |
3270472 | <reponame>CRaNkXD/PyMoneyOrga
import unittest
import time
from timeit import default_timer as timer
import timeit
from PyMoneyOrga.database.database_sqlite import DatabaseSqlite
class TestDatabaseSqlite(unittest.TestCase):
    """Integration tests for the SQLite backend of PyMoneyOrga.

    Each test runs against a throw-away on-disk database file; tearDown wipes
    all tables so tests stay independent.
    """

    def setUp(self):
        # Dedicated test database so the real data file is never touched.
        self.database = DatabaseSqlite(url="sqlite:///pymoneyorga.sqlite.test")
        self.acc_name_1 = "User1"
        self.acc_name_2 = "User2"
        return super().setUp()

    def test_add_acc_first(self):
        """
        tests if an account can be set by checking table columns
        """
        with self.database.get_session() as session:
            self.database.add_acc(session, self.acc_name_1, 100, "EUR")
            self.database.commit(session)
            acc = self.database.get_acc(session, self.acc_name_1)
            self.assertEqual(acc.balance, 100)

    def test_add_acc_second(self):
        """
        tests if a second account can be set by checking table
        columns of the first and the second entry
        """
        with self.database.get_session() as session:
            self.database.add_acc(session, self.acc_name_1, 100, "EUR")
            self.database.add_acc(session, self.acc_name_2, 200, "EUR")
            self.database.commit(session)
            acc_user2 = self.database.get_acc(session, self.acc_name_2)
            acc_user1 = self.database.get_acc(session, self.acc_name_1)
            self.assertEqual(acc_user2.balance, 200)
            self.assertEqual(acc_user1.balance, 100)

    def test_get_acc_none(self):
        """
        tests if get acc returns None if the
        specified account does not exist
        """
        with self.database.get_session() as session:
            acc = self.database.get_acc(session, self.acc_name_1)
            self.assertEqual(acc, None)

    def test_get_transactions(self):
        """
        tests if get_transactions returns the
        specified transactions in the right order with the specified amount
        """
        with self.database.get_session() as session:
            self.database.add_acc(session, self.acc_name_1, 100, "EUR")
            self.database.commit(session)
            acc = self.database.get_acc(session, self.acc_name_1)
            acc.add_income(100, "income1")
            # Sleep briefly so consecutive transactions get distinct timestamps
            # and the ordering under test is deterministic.
            time.sleep(0.001)
            acc.add_income(100, "income2")
            time.sleep(0.001)
            acc.add_income(100, "income3")
            # Oldest-first, limited to 2 entries.
            transactions = self.database.get_transactions(
                session, self.acc_name_1, False, 2
            )
            self.assertEqual(transactions[0].new_balance, 200)
            self.assertEqual(len(transactions), 2)
            # Newest-first; limit larger than the number of rows.
            transactions = self.database.get_transactions(
                session, self.acc_name_1, True, 50
            )
            self.assertEqual(transactions[0].new_balance, 400)
            self.assertEqual(len(transactions), 3)
            # Oldest-first with an offset of 1.
            transactions = self.database.get_transactions(
                session, self.acc_name_1, False, 2, 1
            )
            self.assertEqual(transactions[0].new_balance, 300)

    def tearDown(self):
        # Wipe every table so the next test starts from a clean slate.
        self.database._clear_all_tables()
        return super().tearDown()
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
111272 | <gh_stars>10-100
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from plotly.subplots import make_subplots
class blocktimeViewClass:
    """Dash view: daily block-time analysis for Defichain.

    Produces the page content (cards, info modal) and the two-panel Plotly
    figure: mean block time on top, quantile-band distribution below.
    """

    def getBlocktimeContent(self, data, bgImage):
        """Return the Dash layout (modal + card with the block-time figure)."""
        content = [dbc.Modal([dbc.ModalHeader("Info Block Time"),
                              dbc.ModalBody(self.getBlockTimeExplanation()),
                              dbc.ModalFooter(dbc.Button("close", id="closeInfoBlocktime", className="ml-auto"))],
                             id="modalBlocktime", size='xl'),
                   html.Div(id='hidden', style = {'display':'none'}),
                   dbc.Card(dbc.CardBody([html.H4(['Block time analysis on a daily base']),
                                          dbc.Row(dbc.Col(dcc.Graph(figure=self.createBlocktimeFigure(data, bgImage), config={'displayModeBar': False}, id='figureBlocktime'))),
                                          dbc.Row(dbc.Col(dbc.Button("Info/Explanation", id="openInfoBlocktime")))
                                          ]))]
        return content

    @staticmethod
    def createBlocktimeFigure(data, bgImage):
        """Build the two-row Plotly figure.

        data:
            assumed to be a DataFrame-like object with daily series such as
            'meanBlockTime', 'medianBlockTime' and the quantile columns —
            TODO confirm against the data provider.
        bgImage:
            image source used as a watermark behind both subplots.
        """
        figBlockTime = make_subplots(
            rows=2, cols=1,
            vertical_spacing=0.15,
            row_width=[0.6, 0.4],  # from bottom to top
            specs=[[{}],
                   [{}]],
            shared_xaxes=True,
            subplot_titles=(['Mean Block time', 'Block time distribution']))
        figBlockTime.layout.annotations[0].font.color = '#6c757d'  # subplot title font color
        figBlockTime.layout.annotations[0].font.size = 18
        figBlockTime.layout.annotations[1].font.color = '#6c757d'
        figBlockTime.layout.annotations[1].font.size = 18

        # Top panel: daily mean block time (last, possibly partial, day dropped).
        trace_meanTime = dict(type='scatter', name='Mean Time',
                              x=data['meanBlockTime'].dropna().index.values[:-1],
                              y=data['meanBlockTime'].dropna().values[:-1],
                              mode='lines', line=dict(color='#da3832'), line_width=3, hovertemplate='%{y:.2f}s')
        figBlockTime.add_trace(trace_meanTime, 1, 1)

        # Bottom panel: quantile fan (min/10%/30%/median/70%/90%/max); the
        # "4Filling" traces only paint the shaded bands between quantiles.
        trace_Min = dict(type='scatter', name='Mininum',
                         x=data['MinBlockTime'].dropna().index.values[:-1],
                         y=data['MinBlockTime'].dropna().values[:-1],
                         mode='lines', line=dict(color='#90d1e5'), line_width=0, hovertemplate='%{y:.0f}s',
                         showlegend=False)
        trace_10Perc = dict(type='scatter', name='10% quantile',
                            x=data['10PercentBlockTime'].dropna().index.values[:-1],
                            y=data['10PercentBlockTime'].dropna().values[:-1],
                            mode='lines', line=dict(color='#90d1e5'), line_width=2, hovertemplate='%{y:.0f}s')
        trace_4Fill10 = dict(type='scatter', name='4Filling',
                             x=data['30PercentBlockTime'].dropna().index.values[:-1],
                             y=data['30PercentBlockTime'].dropna().values[:-1],
                             fill='tonexty', fillcolor='rgba(144, 209, 229, 0.5)',
                             mode='lines', line=dict(color='#90d1e5'), line_width=0, hoverinfo='none', showlegend=False)
        trace_30Perc = dict(type='scatter', name='30% quantile',
                            x=data['30PercentBlockTime'].dropna().index.values[:-1],
                            y=data['30PercentBlockTime'].dropna().values[:-1], fill='tonexty',
                            mode='lines', line=dict(color='#3fbadf'), line_width=2, hovertemplate='%{y:.0f}s')
        trace_4Fill30 = dict(type='scatter', name='4Filling',
                             x=data['medianBlockTime'].dropna().index.values[:-1],
                             y=data['medianBlockTime'].dropna().values[:-1],
                             fill='tonexty', fillcolor='rgba(63, 186, 223, 0.5)',
                             mode='lines', line=dict(color='#3fbadf'), line_width=0, hoverinfo='none', showlegend=False)
        trace_Median = dict(type='scatter', name='Median',
                            x=data['medianBlockTime'].dropna().index.values[:-1],
                            y=data['medianBlockTime'].dropna().values[:-1],
                            mode='lines', line=dict(color='#7f50ff'), line_width=4, hovertemplate='%{y:.0f}s')
        trace_70Perc = dict(type='scatter', name='70% quantile',
                            x=data['70PercentBlockTime'].dropna().index.values[:-1],
                            y=data['70PercentBlockTime'].dropna().values[:-1],
                            fill='tonexty', fillcolor='rgba(63, 186, 223, 0.5)',
                            mode='lines', line=dict(color='#3fbadf'), line_width=2, hovertemplate='%{y:.0f}s')
        trace_90Perc = dict(type='scatter', name='90% quantile',
                            x=data['90PercentBlockTime'].dropna().index.values[:-1],
                            y=data['90PercentBlockTime'].dropna().values[:-1],
                            fill='tonexty', fillcolor='rgba(144, 209, 229, 0.5)',
                            mode='lines', line=dict(color='#90d1e5'), line_width=2, hovertemplate='%{y:.0f}s')
        trace_Max = dict(type='scatter', name='Maximum',
                         x=data['MaxBlockTime'].dropna().index.values[:-1],
                         y=data['MaxBlockTime'].dropna().values[:-1],
                         mode='lines', line=dict(color='#90d1e5'), line_width=0, hovertemplate='%{y:.0f}s',
                         showlegend=False)
        figBlockTime.add_trace(trace_Min, 2, 1)
        figBlockTime.add_trace(trace_10Perc, 2, 1)
        figBlockTime.add_trace(trace_4Fill10, 2, 1)
        figBlockTime.add_trace(trace_30Perc, 2, 1)
        figBlockTime.add_trace(trace_4Fill30, 2, 1)
        figBlockTime.add_trace(trace_Median, 2, 1)
        figBlockTime.add_trace(trace_70Perc, 2, 1)
        figBlockTime.add_trace(trace_90Perc, 2, 1)
        figBlockTime.add_trace(trace_Max, 2, 1)

        figBlockTime.update_yaxes(title_text='Time in s', tickformat=",.2f", gridcolor='#6c757d', color='#6c757d',
                                  zerolinecolor='#6c757d', range=[20, 45], row=1, col=1)  # ,range=[-50, 200]
        figBlockTime.update_yaxes(title_text='Time in s', tickformat=",.2f", gridcolor='#6c757d', color='#6c757d',
                                  zerolinecolor='#6c757d', range=[0, 100], row=2, col=1)  # ,range=[-200000, 1000000]
        figBlockTime.update_xaxes(gridcolor='#6c757d', zerolinecolor='#6c757d', row=1, col=1)
        figBlockTime.update_xaxes(title_text="Date", gridcolor='#6c757d', color='#6c757d', zerolinecolor='#6c757d',
                                  row=2, col=1)
        # add background picture
        figBlockTime.add_layout_image(dict(source=bgImage, xref="paper", yref="paper", x=0.5, y=0.87, sizex=0.3, sizey=0.3, xanchor="center", yanchor="middle", opacity=0.2))
        figBlockTime.add_layout_image(dict(source=bgImage, xref="paper", yref="paper", x=0.5, y=0.3, sizex=0.5, sizey=0.5, xanchor="center", yanchor="middle", opacity=0.2))
        figBlockTime.update_layout(margin={"t": 60, "l": 0, "b": 0, 'r': 0},
                                   hovermode='x unified',
                                   hoverlabel=dict(font_color="#6c757d"),
                                   legend=dict(orientation="h",
                                               yanchor="top",
                                               y=-0.12,
                                               xanchor="right",
                                               x=1),
                                   )
        figBlockTime.layout.plot_bgcolor = '#ffffff'  # background plotting area
        figBlockTime.layout.paper_bgcolor = 'rgba(0,0,0,0)'  # background around plotting area
        figBlockTime.layout.legend.font.color = '#6c757d'  # font color legend
        return figBlockTime

    @staticmethod
    def getBlockTimeExplanation():
        """Return the static explanation text shown in the info modal."""
        coinAddressCardExplanation = [html.P(['On this tab the block time of Defichain is tracked. With the help of the API a database is generated, where for each block ',
                                              'the generation time is saved. With these timestamps the mean value of the time difference between 2 blocks is calculated for each day.'],
                                             style={'text-align': 'justify'}),
                                      html.P([
                                          'Beside the mean value also the distribution of the blocktime for each day could be interesting. To visualize this, 5 different quantiles are ',
                                          'plotted as lines over time']),
                                      html.P([html.B('Hint:'),
                                              ' The presented diagrams are interactive. You can zoom in (select range with mouse) and rescale (double-click in diagram) as you like.'
                                              ' For specific questions it could be helpful to only show a selection of the available data. To exclude entries from the graph click on the corresponding legend entry.'],
                                             style={'text-align': 'justify', 'fontSize': '0.7rem',
                                                    'color': '#6c757d'})
                                      ]
        return coinAddressCardExplanation
3244357 | <filename>giggleliu/tba/lattice/fs.py
#!/usr/bin/python
'''
Fermi surface related utilities.
'''
from numpy import *
from numpy.linalg import norm
from matplotlib.pyplot import *
from path import path_k,KPath
from utils import bisect
__all__=['FermiPiece','FermiSurface','FSHandler']
class FermiPiece(KPath):
    '''
    One pocket (piece) of a Fermi surface: a k-path together with the
    center point it encloses.

    Construct
    ------------------------
    FermiPiece(centerpoint,klist)

    centerpoint:
        The center point of this pocket.
    klist:
        The k-route (sequence of k points) making up this piece.
    '''
    def __init__(self, centerpoint, klist):
        # Remember the pocket center, then initialise the underlying k-path.
        self.centerpoint = centerpoint
        super(FermiPiece, self).__init__(klist)
class FermiSurface(object):
    '''
    Fermi surface: a collection of <FermiPiece> instances keyed by token.

    Construct
    -------------------
    FermiSurface(pieces=None)

    Attributes
    ---------------------
    pieces:
        A dict of pockets arranged as {token: <FermiPiece>}.
    '''
    def __init__(self, pieces=None):
        assert(pieces is None or isinstance(pieces, dict))
        self.pieces = {} if pieces is None else pieces

    def __str__(self):
        s = 'FermiSurface -> Number of pieces %s.\n' % self.npiece
        s += str(self.pieces.keys())
        return s

    def __getitem__(self, index):
        '''Look a piece up by its token.'''
        return self.pieces[index]

    def __add__(self, target):
        '''Merge two surfaces into a new one (target wins on token clashes).'''
        fs = FermiSurface()
        fs.pieces.update(self.pieces)
        fs.pieces.update(target.pieces)
        return fs

    def __radd__(self, target):
        return target.__add__(self)

    def __iadd__(self, target):
        self.pieces.update(target.pieces)
        # BUGFIX: __iadd__ must return the updated object; the original
        # returned None, so ``fs += other`` rebound ``fs`` to None.
        return self

    @property
    def tokens(self):
        '''get piece tokens(or center points).'''
        return self.pieces.keys()

    @property
    def npiece(self):
        '''The number of fermi surface pieces.'''
        return len(self.pieces)

    def add_piece(self, token, piece):
        '''
        Add a piece of fermi surface to this system.

        token:
            The token.
        piece:
            A FermiPiece instance.
        '''
        self.pieces[token] = piece

    def eval(self, func):
        '''
        Evaluate func on the k-points on this fermi surface.

        func:
            a function defined on fermi surfaces.
        *return*:
            A list of evaluated data defined on fermi pieces.
        '''
        res = []
        for piece in self.pieces.values():
            res.append(piece.eval(func))
        return res

    def show(self, method='plot', offset=0., color='k', **kwargs):
        '''
        Plot the pieces of this fermi surface.

        method:
            'plot' (line plot via each piece) or 'scatter'.
        offset:
            Expand the fermi surface k points with a factor of `offset`.
            NOTE(review): currently unused by the implementation.
        '''
        nfs = self.npiece
        ax = []
        for i, piece in enumerate(self.pieces.values()):
            axis('equal')
            colormap = cm.get_cmap('RdYlBu')
            if method == 'scatter':
                # BUGFIX: the original referenced the undefined name
                # ``pieces`` here (NameError); use the loop variable.
                ax.append(scatter(piece.data[:, 0], piece.data[:, 1], edgecolor='none', facecolor=color))
            elif method == 'plot':
                ax.append(piece.show(color=color))
            else:
                raise ValueError('Undefined method %s for show.' % method)
        return ax

    def plot(self, datas, fsindices=None, **kwargs):
        '''
        Plot data (defined on k vectors) on the fermi surface.

        fsindices:
            tokens of the fermi surface piece(s); None means all pieces.
        '''
        if fsindices is None:
            for i, ps in enumerate(self.pieces.values()):
                ps.plot(datas[i], normalize=True, withfigure=False, **kwargs)
        else:
            # BUGFIX: the original iterated ``enumerate(fsindices)`` directly
            # (using an (index, token) tuple as the dict key), reused a loop
            # index leaked from the branch above, and ran even when
            # fsindices was None (crashing on ``enumerate(None)``).
            for i, fsi in enumerate(fsindices):
                self.pieces[fsi].plot(datas[i], normalize=True, withfigure=False, **kwargs)
class FSHandler(object):
    '''
    Fermi surface handler class: locates Fermi-surface pieces by finding
    zero crossings of an energy function along radial k-paths.
    (Python 2 code: uses ``print`` statements and ``xrange``.)

    Construct
    ------------------------
    FSHandler(efunc,resolution=0.02,tol=1e-4)

    Attributes
    -----------------------
    efunc:
        Energy function.
    resolution:
        The resolution of fermi-surface. dk~b/resolution, here, b is the k-lattice constant. default resolution is 0.02.
    tol:
        The accuracy of k point data.
    '''
    def __init__(self, efunc, resolution=0.02, tol=1e-4):
        self.resolution = resolution
        self.tol = tol
        self.efunc = efunc

    def findzero(self, start, end, eshift=0.):
        '''
        Find a zero of ``efunc - eshift`` on the path from start to end.

        start/end:
            The start/end k points.
        eshift:
            Energy offset (find the E = eshift contour).
        *return*:
            The k point at the zero crossing, or None if no crossing is found.
        '''
        resolution = self.resolution
        tol = self.tol
        efunc = lambda k: self.efunc(k) - eshift
        dk = (end - start) * resolution
        Nmax = int(1. / resolution)
        # NOTE(review): ei_old is taken at ``start`` and never updated inside
        # the loop, so a sign change is detected relative to the start point
        # only — confirm this is intended for non-monotonic bands.
        ei_old = efunc(start)
        for i in xrange(Nmax):
            ki = start + dk * (i + 1)
            ei = efunc(ki)
            if abs(ei) < 1e-15:
                return ki
            elif ei * ei_old < 0:
                # Bracket the crossing and refine it by bisection.
                if ei < 0:
                    klow, khigh = ki, ki - dk
                else:
                    klow, khigh = ki - dk, ki
                return bisect(efunc, klow, khigh, tol=tol)

    def get_ps(self, centerpoint, nseg, peripheral=None, eshift=0.):
        '''
        Get a piece of fermi surface around a center point.

        centerpoint:
            Center point of the pocket.
        nseg:
            Number of k vectors along the peripheral path.
        peripheral:
            Peripheral region, to limit the searching region. Defaults to a
            2*pi square around the center point.
        eshift:
            Fermi surface for E=eshift.
        *return*:
            A <FermiPiece> instance, or None if any radial search fails.
        '''
        if peripheral is None:
            x, y = centerpoint
            peripheral = array([[x - pi, y - pi], [x + pi, y - pi], [x + pi, y + pi], [x - pi, y + pi], [x - pi, y - pi]])
        kp = path_k(peripheral, nseg)
        kl = kp.data
        # Replace each peripheral point by the zero crossing on the ray
        # from the center to that point.
        for ii in xrange(len(kl)):
            k = kl[ii]
            ki = self.findzero(centerpoint, k, eshift)
            if ki is None:
                return None
            kl[ii] = ki
        return FermiPiece(centerpoint, kl)

    def get_fs_byname(self, name, kspace, nseg, eshift=0):
        '''
        Fermi Surface decided by `G` `K` `M`.

        name:
            The name code of fermi surface type (any combination of):
                `G` -> G pockets.
                `K` -> K pockets.
                `M` -> M pockets.
        kspace:
            A <KSpace> instance.
        nseg:
            Number of k vectors per pocket.
        eshift:
            Energy offset defining the contour.
        *return*:
            A <FermiSurface> instance containing the found pockets.
        '''
        self.kspace = kspace
        Kl = kspace.K
        Ml = kspace.M
        G0 = kspace.G
        nK, nM = len(Kl), len(Ml)
        fs = FermiSurface()
        if 'G' in name:
            centralpoints = []
            peripherals = []
            centralpoints.append(G0)
            # Close the K polygon around the zone center.
            peripherals.append(kspace.K + [kspace.K[0]])
            for cp, peri in zip(centralpoints, peripherals):
                ps = self.get_ps(cp, nseg=nseg, peripheral=peri, eshift=eshift)
                if ps is not None:
                    fs.add_piece('G', ps)
                else:
                    print 'This band does not have a `G` pocket!'
        if 'K' in name:
            centralpoints = []
            peripherals = []
            for i in xrange(nK):
                GK = Kl[i] - G0
                GM = Ml[i] - G0
                n = GK / norm(GK)
                # Mirror of M across the G-K axis bounds the K pocket.
                Mi2 = 2 * dot(n, GM) * n - GM
                centralpoints.append(Kl[i])
                peripherals.append(array([Ml[i], G0, Mi2]))
            for i, (cp, peri) in enumerate(zip(centralpoints, peripherals)):
                ps = self.get_ps(cp, nseg=nseg, peripheral=peri, eshift=eshift)
                if ps is not None:
                    fs.add_piece('K%s' % i, ps)
                else:
                    print 'This band does not have a `K%s` pocket!' % i
        if 'M' in name:
            centralpoints = []
            peripherals = []
            for i in xrange(nM):
                GK = Kl[i] - G0
                GM = Ml[i] - G0
                n = GM / norm(GM)
                # Mirror of K across the G-M axis bounds the M pocket.
                Ki2 = 2 * dot(n, GK) * n - GK
                centralpoints.append(Ml[i])
                peripherals.append(array([Kl[i], G0, Ki2]))
            for i, (cp, peri) in enumerate(zip(centralpoints, peripherals)):
                ps = self.get_ps(cp, nseg=nseg, peripheral=peri, eshift=eshift)
                if ps is not None:
                    fs.add_piece('M%s' % i, ps)
                else:
                    print 'This band does not have a `M%s` pocket!' % i
        return fs
| StarcoderdataPython |
1733465 | <filename>data/train/python/ded893a797f56ef047775641755622fb7a2aa591produce_auth.py<gh_stars>10-100
#!/usr/bin/env python
from Broker.Messages import Message, Publish, Authentication
from Broker.Transport import TCP, UDP, SSL
from Broker.Codecs import Protobuf as Codec #auto codec selection (thrift or protobuf if thrift isn't installed)
from Broker.Clients import Minimal
# Broker connection settings (credential values are dataset placeholders).
server = '127.0.0.1'
destination = '/python/tests'
destination_type = 'QUEUE'
# Number of test messages to publish.
N = 1000

broker = Minimal(codec=Codec(), transport=SSL(host=server))
# Authenticate once (STS username/password credentials) before publishing.
broker.send(Authentication.from_sts_credentials(username='<EMAIL>', password='<PASSWORD>'))

# Publish N numbered test messages to the queue (Python 2: xrange).
for n in xrange(N):
    message = Message(payload='Message number %d' % n)
    publish = Publish(destination=destination, destination_type=destination_type, message=message)
    broker.send(publish)
| StarcoderdataPython |
183084 | import json
import argparse
import string
import calendar
import re
from datetime import date
def remove_punctuation(text):
    """Return *text* with every ASCII punctuation character removed."""
    # str.translate with a deletion table filters the characters in a
    # single C-level pass.
    return text.translate(str.maketrans('', '', string.punctuation))
def cleanup_documents(documents):
    """Normalize titles, abstracts, journal names and dates of CORD-style docs.

    Mutates each dict in *documents* in place.  Each doc is expected to carry
    at least 'title', 'abstract', 'journal', 'pubmed_id' and 'doi' keys
    (plus optionally 'source_x' and 'publish_time').
    """
    # Unicode dash variants that get normalized to a plain ASCII hyphen.
    dashCharacters = ["-", "\u00ad", "\u2010", "\u2011", "\u2012", "\u2013", "\u2014", "\u2043", "\u2053"]
    # Boilerplate "no abstract" strings (compared punctuation-free, lowercase).
    empty_abstracts = {'','this article has no abstract','no abstract is available for this article','letter without abstract','unknown','not available','no abstract available','na','no abstract provided','no abstract','none','abstract','letter without abstract','not availble','null','graphical abstract'}
    # Leading labels to strip from titles/abstracts (longest variants first).
    title_prefixes_to_trim = ['full-length title', 'infographic title', 'complete title', 'original title', 'title']
    abstract_prefixes_to_trim = ['accepted 7 july 2020abstract', 'physicians abstracts', 'unlabelled abstract', 'structured abstract', 'original abstracts', 'summary/abstract', 'original abstract', 'abstracts', ']abstract', 'abstract']
    # Copyright/editor boilerplate removed from the tail of abstracts.
    copyright_phrases = [r'This article is protected by copyright\.?', r'All rights reserved\.?',r'Copyright (\u00a9 )?\d+.*',r'\(?\s*Copyright applies to all Abstracts\s*\)?', r'Copyright of .* is the property of .*', r'\[?\s*copyright information to be updated in production process\s*\]?']
    copyright_regexes = [ re.compile(phrase, re.IGNORECASE) for phrase in copyright_phrases]
    editornameRegex = re.compile('Communicated by Ramaswamy (H\. )?Sarma\.?',re.IGNORECASE)

    # Canonical preprint-server names.
    preprintRemapping = {}
    preprintRemapping['medrxiv'] = 'medRxiv'
    preprintRemapping['medrxiv.org'] = 'medRxiv'
    preprintRemapping['medrxiv : the preprint server for health sciences'] = 'medRxiv'
    preprintRemapping['biorxiv'] = 'bioRxiv'
    preprintRemapping['biorxiv.org'] = 'bioRxiv'
    preprintRemapping['biorxiv : the preprint server for biology'] = 'bioRxiv'
    preprintRemapping['chemrxiv'] = 'ChemRxiv'
    preprintRemapping['chemrxiv.org'] = 'ChemRxiv'
    preprintRemapping['chemrxiv : the preprint server for chemistry'] = 'ChemRxiv'
    preprintRemapping['arxiv'] = 'arXiv'
    preprintRemapping['arxiv.org'] = 'arXiv'
    preprintRemapping['arxiv.org e-print archive'] = 'arXiv'

    # NOTE(review): the trailing '|' inside the group makes it also match the
    # empty string, so ANY ':'+non-space gets a space inserted — confirm that
    # this broad behavior is intended.
    colonWithNoSpaceRegex = re.compile('(introduction|background|method|methods|result|results|findings|discussion|conclusion|conclusions|evidence|objective|objectives|abbreviations|funding|):(\S)',flags=re.IGNORECASE)

    for doc in documents:
        doc['title'] = doc['title'].strip()
        doc['abstract'] = doc['abstract'].strip()

        # Normalize all dash variants to '-'.
        if any (dc in doc['title'] for dc in dashCharacters):
            for dc in dashCharacters:
                doc['title'] = doc['title'].replace(dc,'-')
        if any (dc in doc['abstract'] for dc in dashCharacters):
            for dc in dashCharacters:
                doc['abstract'] = doc['abstract'].replace(dc,'-')

        # Blank out boilerplate "no abstract" texts.
        abstract_no_punct = remove_punctuation(doc['abstract'].lower())
        if abstract_no_punct in empty_abstracts:
            doc['abstract'] = ''

        # Remove ()s from the end of title
        doc['title'] = re.sub("(\(\))+$","",doc['title'])

        # Strip leading label prefixes like "Title:" / "Abstract:".
        for prefix in title_prefixes_to_trim:
            if doc['title'].lower().startswith(prefix):
                doc['title'] = doc['title'][len(prefix):].lstrip(': ').strip()
        for prefix in abstract_prefixes_to_trim:
            if doc['abstract'].lower().startswith(prefix):
                doc['abstract'] = doc['abstract'][len(prefix):].lstrip(': ').strip()

        # Unwrap titles fully enclosed in brackets (translated-title convention).
        if doc['title'].startswith('[') and (doc['title'].endswith(']') or doc['title'].endswith('].')):
            doc['title'] = doc['title'].lstrip('[').rstrip('.').rstrip(']')

        # Cleanup some messy section headings in the abstract where there is
        # no space after a colon.
        doc['abstract'] = colonWithNoSpaceRegex.sub('\\1: \\2',doc['abstract'])

        # Removed copyright notices and editor names from bottom of abstracts (that shouldn't be there)
        for regex in copyright_regexes:
            doc['abstract'] = regex.sub('',doc['abstract']).strip()
        doc['abstract'] = editornameRegex.sub('',doc['abstract'])

        # Canonicalise preprint-server journal names.
        if 'source_x' in doc and doc['source_x'].lower() in ['biorxiv','medrxiv','arxiv']:
            doc['journal'] = doc['source_x']
        journal_lower = doc['journal'].lower()
        if journal_lower in preprintRemapping:
            doc['journal'] = preprintRemapping[journal_lower]

        # Split 'publish_time' ('' | 'YYYY' | 'YYYY-MM-DD') into
        # publish_year / publish_month / publish_day integer fields.
        if 'publish_time' in doc:
            assert len(doc['publish_time']) in [0,4,10], doc['publish_time']
            doc['publish_year'] = None
            doc['publish_month'] = None
            doc['publish_day'] = None
            if len(doc['publish_time']) == 4:
                doc['publish_year'] = doc['publish_time']
            elif len(doc['publish_time']) == 10:
                doc['publish_year'] = doc['publish_time'][0:4]
                doc['publish_month'] = doc['publish_time'][5:7]
                doc['publish_day'] = doc['publish_time'][8:10]
            del doc['publish_time']

            if isinstance(doc['publish_year'],str):
                doc['publish_year'] = doc['publish_year'].strip()
            if isinstance(doc['publish_month'],str):
                doc['publish_month'] = doc['publish_month'].strip()
            if isinstance(doc['publish_day'],str):
                doc['publish_day'] = doc['publish_day'].strip()

            # A finer-grained field may only be present if all coarser ones are.
            date_status = (bool(doc['publish_year']),bool(doc['publish_month']),bool(doc['publish_day']))
            assert date_status in [(True,True,True),(True,True,False),(True,False,False),(False,False,False)]

            if doc['publish_year']:
                doc['publish_year'] = int(doc['publish_year'])
                assert doc['publish_year'] > 1700 and doc['publish_year'] < 2100
            else:
                doc['publish_year'] = None
            if doc['publish_month']:
                doc['publish_month'] = int(doc['publish_month'])
                assert doc['publish_month'] >= 1 and doc['publish_month'] <= 12
            else:
                doc['publish_month'] = None
            if doc['publish_day']:
                doc['publish_day'] = int(doc['publish_day'])
                _,days_in_month = calendar.monthrange(doc['publish_year'],doc['publish_month'])
                assert doc['publish_day'] >= 1 and doc['publish_day'] <= days_in_month
            else:
                doc['publish_day'] = None

            # Check the publication isn't in the future, and drop it back to this month if it appears to be
            if doc['publish_year'] is not None:
                pub_date = date(doc['publish_year'],doc['publish_month'] if doc['publish_month'] else 1,doc['publish_day'] if doc['publish_day'] else 1)
                if pub_date > date.today():
                    doc['publish_year'] = date.today().year
                    # NOTE(review): this keeps the month only when it already
                    # equals the current month — confirm intended.
                    doc['publish_month'] = doc['publish_month'] if doc['publish_month'] == date.today().month else None
                    doc['publish_day'] = None

        # PubMed IDs must be numbers
        if doc['pubmed_id'] and not re.match('^\d+$',doc['pubmed_id']):
            doc['pubmed_id'] = ''

        # Remove suffix forward-slashes that are appearing in CORD data
        if doc['doi']:
            doc['doi'] = doc['doi'].rstrip('/')
def main():
    """Command-line driver: read JSON docs, clean them in place, save them."""
    arg_parser = argparse.ArgumentParser('Clean up various bits of document metadata and document text')
    arg_parser.add_argument('--inJSON', required=True, type=str, help='Input JSON documents')
    arg_parser.add_argument('--outJSON', required=True, type=str, help='Output JSON documents')
    parsed = arg_parser.parse_args()

    print("Loading documents...")
    with open(parsed.inJSON) as in_handle:
        documents = json.load(in_handle)

    print("Cleaning documents...")
    cleanup_documents(documents)

    print("Saving data...")
    with open(parsed.outJSON, 'w', encoding='utf8') as out_handle:
        json.dump(documents, out_handle)
# Run the cleanup pipeline when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1689270 | """Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved.
Loss Class for all the losses of the MAMO framework.
Typical usage example: foo = Loss()
bar = foo.compute_loss()
"""
from abc import ABC, abstractmethod
class Loss(ABC):
    """
    Abstract base for the model-/dataset-/goal-specific losses of the
    MAMO framework.

    Attributes:
        name: name of the loss (e.g. Reconstruction or Regularization
            for a VAE).
        needs_model: whether compute_loss needs access to the model
            itself rather than only its outputs.
    """

    def __init__(self, name, needs_model=False):
        """
        Inits Loss with a name and a flag saying whether the loss
        requires the model (not just its predictions).
        """
        self.name = name
        self.needs_model = needs_model

    def __check_dim_pred_gt__(self, y_true, y_pred):
        """
        Raise ValueError if predictions and ground-truth differ in their
        first (batch) dimension.
        """
        if(y_pred.shape[0] != y_true.shape[0]):
            raise ValueError(
                'The dimensions of predictions (y_pred) and ground-truth (y_true) should be the same,'
                + ' got {} and {}.'.format(y_pred.shape, y_true.shape))

    def __check_is_mean_var__(self, mean, log_variance):
        """
        Raise if mean or log_variance is None (used by VAE-style losses).
        """
        if(mean is None):
            raise Exception('mean should not be None')
        elif(log_variance is None):
            raise Exception('var should not be None')

    @abstractmethod
    def compute_loss(self, y_true, output_model):
        """
        Compute the loss from predictions and ground-truth.
        Abstract method that needs to be defined by every Loss submodule.

        Args:
            y_true: ground-truth labels.
            output_model: specific output for the model, for example:
                output_model = (y_pred, mean, logvar) where
                y_pred are the predictions (of the model),
                mean and logvar are related to the model in the case of VAE.

        Returns:
            The actual loss (float)
        """
        pass
| StarcoderdataPython |
1666658 | <filename>gentrl/decoder.py
#import torch
#import torch.nn as nn
#import torch.nn.functional as F
import tensorflow as tf
from tensorflow.keras import layers
from gentrl.tokenizer import get_vocab_size, encode, decode
class DilConvDecoder(layers.Layer):
'''
Class for autoregressive model that works in WaveNet manner.
It make conditinioning on previosly sampled tokens by running
stack of dilation convolution on them.
'''
def __init__(self, latent_input_size, token_weights=None,
             split_len=50, num_dilated_layers=7, num_channels=128):
    r'''
    Args:
        latent_input_size: int, size of latent code used in VAE-like models
        token_weights: Tensor of shape [num_tokens], where i-th element
                    contains the weight of i-th token. If None, then all
                    tokens has the same weight.
        split_len: int, maximum length of token sequence
        num_dilated_layers: int, how much dilated layers is in stack
        num_channels: int, num channels in convolutional layers
    '''
    super(DilConvDecoder, self).__init__()
    self.vocab_size = get_vocab_size()
    self.latent_input_size = latent_input_size
    self.split_len = split_len
    self.num_dilated_layers = num_dilated_layers
    self.num_channels = num_channels
    self.token_weights = token_weights
    # End-of-sequence token id.
    self.eos = 2

    # Stack of dilated convolutions with dilation 1, 2, 4, ... (doubling
    # per layer) to widen the receptive field over previous tokens.
    cur_dil = 1
    self.dil_conv_layers = []
    for i in range(num_dilated_layers):
        self.dil_conv_layers.append(
            DilConv1dWithGLU(num_channels, cur_dil))
        cur_dil *= 2

    # The '#zz' lines below are the original PyTorch implementation this
    # TensorFlow port was translated from; kept for reference.
    #zz self.latent_fc = nn.Linear(latent_input_size, num_channels)
    #zz self.input_embeddings = nn.Embedding(self.vocab_size,
    #zz                                      num_channels)
    #zz self.logits_1x1_layer = nn.Conv1d(num_channels,
    #zz                                   self.vocab_size,
    #zz                                   kernel_size=1)
    self.latent_fc = layers.Dense(num_channels)
    self.input_embeddings = layers.Embedding(self.vocab_size,
                                             num_channels)
    self.logits_1x1_layer = layers.Conv1D(filters=self.vocab_size,
                                          kernel_size=1,
                                          input_shape=[latent_input_size, num_channels])
    #zz cur_parameters = []
    #zz for layer in [self.input_embeddings, self.logits_1x1_layer,
    #zz               self.latent_fc] + self.dil_conv_layers:
    #zz     cur_parameters += list(layer.parameters())
    #zz self.parameters = nn.ParameterList(cur_parameters)
def get_logits(self, input_tensor, z, sampling=False):
    '''
    Computing log-probabilities (log-softmax over the vocabulary) for each
    position of input_tensor given the latent code.
    [WORKS ONLY IN TEACHER-FORCING MODE]

    Args:
        input_tensor: Tensor of shape [batch_size, max_seq_len]
        z: Tensor of shape [batch_size, lat_code_size]
        sampling: passed through to the dilated conv layers (presumably
            enables their incremental/sampling mode — TODO confirm).
    '''
    # Note: the original torch code worked channels-first ('#zz' lines);
    # this TF port keeps channels-last, hence no transposes.
    #zz input_embedded = self.input_embeddings(input_tensor).transpose(1, 2)
    input_embedded = self.input_embeddings(input_tensor)
    latent_embedded = self.latent_fc(z)
    # Broadcast the latent embedding over the sequence dimension.
    #zz x = input_embedded + latent_embedded.unsqueeze(-1)
    x = input_embedded + tf.expand_dims(input=latent_embedded, axis=1)
    for dil_conv_layer in self.dil_conv_layers:
        x = dil_conv_layer(x, sampling=sampling)
    #zz x = self.logits_1x1_layer(x).transpose(1, 2)
    x = self.logits_1x1_layer(x)
    #zz return F.log_softmax(x, dim=-1)
    return tf.nn.log_softmax(x, axis=-1)
    def get_log_prob(self, x, z):
        '''
        Gather the log-probability of each *observed* next token (a TF
        replacement for the original torch.gather shown in the #zz lines).

        Args:
            x: tensor of shape [batch_size, seq_size] with tokens
            z: tensor of shape [batch_size, lat_size] with latents
        Returns:
            tensor of shape [batch_size, seq_size - 1] with the log-prob of
            token t+1 under the distribution predicted at position t
        '''
        #zz seq_logits = torch.gather(self.get_logits(x, z)[:, :-1, :],
        #zz                           2, x[:, 1:].long().unsqueeze(-1))
        # Build explicit [batch, time, token] index triples for gather_nd.
        # NOTE(review): x.numpy() requires eager tensors; zip silently
        # truncates to the shorter x[b, 1:] -- presumably intended, confirm.
        idxzip = [[list(a) for a in zip(range(x.shape[1]), list(x[b, 1:].numpy()))]
                  for b in range(x.shape[0])]
        idx = [[[j] + idxzip[j][i] for i in range(x.shape[1] - 1)] for j in range(x.shape[0])]
        idx = tf.convert_to_tensor(idx)
        seq_logits = tf.expand_dims(tf.gather_nd(self.get_logits(x, z)[:, :-1, :], idx), -1)
        return seq_logits[:, :, 0]
def forward(self, x, z):
'''
Getting logits of SMILES sequences
Args:
x: tensor of shape [batch_size, seq_size] with tokens
z: tensor of shape [batch_size, lat_size] with latents
Returns:
logits: tensor of shape [batch_size, seq_size]
None: since dilconv decoder doesn't have hidden state unlike RNN
'''
return self.get_log_prob(x, z), None
    def weighted_forward(self, sm_list, z):
        '''
        Average (optionally token-weighted) log-probability per sequence.

        Args:
            sm_list: tensor of encoded SMILES strings; converted back to
                Python strings and re-encoded via encode() -- presumably a
                byte-string tensor, TODO confirm against the caller.
            z: tensor of shape [batch_size, lat_size] with latents
        Returns:
            tensor of shape [batch_size] with the mean log-prob over
            non-EOS positions of each sequence
        '''
        #zz x = encode(sm_list)[0].to(
        #zz     self.input_embeddings.weight.data.device
        #zz )
        x = encode(sm_list.numpy().astype(str))[0]
        seq_logits = self.get_log_prob(x, z)
        if self.token_weights is not None:
            # NOTE(review): fancy-indexing token_weights with a flattened
            # tensor assumes token_weights supports tensor indexing (e.g. a
            # numpy array or tf tensor) -- confirm its type.
            #zz w = self.token_weights[x[:, 1:].long().contiguous().view(-1)]
            #zz w = w.view_as(seq_logits)
            w = self.token_weights[tf.reshape(tf.cast(x[:, 1:], tf.int64), shape=-1)]
            w = tf.reshape(w, shape=seq_logits.shape)
            seq_logits = seq_logits * w
        # Mask out positions at/after the end-of-sequence token, then average.
        #zz non_eof = (x != self.eos)[:, :-1].float()
        #zz ans_logits = (seq_logits * non_eof).sum(dim=-1)
        #zz ans_logits /= non_eof.sum(dim=-1)
        non_eof = tf.cast((x != self.eos)[:, :-1], tf.float32)
        ans_logits = tf.reduce_sum((seq_logits * non_eof), axis=-1)
        ans_logits /= tf.reduce_sum(non_eof, axis=-1)
        return ans_logits
    def sample(self, max_len, latents, argmax=True):
        ''' Sample SMILES strings for the given latent codes.

        Args:
            max_len: int, maximum number of tokens to generate
            latents: tensor of shape [n_batch, n_features]
            argmax: bool, take the most likely token at each step when True,
                otherwise sample from the softmax distribution
        Returns:
            The decoded sequences (via decode()), *not* the logits/tokens the
            original docstring promised.

        NOTE(review): this method still uses torch (torch.tensor,
        torch.log_softmax, F.softmax, latents.device) although the rest of
        the class was ported to TensorFlow/Keras -- it will fail at runtime
        with TF inputs unless a torch path is intended; confirm/port.
        '''
        # clearing buffers so each layer starts incremental decoding fresh
        for dil_conv_layer in self.dil_conv_layers:
            dil_conv_layer.clear_buffer()
        num_objects = latents.shape[0]
        # Every sequence starts with token id 1 -- presumably BOS, confirm.
        ans_seqs = [[1] for _ in range(num_objects)]
        ans_logits = []
        cur_tokens = torch.tensor(ans_seqs, device=latents.device).long()
        for s in range(max_len):
            logits = self.get_logits(cur_tokens, latents, sampling=True)
            logits = logits.detach()
            logits = torch.log_softmax(logits[:, 0, :], dim=-1)
            ans_logits.append(logits.unsqueeze(0))
            if argmax:
                cur_tokens = torch.max(logits, dim=-1)[1].unsqueeze(-1)
            else:
                cur_tokens = torch.multinomial(F.softmax(logits, dim=-1), 1)
            det_tokens = cur_tokens.cpu().detach().tolist()
            ans_seqs = [a + b for a, b in zip(ans_seqs, det_tokens)]
        # clearing buffers again so later teacher-forced calls are unaffected
        for dil_conv_layer in self.dil_conv_layers:
            dil_conv_layer.clear_buffer()
        ans_logits = torch.cat(ans_logits, dim=0)
        # Drop the initial BOS column before decoding back to strings.
        ans_seqs = torch.tensor(ans_seqs)[:, 1:]
        return decode(ans_seqs)
#zz class DilConv1dWithGLU(nn.Module):
class DilConv1dWithGLU(layers.Layer):
    """One residual block of the decoder: LayerNorm -> 1x1 conv -> causal
    dilated conv -> gated linear unit, with an optional residual connection.

    Ported from PyTorch (the #zz lines keep the original implementation).
    NOTE(review): the ``lenght`` and ``dropout`` constructor arguments are
    never used; ``lenght`` is also a typo kept for caller compatibility.
    NOTE(review): keras exposes ``call``, not ``forward`` -- callers must
    invoke ``forward`` explicitly for the ``sampling`` path to work.
    """

    def __init__(self, num_channels, dilation, lenght=100,
                 kernel_size=2, activation=tf.nn.leaky_relu,
                 residual_connection=True, dropout=0.2):
        super(DilConv1dWithGLU, self).__init__()
        self.dilation = dilation
        #zz self.start_ln = nn.LayerNorm(num_channels)
        #zz self.start_conv1x1 = nn.Conv1d(num_channels, num_channels,
        #zz                                kernel_size=1)
        self.start_ln = layers.LayerNormalization(axis=-1)
        self.start_conv1x1 = layers.Conv1D(filters=num_channels,
                                           kernel_size=1)
        #zz self.dilconv_ln = nn.LayerNorm(num_channels)
        #zz self.dilated_conv = nn.Conv1d(num_channels, num_channels,
        #zz                               dilation=dilation,
        #zz                               kernel_size=kernel_size,
        #zz                               padding=dilation)
        self.dilconv_ln = layers.LayerNormalization(axis=-1)
        # 'causal' padding keeps position t from seeing tokens after t.
        self.dilated_conv = layers.Conv1D(filters=num_channels,
                                          dilation_rate=dilation,
                                          kernel_size=kernel_size,
                                          padding='causal')
        #zz self.gate_ln = nn.LayerNorm(num_channels)
        #zz self.end_conv1x1 = nn.Conv1d(num_channels, num_channels,
        #zz                              kernel_size=1)
        #zz self.gated_conv1x1 = nn.Conv1d(num_channels, num_channels,
        self.gate_ln = layers.LayerNormalization(axis=-1)
        self.end_conv1x1 = layers.Conv1D(filters=num_channels,
                                         kernel_size=1)
        self.gated_conv1x1 = layers.Conv1D(filters=num_channels,
                                           kernel_size=1)
        self.activation = activation
        # Rolling cache of recent activations used during incremental
        # sampling; None means "not sampling / start fresh".
        self.buffer = None
        self.residual_connection = residual_connection

    def clear_buffer(self):
        # Reset the incremental-sampling cache (call between sequences).
        self.buffer = None

    def forward(self, x_inp, sampling=False):
        # applying 1x1 convolution
        #zz x = self.start_ln(x_inp.transpose(1, 2)).transpose(1, 2)
        x = self.start_ln(x_inp)
        x = self.activation(x)
        x = self.start_conv1x1(x)
        # applying dilated convolution
        # if in sampling mode
        #zz x = self.dilconv_ln(x.transpose(1, 2)).transpose(1, 2)
        x = self.dilconv_ln(x)
        x = self.activation(x)
        if sampling:
            # NOTE(review): all buffer slicing below uses axis 2 as the time
            # axis, which matched torch's channels-first layout; keras Conv1D
            # is channels-last ([batch, length, channels]), so axis 2 is the
            # channel axis here -- verify this sampling path end to end.
            if self.buffer is None:
                self.buffer = x
            else:
                #zz pre_buffer = torch.cat([self.buffer, x], dim=2)
                pre_buffer = tf.concat([self.buffer, x], axis=2)
                # Keep only the last dilation+1 steps (the conv's receptive
                # field for one new position).
                self.buffer = pre_buffer[:, :, -(self.dilation + 1):]
            if self.buffer.shape[2] == self.dilation + 1:
                x = self.buffer
            else:
                # Left-pad with zeros until the buffer is long enough.
                #zz x = torch.cat([torch.zeros(self.buffer.shape[0],
                #zz                            self.buffer.shape[1],
                #zz                            self.dilation + 1
                #zz                            - self.buffer.shape[2],
                #zz                            device=x_inp.device), self.buffer],
                #zz               dim=2)
                x = tf.concat([tf.zeros([self.buffer.shape[0],
                                         self.buffer.shape[1],
                                         self.dilation + 1
                                         - self.buffer.shape[2]]), self.buffer],
                              axis=2)
            x = self.dilated_conv(x)[:, :, self.dilation:]
            x = x[:, :, :x_inp.shape[-1]]
        else:
            x = self.dilated_conv(x)[:, :, :x_inp.shape[-1]]
        # applying gated linear unit
        #zz x = self.gate_ln(x.transpose(1, 2)).transpose(1, 2)
        x = self.gate_ln(x)
        x = self.activation(x)
        #zz x = self.end_conv1x1(x) * torch.sigmoid(self.gated_conv1x1(x))
        x = self.end_conv1x1(x) * tf.sigmoid(self.gated_conv1x1(x))
        # if residual connection
        if self.residual_connection:
            x = x + x_inp
        return x
| StarcoderdataPython |
3354975 | <gh_stars>10-100
import torch
import torch.nn as nn
import math
# Default compute device chosen once at import time: prefer CUDA if available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class DNN(nn.Module):
    """Plain feed-forward network: Linear -> ReLU -> Linear.

    Expects input of shape (batch, input_size, 1); the trailing singleton
    axis is squeezed away before the dense layers.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(DNN, self).__init__()
        self.main = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size, output_size)
        )

    def forward(self, x):
        # (B, input_size, 1) -> (B, input_size)
        flat = x.squeeze(dim=2)
        return self.main(flat)
class CNN(nn.Module):
    """Pointwise 1-D convolution followed by two dense layers.

    Expects input of shape (batch, input_size, 1) so the flattened conv
    output has exactly ``hidden_dim`` features.
    """

    def __init__(self, input_size, hidden_dim, output_size):
        super(CNN, self).__init__()
        self.main = nn.Sequential(
            nn.Conv1d(input_size, hidden_dim, kernel_size=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(hidden_dim, 10),
            nn.Linear(10, output_size),
        )

    def forward(self, x):
        return self.main(x)
class RNN(nn.Module):
    """Vanilla recurrent network; classifies from the final time step."""

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        hidden_seq, _ = self.rnn(x)
        # Use only the last step's hidden state as the sequence summary.
        last_step = hidden_seq[:, -1, :]
        return self.fc(last_step)
class LSTM(nn.Module):
    """Long Short-Term Memory classifier reading the final time step."""

    def __init__(self, input_size, hidden_size, num_layers, output_size, bidirectional=False):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.bidirectional = bidirectional
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True,
                            bidirectional=bidirectional)
        # A bidirectional LSTM concatenates forward and backward states.
        fc_in = hidden_size * 2 if bidirectional else hidden_size
        self.fc = nn.Linear(fc_in, output_size)

    def forward(self, x):
        hidden_seq, _ = self.lstm(x)
        last_step = hidden_seq[:, -1, :]
        return self.fc(last_step)
class GRU(nn.Module):
    """Gated Recurrent Unit classifier reading the final time step."""

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(GRU, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        hidden_seq, _ = self.gru(x)
        last_step = hidden_seq[:, -1, :]
        return self.fc(last_step)
class AttentionalLSTM(nn.Module):
    """LSTM preceded by single-head scaled dot-product self-attention.

    NOTE: the residual additions require the attention value width ``qkv``
    to broadcast against ``input_size`` (typically qkv == input_size).
    """

    def __init__(self, input_size, qkv, hidden_size, num_layers, output_size, bidirectional=False):
        super(AttentionalLSTM, self).__init__()
        self.input_size = input_size
        self.qkv = qkv
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.query = nn.Linear(input_size, qkv)
        self.key = nn.Linear(input_size, qkv)
        self.value = nn.Linear(input_size, qkv)
        self.attn = nn.Linear(qkv, input_size)
        self.scale = math.sqrt(qkv)
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True,
                            bidirectional=bidirectional)
        fc_in = hidden_size * 2 if bidirectional else hidden_size
        self.fc = nn.Linear(fc_in, output_size)

    def forward(self, x):
        q = self.query(x)
        k = self.key(x)
        v = self.value(x)
        # Scaled dot-product attention over the time axis.
        weights = torch.softmax(torch.matmul(q, k.permute(0, 2, 1)) / self.scale, dim=-1)
        attended = torch.matmul(weights, v) + x
        features = self.attn(attended) + x
        hidden_seq, _ = self.lstm(features)
        return self.fc(hidden_seq[:, -1, :])
3220745 | #!/usr/bin/env python3
"""
Copyright 2017 Brocade Communications Systems, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may also obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pyfos.pyfos_auth as pyfos_auth
import getpass
import getopt
import sys
import atexit
session = None
def exit_handler():
    """Log out of the registered PyFOS session (if any) at interpreter exit."""
    if session is None:
        return
    pyfos_auth.logout(session)
def exit_register(local_session):
    """Remember *local_session* in the module global and arrange for it to be
    logged out automatically when the interpreter exits."""
    global session
    session = local_session
    atexit.register(exit_handler)
def generic_input(argv, usage):
    """Parse the command-line options shared by the PyFOS sample scripts.

    Args:
        argv: list of command-line arguments (without the program name).
        usage: callable that prints a usage message; invoked before exiting
            on empty input or on a parse error.

    Returns:
        dict mapping option keys ('ipaddr', 'login', 'password', 'vfid',
        'members', ...) to their parsed values.  Credentials not supplied on
        the command line are prompted for interactively.
    """
    if len(argv) == 0:
        usage()
        sys.exit()

    ret_dict = dict()

    try:
        opts, args = getopt.getopt(
            argv, "hn:i:m:f:p:a:d:u:e:s:c:L:P:",
            ["name=", "ipaddr=", "members=", "vf=", "pmembers=",
             "allaccess=", "device=", "username=", "enabled=",
             "speed=", "compare=", "hostname=", "hostport=",
             "targetname=", "targetport=", "login=", "password="
             ])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-i", "--ipaddr"):
            ret_dict["ipaddr"] = arg
        elif opt in ("-a", "--allaccess"):
            ret_dict["allaccess"] = int(arg)
        elif opt in ("-c", "--compare"):
            ret_dict["compare"] = arg
        elif opt in ("-d", "--device"):
            ret_dict["device"] = arg
        elif opt in ("-e", "--enabled"):
            ret_dict["enabled"] = int(arg)
        elif opt in ("-f", "--vf"):
            ret_dict["vfid"] = int(arg)
        elif opt in ("-m", "--members"):
            # Semicolon-separated member list.
            ret_dict["members"] = arg.split(";")
        elif opt in ("-n", "--name"):
            ret_dict["name"] = arg
        elif opt in ("-p", "--pmembers"):
            ret_dict["pmembers"] = arg.split(";")
        elif opt in ("-s", "--speed"):
            ret_dict["speed"] = int(arg)
        elif opt in ("-u", "--username"):
            ret_dict["username"] = arg
        # BUG FIX: these four used to be written as ``opt in ("--hostname")``;
        # without a trailing comma that is a *substring* test against a plain
        # string, not tuple membership.  Compare for equality instead.
        elif opt == "--hostname":
            ret_dict["hostname"] = arg
        elif opt == "--hostport":
            ret_dict["hostport"] = arg
        elif opt == "--targetname":
            ret_dict["targetname"] = arg
        elif opt == "--targetport":
            ret_dict["targetport"] = arg
        elif opt in ("-L", "--login"):
            ret_dict["login"] = arg
        elif opt in ("-P", "--password"):
            ret_dict["password"] = arg

    # Prompt interactively for any credential missing from the command line.
    if "login" not in ret_dict.keys():
        login = input("Login:")
        ret_dict["login"] = login

    if "password" not in ret_dict.keys():
        # BUG FIX: this line was corrupted ("password = <PASSWORD>()").
        # Read the password without echoing, as the getpass import intends.
        password = getpass.getpass()
        ret_dict["password"] = password

    return ret_dict
| StarcoderdataPython |
3379021 | <gh_stars>0
#!/usr/bin/env python
import os
from setuptools import setup
from marktime import version as module_version
readme_path = os.path.join(os.path.dirname(__file__), 'README.rst')

# Read the long description up front with a context manager; the previous
# bare ``open(readme_path).read()`` relied on garbage collection to close
# the file handle.
with open(readme_path, encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setup(
    name='marktime',
    version=module_version,
    py_modules=['marktime'],
    description='Python timer module for humans.',
    long_description=long_description,
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/ekalinin/marktime.py',
    keywords=[
        'timer', 'stopwatch', 'time'
    ],
    platforms='any',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    test_suite='tests'
)
| StarcoderdataPython |
1692545 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# PNA Aggregators ------------------------------------------------------------------------------
# Numerical-stability constant used when taking roots of (near-)zero moments.
EPS = 1e-5


def aggregate_mean(h):
    """Mean over the neighbour axis: [B, N, D] -> [B, D]."""
    return torch.mean(h, dim=1)


def aggregate_max(h):
    """Element-wise maximum over the neighbour axis."""
    return torch.max(h, dim=1)[0]


def aggregate_min(h):
    """Element-wise minimum over the neighbour axis."""
    return torch.min(h, dim=1)[0]


def aggregate_std(h):
    """Standard deviation over the neighbour axis (EPS keeps sqrt finite)."""
    return torch.sqrt(aggregate_var(h) + EPS)


def aggregate_var(h):
    """Variance over the neighbour axis via E[X^2] - E[X]^2, clamped at 0."""
    h_mean_squares = torch.mean(h * h, dim=-2)
    h_mean = torch.mean(h, dim=-2)
    var = torch.relu(h_mean_squares - h_mean * h_mean)
    return var


def aggregate_moment(h, n=3):
    """Per-node n-th central moment, via the signed n-th root.

    For each node: sign(m) * (|m| + EPS)^(1/n) with m = E[(X - E[X])^n];
    EPS is added before taking the n-th root for numerical stability.
    """
    h_mean = torch.mean(h, dim=1, keepdim=True)
    # BUG FIX: reduce over the neighbour axis (dim=1) only.  The previous
    # code averaged over *all* axes and therefore returned a scalar instead
    # of a per-node [B, D] tensor like every other aggregator here.
    h_n = torch.mean(torch.pow(h - h_mean, n), dim=1)
    rooted_h_n = torch.sign(h_n) * torch.pow(torch.abs(h_n) + EPS, 1.0 / n)
    return rooted_h_n


def aggregate_moment_3(h):
    """Third-moment (skewness-like) aggregator."""
    return aggregate_moment(h, n=3)


def aggregate_moment_4(h):
    """Fourth-moment (kurtosis-like) aggregator."""
    return aggregate_moment(h, n=4)


def aggregate_moment_5(h):
    """Fifth-moment aggregator."""
    return aggregate_moment(h, n=5)


def aggregate_sum(h):
    """Sum over the neighbour axis."""
    return torch.sum(h, dim=1)
# Registry mapping aggregator names (as used in configuration strings)
# to their implementation functions.
AGGREGATORS = {
    "mean": aggregate_mean,
    "sum": aggregate_sum,
    "max": aggregate_max,
    "min": aggregate_min,
    "std": aggregate_std,
    "var": aggregate_var,
    "moment3": aggregate_moment_3,
    "moment4": aggregate_moment_4,
    "moment5": aggregate_moment_5,
}
# PNA Scalers ---------------------------------------------------------------------------------
# each scaler is a function that takes as input X (B x N x Din), adj (B x N x N) and
# avg_d (dictionary containing averages over training set) and returns X_scaled (B x N x Din) as output
def scale_identity(h, D=None, avg_d=None):
    """No-op scaler: return the node features unchanged."""
    return h


def scale_amplification(h, D, avg_d):
    """Amplify by log(D + 1) / d, where d = E[log(D + 1)] over training."""
    factor = np.log(D + 1) / avg_d["log"]
    return h * factor


def scale_attenuation(h, D, avg_d):
    """Attenuate by d / log(D + 1): the inverse of the amplification scaler."""
    factor = avg_d["log"] / np.log(D + 1)
    return h * factor
# Registry mapping scaler names (as used in configuration strings)
# to their implementation functions.
SCALERS = {
    "identity": scale_identity,
    "amplification": scale_amplification,
    "attenuation": scale_attenuation,
}
# Names of torch.nn activation modules that get_activation() may look up
# (matched case-insensitively); "None" means "no activation".
SUPPORTED_ACTIVATION_MAP = {
    "ReLU",
    "Sigmoid",
    "Tanh",
    "ELU",
    "SELU",
    "GLU",
    "LeakyReLU",
    "Softplus",
    "None",
}


def get_activation(activation):
    """Return the activation function represented by the input.

    Args:
        activation: a callable (returned unchanged) or a case-insensitive
            name from ``SUPPORTED_ACTIVATION_MAP``.

    Returns:
        A freshly constructed ``torch.nn`` activation module, or ``None``
        when the name is "none".

    Raises:
        ValueError: if the string names no supported activation.
    """
    if activation and callable(activation):
        # activation is already a function
        return activation
    # search SUPPORTED_ACTIVATION_MAP for a case-insensitive match
    matches = [
        name for name in SUPPORTED_ACTIVATION_MAP
        if activation.lower() == name.lower()
    ]
    # BUG-HARDENING: the previous implementation validated this with
    # ``assert``, which is stripped under ``python -O``; raise explicitly.
    if len(matches) != 1:
        raise ValueError(f"Unhandled activation function: {activation!r}")
    name = matches[0]
    if name.lower() == "none":
        return None
    return vars(torch.nn.modules.activation)[name]()
class Set2Set(torch.nn.Module):
    r"""Set2Set global pooling operator from "Order Matters: Sequence to
    sequence for sets" (https://arxiv.org/abs/1511.06391).

    Repeatedly attends over the node set with an LSTM-driven query and
    returns a permutation-invariant summary of twice the input width:

    .. math::
        \mathbf{q}_t = \mathrm{LSTM}(\mathbf{q}^{*}_{t-1}),\quad
        \alpha_{i,t} = \mathrm{softmax}(\mathbf{x}_i \cdot \mathbf{q}_t),\quad
        \mathbf{r}_t = \sum_i \alpha_{i,t} \mathbf{x}_i,\quad
        \mathbf{q}^{*}_t = \mathbf{q}_t \, \Vert \, \mathbf{r}_t

    Arguments
    ---------
    nin: int
        Size of each input sample.
    nhid: int, optional
        Dimension of the set representation (must exceed ``nin``); defaults
        to ``2 * nin``.
    steps: int, optional
        Number of iterations; defaults to the number of nodes.
    num_layers: int, optional
        Number of stacked LSTM layers (default 1).
    """

    def __init__(
        self, nin, nhid=None, steps=None, num_layers=1, activation=None, device="cpu"
    ):
        super(Set2Set, self).__init__()
        self.steps = steps
        self.nin = nin
        self.nhid = 2 * nin if nhid is None else nhid
        if self.nhid <= self.nin:
            raise ValueError("Set2Set hidden_dim should be larger than input_dim")
        # The pooled vector concatenates the LSTM query with the readout.
        self.lstm_output_dim = self.nhid - self.nin
        self.num_layers = num_layers
        self.lstm = nn.LSTM(self.nhid, self.nin, num_layers=num_layers,
                            batch_first=True).to(device)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        r"""Pool a node-feature tensor of shape (B, N, D) into (B, 2 * D)."""
        n_batch = x.shape[0]
        n_iters = self.steps or x.shape[1]
        state = (
            x.new_zeros((self.num_layers, n_batch, self.nin)),
            x.new_zeros((self.num_layers, n_batch, self.nin)),
        )
        q_star = x.new_zeros(n_batch, 1, self.nhid)
        for _ in range(n_iters):
            # query: (n_batch, 1, nin)
            query, state = self.lstm(q_star, state)
            # Attention score of every node against the current query.
            energies = torch.matmul(x, torch.transpose(query, 1, 2))
            weights = self.softmax(energies)
            readout = torch.sum(weights * x, dim=1, keepdim=True)
            q_star = torch.cat([query, readout], dim=-1)
        return torch.squeeze(q_star, dim=1)
class FCLayer(nn.Module):
    r"""
    A simple fully connected and customizable layer centered around
    ``torch.nn.Linear``.  The order in which transformations are applied is:

    #. Dense layer
    #. Activation
    #. Dropout (if applicable)
    #. Batch normalization (if applicable)

    Arguments
    ----------
    in_size: int
        Input dimension of the layer (the torch.nn.Linear)
    out_size: int
        Output dimension of the layer.
    activation: str or callable, optional
        Activation function to use.
        (Default value = relu)
    dropout: float, optional
        The ratio of units to dropout. No dropout by default.
        (Default value = 0.)
    b_norm: bool, optional
        Whether to use batch normalization
        (Default value = False)
    bias: bool, optional
        Whether to enable bias in for the linear layer.
        (Default value = True)
    init_fn: callable, optional
        Initialization function for the weight of the layer, invoked as
        ``init_fn(weight, 1 / in_size)``.  Defaults to
        ``nn.init.xavier_uniform_``.
        (Default value = None)
    """

    def __init__(
        self,
        in_size,
        out_size,
        activation="relu",
        dropout=0.0,
        b_norm=False,
        bias=True,
        init_fn=None,
        device="cpu",
    ):
        super(FCLayer, self).__init__()

        # Record constructor arguments.  ``__class__`` appears in locals()
        # because ``super`` is referenced in this method.
        self.__params = locals()
        del self.__params["__class__"]
        del self.__params["self"]

        self.in_size = in_size
        self.out_size = out_size
        self.bias = bias
        self.linear = nn.Linear(in_size, out_size, bias=bias).to(device)
        self.dropout = None
        self.b_norm = None
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        if b_norm:
            self.b_norm = nn.BatchNorm1d(out_size).to(device)
        self.activation = get_activation(activation)
        # BUG FIX: the ``init_fn`` argument used to be silently ignored
        # (``self.init_fn`` was always set to xavier_uniform_).  Honour it,
        # falling back to Xavier-uniform only when it is None.
        self.init_fn = init_fn if init_fn is not None else nn.init.xavier_uniform_
        self.reset_parameters()

    def reset_parameters(self, init_fn=None):
        """Re-initialise the linear weight (and zero the bias, if present)."""
        init_fn = init_fn or self.init_fn
        if init_fn is not None:
            # NOTE(review): the second argument reaches xavier_uniform_ as
            # its ``gain``; confirm the 1/in_size scaling is intentional.
            init_fn(self.linear.weight, 1 / self.in_size)
        if self.bias:
            self.linear.bias.data.zero_()

    def forward(self, x):
        h = self.linear(x)
        if self.activation is not None:
            h = self.activation(h)
        if self.dropout is not None:
            h = self.dropout(h)
        if self.b_norm is not None:
            # BatchNorm1d normalises dim 1; transpose when features are last.
            if h.shape[1] != self.out_size:
                h = self.b_norm(h.transpose(1, 2)).transpose(1, 2)
            else:
                h = self.b_norm(h)
        return h

    def __repr__(self):
        return (
            self.__class__.__name__
            + " ("
            + str(self.in_size)
            + " -> "
            + str(self.out_size)
            + ")"
        )
class MLP(nn.Module):
    """Multi-layer perceptron assembled from a chain of FCLayer blocks."""

    def __init__(
        self,
        in_size,
        hidden_size,
        out_size,
        layers,
        mid_activation="relu",
        last_activation="none",
        dropout=0.0,
        mid_b_norm=False,
        last_b_norm=False,
        device="cpu",
    ):
        super(MLP, self).__init__()
        self.in_size = in_size
        self.hidden_size = hidden_size
        self.out_size = out_size
        self.fully_connected = nn.ModuleList()
        if layers <= 1:
            # Degenerate case: one layer maps input straight to output.
            self.fully_connected.append(
                FCLayer(in_size, out_size, activation=last_activation,
                        b_norm=last_b_norm, device=device, dropout=dropout))
        else:
            # Input layer, (layers - 2) hidden layers, then the output layer.
            self.fully_connected.append(
                FCLayer(in_size, hidden_size, activation=mid_activation,
                        b_norm=mid_b_norm, device=device, dropout=dropout))
            for _ in range(layers - 2):
                self.fully_connected.append(
                    FCLayer(hidden_size, hidden_size, activation=mid_activation,
                            b_norm=mid_b_norm, device=device, dropout=dropout))
            self.fully_connected.append(
                FCLayer(hidden_size, out_size, activation=last_activation,
                        b_norm=last_b_norm, device=device, dropout=dropout))

    def forward(self, x):
        out = x
        for layer in self.fully_connected:
            out = layer(out)
        return out

    def __repr__(self):
        return "{} ({} -> {})".format(
            self.__class__.__name__, self.in_size, self.out_size)
class GRU(nn.Module):
    """Wrapper around ``nn.GRU`` that zero-pads its inputs to fixed widths."""

    def __init__(self, input_size, hidden_size, device):
        super(GRU, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size).to(device)

    def forward(self, x, y):
        """
        :param x: shape (B, N, Din) where Din <= input_size (padded up).
        :param y: shape (B, N, Dh) where Dh <= hidden_size (padded up).
        :return: shape (B, N, Dh)
        """
        assert x.shape[-1] <= self.input_size and y.shape[-1] <= self.hidden_size
        batch, nodes = x.shape[0], x.shape[1]
        # Collapse (B, N) into one "sequence" axis of length 1 for the GRU.
        x = x.reshape(1, batch * nodes, -1).contiguous()
        y = y.reshape(1, batch * nodes, -1).contiguous()
        # Zero-pad trailing feature dimensions up to the expected widths.
        if x.shape[-1] < self.input_size:
            x = F.pad(x, [0, self.input_size - x.shape[-1]],
                      mode="constant", value=0)
        if y.shape[-1] < self.hidden_size:
            y = F.pad(y, [0, self.hidden_size - y.shape[-1]],
                      mode="constant", value=0)
        # Keep only the final hidden state, restoring the (B, N, Dh) layout.
        hidden = self.gru(x, y)[1]
        return hidden.reshape(batch, nodes, -1)
class S2SReadout(nn.Module):
    """Graph readout: Set2Set pooling of node features followed by an MLP."""

    def __init__(
        self,
        in_size,
        hidden_size,
        out_size,
        fc_layers=3,
        device="cpu",
        final_activation="relu",
    ):
        super(S2SReadout, self).__init__()
        # Permutation-invariant aggregation over the node set.
        self.set2set = Set2Set(in_size, device=device)
        # Fully connected head; Set2Set doubles the feature dimension.
        self.mlp = MLP(in_size=2 * in_size, hidden_size=hidden_size,
                       out_size=out_size, layers=fc_layers,
                       mid_activation="relu", last_activation=final_activation,
                       mid_b_norm=True, last_b_norm=False, device=device)

    def forward(self, x):
        pooled = self.set2set(x)
        return self.mlp(pooled)
| StarcoderdataPython |
132910 | <reponame>18F/federalist-garden-build-py
import os
from invoke import MockContext, Result
from tasks import clone_repo, push_repo_remote
class TestCloneRepo():
    """Smoke test: the ``clone_repo`` invoke task runs against a mock shell."""
    def test_it_is_callable(self):
        # The task reads the GitHub token from the environment.
        os.environ['GITHUB_TOKEN'] = 'fake_token'
        # MockContext returns the canned result instead of running git.
        ctx = MockContext(run=Result('git clone result'))
        clone_repo(ctx, owner='owner', repository='repo', branch='master')
class TestPushRepoRemote():
    """Smoke test: the ``push_repo_remote`` invoke task runs against mocks."""
    def test_it_is_callable(self):
        # The task reads the GitHub token from the environment.
        os.environ['GITHUB_TOKEN'] = 'fake_token'
        # Two canned results: one for `git remote add`, one for `git push`.
        ctx = MockContext(run=[
            Result('git remote add result'),
            Result('git push result')
            ]
        )
        push_repo_remote(ctx, owner='owner', repository='repo',
                         branch='branch', remote_name='boop')
| StarcoderdataPython |
'''
Show how much foreign currency the amount currently in your wallet can buy.
(Prompts and output remain in Portuguese, matching the original script.)
'''
real = float(input('Dinheiro Atual na Carteira? R$'))
print(f'você tem R${real:.2f}')
# Fixed exchange rates (BRL per unit) at the time the script was written.
for simbolo, cotacao in (('US$', 5.51), ('€', 6.37), ('£', 7.53), ('CHF', 5.97)):
    print(f'pode comprar até {simbolo}{real / cotacao:.2f}')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.