hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
117402d75f81fcb6c84866b477ee03e3acc57282 | 2,385 | py | Python | pypika/tests/test_parameter.py | trust-kaz/pypika | 78ce885ca445e63bb6168d4456dd11072c60e63b | [
"Apache-2.0"
] | null | null | null | pypika/tests/test_parameter.py | trust-kaz/pypika | 78ce885ca445e63bb6168d4456dd11072c60e63b | [
"Apache-2.0"
] | null | null | null | pypika/tests/test_parameter.py | trust-kaz/pypika | 78ce885ca445e63bb6168d4456dd11072c60e63b | [
"Apache-2.0"
] | null | null | null | import unittest
from pypika import Tables, Query, Parameter
class ParametrizedTests(unittest.TestCase):
    """Verify that Parameter placeholders pass through SQL rendering unchanged."""

    table_abc, table_efg = Tables("abc", "efg")

    def test_param_insert(self):
        # Three positional "?" placeholders in an INSERT statement.
        query = (
            Query.into(self.table_abc)
            .columns("a", "b", "c")
            .insert(Parameter("?"), Parameter("?"), Parameter("?"))
        )
        self.assertEqual('INSERT INTO "abc" ("a","b","c") VALUES (?,?,?)', query.get_sql())

    def test_param_select_join(self):
        # "%s"-style placeholders across a join with two WHERE clauses.
        query = (
            Query.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.category == Parameter("%s"))
            .join(self.table_efg)
            .on(self.table_abc.id == self.table_efg.abc_id)
            .where(self.table_efg.date >= Parameter("%s"))
            .limit(10)
        )
        expected = (
            'SELECT * FROM "abc" JOIN "efg" ON "abc"."id"="efg"."abc_id"'
            ' WHERE "abc"."category"=%s AND "efg"."date">=%s LIMIT 10'
        )
        self.assertEqual(expected, query.get_sql())

    def test_param_select_subquery(self):
        # Oracle-style "&n" placeholders, one inside an IN (...) subquery.
        query = (
            Query.from_(self.table_abc)
            .select("*")
            .where(self.table_abc.category == Parameter("&1"))
            .where(
                self.table_abc.id.isin(
                    Query.from_(self.table_efg)
                    .select(self.table_efg.abc_id)
                    .where(self.table_efg.date >= Parameter("&2"))
                )
            )
            .limit(10)
        )
        expected = (
            'SELECT * FROM "abc" WHERE "category"=&1 AND "id" IN'
            ' (SELECT "abc_id" FROM "efg" WHERE "date">=&2) LIMIT 10'
        )
        self.assertEqual(expected, query.get_sql())

    def test_join(self):
        # Named ":name" placeholders in both an outer query and a joined subquery.
        inner = (
            Query.from_(self.table_efg)
            .select(self.table_efg.fiz, self.table_efg.buz)
            .where(self.table_efg.buz == Parameter(":buz"))
        )
        query = (
            Query.from_(self.table_abc)
            .join(inner)
            .on(self.table_abc.bar == inner.buz)
            .select(self.table_abc.foo, inner.fiz)
            .where(self.table_abc.bar == Parameter(":bar"))
        )
        expected = (
            'SELECT "abc"."foo","sq0"."fiz" FROM "abc" JOIN (SELECT "fiz","buz" FROM "efg" WHERE "buz"=:buz)'
            ' "sq0" ON "abc"."bar"="sq0"."buz" WHERE "abc"."bar"=:bar'
        )
        self.assertEqual(expected, query.get_sql())
| 32.22973 | 130 | 0.502725 |
cc3289066081e4e8ce5afe8f3c4a02b7cd1d5799 | 1,203 | py | Python | iwatlas/stats.py | Yadidya5/iwatlas | 7cf2a8778b9ebefe564f93efe013d6d3743c94ec | [
"BSD-2-Clause"
] | 7 | 2019-12-24T08:24:46.000Z | 2022-01-30T07:06:47.000Z | iwatlas/stats.py | Yadidya5/iwatlas | 7cf2a8778b9ebefe564f93efe013d6d3743c94ec | [
"BSD-2-Clause"
] | 2 | 2020-10-12T05:40:24.000Z | 2022-01-30T07:25:27.000Z | iwatlas/stats.py | Yadidya5/iwatlas | 7cf2a8778b9ebefe564f93efe013d6d3743c94ec | [
"BSD-2-Clause"
] | 2 | 2021-05-31T10:20:27.000Z | 2022-01-30T07:06:52.000Z | """
Statistical metrics used for harmonic analysis
"""
import numpy as np
def harmonic_indices(ntide, na):
    """
    Return the index of each central (tidal) frequency.

    Each constituent occupies a band of ``2*na + 1`` harmonics; the fixed,
    central harmonic of constituent ``ff`` sits at ``(ff+1)*(2*na+1) - na - 1``.

    Parameters
    ----------
    ntide : int
        Number of tidal constituents.
    na : int
        Number of annual side-band harmonics on each side of the center.

    Returns
    -------
    list of int
        One index per constituent, in order.
    """
    # Comprehension replaces the manual append loop; values are unchanged.
    return [(ff + 1) * (2 * na + 1) - na - 1 for ff in range(ntide)]
def TVFH(amp2, varin):
    """
    Total Variance Fraction (percent) explained by the harmonic model.

    ``amp2`` holds squared amplitudes (harmonics along axis 0); ``varin``
    is the total variance of the input signal.
    """
    harmonic_variance = 0.5 * np.sum(amp2, axis=0)
    return harmonic_variance / varin * 100
def SVFH(amp2, varin, ntide, na):
    """
    Stationary variance fraction (percent): the share of variance carried
    by the fixed (central) harmonics only.
    """
    center_idx = harmonic_indices(ntide, na)
    stationary_variance = 0.5 * np.sum(amp2[center_idx, ...], axis=0)
    return stationary_variance / varin * 100
def VF_m(amp2, m, ntide, na):
    """
    Variance fraction (percent) within frequency band ``m``, annual
    side-band harmonics included, relative to the total harmonic variance.
    """
    center = harmonic_indices(ntide, na)[m]
    band = amp2[center - na:center + na + 1, ...]
    return np.sum(band, axis=0) / np.sum(amp2, axis=0) * 100
def NSVF_m(amp2, m, ntide, na):
    """
    Nonstationary variance fraction (percent) of frequency band ``m``:
    the band's variance minus its central (stationary) harmonic, relative
    to the whole band.
    """
    center = harmonic_indices(ntide, na)[m]
    M2 = np.sum(amp2[center - na:center + na + 1, ...], axis=0)
    return (M2 - amp2[center, ...]) / M2 * 100
| 26.152174 | 86 | 0.61596 |
0b32273ceb96bd7671d0c62d65620cdcf39287d4 | 9,017 | py | Python | slack_sdk/models/views/__init__.py | ggml1/python-slack-sdk | 1fddbffecb55abf4841ed03b1c59f8879148be9f | [
"MIT"
] | 2 | 2021-09-11T06:18:24.000Z | 2021-10-30T14:00:48.000Z | slack_sdk/models/views/__init__.py | ggml1/python-slack-sdk | 1fddbffecb55abf4841ed03b1c59f8879148be9f | [
"MIT"
] | 1 | 2021-09-12T23:26:37.000Z | 2021-09-12T23:26:37.000Z | slack_sdk/models/views/__init__.py | ggml1/python-slack-sdk | 1fddbffecb55abf4841ed03b1c59f8879148be9f | [
"MIT"
] | 1 | 2021-09-11T06:18:21.000Z | 2021-09-11T06:18:21.000Z | import copy
import logging
from typing import Optional, Union, Dict, Sequence
from slack_sdk.models.basic_objects import JsonObject, JsonValidator
from slack_sdk.models.blocks import Block, TextObject, PlainTextObject, Option
class View(JsonObject):
    """View object for modals and Home tabs.
    https://api.slack.com/reference/surfaces/views
    """

    # Surface types accepted by _validate_type below.
    types = ["modal", "home", "workflow_step"]

    # Attribute names exposed for serialization (consumed by the JsonObject
    # base class -- not shown in this module).
    attributes = {
        "type",
        "id",
        "callback_id",
        "external_id",
        "team_id",
        "bot_id",
        "app_id",
        "root_view_id",
        "previous_view_id",
        "title",
        "submit",
        "close",
        "blocks",
        "private_metadata",
        "state",
        "hash",
        "clear_on_close",
        "notify_on_close",
    }

    def __init__(
        self,
        # "modal", "home", and "workflow_step"
        type: str,  # skipcq: PYL-W0622
        id: Optional[str] = None,  # skipcq: PYL-W0622
        callback_id: Optional[str] = None,
        external_id: Optional[str] = None,
        team_id: Optional[str] = None,
        bot_id: Optional[str] = None,
        app_id: Optional[str] = None,
        root_view_id: Optional[str] = None,
        previous_view_id: Optional[str] = None,
        title: Union[str, dict, PlainTextObject] = None,
        submit: Optional[Union[str, dict, PlainTextObject]] = None,
        close: Optional[Union[str, dict, PlainTextObject]] = None,
        blocks: Optional[Sequence[Union[dict, Block]]] = None,
        private_metadata: Optional[str] = None,
        state: Optional[Union[dict, "ViewState"]] = None,
        hash: Optional[str] = None,  # skipcq: PYL-W0622
        clear_on_close: Optional[bool] = None,
        notify_on_close: Optional[bool] = None,
        **kwargs,
    ):
        """Build a view.

        Unrecognized keyword arguments are retained in
        ``additional_attributes`` so newer API fields are not dropped.
        """
        self.type = type
        self.id = id
        self.callback_id = callback_id
        self.external_id = external_id
        self.team_id = team_id
        self.bot_id = bot_id
        self.app_id = app_id
        self.root_view_id = root_view_id
        self.previous_view_id = previous_view_id
        # title/submit/close accept a str, dict, or text object; normalize
        # them to TextObject instances (plain_text by default).
        self.title = TextObject.parse(title, default_type=PlainTextObject.type)
        self.submit = TextObject.parse(submit, default_type=PlainTextObject.type)
        self.close = TextObject.parse(close, default_type=PlainTextObject.type)
        self.blocks = Block.parse_all(blocks)
        self.private_metadata = private_metadata
        self.state = state
        # A dict-shaped state payload is upgraded to a ViewState instance.
        if self.state is not None and isinstance(self.state, dict):
            self.state = ViewState(**self.state)
        self.hash = hash
        self.clear_on_close = clear_on_close
        self.notify_on_close = notify_on_close
        self.additional_attributes = kwargs

    # Field length limits enforced by the validators below.
    title_max_length = 24
    blocks_max_length = 100
    close_max_length = 24
    submit_max_length = 24
    private_metadata_max_length = 3000
    callback_id_max_length: int = 255

    @JsonValidator('type must be either "modal", "home" or "workflow_step"')
    def _validate_type(self):
        return self.type is not None and self.type in self.types

    @JsonValidator(f"title must be between 1 and {title_max_length} characters")
    def _validate_title_length(self):
        return self.title is None or 1 <= len(self.title.text) <= self.title_max_length

    @JsonValidator(f"views must contain between 1 and {blocks_max_length} blocks")
    def _validate_blocks_length(self):
        return self.blocks is None or 0 < len(self.blocks) <= self.blocks_max_length

    @JsonValidator("home view cannot have submit and close")
    def _validate_home_tab_structure(self):
        # Home tabs have no submit/close buttons; other types are unrestricted.
        return self.type != "home" or (
            self.type == "home" and self.close is None and self.submit is None
        )

    @JsonValidator(f"close cannot exceed {close_max_length} characters")
    def _validate_close_length(self):
        return self.close is None or len(self.close.text) <= self.close_max_length

    @JsonValidator(f"submit cannot exceed {submit_max_length} characters")
    def _validate_submit_length(self):
        return self.submit is None or len(self.submit.text) <= int(
            self.submit_max_length
        )

    @JsonValidator(
        f"private_metadata cannot exceed {private_metadata_max_length} characters"
    )
    def _validate_private_metadata_max_length(self):
        return (
            self.private_metadata is None
            or len(self.private_metadata) <= self.private_metadata_max_length
        )

    @JsonValidator(f"callback_id cannot exceed {callback_id_max_length} characters")
    def _validate_callback_id_max_length(self):
        return (
            self.callback_id is None
            or len(self.callback_id) <= self.callback_id_max_length
        )

    def __str__(self):
        # Render only the attributes that are set (helper from JsonObject).
        return str(self.get_non_null_attributes())

    def __repr__(self):
        return self.__str__()
class ViewState(JsonObject):
    """The ``state`` container of a view: per-block, per-action input values.

    ``values`` maps ``block_id -> action_id -> ViewStateValue``.
    """

    attributes = {"values"}
    logger = logging.getLogger(__name__)

    @classmethod
    def _show_warning_about_unknown(cls, value):
        # Log (rather than raise) when an entry has a type we cannot convert.
        c = value.__class__
        name = ".".join([c.__module__, c.__name__])
        cls.logger.warning(
            f"Unknown type for view.state.values detected ({name}) and ViewState skipped to add it"
        )

    def __init__(
        self,
        *,
        values: Dict[str, Dict[str, Union[dict, "ViewStateValue"]]],
    ):
        """Normalize nested dict entries into ViewStateValue instances.

        Entries of unknown type are skipped with a warning.
        """
        value_objects: Dict[str, Dict[str, ViewStateValue]] = {}
        new_state_values = copy.copy(values)
        if isinstance(new_state_values, dict):  # just in case
            for block_id, actions in new_state_values.items():
                if actions is None:  # skipcq: PYL-R1724
                    continue
                elif isinstance(actions, dict):
                    new_actions = copy.copy(actions)
                    for action_id, v in actions.items():
                        if isinstance(v, dict):
                            d = copy.copy(v)
                            value_object = ViewStateValue(**d)
                        elif isinstance(v, ViewStateValue):
                            value_object = v
                        else:
                            self._show_warning_about_unknown(v)
                            continue
                        new_actions[action_id] = value_object
                    value_objects[block_id] = new_actions
                else:
                    # BUG FIX: previously passed `v`, which is unbound unless a
                    # dict-valued block was processed earlier (NameError risk),
                    # and in any case names the wrong object; the unknown value
                    # here is `actions` itself.
                    self._show_warning_about_unknown(actions)
        self.values = value_objects

    def to_dict(self, *args) -> Dict[str, Dict[str, Dict[str, dict]]]:  # type: ignore
        """Serialize back to plain nested dicts (validates first)."""
        self.validate_json()
        if self.values is not None:
            dict_values: Dict[str, Dict[str, dict]] = {}
            for block_id, actions in self.values.items():
                if actions:
                    dict_value: Dict[str, dict] = {
                        action_id: value.to_dict()  # type: ignore
                        for action_id, value in actions.items()  # type: ignore
                    }
                    dict_values[block_id] = dict_value
            return {"values": dict_values}  # type: ignore
        else:
            return {}
class ViewStateValue(JsonObject):
    """A single input value inside ``view.state.values``."""

    # Attribute names exposed for serialization by the JsonObject base.
    attributes = {
        "type",
        "value",
        "selected_date",
        "selected_conversation",
        "selected_channel",
        "selected_user",
        "selected_option",
        "selected_conversations",
        "selected_channels",
        "selected_users",
        "selected_options",
    }

    def __init__(
        self,
        *,
        type: Optional[str] = None,  # skipcq: PYL-W0622
        value: Optional[str] = None,
        selected_date: Optional[str] = None,
        selected_conversation: Optional[str] = None,
        selected_channel: Optional[str] = None,
        selected_user: Optional[str] = None,
        selected_option: Optional[str] = None,
        selected_conversations: Optional[Sequence[str]] = None,
        selected_channels: Optional[Sequence[str]] = None,
        selected_users: Optional[Sequence[str]] = None,
        selected_options: Optional[Sequence[Union[dict, Option]]] = None,
    ):
        self.type = type
        self.value = value
        self.selected_date = selected_date
        self.selected_conversation = selected_conversation
        self.selected_channel = selected_channel
        self.selected_user = selected_user
        self.selected_option = selected_option
        self.selected_conversations = selected_conversations
        self.selected_channels = selected_channels
        self.selected_users = selected_users
        if isinstance(selected_options, list):
            # Normalize entries to Option instances; items that are neither
            # Option nor dict are silently dropped, as before.
            self.selected_options = [
                opt if isinstance(opt, Option) else Option(**opt)
                for opt in selected_options
                if isinstance(opt, (Option, dict))
            ]
        else:
            self.selected_options = selected_options
| 36.506073 | 99 | 0.608185 |
ae2f7b29ad18644c17925105f2d3a28858e79538 | 150 | py | Python | tests/test_ec2/test_windows.py | jonnangle/moto-1 | 40b4e299abb732aad7f56cc0f680c0a272a46594 | [
"Apache-2.0"
] | 1 | 2021-12-12T04:23:06.000Z | 2021-12-12T04:23:06.000Z | tests/test_ec2/test_windows.py | jonnangle/moto-1 | 40b4e299abb732aad7f56cc0f680c0a272a46594 | [
"Apache-2.0"
] | 17 | 2020-08-28T12:53:56.000Z | 2020-11-10T01:04:46.000Z | tests/test_ec2/test_windows.py | jonnangle/moto-1 | 40b4e299abb732aad7f56cc0f680c0a272a46594 | [
"Apache-2.0"
] | 2 | 2021-11-24T08:05:43.000Z | 2021-11-25T16:18:48.000Z | from __future__ import unicode_literals
import boto
import sure # noqa
from moto import mock_ec2
@mock_ec2
def test_windows():
    """Placeholder for Windows-instance EC2 tests; currently exercises nothing."""
    pass
| 13.636364 | 40 | 0.726667 |
36c71b9c631c839ac1b2add810fe622147587cda | 2,764 | py | Python | discortbot.py | misc4747/discord_voiceroid_tts | 47e36c634511ebcb4c0b9c1007c10b224e4ac8dd | [
"MIT"
] | null | null | null | discortbot.py | misc4747/discord_voiceroid_tts | 47e36c634511ebcb4c0b9c1007c10b224e4ac8dd | [
"MIT"
] | null | null | null | discortbot.py | misc4747/discord_voiceroid_tts | 47e36c634511ebcb4c0b9c1007c10b224e4ac8dd | [
"MIT"
] | null | null | null | import discord
import text2wav
import ffmpeg
import time
import re
TOKEN = "TOKEN" #Replace TOKEN with your discord bot token
client = discord.Client()
queue = []
def play_voice(path):
    """Play the audio file at ``path`` on the current voice connection.

    Does nothing if a clip is already playing; ``after_playing`` is
    registered so the queue advances when playback finishes.
    """
    # Debug output: show the pending playback queue.
    print(queue)
    if not voice.is_playing():
        # NOTE(review): relies on the module-global `voice` set by the
        # !summon handler; calling this before !summon raises NameError.
        voice.play(discord.FFmpegPCMAudio(path), after=after_playing)
def after_playing(err):
    """Playback-finished callback (registered via voice.play): advance the queue."""
    # Drop the clip that just finished.
    queue.pop(0)
    if queue:
        # Short pause, then start the next queued clip.
        time.sleep(1)
        play_voice(queue[0])
    else:
        print('end')
def make_input_text(text):
    """Replace raw Discord mention/emoji markup in ``text`` with readable names.

    ``<@!12345>`` becomes the mentioned user's name and ``<:name:67890>``
    becomes the emoji's name, each resolved through the bot ``client``.

    BUG FIX: the original used ``re.match``, which only detects a mention at
    the very start of the message, and then substituted that single resolved
    name for *every* mention.  Using ``re.sub`` with replacement functions
    handles mentions anywhere in the text, each resolved individually.
    """
    userid_pattern = r"<@!(?P<user_id>\d+)>"
    emoji_pattern = r"<:.+:(?P<emoji_id>\d+)>"

    def _user_name(match):
        return client.get_user(int(match.group("user_id"))).name

    def _emoji_name(match):
        return client.get_emoji(int(match.group("emoji_id"))).name

    text = re.sub(userid_pattern, _user_name, text)
    text = re.sub(emoji_pattern, _emoji_name, text)
    return text
@client.event
async def on_ready():
    # Fired once the bot has logged in; prints the bot user's name
    # (message text is Japanese: "logged in as ...").
    print('{0.user}'.format(client)+'としてログインしました。')
@client.event
async def on_message(message):
    """Command dispatcher: handles !summon, !leave and !tts messages.

    NOTE(review): the `time.sleep(5)` calls below block the entire event
    loop for five seconds; `await asyncio.sleep(5)` would be the
    non-blocking equivalent -- confirm before changing.
    """
    global voice
    # Ignore the bot's own messages to avoid feedback loops.
    if message.author == client.user:
        return
    if message.content == "!summon":
        command_ary = message.content.split()
        # NOTE(review): content equals "!summon" exactly here, so the split
        # always yields one token and the else branch below is unreachable.
        if len(command_ary) == 1:
            try:
                # Join the voice channel the author is currently in.
                voice_channel = message.author.voice.channel
                voice = await voice_channel.connect()
            except:
                # Author is not in a voice channel; show a transient notice
                # (Japanese: "join a voice chat before using !summon").
                attention = await message.channel.send('ボイスチャットに入ってから!summonコマンドをつかってください。')
                time.sleep(5)
                await attention.delete()
        else:
            print("hoge")
    if message.content == "!leave":
        try:
            if voice.is_connected():
                await voice.disconnect()
            else:
                # Bot is not in a voice channel (Japanese notice), shown briefly.
                attention = await message.channel.send('botはボイスチャンネルに参加していません。')
                time.sleep(5)
                await attention.delete()
        except NameError:
            # `voice` was never set (bot never summoned); same notice as above.
            attention = await message.channel.send('botはボイスチャンネルに参加していません。')
            time.sleep(5)
            await attention.delete()
    if message.content.startswith('!tts'):
        # Split off everything after the command as the text to speak.
        command_ary = message.content.split(' ', maxsplit=1)
        if len(command_ary) > 1 and voice.is_connected():
            print(command_ary[1])
            # Resolve mentions/emoji, synthesize audio, queue it, and play.
            text = make_input_text(command_ary[1])
            mp3_path = text2wav.generate_wav(text)
            queue.append(mp3_path)
            play_voice(mp3_path)
        else:
            # Usage hint (Japanese: "type !tts <text>"), shown briefly.
            attention = await message.channel.send('『!tts hogehoge』と入力してください。')
            time.sleep(5)
            await attention.delete()
client.run(TOKEN)
| 29.72043 | 92 | 0.601664 |
273e6d2091491d6ee3cdbb93daa919838a91b081 | 2,860 | py | Python | ed2d/assets/objloader.py | explosiveduck/cubix | 16e7a298a83fe53174bda8ec77dfcf6869ed5336 | [
"BSD-2-Clause"
] | 1 | 2015-11-02T02:11:18.000Z | 2015-11-02T02:11:18.000Z | ed2d/assets/objloader.py | explosiveduck/cubix | 16e7a298a83fe53174bda8ec77dfcf6869ed5336 | [
"BSD-2-Clause"
] | 29 | 2015-06-09T19:27:49.000Z | 2016-03-08T06:13:24.000Z | ed2d/assets/objloader.py | explosiveduck/cubix | 16e7a298a83fe53174bda8ec77dfcf6869ed5336 | [
"BSD-2-Clause"
] | null | null | null | from ed2d.assets.mtlloader import MTL
from ed2d import files
class OBJ(object):
    def __init__(self, fileName):
        ''' Wavefront .obj file parser.

        Reads ``data/models/<fileName>.obj`` and fills ``self.fmvnig``
        with per-material face data: material name -> [vertices, uvs,
        normals], expanded from the face index triplets.
        '''
        objPath = files.resolve_path('data', 'models', fileName + '.obj')

        # material name -> [face vertices, face uvs, face normals]
        self.fmvnig = {}

        # Shared temporary storage of data
        self.tempVertices = []
        self.tempNormals = []
        self.tempUVs = []

        # Load the obj file
        with open(objPath, 'r') as objfile:
            self.lines = objfile.readlines()

        self.parse()

    def parse(self):
        ''' Perform the parsing of the obj format '''
        matname = None  # material currently in effect for faces
        valueType = None
        for line in self.lines:
            valueType, value = line.strip().split(' ', 1)

            # Don't bother unless the following key words exist in the line
            if valueType not in ['o', 'g', 'f', 'v', 'vt', 'vn', 'usemtl', 'mtllib']:
                continue

            value = value.split(' ')

            # Check first and continue on early because of string splitting
            if valueType == "usemtl":
                matname = value[0]
                continue

            if valueType in ['g', 'o']:
                # These objects reset state basically
                matname = None
                continue

            if valueType == 'mtllib':
                mtlpath = files.resolve_path('data', 'models', value[0])
                # Load the mtl file
                self.mtlfile = MTL(mtlpath)
                # Pre-create an empty [vertices, uvs, normals] triple per material.
                for material in self.mtlfile.data.keys():
                    self.fmvnig[material] = [ [], [], [] ]
                continue

            if valueType == "f":
                # Faces are "v/vt/vn" index triplets (1-based; fields may be empty).
                face = [item.split("/") for item in value]
                for typeGroup in face:
                    for typeIndex in range(len(typeGroup)):
                        if typeIndex == 0: # Vertex
                            typeSource = self.tempVertices
                        elif typeIndex == 1: # UV
                            typeSource = self.tempUVs
                        elif typeIndex == 2: # Normal
                            typeSource = self.tempNormals

                        index = typeGroup[typeIndex]
                        # Make sure data exists
                        if index != '':
                            index = int(index)
                            # Dereference the 1-based index into the temp pools
                            # and bucket the data under the active material.
                            typeData = typeSource[index - 1]
                            self.fmvnig[matname][typeIndex].append(typeData)
                continue

            # Map the values after the keyword to floats
            value = list(map(float, value))

            if valueType == "v":
                self.tempVertices.append(value)
            elif valueType == "vt":
                # NOTE(review): `value * 2` repeats the UV list (e.g. [u, v]
                # becomes [u, v, u, v]) -- looks intentional for the renderer
                # downstream, but confirm; it is not a scalar scale.
                self.tempUVs.append(value * 2)
            elif valueType == "vn":
                self.tempNormals.append(value)
| 31.777778 | 85 | 0.474825 |
a7a40cc0041cd1b331718fe37a0eefbdf93bb5f8 | 2,456 | py | Python | relaton_bib/relaton_bib.py | relaton/relaton-bib-py | 03b601d9a5482e6142b90d6b6cca5a05879452c8 | [
"BSD-3-Clause"
] | null | null | null | relaton_bib/relaton_bib.py | relaton/relaton-bib-py | 03b601d9a5482e6142b90d6b6cca5a05879452c8 | [
"BSD-3-Clause"
] | 7 | 2021-08-18T04:02:25.000Z | 2022-01-28T19:22:42.000Z | relaton_bib/relaton_bib.py | relaton/relaton-bib-py | 03b601d9a5482e6142b90d6b6cca5a05879452c8 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import dataclasses
import re
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .localized_string import LocalizedString
class RequestError(Exception):
    """Exception type for failed requests (raised by callers elsewhere;
    no usage is visible in this module)."""
    pass
def parse_date(date, str_res=True):
    """Normalize a date given as text (or a datetime) to an ISO-like form.

    Recognized text formats: "February 2012", "February 11, 2012",
    "2012-02-11", "2012-02" and "2012".

    :param date: a ``datetime.datetime`` (returned as-is) or a string
    :param str_res: when True return a string; for the formats with an
        explicit output format False returns the parsed ``datetime``
        (note: the format-less branches still return strings)
    :return: the normalized value, or None when nothing matched
    """
    # BUG FIX: the original tested `date is datetime.datetime`, which
    # compares identity against the class object and is never True for an
    # instance; isinstance() is the intended check.
    if isinstance(date, datetime.datetime):
        return date
    cases = [
        # (regex, strptime input format, strftime output format or None)
        # February 2012
        (r"(?P<date>\w+\s\d{4})", "%B %Y", "%Y-%m"),
        # February 11, 2012
        (r"(?P<date>\w+\s\d{1,2},\s\d{4})", "%B %d, %Y", "%Y-%m-%d"),
        # 2012-02-11
        (r"(?P<date>\d{4}-\d{2}-\d{2})", "%Y-%m-%d", None),
        # 2012-02
        (r"(?P<date>\d{4}-\d{2})", "%Y-%m", None),
        # 2012
        (r"(?P<date>\d{4})", "%Y", None),
    ]
    for regexp, strp, strf in cases:
        m = re.match(regexp, str(date))
        if m:
            value = m.group("date")
            d = datetime.datetime.strptime(value, strp)
            if strf:
                return d.strftime(strf) if str_res else d
            else:
                # Input already matches the output shape; echo it back.
                return value if str_res else d.strftime(strp)
# TODO: this helper may be unnecessary in the Python port
# @param array [Array]
# @return [Array<String>, String]
def single_element_array(array):
    """Convert a list of strings/dataclass instances to plain data.

    A single-element list collapses to its (converted) element; longer
    lists return a lazy map of converted elements.  Strings pass through
    unchanged; everything else is assumed to be a dataclass instance and
    converted with ``dataclasses.asdict``.
    """
    def _to_plain(item):
        # BUG FIX: the original tested `item is str` (identity with the str
        # *class*), which is False for every actual string, so strings were
        # fed to dataclasses.asdict and raised TypeError.
        return item if isinstance(item, str) else dataclasses.asdict(item)

    if len(array) > 1:
        return map(_to_plain, array)
    return _to_plain(array[0])
def lang_filter(target, opts=None):
    """Filter localized items by the language requested in ``opts``.

    :param target: iterable of objects carrying a ``language`` attribute
    :param opts: optional dict; ``opts["lang"]`` selects the language
    :return: the matching items, or ``target`` unchanged when none match
    """
    # Mutable-default fix: `opts={}` shared one dict across all calls;
    # a None default with an explicit fallback is the safe idiom.
    if opts is None:
        opts = {}
    lang = opts.get("lang")
    filtered = [t for t in target if t.language and lang in t.language]
    return filtered if filtered else target
def to_ds_instance(klass, fail=False):
    """Build a converter that coerces values into ``klass`` instances.

    The returned function maps: an existing ``klass`` instance -> itself;
    a dict -> ``klass(**value)``; a str -> ``klass(value)``.  Anything
    else is returned unchanged, unless ``fail`` is True, in which case a
    ValueError is raised.
    """
    def f(x):
        if isinstance(x, klass):
            return x
        elif isinstance(x, dict):
            return klass(**x)
        elif isinstance(x, str):
            return klass(x)
        elif fail:
            # BUG FIX: the ValueError was previously constructed but never
            # raised, so fail=True silently fell through and returned None.
            raise ValueError(
                f"Unknown how to convert {type(x).__name__} to {klass}")
        else:
            return x
    return f
def delegate(to, *methods):
    """Class decorator that forwards each name in *methods* to the
    attribute named *to*.

    https://stackoverflow.com/a/55563139/902217
    """
    def decorate(klass):
        def make_forwarder(method_name):
            # A dedicated closure per method keeps the name bound correctly.
            def forwarder(self, *args, **kwargs):
                target = getattr(self, to)
                bound = getattr(target, method_name)
                return bound(*args, **kwargs)
            return forwarder

        for method_name in methods:
            setattr(klass, method_name, make_forwarder(method_name))
        return klass
    return decorate
| 26.695652 | 78 | 0.550896 |
2f24c915dec53177cc1f35562ee2901f6df5ee43 | 327 | py | Python | traccar_graphql/types.py | sunhoww/traccar_graphql | e63868f9b6c6d15cec4d184b2609991824554534 | [
"MIT"
] | 1 | 2020-11-13T22:15:54.000Z | 2020-11-13T22:15:54.000Z | traccar_graphql/types.py | sunhoww/traccar_graphql | e63868f9b6c6d15cec4d184b2609991824554534 | [
"MIT"
] | null | null | null | traccar_graphql/types.py | sunhoww/traccar_graphql | e63868f9b6c6d15cec4d184b2609991824554534 | [
"MIT"
] | 1 | 2021-01-14T01:26:16.000Z | 2021-01-14T01:26:16.000Z | import datetime, iso8601
from graphene.types import datetime
from graphql.language import ast
class DateTime(datetime.DateTime):
    '''Graphene DateTime scalar override.

    Accepts ISO-8601 strings as well as datetime objects when
    serializing: strings are parsed first, then handed to the stock
    graphene serializer.
    '''
    @staticmethod
    def serialize(dt):
        # NOTE: `datetime` here is graphene.types.datetime -- the second
        # import at the top of this module shadows the stdlib datetime
        # imported alongside iso8601.
        if isinstance(dt, str):
            dt = iso8601.parse_date(dt)
        return datetime.DateTime.serialize(dt)
| 25.153846 | 46 | 0.700306 |
2cdcc879bc0b91f5bce84366d5aee46328b276f9 | 823 | py | Python | config.py | kenmutuma001/Blog | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | [
"Unlicense"
] | null | null | null | config.py | kenmutuma001/Blog | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | [
"Unlicense"
] | null | null | null | config.py | kenmutuma001/Blog | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | [
"Unlicense"
] | null | null | null | import os
class Config:
    """Base configuration shared by every environment."""
    # Flask secret key, read from the environment (None when unset).
    SECRET_KEY = os.environ.get('SECRET_KEY')
    # Quotes API endpoint template; format with a quote id.
    BASE_URL = 'http://quotes.stormconsultancy.co.uk/{}.json'
    # NOTE(review): database credentials are hard-coded here; consider
    # moving them to environment variables.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://ken:kennedy@localhost/blogs'
    # Gmail SMTP settings; credentials come from the environment.
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
    """Production settings: database URL supplied by the environment."""
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestConfig(Config):
    """Test settings.

    NOTE(review): this points at the same local database as development,
    so tests will touch dev data -- confirm that is intended.
    """
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://ken:kennedy@localhost/blogs'
class DevConfig(Config):
    """Development settings: local Postgres database with debug mode on."""
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://ken:kennedy@localhost/blogs'
    DEBUG = True
# Lookup used to select a configuration class by environment name.
config_options = {
    'development':DevConfig,
    'production':ProdConfig,
    'test':TestConfig
}
| 21.102564 | 81 | 0.73633 |
9c9e852a964bd2e99b05029fd27b42f86d62f022 | 1,950 | py | Python | SciComputing with Python/TextAnalyzer/_backupTextAnalyzer.py | evtodorov/aerospace | 54a1b58c3c0b02c0eaa3aef14d0e732d7f867566 | [
"MIT"
] | null | null | null | SciComputing with Python/TextAnalyzer/_backupTextAnalyzer.py | evtodorov/aerospace | 54a1b58c3c0b02c0eaa3aef14d0e732d7f867566 | [
"MIT"
] | null | null | null | SciComputing with Python/TextAnalyzer/_backupTextAnalyzer.py | evtodorov/aerospace | 54a1b58c3c0b02c0eaa3aef14d0e732d7f867566 | [
"MIT"
] | null | null | null | '''
Assignment 2: Text analyzer
finding the fingerprint of different languages
29.4.2014
@author: etodorov
'''
# NOTE: Python 2 script (print statements); runs top to bottom, no functions.
#get the name of the tested file
fileName = "english.txt"#raw_input("Give file name (with the extenision)")
#clear the name of the tested file
fileName = fileName.lstrip().rstrip()
#open the file
fTested = open("D:/Evgeni/My Studies/Aerospace/Python/TextAnalyzer/"+fileName, "r")
#assure the file
# NOTE(review): open() raises IOError on a bad path, so this check is
# always true and the else branch below is effectively dead.
if(fTested): print "Found "+fileName+", reading and analyzing it."
else:
    print "Give a valid file name"
    # NOTE(review): missing () -- this references the close method without
    # calling it, so the file is not actually closed here.
    fTested.close
#read file
txt=fTested.read()
#prepare the text
#clean commas and dots, lower cases and new line
txt = txt.lower()
txt = txt.replace(","," ").replace("."," ").replace("!", " ").replace("?", " ").replace("\n", " ")
print txt
#get the number of occurances of each letter
totLetters = 0
fqLetters = []
#go over each letter and count how many times it occurs
# chr(97+i) walks the lowercase letters 'a'..'z'.
for i in range(26):
    occurLetter = txt.count(chr(97+i))
    totLetters += occurLetter
    fqLetters.append(occurLetter)
#prepare a string with the percent of every letter
# NOTE(review): a file with no letters makes totLetters 0 and this loop
# divides by zero.
txt_fqLetters = "Frequency of letters \n"
for l in range(26):
    fqLetters[l]=float(fqLetters[l])/totLetters
    txt_fqLetters+= chr(65+l)+": \t"+str(fqLetters[l]*100)+"% \n"
#print the result
print txt_fqLetters
#get the word length distribution
#split by whitespace
words = txt.split()
#count the words and get their length
numWords=0
wordDistr=20*[0]
for word in words:
    #check if word
    if(word.isalpha()):
        #one more words
        numWords+=1
        #one more word with this length
        # NOTE(review): words of 20+ letters overflow this list (IndexError).
        wordDistr[len(word)]+=1
#get percentages and prepare string
txt_wordDistr="Word distribution \n"
print wordDistr
for k in range(1,len(wordDistr)):
    wordDistr[k]=float(wordDistr[k])/numWords
    txt_wordDistr += str(wordDistr[k]*100)+"% \t of the words are length \t"+str(k)+"\t characters \n"
#print the result
print txt_wordDistr
print wordDistr
fTested.close()
| 28.676471 | 102 | 0.702051 |
2f0cbfdbe852d180e98559654fb955a8b9f26ca9 | 29,411 | py | Python | Lib/asyncio/streams.py | hdaugherty/cpython | 27ee0f8551a6d576a65e20da90acf9f3cb412c35 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2019-05-22T08:19:52.000Z | 2019-05-22T08:19:52.000Z | Lib/asyncio/streams.py | hdaugherty/cpython | 27ee0f8551a6d576a65e20da90acf9f3cb412c35 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/asyncio/streams.py | hdaugherty/cpython | 27ee0f8551a6d576a65e20da90acf9f3cb412c35 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | __all__ = (
'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
'open_connection', 'start_server')
import socket
import sys
import warnings
import weakref
if hasattr(socket, 'AF_UNIX'):
__all__ += ('open_unix_connection', 'start_unix_server')
from . import coroutines
from . import events
from . import exceptions
from . import format_helpers
from . import protocols
from .log import logger
from .tasks import sleep
_DEFAULT_LIMIT = 2 ** 16 # 64 KiB
async def open_connection(host=None, port=None, *,
                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
    """A wrapper for create_connection() returning a (reader, writer) pair.

    The returned pair is a StreamReader plus a StreamWriter.  All the
    usual create_connection() arguments are accepted (positional host and
    port being the common ones), along with two extras: ``loop`` to pick
    the event loop and ``limit`` to size the StreamReader buffer.

    To customize the StreamReader/StreamReaderProtocol classes, copy this
    code -- it is nothing more than a convenience wrapper.
    """
    if loop is None:
        loop = events.get_event_loop()
    stream_reader = StreamReader(limit=limit, loop=loop,
                                 _asyncio_internal=True)
    proto = StreamReaderProtocol(stream_reader, loop=loop,
                                 _asyncio_internal=True)
    transport, _ = await loop.create_connection(
        lambda: proto, host, port, **kwds)
    stream_writer = StreamWriter(transport, proto, stream_reader, loop,
                                 _asyncio_internal=True)
    return stream_reader, stream_writer
async def start_server(client_connected_cb, host=None, port=None, *,
                       loop=None, limit=_DEFAULT_LIMIT, **kwds):
    """Start a socket server, invoking a callback for each client.

    ``client_connected_cb`` receives (client_reader, client_writer) -- a
    StreamReader/StreamWriter pair -- and may be a plain callable or a
    coroutine (in which case it is scheduled as a Task).

    The remaining arguments are forwarded to loop.create_server(); two
    extras are accepted: ``loop`` to pick the event loop and ``limit`` to
    size each StreamReader buffer.  Returns the same Server object as
    loop.create_server(), which can be used to stop the service.
    """
    if loop is None:
        loop = events.get_event_loop()

    def protocol_factory():
        # A fresh reader/protocol pair is built for every accepted connection.
        stream_reader = StreamReader(limit=limit, loop=loop,
                                     _asyncio_internal=True)
        return StreamReaderProtocol(stream_reader, client_connected_cb,
                                    loop=loop,
                                    _asyncio_internal=True)

    return await loop.create_server(protocol_factory, host, port, **kwds)
if hasattr(socket, 'AF_UNIX'):
    # UNIX Domain Sockets are supported on this platform

    async def open_unix_connection(path=None, *,
                                   loop=None, limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `open_connection` but works with UNIX Domain Sockets."""
        if loop is None:
            loop = events.get_event_loop()
        reader = StreamReader(limit=limit, loop=loop,
                              _asyncio_internal=True)
        protocol = StreamReaderProtocol(reader, loop=loop,
                                        _asyncio_internal=True)
        transport, _ = await loop.create_unix_connection(
            lambda: protocol, path, **kwds)
        writer = StreamWriter(transport, protocol, reader, loop,
                              _asyncio_internal=True)
        return reader, writer

    async def start_unix_server(client_connected_cb, path=None, *,
                                loop=None, limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `start_server` but works with UNIX Domain Sockets."""
        if loop is None:
            loop = events.get_event_loop()

        def factory():
            # One fresh reader/protocol pair per accepted connection.
            reader = StreamReader(limit=limit, loop=loop,
                                  _asyncio_internal=True)
            protocol = StreamReaderProtocol(reader, client_connected_cb,
                                            loop=loop,
                                            _asyncio_internal=True)
            return protocol

        return await loop.create_unix_server(factory, path, **kwds)
class FlowControlMixin(protocols.Protocol):
    """Reusable flow control logic for StreamWriter.drain().

    This implements the protocol methods pause_writing(),
    resume_writing() and connection_lost(). If the subclass overrides
    these it must call the super methods.

    StreamWriter.drain() must wait for _drain_helper() coroutine.
    """

    def __init__(self, loop=None, *, _asyncio_internal=False):
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        if not _asyncio_internal:
            # NOTE:
            # Avoid inheritance from FlowControlMixin
            # Copy-paste the code to your project
            # if you need flow control helpers
            warnings.warn(f"{self.__class__} should be instaniated "
                          "by asyncio internals only, "
                          "please avoid its creation from user code",
                          DeprecationWarning)
        # Transport has asked us to pause writing when True.
        self._paused = False
        # Future a drain() caller waits on while paused; at most one waiter.
        self._drain_waiter = None
        self._connection_lost = False

    def pause_writing(self):
        """Called by the transport when its buffer goes over the high mark."""
        assert not self._paused
        self._paused = True
        if self._loop.get_debug():
            logger.debug("%r pauses writing", self)

    def resume_writing(self):
        """Called by the transport when its buffer drains below the low mark."""
        assert self._paused
        self._paused = False
        if self._loop.get_debug():
            logger.debug("%r resumes writing", self)

        # Release the single drain() waiter, if there is one still pending.
        waiter = self._drain_waiter
        if waiter is not None:
            self._drain_waiter = None
            if not waiter.done():
                waiter.set_result(None)

    def connection_lost(self, exc):
        self._connection_lost = True
        # Wake up the writer if currently paused.
        if not self._paused:
            return
        waiter = self._drain_waiter
        if waiter is None:
            return
        self._drain_waiter = None
        if waiter.done():
            return
        # Propagate the disconnect cause to the waiting drain() call.
        if exc is None:
            waiter.set_result(None)
        else:
            waiter.set_exception(exc)

    async def _drain_helper(self):
        """Block the caller until writing is resumed (used by drain())."""
        if self._connection_lost:
            raise ConnectionResetError('Connection lost')
        if not self._paused:
            return
        # Only one concurrent drain() is supported; any leftover waiter
        # must already be cancelled.
        waiter = self._drain_waiter
        assert waiter is None or waiter.cancelled()
        waiter = self._loop.create_future()
        self._drain_waiter = waiter
        await waiter

    def _get_close_waiter(self, stream):
        # Subclasses must provide the future that completes on close.
        raise NotImplementedError
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
    """Helper class to adapt between Protocol and StreamReader.
    (This is a helper class instead of making StreamReader itself a
    Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader to accidentally
    call inappropriate methods of the protocol.)

    Holds only a *weak* reference to the user's StreamReader so that an
    abandoned stream gets garbage collected and the transport aborted
    (see _on_reader_gc), instead of buffering forever.
    """
    _source_traceback = None
    def __init__(self, stream_reader, client_connected_cb=None, loop=None,
                 *, _asyncio_internal=False):
        super().__init__(loop=loop, _asyncio_internal=_asyncio_internal)
        if stream_reader is not None:
            # Weak ref: losing the last user reference triggers _on_reader_gc.
            self._stream_reader_wr = weakref.ref(stream_reader,
                                                 self._on_reader_gc)
            self._source_traceback = stream_reader._source_traceback
        else:
            self._stream_reader_wr = None
        if client_connected_cb is not None:
            # This is a stream created by the `create_server()` function.
            # Keep a strong reference to the reader until a connection
            # is established.
            self._strong_reader = stream_reader
        self._reject_connection = False
        self._stream_writer = None
        self._transport = None
        self._client_connected_cb = client_connected_cb
        self._over_ssl = False
        # Future resolved in connection_lost(); returned by _get_close_waiter.
        self._closed = self._loop.create_future()
    def _on_reader_gc(self, wr):
        # Weakref callback: the user dropped the StreamReader.
        transport = self._transport
        if transport is not None:
            # connection_made was called
            context = {
                'message': ('An open stream object is being garbage '
                            'collected; call "stream.close()" explicitly.')
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
            transport.abort()
        else:
            # Connection not established yet; refuse it when it arrives.
            self._reject_connection = True
        self._stream_reader_wr = None
    @property
    def _stream_reader(self):
        # Dereference the weakref; None once the reader was collected.
        if self._stream_reader_wr is None:
            return None
        return self._stream_reader_wr()
    def connection_made(self, transport):
        if self._reject_connection:
            # Reader was garbage collected before the connection arrived.
            context = {
                'message': ('An open stream was garbage collected prior to '
                            'establishing network connection; '
                            'call "stream.close()" explicitly.')
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
            transport.abort()
            return
        self._transport = transport
        reader = self._stream_reader
        if reader is not None:
            reader.set_transport(transport)
        self._over_ssl = transport.get_extra_info('sslcontext') is not None
        if self._client_connected_cb is not None:
            # Server-side stream: build the writer and invoke the user
            # callback (scheduling it as a task if it is a coroutine).
            self._stream_writer = StreamWriter(transport, self,
                                               reader,
                                               self._loop,
                                               _asyncio_internal=True)
            res = self._client_connected_cb(reader,
                                            self._stream_writer)
            if coroutines.iscoroutine(res):
                self._loop.create_task(res)
            # Connection established; drop the strong reference taken
            # in __init__ so normal weakref GC semantics apply again.
            self._strong_reader = None
    def connection_lost(self, exc):
        reader = self._stream_reader
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
        if not self._closed.done():
            if exc is None:
                self._closed.set_result(None)
            else:
                self._closed.set_exception(exc)
        super().connection_lost(exc)
        # Break reference cycles so the protocol can be collected.
        self._stream_reader_wr = None
        self._stream_writer = None
        self._transport = None
    def data_received(self, data):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_data(data)
    def eof_received(self):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_eof()
        if self._over_ssl:
            # Prevent a warning in SSLProtocol.eof_received:
            # "returning true from eof_received()
            # has no effect when using ssl"
            return False
        return True
    def _get_close_waiter(self, stream):
        return self._closed
    def __del__(self):
        # Prevent reports about unhandled exceptions.
        # Better than self._closed._log_traceback = False hack
        closed = self._closed
        if closed.done() and not closed.cancelled():
            closed.exception()
def _swallow_unhandled_exception(fut):
    """Mark *fut*'s exception (if any) as retrieved.

    Used as a done-callback for the background drain() task created by
    StreamWriter._fast_drain(), so that stream.write() used without
    `await` does not produce an "exception was never retrieved" report
    when drain() was paused and resumed with an exception.
    """
    fut.exception()
class StreamWriter:
    """Wraps a Transport.

    This exposes write(), writelines(), [can_]write_eof(),
    get_extra_info() and close(). It adds drain() which returns an
    optional Future on which you can wait for flow control. It also
    adds a transport property which references the Transport
    directly.
    """

    def __init__(self, transport, protocol, reader, loop,
                 *, _asyncio_internal=False):
        if not _asyncio_internal:
            warnings.warn(f"{self.__class__} should be instantiated "
                          "by asyncio internals only, "
                          "please avoid its creation from user code",
                          DeprecationWarning)
        self._transport = transport
        self._protocol = protocol
        # drain() expects that the reader has an exception() method
        assert reader is None or isinstance(reader, StreamReader)
        self._reader = reader
        self._loop = loop
        # Pre-completed future returned by the _fast_drain() fast path so
        # the result of write()/writelines() is always awaitable.
        self._complete_fut = self._loop.create_future()
        self._complete_fut.set_result(None)

    def __repr__(self):
        info = [self.__class__.__name__, f'transport={self._transport!r}']
        if self._reader is not None:
            info.append(f'reader={self._reader!r}')
        return '<{}>'.format(' '.join(info))

    @property
    def transport(self):
        return self._transport

    def write(self, data):
        self._transport.write(data)
        return self._fast_drain()

    def writelines(self, data):
        self._transport.writelines(data)
        return self._fast_drain()

    def _fast_drain(self):
        # The helper tries to use fast-path to return already existing
        # complete future object if underlying transport is not paused
        # and actual waiting for writing resume is not needed
        if self._reader is not None:
            # this branch will be simplified after merging reader with writer
            exc = self._reader.exception()
            if exc is not None:
                fut = self._loop.create_future()
                fut.set_exception(exc)
                return fut
        if not self._transport.is_closing():
            if self._protocol._connection_lost:
                fut = self._loop.create_future()
                fut.set_exception(ConnectionResetError('Connection lost'))
                return fut
            if not self._protocol._paused:
                # fast path, the stream is not paused
                # no need to wait for resume signal
                return self._complete_fut
        # Slow path: schedule a real drain() and suppress its exception
        # report in case the caller never awaits the returned task.
        ret = self._loop.create_task(self.drain())
        ret.add_done_callback(_swallow_unhandled_exception)
        return ret

    def write_eof(self):
        return self._transport.write_eof()

    def can_write_eof(self):
        return self._transport.can_write_eof()

    def close(self):
        self._transport.close()
        return self._protocol._get_close_waiter(self)

    def is_closing(self):
        return self._transport.is_closing()

    async def wait_closed(self):
        await self._protocol._get_close_waiter(self)

    def get_extra_info(self, name, default=None):
        return self._transport.get_extra_info(name, default)

    async def drain(self):
        """Flush the write buffer.

        The intended use is to write

          w.write(data)
          await w.drain()
        """
        if self._reader is not None:
            exc = self._reader.exception()
            if exc is not None:
                raise exc
        if self._transport.is_closing():
            # Wait for protocol.connection_lost() call
            # Raise connection closing error if any,
            # ConnectionResetError otherwise
            await sleep(0)
        await self._protocol._drain_helper()
class StreamReader:

    _source_traceback = None

    def __init__(self, limit=_DEFAULT_LIMIT, loop=None,
                 *, _asyncio_internal=False):
        if not _asyncio_internal:
            warnings.warn(f"{self.__class__} should be instantiated "
                          "by asyncio internals only, "
                          "please avoid its creation from user code",
                          DeprecationWarning)
        # The line length limit is a security feature;
        # it also doubles as half the buffer limit.
        if limit <= 0:
            raise ValueError('Limit cannot be <= 0')
        self._limit = limit
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._buffer = bytearray()
        self._eof = False    # Whether we're done.
        self._waiter = None  # A future used by _wait_for_data()
        self._exception = None
        self._transport = None
        self._paused = False
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))

    def __repr__(self):
        info = ['StreamReader']
        if self._buffer:
            info.append(f'{len(self._buffer)} bytes')
        if self._eof:
            info.append('eof')
        if self._limit != _DEFAULT_LIMIT:
            info.append(f'limit={self._limit}')
        if self._waiter:
            info.append(f'waiter={self._waiter!r}')
        if self._exception:
            info.append(f'exception={self._exception!r}')
        if self._transport:
            info.append(f'transport={self._transport!r}')
        if self._paused:
            info.append('paused')
        return '<{}>'.format(' '.join(info))

    def exception(self):
        return self._exception

    def set_exception(self, exc):
        self._exception = exc
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_exception(exc)

    def _wakeup_waiter(self):
        """Wakeup read*() functions waiting for data or EOF."""
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(None)

    def set_transport(self, transport):
        assert self._transport is None, 'Transport already set'
        self._transport = transport

    def _maybe_resume_transport(self):
        if self._paused and len(self._buffer) <= self._limit:
            self._paused = False
            self._transport.resume_reading()

    def feed_eof(self):
        self._eof = True
        self._wakeup_waiter()

    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer

    def feed_data(self, data):
        assert not self._eof, 'feed_data after feed_eof'
        if not data:
            return
        self._buffer.extend(data)
        self._wakeup_waiter()
        if (self._transport is not None and
                not self._paused and
                len(self._buffer) > 2 * self._limit):
            try:
                self._transport.pause_reading()
            except NotImplementedError:
                # The transport can't be paused.
                # We'll just have to buffer all data.
                # Forget the transport so we don't keep trying.
                self._transport = None
            else:
                self._paused = True

    async def _wait_for_data(self, func_name):
        """Wait until feed_data() or feed_eof() is called.

        If stream was paused, automatically resume it.
        """
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError(
                f'{func_name}() called while another coroutine is '
                f'already waiting for incoming data')
        assert not self._eof, '_wait_for_data after EOF'
        # Waiting for data while paused will make deadlock, so prevent it.
        # This is essential for readexactly(n) for case when n > self._limit.
        if self._paused:
            self._paused = False
            self._transport.resume_reading()
        self._waiter = self._loop.create_future()
        try:
            await self._waiter
        finally:
            self._waiter = None

    async def readline(self):
        """Read chunk of data from the stream until newline (b'\n') is found.

        On success, return chunk that ends with newline. If only partial
        line can be read due to EOF, return incomplete line without
        terminating newline. When EOF was reached while no bytes read, empty
        bytes object is returned.

        If limit is reached, ValueError will be raised. In that case, if
        newline was found, complete line including newline will be removed
        from internal buffer. Else, internal buffer will be cleared. Limit is
        compared against part of the line without newline.

        If stream was paused, this function will automatically resume it if
        needed.
        """
        sep = b'\n'
        seplen = len(sep)
        try:
            line = await self.readuntil(sep)
        except exceptions.IncompleteReadError as e:
            return e.partial
        except exceptions.LimitOverrunError as e:
            if self._buffer.startswith(sep, e.consumed):
                del self._buffer[:e.consumed + seplen]
            else:
                self._buffer.clear()
            self._maybe_resume_transport()
            raise ValueError(e.args[0])
        return line

    async def readuntil(self, separator=b'\n'):
        """Read data from the stream until ``separator`` is found.

        On success, the data and separator will be removed from the
        internal buffer (consumed). Returned data will include the
        separator at the end.

        Configured stream limit is used to check result. Limit sets the
        maximal length of data that can be returned, not counting the
        separator.

        If an EOF occurs and the complete separator is still not found,
        an IncompleteReadError exception will be raised, and the internal
        buffer will be reset. The IncompleteReadError.partial attribute
        may contain the separator partially.

        If the data cannot be read because of over limit, a
        LimitOverrunError exception will be raised, and the data
        will be left in the internal buffer, so it can be read again.
        """
        seplen = len(separator)
        if seplen == 0:
            raise ValueError('Separator should be at least one-byte string')
        if self._exception is not None:
            raise self._exception
        # Consume whole buffer except last bytes, which length is
        # one less than seplen. Let's check corner cases with
        # separator='SEPARATOR':
        # * we have received almost complete separator (without last
        #   byte). i.e buffer='some textSEPARATO'. In this case we
        #   can safely consume len(separator) - 1 bytes.
        # * last byte of buffer is first byte of separator, i.e.
        #   buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this require to
        #   analyze bytes of buffer that match partial separator.
        #   This is slow and/or require FSM. For this case our
        #   implementation is not optimal, since require rescanning
        #   of data that is known to not belong to separator. In
        #   real world, separator will not be so long to notice
        #   performance problems. Even when reading MIME-encoded
        #   messages :)
        # `offset` is the number of bytes from the beginning of the buffer
        # where there is no occurrence of `separator`.
        offset = 0
        # Loop until we find `separator` in the buffer, exceed the buffer size,
        # or an EOF has happened.
        while True:
            buflen = len(self._buffer)
            # Check if we now have enough data in the buffer for `separator` to
            # fit.
            if buflen - offset >= seplen:
                isep = self._buffer.find(separator, offset)
                if isep != -1:
                    # `separator` is in the buffer. `isep` will be used later
                    # to retrieve the data.
                    break
                # see upper comment for explanation.
                offset = buflen + 1 - seplen
                if offset > self._limit:
                    raise exceptions.LimitOverrunError(
                        'Separator is not found, and chunk exceed the limit',
                        offset)
            # Complete message (with full separator) may be present in buffer
            # even when EOF flag is set. This may happen when the last chunk
            # adds data which makes separator be found. That's why we check for
            # EOF *after* inspecting the buffer.
            if self._eof:
                chunk = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(chunk, None)
            # _wait_for_data() will resume reading if stream was paused.
            await self._wait_for_data('readuntil')
        if isep > self._limit:
            raise exceptions.LimitOverrunError(
                'Separator is found, but chunk is longer than limit', isep)
        chunk = self._buffer[:isep + seplen]
        del self._buffer[:isep + seplen]
        self._maybe_resume_transport()
        return bytes(chunk)

    async def read(self, n=-1):
        """Read up to `n` bytes from the stream.

        If n is not provided, or set to -1, read until EOF and return all read
        bytes. If the EOF was received and the internal buffer is empty, return
        an empty bytes object.

        If n is zero, return empty bytes object immediately.

        If n is positive, this function try to read `n` bytes, and may return
        less or equal bytes than requested, but at least one byte. If EOF was
        received before any byte is read, this function returns empty byte
        object.

        Returned value is not limited with limit, configured at stream
        creation.

        If stream was paused, this function will automatically resume it if
        needed.
        """
        if self._exception is not None:
            raise self._exception
        if n == 0:
            return b''
        if n < 0:
            # This used to just loop creating a new waiter hoping to
            # collect everything in self._buffer, but that would
            # deadlock if the subprocess sends more than self.limit
            # bytes.  So just call self.read(self._limit) until EOF.
            blocks = []
            while True:
                block = await self.read(self._limit)
                if not block:
                    break
                blocks.append(block)
            return b''.join(blocks)
        if not self._buffer and not self._eof:
            await self._wait_for_data('read')
        # This will work right even if buffer is less than n bytes
        data = bytes(self._buffer[:n])
        del self._buffer[:n]
        self._maybe_resume_transport()
        return data

    async def readexactly(self, n):
        """Read exactly `n` bytes.

        Raise an IncompleteReadError if EOF is reached before `n` bytes can be
        read. The IncompleteReadError.partial attribute of the exception will
        contain the partial read bytes.

        if n is zero, return empty bytes object.

        Returned value is not limited with limit, configured at stream
        creation.

        If stream was paused, this function will automatically resume it if
        needed.
        """
        if n < 0:
            raise ValueError('readexactly size can not be less than zero')
        if self._exception is not None:
            raise self._exception
        if n == 0:
            return b''
        while len(self._buffer) < n:
            if self._eof:
                incomplete = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(incomplete, n)
            await self._wait_for_data('readexactly')
        if len(self._buffer) == n:
            data = bytes(self._buffer)
            self._buffer.clear()
        else:
            data = bytes(self._buffer[:n])
            del self._buffer[:n]
        self._maybe_resume_transport()
        return data

    def __aiter__(self):
        return self

    async def __anext__(self):
        val = await self.readline()
        if val == b'':
            raise StopAsyncIteration
        return val
| 36.535404 | 86 | 0.603142 |
9fa2ee67adbd10b5cd897ec5cf9693a427f3d9be | 9,170 | py | Python | programs/buck.py | fkorotkov/buck | 4d63790ceda1028281600af9cf75153ccb92a5f5 | [
"Apache-2.0"
] | 1 | 2018-09-12T14:58:13.000Z | 2018-09-12T14:58:13.000Z | programs/buck.py | fkorotkov/buck | 4d63790ceda1028281600af9cf75153ccb92a5f5 | [
"Apache-2.0"
] | null | null | null | programs/buck.py | fkorotkov/buck | 4d63790ceda1028281600af9cf75153ccb92a5f5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import errno
import logging
import os
import re
import signal
import subprocess
import sys
import threading
import time
import uuid
import zipfile
from multiprocessing import Queue
from subprocess import check_output
from buck_logging import setup_logging
from buck_project import BuckProject, NoBuckConfigFoundException
from buck_tool import (
BuckDaemonErrorException,
BuckStatusReporter,
ExecuteTarget,
get_java_path,
install_signal_handlers,
)
from subprocutils import propagate_failure
from tracing import Tracing
class ExitCode(object):
    """Python equivalent of com.facebook.buck.util.ExitCode"""
    SUCCESS = 0  # normal termination
    COMMANDLINE_ERROR = 3  # bad invocation / unsupported platform
    FATAL_GENERIC = 10  # unclassified fatal error (e.g. daemon disconnect)
    FATAL_BOOTSTRAP = 11  # failure while bootstrapping the wrapper itself
    FATAL_IO = 13  # generic I/O failure
    FATAL_DISK_FULL = 14  # IOError with errno.ENOSPC
    SIGNAL_INTERRUPT = 130  # keyboard interrupt (128 + SIGINT shell convention)
    SIGNAL_PIPE = 141  # IOError with errno.EPIPE (128 + SIGPIPE)
if sys.version_info < (2, 7):
    import platform

    # BUGFIX: platform.version() reports the *operating system* version;
    # platform.python_version() is the interpreter version this message
    # is actually talking about.
    print(
        (
            "Buck requires at least version 2.7 of Python, but you are using {}."
            "\nPlease follow https://buckbuild.com/setup/getting_started.html "
            + "to properly setup your development environment."
        ).format(platform.python_version())
    )
    sys.exit(ExitCode.FATAL_BOOTSTRAP)

# Directory containing this wrapper script; used to locate the Buck repo.
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
REQUIRED_JAVA_VERSION = "8"
def killall_buck(reporter):
    """Kill all running Buck JVM processes.

    Scans ``jps -l`` output for Buck's ClassLoaderBootstrapper main class
    and sends each matching PID a SIGTERM.  POSIX-only.

    :param reporter: BuckStatusReporter; its ``status_message`` is set on error.
    :return: an ``ExitCode`` value.
    """
    # Linux or macOS only; `jps`/SIGTERM handling is POSIX-specific.
    if os.name != "posix":
        message = "killall is not implemented on: " + os.name
        logging.error(message)
        reporter.status_message = message
        return ExitCode.COMMANDLINE_ERROR
    for line in os.popen("jps -l"):
        split = line.split()
        if len(split) == 1:
            # Java processes which are launched not as `java Main`
            # (e. g. `idea`) are shown with only PID without
            # main class name.
            continue
        if len(split) != 2:
            raise Exception("cannot parse a line in jps -l output: " + repr(line))
        pid = int(split[0])
        name = split[1]
        if name != "com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper":
            continue
        os.kill(pid, signal.SIGTERM)
    # TODO(buck_team) clean .buckd directories
    return ExitCode.SUCCESS
def _get_java_version(java_path):
    """
    Returns a Java major version string (e.g. "7", "8", "9") or None.

    Information is provided by java tool and parsing is based on
    http://www.oracle.com/technetwork/java/javase/versioning-naming-139433.html

    :param java_path: path of the ``java`` executable to interrogate.
    :return: the major version string, or None if the output is unparsable.
    """
    # `java -version` prints to stderr, so merge it into stdout.
    # BUGFIX: check_output() returns bytes on Python 3; decode before
    # matching, otherwise re.search() raises TypeError (which the caller's
    # best-effort handler silently swallowed).
    java_version = check_output(
        [java_path, "-version"], stderr=subprocess.STDOUT
    ).decode("utf-8", "replace")
    # extract java version from a string like 'java version "1.8.0_144"'
    match = re.search('java version "(?P<version>.+)"', java_version)
    if not match:
        return None
    pieces = match.group("version").split(".")
    if pieces[0] != "1":
        # versions starting at 9 look like "9.0.4"
        return pieces[0]
    # versions <9 look like "1.8.0_144"
    return pieces[1]
def _try_to_verify_java_version(java_version_status_queue):
    """
    Best effort check to make sure users have required Java version installed.

    Puts a warning string on *java_version_status_queue*, or None when the
    installed version looks fine.
    """
    java_path = get_java_path()
    warning = None
    try:
        java_version = _get_java_version(java_path)
        if java_version and java_version != REQUIRED_JAVA_VERSION:
            warning = "You're using Java {}, but Buck requires Java {}.\nPlease follow \
https://buckbuild.com/setup/getting_started.html \
to properly setup your local environment and avoid build issues.".format(
                java_version, REQUIRED_JAVA_VERSION
            )
    except Exception:
        # Checking the Java version is brittle and as such is best effort.
        # BUGFIX: a bare `except:` here would also swallow
        # KeyboardInterrupt/SystemExit; catch Exception instead.
        warning = "Cannot verify that installed Java version at '{}' \
is correct.".format(
            java_path
        )
    java_version_status_queue.put(warning)
def _try_to_verify_java_version_off_thread(java_version_status_queue):
    """Run the Java version check on a background daemon thread.

    Starting a `java` subprocess is comparatively expensive; doing it
    concurrently speeds up buck start-up (local testing showed 'buck run'
    start-up dropping from ~673 ms to ~520 ms).  The result is posted to
    *java_version_status_queue*.
    """
    checker = threading.Thread(
        target=_try_to_verify_java_version, args=(java_version_status_queue,)
    )
    checker.daemon = True
    checker.start()
def _emit_java_version_warnings_if_any(java_version_status_queue):
    """Log any Java-version warning posted by the verification thread.

    Two edge cases get special care:
    1. The main thread can reach this point before the verification thread
       has finished, so we wait up to 50 ms for a result.  This should
       essentially never happen unless the deployment or VM is badly
       misconfigured.
    2. The verification thread may never return (e.g. the `java` process
       hangs).  After the 50 ms grace period with no response, the check
       is simply skipped.
    """
    if java_version_status_queue.empty():
        time.sleep(0.05)
    if java_version_status_queue.empty():
        return
    warning = java_version_status_queue.get()
    if warning is not None:
        logging.warning(warning)
def main(argv, reporter):
    """Entry point: locate the Buck project/repo and launch the build.

    :param argv: command line arguments (argv[0] is used for PEX detection).
    :param reporter: BuckStatusReporter used to record the build id/status.
    :return: an ``ExitCode``-style integer, or raises ExecuteTarget for
             'buck run' (handled by the __main__ block).
    """
    # Kick off the Java version check concurrently; result is consumed
    # in the finally block below.
    java_version_status_queue = Queue(maxsize=1)
    _try_to_verify_java_version_off_thread(java_version_status_queue)
    def get_repo(p):
        # Try to detect if we're running a PEX by checking if we were invoked
        # via a zip file.
        if zipfile.is_zipfile(argv[0]):
            from buck_package import BuckPackage
            return BuckPackage(p, reporter)
        else:
            from buck_repo import BuckRepo
            return BuckRepo(THIS_DIR, p, reporter)
    # If 'killall' is the second argument, shut down all the buckd processes
    if sys.argv[1:] == ["killall"]:
        return killall_buck(reporter)
    install_signal_handlers()
    try:
        tracing_dir = None
        # Reuse an externally-supplied build id if present (e.g. from a
        # wrapping CI system); otherwise generate a fresh UUID.
        build_id = os.environ.get("BUCK_BUILD_ID", str(uuid.uuid4()))
        reporter.build_id = build_id
        with Tracing("main"):
            with BuckProject.from_current_dir() as project:
                tracing_dir = os.path.join(project.get_buck_out_log_dir(), "traces")
                with get_repo(project) as buck_repo:
                    # If 'kill' is the second argument, shut down the buckd
                    # process
                    if sys.argv[1:] == ["kill"]:
                        buck_repo.kill_buckd()
                        return ExitCode.SUCCESS
                    return buck_repo.launch_buck(build_id)
    finally:
        # Always flush trace data and emit any Java-version warning,
        # even when launch_buck() raised.
        if tracing_dir:
            Tracing.write_to_dir(tracing_dir, build_id)
        _emit_java_version_warnings_if_any(java_version_status_queue)
if __name__ == "__main__":
    # Top-level driver: run main(), map every failure mode to an ExitCode,
    # report the result, then (for 'buck run') exec the built target.
    exit_code = ExitCode.SUCCESS
    reporter = BuckStatusReporter(sys.argv)
    fn_exec = None
    exception = None
    try:
        setup_logging()
        exit_code = main(sys.argv, reporter)
    except ExecuteTarget as e:
        # this is raised once 'buck run' has the binary
        # it can get here only if exit_code of corresponding buck build is 0
        fn_exec = e.execve
    except NoBuckConfigFoundException:
        exc_type, exception, exc_traceback = sys.exc_info()
        # buck is started outside project root
        exit_code = ExitCode.COMMANDLINE_ERROR
    except BuckDaemonErrorException:
        reporter.status_message = "Buck daemon disconnected unexpectedly"
        _, exception, _ = sys.exc_info()
        print(str(exception))
        # Already printed; clear so it is not logged again below.
        exception = None
        exit_code = ExitCode.FATAL_GENERIC
    except IOError as e:
        exc_type, exception, exc_traceback = sys.exc_info()
        # Map specific errnos to dedicated exit codes.
        if e.errno == errno.ENOSPC:
            exit_code = ExitCode.FATAL_DISK_FULL
        elif e.errno == errno.EPIPE:
            exit_code = ExitCode.SIGNAL_PIPE
        else:
            exit_code = ExitCode.FATAL_IO
    except KeyboardInterrupt:
        reporter.status_message = "Python wrapper keyboard interrupt"
        exit_code = ExitCode.SIGNAL_INTERRUPT
    except Exception:
        exc_type, exception, exc_traceback = sys.exc_info()
        exit_code = ExitCode.FATAL_BOOTSTRAP
    if exception is not None:
        logging.error(exception, exc_info=(exc_type, exception, exc_traceback))
        if reporter.status_message is None:
            reporter.status_message = str(exception)
    # report result of Buck call
    try:
        reporter.report(exit_code)
    except Exception as e:
        logging.debug(
            "Exception occurred while reporting build results. This error is "
            "benign and doesn't affect the actual build.",
            exc_info=True,
        )
    # execute 'buck run' target
    if fn_exec is not None:
        fn_exec()
    propagate_failure(exit_code)
| 34.734848 | 97 | 0.667612 |
d05aea8549ebff2cddd7ceffcfaecbf9d7779138 | 306 | py | Python | scibeam/util/__init__.py | SuperYuLu/SciBeam | 80601adda9288fd32efeedf5b1de015761c1f8e5 | [
"MIT"
] | 3 | 2020-07-31T09:24:46.000Z | 2021-03-01T23:59:51.000Z | scibeam/util/__init__.py | SuperYuLu/SciBeam | 80601adda9288fd32efeedf5b1de015761c1f8e5 | [
"MIT"
] | 12 | 2018-05-14T18:40:14.000Z | 2019-02-06T22:51:33.000Z | scibeam/util/__init__.py | SuperYuLu/SciBeam | 80601adda9288fd32efeedf5b1de015761c1f8e5 | [
"MIT"
] | null | null | null | # __init__.py ---
#
# Filename: __init__.py
# Description:
#
# Author: Yu Lu
# Email: yulu@utexas.edu
# Github: https://github.com/SuperYuLu
#
# Created: Wed Aug 22 11:14:07 2018 (-0500)
# Version:
# Last-Updated: Wed Aug 22 11:14:07 2018 (-0500)
# By: yulu
# Update #: 1
#
| 19.125 | 48 | 0.581699 |
92c794c16260edc09e099f5997de409a7c65733d | 122 | py | Python | tests/test_tsv.py | roedoejet/convertextract | bf194a7d81d847d68690ea0d58dc47a70259cd78 | [
"MIT"
] | 12 | 2016-10-20T16:17:04.000Z | 2022-03-10T06:36:59.000Z | tests/test_tsv.py | roedoejet/convertextract | bf194a7d81d847d68690ea0d58dc47a70259cd78 | [
"MIT"
] | 3 | 2018-01-12T00:41:26.000Z | 2020-08-12T05:04:45.000Z | tests/test_tsv.py | roedoejet/convertextract | bf194a7d81d847d68690ea0d58dc47a70259cd78 | [
"MIT"
] | 3 | 2020-08-18T21:47:03.000Z | 2022-02-03T06:32:46.000Z | import unittest
from . import base
class TsvTestCase(base.BaseParserTestCase, unittest.TestCase):
    """Run the shared parser test suite against ``.tsv`` fixtures."""
    # File extension used by BaseParserTestCase to pick fixture files.
    extension = 'tsv'
24f4371799c5a982e103f53f208aa0dcaa95100f | 61,737 | py | Python | pyNastran/bdf/subcase.py | JohannesSeidel/pyNastran | 91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf | [
"BSD-3-Clause"
] | null | null | null | pyNastran/bdf/subcase.py | JohannesSeidel/pyNastran | 91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf | [
"BSD-3-Clause"
] | null | null | null | pyNastran/bdf/subcase.py | JohannesSeidel/pyNastran | 91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf | [
"BSD-3-Clause"
] | null | null | null | """Subcase creation/extraction class"""
from typing import List, Dict, Any
from numpy import ndarray
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.utils import object_attributes, deprecated
from pyNastran.bdf.bdf_interface.case_control_cards import CLASS_MAP
from pyNastran.bdf.bdf_interface.subcase_utils import (
write_stress_type, write_set, expand_thru_case_control)
# Case control keywords whose right-hand side is a bare integer id.
INT_CARDS = (
    # these are cards that look like:
    #    LOAD = 6
    'SPC', 'MPC', 'TRIM', 'FMETHOD', 'METHOD', 'LOAD',
    'SUPORT', 'SUPORT1', 'TEMPERATURE(INITIAL)', 'TEMPERATURE(LOAD)',
    'DLOAD', 'MFLUID', 'CLOAD', 'NLPARM', 'CMETHOD',
    'FREQUENCY', 'TSTEP', 'TSTEPNL', 'SDAMPING', 'DESOBJ',
    'TEMPERATURE(INIT)', 'RANDOM', 'DESSUB', 'ADAPT', 'MAXLINES',
    'TFL', 'DESGLB', 'SMETHOD', 'DYNRED', 'GUST', 'TEMPERATURE(MATE)',
    'OTIME', 'NONLINEAR', 'AUXM', 'IC', 'BC', 'OUTRCV', 'DIVERG',
    'DATAREC', 'TEMPERATURE(BOTH)', 'DEFORM', 'MODES', 'CASE',
    'SEDR', 'SELG', 'SEFINAL', 'SEKR', 'TEMPERATURE(ESTIMATE)',
    'GPSDCON', 'AUXMODEL',
    'MODTRAK', 'OFREQ', 'DRSPAN', 'OMODES', 'ADACT', 'SERESP', 'STATSUB',
    'CURVESYM', 'ELSDCON', 'CSSCHD', 'NSM', 'TSTRU', 'RANDVAR',
    'RGYRO', 'SELR', 'TEMPERATURE(ESTI)', 'RCROSS', 'SERE', 'SEMR',
)

# Case control output requests that accept PLOT/PRINT/PUNCH/SORTx options.
PLOTTABLE_TYPES = (
    # these are types that look like:
    #    STRESS(PLOT,PRINT,PUNCH,SORT1) = ALL
    # they all support PLOT
    'STRESS', 'STRAIN', 'SPCFORCES', 'DISPLACEMENT', 'MPCFORCES', 'SVECTOR',
    'VELOCITY', 'ACCELERATION', 'FORCE', 'ESE', 'OLOAD', 'SEALL', 'GPFORCE',
    'GPSTRESS', 'GPSTRAIN', 'FLUX', 'AEROF', 'THERMAL', 'STRFIELD',
    'NOUTPUT', 'SEDV', 'APRES', 'HTFLOW', 'NLSTRESS', 'GPKE',
    'SACCELERATION', 'SDISPLACEMENT', 'SEMG', 'HARMONICS', 'PRESSURE', 'VUGRID',
    'ELSUM', 'SVELOCITY', 'STRFIELD REAL', 'SENSITY', 'MONITOR',
    'NLLOAD', 'GPSDCON', 'BOUTPUT',
)
class Subcase:
    """
    Subcase creation/extraction class

    A subcase stores its case control entries in ``self.params`` as
    ``{key: (value, options, param_type)}`` tuples.
    """
    # Recognized param_type tags for entries in self.params.
    allowed_param_types = [
        'SET-type', 'CSV-type', 'SUBCASE-type', 'KEY-type', 'STRESS-type', 'STRING-type',
        'OBJ-type',
    ]
    # Maps older solution-sequence numbers to their modern SOL equivalents
    # (presumably; entries marked "# correct" have been verified).
    solCodeMap = {
        1: 101,
        21: 101,
        24: 101,
        26: 101,
        61: 101,
        64: 106,  # correct
        66: 106,  # correct
        68: 106,  # correct
        76: 101,
        99: 129,  # correct
        144: 101,  # correct
        187: 101,
    }
    def __init__(self, id=0):
        """
        Parameters
        ----------
        id : int; default=0
            the subcase id; 0 is the default/global subcase
        """
        self.id = id
        # maps case control key -> (value, options, param_type)
        self.params = {}
        # solution number; None until set by the case control deck
        self.sol = None
        self.log = None
        #print("\n***adding subcase %s***" % self.id)
    def load_hdf5_file(self, hdf5_file, encoding):
        """Loads the Subcase from an open h5py file/group.

        Parameters
        ----------
        hdf5_file : h5py group written by ``export_to_hdf5``
        encoding : str
            encoding used to decode byte strings back to str
        """
        from pyNastran.utils.dict_to_h5py import _cast
        keys = list(hdf5_file.keys())
        for key in keys:
            #print(key)
            group = hdf5_file[key]
            if key in ['id']: # scalars
                value = _cast(group)
                setattr(self, key, value)
            elif key == 'params':
                #print(group)
                #print(group.keys())
                for group_key in group.keys():
                    #self.log.debug('%s %s' % (group_key, key))
                    value, options, param_type = _load_hdf5_param(group, group_key, encoding)
                    #self.log.debug('%s (%s, %s, %s)' % (key, value, options, param_type))
                    if isinstance(options, list):
                        # decode any bytes options back to str
                        options = [
                            option.decode(encoding) if isinstance(option, bytes) else option
                            for option in options]
                    self.params[group_key] = (value, options, param_type)
                # sanity check: make sure the loaded subcase can be stringified
                str(self)
            else: # pragma: no cover
                raise RuntimeError('failed exporting Subcase/%s' % key)
    def export_to_hdf5(self, hdf5_file, encoding):
        """Writes the Subcase to an open h5py file/group.

        Scalars (``id``, ``sol``) become datasets; each params entry becomes
        a sub-group holding 'value', 'options' and 'param_type' datasets.
        Strings are encoded to bytes with *encoding*.
        """
        keys_to_skip = ['log', 'solCodeMap', 'allowed_param_types']
        h5attrs = object_attributes(self, mode='both', keys_to_skip=keys_to_skip)
        #print('Subcase %i' % self.id)
        for h5attr in h5attrs:
            value = getattr(self, h5attr)
            if h5attr in ['id']: # scalars
                # simple export
                hdf5_file.create_dataset(h5attr, data=value)
            elif h5attr in ['sol']: # scalars/None
                if value is None:
                    continue
                hdf5_file.create_dataset(h5attr, data=value)
            elif h5attr in ['params']:
                if len(value) == 0:
                    continue
                keys = list(self.params.keys())
                params_group = hdf5_file.create_group('params')
                #print('keys =', keys)
                # NOTE(review): this encoded key list is computed but never
                # written (see the commented-out create_dataset below).
                unused_keys_bytes = [key.encode(encoding) for key in keys]
                #params_group.create_dataset('keys', data=keys_bytes)
                for key, (value, options, param_type) in self.params.items():
                    #print('  %-14s: %-8r %r %r' % (key, value, options, param_type))
                    if key == '':
                        # an empty key cannot name an h5py group
                        sub_group = params_group.create_group('blank')
                        sub_group.create_dataset('value', data=value)
                    else:
                        #print('key = %r' % key)
                        sub_group = params_group.create_group(key)
                        if value is not None:
                            if isinstance(value, list):
                                # encode str entries; leave other entries as-is
                                value_bytes = [
                                    valuei.encode(encoding) if isinstance(valuei, str) else valuei
                                    for valuei in value]
                                sub_group.create_dataset('value', data=value_bytes)
                            elif isinstance(value, (integer_types, float, str)):
                                sub_group.create_dataset('value', data=value)
                            elif hasattr(value, 'export_to_hdf5'):
                                # object values serialize themselves; the
                                # 'type' attribute records the case control key
                                sub_groupi = sub_group.create_group('object')
                                sub_groupi.attrs['type'] = key
                                value.export_to_hdf5(sub_groupi, encoding)
                            else:
                                print('value = %r' % value)
                                raise NotImplementedError(value)
                        if param_type is not None:
                            sub_group.create_dataset('param_type', data=param_type)
                        if options is not None:
                            if isinstance(options, list):
                                options_bytes = [
                                    option.encode(encoding) if isinstance(option, str) else option
                                    for option in options]
                                sub_group.create_dataset('options', data=options_bytes)
                            else:
                                sub_group.create_dataset('options', data=options)
            #if h5attr in ['_begin_count', 'debug', 'write_begin_bulk']: # scalars
                ## do nothing on purpose
                #hdf5_file.create_dataset(h5attr, data=value)
            #elif h5attr in ['reject_lines', 'begin_bulk', 'lines', 'output_lines']:
                 # lists of strings
                #if len(value) == 0:
                    #continue
                #value_bytes = [line.encode(encoding) for line in value]
                ##print(value_bytes)
                #hdf5_file.create_dataset(h5attr, data=value_bytes)
            #elif h5attr == 'subcases':
                #keys = list(self.subcases.keys())
                #subcase_group = hdf5_file.create_group('subcases')
                #subcase_group.create_dataset('keys', data=keys)
                #for key, subcase in self.subcases.items():
                    #sub_group = subcase_group.create_group(str(key))
                    #subcase.export_to_hdf5(subcase_group, encoding)
            else: # pragma: no cover
                # NOTE(review): `key`/`value` here are loop leftovers from the
                # params branch; this diagnostic may NameError if params was
                # never iterated — confirm intent.
                print(key, value)
                raise RuntimeError('cant export to hdf5 Subcase/%s' % h5attr)
def __deepcopy__(self, memo):
"""
Custom method for copy.deepcopy to improve speed by more than
2x (tested with timeit).
Default method is a bit slow for a list of lists and can take a
long time to read a bdf with many subcases this method removes
some of the overhead if the subcase is the default subcase, it
is shallow copied instead this greatly improves bdf read speed,
since it avoids deepcopying large sets defined in the default
subcase for every subcase that is defined.
"""
_copy = self.__copy__()
memo[id(self)] = _copy
if _copy.id == 0:
return _copy
def _deepcopy(lst):
"""
Deep copies objects that aren't lists; references lists.
"""
_cpy = list(lst)
for i in range(len(_cpy)):
cpyi = _cpy[i]
if isinstance(cpyi, list):
_cpy[i] = _deepcopy(cpyi)
return _cpy
params = _copy.params
for key, val in self.params.items():
if isinstance(val, list):
val = _deepcopy(val)
params[key] = val
return _copy
def __copy__(self):
_copy = self.__class__()
_copy.id = self.id
_copy.sol = self.sol
_copy.log = self.log
_copy.params.update(self.params)
return _copy
    def deprecated(self, old_name: str, new_name: str, deprecated_version: str) -> None:
        """
        Throws a deprecation message and crashes if past a specific version.

        Parameters
        ----------
        old_name : str
            the old function name
        new_name : str
            the new function name
        deprecated_version : float
            the version the method was first deprecated in
        """
        # delegates to the module-level ``deprecated`` helper; ``levels``
        # controls which caller stack frames are reported
        return deprecated(old_name, new_name, deprecated_version, levels=[0, 1, 2])
    def add_op2_data(self, data_code, msg, log):
        """
        Infers case control commands (DISPLACEMENT, STRESS, FORCE, TITLE,
        ...) from an OP2 table's data_code dict and adds them to this
        subcase, dispatching on table_name/table_code.

        >>> self.subcase.add_op2_data(self.data_code, 'VECTOR')
        """
        assert log is not None, log
        #subtable_name = data_code['subtable_name']
        table_name = data_code['table_name']
        if not isinstance(table_name, str):
            # table_name is a byte string
            table_name = table_name.decode('latin1')
        else:
            # NOTE(review): an already-decoded str table_name always raises
            # here -- confirm callers always pass bytes
            raise NotImplementedError('table_name=%r' % table_name)
        table_code = data_code['table_code']
        unused_sort_code = data_code['sort_code']
        unused_device_code = data_code['device_code']
        #print(data_code)
        #print('table_name=%r table_code=%s sort_code=%r device_code=%r' % (
            #table_name, table_code, sort_code, device_code))
        table_name = table_name.strip()
        #if 'TITLE' in
        #print(data_code)
        #print(f'table_name={table_name!r} type={type(table_name)}')
        options = []
        # string-type identification cards
        if data_code['title']:
            self.add('TITLE', data_code['title'], options, 'STRING-type')
        if data_code['subtitle']:
            self.add('SUBTITLE', data_code['subtitle'], options, 'STRING-type')
        if data_code['label']:
            self.add('LABEL', data_code['label'], options, 'STRING-type')

        # displacement-like tables
        if table_name in ['OUGV1', 'BOUGV1', 'OUGV2', 'OUG1', 'OUGV1PAT']:
            if table_code == 1:
                thermal = data_code['thermal']
                if thermal == 0:
                    self.add('DISPLACEMENT', 'ALL', options, 'STRESS-type')
                elif thermal == 1:
                    self.add('ANALYSIS', 'HEAT', options, 'KEY-type')
                else:
                    self._write_op2_error_msg(log, self.log, msg, data_code)
            elif table_code == 7:
                self.add('VECTOR', 'ALL', options, 'STRESS-type')
            elif table_code == 10:
                self.add('VELOCITY', 'ALL', options, 'STRESS-type')
            elif table_code == 11:
                self.add('ACCELERATION', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name == 'OAG1':
            if table_code == 11:
                thermal = data_code['thermal']
                assert thermal == 0, data_code
                self.add('ACCELERATION', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)

        # OAG-random
        elif table_name in ['OAGPSD1', 'OAGPSD2']:
            options = ['PSDF']
            self.add('ACCELERATION', 'ALL', options, 'STRESS-type')
        elif table_name in ['OAGCRM1', 'OAGCRM2']:
            options = ['CRM']
            self.add('ACCELERATION', 'ALL', options, 'STRESS-type')
        elif table_name in ['OAGRMS1', 'OAGRMS2']:
            options = ['RMS']
            self.add('ACCELERATION', 'ALL', options, 'STRESS-type')
        elif table_name in ['OAGNO1', 'OAGNO2']:
            options = ['NO']
            self.add('ACCELERATION', 'ALL', options, 'STRESS-type')

        elif table_name == 'TOUGV1':
            thermal = data_code['thermal']
            if thermal == 1:
                self.add('ANALYSIS', 'HEAT', options, 'KEY-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['ROUGV1', 'ROUGV2']:
            thermal = data_code['thermal']
            if thermal == 0:
                self.add('DISPLACEMENT', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OPHIG', 'BOPHIG', 'BOPHIGF']:
            if table_code == 7:
                self.add('ANALYSIS', 'HEAT', options, 'KEY-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name == 'OUPV1':
            if table_code == 1:
                self.add('SDISPLACEMENT', 'ALL', options, 'STRESS-type')
            elif table_code == 10:
                self.add('SVELOCITY', 'ALL', options, 'STRESS-type')
            elif table_code == 11:
                self.add('SACCELERATION', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name == 'OPHSA':
            if table_code == 14:
                self.add('SVECTOR', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OUXY1', 'OUXY2']:
            if table_code == 15:
                self.add('SDISPLACEMENT', 'ALL', options, 'STRESS-type')
            elif table_code == 16:
                self.add('SVELOCITY', 'ALL', options, 'STRESS-type')
            elif table_code == 17:
                self.add('SACCELERATION', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)

        # constraint forces
        elif table_name in ['OQG1', 'OQG2', 'OQGV1']:
            if table_code == 3:
                self.add('SPCFORCES', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)

        # element forces
        elif table_name in ['OEF1X', 'OEF1', 'RAFCONS', 'RAFEATC']:
            if table_code in [4]:
                self.add('FORCE', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OEF2']:
            options.append('SORT2')
            if table_code == 4:
                self.add('FORCE', 'ALL', options, 'STRESS-type')
            else:
                self._write_op2_error_msg(log, self.log, msg, data_code)

        # FORCE-random
        elif table_name in ['OEFATO1', 'OEFATO2']:
            options.append('PSDF')
            self.add('FORCE', 'ALL', options, 'STRESS-type')
        elif table_name in ['OEFCRM1', 'OEFCRM2']:
            options.append('CRM')
            self.add('FORCE', 'ALL', options, 'STRESS-type')
        elif table_name in ['OEFRMS1', 'OEFRMS2']:
            options.append('RMS')
            self.add('FORCE', 'ALL', options, 'STRESS-type')
        elif table_name in ['OEFNO1', 'OEFNO2']:
            options.append('NO')
            self.add('FORCE', 'ALL', options, 'STRESS-type')
        elif table_name in ['OEFPSD1', 'OEFPSD2']:
            options.append('PSDF')
            self.add('FORCE', 'ALL', options, 'STRESS-type')

        # STRESS-random
        elif table_name in ['OESATO1', 'OESATO2']:
            options.append('PSDF')
            self.add('STRESS', 'ALL', options, 'STRESS-type')
        elif table_name in ['OESCRM1', 'OESCRM2']:
            options.append('CRM')
            self.add('STRESS', 'ALL', options, 'STRESS-type')
        elif table_name in ['OESRMS1', 'OESRMS2', 'OESXRMS1']:
            options.append('RMS')
            self.add('STRESS', 'ALL', options, 'STRESS-type')
        elif table_name in ['OESNO1', 'OESNO2', 'OESXNO1']:
            options.append('NO')
            self.add('STRESS', 'ALL', options, 'STRESS-type')
        elif table_name in ['OESPSD1', 'OESPSD2']:
            options.append('PSDF')
            self.add('STRESS', 'ALL', options, 'STRESS-type')

        # STRAIN-random
        elif table_name in ['OSTRATO1', 'OSTRATO2']:
            options.append('PSDF')
            self.add('STRAIN', 'ALL', options, 'STRESS-type')
        elif table_name in ['OSTRCRM1', 'OSTRCRM2']:
            options.append('CRM')
            self.add('STRAIN', 'ALL', options, 'STRESS-type')
        elif table_name in ['OSTRRMS1', 'OSTRRMS2']:
            options.append('RMS')
            self.add('STRAIN', 'ALL', options, 'STRESS-type')
        elif table_name in ['OSTRNO1', 'OSTRNO2']:
            options.append('NO')
            self.add('STRAIN', 'ALL', options, 'STRESS-type')
        elif table_name in ['OSTRPSD1', 'OSTRPSD2']:
            options.append('PSDF')
            self.add('STRAIN', 'ALL', options, 'STRESS-type')

        elif table_name in ['OEFIT', 'OEFITSTN']:
            if table_code in [25]:
                self.add('FORCE', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OQMG1', 'OQMG2']:
            if table_code in [3, 39]:
                self.add('MPCFORCES', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OGPFB1', 'RAGCONS', 'RAGEATC']:
            if table_code == 19:
                self.add('GPFORCE', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)

        # stress
        elif table_name in ['OES1', 'OES1X', 'OES1X1', 'OES1C', 'OESCP',
                            'OESNL2', 'OESNLXD', 'OESNLXR', 'OESNLBR', 'OESTRCP',
                            'OESVM1', 'OESVM1C', 'OESNL1X',
                            'OESNLXR2', 'RASCONS', 'RASEATC']:
            #assert data_code['is_stress_flag'] == True, data_code
            options.append('SORT1')
            if table_code == 5:
                self.add('STRESS', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OES2', 'OES2C', 'OESVM2', ]:
            options.append('SORT2')
            if table_code == 5:
                self.add('STRESS', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OESXRM1C']:
            if table_code == 805:
                self.add('STRESS', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OESXNO1C']:
            if table_code == 905:
                self.add('STRESS', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OSTR2', 'OSTR2C']:
            options.append('SORT2')
            if table_code == 5:
                self.add('STRAIN', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OESRT']:
            #assert data_code['is_stress_flag'] == True, data_code
            if table_code in [25, 89]:
                self.add('STRESS', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OCRUG']:
            if table_code in [1]:
                self.add('DISP', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)

        # strain
        elif table_name in ['OSTR1X', 'OSTR1C', 'OSTR1', 'RAECONS', 'RAEEATC']:
            assert data_code['is_strain_flag'] is True, data_code
            if table_code == 5:
                self.add('STRAIN', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)
        elif table_name in ['OSTRVM1', 'OSTRVM1C', 'OSTRVM2']:
            #assert data_code['is_stress_flag'] == True, data_code
            if table_code == 5:
                self.add('STRAIN', 'ALL', options, 'STRESS-type')
            else:  # pragma: no cover
                self._write_op2_error_msg(log, self.log, msg, data_code)

        # special tables
        elif table_name in ['RADCONS', 'RADEFFM', 'RADEATC', 'RAPEATC', 'RAQEATC', 'RADCONS',
                            'RASEATC', 'RAFEATC', 'RAEEATC', 'RANEATC', 'RAGEATC', 'RAQCONS',
                            'RAPCONS']:
            pass
        elif table_name in ['OUGPSD2',
                            'OSTRNO1', 'OSTNO1C', 'OSTRNO1C',
                            'OSTRMS1C', 'OSTRRMS1', 'OSTRRMS1C',
                            'OQMPSD2']:
            pass
        else:  # pragma: no cover
            self._write_op2_error_msg(log, self.log, msg, data_code)
        #print(self)
def _write_op2_error_msg(self, log, log_error, msg, data_code):
if log is not None:
log.error(msg)
log.error(data_code)
elif log_error is not None:
log_error.error(msg)
log_error.error(data_code)
else: # pragma: no cover
# log_error is None
print('Error calling subcase.add_op2_data...')
print(msg)
print(data_code)
raise RuntimeError(data_code)
raise RuntimeError(data_code)
def __contains__(self, param_name: str) -> bool:
"""
Checks to see if a parameter name is in the subcase.
Parameters
----------
param_name : str
the case control parameters to check for
.. code-block:: python
model = BDF()
model.read_bdf(bdf_filename)
case_control = model.case_control_deck
subcase1 = case_control.subcases[1]
if 'LOAD' in subcase1:
print('found LOAD for subcase 1')
"""
if param_name in self.params:
return True
return False
def has_parameter(self, *param_names) -> List[bool]:
"""
Checks to see if one or more parameter names are in the subcase.
Parameters
----------
param_names : str; List[str]
the case control parameters to check for
Returns
-------
exists : List[bool]
do the parameters exist
.. code-block:: python
model = BDF()
model.read_bdf(bdf_filename)
case_control = model.case_control_deck
subcase1 = case_control.subcases[1]
if any(subcase1.has_parameter('LOAD', 'TEMPERATURE(LOAD)')):
print('found LOAD for subcase 1')
"""
exists = [param_name.upper() in self.params
for param_name in param_names]
return exists
    def __getitem__(self, param_name):
        """
        Gets the [value, options] for a subcase.

        Parameters
        ----------
        param_name : str
            the case control parameters to get

        Returns
        -------
        value : varies
            the value of the parameter
            'ALL' in STRESS(PLOT,PRINT) = ALL
        options : List[varies]
            the values in parentheses
            ['PLOT', 'PRINT'] in STRESS(PLOT,PRINT) = ALL

        Raises
        ------
        KeyError
            if the parameter is not in the subcase (see ``get_parameter``)

        .. code-block:: python

            model = BDF()
            model.read_bdf(bdf_filename)
            case_control = model.case_control_deck
            subcase1 = case_control.subcases[1]
            value, options = subcase1['LOAD']
        """
        # thin delegation; abbreviation expansion happens in get_parameter
        return self.get_parameter(param_name)
def suppress_output(self, suppress_to='PLOT'):
"""
Replaces F06 printing with OP2 printing
Converts:
STRESS(PRINT,SORT1,REAL)
FORCE(PRINT,PLOT,SORT1,REAL)
to:
STRESS(PLOT,SORT1,REAL)
FORCE(PLOT,SORT1,REAL)
.. warning:: needs more validation
"""
for key, param in self.params.items():
(unused_value, options, unused_param_type) = param
if key in INT_CARDS or key in ('SUBTITLE', 'LABEL', 'TITLE', 'ECHO'):
pass
elif key in PLOTTABLE_TYPES:
if suppress_to not in options:
param[1].append(suppress_to)
if 'PRINT' in options:
param[1].remove('PRINT')
else:
raise NotImplementedError(key)
def get_parameter(self, param_name, msg='', obj=False):
"""
Gets the [value, options] for a subcase.
Parameters
----------
param_name : str
the case control parameters to get
obj : bool; default=False
should the object be returned
Returns
-------
value : varies
the value of the parameter
'ALL' in STRESS(PLOT,PRINT) = ALL
options : List[varies]
the values in parentheses
['PLOT', 'PRINT'] in STRESS(PLOT,PRINT) = ALL
.. code-block:: python
model = BDF()
model.read_bdf(bdf_filename)
case_control = model.case_control_deck
subcase1 = case_control.subcases[1]
value, options = subcase1['LOAD']
"""
param_name = update_param_name(param_name)
if param_name not in self.params:
raise KeyError('%s doesnt exist in subcase=%s in the case '
'control deck%s.' % (param_name, self.id, msg))
value, options, param_type = self.params[param_name]
#print('param_name=%r value=%s options=%s param_type=%r' % (
#param_name, value, options, param_type))
if param_type == 'OBJ-type' and not obj:
return value.value, options
return value, options
def _validate_param_type(self, param_type):
if param_type not in self.allowed_param_types:
msg = (
f'param_type={param_type!r} is not supported\n'
f' allowed_types={self.allowed_param_types}\n'
' - SET-type: SET 5 = 1,2,3,4\n'
' - CSV-type: PARAM,FIXEDB,-1\n'
' - KEY-type: ANALYSIS = HEAT\n'
' - STRESS-type: LOAD = 5\n'
' - STRESS-type: STRESS = ALL\n'
' - STRESS-type: STRESS(PLOT) = ALL\n'
' - STRESS-type: DISP(PLOT) = ALL\n'
' - STRING-type: TITLE = SOME TITLE\n'
' - OBJ-type: ???\n'
)
raise TypeError(msg)
    def add(self, key, value, options, param_type):
        """Validates *param_type*, then stores the parameter on this subcase
        (replacing any existing entry with the same key)."""
        self._validate_param_type(param_type)
        self._add_data(key, value, options, param_type)
    def update(self, key, value, options, param_type):
        """Like ``add``, but requires that *key* already exists on this
        subcase (asserts otherwise)."""
        self._validate_param_type(param_type)
        assert key in self.params, 'key=%r is not in isubcase=%s' % (key, self.id)
        self._add_data(key, value, options, param_type)
def _add_data(self, key, value, options, param_type):
key = update_param_name(key)
if key == 'ANALYSIS' and value == 'FLUT':
value = 'FLUTTER'
#print("adding isubcase=%s key=%r value=%r options=%r "
#"param_type=%r" %(self.id, key, value, options, param_type))
if isinstance(value, str) and value.isdigit():
value = int(value)
if param_type == 'OBJ-type':
self.params[key] = value
else:
(key, value, options) = self._simplify_data(key, value, options, param_type)
self.params[key] = [value, options, param_type]
    def _simplify_data(self, key, value, options, param_type):
        """
        Normalizes (key, value, options) per param_type before storage.

        SET-type: expands THRU ranges in *value* and forces *options*
        (the SET id) to an int.  Other types are passed through mostly
        unchanged.

        Raises
        ------
        TypeError
            SET-type with a list *options*, or a list/ndarray *value*
            for STRESS-like types
        RuntimeError
            if called with an OBJ-type (handled by ``_add_data``)
        """
        if param_type == 'SET-type':
            #print("adding isubcase=%s key=%r value=%r options=%r "
                  #"param_type=%r" % (self.id, key, value, options, param_type))
            #print("adding isubcase=%s key=%r value=%r options=%r "
                  #"param_type=%r" % (self.id, key, value, options, param_type))
            values2 = expand_thru_case_control(value)
            assert isinstance(values2, list), type(values2)
            if isinstance(options, list):
                msg = 'invalid type for options=%s value; expected an integer; got a list' % key
                raise TypeError(msg)
            # options holds the SET id (e.g. the 5 in "SET 5 = 1,2,3")
            options = int(options)
            return (key, values2, options)
        elif param_type == 'CSV-type':
            #print("adding isubcase=%s key=%r value=|%s| options=|%s| "
            #      "param_type=%s" %(self.id, key, value, options, param_type))
            # NOTE(review): this assignment is a no-op; an int() conversion
            # may have been intended -- confirm before changing
            if value.isdigit():  # PARAM,DBFIXED,-1
                value = value
        #elif param_type == 'OBJ-type':
            ##self.params[value.type] = value
            #return value.type,
        elif param_type == 'OBJ-type':
            raise RuntimeError('this function should never be called with an OBJ-type...')
        else:
            #if 0:
                #a = 'key=%r' % key
                #b = 'value=%r' % value
                #c = 'options=%r' % options
                #d = 'param_type=%r' % param_type
                #print("_adding isubcase=%s %-18s %-12s %-12s %-12s" %(self.id, a, b, c, d))
            if isinstance(value, integer_types) or value is None:
                pass
            elif isinstance(value, (list, ndarray)):  # new???
                msg = 'invalid type for key=%s value; expected an integer; got a list' % key
                raise TypeError(msg)
            # NOTE(review): another no-op assignment (see CSV-type above)
            elif value.isdigit():  # STRESS = ALL
                value = value
            #else: pass
        return key, value, options
    def get_op2_data(self, sol, solmap_to_value):
        """
        Estimates the OP2 codes (table/analysis/device/sort/format codes)
        implied by this subcase's case control commands.

        Parameters
        ----------
        sol : int
            the solution number (e.g. 101); SOL 200 is mapped via ANALYSIS
        solmap_to_value : Dict[str, int]
            maps an ANALYSIS value (e.g. 'STATICS') to a solution number

        .. note:: NOTE(review): this method builds ``op2_params`` but has no
           return statement; several keys it appends to ('approach_codes',
           'tcodes') are never initialized, and ``op2_params['isubcase']``
           is None yet ``.append``ed for SUBCASE-type entries -- those
           paths would raise.  Confirm intended behavior before use.
        """
        self.sol = sol
        label = 'SUBCASE %s' % (self.id)
        op2_params = {
            'isubcase': None, 'tables': [], 'analysis_codes': [],
            'device_codes': [], 'sort_codes': [], 'table_codes': [],
            'label': label, 'subtitle': None, 'title': None,
            'format_codes': [], 'stress_codes': [], 'thermal': None}

        # result-request keywords that produce OP2 tables
        results = ['DISPLACEMENT', 'EKE', 'EDE', 'ELSDCON', 'ENTHALPY',
                   'EQUILIBRIUM', 'ESE', 'FLUX', 'FORCE', 'GPFORCE', 'GPKE',
                   'GPSDCON', 'GPSTRAIN', 'GPSTRESS', 'HOUTPUT', 'MODALKE',
                   'MODALSE', 'MPCFORCES', 'NLSTRESS', 'NOUTPUT', 'OLOAD',
                   'PFGRID', 'PFMODE', 'PFPANEL', 'RCROSS', 'RESVEC',
                   'SACCELERATION', 'SDISPACEMENT', 'SPCFORCES', 'STRAIN',
                   'STRESS', 'SVECTOR', 'SVELOCITY', 'THERMAL', 'VECTOR',
                   'VELOCITY', 'VUGRID', 'WEIGHTCHECK']

        # converts from solution 200 to solution 144
        if self.sol == 200 or 'ANALYSIS' in self.params:
            param = self.params['ANALYSIS']
            (value, options, param_type) = param

            sol = solmap_to_value[value.upper()]
            #print("***value=%s sol=%s" % (value, sol))
        else:  # leaves SOL the same
            sol = self.sol

        if sol in self.solCodeMap:  # reduces SOL 144 to SOL 101
            sol = self.solCodeMap[sol]

        # NOTE(review): this first loop only unpacks and discards each
        # param -- it has no effect
        for (key, param) in self.params.items():
            key = key.upper()
            (value, options, param_type) = param
            #msg = ("  -key=|%s| value=|%s| options=%s param_type=|%s|"
            #    % (key, value, options, param_type))

        thermal = 0
        for (key, param) in self.params.items():
            key = key.upper()
            (value, options, param_type) = param
            #msg = ("  *key=|%s| value=|%s| options=%s param_type=|%s|"
            #    % (key, value, options, param_type)
            #print(msg)
            #msg += self.printParam(key, param)

            if param_type == 'SUBCASE-type':
                op2_params['isubcase'].append(value)
            elif key in ['BEGIN', 'ECHO', 'ANALYSIS'] or 'SET' in key:
                pass
            elif key == 'TEMPERATURE':
                thermal = 1
            elif key in results:
                sort_code = get_sort_code(options, value)
                device_code = get_device_code(options, value)

                if key in ['STRESS', 'STRAIN']:
                    stress_code = get_stress_code(key, options, value)
                    op2_params['stress_codes'].append(stress_code)
                else:
                    op2_params['stress_codes'].append(0)

                format_code = get_format_code(options, value)
                table_code = get_table_code(sol, key, options)
                analysis_code = get_analysis_code(sol)

                approach_code = analysis_code * 10 + device_code
                tcode = table_code * 1000 + sort_code
                op2_params['tables'].append(key)
                op2_params['analysis_codes'].append(analysis_code)
                op2_params['approach_codes'].append(approach_code)
                op2_params['device_codes'].append(device_code)
                op2_params['format_codes'].append(format_code)
                op2_params['sort_codes'].append(sort_code)
                op2_params['table_codes'].append(table_code)
                op2_params['tcodes'].append(tcode)
                #analysisMethod = value

            #elif key in ['ADACT', 'ADAPT', 'AERCONFIG', 'TITLE', 'SUBTITLE',
            #             'LABEL', 'LOAD', 'SUPORT', 'SUPORT1', 'MPC', 'SPC',
            #             'TSTEPNL', 'NLPARM', 'TRIM', 'GUST', 'METHOD',
            #             'DESOBJ', 'DESSUB', 'FMETHOD', 'SEALL']:
            else:
                op2_params[key.lower()] = value
            #else:
            #    raise NotImplementedErrror('unsupported entry...\n%s' %(msg))
        op2_params['thermal'] = thermal

        #print("\nThe estimated results...")
        #for (key, value) in sorted(op2_params.items()):
            #if value is not None:
                #print("  key=%r value=%r" % (key, value))
    def print_param(self, key, param):
        """
        Prints a single entry of a subcase from the global or local
        subcase list.

        Parameters
        ----------
        key : str
            the parameter name (e.g. 'STRESS', 'TITLE')
        param : List[value, options, param_type]
            the stored parameter entry

        Returns
        -------
        msg : str
            the formatted case control line(s), newline terminated

        Raises
        ------
        NotImplementedError
            if param_type is not a supported category
        """
        msg = ''
        #msg += 'id=%s   ' %(self.id)
        (value, options, param_type) = param

        # local subcases (id > 0) are indented below the SUBCASE line
        spaces = ''
        if self.id > 0:
            spaces = '    '

        #print('key=%s param=%s param_type=%s' % (key, param, param_type))
        if param_type == 'SUBCASE-type':
            if self.id > 0:
                msgi = 'SUBCASE %s\n' % (self.id)
                assert len(msgi) < 72, 'len(msg)=%s; msg=\n%s' % (len(msgi), msgi)
                msg += msgi
            #else global subcase ID=0 and is not printed
            #    pass
        elif param_type == 'KEY-type':
            #print (KEY-TYPE:  %r" % value)
            assert value is not None, param
            if ',' in value:
                # wrap comma-separated values, one per continuation line
                sline = value.split(',')
                two_spaces = ',\n' + 2 * spaces
                msgi = spaces + two_spaces.join(sline) + '\n'
                assert len(msgi) < 68, 'len(msg)=%s; msg=\n%s' % (len(msgi), msgi)
                msg += msgi
            else:
                msgi = spaces + '%s\n' % value
                #assert len(msgi) < 68, 'len(msg)=%s; msg=\n%s' % (len(msgi), msgi)
                msg += msgi
        elif param_type == 'STRING-type':
            msgi = spaces + '%s = %s\n' % (key, value)
            # titles may legitimately be long; others are length checked
            if key not in ['TITLE', 'LABEL', 'SUBTITLE']:
                assert len(msgi) < 68, 'len(msg)=%s; msg=\n%s' % (len(msgi), msgi)
            msg += msgi
        elif param_type == 'CSV-type':
            msgi = spaces + '%s,%s,%s\n' % (key, value, options)
            assert len(msgi) < 68, 'len(msg)=%s; msg=\n%s' % (len(msgi), msgi)
            msg += msgi
        elif param_type == 'STRESS-type':
            msg += write_stress_type(key, options, value, spaces)
        elif param_type == 'SET-type':
            #: .. todo:: collapse data...not written yet
            msg += write_set(options, value, spaces)
        elif param_type == 'OBJ-type':
            msg += value.write(spaces)
        else:
            # SET-type is not supported yet...
            msg = ('\nkey=%r param=%r\n'
                   'allowed_params=[SET-type, STRESS-type, STRING-type, SUBCASE-type, KEY-type]\n'
                   'CSV-type     -> PARAM,FIXEDB,-1\n'
                   'KEY-type     -> ???\n'  # the catch all
                   'SET-type     -> SET 99 = 1234\n'
                   'SUBCASE-type -> ???\n'
                   'STRESS-type  -> DISP(PLOT, SORT1)=ALL\n'
                   '                STRESS(PLOT, SORT1)=ALL\n'
                   'STRING-type  -> LABEL = A label\n'
                   '                TITLE = A title\n'
                   '                SUBTITLE = A subtitle\n'
                   ''% (key, param))
            raise NotImplementedError(msg)

        #print("msg = %r" % (msg))
        return msg
#def cross_reference(self, model):
#"""
#Method crossReference:
#Parameters
#----------
#model : BDF()
#the BDF object
#.. note:: this is not integrated and probably never will be as it's
#not really that necessary. it's only really useful when running an
#analysis.
#"""
#raise NotImplementedError()
#print("keys = %s" % (sorted(self.params.keys())))
#if 'LOAD' in self.params:
#load_id = self.params['LOAD'][0]
#load_obj = model.loads[load_id]
#load_obj.cross_reference(model)
#if 'SUPORT' in self.params:
#pass
#if 'MPC' in self.params:
##mpc_id = self.params['MPC'][0]
##mpc_obj = model.mpcs[mpc_id]
##mpc_obj.cross_reference(model)
#pass
#if 'SPC' in self.params:
##spcID = self.params['SPC'][0]
##print "SPC ID = %r" % spcID
##spcObj = model.spcObject
##spcObj.cross_reference(spcID, model)
#pass
#if 'TSTEPNL' in self.params:
#tstepnl_id = self.params['TSTEPNL'][0]
#tstepnl_obj = model.tstepnl[tstepnl_id]
#tstepnl_obj.cross_reference(model)
#if 'NLPARM' in self.params:
#nlparm_id = self.params['NLPARM'][0]
#nlparm_obj = model.nlparms[nlparm_id]
#nlparm_obj.cross_reference(model)
#if 'TRIM' in self.params:
#trim_id = self.params['TRIM'][0]
#trim_obj = model.trims[trim_id]
#trim_obj.cross_reference(model)
#if 'GUST' in self.params:
#gust_id = self.params['GUST'][0]
#gust_obj = model.gusts[gust_id]
#gust_obj.cross_reference(model)
#if 'DLOAD' in self.params: # ???
#pass
def finish_subcase(self):
"""
Removes the subcase parameter from the subcase to avoid printing it in
a funny spot
"""
if 'SUBCASE' in self.params:
del self.params['SUBCASE']
#print "self.params %s = %s" %(self.id,self.params)
    def write_subcase(self, subcase0):
        """
        Internal method to print a subcase

        Parameters
        ----------
        subcase0 : Subcase()
            the global Subcase object; parameters identical to the global
            ones are not repeated

        Returns
        -------
        msg : str
            the string of the current Subcase
        """
        if self.id == 0:
            msg = str(self)
        else:
            msg = 'SUBCASE %s\n' % self.id
            nparams = 0
            for (key, param) in self.subcase_sorted(self.params.items()):
                if key in subcase0.params and subcase0.params[key] == param:
                    pass  # dont write global subcase parameters
                else:
                    #print("key=%s param=%s" %(key, param))
                    #(unused_value, unused_options, unused_param_type) = param
                    #print("  *key=%r value=%r options=%s "
                          #"param_type=%r" % (key, value, options, param_type))
                    msg += self.print_param(key, param)
                    nparams += 1
                    #print ""
            # if everything matched the global subcase, print all the
            # params anyway so the subcase is not empty
            if nparams == 0:
                for (key, param) in self.subcase_sorted(self.params.items()):
                    #print("key=%s param=%s" %(key, param))
                    #(unused_value, unused_options, unused_param_type) = param
                    #print("  *key=%r value=%r options=%s "
                          #"param_type=%r" % (key, value, options, param_type))
                    msg += self.print_param(key, param)
                    nparams += 1
                assert nparams > 0, 'No subcase parameters are defined for isubcase=%s...' % self.id
        return msg
def subcase_sorted(self, lst):
"""
Does a "smart" sort on the keys such that SET cards increment in
numerical order. Also puts the sets first.
Parameters
----------
lst : List[str]
the list of subcase list objects (list_a)
Returns
-------
list_b : List[str]
the sorted version of list_a
"""
# presort the list to put all the SET cards next to each other
# instead of list_a.sort() as it allows lst to be any iterable
lst = sorted(lst)
i = 0 # needed in case the loop doesn't execute
iset = None # index of first SET card in the deck
set_dict = {}
list_before = []
set_keys = []
for (i, entry) in enumerate(lst):
key = entry[0] # type: str
if 'SET' in key[0:3]:
if key == 'SET': # handles "SET = ALL"
key = 0
else: # handles "SET 100 = 1,2,3"
sline = key.split(' ')
try:
key = int(sline[1])
except:
msg = 'error caclulating key; sline=%s' % sline
raise RuntimeError(msg)
# store the integer ID and the SET-type list
set_dict[key] = entry
set_keys.append(key)
else:
# only store the entries before the SET cards
list_before.append(entry)
if iset:
break
# grab the other entries
list_after = lst[i + 1:]
# write the SET cards in a sorted order
set_list = []
for key in sorted(set_keys):
set_list.append(set_dict[key])
# combine all the cards
list_b = set_list + list_before + list_after
return list_b
def __repr__(self):
"""
Prints out EVERY entry in the subcase. Skips parameters already in
the global subcase.
.. note:: this function is only used for debugging.
"""
#msg = "-------SUBCASE %s-------\n" %(self.id)
msg = ''
if self.id > 0:
msg += 'SUBCASE %s\n' % self.id
nparams = 0
for key, param in self.subcase_sorted(self.params.items()):
#(unused_value, unused_options, unused_param_type) = param
#print('key=%r value=%s options=%s' % (key, value, options))
msg += self.print_param(key, param)
nparams += 1
if self.id > 0:
assert nparams > 0, 'No subcase parameters are defined for isubcase=%s...' % self.id
return msg
def _load_hdf5_param(group, key, encoding):
    """
    Loads one case control parameter from an HDF5 'params' group
    (the inverse of ``Subcase.export_to_hdf5``).

    Parameters
    ----------
    group : h5py Group
        the 'params' group
    key : str
        the subgroup name; 'blank' maps back to the empty parameter name
    encoding : str
        codec used to decode bytes back into str

    Returns
    -------
    value, options, param_type : varies
        the stored parameter triple (any of them may be None)
    """
    from pyNastran.utils.dict_to_h5py import _cast
    #print('-----------------------------------------')
    #print(type(key), key)
    sub_group = group[key]
    keys = list(sub_group.keys())
    #print('subgroup.keys() =', sub_group.keys())
    if key == 'blank':
        key = ''

    if 'options' in sub_group:
        keys.remove('options')
        options = _cast(sub_group['options'])
        if isinstance(options, (integer_types, str)):
            pass
        else:
            # numpy array of (possibly bytes) options -> list of str
            options = options.tolist()
            options = [
                option.decode(encoding) if isinstance(option, bytes) else option
                for option in options]
    else:
        options = None

    param_type = None
    if 'param_type' in sub_group:
        param_type = _cast(sub_group['param_type'])
        keys.remove('param_type')
    #print('param_type ', param_type)

    value = None
    if 'value' in sub_group:
        keys.remove('value')
        value = _cast(sub_group['value'])
        if isinstance(value, bytes):
            value = value.decode(encoding)
        elif isinstance(value, (integer_types, str)):
            pass
        else:
            value = value.tolist()
    elif 'object' in sub_group:
        # OBJ-type parameter; rebuild the object via CLASS_MAP
        value, options = _load_hdf5_object(key, keys, sub_group, encoding)

    # any leftover datasets mean the file layout is not understood
    if len(keys) > 0:
        #keyi = _cast(sub_group['key'])
        #print('keyi = %r' % keyi)
        raise RuntimeError('keys = %s' % keys)
    #print(value, options, param_type)
    return value, options, param_type
def _load_hdf5_object(key, keys, sub_group, encoding):
    """
    Loads an OBJ-type case control parameter from an HDF5 group by
    rebuilding the object through ``CLASS_MAP``.

    Parameters
    ----------
    key : str
        the parameter name
    keys : List[str]
        the remaining dataset names of the parent group; 'object' is
        consumed here
    sub_group : h5py Group
        the parameter's group (contains the 'object' subgroup)
    encoding : str
        codec used to decode bytes back into str

    Returns
    -------
    value : object
        the rebuilt class instance
    options : varies
        the options loaded alongside the object (None if not stored)
    """
    import h5py
    from pyNastran.utils.dict_to_h5py import _cast
    keys.remove('object')
    sub_groupi = sub_group['object']
    Type = sub_groupi.attrs['type']
    cls = CLASS_MAP[Type]
    if hasattr(cls, 'load_hdf5'):
        # the class knows how to rebuild itself
        class_obj, options = cls.load_hdf5(sub_groupi, 'utf8')
        value = class_obj
        return value, options

    # BUG FIX: ``options`` was referenced below (``assert options is None``)
    # without ever being assigned when the group has no 'options' dataset,
    # raising UnboundLocalError on the data-driven path
    options = None
    use_data = True
    if 'options' in sub_groupi:
        options2 = _cast(sub_groupi['options']).tolist()
        value = _cast(sub_groupi['value'])
        #print('sub_keys =', sub_groupi, sub_groupi.keys())
        options_str = [
            option.decode(encoding) if isinstance(option, bytes) else option
            for option in options2]
        use_data = False

    data_group = sub_groupi['data']
    keys2 = _cast(data_group['keys']).tolist()
    h5_values = data_group['values']
    if isinstance(h5_values, h5py._hl.group.Group):
        # values stored as one dataset per index
        values2 = [None] * len(keys2)
        for ih5 in h5_values.keys():
            ih5_int = int(ih5)
            h5_value = _cast(h5_values[ih5])
            values2[ih5_int] = h5_value
    else:
        values2 = _cast(h5_values).tolist()

    #print('data_keys =', data_group, data_group.keys())
    unused_keys_str = [
        keyi.decode(encoding) if isinstance(keyi, bytes) else keyi
        for keyi in keys2]
    unused_values_str = [
        valuei.decode(encoding) if isinstance(valuei, bytes) else valuei
        for valuei in values2]
    #print('keys2 =', keys2)
    #print('values2 =', values2)
    #print('options2 =', options2)
    if use_data:
        #print('keys2 =', keys2)
        #print('values2 =', values2)
        data = []
        for keyi, valuei in zip(keys2, values2):
            data.append((keyi, valuei))
        class_obj = cls(data)
        assert options is None, options
    else:
        class_obj = cls(key, value, options_str)
        options = options_str
    value = class_obj
    #print(class_obj)
    #class_obj.load_hdf5_file(hdf5_file, encoding)
    return value, options
def update_param_name(param_name):
    """
    Takes an abbreviated name and expands it so the user can type DISP or
    DISPLACEMENT and get the same answer

    Parameters
    ----------
    param_name : str
        the parameter name to be standardized (e.g. DISP vs. DIPLACEMENT)

    Raises
    ------
    SyntaxError
        for the ambiguous 'GPST' (stress or strain?)

    .. todo:: not a complete list
    """
    param_name = param_name.strip().upper()

    # GPST could be GPSTRESS or GPSTRAIN; refuse to guess
    if param_name == 'GPST':
        raise SyntaxError('invalid GPST stress/strain')

    # exact-match aliases
    exact_map = {
        'DEFO': 'DEFORM',
        'DEFOR': 'DEFORM',
        'TEMPERATURE(INIT)': 'TEMPERATURE(INITIAL)',
    }
    if param_name in exact_map:
        return exact_map[param_name]

    # prefix aliases, checked in order; overlapping prefixes must keep
    # their relative order (MAXLINE before LINE, GPSTRE before GPSTR)
    prefix_map = (
        ('ACCE', 'ACCELERATION'),
        ('DESO', 'DESOBJ'),
        ('DESS', 'DESSUB'),
        ('DISP', 'DISPLACEMENT'),
        ('EXPO', 'EXPORTLID'),
        ('ELFO', 'FORCE'),
        ('ELST', 'STRESS'),  # or ELSTRESS
        ('FORC', 'FORCE'),
        ('FREQ', 'FREQUENCY'),
        ('GPFO', 'GPFORCE'),
        ('HARMONIC', 'HARMONICS'),
        ('METH', 'METHOD'),
        ('MPCF', 'MPCFORCES'),
        ('NLPAR', 'NLPARM'),
        ('OLOA', 'OLOAD'),
        ('PRES', 'PRESSURE'),
        ('SDAMP', 'SDAMPING'),
        ('SDISP', 'SDISPLACEMENT'),
        ('SMETH', 'SMETHOD'),
        ('SPCF', 'SPCFORCES'),
        ('STRA', 'STRAIN'),
        ('STRE', 'STRESS'),
        ('SUBT', 'SUBTITLE'),
        ('SUPO', 'SUPORT1'),
        ('SVEC', 'SVECTOR'),
        ('SVELO', 'SVELOCITY'),
        ('THER', 'THERMAL'),
        ('VECT', 'DISPLACEMENT'),  # or VECTOR
        ('VELO', 'VELOCITY'),
        ('TITL', 'TITLE'),
        ('MAXLINE', 'MAXLINES'),
        ('LINE', 'LINE'),
        ('AXISYM', 'AXISYMMETRIC'),
        ('SUBSE', 'SUBSEQ'),
        ('XTIT', 'XTITLE'),
        ('YTIT', 'YTITLE'),
        ('SACCE', 'SACCELERATION'),
        ('GPSTRE', 'GPSTRESS'),
        ('GPSTR', 'GPSTRAIN'),
    )
    for prefix, full_name in prefix_map:
        if param_name.startswith(prefix):
            return full_name

    #elif param_name.startswith('DFRE'):  param_name = 'D'

    # handled in caseControlDeck.py
    #elif param_name.startswith('TEMP'):  param_name = 'TEMPERATURE'
    #print('*param_name = ',param_name)
    return param_name
def get_analysis_code(sol):
    """
    Maps the solution number to the OP2 analysis code.

    * 8 - post-buckling (maybe 7 depending on NLPARM???)

    # not important
    * 3/4 - differential stiffness (obsolete)
    * 11 - old geometric nonlinear statics
    * 12 - contran (???)

    .. todo:: verify
    """
    sol_to_approach_code = {
        101: 1,   # statics
        103: 2,   # modes
        105: 7,   # pre-buckling
        106: 10,  # nonlinear statics
        107: 9,   # complex eigenvalues
        108: 5,   # frequency
        111: 5,
        112: 6,
        114: 1,
        115: 2,
        116: 7,
        118: 5,
        129: 6,   # nonlinear
        144: 1,   # static aero
        145: 1,
        146: 1,   # flutter
        153: 10,
        159: 6,   # transient thermal
    }
    # unknown solution numbers raise KeyError, same as the direct lookup
    return sol_to_approach_code[sol]
def get_device_code(options: Any, unused_value: Any) -> int:
    """
    Gets the device code of a given set of options and value

    Parameters
    ----------
    options : list[int/float/str]
        the options for a parameter
    unused_value : int/float/str
        the value of the parameter

    Returns
    -------
    device_code : int
        The OP2 device code; a bitwise combination of:
          1 - PRINT
          2 - PLOT
          4 - PUNCH
        so 3 = PRINT+PLOT, 5 = PRINT+PUNCH, 6 = PLOT+PUNCH,
        7 = PRINT+PLOT+PUNCH.
        If no output option is requested the code is floored to
        1 (PRINT), so 0 (no output) is never returned.
    """
    device_code = 0
    if 'PRINT' in options:
        device_code += 1
    if 'PLOT' in options:
        device_code += 2
    if 'PUNCH' in options:
        device_code += 4
    # default to PRINT when none of the output flags were given
    device_code = max(device_code, 1)
    #if device_code==0:
    #    device_code=1  # PRINT
    return device_code
def get_table_code(sol, table_name, unused_options):
    """
    Gets the table code of a given parameter. For example, the
    DISPLACMENT(PLOT,POST)=ALL makes an OUGV1 table and stores the
    displacement. This has an OP2 table code of 1, unless you're running a
    modal solution, in which case it makes an OUGV1 table of eigenvectors
    and has a table code of 7.

    Parameters
    ----------
    sol : int
        the solution number (e.g. 101 = linear statics, 103 = modes)
    table_name : str
        the standardized case control name (e.g. 'DISPLACEMENT');
        'VECTOR' and 'PRESSURE' are remapped to 'DISPLACEMENT'
    unused_options : list[int/float/str]
        the options for the parameter (not used)

    Returns
    -------
    table_code : int
        the OP2 table_code

    Raises
    ------
    KeyError
        if (sol, table_name) is not a supported combination
    """
    if table_name in ['VECTOR', 'PRESSURE']:
        table_name = 'DISPLACEMENT' # equivalent tables...
    key = (sol, table_name)
    # (SOL, table_name) -> OP2 table_code
    tables = {
        # SOL, table_name table_code
        (101, 'ACCELERATION'): 11,
        (103, 'ACCELERATION'): 11,
        (106, 'ACCELERATION'): 11,
        (107, 'ACCELERATION'): 11,
        (108, 'ACCELERATION'): 11,
        (129, 'ACCELERATION'): 11,
        #(144, 'ACCELERATION'): 11,
        (145, 'ACCELERATION'): 11,
        (146, 'ACCELERATION'): 11,
        (101, 'DISPLACEMENT'): 1,
        (103, 'DISPLACEMENT'): 7, # VECTOR
        (105, 'DISPLACEMENT'): 7,
        (106, 'DISPLACEMENT'): 1,
        (107, 'DISPLACEMENT'): 7,
        (108, 'DISPLACEMENT'): 1,
        (109, 'DISPLACEMENT'): 1,
        (111, 'DISPLACEMENT'): 7,
        (112, 'DISPLACEMENT'): 1,
        (129, 'DISPLACEMENT'): 7,
        #(144, 'DISPLACEMENT'): 1,
        (145, 'DISPLACEMENT'): 1,
        (146, 'DISPLACEMENT'): 1,
        (101, 'ESE'): 18, # energy
        (103, 'ESE'): 18, # energy
        (105, 'ESE'): 18, # energy
        (106, 'ESE'): 18, # energy
        (107, 'ESE'): 18, # energy
        (108, 'ESE'): 18, # energy
        (109, 'ESE'): 18, # energy
        (110, 'ESE'): 18, # energy
        (111, 'ESE'): 18, # energy
        (112, 'ESE'): 18, # energy
        (145, 'ESE'): 18, # energy
        (146, 'ESE'): 18, # energy
        (101, 'FORCE'): 3, # ???
        (103, 'FORCE'): 3, # ???
        (105, 'FORCE'): 3, # ???
        (106, 'FORCE'): 3, # ???
        (107, 'FORCE'): 4, # ???
        (108, 'FORCE'): 3, # ???
        (111, 'FORCE'): 3, # ???
        (112, 'FORCE'): 3, # ???
        (129, 'FORCE'): 3, # ???
        (145, 'FORCE'): 3, # ???
        (146, 'FORCE'): 3, # ???
        (101, 'GPFORCE'): 19,
        (105, 'GPFORCE'): 19,
        (106, 'GPFORCE'): 19,
        (107, 'GPFORCE'): 19,
        (108, 'GPFORCE'): 19,
        (111, 'GPFORCE'): 19,
        (112, 'GPFORCE'): 19,
        (129, 'GPFORCE'): 19,
        (145, 'GPFORCE'): 19,
        (146, 'GPFORCE'): 19,
        (101, 'GPSTRESS'): 20,
        (105, 'GPSTRESS'): 20,
        (106, 'GPSTRESS'): 20,
        (107, 'GPSTRESS'): 20,
        (108, 'GPSTRESS'): 20,
        (111, 'GPSTRESS'): 20,
        (112, 'GPSTRESS'): 20,
        (129, 'GPSTRESS'): 20,
        (145, 'GPSTRESS'): 20,
        (146, 'GPSTRESS'): 20,
        (101, 'GPSTRAIN'): 21,
        (105, 'GPSTRAIN'): 21,
        (106, 'GPSTRAIN'): 21,
        (107, 'GPSTRAIN'): 21,
        (108, 'GPSTRAIN'): 21,
        (111, 'GPSTRAIN'): 21,
        (112, 'GPSTRAIN'): 21,
        (129, 'GPSTRAIN'): 21,
        (145, 'GPSTRAIN'): 21,
        (146, 'GPSTRAIN'): 21,
        (101, 'MPCFORCES'): 3,
        (103, 'MPCFORCES'): 3,
        (106, 'MPCFORCES'): 3,
        (108, 'MPCFORCES'): 3,
        (112, 'MPCFORCES'): 3,
        (129, 'MPCFORCES'): 3,
        #(144, 'MPCFORCES'): 3,
        (145, 'MPCFORCES'): 3,
        (146, 'MPCFORCES'): 3,
        (101, 'OLOAD'): 2,
        (103, 'OLOAD'): 2,
        (105, 'OLOAD'): 2,
        (106, 'OLOAD'): 2,
        (107, 'OLOAD'): 2,
        (108, 'OLOAD'): 2,
        (111, 'OLOAD'): 2,
        (112, 'OLOAD'): 2,
        (129, 'OLOAD'): 2,
        #(144, 'OLOAD'): 2,
        (145, 'OLOAD'): 2,
        (146, 'OLOAD'): 2,
        (101, 'SPCFORCES'): 3,
        (103, 'SPCFORCES'): 3,
        (105, 'SPCFORCES'): 3,
        (106, 'SPCFORCES'): 3,
        (107, 'SPCFORCES'): 3,
        (108, 'SPCFORCES'): 3,
        (110, 'SPCFORCES'): 3,
        (111, 'SPCFORCES'): 3,
        (112, 'SPCFORCES'): 3,
        (129, 'SPCFORCES'): 3,
        #(144, 'SPCFORCES'): 3,
        (145, 'SPCFORCES'): 3,
        (146, 'SPCFORCES'): 3,
        (101, 'STRAIN'): 5, # 5/20/21 ???
        (105, 'STRAIN'): 5,
        (106, 'STRAIN'): 5,
        (107, 'STRAIN'): 5,
        (108, 'STRAIN'): 5,
        (110, 'STRAIN'): 5,
        (111, 'STRAIN'): 5,
        (112, 'STRAIN'): 5,
        (129, 'STRAIN'): 5,
        (145, 'STRAIN'): 5,
        (146, 'STRAIN'): 5,
        (101, 'STRESS'): 5, # 5/20/21 ???
        (103, 'STRESS'): 5,
        (105, 'STRESS'): 5,
        (106, 'STRESS'): 5,
        (107, 'STRESS'): 5,
        (108, 'STRESS'): 5,
        (111, 'STRESS'): 5,
        (112, 'STRESS'): 5,
        (129, 'STRESS'): 5,
        (145, 'STRESS'): 5,
        (146, 'STRESS'): 5,
        (145, 'SVECTOR'): 14,
        (101, 'FLUX'): 4,
        (103, 'FLUX'): 4,
        (106, 'FLUX'): 4,
        (112, 'FLUX'): 4,
        (108, 'FLUX'): 4,
        (153, 'FLUX'): 4,
        (159, 'FLUX'): 4,
        (101, 'THERMAL'): 3, # 3/4 ???
        (159, 'THERMAL'): 3, # 3/4 ???
        (101, 'VELOCITY'): 10,
        (103, 'VELOCITY'): 10,
        (106, 'VELOCITY'): 10,
        (107, 'VELOCITY'): 10,
        (108, 'VELOCITY'): 10,
        (111, 'VELOCITY'): 10,
        (112, 'VELOCITY'): 10,
        (129, 'VELOCITY'): 10,
        #(144, 'VELOCITY'): 10,
        (145, 'VELOCITY'): 10,
        (146, 'VELOCITY'): 10,
        (101, 'VUGRID'): 10,
    }
    #print("key=%s" % str(key))
    if key not in tables:
        raise KeyError(key)
    table_code = tables[key]
    return table_code
def get_sort_code(options, unused_value):
    """
    Gets the sort code of a given set of options and value

    Parameters
    ----------
    options : List[int/str]
        the options for a parameter
    unused_value : int; str
        the value of the parameter
    """
    # each recognized flag contributes one bit to the code
    flag_bits = (('COMPLEX', 1), ('SORT2', 2), ('RANDOM', 4))
    return sum(bit for flag, bit in flag_bits if flag in options)
def get_format_code(options: Any, unused_value: Any) -> int:
    """
    Gets the format code that will be used by the op2 based on
    the options.

    Parameters
    ----------
    options : list[int/float/str]
        the options for a parameter
    unused_value : int/float/str
        the value of the parameter

    .. todo:: not done...only supports REAL, IMAG, PHASE, not RANDOM
    """
    # each recognized option contributes one bit to the format code
    flag_bits = (('REAL', 1), ('IMAG', 2), ('PHASE', 4))
    format_code = sum(bit for flag, bit in flag_bits if flag in options)
    # fall back to REAL when no format option was given
    return max(format_code, 1)
def get_stress_code(key: str, options: Dict[str, Any], unused_value: Any) -> int:
    """
    Method get_stress_code:

    .. note:: the individual element must take the stress_code and reduce
        it to what the element can return. For example, for an isotropic
        CQUAD4 the fiber field doesnt mean anything.
        BAR - no von mises/fiber
        ISOTROPIC - no fiber

    .. todo:: how does the MATERIAL bit get turned on? I'm assuming it's
        element dependent...
    """
    # bit 0: von Mises vs max shear
    stress_code = 1 if 'VONMISES' in options else 0
    # strain requests set fields 2 and 4 (2 + 8 = 10)
    if key == 'STRAIN':
        stress_code += 10
    # bit 2: fiber distance
    if 'FIBER' in options:
        stress_code += 4
    #if 'MATERIAL' in options:
    #    stress_code += 16  material coord (1) vs element (0)
    return stress_code
| 36.94614 | 100 | 0.525196 |
e4255d246e3df3bade5c59f558e13be33c14e4bb | 11,331 | py | Python | alg/knockoffgan/KnockoffGAN.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 171 | 2021-02-12T10:23:19.000Z | 2022-03-29T01:58:52.000Z | alg/knockoffgan/KnockoffGAN.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 4 | 2021-06-01T08:18:33.000Z | 2022-02-20T13:37:30.000Z | alg/knockoffgan/KnockoffGAN.py | loramf/mlforhealthlabpub | aa5a42a4814cf69c8223f27c21324ee39d43c404 | [
"BSD-3-Clause"
] | 93 | 2021-02-10T03:21:59.000Z | 2022-03-30T19:10:37.000Z | '''
KnockoffGAN Knockoff Variable Generation
Jinsung Yoon (9/27/2018)
'''
#%% Necessary Packages
import numpy as np
from tqdm import tqdm
import tensorflow as tf
import logging
import argparse
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
#%% KnockoffGAN Function
'''
Inputs:
x_train: Training data
lamda: Power network parameter = 0.01
mu: WGAN parameter = 1
'''
logger = logging.getLogger()
def KnockoffGAN (x_train, x_name, lamda = 0.01, mu = 1, mb_size=128, niter=2000):
    """Generate knockoff variables for ``x_train`` with KnockoffGAN.

    Builds a TF1 computation graph with four networks -- a generator, a
    swap discriminator, a WGAN-GP critic and a MINE mutual-information
    estimator -- trains them jointly, and returns the generator's
    knockoff copy of the full training data.

    Parameters
    ----------
    x_train : np.ndarray, shape (n, x_dim)
        training data
    x_name : str
        name of the feature distribution; one of 'Normal', 'AR_Normal',
        'Uniform', 'AR_Uniform' (controls how the noise Z is sampled)
    lamda : float
        weight of the MINE (power network) term in the generator loss = 0.01
    mu : float
        weight of the WGAN term in the generator loss = 1
    mb_size : int
        minibatch size
    niter : int
        number of generator training iterations

    Returns
    -------
    X_knockoff : np.ndarray, shape (n, x_dim)
        knockoff copy of x_train produced by the trained generator
    """
    # optional TF debugging session config (OOM reports, device placement)
    tf_debug = False
    if tf_debug:
        run_opts = tf.RunOptions(report_tensor_allocations_upon_oom = True)
        config = tf.ConfigProto()
        config.log_device_placement=True
        config.gpu_options.allow_growth = True
    else:
        run_opts = None
        config = None
    #%% Parameters
    # 1. # of samples
    n = len(x_train[:,0])
    # 2. # of features
    x_dim = len(x_train[0,:])
    # 3. # of random dimensions
    z_dim = int(x_dim)
    # 4. # of hidden dimensions
    h_dim = int(x_dim)
    # 5. # of minibatch
    # mb_size = 128
    # 6. WGAN parameters (gradient-penalty weight and learning rate)
    lam = 10
    lr = 1e-4
    #%% Necessary Functions
    # 1. Xavier Initialization Definition
    def xavier_init(size):
        in_dim = size[0]
        xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
        return tf.random_normal(shape = size, stddev = xavier_stddev)
    # 2. Sample from normal distribution: Random variable generation
    # NOTE(review): for unrecognized x_name values this falls through and
    # returns None -- callers must pass one of the four supported names.
    def sample_Z(m, n, x_name):
        if ((x_name == 'Normal') | (x_name == 'AR_Normal')):
            return np.random.normal(0., np.sqrt(1./3000), size = [m, n]).copy()
        elif ((x_name == 'Uniform') | (x_name == 'AR_Uniform')):
            return np.random.uniform(-3*np.sqrt(1./3000),3*np.sqrt(1./3000),[m,n]).copy()
    # 3. Sample from the real data (Mini-batch index sampling)
    def sample_X(m, n):
        return np.random.permutation(m)[:n].copy()
    # 4. Permutation for MINE computation (row shuffle breaks the joint
    # distribution, giving samples from the product of marginals)
    def Permute (x):
        n = len(x[:,0])
        idx = np.random.permutation(n)
        out = x[idx,:].copy()
        return out
    # 5. Bernoulli sampling for Swap and Hint variables
    def sample_SH(m, n, p):
        return np.random.binomial(1, p, [m,n]).copy()
    #%% Placeholder inputs
    # 1. Feature
    X = tf.placeholder(tf.float32, shape = [None, x_dim])
    # 2. Feature (Permute)
    X_hat = tf.placeholder(tf.float32, shape = [None, x_dim])
    # 3. Random Variable
    Z = tf.placeholder(tf.float32, shape = [None, z_dim])
    # 4. Swap
    S = tf.placeholder(tf.float32, shape = [None, x_dim])
    # 5. Hint
    H = tf.placeholder(tf.float32, shape = [None, x_dim])
    #%% Network Building
    #%% 1. Discriminator
    # Input: Swap (X, tilde X) and Hint
    D_W1 = tf.Variable(xavier_init([x_dim + x_dim + x_dim, h_dim]))
    D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
    D_W2 = tf.Variable(xavier_init([h_dim,x_dim]))
    D_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
    theta_D = [D_W1, D_W2, D_b1, D_b2]
    #%% 2. WGAN Discriminator
    # Input: tilde X
    WD_W1 = tf.Variable(xavier_init([x_dim, h_dim]))
    WD_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
    WD_W2 = tf.Variable(xavier_init([h_dim,1]))
    WD_b2 = tf.Variable(tf.zeros(shape=[1]))
    theta_WD = [WD_W1, WD_W2, WD_b1, WD_b2]
    #%% 3. Generator
    # Input: X and Z
    G_W1 = tf.Variable(xavier_init([x_dim + z_dim, h_dim]))
    G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
    G_W2 = tf.Variable(xavier_init([h_dim,x_dim]))
    G_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
    theta_G = [G_W1, G_W2, G_b1, G_b2]
    #%% 4. MINE
    # Input: X and tilde X
    # For X
    M_W1A = tf.Variable(xavier_init([x_dim]))
    M_W1B = tf.Variable(xavier_init([x_dim]))
    M_b1 = tf.Variable(tf.zeros(shape=[x_dim]))
    # For tilde X
    M_W2A = tf.Variable(xavier_init([x_dim]))
    M_W2B = tf.Variable(xavier_init([x_dim]))
    M_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
    # Combine
    M_W3 = tf.Variable(xavier_init([x_dim]))
    M_b3 = tf.Variable(tf.zeros(shape=[x_dim]))
    theta_M = [M_W1A, M_W1B, M_W2A, M_W2B, M_W3, M_b1, M_b2, M_b3]
    #%% Functions
    # 1. Generator
    def generator(x, z):
        inputs = tf.concat(axis=1, values = [x, z])
        G_h1 = tf.nn.tanh(tf.matmul(inputs, G_W1) + G_b1)
        G_out = (tf.matmul(G_h1, G_W2) + G_b2)
        return G_out
    # 2. Discriminator
    def discriminator(sA, sB, h):
        inputs = tf.concat(axis=1, values = [sA, sB, h])
        D_h1 = tf.nn.tanh(tf.matmul(inputs, D_W1) + D_b1)
        D_out = tf.nn.sigmoid(tf.matmul(D_h1, D_W2) + D_b2)
        return D_out
    # 3. WGAN Discriminator
    def WGAN_discriminator(x):
        WD_h1 = tf.nn.relu(tf.matmul(x, WD_W1) + WD_b1)
        WD_out = (tf.matmul(WD_h1, WD_W2) + WD_b2)
        return WD_out
    # 4. MINE (element-wise statistics network; returns the raw output
    # and its exponential for the Donsker-Varadhan bound)
    def MINE(x, x_hat):
        M_h1 = tf.nn.tanh(M_W1A * x + M_W1B * x_hat + M_b1)
        M_h2 = tf.nn.tanh(M_W2A * x + M_W2B * x_hat + M_b2)
        M_out = (M_W3 * (M_h1 + M_h2) + M_b3)
        Exp_M_out = tf.exp(M_out)
        return M_out, Exp_M_out
    #%% Combination across the networks
    # 1. Generator Knockoffs
    G_sample = generator(X,Z)
    # 2. WGAN Outputs for real and fake
    WD_real = WGAN_discriminator(X)
    WD_fake = WGAN_discriminator(G_sample)
    # 3. Generate swapping (X, tilde X)
    SwapA = S * X + (1-S) * G_sample
    SwapB = (1-S) * X + S * G_sample
    # 4. Discriminator output
    # (X, tilde X) is SwapA, SwapB. Hint is generated by H * S
    D_out = discriminator(SwapA, SwapB, H*S)
    # 5. MINE Computation
    # Without permutation (joint distribution)
    M_out, _ = MINE(X, G_sample)
    # With permutation (product of marginals)
    _, Exp_M_out = MINE(X_hat, G_sample)
    # 6. WGAN Loss Replacement of Clipping algorithm to Penalty term
    # 1. Line 6 in Algorithm 1 (interpolate between real and fake)
    eps = tf.random_uniform([mb_size, 1], minval = 0., maxval = 1.)
    X_inter = eps*X + (1. - eps) * G_sample
    # 2. Line 7 in Algorithm 1 (gradient penalty)
    grad = tf.gradients(WGAN_discriminator(X_inter), [X_inter])[0]
    grad_norm = tf.sqrt(tf.reduce_sum((grad)**2 + 1e-8, axis = 1))
    grad_pen = lam * tf.reduce_mean((grad_norm - 1)**2)
    #%% Loss function
    # 1. WGAN Loss
    WD_loss = tf.reduce_mean(WD_fake) - tf.reduce_mean(WD_real) + grad_pen
    # 2. Discriminator loss (cross-entropy on unhinted components only)
    D_loss = -tf.reduce_mean(S * (1-H) * tf.log(D_out + 1e-8) + (1-S) * (1-H) * tf.log(1 - D_out + 1e-8))
    # 3. MINE Loss (Donsker-Varadhan lower bound on mutual information)
    M_loss = tf.reduce_sum( tf.reduce_mean(M_out, axis = 0) - tf.log(tf.reduce_mean(Exp_M_out, axis = 0)) )
    # 4. Generator loss (fool discriminator + WGAN term + MI penalty)
    G_loss = - D_loss + mu * -tf.reduce_mean(WD_fake) + lamda * M_loss
    # Solver
    WD_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(WD_loss, var_list = theta_WD))
    D_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(D_loss, var_list = theta_D))
    G_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(G_loss, var_list = theta_G))
    M_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(-M_loss, var_list = theta_M))
    #%% Sessions
    if tf_debug:
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer(), options=run_opts)
    else:
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
    #%% Iterations
    for it in tqdm(range(niter)):
        # 5 critic/discriminator/MINE updates per generator update
        for dummy_range in range(5):
            #%% WGAN, Discriminator and MINE Training
            # Random variable generation
            Z_mb = sample_Z(mb_size, z_dim, x_name)
            # Minibatch sampling
            X_idx = sample_X(n,mb_size)
            X_mb = x_train[X_idx,:].copy()
            X_perm_mb = Permute(X_mb)
            # Swap generation
            S_mb = sample_SH(mb_size, x_dim, 0.5)
            # Hint generation
            H_mb = sample_SH(mb_size, x_dim, 0.9)
            # 1. WGAN Training
            _, WD_loss_curr = sess.run([WD_solver, WD_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
            # 2. Discriminator Training
            # print('discriminator training')
            _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
            # 3. MINE Training
            # print('mine training')
            _, M_loss_curr = sess.run([M_solver, M_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
        #%% Generator Training
        # Random variable generation
        Z_mb = sample_Z(mb_size, z_dim, x_name)
        # Minibatch sampling
        X_idx = sample_X(n,mb_size)
        X_mb = x_train[X_idx,:].copy()
        X_perm_mb = Permute(X_mb)
        # Swap generation
        S_mb = sample_SH(mb_size, x_dim, 0.5)
        # Hint generation (no hints for the generator step)
        H_mb = sample_SH(mb_size, x_dim, 0.0)
        # Generator training
        # print('gen training')
        _, G_loss_curr, G_sample_curr = sess.run([G_solver, G_loss, G_sample], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
    #%% Output
    #print('last session run')
    X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)}, options=run_opts)[0]
    # X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)})[0]
    #print('closing session')
    sess.close()
    tf.reset_default_graph()
    return X_knockoff
def init_arg():
    """Build the CLI parser for the knockoff script and parse sys.argv."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i')                            # input csv path
    parser.add_argument('-o')                            # output csv path
    parser.add_argument('--bs', default=128, type=int)   # minibatch size
    parser.add_argument('--it', default=2000, type=int)  # training iterations
    parser.add_argument('--target')                      # label column name
    parser.add_argument(
        '--xname', default='Normal',
        help='Sample distribution [Normal, Uniform]')
    parser.add_argument('--scale', default=1, type=int)  # min-max scale inputs
    return parser.parse_args()
if __name__ == "__main__":
    # Parse CLI, load the csv, optionally min-max scale the feature columns,
    # generate knockoffs, and write <features + original label> back to csv.
    args = init_arg()
    df = pd.read_csv(args.i)
    niter = args.it
    use_scale = args.scale
    x_name = args.xname
    lbl = args.target
    # every column except the target is treated as a feature
    features = list(df.columns)
    features.remove(lbl)
    # scale/normalize dataset
    range_scaler = (0, 1)
    scaler = MinMaxScaler(feature_range=range_scaler)
    x = df[features]
    if use_scale:
        scaler.fit(x)
        x = scaler.transform(x)
    else:
        x = x.values
    x_k = KnockoffGAN(
        x,
        x_name,
        mb_size=args.bs,
        niter=niter)
    # knockoff features keep the original column names; the label column
    # is copied through unchanged
    df_k = pd.DataFrame(x_k, columns=features)
    df_k[lbl] = df[lbl]
    df_k.to_csv(args.o, index=False)
| 31.650838 | 164 | 0.572235 |
ef7439605e68e2f5c25018566b3aa97b93fc13dc | 667 | py | Python | test/conftest.py | caedonhsieh/ps-munna | bb0194ef3c3efb5ecce5f16913ffb8d882953bb8 | [
"MIT"
] | null | null | null | test/conftest.py | caedonhsieh/ps-munna | bb0194ef3c3efb5ecce5f16913ffb8d882953bb8 | [
"MIT"
] | null | null | null | test/conftest.py | caedonhsieh/ps-munna | bb0194ef3c3efb5ecce5f16913ffb8d882953bb8 | [
"MIT"
] | null | null | null | from pathlib import Path
import pytest
import munna
TEST_ASSETS_DIR = Path(__file__).parent / 'assets'
###############################################################################
# Pytest fixtures
###############################################################################
@pytest.fixture(scope='session')
def dataset():
    """Preload the dataset (the 'valid' partition), once per test session."""
    return munna.Dataset('DATASET', 'valid')
@pytest.fixture(scope='session')
def datamodule():
    """Preload the datamodule (batch_size=4, no workers), once per session."""
    return munna.DataModule('DATASET', batch_size=4, num_workers=0)
@pytest.fixture(scope='session')
def model():
    """Preload a default-constructed model, once per test session."""
    return munna.Model()
| 20.84375 | 79 | 0.527736 |
a17b27a3f74bc47c3f3e83ff56ba2804353640b9 | 1,768 | py | Python | launch/razor-pub.launch.py | davidcutting/ros2_razor_imu | 8cac06e2dc38766bef5827faa4386016497054b9 | [
"BSD-3-Clause"
] | null | null | null | launch/razor-pub.launch.py | davidcutting/ros2_razor_imu | 8cac06e2dc38766bef5827faa4386016497054b9 | [
"BSD-3-Clause"
] | null | null | null | launch/razor-pub.launch.py | davidcutting/ros2_razor_imu | 8cac06e2dc38766bef5827faa4386016497054b9 | [
"BSD-3-Clause"
] | null | null | null | # MIT License
#
# Copyright (c) 2022 David Cutting
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
    """Launch the razor IMU node with its packaged YAML parameter file."""
    params_file = os.path.join(
        get_package_share_directory("ros2_razor_imu"), "config", "razor.yaml")
    imu_node = Node(
        package='ros2_razor_imu',
        executable='imu_node',
        name='ros2_razor_imu',
        output='screen',
        parameters=[params_file])
    return LaunchDescription([imu_node])
| 35.36 | 101 | 0.760747 |
89f415407fb00e95453497e5eb6059830a72fa62 | 270 | py | Python | services/dsrp-api/app/commands.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | null | null | null | services/dsrp-api/app/commands.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | 9 | 2020-05-06T23:29:43.000Z | 2022-03-14T22:58:17.000Z | services/dsrp-api/app/commands.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | 3 | 2020-05-08T16:54:22.000Z | 2021-01-27T17:28:49.000Z | import click
import psycopg2
from sqlalchemy.exc import DBAPIError
from multiprocessing.dummy import Pool as ThreadPool
from flask import current_app
from app.api.utils.include.user_info import User
from app.extensions import db
def register_commands(app):
    """Register CLI commands on *app*; currently a no-op placeholder hook."""
    return
fdac52c9760ad21d04251e24b15f9158eebe39ff | 8,648 | py | Python | partition/partition.py | davijo/superpoint_graph | 0d60fb364bfa37fb70570784899ce46c0296ee22 | [
"MIT"
] | null | null | null | partition/partition.py | davijo/superpoint_graph | 0d60fb364bfa37fb70570784899ce46c0296ee22 | [
"MIT"
] | null | null | null | partition/partition.py | davijo/superpoint_graph | 0d60fb364bfa37fb70570784899ce46c0296ee22 | [
"MIT"
] | null | null | null | """
Large-scale Point Cloud Semantic Segmentation with Superpoint Graphs
http://arxiv.org/abs/1711.09869
2017 Loic Landrieu, Martin Simonovsky
Script for partioning into simples shapes
"""
import os.path
import sys
import numpy as np
import argparse
from timeit import default_timer as timer
sys.path.append("./cut-pursuit/src")
sys.path.append("./ply_c")
sys.path.append("./partition/cut-pursuit/src")
sys.path.append("./partition/ply_c")
import libcp
import libply_c
from graphs import *
from provider import *
# Command-line interface: dataset location plus graph-construction and
# cut-pursuit partition hyper-parameters.
parser = argparse.ArgumentParser(description='Large-scale Point Cloud Semantic Segmentation with Superpoint Graphs')
parser.add_argument('--ROOT_PATH', default='datasets/s3dis')
parser.add_argument('--dataset', default='s3dis', help='s3dis/sema3d/your_dataset')
parser.add_argument('--k_nn_geof', default=45, type=int, help='number of neighbors for the geometric features')
parser.add_argument('--k_nn_adj', default=10, type=int, help='adjacency structure for the minimal partition')
parser.add_argument('--lambda_edge_weight', default=1., type=float, help='parameter determine the edge weight for minimal part.')
parser.add_argument('--reg_strength', default=0.1, type=float, help='regularization strength for the minimal partition')
parser.add_argument('--d_se_max', default=0, type=float, help='max length of super edges')
parser.add_argument('--voxel_width', default=0.03, type=float, help='voxel size when subsampling (in m)')
parser.add_argument('--ver_batch', default=0, type=int, help='Batch size for reading large files, 0 do disable batch loading')
args = parser.parse_args()
#path to data
root = args.ROOT_PATH+'/'
#list of subfolders to be processed for each supported dataset,
#plus the number of semantic classes
if args.dataset == 's3dis':
    folders = ["Area_1/", "Area_2/", "Area_3/", "Area_4/", "Area_5/", "Area_6/"]
    n_labels = 13
elif args.dataset == 'sema3d':
    folders = ["test_reduced/", "test_full/", "train/"]
    n_labels = 8
elif args.dataset == 'custom_dataset':
    folders = ["train/", "test/"]
    n_labels = 10 #number of classes
else:
    # bug fix: the original referenced an undefined name `dataset`, which
    # turned the intended ValueError into a NameError
    raise ValueError('%s is an unknown data set' % args.dataset)
# cumulative wall-clock seconds spent on each stage, in order:
# feature computation / partition / superpoint-graph construction
times = [0,0,0] #time for computing: features / partition / spg
# create the three output directory trees if missing
if not os.path.isdir(root + "clouds"):
    os.mkdir(root + "clouds")
if not os.path.isdir(root + "features"):
    os.mkdir(root + "features")
if not os.path.isdir(root + "superpoint_graphs"):
    os.mkdir(root + "superpoint_graphs")
# Main processing loop: for each subfolder, for each point cloud file,
# compute (or reload) geometric features, then compute (or reload) the
# cut-pursuit partition and the superpoint graph, caching both as .h5.
for folder in folders:
    print("=================\n "+folder+"\n=================")
    data_folder = root + "data/" + folder
    cloud_folder = root + "clouds/" + folder
    fea_folder = root + "features/" + folder
    spg_folder = root + "superpoint_graphs/" + folder
    if not os.path.isdir(data_folder):
        raise ValueError("%s does not exist" % data_folder)
    # mirror the data subfolder layout in the three output trees
    if not os.path.isdir(cloud_folder):
        os.mkdir(cloud_folder)
    if not os.path.isdir(fea_folder):
        os.mkdir(fea_folder)
    if not os.path.isdir(spg_folder):
        os.mkdir(spg_folder)
    # collect the input clouds; layout is dataset-specific
    if args.dataset=='s3dis':
        # s3dis: one sub-directory per room
        files = [os.path.join(data_folder, o) for o in os.listdir(data_folder)
                 if os.path.isdir(os.path.join(data_folder,o))]
    elif args.dataset=='sema3d':
        files = glob.glob(data_folder+"*.txt")
    elif args.dataset=='custom_dataset':
        #list all files in the folder
        files = glob.glob(data_folder+"*.ply")
    if (len(files) == 0):
        raise ValueError('%s is empty' % data_folder)
    n_files = len(files)
    i_file = 0
    for file in files:
        file_name = os.path.splitext(os.path.basename(file))[0]
        # derive the per-file input/output paths
        if args.dataset=='s3dis':
            data_file = data_folder + file_name + '/' + file_name + ".txt"
            cloud_file = cloud_folder + file_name
            fea_file = fea_folder + file_name + '.h5'
            spg_file = spg_folder + file_name + '.h5'
        elif args.dataset=='sema3d':
            # keep only the first two underscore-separated tokens of the name
            file_name_short = '_'.join(file_name.split('_')[:2])
            data_file = data_folder + file_name + ".txt"
            label_file = data_folder + file_name_short + ".labels"
            cloud_file = cloud_folder + file_name_short
            fea_file = fea_folder + file_name_short + '.h5'
            spg_file = spg_folder + file_name_short + '.h5'
        elif args.dataset=='custom_dataset':
            #adapt to your hierarchy. The following 4 files must be defined
            data_file = data_folder + file_name + ".ply"
            cloud_file = cloud_folder + file_name
            fea_file = fea_folder + file_name + '.h5'
            spg_file = spg_folder + file_name + '.h5'
        i_file = i_file + 1
        print(str(i_file) + " / " + str(n_files) + "---> "+file_name)
        #--- build the geometric feature file h5 file ---
        if os.path.isfile(fea_file):
            print(" reading the existing feature file...")
            geof, xyz, rgb, graph_nn, labels = read_features(fea_file)
        else :
            print(" creating the feature file...")
            #--- read the data files and compute the labels---
            if args.dataset=='s3dis':
                xyz, rgb, labels = read_s3dis_format(data_file)
                if args.voxel_width > 0:
                    # voxel-grid subsampling
                    xyz, rgb, labels = libply_c.prune(xyz, args.voxel_width, rgb, labels, n_labels)
            elif args.dataset=='sema3d':
                label_file = data_folder + file_name + ".labels"
                has_labels = (os.path.isfile(label_file))
                if (has_labels):
                    xyz, rgb, labels = read_semantic3d_format(data_file, n_labels, label_file, args.voxel_width, args.ver_batch)
                else:
                    xyz, rgb = read_semantic3d_format(data_file, 0, '', args.voxel_width, args.ver_batch)
                    labels = []
            elif args.dataset=='custom_dataset':
                #implement in provider.py your own read_custom_format outputing xyz, rgb, labels
                #here is an example for ply files
                xyz, rgb, labels = read_ply(data_file);
                #if no labels available simply set labels = []
            start = timer()
            #---compute 10 nn graph-------
            graph_nn, target_fea = compute_graph_nn_2(xyz, args.k_nn_adj, args.k_nn_geof)
            #---compute geometric features-------
            geof = libply_c.compute_geof(xyz, target_fea, args.k_nn_geof).astype('float32')
            end = timer()
            times[0] = times[0] + end - start
            del target_fea
            write_features(fea_file, geof, xyz, rgb, graph_nn, labels)
        #--compute the partition------
        sys.stdout.flush()
        if os.path.isfile(spg_file):
            print(" reading the existing superpoint graph file...")
            graph_sp, components, in_component = read_spg(spg_file)
        else:
            print(" computing the superpoint graph...")
            #--- build the spg h5 file --
            start = timer()
            # choose the point features used by the cut-pursuit partition
            if args.dataset=='s3dis':
                features = np.hstack((geof, rgb/255.)).astype('float32')#add rgb as a feature for partitioning
                features[:,3] = 2. * features[:,3] #increase importance of verticality (heuristic)
            elif args.dataset=='sema3d':
                features = geof
                geof[:,3] = 2. * geof[:, 3]
            elif args.dataset=='custom_dataset':
                #choose here which features to use for the partition
                features = geof
                geof[:,3] = 2. * geof[:, 3]
            # edge weights decrease with normalized point-to-point distance
            graph_nn["edge_weight"] = np.array(1. / ( args.lambda_edge_weight + graph_nn["distances"] / np.mean(graph_nn["distances"])), dtype = 'float32')
            print(" minimal partition...")
            components, in_component = libcp.cutpursuit(features, graph_nn["source"], graph_nn["target"]
                                         , graph_nn["edge_weight"], args.reg_strength)
            components = np.array(components, dtype = 'object')
            end = timer()
            times[1] = times[1] + end - start
            print(" computation of the SPG...")
            start = timer()
            graph_sp = compute_sp_graph(xyz, args.d_se_max, in_component, components, labels, n_labels)
            end = timer()
            times[2] = times[2] + end - start
            write_spg(spg_file, graph_sp, components, in_component)
        print("Timer : %5.1f / %5.1f / %5.1f " % (times[0], times[1], times[2]))
| 47.516484 | 155 | 0.595398 |
48b524320e2ef1725410b778fe797be924543b0a | 2,894 | py | Python | tutorials/slac/arguments.py | namjiwon1023/Code_With_RL | 37beec975b1685e9f6cf991abed491b854b78173 | [
"MIT"
] | 3 | 2021-08-12T15:11:28.000Z | 2021-09-27T16:04:16.000Z | tutorials/slac/arguments.py | namjiwon1023/Code_With_RL | 37beec975b1685e9f6cf991abed491b854b78173 | [
"MIT"
] | null | null | null | tutorials/slac/arguments.py | namjiwon1023/Code_With_RL | 37beec975b1685e9f6cf991abed491b854b78173 | [
"MIT"
] | 1 | 2021-08-05T07:20:57.000Z | 2021-08-05T07:20:57.000Z | # Copyright (c) 2021: Zhiyuan Nan (namjw@hanyang.ac.kr).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import argparse
import torch as T
# Train/evaluate on GPU when one is visible, otherwise fall back to CPU.
device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')

def _str2bool(value):
    """Parse a textual command-line boolean such as ``--evaluate False``.

    ``type=bool`` is a classic argparse pitfall: any non-empty string is
    truthy, so ``--evaluate False`` would silently become ``True``.  This
    converter interprets the usual spellings instead.

    Raises:
        argparse.ArgumentTypeError: if the text is not a recognised boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected, got %r" % (value,))

def get_args():
    """Build and parse the command-line arguments for SLAC training.

    Returns:
        argparse.Namespace: hyper-parameters and runtime options.
    """
    parser = argparse.ArgumentParser("Stochastic Latent Actor-Critic")
    parser.add_argument("--device", default=device, help="GPU or CPU")
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    parser.add_argument("--env_name", type=str, default="cheetah", help="env name")
    parser.add_argument("--task_name", type=str, default="run")
    parser.add_argument("--time-steps", type=int, default=2000000, help="number of time steps")
    parser.add_argument("--actor-lr", type=float, default=3e-4, help="learning rate of actor")
    parser.add_argument("--critic-lr", type=float, default=3e-4, help="learning rate of critic")
    parser.add_argument("--alpha-lr", type=float, default=3e-4, help="learning rate of alpha")
    parser.add_argument("--latent-lr", type=float, default=1e-4, help="learning rate of latent model")
    parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
    parser.add_argument("--tau", type=float, default=5e-3, help="soft update rate")
    parser.add_argument("--z1_dim", type=int, default=32, help="latent z1 dim")
    parser.add_argument("--z2_dim", type=int, default=256, help="latent z1 dim")
    parser.add_argument("--feature_dim", type=int, default=256, help="encoder feature dim")
    parser.add_argument("--num_sequences", type=int, default=8, help="number of sequences")
    parser.add_argument("--hidden_units", default=(256, 256), help="number of hidden units")
    parser.add_argument("--buffer_size", type=int, default=100000, help="number of transitions can be stored in buffer")
    parser.add_argument("--batch_size_sac", type=int, default=256, help="number of episodes to optimize at the same time")
    parser.add_argument("--batch_size_latent", type=int, default=32, help="number of episodes to optimize at the same time")
    parser.add_argument("--initial_collection_steps", type=int, default=10000, help="initial collection steps")
    parser.add_argument("--initial_learning_steps", type=int, default=100000, help="initial learning steps")
    parser.add_argument("--action_repeat", type=int, default=4, help="action repeat")
    parser.add_argument("--evaluate-episodes", type=int, default=5, help="number of episodes for evaluating")
    parser.add_argument("--evaluate-rate", type=int, default=10000, help="how often to evaluate model")
    # bugfix: ``type=bool`` would turn "--evaluate False" into True.
    parser.add_argument("--evaluate", type=_str2bool, default=False, help="Test?")
    parser.add_argument("--save-dir", type=str, default="./model", help="directory in which training state and model should be saved")
    return parser.parse_args()
aa0089979f3f2cde30d2c49864fc4a4d11839fcb | 5,706 | py | Python | data/p3BR/R2/benchmark/startQiskit_noisy127.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_noisy127.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_noisy127.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=21
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
# Filename stem for circuit drawings; referenced only from commented-out
# draw() calls below.
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR the bit strings *s* and *t* position-wise and return the result
    with the character order reversed (least-significant bit first)."""
    xored = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(xored[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings modulo 2, returned as "0" or "1"."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Construct the oracle O_f as a network of multi-controlled Toffolis.

    For every n-bit pattern r with f(r) == "1", X gates temporarily map the
    control qubits matching a '0' in r onto |1>, a multi-controlled Toffoli
    ('noancilla' mode) flips the target, and the X gates are undone.
    """
    ctrl_reg = QuantumRegister(n, "ofc")
    tgt_reg = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(ctrl_reg, tgt_reg, name="Of")
    for value in range(2 ** n):
        pattern = np.binary_repr(value, n)
        if f(pattern) != "1":
            continue
        zero_positions = [j for j in range(n) if pattern[j] == "0"]
        for j in zero_positions:
            oracle.x(ctrl_reg[j])
        oracle.mct(ctrl_reg, tgt_reg[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(ctrl_reg[j])
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble the Bernstein-Vazirani style circuit for the oracle f.

    NOTE(review): the "# number=" gates below are machine-generated and
    their exact order is significant, so the body is left untouched.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    # NOTE(review): `b` is computed but never used afterwards — presumably a
    # leftover of the generic BV template; confirm before removing.
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.cx(input_qubit[0],input_qubit[2]) # number=11
    prog.x(input_qubit[2]) # number=12
    prog.h(input_qubit[2]) # number=18
    prog.cz(input_qubit[0],input_qubit[2]) # number=19
    prog.h(input_qubit[2]) # number=20
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.h(input_qubit[1]) # number=9
    prog.cx(input_qubit[2],input_qubit[1]) # number=4
    prog.y(input_qubit[1]) # number=14
    prog.cx(input_qubit[2],input_qubit[1]) # number=10
    prog.z(input_qubit[2]) # number=3
    prog.x(input_qubit[1]) # number=17
    prog.y(input_qubit[2]) # number=5
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* exactly and return a {"|bits>": amplitude} mapping."""
    backend = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, backend).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    labelled = {}
    for index in range(2 ** n_qubits):
        key = "|" + np.binary_repr(index, n_qubits) + ">"
        labelled[key] = amplitudes[index]
    return labelled
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run *prog* on the named Aer backend and bundle the results.

    Returns a dict holding the raw measurement counts, the exact state
    vector, the bit string ``a`` recovered from the most frequent outcome
    (reversed to match qubit ordering) and the constant bit *b*.
    """
    quantum_state = get_statevector(prog)
    backend = Aer.get_backend(backend_str)
    counts = execute(prog, backend, shots=shots).result().get_counts()
    most_common_outcome = Counter(counts).most_common(1)[0][0]
    a = most_common_outcome[::-1]
    return {
        "measurements": counts,
        "quantum_state": quantum_state,
        "a": a,
        "b": b,
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Manual driver: build the 2-qubit instance of f(x) = (11 . x) xor 1,
    # run it on a simulated noisy backend and dump the counts to CSV.
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_noisy127.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # FakeYorktown provides the noise/coupling profile of a 5-qubit device.
    backend = FakeYorktown()
    circuit1 = transpile(prog, FakeYorktown())
    # Extra gates appended after transpilation, then measure every qubit.
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 29.564767 | 140 | 0.629863 |
2573314f155cf68ff486218538dc621f5f9b0063 | 9,570 | py | Python | pandas/tests/io/parser/test_encoding.py | loicdiridollou/pandas-loic | 2f203d11ef1dba251b6cd3df89d073bf5b970e68 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 28,899 | 2016-10-13T03:32:12.000Z | 2022-03-31T21:39:05.000Z | pandas/tests/io/parser/test_encoding.py | soumyas567/pandas | ccb36cc8f1eeed53dea321ee7381602a6957de54 | [
"BSD-3-Clause"
] | 31,004 | 2016-10-12T23:22:27.000Z | 2022-03-31T23:17:38.000Z | pandas/tests/io/parser/test_encoding.py | soumyas567/pandas | ccb36cc8f1eeed53dea321ee7381602a6957de54 | [
"BSD-3-Clause"
] | 15,149 | 2016-10-13T03:21:31.000Z | 2022-03-31T18:46:47.000Z | """
Tests encoding functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import BytesIO
import os
import tempfile
import numpy as np
import pytest
from pandas import (
DataFrame,
read_csv,
)
import pandas._testing as tm
# Decorator shorthand: applies the "pyarrow_skip" fixture so the test is
# skipped when run with the pyarrow parser engine.
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
@skip_pyarrow
def test_bytes_io_input(all_parsers):
    # Hebrew text encoded as cp1255 must round-trip through a BytesIO source.
    parser = all_parsers
    codec = "cp1255"
    raw = "שלום:1234\n562:123".encode(codec)
    result = parser.read_csv(BytesIO(raw), sep=":", encoding=codec)
    expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
    tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_read_csv_unicode(all_parsers):
    # A non-ASCII name ("Łaski, Jan") must survive UTF-8 decoding.
    parser = all_parsers
    payload = "\u0141aski, Jan;1".encode()
    frame = parser.read_csv(BytesIO(payload), sep=";", encoding="utf-8", header=None)
    tm.assert_frame_equal(frame, DataFrame([["\u0141aski, Jan", 1]]))
@skip_pyarrow
@pytest.mark.parametrize("sep", [",", "\t"])
@pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"])
def test_utf16_bom_skiprows(all_parsers, sep, encoding):
    # see gh-2298: skiprows must not be confused by a UTF-16 byte order mark.
    parser = all_parsers
    data = """skip this
skip this too
A,B,C
1,2,3
4,5,6""".replace(
        ",", sep
    )
    path = f"__{tm.rands(10)}__.csv"
    kwargs = {"sep": sep, "skiprows": 2}
    utf8 = "utf-8"
    with tm.ensure_clean(path) as path:
        from io import TextIOWrapper
        # Write the UTF-16 (BOM-carrying) bytes to disk ...
        bytes_data = data.encode(encoding)
        with open(path, "wb") as f:
            f.write(bytes_data)
        # ... and keep an equivalent UTF-8 text buffer as the reference.
        bytes_buffer = BytesIO(data.encode(utf8))
        bytes_buffer = TextIOWrapper(bytes_buffer, encoding=utf8)
        # Both reads must produce identical frames.
        result = parser.read_csv(path, encoding=encoding, **kwargs)
        expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)
        bytes_buffer.close()
    tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_utf16_example(all_parsers, csv_dir_path):
    # The bundled UTF-16 sample file should parse into exactly 50 rows.
    sample = os.path.join(csv_dir_path, "utf16_ex.txt")
    frame = all_parsers.read_csv(sample, encoding="utf-16", sep="\t")
    assert len(frame) == 50
@skip_pyarrow
def test_unicode_encoding(all_parsers, csv_dir_path):
    # Latin-1 decoded values must come back intact for a known row.
    csv_path = os.path.join(csv_dir_path, "unicode_series.csv")
    frame = all_parsers.read_csv(csv_path, header=None, encoding="latin-1")
    frame = frame.set_index(0)
    assert frame[1][1632] == "\xc1 k\xf6ldum klaka (Cold Fever) (1994)"
@skip_pyarrow
@pytest.mark.parametrize(
    "data,kwargs,expected",
    [
        # Basic test
        ("a\n1", {}, DataFrame({"a": [1]})),
        # "Regular" quoting
        ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})),
        # Test in a data row instead of header
        ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})),
        # Test in empty data row with skipping
        ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})),
        # Test in empty data row without skipping
        (
            "\n1",
            {"names": ["a"], "skip_blank_lines": False},
            DataFrame({"a": [np.nan, 1]}),
        ),
    ],
)
def test_utf8_bom(all_parsers, data, kwargs, expected):
    # see gh-4793: a leading UTF-8 byte order mark must be ignored.
    parser = all_parsers
    bom = "\ufeff"
    utf8 = "utf-8"
    def _encode_data_with_bom(_data):
        # Prepend the BOM and hand the bytes over as a file-like object.
        bom_data = (bom + _data).encode(utf8)
        return BytesIO(bom_data)
    result = parser.read_csv(_encode_data_with_bom(data), encoding=utf8, **kwargs)
    tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt):
    # see gh-13549: every alias spelling of a UTF codec should be accepted.
    codec = encoding_fmt.format(utf_value)
    raw = "mb_num,multibyte\n4.8,test".encode(codec)
    result = all_parsers.read_csv(BytesIO(raw), encoding=codec)
    expected = DataFrame({"mb_num": [4.8], "multibyte": ["test"]})
    tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
    "file_path,encoding",
    [
        (("io", "data", "csv", "test1.csv"), "utf-8"),
        (("io", "parser", "data", "unicode_series.csv"), "latin-1"),
        (("io", "parser", "data", "sauron.SHIFT_JIS.csv"), "shiftjis"),
    ],
)
def test_binary_mode_file_buffers(
    all_parsers, csv_dir_path, file_path, encoding, datapath
):
    # gh-23779: Python csv engine shouldn't error on files opened in binary.
    # gh-31575: Python csv engine shouldn't error on files opened in raw binary.
    parser = all_parsers
    fpath = datapath(*file_path)
    # Reference parse from the path itself.
    expected = parser.read_csv(fpath, encoding=encoding)
    # Text-mode handle: must parse identically and stay open.
    with open(fpath, encoding=encoding) as fa:
        result = parser.read_csv(fa)
        assert not fa.closed
    tm.assert_frame_equal(expected, result)
    # Buffered binary handle.
    with open(fpath, mode="rb") as fb:
        result = parser.read_csv(fb, encoding=encoding)
        assert not fb.closed
    tm.assert_frame_equal(expected, result)
    # Raw (unbuffered) binary handle.
    with open(fpath, mode="rb", buffering=0) as fb:
        result = parser.read_csv(fb, encoding=encoding)
        assert not fb.closed
    tm.assert_frame_equal(expected, result)
@skip_pyarrow
@pytest.mark.parametrize("pass_encoding", [True, False])
def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding):
    # see gh-24130: reading from an already-open temp file must work whether
    # or not the encoding is passed explicitly.
    codec = encoding_fmt.format(utf_value)
    with tm.ensure_clean(mode="w+", encoding=codec, return_filelike=True) as handle:
        handle.write("foo\nbar")
        handle.seek(0)
        maybe_codec = codec if pass_encoding else None
        result = all_parsers.read_csv(handle, encoding=maybe_codec)
        tm.assert_frame_equal(result, DataFrame({"foo": ["bar"]}))
@skip_pyarrow
def test_encoding_named_temp_file(all_parsers):
    # see gh-31819: a NamedTemporaryFile holding shift-jis text is readable
    # and is left open afterwards.
    parser = all_parsers
    if parser.engine == "python":
        pytest.skip("NamedTemporaryFile does not work with Python engine")
    codec = "shift-jis"
    title = "てすと"
    data = "こむ"
    with tempfile.NamedTemporaryFile() as handle:
        handle.write(f"{title}\n{data}".encode(codec))
        handle.seek(0)
        result = parser.read_csv(handle, encoding=codec)
        tm.assert_frame_equal(result, DataFrame({title: [data]}))
        assert not handle.closed
@pytest.mark.parametrize(
    "encoding", ["utf-8", "utf-16", "utf-16-be", "utf-16-le", "utf-32"]
)
def test_parse_encoded_special_characters(encoding):
    # GH16218 Verify parsing of data with encoded special characters
    # Data contains a Unicode 'FULLWIDTH COLON' (U+FF1A) at position (0,"a")
    raw = "a\tb\n:foo\t0\nbar\t1\nbaz\t2".encode(encoding)
    result = read_csv(BytesIO(raw), delimiter="\t", encoding=encoding)
    expected = DataFrame(
        data=[[":foo", 0], ["bar", 1], ["baz", 2]], columns=["a", "b"]
    )
    tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize("encoding", ["utf-8", None, "utf-16", "cp1255", "latin-1"])
def test_encoding_memory_map(all_parsers, encoding):
    # GH40986: memory-mapped reads must honour the requested encoding.
    expected = DataFrame(
        {
            "name": ["Raphael", "Donatello", "Miguel Angel", "Leonardo"],
            "mask": ["red", "purple", "orange", "blue"],
            "weapon": ["sai", "bo staff", "nunchunk", "katana"],
        }
    )
    with tm.ensure_clean() as path:
        expected.to_csv(path, index=False, encoding=encoding)
        roundtripped = all_parsers.read_csv(path, encoding=encoding, memory_map=True)
    tm.assert_frame_equal(roundtripped, expected)
@skip_pyarrow
def test_chunk_splits_multibyte_char(all_parsers):
    """
    Chunk splits a multibyte character with memory_map=True
    GH 43540
    """
    parser = all_parsers
    # DEFAULT_CHUNKSIZE = 262144, defined in parsers.pyx
    # 2048 rows of 127 "a"s + newline = exactly the chunk boundary region.
    df = DataFrame(data=["a" * 127] * 2048)
    # Put two-bytes utf-8 encoded character "ą" at the end of chunk
    # utf-8 encoding of "ą" is b'\xc4\x85'
    df.iloc[2047] = "a" * 127 + "ą"
    with tm.ensure_clean("bug-gh43540.csv") as fname:
        df.to_csv(fname, index=False, header=False, encoding="utf-8")
        dfr = parser.read_csv(fname, header=None, memory_map=True, engine="c")
    tm.assert_frame_equal(dfr, df)
@skip_pyarrow
def test_readcsv_memmap_utf8(all_parsers):
    """
    GH 43787
    Test correct handling of UTF-8 chars when memory_map=True and encoding is UTF-8
    """
    lines = []
    line_length = 128
    start_char = " "
    end_char = "\U00010080"
    # This for loop creates a list of 128-char strings
    # consisting of consecutive Unicode chars
    for lnum in range(ord(start_char), ord(end_char), line_length):
        line = "".join([chr(c) for c in range(lnum, lnum + 0x80)]) + "\n"
        try:
            # Skip ranges containing surrogates, which cannot be encoded.
            line.encode("utf-8")
        except UnicodeEncodeError:
            continue
        lines.append(line)
    parser = all_parsers
    df = DataFrame(lines)
    # Round-trip through a file and a memory-mapped read with the c engine.
    with tm.ensure_clean("utf8test.csv") as fname:
        df.to_csv(fname, index=False, header=False, encoding="utf-8")
        dfr = parser.read_csv(
            fname, header=None, memory_map=True, engine="c", encoding="utf-8"
        )
    tm.assert_frame_equal(df, dfr)
def test_not_readable(all_parsers):
    # GH43439: a SpooledTemporaryFile (no regular readable() protocol) must
    # still be parseable by the c engine.
    parser = all_parsers
    if parser.engine in ("python", "pyarrow"):
        pytest.skip("SpooledTemporaryFile does only work with the c-engine")
    with tempfile.SpooledTemporaryFile() as buffer:
        buffer.write(b"abcd")
        buffer.seek(0)
        frame = parser.read_csv(buffer)
    tm.assert_frame_equal(frame, DataFrame([], columns=["abcd"]))
| 30.28481 | 88 | 0.647858 |
0bbb8af27c41a58d0437d4532bfd95fc1fd7c8d0 | 478 | py | Python | NEMbox/osx/test.py | qinyongliang/musicbox | 5e1bbd2a4b35b4c3fb8d975c5e5a375bfa2931fb | [
"MIT"
] | 1 | 2019-10-24T10:25:57.000Z | 2019-10-24T10:25:57.000Z | NEMbox/osx/test.py | qinyongliang/musicbox | 5e1bbd2a4b35b4c3fb8d975c5e5a375bfa2931fb | [
"MIT"
] | null | null | null | NEMbox/osx/test.py | qinyongliang/musicbox | 5e1bbd2a4b35b4c3fb8d975c5e5a375bfa2931fb | [
"MIT"
] | null | null | null | import ctypes
def play():
    """Callback wired into the native hot-key library for the 'play' key."""
    message = "play from python"
    print(message)
def nextSong():
    """Callback wired into the native hot-key library for the 'next' key."""
    message = "next song from python"
    print(message)
def lastSong():
    """Callback wired into the native hot-key library for the 'previous' key."""
    message = "last song from python"
    print(message)
if __name__ == "__main__":
    # Manual smoke test: load the native hot-key helper and register the
    # three Python callbacks, then hand control to the library.
    lib = ctypes.cdll.LoadLibrary("NEMbox/osx/HotKey.lib")
    # Keep references to the CFUNCTYPE wrappers so they are not garbage
    # collected while the native side still holds the function pointers.
    play_callback = ctypes.CFUNCTYPE(None)(play)
    next_callback = ctypes.CFUNCTYPE(None)(nextSong)
    last_callback = ctypes.CFUNCTYPE(None)(lastSong)
    # NOTE: "setContorllFunc" (sic) is the symbol exported by HotKey.lib.
    lib.setContorllFunc(play_callback, next_callback, last_callback)
    lib.setup()
1a9e73f390822455b8e39b61472cff8d38fe90f3 | 5,684 | py | Python | tryout-cnns/venv/TrainerAgent.py | thatmariia/math-modeling-13 | 31ab5ffb28d8025191b781c7001ac3a20895bb0d | [
"MIT"
] | null | null | null | tryout-cnns/venv/TrainerAgent.py | thatmariia/math-modeling-13 | 31ab5ffb28d8025191b781c7001ac3a20895bb0d | [
"MIT"
] | null | null | null | tryout-cnns/venv/TrainerAgent.py | thatmariia/math-modeling-13 | 31ab5ffb28d8025191b781c7001ac3a20895bb0d | [
"MIT"
] | null | null | null | from Constants import *
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop,Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
class TrainerAgent:
    """Builds, trains and evaluates a small CNN classifier on image data.

    Expects ``train``/``test`` pandas DataFrames with a "label" column plus
    one column per pixel, and relies on the module-level constants
    RESOLUTION and NRIMAGES (star-imported from Constants).
    """
    def __init__(self, train, test):
        # Training hyper-parameters.
        self.epochs = 50
        self.batchSize = 5
        self.train = train
        # Feature columns only; the label column is dropped from the test set.
        self.test = test.drop(labels=["label"], axis=1)
        self.Y_train = train["label"] #.astype(int)
        self.X_train = train.drop(labels=["label"], axis=1)
        # Filled in later by split() / constructModel() / augment() / fitModel().
        self.X_val = None
        self.Y_val = None
        self.model = None
        self.datagen = None
        self.history = None
    def perform(self):
        """Run the full pipeline: preprocess, split, build, train, evaluate."""
        self.preprocess()
        self.split()
        self.constructModel()
        self.compileModel()
        self.augment()
        self.fitModel()
        self.evaluate()
    ''' ---CNN TRAINING FUNCTIONS--- '''
    def constructModel(self):
        """Define the two Conv/Pool/Dropout blocks plus a dense softmax head."""
        r0 = RESOLUTION[0]
        r1 = RESOLUTION[1]
        self.model = Sequential()
        #
        self.model.add(Conv2D(filters=8, kernel_size=(5, 5), padding='Same',
                 activation='relu', input_shape=(r0, r1, 1)))
        self.model.add(MaxPool2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))
        #
        self.model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='Same',
                 activation='relu'))
        self.model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        self.model.add(Dropout(0.25))
        # fully connected
        self.model.add(Flatten())
        self.model.add(Dense(256, activation="relu"))
        self.model.add(Dropout(0.5))
        # NRIMAGES + 1 output classes, matching the one-hot labels from encode().
        self.model.add(Dense(NRIMAGES+1, activation="softmax"))
    def compileModel(self):
        """Compile with Adam and categorical cross-entropy."""
        optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
        self.model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
    def augment(self):
        """Configure light data augmentation and fit its statistics on X_train."""
        self.datagen = ImageDataGenerator(
            featurewise_center=False, # set input mean to 0 over the dataset
            samplewise_center=False, # set each sample mean to 0
            featurewise_std_normalization=False, # divide inputs by std of the dataset
            samplewise_std_normalization=False, # divide each input by its std
            zca_whitening=False, # dimension reduction
            rotation_range=5, # randomly rotate images in the range 5 degrees
            zoom_range=0.1, # Randomly zoom image 10%
            width_shift_range=0.1, # randomly shift images horizontally 10%
            height_shift_range=0.1, # randomly shift images vertically 10%
            horizontal_flip=False, # randomly flip images
            vertical_flip=False) # randomly flip images
        self.datagen.fit(self.X_train)
    def fitModel(self):
        """Train with the augmented generator; stores the Keras History object."""
        self.history = self.model.fit_generator(self.datagen.flow(self.X_train, self.Y_train, batch_size=self.batchSize),
                          epochs=self.epochs, validation_data=(self.X_val, self.Y_val),
                          steps_per_epoch=self.X_train.shape[0] // self.batchSize)
    def evaluate(self):
        """Plot the validation loss curve and a validation confusion matrix."""
        # Plot the loss and accuracy curves for training and validation
        plt.plot(self.history.history['val_loss'], color='b', label="validation loss")
        plt.title("Test Loss")
        plt.xlabel("Number of Epochs")
        plt.ylabel("Loss")
        plt.legend()
        plt.show()
        # confusion matrix
        # Predict the values from the validation dataset
        Y_pred = self.model.predict(self.X_val)
        # Convert predictions classes to one hot vectors
        Y_pred_classes = np.argmax(Y_pred, axis=1)
        # Convert validation observations to one hot vectors
        Y_true = np.argmax(self.Y_val, axis=1)
        # compute the confusion matrix
        confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
        # plot the confusion matrix
        f, ax = plt.subplots(figsize=(8, 8))
        sns.heatmap(confusion_mtx, annot=True, linewidths=0.01, cmap="Greens", linecolor="gray", fmt='.1f', ax=ax)
        plt.xlabel("Predicted Label")
        plt.ylabel("True Label")
        plt.title("Confusion Matrix")
        plt.show()
    def split(self):
        """Hold out 10% of the training data for validation (fixed seed)."""
        self.X_train, self.X_val, self.Y_train, self.Y_val = train_test_split (self.X_train, self.Y_train,
                                                  test_size=0.1, random_state=2)
    ''' ---DATA PREPROCESSING FUNCTIONS--- '''
    def preprocess(self):
        """Normalize pixel values, reshape to 4-D tensors, one-hot the labels."""
        self.normalize()
        self.reshape()
        self.encode()
    def encode(self):
        """One-hot encode the training labels."""
        self.Y_train = to_categorical(self.Y_train)
    def reshape(self):
        """Reshape flat pixel rows into (n, r0, r1, 1) image tensors."""
        r0 = RESOLUTION[0]
        r1 = RESOLUTION[1]
        self.X_train = self.X_train.values.reshape (-1, r0, r1, 1)
        self.test = self.test.values.reshape (-1, r0, r1, 1)
    def normalize(self):
        """Scale pixel values from [0, 255] to [0, 1]."""
        self.X_train = self.X_train / 255.0
        self.test = self.test / 255.0
    def plotSample(self):
        """Show the first training image with its label as the title.

        NOTE(review): only valid before reshape() replaces the DataFrame
        with a numpy array — .iloc is a pandas accessor.
        """
        img = self.X_train.iloc[0].to_numpy ()
        img = img.reshape(RESOLUTION)
        plt.imshow (img, cmap='gray')
        plt.title (self.train.iloc[0, 0])
        plt.axis ("off")
        plt.show ()
| 36.670968 | 121 | 0.611717 |
50fce3fd5649dc8401922bb02946f118875f0804 | 903 | py | Python | avaliacoes/migrations/0001_initial.py | pjelelhml/pontosTuristicosAPI | 90cb9fee71ea8858ca225789fa73d5f659618413 | [
"MIT"
] | null | null | null | avaliacoes/migrations/0001_initial.py | pjelelhml/pontosTuristicosAPI | 90cb9fee71ea8858ca225789fa73d5f659618413 | [
"MIT"
] | null | null | null | avaliacoes/migrations/0001_initial.py | pjelelhml/pontosTuristicosAPI | 90cb9fee71ea8858ca225789fa73d5f659618413 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-13 19:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the `avaliacoes` app: creates the Avaliacao model."""
    # First migration of this app.
    initial = True
    dependencies = [
        # Needed for the `user` foreign key onto the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Avaliacao',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comentario', models.TextField(blank=True, null=True)),
                ('nota', models.DecimalField(decimal_places=2, max_digits=3)),
                ('data', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 32.25 | 118 | 0.635659 |
9700985ba4ef4f99be03fdf6053c064f9d772829 | 3,948 | py | Python | tests/test_sink.py | thenetcircle/event-bus-3 | 96216210c892d90a957540da9a2bc5712bd40752 | [
"Apache-2.0"
] | null | null | null | tests/test_sink.py | thenetcircle/event-bus-3 | 96216210c892d90a957540da9a2bc5712bd40752 | [
"Apache-2.0"
] | null | null | null | tests/test_sink.py | thenetcircle/event-bus-3 | 96216210c892d90a957540da9a2bc5712bd40752 | [
"Apache-2.0"
] | null | null | null | import asyncio
import pytest
from aiohttp import web
from loguru import logger
from utils import create_kafka_event_from_dict
from eventbus.config import (
ConsumerConfig,
HttpSinkConfig,
HttpSinkMethod,
UseProducersConfig,
)
from eventbus.event import EventProcessStatus
from eventbus.sink import HttpSink
@pytest.mark.asyncio
async def test_httpsink_send_event(aiohttp_client):
    """End-to-end check of HttpSink.send_event against a scripted HTTP server.

    The mock server inspects the request body and simulates success,
    retry-requests, timeouts, 5xx responses and dropped connections; the
    test asserts which EventProcessStatus the sink reports for each.
    """
    # Attempt counters for the endpoints that only succeed after retries.
    retry2_req_times = 0
    timeout_req_times = 0
    ce_req_times = 0
    async def mock_server(request):
        # The request body selects which server behaviour to simulate.
        try:
            req_body = await request.text()
            if req_body == "ok":
                return web.Response(text="ok")
            elif req_body == "retry":
                return web.Response(text="retry")
            elif req_body == "retry2":
                # Succeeds on the third attempt.
                nonlocal retry2_req_times
                retry2_req_times += 1
                if retry2_req_times < 3:
                    return web.Response(text="retry")
                else:
                    return web.Response(text="ok")
            elif req_body == "unexpected_resp":
                return web.Response(text="something else")
            elif req_body == "timeout":
                # First two attempts sleep past the sink's 0.2s timeout.
                nonlocal timeout_req_times
                timeout_req_times += 1
                if timeout_req_times < 3:
                    await asyncio.sleep(0.2)
                return web.Response(text="ok")
            elif req_body == "non-200":
                return web.Response(text="non-200", status=500)
            elif req_body == "connection-error":
                # Returning None makes aiohttp drop the connection; the
                # third attempt succeeds.
                nonlocal ce_req_times
                ce_req_times += 1
                if ce_req_times < 3:
                    return
                else:
                    return web.Response(text="ok")
        except Exception as ex:
            logger.error(ex)
    app = web.Application()
    app.router.add_post("/", mock_server)
    client = await aiohttp_client(app)
    # Sink pointed at "/" with a 0.2s timeout and up to 3 retries.
    sink = HttpSink(
        "test_sink",
        ConsumerConfig(
            id="test_consumer",
            kafka_topics=["topic1"],
            kafka_config={},
            use_producers=UseProducersConfig(producer_ids=["p1"]),
            sink=HttpSinkConfig(
                url="/", method=HttpSinkMethod.POST, timeout=0.2, max_retry_times=3
            ),
        ),
    )
    sink._client = client
    ok_event = create_kafka_event_from_dict({"payload": b"ok"})
    assert (await sink.send_event(ok_event))[1] == EventProcessStatus.DONE
    retry_event = create_kafka_event_from_dict({"payload": b"retry"})
    assert (await sink.send_event(retry_event))[1] == EventProcessStatus.RETRY_LATER
    ok_event = create_kafka_event_from_dict({"payload": b"retry2"})
    assert (await sink.send_event(ok_event))[1] == EventProcessStatus.DONE
    retry_event = create_kafka_event_from_dict({"payload": b"unexpected_resp"})
    assert (await sink.send_event(retry_event))[1] == EventProcessStatus.RETRY_LATER
    retry_event = create_kafka_event_from_dict({"payload": b"timeout"})
    assert (await sink.send_event(retry_event))[1] == EventProcessStatus.DONE
    retry_event = create_kafka_event_from_dict({"payload": b"non-200"})
    assert (await sink.send_event(retry_event))[1] == EventProcessStatus.RETRY_LATER
    retry_event = create_kafka_event_from_dict({"payload": b"connection-error"})
    assert (await sink.send_event(retry_event))[1] == EventProcessStatus.DONE
    # A second sink against an unknown route: every attempt 404s, so the
    # event ends up deferred for a later retry.
    sink2 = HttpSink(
        "test_sink",
        ConsumerConfig(
            id="test_consumer2",
            kafka_topics=["topic1"],
            kafka_config={},
            use_producers=UseProducersConfig(producer_ids=["p1"]),
            sink=HttpSinkConfig(
                url="/unknown",
                method=HttpSinkMethod.POST,
                timeout=0.2,
                max_retry_times=3,
            ),
        ),
    )
    sink2._client = client
    assert (await sink2.send_event(ok_event))[1] == EventProcessStatus.RETRY_LATER
| 34.631579 | 84 | 0.606636 |
4744e365ba79102ee4637e23367c4d3ecaa6f189 | 432 | py | Python | 1201-1300/1228-Poor Pigs/1228-Poor Pigs.py | jiadaizhao/LintCode | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 77 | 2017-12-30T13:33:37.000Z | 2022-01-16T23:47:08.000Z | 1201-1300/1228-Poor Pigs/1228-Poor Pigs.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 1 | 2018-05-14T14:15:40.000Z | 2018-05-14T14:15:40.000Z | 1201-1300/1228-Poor Pigs/1228-Poor Pigs.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 39 | 2017-12-07T14:36:25.000Z | 2022-03-10T23:05:37.000Z | import math
class Solution:
    """
    @param buckets: an integer
    @param minutesToDie: an integer
    @param minutesToTest: an integer
    @return: how many pigs you need to figure out the "poison" bucket within p minutes
    """
    def poorPigs(self, buckets, minutesToDie, minutesToTest):
        # Each pig participates in (minutesToTest // minutesToDie) rounds, so
        # one pig distinguishes `states` outcomes (dies in round k, or lives).
        states = minutesToTest // minutesToDie + 1
        if buckets > 1 and states < 2:
            # No test round fits in the window: the task is impossible.
            raise ValueError("cannot distinguish buckets without any test round")
        # Smallest p with states ** p >= buckets, computed with exact integer
        # arithmetic.  The previous math.ceil(math.log(buckets, states))
        # suffered from float rounding, e.g. math.log(125, 5) evaluates to
        # 3.0000000000000004 and ceil() then wrongly answers 4 instead of 3.
        pigs = 0
        covered = 1
        while covered < buckets:
            covered *= states
            pigs += 1
        return pigs
| 33.230769 | 87 | 0.673611 |
669494175fd905b266109854b516f00f38e18820 | 724 | py | Python | symlinks_cameras.py | GuillaumeRochette/PanopticProcessing | 6dd9bd902b2c9ede6fa3f83b6d17b826705d56f8 | [
"Apache-2.0"
] | 2 | 2022-03-16T07:06:08.000Z | 2022-03-20T09:08:09.000Z | symlinks_cameras.py | GuillaumeRochette/PanopticProcessing | 6dd9bd902b2c9ede6fa3f83b6d17b826705d56f8 | [
"Apache-2.0"
] | 1 | 2022-03-16T07:05:40.000Z | 2022-03-16T11:06:56.000Z | symlinks_cameras.py | GuillaumeRochette/PanopticProcessing | 6dd9bd902b2c9ede6fa3f83b6d17b826705d56f8 | [
"Apache-2.0"
] | null | null | null | import argparse
from pathlib import Path
from metadata import SEQUENCES, SUBSEQUENCES
def main():
    """Re-create per-subsequence symlinks to each sequence's cameras.json."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--root", type=Path, required=True)
    parsed = arg_parser.parse_args()
    root = parsed.root
    for sequence, subsequences in zip(SEQUENCES, SUBSEQUENCES):
        source = root / sequence / "cameras.json"
        for index, (_, _) in enumerate(subsequences):
            target = root / sequence / "Subsequences" / f"{index}" / "cameras.json"
            print(source, target)
            # Replace any stale link/file before linking afresh.
            if target.exists():
                target.unlink()
            target.symlink_to(source.resolve())
if __name__ == '__main__':
main()
| 27.846154 | 84 | 0.645028 |
d2b21f591170d36e86d046c5c4bb5e1d6e3558b6 | 348 | py | Python | migrations/0011_remove_question_edit_para_chains.py | tkhjp/editor_tkh | fcf8fc4ba3d058b5d0de878ec72a07c56a5c7377 | [
"MIT"
] | null | null | null | migrations/0011_remove_question_edit_para_chains.py | tkhjp/editor_tkh | fcf8fc4ba3d058b5d0de878ec72a07c56a5c7377 | [
"MIT"
] | null | null | null | migrations/0011_remove_question_edit_para_chains.py | tkhjp/editor_tkh | fcf8fc4ba3d058b5d0de878ec72a07c56a5c7377 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.2 on 2018-03-20 10:58
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the `para_chains` field from the `question_edit` model."""
    dependencies = [
        # Must run after the hypothesis para_list removal.
        ('editor', '0010_remove_hypothesis_para_list'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='question_edit',
            name='para_chains',
        ),
    ]
| 19.333333 | 55 | 0.614943 |
7a5b4694fc39b7001e34d1734cbce28acd84f8bf | 2,311 | py | Python | src/command_modules/azure-cli-appservice/setup.py | wurp/azure-cli | a1f2f2b666a759eb2f593dd7796db6bbd0237614 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-appservice/setup.py | wurp/azure-cli | a1f2f2b666a759eb2f593dd7796db6bbd0237614 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-appservice/setup.py | wurp/azure-cli | a1f2f2b666a759eb2f593dd7796db6bbd0237614 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
# Prefer the Azure-specific wheel builder; fall back to a plain setup (no
# custom bdist_wheel command) when it is not installed.
try:
    from azure_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")
    cmdclass = {}

VERSION = "0.2.20"

# PyPI trove classifiers describing the supported Python versions and license.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'License :: OSI Approved :: MIT License',
]

# Runtime dependencies; version pins match what this CLI release was tested with.
DEPENDENCIES = [
    'azure-cli-core',
    'azure-functions-devops-build==0.0.22',
    'azure-mgmt-web==0.42.0',
    'azure-mgmt-storage==3.3.0',
    'azure-mgmt-containerregistry==2.8.0',
    'azure-mgmt-applicationinsights==0.1.1',
    # v1.17 breaks on wildcard cert https://github.com/shazow/urllib3/issues/981
    'urllib3[secure]>=1.18',
    'xmltodict',
    'fabric>=2.4',
    'cryptography',
    'pyOpenSSL',
    'six',
    'vsts-cd-manager<1.1.0',
]

# The long description shown on PyPI is the README plus the changelog.
with open('README.rst', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()

setup(
    name='azure-cli-appservice',
    version=VERSION,
    description='Microsoft Azure Command-Line Tools AppService Command Module',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli',
    classifiers=CLASSIFIERS,
    packages=[
        'azure',
        'azure.cli',
        'azure.cli.command_modules',
        'azure.cli.command_modules.appservice'
    ],
    install_requires=DEPENDENCIES,
    cmdclass=cmdclass
)
| 31.657534 | 94 | 0.603635 |
db21ccc5b2b44efe4cb7416c5dc9e8e0b98c3bd7 | 8,661 | py | Python | cosmogrb/universe/survey.py | grburgess/cosmogrb | 55182f2223a329f598bcbc43448f3b0ae9f45448 | [
"BSD-2-Clause"
] | 3 | 2020-03-08T18:20:32.000Z | 2022-03-10T17:27:26.000Z | cosmogrb/universe/survey.py | grburgess/cosmogrb | 55182f2223a329f598bcbc43448f3b0ae9f45448 | [
"BSD-2-Clause"
] | 11 | 2020-03-04T17:21:15.000Z | 2020-06-09T12:20:00.000Z | cosmogrb/universe/survey.py | grburgess/cosmogrb | 55182f2223a329f598bcbc43448f3b0ae9f45448 | [
"BSD-2-Clause"
] | 5 | 2020-03-18T18:05:05.000Z | 2022-03-21T16:06:38.000Z | import collections
from typing import Any, Dict, List, Optional
import h5py
import numpy as np
import pandas as pd
import popsynth
from IPython.display import display
from natsort import natsorted
from cosmogrb.grb.grb_detector import GRBDetector
from cosmogrb.io.detector_save import DetectorSave
from cosmogrb.io.grb_save import GRBSave
from cosmogrb.utils.file_utils import file_existing_and_readable
from cosmogrb.utils.logging import setup_logger
logger = setup_logger(__name__)
class Observation(object):
    """Lazy accessor pairing a GRB save file with its optional detector file.

    Only the file paths are stored; the HDF5 contents are (re)loaded on each
    property access.
    """

    def __init__(
        self,
        grb_save_file: str,
        grb_detector_file: Optional[str] = None,
        population: Optional[popsynth.Population] = None,
        idx=None,
    ):
        """
        A small container class to access observations

        :param grb_save_file: path to the GRB save (HDF5) file
        :param grb_detector_file: path to the detector result file, if any
        :param population: accepted but not stored — TODO confirm intent
        :param idx: accepted but not stored — TODO confirm intent
        """
        # NOTE(review): ``population`` and ``idx`` are never used by this class.
        self._grb: str = grb_save_file
        self._detector: str = grb_detector_file

    @property
    def grb(self):
        # Re-reads the save file on every access.
        return GRBSave.from_file(self._grb)

    @property
    def detector_info(self):
        # None until the survey has been processed with a detector.
        if self._detector is None:
            return None
        else:
            return DetectorSave.from_file(self._detector)
class Survey(collections.OrderedDict):
    """
    A container for a survey of observed GRBs.

    Maps GRB names to :class:`Observation` objects, keeps the file locations
    of all GRBs created in the Universe, and can run a :class:`GRBDetector`
    over each observation (optionally in parallel via a dask client).
    """

    def __init__(
        self, grb_save_files: List[str], population_file: str, grb_detector_files=None
    ) -> None:
        """
        Build the survey from the GRB save files and the generating population.

        :param grb_save_files: the file locations for the survey
        :param population_file: the population file used to generate the population
        :param grb_detector_files: previously generated detector files, if any
        """
        super(Survey, self).__init__()

        self._n_grbs: int = len(grb_save_files)
        self._grb_save_files: List[str] = grb_save_files
        self._names: List[str] = []

        # build a population from the file
        if file_existing_and_readable(population_file):
            self._population_file: Optional[str] = population_file
            self._population: Optional[
                popsynth.Population
            ] = popsynth.Population.from_file(self._population_file)
        else:
            self._population_file = None
            self._population = None
            # BUG FIX: ``Logger`` has no ``warnings`` method (would raise
            # AttributeError); the correct call is ``warning``.
            logger.warning(f"{population_file} does not exist. Perhaps you moved it?")

        # read the GRB name stored in each save file
        # (use a distinct name for the handle to avoid shadowing the path)
        for save_file in self._grb_save_files:
            with h5py.File(save_file, "r") as h5:
                self._names.append(h5.attrs["grb_name"])

        # we start off with not being processed unless
        # we find that there are some detector files
        self._is_processed: bool = False
        self._detected = np.zeros(len(grb_save_files), dtype=bool)
        self._grb_detector_files = None

        # lets see if we have detector files
        if grb_detector_files is not None:

            self._is_processed = True
            self._grb_detector_files = natsorted(grb_detector_files)
            assert len(grb_detector_files) == len(grb_save_files)

            # fill in the detected ones
            for i, det_file in enumerate(self._grb_detector_files):
                tmp = DetectorSave.from_file(det_file)
                if tmp.is_detected:
                    self._detected[i] = True

            # now fill the dict
            for name, grb_save_file, grb_detector_file in zip(
                self._names, self._grb_save_files, self._grb_detector_files
            ):
                self[name] = Observation(
                    grb_save_file=grb_save_file, grb_detector_file=grb_detector_file
                )

        else:
            for name, grb_save_file in zip(self._names, self._grb_save_files):
                self[name] = Observation(
                    grb_save_file=grb_save_file, grb_detector_file=None
                )

    @property
    def population(self) -> Optional[popsynth.Population]:
        """The generating population, or None if its file was missing."""
        return self._population

    @property
    def n_detected(self) -> int:
        """Number of GRBs flagged as detected (0 until processed)."""
        return self._detected.sum()

    @property
    def n_grbs(self) -> int:
        """Total number of GRBs in the survey."""
        return self._n_grbs

    def info(self) -> None:
        """
        display the information about the survey
        """
        generic_info = collections.OrderedDict()

        generic_info["n_grbs"] = self._n_grbs
        generic_info["is_processed"] = self._is_processed

        # detection counts only exist after processing
        if self._is_processed:
            generic_info["n_detected"] = self.n_detected

        df = pd.Series(data=generic_info, index=generic_info.keys())
        display(df.to_frame())

    def process(
        self, detector_type, client=None, serial: bool = False, **kwargs
    ) -> None:
        """
        Process the triggers or detectors in the survey. This runs the provided
        GRBDetector type on each of the GRBs and records the detection results.

        :param detector_type: a **class** of GRBDetector type
        :param client: the dask client (required unless ``serial`` is True)
        :param serial: True/False for if the survey is processed without dask
        """
        assert issubclass(detector_type, GRBDetector), "Not a valid GRB detector"

        if not serial:
            assert (
                client is not None
            ), "One must provide a client to process in parallel"

            # fan the per-GRB work out over the dask cluster
            args = []
            for grb_file in self._grb_save_files:
                args.append([grb_file, detector_type, kwargs])

            futures = client.map(_submit, args)
            client.gather(futures)

        else:
            for grb_file in self._grb_save_files:
                _submit([grb_file, detector_type, kwargs])

        # the survey has now had its triggers run
        # so lets flip its status and make sure that when
        # when we save it, we record the new status
        self._is_processed = True

        # detector files are written next to the save files by _submit
        self._grb_detector_files = []
        for file_name in self._grb_save_files:
            file_name_head = ".".join(file_name.split(".")[:-1])
            out_file_name = f"{file_name_head}_detection_info.h5"
            self._grb_detector_files.append(out_file_name)

        # now update the survey
        # fill in the detected ones
        for i, det_file in enumerate(self._grb_detector_files):
            tmp = DetectorSave.from_file(det_file)
            if tmp.is_detected:
                self._detected[i] = True

        # now fill the dict
        logger.debug("assigning detected grbs to survey")
        for name, grb_save_file, grb_detector_file in zip(
            self._names, self._grb_save_files, self._grb_detector_files
        ):
            self[name] = Observation(
                grb_save_file=grb_save_file, grb_detector_file=grb_detector_file
            )

    @property
    def is_processed(self):
        """True once detectors have been run (or detector files were given)."""
        return self._is_processed

    def write(self, file_name):
        """
        write the info to a file.
        if the universe has been processed, this information is also written

        :param file_name: path of the HDF5 file to create
        """
        dt = h5py.string_dtype(encoding="utf-8")

        with h5py.File(file_name, "w") as f:

            f.attrs["n_grbs"] = self._n_grbs
            f.attrs["is_processed"] = self._is_processed
            f.attrs["population_file"] = self._population_file

            # the returned dataset handles are not needed after creation
            f.create_dataset(
                "grb_saves", data=np.array(self._grb_save_files, dtype=dt)
            )

            if self._is_processed:
                f.create_dataset(
                    "grb_dets", data=np.array(self._grb_detector_files, dtype=dt)
                )

    @classmethod
    def from_file(cls, file_name):
        """
        create a survey from a file previously written with :meth:`write`

        :param file_name: path of the HDF5 file to read
        :returns: a reconstructed :class:`Survey`
        """
        with h5py.File(file_name, "r") as f:

            n_grbs = f.attrs["n_grbs"]  # read for completeness; not needed below
            is_processed = f.attrs["is_processed"]
            population_file = f.attrs["population_file"]

            grb_files = f["grb_saves"][()].astype(str)

            grb_dets = None
            if is_processed:
                grb_dets = f["grb_dets"][()].astype(str)

        return cls(grb_files, population_file, grb_dets)
def _submit(args):
grb_file, detector_type, kwargs = args
processor = detector_type(grb_save_file_name=grb_file, **kwargs)
processor.process()
processor.save()
| 26.486239 | 87 | 0.603972 |
4663e494f00a884a99078313afdf18e2e62427fd | 1,171 | py | Python | cnn_model.py | amanrOnly/CalorieDetectionModel | 831c2cd91a8fa901b56bf9b034de3e8cbaef2916 | [
"Unlicense"
] | null | null | null | cnn_model.py | amanrOnly/CalorieDetectionModel | 831c2cd91a8fa901b56bf9b034de3e8cbaef2916 | [
"Unlicense"
] | null | null | null | cnn_model.py | amanrOnly/CalorieDetectionModel | 831c2cd91a8fa901b56bf9b034de3e8cbaef2916 | [
"Unlicense"
] | null | null | null | import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
def get_model(IMG_SIZE, no_of_fruits, LR):
    """Build a 5-stage convolutional classifier for fruit images.

    :param IMG_SIZE: input image side length (images are IMG_SIZE x IMG_SIZE x 3)
    :param no_of_fruits: number of output classes (softmax width)
    :param LR: learning rate for the Adam optimizer
    :returns: an untrained ``tflearn.DNN`` model
    """
    try:
        tf.reset_default_graph()
    except Exception:
        # NOTE(review): ``tf`` is never imported in this module, so this call
        # always raises NameError and falls through here. Narrowed from a bare
        # ``except:`` so KeyboardInterrupt/SystemExit are no longer swallowed.
        print("tensorflow")

    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

    # Five conv+max-pool stages with filter counts 32-64-128-64-32
    # (5x5 kernels, 5x5 pooling), exactly as the original hand-unrolled stack.
    for n_filters in (32, 64, 128, 64, 32):
        convnet = conv_2d(convnet, n_filters, 5, activation='relu')
        convnet = max_pool_2d(convnet, 5)

    # Classification head: dense layer with dropout, then class softmax.
    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)

    convnet = fully_connected(convnet, no_of_fruits, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(convnet, tensorboard_dir='log')
    return model
| 29.275 | 115 | 0.752348 |
7c3370e9e102f838e83d2fb1df98ecf5cf2a5d94 | 4,810 | py | Python | assets/spark_regression/spark_regression_data_manager.py | kerenleibovich/mlapp | 0b8dfaba7a7070ab68cb29ff61dd1c7dd8076693 | [
"Apache-2.0"
] | 33 | 2021-02-26T10:41:09.000Z | 2021-11-07T12:35:32.000Z | assets/spark_regression/spark_regression_data_manager.py | kerenleibovich/mlapp | 0b8dfaba7a7070ab68cb29ff61dd1c7dd8076693 | [
"Apache-2.0"
] | 17 | 2021-03-04T15:37:21.000Z | 2021-04-06T12:00:13.000Z | assets/spark_regression/spark_regression_data_manager.py | kerenleibovich/mlapp | 0b8dfaba7a7070ab68cb29ff61dd1c7dd8076693 | [
"Apache-2.0"
] | 9 | 2021-03-03T20:02:41.000Z | 2021-10-05T13:03:56.000Z | from mlapp.handlers.instance import spark_handler
from mlapp.managers import DataManager, pipeline
import pyspark.sql.functions as F
from pyspark.ml.feature import Bucketizer
class SparkRegressionDataManager(DataManager):
    """Data manager for the Spark regression asset.

    Loads a local CSV into a Spark DataFrame, then cleans (null-column
    removal, missing-value imputation) and transforms (interactions,
    binning, column transformations) it according to ``data_settings``.
    """

    @pipeline
    def load_train_data(self, *args):
        """Load the training data from the configured local CSV file."""
        return self._load_data()

    @pipeline
    def load_forecast_data(self,*args):
        """Load the forecast data (same source as training in this demo)."""
        return self._load_data()

    @pipeline
    def clean_train_data(self, data):
        """Clean training data, computing and storing imputation values."""
        return self._clean_data(data)

    @pipeline
    def clean_forecast_data(self, data):
        """Clean forecast data using the fill values stored during training."""
        # Reuse the training-time imputation values so forecast rows are
        # filled consistently with what the model saw.
        missing_values = self.get_metadata('missing_values', {})
        return self._clean_data(data, missing_values)

    @pipeline
    def transform_train_data(self, data):
        """Apply feature engineering to the training data."""
        return self._transform_data(data)

    @pipeline
    def transform_forecast_data(self, data):
        """Apply the same feature engineering to forecast data."""
        return self._transform_data(data)

    @pipeline
    def load_target_data(self, *args):
        """Not supported for this asset."""
        raise NotImplementedError()

    # ------------------------- private functions for load/clean/transform ------------------------------
    def _load_data(self):
        # Reads the CSV through the registered Spark handler with schema inference.
        return spark_handler('LOCAL-SPARK').load_csv_file(self.data_settings["local_file_path"], inferSchema=True)

    def _clean_data(self, data, stored_missing_values=None):
        """Drop overly-null columns and impute missing values.

        When ``stored_missing_values`` is given (forecast path) those values
        are used; otherwise fill values are computed from ``data`` and saved
        to metadata under ``missing_values``.
        """
        missing_values = {}
        data_handling = self.data_settings.get('data_handling', {})
        features_handling = data_handling.get('features_handling', {})

        # remove features by null percentage
        null_percentage = data_handling.get("feature_remove_by_null_percentage", 0.5)
        null_percentages = data.select(
            [(F.count(F.when(F.isnull(c), c)) / data.count()).alias(c) for c in data.columns]).collect()[0]
        data = data.select([c for c in data.columns if null_percentages[c] < null_percentage])

        # filling missing values by function/value
        # 'fillna' may be a literal value or the name of a pyspark.sql.functions
        # aggregate (e.g. "mean") which is evaluated over the column.
        if len(features_handling.keys()) > 0:
            missing_values = {
                k: v['fillna'] if not isinstance(v.get('fillna', 'mean'), str) else
                data.agg((eval('F.' + v.get('fillna', 'mean')))(k)).collect()[0][0]
                for (k, v) in features_handling.items()
            }

        # filling default missing features by mean
        default_missing_features = list(set(data.columns).difference(set(list(features_handling.keys()))))
        default_missing_values = data.select([F.mean(c).alias(c) for c in default_missing_features]).collect()[0]
        missing_values.update({c: default_missing_values[c] for c in default_missing_features})
        self.save_metadata('missing_values', missing_values)

        if stored_missing_values is not None:
            data = data.fillna(stored_missing_values)
        else:
            data = data.fillna(missing_values)
        return data

    def _transform_data(self, data):
        """Add interaction columns, binned columns, and per-column transforms."""
        data_handling = self.data_settings.get('data_handling', {})

        # interactions: pairwise products (x+1)*(y+1) of all feature columns,
        # skipping the target and the reversed duplicate of each pair
        if data_handling.get('interactions', False):
            columns_list = list(data.columns)
            columns_list.remove(self.model_settings['variable_to_predict'])
            for col1 in columns_list:
                for col2 in columns_list:
                    if col1 != col2:
                        name = str(col1) + '_' + str(col2)
                        reverse_name = str(col2) + '_' + str(col1)
                        if reverse_name not in list(data.columns):
                            data = data.withColumn(name, (F.col(col1) + 1) * (F.col(col2) + 1))

        # binning: pad the configured bin edges with (min-1, max+1) so every
        # value falls into a bucket
        for feature_to_bin in data_handling.get("features_to_bin", []):
            min_val = data.agg({feature_to_bin['name']: "min"}).collect()[0][0]
            max_val = data.agg({feature_to_bin['name']: "max"}).collect()[0][0]
            full_bins = [(min_val - 1)] + feature_to_bin['bins'] + [(max_val + 1)]
            bucketizer = Bucketizer(splits=full_bins,
                                    inputCol=feature_to_bin['name'],
                                    outputCol=feature_to_bin['name'] + '_binned')
            data = bucketizer.transform(data)

        # transformation: each entry names a pyspark.sql.functions function
        # applied column-wise, stored as "<col>_<func>"
        for col in data_handling.get("features_handling", {}).keys():
            transformation_array = data_handling["features_handling"][col].get("transformation", [])

            # applying transformations
            for feature_transformation_method in transformation_array:
                data = data.withColumn(
                    col + '_' + feature_transformation_method, eval('F.' + feature_transformation_method)(col))

        # dropping features
        features_to_remove = data_handling.get('features_to_remove', [])
        if len(features_to_remove) > 0:
            data = data.drop(*[feature for feature in features_to_remove if feature in data.columns])
        return data
| 41.465517 | 114 | 0.621622 |
40fe6b60bde29c865d4aaf7cf05623e68282bcf5 | 2,601 | py | Python | metrics/psnr_ssim.py | changwoolee/gradient-rescaling-attention-model | 2f1d819e8cee03a9d06312e700a5c474bed48c70 | [
"Apache-2.0"
] | 6 | 2019-11-28T13:46:55.000Z | 2022-03-12T02:52:13.000Z | metrics/psnr_ssim.py | changwoolee/gradient-rescaling-attention-model | 2f1d819e8cee03a9d06312e700a5c474bed48c70 | [
"Apache-2.0"
] | null | null | null | metrics/psnr_ssim.py | changwoolee/gradient-rescaling-attention-model | 2f1d819e8cee03a9d06312e700a5c474bed48c70 | [
"Apache-2.0"
] | 1 | 2020-10-13T18:23:56.000Z | 2020-10-13T18:23:56.000Z | import numpy as np
import PIL.Image as Image
from skimage.measure import compare_psnr, compare_ssim
import glob
import os
import sys
import utils
import tqdm
csv_filename = sys.argv[1]
DATA_DIR='/home/esoc/datasets/SuperResolution/'
RESULT_DIR=sys.argv[2]
Benchmarks=['Set5', 'Set14', 'DIV2K_valid_HR', 'BSDS100', 'Urban100']
#Benchmarks=['Urban100']
def rgb_to_Y(img):
    """Return the luma (Y) channel of an RGB image as an (H, W, 1) array.

    Uses the BT.601 RGB->YCbCr conversion matrix (video-range, offset +16);
    only the first (Y) row of the result is kept.
    """
    coeffs = np.array(
        [[65.738 / 256.0, 129.057 / 256.0, 25.064 / 256.0],
         [- 37.945 / 256.0, - 74.494 / 256.0, 112.439 / 256.0],
         [112.439 / 256.0, - 94.154 / 256.0, - 18.285 / 256.0]])
    ycbcr = img @ coeffs.T
    return ycbcr[:, :, :1] + 16.0
# For each benchmark set: pair HR ground-truth images with the x4 SR results,
# compute PSNR/SSIM on the luma channel (against both the SR output and a
# bicubic-upscale baseline), and append the averages to the CSV report.
for bn in Benchmarks:
    print(bn)
    data_dir = os.path.join(DATA_DIR, bn)
    result_dir = os.path.join(RESULT_DIR, bn)
    # sorted() keeps HR files and *_sr.png results aligned by filename
    hr_images = sorted(glob.glob(data_dir+'/*.png'))
    sr_images = sorted(glob.glob(result_dir+'/x4/*_sr.png'))
    psnr_mean = []
    psnr_bic_mean = []
    ssim_mean = []
    ssim_bic_mean = []
    for hr_fp, sr_fp in zip(hr_images, sr_images):
        print(hr_fp, sr_fp)
        hr = Image.open(hr_fp).convert('RGB')
        sr = Image.open(sr_fp).convert('RGB')
        # crop HR to the SR size (SR is HR truncated to a multiple of 4)
        hr = hr.crop((0,0,sr.size[0],sr.size[1]))
        # bicubic baseline: downscale x4 then upscale back
        bicubic = hr.resize((hr.size[0]//4, hr.size[1]//4), Image.BICUBIC).resize((hr.size[0]//4*4, hr.size[1]//4*4), Image.BICUBIC)
        # hr = hr.convert('YCbCr')
        # sr = sr.convert('YCbCr')
        # bicubic = bicubic.convert('YCbCr')
        # compare on the luma (Y) channel only, as is standard for SR metrics
        bicubic = rgb_to_Y(np.array(bicubic).astype(np.float64))
        hr_arr = rgb_to_Y(np.array(hr).astype(np.float64))
        sr_arr = rgb_to_Y(np.array(sr).astype(np.float64))
        # trim a border before scoring to exclude edge artifacts
        cutoff = 6 + 4
        hr_arr = hr_arr[cutoff:-cutoff,cutoff:-cutoff,:]
        sr_arr = sr_arr[cutoff:-cutoff,cutoff:-cutoff,:]
        bicubic = bicubic[cutoff:-cutoff,cutoff:-cutoff,:]
        psnr_val = compare_psnr(hr_arr, sr_arr, data_range=255)
        psnr_bic_val = compare_psnr(hr_arr, bicubic, data_range=255)
        print(psnr_val)
        ssim_val = compare_ssim(hr_arr, sr_arr, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03, sigma=1.5, data_range=255)
        ssim_bic_val = compare_ssim(hr_arr, bicubic, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03, sigma=1.5, data_range=255)
        print(ssim_val)
        psnr_mean.append(psnr_val)
        psnr_bic_mean.append(psnr_bic_val)
        ssim_mean.append(ssim_val)
        ssim_bic_mean.append(ssim_bic_val)
    # per-benchmark averages over all image pairs
    pm = np.array(psnr_mean).mean()
    pbm = np.array(psnr_bic_mean).mean()
    sm = np.array(ssim_mean).mean()
    sbm = np.array(ssim_bic_mean).mean()
    print('psnr:',pm,'psnr_bicubic:',pbm,'ssim:',sm, 'ssim_bicubic:',sbm)
    res = {'psnr_bicubic':pbm, 'psnr_pred':pm, 'ssim_bicubic':sbm, 'ssim_pred':sm}
    utils.save_csv(csv_filename, res, result_dir, data_dir)
| 33.779221 | 146 | 0.697809 |
b5e366d5a5c75b0204b066137055a1d261caa72c | 4,798 | py | Python | ec2api_tempest_plugin/scenario/base.py | NeCTAR-RC/ec2api-tempest-plugin | 2757fdac7355e9bd355b0e472ed928d7b88b933e | [
"Apache-2.0"
] | 5 | 2018-06-27T11:17:10.000Z | 2019-01-28T22:00:08.000Z | ec2api_tempest_plugin/scenario/base.py | NeCTAR-RC/ec2api-tempest-plugin | 2757fdac7355e9bd355b0e472ed928d7b88b933e | [
"Apache-2.0"
] | null | null | null | ec2api_tempest_plugin/scenario/base.py | NeCTAR-RC/ec2api-tempest-plugin | 2757fdac7355e9bd355b0e472ed928d7b88b933e | [
"Apache-2.0"
] | 2 | 2019-01-14T04:54:29.000Z | 2020-02-03T15:53:18.000Z | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log
from tempest.lib.common.utils import data_utils
from ec2api_tempest_plugin import base
from ec2api_tempest_plugin import config
CONF = config.CONF
LOG = log.getLogger(__name__)
class BaseScenarioTest(base.EC2TestCase):
    """Common helpers for EC2 API scenario tests.

    Every resource created here registers a cleanup via
    ``addResourceCleanUp`` immediately after creation, so tests never leak
    addresses, key pairs, security groups, or network interfaces.
    """

    def get_instance_ip(self, instance_id):
        """Return a public IP for the instance, allocating/associating one
        if the instance does not already have it."""
        instance = self.get_instance(instance_id)
        public_ip = instance.get('PublicIpAddress')
        if public_ip:
            return public_ip

        # VPC instances are addressed by AllocationId, EC2-Classic by PublicIp
        is_vpc = 'VpcId' in instance
        alloc_id, public_ip = self.allocate_address(is_vpc)

        kwargs = {'InstanceId': instance_id}
        if is_vpc:
            kwargs['AllocationId'] = alloc_id
        else:
            kwargs['PublicIp'] = public_ip
        data = self.client.associate_address(*[], **kwargs)
        if is_vpc:
            self.addResourceCleanUp(self.client.disassociate_address,
                                    AssociationId=data['AssociationId'])
            # wait for the association to become available before returning
            self.get_address_assoc_waiter().wait_available(
                {'AllocationId': alloc_id})
        else:
            self.addResourceCleanUp(self.client.disassociate_address,
                                    PublicIp=public_ip)
            self.get_address_assoc_waiter().wait_available(
                {'PublicIp': public_ip})

        return public_ip

    def allocate_address(self, is_vpc):
        """Allocate an elastic IP; returns (allocation_id, public_ip).

        ``allocation_id`` is None for EC2-Classic (non-VPC) addresses.
        """
        kwargs = dict()
        if is_vpc:
            kwargs['Domain'] = 'vpc'
        data = self.client.allocate_address(*[], **kwargs)
        alloc_id = data.get('AllocationId')
        public_ip = data['PublicIp']
        if is_vpc:
            self.addResourceCleanUp(self.client.release_address,
                                    AllocationId=alloc_id)
        else:
            self.addResourceCleanUp(self.client.release_address,
                                    PublicIp=public_ip)

        return alloc_id, public_ip

    def create_key_pair(self, key_name):
        """Create a key pair and return its private key material."""
        data = self.client.create_key_pair(KeyName=key_name)
        self.addResourceCleanUp(self.client.delete_key_pair, KeyName=key_name)
        return data.get('KeyMaterial')

    def create_standard_security_group(self):
        """Create a security group allowing ICMP and SSH from anywhere;
        returns the group name."""
        name = data_utils.rand_name('sgName')
        desc = data_utils.rand_name('sgDesc')
        kwargs = {'GroupName': name, 'Description': desc}
        self.client.create_security_group(*[], **kwargs)
        self.addResourceCleanUp(self.client.delete_security_group,
                                GroupName=name)
        # brief pause so the new group is visible before adding rules
        # NOTE(review): fixed sleep — presumably works around eventual
        # consistency in the API; confirm before removing
        time.sleep(2)

        kwargs = {
            'GroupName': name,
            'IpPermissions': [{
                'IpProtocol': 'icmp',
                'FromPort': -1,
                'ToPort': -1,
                'IpRanges': [{
                    'CidrIp': '0.0.0.0/0'
                }],
            }, {
                'IpProtocol': 'tcp',
                'FromPort': 22,
                'ToPort': 22,
                'IpRanges': [{
                    'CidrIp': '0.0.0.0/0'
                }],
            }]
        }
        self.client.authorize_security_group_ingress(*[], **kwargs)

        return name

    def prepare_vpc_default_security_group(self, vpc_id):
        """Open the VPC's default security group to all ingress traffic."""
        data = self.client.describe_security_groups(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
        # a VPC always has exactly one default security group
        self.assertEqual(1, len(data['SecurityGroups']))
        group_id = data['SecurityGroups'][0]['GroupId']
        kwargs = {
            'GroupId': group_id,
            'IpPermissions': [{
                'IpProtocol': '-1',
                'FromPort': -1,
                'ToPort': -1,
                'IpRanges': [{
                    'CidrIp': '0.0.0.0/0'
                }],
            }]
        }
        self.client.authorize_security_group_ingress(*[], **kwargs)

    def create_network_interface(self, subnet_id):
        """Create a network interface in the subnet and wait until it is
        available; returns the interface id."""
        data = self.client.create_network_interface(SubnetId=subnet_id)
        ni_id = data['NetworkInterface']['NetworkInterfaceId']
        self.addResourceCleanUp(self.client.delete_network_interface,
                                NetworkInterfaceId=ni_id)
        self.get_network_interface_waiter().wait_available(ni_id)

        return ni_id
| 35.279412 | 78 | 0.582118 |
7b904057c114d15865a65de43810647cea46b1f3 | 459 | py | Python | application/model/widget/game/GameTextdialogue.py | Kzulfazriawan/Stigma-game-demo | 971ee90a908784dfe1c9e87733b0394fa2212299 | [
"MIT"
] | 2 | 2016-08-09T05:33:21.000Z | 2016-10-05T06:34:04.000Z | application/model/widget/game/GameTextdialogue.py | Kzulfazriawan/stigma-game-demo | 971ee90a908784dfe1c9e87733b0394fa2212299 | [
"MIT"
] | null | null | null | application/model/widget/game/GameTextdialogue.py | Kzulfazriawan/stigma-game-demo | 971ee90a908784dfe1c9e87733b0394fa2212299 | [
"MIT"
] | null | null | null | from core import Files
from library.stigma.application import Label
from library.stigma.helper import kivyBuilder
kivyBuilder(Files.apppath, 'model', 'builder', 'game', 'gametextdialogue.kv')
class GameTextdialogue(Label):
    """Text-dialogue label widget for the game UI.

    Its layout is defined in ``gametextdialogue.kv``, loaded at module
    import time via ``kivyBuilder``.
    """

    def __init__(self):
        super(GameTextdialogue, self).__init__()
        # Runtime fields, filled in later by the dialogue controller
        # (their exact semantics are set by the caller — TODO confirm).
        self.state = None
        self.save = None
        self.params = None
        self.part = None
        self.touch_action = 1  # default touch-action mode
3c85df2edd27841eb9c5cbfd8b7f767defb1e680 | 4,049 | py | Python | active_learning_ts/experiments/blueprint.py | bela127/active_learning_ts | b652995edfb14c37e486ddc8261d6093d6babdae | [
"MIT"
] | 1 | 2022-02-14T09:38:22.000Z | 2022-02-14T09:38:22.000Z | active_learning_ts/experiments/blueprint.py | bela127/active_learning_ts | b652995edfb14c37e486ddc8261d6093d6babdae | [
"MIT"
] | 1 | 2022-02-11T12:13:31.000Z | 2022-02-11T12:13:31.000Z | active_learning_ts/experiments/blueprint.py | bela127/active_learning_ts | b652995edfb14c37e486ddc8261d6093d6babdae | [
"MIT"
] | 2 | 2021-12-15T12:56:30.000Z | 2022-02-01T15:31:08.000Z | from typing import Iterable, Protocol
from active_learning_ts.data_pipeline import DataPipeline
from active_learning_ts.data_retrievement.augmentation.no_augmentation import NoAugmentation
from active_learning_ts.data_retrievement.data_source import DataSource
from active_learning_ts.data_retrievement.data_sources.test_data_source import TestDataSource
from active_learning_ts.data_retrievement.interpolation_strategies.flat_map_interpolation import FlatMapInterpolation
from active_learning_ts.data_retrievement.interpolation_strategy import InterpolationStrategy
from active_learning_ts.data_retrievement.retrievement_strategies.exact_retrievement import ExactRetrievement
from active_learning_ts.evaluation.evaluation_metric import EvaluationMetric
from active_learning_ts.experiments.blueprint_element import BlueprintElement
from active_learning_ts.instance_properties.costs.constant_instance_cost import ConstantInstanceCost
from active_learning_ts.instance_properties.instance_cost import InstanceCost
from active_learning_ts.instance_properties.instance_objective import InstanceObjective
from active_learning_ts.instance_properties.objectives.constant_instance_objective import ConstantInstanceObjective
from active_learning_ts.knowledge_discovery.discover_tasks.no_knowledge_discovery_task import NoKnowledgeDiscoveryTask
from active_learning_ts.knowledge_discovery.knowledge_discovery_task import KnowledgeDiscoveryTask
from active_learning_ts.data_retrievement.retrievement_strategy import RetrievementStrategy
from active_learning_ts.query_selection.query_optimizer import QueryOptimizer
from active_learning_ts.query_selection.query_optimizers.no_query_optimizer import NoQueryOptimizer
from active_learning_ts.query_selection.query_sampler import QuerySampler
from active_learning_ts.query_selection.query_samplers.no_query_sampler import NoQuerySampler
from active_learning_ts.query_selection.selection_criteria import SelectionCriteria
from active_learning_ts.query_selection.selection_criterias.no_selection_criteria import NoSelectionCriteria
from active_learning_ts.surrogate_model.surrogate_model import SurrogateModel
from active_learning_ts.surrogate_model.surrogate_models.no_surrogate_model import NoSurrogateModel
from active_learning_ts.training.training_strategies.no_training_strategy import NoTrainingStrategy
from active_learning_ts.training.training_strategy import TrainingStrategy
class Blueprint(Protocol):
    """
    A blueprint is created in order to set up an experiment.
    The config objects are used to instantiate experiment modules
    """

    # Experiment-loop sizing (must be supplied by concrete blueprints).
    repeat: int
    learning_steps: int
    num_knowledge_discovery_queries: int

    # Data retrievement: where data comes from and how queries map onto it.
    data_source: BlueprintElement[DataSource] = BlueprintElement[TestDataSource]()
    retrievement_strategy: BlueprintElement[RetrievementStrategy] = BlueprintElement[ExactRetrievement]()
    augmentation_pipeline: BlueprintElement[DataPipeline] = BlueprintElement[NoAugmentation]()
    interpolation_strategy: BlueprintElement[InterpolationStrategy] = BlueprintElement[FlatMapInterpolation]()

    # Per-instance properties: objective value and cost of querying an instance.
    instance_level_objective: BlueprintElement[InstanceObjective] = BlueprintElement[ConstantInstanceObjective]()
    instance_cost: BlueprintElement[InstanceCost] = BlueprintElement[ConstantInstanceCost]()

    # Model: surrogate and how it is trained.
    surrogate_model: BlueprintElement[SurrogateModel] = BlueprintElement[NoSurrogateModel]()
    training_strategy: BlueprintElement[TrainingStrategy] = BlueprintElement[NoTrainingStrategy]()

    # Query selection: sampling, optimization, and the selection criteria.
    surrogate_sampler: BlueprintElement[QuerySampler] = BlueprintElement[NoQuerySampler]()
    query_optimizer: BlueprintElement[QueryOptimizer] = BlueprintElement[NoQueryOptimizer]()
    selection_criteria: BlueprintElement[SelectionCriteria] = BlueprintElement[NoSelectionCriteria]()

    # Evaluation and knowledge discovery.
    evaluation_metrics: Iterable[BlueprintElement[EvaluationMetric]] = []
    knowledge_discovery_sampler: BlueprintElement[QuerySampler] = BlueprintElement[NoQuerySampler]()
    knowledge_discovery_task: BlueprintElement[KnowledgeDiscoveryTask] = BlueprintElement[NoKnowledgeDiscoveryTask]()
39e6d66036c6b6df4b453a8078aa2be8db1b40c5 | 7,880 | py | Python | yolov5/object_detection_3d.py | jeffreyttc/yolov5_ros2 | 305f96b1c33ea0045738cc498cfe98a35efd3acb | [
"MIT"
] | 1 | 2021-12-28T22:41:56.000Z | 2021-12-28T22:41:56.000Z | yolov5/object_detection_3d.py | jeffreyttc/yolov5_ros2 | 305f96b1c33ea0045738cc498cfe98a35efd3acb | [
"MIT"
] | null | null | null | yolov5/object_detection_3d.py | jeffreyttc/yolov5_ros2 | 305f96b1c33ea0045738cc498cfe98a35efd3acb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os, sys
from types import FrameType
import rclpy
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from std_msgs.msg import String
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
import time
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[0].as_posix())
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \
apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.torch_utils import load_classifier, select_device, time_sync
from utils.plots import Annotator, colors, save_one_box, plot_one_box
bridge = CvBridge()
class ObjectDetection(Node):
    def __init__(self):
        """Set up the ROS 2 node: load the YOLOv5 model, warm it up, and
        subscribe to the camera/depth image topics."""
        super().__init__('object_detection')

        weights='yolov5s.pt'  # model.pt path(s)
        self.imgsz=640  # inference size (pixels)
        self.conf_thres=0.25  # confidence threshold
        self.iou_thres=0.45  # NMS IOU threshold
        self.max_det=1000  # maximum detections per image
        self.classes=None  # filter by class: --class 0, or --class 0 2 3
        self.agnostic_nms=False  # class-agnostic NMS
        self.augment=False  # augmented inference
        self.visualize=False  # visualize features
        self.line_thickness=3  # bounding box thickness (pixels)
        self.hide_labels=False  # hide labels
        self.hide_conf=False  # hide confidences
        self.half=False  # use FP16 half-precision inference
        self.stride = 32

        # NOTE(review): the locals below are assigned but never used here.
        device_num=''  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False  # show results
        save_crop=False  # save cropped prediction boxes
        nosave=False  # do not save images/videos
        update=False  # update all models
        name='exp'  # save results to project/name

        # Initialize
        set_logging()
        self.device = select_device(device_num)
        self.half &= self.device.type != 'cpu'  # half precision only supported on CUDA

        # Load model
        self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
        # NOTE(review): ``stride`` and ``imgsz`` are locals — the model's real
        # stride is not written back to ``self.stride`` (left at 32), and the
        # checked image size is not written back to ``self.imgsz``; confirm
        # this is intended, as the callbacks use the ``self.`` values.
        stride = int(self.model.stride.max())  # model stride
        imgsz = check_img_size(self.imgsz, s=stride)  # check image size
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names  # get class names
        if self.half:
            self.model.half()  # to FP16

        # Second-stage classifier (disabled; kept for optional ResNet refinement)
        self.classify = False
        if self.classify:
            self.modelc = load_classifier(name='resnet50', n=2)  # initialize
            self.modelc.load_state_dict(torch.load('resnet50.pt', map_location=self.device)['model']).to(self.device).eval()

        # Dataloader
        view_img = check_imshow()
        cudnn.benchmark = True  # set True to speed up constant image size inference

        # Run inference once on a dummy tensor to warm up the GPU
        if self.device.type != 'cpu':
            self.model(torch.zeros(1, 3, imgsz, imgsz).to(self.device).type_as(next(self.model.parameters())))  # run once

        # ROS 2 wiring: RGB images, depth images, and the detections publisher.
        self.subscription = self.create_subscription(
            Image,
            '/image_raw',
            self.camera_callback,
            qos_profile_sensor_data)
        self.subscription  # prevent unused variable warning
        self.subscription = self.create_subscription(
            Image,
            '/depth',
            self.depth_callback,
            qos_profile_sensor_data)
        self.subscription  # prevent unused variable warning

        self.publisher_ = self.create_publisher(String, 'detected_objects', 10)
    def camera_callback(self, data):
        """Run YOLO detection on an incoming RGB frame.

        For every detection: draws a labelled box on the frame, publishes a
        String message per object, and looks up the depth at the box center
        from the latest depth frame. Displays the annotated frame via OpenCV.
        """
        t0 = time.time()
        img = bridge.imgmsg_to_cv2(data, "bgr8")
        # NOTE(review): iterating `for x in img` iterates the ROWS of a single
        # image, not a list of stream frames — this block looks adapted from
        # YOLOv5's multi-stream loader; confirm the shape check is meaningful.
        s = np.stack([letterbox(x, self.imgsz, stride=self.stride)[0].shape for x in img], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
        # Letterbox
        img0 = img.copy()  # keep original (BGR, HWC) for drawing/display
        img = img[np.newaxis, :, :, :]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.half() if self.half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_sync()
        # NOTE(review): assumes self.save_dir is defined elsewhere; as seen in
        # this chunk it is unset, so self.visualize=True would raise.
        pred = self.model(img,
                          augment=self.augment,
                          visualize=increment_path(self.save_dir / 'features', mkdir=True) if self.visualize else False)[0]
        # Apply NMS
        pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, self.classes, self.agnostic_nms, max_det=self.max_det)
        t2 = time_sync()
        # Apply Classifier (disabled: self.classify is hard-coded False in __init__)
        if self.classify:
            pred = apply_classifier(pred, self.modelc, img, img0)
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            s = f'{i}: '
            s += '%gx%g ' % img.shape[2:]  # print string
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
                for *xyxy, conf, cls in reversed(det):
                    c = int(cls)  # integer class
                    # NOTE(review): when hide_labels is True, label is None and
                    # the published message reads "Detected Object: None".
                    label = None if self.hide_labels else (self.names[c] if self.hide_conf else f'{self.names[c]} {conf:.2f}')
                    plot_one_box(xyxy, img0, label=label, color=colors(c, True), line_thickness=self.line_thickness)
                    msg = String()
                    msg.data = 'Detected Object: %s' % label
                    self.publisher_.publish(msg)
                    self.get_logger().info('%s' % msg.data)
                    # Box center in pixel coordinates (x, y).
                    ctrpts = ((int(xyxy[0])+int(xyxy[2]))/2, (int(xyxy[1])+int(xyxy[3]))/2)
                    print("center point")
                    print(ctrpts)
                    # NOTE(review): self.depth_image is only set after the first
                    # depth_callback; a detection arriving before any depth
                    # frame raises AttributeError. Depth indexed as [row, col].
                    depth = self.depth_image[int(ctrpts[1]), int(ctrpts[0])]
                    print("depth")
                    print(depth)
        cv2.imshow("RGB", img0)
        cv2.waitKey(4)
    def depth_callback(self, data):
        """Cache the latest depth frame and display it.

        The stored frame is read by camera_callback to look up per-detection
        depth values. "passthrough" keeps the native depth encoding
        (alternative would be an explicit "32FC1" conversion).
        """
        self.depth_image = bridge.imgmsg_to_cv2(data, "passthrough")
        # Display depth image
        cv2.imshow("Depth", self.depth_image)
        cv2.waitKey(1)
def main(args=None):
    """Initialize rclpy, spin the detection node, and shut down cleanly."""
    rclpy.init(args=args)
    node = ObjectDetection()
    rclpy.spin(node)
    # Explicit teardown; the garbage collector would otherwise do this when
    # the node object is destroyed.
    node.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
main() | 38.439024 | 128 | 0.611675 |
05a3f910877ef2e5677f0368dc88236112d232e9 | 1,333 | py | Python | nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py | grlee77/nipype | 73f3a733ac1b7d9b09ec32a387905a9302423b87 | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py | grlee77/nipype | 73f3a733ac1b7d9b09ec32a387905a9302423b87 | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py | grlee77/nipype | 73f3a733ac1b7d9b09ec32a387905a9302423b87 | [
"BSD-3-Clause"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.camino.dti import ComputeMeanDiffusivity
def test_ComputeMeanDiffusivity_inputs():
    # Expected trait metadata for every input of ComputeMeanDiffusivity.
    # NOTE: this file is produced by tools/checkspecs.py (see the
    # AUTO-GENERATED header) — regenerate rather than editing by hand.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='< %s',
    mandatory=True,
    position=1,
    ),
    inputdatatype=dict(argstr='-inputdatatype %s',
    ),
    inputmodel=dict(argstr='-inputmodel %s',
    ),
    out_file=dict(argstr='> %s',
    genfile=True,
    position=-1,
    ),
    outputdatatype=dict(argstr='-outputdatatype %s',
    ),
    scheme_file=dict(argstr='%s',
    position=2,
    ),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = ComputeMeanDiffusivity.input_spec()

    # Generator-style (nose) test: each yield asserts one metadata entry.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ComputeMeanDiffusivity_outputs():
    # The interface declares a single output trait, `md`, with no extra
    # metadata to verify.
    output_map = dict(md=dict(),
    )
    outputs = ComputeMeanDiffusivity.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 27.204082 | 78 | 0.662416 |
79b2df94d08cdac48138657f2bfb4844d102be68 | 1,749 | py | Python | aiida/storage/psql_dos/models/settings.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 153 | 2016-12-23T20:59:03.000Z | 2019-07-02T06:47:52.000Z | aiida/storage/psql_dos/models/settings.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 2,466 | 2016-12-24T01:03:52.000Z | 2019-07-04T13:41:08.000Z | aiida/storage/psql_dos/models/settings.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2016-12-23T16:28:00.000Z | 2019-07-01T15:55:20.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=import-error,no-name-in-module
"""Module to manage node settings for the SQLA backend."""
from sqlalchemy import Column
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.sql.schema import Index
from sqlalchemy.types import DateTime, Integer, String, Text
from aiida.common import timezone
from aiida.storage.psql_dos.models.base import Base
class DbSetting(Base):
    """Database model to store global settings (one key/value row each)."""
    __tablename__ = 'db_dbsetting'

    id = Column(Integer, primary_key=True)  # pylint: disable=invalid-name
    # Unique setting name (max 1024 chars).
    key = Column(String(1024), nullable=False, unique=True)
    # Arbitrary JSON payload holding the setting's value.
    val = Column(JSONB, default={})

    # I also add a description field for the variables
    description = Column(Text, default='', nullable=False)
    # Last-modified timestamp; refreshed automatically on every update.
    time = Column(DateTime(timezone=True), default=timezone.now, onupdate=timezone.now, nullable=False)

    # varchar_pattern_ops btree index: speeds up LIKE 'prefix%' lookups on key.
    __table_args__ = (
        Index(
            'ix_pat_db_dbsetting_key', 'key', postgresql_using='btree', postgresql_ops={'key': 'varchar_pattern_ops'}
        ),
    )

    def __str__(self):
        return f"'{self.key}'={self.val}"
| 42.658537 | 117 | 0.590623 |
834215d8101d7a80508d8aacd733ac7510fe51f6 | 1,614 | py | Python | dashboard/dashboard/pinpoint/handlers/results2.py | bopopescu/chromium72-third-party-catapult | 774e1355b871e13bb858147a136e9cb476f55030 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | dashboard/dashboard/pinpoint/handlers/results2.py | kind-john/catapult | 29635376119833f172a58a48a3282d353ce55d2b | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | dashboard/dashboard/pinpoint/handlers/results2.py | kind-john/catapult | 29635376119833f172a58a48a3282d353ce55d2b | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for displaying a results2 file."""
import json
import webapp2
from dashboard.pinpoint.models import job as job_module
from dashboard.pinpoint.models import results2
class Results2(webapp2.RequestHandler):
    """Shows an overview of recent anomalies for perf sheriffing.

    GET returns a small JSON status object:
      - job-incomplete: the job still has a pending task
      - complete (+url): a cached results2 file exists
      - pending: generation was scheduled
      - failed: scheduling was refused
    Unknown job ids produce a 400 with the error text.
    """

    def get(self, job_id):
        try:
            job = job_module.JobFromId(job_id)
            if not job:
                raise results2.Results2Error('Error: Unknown job %s' % job_id)

            if job.task:
                self.response.out.write(json.dumps({'status': 'job-incomplete'}))
                return

            url = results2.GetCachedResults2(job)
            if url:
                self.response.out.write(json.dumps({'status': 'complete', 'url': url}))
                return

            if results2.ScheduleResults2Generation(job):
                self.response.out.write(json.dumps({'status': 'pending'}))
                return

            self.response.out.write(json.dumps({'status': 'failed'}))
        except results2.Results2Error as e:
            self.response.set_status(400)
            # NOTE(review): e.message is a Python-2-only attribute; this file
            # predates a py3 migration — would need str(e) under Python 3.
            self.response.out.write(e.message)
class Results2Generator(webapp2.RequestHandler):
    """Creates a results2 file and streams it to cloud storage.

    POST is invoked as a task-queue handler for a previously scheduled job.
    """

    def post(self, job_id):
        try:
            job = job_module.JobFromId(job_id)
            if not job:
                raise results2.Results2Error('Error: Unknown job %s' % job_id)
            results2.GenerateResults2(job)
        except results2.Results2Error as e:
            # NOTE(review): e.message is Python-2-only; see Results2.get.
            self.response.out.write(e.message)
| 29.888889 | 79 | 0.69083 |
5b60c355135b35b64c4d30e49258566a1543c027 | 1,588 | py | Python | src/grid.py | alexlarson98/HollywoodSquares | ab48bf3a01233ed62fe4695a9e5733433e96832e | [
"MIT"
] | null | null | null | src/grid.py | alexlarson98/HollywoodSquares | ab48bf3a01233ed62fe4695a9e5733433e96832e | [
"MIT"
] | null | null | null | src/grid.py | alexlarson98/HollywoodSquares | ab48bf3a01233ed62fe4695a9e5733433e96832e | [
"MIT"
] | null | null | null | import pygame
class Grid:
    """Hollywood Squares game board.

    Renders either the plain board image or, when a player is highlighted,
    a blinking alternation between the plain and per-player board image.
    """

    def __init__(self, sizes):
        # `sizes` supplies display_width/display_height/grid_size/grid_fraction.
        self.sizes = sizes
        self.board = pygame.image.load('./media/hs_grid.png')
        self.board = pygame.transform.scale(self.board, (sizes.grid_size, sizes.grid_size))
        self.alt_board = None          # per-player highlighted board, set lazily
        self.player_index = None       # index of the highlighted player, or None
        self.count = 0                 # frame counter driving the blink cycle
        # Top-left corner where the board is blitted.
        self.width = (self.sizes.display_width - self.sizes.grid_size) * (self.sizes.grid_fraction)
        self.height = (self.sizes.display_height - self.sizes.grid_size) / 2

    def display_grid(self, game_display):
        """Blit the board; blink between plain and highlighted boards
        (15 frames each) while a player index is set."""
        if self.player_index is None:
            game_display.blit(self.board, (self.width, self.height))
        else:
            if 15 < self.count <= 30:
                game_display.blit(self.alt_board, (self.width, self.height))
            elif self.count <= 15:
                game_display.blit(self.board, (self.width, self.height))
            if self.count >= 30:
                self.count = 0
            self.count += 1

    def set_player_index(self, index):
        """Highlight player `index`: load and scale their board variant."""
        self.alt_board = pygame.image.load(f'./media/hs_grid_{index}.png')
        self.alt_board = pygame.transform.scale(self.alt_board, (self.sizes.grid_size, self.sizes.grid_size))
        self.player_index = index

    def reset_player_index(self):
        """Clear the highlight and restore the plain board as alt_board."""
        self.player_index = None
        self.alt_board = pygame.image.load('./media/hs_grid.png')
        # Bug fix: scale the freshly loaded surface. The original scaled
        # self.board (already scaled) and threw the loaded image away.
        self.alt_board = pygame.transform.scale(self.alt_board, (self.sizes.grid_size, self.sizes.grid_size))

    def get_width(self):
        return self.width

    def get_height(self):
        return self.height
a888bc46d6eecec7db157d766155f7bfee9cec77 | 2,505 | py | Python | tensortrade/features/indicators/talib_indicator.py | msincenselee/tensortrade | aeed3fbf1657ba1f3bf4bb81fec876d65284b79b | [
"Apache-2.0"
] | 7 | 2020-09-28T23:36:40.000Z | 2022-02-22T02:00:32.000Z | tensortrade/features/indicators/talib_indicator.py | msincenselee/tensortrade | aeed3fbf1657ba1f3bf4bb81fec876d65284b79b | [
"Apache-2.0"
] | 4 | 2020-11-13T18:48:52.000Z | 2022-02-10T01:29:47.000Z | tensortrade/features/indicators/talib_indicator.py | msincenselee/tensortrade | aeed3fbf1657ba1f3bf4bb81fec876d65284b79b | [
"Apache-2.0"
] | 3 | 2020-11-23T17:31:59.000Z | 2021-04-08T10:55:03.000Z | # Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import talib
import numpy as np
import pandas as pd
from gym import Space
from copy import copy
from abc import abstractmethod
from typing import Union, List, Callable
from tensortrade.features import FeatureTransformer
class TAlibIndicator(FeatureTransformer):
    """Adds one or more TAlib indicators to a data frame, based on existing open, high, low, and close column values."""

    def __init__(self, indicators: List[str], lows: Union[List[float], List[int]] = None, highs: Union[List[float], List[int]] = None, **kwargs):
        """`indicators` is a list of (name, {'args': [...], 'params': {...}}) pairs.

        `lows`, `highs` and **kwargs are accepted for interface compatibility
        but are not used by this transformer.
        """
        self._indicator_names = [indicator[0].upper() for indicator in indicators]
        # Bug fix: key these maps by the upper-cased name so that lookups with
        # entries of self._indicator_names succeed for lowercase configs too
        # (the original keyed them by the raw name and would raise KeyError).
        self._indicator_args = {indicator[0].upper(): indicator[1]['args'] for indicator in indicators}
        self._indicator_params = {indicator[0].upper(): indicator[1]['params'] for indicator in indicators}
        # A '-' suffix (e.g. 'EMA-10') selects the same TA-Lib function.
        self._indicators = [getattr(talib, name.split('-')[0]) for name in self._indicator_names]

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Append each configured indicator as one (or, for BBANDS, three)
        new column(s) of ``X`` and return ``X``."""
        for idx, indicator in enumerate(self._indicators):
            indicator_name = self._indicator_names[idx]
            indicator_args = [X[arg].values for arg in self._indicator_args[indicator_name]]
            indicator_params = self._indicator_params[indicator_name]

            if indicator_name == 'BBANDS':
                # Bollinger Bands return three series (upper, middle, lower).
                upper, middle, lower = indicator(*indicator_args, **indicator_params)
                X["bb_upper"] = upper
                X["bb_middle"] = middle
                X["bb_lower"] = lower
            else:
                try:
                    value = indicator(*indicator_args, **indicator_params)
                    if isinstance(value, tuple):
                        X[indicator_name] = value[0][0]
                    else:
                        X[indicator_name] = value
                except Exception:
                    # Fall back to the first output series when assignment of
                    # the raw return value fails. (Was a bare `except:`, which
                    # also swallowed KeyboardInterrupt/SystemExit.)
                    X[indicator_name] = indicator(*indicator_args, **indicator_params)[0]
        return X
| 41.065574 | 145 | 0.65988 |
e195f634d6a08362d544e757a3012dcbe0d73a5a | 1,576 | py | Python | server/orchestrator/models/base.py | bsantaus/qiskit-dell-runtime | aec129a83e0a5d6ee34de01be14c91469ce74407 | [
"Apache-2.0"
] | 17 | 2021-09-27T14:46:06.000Z | 2022-02-15T12:08:48.000Z | server/orchestrator/models/base.py | manannarang/qiskit-dell-runtime | ef8da4fc27ccca60ef81e7391d0c2a60593170b3 | [
"Apache-2.0"
] | 4 | 2021-09-27T14:21:53.000Z | 2022-01-20T16:43:46.000Z | server/orchestrator/models/base.py | manannarang/qiskit-dell-runtime | ef8da4fc27ccca60ef81e7391d0c2a60593170b3 | [
"Apache-2.0"
] | 2 | 2021-10-13T03:42:31.000Z | 2022-01-16T10:12:54.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# Copyright 2021 Dell (www.dell.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import os
from sqlalchemy.ext.declarative import declarative_base
# Database connection settings come from the environment; a missing variable
# raises KeyError at import time (fail fast).
db_host = os.environ['DB_HOST']
db_port = os.environ['DB_PORT']
db_user = os.environ['DB_USER']
db_password = os.environ['DB_PASSWORD']
db_name = os.environ['DB_NAME']

# NOTE(review): DB_HOST fills both the URL scheme slot and the host slot.
# SQLAlchemy expects a dialect name (e.g. "postgresql") before "://", so this
# only works if DB_HOST doubles as the dialect/service name — confirm whether
# the first placeholder should come from a separate dialect setting.
db_string = f"{db_host}://{db_user}:{db_password}@{db_host}:{db_port}/{db_name}"
engine = create_engine(db_string)
Session = sessionmaker(bind=engine)
session = Session()  # module-level session shared by importers

Base = declarative_base()  # declarative base for the ORM models
| 35.022222 | 80 | 0.764594 |
bb22ad7744c76cd945d0bf803f2869ca9c66dd54 | 2,389 | py | Python | temp.py | Devanshu-singh-VR/pack-it-up | 4833ad08b7ba8afbb5a685f524fbb5fada88bc43 | [
"MIT"
] | null | null | null | temp.py | Devanshu-singh-VR/pack-it-up | 4833ad08b7ba8afbb5a685f524fbb5fada88bc43 | [
"MIT"
] | null | null | null | temp.py | Devanshu-singh-VR/pack-it-up | 4833ad08b7ba8afbb5a685f524fbb5fada88bc43 | [
"MIT"
] | null | null | null | #/usr/bin/env python3
import pyrebase
from sklearn import tree
# Firebase client configuration (public client keys, not server secrets).
config = {
  "apiKey": "AIzaSyByA7celafHSloxdOLA7_s-D097Ld10Jus",
  "authDomain": "firepack-66e50.firebaseapp.com",
  "databaseURL": "https://firepack-66e50.firebaseio.com",
  "projectId": "firepack-66e50",
  "storageBucket": "firepack-66e50.appspot.com"
}

firebase = pyrebase.initialize_app(config)
db = firebase.database()

# Feature names, in the order used by each row of `features` below.
# NOTE(review): `listt` is not referenced anywhere else in this script.
listt=["airtight",
"recyclable",
"toxic",
"moisture_resistance",
"aandb_resist",
"transparent",
"photosensitivity_resistance",
"flammable",
"thickness",
"tensile_strength",
"temperature",
"weightpercm2",
"puncture_resistance",
"costpercm"]

# Pull the whole database root into a plain dict.
users=db.get()
d={}
for user in users.each():
    d[user.key()]=user.val()
# The DB stores this attribute under "odour"; expose it as "airtight".
# NOTE(review): `d` itself is never used after this point in the script.
d["airtight"]=d["odour"]

# creating dataset for aluminium/plastic/glass/cardboard/paper
# Each row of `features` has 16 values, in this order:
#airtight
#recyclable
#toxic
#moisture_resistance
#flexible
#aandb_resist
#transparent
#photosensitivity_resistance
#flammable
#resist_solvents
#thickness
#tensile_strength
#temperature
#weightpercm2
#puncture_resistance
#costpercm2
features=[[1,1,0,1,0,1,1,1,0,1,0.102,10.2,200,0.6,0.21,0.08],[1,1,0,1,0,1,1,1,0,1,0.318,30.2,300,1.4,0.42,0.12],[1,1,0,1,0,1,1,1,0,1,0.417,65.2,300,1.9,0.72,0.21],[0,1,0,1,1,0,0,1,0,1,0.08,4.2,150,0.02,0.13,0.09],[1,1,0,1,1,1,0,1,0,1,0.18,11.2,180,0.08,0.36,0.25],[1,1,0,1,0,1,0,1,0,1,0.27,33.4,180,0.12,0.55,0.40],[0,1,0,0,1,0,0,1,1,0,0.09,4.2,90,0.03,0.14,0.04],[0,1,0,0,1,0,0,1,1,0,0.15,6.9,110,0.09,0.17,0.10],[0,1,0,0,0,0,0,1,1,0,0.20,9.4,130,0.15,0.20,0.17],[0,1,0,0,0,0,0,1,1,0,0.20,9.4,130,0.23,0.24,0.40],[1,1,0,1,1,0,0,1,1,1,0.10,8.1,150,0.11,0.16,0.28],[0,1,0,0,1,0,1,0,1,1,0.01,1.1,90,0.001,0.65,0.005],[1,1,0,1,0,1,1,0,0,1,0.82,30.2,220,0.09,0.73,0.43],[1,1,0,1,1,1,1,0,0,1,0.22,18.3,170,0.04,0.49,0.17],[1,1,0,1,0,0,0,1,0,1,0.82,8.3,120,0.11,0.18,0.09],[1,1,0,1,0,0,0,1,0,1,0.92,38.3,320,0.36,3.78,0.60]]
# One label per feature row (16 packaging material/construction classes).
label=["singlelayerglass","doublelayerglass","triplelayerglass","singlelayeraluminium","doublelayeraluminium","triplelayeraluminium","single_faced"
,"double_faced","single_wall","double_wall","coated","polyethylene","hdpe","LDPE","polystyrene","pvc"]
# now calling decision tree algo
algo=tree.DecisionTreeClassifier()
# time for training data
trained=algo.fit(features,label)
# now prediction
# NOTE(review): this query vector is identical to training row 8
# ("single_wall"), so the prediction is trivially that class.
output=trained.predict([[0,1,0,0,0,0,0,1,1,0,0.20,9.4,130,0.15,0.20,0.17]])
print(output)
| 31.434211 | 818 | 0.689828 |
a9e1e7b7c986a77eb9c23e5cbd59f730422e1293 | 2,797 | py | Python | tests/chainer_tests/functions_tests/test_linear.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
] | null | null | null | tests/chainer_tests/functions_tests/test_linear.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
] | null | null | null | tests/chainer_tests/functions_tests/test_linear.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
] | 1 | 2018-11-18T00:36:51.000Z | 2018-11-18T00:36:51.000Z | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
class TestLinear(unittest.TestCase):
in_shape = (3,)
out_size = 2
def setUp(self):
in_size = numpy.prod(self.in_shape)
self.func = functions.Linear(in_size, self.out_size)
self.func.W = numpy.random.uniform(
-1, 1, self.func.W.shape).astype(numpy.float32)
self.func.b = numpy.random.uniform(
-1, 1, self.func.b.shape).astype(numpy.float32)
self.func.gW.fill(0)
self.func.gb.fill(0)
self.W = self.func.W.copy() # fixed on CPU
self.b = self.func.b.copy() # fixed on CPU
x_shape = (4,) + self.in_shape
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1, (4, self.out_size)).astype(numpy.float32)
self.y = self.x.reshape(4, -1).dot(self.func.W.T) + self.func.b
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.func(x)
self.assertEqual(y.data.dtype, numpy.float32)
gradient_check.assert_allclose(self.y, y.data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.func.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = self.func(x)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((x.data,))
gx, gW, gb = gradient_check.numerical_grad(
f, (x.data, func.W, func.b), (y.grad,), eps=1e-2)
gradient_check.assert_allclose(gx, x.grad)
gradient_check.assert_allclose(gW, func.gW)
gradient_check.assert_allclose(gb, func.gb)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.func.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestLinearWithSpatialDimensions(TestLinear):
    # Re-run all TestLinear checks with a multi-axis input; setUp flattens
    # it via reshape(4, -1) when computing the expected output.
    in_shape = (3, 2, 2)
class TestInvalidLinear(unittest.TestCase):
    """Linear must reject inputs whose flattened size mismatches in_size."""

    def setUp(self):
        self.func = functions.Linear(3, 2)
        # Per-sample size is 1*2 = 2, but the layer expects 3.
        self.x = numpy.random.uniform(-1, 1, (4, 1, 2)).astype(numpy.float32)

    def test_invalid_size(self):
        with self.assertRaises(type_check.InvalidType):
            self.func(chainer.Variable(self.x))
testing.run_module(__name__, __file__)
| 28.540816 | 77 | 0.642474 |
f50d96cfcdf6b758bd86d8866691ff1b6d54c19d | 78 | py | Python | message_media_messages/exceptions/__init__.py | Fredpwol/messages-python-sdk | 8902934edc2883afe5105ec02468caf00ded634e | [
"Apache-2.0"
] | 6 | 2018-04-30T05:45:53.000Z | 2021-09-18T04:45:28.000Z | message_media_messages/exceptions/__init__.py | Fredpwol/messages-python-sdk | 8902934edc2883afe5105ec02468caf00ded634e | [
"Apache-2.0"
] | 4 | 2017-11-30T21:50:23.000Z | 2020-06-19T06:25:51.000Z | message_media_messages/exceptions/__init__.py | messagemedia/messages-python-sdk | 915c29ab7cbabca5c3fbeb89c9d4b28754630a57 | [
"Apache-2.0"
] | 7 | 2017-11-30T13:29:50.000Z | 2022-01-25T22:30:34.000Z | __all__ = [
'api_exception',
'send_messages_400_response_exception',
] | 19.5 | 43 | 0.717949 |
b1b0f9d3fb0c4fc96ccf2e65cf999de1f2ea6f64 | 447 | py | Python | backend/common/schemas.py | restato/bunnybook | 970c862aa55941f073c708d748f7ff415ec3e785 | [
"MIT"
] | 131 | 2021-08-30T11:05:47.000Z | 2022-03-22T19:08:21.000Z | backend/common/schemas.py | restato/bunnybook | 970c862aa55941f073c708d748f7ff415ec3e785 | [
"MIT"
] | 1 | 2022-02-22T16:52:46.000Z | 2022-02-22T16:54:47.000Z | backend/common/schemas.py | restato/bunnybook | 970c862aa55941f073c708d748f7ff415ec3e785 | [
"MIT"
] | 13 | 2021-09-02T23:25:28.000Z | 2022-02-25T18:24:31.000Z | import datetime as dt
from fastapi_camelcase import CamelModel
def dt_to_iso8601z(d: dt.datetime) -> str:
    """Format *d* as ISO 8601 with millisecond precision and a 'Z' suffix."""
    # strftime emits 6-digit microseconds; drop the last three for ms.
    stamp = d.strftime('%Y-%m-%dT%H:%M:%S.%f')
    return stamp[:-3] + 'Z'
class BaseSchema(CamelModel):
    """Common base for API schemas; serializes datetimes via dt_to_iso8601z."""

    # add Zulu timezone ("Z") so JavaScript correctly parses the timestamps
    # as UTC
    class Config:
        json_encoders = {
            dt.datetime: dt_to_iso8601z
        }
| 26.294118 | 77 | 0.680089 |
a1a3d6aff9e2b321a57ab6f31fe83a396b009e15 | 2,233 | py | Python | examples/old_style_CPython_ast.py | LISTERINE/sourcerer | 5f756fdc3a79c902eafbceccb49c0c1e4b395ad1 | [
"Apache-2.0"
] | null | null | null | examples/old_style_CPython_ast.py | LISTERINE/sourcerer | 5f756fdc3a79c902eafbceccb49c0c1e4b395ad1 | [
"Apache-2.0"
] | null | null | null | examples/old_style_CPython_ast.py | LISTERINE/sourcerer | 5f756fdc3a79c902eafbceccb49c0c1e4b395ad1 | [
"Apache-2.0"
] | null | null | null | from yaml import load
from sys import argv
from pdb import set_trace
from ast import *
import re
from astunparse import unparse
def format_name(name):
no_vars = re.sub(r'_?\{.*?\}', '', name)
no_doubles = no_vars.replace('//', '/')
no_inner = re.sub('(\w)\/(\w)', r'\1_\2', no_doubles) # replace inner slashes
return no_inner.replace("/", "")
api = None
with open(argv[1], 'r') as api_file:
api = load(api_file.read())
body = []
functions = []
blueprint_raw = api['basePath']
blueprint_name = format_name(blueprint_raw)
blueprint_route = blueprint_name+".route"
blueprint = Assign(targets=[Name(id=blueprint_name, ctx=Store())],
value=Call(func=Name(id='BluePrint', ctx=Load()),
args=[Str(s=blueprint_name),
Name(id='__name__',
ctx=Load())],
keywords=[keyword(arg='template_folder',
value=Str(s=blueprint_name))],
starargs=None, kwargs=None))
body.append(blueprint)
for path, path_info in api['paths'].items():
route_dec = Call(func=Name(id=blueprint_route, ctx=Load()),
args=[Str(s=path)],
keywords=[],
starargs=None,
kwargs=None)
method_args = arguments(args=[Name(id='methods',
ctx=Param())],
vararg=None,
kwarg=None,
defaults=[List(elts=[Str(s=meth) for meth in path_info.keys()],
ctx=Load())])
returns = []
for method, method_info in path_info.items():
for response, response_info in method_info['responses'].items():
returns.append(Return(value=Num(n=response)))
route_func = FunctionDef(name=format_name(path),
args=method_args,
body=returns,
decorator_list=[route_dec])
functions.append(route_func)
body.append(functions)
m = Module(body=body)
print unparse(m)
| 31.9 | 91 | 0.512315 |
2e546567e7d4c87698c87bc0d1f515e4ab22b727 | 769 | py | Python | enrocrypt/error.py | Morgan-Phoenix/EnroCrypt | cf883225a06bac73c564cff83af5e3f9386de116 | [
"MIT"
] | 6 | 2021-05-25T10:04:04.000Z | 2021-12-26T14:36:21.000Z | enrocrypt/error.py | Morgan-Phoenix/EnroCrypt | cf883225a06bac73c564cff83af5e3f9386de116 | [
"MIT"
] | 7 | 2021-05-26T12:59:16.000Z | 2021-08-31T09:17:48.000Z | enrocrypt/error.py | Morgan-Phoenix/EnroCrypt | cf883225a06bac73c564cff83af5e3f9386de116 | [
"MIT"
] | 2 | 2021-05-27T10:16:58.000Z | 2021-08-01T03:50:17.000Z | class ModifiedError(Exception):
def __init__(self):
self.msg = 'The List Provided To The Function Is Modified'
super().__init__(self.msg)
class ListIndexError(Exception):
def __init__(self):
self.msg = 'Returned List Must Only Have 4 Elements'
super().__init__(self.msg)
class NoKeyFile(Exception):
def __init__(self):
self.msg = 'No Path For The Key File was Provided'
super().__init__(self.msg)
class List(Exception):
def __init__(self):
self.msg = "Must Be A List"
super().__init__(self.msg)
class KeyError(Exception):
def __init__(self,bits:int) -> None:
self.bits = bits
self.msg = f"Key Must Be Of 32, 24 or 16 bits not {self.bits} bits"
super().__init__(self.msg) | 36.619048 | 75 | 0.652796 |
85f66d49e446b793d35699fccef71d6d2f38bdaa | 5,882 | py | Python | Rationale_Analysis/models/extractors/supervised_bert_lstm_extractor.py | CMSC35100-JET/FRESH | ea2b23386f8411da7127ec84ff6dd6e684b1dced | [
"MIT"
] | 30 | 2020-05-15T02:24:54.000Z | 2022-03-14T21:52:48.000Z | Rationale_Analysis/models/extractors/supervised_bert_lstm_extractor.py | CMSC35100-JET/FRESH | ea2b23386f8411da7127ec84ff6dd6e684b1dced | [
"MIT"
] | 5 | 2020-05-04T13:43:14.000Z | 2022-02-14T19:37:01.000Z | Rationale_Analysis/models/extractors/supervised_bert_lstm_extractor.py | CMSC35100-JET/FRESH | ea2b23386f8411da7127ec84ff6dd6e684b1dced | [
"MIT"
] | 6 | 2020-10-12T21:09:57.000Z | 2022-01-12T00:48:42.000Z | from typing import Optional, Dict, Any
import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator, util
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, FeedForward, TimeDistributed
from Rationale_Analysis.models.classifiers.base_model import RationaleBaseModel
from allennlp.training.metrics import F1Measure, Average
from Rationale_Analysis.models.thresholders.top_k import TopKThresholder
@Model.register("supervised_bert_lstm_extractor")
class SupervisedBertLstmExtractor(RationaleBaseModel):
    """Supervised token-level rationale extractor.

    Pipeline: BERT embeddings -> seq2seq encoder -> feedforward ->
    per-token sigmoid logit, trained with BCE against gold rationale masks.
    """

    def __init__(
        self,
        vocab: Vocabulary,
        text_field_embedder: TextFieldEmbedder,
        seq2seq_encoder: Seq2SeqEncoder,
        feedforward_encoder: FeedForward,
        requires_grad: str,
        dropout: float = 0.0,
        max_length_ratio: float = 1.0,
        initializer: InitializerApplicator = InitializerApplicator(),
        regularizer: Optional[RegularizerApplicator] = None,
    ):
        # requires_grad: "all"/"none", or a comma-separated list of substrings
        # selecting which embedder parameters stay trainable.
        super(SupervisedBertLstmExtractor, self).__init__(vocab, initializer, regularizer)
        self._vocabulary = vocab
        self._text_field_embedder = text_field_embedder

        if requires_grad in ["none", "all"]:
            for param in self._text_field_embedder.parameters():
                param.requires_grad = requires_grad == "all"
        else:
            # Freeze everything except parameters whose name contains one of
            # the given substrings.
            model_name_regexes = requires_grad.split(",")
            for name, param in self._text_field_embedder.named_parameters():
                found = any([regex in name for regex in model_name_regexes])
                param.requires_grad = found

        self._seq2seq_encoder = seq2seq_encoder
        self._dropout = torch.nn.Dropout(p=dropout)
        self._feedforward_encoder = TimeDistributed(feedforward_encoder)
        self._classifier_input_dim = feedforward_encoder.get_output_dim()
        # Single logit per token: rationale vs. not-rationale.
        self._classification_layer = torch.nn.Linear(self._classifier_input_dim, 1, bias=False)

        self._token_prf = F1Measure(1)
        self._rationale_length = Average()
        # Positive-class BCE weight derived from the target length ratio:
        # 1 / max_length_ratio - 1.
        self._pos_weight = torch.Tensor([1 / max_length_ratio - 1])
        self._extractor = TopKThresholder(max_length_ratio=max_length_ratio)
        initializer(self)

    def forward(self, document, query=None, label=None, metadata=None, rationale=None, **kwargs) -> Dict[str, Any]:
        """Return per-token rationale probabilities (key "probs") plus mask,
        metadata and document; adds "loss" when gold `rationale` is given."""
        bert_document = self.combine_document_query(document, query)

        embedded_text = self._text_field_embedder(bert_document)
        mask = util.get_text_field_mask(bert_document)

        embedded_text = self._seq2seq_encoder(embedded_text, mask=mask)
        embedded_text = self._dropout(self._feedforward_encoder(embedded_text))

        logits = self._classification_layer(embedded_text)
        probs = torch.sigmoid(logits)[:, :, 0]

        mask = mask.float()

        output_dict = {}
        output_dict["probs"] = probs * mask
        output_dict["mask"] = mask
        output_dict["metadata"] = metadata
        output_dict["document"] = document

        if rationale is not None:
            # Per-token BCE against the gold mask, averaged over unmasked
            # tokens and then over the batch; positives up-weighted.
            loss = torch.nn.functional.binary_cross_entropy_with_logits(
                logits.squeeze(-1),
                rationale,
                reduction="none",
                pos_weight=self._pos_weight.to(rationale.device),
            )
            loss = ((loss * mask).sum(-1) / mask.sum(-1)).mean()
            output_dict["loss"] = loss
            self._token_prf(
                torch.cat([1 - probs.unsqueeze(-1), probs.unsqueeze(-1)], dim=-1),
                rationale.long(),
                mask == 1,
            )
            # Track the average fraction of tokens predicted as rationale.
            predicted_rationale = (probs > 0.5).long() * mask
            self._rationale_length(((predicted_rationale * mask).sum(-1).float() / mask.sum(-1)).mean())

        return output_dict

    def extract_rationale(self, output_dict):
        """Convert hard 0/1 rationale paths into text + span dicts.

        NOTE(review): `forward` never writes "predicted_rationales" into the
        output dict (prediction goes through `make_output_human_readable` and
        the TopKThresholder instead), so this method looks stale — confirm.
        """
        rationales = []
        sentences = [x["tokens"] for x in output_dict["metadata"]]
        predicted_rationales = output_dict["predicted_rationales"].cpu().data.numpy()
        for path, words in zip(predicted_rationales, sentences):
            path = list(path)[: len(words)]
            words = [x.text for x in words]

            starts, ends = [], []
            path.append(0)
            # NOTE(review): path[i - 1 : i] is a ONE-element slice and can
            # never equal the two-element lists [0, 1] / [1, 0]; this likely
            # should be path[i - 1 : i + 1], otherwise `spans` is always empty.
            for i in range(len(words)):
                if path[i - 1 : i] == [0, 1]:
                    starts.append(i)
                if path[i - 1 : i] == [1, 0]:
                    ends.append(i)

            assert len(starts) == len(ends)
            spans = list(zip(starts, ends))
            rationales.append(
                {
                    "document": " ".join([w for i, w in zip(path, words) if i == 1]),
                    "spans": [{"span": (s, e), "value": 1} for s, e in spans],
                    "metadata": None,
                }
            )

        return rationales

    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        # Token-level precision/recall/F1 plus mean predicted rationale length.
        metrics = self._token_prf.get_metric(reset)
        metrics = dict(zip(["p", "r", "f1"], metrics))
        metrics.update({"rlength": float(self._rationale_length.get_metric(reset))})
        return metrics

    def make_output_human_readable(self, output_dict):
        # Threshold the soft probabilities with the top-k extractor, then
        # expose the rationale text alongside the per-instance metadata.
        rationales = self._extractor.extract_rationale(attentions=output_dict['probs'], document=output_dict['document'], as_one_hot=False)
        new_output_dict = {}
        new_output_dict["predicted_rationale"] = rationales
        new_output_dict["document"] = [r["document"] for r in rationales]

        if "query" in output_dict["metadata"][0]:
            new_output_dict["query"] = [m["query"] for m in output_dict["metadata"]]
        new_output_dict["label"] = [m["label"] for m in output_dict["metadata"]]
        new_output_dict["annotation_id"] = [m["annotation_id"] for m in output_dict["metadata"]]

        return new_output_dict
| 39.213333 | 139 | 0.629548 |
5847ae666a144ba0d5fb1c2ec3660bf5e784561c | 6,923 | py | Python | test/test_plugin_ifttt.py | NiNiyas/apprise | 8d96e95acd7cb89f082685ae161bd0e268203f0c | [
"MIT"
] | 1 | 2022-01-19T01:40:04.000Z | 2022-01-19T01:40:04.000Z | test/test_plugin_ifttt.py | NiNiyas/apprise | 8d96e95acd7cb89f082685ae161bd0e268203f0c | [
"MIT"
] | null | null | null | test/test_plugin_ifttt.py | NiNiyas/apprise | 8d96e95acd7cb89f082685ae161bd0e268203f0c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pytest
import mock
import requests
from apprise import plugins
from apprise import NotifyType
from helpers import AppriseURLTester
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
# Our Testing URLs
apprise_url_tests = (
('ifttt://', {
'instance': TypeError,
}),
('ifttt://:@/', {
'instance': TypeError,
}),
# No User
('ifttt://EventID/', {
'instance': TypeError,
}),
# A nicely formed ifttt url with 1 event and a new key/value store
('ifttt://WebHookID@EventID/?+TemplateKey=TemplateVal', {
'instance': plugins.NotifyIFTTT,
# Our expected url(privacy=True) startswith() response:
'privacy_url': 'ifttt://W...D',
}),
# Test to= in which case we set the host to the webhook id
('ifttt://WebHookID?to=EventID,EventID2', {
'instance': plugins.NotifyIFTTT,
}),
# Removing certain keys:
('ifttt://WebHookID@EventID/?-Value1=&-Value2', {
'instance': plugins.NotifyIFTTT,
}),
# A nicely formed ifttt url with 2 events defined:
('ifttt://WebHookID@EventID/EventID2/', {
'instance': plugins.NotifyIFTTT,
}),
# Support Native URL references
('https://maker.ifttt.com/use/WebHookID/', {
# No EventID specified
'instance': TypeError,
}),
('https://maker.ifttt.com/use/WebHookID/EventID/', {
'instance': plugins.NotifyIFTTT,
}),
# Native URL with arguments
('https://maker.ifttt.com/use/WebHookID/EventID/?-Value1=', {
'instance': plugins.NotifyIFTTT,
}),
# Test website connection failures
('ifttt://WebHookID@EventID', {
'instance': plugins.NotifyIFTTT,
# force a failure
'response': False,
'requests_response_code': requests.codes.internal_server_error,
}),
('ifttt://WebHookID@EventID', {
'instance': plugins.NotifyIFTTT,
# throw a bizzare code forcing us to fail to look it up
'response': False,
'requests_response_code': 999,
}),
('ifttt://WebHookID@EventID', {
'instance': plugins.NotifyIFTTT,
# Throws a series of connection and transfer exceptions when this flag
# is set and tests that we gracfully handle them
'test_requests_exceptions': True,
}),
)
def test_plugin_ifttt_urls():
    """
    NotifyIFTTT() Apprise URLs

    Drives every entry of ``apprise_url_tests`` through the shared
    AppriseURLTester harness (URL parsing, instantiation, notification
    success/failure, and exception handling).
    """
    # Run our general tests
    AppriseURLTester(tests=apprise_url_tests).run_all()
@mock.patch('requests.get')
@mock.patch('requests.post')
def test_plugin_ifttt_edge_cases(mock_post, mock_get):
"""
NotifyIFTTT() Edge Cases
"""
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
# Initialize some generic (but valid) tokens
webhook_id = 'webhook_id'
events = ['event1', 'event2']
# Prepare Mock
mock_get.return_value = requests.Request()
mock_post.return_value = requests.Request()
mock_post.return_value.status_code = requests.codes.ok
mock_get.return_value.status_code = requests.codes.ok
mock_get.return_value.content = '{}'
mock_post.return_value.content = '{}'
# No webhook_id specified
with pytest.raises(TypeError):
plugins.NotifyIFTTT(webhook_id=None, events=None)
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
# Initializes the plugin with an invalid webhook id
with pytest.raises(TypeError):
plugins.NotifyIFTTT(webhook_id=None, events=events)
# Whitespace also acts as an invalid webhook id
with pytest.raises(TypeError):
plugins.NotifyIFTTT(webhook_id=" ", events=events)
# No events specified
with pytest.raises(TypeError):
plugins.NotifyIFTTT(webhook_id=webhook_id, events=None)
obj = plugins.NotifyIFTTT(webhook_id=webhook_id, events=events)
assert isinstance(obj, plugins.NotifyIFTTT) is True
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is True
# Test the addition of tokens
obj = plugins.NotifyIFTTT(
webhook_id=webhook_id, events=events,
add_tokens={'Test': 'ValueA', 'Test2': 'ValueB'})
assert isinstance(obj, plugins.NotifyIFTTT) is True
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is True
# Invalid del_tokens entry
with pytest.raises(TypeError):
plugins.NotifyIFTTT(
webhook_id=webhook_id, events=events,
del_tokens=plugins.NotifyIFTTT.ifttt_default_title_key)
assert isinstance(obj, plugins.NotifyIFTTT) is True
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is True
# Test removal of tokens by a list
obj = plugins.NotifyIFTTT(
webhook_id=webhook_id, events=events,
add_tokens={
'MyKey': 'MyValue'
},
del_tokens=(
plugins.NotifyIFTTT.ifttt_default_title_key,
plugins.NotifyIFTTT.ifttt_default_body_key,
plugins.NotifyIFTTT.ifttt_default_type_key))
assert isinstance(obj, plugins.NotifyIFTTT) is True
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is True
# Test removal of tokens as dict
obj = plugins.NotifyIFTTT(
webhook_id=webhook_id, events=events,
add_tokens={
'MyKey': 'MyValue'
},
del_tokens={
plugins.NotifyIFTTT.ifttt_default_title_key: None,
plugins.NotifyIFTTT.ifttt_default_body_key: None,
plugins.NotifyIFTTT.ifttt_default_type_key: None})
assert isinstance(obj, plugins.NotifyIFTTT) is True
| 33.444444 | 79 | 0.678896 |
a1ae33da36dd8f448097b8044587b4b85d5f9a7e | 1,028 | py | Python | 15_threeSum/solution.py | NekoApocalypse/leetcode-collection | d7db13b34078eb3027f89a02eeb45066073a4a5c | [
"MIT"
] | null | null | null | 15_threeSum/solution.py | NekoApocalypse/leetcode-collection | d7db13b34078eb3027f89a02eeb45066073a4a5c | [
"MIT"
] | null | null | null | 15_threeSum/solution.py | NekoApocalypse/leetcode-collection | d7db13b34078eb3027f89a02eeb45066073a4a5c | [
"MIT"
] | null | null | null | class Solution:
def threeSum(self, nums):
nums = sorted(nums)
ans = []
for i, n1 in enumerate(nums):
if n1 > 0:
break
if i > 0 and n1 == nums[i - 1]:
continue
target = -n1
ll = i + 1
rr = len(nums) - 1
while ll < rr:
while ll < rr and nums[ll] + nums[rr] > target:
rr -= 1
if ll < rr and nums[ll] + nums[rr] == target:
ans.append([n1, nums[ll], nums[rr]])
cur = nums[ll]
while ll < rr and nums[ll] == cur:
ll += 1
return ans
def test():
  """Ad-hoc driver: print threeSum results for a few hand-picked cases."""
  sol = Solution()
  # Cases cover the classic example, the empty list, and duplicate-heavy input.
  cases = [
      [-1, 0, 1, 2, -1, -4],
      [],
      [-1, 0, 0, 1, 1, 2, -1, -2, -4],
      [-1, 0, 0, 0, 1, 1, 2, -1, -2, -4]
  ]
  for case in cases:
    print(case)
    print(sol.threeSum(case))
    print('------------------')
if __name__ == '__main__':
test()
| 25.7 | 63 | 0.366732 |
146dfe8f63c9b5c1eabfdb9ae5ee71d20eff7ce7 | 13,068 | py | Python | examples/master.py | Sevendi/dnpy | 3d463805ae553e537be97050083113b56f30b20d | [
"MIT"
] | 3 | 2020-07-13T17:39:01.000Z | 2021-06-09T06:52:48.000Z | examples/master.py | Sevendi/dnpy | 3d463805ae553e537be97050083113b56f30b20d | [
"MIT"
] | 1 | 2020-08-09T04:53:38.000Z | 2020-08-27T04:22:19.000Z | examples/master.py | Sevendi/dnpy | 3d463805ae553e537be97050083113b56f30b20d | [
"MIT"
] | 3 | 2020-12-06T21:19:28.000Z | 2021-08-01T15:10:31.000Z | import logging
import sys
import time
from dnpy import opendnp3
FILTERS = opendnp3.levels.ALL | opendnp3.levels.ALL_APP_COMMS
HOST = "127.0.0.1"
LOCAL = "0.0.0.0"
PORT = 20005
stdout_stream = logging.StreamHandler(sys.stdout)
stdout_stream.setFormatter(logging.Formatter('%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s'))
_log = logging.getLogger(__name__)
_log.addHandler(stdout_stream)
_log.setLevel(logging.DEBUG)
class MyMaster:
    """
    Interface for all master application callback info except for measurement values.

    DNP3 spec section 5.1.6.1:
        The Application Layer provides the following services for the DNP3 User Layer in a master:
            - Formats requests directed to one or more outstations.
            - Notifies the DNP3 User Layer when new data or information arrives from an outstation.

    DNP spec section 5.1.6.3:
        The Application Layer requires specific services from the layers beneath it.
            - Partitioning of fragments into smaller portions for transport reliability.
            - Knowledge of which device(s) were the source of received messages.
            - Transmission of messages to specific devices or to all devices.
            - Message integrity (i.e., error-free reception and transmission of messages).
            - Knowledge of the time when messages arrive.
            - Either precise times of transmission or the ability to set time values
              into outgoing messages.
    """

    def __init__(self,
                 log_handler=opendnp3.ConsoleLogger(False).Create(),
                 listener=opendnp3.PrintingChannelListener().Create(),
                 soe_handler=opendnp3.PrintingSOEHandler().Create(),
                 master_application=opendnp3.DefaultMasterApplication().Create(),
                 stack_config=None):
        """Build the manager/channel/master stack and enable it.

        :param log_handler: receives opendnp3 log records.
        :param listener: receives channel state-change callbacks.
        :param soe_handler: receives sequence-of-events (measurement) callbacks.
        :param master_application: master application-layer callbacks.
        :param stack_config: optional pre-built opendnp3.MasterStackConfig; a
            default (2 s response timeout, link address 10 -> outstation 1) is
            built when None.
        """
        threads_to_allocate = 1
        self.log_handler = log_handler
        self.listener = listener
        self.soe_handler = soe_handler
        self.master_application = master_application

        _log.debug('Creating a opendnp3.DNP3Manager..')
        self.manager = opendnp3.DNP3Manager(threads_to_allocate, self.log_handler)

        _log.debug('Creating the DNP3 channel, a TCP client.')
        # Fix: pass the caller-supplied listener; the original always created a
        # fresh PrintingChannelListener and silently ignored the argument.
        self.channel = self.manager.AddTCPClient("tcpClient", FILTERS,
                                                 opendnp3.ChannelRetry.Default(),
                                                 {opendnp3.IPEndpoint(HOST, PORT)},
                                                 LOCAL, self.listener)

        _log.debug('Configuring the DNP3 stack.')
        # Fix: honor the `stack_config` argument, which was previously ignored.
        if stack_config is None:
            stack_config = opendnp3.MasterStackConfig()
            stack_config.master.responseTimeout = opendnp3.TimeDuration().Seconds(2)
            stack_config.link.LocalAddr = 10
            stack_config.link.RemoteAddr = 1
        self.stack_config = stack_config

        _log.debug('Adding the master to the channel.')
        self.master = self.channel.AddMaster("master",
                                             self.soe_handler,
                                             self.master_application,
                                             self.stack_config)

        # Periodic reads: a slow integrity poll of all classes plus a faster
        # class-1 event poll.
        self.slow_scan = self.master.AddClassScan(opendnp3.ClassField.AllClasses(),
                                                  opendnp3.TimeDuration.Minutes(30),
                                                  self.soe_handler)
        self.fast_scan = self.master.AddClassScan(opendnp3.ClassField(opendnp3.ClassField.CLASS_1),
                                                  opendnp3.TimeDuration.Minutes(1),
                                                  self.soe_handler)

        _log.debug('Enabling the master. At this point, traffic will start to flow between the Master and Outstations.')
        self.master.Enable()
        time.sleep(1)

    def send_direct_operate_command(self, command, index, callback=opendnp3.PrintingCommandResultCallback.Get(),
                                    config=opendnp3.TaskConfig.Default()):
        """
        Direct operate a single command

        :param command: command to operate
        :param index: index of the command
        :param callback: callback that will be invoked upon completion or failure
        :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
        """
        self.master.DirectOperate(command, index, callback, config)

    def send_direct_operate_command_set(self, command_set, callback=opendnp3.PrintingCommandResultCallback.Get(),
                                        config=opendnp3.TaskConfig.Default()):
        """
        Direct operate a set of commands

        :param command_set: set of command headers
        :param callback: callback that will be invoked upon completion or failure
        :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
        """
        self.master.DirectOperate(command_set, callback, config)

    @staticmethod  # opendnp3 expects a plain callable here; a bound method will fail.
    def command_callback(result=None):
        """Print a command task result: its summary, then each point's index.

        :type result: opendnp3.ICommandTaskResult
        """
        print("Received Callback Command Result...")
        print(f"Received command result with summary: {opendnp3.TaskCompletionSpec.to_human_string(result.summary)}")

        def print_item(item):
            # Called once per command point in the result collection.
            print(item.index)

        result.ForeachItem[opendnp3.CommandPointResult](print_item)

    @staticmethod
    def collection_callback(result=None):
        """Stub per-point callback; currently only emits a marker line.

        :type result: opendnp3.CommandPointResult
        """
        print("foo")

    def send_select_and_operate_command(self, command, index, callback=opendnp3.PrintingCommandResultCallback.Get(),
                                        config=opendnp3.TaskConfig.Default()):
        """
        Select and operate a single command

        :param command: command to operate
        :param index: index of the command
        :param callback: callback that will be invoked upon completion or failure
        :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
        """
        _log.debug("Selecting and operating...")
        self.master.SelectAndOperate[opendnp3.ControlRelayOutputBlock](command, index, callback, config)
        _log.debug("Completed selecting and operating...")

    def send_select_and_operate_command_set(self, command_set, callback=opendnp3.PrintingCommandResultCallback.Get(),
                                            config=opendnp3.TaskConfig.Default()):
        """
        Select and operate a set of commands

        :param command_set: set of command headers
        :param callback: callback that will be invoked upon completion or failure
        :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
        """
        self.master.SelectAndOperate(command_set, callback, config)

    def shutdown(self):
        """Release the scan/master/channel references, then stop the manager.

        Fix: the original deleted ``self.integrity_scan`` and
        ``self.exception_scan``, attributes that are never created (``__init__``
        defines ``slow_scan`` and ``fast_scan``), so shutdown() always raised
        AttributeError before reaching the manager.
        """
        del self.slow_scan
        del self.fast_scan
        del self.master
        del self.channel
        self.manager.Shutdown()
'''
SOEHandler does not currently work due to some issues with nested templates and Cppyy. This is probably solvable with some help from Cppyy devs.
'''
# class SOEHandler(opendnp3.opendnp3.ISOEHandler.):
# """
# Override opendnp3.ISOEHandler. in this manner to implement application-specific sequence-of-events behavior.
# This is an interface for SequenceOfEvents (SOE) callbacks from the Master stack to the application layer.
# """
# def __init__(self):
# super(SOEHandler, self).__init__()
# def Process(self, info, values):
# """
# Process measurement data.
# :param info: opendnp3.HeaderInfo.
# :param values: A collection of values received from the Outstation (various data types are possible).
# """
# visitor_class_types = {
# opendnp3.ICollection.(opendnp3.Indexed.(opendnp3.Binary.)): Visitoropendnp3.Indexed.opendnp3.Binary.,
# opendnp3.opendnp3.ICollection.opendnp3.Indexed.opendnp3.DoubleBitopendnp3.Binary..: Visitoropendnp3.Indexed.opendnp3.DoubleBitopendnp3.Binary..,
# opendnp3.opendnp3.ICollection.opendnp3.Indexed.Counter: Visitoropendnp3.Indexed.Counter,
# opendnp3.opendnp3.ICollection.opendnp3.Indexed.opendnp3.FrozenCounter: Visitoropendnp3.Indexed.opendnp3.FrozenCounter,
# opendnp3.opendnp3.ICollection.opendnp3.Indexed.opendnp3.Analog: Visitoropendnp3.Indexed.opendnp3.Analog,
# opendnp3.opendnp3.ICollection.opendnp3.Indexed.opendnp3.Binary.OutputStatus: Visitoropendnp3.Indexed.opendnp3.Binary.OutputStatus,
# opendnp3.opendnp3.ICollection.opendnp3.Indexed.opendnp3.AnalogOutputStatus: Visitoropendnp3.Indexed.opendnp3.AnalogOutputStatus,
# opendnp3.opendnp3.ICollection.opendnp3.Indexed.opendnp3.TimeAndInterval: Visitoropendnp3.Indexed.opendnp3.TimeAndInterval
# }
# visitor_class = visitor_class_types[type(values)]
# visitor = visitor_class()
# values.Foreach(visitor)
# for index, value in visitor.index_and_value:
# log_string = 'SOEHandler.Process {0}\theaderIndex={1}\tdata_type={2}\tindex={3}\tvalue={4}'
# _log.debug(log_string.format(info.gv, info.headerIndex, type(values).__name__, index, value))
# def BeginFragment(self):
# _log.debug('In SOEHandler.BeginFragment')
# def EndFragment(self):
# _log.debug('In SOEHandler.EndFragment')
class MyLogger(opendnp3.ILogHandler):
    """
    Override opendnp3.ILogHandler in this manner to implement application-specific logging behavior.
    """
    def __init__(self):
        super(MyLogger, self).__init__()
    def log(self, module, id, level, location, message):
        # Intentionally discards stack log records; re-enable the debug line
        # below to forward them to this module's logger.
        pass
        #_log.debug('LOG\tentry={}'.format(message))
class AppChannelListener(opendnp3.IChannelListener):
    """
    Override IChannelListener in this manner to implement application-specific channel behavior.
    """
    def __init__(self):
        super(AppChannelListener, self).__init__()
    def OnStateChange(self, state):
        # Invoked by the stack whenever the TCP channel changes state
        # (e.g. OPENING -> OPEN -> SHUTDOWN); we only log the transition.
        _log.debug('In AppChannelListener.OnStateChange: state={}'.format(opendnp3.ChannelStateToString(state)))
    def Start(self):
        _log.debug('In AppChannelListener.Start')
    def End(self):
        _log.debug('In AppChannelListener.End')
class MasterApplication(opendnp3.IMasterApplication):
    """Minimal IMasterApplication override that logs every stack callback."""
    def __init__(self):
        super(MasterApplication, self).__init__()
    # Overridden method
    def AssignClassDuringStartup(self):
        _log.debug('In MasterApplication.AssignClassDuringStartup')
        # False tells the stack not to perform class assignment at startup.
        return False
    # Overridden method
    def OnClose(self):
        _log.debug('In MasterApplication.OnClose')
    # Overridden method
    def OnOpen(self):
        _log.debug('In MasterApplication.OnOpen')
    # Overridden method
    def OnReceiveIIN(self, iin):
        _log.debug('In MasterApplication.OnReceiveIIN')
    # Overridden method
    def OnTaskComplete(self, info):
        _log.debug('In MasterApplication.OnTaskComplete')
    def Now(self):
        # The stack asks the application for the current time; an empty
        # UTCTimestamp is returned here since this demo does not timestamp.
        _log.debug('In MasterApplication.Now')
        return opendnp3.UTCTimestamp()
    # Overridden method
    def OnTaskStart(self, type, id):
        _log.debug('In MasterApplication.OnTaskStart')
def restart_callback(result=opendnp3.RestartOperationResult()):
    """Report the outcome of an outstation restart request."""
    # NOTE(review): the default is a single RestartOperationResult built at
    # import time and shared across calls -- presumably the stack always passes
    # an explicit result; confirm before relying on the default.
    if result.summary == opendnp3.TaskCompletion.SUCCESS:
        print("Restart success | Restart Time: {}".format(result.restartTime.GetMilliseconds()))
    else:
        print("Restart fail | Failure: {}".format(opendnp3.TaskCompletionToString(result.summary)))
def main():
    """The Master has been started from the command line. Execute ad-hoc tests if desired."""
    # app = MyMaster()
    # Build the master with a custom channel listener and master application;
    # the commented-out handlers are left as documented known-broken options.
    app = MyMaster(#log_handler=MyLogger(), # This is currently broken. Not sure why at this point.
                   listener=AppChannelListener(),
                   #soe_handler=SOEHandler(), # This is currently broken for reasons highlighted above.
                   master_application=MasterApplication() # This isn't really baked yet
                   )
    _log.debug('Initialization complete. In command loop.')
    # Ad-hoc tests can be performed at this point. See master_cmd.py for examples.
    app.shutdown()
    _log.debug('Exiting.')
    exit()
if __name__ == '__main__':
    main()
| 42.428571 | 188 | 0.66858 |
564c731e5d5cd0bbd116928a5ae54f7ffc150fd6 | 1,040 | py | Python | python_files/server/serve_model.py | cjbumgardner/HE_for_Medical_Data | 248dcd8b48924fe1f6edbeee4e16282d4a31069a | [
"MIT"
] | 1 | 2020-05-17T08:36:11.000Z | 2020-05-17T08:36:11.000Z | python_files/server/serve_model.py | cjbumgardner/HE_for_Medical_Data | 248dcd8b48924fe1f6edbeee4e16282d4a31069a | [
"MIT"
] | null | null | null | python_files/server/serve_model.py | cjbumgardner/HE_for_Medical_Data | 248dcd8b48924fe1f6edbeee4e16282d4a31069a | [
"MIT"
] | null | null | null | """
This file will be removed in the future.
"""
import server.seal_functions as sf
from pathlib import Path, PurePath
import os
import streamlit as st
MODELPARMS = Path(os.path.realpath(__file__)).parent.joinpath("model_params")
MODELS = {"Mortality Risk":
{"path":"log_reg_mortality",
"seal_function":sf.linear_reg_svr,
}
}
def build_model_svr(model_keyvalue, inputs, encoder=None, context=None):
    """Build a SEAL model function from stored parameters and evaluate it.

    Args:
        model_keyvalue: key identifying the model in the module-level MODELS map.
        inputs: properly formatted encrypted inputs for the model.
        encoder: SEAL encoder object.
        context: SEAL context object.

    Returns:
        The result of evaluating the constructed SEAL function on ``inputs``.

    Raises:
        KeyError: if ``model_keyvalue`` is not a known model.
        ValueError: if the SEAL function cannot be constructed from the inputs.
    """
    modeldict = MODELS[model_keyvalue]
    params_path = MODELPARMS.joinpath(modeldict["path"])
    seal_function = modeldict["seal_function"]
    try:
        func = seal_function(params_path, context=context, encoder=encoder)
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise ValueError(f"There was a problem with your inputs: {e}") from e
    return func.eval(inputs)
| 31.515152 | 77 | 0.682692 |
1411a0a15f8a58e2d20235c4b42129c2418d30b1 | 253 | py | Python | Leetcode/0279. Perfect Squares/0279.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0279. Perfect Squares/0279.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0279. Perfect Squares/0279.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | class Solution:
def numSquares(self, n: int) -> int:
dp = [n] * (n + 1)
dp[0] = 0
dp[1] = 1
for i in range(2, n + 1):
j = 1
while j * j <= i:
dp[i] = min(dp[i], dp[i - j * j] + 1)
j += 1
return dp[n]
| 16.866667 | 45 | 0.395257 |
781e61872fc47a6c46475cab2bc49fb527c73534 | 241 | py | Python | LA_app/LA_project/projects/forms.py | laxminarayanRaval/DjangoPractixe | 4ac22cdb732fd2443197f4f71662b74c35ae73ff | [
"CC0-1.0"
] | null | null | null | LA_app/LA_project/projects/forms.py | laxminarayanRaval/DjangoPractixe | 4ac22cdb732fd2443197f4f71662b74c35ae73ff | [
"CC0-1.0"
] | null | null | null | LA_app/LA_project/projects/forms.py | laxminarayanRaval/DjangoPractixe | 4ac22cdb732fd2443197f4f71662b74c35ae73ff | [
"CC0-1.0"
] | null | null | null | from django.forms import ModelForm
from .models import Project
class ProjectForm(ModelForm):
    """ModelForm exposing the user-editable fields of the Project model."""
    class Meta:
        # Explicit field list (instead of '__all__') keeps implicit model
        # fields such as the primary key out of the rendered form.
        model = Project
        fields = ['title', 'description', 'featured_img', 'tags', 'demo_link', 'source_link',] # '__all__'
f6999adba50e72b21b70d12868ea05f886f9eb05 | 6,531 | py | Python | Allura/allura/tests/unit/phone/test_nexmo.py | 99Kies/allura | 745ab3c5a9bd287b365b699bd38ef94650afc32e | [
"Apache-2.0"
] | 1 | 2021-12-09T21:52:12.000Z | 2021-12-09T21:52:12.000Z | Allura/allura/tests/unit/phone/test_nexmo.py | 99Kies/allura | 745ab3c5a9bd287b365b699bd38ef94650afc32e | [
"Apache-2.0"
] | null | null | null | Allura/allura/tests/unit/phone/test_nexmo.py | 99Kies/allura | 745ab3c5a9bd287b365b699bd38ef94650afc32e | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import json
from mock import patch
from datadiff.tools import assert_equal
from nose.tools import assert_in, assert_not_in
from allura.lib.phone.nexmo import NexmoPhoneService
class TestPhoneService(object):
    """Unit tests for NexmoPhoneService: request parameter assembly, error
    mapping, and the verify/check HTTP round trips (with requests mocked)."""
    def setUp(self):
        # Fresh service per test with throwaway credentials; the long site
        # name exercises brand truncation in verify().
        config = {'phone.api_key': 'test-api-key',
                  'phone.api_secret': 'test-api-secret',
                  'site_name': 'Very loooooooooong site name'}
        self.phone = NexmoPhoneService(config)
    def test_add_common_params(self):
        """API credentials (and optional language) are merged into params."""
        params = {'number': '1234567890', 'brand': 'Allura'}
        res = self.phone.add_common_params(params)
        expected = {'number': '1234567890',
                    'brand': 'Allura',
                    'api_key': 'test-api-key',
                    'api_secret': 'test-api-secret'}
        assert_equal(expected, res)
        self.phone.config['phone.lang'] = 'it-it'
        res = self.phone.add_common_params(params)
        expected['lg'] = 'it-it'
        assert_equal(expected, res)
    def test_error(self):
        """Only whitelisted Nexmo error codes surface their message."""
        res = self.phone.error()
        expected = {'status': 'error',
                    'error': 'Failed sending request to Nexmo'}
        assert_equal(expected, res)
        # not allowed code
        res = self.phone.error(code='2', msg='text')
        assert_equal(expected, res)
        # allowed code
        res = self.phone.error(code='15', msg='text')
        expected = {'status': 'error', 'error': 'text'}
        assert_equal(expected, res)
        # invalid format, possibly US
        res = self.phone.error(code='3', msg='Invalid value for parameter: number', number='8005551234')
        assert_equal(res['status'], 'error')
        assert_in('Invalid value for parameter: number', res['error'])
        assert_in('country code', res['error'])
        assert_in('US', res['error'])
        # invalid format, not US
        res = self.phone.error(code='3', msg='Invalid value for parameter: number', number='738005551234')
        assert_equal(res['status'], 'error')
        assert_in('Invalid value for parameter: number', res['error'])
        assert_in('country code', res['error'])
        assert_not_in('US', res['error'])
    def test_ok(self):
        """ok() echoes extra keyword arguments alongside status."""
        res = self.phone.ok(request_id='123', other='smth')
        expected = {'status': 'ok', 'request_id': '123', 'other': 'smth'}
        assert_equal(expected, res)
    @patch('allura.lib.phone.nexmo.requests', autospec=True)
    def test_verify(self, req):
        """verify() posts JSON to Nexmo and maps success/error responses."""
        req.post.return_value.json.return_value = {
            'request_id': 'test-req-id',
            'status': '0',
        }
        data = json.dumps({
            'number': '1234567890',
            'api_key': 'test-api-key',
            'api_secret': 'test-api-secret',
            'brand': 'Very loooooooooong',
        }, sort_keys=True)
        headers = {'Content-Type': 'application/json'}
        resp = self.phone.verify('1234567890')
        expected = {'status': 'ok', 'request_id': 'test-req-id'}
        assert_equal(expected, resp)
        req.post.assert_called_once_with(
            'https://api.nexmo.com/verify/json',
            data=data,
            headers=headers)
        req.post.reset_mock()
        req.post.return_value.json.return_value = {
            'status': '3',
            'error_text': 'Something went wrong',
        }
        resp = self.phone.verify('1234567890')
        expected = {'status': 'error', 'error': 'Something went wrong'}
        assert_equal(expected, resp)
        req.post.assert_called_once_with(
            'https://api.nexmo.com/verify/json',
            data=data,
            headers=headers)
    @patch('allura.lib.phone.nexmo.requests', autospec=True)
    def test_verify_exception(self, req):
        """Network-level failures become the generic error response."""
        req.post.side_effect = Exception('Boom!')
        resp = self.phone.verify('1234567890')
        expected = {'status': 'error',
                    'error': 'Failed sending request to Nexmo'}
        assert_equal(expected, resp)
    @patch('allura.lib.phone.nexmo.requests', autospec=True)
    def test_check(self, req):
        """check() posts the code to the check endpoint and maps the result."""
        req.post.return_value.json.return_value = {
            'request_id': 'test-req-id',
            'status': '0',
        }
        data = json.dumps({
            'request_id': 'test-req-id',
            'code': '1234',
            'api_key': 'test-api-key',
            'api_secret': 'test-api-secret',
        }, sort_keys=True)
        headers = {'Content-Type': 'application/json'}
        resp = self.phone.check('test-req-id', '1234')
        expected = {'status': 'ok', 'request_id': 'test-req-id'}
        assert_equal(expected, resp)
        req.post.assert_called_once_with(
            'https://api.nexmo.com/verify/check/json',
            data=data,
            headers=headers)
        req.post.reset_mock()
        req.post.return_value.json.return_value = {
            'status': '3',
            'error_text': 'Something went wrong',
        }
        resp = self.phone.check('test-req-id', '1234')
        expected = {'status': 'error', 'error': 'Something went wrong'}
        assert_equal(expected, resp)
        req.post.assert_called_once_with(
            'https://api.nexmo.com/verify/check/json',
            data=data,
            headers=headers)
    @patch('allura.lib.phone.nexmo.requests', autospec=True)
    def test_check_exception(self, req):
        """Network-level failures during check become the generic error."""
        req.post.side_effect = Exception('Boom!')
        resp = self.phone.check('req-id', '1234')
        expected = {'status': 'error',
                    'error': 'Failed sending request to Nexmo'}
        assert_equal(expected, resp)
| 39.107784 | 106 | 0.595315 |
1e645a0a5cf1ac8e66925c0a9950b9b504e3c3aa | 1,210 | py | Python | tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial1_Solution_a004d68d.py | arpitadash/course-content | c925341cdc34492f3c6125690a5c1b0ca1d26260 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 1 | 2020-06-14T16:12:01.000Z | 2020-06-14T16:12:01.000Z | tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial1_Solution_a004d68d.py | macasal/course-content | 0fc5e1a0d736c6b0391eeab587012ed0ab01e462 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 1 | 2021-06-16T05:41:08.000Z | 2021-06-16T05:41:08.000Z | tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial1_Solution_a004d68d.py | macasal/course-content | 0fc5e1a0d736c6b0391eeab587012ed0ab01e462 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 1 | 2021-11-26T17:23:48.000Z | 2021-11-26T17:23:48.000Z |
# 1). The prior exerts a strong influence over the posterior when it is very informative: when
#. the probability of the school being on one side or the other. If the prior that the fish are
#. on the left side is very high (like 0.9), the posterior probability of the state being left is
#. high regardless of the measurement.
# 2). The prior does not exert a strong influence when it is not informative: when the probabilities
#. of the school being on the left vs right are similar (both are 0.5 for example). In this case,
#. the posterior is more driven by the collected data (the measurement) and more closely resembles
#. the likelihood.
#. 3) Similarly to the prior, the likelihood exerts the most influence when it is informative: when catching
#. a fish tells you a lot of information about which state is likely. For example, if the probability of the
#. fisherperson catching a fish if he is fishing on the right side and the school is on the left is 0
#. (p fish | s = left) = 0 and the probability of catching a fish if the school is on the right is 1, the
#. prior does not affect the posterior at all. The measurement tells you the hidden state completely. | 71.176471 | 111 | 0.738017 |
29fc78734a3ec29ece4976f2681a207d16a82591 | 251 | py | Python | manage.py | joseluan/ifnaroca | 36a9ed54490a9cd14a349bb9c6f8fcbaa131aa76 | [
"Apache-2.0"
] | null | null | null | manage.py | joseluan/ifnaroca | 36a9ed54490a9cd14a349bb9c6f8fcbaa131aa76 | [
"Apache-2.0"
] | null | null | null | manage.py | joseluan/ifnaroca | 36a9ed54490a9cd14a349bb9c6f8fcbaa131aa76 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ifnaroca.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.818182 | 72 | 0.772908 |
fe57c515be0f06cd5c80a273105ef59a3eb1b8b7 | 8,261 | py | Python | lyricsearch/searchutil.py | wmcooper2/lyricsearch | 0aff7a32d240f6ba2ba1e21ae46d3ce79d13edd5 | [
"MIT"
] | null | null | null | lyricsearch/searchutil.py | wmcooper2/lyricsearch | 0aff7a32d240f6ba2ba1e21ae46d3ce79d13edd5 | [
"MIT"
] | null | null | null | lyricsearch/searchutil.py | wmcooper2/lyricsearch | 0aff7a32d240f6ba2ba1e21ae46d3ce79d13edd5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.7
"""Utility module for Lyric Search program."""
# stand lib
import difflib
from pathlib import Path
import re
import shelve
from time import asctime
from typing import (
Callable,
Dict,
List,
Set,
Text,
Tuple,
)
# 3rd party
from nltk import bigrams
# custom
from dividefilesutil import progress_bar
from dividesetsutil import normalized_pattern
from filesanddirs import (
count_sets_in_dbs,
file_path,
)
def fuzzy_search(pattern: Text,
                 strings: List[Text]) -> List[Tuple[float, Text]]:
    """Rank each candidate string by fuzzy similarity to `pattern`.

    Parameters
    ----------
    pattern : Text
        the pattern to search for
    strings : List[Text]
        candidate strings (e.g. artist names)

    Returns
    -------
    List[Tuple[float, Text]]
        (percentage score, candidate) pairs, in input order.
    """
    matcher = difflib.SequenceMatcher()

    def _score(candidate: Text) -> Tuple[float, Text]:
        # reuse one SequenceMatcher; ratio() is in [0, 1] -> percentage
        matcher.set_seqs(pattern, candidate)
        return (round(matcher.ratio(), 2) * 100, candidate)

    return [_score(candidate) for candidate in strings]
def brute_force_search(target: Text, pattern: Text) -> bool:
    """Report whether file `target` contains regex `pattern`.

    Returns False (after printing a notice) when the file is missing.
    """
    try:
        with open(target, "r") as handle:
            text = handle.read()
    except FileNotFoundError:
        print("File not found:", target)
        return False
    return re.search(pattern, text) is not None
# is this needed?
# if vocab_search() is 100, then is that enough?
def exact_search(possible: Tuple[List[Text], int],
                 pattern: Text) -> List[Text]:
    """Scan candidate text files for exact `pattern` matches.

    `possible[0]` is the list of candidate file paths; a progress bar is
    drawn as the scan advances.  Returns the matching paths.
    """
    candidates = possible[0]
    matches: List[Text] = []
    for done, candidate in enumerate(candidates, start=1):
        if brute_force_search(candidate, pattern):
            matches.append(candidate)
        progress_bar(done, len(candidates),
                     prefix="Exact:" + str(len(matches)))
    return matches
def lyric_set(song: Text, dict_: Dict[Text, Text]) -> Set:
    """Return the lyric word-set stored for `song` (second tuple slot)."""
    entry = dict_[song]
    return entry[1]
# need to research more about fuzzy string matching (fuzzywuzzy)
def ranking_search(pattern: Text,
                   possible: List[Text]) -> List[Text]:
    """Scan candidate text files for approximate matches.

    Currently delegates to `brute_force_search` per file; draws a
    progress bar while scanning.  Returns the matching paths.
    """
    matches: List[Text] = []
    total = len(possible)
    for done, candidate in enumerate(possible, start=1):
        if brute_force_search(candidate, pattern):
            matches.append(candidate)
        progress_bar(done, total, prefix="Ranking: " + str(len(matches)))
    return matches
#optimize
def rough_search(pattern: Text,
                 set_dir: Text,
                 result_dir: Text,
                 search_funct: Callable[[Text, Text], List[Text]],
                 ) -> List[Text]:
    """Check every set-db under `set_dir` for subset matches.

    Parameters
    ----------
    pattern : Text
        the pattern to search for
    set_dir : Text
        directory tree containing the ``*.db`` shelve files
    result_dir : Text
        unused; retained for interface compatibility with callers
    search_funct : Callable
        per-db search function, e.g. ``search_db`` or ``search_db_bigrams``

    Returns
    -------
    List[Text]
        accumulated matches from every db; a progress bar is displayed.
    """
    # BUG FIX: removed a leftover `breakpoint()` call that dropped the
    # program into the debugger on every invocation.
    matches: List[Text] = []
    searched = 0
    song_tot = count_sets_in_dbs(set_dir)
    for song_db in Path(set_dir).glob("**/*.db"):
        matches += search_funct(pattern, str(song_db))
        searched += 1
        progress_bar(searched, song_tot,
                     prefix="Matches: " + str(len(matches)))
    return matches
def save(data: List[Text], dest: Text) -> None:
    """Append the sorted, non-None elements of `data` to file `dest`."""
    with open(dest, "a+") as out:
        out.writelines(
            str(item) + "\n" for item in sorted(data) if item is not None
        )
    return None
def save_results(pattern: Text,
                 dest_dir: Text,
                 results: List[Text]) -> None:
    """Save `results` to '<dest_dir><time stamp>_<pattern>'. Returns None.

    Parameters
    ----------
    pattern : Text
        search pattern; appended to the generated file name
    dest_dir : Text
        prefix (directory path) for the output file
    results : List[Text]
        lines to write (delegated to ``save``)
    """
    # BUG FIX: split() with no argument collapses whitespace runs, always
    # yielding 5 fields: [weekday, month, day, time, year].  The old
    # split(" ") produced 6 fields only when asctime() space-padded a
    # single-digit day ("Jun  9"), and raised IndexError on t[5] for
    # double-digit days.
    t = asctime().split()
    file_name = [t[4], t[1], t[2], t[0], t[3]]  # year_month_day_weekday_time
    save_to = dest_dir + "_".join(file_name) + "_" + pattern
    save(results, save_to)
    return None
def search_db(pattern: Text, db: Text) -> List[Text]:
    """Search the shelve `db` for songs whose word-set contains `pattern`."""
    wanted = set(normalized_pattern(pattern))
    return subset_matches(wanted, db)
def search_db_bigrams(pattern: Text, db: Text) -> List[Text]:
    """Search the shelve `db` for songs containing `pattern`'s word bigrams."""
    wanted = set(bigrams(normalized_pattern(pattern)))
    return subset_matches(wanted, db)
def subset_matches(pattern_set: Set, db: Text) -> List[Text]:
    """Collect file paths of songs in `db` whose set contains `pattern_set`."""
    found: List[Text] = []
    with shelve.open(db) as miniset:
        # iterating the shelve yields its keys (song names)
        for name in miniset:
            if pattern_set.issubset(lyric_set(name, miniset)):
                found.append(file_path(name, miniset))
    return found
def user_input_dirs() -> int:
    """Ask the user how many directories to divide among. Returns Integer.

    Reads from and prints to the terminal; exits on non-numeric input.
    """
    raw = input("How many dirs do you want to divide among? ")
    try:
        count = int(raw)
    except ValueError:
        print("Please choose a number. Quitting...")
        quit()
    return count
def user_input_match_ratio() -> int:
    """Ask the user for a minimum vocabulary match ratio. Returns Integer.

    Reads from and prints to the terminal; exits on non-numeric input or
    a value outside [0, 100].
    """
    raw = input("Choose a minimum match percentage [0-100]: ")
    try:
        ratio = int(raw)
    except ValueError:
        print("Please choose a number. Quitting...")
        quit()
    if 0 <= ratio <= 100:
        return ratio
    print("Please choose a number between 0 and 100.")
    quit()
def user_input_pattern() -> Text:
    """Get the search pattern from the user. Returns String.

    Reads from and prints to the terminal; exits on empty or failed input.
    """
    try:
        pattern = input("What do you want to search for? ")
    # BUG FIX: was a bare `except:`, which also swallowed the SystemExit
    # raised by quit() in the empty-input branch below.
    except (EOFError, KeyboardInterrupt):
        print("Unknown error getting user input. Naked exception.")
        quit()
    # BUG FIX: was `len(pattern) is 0` — identity comparison with an int
    # literal (a SyntaxWarning in modern Python); also moved out of the
    # try block so its quit() is not intercepted.
    if len(pattern) == 0:
        print("Give a string to search for.")
        quit()
    print("Searching for: \n\t'" + pattern + "'")
    return pattern
def vocab_ratio(song_set: Set, pattern_set: Set) -> float:
    """Percentage of `pattern_set` words present in `song_set`.

    Returns 0.00 when `pattern_set` is empty.
    """
    if not pattern_set:  # guard: avoid division by zero
        return 0.00
    overlap = len({word for word in pattern_set if word in song_set})
    return round(overlap / len(pattern_set), 2) * 100
def vocab_search(pattern: Text,
                 minimum: int,
                 set_dir: Text) -> List[Tuple[float, Text]]:
    """Rank songs under `set_dir` by vocabulary overlap with `pattern`.

    Every song whose ``vocab_ratio`` is at least `minimum` is collected as
    a (rank, name) pair; a progress bar is drawn during the scan.
    """
    pattern_set = set(normalized_pattern(pattern))
    matches: List[Tuple[float, Text]] = []
    searched = 0
    song_tot = count_sets_in_dbs(set_dir)
    for db in Path(set_dir).glob("**/*.db"):
        with shelve.open(str(db)) as miniset:
            for name in miniset:  # iterate song names (shelve keys)
                rank = vocab_ratio(lyric_set(name, miniset), pattern_set)
                if rank >= minimum:
                    matches.append((rank, name))
                searched += 1
                progress_bar(searched, song_tot,
                             prefix="Matches: " + str(len(matches)))
    return matches
# def verbose_paths(pattern: Text, paths: List[Text]) -> None:
def verbose_paths(paths: List[Text]) -> None:
    """Print, per path, whether it exists on disk. Returns None."""
    print("Paths status;")
    for name in paths:
        exists = Path(name).exists()
        print("\t{0} {1:<15}".format(exists, name))
    return None
def verbose_exact_results(results: List[Text]) -> None:
    """Print exact-match count and search time to the terminal. Returns None."""
    row = "\t{0:<20} {1:>6}"
    print(row.format("Exact matches:", len(results[0])))
    print(row.format("Search time (sec):", round(results[1], 2)))
    return None
def verbose_possible_results(possible: List[Text]) -> None:
    """Print possible-match count and search time to the terminal. Returns None."""
    row = "\t{0:<20} {1:>6}"
    print(row.format("Possible matches:", len(possible[0])))
    print(row.format("Search time (sec):", round(possible[1], 2)))
    return None
| 31.173585 | 71 | 0.592059 |
56ed81643d4d14a3951c9f7b26537b0cd5efdb88 | 18,195 | py | Python | tests/test_modeling_tf_gpt2.py | christy-yuan-li/transformers-1 | 2ee9f9b69e67426aaed690f652f9cdd8b524b99d | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_tf_gpt2.py | christy-yuan-li/transformers-1 | 2ee9f9b69e67426aaed690f652f9cdd8b524b99d | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_tf_gpt2.py | christy-yuan-li/transformers-1 | 2ee9f9b69e67426aaed690f652f9cdd8b524b99d | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import GPT2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers.models.gpt2.modeling_tf_gpt2 import (
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGPT2DoubleHeadsModel,
TFGPT2ForSequenceClassification,
TFGPT2LMHeadModel,
TFGPT2Model,
shape_list,
)
class TFGPT2ModelTester:
    """Build small GPT-2 configs/dummy inputs and run shape checks for TF tests.

    The hyperparameters below are deliberately tiny (vocab 99, hidden 32,
    5 layers) so the checks run quickly; they are not realistic GPT-2 sizes.
    Each ``create_and_check_*`` method instantiates a model, runs a forward
    pass, and asserts output shapes (or past-cache consistency) via
    ``self.parent`` (the owning ``unittest.TestCase``).
    """
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_token_type_ids = True
        self.use_input_mask = True
        self.use_labels = True
        self.use_mc_token_ids = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
        # bos/eos/pad all share the last vocab id in this dummy setup
        self.bos_token_id = self.vocab_size - 1
        self.eos_token_id = self.vocab_size - 1
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        """Create a GPT2Config plus random ids/masks/labels for one test run."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = GPT2Config(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            # intermediate_size=self.intermediate_size,
            # hidden_act=self.hidden_act,
            # hidden_dropout_prob=self.hidden_dropout_prob,
            # attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            n_positions=self.max_position_embeddings,
            n_ctx=self.max_position_embeddings,
            # type_vocab_size=self.type_vocab_size,
            # initializer_range=self.initializer_range
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Forward pass via dict, list, and bare-ids inputs; check output shape."""
        model = TFGPT2Model(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        inputs = [input_ids, None, input_mask]  # None is the input for 'past'
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check that decoding with a cached `past` matches full recomputation."""
        model = TFGPT2Model(config=config)
        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
        outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)
        # append to next input_ids and token_type_ids
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1)
        output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, token_type_ids=next_token_types, past=past)["last_hidden_state"]
        # select random slice
        random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)
    def create_and_check_gpt2_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check cached decoding equivalence when half the sequence is masked."""
        model = TFGPT2Model(config=config)
        # create attention mask
        half_seq_length = self.seq_length // 2
        attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)
        attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)
        attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)
        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
        vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
        condition = tf.transpose(
            tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
        )
        input_ids = tf.where(condition, random_other_next_tokens, input_ids)
        # append to next input_ids and attn_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attn_mask = tf.concat([attn_mask, tf.ones((shape_list(attn_mask)[0], 1), dtype=tf.int32)], axis=1)
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past=past, attention_mask=attn_mask)["last_hidden_state"]
        # select random slice
        random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-12)
    def create_and_check_gpt2_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check cached decoding when appending several tokens at once."""
        model = TFGPT2Model(config=config)
        input_ids = input_ids[:1, :]
        input_mask = input_mask[:1, :]
        token_type_ids = token_type_ids[:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, use_cache=True)
        output, past = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        next_token_types = ids_tensor((self.batch_size, 3), self.type_vocab_size)
        # append to next input_ids and token_type_ids
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)
        next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1)
        output_from_no_past = model(
            next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask
        )["last_hidden_state"]
        output_from_past = model(
            next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past=past
        )["last_hidden_state"]
        self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
    def create_and_check_gpt2_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check the LM head output logits shape."""
        model = TFGPT2LMHeadModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_gpt2_double_head(
        self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
    ):
        """Check double-head (LM + multiple choice) output shapes."""
        model = TFGPT2DoubleHeadsModel(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "mc_token_ids": mc_token_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_gpt2_for_sequence_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args
    ):
        """Check sequence-classification logits shape."""
        config.num_labels = self.num_labels
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
            "labels": sequence_labels,
        }
        model = TFGPT2ForSequenceClassification(config)
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFGPT2ModelTest(TFModelTesterMixin, unittest.TestCase):
    """Standard model-test suite for the TF GPT-2 family.

    Each ``test_*`` delegates to a ``TFGPT2ModelTester`` check; the mixin
    supplies the common tests shared across transformer models.
    """
    all_model_classes = (
        (TFGPT2Model, TFGPT2LMHeadModel, TFGPT2ForSequenceClassification, TFGPT2DoubleHeadsModel)
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (TFGPT2LMHeadModel,) if is_tf_available() else ()
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFGPT2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_gpt2_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model(*config_and_inputs)
    def test_gpt2_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs)
    def test_gpt2_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs)
    def test_gpt2_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs)
    def test_gpt2_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_lm_head(*config_and_inputs)
    def test_gpt2_double_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_double_head(*config_and_inputs)
    def test_model_common_attributes(self):
        """Input embeddings must be a Keras layer; only generative models expose output embeddings."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in self.all_generative_model_classes:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_gpt2_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs)
    def test_mixed_precision(self):
        # TODO JP: Make GPT2 float16 compliant
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFGPT2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFGPT2ModelLanguageGenerationTest(unittest.TestCase):
    """Integration tests: greedy generation from pretrained checkpoints must
    reproduce known token sequences (marked @slow: downloads weights)."""
    @slow
    def test_lm_generate_gpt2(self):
        model = TFGPT2LMHeadModel.from_pretrained("gpt2")
        input_ids = tf.convert_to_tensor([[464, 3290]], dtype=tf.int32)  # The dog
        expected_output_ids = [
            464,
            3290,
            373,
            1043,
            287,
            257,
            2214,
            1474,
            262,
            16246,
            286,
            2688,
            290,
            2688,
            27262,
            13,
            198,
            198,
            464,
            3290,
        ]  # The dog was found in a field near the intersection of West and West Streets.\n\nThe dog
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_lm_generate_distilgpt2(self):
        model = TFGPT2LMHeadModel.from_pretrained("distilgpt2")
        input_ids = tf.convert_to_tensor([[464, 1893]], dtype=tf.int32)  # The president
        expected_output_ids = [
            464,
            1893,
            286,
            262,
            1578,
            1829,
            11,
            290,
            262,
            1893,
            286,
            262,
            1578,
            7526,
            11,
            423,
            587,
            287,
            262,
            2635,
        ]  # The president of the United States, and the president of the United Kingdom, have been in the White
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 39.468547 | 117 | 0.669854 |
78db66e4952af787252c688ca5bee69f5dd45ceb | 199 | py | Python | log/lib.py | mitsuboshi20190723/py3Study | 428eeb8a83efb6dbdffc40fd60acb7b765ffda24 | [
"Apache-2.0"
] | null | null | null | log/lib.py | mitsuboshi20190723/py3Study | 428eeb8a83efb6dbdffc40fd60acb7b765ffda24 | [
"Apache-2.0"
] | null | null | null | log/lib.py | mitsuboshi20190723/py3Study | 428eeb8a83efb6dbdffc40fd60acb7b765ffda24 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# 2020.11.22
# lib.py
# ver 0.1
# Kunihito Mitsuboshi
# license(Apache-2.0) at http://www.apache.org/licenses/LICENSE-2.0
##
import sys
| 14.214286 | 69 | 0.603015 |
1f071863131bb2b18dfc5c822785e1dab12555d2 | 19,204 | py | Python | utilipy/decorators/func_io.py | nstarman/utilipy | 17984942145d31126724df23500bafba18fb7516 | [
"BSD-3-Clause"
] | 2 | 2020-11-15T01:48:45.000Z | 2020-12-02T20:44:20.000Z | utilipy/decorators/func_io.py | nstarman/astroPHD | 17984942145d31126724df23500bafba18fb7516 | [
"BSD-3-Clause"
] | 22 | 2020-09-13T17:58:24.000Z | 2022-02-04T19:05:23.000Z | utilipy/decorators/func_io.py | nstarman/utilipy | 17984942145d31126724df23500bafba18fb7516 | [
"BSD-3-Clause"
] | 1 | 2020-04-21T22:41:01.000Z | 2020-04-21T22:41:01.000Z | # -*- coding: utf-8 -*-
"""Function Input and Output Decorators."""
__all__ = [
"store_function_input",
"add_folder_backslash",
"random_generator_from_seed",
"dtypeDecoratorMaker",
]
##############################################################################
# IMPORTS
# BUILT-IN
import typing as T
# THIRD PARTY
import numpy as np
# PROJECT-SPECIFIC
from utilipy.utils import functools, inspect
from utilipy.utils.typing import EllipsisType
##############################################################################
# CODE
##############################################################################
def store_function_input(
    function: T.Callable = None,
    *,
    store_inputs: bool = True,
    _doc_style: str = "numpy",
    _doc_fmt: T.Optional[T.Dict[str, T.Any]] = None,
):
    """Store Function Inputs.
    Store the function inputs as a BoundArguments.
    Parameters
    ----------
    function : T.Callable or None, optional
        the function to be decoratored
        if None, then returns decorator to apply.
    store_inputs : bool, optional
        whether to return all the inputs to the function as a dictionary
    Returns
    -------
    wrapper : T.Callable
        Wrapper for `function` that can store function inputs
        in a BoundArguments instance.
        Includes the original function in a method `.__wrapped__`
    Other Parameters
    ----------------
    _doc_style: str or formatter, optional
        default 'numpy'
        parameter to `utilipy.wraps`
    _doc_fmt: dict, optional
        default None
        parameter to `utilipy.wraps`
    """
    if function is None:  # allowing for optional arguments
        return functools.partial(
            store_function_input,
            store_inputs=store_inputs,
            _doc_style=_doc_style,
            _doc_fmt=_doc_fmt,
        )
    sig = inspect.fuller_signature(function)
    # BUG FIX: the old signature used the shared mutable default
    # `_doc_fmt={}` and wrote into it below, so every decorated function
    # accumulated keys in the *same* dict.  Copy before mutating so a
    # caller-supplied dict is never modified either.
    _doc_fmt = {} if _doc_fmt is None else dict(_doc_fmt)
    _doc_fmt["wrapped_function"] = function.__name__
    @functools.wraps(function, _doc_style=_doc_style, _doc_fmt=_doc_fmt)
    def wrapper(*args, store_inputs: bool = store_inputs, **kw):
        """Wrapper docstring.
        Parameters
        ----------
        store_inputs: bool
            whether to store function inputs in a `~inspect.BoundArguments`
            instance default {store_inputs}
        Returns
        -------
        inputs: `~inspect.BoundArguments`
            the inputs to ``{wrapped_function}``
            only returned if `store_inputs` is True
            other returned values are in now in a tuple
        """
        return_ = function(*args, **kw)
        if store_inputs:  # make and return BoundArguments
            inputs = sig.bind_partial_with_defaults(*args, **kw)
            return return_, inputs
        else:
            return return_
    # /def
    return wrapper
# /def
# -------------------------------------------------------------------
def add_folder_backslash(
    function=None,
    *,
    arguments: T.List[T.Union[str, int]] = [],
    _doc_style="numpy",
    _doc_fmt={},
):
    """Add backslashes to str arguments.
    For use in ensuring directory file-paths end in '/', when
    ``os.join`` just won't do.
    Parameters
    ----------
    function : T.Callable or None, optional
        the function to be decoratored
        if None, then returns decorator to apply.
    arguments : list of string or int, optional
        arguments to which to append '/', if not already present
        strings are names of arguments.
        Can also be int, which indexes the function's positional parameters.
    Returns
    -------
    wrapper : T.Callable
        wrapper for function
        does a few things
        includes the original function in a method ``.__wrapped__``
    Other Parameters
    ----------------
    _doc_style: str or formatter, optional
        default 'numpy'
        parameter to `~utilipy.wraps`
    _doc_fmt: dict, optional
        default None
        parameter to `~utilipy.wraps`
    Examples
    --------
    For modifying a single argument
    >>> @add_folder_backslash(arguments='path')
    ... def func(path):
    ...     return path
    >>> func("~/Documents")
    '~/Documents/'
    When several arguments need modification.
    >>> @add_folder_backslash(arguments=('path1', 'path2'))
    ... def func(path1, path2):
    ...     return (path1, path2)
    >>> func("~/Documents", "~Desktop")
    ('~/Documents/', '~Desktop/')
    """
    if isinstance(arguments, (str, int)):  # recast as tuple
        arguments = (arguments,)
    if function is None:  # allowing for optional arguments
        return functools.partial(
            add_folder_backslash,
            arguments=arguments,
            _doc_style=_doc_style,
            _doc_fmt=_doc_fmt,
        )
    sig = inspect.signature(function)
    # parameter names in declaration order, for resolving int indices
    param_names = tuple(sig.parameters)
    @functools.wraps(function, _doc_style=_doc_style, _doc_fmt=_doc_fmt)
    def wrapper(*args, **kw):
        """Wrapper that appends '/' to the configured path arguments."""
        # bind args & kwargs to function
        ba = sig.bind_partial(*args, **kw)
        ba.apply_defaults()
        for name in arguments:  # iter through args
            # BUG FIX: the old code looked up `ba.arguments[name]` before
            # branching on the type of `name`, which raised KeyError for
            # int entries, and then tried `ba.args[name] += ...`, which
            # fails because BoundArguments.args is a tuple.  Resolve int
            # indices to parameter names and always edit `ba.arguments`.
            if isinstance(name, int):
                name = param_names[name]
            elif not isinstance(name, str):
                raise TypeError("elements of `arguments` must be int or str")
            value = ba.arguments.get(name)
            if not isinstance(value, (str, bytes)):
                continue
            backslash = type(value)("/")  # works for both str and bytes
            if not value.endswith(backslash):
                ba.arguments[name] = value + backslash
        return function(*ba.args, **ba.kwargs)
    # /def
    return wrapper
# /def
#####################################################################
def random_generator_from_seed(
    function: T.Callable = None,
    seed_names: T.Union[str, T.Sequence[str]] = ("random", "random_seed"),
    generator: T.Callable = np.random.RandomState,
    raise_if_not_int: bool = False,
):
    """Function decorator to convert random seed to random number generator.
    Parameters
    ----------
    function : types.FunctionType or None (optional)
        the function to be decoratored
        if None, then returns decorator to apply.
    seed_names : list (optional)
        possible parameter names for the random seed
    generator : ClassType (optional)
        ex :class:`numpy.random.default_rng`, :class:`numpy.random.RandomState`
    raise_if_not_int : bool (optional, keyword-only)
        raise TypeError if seed argument is not an int.
    Returns
    -------
    wrapper : types.FunctionType
        wrapper for function
        converts random seeds to random number generators before calling.
        includes the original function in a method `.__wrapped__`
    Raises
    ------
    TypeError
        If `raise_if_not_int` is True and seed argument is not an int.
    """
    if isinstance(seed_names, str):  # correct a bare string to list
        seed_names = (seed_names,)
    if function is None:  # allowing for optional arguments
        return functools.partial(
            random_generator_from_seed,
            seed_names=seed_names,
            generator=generator,
            # BUG FIX: this keyword used to be dropped here, so
            # `@random_generator_from_seed(raise_if_not_int=True)`
            # silently behaved as if the flag were False.
            raise_if_not_int=raise_if_not_int,
        )
    sig = inspect.signature(function)
    pnames = tuple(sig.parameters.keys())
    @functools.wraps(
        function,
        _doc_fmt={"seed_names": seed_names, "random_generator": generator},
    )
    def wrapper(*args, **kw):
        """Wrapper docstring, added to Function.
        Notes
        -----
        T.Any argument in {seed_names} will be interpreted as a random seed,
        if it is an integer, and will be converted to a random number generator
        of type {random_generator}.
        """
        ba = sig.bind_partial(*args, **kw)
        ba.apply_defaults()
        # go through possible parameter names for the random seed
        # if it is a parameter and the value is an int, change to generator
        for name in seed_names:  # iterate through possible
            if name in pnames:  # see if present
                if isinstance(ba.arguments[name], int):  # seed -> generator
                    ba.arguments[name] = generator(ba.arguments[name])
                elif raise_if_not_int:
                    # NOTE(review): this also raises when the argument is
                    # already a generator instance — confirm that is intended.
                    raise TypeError(f"{name} must be <int>")
                else:  # do not replace
                    pass
        # /for
        return function(*ba.args, **ba.kwargs)
    # /def
    return wrapper
# /def
#####################################################################
# DataType Decorators
class dtypeDecorator:
    """Ensure arguments are type *dtype*.
    Parameters
    ----------
    func : function, optional
        function to decorate
    inargs : list
        [(index, dtype), ...]
    outargs : list
        [(index, dtype), ...]
    these arguments, except func, should be specified by key word
    if inargs is forgotten and func is not a function, then func is
    assumed to be inargs.
    """
    def __new__(
        cls,
        func: T.Optional[T.Callable] = None,
        in_dtype: T.Any = None,
        out_dtype: T.Any = None,
    ):
        """New dtypeDecorator.

        Supports both bare (`@dtypeDecorator`) and called
        (`@dtypeDecorator(in_dtype=...)`) decoration by shifting
        arguments when the first positional is not a callable.
        """
        self = super().__new__(cls)  # making instance of self
        # correcting if forgot to specify in_dtype and no function
        # in this case, *in_dtype* is stored in *func*
        # need to do func->None, inarags<-func, and out_dtype<-in_dtype
        if not isinstance(func, T.Callable):
            # moving arguments 'forward'
            out_dtype = in_dtype
            in_dtype = func
            func = None
        # allowing for wrapping with calling the class
        if func is not None:
            # bare decoration: initialize here and return the wrapper
            # directly instead of a dtypeDecorator instance
            self.__init__(in_dtype, out_dtype)
            return self(func)
        # else:
        return self
    # /def
    def __init__(
        self, in_dtype: T.Any = None, out_dtype: T.Any = None
    ) -> None:
        """Initialize dtypeDecorator."""
        super().__init__()
        # in_dtype
        # TODO check in_dtype is list of lists
        self._in_dtype = in_dtype
        # out_dtype
        # TODO check out_dtype is list of lists
        self._out_dtype = out_dtype
        return
    # /def
    def __call__(self, wrapped_func: T.Callable) -> T.Callable:
        """Make Decorator.
        Parameters
        ----------
        wrapped_func: Callable
            function to be wrapped
        """
        # make wrapper
        @functools.wraps(wrapped_func)
        def wrapper(*args: T.Any, **kw: T.Any) -> T.Any:
            # making arguments self._dtype
            if self._in_dtype is None:  # no conversion needed
                return_ = wrapped_func(*args, **kw)
            elif len(args) == 0:
                return_ = wrapped_func(**kw)
            elif len(args) == 1:
                # TODO better
                # single positional: only index 0 may be converted
                if len(self._in_dtype) != 1 or self._in_dtype[0][0] != 0:
                    raise IndexError("too many indices")
                arg = self._in_dtype[0][1](args[0])
                return_ = wrapped_func(arg, **kw)
            else:
                args = list(args)  # allowing modifications
                for i, dtype in self._in_dtype:
                    args[i] = dtype(args[i])  # converting to desired dtype
                return_ = wrapped_func(*args, **kw)
            # POST
            if self._out_dtype is None:  # no conversion needed
                pass
            else:
                if not np.isscalar(return_):
                    return_ = list(return_)  # allowing modifications
                    for i, dtype in self._out_dtype:
                        # converting to desired dtype
                        return_[i] = dtype(return_[i])
                else:
                    # scalar return: only a single converter makes sense
                    if len(self._out_dtype) != 1:  # TODO do this check?
                        raise ValueError("out_dtype has too many indices")
                    return_ = self._out_dtype[0][1](return_)
            return return_
        # /def
        return wrapper
    # /def
# /class
# -------------------------------------------------------------------
# define class
class dtypeDecoratorBase:
    """Ensure arguments are type `dtype`.

    Base class meant to be subclassed with a concrete ``dtype`` via
    ``__init_subclass__`` (see ``dtypeDecoratorMaker``).

    Parameters
    ----------
    func : function (optional)
        function to decorate
    inargs : Ellipsis or iterable or slice or None (optional)
        - None (default), does nothing
        - Ellipsis: converts all arguments to dtype
        - iterable: convert arguments at index specified in iterable
            ex: [0, 2] converts arguments 0 & 2
        - slice: convert all arguments specified by slicer
    outargs : Ellipsis or iterable or slice or None (optional)
        - None (default), does nothing
        - iterable: convert arguments at index specified in iterable
            ex: [0, 2] converts arguments 0 & 2
        - slice: convert all arguments specified by slicer
        - Ellipsis : convert all arguments
    these arguments, except func, should be specified by key word.
    if inargs is forgotten and func is not a function, then func is
    assumed to be inargs.

    Examples
    --------
    >>> intDecorator = dtypeDecoratorMaker(int)
    >>> @intDecorator(inargs=[0, 1], outargs=2)
    ... def func(x, y, z):
    ...     return x, y, z, (x, y, z)
    ... # /def
    >>> print(func(1.1, 2.2, 3.3))
    (1, 2, 3, (1, 2, 3.3))
    """
    def __init_subclass__(cls, dtype: T.Any = None):
        """Initialize subclass & store dtype."""
        super().__init_subclass__()
        cls._dtype = dtype
    # /def
    @property
    def dtype(self):
        """Get dtype. read-only access."""
        return self._dtype
    # /def
    def __new__(
        cls,
        func: T.Callable = None,
        inargs: T.Union[EllipsisType, slice, T.Iterable, None] = None,
        outargs: T.Union[EllipsisType, slice, T.Iterable, None] = None,
    ):
        """Create the decorator; applies immediately when *func* is given."""
        self = super().__new__(cls)  # making instance of self
        # correcting if forgot to specify inargs and did not provide a func
        # in this case, *inargs* is stored in *func*
        # need to do func->None, inargs<-func, and outargs<-inargs
        if not isinstance(func, T.Callable):
            # moving arguments 'forward'
            outargs = inargs
            inargs = func
            func = None
        # allowing for wrapping with calling the class
        if func is not None:
            # need to initialize b/c not returning `self`
            self.__init__(inargs, outargs)
            return self(func)
        else:
            return self
    # /def
    def __init__(
        self,
        inargs: T.Union[EllipsisType, slice, T.Iterable, None] = None,
        outargs: T.Union[EllipsisType, slice, T.Iterable, None] = None,
    ) -> None:
        """Normalize and store the input/output argument selectors."""
        super().__init__()
        # inargs
        if inargs is Ellipsis:  # convert all
            self._inargs = slice(None)
        else:
            self._inargs = inargs
        # TODO validate inputs
        # NOTE(review): a *scalar* inargs (e.g. inargs=0) is not wrapped in a
        # list the way a scalar outargs is below — confirm whether that case
        # is supported.
        # outargs
        if outargs is Ellipsis:
            self._outargs = slice(None)
        elif np.isscalar(outargs):
            self._outargs = [outargs]
        else:
            self._outargs = outargs
    # /def
    def __call__(self, wrapped_func: T.Callable) -> T.Callable:
        """Wrap function.

        Works by making a wrapper which will convert input and
        output arguments to the specified data type.

        Parameters
        ----------
        wrapped_func : callable
            Function to wrap.
        """
        sig = inspect.signature(wrapped_func)
        @functools.wraps(wrapped_func)
        def wrapper(*args: T.Any, **kwargs: T.Any) -> T.Any:
            ba = sig.bind_partial(*args, **kwargs)
            ba.apply_defaults()
            # PRE
            # making arguments self._dtype
            if self._inargs is None:  # no conversion needed
                pass
            elif isinstance(self._inargs, slice):
                # converting inargs to list of indices
                lna = len(ba.args)
                inkeys = tuple(ba.arguments.keys())[:lna][self._inargs]
                inargs = tuple(range(lna))[self._inargs]
                # converting to desired dtype
                # NOTE(review): ba.args is re-evaluated as ba.arguments is
                # mutated inside this loop — order-sensitive; verify.
                for k, i in zip(inkeys, inargs):
                    ba.arguments[k] = self._dtype(ba.args[i])
            else:  # any iterable of int indices and/or str parameter names
                lna = len(ba.args)  # NOTE(review): unused in this branch
                argkeys = tuple(ba.arguments.keys())
                for i in self._inargs:
                    if isinstance(i, int):  # it's for args
                        ba.arguments[argkeys[i]] = self._dtype(args[i])
                    else:  # isinstance(i, str)
                        ba.arguments[i] = self._dtype(ba.arguments[i])
            # /PRE
            return_ = wrapped_func(*ba.args, **ba.kwargs)
            # POST
            # no conversion needed (only tuple returns are converted)
            if self._outargs is None or not isinstance(return_, tuple):
                return return_
            # slice
            elif isinstance(self._outargs, slice):
                iterable = tuple(range(len(return_)))[self._outargs]
            else:
                iterable = self._outargs
            return_ = list(return_)
            for i in iterable:
                return_[i] = self._dtype(return_[i])
            return tuple(return_)
            # /POST
        # /def
        return wrapper
    # /def
# /class
# -------------------------------------------------------------------
def dtypeDecoratorMaker(dtype: T.Any):
    """Function to make a dtype decorator.

    Parameters
    ----------
    dtype : type
        intended data type

    Returns
    -------
    dtypeDecorator : decorator class
        a decorator which can convert input and output arguments
        to the intended datatype

    Examples
    --------
    >>> intDecorator = dtypeDecoratorMaker(int)
    >>> @intDecorator(inargs=[0, 1], outargs=2)
    ... def func(x, y, z):
    ...     return x, y, z, (x, y, z)
    >>> x, y, z, orig = func(1.1, 2.2, 3.3)
    >>> print(x, y, z, orig)  # z->int before returned
    1 2 3 (1, 2, 3.3)
    """
    # make subclass with the target dtype baked in via __init_subclass__
    class dtypeDecorator(dtypeDecoratorBase, dtype=dtype):
        pass
    # /class
    # BUGFIX: f"{dtype}Decorator" rendered as "<class 'int'>Decorator" because
    # str(int) is "<class 'int'>"; use the type's own __name__ so the generated
    # class is called e.g. "intDecorator" as the docstring implies.
    dtypeDecorator.__name__ = f"{dtype.__name__}Decorator"
    dtypeDecorator.__qualname__ = dtypeDecorator.__name__
    return dtypeDecorator
# /def
#############################################################################
# MAKING DECORATORS
# Ready-made decorators for the most commonly needed data types.
intDecorator = dtypeDecoratorMaker(int)
floatDecorator = dtypeDecoratorMaker(float)
strDecorator = dtypeDecoratorMaker(str)
boolDecorator = dtypeDecoratorMaker(bool)
# numpy variants: convert to/from ndarray and float64 scalars.
ndarrayDecorator = dtypeDecoratorMaker(np.ndarray)
ndfloat64Decorator = dtypeDecoratorMaker(np.float64)
#############################################################################
# END
| 28.408284 | 79 | 0.550771 |
e3b1ec058b5045fe2a1f61008ffa81a9f654c1e3 | 219 | py | Python | scr/map_utr3_snp/in_utr3/relatejson.py | chunjie-sam-liu/miRNASNP-v3 | 41fab95b496b639674010863895547db0fc108bc | [
"MIT"
] | 1 | 2020-07-02T08:51:37.000Z | 2020-07-02T08:51:37.000Z | scr/map_utr3_snp/in_utr3/relatejson.py | chunjie-sam-liu/miRNASNP-v3 | 41fab95b496b639674010863895547db0fc108bc | [
"MIT"
] | null | null | null | scr/map_utr3_snp/in_utr3/relatejson.py | chunjie-sam-liu/miRNASNP-v3 | 41fab95b496b639674010863895547db0fc108bc | [
"MIT"
] | null | null | null | import json
surj = {}
with open("snp_utr3_relate.json","a") as sur:
with open("snp_utr3_relate") as infile:
for line in infile:
if line:
rs = line.split()[3]
surj[rs] = line.strip()
json.dump(surj,sur)
| 18.25 | 45 | 0.643836 |
9b8233a09577d1abb3edb7c4dad0bd916c43cf88 | 11,961 | py | Python | spyder/plugins/editor/widgets/tests/test_editorsplitter.py | aglotero/spyder | 075d32fa359b728416de36cb0e744715fa5e3943 | [
"MIT"
] | null | null | null | spyder/plugins/editor/widgets/tests/test_editorsplitter.py | aglotero/spyder | 075d32fa359b728416de36cb0e744715fa5e3943 | [
"MIT"
] | null | null | null | spyder/plugins/editor/widgets/tests/test_editorsplitter.py | aglotero/spyder | 075d32fa359b728416de36cb0e744715fa5e3943 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for EditorSplitter class in editor.py
"""
# Standard library imports
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
import pytest
from qtpy.QtCore import Qt
# Local imports
from spyder.plugins.editor.widgets.editor import EditorStack, EditorSplitter
# ---- Qt Test Fixtures
@pytest.fixture
def base_editor_bot(qtbot):
    """Return a bare EditorStack (collaborators mocked out) plus qtbot."""
    editor_stack = EditorStack(None, [])
    # The stack's external collaborators are irrelevant to these tests.
    editor_stack.set_introspector(Mock())
    editor_stack.set_find_widget(Mock())
    editor_stack.set_io_actions(Mock(), Mock(), Mock(), Mock())
    return editor_stack, qtbot
@pytest.fixture
def editor_splitter_bot(qtbot):
    """Create editor splitter."""
    # `es` and `editor_splitter` are aliases for the same widget.
    es = editor_splitter = EditorSplitter(None, Mock(), [], first=True)
    qtbot.addWidget(es)
    es.show()
    return es
@pytest.fixture
def editor_splitter_layout_bot(editor_splitter_bot):
    """Create editor splitter for testing layouts."""
    es = editor_splitter_bot
    # Allow the split() to duplicate editor stacks.
    def clone(editorstack):
        # Each cloned stack gets mocked collaborators and three open files.
        editorstack.close_action.setEnabled(False)
        editorstack.set_introspector(Mock())
        editorstack.set_find_widget(Mock())
        editorstack.set_io_actions(Mock(), Mock(), Mock(), Mock())
        editorstack.new('foo.py', 'utf-8', 'a = 1\nprint(a)\n\nx = 2')
        editorstack.new('layout_test.py', 'utf-8', 'print(spam)')
        # The third tab is this very test file.
        with open(__file__) as f:
            text = f.read()
        editorstack.new(__file__, 'utf-8', text)
    es.plugin.clone_editorstack.side_effect = clone
    # Setup editor info for this EditorStack.
    clone(es.editorstack)
    return es
# ---- Tests
def test_init(editor_splitter_bot):
    """Test __init__."""
    es = editor_splitter_bot
    # Widget-level defaults set by EditorSplitter.__init__.
    assert es.orientation() == Qt.Horizontal
    assert es.testAttribute(Qt.WA_DeleteOnClose)
    assert not es.childrenCollapsible()
    assert not es.toolbar_list
    assert not es.menu_list
    assert es.register_editorstack_cb == es.plugin.register_editorstack
    assert es.unregister_editorstack_cb == es.plugin.unregister_editorstack
    # No menu actions in parameter call.
    assert not es.menu_actions
    # EditorStack adds its own menu actions to the existing actions.
    assert es.editorstack.menu_actions != []
    assert isinstance(es.editorstack, EditorStack)
    # The stack was registered with the plugin exactly once, and no
    # unregister/clone happened yet.
    es.plugin.register_editorstack.assert_called_with(es.editorstack)
    es.plugin.unregister_editorstack.assert_not_called()
    es.plugin.clone_editorstack.assert_not_called()
    assert es.count() == 1
    assert es.widget(0) == es.editorstack
def test_close(qtbot):
    """Test the interface for closing the editor splitters."""
    # BUGFIX: this test used to call the ``editor_splitter_bot`` fixture
    # function directly (``editor_splitter_bot(qtbot)``), which pytest >= 4
    # rejects at runtime ("Fixtures are not meant to be called directly").
    # Build the root splitter inline instead, mirroring the fixture body.
    es = EditorSplitter(None, Mock(), [], first=True)
    qtbot.addWidget(es)
    es.show()
    # Split the main editorsplitter once, then split the second
    # editorsplitter twice.
    es.split()
    esw1 = es.widget(1)
    esw1.editorstack.set_closable(True)
    assert es.count() == 2
    assert esw1.count() == 1
    esw1.split()
    esw1w1 = esw1.widget(1)
    esw1w1.editorstack.set_closable(True)
    assert es.count() == 2
    assert esw1.count() == 2
    assert esw1w1.count() == 1
    esw1.split()
    esw1w2 = esw1.widget(2)
    esw1w2.editorstack.set_closable(True)
    assert es.count() == 2
    assert esw1.count() == 3
    assert esw1w1.count() == esw1w2.count() == 1
    # Assert that all the editorsplitters are visible.
    assert es.isVisible()
    assert esw1.isVisible()
    assert esw1w1.isVisible()
    assert esw1w2.isVisible()
    # Close the editorstack of the editorsplitter esw1 and assert that it is
    # not destroyed because it still contains the editorsplitters esw1w1 and
    # esw1w2.
    with qtbot.waitSignal(esw1.editorstack.destroyed, timeout=1000):
        esw1.editorstack.close_split()
    assert es.count() == 2
    assert esw1.count() == 2
    assert esw1.editorstack is None
    assert es.isVisible()
    assert esw1.isVisible()
    assert esw1w1.isVisible()
    assert esw1w2.isVisible()
    # Close the editorstack of the editorsplitter esw1w1, assert it is
    # correctly destroyed afterwards on the Qt side and that it is correctly
    # removed from the editorsplitter esw1.
    with qtbot.waitSignal(esw1w1.destroyed, timeout=1000):
        esw1w1.editorstack.close_split()
    # Accessing the destroyed wrapper must raise on the Qt side.
    with pytest.raises(RuntimeError):
        esw1w1.count()
    assert es.count() == 2
    assert esw1.count() == 1
    assert es.isVisible()
    assert esw1.isVisible()
    assert esw1w2.isVisible()
    # Close the editorstack of the editorsplitter esw1w2 and assert that
    # editorsplitters esw1w2 AND esw1 are correctly destroyed afterward on
    # the Qt side.
    with qtbot.waitSignal(esw1.destroyed, timeout=1000):
        esw1w2.editorstack.close_split()
    with pytest.raises(RuntimeError):
        esw1.count()
    with pytest.raises(RuntimeError):
        esw1w2.count()
    assert es.isVisible()
    assert es.count() == 1
    # Test that the editorstack of the main editorsplitter es cannot be closed.
    es.editorstack.close_split()
    assert es.isVisible()
    assert es.count() == 1
def test_split(editor_splitter_layout_bot):
    """Test split() that adds new splitters to this instance."""
    es = editor_splitter_layout_bot
    # Split main panel with default split.
    es.split()  # Call directly.
    assert es.orientation() == Qt.Vertical
    # The split action matching the current orientation is disabled.
    assert not es.editorstack.horsplit_action.isEnabled()
    assert es.editorstack.versplit_action.isEnabled()
    assert es.count() == 2
    assert isinstance(es.widget(1), EditorSplitter)
    # Each splitter gets its own editor stack as the first widget.
    assert es.widget(1).count() == 1
    assert es.widget(1).editorstack == es.widget(1).widget(0)
    es.widget(1).plugin.clone_editorstack.assert_called_with(
        editorstack=es.widget(1).editorstack)
    # Create a horizontal split on original widget.
    es.editorstack.sig_split_horizontally.emit()  # Call from signal.
    assert es.orientation() == Qt.Horizontal
    assert es.editorstack.horsplit_action.isEnabled()
    assert not es.editorstack.versplit_action.isEnabled()
    assert es.count() == 3
    assert isinstance(es.widget(2), EditorSplitter)
    # Two splits have been created and each contains one EditorStack.
    assert es.widget(1).count() == 1
    assert es.widget(2).count() == 1
    # Test splitting one of the children.
    es1 = es.widget(1)
    es1.editorstack.sig_split_vertically.emit()
    assert es.orientation() == Qt.Horizontal  # Main split didn't change.
    assert es1.orientation() == Qt.Vertical  # Child splitter.
    assert not es1.editorstack.horsplit_action.isEnabled()
    assert es1.editorstack.versplit_action.isEnabled()
    assert es1.count() == 2
    assert isinstance(es1.widget(0), EditorStack)
    assert isinstance(es1.widget(1), EditorSplitter)
    assert not es1.widget(1).isHidden()
def test_iter_editorstacks(editor_splitter_bot):
    """Test iter_editorstacks."""
    es = editor_splitter_bot
    es_iter = es.iter_editorstacks
    # Check base splitter.
    assert es_iter() == [(es.editorstack, es.orientation())]
    # Split once.
    es.split(Qt.Vertical)
    esw1 = es.widget(1)
    assert es_iter() == [(es.editorstack, es.orientation()),
                         (esw1.editorstack, esw1.orientation())]
    # Second splitter on base isn't iterated.
    es.split(Qt.Horizontal)
    assert es_iter() == [(es.editorstack, es.orientation()),
                         (esw1.editorstack, esw1.orientation())]
    # Split a child: iteration recurses into the first child splitter only.
    esw1.split(Qt.Vertical)
    esw1w1 = es.widget(1).widget(1)
    assert es_iter() == [(es.editorstack, es.orientation()),
                         (esw1.editorstack, esw1.orientation()),
                         (esw1w1.editorstack, esw1w1.orientation())]
def test_get_layout_settings(editor_splitter_bot, qtbot, mocker):
    """Test get_layout_settings()."""
    es = editor_splitter_bot
    # Initial settings from setup.
    setting = es.get_layout_settings()
    assert setting['splitsettings'] == [(False, None, [])]
    # BUGFIX: this test used to call the ``base_editor_bot`` fixture function
    # directly (``base_editor_bot(qtbot)[0]``), which pytest >= 4 rejects at
    # runtime.  Build the stacks inline with a local helper instead.
    def _make_editorstack():
        stack = EditorStack(None, [])
        stack.set_introspector(Mock())
        stack.set_find_widget(Mock())
        stack.set_io_actions(Mock(), Mock(), Mock(), Mock())
        return stack
    # Add some editors to patch output of iter_editorstacks.
    stack1 = _make_editorstack()
    stack1.new('foo.py', 'utf-8', 'a = 1\nprint(a)\n\nx = 2')
    stack1.new('layout_test.py', 'utf-8', 'spam egg\n')
    stack2 = _make_editorstack()
    stack2.new('test.py', 'utf-8', 'test text')
    mocker.patch.object(EditorSplitter, "iter_editorstacks")
    EditorSplitter.iter_editorstacks.return_value = (
        [(stack1, Qt.Vertical), (stack2, Qt.Horizontal)])
    setting = es.get_layout_settings()
    assert setting['hexstate']
    assert setting['sizes'] == es.sizes()
    assert setting['splitsettings'] == [(False, 'foo.py', [5, 3]),
                                        (False, 'test.py', [2])]
def test_set_layout_settings_dont_goto(editor_splitter_layout_bot):
    """Test set_layout_settings() with dont_goto=True (cursors untouched)."""
    es = editor_splitter_layout_bot
    # Line count of the third tab (this test file itself).
    linecount = es.editorstack.data[2].editor.get_cursor_line_number()
    # New layout to restore.
    state = '000000ff000000010000000200000231000001ff00ffffffff010000000200'
    sizes = [561, 511]
    splitsettings = [(False, 'layout_test.py', [2, 1, 52]),
                     (False, 'foo.py', [3, 2, 125]),
                     (False, __file__, [1, 1, 1])]
    new_settings = {'hexstate': state,
                    'sizes': sizes,
                    'splitsettings': splitsettings}
    # Current widget doesn't have saved settings applied.
    get_settings = es.get_layout_settings()
    assert es.count() == 1
    assert get_settings['hexstate'] != state
    assert get_settings['splitsettings'] != splitsettings
    # Invalid settings value.
    assert es.set_layout_settings({'spam': 'test'}) is None
    # Restore layout with dont_goto set.
    es.set_layout_settings(new_settings, dont_goto=True)
    get_settings = es.get_layout_settings()
    # Check that the panels were restored.
    assert es.count() == 2  # One EditorStack and one EditorSplitter.
    assert es.widget(1).count() == 2  # One EditorStack and one EditorSplitter.
    assert es.widget(1).widget(1).count() == 1  # One EditorStack.
    assert get_settings['hexstate'] == state
    # All the lines for each tab and split are at the last line number.
    assert get_settings['splitsettings'] == [(False, 'foo.py', [5, 2, linecount]),
                                             (False, 'foo.py', [5, 2, linecount]),
                                             (False, 'foo.py', [5, 2, linecount])]
def test_set_layout_settings_goto(editor_splitter_layout_bot):
    """Test set_layout_settings() without dont_goto (cursors repositioned)."""
    es = editor_splitter_layout_bot
    # New layout to restore.
    state = '000000ff000000010000000200000231000001ff00ffffffff010000000200'
    sizes = [561, 511]
    splitsettings = [(False, 'layout_test.py', [2, 1, 52]),
                     (False, 'foo.py', [3, 2, 125]),
                     (False, __file__, [1, 1, 1])]
    new_settings = {'hexstate': state,
                    'sizes': sizes,
                    'splitsettings': splitsettings}
    # Restore layout without dont_goto, meaning it should position to the lines.
    es.set_layout_settings(new_settings, dont_goto=None)
    get_settings = es.get_layout_settings()
    # Even though the original splitsettings had different file names
    # selected, the current tab isn't restored in set_layout_settings().
    # However, this shows that the current line was positioned for each tab
    # and each split.
    assert get_settings['splitsettings'] == [(False, 'foo.py', [2, 1, 52]),
                                             (False, 'foo.py', [3, 2, 125]),
                                             (False, 'foo.py', [1, 1, 1])]
if __name__ == "__main__":
import os.path as osp
pytest.main(['-x', osp.basename(__file__), '-v', '-rw'])
| 35.076246 | 82 | 0.662654 |
5c0ddb6875cbfa6b875216b03aa72a14aee03a0f | 2,626 | py | Python | xlsxwriter/test/comparison/test_chart_up_down_bars02.py | sontek/XlsxWriter | 7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2015-05-19T22:17:15.000Z | 2015-05-19T22:17:15.000Z | xlsxwriter/test/comparison/test_chart_up_down_bars02.py | sontek/XlsxWriter | 7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_chart_up_down_bars02.py | sontek/XlsxWriter | 7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        base_name = 'chart_up_down_bars02.xlsx'
        comparison_dir = 'xlsxwriter/test/comparison/'

        # Generated file and the Excel-produced reference file.
        self.got_filename = comparison_dir + '_test_' + base_name
        self.exp_filename = comparison_dir + 'xlsx_files/' + base_name

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with up-down bars."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})

        # Pin the axis ids so the XML matches the reference file.
        chart.axis_ids = [49019136, 49222016]

        # Write each data column to its starting cell.
        columns = {
            'A1': [1, 2, 3, 4, 5],
            'B1': [2, 4, 6, 8, 10],
            'C1': [3, 6, 5, 12, 15],
        }
        for start_cell, values in columns.items():
            worksheet.write_column(start_cell, values)

        chart.set_up_down_bars({
            'up': {
                'fill': {'color': 'red'},
                'line': {'color': 'yellow'}
            },
            'down': {
                'fill': {'color': '#00B050'},
                'border': {
                    'color': '#00B0F0',
                    'dash_type': 'square_dot'
                },
            },
        })

        # Both series share the same category range.
        for values_range in ('=Sheet1!$B$1:$B$5', '=Sheet1!$C$1:$C$5'):
            chart.add_series({
                'categories': '=Sheet1!$A$1:$A$5',
                'values': values_range,
            })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        got, exp = _compare_xlsx_files(self.got_filename,
                                       self.exp_filename,
                                       self.ignore_files,
                                       self.ignore_elements)

        self.assertEqual(got, exp)

    def tearDown(self):
        # Remove the generated workbook once the comparison is done.
        if os.path.exists(self.got_filename):
            os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| 26.26 | 79 | 0.470678 |
9b8a3461ee253265b4850032aa5e82df995a5550 | 4,906 | py | Python | openapi_server/models/landing_page.py | eugenegesdisc/gmuedr | e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd | [
"MIT"
] | null | null | null | openapi_server/models/landing_page.py | eugenegesdisc/gmuedr | e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd | [
"MIT"
] | null | null | null | openapi_server/models/landing_page.py | eugenegesdisc/gmuedr | e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd | [
"MIT"
] | null | null | null | # coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server.models.landing_page_contact import LandingPageContact
from openapi_server.models.landing_page_provider import LandingPageProvider
from openapi_server.models.link import Link
from openapi_server import util
class LandingPage(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, title: str=None, description: str=None, links: List[Link]=None, keywords: List[str]=None, provider: LandingPageProvider=None, contact: LandingPageContact=None):
        """LandingPage - a model defined in OpenAPI
        :param title: The title of this LandingPage.
        :param description: The description of this LandingPage.
        :param links: The links of this LandingPage.
        :param keywords: The keywords of this LandingPage.
        :param provider: The provider of this LandingPage.
        :param contact: The contact of this LandingPage.
        """
        # Type map used by the deserializer (util.deserialize_model).
        self.openapi_types = {
            'title': str,
            'description': str,
            'links': List[Link],
            'keywords': List[str],
            'provider': LandingPageProvider,
            'contact': LandingPageContact
        }
        # JSON key names for each attribute.
        self.attribute_map = {
            'title': 'title',
            'description': 'description',
            'links': 'links',
            'keywords': 'keywords',
            'provider': 'provider',
            'contact': 'contact'
        }
        # NOTE(review): assigning the private attributes directly bypasses the
        # property setters, so the "links must not be None" check in the
        # ``links`` setter is not enforced at construction time — confirm this
        # matches the generator's intent before changing it.
        self._title = title
        self._description = description
        self._links = links
        self._keywords = keywords
        self._provider = provider
        self._contact = contact
    @classmethod
    def from_dict(cls, dikt: dict) -> 'LandingPage':
        """Returns the dict as a model
        :param dikt: A dict.
        :return: The landingPage of this LandingPage.
        """
        return util.deserialize_model(dikt, cls)
    @property
    def title(self):
        """Gets the title of this LandingPage.
        :return: The title of this LandingPage.
        :rtype: str
        """
        return self._title
    @title.setter
    def title(self, title):
        """Sets the title of this LandingPage.
        :param title: The title of this LandingPage.
        :type title: str
        """
        self._title = title
    @property
    def description(self):
        """Gets the description of this LandingPage.
        :return: The description of this LandingPage.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this LandingPage.
        :param description: The description of this LandingPage.
        :type description: str
        """
        self._description = description
    @property
    def links(self):
        """Gets the links of this LandingPage.
        :return: The links of this LandingPage.
        :rtype: List[Link]
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this LandingPage.
        ``links`` is a required field; assigning None raises ValueError.
        :param links: The links of this LandingPage.
        :type links: List[Link]
        """
        if links is None:
            raise ValueError("Invalid value for `links`, must not be `None`")
        self._links = links
    @property
    def keywords(self):
        """Gets the keywords of this LandingPage.
        :return: The keywords of this LandingPage.
        :rtype: List[str]
        """
        return self._keywords
    @keywords.setter
    def keywords(self, keywords):
        """Sets the keywords of this LandingPage.
        :param keywords: The keywords of this LandingPage.
        :type keywords: List[str]
        """
        self._keywords = keywords
    @property
    def provider(self):
        """Gets the provider of this LandingPage.
        :return: The provider of this LandingPage.
        :rtype: LandingPageProvider
        """
        return self._provider
    @provider.setter
    def provider(self, provider):
        """Sets the provider of this LandingPage.
        :param provider: The provider of this LandingPage.
        :type provider: LandingPageProvider
        """
        self._provider = provider
    @property
    def contact(self):
        """Gets the contact of this LandingPage.
        :return: The contact of this LandingPage.
        :rtype: LandingPageContact
        """
        return self._contact
    @contact.setter
    def contact(self, contact):
        """Sets the contact of this LandingPage.
        :param contact: The contact of this LandingPage.
        :type contact: LandingPageContact
        """
        self._contact = contact
| 25.685864 | 183 | 0.611904 |
5236c0bf8d143eb6aa58129c02ce01c3c716c358 | 7,782 | py | Python | models/densenet121_3d_dilate_front.py | JSharpClone/M3D-RPN- | 5192b095e921b5c054a66fd0ce948e67aee957be | [
"Apache-2.0"
] | null | null | null | models/densenet121_3d_dilate_front.py | JSharpClone/M3D-RPN- | 5192b095e921b5c054a66fd0ce948e67aee957be | [
"Apache-2.0"
] | null | null | null | models/densenet121_3d_dilate_front.py | JSharpClone/M3D-RPN- | 5192b095e921b5c054a66fd0ce948e67aee957be | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
from torchvision import models
from lib.rpn_util import *
from flownet2pytorch.models import FlowNet2C
import torch
class Args:
    """Minimal stand-in for the argparse namespace that FlowNet2 models read.

    Parameters
    ----------
    rgb_max : float, optional
        Maximum RGB pixel value (input normalization). Defaults to 255.0.
    fp16 : bool, optional
        Whether to run in half precision. Defaults to False.
    """
    def __init__(self, rgb_max=255.0, fp16=False):
        # Defaults match the previously hard-coded values, so ``Args()``
        # behaves exactly as before; the values are now configurable.
        self.rgb_max = rgb_max
        self.fp16 = fp16
def dilate_layer(layer, val):
    """Set both the dilation and the padding of *layer* to *val*."""
    layer.dilation = layer.padding = val
class RPN(nn.Module):
    """Region proposal network head over a (dilated) DenseNet-121 backbone.

    Predicts per-anchor classification scores plus 2D and 3D box
    regressions; optionally concatenates optical-flow channels before the
    proposal feature layer when ``conf.flow`` is set.
    """
    def __init__(self, phase, base, conf):
        super(RPN, self).__init__()
        self.flow = conf.flow
        self.base = base
        # Remove the pooling in transition3 to keep spatial resolution.
        del self.base.transition3.pool
        # dilate denseblock4 convolutions to compensate for the removed pool
        dilate_layer(self.base.denseblock4.denselayer1.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer2.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer3.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer4.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer5.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer6.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer7.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer8.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer9.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer10.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer11.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer12.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer13.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer14.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer15.conv2, 2)
        dilate_layer(self.base.denseblock4.denselayer16.conv2, 2)
        # settings
        self.phase = phase
        self.num_classes = len(conf['lbls']) + 1  # +1 for background
        self.num_anchors = conf['anchors'].shape[0]
        # When flow is enabled, two extra channels (the flow field) are
        # concatenated to the backbone features before this layer.
        if self.flow:
            self.prop_feats = nn.Sequential(
                nn.Conv2d(self.base[-1].num_features+2, 512, 3, padding=1),
                nn.ReLU(inplace=True),
            )
        else:
            self.prop_feats = nn.Sequential(
                nn.Conv2d(self.base[-1].num_features, 512, 3, padding=1),
                nn.ReLU(inplace=True),
            )
        # outputs: one 1x1 conv per regression target, num_anchors channels each
        self.cls = nn.Conv2d(self.prop_feats[0].out_channels, self.num_classes * self.num_anchors, 1)
        # bbox 2d
        self.bbox_x = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_y = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_w = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_h = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        # bbox 3d
        self.bbox_x3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_y3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_z3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_w3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_h3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_l3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_rY3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        # front-face center of the 3D box
        self.bbox_front_x3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_front_y3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.bbox_front_z3d = nn.Conv2d(self.prop_feats[0].out_channels, self.num_anchors, 1)
        self.softmax = nn.Softmax(dim=1)
        self.feat_stride = conf.feat_stride
        self.feat_size = calc_output_size(np.array(conf.crop_size), self.feat_stride)
        # Precompute anchor ROIs for the expected feature-map size.
        # NOTE(review): hard-codes CUDA tensors — this model cannot run on CPU.
        self.rois = locate_anchors(conf.anchors, self.feat_size, conf.feat_stride, convert_tensor=True)
        self.rois = self.rois.type(torch.cuda.FloatTensor)
        self.anchors = conf.anchors
    def extract_feature(self, x):
        # Backbone forward pass only.
        x = self.base(x)
        return x
    def forward(self, x):
        # When flow is enabled, ``x`` is a pair (images, flow_field).
        if self.flow:
            flows = x[1]
            x = x[0]
        batch_size = x.size(0)
        x = self.extract_feature(x)
        if self.flow:
            # append the 2 flow channels to the backbone features
            x = torch.cat([x, flows], dim=1)
        prop_feats = self.prop_feats(x)
        cls = self.cls(prop_feats)
        # bbox 2d
        bbox_x = self.bbox_x(prop_feats)
        bbox_y = self.bbox_y(prop_feats)
        bbox_w = self.bbox_w(prop_feats)
        bbox_h = self.bbox_h(prop_feats)
        # bbox 3d
        bbox_x3d = self.bbox_x3d(prop_feats)
        bbox_y3d = self.bbox_y3d(prop_feats)
        bbox_z3d = self.bbox_z3d(prop_feats)
        bbox_w3d = self.bbox_w3d(prop_feats)
        bbox_h3d = self.bbox_h3d(prop_feats)
        bbox_l3d = self.bbox_l3d(prop_feats)
        bbox_rY3d = self.bbox_rY3d(prop_feats)
        bbox_front_x3d = self.bbox_front_x3d(prop_feats)
        bbox_front_y3d = self.bbox_front_y3d(prop_feats)
        bbox_front_z3d = self.bbox_front_z3d(prop_feats)
        feat_h = cls.size(2)
        feat_w = cls.size(3)
        # reshape for cross entropy: anchors folded into the height dimension
        cls = cls.view(batch_size, self.num_classes, feat_h * self.num_anchors, feat_w)
        # score probabilities
        prob = self.softmax(cls)
        # reshape for consistency: flatten each head to (batch, rois, 1)
        bbox_x = flatten_tensor(bbox_x.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_y = flatten_tensor(bbox_y.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_w = flatten_tensor(bbox_w.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_h = flatten_tensor(bbox_h.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_x3d = flatten_tensor(bbox_x3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_y3d = flatten_tensor(bbox_y3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_z3d = flatten_tensor(bbox_z3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_w3d = flatten_tensor(bbox_w3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_h3d = flatten_tensor(bbox_h3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_l3d = flatten_tensor(bbox_l3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_rY3d = flatten_tensor(bbox_rY3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_front_x3d = flatten_tensor(bbox_front_x3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_front_y3d = flatten_tensor(bbox_front_y3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        bbox_front_z3d = flatten_tensor(bbox_front_z3d.view(batch_size, 1, feat_h * self.num_anchors, feat_w))
        # bundle the per-target maps into single regression tensors
        bbox_2d = torch.cat((bbox_x, bbox_y, bbox_w, bbox_h), dim=2)
        bbox_3d = torch.cat((bbox_x3d, bbox_y3d, bbox_z3d, bbox_w3d, bbox_h3d, bbox_l3d, bbox_rY3d, bbox_front_x3d,
                             bbox_front_y3d, bbox_front_z3d), dim=2)
        feat_size = [feat_h, feat_w]
        cls = flatten_tensor(cls)
        prob = flatten_tensor(prob)
        if self.training:
            return cls, prob, bbox_2d, bbox_3d, feat_size
        else:
            # At inference, recompute the anchor ROIs if the feature-map
            # size differs from the one assumed at construction time.
            if self.feat_size[0] != feat_h or self.feat_size[1] != feat_w:
                self.feat_size = [feat_h, feat_w]
                self.rois = locate_anchors(self.anchors, self.feat_size, self.feat_stride, convert_tensor=True)
                self.rois = self.rois.type(torch.cuda.FloatTensor)
        return cls, prob, bbox_2d, bbox_3d, feat_size, self.rois
def build(conf, phase='train'):
    """Construct an RPN detection network on a DenseNet-121 backbone.

    conf: network configuration object forwarded to RPN.
    phase: 'train' or anything else (case-insensitive); selects whether
        ImageNet-pretrained backbone weights are downloaded and whether the
        returned network is left in train() or eval() mode.
    Returns the assembled RPN module.
    """
    is_training = phase.lower() == 'train'
    # Pretrained weights are only fetched when building for training.
    backbone = models.densenet121(pretrained=is_training)
    net = RPN(phase, backbone.features, conf)
    if is_training:
        net.train()
    else:
        net.eval()
    return net
| 40.53125 | 117 | 0.662298 |
eba9f312dcc1755bb038bc6db71763297e2454e3 | 1,627 | py | Python | main.py | Sein-Jang/SRGAN | f6dd431f70857a95034b915c472f1e79210c537b | [
"MIT"
] | 1 | 2020-11-26T19:11:54.000Z | 2020-11-26T19:11:54.000Z | main.py | Sein-Jang/SRGAN | f6dd431f70857a95034b915c472f1e79210c537b | [
"MIT"
] | null | null | null | main.py | Sein-Jang/SRGAN | f6dd431f70857a95034b915c472f1e79210c537b | [
"MIT"
] | null | null | null | import glob
import os
from conf import *
from data import *
from model import generator, discriminator
from train import GeneratorTrainer, SrganTrainer

# Resolve the dataset directory relative to this file so the script works
# regardless of the current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATASET = os.path.join(BASE_DIR, 'dataset')

"""
DATA
"""
train_hr_img_path = glob.glob(os.path.join(DATASET, TRAIN['HR_img_path']))
train_lr_img_path = glob.glob(os.path.join(DATASET, TRAIN['LR_img_path']))
valid_hr_img_path = glob.glob(os.path.join(DATASET, VALID['HR_img_path']))
valid_lr_img_path = glob.glob(os.path.join(DATASET, VALID['LR_img_path']))

train_data_loader = make_dataset(train_lr_img_path, train_hr_img_path)
train_dataset = train_data_loader.dataset(TRAIN['batch_size'], random_transform=True, repeat_count=None)
valid_data_loader = make_dataset(valid_lr_img_path, valid_hr_img_path)
# BUG FIX: the validation dataset was previously built from
# ``train_data_loader`` (so validation ran on *training* images and
# ``valid_data_loader`` was never used); build it from the validation loader.
valid_dataset = valid_data_loader.dataset(VALID['batch_size'], random_transform=False, repeat_count=1)

"""
TRAINS
"""
# Stage 1: pre-train the generator alone and checkpoint its weights.
pre_trainer = GeneratorTrainer(model=generator(), checkpoint_dir='./ckpt/pre', learning_rate=TRAIN['lr'])
pre_trainer.train(train_dataset, valid_dataset.take(10), steps=TRAIN['n_epoch_init'], eval_every=1000)
pre_trainer.model.save_weights('weights/generator.h5')

# Stage 2: adversarial (SRGAN) fine-tuning, initialised from the
# pre-trained generator weights saved above.
gan_generator = generator()
gan_generator.load_weights('weights/generator.h5')
gan_trainer = SrganTrainer(generator=gan_generator, discriminator=discriminator())
gan_trainer.train(train_dataset, steps=TRAIN['n_epoch'])
gan_trainer.generator.save_weights('weights/gan_generator.h5')
gan_trainer.discriminator.save_weights('weights/gan_discriminator.h5')
3e238b9022095ce6d9f9922cd5c9ff722a108eff | 1,485 | py | Python | homework-10-su21-falkishi-main/kmeans.py | falkishi/Python-HWs | 04504c21a7fc5dc4b9fe7820549d9cdf98c7aa91 | [
"Apache-2.0"
] | null | null | null | homework-10-su21-falkishi-main/kmeans.py | falkishi/Python-HWs | 04504c21a7fc5dc4b9fe7820549d9cdf98c7aa91 | [
"Apache-2.0"
] | null | null | null | homework-10-su21-falkishi-main/kmeans.py | falkishi/Python-HWs | 04504c21a7fc5dc4b9fe7820549d9cdf98c7aa91 | [
"Apache-2.0"
] | null | null | null | from cluster import *
from point import *
def kmeans(pointdata, clusterdata):
    """Run Lloyd's k-means until no point changes cluster.

    pointdata: array-like of point coordinates, consumed by makePointList.
    clusterdata: array-like of initial centers, consumed by createClusters.
    Returns the list of clusters with their centers in final positions.
    """
    points = makePointList(pointdata)
    clusters = createClusters(clusterdata)

    moved = True
    while moved:
        # Assign every point to its nearest cluster; moveToCluster reports
        # whether that point actually switched clusters.
        moves = [p.moveToCluster(p.closest(clusters)) for p in points]
        # Recompute each center from its current membership.
        for cluster in clusters:
            cluster.updateCenter()
        # Converged once an entire pass produced no reassignment.
        moved = any(moves)

    return clusters
if __name__ == '__main__':
    # Quick demo: seven 2-D points forming two well-separated groups,
    # clustered from two initial centers; print each cluster's membership.
    data = np.array([[0.5, 2.5], [0.3, 4.5], [-0.5, 3], [0, 1.2], [10, -5], [11, -4.5], [8, -3]], dtype=float)
    centers = np.array([[0, 0], [1, 1]], dtype=float)
    for c in kmeans(data, centers):
        c.printAllPoints()
| 29.7 | 110 | 0.56835 |
8457aa0514b807b533e827e5b42203d18255a164 | 2,456 | py | Python | lib/rucio/tests/test_temporary_did.py | zzaiin/rucio-1 | 374e51a371a9c6ae8f7dcc7e2a90e5665bc3a65f | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_temporary_did.py | zzaiin/rucio-1 | 374e51a371a9c6ae8f7dcc7e2a90e5665bc3a65f | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_temporary_did.py | zzaiin/rucio-1 | 374e51a371a9c6ae8f7dcc7e2a90e5665bc3a65f | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2016-2018
# - Joaquin Bogado <jbogado@linti.unlp.edu.ar>, 2018
# - Andrew Lister, <andrew.lister@stfc.ac.uk>, 2019
from nose.tools import assert_equal
from rucio.common.utils import generate_uuid
from rucio.core.temporary_did import (add_temporary_dids, compose, delete_temporary_dids,
list_expired_temporary_dids)
from rucio.core.rse import get_rse_id
from rucio.client.didclient import DIDClient
def test_core_temporary_dids():
    """ TMP DATA IDENTIFIERS (CORE): """
    # Create ten temporary DIDs on the MOCK RSE.
    rse_id = get_rse_id(rse='MOCK')
    temporary_dids = [{'scope': 'mock',
                       'name': 'object_%s' % generate_uuid(),
                       'rse_id': rse_id,
                       'bytes': 1,
                       'path': None}
                      for _ in range(10)]
    add_temporary_dids(dids=temporary_dids, account='root')
    # Compose the temporary DIDs into a single file DID; afterwards the
    # sources are expected to show up as expired.
    compose(scope='mock', name='file_%s' % generate_uuid(), rse_id=rse_id,
            bytes=10, sources=temporary_dids, account='root',
            md5=None, adler32=None, pfn=None, meta={}, rules=[],
            parent_scope=None, parent_name=None)
    # All ten expired temporary DIDs should be listed and then deleted.
    expired = list_expired_temporary_dids(rse_id=rse_id, limit=10)
    rowcount = delete_temporary_dids(dids=expired)
    assert_equal(rowcount, 10)
def test_client_temporary_dids():
    """ TMP DATA IDENTIFIERS (CLIENT): """
    client = DIDClient()
    # Register ten temporary DIDs through the client API on the MOCK RSE.
    temporary_dids = [{'scope': 'mock',
                       'name': 'object_%s' % generate_uuid(),
                       'rse': 'MOCK',
                       'bytes': 1,
                       'path': None}
                      for _ in range(10)]
    client.add_temporary_dids(dids=temporary_dids)
| 35.594203 | 89 | 0.629479 |
fd8a0f8da309b202b6ff6b6e93be349520d8d16f | 1,901 | py | Python | util.py | xuqy1981/ppgn | 083d99f123c727363eb69033355880e3822540c5 | [
"MIT"
] | 1 | 2021-04-08T03:47:47.000Z | 2021-04-08T03:47:47.000Z | util.py | CRyan2016/ppgn | 083d99f123c727363eb69033355880e3822540c5 | [
"MIT"
] | null | null | null | util.py | CRyan2016/ppgn | 083d99f123c727363eb69033355880e3822540c5 | [
"MIT"
] | 1 | 2017-01-10T08:00:43.000Z | 2017-01-10T08:00:43.000Z | import numpy as np
import scipy.misc
import subprocess
def normalize(img, out_range=(0.,1.), in_range=None):
    """Clip ``img`` to ``in_range`` and linearly rescale it into ``out_range``.

    img: array-like of values; not modified (a copy is taken).
    out_range: (low, high) target interval.
    in_range: (min, max) source interval; if None, the data's own
        np.min/np.max are used (so the full output range is always hit).
    Returns a new ndarray with values mapped into ``out_range``.

    Note: if min and max coincide the rescaling divides by zero, matching
    the original behavior (NumPy emits a warning and yields nan/inf).
    """
    # BUG FIX: use an identity test instead of ``not in_range`` -- the
    # truthiness form raises "truth value of an array is ambiguous" for
    # ndarray arguments and mis-handles falsy containers.
    if in_range is None:
        min_val = np.min(img)
        max_val = np.max(img)
    else:
        min_val = in_range[0]
        max_val = in_range[1]
    result = np.copy(img)
    # Clip to the source interval before rescaling.
    result[result > max_val] = max_val
    result[result < min_val] = min_val
    result = (result - min_val) / (max_val - min_val) * (out_range[1] - out_range[0]) + out_range[0]
    return result
def deprocess(images, out_range=(0.,1.), in_range=None):
    """Convert the first image of an (N, C, H, W) batch to an (H, W, 3) array.

    images: batch array; only images[0] is used. The channel-last copy is
        written into an (H, W, 3) buffer, so C is assumed to be 3 (or 1,
        via broadcasting) -- confirm against callers.
    out_range / in_range: passed through to normalize().
    Returns the normalized (H, W, 3) float array.
    """
    # Removed unused locals (batch size and channel count were computed but
    # never read in the original).
    ih = images.shape[2]
    iw = images.shape[3]
    result = np.zeros((ih, iw, 3))
    # Channels-first -> channels-last: (C, H, W) -> (H, W, C).
    result[:] = images[0].copy().transpose((1, 2, 0))
    # Normalize before saving
    result = normalize(result, out_range, in_range)
    return result
def get_image_size(data_shape):
    '''
    Return the spatial part of a 4-D NCHW shape,
    e.g. (227, 227) from (1, 3, 227, 227).
    '''
    # Guard clause: anything other than a 4-D shape is rejected.
    if len(data_shape) != 4:
        raise Exception("Data shape invalid.")
    return data_shape[2:]
def save_image(img, name):
    '''
    Normalize and save the image.

    img: (N, C, H, W) batch in BGR channel order; deprocess() keeps only the
        first image and rescales values from the (-120, 120) range.
    name: destination file path (format inferred from the extension).

    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2, so this
    assumes an older SciPy (or a compatible shim) is installed -- confirm.
    '''
    img = img[:,::-1, :, :] # Convert from BGR to RGB
    output_img = deprocess(img, in_range=(-120,120))
    scipy.misc.imsave(name, output_img)
def write_label_to_img(filename, label):
    """Append ``label`` as a caption strip below the image at ``filename``.

    Requires ImageMagick's ``convert`` tool on PATH; the image file is
    modified in place (a blank 10px strip is spliced on, then the label
    text is rendered and appended).
    """
    # SECURITY FIX: the original interpolated filename/label into a shell
    # string with shell=True, so metacharacters in either could inject
    # commands. Pass an argument list with the default shell=False instead.
    subprocess.call(["convert", filename,
                     "-gravity", "south", "-splice", "0x10", filename])
    subprocess.call(["convert", filename, "-append",
                     "-gravity", "Center", "-pointsize", "30",
                     "label:%s" % label,
                     "-border", "0x0", "-append", filename])
def convert_words_into_numbers(vocab_file, words):
    """Map each word to its 1-based line index in ``vocab_file``.

    vocab_file: path to a text file with one vocabulary entry per line.
    words: iterable of words; each must appear in the vocabulary, otherwise
        list.index raises ValueError.
    Returns the list of 1-based indices with a trailing 0 (the <unk> id).
    """
    # Load vocabulary. FIX: use a context manager -- the original opened the
    # file and never closed it, leaking the handle (especially if a lookup
    # below raised).
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    numbers = [lines.index(w) + 1 for w in words]
    numbers.append(0)  # <unk>
    return numbers
| 29.246154 | 109 | 0.620726 |
2fe33fdd34dbbaafcb7754a8974b8cc0b01ea64a | 22,469 | py | Python | deribit_api_python/openapi_client/models/trades_volumes.py | jDally987/deribit-simple-gui | 60c91f8e11b541b0e59cbd23625639a9b9f0dd43 | [
"MIT"
] | 5 | 2019-06-06T04:48:34.000Z | 2019-10-14T00:31:21.000Z | deribit_api_python/openapi_client/models/trades_volumes.py | jDally987/deribit-simple-gui | 60c91f8e11b541b0e59cbd23625639a9b9f0dd43 | [
"MIT"
] | 1 | 2019-10-15T08:55:21.000Z | 2019-10-15T08:55:21.000Z | deribit_api_python/openapi_client/models/trades_volumes.py | jDally987/deribit-simple-gui | 60c91f8e11b541b0e59cbd23625639a9b9f0dd43 | [
"MIT"
] | 4 | 2019-07-27T16:50:14.000Z | 2019-11-13T21:03:50.000Z | # coding: utf-8
"""
Deribit API
#Overview Deribit provides three different interfaces to access the API: * [JSON-RPC over Websocket](#json-rpc) * [JSON-RPC over HTTP](#json-rpc) * [FIX](#fix-api) (Financial Information eXchange) With the API Console you can use and test the JSON-RPC API, both via HTTP and via Websocket. To visit the API console, go to __Account > API tab > API Console tab.__ ##Naming Deribit tradeable assets or instruments use the following system of naming: |Kind|Examples|Template|Comments| |----|--------|--------|--------| |Future|<code>BTC-25MAR16</code>, <code>BTC-5AUG16</code>|<code>BTC-DMMMYY</code>|<code>BTC</code> is currency, <code>DMMMYY</code> is expiration date, <code>D</code> stands for day of month (1 or 2 digits), <code>MMM</code> - month (3 first letters in English), <code>YY</code> stands for year.| |Perpetual|<code>BTC-PERPETUAL</code> ||Perpetual contract for currency <code>BTC</code>.| |Option|<code>BTC-25MAR16-420-C</code>, <code>BTC-5AUG16-580-P</code>|<code>BTC-DMMMYY-STRIKE-K</code>|<code>STRIKE</code> is option strike price in USD. Template <code>K</code> is option kind: <code>C</code> for call options or <code>P</code> for put options.| # JSON-RPC JSON-RPC is a light-weight remote procedure call (RPC) protocol. The [JSON-RPC specification](https://www.jsonrpc.org/specification) defines the data structures that are used for the messages that are exchanged between client and server, as well as the rules around their processing. JSON-RPC uses JSON (RFC 4627) as data format. JSON-RPC is transport agnostic: it does not specify which transport mechanism must be used. The Deribit API supports both Websocket (preferred) and HTTP (with limitations: subscriptions are not supported over HTTP). 
## Request messages > An example of a request message: ```json { \"jsonrpc\": \"2.0\", \"id\": 8066, \"method\": \"public/ticker\", \"params\": { \"instrument\": \"BTC-24AUG18-6500-P\" } } ``` According to the JSON-RPC sepcification the requests must be JSON objects with the following fields. |Name|Type|Description| |----|----|-----------| |jsonrpc|string|The version of the JSON-RPC spec: \"2.0\"| |id|integer or string|An identifier of the request. If it is included, then the response will contain the same identifier| |method|string|The method to be invoked| |params|object|The parameters values for the method. The field names must match with the expected parameter names. The parameters that are expected are described in the documentation for the methods, below.| <aside class=\"warning\"> The JSON-RPC specification describes two features that are currently not supported by the API: <ul> <li>Specification of parameter values by position</li> <li>Batch requests</li> </ul> </aside> ## Response messages > An example of a response message: ```json { \"jsonrpc\": \"2.0\", \"id\": 5239, \"testnet\": false, \"result\": [ { \"currency\": \"BTC\", \"currencyLong\": \"Bitcoin\", \"minConfirmation\": 2, \"txFee\": 0.0006, \"isActive\": true, \"coinType\": \"BITCOIN\", \"baseAddress\": null } ], \"usIn\": 1535043730126248, \"usOut\": 1535043730126250, \"usDiff\": 2 } ``` The JSON-RPC API always responds with a JSON object with the following fields. |Name|Type|Description| |----|----|-----------| |id|integer|This is the same id that was sent in the request.| |result|any|If successful, the result of the API call. The format for the result is described with each method.| |error|error object|Only present if there was an error invoking the method. The error object is described below.| |testnet|boolean|Indicates whether the API in use is actually the test API. 
<code>false</code> for production server, <code>true</code> for test server.| |usIn|integer|The timestamp when the requests was received (microseconds since the Unix epoch)| |usOut|integer|The timestamp when the response was sent (microseconds since the Unix epoch)| |usDiff|integer|The number of microseconds that was spent handling the request| <aside class=\"notice\"> The fields <code>testnet</code>, <code>usIn</code>, <code>usOut</code> and <code>usDiff</code> are not part of the JSON-RPC standard. <p>In order not to clutter the examples they will generally be omitted from the example code.</p> </aside> > An example of a response with an error: ```json { \"jsonrpc\": \"2.0\", \"id\": 8163, \"error\": { \"code\": 11050, \"message\": \"bad_request\" }, \"testnet\": false, \"usIn\": 1535037392434763, \"usOut\": 1535037392448119, \"usDiff\": 13356 } ``` In case of an error the response message will contain the error field, with as value an object with the following with the following fields: |Name|Type|Description |----|----|-----------| |code|integer|A number that indicates the kind of error.| |message|string|A short description that indicates the kind of error.| |data|any|Additional data about the error. This field may be omitted.| ## Notifications > An example of a notification: ```json { \"jsonrpc\": \"2.0\", \"method\": \"subscription\", \"params\": { \"channel\": \"deribit_price_index.btc_usd\", \"data\": { \"timestamp\": 1535098298227, \"price\": 6521.17, \"index_name\": \"btc_usd\" } } } ``` API users can subscribe to certain types of notifications. This means that they will receive JSON-RPC notification-messages from the server when certain events occur, such as changes to the index price or changes to the order book for a certain instrument. The API methods [public/subscribe](#public-subscribe) and [private/subscribe](#private-subscribe) are used to set up a subscription. 
Since HTTP does not support the sending of messages from server to client, these methods are only availble when using the Websocket transport mechanism. At the moment of subscription a \"channel\" must be specified. The channel determines the type of events that will be received. See [Subscriptions](#subscriptions) for more details about the channels. In accordance with the JSON-RPC specification, the format of a notification is that of a request message without an <code>id</code> field. The value of the <code>method</code> field will always be <code>\"subscription\"</code>. The <code>params</code> field will always be an object with 2 members: <code>channel</code> and <code>data</code>. The value of the <code>channel</code> member is the name of the channel (a string). The value of the <code>data</code> member is an object that contains data that is specific for the channel. ## Authentication > An example of a JSON request with token: ```json { \"id\": 5647, \"method\": \"private/get_subaccounts\", \"params\": { \"access_token\": \"67SVutDoVZSzkUStHSuk51WntMNBJ5mh5DYZhwzpiqDF\" } } ``` The API consists of `public` and `private` methods. The public methods do not require authentication. The private methods use OAuth 2.0 authentication. This means that a valid OAuth access token must be included in the request, which can get achived by calling method [public/auth](#public-auth). When the token was assigned to the user, it should be passed along, with other request parameters, back to the server: |Connection type|Access token placement |----|-----------| |**Websocket**|Inside request JSON parameters, as an `access_token` field| |**HTTP (REST)**|Header `Authorization: bearer ```Token``` ` value| ### Additional authorization method - basic user credentials <span style=\"color:red\"><b> ! 
Not recommended - however, it could be useful for quick testing API</b></span></br> Every `private` method could be accessed by providing, inside HTTP `Authorization: Basic XXX` header, values with user `ClientId` and assigned `ClientSecret` (both values can be found on the API page on the Deribit website) encoded with `Base64`: <code>Authorization: Basic BASE64(`ClientId` + `:` + `ClientSecret`)</code> ### Additional authorization method - Deribit signature credentials The Derbit service provides dedicated authorization method, which harness user generated signature to increase security level for passing request data. Generated value is passed inside `Authorization` header, coded as: <code>Authorization: deri-hmac-sha256 id=```ClientId```,ts=```Timestamp```,sig=```Signature```,nonce=```Nonce```</code> where: |Deribit credential|Description |----|-----------| |*ClientId*|Can be found on the API page on the Deribit website| |*Timestamp*|Time when the request was generated - given as **miliseconds**. It's valid for **60 seconds** since generation, after that time any request with an old timestamp will be rejected.| |*Signature*|Value for signature calculated as described below | |*Nonce*|Single usage, user generated initialization vector for the server token| The signature is generated by the following formula: <code> Signature = HEX_STRING( HMAC-SHA256( ClientSecret, StringToSign ) );</code></br> <code> StringToSign = Timestamp + \"\\n\" + Nonce + \"\\n\" + RequestData;</code></br> <code> RequestData = UPPERCASE(HTTP_METHOD()) + \"\\n\" + URI() + \"\\n\" + RequestBody + \"\\n\";</code></br> e.g. 
(using shell with ```openssl``` tool): <code> ClientId=AAAAAAAAAAA</code></br> <code> ClientSecret=ABCD</code></br> <code> Timestamp=$( date +%s000 )</code></br> <code> Nonce=$( cat /dev/urandom | tr -dc 'a-z0-9' | head -c8 )</code></br> <code> URI=\"/api/v2/private/get_account_summary?currency=BTC\"</code></br> <code> HttpMethod=GET</code></br> <code> Body=\"\"</code></br></br> <code> Signature=$( echo -ne \"${Timestamp}\\n${Nonce}\\n${HttpMethod}\\n${URI}\\n${Body}\\n\" | openssl sha256 -r -hmac \"$ClientSecret\" | cut -f1 -d' ' )</code></br></br> <code> echo $Signature</code></br></br> <code> shell output> ea40d5e5e4fae235ab22b61da98121fbf4acdc06db03d632e23c66bcccb90d2c (**WARNING**: Exact value depends on current timestamp and client credentials</code></br></br> <code> curl -s -X ${HttpMethod} -H \"Authorization: deri-hmac-sha256 id=${ClientId},ts=${Timestamp},nonce=${Nonce},sig=${Signature}\" \"https://www.deribit.com${URI}\"</code></br></br> ### Additional authorization method - signature credentials (WebSocket API) When connecting through Websocket, user can request for authorization using ```client_credential``` method, which requires providing following parameters (as a part of JSON request): |JSON parameter|Description |----|-----------| |*grant_type*|Must be **client_signature**| |*client_id*|Can be found on the API page on the Deribit website| |*timestamp*|Time when the request was generated - given as **miliseconds**. 
It's valid for **60 seconds** since generation, after that time any request with an old timestamp will be rejected.| |*signature*|Value for signature calculated as described below | |*nonce*|Single usage, user generated initialization vector for the server token| |*data*|**Optional** field, which contains any user specific value| The signature is generated by the following formula: <code> StringToSign = Timestamp + \"\\n\" + Nonce + \"\\n\" + Data;</code></br> <code> Signature = HEX_STRING( HMAC-SHA256( ClientSecret, StringToSign ) );</code></br> e.g. (using shell with ```openssl``` tool): <code> ClientId=AAAAAAAAAAA</code></br> <code> ClientSecret=ABCD</code></br> <code> Timestamp=$( date +%s000 ) # e.g. 1554883365000 </code></br> <code> Nonce=$( cat /dev/urandom | tr -dc 'a-z0-9' | head -c8 ) # e.g. fdbmmz79 </code></br> <code> Data=\"\"</code></br></br> <code> Signature=$( echo -ne \"${Timestamp}\\n${Nonce}\\n${Data}\\n\" | openssl sha256 -r -hmac \"$ClientSecret\" | cut -f1 -d' ' )</code></br></br> <code> echo $Signature</code></br></br> <code> shell output> e20c9cd5639d41f8bbc88f4d699c4baf94a4f0ee320e9a116b72743c449eb994 (**WARNING**: Exact value depends on current timestamp and client credentials</code></br></br> You can also check the signature value using some online tools like, e.g: [https://codebeautify.org/hmac-generator](https://codebeautify.org/hmac-generator) (but don't forget about adding *newline* after each part of the hashed text and remember that you **should use** it only with your **test credentials**). 
Here's a sample JSON request created using the values from the example above: <code> { </br> \"jsonrpc\" : \"2.0\", </br> \"id\" : 9929, </br> \"method\" : \"public/auth\", </br> \"params\" : </br> { </br> \"grant_type\" : \"client_signature\", </br> \"client_id\" : \"AAAAAAAAAAA\", </br> \"timestamp\": \"1554883365000\", </br> \"nonce\": \"fdbmmz79\", </br> \"data\": \"\", </br> \"signature\" : \"e20c9cd5639d41f8bbc88f4d699c4baf94a4f0ee320e9a116b72743c449eb994\" </br> } </br> } </br> </code> ### Access scope When asking for `access token` user can provide the required access level (called `scope`) which defines what type of functionality he/she wants to use, and whether requests are only going to check for some data or also to update them. Scopes are required and checked for `private` methods, so if you plan to use only `public` information you can stay with values assigned by default. |Scope|Description |----|-----------| |*account:read*|Access to **account** methods - read only data| |*account:read_write*|Access to **account** methods - allows to manage account settings, add subaccounts, etc.| |*trade:read*|Access to **trade** methods - read only data| |*trade:read_write*|Access to **trade** methods - required to create and modify orders| |*wallet:read*|Access to **wallet** methods - read only data| |*wallet:read_write*|Access to **wallet** methods - allows to withdraw, generate new deposit address, etc.| |*wallet:none*, *account:none*, *trade:none*|Blocked access to specified functionality| <span style=\"color:red\">**NOTICE:**</span> Depending on choosing an authentication method (```grant type```) some scopes could be narrowed by the server. e.g. 
when ```grant_type = client_credentials``` and ```scope = wallet:read_write``` it's modified by the server as ```scope = wallet:read```\" ## JSON-RPC over websocket Websocket is the prefered transport mechanism for the JSON-RPC API, because it is faster and because it can support [subscriptions](#subscriptions) and [cancel on disconnect](#private-enable_cancel_on_disconnect). The code examples that can be found next to each of the methods show how websockets can be used from Python or Javascript/node.js. ## JSON-RPC over HTTP Besides websockets it is also possible to use the API via HTTP. The code examples for 'shell' show how this can be done using curl. Note that subscriptions and cancel on disconnect are not supported via HTTP. #Methods # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TradesVolumes(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Generated boilerplate: regenerate from the OpenAPI spec rather than
    # editing the logic by hand.

    # Attribute name -> declared OpenAPI type (consumed by to_dict()).
    openapi_types = {
        'calls_volume': 'float',
        'puts_volume': 'float',
        'currency_pair': 'str',
        'futures_volume': 'float'
    }

    # Attribute name -> JSON key on the wire (identical for this model).
    attribute_map = {
        'calls_volume': 'calls_volume',
        'puts_volume': 'puts_volume',
        'currency_pair': 'currency_pair',
        'futures_volume': 'futures_volume'
    }

    def __init__(self, calls_volume=None, puts_volume=None, currency_pair=None, futures_volume=None):  # noqa: E501
        """TradesVolumes - a model defined in OpenAPI"""  # noqa: E501

        # Private backing fields for the properties below. Every setter
        # rejects None, so all four constructor arguments are effectively
        # required despite the None defaults.
        self._calls_volume = None
        self._puts_volume = None
        self._currency_pair = None
        self._futures_volume = None
        self.discriminator = None

        # Assign through the property setters so validation runs.
        self.calls_volume = calls_volume
        self.puts_volume = puts_volume
        self.currency_pair = currency_pair
        self.futures_volume = futures_volume

    @property
    def calls_volume(self):
        """Gets the calls_volume of this TradesVolumes.  # noqa: E501

        Total 24h trade volume for call options. This is expressed in the base currency, e.g. BTC for `btc_usd`  # noqa: E501

        :return: The calls_volume of this TradesVolumes.  # noqa: E501
        :rtype: float
        """
        return self._calls_volume

    @calls_volume.setter
    def calls_volume(self, calls_volume):
        """Sets the calls_volume of this TradesVolumes.

        Total 24h trade volume for call options. This is expressed in the base currency, e.g. BTC for `btc_usd`  # noqa: E501

        :param calls_volume: The calls_volume of this TradesVolumes.  # noqa: E501
        :type: float
        """
        if calls_volume is None:
            raise ValueError("Invalid value for `calls_volume`, must not be `None`")  # noqa: E501

        self._calls_volume = calls_volume

    @property
    def puts_volume(self):
        """Gets the puts_volume of this TradesVolumes.  # noqa: E501

        Total 24h trade volume for put options. This is expressed in the base currency, e.g. BTC for `btc_usd`  # noqa: E501

        :return: The puts_volume of this TradesVolumes.  # noqa: E501
        :rtype: float
        """
        return self._puts_volume

    @puts_volume.setter
    def puts_volume(self, puts_volume):
        """Sets the puts_volume of this TradesVolumes.

        Total 24h trade volume for put options. This is expressed in the base currency, e.g. BTC for `btc_usd`  # noqa: E501

        :param puts_volume: The puts_volume of this TradesVolumes.  # noqa: E501
        :type: float
        """
        if puts_volume is None:
            raise ValueError("Invalid value for `puts_volume`, must not be `None`")  # noqa: E501

        self._puts_volume = puts_volume

    @property
    def currency_pair(self):
        """Gets the currency_pair of this TradesVolumes.  # noqa: E501

        Currency pair: `\"btc_usd\"` or `\"eth_usd\"`  # noqa: E501

        :return: The currency_pair of this TradesVolumes.  # noqa: E501
        :rtype: str
        """
        return self._currency_pair

    @currency_pair.setter
    def currency_pair(self, currency_pair):
        """Sets the currency_pair of this TradesVolumes.

        Currency pair: `\"btc_usd\"` or `\"eth_usd\"`  # noqa: E501

        :param currency_pair: The currency_pair of this TradesVolumes.  # noqa: E501
        :type: str
        """
        if currency_pair is None:
            raise ValueError("Invalid value for `currency_pair`, must not be `None`")  # noqa: E501
        # Enum validation: only the two supported index pairs are accepted.
        allowed_values = ["btc_usd", "eth_usd"]  # noqa: E501
        if currency_pair not in allowed_values:
            raise ValueError(
                "Invalid value for `currency_pair` ({0}), must be one of {1}"  # noqa: E501
                .format(currency_pair, allowed_values)
            )

        self._currency_pair = currency_pair

    @property
    def futures_volume(self):
        """Gets the futures_volume of this TradesVolumes.  # noqa: E501

        Total 24h trade volume for futures. This is expressed in the base currency, e.g. BTC for `btc_usd`  # noqa: E501

        :return: The futures_volume of this TradesVolumes.  # noqa: E501
        :rtype: float
        """
        return self._futures_volume

    @futures_volume.setter
    def futures_volume(self, futures_volume):
        """Sets the futures_volume of this TradesVolumes.

        Total 24h trade volume for futures. This is expressed in the base currency, e.g. BTC for `btc_usd`  # noqa: E501

        :param futures_volume: The futures_volume of this TradesVolumes.  # noqa: E501
        :type: float
        """
        if futures_volume is None:
            raise ValueError("Invalid value for `futures_volume`, must not be `None`")  # noqa: E501

        self._futures_volume = futures_volume

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize any nested models inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize any nested models inside dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TradesVolumes):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 107.507177 | 15,697 | 0.66505 |
32a15e620103759eb08160ad4d3b2632765dae31 | 100,730 | py | Python | pandas/tests/io/test_sql.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | 1 | 2020-08-18T16:49:16.000Z | 2020-08-18T16:49:16.000Z | pandas/tests/io/test_sql.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/io/test_sql.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | 2 | 2021-07-17T19:28:31.000Z | 2021-11-28T17:14:58.000Z | """SQL io tests
The SQL tests are broken down into different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"read_no_parameters_with_percent": {
"sqlite": "SELECT * FROM iris WHERE Name LIKE '%'",
"mysql": "SELECT * FROM iris WHERE `Name` LIKE '%'",
"postgresql": "SELECT * FROM iris WHERE \"Name\" LIKE '%'",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
    """Shared pytest teardown: drop every test table, then close the connection."""
    def teardown_method(self, method):
        # A failed setup may never have attached ``conn``; bail out early.
        if not hasattr(self, "conn"):
            return
        for table_name in self._get_all_tables():
            self.drop_table(table_name)
        self._close_conn()
class MySQLMixIn(MixInBase):
    """MySQL-flavoured cleanup helpers for a pymysql DBAPI connection."""
    def drop_table(self, table_name):
        quoted = sql._get_valid_mysql_name(table_name)
        cursor = self.conn.cursor()
        cursor.execute(f"DROP TABLE IF EXISTS {quoted}")
        self.conn.commit()
    def _get_all_tables(self):
        cursor = self.conn.cursor()
        cursor.execute("SHOW TABLES")
        return [row[0] for row in cursor.fetchall()]
    def _close_conn(self):
        from pymysql.err import Error
        try:
            self.conn.close()
        except Error:
            # Connection may already be gone; closing is best-effort.
            pass
class SQLiteMixIn(MixInBase):
    """sqlite3-flavoured cleanup helpers."""
    def drop_table(self, table_name):
        quoted = sql._get_valid_sqlite_name(table_name)
        self.conn.execute(f"DROP TABLE IF EXISTS {quoted}")
        self.conn.commit()
    def _get_all_tables(self):
        cursor = self.conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table'"
        )
        return [row[0] for row in cursor.fetchall()]
    def _close_conn(self):
        self.conn.close()
class SQLAlchemyMixIn(MixInBase):
    """SQLAlchemy-engine cleanup helpers."""
    def drop_table(self, table_name):
        sql.SQLDatabase(self.conn).drop_table(table_name)
    def _get_all_tables(self):
        # Reflect the current schema and report every known table name.
        meta = sqlalchemy.schema.MetaData(bind=self.conn)
        meta.reflect()
        return meta.tables.keys()
    def _close_conn(self):
        # https://docs.sqlalchemy.org/en/13/core/connections.html#engine-disposal
        self.conn.dispose()
class PandasSQLTest:
    """
    Base class with common private methods for SQLAlchemy and fallback cases.
    """
    def _get_exec(self):
        # SQLAlchemy engines and sqlite3 connections expose ``execute``
        # directly; other DBAPI connections need a cursor first.
        if hasattr(self.conn, "execute"):
            return self.conn
        else:
            return self.conn.cursor()
    @pytest.fixture(params=[("io", "data", "csv", "iris.csv")])
    def load_iris_data(self, datapath, request):
        # Recreate the ``iris`` table for the current flavor and load the
        # CSV fixture into it row by row.
        iris_csv_file = datapath(*request.param)
        if not hasattr(self, "conn"):
            self.setup_connect()
        self.drop_table("iris")
        self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
        with open(iris_csv_file, mode="r", newline=None) as iris_csv:
            r = csv.reader(iris_csv)
            next(r)  # skip header row
            ins = SQL_STRINGS["insert_iris"][self.flavor]
            for row in r:
                self._get_exec().execute(ins, row)
    def _load_iris_view(self):
        # ``iris_view`` mirrors the ``iris`` table (see SQL_STRINGS["create_view"]).
        self.drop_table("iris_view")
        self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
    def _check_iris_loaded_frame(self, iris_frame):
        pytype = iris_frame.dtypes[0].type
        row = iris_frame.iloc[0]
        assert issubclass(pytype, np.floating)
        # NOTE(review): tm.equalContents returns a bool and its result is not
        # asserted here, so this check can never fail — TODO confirm intended.
        tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
    def _load_test1_data(self):
        # Small float frame with a string "index" column (not a real index).
        columns = ["index", "A", "B", "C", "D"]
        data = [
            (
                "2000-01-03 00:00:00",
                0.980268513777,
                3.68573087906,
                -0.364216805298,
                -1.15973806169,
            ),
            (
                "2000-01-04 00:00:00",
                1.04791624281,
                -0.0412318367011,
                -0.16181208307,
                0.212549316967,
            ),
            (
                "2000-01-05 00:00:00",
                0.498580885705,
                0.731167677815,
                -0.537677223318,
                1.34627041952,
            ),
            (
                "2000-01-06 00:00:00",
                1.12020151869,
                1.56762092543,
                0.00364077397681,
                0.67525259227,
            ),
        ]
        self.test_frame1 = DataFrame(data, columns=columns)
    def _load_test2_data(self):
        # Mixed-dtype frame: int, str, float, bool and a datetime column.
        df = DataFrame(
            dict(
                A=[4, 1, 3, 6],
                B=["asd", "gsq", "ylt", "jkl"],
                C=[1.1, 3.1, 6.9, 5.3],
                D=[False, True, True, False],
                E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
            )
        )
        df["E"] = to_datetime(df["E"])
        self.test_frame2 = df
    def _load_test3_data(self):
        # Frame exercising large/negative integers alongside floats.
        columns = ["index", "A", "B"]
        data = [
            ("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
            ("2000-01-04 00:00:00", -29, -0.0412318367011),
            ("2000-01-05 00:00:00", 20000, 0.731167677815),
            ("2000-01-06 00:00:00", -290867, 1.56762092543),
        ]
        self.test_frame3 = DataFrame(data, columns=columns)
    def _load_raw_sql(self):
        # Populate ``types_test_data`` with two rows covering the type-mapping
        # tests; ins["fields"] selects the per-flavor column subset
        # (e.g. sqlite/mysql have no "DateColWithTz").
        self.drop_table("types_test_data")
        self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
        ins = SQL_STRINGS["insert_test_types"][self.flavor]
        data = [
            {
                "TextCol": "first",
                "DateCol": "2000-01-03 00:00:00",
                "DateColWithTz": "2000-01-01 00:00:00-08:00",
                "IntDateCol": 535852800,
                "IntDateOnlyCol": 20101010,
                "FloatCol": 10.10,
                "IntCol": 1,
                "BoolCol": False,
                "IntColWithNull": 1,
                "BoolColWithNull": False,
            },
            {
                "TextCol": "first",
                "DateCol": "2000-01-04 00:00:00",
                "DateColWithTz": "2000-06-01 00:00:00-07:00",
                "IntDateCol": 1356998400,
                "IntDateOnlyCol": 20101212,
                "FloatCol": 10.10,
                "IntCol": 1,
                "BoolCol": False,
                "IntColWithNull": None,
                "BoolColWithNull": None,
            },
        ]
        for d in data:
            self._get_exec().execute(
                ins["query"], [d[field] for field in ins["fields"]]
            )
    def _count_rows(self, table_name):
        # table_name is interpolated directly; callers only pass fixed names.
        result = (
            self._get_exec()
            .execute(f"SELECT count(*) AS count_1 FROM {table_name}")
            .fetchone()
        )
        return result[0]
    def _read_sql_iris(self):
        iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
        self._check_iris_loaded_frame(iris_frame)
    def _read_sql_iris_parameter(self):
        query = SQL_STRINGS["read_parameters"][self.flavor]
        params = ["Iris-setosa", 5.1]
        iris_frame = self.pandasSQL.read_query(query, params=params)
        self._check_iris_loaded_frame(iris_frame)
    def _read_sql_iris_named_parameter(self):
        query = SQL_STRINGS["read_named_parameters"][self.flavor]
        params = {"name": "Iris-setosa", "length": 5.1}
        iris_frame = self.pandasSQL.read_query(query, params=params)
        self._check_iris_loaded_frame(iris_frame)
    def _read_sql_iris_no_parameter_with_percent(self):
        # A literal '%' must survive when no params are supplied.
        query = SQL_STRINGS["read_no_parameters_with_percent"][self.flavor]
        iris_frame = self.pandasSQL.read_query(query, params=None)
        self._check_iris_loaded_frame(iris_frame)
    def _to_sql(self, method=None):
        self.drop_table("test_frame1")
        self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
        assert self.pandasSQL.has_table("test_frame1")
        num_entries = len(self.test_frame1)
        num_rows = self._count_rows("test_frame1")
        assert num_rows == num_entries
        # Nuke table
        self.drop_table("test_frame1")
    def _to_sql_empty(self):
        # Writing a zero-row frame should still succeed (creates the table).
        self.drop_table("test_frame1")
        self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
    def _to_sql_fail(self):
        self.drop_table("test_frame1")
        self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
        assert self.pandasSQL.has_table("test_frame1")
        msg = "Table 'test_frame1' already exists"
        with pytest.raises(ValueError, match=msg):
            self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
        self.drop_table("test_frame1")
    def _to_sql_replace(self):
        self.drop_table("test_frame1")
        self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
        # Add to table again
        self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
        assert self.pandasSQL.has_table("test_frame1")
        # replace drops the old rows, so the count equals one frame's length
        num_entries = len(self.test_frame1)
        num_rows = self._count_rows("test_frame1")
        assert num_rows == num_entries
        self.drop_table("test_frame1")
    def _to_sql_append(self):
        # Nuke table just in case
        self.drop_table("test_frame1")
        self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
        # Add to table again
        self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
        assert self.pandasSQL.has_table("test_frame1")
        # append keeps the old rows, so the count doubles
        num_entries = 2 * len(self.test_frame1)
        num_rows = self._count_rows("test_frame1")
        assert num_rows == num_entries
        self.drop_table("test_frame1")
    def _to_sql_method_callable(self):
        check = []  # used to double check function below is really being used
        def sample(pd_table, conn, keys, data_iter):
            # Custom insert callable mirroring the signature documented for
            # DataFrame.to_sql(method=...).
            check.append(1)
            data = [dict(zip(keys, row)) for row in data_iter]
            conn.execute(pd_table.table.insert(), data)
        self.drop_table("test_frame1")
        self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
        assert self.pandasSQL.has_table("test_frame1")
        assert check == [1]
        num_entries = len(self.test_frame1)
        num_rows = self._count_rows("test_frame1")
        assert num_rows == num_entries
        # Nuke table
        self.drop_table("test_frame1")
    def _roundtrip(self):
        self.drop_table("test_frame_roundtrip")
        self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
        result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
        # the written index comes back as a "level_0" column; restore it
        result.set_index("level_0", inplace=True)
        # result.index.astype(int)
        result.index.name = None
        tm.assert_frame_equal(result, self.test_frame1)
    def _execute_sql(self):
        # drop_sql = "DROP TABLE IF EXISTS test"  # should already be done
        iris_results = self.pandasSQL.execute("SELECT * FROM iris")
        row = iris_results.fetchone()
        # NOTE(review): result of tm.equalContents is not asserted — TODO confirm.
        tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
    def _to_sql_save_index(self):
        df = DataFrame.from_records(
            [(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
        )
        self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
        # _get_index_columns is provided by the flavor-specific subclass
        ix_cols = self._get_index_columns("test_to_sql_saves_index")
        assert ix_cols == [["A"]]
    def _transaction_test(self):
        with self.pandasSQL.run_transaction() as trans:
            trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
        class DummyException(Exception):
            pass
        # Make sure when transaction is rolled back, no rows get inserted
        ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
        try:
            with self.pandasSQL.run_transaction() as trans:
                trans.execute(ins_sql)
                raise DummyException("error")
        except DummyException:
            # ignore raised exception
            pass
        res = self.pandasSQL.read_query("SELECT * FROM test_trans")
        assert len(res) == 0
        # Make sure when transaction is committed, rows do get inserted
        with self.pandasSQL.run_transaction() as trans:
            trans.execute(ins_sql)
        res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
        assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
    """
    Base class to test the public API.
    From this two classes are derived to run these tests for both the
    sqlalchemy mode (`TestSQLApi`) and the fallback mode
    (`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
    tests for the different sql flavours are included in `_TestSQLAlchemy`.
    Notes:
    flavor can always be passed even in SQLAlchemy mode,
    should be correctly ignored.
    we don't use drop_table because that isn't part of the public api
    """
    flavor = "sqlite"
    mode: str  # set by subclasses: "sqlalchemy" or "fallback"
    def setup_connect(self):
        self.conn = self.connect()
    @pytest.fixture(autouse=True)
    def setup_method(self, load_iris_data):
        self.load_test_data_and_sql()
    def load_test_data_and_sql(self):
        self._load_iris_view()
        self._load_test1_data()
        self._load_test2_data()
        self._load_test3_data()
        self._load_raw_sql()
    def test_read_sql_iris(self):
        iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
        self._check_iris_loaded_frame(iris_frame)
    def test_read_sql_view(self):
        iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
        self._check_iris_loaded_frame(iris_frame)
    def test_to_sql(self):
        sql.to_sql(self.test_frame1, "test_frame1", self.conn)
        assert sql.has_table("test_frame1", self.conn)
    def test_to_sql_fail(self):
        sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
        assert sql.has_table("test_frame2", self.conn)
        msg = "Table 'test_frame2' already exists"
        with pytest.raises(ValueError, match=msg):
            sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
    def test_to_sql_replace(self):
        sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
        # Add to table again
        sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
        assert sql.has_table("test_frame3", self.conn)
        num_entries = len(self.test_frame1)
        num_rows = self._count_rows("test_frame3")
        assert num_rows == num_entries
    def test_to_sql_append(self):
        sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
        # Add to table again
        sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
        assert sql.has_table("test_frame4", self.conn)
        num_entries = 2 * len(self.test_frame1)
        num_rows = self._count_rows("test_frame4")
        assert num_rows == num_entries
    def test_to_sql_type_mapping(self):
        # large/negative ints in test_frame3 must round-trip unchanged
        sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
        result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
        tm.assert_frame_equal(self.test_frame3, result)
    def test_to_sql_series(self):
        s = Series(np.arange(5, dtype="int64"), name="series")
        sql.to_sql(s, "test_series", self.conn, index=False)
        s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
        tm.assert_frame_equal(s.to_frame(), s2)
    def test_roundtrip(self):
        sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
        result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
        # HACK!
        result.index = self.test_frame1.index
        result.set_index("level_0", inplace=True)
        result.index.astype(int)
        result.index.name = None
        tm.assert_frame_equal(result, self.test_frame1)
    def test_roundtrip_chunksize(self):
        sql.to_sql(
            self.test_frame1,
            "test_frame_roundtrip",
            con=self.conn,
            index=False,
            chunksize=2,
        )
        result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
        tm.assert_frame_equal(result, self.test_frame1)
    def test_execute_sql(self):
        # drop_sql = "DROP TABLE IF EXISTS test"  # should already be done
        iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
        row = iris_results.fetchone()
        # NOTE(review): result of tm.equalContents is not asserted — TODO confirm.
        tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
    def test_date_parsing(self):
        # Test date parsing in read_sql
        # No Parsing
        df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
        assert not issubclass(df.DateCol.dtype.type, np.datetime64)
        df = sql.read_sql_query(
            "SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
        )
        assert issubclass(df.DateCol.dtype.type, np.datetime64)
        assert df.DateCol.tolist() == [
            pd.Timestamp(2000, 1, 3, 0, 0, 0),
            pd.Timestamp(2000, 1, 4, 0, 0, 0),
        ]
        # parse_dates with an explicit strftime format string
        df = sql.read_sql_query(
            "SELECT * FROM types_test_data",
            self.conn,
            parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
        )
        assert issubclass(df.DateCol.dtype.type, np.datetime64)
        assert df.DateCol.tolist() == [
            pd.Timestamp(2000, 1, 3, 0, 0, 0),
            pd.Timestamp(2000, 1, 4, 0, 0, 0),
        ]
        # integer columns parsed as epoch seconds
        df = sql.read_sql_query(
            "SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
        )
        assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
        assert df.IntDateCol.tolist() == [
            pd.Timestamp(1986, 12, 25, 0, 0, 0),
            pd.Timestamp(2013, 1, 1, 0, 0, 0),
        ]
        df = sql.read_sql_query(
            "SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
        )
        assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
        assert df.IntDateCol.tolist() == [
            pd.Timestamp(1986, 12, 25, 0, 0, 0),
            pd.Timestamp(2013, 1, 1, 0, 0, 0),
        ]
        # integers like 20101010 parsed via a %Y%m%d format
        df = sql.read_sql_query(
            "SELECT * FROM types_test_data",
            self.conn,
            parse_dates={"IntDateOnlyCol": "%Y%m%d"},
        )
        assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
        assert df.IntDateOnlyCol.tolist() == [
            pd.Timestamp("2010-10-10"),
            pd.Timestamp("2010-12-12"),
        ]
    def test_date_and_index(self):
        # Test case where same column appears in parse_date and index_col
        df = sql.read_sql_query(
            "SELECT * FROM types_test_data",
            self.conn,
            index_col="DateCol",
            parse_dates=["DateCol", "IntDateCol"],
        )
        assert issubclass(df.index.dtype.type, np.datetime64)
        assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
    def test_timedelta(self):
        # see #6921
        df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
        # timedeltas are written as integer nanoseconds, with a warning
        with tm.assert_produces_warning(UserWarning):
            df.to_sql("test_timedelta", self.conn)
        result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
        tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
    def test_complex_raises(self):
        df = DataFrame({"a": [1 + 1j, 2j]})
        msg = "Complex datatypes not supported"
        with pytest.raises(ValueError, match=msg):
            df.to_sql("test_complex", self.conn)
    @pytest.mark.parametrize(
        "index_name,index_label,expected",
        [
            # no index name, defaults to 'index'
            (None, None, "index"),
            # specifying index_label
            (None, "other_label", "other_label"),
            # using the index name
            ("index_name", None, "index_name"),
            # has index name, but specifying index_label
            ("index_name", "other_label", "other_label"),
            # index name is integer
            (0, None, "0"),
            # index name is None but index label is integer
            (None, 0, "0"),
        ],
    )
    def test_to_sql_index_label(self, index_name, index_label, expected):
        temp_frame = DataFrame({"col1": range(4)})
        temp_frame.index.name = index_name
        query = "SELECT * FROM test_index_label"
        sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
        frame = sql.read_sql_query(query, self.conn)
        assert frame.columns[0] == expected
    def test_to_sql_index_label_multiindex(self):
        temp_frame = DataFrame(
            {"col1": range(4)},
            index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
        )
        # no index name, defaults to 'level_0' and 'level_1'
        sql.to_sql(temp_frame, "test_index_label", self.conn)
        frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
        assert frame.columns[0] == "level_0"
        assert frame.columns[1] == "level_1"
        # specifying index_label
        sql.to_sql(
            temp_frame,
            "test_index_label",
            self.conn,
            if_exists="replace",
            index_label=["A", "B"],
        )
        frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
        assert frame.columns[:2].tolist() == ["A", "B"]
        # using the index name
        temp_frame.index.names = ["A", "B"]
        sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
        frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
        assert frame.columns[:2].tolist() == ["A", "B"]
        # has index name, but specifying index_label
        sql.to_sql(
            temp_frame,
            "test_index_label",
            self.conn,
            if_exists="replace",
            index_label=["C", "D"],
        )
        frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
        assert frame.columns[:2].tolist() == ["C", "D"]
        msg = "Length of 'index_label' should match number of levels, which is 2"
        with pytest.raises(ValueError, match=msg):
            sql.to_sql(
                temp_frame,
                "test_index_label",
                self.conn,
                if_exists="replace",
                index_label="C",
            )
    def test_multiindex_roundtrip(self):
        df = DataFrame.from_records(
            [(1, 2.1, "line1"), (2, 1.5, "line2")],
            columns=["A", "B", "C"],
            index=["A", "B"],
        )
        df.to_sql("test_multiindex_roundtrip", self.conn)
        result = sql.read_sql_query(
            "SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
        )
        tm.assert_frame_equal(df, result, check_index_type=True)
    def test_integer_col_names(self):
        # integer column labels must be stringified for the database
        df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
        sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
    def test_get_schema(self):
        create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
        assert "CREATE" in create_sql
    def test_get_schema_dtypes(self):
        float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
        # dtype override: a SQLAlchemy type object or a raw SQL type string
        dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
        create_sql = sql.get_schema(
            float_frame, "test", con=self.conn, dtype={"b": dtype}
        )
        assert "CREATE" in create_sql
        assert "INTEGER" in create_sql
    def test_get_schema_keys(self):
        frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
        create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
        constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
        assert constraint_sentence in create_sql
        # multiple columns as key (GH10385)
        create_sql = sql.get_schema(
            self.test_frame1, "test", con=self.conn, keys=["A", "B"]
        )
        constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
        assert constraint_sentence in create_sql
    def test_chunksize_read(self):
        df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
        df.to_sql("test_chunksize", self.conn, index=False)
        # reading the query in one time
        res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
        # reading the query in chunks with read_sql_query
        res2 = DataFrame()
        i = 0
        sizes = [5, 5, 5, 5, 2]  # 22 rows in chunks of 5
        for chunk in sql.read_sql_query(
            "select * from test_chunksize", self.conn, chunksize=5
        ):
            res2 = concat([res2, chunk], ignore_index=True)
            assert len(chunk) == sizes[i]
            i += 1
        tm.assert_frame_equal(res1, res2)
        # reading the query in chunks with read_sql_query
        if self.mode == "sqlalchemy":
            res3 = DataFrame()
            i = 0
            sizes = [5, 5, 5, 5, 2]
            for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
                res3 = concat([res3, chunk], ignore_index=True)
                assert len(chunk) == sizes[i]
                i += 1
            tm.assert_frame_equal(res1, res3)
    def test_categorical(self):
        # GH8624
        # test that categorical gets written correctly as dense column
        df = DataFrame(
            {
                "person_id": [1, 2, 3],
                "person_name": ["John P. Doe", "Jane Dove", "John P. Doe"],
            }
        )
        df2 = df.copy()
        df2["person_name"] = df2["person_name"].astype("category")
        df2.to_sql("test_categorical", self.conn, index=False)
        res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
        tm.assert_frame_equal(res, df)
    def test_unicode_column_name(self):
        # GH 11431
        df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
        df.to_sql("test_unicode", self.conn, index=False)
    def test_escaped_table_name(self):
        # GH 13206
        df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
        df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
        res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
        tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
    """
    Test the public API as it would be used directly
    Tests for `read_sql_table` are included here, as this is specific for the
    sqlalchemy mode.
    """
    flavor = "sqlite"
    mode = "sqlalchemy"
    def connect(self):
        return sqlalchemy.create_engine("sqlite:///:memory:")
    def test_read_table_columns(self):
        # test columns argument in read_table
        sql.to_sql(self.test_frame1, "test_frame", self.conn)
        cols = ["A", "B"]
        result = sql.read_sql_table("test_frame", self.conn, columns=cols)
        assert result.columns.tolist() == cols
    def test_read_table_index_col(self):
        # test columns argument in read_table
        sql.to_sql(self.test_frame1, "test_frame", self.conn)
        result = sql.read_sql_table("test_frame", self.conn, index_col="index")
        assert result.index.names == ["index"]
        result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
        assert result.index.names == ["A", "B"]
        result = sql.read_sql_table(
            "test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
        )
        assert result.index.names == ["A", "B"]
        assert result.columns.tolist() == ["C", "D"]
    def test_read_sql_delegate(self):
        # read_sql should dispatch to read_sql_query for a query string
        iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
        iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
        tm.assert_frame_equal(iris_frame1, iris_frame2)
        # ... and to read_sql_table for a bare table name
        iris_frame1 = sql.read_sql_table("iris", self.conn)
        iris_frame2 = sql.read_sql("iris", self.conn)
        tm.assert_frame_equal(iris_frame1, iris_frame2)
    def test_not_reflect_all_tables(self):
        # create invalid table
        qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
        self.conn.execute(qry)
        qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
        self.conn.execute(qry)
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning.
            sql.read_sql_table("other_table", self.conn)
            sql.read_sql_query("SELECT * FROM other_table", self.conn)
            # Verify some things
            assert len(w) == 0
    def test_warning_case_insensitive_table_name(self):
        # see gh-7815
        #
        # We can't test that this warning is triggered, a the database
        # configuration would have to be altered. But here we test that
        # the warning is certainly NOT triggered in a normal case.
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # This should not trigger a Warning
            self.test_frame1.to_sql("CaseSensitive", self.conn)
            # Verify some things
            assert len(w) == 0
    def _get_index_columns(self, tbl_name):
        from sqlalchemy.engine import reflection
        insp = reflection.Inspector.from_engine(self.conn)
        # NOTE(review): ``tbl_name`` is ignored and "test_index_saved" is
        # hard-coded, while the caller (_to_sql_save_index) passes
        # "test_to_sql_saves_index" — TODO confirm which table is intended.
        ixs = insp.get_indexes("test_index_saved")
        ixs = [i["column_names"] for i in ixs]
        return ixs
    def test_sqlalchemy_type_mapping(self):
        # Test Timestamp objects (no datetime64 because of timezone) (GH9085)
        df = DataFrame(
            {"time": to_datetime(["201412120154", "201412110254"], utc=True)}
        )
        db = sql.SQLDatabase(self.conn)
        table = sql.SQLTable("test_type", db, frame=df)
        # GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
        assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
    def test_database_uri_string(self):
        # Test read_sql and .to_sql method with a database URI (GH10654)
        test_frame1 = self.test_frame1
        # db_uri = 'sqlite:///:memory:' # raises
        # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
        # "iris": syntax error [SQL: 'iris']
        with tm.ensure_clean() as name:
            db_uri = "sqlite:///" + name
            table = "iris"
            test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
            test_frame2 = sql.read_sql(table, db_uri)
            test_frame3 = sql.read_sql_table(table, db_uri)
            query = "SELECT * FROM iris"
            test_frame4 = sql.read_sql_query(query, db_uri)
        tm.assert_frame_equal(test_frame1, test_frame2)
        tm.assert_frame_equal(test_frame1, test_frame3)
        tm.assert_frame_equal(test_frame1, test_frame4)
        # using driver that will not be installed on Travis to trigger error
        # in sqlalchemy.create_engine -> test passing of this error to user
        try:
            # the rest of this test depends on pg8000's being absent
            import pg8000  # noqa
            pytest.skip("pg8000 is installed")
        except ImportError:
            pass
        db_uri = "postgresql+pg8000://user:pass@host/dbname"
        with pytest.raises(ImportError, match="pg8000"):
            sql.read_sql("select * from table", db_uri)
    def _make_iris_table_metadata(self):
        # SQLAlchemy Core metadata matching the iris fixture table
        sa = sqlalchemy
        metadata = sa.MetaData()
        iris = sa.Table(
            "iris",
            metadata,
            sa.Column("SepalLength", sa.REAL),
            sa.Column("SepalWidth", sa.REAL),
            sa.Column("PetalLength", sa.REAL),
            sa.Column("PetalWidth", sa.REAL),
            sa.Column("Name", sa.TEXT),
        )
        return iris
    def test_query_by_text_obj(self):
        # WIP : GH10846
        name_text = sqlalchemy.text("select * from iris where name=:name")
        iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
        all_names = set(iris_df["Name"])
        assert all_names == {"Iris-versicolor"}
    def test_query_by_select_obj(self):
        # WIP : GH10846
        iris = self._make_iris_table_metadata()
        name_select = sqlalchemy.select([iris]).where(
            iris.c.Name == sqlalchemy.bindparam("name")
        )
        iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
        all_names = set(iris_df["Name"])
        assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
    """
    Mixin that runs each test against a SQLAlchemy Connection instead of an
    Engine, wrapping the test in a transaction that is rolled back afterwards.
    """
    @pytest.fixture(autouse=True)
    def setup_method(self, load_iris_data):
        super().load_test_data_and_sql()
        engine = self.conn
        connection = engine.connect()
        self.__transaction = connection.begin()
        self.pandasSQL = sql.SQLDatabase(connection)
        self.__engine = engine
        self.conn = connection
        yield
        # teardown: undo all work done on the connection, then restore the
        # engine-level attributes so later fixtures see a clean state
        self.__transaction.rollback()
        self.conn.close()
        self.conn = self.__engine
        self.pandasSQL = sql.SQLDatabase(self.__engine)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
    """Run the full TestSQLApi suite over a Connection rather than an Engine."""
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
    """
    Test the public sqlite connection fallback API
    """
    flavor = "sqlite"
    mode = "fallback"
    def connect(self, database=":memory:"):
        return sqlite3.connect(database)
    def test_sql_open_close(self):
        # Test if the IO in the database still work if the connection closed
        # between the writing and reading (as in many real situations).
        with tm.ensure_clean() as name:
            conn = self.connect(name)
            sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
            conn.close()
            conn = self.connect(name)
            result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
            conn.close()
        tm.assert_frame_equal(self.test_frame3, result)
    @pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
    def test_con_string_import_error(self):
        conn = "mysql://root@localhost/pandas_nosetest"
        msg = "Using URI string without sqlalchemy installed"
        with pytest.raises(ImportError, match=msg):
            sql.read_sql("SELECT * FROM iris", conn)
    def test_read_sql_delegate(self):
        iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
        iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
        tm.assert_frame_equal(iris_frame1, iris_frame2)
        # without sqlalchemy, a bare table name cannot be delegated
        msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
        with pytest.raises(sql.DatabaseError, match=msg):
            sql.read_sql("iris", self.conn)
    def test_safe_names_warning(self):
        # GH 6798
        df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "])  # has a space
        # warns on create table with spaces in names
        with tm.assert_produces_warning():
            sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
    def test_get_schema2(self):
        # without providing a connection object (available for backwards comp)
        create_sql = sql.get_schema(self.test_frame1, "test")
        assert "CREATE" in create_sql
    def _get_sqlite_column_type(self, schema, column):
        # Scan a CREATE TABLE statement for ``column`` and return its declared
        # SQL type. Note: strip('""') strips the '"' character (str.strip takes
        # a set of characters); and a line with no tokens would raise
        # IndexError — assumes sql_schema() never emits blank lines.
        for col in schema.split("\n"):
            if col.split()[0].strip('""') == column:
                return col.split()[1]
        raise ValueError(f"Column {column} not found")
    def test_sqlite_type_mapping(self):
        # Test Timestamp objects (no datetime64 because of timezone) (GH9085)
        df = DataFrame(
            {"time": to_datetime(["201412120154", "201412110254"], utc=True)}
        )
        db = sql.SQLiteDatabase(self.conn)
        table = sql.SQLiteTable("test_type", db, frame=df)
        schema = table.sql_schema()
        assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
    """
    Base class for testing the sqlalchemy backend.
    Subclasses for specific database types are created below. Tests that
    deviate for each flavor are overwritten there.
    """
    # Flavor string ("sqlite", "mysql", "postgresql") — set by subclasses.
    flavor: str
    @pytest.fixture(autouse=True, scope="class")
    def setup_class(cls):
        # One engine per test class; setup_import/setup_driver may skip
        # the whole class when the driver is unavailable.
        cls.setup_import()
        cls.setup_driver()
        conn = cls.conn = cls.connect()
        conn.connect()
    def load_test_data_and_sql(self):
        self._load_raw_sql()
        self._load_test1_data()
    @pytest.fixture(autouse=True)
    def setup_method(self, load_iris_data):
        self.load_test_data_and_sql()
    @classmethod
    def setup_import(cls):
        # Skip this test if SQLAlchemy not available
        if not SQLALCHEMY_INSTALLED:
            pytest.skip("SQLAlchemy not installed")
    @classmethod
    def setup_driver(cls):
        # Subclasses must set cls.driver (and skip if not importable).
        raise NotImplementedError()
    @classmethod
    def connect(cls):
        # Subclasses must return a SQLAlchemy engine for their flavor.
        raise NotImplementedError()
    def setup_connect(self):
        try:
            self.conn = self.connect()
            self.pandasSQL = sql.SQLDatabase(self.conn)
            # to test if connection can be made:
            self.conn.connect()
        except sqlalchemy.exc.OperationalError:
            pytest.skip(f"Can't connect to {self.flavor} server")
    # The following thin wrappers delegate to shared PandasSQLTest helpers.
    def test_read_sql(self):
        self._read_sql_iris()
    def test_read_sql_parameter(self):
        self._read_sql_iris_parameter()
    def test_read_sql_named_parameter(self):
        self._read_sql_iris_named_parameter()
    def test_to_sql(self):
        self._to_sql()
    def test_to_sql_empty(self):
        self._to_sql_empty()
    def test_to_sql_fail(self):
        self._to_sql_fail()
    def test_to_sql_replace(self):
        self._to_sql_replace()
    def test_to_sql_append(self):
        self._to_sql_append()
    def test_to_sql_method_multi(self):
        self._to_sql(method="multi")
    def test_to_sql_method_callable(self):
        self._to_sql_method_callable()
    def test_create_table(self):
        temp_conn = self.connect()
        temp_frame = DataFrame(
            {"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
        )
        pandasSQL = sql.SQLDatabase(temp_conn)
        pandasSQL.to_sql(temp_frame, "temp_frame")
        assert temp_conn.has_table("temp_frame")
    def test_drop_table(self):
        temp_conn = self.connect()
        temp_frame = DataFrame(
            {"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
        )
        pandasSQL = sql.SQLDatabase(temp_conn)
        pandasSQL.to_sql(temp_frame, "temp_frame")
        assert temp_conn.has_table("temp_frame")
        pandasSQL.drop_table("temp_frame")
        assert not temp_conn.has_table("temp_frame")
    def test_roundtrip(self):
        self._roundtrip()
    def test_execute_sql(self):
        self._execute_sql()
    def test_read_table(self):
        iris_frame = sql.read_sql_table("iris", con=self.conn)
        self._check_iris_loaded_frame(iris_frame)
    def test_read_table_columns(self):
        # Duplicated column name in `columns` is deliberate.
        iris_frame = sql.read_sql_table(
            "iris", con=self.conn, columns=["SepalLength", "SepalLength"]
        )
        tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
    def test_read_table_absent_raises(self):
        msg = "Table this_doesnt_exist not found"
        with pytest.raises(ValueError, match=msg):
            sql.read_sql_table("this_doesnt_exist", con=self.conn)
    def test_default_type_conversion(self):
        df = sql.read_sql_table("types_test_data", self.conn)
        assert issubclass(df.FloatCol.dtype.type, np.floating)
        assert issubclass(df.IntCol.dtype.type, np.integer)
        assert issubclass(df.BoolCol.dtype.type, np.bool_)
        # Int column with NA values stays as float
        assert issubclass(df.IntColWithNull.dtype.type, np.floating)
        # Bool column with NA values becomes object
        assert issubclass(df.BoolColWithNull.dtype.type, object)
    def test_bigint(self):
        # int64 should be converted to BigInteger, GH7433
        df = DataFrame(data={"i64": [2 ** 62]})
        df.to_sql("test_bigint", self.conn, index=False)
        result = sql.read_sql_table("test_bigint", self.conn)
        tm.assert_frame_equal(df, result)
    def test_default_date_load(self):
        df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so shouldn't parse, but
        # MySQL SHOULD be converted.
        assert issubclass(df.DateCol.dtype.type, np.datetime64)
    def test_datetime_with_timezone(self):
        # edge case that converts postgresql datetime with time zone types
        # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
        # but should be more natural, so coerce to datetime64[ns] for now
        def check(col):
            # check that a column is either datetime64[ns]
            # or datetime64[ns, UTC]
            if is_datetime64_dtype(col.dtype):
                # "2000-01-01 00:00:00-08:00" should convert to
                # "2000-01-01 08:00:00"
                assert col[0] == Timestamp("2000-01-01 08:00:00")
                # "2000-06-01 00:00:00-07:00" should convert to
                # "2000-06-01 07:00:00"
                assert col[1] == Timestamp("2000-06-01 07:00:00")
            elif is_datetime64tz_dtype(col.dtype):
                assert str(col.dt.tz) == "UTC"
                # "2000-01-01 00:00:00-08:00" should convert to
                # "2000-01-01 08:00:00"
                # "2000-06-01 00:00:00-07:00" should convert to
                # "2000-06-01 07:00:00"
                # GH 6415
                expected_data = [
                    Timestamp("2000-01-01 08:00:00", tz="UTC"),
                    Timestamp("2000-06-01 07:00:00", tz="UTC"),
                ]
                expected = Series(expected_data, name=col.name)
                tm.assert_series_equal(col, expected)
            else:
                raise AssertionError(
                    f"DateCol loaded with incorrect type -> {col.dtype}"
                )
        # GH11216
        df = pd.read_sql_query("select * from types_test_data", self.conn)
        if not hasattr(df, "DateColWithTz"):
            pytest.skip("no column with datetime with time zone")
        # this is parsed on Travis (linux), but not on macosx for some reason
        # even with the same versions of psycopg2 & sqlalchemy, possibly a
        # Postgresql server version difference
        col = df.DateColWithTz
        assert is_datetime64tz_dtype(col.dtype)
        df = pd.read_sql_query(
            "select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
        )
        if not hasattr(df, "DateColWithTz"):
            pytest.skip("no column with datetime with time zone")
        col = df.DateColWithTz
        assert is_datetime64tz_dtype(col.dtype)
        assert str(col.dt.tz) == "UTC"
        check(df.DateColWithTz)
        # Chunked reads must concatenate to the same tz-aware result.
        df = pd.concat(
            list(
                pd.read_sql_query(
                    "select * from types_test_data", self.conn, chunksize=1
                )
            ),
            ignore_index=True,
        )
        col = df.DateColWithTz
        assert is_datetime64tz_dtype(col.dtype)
        assert str(col.dt.tz) == "UTC"
        expected = sql.read_sql_table("types_test_data", self.conn)
        col = expected.DateColWithTz
        assert is_datetime64tz_dtype(col.dtype)
        tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
        # xref #7139
        # this might or might not be converted depending on the postgres driver
        df = sql.read_sql_table("types_test_data", self.conn)
        check(df.DateColWithTz)
    def test_datetime_with_timezone_roundtrip(self):
        # GH 9086
        # Write datetimetz data to a db and read it back
        # For dbs that support timestamps with timezones, should get back UTC
        # otherwise naive data should be returned
        expected = DataFrame(
            {"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
        )
        expected.to_sql("test_datetime_tz", self.conn, index=False)
        if self.flavor == "postgresql":
            # SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
            expected["A"] = expected["A"].dt.tz_convert("UTC")
        else:
            # Otherwise, timestamps are returned as local, naive
            expected["A"] = expected["A"].dt.tz_localize(None)
        result = sql.read_sql_table("test_datetime_tz", self.conn)
        tm.assert_frame_equal(result, expected)
        result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
        if self.flavor == "sqlite":
            # read_sql_query does not return datetime type like read_sql_table
            assert isinstance(result.loc[0, "A"], str)
            result["A"] = to_datetime(result["A"])
        tm.assert_frame_equal(result, expected)
    def test_out_of_bounds_datetime(self):
        # GH 26761
        # Dates beyond the Timestamp range should come back as NaT.
        data = pd.DataFrame({"date": datetime(9999, 1, 1)}, index=[0])
        data.to_sql("test_datetime_obb", self.conn, index=False)
        result = sql.read_sql_table("test_datetime_obb", self.conn)
        expected = pd.DataFrame([pd.NaT], columns=["date"])
        tm.assert_frame_equal(result, expected)
    def test_naive_datetimeindex_roundtrip(self):
        # GH 23510
        # Ensure that a naive DatetimeIndex isn't converted to UTC
        dates = date_range("2018-01-01", periods=5, freq="6H")._with_freq(None)
        expected = DataFrame({"nums": range(5)}, index=dates)
        expected.to_sql("foo_table", self.conn, index_label="info_date")
        result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
        # result index with gain a name from a set_index operation; expected
        tm.assert_frame_equal(result, expected, check_names=False)
    def test_date_parsing(self):
        # No Parsing
        df = sql.read_sql_table("types_test_data", self.conn)
        expected_type = object if self.flavor == "sqlite" else np.datetime64
        assert issubclass(df.DateCol.dtype.type, expected_type)
        # parse_dates as list of column names
        df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
        assert issubclass(df.DateCol.dtype.type, np.datetime64)
        # parse_dates as {column: format string}
        df = sql.read_sql_table(
            "types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
        )
        assert issubclass(df.DateCol.dtype.type, np.datetime64)
        # parse_dates as {column: kwargs dict}
        df = sql.read_sql_table(
            "types_test_data",
            self.conn,
            parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
        )
        assert issubclass(df.DateCol.dtype.type, np.datetime64)
        df = sql.read_sql_table(
            "types_test_data", self.conn, parse_dates=["IntDateCol"]
        )
        assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
        # integer epoch columns with an explicit unit
        df = sql.read_sql_table(
            "types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
        )
        assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
        df = sql.read_sql_table(
            "types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
        )
        assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
    def test_datetime(self):
        df = DataFrame(
            {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
        )
        df.to_sql("test_datetime", self.conn)
        # with read_table -> type information from schema used
        result = sql.read_sql_table("test_datetime", self.conn)
        result = result.drop("index", axis=1)
        tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
        result = result.drop("index", axis=1)
        if self.flavor == "sqlite":
            assert isinstance(result.loc[0, "A"], str)
            result["A"] = to_datetime(result["A"])
            tm.assert_frame_equal(result, df)
        else:
            tm.assert_frame_equal(result, df)
    def test_datetime_NaT(self):
        df = DataFrame(
            {"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
        )
        df.loc[1, "A"] = np.nan
        df.to_sql("test_datetime", self.conn, index=False)
        # with read_table -> type information from schema used
        result = sql.read_sql_table("test_datetime", self.conn)
        tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
        if self.flavor == "sqlite":
            assert isinstance(result.loc[0, "A"], str)
            result["A"] = to_datetime(result["A"], errors="coerce")
            tm.assert_frame_equal(result, df)
        else:
            tm.assert_frame_equal(result, df)
    def test_datetime_date(self):
        # test support for datetime.date
        df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
        df.to_sql("test_date", self.conn, index=False)
        res = read_sql_table("test_date", self.conn)
        result = res["a"]
        expected = to_datetime(df["a"])
        # comes back as datetime64
        tm.assert_series_equal(result, expected)
    def test_datetime_time(self):
        # test support for datetime.time
        df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
        df.to_sql("test_time", self.conn, index=False)
        res = read_sql_table("test_time", self.conn)
        tm.assert_frame_equal(res, df)
        # GH8341
        # first, use the fallback to have the sqlite adapter put in place
        sqlite_conn = TestSQLiteFallback.connect()
        sql.to_sql(df, "test_time2", sqlite_conn, index=False)
        res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
        ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
        tm.assert_frame_equal(ref, res)  # check if adapter is in place
        # then test if sqlalchemy is unaffected by the sqlite adapter
        sql.to_sql(df, "test_time3", self.conn, index=False)
        if self.flavor == "sqlite":
            res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
            ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
            tm.assert_frame_equal(ref, res)
        res = sql.read_sql_table("test_time3", self.conn)
        tm.assert_frame_equal(df, res)
    def test_mixed_dtype_insert(self):
        # see GH6509
        s1 = Series(2 ** 25 + 1, dtype=np.int32)
        s2 = Series(0.0, dtype=np.float32)
        df = DataFrame({"s1": s1, "s2": s2})
        # write and read again
        df.to_sql("test_read_write", self.conn, index=False)
        df2 = sql.read_sql_table("test_read_write", self.conn)
        tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
    def test_nan_numeric(self):
        # NaNs in numeric float column
        df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
        df.to_sql("test_nan", self.conn, index=False)
        # with read_table
        result = sql.read_sql_table("test_nan", self.conn)
        tm.assert_frame_equal(result, df)
        # with read_sql
        result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
        tm.assert_frame_equal(result, df)
    def test_nan_fullcolumn(self):
        # full NaN column (numeric float column)
        df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
        df.to_sql("test_nan", self.conn, index=False)
        # with read_table
        result = sql.read_sql_table("test_nan", self.conn)
        tm.assert_frame_equal(result, df)
        # with read_sql -> not type info from table -> stays None
        df["B"] = df["B"].astype("object")
        df["B"] = None
        result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
        tm.assert_frame_equal(result, df)
    def test_nan_string(self):
        # NaNs in string column
        df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
        df.to_sql("test_nan", self.conn, index=False)
        # NaNs are coming back as None
        df.loc[2, "B"] = None
        # with read_table
        result = sql.read_sql_table("test_nan", self.conn)
        tm.assert_frame_equal(result, df)
        # with read_sql
        result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
        tm.assert_frame_equal(result, df)
    def _get_index_columns(self, tbl_name):
        # Helper: list of column-name lists, one per index on the table.
        from sqlalchemy.engine import reflection
        insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
        ixs = [i["column_names"] for i in ixs]
        return ixs
    def test_to_sql_save_index(self):
        self._to_sql_save_index()
    def test_transactions(self):
        self._transaction_test()
    def test_get_schema_create_table(self):
        # Use a dataframe without a bool column, since MySQL converts bool to
        # TINYINT (which read_sql_table returns as an int and causes a dtype
        # mismatch)
        self._load_test3_data()
        tbl = "test_get_schema_create_table"
        create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
        blank_test_df = self.test_frame3.iloc[:0]
        self.drop_table(tbl)
        self.conn.execute(create_sql)
        returned_df = sql.read_sql_table(tbl, self.conn)
        tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
        self.drop_table(tbl)
    def test_dtype(self):
        cols = ["A", "B"]
        data = [(0.8, True), (0.9, None)]
        df = DataFrame(data, columns=cols)
        df.to_sql("dtype_test", self.conn)
        df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
        meta = sqlalchemy.schema.MetaData(bind=self.conn)
        meta.reflect()
        sqltype = meta.tables["dtype_test2"].columns["B"].type
        assert isinstance(sqltype, sqlalchemy.TEXT)
        # Non-SQLAlchemy types must be rejected with a clear error.
        msg = "The type of B is not a SQLAlchemy type"
        with pytest.raises(ValueError, match=msg):
            df.to_sql("error", self.conn, dtype={"B": str})
        # GH9083
        df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
        meta.reflect()
        sqltype = meta.tables["dtype_test3"].columns["B"].type
        assert isinstance(sqltype, sqlalchemy.String)
        assert sqltype.length == 10
        # single dtype
        df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
        meta = sqlalchemy.schema.MetaData(bind=self.conn)
        meta.reflect()
        sqltypea = meta.tables["single_dtype_test"].columns["A"].type
        sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
        assert isinstance(sqltypea, sqlalchemy.TEXT)
        assert isinstance(sqltypeb, sqlalchemy.TEXT)
    def test_notna_dtype(self):
        # Columns with missing values must still get sensible SQL types.
        cols = {
            "Bool": Series([True, None]),
            "Date": Series([datetime(2012, 5, 1), None]),
            "Int": Series([1, None], dtype="object"),
            "Float": Series([1.1, None]),
        }
        df = DataFrame(cols)
        tbl = "notna_dtype_test"
        df.to_sql(tbl, self.conn)
        returned_df = sql.read_sql_table(tbl, self.conn)  # noqa
        meta = sqlalchemy.schema.MetaData(bind=self.conn)
        meta.reflect()
        if self.flavor == "mysql":
            my_type = sqltypes.Integer
        else:
            my_type = sqltypes.Boolean
        col_dict = meta.tables[tbl].columns
        assert isinstance(col_dict["Bool"].type, my_type)
        assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
        assert isinstance(col_dict["Int"].type, sqltypes.Integer)
        assert isinstance(col_dict["Float"].type, sqltypes.Float)
    def test_double_precision(self):
        V = 1.23456789101112131415
        df = DataFrame(
            {
                "f32": Series([V], dtype="float32"),
                "f64": Series([V], dtype="float64"),
                "f64_as_f32": Series([V], dtype="float64"),
                "i32": Series([5], dtype="int32"),
                "i64": Series([5], dtype="int64"),
            }
        )
        df.to_sql(
            "test_dtypes",
            self.conn,
            index=False,
            if_exists="replace",
            dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
        )
        res = sql.read_sql_table("test_dtypes", self.conn)
        # check precision of float64
        assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
        # check sql types
        meta = sqlalchemy.schema.MetaData(bind=self.conn)
        meta.reflect()
        col_dict = meta.tables["test_dtypes"].columns
        assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
        assert isinstance(col_dict["f32"].type, sqltypes.Float)
        assert isinstance(col_dict["f64"].type, sqltypes.Float)
        assert isinstance(col_dict["i32"].type, sqltypes.Integer)
        assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
    def test_connectable_issue_example(self):
        # This tests the example raised in issue
        # https://github.com/pandas-dev/pandas/issues/10104
        def foo(connection):
            query = "SELECT test_foo_data FROM test_foo_data"
            return sql.read_sql_query(query, con=connection)
        def bar(connection, data):
            data.to_sql(name="test_foo_data", con=connection, if_exists="append")
        def main(connectable):
            with connectable.connect() as conn:
                with conn.begin():
                    foo_data = conn.run_callable(foo)
                    conn.run_callable(bar, foo_data)
        DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
        main(self.conn)
    @pytest.mark.parametrize(
        "input",
        [{"foo": [np.inf]}, {"foo": [-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}],
    )
    def test_to_sql_with_negative_npinf(self, input):
        # GH 34431
        df = pd.DataFrame(input)
        if self.flavor == "mysql":
            # MySQL has no representation for +/-inf; must error.
            msg = "inf cannot be used with MySQL"
            with pytest.raises(ValueError, match=msg):
                df.to_sql("foobar", self.conn, index=False)
        else:
            df.to_sql("foobar", self.conn, index=False)
            res = sql.read_sql_table("foobar", self.conn)
            tm.assert_equal(df, res)
    def test_temporary_table(self):
        # read_sql_query must work inside the session that created a
        # TEMPORARY table (it is invisible to other connections).
        test_data = "Hello, World!"
        expected = DataFrame({"spam": [test_data]})
        Base = declarative.declarative_base()
        class Temporary(Base):
            __tablename__ = "temp_test"
            __table_args__ = {"prefixes": ["TEMPORARY"]}
            id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
            spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
        Session = sa_session.sessionmaker(bind=self.conn)
        session = Session()
        with session.transaction:
            conn = session.connection()
            Temporary.__table__.create(conn)
            session.add(Temporary(spam=test_data))
            session.flush()
            df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
        tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
    # Re-runs the sqlalchemy suite against a Connection (via
    # _EngineToConnMixin) instead of an Engine.
    def test_transactions(self):
        # Nested transaction rollbacks are unsupported on a plain Connection.
        pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
    """
    Test the sqlalchemy backend against an in-memory sqlite database.
    """
    flavor = "sqlite"
    @classmethod
    def connect(cls):
        return sqlalchemy.create_engine("sqlite:///:memory:")
    @classmethod
    def setup_driver(cls):
        # sqlite3 is built-in
        cls.driver = None
    def test_default_type_conversion(self):
        # Overrides the base test: sqlite's type system differs.
        df = sql.read_sql_table("types_test_data", self.conn)
        assert issubclass(df.FloatCol.dtype.type, np.floating)
        assert issubclass(df.IntCol.dtype.type, np.integer)
        # sqlite has no boolean type, so integer type is returned
        assert issubclass(df.BoolCol.dtype.type, np.integer)
        # Int column with NA values stays as float
        assert issubclass(df.IntColWithNull.dtype.type, np.floating)
        # Non-native Bool column with NA values stays as float
        assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
    def test_default_date_load(self):
        df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so shouldn't parse, but
        assert not issubclass(df.DateCol.dtype.type, np.datetime64)
    def test_bigint_warning(self):
        # test no warning for BIGINT (to support int64) is raised (GH7433)
        df = DataFrame({"a": [1, 2]}, dtype="int64")
        df.to_sql("test_bigintwarning", self.conn, index=False)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            sql.read_sql_table("test_bigintwarning", self.conn)
            assert len(w) == 0
class _TestMySQLAlchemy:
    """
    Test the sqlalchemy backend against an MySQL database.
    """
    flavor = "mysql"
    @classmethod
    def connect(cls):
        return sqlalchemy.create_engine(
            f"mysql+{cls.driver}://root@localhost/pandas_nosetest",
            connect_args=cls.connect_args,
        )
    @classmethod
    def setup_driver(cls):
        # Skip the whole class if pymysql is not importable.
        pymysql = pytest.importorskip("pymysql")
        cls.driver = "pymysql"
        # MULTI_STATEMENTS is needed for the stored-procedure test below.
        cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
    def test_default_type_conversion(self):
        # Overrides the base test: MySQL's type system differs.
        df = sql.read_sql_table("types_test_data", self.conn)
        assert issubclass(df.FloatCol.dtype.type, np.floating)
        assert issubclass(df.IntCol.dtype.type, np.integer)
        # MySQL has no real BOOL type (it's an alias for TINYINT)
        assert issubclass(df.BoolCol.dtype.type, np.integer)
        # Int column with NA values stays as float
        assert issubclass(df.IntColWithNull.dtype.type, np.floating)
        # Bool column with NA = int column with NA values => becomes float
        assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
    def test_read_procedure(self):
        import pymysql
        # see GH7324. Although it is more an api test, it is added to the
        # mysql tests as sqlite does not have stored procedures
        df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
        df.to_sql("test_procedure", self.conn, index=False)
        proc = """DROP PROCEDURE IF EXISTS get_testdb;
        CREATE PROCEDURE get_testdb ()
        BEGIN
            SELECT * FROM test_procedure;
        END"""
        connection = self.conn.connect()
        trans = connection.begin()
        try:
            r1 = connection.execute(proc)  # noqa
            trans.commit()
        except pymysql.Error:
            trans.rollback()
            raise
        res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
        tm.assert_frame_equal(df, res1)
        # test delegation to read_sql_query
        res2 = sql.read_sql("CALL get_testdb();", self.conn)
        tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
    """
    Test the sqlalchemy backend against an PostgreSQL database.
    """
    flavor = "postgresql"
    @classmethod
    def connect(cls):
        return sqlalchemy.create_engine(
            f"postgresql+{cls.driver}://postgres@localhost/pandas_nosetest"
        )
    @classmethod
    def setup_driver(cls):
        # Skip the whole class if psycopg2 is not importable.
        pytest.importorskip("psycopg2")
        cls.driver = "psycopg2"
    def test_schema_support(self):
        # only test this for postgresql (schema's not supported in
        # mysql/sqlite)
        df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
        # create a schema
        self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
        self.conn.execute("CREATE SCHEMA other;")
        # write dataframe to different schema's
        df.to_sql("test_schema_public", self.conn, index=False)
        df.to_sql(
            "test_schema_public_explicit", self.conn, index=False, schema="public"
        )
        df.to_sql("test_schema_other", self.conn, index=False, schema="other")
        # read dataframes back in
        res1 = sql.read_sql_table("test_schema_public", self.conn)
        tm.assert_frame_equal(df, res1)
        res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
        tm.assert_frame_equal(df, res2)
        res3 = sql.read_sql_table(
            "test_schema_public_explicit", self.conn, schema="public"
        )
        tm.assert_frame_equal(df, res3)
        res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
        tm.assert_frame_equal(df, res4)
        # reading from the wrong schema must fail
        msg = "Table test_schema_other not found"
        with pytest.raises(ValueError, match=msg):
            sql.read_sql_table("test_schema_other", self.conn, schema="public")
        # different if_exists options
        # create a schema
        self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
        self.conn.execute("CREATE SCHEMA other;")
        # write dataframe with different if_exists options
        df.to_sql("test_schema_other", self.conn, schema="other", index=False)
        df.to_sql(
            "test_schema_other",
            self.conn,
            schema="other",
            index=False,
            if_exists="replace",
        )
        df.to_sql(
            "test_schema_other",
            self.conn,
            schema="other",
            index=False,
            if_exists="append",
        )
        res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
        tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
        # specifying schema in user-provided meta
        # The schema won't be applied on another Connection
        # because of transactional schemas
        if isinstance(self.conn, sqlalchemy.engine.Engine):
            engine2 = self.connect()
            meta = sqlalchemy.MetaData(engine2, schema="other")
            pdsql = sql.SQLDatabase(engine2, meta=meta)
            pdsql.to_sql(df, "test_schema_other2", index=False)
            pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
            pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
            res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
            res2 = pdsql.read_table("test_schema_other2")
            tm.assert_frame_equal(res1, res2)
    def test_copy_from_callable_insertion_method(self):
        # GH 8953
        # Example in io.rst found under _io.sql.method
        # not available in sqlite, mysql
        def psql_insert_copy(table, conn, keys, data_iter):
            # gets a DBAPI connection that can provide a cursor
            dbapi_conn = conn.connection
            with dbapi_conn.cursor() as cur:
                s_buf = StringIO()
                writer = csv.writer(s_buf)
                writer.writerows(data_iter)
                s_buf.seek(0)
                columns = ", ".join(f'"{k}"' for k in keys)
                if table.schema:
                    table_name = f"{table.schema}.{table.name}"
                else:
                    table_name = table.name
                sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
                cur.copy_expert(sql=sql_query, file=s_buf)
        expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
        expected.to_sql(
            "test_copy_insert", self.conn, index=False, method=psql_insert_copy
        )
        result = sql.read_sql_table("test_copy_insert", self.conn)
        tm.assert_frame_equal(result, expected)
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
    """MySQL flavor, run against a SQLAlchemy Engine."""
    pass
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
    """MySQL flavor, run against a SQLAlchemy Connection."""
    pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
    """PostgreSQL flavor, run against a SQLAlchemy Engine."""
    pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
    """PostgreSQL flavor, run against a SQLAlchemy Connection."""
    pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
    """In-memory sqlite flavor, run against a SQLAlchemy Engine."""
    pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
    """In-memory sqlite flavor, run against a SQLAlchemy Connection."""
    pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
    """
    Test the fallback mode against an in-memory sqlite database.
    """
    flavor = "sqlite"
    @classmethod
    def connect(cls):
        return sqlite3.connect(":memory:")
    def setup_connect(self):
        self.conn = self.connect()
    def load_test_data_and_sql(self):
        # Fallback mode uses SQLiteDatabase directly (no SQLAlchemy).
        self.pandasSQL = sql.SQLiteDatabase(self.conn)
        self._load_test1_data()
    @pytest.fixture(autouse=True)
    def setup_method(self, load_iris_data):
        self.load_test_data_and_sql()
    # Thin wrappers delegating to the shared PandasSQLTest helpers.
    def test_read_sql(self):
        self._read_sql_iris()
    def test_read_sql_parameter(self):
        self._read_sql_iris_parameter()
    def test_read_sql_named_parameter(self):
        self._read_sql_iris_named_parameter()
    def test_to_sql(self):
        self._to_sql()
    def test_to_sql_empty(self):
        self._to_sql_empty()
    def test_to_sql_fail(self):
        self._to_sql_fail()
    def test_to_sql_replace(self):
        self._to_sql_replace()
    def test_to_sql_append(self):
        self._to_sql_append()
    def test_to_sql_method_multi(self):
        # GH 29921
        self._to_sql(method="multi")
    def test_create_and_drop_table(self):
        temp_frame = DataFrame(
            {"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
        )
        self.pandasSQL.to_sql(temp_frame, "drop_test_frame")
        assert self.pandasSQL.has_table("drop_test_frame")
        self.pandasSQL.drop_table("drop_test_frame")
        assert not self.pandasSQL.has_table("drop_test_frame")
    def test_roundtrip(self):
        self._roundtrip()
    def test_execute_sql(self):
        self._execute_sql()
    def test_datetime_date(self):
        # test support for datetime.date
        df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
        df.to_sql("test_date", self.conn, index=False)
        res = read_sql_query("SELECT * FROM test_date", self.conn)
        if self.flavor == "sqlite":
            # comes back as strings
            tm.assert_frame_equal(res, df.astype(str))
        elif self.flavor == "mysql":
            tm.assert_frame_equal(res, df)
    def test_datetime_time(self):
        # test support for datetime.time, GH #8341
        df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
        df.to_sql("test_time", self.conn, index=False)
        res = read_sql_query("SELECT * FROM test_time", self.conn)
        if self.flavor == "sqlite":
            # comes back as strings
            expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
            tm.assert_frame_equal(res, expected)
    def _get_index_columns(self, tbl_name):
        # Helper: column-name lists for each index, via sqlite_master
        # and PRAGMA index_info.
        ixs = sql.read_sql_query(
            "SELECT * FROM sqlite_master WHERE type = 'index' "
            + f"AND tbl_name = '{tbl_name}'",
            self.conn,
        )
        ix_cols = []
        for ix_name in ixs.name:
            ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
            ix_cols.append(ix_info.name.tolist())
        return ix_cols
    def test_to_sql_save_index(self):
        self._to_sql_save_index()
    def test_transactions(self):
        self._transaction_test()
    def _get_sqlite_column_type(self, table, column):
        # Helper: declared type of `column` via PRAGMA table_info.
        recs = self.conn.execute(f"PRAGMA table_info({table})")
        for cid, name, ctype, not_null, default, pk in recs:
            if name == column:
                return ctype
        raise ValueError(f"Table {table}, column {column} not found")
    def test_dtype(self):
        if self.flavor == "mysql":
            pytest.skip("Not applicable to MySQL legacy")
        cols = ["A", "B"]
        data = [(0.8, True), (0.9, None)]
        df = DataFrame(data, columns=cols)
        df.to_sql("dtype_test", self.conn)
        df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"})
        # sqlite stores Boolean values as INTEGER
        assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
        assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
        # non-string dtype overrides must be rejected in fallback mode
        msg = r"B \(<class 'bool'>\) not a string"
        with pytest.raises(ValueError, match=msg):
            df.to_sql("error", self.conn, dtype={"B": bool})
        # single dtype
        df.to_sql("single_dtype_test", self.conn, dtype="STRING")
        assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
        assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
    def test_notna_dtype(self):
        if self.flavor == "mysql":
            pytest.skip("Not applicable to MySQL legacy")
        cols = {
            "Bool": Series([True, None]),
            "Date": Series([datetime(2012, 5, 1), None]),
            "Int": Series([1, None], dtype="object"),
            "Float": Series([1.1, None]),
        }
        df = DataFrame(cols)
        tbl = "notna_dtype_test"
        df.to_sql(tbl, self.conn)
        assert self._get_sqlite_column_type(tbl, "Bool") == "INTEGER"
        assert self._get_sqlite_column_type(tbl, "Date") == "TIMESTAMP"
        assert self._get_sqlite_column_type(tbl, "Int") == "INTEGER"
        assert self._get_sqlite_column_type(tbl, "Float") == "REAL"
    def test_illegal_names(self):
        # For sqlite, these should work fine
        df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
        msg = "Empty table or column name specified"
        with pytest.raises(ValueError, match=msg):
            df.to_sql("", self.conn)
        for ndx, weird_name in enumerate(
            [
                "test_weird_name]",
                "test_weird_name[",
                "test_weird_name`",
                'test_weird_name"',
                "test_weird_name'",
                "_b.test_weird_name_01-30",
                '"_b.test_weird_name_01-30"',
                "99beginswithnumber",
                "12345",
                "\xe9",
            ]
        ):
            df.to_sql(weird_name, self.conn)
            sql.table_exists(weird_name, self.conn)
            df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name])
            c_tbl = f"test_weird_col_name{ndx:d}"
            df2.to_sql(c_tbl, self.conn)
            sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
def date_format(dt):
    """Returns date in YYYYMMDD format."""
    # Equivalent to dt.strftime("%Y%m%d"); datetime.__format__ delegates
    # format specs to strftime.
    return f"{dt:%Y%m%d}"
_formatters = {
datetime: "'{}'".format,
str: "'{}'".format,
np.str_: "'{}'".format,
bytes: "'{}'".format,
float: "{:.8f}".format,
int: "{:d}".format,
type(None): lambda x: "NULL",
np.float64: "{:.10f}".format,
bool: "'{!s}'".format,
}
def format_query(sql, *args):
    """Interpolate *args* into a %s-style query string as SQL literals."""

    def _render(arg):
        # Float NaN is rendered as SQL NULL.
        if isinstance(arg, float) and isna(arg):
            arg = None
        return _formatters[type(arg)](arg)

    return sql % tuple(_render(arg) for arg in args)
def tquery(query, con=None, cur=None):
    """Replace removed sql.tquery function"""
    rows = sql.execute(query, con=con, cur=cur).fetchall()
    # Preserve the historical contract: None stays None, anything else
    # is materialized as a list of row tuples.
    return None if rows is None else list(rows)
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
    """Legacy (pre-SQLAlchemy) round-trip tests against an in-memory SQLite DB."""
    @pytest.fixture(autouse=True)
    def setup_method(self, request, datapath):
        # Fresh in-memory database per test.
        self.method = request.function
        self.conn = sqlite3.connect(":memory:")
        # In some test cases we may close db connection
        # Re-open conn here so we can perform cleanup in teardown
        yield
        self.method = request.function
        self.conn = sqlite3.connect(":memory:")
    def test_basic(self):
        """A time-indexed frame survives a write/read round trip."""
        frame = tm.makeTimeDataFrame()
        self._check_roundtrip(frame)
    def test_write_row_by_row(self):
        """Rows inserted one at a time via formatted SQL read back correctly."""
        frame = tm.makeTimeDataFrame()
        frame.iloc[0, 0] = np.nan
        create_sql = sql.get_schema(frame, "test")
        cur = self.conn.cursor()
        cur.execute(create_sql)
        cur = self.conn.cursor()
        ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
        for idx, row in frame.iterrows():
            fmt_sql = format_query(ins, *row)
            tquery(fmt_sql, cur=cur)
        self.conn.commit()
        result = sql.read_sql("select * from test", con=self.conn)
        result.index = frame.index
        # rtol because _formatters rounds floats to 8 decimal places.
        tm.assert_frame_equal(result, frame, rtol=1e-3)
    def test_execute(self):
        """sql.execute with qmark-style params inserts a single row."""
        frame = tm.makeTimeDataFrame()
        create_sql = sql.get_schema(frame, "test")
        cur = self.conn.cursor()
        cur.execute(create_sql)
        ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
        row = frame.iloc[0]
        sql.execute(ins, self.conn, params=tuple(row))
        self.conn.commit()
        result = sql.read_sql("select * from test", self.conn)
        result.index = frame.index[:1]
        tm.assert_frame_equal(result, frame[:1])
    def test_schema(self):
        """get_schema emits DATETIME columns and honors requested keys."""
        frame = tm.makeTimeDataFrame()
        create_sql = sql.get_schema(frame, "test")
        lines = create_sql.splitlines()
        for l in lines:
            tokens = l.split(" ")
            if len(tokens) == 2 and tokens[0] == "A":
                assert tokens[1] == "DATETIME"
        frame = tm.makeTimeDataFrame()
        create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
        lines = create_sql.splitlines()
        assert 'PRIMARY KEY ("A", "B")' in create_sql
        cur = self.conn.cursor()
        cur.execute(create_sql)
    def test_execute_fail(self):
        """A primary-key violation raises through sql.execute."""
        create_sql = """
        CREATE TABLE test
        (
        a TEXT,
        b TEXT,
        c REAL,
        PRIMARY KEY (a, b)
        );
        """
        cur = self.conn.cursor()
        cur.execute(create_sql)
        sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
        sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
        with pytest.raises(Exception):
            sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
    def test_execute_closed_connection(self):
        """Querying a closed connection raises instead of hanging."""
        create_sql = """
        CREATE TABLE test
        (
        a TEXT,
        b TEXT,
        c REAL,
        PRIMARY KEY (a, b)
        );
        """
        cur = self.conn.cursor()
        cur.execute(create_sql)
        sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
        self.conn.close()
        with pytest.raises(Exception):
            tquery("select * from test", con=self.conn)
    def test_na_roundtrip(self):
        # Intentionally empty: NA round-trip is exercised elsewhere.
        pass
    def _check_roundtrip(self, frame):
        """Write *frame* out and assert it reads back equal (with and without
        an explicit index column)."""
        sql.to_sql(frame, name="test_table", con=self.conn, index=False)
        result = sql.read_sql("select * from test_table", self.conn)
        # HACK! Change this once indexes are handled properly.
        result.index = frame.index
        expected = frame
        tm.assert_frame_equal(result, expected)
        frame["txt"] = ["a"] * len(frame)
        frame2 = frame.copy()
        new_idx = Index(np.arange(len(frame2))) + 10
        frame2["Idx"] = new_idx.copy()
        sql.to_sql(frame2, name="test_table2", con=self.conn, index=False)
        result = sql.read_sql("select * from test_table2", self.conn, index_col="Idx")
        expected = frame.copy()
        expected.index = new_idx
        expected.index.name = "Idx"
        tm.assert_frame_equal(expected, result)
    def test_keyword_as_column_names(self):
        """A SQL keyword ("From") is usable as a column name."""
        df = DataFrame({"From": np.ones(5)})
        sql.to_sql(df, con=self.conn, name="testkeywords", index=False)
    def test_onecolumn_of_integer(self):
        # GH 3628
        # a column_of_integers dataframe should transfer well to sql
        mono_df = DataFrame([1, 2], columns=["c0"])
        sql.to_sql(mono_df, con=self.conn, name="mono_df", index=False)
        # computing the sum via sql
        con_x = self.conn
        the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df"))
        # it should not fail, and gives 3 ( Issue #3628 )
        assert the_sum == 3
        result = sql.read_sql("select * from mono_df", con_x)
        tm.assert_frame_equal(result, mono_df)
    def test_if_exists(self):
        """to_sql's if_exists: invalid value raises; fail/replace/append behave."""
        df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
        df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
        table_name = "table_if_exists"
        sql_select = f"SELECT * FROM {table_name}"
        def clean_up(test_table_to_drop):
            """
            Drops tables created from individual tests
            so no dependencies arise from sequential tests
            """
            self.drop_table(test_table_to_drop)
        msg = "'notvalidvalue' is not valid for if_exists"
        with pytest.raises(ValueError, match=msg):
            sql.to_sql(
                frame=df_if_exists_1,
                con=self.conn,
                name=table_name,
                if_exists="notvalidvalue",
            )
        clean_up(table_name)
        # test if_exists='fail'
        sql.to_sql(
            frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
        )
        msg = "Table 'table_if_exists' already exists"
        with pytest.raises(ValueError, match=msg):
            sql.to_sql(
                frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
            )
        # test if_exists='replace'
        sql.to_sql(
            frame=df_if_exists_1,
            con=self.conn,
            name=table_name,
            if_exists="replace",
            index=False,
        )
        assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
        sql.to_sql(
            frame=df_if_exists_2,
            con=self.conn,
            name=table_name,
            if_exists="replace",
            index=False,
        )
        assert tquery(sql_select, con=self.conn) == [(3, "C"), (4, "D"), (5, "E")]
        clean_up(table_name)
        # test if_exists='append'
        sql.to_sql(
            frame=df_if_exists_1,
            con=self.conn,
            name=table_name,
            if_exists="fail",
            index=False,
        )
        assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
        sql.to_sql(
            frame=df_if_exists_2,
            con=self.conn,
            name=table_name,
            if_exists="append",
            index=False,
        )
        assert tquery(sql_select, con=self.conn) == [
            (1, "A"),
            (2, "B"),
            (3, "C"),
            (4, "D"),
            (5, "E"),
        ]
        clean_up(table_name)
@pytest.mark.single
@pytest.mark.db
@pytest.mark.skip(
    reason="gh-13611: there is no support for MySQL if SQLAlchemy is not installed"
)
class TestXMySQL(MySQLMixIn):
    """Legacy (pre-SQLAlchemy) round-trip tests against a local MySQL server.

    Mirrors TestXSQLite; requires a ``[pandas]`` group in the MySQL defaults
    file.  The whole class is currently skipped (see the skip marker above).
    """
    @pytest.fixture(autouse=True, scope="class")
    def setup_class(cls):
        pymysql = pytest.importorskip("pymysql")
        pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest")
        try:
            pymysql.connect(read_default_group="pandas")
        except pymysql.ProgrammingError as err:
            raise RuntimeError(
                "Create a group of connection parameters under the heading "
                "[pandas] in your system's mysql default file, "
                "typically located at ~/.my.cnf or /etc/.my.cnf."
            ) from err
        except pymysql.Error as err:
            raise RuntimeError(
                "Cannot connect to database. "
                "Create a group of connection parameters under the heading "
                "[pandas] in your system's mysql default file, "
                "typically located at ~/.my.cnf or /etc/.my.cnf."
            ) from err
    @pytest.fixture(autouse=True)
    def setup_method(self, request, datapath):
        pymysql = pytest.importorskip("pymysql")
        pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest")
        try:
            pymysql.connect(read_default_group="pandas")
        except pymysql.ProgrammingError as err:
            raise RuntimeError(
                "Create a group of connection parameters under the heading "
                "[pandas] in your system's mysql default file, "
                "typically located at ~/.my.cnf or /etc/.my.cnf."
            ) from err
        except pymysql.Error as err:
            raise RuntimeError(
                "Cannot connect to database. "
                "Create a group of connection parameters under the heading "
                "[pandas] in your system's mysql default file, "
                "typically located at ~/.my.cnf or /etc/.my.cnf."
            ) from err
        self.method = request.function
    def test_basic(self):
        """A time-indexed frame survives a write/read round trip."""
        frame = tm.makeTimeDataFrame()
        self._check_roundtrip(frame)
    def test_write_row_by_row(self):
        """Rows inserted one at a time via formatted SQL read back correctly."""
        frame = tm.makeTimeDataFrame()
        frame.iloc[0, 0] = np.nan
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = sql.get_schema(frame, "test")
        cur = self.conn.cursor()
        cur.execute(drop_sql)
        cur.execute(create_sql)
        ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
        for idx, row in frame.iterrows():
            fmt_sql = format_query(ins, *row)
            tquery(fmt_sql, cur=cur)
        self.conn.commit()
        result = sql.read_sql("select * from test", con=self.conn)
        result.index = frame.index
        tm.assert_frame_equal(result, frame, rtol=1e-3)
        # GH#32571 result comes back rounded to 6 digits in some builds;
        # no obvious pattern
    def test_chunksize_read_type(self):
        """read_sql_query with chunksize yields frames equal to plain reads."""
        frame = tm.makeTimeDataFrame()
        frame.index.name = "index"
        drop_sql = "DROP TABLE IF EXISTS test"
        cur = self.conn.cursor()
        cur.execute(drop_sql)
        sql.to_sql(frame, name="test", con=self.conn)
        query = "select * from test"
        chunksize = 5
        chunk_gen = pd.read_sql_query(
            sql=query, con=self.conn, chunksize=chunksize, index_col="index"
        )
        chunk_df = next(chunk_gen)
        tm.assert_frame_equal(frame[:chunksize], chunk_df)
    def test_execute(self):
        """sql.execute with %s-style params inserts a single row."""
        frame = tm.makeTimeDataFrame()
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = sql.get_schema(frame, "test")
        cur = self.conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Unknown table.*")
            cur.execute(drop_sql)
        cur.execute(create_sql)
        ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
        row = frame.iloc[0].values.tolist()
        sql.execute(ins, self.conn, params=tuple(row))
        self.conn.commit()
        result = sql.read_sql("select * from test", self.conn)
        result.index = frame.index[:1]
        tm.assert_frame_equal(result, frame[:1])
    def test_schema(self):
        """get_schema emits DATETIME columns and backtick-quoted keys."""
        frame = tm.makeTimeDataFrame()
        create_sql = sql.get_schema(frame, "test")
        lines = create_sql.splitlines()
        for l in lines:
            tokens = l.split(" ")
            if len(tokens) == 2 and tokens[0] == "A":
                assert tokens[1] == "DATETIME"
        frame = tm.makeTimeDataFrame()
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
        lines = create_sql.splitlines()
        assert "PRIMARY KEY (`A`, `B`)" in create_sql
        cur = self.conn.cursor()
        cur.execute(drop_sql)
        cur.execute(create_sql)
    def test_execute_fail(self):
        """A primary-key violation raises through sql.execute."""
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = """
        CREATE TABLE test
        (
        a TEXT,
        b TEXT,
        c REAL,
        PRIMARY KEY (a(5), b(5))
        );
        """
        cur = self.conn.cursor()
        cur.execute(drop_sql)
        cur.execute(create_sql)
        sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
        sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
        with pytest.raises(Exception):
            sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
    def test_execute_closed_connection(self, request, datapath):
        """Querying a closed connection raises instead of hanging."""
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = """
        CREATE TABLE test
        (
        a TEXT,
        b TEXT,
        c REAL,
        PRIMARY KEY (a(5), b(5))
        );
        """
        cur = self.conn.cursor()
        cur.execute(drop_sql)
        cur.execute(create_sql)
        sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
        self.conn.close()
        with pytest.raises(Exception):
            tquery("select * from test", con=self.conn)
        # Initialize connection again (needed for tearDown)
        self.setup_method(request, datapath)
    def test_na_roundtrip(self):
        # Intentionally empty: NA round-trip is exercised elsewhere.
        pass
    def _check_roundtrip(self, frame):
        """Write *frame* out and assert it reads back equal (with and without
        an explicit index column)."""
        drop_sql = "DROP TABLE IF EXISTS test_table"
        cur = self.conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Unknown table.*")
            cur.execute(drop_sql)
        sql.to_sql(frame, name="test_table", con=self.conn, index=False)
        result = sql.read_sql("select * from test_table", self.conn)
        # HACK! Change this once indexes are handled properly.
        result.index = frame.index
        result.index.name = frame.index.name
        expected = frame
        tm.assert_frame_equal(result, expected)
        frame["txt"] = ["a"] * len(frame)
        frame2 = frame.copy()
        index = Index(np.arange(len(frame2))) + 10
        frame2["Idx"] = index
        drop_sql = "DROP TABLE IF EXISTS test_table2"
        cur = self.conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Unknown table.*")
            cur.execute(drop_sql)
        sql.to_sql(frame2, name="test_table2", con=self.conn, index=False)
        result = sql.read_sql("select * from test_table2", self.conn, index_col="Idx")
        expected = frame.copy()
        # HACK! Change this once indexes are handled properly.
        expected.index = index
        expected.index.names = result.index.names
        tm.assert_frame_equal(expected, result)
    def test_keyword_as_column_names(self):
        """A SQL keyword ("From") is usable as a column name."""
        df = DataFrame({"From": np.ones(5)})
        sql.to_sql(
            df, con=self.conn, name="testkeywords", if_exists="replace", index=False
        )
    def test_if_exists(self):
        """to_sql's if_exists: invalid value raises; fail/replace/append behave."""
        df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
        df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
        table_name = "table_if_exists"
        sql_select = f"SELECT * FROM {table_name}"
        def clean_up(test_table_to_drop):
            """
            Drops tables created from individual tests
            so no dependencies arise from sequential tests
            """
            self.drop_table(test_table_to_drop)
        # test if invalid value for if_exists raises appropriate error
        # BUG FIX: the match pattern was the literal placeholder
        # "<insert message here>", which can never match the real message
        # (these ValueErrors are backend-independent; see TestXSQLite).
        msg = "'notvalidvalue' is not valid for if_exists"
        with pytest.raises(ValueError, match=msg):
            sql.to_sql(
                frame=df_if_exists_1,
                con=self.conn,
                name=table_name,
                if_exists="notvalidvalue",
            )
        clean_up(table_name)
        # test if_exists='fail'
        sql.to_sql(
            frame=df_if_exists_1,
            con=self.conn,
            name=table_name,
            if_exists="fail",
            index=False,
        )
        # BUG FIX: same placeholder pattern replaced with the real message.
        msg = "Table 'table_if_exists' already exists"
        with pytest.raises(ValueError, match=msg):
            sql.to_sql(
                frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
            )
        # test if_exists='replace'
        sql.to_sql(
            frame=df_if_exists_1,
            con=self.conn,
            name=table_name,
            if_exists="replace",
            index=False,
        )
        assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
        sql.to_sql(
            frame=df_if_exists_2,
            con=self.conn,
            name=table_name,
            if_exists="replace",
            index=False,
        )
        assert tquery(sql_select, con=self.conn) == [(3, "C"), (4, "D"), (5, "E")]
        clean_up(table_name)
        # test if_exists='append'
        sql.to_sql(
            frame=df_if_exists_1,
            con=self.conn,
            name=table_name,
            if_exists="fail",
            index=False,
        )
        assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
        sql.to_sql(
            frame=df_if_exists_2,
            con=self.conn,
            name=table_name,
            if_exists="append",
            index=False,
        )
        assert tquery(sql_select, con=self.conn) == [
            (1, "A"),
            (2, "B"),
            (3, "C"),
            (4, "D"),
            (5, "E"),
        ]
        clean_up(table_name)
| 34.854671 | 88 | 0.588295 |
dfc9da0ba5f29a8cb0629ebebfbf5121f85b5e8c | 5,645 | py | Python | pyci/tests/shell/__init__.py | iliapolo/pyrelease | 85784c556a0760d560378ef6edcfb32ab87048a5 | [
"Apache-2.0"
] | 5 | 2018-05-03T15:20:12.000Z | 2019-12-13T20:19:47.000Z | pyci/tests/shell/__init__.py | iliapolo/pyci | 85784c556a0760d560378ef6edcfb32ab87048a5 | [
"Apache-2.0"
] | 54 | 2018-04-09T06:34:50.000Z | 2020-03-30T06:13:39.000Z | pyci/tests/shell/__init__.py | iliapolo/pyrelease | 85784c556a0760d560378ef6edcfb32ab87048a5 | [
"Apache-2.0"
] | null | null | null | #############################################################################
# Copyright (c) 2018 Eli Polonsky. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
#############################################################################
import logging
import os
import platform
import re
from boltons.cacheutils import cachedproperty
from click.testing import CliRunner
from pyci.tests import utils as test_utils
from pyci.api import exceptions
from pyci.api import logger
from pyci.api.package.packager import Packager
from pyci.api.runner import CommandExecutionResponse
from pyci.api.runner import LocalCommandRunner
from pyci.shell.main import app
CLICK_ISOLATION = '__CLICK_ISOLATION'
# pylint: disable=too-few-public-methods
class PyCI(object):
    """Test harness that runs pyci commands either in-process (Click's
    CliRunner against the source) or via a packaged standalone binary.

    Non-zero exit codes raise CommandExecutionException unless the caller
    opts into catch_exceptions.
    """
    def __init__(self, repo_path, log=None):
        self.repo_path = repo_path
        # patch_setup_py stamps a version into the repo under test.
        self.version = test_utils.patch_setup_py(self.repo_path)
        self._logger = log or logger.Logger(__name__)
        self._click_runner = CliRunner()
        self._local_runner = LocalCommandRunner()
        self._packager = Packager.create(path=repo_path, target_dir=repo_path)
    def run(self, command, binary=False, catch_exceptions=False):
        """Run *command*; binary=True uses the packaged executable."""
        if self._logger.isEnabledFor(logging.DEBUG):
            command = '--debug {}'.format(command)
        if binary:
            response = self._run_binary(command=command)
        else:
            try:
                # Tell the CLI it is running under Click isolation.
                os.environ[CLICK_ISOLATION] = 'True'
                response = self._run_source(command=command)
            finally:
                del os.environ[CLICK_ISOLATION]
        if response.return_code != 0 and not catch_exceptions:
            raise exceptions.CommandExecutionException(command=command,
                                                       error=response.std_err,
                                                       output=response.std_out,
                                                       code=response.return_code)
        return response
    def _run_source(self, command):
        # In-process invocation through Click's test runner.
        args = split(command)
        self._logger.info('Invoking command: {} [cwd={}]'.format(command, os.getcwd()))
        result = self._click_runner.invoke(app, args, catch_exceptions=True)
        exception = result.exception
        return CommandExecutionResponse(command=command,
                                        std_out=result.output,
                                        std_err=str(exception) if exception else None,
                                        return_code=result.exit_code)
    def _run_binary(self, command):
        # Subprocess invocation of the packaged binary.
        command = '{} {}'.format(self.binary_path, command)
        self._logger.info('Invoking command: {}. [cwd={}]'.format(command, os.getcwd()))
        return self._local_runner.run(command, exit_on_failure=False, execution_env={
            'PYCI_INTERACTIVE': 'False'
        })
    @cachedproperty
    def binary_path(self):
        # Built once per harness; PYCI_BINARY_PATH short-circuits the build.
        # pylint: disable=cyclic-import
        from pyci.tests import conftest
        package_path = os.environ.get('PYCI_BINARY_PATH', None)
        if not package_path:
            self._logger.info('Creating binary package... [cwd={}]'.format(os.getcwd()))
            package_path = self._packager.binary(entrypoint=conftest.SPEC_FILE)
            self._logger.info('Created binary package: {} [cwd={}]'.format(package_path, os.getcwd()))
        return package_path
    @cachedproperty
    def wheel_path(self):
        # Built once per harness.
        self._logger.info('Creating wheel package... [cwd={}]'.format(os.getcwd()))
        package_path = self._packager.wheel()
        self._logger.info('Created wheel package: {} [cwd={}]'.format(package_path, os.getcwd()))
        return package_path
# take from https://stackoverflow.com/questions/33560364/python-windows-parsing-command-
# lines-with-shlex?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# pylint: disable=too-many-branches
def split(command):
    """Split a shell command line into a list of arguments.

    Like ``shlex.split`` but understands both POSIX and Windows quoting
    rules, choosing the rule set from the current platform.  Pipe and
    redirection operators are returned as their own tokens.

    Raises:
        ValueError: if the command line is malformed (e.g. a stray quote).
    """
    is_windows = platform.system().lower() == 'windows'
    if not is_windows:
        re_cmd_lex = r'"((?:\\["\\]|[^"])*)"|' \
                     r"'([^']*)'|(\\.)|(&&?|\|\|?|\d?\>|[<])|([^\s'" \
                     r'"\\&|<>]+)|(\s+)|(.)'
    else:
        re_cmd_lex = r'"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|' \
                     r'?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'
    args = []
    accu = None  # the argument currently being accumulated (may span tokens)
    for qs, qss, esc, pipe, word, white, fail in re.findall(re_cmd_lex, command):
        if word:
            pass  # plain word: appended to the accumulator below
        elif esc:
            word = esc[1]  # keep only the escaped character
        elif white or pipe:
            # Whitespace or an operator terminates the current argument.
            if accu is not None:
                args.append(accu)
            if pipe:
                args.append(pipe)
            accu = None
            continue
        elif fail:
            raise ValueError("invalid or incomplete shell string")
        elif qs:
            # Double-quoted string: undo backslash escaping.
            word = qs.replace('\\"', '"').replace('\\\\', '\\')
            if is_windows:
                # BUG FIX: this previously tested ``platform == 0`` — comparing
                # the `platform` *module* to 0 is always False, so the Windows
                # '""' -> '"' unescaping branch was dead code.
                word = word.replace('""', '"')
        else:
            word = qss  # may be even empty; must be last
        accu = (accu or '') + word
    if accu is not None:
        args.append(accu)
    return args
| 34.631902 | 102 | 0.575376 |
5c10649b9f8f7e2d6dbced56344dcc5e24259ec9 | 594 | py | Python | testlib/__init__.py | MSanKeys963/gramex | 8ac5fd6e79d100982fdc9e9308d9a6250ce021e2 | [
"MIT"
] | 130 | 2019-02-05T06:09:16.000Z | 2022-03-31T03:09:00.000Z | testlib/__init__.py | MSanKeys963/gramex | 8ac5fd6e79d100982fdc9e9308d9a6250ce021e2 | [
"MIT"
] | 457 | 2019-02-05T05:08:34.000Z | 2022-03-30T03:47:45.000Z | testlib/__init__.py | MSanKeys963/gramex | 8ac5fd6e79d100982fdc9e9308d9a6250ce021e2 | [
"MIT"
] | 50 | 2019-02-12T12:20:02.000Z | 2022-03-15T02:50:02.000Z | import os
import sys
import logging
# Import helpers from the sibling ../tests/ folder (e.g. dbutils.py for use
# in test_data.py, etc.) by putting it on sys.path.  This is not elegant.
folder = os.path.dirname(os.path.abspath(__file__))
tests_dir = os.path.normpath(os.path.join(folder, '..', 'tests'))
sys.path.append(tests_dir)
import dbutils  # noqa
from tests import remove_if_possible  # noqa
# Location of the sales data file
sales_file = os.path.join(tests_dir, 'sales.xlsx')
# Turn off matplotlib's verbose debug logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
| 31.263158 | 77 | 0.723906 |
2b58398450175e908b772eb8c61f137be3d45dd8 | 2,303 | py | Python | napalm_arubaoss/helper/__init__.py | derpedda/napalm-arubaos-switch | 7a705e995b48a9a1815949ff730787618d774ae5 | [
"MIT"
] | 13 | 2020-02-17T16:55:23.000Z | 2022-01-07T21:56:38.000Z | napalm_arubaoss/helper/__init__.py | derpedda/napalm-arubaos-switch | 7a705e995b48a9a1815949ff730787618d774ae5 | [
"MIT"
] | 7 | 2020-01-08T16:41:28.000Z | 2021-12-20T16:45:43.000Z | napalm_arubaoss/helper/__init__.py | derpedda/napalm-arubaos-switch | 7a705e995b48a9a1815949ff730787618d774ae5 | [
"MIT"
] | 6 | 2020-04-25T09:21:02.000Z | 2022-01-03T10:32:48.000Z | """Import all functions of this directory."""
from napalm_arubaoss.helper.base import Connection
from napalm_arubaoss.helper.commit_config import commit_config
from napalm_arubaoss.helper.compare_config import compare_config
from napalm_arubaoss.helper.confirm_commit import confirm_commit
from napalm_arubaoss.helper.get_arp_table import get_arp_table
from napalm_arubaoss.helper.get_config import get_config
from napalm_arubaoss.helper.get_facts import get_facts
from napalm_arubaoss.helper.get_interfaces import get_interfaces
from napalm_arubaoss.helper.get_interfaces_ip import get_interfaces_ip
from napalm_arubaoss.helper.get_lldp_neighbors import get_lldp_neighbors
from napalm_arubaoss.helper.get_lldp_neighbors_detail import get_lldp_neighbors_detail
from napalm_arubaoss.helper.get_mac_address_table import get_mac_address_table
from napalm_arubaoss.helper.get_ntp_servers import get_ntp_servers
from napalm_arubaoss.helper.get_ntp_stats import get_ntp_stats
from napalm_arubaoss.helper.get_route_to import get_route_to
from napalm_arubaoss.helper.has_pending_commit import has_pending_commit
from napalm_arubaoss.helper.is_alive import is_alive
from napalm_arubaoss.helper.load_merge_candidate import load_merge_candidate
from napalm_arubaoss.helper.load_replace_candidate import load_replace_candidate
from napalm_arubaoss.helper.ping import ping
from napalm_arubaoss.helper.rollback import rollback
from napalm_arubaoss.helper.traceroute import traceroute
from napalm_arubaoss.helper.utils import (
backup_config,
mac_reformat,
commit_candidate,
config_batch,
read_candidate,
str_to_b64,
transaction_status,
)
__all__ = (
"Connection",
"backup_config",
"commit_candidate",
"commit_config",
"compare_config",
"config_batch",
"confirm_commit",
"get_mac_address_table",
"get_facts",
"get_arp_table",
"get_config",
"get_interfaces",
"get_interfaces_ip",
"get_lldp_neighbors",
"get_lldp_neighbors_detail",
"get_ntp_stats",
"get_ntp_servers",
"get_route_to",
"has_pending_commit",
"is_alive",
"load_merge_candidate",
"load_replace_candidate",
"mac_reformat",
"ping",
"read_candidate",
"rollback",
"str_to_b64",
"traceroute",
"transaction_status",
)
| 34.893939 | 86 | 0.810248 |
8c5461acac6e7b0aa0676bfebcdd52d98a71ea85 | 9,825 | py | Python | tests/providers/google/cloud/hooks/test_automl.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 2 | 2021-07-30T17:25:56.000Z | 2021-08-03T13:51:09.000Z | tests/providers/google/cloud/hooks/test_automl.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 14 | 2019-12-03T02:54:42.000Z | 2020-02-27T16:08:10.000Z | tests/providers/google/cloud/hooks/test_automl.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 1 | 2021-07-02T04:23:18.000Z | 2021-07-02T04:23:18.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import mock
from google.cloud.automl_v1beta1 import AutoMlClient, PredictionServiceClient
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id
CREDENTIALS = "test-creds"
CLIENT_INFO = "client-info"
TASK_ID = "test-automl-hook"
GCP_PROJECT_ID = "test-project"
GCP_LOCATION = "test-location"
MODEL_NAME = "test_model"
MODEL_ID = "projects/198907790164/locations/us-central1/models/TBL9195602771183665152"
DATASET_ID = "TBL123456789"
MODEL = {
"display_name": MODEL_NAME,
"dataset_id": DATASET_ID,
"tables_model_metadata": {"train_budget_milli_node_hours": 1000},
}
LOCATION_PATH = AutoMlClient.location_path(GCP_PROJECT_ID, GCP_LOCATION)
MODEL_PATH = PredictionServiceClient.model_path(GCP_PROJECT_ID, GCP_LOCATION, MODEL_ID)
DATASET_PATH = AutoMlClient.dataset_path(GCP_PROJECT_ID, GCP_LOCATION, DATASET_ID)
INPUT_CONFIG = {"input": "value"}
OUTPUT_CONFIG = {"output": "value"}
PAYLOAD = {"test": "payload"}
DATASET = {"dataset_id": "data"}
MASK = {"field": "mask"}
class TestAuoMLHook(unittest.TestCase):
    """Unit tests for CloudAutoMLHook: each test patches the underlying
    google-cloud-automl client and asserts the hook forwards the expected
    arguments (no real GCP calls are made)."""
    def setUp(self) -> None:
        # Build the hook with the base-hook __init__ replaced so no real
        # connection/project lookup happens; stub out credential retrieval.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.hook = CloudAutoMLHook()
        self.hook._get_credentials = mock.MagicMock(return_value=CREDENTIALS)  # type: ignore
    @mock.patch(
        "airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.client_info",
        new_callable=lambda: CLIENT_INFO,
    )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient")
    def test_get_conn(self, mock_automl_client, mock_client_info):
        self.hook.get_conn()
        mock_automl_client.assert_called_once_with(credentials=CREDENTIALS, client_info=CLIENT_INFO)
    @mock.patch(
        "airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.client_info",
        new_callable=lambda: CLIENT_INFO,
    )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient")
    def test_prediction_client(self, mock_prediction_client, mock_client_info):
        client = self.hook.prediction_client  # pylint: disable=unused-variable # noqa
        mock_prediction_client.assert_called_once_with(credentials=CREDENTIALS, client_info=CLIENT_INFO)
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.create_model")
    def test_create_model(self, mock_create_model):
        self.hook.create_model(model=MODEL, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_create_model.assert_called_once_with(
            parent=LOCATION_PATH, model=MODEL, retry=None, timeout=None, metadata=None
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient.batch_predict")
    def test_batch_predict(self, mock_batch_predict):
        self.hook.batch_predict(
            model_id=MODEL_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            input_config=INPUT_CONFIG,
            output_config=OUTPUT_CONFIG,
        )
        mock_batch_predict.assert_called_once_with(
            name=MODEL_PATH,
            input_config=INPUT_CONFIG,
            output_config=OUTPUT_CONFIG,
            params=None,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient.predict")
    def test_predict(self, mock_predict):
        self.hook.predict(
            model_id=MODEL_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            payload=PAYLOAD,
        )
        mock_predict.assert_called_once_with(
            name=MODEL_PATH,
            payload=PAYLOAD,
            params=None,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.create_dataset")
    def test_create_dataset(self, mock_create_dataset):
        self.hook.create_dataset(dataset=DATASET, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_create_dataset.assert_called_once_with(
            parent=LOCATION_PATH,
            dataset=DATASET,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.import_data")
    def test_import_dataset(self, mock_import_data):
        self.hook.import_data(
            dataset_id=DATASET_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            input_config=INPUT_CONFIG,
        )
        mock_import_data.assert_called_once_with(
            name=DATASET_PATH,
            input_config=INPUT_CONFIG,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_column_specs")
    def test_list_column_specs(self, mock_list_column_specs):
        table_spec = "table_spec_id"
        filter_ = "filter"
        page_size = 42
        self.hook.list_column_specs(
            dataset_id=DATASET_ID,
            table_spec_id=table_spec,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            field_mask=MASK,
            filter_=filter_,
            page_size=page_size,
        )
        parent = AutoMlClient.table_spec_path(GCP_PROJECT_ID, GCP_LOCATION, DATASET_ID, table_spec)
        mock_list_column_specs.assert_called_once_with(
            parent=parent,
            field_mask=MASK,
            filter_=filter_,
            page_size=page_size,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.get_model")
    def test_get_model(self, mock_get_model):
        self.hook.get_model(model_id=MODEL_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_get_model.assert_called_once_with(name=MODEL_PATH, retry=None, timeout=None, metadata=None)
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.delete_model")
    def test_delete_model(self, mock_delete_model):
        self.hook.delete_model(model_id=MODEL_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_delete_model.assert_called_once_with(name=MODEL_PATH, retry=None, timeout=None, metadata=None)
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.update_dataset")
    def test_update_dataset(self, mock_update_dataset):
        self.hook.update_dataset(
            dataset=DATASET,
            update_mask=MASK,
        )
        mock_update_dataset.assert_called_once_with(
            dataset=DATASET, update_mask=MASK, retry=None, timeout=None, metadata=None
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.deploy_model")
    def test_deploy_model(self, mock_deploy_model):
        image_detection_metadata = {}
        self.hook.deploy_model(
            model_id=MODEL_ID,
            image_detection_metadata=image_detection_metadata,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
        )
        mock_deploy_model.assert_called_once_with(
            name=MODEL_PATH,
            retry=None,
            timeout=None,
            metadata=None,
            image_object_detection_model_deployment_metadata=image_detection_metadata,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_table_specs")
    def test_list_table_specs(self, mock_list_table_specs):
        filter_ = "filter"
        page_size = 42
        self.hook.list_table_specs(
            dataset_id=DATASET_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            filter_=filter_,
            page_size=page_size,
        )
        mock_list_table_specs.assert_called_once_with(
            parent=DATASET_PATH,
            filter_=filter_,
            page_size=page_size,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_datasets")
    def test_list_datasets(self, mock_list_datasets):
        self.hook.list_datasets(location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_list_datasets.assert_called_once_with(
            parent=LOCATION_PATH, retry=None, timeout=None, metadata=None
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.delete_dataset")
    def test_delete_dataset(self, mock_delete_dataset):
        self.hook.delete_dataset(dataset_id=DATASET_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_delete_dataset.assert_called_once_with(
            name=DATASET_PATH, retry=None, timeout=None, metadata=None
        )
| 37.788462 | 107 | 0.695064 |
ee74ab47d4e097f20012558a1e5a914b9b0c6ba2 | 567 | py | Python | dnachisel/Specification/SpecificationSet.py | Edinburgh-Genome-Foundry/DnaChisel | fed4613cee67c22326a5f654ecf8ff0490298359 | [
"MIT"
] | 124 | 2017-11-14T14:42:25.000Z | 2022-03-31T08:02:07.000Z | dnachisel/Specification/SpecificationSet.py | Edinburgh-Genome-Foundry/DnaChisel | fed4613cee67c22326a5f654ecf8ff0490298359 | [
"MIT"
] | 65 | 2017-11-15T07:25:38.000Z | 2022-01-31T10:38:45.000Z | dnachisel/Specification/SpecificationSet.py | Edinburgh-Genome-Foundry/DnaChisel | fed4613cee67c22326a5f654ecf8ff0490298359 | [
"MIT"
] | 31 | 2018-10-18T12:59:47.000Z | 2022-02-11T16:54:43.000Z | class SpecificationSet:
"""Generic class for writing Specs which are actually made of more specs.
Behaves as a Specification when it comes to instantiation, reading it
from annotated records, etc. but the initialization actually creates a
dictionary of standard Specifications in the DNAOptimizationProblem
"""
def register_specifications(self, specifications):
    """Adopt *specifications* ({name: spec}) as children of this set.

    Each child spec is given a back-reference to this parent and the name
    it is registered under; the mapping itself is stored as-is.
    """
    self.specifications = specifications
    for child_name, child_spec in specifications.items():
        child_spec.parent_specification = self
        child_spec.name_in_parent = child_name
| 40.5 | 77 | 0.731922 |
58a23099c324bbfdb2b3e48a798044e1715bd1e6 | 399 | py | Python | DSBD_Server/asgi.py | claudi47/DSBD_Server | a54192655042cdf8ff917db7a22b07bc83d07a0d | [
"MIT"
] | 1 | 2021-12-29T18:12:07.000Z | 2021-12-29T18:12:07.000Z | DSBD_Server/asgi.py | claudi47/DSBD_Server | a54192655042cdf8ff917db7a22b07bc83d07a0d | [
"MIT"
] | null | null | null | DSBD_Server/asgi.py | claudi47/DSBD_Server | a54192655042cdf8ff917db7a22b07bc83d07a0d | [
"MIT"
] | null | null | null | """
ASGI config for DSBD_Server project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project's settings module before the application is
# built (setdefault leaves any externally-provided value untouched).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DSBD_Server.settings')

# Module-level ASGI callable that ASGI servers (uvicorn/daphne) look up.
application = get_asgi_application()
| 23.470588 | 78 | 0.789474 |
773083b3fe3ee029efc9588d78813e106b583d07 | 2,365 | py | Python | tests/communication/test_decorator.py | yourmoonlight/maro | 4fbe556f3ae1817995f90cb529e9ca6191f67d7f | [
"MIT"
] | 1 | 2020-09-22T12:13:32.000Z | 2020-09-22T12:13:32.000Z | tests/communication/test_decorator.py | chaosddp/maro | 3d6715649467d49a83886c1fd4ae9b41ff012a50 | [
"MIT"
] | null | null | null | tests/communication/test_decorator.py | chaosddp/maro | 3d6715649467d49a83886c1fd4ae9b41ff012a50 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import subprocess
import sys
import threading
import unittest
from concurrent.futures import ThreadPoolExecutor, as_completed
from maro.communication import Proxy, SessionMessage, dist
from tests.communication.utils import get_random_port, proxy_generator
def handler_function(that, proxy, message):
    """Reply to *message* with its counter incremented, then terminate.

    The sys.exit makes the receiver component shut down after handling a
    single message (this handler is used by the one-shot decorator test).
    """
    new_count = message.payload["counter"] + 1
    proxy.reply(message, payload={"counter": new_count})
    sys.exit(0)
def lunch_receiver(handler_dict, redis_port):
    # NOTE(review): name looks like a typo for "launch_receiver"; kept as-is
    # because TestDecorator.setUpClass references it by this exact name.
    # Builds a "receiver" proxy, wraps a bare class with the @dist decorator
    # so handler_dict's handlers are dispatched, and blocks in launch().
    proxy = proxy_generator("receiver", redis_port)

    @dist(proxy, handler_dict)
    class Receiver:
        def __init__(self):
            pass

    receiver = Receiver()
    receiver.launch()
@unittest.skipUnless(os.environ.get("test_with_redis", False), "require redis")
class TestDecorator(unittest.TestCase):
    """End-to-end test of the @dist decorator over a local Redis broker.

    A receiver component is launched in a background thread; the test then
    sends it a message and checks the handler's incremented reply.
    """

    @classmethod
    def setUpClass(cls):
        print(f"The dist decorator unit test start!")
        # Initialize the Redis.
        # NOTE(review): the server is daemonized, so wait() only waits for
        # the forking parent to return — presumably Redis is up by the time
        # the proxies connect; confirm on slow CI machines.
        redis_port = get_random_port()
        cls.redis_process = subprocess.Popen(["redis-server", "--port", str(redis_port), "--daemonize yes"])
        cls.redis_process.wait()

        # Initialize the receiver.
        # The receiver runs in a daemon-less thread and exits after one
        # message (handler_function calls sys.exit).
        conditional_event = "sender:*:1"
        handler_dict = {conditional_event: handler_function}
        decorator_task = threading.Thread(target=lunch_receiver, args=(handler_dict, redis_port,))
        decorator_task.start()

        # Initialize the sender proxy.
        with ThreadPoolExecutor() as executor:
            sender_task = executor.submit(proxy_generator, "sender", redis_port)
            cls.sender_proxy = sender_task.result()

    @classmethod
    def tearDownClass(cls) -> None:
        print(f"The dist decorator unit test finished!")
        # redis_process may be missing if setUpClass failed early.
        if hasattr(cls, "redis_process"):
            cls.redis_process.kill()

    def test_decorator(self):
        # The handler replies with payload["counter"] + 1.
        message = SessionMessage(
            tag="unittest",
            source=TestDecorator.sender_proxy.component_name,
            destination=TestDecorator.sender_proxy.peers_name["receiver"][0],
            payload={"counter": 0}
        )
        replied_message = TestDecorator.sender_proxy.send(message)
        self.assertEqual(message.payload["counter"] + 1, replied_message[0].payload["counter"])
if __name__ == "__main__":
unittest.main()
| 31.959459 | 108 | 0.687949 |
48acaa8a53e7e0ce5503bfefe8fd216cb2c93dc2 | 2,109 | py | Python | idao/data_module.py | Blue-Watermelon/MLhep2021-Competition2 | 2f02e133b95f2d6669b26f16cb2dfb8f1b1866f7 | [
"Apache-2.0"
] | null | null | null | idao/data_module.py | Blue-Watermelon/MLhep2021-Competition2 | 2f02e133b95f2d6669b26f16cb2dfb8f1b1866f7 | [
"Apache-2.0"
] | null | null | null | idao/data_module.py | Blue-Watermelon/MLhep2021-Competition2 | 2f02e133b95f2d6669b26f16cb2dfb8f1b1866f7 | [
"Apache-2.0"
] | null | null | null | import pathlib as path
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from .dataloader import IDAODataset, img_loader, InferenceDataset
class IDAODataModule(pl.LightningDataModule):
    """Lightning data module for the IDAO dataset.

    Builds the labelled train/val datasets from ``<data_dir>/train`` and an
    unlabelled inference dataset from ``<data_dir>/test``.
    """

    def __init__(self, data_dir: path.Path, batch_size: int, cfg):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        # cfg is subscripted like configparser/dict: cfg["DATA"]["Extension"]
        self.cfg = cfg

    def prepare_data(self):
        # called only on 1 GPU
        # NOTE(review): Lightning recommends not assigning state here
        # (prepare_data runs on a single process); setup() relies on
        # self.dataset existing — confirm single-node usage.
        self.dataset = IDAODataset(
            root=self.data_dir.joinpath("train"),
            loader=img_loader,
            transform=transforms.Compose(
                [transforms.ToTensor(), transforms.CenterCrop(120)]
            ),
            # TODO(kazeevn) use idiomatic torch
            # Maps class index 0 -> one-hot [0, 1], otherwise [1, 0].
            target_transform=transforms.Compose(
                [
                    lambda num: (
                        torch.tensor([0, 1]) if num == 0 else torch.tensor([1, 0])
                    )
                ]
            ),
            extensions=self.cfg["DATA"]["Extension"],
        )
        self.test = InferenceDataset(
            main_dir=self.data_dir.joinpath("test"),
            loader=img_loader,
            transform=transforms.Compose(
                [transforms.ToTensor(), transforms.CenterCrop(120)]
            ),
        )

    def setup(self, stage=None):
        # called on every GPU
        # NOTE(review): split sizes are hard-coded, which assumes the train
        # set has exactly 13404 samples; fixed seed keeps the split stable.
        self.train, self.val = random_split(
            self.dataset, [10000, 3404], generator=torch.Generator().manual_seed(666)
        )

    def train_dataloader(self):
        return DataLoader(self.train, self.batch_size, shuffle=True, num_workers=4)

    def val_dataloader(self):
        return DataLoader(self.val, self.batch_size, num_workers=4, shuffle=False)
        # return DataLoader(self.val, 1, num_workers=0, shuffle=False)

    def test_dataloader(self):
        return DataLoader(
            self.test,
            self.batch_size,
            num_workers=0,
            shuffle=False
        )
| 31.477612 | 85 | 0.573732 |
46b3623c5759a430f86a170c16f3f4351c3ab5d1 | 12,252 | py | Python | dragonflow/switch/drivers/ovs/datapath.py | anlaneg/dragonflow | f684fd721cb953f4d0320725d708e79c9f35ef6c | [
"Apache-2.0"
] | null | null | null | dragonflow/switch/drivers/ovs/datapath.py | anlaneg/dragonflow | f684fd721cb953f4d0320725d708e79c9f35ef6c | [
"Apache-2.0"
] | null | null | null | dragonflow/switch/drivers/ovs/datapath.py | anlaneg/dragonflow | f684fd721cb953f4d0320725d708e79c9f35ef6c | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
from os import path

from oslo_log import log
from oslo_serialization import jsonutils
import stevedore

from dragonflow._i18n import _
from dragonflow import conf as cfg
from dragonflow.controller import app_base
from dragonflow.controller import datapath_layout as dp_layout
LOG = log.getLogger(__name__)
# OpenFlow register-like fields available for allocation: applications'
# public/private variables are mapped onto these during set_up().
REGS = frozenset((
    'reg0',
    'reg1',
    'reg2',
    'reg3',
    'reg4',
    'reg5',
    'reg6',
    'reg7',
    'metadata',
))
def _sequence_generator(offset):
while True:
yield offset
offset += 1
class Datapath(object):
    """
    Given the layout (e.g. from the config file), instantiate all the
    applications in the datapath (vertices), and connect them (edges).
    Instantiation includes allocating OpenFlow tables and registers.
    Connection includes wiring and mapping the registers
    """
    def __init__(self, layout):
        """:param layout: graph of vertices (apps) and edges (connections)"""
        self._layout = layout
        self._dp_allocs = {}          # vertex name -> DpAlloc
        self._public_variables = set()
        self.apps = None              # vertex name -> app instance (set_up)
        # FIXME(oanson) remove when done porting
        self._dp_allocs[dp_layout.LEGACY_APP] = self._create_legacy_dp_alloc()

    def _create_legacy_dp_alloc(self):
        # Create all possible exits and entries
        # Legacy apps address tables by raw number, so every table below the
        # auto-allocation offset doubles as both an entry and an exit point.
        table_offset = cfg.CONF.df.datapath_autoalloc_table_offset
        return app_base.DpAlloc(
            states=(),
            entrypoints={str(x): x for x in range(table_offset)},
            exitpoints={str(x): x for x in range(table_offset)},
            full_mapping={
                'source_port_key': 'reg6',
                'destination_port_key': 'reg7',
                'network_key': 'metadata',
            }
        )

    def set_up(self, os_ken_base, switch_backend, nb_api, neutron_notifier):
        """
        Instantiate the application classes.
        Instantiate the applications (Including table and register allocation)
        Wire the applications (including translating registers)
        """
        self.clear_old_set_up()
        self._dp = os_ken_base.datapath
        self._table_generator = _sequence_generator(
            cfg.CONF.df.datapath_autoalloc_table_offset)
        self._public_variables.clear()

        app_classes = {}
        self.apps = {}

        # First pass: load each app class once and collect the union of all
        # public variable names, so every DpAlloc can map all of them.
        for vertex in self._layout.vertices:
            if vertex.type in app_classes:
                continue
            app_class = self._get_app_class(vertex.type)
            app_classes[vertex.type] = app_class
            self._public_variables.update(
                app_class._specification.public_mapping.keys(),
            )

        # Second pass: allocate tables/registers and instantiate each app.
        for vertex in self._layout.vertices:
            app_class = app_classes[vertex.type]
            dp_alloc = self._create_dp_alloc(app_class._specification)
            self.log_datapath_allocation(vertex.name, dp_alloc)
            self._dp_allocs[vertex.name] = dp_alloc
            app = app_class(api=os_ken_base,
                            switch_backend=switch_backend,
                            nb_api=nb_api,
                            neutron_server_notifier=neutron_notifier,
                            dp_alloc=dp_alloc,
                            **(vertex.params or {})
                            )
            self.apps[vertex.name] = app

        self.write_datapath_allocation()

        for name, app in self.apps.items():
            try:
                app.initialize()
            except Exception:
                # Best-effort: one bad app must not abort the whole datapath.
                LOG.exception('Failed to initialize %s (%s)', name, app)

        for edge in self._layout.edges:
            self._install_edge(edge)

    def clear_old_set_up(self):
        """Unregister table handlers left over from a previous set_up()."""
        if self.apps:
            for name, app in self.apps.items():
                dp_alloc = self._dp_allocs[name]
                for state_name, table_num in dp_alloc.states.items():
                    app.api.unregister_table_handler(table_num)

    def _get_app_class(self, app_type):
        """Get an application class (Python class) by app name"""
        mgr = stevedore.NamedExtensionManager(
            'dragonflow.controller.apps',
            [app_type],
            invoke_on_load=False,
        )
        # for/else: the else only runs when the manager matched no
        # extensions (return exits the loop on the first match).
        for ext in mgr:
            return ext.plugin
        else:
            raise RuntimeError(_('Failed to load app {0}').format(app_type))

    def _create_dp_alloc(self, specification):
        """
        Allocate the tables and registers for the given application (given
        by its specification)
        """
        public_mapping = specification.public_mapping.copy()
        unmapped_vars = self._public_variables.difference(public_mapping)
        # Convert to set() so the result won't be a frozenset()
        unmapped_regs = set(REGS).difference(
            public_mapping.values(),
        ).difference(
            specification.private_mapping.values(),
        )

        # Greedily assign the remaining variables to free registers.
        while unmapped_vars and unmapped_regs:
            public_mapping[unmapped_vars.pop()] = unmapped_regs.pop()

        if unmapped_vars:
            raise RuntimeError(
                _("Can't allocate enough registers for variables"),
            )

        # Each state gets its own fresh table number.
        states_dict = {
            state: next(self._table_generator)
            for state in specification.states
        }
        states = app_base.AttributeDict(**states_dict)

        exitpoints_dict = {
            exit.name: next(self._table_generator)
            for exit in specification.exitpoints
        }
        exitpoints = app_base.AttributeDict(**exitpoints_dict)

        # Entrypoints alias the table of the state they target.
        entrypoints_dict = {
            entry.name: states[entry.target]
            for entry in specification.entrypoints
        }
        entrypoints = app_base.AttributeDict(**entrypoints_dict)

        return app_base.DpAlloc(
            states=states,
            exitpoints=exitpoints,
            entrypoints=entrypoints,
            full_mapping=public_mapping,
        )

    def _get_connector_config(self, connector):
        """Return the DpAlloc of the vertex a connector belongs to."""
        return self._dp_allocs[connector.vertex]

    def _install_edge(self, edge):
        """
        Wire two applications. Infer the translation of metadata fields,
        and install the actions/instructions to pass a packet from one
        application's exit point to another's entry point
        """
        exitpoint = edge.exitpoint
        exit_config = self._get_connector_config(exitpoint)
        entrypoint = edge.entrypoint
        entry_config = self._get_connector_config(entrypoint)
        translations = []
        # Only variables that live in different registers on the two sides
        # need a translation.
        for var in self._public_variables:
            exit_reg = exit_config.full_mapping[var]
            entry_reg = entry_config.full_mapping[var]
            if exit_reg == entry_reg:
                continue
            translations.append(
                (exit_reg, entry_reg),
            )
        self._install_goto(
            # Source
            exit_config.exitpoints[exitpoint.name],
            # Destination
            entry_config.entrypoints[entrypoint.name],
            translations,
        )

    def _install_goto(self, source, dest, translations):
        """
        Install the actions/instructions to pass a packet from one
        application's exit point to another's entry point, including
        translating the metadata fields.
        """
        ofproto = self._dp.ofproto
        parser = self._dp.ofproto_parser
        actions = []
        # zip(*[]) raises ValueError; treat "no translations" as empty pairs.
        try:
            from_regs, to_regs = zip(*translations)
        except ValueError:
            from_regs, to_regs = ((), ())
        # Push all register values
        for reg in from_regs:
            actions.append(
                parser.NXActionStackPush(field=reg, start=0, end=32),
            )
        # Pop into target registers in reverse order
        for reg in reversed(to_regs):
            actions.append(
                parser.NXActionStackPop(field=reg, start=0, end=32),
            )
        # OpenFlow Goto-Table may only jump to a higher-numbered table, so
        # backward (or same-table) jumps use Nicira resubmit instead.
        if source < dest:
            instructions = [
                parser.OFPInstructionActions(
                    ofproto.OFPIT_APPLY_ACTIONS,
                    actions,
                ),
                parser.OFPInstructionGotoTable(dest),
            ]
        else:
            actions.append(parser.NXActionResubmitTable(table_id=dest))
            instructions = [
                parser.OFPInstructionActions(
                    ofproto.OFPIT_APPLY_ACTIONS,
                    actions,
                ),
            ]
        message = parser.OFPFlowMod(
            self._dp,
            table_id=source,
            command=ofproto.OFPFC_ADD,
            match=parser.OFPMatch(),
            instructions=instructions,
        )
        self._dp.send_msg(message)

    def log_datapath_allocation(self, name, dp_alloc):
        """
        Log the dp_alloc object (The allocation of tables, registers, etc.) for
        the given application
        """
        LOG.debug("Application: %s", name)
        LOG.debug("\tStates:")
        for state, table_num in dp_alloc.states.items():
            LOG.debug("\t\t%s: %s", state, table_num)
        LOG.debug("\tEntrypoints:")
        for entry_name, table_num in dp_alloc.entrypoints.items():
            LOG.debug("\t\t%s: %s", entry_name, table_num)
        LOG.debug("\tExitpoints:")
        for exit_name, table_num in dp_alloc.exitpoints.items():
            LOG.debug("\t\t%s: %s", exit_name, table_num)
        LOG.debug("\tMapping:")
        for var, reg in dp_alloc.full_mapping.items():
            LOG.debug("\t\t%s: %s", var, reg)

    def write_datapath_allocation(self):
        """Dump all DpAllocs to the configured JSON file (best effort)."""
        if not cfg.CONF.df.write_datapath_allocation:
            return
        dppath = cfg.CONF.df.datapath_allocation_output_path
        if (path.isfile(dppath) and
                not cfg.CONF.df.overwrite_datapath_allocation_output_path):
            LOG.warning("File %s exists, but cannot overwrite", dppath)
            return
        try:
            with open(dppath, 'w') as f:
                dp_allocs = self._get_dp_allocs_basic_dictionary()
                jsonutils.dump(dp_allocs, f)
        except IOError:
            LOG.exception("Cannot open file %s", dppath)

    def _get_dp_allocs_basic_dictionary(self):
        """Return {vertex name: plain-dict dp_alloc} for serialization."""
        return {key: self._dp_alloc_to_dict(value)
                for key, value in self._dp_allocs.items()}

    def _dp_alloc_to_dict(self, dpalloc):
        """Convert a DpAlloc's AttributeDicts into JSON-friendly dicts."""
        return {
            'states': dict(dpalloc.states),
            'entrypoints': dict(dpalloc.entrypoints),
            'exitpoints': dict(dpalloc.exitpoints),
            'full_mapping': dict(dpalloc.full_mapping),
        }
# Read-back form of a datapath allocation dump (see load_datapath_* below).
# LoadedDatapath.apps: dict: vertex name -> dpalloc (of AttributeDict)
LoadedDatapath = collections.namedtuple('LoadedDatapath', ('apps',))
def _parse_dp_alloc(dpalloc):
    """Rebuild an ``app_base.DpAlloc`` from its plain-dict (JSON) form.

    Missing sections default to empty AttributeDicts.
    """
    return app_base.DpAlloc(
        states=app_base.AttributeDict(dpalloc.get('states', ())),
        exitpoints=app_base.AttributeDict(dpalloc.get('exitpoints', ())),
        entrypoints=app_base.AttributeDict(dpalloc.get('entrypoints', ())),
        full_mapping=app_base.AttributeDict(dpalloc.get('full_mapping', ())),
    )
def load_datapath_from_dict(dp_allocs_dict):
    """Build a LoadedDatapath from a {app name: dp_alloc dict} mapping."""
    parsed_allocs = {}
    for app_name, dpalloc in dp_allocs_dict.items():
        parsed_allocs[app_name] = _parse_dp_alloc(dpalloc)
    return LoadedDatapath(apps=parsed_allocs)
def load_datapath_from_file_stream(stream):
    """Load a datapath allocation dump from an open JSON file object."""
    dp_allocs_dict = jsonutils.load(stream)
    return load_datapath_from_dict(dp_allocs_dict)
def load_datapath_from_file_name(dppath):
    """Load a datapath allocation dump from the file at *dppath*."""
    with open(dppath, 'r') as f:
        return load_datapath_from_file_stream(f)
def load_datapath():
    """Load the dump written by Datapath.write_datapath_allocation, if any.

    Returns None when writing is disabled (and the file is assumed absent).
    """
    if not cfg.CONF.df.write_datapath_allocation:
        # We assume that if the DF-controller is not allowed to write the file,
        # then it would not exist. Otherwise, change the config or use one of
        # the other methods available above.
        return
    dppath = cfg.CONF.df.datapath_allocation_output_path
    return load_datapath_from_file_name(dppath)
| 33.845304 | 79 | 0.609941 |
193a1372cbc445b408097d34b26a662027c0d18e | 8,780 | py | Python | liteasr/models/transducer.py | Nazukixv/LiteASR | 8ee0ef2139acc7c9185b7fecc4b0283f31499830 | [
"MIT"
] | null | null | null | liteasr/models/transducer.py | Nazukixv/LiteASR | 8ee0ef2139acc7c9185b7fecc4b0283f31499830 | [
"MIT"
] | null | null | null | liteasr/models/transducer.py | Nazukixv/LiteASR | 8ee0ef2139acc7c9185b7fecc4b0283f31499830 | [
"MIT"
] | null | null | null | """Transducer."""
from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from typing import List
from omegaconf import II
from omegaconf import MISSING
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from liteasr.config import LiteasrDataclass
from liteasr.models import LiteasrModel
from liteasr.models import register_model
from liteasr.nets.initialization import lecun_normal_init_parameters
from liteasr.nets.initialization import set_forget_bias_to_one
from liteasr.nets.rnn_decoder import RNNDecoder
from liteasr.nets.transformer_encoder import TransformerEncoder
from liteasr.utils.mask import padding_mask
@dataclass(eq=False)
class Hypothesis:
    """One partial decoding hypothesis in transducer beam search.

    Plain data container; converted to a dataclass (the module already
    imports ``dataclass``). ``eq=False`` is essential: it keeps the default
    identity-based ``__eq__``, which ``hyps.remove(hyp_max)`` in
    ``Transducer.inference`` relies on — field-wise comparison would try to
    compare Tensors and raise on ambiguous truth values.
    """

    score: float          # cumulative log-probability of this hypothesis
    yseq: List[int]       # emitted token ids, starting with blank (0)
    str_yseq: str         # "_"-joined yseq, used as the decoder-state cache key
    state_h: List[Tensor] # decoder LSTM hidden states, one per layer
    state_c: List[Tensor] # decoder LSTM cell states, one per layer
class EncoderArch(Enum):
    """Supported encoder architectures; values match config-file strings."""
    Transformer = "transformer"
    Conformer = "conformer"
class DecoderArch(Enum):
    """Supported decoder architectures; values match config-file strings."""
    LSTM = "lstm"
@dataclass
class TransducerConfig(LiteasrDataclass):
    """Configuration for the Transducer model.

    Fields defaulting to ``MISSING`` (input_dim, vocab_size) are filled in
    from the task by ``Transducer.build_model``. ``II("model.dropout_rate")``
    interpolates the shared dropout rate into the encoder/decoder rates.
    """

    # transducer
    joint_dim: int = field(default=768)
    dropout_rate: float = field(default=0.0)

    # encoder
    enc_arch: EncoderArch = field(default=EncoderArch.Transformer)
    use_rel: bool = field(default=True)  # relative positional encoding
    input_dim: int = field(default=MISSING)
    enc_dim: int = field(default=256)
    enc_ff_dim: int = field(default=2048)
    enc_attn_heads: int = field(default=4)
    enc_dropout_rate: float = II("model.dropout_rate")
    enc_layers: int = field(default=4)
    activation: str = field(default="relu")

    # decoder
    dec_arch: DecoderArch = field(default=DecoderArch.LSTM)
    vocab_size: int = field(default=MISSING)
    dec_dim: int = field(default=256)
    dec_units: int = field(default=2048)
    dec_dropout_rate: float = II("model.dropout_rate")
    dec_layers: int = field(default=2)
@register_model("transducer", dataclass=TransducerConfig)
class Transducer(LiteasrModel):
def __init__(self, cfg: TransducerConfig, task=None):
super().__init__()
if cfg.enc_arch in [EncoderArch.Transformer, EncoderArch.Conformer]:
self.encoder = TransformerEncoder(
use_rel=cfg.use_rel,
i_dim=cfg.input_dim,
h_dim=cfg.enc_dim,
ff_dim=cfg.enc_ff_dim,
n_head=cfg.enc_attn_heads,
n_layer=cfg.enc_layers,
dropout_rate=cfg.enc_dropout_rate,
arch=cfg.enc_arch.value,
)
if cfg.dec_arch == DecoderArch.LSTM:
self.decoder = RNNDecoder(
i_dim=cfg.vocab_size,
h_dim=cfg.dec_dim,
h_units=cfg.dec_units,
n_layer=cfg.dec_layers,
dropout_rate=cfg.dec_dropout_rate,
)
self.lin_enc = nn.Linear(cfg.enc_dim, cfg.joint_dim)
self.lin_dec = nn.Linear(cfg.dec_units, cfg.joint_dim, bias=False)
self.lin_jnt = nn.Linear(cfg.joint_dim, cfg.vocab_size)
self.joint_activation = nn.Tanh()
self.ignore = -1
self._init_module()
def forward(
self,
xs,
xlens: List[int],
ys,
ylens: List[int],
):
"""Forward function of Transducer.
:param Tensor xs: Padded audio input with shape (`Batch`, `Tmax`, `*`)
:param xlens: Time duration of x (`Batch`)
:type xlens: List[int]
:param Tensor ys: Padded token indices with shape (`Batch`, `Lmax`)
:param Tensor ylens: Length of transcripted text of y (`Batch`)
:type ylens: List[int]
:return: Joint tensor (`Batch`, `Fmax`, `Lmax` + 1, `Vocab`)
:rtype: Tensor
"""
xs_in, ys_in, xs_mask, ys_mask = self._preprocess(xs, xlens, ys, ylens)
h_enc = self.encoder(xs_in, mask=xs_mask).unsqueeze(2)
h_dec = self.decoder(ys_in).unsqueeze(1)
h_jnt = self.joint(h_enc, h_dec)
return h_jnt
def inference(self, x):
# implement beam search
h = self.encoder(x) # (1, time, 83) -> (1, frame, 256)
state_h, state_c = self.decoder.init_state(h)
blank_tensor = torch.zeros(1, dtype=torch.long, device=h.device)
cache = {}
init_hyp = Hypothesis(0.0, [0], "0", state_h, state_c)
kept_hyps: List[Hypothesis] = [init_hyp]
hyps: List[Hypothesis] = []
for i, hi in enumerate(h[0]):
hyps = kept_hyps
kept_hyps = []
while True:
hyp_max = max(hyps, key=lambda hyp: hyp.score)
hyps.remove(hyp_max)
if hyp_max.str_yseq in cache:
y, state_h, state_c = cache[hyp_max.str_yseq]
else:
token = torch.full(
(1, 1),
hyp_max.yseq[-1],
dtype=torch.long,
device=h.device
)
token_embed = self.decoder.embed(token)
y, state_h, state_c = self.decoder.rnn_forward(
token_embed[:, 0, :], hyp_max.state_h, hyp_max.state_c
)
cache[hyp_max.str_yseq] = (y, state_h, state_c)
ytu = F.log_softmax(self.joint(hi, y[0]), dim=-1)
top_k = ytu[1:].topk(10, dim=-1)
ytu = (
torch.cat((top_k[0], ytu[0:1])),
torch.cat((top_k[1] + 1, blank_tensor)),
)
for logp, k in zip(*ytu):
new_hyp = Hypothesis(
score=hyp_max.score + float(logp),
yseq=hyp_max.yseq[:],
str_yseq=hyp_max.str_yseq,
state_h=hyp_max.state_h,
state_c=hyp_max.state_c,
)
if k == 0:
kept_hyps.append(new_hyp)
else:
new_hyp.state_h = state_h
new_hyp.state_c = state_c
new_hyp.yseq.append(int(k))
new_hyp.str_yseq = hyp_max.str_yseq + "_" + str(k)
hyps.append(new_hyp)
# max_hyp_score = float(max(hyps, key=lambda x: x.score).score)
# kept_most_prob = sorted(
# [hyp for hyp in kept_hyps if hyp.score > max_hyp_score],
# key=lambda x: x.score,
# )
# if len(kept_most_prob) >= 10:
# kept_hyps = kept_most_prob
# break
if len(kept_hyps) >= 10:
break
best_hyp = sorted(
kept_hyps, key=lambda x: x.score / len(x.yseq), reverse=True
)[0]
return best_hyp.yseq
def get_pred_len(self, xlens):
pred_len = torch.tensor(xlens)
pred_len = ((pred_len - 1) // 2 - 1) // 2
return pred_len
def get_target(self, ys, ylens):
target = ys
return target
def get_target_len(self, ylens):
target_len = torch.tensor(ylens)
return target_len
def joint(self, h_enc, h_dec):
h_enc = self.lin_enc(h_enc)
h_dec = self.lin_dec(h_dec)
h_jnt = self.lin_jnt(self.joint_activation(h_enc + h_dec))
return h_jnt
def _preprocess(
self,
xs,
xlens: List[int],
ys,
ylens: List[int],
):
# xs_in
xs_in = xs
# xs_mask
xs_mask = padding_mask(xlens).to(device=xs.device)
# ys_in
ys_ = ys.masked_fill(ys == self.ignore, 0)
blank = torch.zeros(ys.size(0), 1).to(dtype=ys.dtype, device=ys.device)
ys_in = torch.cat([blank, ys_], dim=1)
# ys_mask
ys_mask = padding_mask([yl + 1 for yl in ylens]).to(device=ys.device)
return xs_in, ys_in, xs_mask, ys_mask
def _init_module(self):
# TODO: support dynamic init afterwards
lecun_normal_init_parameters(self.decoder)
lecun_normal_init_parameters(self.lin_enc)
lecun_normal_init_parameters(self.lin_dec)
lecun_normal_init_parameters(self.lin_jnt)
self.decoder.embed.weight.data.normal_(0, 1)
for i in range(len(self.decoder.dec_layers)):
set_forget_bias_to_one(self.decoder.dec_layers[i].bias_ih)
@classmethod
def build_model(cls, cfg, task=None):
cfg.input_dim = task.feat_dim
cfg.vocab_size = task.vocab_size
return cls(cfg, task)
| 32.639405 | 79 | 0.569134 |
8e47543d2043ae7eb97347f528838ca64b73feca | 4,350 | py | Python | code/platforms/linux/edit.py | joshpearce/knausj_talon | 44c49806c6e53b2e5fe90fc24fd06a1fc5125883 | [
"MIT"
] | 3 | 2020-04-07T10:44:31.000Z | 2022-01-30T17:04:14.000Z | code/platforms/linux/edit.py | joshpearce/knausj_talon | 44c49806c6e53b2e5fe90fc24fd06a1fc5125883 | [
"MIT"
] | null | null | null | code/platforms/linux/edit.py | joshpearce/knausj_talon | 44c49806c6e53b2e5fe90fc24fd06a1fc5125883 | [
"MIT"
] | 1 | 2021-05-26T14:43:11.000Z | 2021-05-26T14:43:11.000Z | # defines the default edit actions for linux
from talon import Context, actions
# Talon context that activates these edit-action implementations on Linux.
ctx = Context()
ctx.matches = r"""
os: linux
"""
@ctx.action_class('edit')
class EditActions:
    """Default `edit.*` action implementations for Linux.

    Most actions map directly to common GTK/readline-style keyboard
    shortcuts. Commented-out `#action(...)` lines are actions with no
    Linux default here.

    Fix: `find()` previously called `actions.actions.insert(text)` — an
    invalid attribute chain — and inserted even when `text` was None; it
    now calls `actions.insert(text)` only when text is given.
    """
    def copy():
        actions.key('ctrl-c')
    def cut():
        actions.key('ctrl-x')
    def delete():
        actions.key('backspace')
    def delete_line():
        actions.edit.select_line()
        actions.edit.delete()
    #action(edit.delete_paragraph):
    #action(edit.delete_sentence):
    def delete_word():
        actions.edit.select_word()
        actions.edit.delete()
    def down():
        actions.key('down')
    #action(edit.extend_again):
    #action(edit.extend_column):
    def extend_down():
        actions.key('shift-down')
    def extend_file_end():
        actions.key('shift-ctrl-end')
    def extend_file_start():
        actions.key('shift-ctrl-home')
    def extend_left():
        actions.key('shift-left')
    #action(edit.extend_line):
    def extend_line_down():
        actions.key('shift-down')
    def extend_line_end():
        actions.key('shift-end')
    def extend_line_start():
        actions.key('shift-home')
    def extend_line_up():
        actions.key('shift-up')
    def extend_page_down():
        actions.key('shift-pagedown')
    def extend_page_up():
        actions.key('shift-pageup')
    #action(edit.extend_paragraph_end):
    #action(edit.extend_paragraph_next()):
    #action(edit.extend_paragraph_previous()):
    #action(edit.extend_paragraph_start()):
    def extend_right():
        actions.key('shift-right')
    #action(edit.extend_sentence_end):
    #action(edit.extend_sentence_next):
    #action(edit.extend_sentence_previous):
    #action(edit.extend_sentence_start):
    def extend_up():
        actions.key('shift-up')
    def extend_word_left():
        actions.key('ctrl-shift-left')
    def extend_word_right():
        actions.key('ctrl-shift-right')
    def file_end():
        actions.key('ctrl-end')
    def file_start():
        actions.key('ctrl-home')
    def find(text: str = None):
        actions.key('ctrl-f')
        # was: actions.actions.insert(text) — `actions` has no `actions`
        # attribute; also avoid inserting the literal None.
        if text:
            actions.insert(text)
    def find_next():
        actions.key('f3')
    #action(edit.find_previous):
    def indent_less():
        actions.key('home delete')
    def indent_more():
        actions.key('home tab')
    #action(edit.jump_column(n: int)
    #action(edit.jump_line(n: int)
    def left():
        actions.key('left')
    def line_down():
        actions.key('down home')
    def line_end():
        actions.key('end')
    def line_insert_down():
        actions.key('end enter')
    def line_insert_up():
        actions.key('home enter up')
    def line_start():
        actions.key('home')
    def line_up():
        actions.key('up home')
    #action(edit.move_again):
    def page_down():
        actions.key('pagedown')
    def page_up():
        actions.key('pageup')
    #action(edit.paragraph_end):
    #action(edit.paragraph_next):
    #action(edit.paragraph_previous):
    #action(edit.paragraph_start):
    def paste():
        actions.key('ctrl-v')
    #action(paste_match_style):
    def print():
        actions.key('ctrl-p')
    def redo():
        actions.key('ctrl-y')
    def right():
        actions.key('right')
    def save():
        actions.key('ctrl-s')
    def save_all():
        actions.key('ctrl-shift-s')
    def select_all():
        actions.key('ctrl-a')
    def select_line(n: int = None):
        actions.key('end shift-home')
    #action(edit.select_lines(a: int, b: int)):
    def select_none():
        actions.key('right')
    #action(edit.select_paragraph):
    #action(edit.select_sentence):
    def select_word():
        actions.key('ctrl-left ctrl-shift-right')
    #action(edit.selected_text): -> str
    #action(edit.sentence_end):
    #action(edit.sentence_next):
    #action(edit.sentence_previous):
    #action(edit.sentence_start):
    def undo():
        actions.key('ctrl-z')
    def up():
        actions.key('up')
    def word_left():
        actions.key('ctrl-left')
    def word_right():
        actions.key('ctrl-right')
    def zoom_in():
        actions.key('ctrl-+')
    def zoom_out():
        actions.key('ctrl--')
    def zoom_reset():
        actions.key('ctrl-0')
| 29.391892 | 51 | 0.591034 |
533fcab5df27c550d0fac753509c2cdce88ce42b | 12,680 | py | Python | tests/temporal/operations/test_horizons.py | anamileva/gridpath | e55eacb88ca5e6c034a90b18819e17cbd6f43854 | [
"Apache-2.0"
] | 44 | 2020-10-27T19:05:44.000Z | 2022-03-22T17:17:37.000Z | tests/temporal/operations/test_horizons.py | anamileva/gridpath | e55eacb88ca5e6c034a90b18819e17cbd6f43854 | [
"Apache-2.0"
] | 67 | 2020-10-08T22:36:53.000Z | 2022-03-22T22:58:33.000Z | tests/temporal/operations/test_horizons.py | anamileva/gridpath | e55eacb88ca5e6c034a90b18819e17cbd6f43854 | [
"Apache-2.0"
] | 21 | 2020-10-08T23:23:48.000Z | 2022-03-28T01:21:21.000Z | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from collections import OrderedDict
from importlib import import_module
import os.path
import pandas as pd
import sys
import unittest
from tests.common_functions import create_abstract_model, \
add_components_and_load_data
# Shared fixture directory for all temporal-module tests.
TEST_DATA_DIRECTORY = \
    os.path.join(os.path.dirname(__file__), "..", "..", "test_data")

# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = ["temporal.operations.timepoints"]
NAME_OF_MODULE_BEING_TESTED = "temporal.operations.horizons"
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
    try:
        imported_module = import_module("." + str(mdl), package='gridpath')
        IMPORTED_PREREQ_MODULES.append(imported_module)
    except ImportError:
        print("ERROR! Module " + str(mdl) + " not found.")
        sys.exit(1)
# Import the module we'll test
try:
    MODULE_BEING_TESTED = import_module("." + NAME_OF_MODULE_BEING_TESTED,
                                        package='gridpath')
except ImportError:
    print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED +
          " to test.")
    # NOTE(review): unlike the prerequisite loop above, this branch does not
    # sys.exit(); MODULE_BEING_TESTED would then be undefined and the tests
    # below would fail with a NameError — confirm whether exiting is intended.
class TestHorizons(unittest.TestCase):
"""
"""
def test_add_model_components(self):
    """
    Test that there are no errors when adding model components
    """
    # Smoke test: building the abstract model with the horizons module
    # (plus its prerequisites) should not raise.
    create_abstract_model(prereq_modules=IMPORTED_PREREQ_MODULES,
                          module_to_test=MODULE_BEING_TESTED,
                          test_data_dir=TEST_DATA_DIRECTORY,
                          subproblem="",
                          stage=""
                          )
def test_load_model_data(self):
    """
    Test that data are loaded with no errors
    """
    # Smoke test: loading the test-data inputs into the model components
    # should not raise.
    add_components_and_load_data(prereq_modules=IMPORTED_PREREQ_MODULES,
                                 module_to_test=MODULE_BEING_TESTED,
                                 test_data_dir=TEST_DATA_DIRECTORY,
                                 subproblem="",
                                 stage=""
                                 )
def test_initialized_components(self):
"""
Create components; check they are initialized with data as expected
"""
m, data = add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage=""
)
instance = m.create_instance(data)
# TODO: add test data with more horizon types
# Load test data
balancing_type_horizon_horizons_df = \
pd.read_csv(
os.path.join(TEST_DATA_DIRECTORY, "inputs", "horizons.tab"),
sep="\t"
)
timepoints_on_balancing_type_horizon_df = \
pd.read_csv(
os.path.join(TEST_DATA_DIRECTORY, "inputs",
"horizon_timepoints.tab"),
sep="\t"
)
# Check data are as expected
# BLN_TYPE_HRZS set
expected_horizons = [(b, h) for (b, h) in
zip(
balancing_type_horizon_horizons_df.balancing_type_horizon,
balancing_type_horizon_horizons_df.horizon)]
actual_horizons = [(b, h) for (b, h) in
instance.BLN_TYPE_HRZS]
self.assertListEqual(expected_horizons, actual_horizons,
msg="HORIZONS set data does not load correctly.")
# Params: boundary
expected_boundary_param = \
balancing_type_horizon_horizons_df.set_index(
['balancing_type_horizon', 'horizon']
).to_dict()['boundary']
actual_boundary_param = \
{(b, h): instance.boundary[b, h]
for (b, h) in instance.BLN_TYPE_HRZS
}
self.assertDictEqual(expected_boundary_param, actual_boundary_param,
msg="Data for param 'boundary' not loaded "
"correctly")
# BLN_TYPES set
expected_balancing_type_horizons = \
list(balancing_type_horizon_horizons_df.balancing_type_horizon.unique())
actual_balancing_type_horizons = list(instance.BLN_TYPES)
self.assertListEqual(expected_balancing_type_horizons, actual_balancing_type_horizons)
# HRZS_BY_BLN_TYPE set
expected_horizon_by_balancing_type_horizon = \
{balancing_type_horizon: horizons["horizon"].tolist()
for balancing_type_horizon, horizons
in balancing_type_horizon_horizons_df.groupby("balancing_type_horizon")}
actual_horizon_by_balancing_type_horizon = {
balancing_type_horizon: [
horizon for horizon
in list(instance.HRZS_BY_BLN_TYPE[balancing_type_horizon])
] for balancing_type_horizon in instance.HRZS_BY_BLN_TYPE.keys()
}
self.assertDictEqual(expected_horizon_by_balancing_type_horizon,
actual_horizon_by_balancing_type_horizon)
# Set TMPS_BY_BLN_TYPE_HRZ
expected_tmps_on_horizon = {
(balancing_type, horizon): timepoints["timepoint"].tolist()
for ((balancing_type, horizon), timepoints)
in timepoints_on_balancing_type_horizon_df.groupby([
"balancing_type_horizon", "horizon"])
}
actual_tmps_on_horizon = {
(b, h): [tmp for tmp in
instance.TMPS_BY_BLN_TYPE_HRZ[b, h]]
for (b, h) in list(instance.TMPS_BY_BLN_TYPE_HRZ
.keys())
}
self.assertDictEqual(expected_tmps_on_horizon,
actual_tmps_on_horizon,
msg="TMPS_BY_BLN_TYPE_HRZ data do not match "
"expected."
)
# Param: horizon
expected_horizon_by_tmp_type = {
(tmp, balancing_type_horizon): horizon for tmp, balancing_type_horizon, horizon
in zip(
timepoints_on_balancing_type_horizon_df.timepoint,
timepoints_on_balancing_type_horizon_df.balancing_type_horizon,
timepoints_on_balancing_type_horizon_df.horizon
)
}
actual_horizon_by_tmp_type = {
(tmp, _type): instance.horizon[tmp, _type]
for tmp in instance.TMPS for _type in instance.BLN_TYPES
}
self.assertDictEqual(expected_horizon_by_tmp_type,
actual_horizon_by_tmp_type)
# Param: first_hrz_tmp
expected_first_hrz_tmp = {
(b, h): expected_tmps_on_horizon[b, h][0] for (b, h) in
expected_horizons
}
actual_first_hrz_tmp = {
(b, h): instance.first_hrz_tmp[b, h] for (b, h) in
instance.BLN_TYPE_HRZS
}
self.assertDictEqual(expected_first_hrz_tmp,
actual_first_hrz_tmp,
msg="Data for param "
"first_hrz_tmp do "
"not match expected.")
# Param: last_hrz_tmp
expected_last_hrz_tmp = {
(b, h): expected_tmps_on_horizon[b, h][-1] for (b, h) in
expected_horizons
}
actual_last_hrz_tmp = {
(b, h): instance.last_hrz_tmp[b, h] for (b, h) in
instance.BLN_TYPE_HRZS
}
self.assertDictEqual(expected_last_hrz_tmp,
actual_last_hrz_tmp,
msg="Data for param "
"last_hrz_tmp do "
"not match expected.")
# Param: prev_tmp
# Testing for both horizons that are 'circular' and 'linear'
# TODO: should we have the actual previous timepoints in a data file
# somewhere as opposed to figuring it out here
expected_prev_tmp = dict()
prev_tmp = None
for (horizon, balancing_type, tmp) \
in [tuple(row) for row in
timepoints_on_balancing_type_horizon_df.values]:
if tmp == expected_first_hrz_tmp[balancing_type, horizon]:
if expected_boundary_param[balancing_type, horizon] == \
'circular':
expected_prev_tmp[tmp, balancing_type] \
= \
expected_last_hrz_tmp[balancing_type, horizon]
elif expected_boundary_param[balancing_type, horizon] == \
'linear':
expected_prev_tmp[tmp, balancing_type] \
= None
else:
raise(ValueError,
"Test data specifies horizon boundary different "
"from allowed values of 'circular' and 'linear'")
else:
expected_prev_tmp[tmp, balancing_type] \
= prev_tmp
prev_tmp = tmp
actual_prev_tmp = {
(tmp, balancing_type_horizon):
instance.prev_tmp[tmp, balancing_type_horizon]
for tmp in instance.TMPS
for balancing_type_horizon in instance.BLN_TYPES
}
self.assertDictEqual(expected_prev_tmp,
actual_prev_tmp,
msg="Data for param prev_tmp do "
"not match expected.")
# Param: next_tmp
# Testing for both horizons that 'circular' and 'linear'
expected_next_tmp = dict()
prev_tmp = None
for (horizon, balancing_type, tmp) \
in [tuple(row) for row in
timepoints_on_balancing_type_horizon_df.values]:
if prev_tmp is None:
if expected_boundary_param[balancing_type, horizon] == \
'circular':
expected_next_tmp[
expected_last_hrz_tmp[balancing_type,
horizon],
balancing_type
] = \
expected_first_hrz_tmp[balancing_type,
horizon]
elif expected_boundary_param[balancing_type, horizon] == \
'linear':
expected_next_tmp[
expected_last_hrz_tmp[balancing_type,
horizon],
balancing_type
] = None
else:
raise(ValueError,
"Test data specifies horizon boundary different "
"from allowed values of 'circular' and 'linear'")
else:
expected_next_tmp[prev_tmp, balancing_type] = tmp
# If we have reached the last horizon timepoint, set the
# previous timepoint to None (to enter the boundary logic above)
if tmp == expected_last_hrz_tmp[balancing_type, horizon]:
prev_tmp = None
else:
prev_tmp = tmp
expected_next_tmp_ordered = OrderedDict(sorted(
expected_next_tmp.items()))
actual_next_tmp = {
(tmp, balancing_type_horizon): instance.next_tmp[tmp, balancing_type_horizon]
for tmp in instance.TMPS
for balancing_type_horizon in instance.BLN_TYPES
}
actual_next_tmp_ordered = OrderedDict(sorted(
actual_next_tmp.items()))
self.assertDictEqual(expected_next_tmp_ordered,
actual_next_tmp_ordered,
msg="Data for param next_tmp do not match "
"expected.")
if __name__ == "__main__":
unittest.main()
| 40.641026 | 94 | 0.560331 |
6431a537106e24957432c80a6706ccd524352c78 | 341 | py | Python | Interview Preparation/Warm-up/sales_by_match.py | matheuscordeiro/HackerRank | 003ab87fdfa9e7c0535972abd06caebb1165423f | [
"MIT"
] | null | null | null | Interview Preparation/Warm-up/sales_by_match.py | matheuscordeiro/HackerRank | 003ab87fdfa9e7c0535972abd06caebb1165423f | [
"MIT"
] | null | null | null | Interview Preparation/Warm-up/sales_by_match.py | matheuscordeiro/HackerRank | 003ab87fdfa9e7c0535972abd06caebb1165423f | [
"MIT"
] | null | null | null | def sockMerchant(n, ar):
colors = {}
pairs = 0
for _, color in enumerate(ar):
if color in colors:
pairs += 1
del colors[color]
else:
colors[color] = True
return pairs
if __name__ == "__main__":
ar = [10, 20, 20, 10, 10, 30, 50, 10, 20]
print(sockMerchant(9, ar)) | 21.3125 | 45 | 0.513196 |
587ba640210704ffd68b8c6ba5afc38d70621111 | 5,542 | py | Python | venv/lib/python3.9/site-packages/markdown/extensions/extra.py | qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3 | 630dcef73e6a258b6e9a52f934e2dd912ce741f8 | [
"Apache-2.0"
] | 182 | 2017-03-05T07:43:13.000Z | 2022-03-15T13:09:07.000Z | venv/lib/python3.9/site-packages/markdown/extensions/extra.py | qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3 | 630dcef73e6a258b6e9a52f934e2dd912ce741f8 | [
"Apache-2.0"
] | 15 | 2018-05-02T11:05:30.000Z | 2018-05-11T20:51:27.000Z | env/lib/python3.6/site-packages/markdown/extensions/extra.py | bcss-pm/incidents | 927a102104b5718fe118bceb307d3cd633d6699b | [
"MIT"
] | 38 | 2017-04-26T14:13:37.000Z | 2021-06-24T11:36:38.000Z | """
Python-Markdown Extra Extension
===============================
A compilation of various Python-Markdown extensions that imitates
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
Note that each of the individual extensions still need to be available
on your PYTHONPATH. This extension simply wraps them all up as a
convenience so that only one extension needs to be listed when
initiating Markdown. See the documentation for each individual
extension for specifics about that extension.
There may be additional extensions that are distributed with
Python-Markdown that are not included here in Extra. Those extensions
are not part of PHP Markdown Extra, and therefore, not part of
Python-Markdown Extra. If you really would like Extra to include
additional extensions, we suggest creating your own clone of Extra
under a differant name. You could also edit the `extensions` global
variable defined below, but be aware that such changes may be lost
when you upgrade to any future version of Python-Markdown.
See <https://Python-Markdown.github.io/extensions/extra>
for documentation.
Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor
from .. import util
import re
extensions = [
'markdown.extensions.smart_strong',
'markdown.extensions.fenced_code',
'markdown.extensions.footnotes',
'markdown.extensions.attr_list',
'markdown.extensions.def_list',
'markdown.extensions.tables',
'markdown.extensions.abbr'
]
class ExtraExtension(Extension):
    """ Add various extensions to Markdown class."""

    def __init__(self, *args, **kwargs):
        """ config is a dumb holder which gets passed to actual ext later. """
        # Accept settings either as a 'configs' dict or as plain keyword
        # arguments; both end up merged into self.config.
        self.config = kwargs.pop('configs', {})
        self.config.update(kwargs)

    def extendMarkdown(self, md, md_globals):
        """ Register extension instances. """
        md.registerExtensions(extensions, self.config)
        if not md.safeMode:
            # Turn on processing of markdown text within raw html
            md.preprocessors['html_block'].markdown_in_raw = True
            md.parser.blockprocessors.add('markdown_block',
                                          MarkdownInHtmlProcessor(md.parser),
                                          '_begin')
            # Counter used by MarkdownInHtmlProcessor to walk the stashed HTML
            # tag entries in order; -1 means "no tag processed yet".
            md.parser.blockprocessors.tag_counter = -1
            # Tags whose markdown="..." content defaults to span-level parsing.
            md.parser.blockprocessors.contain_span_tags = re.compile(
                r'^(p|h[1-6]|li|dd|dt|td|th|legend|address)$', re.IGNORECASE)
def makeExtension(*args, **kwargs):
    """Entry point used by Markdown to instantiate the Extra extension."""
    return ExtraExtension(*args, **kwargs)
class MarkdownInHtmlProcessor(BlockProcessor):
    """Process Markdown Inside HTML Blocks."""

    def test(self, parent, block):
        # Fire only on the placeholder for the *next* stashed HTML tag in
        # sequence (tag_counter is incremented when run() handles it).
        return block == util.TAG_PLACEHOLDER % \
            str(self.parser.blockprocessors.tag_counter + 1)

    def _process_nests(self, element, block):
        """Process the element's child elements in self.run."""
        # Build list of indexes of each nest within the parent element.
        nest_index = []  # a list of tuples: (left index, right index)
        i = self.parser.blockprocessors.tag_counter + 1
        # Consecutive tag_data entries with a truthy left_index are nested
        # children of the current element.
        while len(self._tag_data) > i and self._tag_data[i]['left_index']:
            left_child_index = self._tag_data[i]['left_index']
            right_child_index = self._tag_data[i]['right_index']
            nest_index.append((left_child_index - 1, right_child_index))
            i += 1

        # Create each nest subelement.
        for i, (left_index, right_index) in enumerate(nest_index[:-1]):
            self.run(element, block[left_index:right_index],
                     block[right_index:nest_index[i + 1][0]], True)
        self.run(element, block[nest_index[-1][0]:nest_index[-1][1]],  # last
                 block[nest_index[-1][1]:], True)  # nest

    def run(self, parent, blocks, tail=None, nest=False):
        self._tag_data = self.parser.markdown.htmlStash.tag_data
        self.parser.blockprocessors.tag_counter += 1
        tag = self._tag_data[self.parser.blockprocessors.tag_counter]

        # Create Element
        # The markdown="..." attribute is consumed here so it is not emitted
        # into the output HTML.
        markdown_value = tag['attrs'].pop('markdown')
        element = util.etree.SubElement(parent, tag['tag'], tag['attrs'])

        # Slice Off Block
        if nest:
            self.parser.parseBlocks(parent, tail)  # Process Tail
            block = blocks[1:]
        else:  # includes nests since a third level of nesting isn't supported
            block = blocks[tag['left_index'] + 1: tag['right_index']]
            del blocks[:tag['right_index']]

        # Process Text
        if (self.parser.blockprocessors.contain_span_tags.match(  # Span Mode
                tag['tag']) and markdown_value != 'block') or \
                markdown_value == 'span':
            element.text = '\n'.join(block)
        else:  # Block Mode
            i = self.parser.blockprocessors.tag_counter + 1
            if len(self._tag_data) > i and self._tag_data[i]['left_index']:
                first_subelement_index = self._tag_data[i]['left_index'] - 1
                self.parser.parseBlocks(
                    element, block[:first_subelement_index])
                if not nest:
                    block = self._process_nests(element, block)
            else:
                self.parser.parseBlocks(element, block)
315ee8e12bb8a53a10fec2031974282edc1039f1 | 437 | py | Python | core/web/api/indicator.py | rorymbyrne/yeti | bc6d7ab7374f3b015c2f92ed8f5ac7e303f95474 | [
"Apache-2.0"
] | 2 | 2018-11-23T15:46:17.000Z | 2019-06-18T20:45:48.000Z | core/web/api/indicator.py | rorymbyrne/yeti | bc6d7ab7374f3b015c2f92ed8f5ac7e303f95474 | [
"Apache-2.0"
] | 3 | 2020-12-04T21:17:20.000Z | 2022-01-15T02:39:23.000Z | core/web/api/indicator.py | rorymbyrne/yeti | bc6d7ab7374f3b015c2f92ed8f5ac7e303f95474 | [
"Apache-2.0"
] | 1 | 2022-02-03T11:23:34.000Z | 2022-02-03T11:23:34.000Z | from __future__ import unicode_literals
from core.web.api.crud import CrudSearchApi, CrudApi
from core import indicators
class IndicatorSearch(CrudSearchApi):
    """Search API endpoint over Indicator documents."""
    # HTML template used when rendering responses.
    template = 'indicator_api.html'
    # Document class that search queries run against.
    objectmanager = indicators.Indicator
class Indicator(CrudApi):
    """CRUD API endpoint for Indicator documents."""
    # HTML template used when rendering responses.
    template = 'indicator_api.html'
    # Document class managed by this CRUD endpoint.
    objectmanager = indicators.Indicator
    # Named concrete Indicator subclasses available through this endpoint —
    # presumably selected by a type field in the request; verify in CrudApi.
    subobjects = {
        'Regex': indicators.Regex,
        'Yara': indicators.Yara,
    }
| 23 | 52 | 0.732265 |
0e64be56edc39867604bac6d9923d8d62ea07faf | 1,066 | py | Python | frames/frameManager.py | AndreiPopescu21/StarshipInvaders | 0613dd83a5e947431734d53e8784e68d343a45a8 | [
"CC0-1.0"
] | null | null | null | frames/frameManager.py | AndreiPopescu21/StarshipInvaders | 0613dd83a5e947431734d53e8784e68d343a45a8 | [
"CC0-1.0"
] | null | null | null | frames/frameManager.py | AndreiPopescu21/StarshipInvaders | 0613dd83a5e947431734d53e8784e68d343a45a8 | [
"CC0-1.0"
] | null | null | null | # Import Frames
from frames.menuFrame import MenuFrame
from frames.gameFrame import GameFrame
from frames.settingsFrame import SettingsFrame
from frames.leaderboardsFrame import LeaderboardsFrame
# This function returns a dictionary with all the frames
def getFrames(window, changeFrameFunctions):
    """Build every application frame and return a name -> Tk frame mapping."""
    # Creation order matters: the game frame is handed the leaderboards
    # frame, and the menu frame is handed the game frame.
    leaderboards = LeaderboardsFrame(
        window, changeFrameFunctions["settingsMenuButton"])
    game = GameFrame(
        window, changeFrameFunctions["leaderboardsFrame"], leaderboards)
    menu = MenuFrame(
        window,
        changeFrameFunctions["menuPlayButton"],
        changeFrameFunctions["menuSettingsButton"],
        game,
    )
    settings = SettingsFrame(
        window, changeFrameFunctions["settingsMenuButton"])
    return {
        "menuFrame": menu.frame,
        "gameFrame": game.frame,
        "settingsFrame": settings.frame,
        "leaderboardsFrame": leaderboards.frame,
    }
# This function displays the desired frame
def showFrame(frame):
    """Bring `frame` to the top of the stacking order so it becomes visible."""
    # Raise the desired frame on top of the others
    frame.tkraise()
c4c9c6b06f67b4e1ebf98eb26f5e87e511932f3d | 2,180 | py | Python | vspk/v4_0/fetchers/nuflowforwardingpolicies_fetcher.py | cldelcourt/vspk-python | cdea810cd220e6ddc131407735941b9a26b2edda | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/fetchers/nuflowforwardingpolicies_fetcher.py | cldelcourt/vspk-python | cdea810cd220e6ddc131407735941b9a26b2edda | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/fetchers/nuflowforwardingpolicies_fetcher.py | cldelcourt/vspk-python | cdea810cd220e6ddc131407735941b9a26b2edda | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUFlowForwardingPoliciesFetcher(NURESTFetcher):
    """ Represents a NUFlowForwardingPolicies fetcher

        Notes:
            This fetcher enables to fetch NUFlowForwardingPolicy objects.

        See:
            bambou.NURESTFetcher
    """

    @classmethod
    def managed_class(cls):
        """ Return NUFlowForwardingPolicy class that is managed.

            Returns:
                .NUFlowForwardingPolicy: the managed class
        """
        # Imported locally rather than at module level — presumably to avoid
        # a circular import between the fetchers and model packages; verify.
        from .. import NUFlowForwardingPolicy
        return NUFlowForwardingPolicy
f40c032dc82fc6f82d98797b101858b3c3f1f0f6 | 1,375 | py | Python | userbot/plugins/randomownsticker.py | HarshTech26/IndianBot | aa84b30889a2cceca9911a3e95216ac5ba7f4ad0 | [
"MIT"
] | null | null | null | userbot/plugins/randomownsticker.py | HarshTech26/IndianBot | aa84b30889a2cceca9911a3e95216ac5ba7f4ad0 | [
"MIT"
] | null | null | null | userbot/plugins/randomownsticker.py | HarshTech26/IndianBot | aa84b30889a2cceca9911a3e95216ac5ba7f4ad0 | [
"MIT"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" Command: .dab , .brain
credit: lejend @anon_cracker"""
import random
from telethon import events, types, functions, utils
def choser(cmd, pack, blacklist=frozenset()):
    """Register a handler that answers ".<cmd>" with a random sticker.

    cmd: command name; the handler fires on outgoing messages matching
        the pattern ``\\.<cmd>``.
    pack: short name of the Telegram sticker set to draw from.
    blacklist: ids of documents in the set that must never be sent.
        (Fixed: the default was a mutable ``{}``; an immutable frozenset
        keeps the same ``in`` membership semantics without the mutable
        default-argument pitfall.)
    """
    # Lazily fetched cache of the pack's stickers, filled on first use.
    docs = None

    @borg.on(events.NewMessage(pattern=rf'\.{cmd}', outgoing=True))
    async def handler(event):
        # Remove the triggering command message before replying.
        await event.delete()
        nonlocal docs
        if docs is None:
            docs = [
                utils.get_input_document(x)
                for x in (await borg(functions.messages.GetStickerSetRequest(types.InputStickerSetShortName(pack)))).documents
                if x.id not in blacklist
            ]
        await event.respond(file=random.choice(docs))
choser('rstic', 'Jayu_ke_locker_me')
choser('rastic', 'Jayu_ke_adult_stickers')
choser('ranastic', 'Jayu_Adult_Animated')
choser('ranstic', 'Jayu_Animated', {
1653974154589768377,
1653974154589768312,
1653974154589767857,
1653974154589768311,
1653974154589767816,
1653974154589767939,
1653974154589767944,
1653974154589767912,
1653974154589767911,
1653974154589767910,
1653974154589767909,
1653974154589767863,
1653974154589767852,
1653974154589768677
})
| 27.5 | 126 | 0.685818 |
682617a20052e0ff65f64e8384400498b706c696 | 3,107 | py | Python | instana/collector/helpers/base.py | rlopes-ki/python-sensor | 07e827f9982b2a0c482e8eab82d1a420923efd5e | [
"MIT"
] | 61 | 2017-09-27T02:50:17.000Z | 2022-03-22T12:13:37.000Z | instana/collector/helpers/base.py | rlopes-ki/python-sensor | 07e827f9982b2a0c482e8eab82d1a420923efd5e | [
"MIT"
] | 82 | 2017-07-11T13:47:33.000Z | 2022-03-22T10:10:38.000Z | instana/collector/helpers/base.py | rlopes-ki/python-sensor | 07e827f9982b2a0c482e8eab82d1a420923efd5e | [
"MIT"
] | 27 | 2017-09-11T16:22:32.000Z | 2022-03-11T17:21:49.000Z | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
"""
Base class for the various helpers that can be used by Collectors. Helpers assist
in the data collection for various entities such as host, hardware, AWS Task, ec2,
memory, cpu, docker etc etc..
"""
from ...log import logger
class BaseHelper(object):
    """
    Base class for all helpers. Descendants must override and implement `self.collect_metrics`.
    """
    def __init__(self, collector):
        # The collector this helper reports metrics into.
        self.collector = collector

    @staticmethod
    def _metric_keys(metric):
        """
        Resolve <metric> into its (source_key, destination_key) pair.

        @param metric [String or Tuple]: a single key used on both the source
            and destination side, or a (src, dst) tuple when they differ
        @return: (src_metric, dst_metric) tuple
        """
        # Extracted helper: this unpacking was duplicated verbatim in
        # get_delta and apply_delta.
        if isinstance(metric, tuple):
            return metric[0], metric[1]
        return metric, metric

    def get_delta(self, source, previous, metric):
        """
        Given a metric, see if the value varies from the previous reported metrics
        @param source [dict or value]: the dict to retrieve the new value of <metric> (as source[metric]) or
                       if not a dict, then the new value of the metric
        @param previous [dict]: the previous value of <metric> that was reported (as previous[metric])
        @param metric [String or Tuple]: the name of the metric in question.  If the keys for source[metric],
                       and previous[metric] vary, you can pass a tuple in the form of (src, dst)
        @return: None (meaning no difference) or the new value (source[metric])
        """
        src_metric, dst_metric = self._metric_keys(metric)

        if isinstance(source, dict):
            new_value = source.get(src_metric, None)
        else:
            new_value = source

        if previous[dst_metric] != new_value:
            return new_value
        return None

    def apply_delta(self, source, previous, new, metric, with_snapshot):
        """
        Helper method to assist in delta reporting of metrics.
        @param source [dict or value]: the dict to retrieve the new value of <metric> (as source[metric]) or
                       if not a dict, then the new value of the metric
        @param previous [dict]: the previous value of <metric> that was reported (as previous[metric])
        @param new [dict]: the new value of the metric that will be sent new (as new[metric])
        @param metric [String or Tuple]: the name of the metric in question.  If the keys for source[metric],
                       previous[metric] and new[metric] vary, you can pass a tuple in the form of (src, dst)
        @param with_snapshot [Bool]: if this metric is being sent with snapshot data
        @return: None
        """
        src_metric, dst_metric = self._metric_keys(metric)

        if isinstance(source, dict):
            new_value = source.get(src_metric, None)
        else:
            new_value = source

        # Record the value only when it changed, or always when a snapshot
        # payload is being assembled.
        previous_value = previous.get(dst_metric, 0)
        if previous_value != new_value or with_snapshot is True:
            previous[dst_metric] = new[dst_metric] = new_value

    def collect_metrics(self, **kwargs):
        # Abstract hook; concrete helpers must override this.
        logger.debug("BaseHelper.collect_metrics must be overridden")
| 39.329114 | 109 | 0.633408 |
3ce85e4d51677131b09e3707c081758b05f7937b | 921 | py | Python | Misc/changelog.py | pixeldrama/ANNIS | 94fa483804a7edb94bb303349a217c059feb5104 | [
"Apache-2.0"
] | null | null | null | Misc/changelog.py | pixeldrama/ANNIS | 94fa483804a7edb94bb303349a217c059feb5104 | [
"Apache-2.0"
] | null | null | null | Misc/changelog.py | pixeldrama/ANNIS | 94fa483804a7edb94bb303349a217c059feb5104 | [
"Apache-2.0"
] | 1 | 2018-06-25T19:22:52.000Z | 2018-06-25T19:22:52.000Z | #!/usr/bin/python
import json
import io
from subprocess import call
# GitHub milestone whose closed issues make up this changelog.
milestone_id = "29"

# Fetch all closed issues of the milestone into issues.json via the GitHub API
# (shells out to curl; requires network access).
call(["curl", "-H", "Accept: application/vnd.github.beta.full+json", "-o", "issues.json", "https://api.github.com/repos/korpling/ANNIS/issues?state=closed&milestone=" + milestone_id + "&sort=created"])

f = open("issues.json")
j = json.load(f)

# Bucket each issue title by its first label: bug / enhancement / other.
bugs = []
enhancements = []
other = []

for issue in j:
    title = " #{0} {1}".format(issue["number"], issue["title"])
    if len(issue["labels"]) > 0:
        # Only the first label decides the bucket; additional labels are
        # ignored.
        if issue["labels"][0]["name"] == "bug":
            bugs.append(title)
        elif issue["labels"][0]["name"] == "enhancement":
            enhancements.append(title)
        else:
            other.append(title)
    else:
        other.append(title)

# Print the changelog, one section per non-empty bucket.
# NOTE: this is Python 2 code (print statements).
if len(bugs) > 0:
    print "[Bugs]"
    for t in bugs:
        print t
if len(enhancements) > 0:
    print ""
    print "[Enhancements]"
    for t in enhancements:
        print t
if len(other) > 0:
    print ""
    print "[Other]"
    for t in other:
        print t
7ce05e0804397f78c33d502f50f1cfc1b168d965 | 269 | py | Python | tests/test_urls.py | Xaelias/Vestaboard | a1bc7b6efe6b2ad49079e851c09542c2b9452409 | [
"MIT"
] | 19 | 2020-04-17T04:59:41.000Z | 2022-01-27T23:21:28.000Z | tests/test_urls.py | Xaelias/Vestaboard | a1bc7b6efe6b2ad49079e851c09542c2b9452409 | [
"MIT"
] | 11 | 2020-04-17T04:59:19.000Z | 2022-01-07T00:18:34.000Z | tests/test_urls.py | Xaelias/Vestaboard | a1bc7b6efe6b2ad49079e851c09542c2b9452409 | [
"MIT"
] | 10 | 2021-02-08T18:35:23.000Z | 2022-01-27T23:21:23.000Z | import vestaboard.vbUrls as vbUrls
def test_post_url_matches():
    # Pin the message-post endpoint template (formatted with a subscription id).
    assert vbUrls.post == 'https://platform.vestaboard.com/subscriptions/{}/message'
def test_subscription_url_matches():
    # Pin the subscriptions-listing endpoint URL.
    assert vbUrls.subscription == 'https://platform.vestaboard.com/subscriptions'
d6ffee5c2655d96e6d28889b9bd7563cc9ebdce9 | 59,958 | py | Python | numpy/linalg/tests/test_linalg.py | stefb965/numpy | deb4be85025088553d80d6637dbc17d7f8101f41 | [
"BSD-3-Clause"
] | null | null | null | numpy/linalg/tests/test_linalg.py | stefb965/numpy | deb4be85025088553d80d6637dbc17d7f8101f41 | [
"BSD-3-Clause"
] | null | null | null | numpy/linalg/tests/test_linalg.py | stefb965/numpy | deb4be85025088553d80d6637dbc17d7f8101f41 | [
"BSD-3-Clause"
] | null | null | null | """ Test functions for linalg module
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import itertools
import traceback
import warnings
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, run_module_suite,
dec, SkipTest, suppress_warnings
)
def ifthen(a, b):
    """Material implication: falsy only when `a` is truthy and `b` is falsy."""
    return b if a else True
def imply(a, b):
    """Material implication: falsy only when `a` is truthy and `b` is falsy."""
    return b if a else True
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
    """Precision-aware wrapper around numpy.testing's assert_almost_equal.

    Single-precision inputs are compared to `single_decimal` places,
    everything else to `double_decimal` places.
    """
    is_single_precision = asarray(a).dtype.type in (single, csingle)
    decimal = single_decimal if is_single_precision else double_decimal
    old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
    """Map each supported scalar dtype to its real-valued counterpart."""
    real_counterpart = {
        single: single,
        double: double,
        csingle: single,
        cdouble: double,
    }
    return real_counterpart[dtype]
def get_complex_dtype(dtype):
    """Map each supported scalar dtype to its complex-valued counterpart."""
    complex_counterpart = {
        single: csingle,
        double: cdouble,
        csingle: csingle,
        cdouble: cdouble,
    }
    return complex_counterpart[dtype]
def get_rtol(dtype):
    """Return a safe relative tolerance for comparisons at this precision."""
    return 1e-5 if dtype in (single, csingle) else 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase(object):
    """A named (a, b) operand pair plus tags used to filter linalg tests."""

    def __init__(self, name, a, b, tags=frozenset()):
        """
        A bundle of arguments to be passed to a test case, with an identifying
        name, the operands a and b, and a set of tags to filter the tests

        Fixed: the default for `tags` was the mutable `set()`; a frozenset
        default avoids the shared-mutable-default pitfall and is equivalent
        since the value is immediately frozen below.
        """
        assert_(isinstance(name, str))
        self.name = name
        self.a = a
        self.b = b
        self.tags = frozenset(tags)  # prevent shared tags

    def check(self, do):
        """
        Run the function `do` on this test case, expanding arguments
        """
        do(self.a, self.b, tags=self.tags)

    def __repr__(self):
        return "<LinalgCase: %s>" % (self.name,)
def apply_tag(tag, cases):
    """
    Add the given tag (a string) to each of the cases (a list of LinalgCase
    objects)
    """
    assert tag in all_tags, "Invalid tag"

    for case in cases:
        # frozenset.union keeps the tags immutable, like the original `|`.
        case.tags = case.tags.union({tag})
    return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("0x0_matrix",
np.empty((0, 0), dtype=double).view(np.matrix),
np.empty((0, 1), dtype=double).view(np.matrix),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
LinalgCase("matrix_b_only",
array([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
LinalgCase("matrix_a_and_b",
matrix([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_a_and_b",
matrix([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
    """Derive stacked ('generalized') variants of CASES by tiling a and b."""
    generalized = []
    for case in CASES:
        # Only ndarray cases can be stacked; skip nested-list/matrix inputs.
        if not isinstance(case.a, np.ndarray):
            continue

        # Stack of three differently-scaled copies along a new leading axis.
        a3 = np.array([case.a, 2 * case.a, 3 * case.a])
        b3 = None if case.b is None else np.array([case.b, 7 * case.b, 6 * case.b])
        generalized.append(LinalgCase(case.name + "_tile3", a3, b3,
                                      tags=case.tags | {'generalized'}))

        # (3, 2)-shaped stack of identical copies (two leading axes).
        a213 = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
        b213 = (None if case.b is None
                else np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape))
        generalized.append(LinalgCase(case.name + "_tile213", a213, b213,
                                      tags=case.tags | {'generalized'}))
    return generalized

CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)] * x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
def _make_strided_cases():
    """Expand CASES with every stride-pattern combination of a and b."""
    strided = []
    for case in CASES:
        for a_variant, a_label in _stride_comb_iter(case.a):
            for b_variant, b_label in _stride_comb_iter(case.b):
                name = case.name + "_" + a_label + "_" + b_label
                strided.append(LinalgCase(name, a_variant, b_variant,
                                          tags=case.tags | {'strided'}))
    return strided

CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
def _check_cases(func, require=frozenset(), exclude=frozenset()):
    """
    Run func on each of the cases with all of the tags in require, and none
    of the tags in exclude.

    Any exception raised by a case is re-raised as an AssertionError that
    names the failing case and carries the original traceback.
    """
    # NOTE: the defaults were mutable `set()` literals; they were never
    # mutated here, but immutable frozensets are the safe default-arg idiom.
    for case in CASES:
        # filter by require and exclude
        if case.tags & require != require:
            continue
        if case.tags & exclude:
            continue
        try:
            case.check(func)
        except Exception:
            msg = "In test case: %r\n\n" % case
            msg += traceback.format_exc()
            raise AssertionError(msg)
class LinalgSquareTestCase(object):
    """Mixin: run `self.do` over the square, non-stacked cases."""

    def test_sq_cases(self):
        wanted = {'square'}
        unwanted = {'generalized', 'size-0'}
        _check_cases(self.do, require=wanted, exclude=unwanted)

    def test_empty_sq_cases(self):
        wanted = {'square', 'size-0'}
        _check_cases(self.do, require=wanted, exclude={'generalized'})
class LinalgNonsquareTestCase(object):
    """Mixin: run `self.do` over the nonsquare, non-stacked cases."""

    def test_nonsq_cases(self):
        wanted = {'nonsquare'}
        unwanted = {'generalized', 'size-0'}
        _check_cases(self.do, require=wanted, exclude=unwanted)

    def test_empty_nonsq_cases(self):
        wanted = {'nonsquare', 'size-0'}
        _check_cases(self.do, require=wanted, exclude={'generalized'})
class HermitianTestCase(object):
    """Mixin: run `self.do` over the Hermitian, non-stacked cases."""

    def test_herm_cases(self):
        wanted = {'hermitian'}
        unwanted = {'generalized', 'size-0'}
        _check_cases(self.do, require=wanted, exclude=unwanted)

    def test_empty_herm_cases(self):
        wanted = {'hermitian', 'size-0'}
        _check_cases(self.do, require=wanted, exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(object):
    """Mixin: run `self.do` over the stacked ('generalized') square cases."""

    @dec.slow
    def test_generalized_sq_cases(self):
        wanted = {'generalized', 'square'}
        _check_cases(self.do, require=wanted, exclude={'size-0'})

    @dec.slow
    def test_generalized_empty_sq_cases(self):
        wanted = {'generalized', 'square', 'size-0'}
        _check_cases(self.do, require=wanted)
class LinalgGeneralizedNonsquareTestCase(object):
    """Mixin: run `self.do` over the stacked ('generalized') nonsquare cases."""

    @dec.slow
    def test_generalized_nonsq_cases(self):
        wanted = {'generalized', 'nonsquare'}
        _check_cases(self.do, require=wanted, exclude={'size-0'})

    @dec.slow
    def test_generalized_empty_nonsq_cases(self):
        wanted = {'generalized', 'nonsquare', 'size-0'}
        _check_cases(self.do, require=wanted)
class HermitianGeneralizedTestCase(object):
    """Mixin: run `self.do` over the stacked ('generalized') Hermitian cases."""

    @dec.slow
    def test_generalized_herm_cases(self):
        wanted = {'generalized', 'hermitian'}
        _check_cases(self.do, require=wanted, exclude={'size-0'})

    @dec.slow
    def test_generalized_empty_herm_cases(self):
        wanted = {'generalized', 'hermitian', 'size-0'}
        # exclude={'none'} matches no real tag, i.e. excludes nothing.
        _check_cases(self.do, require=wanted, exclude={'none'})
def dot_generalized(a, b):
    """Reference 'stacked dot': apply np.dot item-by-item over leading axes."""
    a = asarray(a)
    if a.ndim < 3:
        # Plain (non-stacked) operands: ordinary dot product.
        return dot(a, b)
    if a.ndim == b.ndim:
        # stacked matrix @ stacked matrix
        out_shape = a.shape[:-1] + b.shape[-1:]
    elif a.ndim == b.ndim + 1:
        # stacked matrix @ stacked vector
        out_shape = a.shape[:-1]
    else:
        raise ValueError("Not implemented...")
    out = np.empty(out_shape, dtype=np.common_type(a, b))
    for idx in itertools.product(*map(range, a.shape[:-2])):
        out[idx] = dot(a[idx], b[idx])
    return out
def identity_like_generalized(a):
    """Identity matrices matching the (possibly stacked) shape of `a`."""
    a = asarray(a)
    if a.ndim < 3:
        # Single matrix: one identity of matching leading dimension.
        return identity(a.shape[0])
    out = np.empty(a.shape, dtype=a.dtype)
    for idx in itertools.product(*map(range, a.shape[:-2])):
        out[idx] = identity(a.shape[-2])
    return out
# Tests for linalg.solve; `do` is driven by the mixin case machinery above.
class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# Solve a x = b, verify the residual, and check matrix-subclass round-trip.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
# nose-style generator test: solve's result dtype must match the input dtype.
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
# 0-sized systems keep shape and ndarray subclass; invalid shapes must raise.
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0, :]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
# K = 0: zero right-hand-side columns, alone and combined with 0x0 matrices.
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:, :, 0:0]
result = linalg.solve(a, b[:, :, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Tests for linalg.inv; `do` is driven by the mixin case machinery above.
class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# a @ inv(a) must be (a stack of) identity; matrix subclass must round-trip.
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
# nose-style generator test: inv's result dtype must match the input dtype.
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
# integer input promotes to float64; shape and subclass are preserved.
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
# complex64 input stays complex64.
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
# Tests for linalg.eigvals; `do` is driven by the mixin case machinery above.
class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# eigvals must agree with the eigenvalues returned by eig.
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
# nose-style generator test: real input keeps dtype for symmetric input and
# promotes to the matching complex dtype when eigenvalues are complex.
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.complex64)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
# Tests for linalg.eig; `do` is driven by the mixin case machinery above.
class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# Verify the eigen-decomposition identity a @ V == V * w (columnwise).
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
# nose-style generator test: dtype of w and v tracks the input, promoting to
# complex when the spectrum is complex.
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
# NOTE(review): asserts on `a`, not the results — compare TestEigvals which
# checks `res`; possibly a copy-paste slip, but kept as-is.
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
# Tests for linalg.svd; `do` is driven by the mixin case machinery above.
class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# Reconstruct a from u * s @ vt; 0-sized inputs must raise LinAlgError.
def do(self, a, b, tags):
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.svd, a, 0)
return
u, s, vt = linalg.svd(a, 0)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
# nose-style generator test: u/vh keep the input dtype, s is its real part.
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False)
assert_equal(s.dtype, get_real_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
# These raise errors currently
# (which does not mean that it may not make sense)
a = np.zeros((0, 0), dtype=np.complex64)
assert_raises(linalg.LinAlgError, linalg.svd, a)
a = np.zeros((0, 1), dtype=np.complex64)
assert_raises(linalg.LinAlgError, linalg.svd, a)
a = np.zeros((1, 0), dtype=np.complex64)
assert_raises(linalg.LinAlgError, linalg.svd, a)
# linalg.cond with the default (2-norm) order must equal the ratio of the
# largest to the smallest singular value.
class TestCondSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.svd, c, compute_uv=False)
return
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
s[..., 0] / s[..., -1], linalg.cond(a),
single_decimal=5, double_decimal=11)
# cond of an explicit 1-element stack must match cond of the bare matrix.
def test_stacked_arrays_explicitly(self):
A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]])
assert_equal(linalg.cond(A), linalg.cond(A[None, ...])[0])
# Same as TestCondSVD but with the 2-norm requested explicitly (p=2);
# square, non-stacked cases only.
class TestCond2(LinalgSquareTestCase):
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.svd, c, compute_uv=False)
return
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
s[..., 0] / s[..., -1], linalg.cond(a, 2),
single_decimal=5, double_decimal=11)
# cond(·, 2) of a 1-element stack must match cond(·, 2) of the bare matrix.
def test_stacked_arrays_explicitly(self):
A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]])
assert_equal(linalg.cond(A, 2), linalg.cond(A[None, ...], 2)[0])
class TestCondInf(object):
    """Spot check for the infinity-norm condition number."""

    def test(self):
        mat = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]])
        # For this diagonal matrix: ||A||_inf = 3 and ||A^-1||_inf = 1.
        assert_almost_equal(linalg.cond(mat, inf), 3.)
class TestPinv(LinalgSquareTestCase, LinalgNonsquareTestCase):
    """Exercise linalg.pinv through the square and nonsquare case machinery."""

    def do(self, a, b, tags):
        pinv_a = linalg.pinv(a)
        # `a @ pinv_a == I` does not hold if a is singular
        reconstructed = dot(a, pinv_a).dot(a)
        assert_almost_equal(reconstructed, a, single_decimal=5, double_decimal=11)
        assert_(imply(isinstance(a, matrix), isinstance(pinv_a, matrix)))
# Tests for linalg.det / linalg.slogdet; `do` is driven by the mixins above.
class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# det and slogdet must agree with the product of the eigenvalues; the sign
# from slogdet must have unit magnitude wherever it is nonzero.
def do(self, a, b, tags):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
# a zero sign marks a singular matrix: log-determinant must be -inf there.
assert_equal(ld[~m], -inf)
# Singular matrices: det is an exact 0 of the right scalar type and slogdet
# reports (0, -inf); the log part is always real (double).
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
# nose-style generator test: det keeps the input dtype; slogdet's sign keeps
# it and its log part is the matching real dtype.
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
# Empty (0x0) matrices: det is the empty product, i.e. exactly 1.
def test_0_size(self):
a = np.zeros((0, 0), dtype=np.complex64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.complex64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.complex64)
assert_(res[1].dtype.type is np.float32)
a = np.zeros((0, 0), dtype=np.float64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.float64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.float64)
assert_(res[1].dtype.type is np.float64)
# Tests for linalg.lstsq; `do` is driven by the mixin case machinery above.
class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
# lstsq does not accept 0-sized problems here.
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.lstsq, a, b)
return
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
x, residuals, rank, sv = linalg.lstsq(a, b)
if m <= n:
# Underdetermined (or square) full-rank system solves b exactly.
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
# Returned singular values must match those from svd (subclass-wrapped).
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
# Overdetermined full-rank case: residuals are the squared column
# norms of the misfit a@x - b.
expect_resids = (
np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if np.asarray(b).ndim == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
# Otherwise residuals are empty (of the same array type as x).
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
# Tests for numpy.linalg.matrix_power over a few fixed matrices.
class TestMatrixPower(object):
# 90-degree rotation: R90**4 == I, used for large exponents.
R90 = array([[0, 1], [-1, 0]])
Arb22 = array([[4, -7], [-2, 10]])
# Singular matrix, used for the non-invertible error check.
noninv = array([[1, 0], [0, 0]])
arbfloat = array([[0.1, 3.2], [1.2, 0.7]])
# 10x10 permutation matrix: identity with rows 0 and 1 swapped.
large = identity(10)
t = large[1, :].copy()
large[1, :] = large[0,:]
large[0, :] = t
# Huge exponents exercise the binary-powering loop; the expected values
# follow from R90 having period 4 (exponent mod 4 decides the result).
def test_large_power(self):
assert_equal(
matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5 + 1), self.R90)
def test_large_power_trailing_zero(self):
assert_equal(
matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5), identity(2))
# nose-style generator tests for exponents 0, 1, 2 and -1.
def testip_zero(self):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity(M.shape[0]))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_one(self):
def tz(M):
mz = matrix_power(M, 1)
assert_equal(mz, M)
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_two(self):
def tz(M):
mz = matrix_power(M, 2)
assert_equal(mz, dot(M, M))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_invert(self):
def tz(M):
mz = matrix_power(M, -1)
assert_almost_equal(identity(M.shape[0]), dot(mz, M))
for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
yield tz, M
# Negative powers of a singular matrix must raise LinAlgError.
def test_invert_noninvertible(self):
import numpy.linalg
assert_raises(numpy.linalg.linalg.LinAlgError,
lambda: matrix_power(self.noninv, -1))
class TestBoolPower(object):
    """matrix_power must also accept boolean matrices."""

    def test_square(self):
        mat = array([[True, False], [True, True]])
        # For this matrix the boolean matrix product satisfies mat @ mat == mat.
        assert_equal(matrix_power(mat, 2), mat)
# Tests for linalg.eigvalsh; `do` is driven by the Hermitian case mixins.
class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a, 'L')
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))
# Using the upper triangle must give the same (Hermitian) result.
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
# nose-style generator test: eigvalsh always returns the real dtype.
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w = np.linalg.eigvalsh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
# Only 'L'/'l'/'U'/'u' are accepted for UPLO (positionally or by keyword).
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
# UPLO selects which triangle is read: Klo/Kup hold the same symmetric
# matrix stored in the lower/upper triangle respectively.
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w = np.linalg.eigvalsh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w = np.linalg.eigvalsh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w = np.linalg.eigvalsh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w = np.linalg.eigvalsh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w = np.linalg.eigvalsh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float32)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
# Tests for linalg.eigh; `do` is driven by the Hermitian case mixins.
class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
# Eigen-decomposition identity a @ V == V * w, lower triangle (default).
assert_allclose(dot_generalized(a, evc),
np.asarray(ev)[..., None, :] * np.asarray(evc),
rtol=get_rtol(ev.dtype))
# Same checks using the upper triangle.
ev2, evc2 = linalg.eigh(a, 'U')
assert_almost_equal(ev2, evalues)
assert_allclose(dot_generalized(a, evc2),
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
# nose-style generator test: w is real-typed, v keeps the input dtype.
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
# Only 'L'/'l'/'U'/'u' are accepted for UPLO (positionally or by keyword).
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
# UPLO selects which triangle is read: Klo/Kup hold the same symmetric
# matrix stored in the lower/upper triangle respectively.
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.float32)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
# Shared norm test suite; concrete subclasses set `dt` (dtype under test)
# and `dec` (decimal precision for comparisons).
class _TestNorm(object):
dt = None
dec = None
# norm of an empty vector/matrix is 0 by convention.
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
# Vector norms must always come back as a floating type, for every input
# dtype (integer and inexact alike), with the expected values for [1, 0, 1].
def test_vector_return_type(self):
a = np.array([1, 0, 1])
exact_types = np.typecodes['AllInteger']
inexact_types = np.typecodes['AllFloat']
all_types = exact_types + inexact_types
for each_inexact_types in all_types:
at = a.astype(each_inexact_types)
an = norm(at, -np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 0.0)
with suppress_warnings() as sup:
# ord=-1 divides by the zero entry; silence that warning only.
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 0.0)
an = norm(at, 0)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2)
an = norm(at, 1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0))
an = norm(at, 4)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0))
an = norm(at, np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
# Same as above for matrix norms over [[1, 0, 1], [0, 1, 1]].
def test_matrix_return_type(self):
a = np.array([[1, 0, 1], [0, 1, 1]])
exact_types = np.typecodes['AllInteger']
# float32, complex64, float64, complex128 types are the only types
# allowed by `linalg`, which performs the matrix operations used
# within `norm`.
inexact_types = 'fdFD'
all_types = exact_types + inexact_types
for each_inexact_types in all_types:
at = a.astype(each_inexact_types)
an = norm(at, -np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
an = norm(at, 1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 3.0**(1.0/2.0))
an = norm(at, -2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
an = norm(at, np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 'fro')
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 'nuc')
assert_(issubclass(an.dtype.type, np.floating))
# Lower bar needed to support low precision floats.
# They end up being off by 1 in the 7th place.
old_assert_almost_equal(an, 2.7320508075688772, decimal=6)
# Hand-computed vector norm values; sign patterns must not matter.
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5),
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 0), 4,
decimal=self.dec)
# Check plain lists and arrays of the dtype under test.
for v in (a, b, c,):
_test(v)
for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
array(c, dtype=self.dt)):
_test(v)
# Hand-computed matrix norms for a fixed 2x2 matrix; invalid orders raise.
def test_matrix_2x2(self):
A = matrix([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84 ** 0.5)
assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 10.0)
assert_almost_equal(norm(A, inf), 12.0)
assert_almost_equal(norm(A, -inf), 4.0)
assert_almost_equal(norm(A, 1), 10.0)
assert_almost_equal(norm(A, -1), 6.0)
assert_almost_equal(norm(A, 2), 9.1231056256176615)
assert_almost_equal(norm(A, -2), 0.87689437438234041)
assert_raises(ValueError, norm, A, 'nofro')
assert_raises(ValueError, norm, A, -3)
assert_raises(ValueError, norm, A, 0)
def test_matrix_3x3(self):
# This test has been added because the 2x2 example
# happened to have equal nuclear norm and induced 1-norm.
# The 1/10 scaling factor accommodates the absolute tolerance
# used in assert_almost_equal.
A = (1 / 10) * \
np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
assert_almost_equal(norm(A, inf), 1.1)
assert_almost_equal(norm(A, -inf), 0.6)
assert_almost_equal(norm(A, 1), 1.0)
assert_almost_equal(norm(A, -1), 0.4)
assert_almost_equal(norm(A, 2), 0.88722940323461277)
assert_almost_equal(norm(A, -2), 0.19456584790481812)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
nd = B.ndim
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
for axis in itertools.combinations(range(-nd, nd), 2):
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if row_axis == col_axis:
assert_raises(ValueError, norm, B, ord=order, axis=axis)
else:
n = norm(B, ord=order, axis=axis)
# The logic using k_index only works for nd = 3.
# This has to be changed if nd is increased.
k_index = nd - (row_axis + col_axis)
if row_axis < col_axis:
expected = [norm(B[:].take(k, axis=k_index), ord=order)
for k in range(B.shape[k_index])]
else:
expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
for k in range(B.shape[k_index])]
assert_almost_equal(n, expected)
# keepdims=True must keep reduced axes as length-1 dims and preserve values.
def test_keepdims(self):
A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
allclose_err = 'order {0}, axis = {1}'
shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'
# check the order=None, axis=None case
expected = norm(A, ord=None, axis=None)
found = norm(A, ord=None, axis=None, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(None, None))
expected_shape = (1, 1, 1)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, None, None))
# Vector norms.
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
for k in range(A.ndim):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
# Matrix norms.
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
for k in itertools.permutations(range(A.ndim), 2):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k[0]] = 1
expected_shape[k[1]] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
# or `ord='nuc'` raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(np.AxisError, norm, B, None, 3)
assert_raises(np.AxisError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
# Regression tests for norm() that don't fit the systematic _TestNorm grid.
class TestNorm_NonSystematic(object):
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
# UnboundLocalError.
x = np.arange(10, dtype=np.longdouble)
old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)
def test_intmin(self):
# Non-regression test: p-norm of signed integer would previously do
# float cast and abs in the wrong order.
x = np.array([-2 ** 31], dtype=np.int32)
old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)
def test_complex_high_ord(self):
# gh-4156
# Expected value checked at decreasing precision for narrower dtypes.
d = np.empty((2,), dtype=np.clongdouble)
d[0] = 6 + 7j
d[1] = -6 + 7j
res = 11.615898132184
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
d = d.astype(np.complex128)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
d = d.astype(np.complex64)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
class TestNormDouble(_TestNorm):
    """Norm test suite at double precision."""
    dt = np.double
    dec = 12
class TestNormSingle(_TestNorm):
    """Norm test suite at single precision."""
    dt = np.float32
    dec = 6
class TestNormInt64(_TestNorm):
    """Norm test suite for 64-bit integer input."""
    dt = np.int64
    dec = 12
class TestMatrixRank(object):
    """Tests for np.linalg.matrix_rank.

    The original version used nose-style ``yield assert_equal, ...``
    generator tests, which pytest does not execute (they are silently
    skipped) and which nose itself deprecated.  The checks are now run
    as direct assertions.
    """
    def test_matrix_rank(self):
        # Full rank matrix
        assert_equal(4, matrix_rank(np.eye(4)))
        # rank deficient matrix
        I = np.eye(4)
        I[-1, -1] = 0.
        assert_equal(matrix_rank(I), 3)
        # All zeros - zero rank
        assert_equal(matrix_rank(np.zeros((4, 4))), 0)
        # 1 dimension - rank 1 unless all 0
        assert_equal(matrix_rank([1, 0, 0, 0]), 1)
        assert_equal(matrix_rank(np.zeros((4,))), 0)
        # accepts array-like
        assert_equal(matrix_rank([1]), 1)
        # greater than 2 dimensions treated as stacked matrices
        ms = np.array([I, np.eye(4), np.zeros((4, 4))])
        assert_equal(matrix_rank(ms), np.array([3, 4, 0]))
        # works on scalar
        assert_equal(matrix_rank(1), 1)
def test_reduced_rank():
    """Check that matrix_rank() detects linearly dependent columns."""
    random_state = np.random.RandomState(20120714)
    for _ in range(100):
        # Start from a (almost surely) full-rank 40x10 Gaussian matrix,
        # then force column dependencies one at a time.
        mat = random_state.normal(size=(40, 10))
        mat[:, 0] = mat[:, 1] + mat[:, 2]
        # One dependent column: rank drops from 10 to 9.
        assert_equal(matrix_rank(mat), 9)
        mat[:, 3] = mat[:, 4] + mat[:, 5]
        # A second dependency drops the rank to 8.
        assert_equal(matrix_rank(mat), 8)
class TestQR(object):
    """Tests for np.linalg.qr across its 'complete', 'reduced', 'r'
    and 'raw' modes.

    NOTE(review): several expectations here track the numpy version this
    suite was written against (LinAlgError on empty inputs, np.matrix
    round-tripping) — confirm before reusing on newer numpy.
    """
    def check_qr(self, a):
        # This test expects the argument `a` to be an ndarray or
        # a subclass of an ndarray of inexact type.
        a_type = type(a)
        a_dtype = a.dtype
        m, n = a.shape
        k = min(m, n)
        # mode == 'complete'
        q, r = linalg.qr(a, mode='complete')
        # dtype and array subclass must be preserved by qr
        assert_(q.dtype == a_dtype)
        assert_(r.dtype == a_dtype)
        assert_(isinstance(q, a_type))
        assert_(isinstance(r, a_type))
        # 'complete' returns a square q (m, m) and full-height r (m, n)
        assert_(q.shape == (m, m))
        assert_(r.shape == (m, n))
        # q @ r reconstructs a; q is unitary; r is upper triangular
        assert_almost_equal(dot(q, r), a)
        assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
        assert_almost_equal(np.triu(r), r)
        # mode == 'reduced'
        q1, r1 = linalg.qr(a, mode='reduced')
        assert_(q1.dtype == a_dtype)
        assert_(r1.dtype == a_dtype)
        assert_(isinstance(q1, a_type))
        assert_(isinstance(r1, a_type))
        # 'reduced' returns economy-size factors q1 (m, k), r1 (k, n)
        assert_(q1.shape == (m, k))
        assert_(r1.shape == (k, n))
        assert_almost_equal(dot(q1, r1), a)
        assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
        assert_almost_equal(np.triu(r1), r1)
        # mode == 'r'
        r2 = linalg.qr(a, mode='r')
        assert_(r2.dtype == a_dtype)
        assert_(isinstance(r2, a_type))
        # 'r' must agree with the r factor of 'reduced'
        assert_almost_equal(r2, r1)
    def test_qr_empty(self):
        # A 0-row matrix is expected to raise (old-numpy behavior;
        # newer numpy handles empty inputs).
        a = np.zeros((0, 2))
        assert_raises(linalg.LinAlgError, linalg.qr, a)
    def test_mode_raw(self):
        # The factorization is not unique and varies between libraries,
        # so it is not possible to check against known values. Functional
        # testing is a possibility, but awaits the exposure of more
        # of the functions in lapack_lite. Consequently, this test is
        # very limited in scope. Note that the results are in FORTRAN
        # order, hence the h arrays are transposed.
        a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
        # Test double
        h, tau = linalg.qr(a, mode='raw')
        assert_(h.dtype == np.double)
        assert_(tau.dtype == np.double)
        # h is the LAPACK-style (Fortran-order) result, hence transposed
        assert_(h.shape == (2, 3))
        assert_(tau.shape == (2,))
        h, tau = linalg.qr(a.T, mode='raw')
        assert_(h.dtype == np.double)
        assert_(tau.dtype == np.double)
        assert_(h.shape == (3, 2))
        assert_(tau.shape == (2,))
    def test_mode_all_but_economic(self):
        # Exercise check_qr for float32/float64, real and complex,
        # square/tall/wide shapes, and np.matrix inputs.
        a = array([[1, 2], [3, 4]])
        b = array([[1, 2], [3, 4], [5, 6]])
        for dt in "fd":
            m1 = a.astype(dt)
            m2 = b.astype(dt)
            self.check_qr(m1)
            self.check_qr(m2)
            self.check_qr(m2.T)
            self.check_qr(matrix(m1))
        for dt in "fd":
            # complex variants of the same matrices
            m1 = 1 + 1j * a.astype(dt)
            m2 = 1 + 1j * b.astype(dt)
            self.check_qr(m1)
            self.check_qr(m2)
            self.check_qr(m2.T)
            self.check_qr(matrix(m1))
    def test_0_size(self):
        # There may be good ways to do (some of this) reasonably:
        a = np.zeros((0, 0))
        assert_raises(linalg.LinAlgError, linalg.qr, a)
        a = np.zeros((0, 1))
        assert_raises(linalg.LinAlgError, linalg.qr, a)
        a = np.zeros((1, 0))
        assert_raises(linalg.LinAlgError, linalg.qr, a)
class TestCholesky(object):
    # TODO: are there no other tests for cholesky?
    def test_0_size(self):
        """cholesky() on 0-sized stacks: shape is preserved, the dtype is
        promoted to a floating/complex type, and the result is a plain
        ndarray even for ndarray subclasses."""
        class _Sub(np.ndarray):
            pass
        # (input array, expected output scalar type)
        cases = [
            (np.zeros((0, 1, 1), dtype=np.int_), np.float64),
            (np.zeros((1, 0, 0), dtype=np.complex64), np.complex64),
        ]
        for base, expected_type in cases:
            arr = base.view(_Sub)
            res = linalg.cholesky(arr)
            assert_equal(arr.shape, res.shape)
            assert_(res.dtype.type is expected_type)
            # for documentation purpose: the subclass is not preserved
            assert_(isinstance(res, np.ndarray))
def test_byteorder_check():
    """inv/det/pinv must accept explicitly-native-order and byte-swapped
    inputs and produce the same result as for '='-order input.

    Fix: the original used ``ndarray.newbyteorder``, which was removed in
    NumPy 2.0; the equivalent ``arr.view(arr.dtype.newbyteorder(...))``
    works on both old and new numpy.
    """
    # Byte order check should pass for native order
    if sys.byteorder == 'little':
        native = '<'
    else:
        native = '>'
    for dtt in (np.float32, np.float64):
        arr = np.eye(4, dtype=dtt)
        # Same data, dtype spelled with the explicit native byte order.
        n_arr = arr.view(arr.dtype.newbyteorder(native))
        # Swap the dtype's declared order AND the raw bytes: the values
        # are unchanged but the byte order is non-native.
        sw_arr = arr.view(arr.dtype.newbyteorder('S')).byteswap()
        assert_equal(arr.dtype.byteorder, '=')
        for routine in (linalg.inv, linalg.det, linalg.pinv):
            # Normal call
            res = routine(arr)
            # Native but not '='
            assert_array_equal(res, routine(n_arr))
            # Swapped
            assert_array_equal(res, routine(sw_arr))
def test_generalized_raise_multiloop():
    """The gufunc inner loop must propagate LinAlgError even when the
    failing (singular) matrix is not in the last iteration."""
    good = np.array([[1, 2], [3, 4]])
    singular = np.array([[1, 1], [1, 1]])
    # Non-contiguous stack of 2x2 matrices so inv() takes the multi-step
    # inner-loop path.
    stack = np.zeros([4, 4, 2, 2])[1::2]
    stack[...] = good
    # Put the singular matrix first, not last.
    stack[0, 0] = singular
    assert_raises(np.linalg.LinAlgError, np.linalg.inv, stack)
def test_xerbla_override():
    """Verify numpy's custom xerbla error handler is linked into LAPACK.

    Runs the check in a forked child process because the default LAPACK
    xerbla may abort the process outright; the child reports the outcome
    through its exit status.
    """
    # Check that our xerbla has been successfully linked in. If it is not,
    # the default xerbla routine is called, which prints a message to stdout
    # and may, or may not, abort the process depending on the LAPACK package.
    XERBLA_OK = 255
    try:
        pid = os.fork()
    except (OSError, AttributeError):
        # fork failed, or not running on POSIX
        raise SkipTest("Not POSIX or fork failed.")
    if pid == 0:
        # child; close i/o file handles
        os.close(1)
        os.close(0)
        # Avoid producing core files.
        import resource
        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
        # These calls may abort.
        try:
            np.linalg.lapack_lite.xerbla()
        except ValueError:
            pass
        except Exception:
            # Anything other than ValueError means the override is absent.
            os._exit(os.EX_CONFIG)
        try:
            a = np.array([[1.]])
            # Deliberately pass an invalid LDA so LAPACK calls xerbla.
            np.linalg.lapack_lite.dorgqr(
                1, 1, 1, a,
                0, # <- invalid value
                a, a, 0, 0)
        except ValueError as e:
            if "DORGQR parameter number 5" in str(e):
                # success, reuse error code to mark success as
                # FORTRAN STOP returns as success.
                os._exit(XERBLA_OK)
        # Did not abort, but our xerbla was not linked in.
        os._exit(os.EX_CONFIG)
    else:
        # parent
        pid, status = os.wait()
        # Any exit status other than XERBLA_OK means the override is
        # missing (or the child aborted); skip rather than fail.
        if os.WEXITSTATUS(status) != XERBLA_OK:
            raise SkipTest('Numpy xerbla not linked in.')
class TestMultiDot(object):
def test_basic_function_with_three_arguments(self):
# multi_dot with three arguments uses a fast hand coded algorithm to
# determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
def test_basic_function_with_dynamic_programing_optimization(self):
# multi_dot with four or more arguments uses the dynamic programing
# optimization and therefore deserve a separate
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 1))
assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
def test_vector_as_first_argument(self):
# The first argument can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 2))
# the result should be 1-D
assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
def test_vector_as_last_argument(self):
# The last argument can be 1-D
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be 1-D
assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
def test_vector_as_first_and_last_argument(self):
# The first and last arguments can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be a scalar
assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
def test_dynamic_programming_logic(self):
# Test for the dynamic programming part
# This test is directly taken from Cormen page 376.
arrays = [np.random.random((30, 35)),
np.random.random((35, 15)),
np.random.random((15, 5)),
np.random.random((5, 10)),
np.random.random((10, 20)),
np.random.random((20, 25))]
m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],
[0., 0., 2625., 4375., 7125., 10500.],
[0., 0., 0., 750., 2500., 5375.],
[0., 0., 0., 0., 1000., 3500.],
[0., 0., 0., 0., 0., 5000.],
[0., 0., 0., 0., 0., 0.]])
s_expected = np.array([[0, 1, 1, 3, 3, 3],
[0, 0, 2, 3, 3, 3],
[0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 4, 5],
[0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 0]], dtype=np.int)
s_expected -= 1 # Cormen uses 1-based index, python does not.
s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
# Only the upper triangular part (without the diagonal) is interesting.
assert_almost_equal(np.triu(s[:-1, 1:]),
np.triu(s_expected[:-1, 1:]))
assert_almost_equal(np.triu(m), np.triu(m_expected))
def test_too_few_input_arrays(self):
assert_raises(ValueError, multi_dot, [])
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    # NOTE(review): run_module_suite is presumably numpy.testing's
    # nose-based runner (imported outside this view); it was removed in
    # newer numpy — confirm against the project's pinned version.
    run_module_suite()
| 36.010811 | 96 | 0.554855 |
4b9ce1d405fdf9951b558b04ea92e50e97ff468e | 191 | py | Python | exec.28.py | Domingosws/Aprendendo.PYTHON.3 | f525e4324ab3fe33107b9c8cbea2deb61736d151 | [
"MIT"
] | 1 | 2022-02-01T04:22:56.000Z | 2022-02-01T04:22:56.000Z | exec.28.py | Domingosws/Aprendendo.PYTHON.3 | f525e4324ab3fe33107b9c8cbea2deb61736d151 | [
"MIT"
] | null | null | null | exec.28.py | Domingosws/Aprendendo.PYTHON.3 | f525e4324ab3fe33107b9c8cbea2deb61736d151 | [
"MIT"
] | null | null | null | #condição composta
# Read the user's name.  (input() already returns str; the str() call is
# redundant but kept as-is.)
nome=str(input('Qual é seu nome?'))
# Compound condition: special greeting for one exact, case-sensitive name.
if nome=='domingos':
    print('Que nome lindo você tem!')
else:
    print('Seu nome é tão nomal!')
# Greet by name regardless of which branch ran.
print('Boa noite, {}'.format(nome))
| 23.875 | 37 | 0.659686 |
cae3d7083923cfc4a853abb2783e41b03cb1b5c8 | 979 | py | Python | tests/test_models/test_decoder/test_context_net.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 481 | 2021-11-16T07:04:23.000Z | 2022-03-31T22:21:21.000Z | tests/test_models/test_decoder/test_context_net.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 72 | 2021-11-16T12:25:55.000Z | 2022-03-28T13:10:45.000Z | tests/test_models/test_decoder/test_context_net.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 48 | 2021-11-16T06:48:46.000Z | 2022-03-30T12:46:40.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmflow.models.decoders.context_net import ContextNet
def test_context_net():
    """Unit test for mmflow's ContextNet: input validation, per-layer
    output channels, and the shape of the predicted flow."""
    # test invalid inchannels type
    with pytest.raises(AssertionError):
        ContextNet(in_channels='invalid')
    feat_channels = (128, 128, 128, 96, 64, 32)
    context_net = ContextNet(
        in_channels=512,
        out_channels=2,
        feat_channels=feat_channels,
        dilations=(1, 2, 4, 8, 16, 1),
        conv_cfg=None,
        norm_cfg=None,
        act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
        init_cfg=None)
    # test layer out_channels
    # Each intermediate conv layer must emit the configured feature width;
    # the final layer emits the 2-channel flow field.
    for i, feat_in in enumerate(feat_channels):
        assert context_net.layers[i].conv.out_channels == feat_in
    assert context_net.layers[-1].out_channels == 2
    # test predicted flow shape
    # NOTE(review): input is NCHW (batch, channels, H, W); the net is
    # presumably fully convolutional, so spatial size is preserved.
    in_feat = torch.randn(1, 512, 100, 100)
    out_flow = context_net(in_feat)
    assert out_flow.shape == torch.Size((1, 2, 100, 100))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.