text string | size int64 | token_count int64 |
|---|---|---|
import unittest
from model import Cell, Module, OutputType, CellType, Mux
import random
class CellTests(unittest.TestCase):
    """Unit tests for the Cell model: async/sync outputs, stability checks,
    operator selection (AND/OR/XOR) and per-cell output history."""

    # Fix: 'async' became a reserved keyword in Python 3.7, so the original
    # `OutputType.async` attribute access is now a SyntaxError.  Fetch the
    # enum member dynamically instead.
    ASYNC_OUTPUT = getattr(OutputType, "async")

    def setUp(self):
        # Fresh default cell (AND operator) for every test.
        self.c = Cell()

    def tearDown(self):
        pass

    def testAsyncOutputFalseWhenBothInputsFalse(self):
        self.c.driveInputs([False, False])
        self.assertFalse(self.c.asyncOutput())

    def testAsyncOutputFalseWhenOneInputFalse(self):
        self.c.driveInputs([True, False])
        self.assertFalse(self.c.asyncOutput())

    def testAsyncOutputTrueWhenBothInputsTrue(self):
        self.c.driveInputs([True, True])
        self.assertTrue(self.c.asyncOutput())

    def testSyncOutputResetsToFalse(self):
        # Before any clock edge the flopped output must be the reset value.
        self.assertFalse(self.c.syncOutput())

    def testSyncOutputFalseWhenBothInputsFalse(self):
        self.c.driveInputs([False, False])
        self.c.clk()
        self.assertFalse(self.c.syncOutput())

    def testSyncOutputTrueWhenBothInputsTrue(self):
        self.c.driveInputs([True, True])
        self.c.clk()
        self.assertTrue(self.c.syncOutput())

    def testSyncOutputUpdatesWith2ndClk(self):
        self.c.driveInputs([True, True])
        self.c.clk()
        self.c.driveInputs([False, False])
        self.c.clk()
        self.assertFalse(self.c.syncOutput())

    def testSyncOutputHolds(self):
        # A second clock with unchanged inputs must not disturb the output.
        self.c.driveInputs([True, True])
        self.c.clk()
        self.c.clk()
        self.assertTrue(self.c.syncOutput())

    def testAsyncStableWhenFalse(self):
        self.c.driveInputs([False, False])
        self.c.driveInputs([False, False])
        self.assertTrue(self.c.isStable())

    def testAsyncStableWhenBothTrue(self):
        self.c.driveInputs([True, True])
        self.c.driveInputs([True, True])
        self.assertTrue(self.c.isStable())

    def testAsyncStableWhenBothFalse(self):
        self.c.driveInputs([False, False])
        self.c.driveInputs([False, False])
        self.assertTrue(self.c.isStable())

    def testAsyncNotStableWhenAChanges(self):
        self.c.driveInputs([True, True])
        self.c.driveInputs([False, True])
        self.assertFalse(self.c.isStable())

    def testAsyncNotStableWhenBChanges(self):
        self.c.driveInputs([True, True])
        self.c.clk()
        self.c.driveInputs([True, False])
        self.c.clk()
        self.assertFalse(self.c.isStable())

    def testCellCanBeOr(self):
        self.c.setOperator(CellType._or)
        self.c.driveInputs([False, True])
        self.assertTrue(self.c.asyncOutput())

    def testCellCanBeXor(self):
        self.c.setOperator(CellType._xor)
        self.c.driveInputs([True, True])
        self.assertFalse(self.c.asyncOutput())
        self.c.driveInputs([False, False])
        self.assertFalse(self.c.asyncOutput())
        self.c.driveInputs([False, True])
        self.assertTrue(self.c.asyncOutput())

    def testSetForAsyncOutput(self):
        # See ASYNC_OUTPUT above -- `OutputType.async` is invalid syntax on 3.7+.
        self.c.setOutputType(self.ASYNC_OUTPUT)
        self.c.driveInputs([True, True])
        self.assertTrue(self.c.output())

    def testSetForSyncOutput(self):
        self.c.setOutputType(OutputType.sync)
        self.c.driveInputs([True, True])
        self.assertFalse(self.c.output())
        self.c.clk()
        self.assertTrue(self.c.output())

    def testGetOutputType(self):
        self.c.setOutputType(OutputType.sync)
        # assertEqual gives a useful diff on failure, unlike assertTrue(a == b).
        self.assertEqual(self.c.getOutputType(), OutputType.sync)

    def testCellHistory(self):
        self.c.setOutputType(OutputType.sync)
        self.c.driveInputs([True, True])
        for i in range(50):
            if i == 49:
                # Flip inputs on the final cycle so history ends with False.
                self.c.driveInputs([False, False])
            self.c.clk()
            self.c.output()
        self.assertEqual(len(self.c.cellHistory()), 50)
        self.assertEqual(self.c.cellHistory(), [True] * 49 + [False])

    def testCellHistoryFixed(self):
        self.c.setOutputType(OutputType.sync)
        self.c.driveInputs([True, True])
        for i in range(50):
            self.c.clk()
            self.c.output()
        self.assertTrue(self.c.cellHistoryFixed())

    def testCellHistoryNotFixed(self):
        self.c.setOutputType(OutputType.sync)
        self.c.driveInputs([True, True])
        self.c.clk()
        self.c.output()
        self.c.driveInputs([False, True])
        self.c.clk()
        self.c.output()
        self.assertFalse(self.c.cellHistoryFixed())

    def testNoCellHistoryForAsync(self):
        # Async cells record no history, so the history is never "fixed".
        self.c.setOutputType(self.ASYNC_OUTPUT)
        self.c.driveInputs([True, True])
        self.c.output()
        self.c.driveInputs([False, True])
        self.c.output()
        self.assertFalse(self.c.cellHistoryFixed())
class ModuleTests(unittest.TestCase):
    """Unit tests for the Module model: grid construction, flopping,
    output muxing/fanout and output history tracking."""

    def setUp(self):
        self.m = Module()

    def tearDown(self):
        pass

    def depth(self):
        # Number of rows in the cell grid.
        return len(self.m.cells)

    def width(self):
        # Number of cells in the first row.
        return len(self.m.cells[0])

    def createGridAndTieCell0Input(self, wIn, wOut, width, depth=1, initValForCell0=False):
        """Build a wIn-input / wOut-output grid and tie cell 0's input."""
        self.m.createGrid(wIn, wOut, width, depth)
        self.m.tieCell0([initValForCell0])

    # assertEqual is used throughout instead of assertTrue(a == b) so that
    # failures report the actual/expected values.
    def testInit4x1(self):
        self.createGridAndTieCell0Input(4, 4, 4, 1)
        self.assertEqual(self.depth(), 1)
        self.assertEqual(self.width(), 4)

    def testInitNxN(self):
        self.createGridAndTieCell0Input(7, 7, 7, 6)
        self.assertEqual(self.depth(), 6)
        self.assertEqual(self.width(), 7)

    def test2x1AndTiedLow(self):
        self.createGridAndTieCell0Input(2, 2, 2, 1)
        self.m.driveInputs([True, True])
        self.assertEqual(self.m.sampleOutputs(), [False, False])

    def test2x1AndTiedHigh(self):
        self.createGridAndTieCell0Input(2, 2, 2, 1, True)
        self.m.driveInputs([True, True])
        self.assertEqual(self.m.sampleOutputs(), [True, True])

    def test3x1AndTiedHigh(self):
        self.createGridAndTieCell0Input(3, 3, 3, 1, True)
        self.m.driveInputs([True, True, False])
        self.assertEqual(self.m.sampleOutputs(), [True, True, False])

    def test2x2AndTiedHigh(self):
        self.createGridAndTieCell0Input(2, 2, 2, 2, True)
        self.m.driveInputs([True, True])
        self.assertEqual(self.m.sampleOutputs(), [True, True])
        self.m.driveInputs([True, False])
        self.assertEqual(self.m.sampleOutputs(), [False, False])

    def test3x2AndTiedHigh(self):
        self.createGridAndTieCell0Input(3, 3, 3, 2, True)
        self.m.driveInputs([True, True, True])
        self.assertEqual(self.m.sampleOutputs(), [True, True, True])
        self.m.driveInputs([True, False, True])
        self.assertEqual(self.m.sampleOutputs(), [False, False, False])

    def testFixNumberOfFlopsTo0(self):
        self.createGridAndTieCell0Input(25, 25, 25, 14, True)
        self.m.setNumFlops(0)
        self.assertEqual(self.m.getNumFlops(), 0)

    def testFixNumberOfFlopsToLtWidth(self):
        self.createGridAndTieCell0Input(25, 25, 25, 14, True)
        self.m.setNumFlops(17)
        self.assertEqual(self.m.getNumFlops(), 17)

    def testFixNumberOfFlopsToGtWidth(self):
        self.createGridAndTieCell0Input(25, 25, 25, 14, True)
        self.m.setNumFlops(28)
        self.assertEqual(self.m.getNumFlops(), 28)

    def testFixNumberOfFlopsToMax(self):
        self.createGridAndTieCell0Input(25, 25, 25, 14, True)
        self.m.setNumFlops(25 * 14)
        self.assertEqual(self.m.getNumFlops(), 25 * 14)

    def test2x1FloppedAndTiedHigh(self):
        self.createGridAndTieCell0Input(2, 2, 2, 1, True)
        self.m.setNumFlops(2)
        self.m.driveInputs([True, True])
        self.m.clk()
        # One clock propagates the first column; the second completes the row.
        self.assertEqual(self.m.sampleOutputs(), [True, False])
        self.m.clk()
        self.assertEqual(self.m.sampleOutputs(), [True, True])

    def testOutputMuxOnlyExistsWhenOutputSmallerThanInputWidth(self):
        self.createGridAndTieCell0Input(2, 2, 2)
        self.assertEqual(self.m.outputMux, None)

    def testOutputMuxForMoreInputsThanOutputs(self):
        self.createGridAndTieCell0Input(2, 1, 2)
        self.assertNotEqual(self.m.outputMux, None)

    def testOutputSizeFor2Inputs1Output(self):
        self.createGridAndTieCell0Input(2, 1, 2)
        self.m.driveInputs([True, True])
        self.assertEqual(len(self.m.sampleOutputs()), 1)

    def testOutputFor2Inputs1Output(self):
        self.createGridAndTieCell0Input(2, 1, 2, 1, True)
        self.m.driveInputs([True, True])
        self.assertEqual(self.m.sampleOutputs(), [True])

    def testOutputFor3Inputs2Output(self):
        self.createGridAndTieCell0Input(3, 2, 3, 1, True)
        self.m.driveInputs([True, True, False])
        self.assertEqual(self.m.sampleOutputs(), [True, False])

    def testOutputFor4Inputs3Output(self):
        self.createGridAndTieCell0Input(4, 3, 4, 1, True)
        self.m.driveInputs([True, True, True, False])
        self.assertEqual(self.m.sampleOutputs(), [True, True, False])

    def testOutputFor5Inputs4Output(self):
        self.createGridAndTieCell0Input(5, 4, 5, 1, True)
        self.m.driveInputs([True, True, True, False, False])
        self.assertEqual(self.m.sampleOutputs(), [True, True, False, False])

    def testOutputFor8Inputs5Output(self):
        self.createGridAndTieCell0Input(8, 5, 8, 1, True)
        self.m.driveInputs([True] * 6 + [False, False])
        self.assertEqual(self.m.sampleOutputs(), [True, True, True, False, False])

    def testModuleHasFixedCells(self):
        self.createGridAndTieCell0Input(2, 2, 2)
        self.m.setNumFlops(2)
        self.m.driveInputs([True] * 2)
        self.m.clk()
        self.m.sampleOutputs()
        self.m.clk()
        self.m.sampleOutputs()
        self.assertTrue(self.m.moduleHasFixedCells())

    def testModuleHasNoFixedCells(self):
        self.createGridAndTieCell0Input(2, 2, 2, 1, True)
        self.m.cells[0][1].setOutputType(OutputType.sync)
        self.m.driveInputs([True] * 2)
        self.m.clk()
        self.m.sampleOutputs()
        self.m.driveInputs([False] * 2)
        self.m.clk()
        self.m.sampleOutputs()
        self.assertFalse(self.m.moduleHasFixedCells())

    def testOutputHistory(self):
        self.createGridAndTieCell0Input(2, 2, 2, 1, True)
        self.m.driveInputs([True, True])
        self.m.sampleOutputs()
        self.m.sampleOutputs()
        self.m.sampleOutputs()
        self.assertEqual(len(self.m.outputHistory()), 3)
        self.assertEqual(self.m.outputHistory(), [[True, True], [True, True], [True, True]])
        self.assertTrue(self.m.outputsFixed())

    def testOutputsNotFixed(self):
        self.createGridAndTieCell0Input(2, 2, 2, 1, True)
        self.m.driveInputs([True, True])
        self.m.sampleOutputs()
        self.m.driveInputs([False, False])
        self.m.sampleOutputs()
        self.assertFalse(self.m.outputsFixed())

    def testOutputFor1Input2Outputs(self):
        self.createGridAndTieCell0Input(1, 2, 2, 1, True)
        self.m.driveInputs([True])
        self.assertEqual(self.m.sampleOutputs(), [True, True])

    def testOutputFor2Input4Outputs(self):
        self.createGridAndTieCell0Input(2, 4, 4, 1, True)
        self.m.driveInputs([True, True])
        self.assertEqual(self.m.sampleOutputs(), [True, True] * 2)

    def testOutputForLargerGridWidth(self):
        self.createGridAndTieCell0Input(2, 4, 6, 1, True)
        self.m.driveInputs([True, True])
        self.assertEqual(self.m.sampleOutputs(), [True, True] * 2)
class MuxTests(unittest.TestCase):
    """Behavioral checks for the Mux model: the trailing inputs act as
    select lines and the decoded index routes one data input through."""

    def setUp(self):
        self.m = Mux()

    def tearDown(self):
        pass

    def testInputSelect2InputSelect0(self):
        """Two inputs, select line low: index 0 is chosen."""
        self.m.driveInputs([False, True])
        self.assertEqual(0, self.m.inputSelect())

    def testInputSelect2InputSelect1(self):
        """Two inputs, select line high: index 1 is chosen."""
        self.m.driveInputs([True, True])
        self.assertEqual(1, self.m.inputSelect())

    def testInputSelect3InputSelect0(self):
        self.m.driveInputs([False, False, True])
        self.assertEqual(0, self.m.inputSelect())

    def testInputSelect3InputSelect1(self):
        self.m.driveInputs([True, False, True])
        self.assertEqual(1, self.m.inputSelect())

    def testInputSelect3InputSelect2(self):
        self.m.driveInputs([False, True, True])
        self.assertEqual(2, self.m.inputSelect())

    def testInputSelect3InputSelectOverflow(self):
        """A select value past the last input clamps to the highest index."""
        self.m.driveInputs([True] * 3)
        self.assertEqual(2, self.m.inputSelect())

    def testInputSelect4InputSelect3(self):
        self.m.driveInputs([True] * 3 + [False])
        self.assertEqual(3, self.m.inputSelect())

    def test2InputSelect0(self):
        """Selected data input is low, so the mux output is low."""
        self.m.driveInputs([False, False])
        self.assertFalse(self.m.asyncOutput())

    def test2InputSelect1(self):
        """Selected data input is high, so the mux output is high."""
        self.m.driveInputs([True, True])
        self.assertTrue(self.m.asyncOutput())

    def test4InputSelect3(self):
        self.m.driveInputs([True] * 3 + [False])
        self.assertFalse(self.m.asyncOutput())
# Standard unittest entry point: discover and run every test case above.
if __name__ == "__main__":
    unittest.main()
| 12,000 | 4,648 |
"""Test the smarttub config flow."""
from unittest.mock import patch
from smarttub import LoginFailed
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.smarttub.const import DOMAIN
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from tests.common import MockConfigEntry
async def test_form(hass):
    """Test we get the form.

    Consistency fix: use the shared ``data_entry_flow`` result-type
    constants instead of bare strings, matching the reauth tests in
    this module.
    """
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # Patch setup so submitting the form doesn't try to start the integration.
    with patch(
        "homeassistant.components.smarttub.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_EMAIL: "test-email", CONF_PASSWORD: "test-password"},
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "test-email"
        assert result["data"] == {
            CONF_EMAIL: "test-email",
            CONF_PASSWORD: "test-password",
        }
        await hass.async_block_till_done()
        mock_setup_entry.assert_called_once()
async def test_form_invalid_auth(hass, smarttub_api):
    """Test we handle invalid auth."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Make the mocked API reject the credentials *after* the form is shown,
    # so only the submission below hits the failure path.
    smarttub_api.login.side_effect = LoginFailed
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_EMAIL: "test-email", CONF_PASSWORD: "test-password"},
    )
    # The flow should re-present the form with a base error, not abort.
    assert result["type"] == "form"
    assert result["errors"] == {"base": "invalid_auth"}
async def test_reauth_success(hass, smarttub_api, account):
    """Test reauthentication flow."""
    # Seed hass with an existing entry keyed to the mocked account id.
    mock_entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_EMAIL: "test-email", CONF_PASSWORD: "test-password"},
        unique_id=account.id,
    )
    mock_entry.add_to_hass(hass)
    # Start a reauth flow against that entry.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": mock_entry.unique_id,
            "entry_id": mock_entry.entry_id,
        },
        data=mock_entry.data,
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "reauth_confirm"
    # Submitting new credentials should update the existing entry in place
    # and abort the flow rather than create a second entry.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_EMAIL: "test-email3", CONF_PASSWORD: "test-password3"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "reauth_successful"
    assert mock_entry.data[CONF_EMAIL] == "test-email3"
    assert mock_entry.data[CONF_PASSWORD] == "test-password3"
async def test_reauth_wrong_account(hass, smarttub_api, account):
    """Test reauthentication flow if the user enters credentials for a different already-configured account."""
    # Two pre-existing entries for two different accounts.
    mock_entry1 = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_EMAIL: "test-email1", CONF_PASSWORD: "test-password1"},
        unique_id=account.id,
    )
    mock_entry1.add_to_hass(hass)
    mock_entry2 = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_EMAIL: "test-email2", CONF_PASSWORD: "test-password2"},
        unique_id="mockaccount2",
    )
    mock_entry2.add_to_hass(hass)
    # we try to reauth account #2, and the user successfully authenticates to account #1
    account.id = mock_entry1.unique_id
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": mock_entry2.unique_id,
            "entry_id": mock_entry2.entry_id,
        },
        data=mock_entry2.data,
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "reauth_confirm"
    # Authenticating to the *other* account must abort: that account is
    # already configured by entry #1.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_EMAIL: "test-email1", CONF_PASSWORD: "test-password1"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
| 4,197 | 1,363 |
class DefenerVector:
    """Transactional guard around a list.

    Inside the ``with`` block the caller mutates a scratch copy; the
    changes are committed back into the original list only when the
    block exits without raising. On an exception the original list is
    left untouched and the exception propagates.
    """

    def __init__(self, v):
        self.__v = v

    def __enter__(self):
        # Hand out a working copy so the original stays pristine.
        self.__temp = list(self.__v)
        return self.__temp

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit only on a clean exit; never swallow the exception.
        success = exc_type is None
        if success:
            self.__v[:] = self.__temp
        return False
# Demo: adding a shorter vector raises IndexError inside the guard,
# so v1 is left unchanged and the error message is printed.
v1 = [1, 2, 3]
v2 = [1, 2]
try:
    with DefenerVector(v1) as guarded:
        for idx, _ in enumerate(guarded):
            guarded[idx] += v2[idx]
except Exception as err:
    print(err)
print(v1)
# Generated by Django 2.2.17 on 2021-01-10 12:35
from django.db import migrations
def enable_all_remote_config_feature_states(apps, schema_editor):
    """Forward data migration: enable every remote-config FeatureState.

    Uses the historical model via ``apps.get_model`` (migrations must not
    import the live model). ``schema_editor`` is unused but required by the
    RunPython signature.
    """
    FeatureState = apps.get_model('features', 'FeatureState')
    # update all existing remote config feature states to maintain current
    # functionality when hiding disabled flags since we've now merged flags
    # and remote config feature states.
    FeatureState.objects.filter(feature__type="CONFIG").update(enabled=True)
def reverse(apps, schema_editor):
    """Intentional no-op so the data migration can be unapplied safely."""
    return None
class Migration(migrations.Migration):
    # Pure data migration: no schema changes, only the RunPython fix-up above.

    dependencies = [
        ('features', '0024_auto_20200917_1032'),
    ]
    operations = [
        # reverse_code is a no-op so this migration can be rolled back.
        migrations.RunPython(
            enable_all_remote_config_feature_states, reverse_code=reverse
        )
    ]
| 785 | 244 |
# built in libraries
import unittest.mock
from tempfile import TemporaryDirectory
from os.path import join
# tamcolors libraries
from tamcolors.utils import identifier
class IdentifierTests(unittest.TestCase):
    """Exercise the tamcolors.utils.identifier helpers."""

    def test_globals(self):
        """Module-level constants carry the expected types."""
        self.assertIsInstance(identifier.IDENTIFIER_FILE_NAME, str)
        self.assertIsInstance(identifier.IDENTIFIER_SIZE, int)

    def test_generate_identifier(self):
        """generate_identifier_bytes yields bytes for default and explicit sizes."""
        with TemporaryDirectory() as scratch:
            target = join(scratch, "temp.id")
            self.assertIsInstance(identifier.generate_identifier_bytes(target), bytes)
            self.assertIsInstance(identifier.generate_identifier_bytes(target), bytes)
            for size in (1000, 9999):
                self.assertIsInstance(identifier.generate_identifier_bytes(target, size), bytes)

    def test_get_identifier_bytes(self):
        """The stored identifier is stable across reads; regenerating with a
        different size produces a different identifier."""
        with TemporaryDirectory() as scratch:
            target = join(scratch, "temp2.id")
            stored = identifier.get_identifier_bytes(target)
            self.assertIsInstance(stored, bytes)
            self.assertEqual(identifier.IDENTIFIER_SIZE, len(stored))
            for _ in range(10):
                self.assertEqual(stored, identifier.get_identifier_bytes(target))
            regenerated = identifier.generate_identifier_bytes(
                target, identifier.IDENTIFIER_SIZE + 1000)
            self.assertNotEqual(regenerated, stored)
| 1,488 | 436 |
# Invisible-cloak demo: replace red-cloth pixels in the live webcam feed
# with a pre-captured background frame.
import cv2          # image/video processing
import numpy as np  # array math on frames
import time         # pause while the camera settles

print("Hey! Have you ever heard about invisible cloak?")
print("What is an invisible cloak?")
print("""
You have watched invisible cloak in "Harry Potter" a lot, haven't you?
It's the same thing. How would I provide you that cloak?
Grab a red cloth first! I'll convert that cloth into an invisible cloak with my project!!!
""")

# Open the default webcam.
cap = cv2.VideoCapture(0)
# Give the camera a second to settle before capturing the still background.
time.sleep(1)

background = 0
# Capture 30 frames and keep the last one as the static background.
for i in range(30):
    ret, background = cap.read()
# Mirror the background so it matches the mirrored live frames below.
background = np.flip(background, axis=1)

while cap.isOpened():
    ret, img = cap.read()
    # Fix: bail out on a failed read (camera unplugged / stream ended);
    # the original would crash inside np.flip(None).
    if not ret:
        break
    img = np.flip(img, axis=1)
    # Work in HSV, where "red" is easy to threshold by hue.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # NOTE: the original computed an unused GaussianBlur of `hsv`;
    # removed as dead code (thresholding still uses `hsv` directly).
    # Red wraps around the hue axis, so threshold two ranges and combine.
    lower_red = np.array([0, 120, 70])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)
    lower_red = np.array([170, 120, 70])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red)
    # Adjust the two ranges above if your cloak is a different color.
    # Fix: combine binary masks with bitwise_or rather than uint8 '+',
    # which would wrap around if the ranges ever overlapped.
    mask = cv2.bitwise_or(mask1, mask2)
    # Morphological opening removes small noise specks from the mask.
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
    # Paint the detected cloth pixels with the saved background.
    img[np.where(mask == 255)] = background[np.where(mask == 255)]
    cv2.imshow('Display', img)
    # Wait 10 ms for a key; ESC (27) quits.
    k = cv2.waitKey(10)
    if k == 27:
        break

# Fix: release the camera and close the window (the original leaked both).
cap.release()
cv2.destroyAllWindows()
# Version is kept as a tuple of strings and rendered once for display.
__version_info__ = ('0', '1', '1')
__version__ = '{}.{}.{}'.format(*__version_info__)
from keyvalues.keyvalues import KeyValues
def load_keyvalues(filename):
    """Parse the KeyValues file at *filename* and return the loaded object."""
    parsed = KeyValues()
    parsed.load(filename)
    return parsed
| 207 | 71 |
from collections import deque
from itertools import islice
from .base import RollingObject
class Apply(RollingObject):
    """
    Iterator object that applies a function to
    a rolling window over a Python iterable.

    Parameters
    ----------
    iterable : any iterable object
    window_size : integer, the size of the rolling
        window moving over the iterable
    operation : callable, default sum
        a function, or class implementing a __call__
        method, to be applied to each window

    Complexity
    ----------
    Update time: operation dependent
    Memory usage: O(k)

    where k is the size of the rolling window

    Examples
    --------
    Rolling sum using builtin sum():

    >>> import rolling
    >>> seq = (8, 1, 1, 3, 6, 5)
    >>> r_sum = rolling.Apply(seq, 3, operation=sum)
    >>> next(r_sum)
    10
    >>> next(r_sum)
    5

    Reverse each window:

    >>> r_rev = rolling.Apply(seq, 4, operation=lambda x: list(reversed(x)))
    >>> list(r_rev)
    [[3, 1, 1, 8],
     [6, 3, 1, 1],
     [5, 6, 3, 1]]
    """
    def _init_fixed(self, iterable, window_size, operation=sum, **kwargs):
        # Pre-load the first window_size-1 items so the first update yields a
        # full window.  The items come from self._iterator (set up by the base
        # class); `iterable` is part of the shared _init_* signature.
        head = islice(self._iterator, window_size - 1)
        self._buffer = deque(head, maxlen=window_size)
        self._operation = operation

    def _init_variable(self, iterable, window_size, operation=sum, **kwargs):
        # Variable-size windows start empty and grow as values arrive.
        self._buffer = deque(maxlen=window_size)
        self._operation = operation

    @property
    def current_value(self):
        # Re-apply the operation to the whole window on every access.
        return self._operation(self._buffer)

    def _add_new(self, new):
        self._buffer.append(new)

    def _remove_old(self):
        self._buffer.popleft()

    def _update_window(self, new):
        # The maxlen-bounded deque evicts the oldest element automatically.
        self._buffer.append(new)

    @property
    def _obs(self):
        # Number of observations currently in the window.
        return len(self._buffer)

    def __repr__(self):
        # Fix: not every callable has __name__ (e.g. functools.partial or an
        # instance implementing __call__, both allowed per the docstring);
        # fall back to repr() for those instead of raising AttributeError.
        op_name = getattr(self._operation, "__name__", repr(self._operation))
        return "Rolling(operation='{}', window_size={}, window_type='{}')".format(
            op_name, self.window_size, self.window_type
        )
| 1,999 | 648 |
from typing import List
from discord import Role, Color, role
from ..bunkbot import BunkBot
from ..channel.channel_service import ChannelService
from ..core.bunk_user import BunkUser
from ..core.service import Service
from ..db.database_service import DatabaseService
class RoleService(Service):
    """
    Service responsible for handling role references
    and removing/adding new roles

    Parameters
    -----------
    bot: Bunkbot
        Super class instance of the bot
    database: DatabaseService
        Super class instance of the database service
    channels: ChannelService
        Access to the server channels and other channel functions
    """
    def __init__(self, bot: BunkBot, database: DatabaseService, channels: ChannelService):
        super().__init__(bot, database)
        self.admin: Role = None
        self.channels: ChannelService = channels

    def get_role(self, role_name: str) -> Role:
        """
        Get a role directly from the server by exact name.

        Parameters
        -----------
        role_name: str
            Name of the role to retrieve from the server

        Returns
        --------
        The matching Role, or None when no role has that name
        """
        return next((r for r in self.server.roles if r.name == role_name), None)

    def get_role_by_pattern(self, pattern: str, roles: List[Role] = None) -> Role:
        """
        Get a role directly from the server with a pattern "contains"

        Parameters
        -----------
        pattern: str
            Pattern which will be used to fuzzy search a role name
        roles: List[Role] (optional)
            Optional list of roles to search if the default server is not used
        """
        if roles is None:
            roles = self.server.roles
        return next((r for r in roles if pattern in r.name), None)

    async def rm_role(self, role_name: str, user: BunkUser = None) -> None:
        """
        Non-event driven - directly remove a role when another service has deemed appropriate

        Parameters
        -----------
        role_name: str
            Name of the role to remove
        user: Bunkuser (optional)
            When supplied, the role will be removed from a user rather than the server list
        """
        if user is not None:
            # Fix: the original copied user.member.roles and then immediately
            # overwrote the copy (dead assignment) -- build the filtered
            # list directly.
            await user.set_roles([r for r in user.member.roles if r.name != role_name])
        else:
            matches: List[Role] = [r for r in self.bot.server.roles.copy() if r.name == role_name]
            for match in matches:
                await match.delete()

    async def rm_roles_from_user(self, role_names: List[str], user: BunkUser) -> None:
        """
        Non-event driven - directly remove a role when another service has deemed appropriate

        Parameters
        -----------
        role_names: List[str]
            List of the roles to remove
        user: Bunkuser
            User from which the roles will be removed from a user
        """
        # The comprehension already builds a new list; no defensive copy needed.
        new_roles: List[Role] = [r for r in user.member.roles if r.name not in role_names]
        await user.set_roles(new_roles)

    async def add_role_to_user(self, role_name: str, user: BunkUser, color: Color = None) -> Role:
        """
        Non-event driven - directly add a role when another service has deemed appropriate

        Parameters
        -----------
        role_name: str
            Name of the role to add
        user: BunkUser
            User which to add the role
        color: Color (optional)
            Optionally add a color to the role

        Returns
        --------
        Role added to the user
        """
        # Copy is required: the helper appends to the list it is given.
        roles: List[Role] = await self._get_user_roles_to_set(user.member.roles.copy(), role_name, user, color)
        await user.set_roles(roles)
        return self.get_role(role_name)

    async def add_roles_to_user(self, role_names: List[str], user: BunkUser, color: Color = None) -> List[Role]:
        """
        Non-event driven - directly add multiple roles when another service has deemed appropriate

        Parameters
        -----------
        role_names: List[str]
            List of roles to add to the user
        user: BunkUser
            User which to add the roles
        color: Color (optional)
            Optionally add a color to the roles

        Returns
        --------
        Roles added to the user
        """
        roles = user.member.roles.copy()
        for role_name in role_names:
            roles = await self._get_user_roles_to_set(roles, role_name, user, color)
        await user.set_roles(roles)
        return roles

    async def _get_user_roles_to_set(self, current_roles: List[Role], role_name: str, user: BunkUser, color: Color = None) -> List[Role]:
        # Append role_name to current_roles (creating the server role on
        # demand) unless the user already has it.
        if not user.has_role(role_name):
            role = self.get_role(role_name)
            if role is None:
                if color is None:
                    role = await self.bot.server.create_role(name=role_name)
                else:
                    role = await self.bot.server.create_role(name=role_name, color=color)
            current_roles.append(role)
        return current_roles

    async def prune_orphaned_roles(self, pattern: str = None) -> None:
        """
        When updating users/roles check for roles which are no longer being used

        Parameters
        -----------
        pattern: str (optional)
            Only pruned orphaned roles that contain a specific pattern in the name
        """
        # Guard clause replaces the original `if ...: pass / else:` inversion.
        if self.bot.server is None:
            return
        if pattern is None:
            empty_roles: List[str] = [r.name for r in self.bot.server.roles if len(r.members) == 0]
        else:
            empty_roles: List[str] = [r.name for r in self.bot.server.roles if pattern in r.name and len(r.members) == 0]
        for orphan_role in empty_roles:
            await self.channels.log_info("Removing role `{0}`".format(orphan_role))
            await self.rm_role(orphan_role)

    async def get_role_containing(self, pattern: str, user: BunkUser) -> Role:
        """
        Get a user role that contains a given pattern in the name

        Parameters
        -----------
        pattern: str
            Pattern which the role name must contain
        user: BunkUser
            User which to find the role
        """
        return next((r for r in user.member.roles if pattern in r.name.lower()), None)

    async def get_lowest_index_for(self, pattern: str) -> int:
        """
        Get the lowest server role position among roles matching the pattern,
        or 1 when no role matches.

        Parameters
        -----------
        pattern: str
            Pattern which to locate a role by it's index
        """
        positions: List[int] = [r.position for r in self.bot.server.roles if pattern in r.name]
        # min() replaces the original sort-then-`roles[:1][0]` dance.
        return min(positions) if positions else 1
| 7,333 | 2,159 |
from __future__ import annotations
import abc
import enum
import pathlib
import typing as t
class Prediction(enum.Enum):
    """Represents model prediction (member values equal member names)."""

    def _generate_next_value_(name, start, count, last_values):
        # Make auto() assign the member's own name as its value.
        return name

    REAL = enum.auto()
    FAKE = enum.auto()
    UNCERTAIN = enum.auto()

    @classmethod
    def from_confidence(cls, confidence: float, threshold: float = 0.5) -> "Prediction":
        """Translate model confidence into prediction using given threshold.

        Returns:
            Model prediction over given threshold.
        """
        fake_score = confidence
        real_score = 1 - confidence
        if fake_score >= threshold:
            return cls.FAKE
        return cls.REAL if real_score >= threshold else cls.UNCERTAIN
class ModelInterface(abc.ABC):
    """High-level wrapper around actual models used underneath.

    The goal of exposed interface is to hide implementation details such as what library
    is used to define models. Currently interface operates on paths and handles only data
    stored on disk.
    """
    @abc.abstractmethod
    def train(self, train_ds_path: pathlib.Path, validation_ds_path: pathlib.Path) -> None:
        """Train model using given train and validation data.

        Args:
            train_ds_path: on-disk location of the training dataset.
            validation_ds_path: on-disk location of the validation dataset.
        """
    @abc.abstractmethod
    def test(self, test_ds_path: pathlib.Path) -> t.Dict[str, float]:
        """Evaluate model over provided test data.

        Args:
            test_ds_path: on-disk location of the test dataset.

        Returns:
            dict, metrics of interests mapped to their values
        """
    @abc.abstractmethod
    def predict(self, sample_path: pathlib.Path) -> t.Dict[pathlib.Path, Prediction]:
        """Make predictions over provided sample of frames.

        Returns:
            Mapping from each frame path to its Prediction.
        """
    @abc.abstractmethod
    def save(self, path: pathlib.Path):
        """Save model under given path."""
    @classmethod
    @abc.abstractmethod
    def load(cls, path: pathlib.Path) -> ModelInterface:
        """Load model from given path."""
    @abc.abstractmethod
    def get_available_metrics_names(self) -> t.List[str]:
        """Get names of metrics supported by model.

        Each metric value will be returned by train and test functions.

        Returns: names of supported metrics
        """
| 2,166 | 590 |
#python 3
from concurrent.futures import ThreadPoolExecutor
import threading
import random
def view_thread():
    """Print identifying details for the worker thread running this task."""
    ident = threading.get_ident()
    current = threading.current_thread()
    print("Executing Thread")
    print("Accesing thread : {}".format(ident))
    print("Thread Executed {}".format(current))
def main():
    """Submit three demo tasks to a small thread pool.

    Fixes over the original: two submissions were bound to the same name
    (`thread1` was assigned twice; the second was clearly meant to be
    `thread2`), and the executor was never shut down. The context manager
    guarantees shutdown and waits for all tasks to finish.
    """
    with ThreadPoolExecutor(max_workers=3) as executor:
        for _ in range(3):
            executor.submit(view_thread)


# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    main()
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.changes import filter
from buildbot.config import BuilderConfig
from buildbot.schedulers.forcesched import *
from poclfactory import createPoclFactory
# overrride the 'sample_slave' with a descriptive function name
# Note: when finished renaming, the string "sample" should not appear anywhere in this file!
#
# c - the global buildbot configuration data structure
# common_branch - this is the branch that the slave should build.
# typically 'master', but during release it will be changed
# to the release branch
def sample_slave(c, common_branch):
    """Register the sample slave, its scheduler and its builder with the master.

    c             -- the global buildbot configuration data structure
    common_branch -- branch the slave should build (typically 'master')
    """
    slave_name = "sample_slave_name"
    builder_name = "sample_builder_name - this is the name that appears on the webpage"

    # Create a new slave in the master's database.
    c['slaves'].append(
        BuildSlave(
            slave_name,
            "password"))

    # Launch the builder whenever the change poller notices a change to
    # github pocl on the watched branch.
    c['schedulers'].append(
        SingleBranchScheduler(
            name="name for scheduler, not sure where this is used",
            change_filter=filter.ChangeFilter(branch=common_branch),
            treeStableTimer=60,
            builderNames=[builder_name]))

    # One set of steps to build pocl (see poclfactory.py for configuration).
    build_factory = createPoclFactory()

    # Register the builder with the master.
    c['builders'].append(
        BuilderConfig(
            name=builder_name,
            slavenames=[slave_name],
            factory=build_factory))
| 1,591 | 478 |
from flask import Flask, url_for, request, session, abort
import os
import re
import base64
pqr = Flask(__name__)
# Determines the destination of the build. Only usefull if you're using
# Frozen-Flask
pqr.config['FREEZER_DESTINATION'] = os.path.dirname(os.path.abspath(__file__)) + '/../build'
# Function to easily find your assets
# In your template use <link rel=stylesheet href="{{ static('filename') }}">
pqr.jinja_env.globals['static'] = (
lambda filename: url_for('static', filename=filename)
)
##########################################################################
# Form CSRF protection functions
@pqr.before_request
def csrf_protect():
    """Abort POST requests whose form CSRF token mismatches the session's."""
    if request.method != "POST":
        return
    # pop() ensures each token is single-use.
    expected = session.pop('_csrf_token', None)
    supplied = request.form.get('_csrf_token')
    if not expected or expected != supplied:
        abort(403)
def generate_csrf_token():
    """Return the session's CSRF token, minting a fresh one on first use.

    Exposed to Jinja templates as ``csrf_token`` (see registration below
    in the original module).
    """
    if '_csrf_token' not in session:
        session['_csrf_token'] = some_random_string()
    return session['_csrf_token']
def some_random_string():
    """Return a cryptographically random, URL-safe token string.

    ``urlsafe_b64encode`` returns ``bytes`` under Python 3; without the
    decode the session stored bytes while the form echoed back a str, so
    the CSRF comparison could never succeed.
    """
    return base64.urlsafe_b64encode(os.urandom(32)).decode('ascii')
pqr.jinja_env.globals['csrf_token'] = generate_csrf_token
##########################################################################
##########################################################################
# Custom Filters
# Auto Subscript any sequence of digits
def subnumbers_filter(input):
    """Wrap every run of digits in <sub> tags, e.g. "H2O" -> "H<sub>2</sub>O".

    Raw string used for the pattern: "\\d" in a plain string is an invalid
    escape sequence (DeprecationWarning, a SyntaxError in future Pythons).
    """
    return re.sub(r"\d+", lambda val: "<sub>" + val.group(0) + "</sub>", input)
# Superscript text between ~...~ markers, removing the ~ characters
def supnumbers_iupac_filter(input):
    """Wrap ~...~ spans in <sup> tags, dropping the tilde markers.

    Fixed: the previous version used group(0).replace('~', ' '), which kept
    the markers as spaces inside the <sup> element, contradicting the stated
    intent of removing the ~ character; group(1) is the inner text only.
    """
    return re.sub(r"~(.*?)~", lambda val: "<sup>" + val.group(1) + "</sup>", input)
# Greek String Replacement
def replace_greek_filter(input):
    """Replace the first Greek letter name (Alpha/Beta/Gamma) found in *input*
    with its HTML entity (e.g. "&alpha;"), but only when some Greek name is
    immediately followed by a character that is neither a word char nor
    whitespace (e.g. "Alpha-helix" matches, "Alpha helix" does not).

    Raw strings used for the patterns ("\\w"/"\\s" are invalid escapes in
    plain strings); logic otherwise unchanged. The dead commented-out
    re.sub variant was removed.
    """
    choice = ""
    try:
        choice = re.findall(r"(Alpha|Beta|Gamma)", input)[0]
    except IndexError:
        pass
    if len(re.findall(r"(Alpha|Beta|Gamma)[^\w\s]", input)) > 0:
        # note: replaces every occurrence of the *first* Greek name found
        return input.replace(choice, '&{};'.format(choice.lower()))
    else:
        return input
# Adding the filters to the environment
pqr.jinja_env.filters['subnumbers'] = subnumbers_filter
pqr.jinja_env.filters['supnumbersiupac'] = supnumbers_iupac_filter
pqr.jinja_env.filters['replacegreek'] = replace_greek_filter
# sanity checks: fail fast at import time if a filter failed to register
assert pqr.jinja_env.filters['subnumbers']
assert pqr.jinja_env.filters['supnumbersiupac']
assert pqr.jinja_env.filters['replacegreek']
##########################################################################
# imported last, for its side effects: views registers routes on the
# configured `pqr` app object — presumably to avoid a circular import
from pqr import views
| 2,575 | 822 |
import json
from urllib import request
def get_ip():
    """Fetch this host's public IP/geo info from ip-api.com.

    Returns the decoded JSON payload as a dict, or None when the request
    or decoding fails (deliberately best-effort: the error is printed).
    Fixed: the HTTP response is now closed via a context manager instead
    of being leaked.
    """
    info = None
    try:
        with request.urlopen("http://ip-api.com/json/") as resp:
            info = json.load(resp)
    except Exception as e:
        # best-effort behaviour kept from the original: report and return None
        print(e)
    return info
| 255 | 83 |
import collections.abc
from typing import Union, Sequence, Optional
from .primitives import Number
from .units import Unit, UnitTypes
# anything accepted as a calc operand: a project Unit/Number or a plain number
_Value = Union[Unit, Number, float, int]


class Calc:
    """Base class for calc() expression-tree nodes."""

    # unit dimension of this node (UnitTypes.NONE when dimensionless)
    type: UnitTypes

    @classmethod
    def build(
        cls,
        values: Union[_Value, Sequence[_Value]],
        operators: Sequence[str] = (),  # fixed: was a mutable default ([])
    ):
        """Build a calc tree from *values* joined by *operators*.

        A single scalar value is wrapped in a one-element list; plain
        floats/ints are wrapped in Number. Returns the lone CalcValue when
        no operators are given, otherwise a CalcOperators node.

        Raises ValueError unless len(operators) == len(values) - 1.
        NOTE(review): a bare str argument would be treated as a Sequence of
        characters here — confirm callers never pass one.
        """
        _values: Sequence[_Value] = (
            values
            if isinstance(values, collections.abc.Sequence) else
            [values]
        )
        if len(_values) != len(operators) + 1:
            raise ValueError("There must be one less operator than values.")
        calc = CalcOperators(
            [
                CalcValue(value)
                if not isinstance(value, (float, int)) else
                CalcValue(Number(value))
                for value in _values
            ],
            list(operators),  # defensive copy, as before
        )
        if len(operators) == 0:
            return calc._values[0]
        return calc
class CalcValue(Calc):
    """Leaf node wrapping a single Unit or Number operand."""

    _value: Union[Unit, Number]

    def __init__(self, value: Union[Unit, Number]):
        self._value = value
        # a Unit carries its own dimension; bare numbers are dimensionless
        self.type = value.TYPE if isinstance(value, Unit) else UnitTypes.NONE

    def __str__(self):
        return str(self._value)

    def __repr__(self):
        return f"CalcValue({self._value!r})"
class CalcOperators(Calc):
    """Interior node: n operand sub-trees joined by n-1 operator strings."""

    _values: Sequence[Calc]
    _operators: Sequence[str]

    def __init__(self, values: Sequence[Calc], operators: Sequence[str]):
        if len(values) != len(operators) + 1:
            raise ValueError("There must be one less operator than values.")
        # all typed operands must share one dimension (NONE mixes with anything)
        types = {operand.type for operand in values if operand.type is not UnitTypes.NONE}
        if len(types) > 1:
            raise ValueError(f"Cannot mix types {types}")
        self._values = values
        self._operators = operators

    def __str__(self):
        # interleave: value op value op ... value
        pieces = [str(self._values[0])]
        for operator, operand in zip(self._operators, self._values[1:]):
            pieces.append(str(operator))
            pieces.append(str(operand))
        return " ".join(pieces)

    def __repr__(self):
        return f"CalcOperators({self._values!r}, {self._operators!r})"
| 2,205 | 655 |
from .binio import from_dword
from .opcodes import Reg, mov_reg_imm, mov_acc_mem, mov_rm_reg, x0f_movups, Prefix
def match_mov_reg_imm32(b: bytes, reg: Reg, imm: int) -> bool:
    """Return True when *b* encodes `mov <reg>, imm32`.

    The first byte must equal the mov-reg-imm opcode with the 32-bit-size
    bit (8) and the register number folded in; the remaining four bytes
    must decode to *imm*. Parentheses added for clarity only: `|` binds
    tighter than `==`, so behaviour is unchanged.
    """
    assert len(b) == 5, b
    return b[0] == (mov_reg_imm | 8 | int(reg)) and from_dword(b[1:]) == imm
def get_start(s):
    """Return the byte length of the instruction ending at the tail of *s*.

    Looks at the last bytes of *s* (indexed from the end) and classifies
    them as one of three mov encodings; returns None when none matches.
    """
    i = None
    if s[-1] & 0xfe == mov_acc_mem:
        # 1-byte mov accumulator <-> memory opcode
        i = 1
    elif s[-2] & 0xf8 == mov_rm_reg and s[-1] & 0xc7 == 0x05:
        # mov r/m, reg with a disp32 (mod=00, r/m=101) ModRM byte
        i = 2
    elif s[-3] == 0x0f and s[-2] & 0xfe == x0f_movups and s[-1] & 0xc7 == 0x05:
        # two-byte 0F movups/movupd with disp32 ModRM
        i = 3
    return i  # prefix is not allowed here
    # NOTE(review): everything below is unreachable — the early return above
    # deliberately disables the operand-size-prefix handling ("prefix is not
    # allowed here"). Confirm whether this dead code should be deleted or
    # re-enabled.
    assert i is not None
    if s[-1 - i] == Prefix.operand_size:
        i += 1
    return i
| 676 | 304 |
from tests.testcase import TestCase
from edmunds.database.db import db, mapper, relationship, backref
from sqlalchemy.orm import mapper as sqlalchemy_mapper, relationship as sqlalchemy_relationship, backref as sqlalchemy_backref
from edmunds.database.databasemanager import DatabaseManager
from werkzeug.local import LocalProxy
from flask_sqlalchemy import SQLAlchemy
class TestModel(TestCase):
    """
    Test the model
    """

    def test_model(self):
        """
        Test model
        :return: void
        """
        manager_db = DatabaseManager.get_sql_alchemy_instance()
        # the exported `db` is a LocalProxy resolving to the manager's instance
        self.assert_is_instance(db, LocalProxy)
        self.assert_is_instance(db._get_current_object(), SQLAlchemy)
        self.assert_equal_deep(manager_db, db._get_current_object())
        # the re-exported ORM helpers must be SQLAlchemy's own
        self.assert_equal_deep(sqlalchemy_mapper, mapper)
        self.assert_equal_deep(sqlalchemy_relationship, relationship)
        self.assert_equal_deep(sqlalchemy_backref, backref)
| 964 | 302 |
import logging
pvl_logger = logging.getLogger('pvlib')
import datetime
import numpy as np
import pandas as pd
from nose.tools import raises, assert_almost_equals
from nose.plugins.skip import SkipTest
from pandas.util.testing import assert_frame_equal
from pvlib.location import Location
from pvlib import solarposition
from pvlib import tracking
def test_solar_noon():
    """At solar noon the tracker sits flat: theta/tilt 0, aoi equals zenith."""
    apparent_zenith = pd.Series([10])
    apparent_azimuth = pd.Series([180])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=0,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    expect = pd.DataFrame({'aoi': 10, 'surface_azimuth': 90,
                           'surface_tilt': 0, 'tracker_theta': 0},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
def test_azimuth_north_south():
    """Flipping axis_azimuth 180 -> 0 should only flip the sign of theta."""
    apparent_zenith = pd.Series([60])
    apparent_azimuth = pd.Series([90])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=180,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
                           'surface_tilt': 60, 'tracker_theta': -60},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=0,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    # mirrored axis: identical geometry except the rotation sign
    expect['tracker_theta'] *= -1
    assert_frame_equal(expect, tracker_data)
def test_max_angle():
    """Rotation is clamped at max_angle even when the sun is lower."""
    apparent_zenith = pd.Series([60])
    apparent_azimuth = pd.Series([90])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=0,
                                       max_angle=45, backtrack=True,
                                       gcr=2.0/7.0)
    # sun at 60 deg but rotation limited to 45, leaving a 15 deg aoi
    expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
                           'surface_tilt': 45, 'tracker_theta': 45},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
def test_backtrack():
    """Without backtracking the tracker follows the sun; with it, the
    rotation backs off (per gcr) to avoid row-to-row shading."""
    apparent_zenith = pd.Series([80])
    apparent_azimuth = pd.Series([90])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=0,
                                       max_angle=90, backtrack=False,
                                       gcr=2.0/7.0)
    expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
                           'surface_tilt': 80, 'tracker_theta': 80},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=0,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
                           'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
def test_axis_tilt():
    """Tilted axis cases for both south- and north-pointing axis azimuths."""
    apparent_zenith = pd.Series([30])
    apparent_azimuth = pd.Series([135])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=30, axis_azimuth=180,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 37.3427,
                           'surface_tilt': 35.98741, 'tracker_theta': -20.88121},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=30, axis_azimuth=0,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    expect = pd.DataFrame({'aoi': 47.6632, 'surface_azimuth': 129.0303,
                           'surface_tilt': 42.5152, 'tracker_theta': 31.6655},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
def test_axis_azimuth():
    """East-west axis (axis_azimuth=90): flat at east sun, tilted at south sun."""
    apparent_zenith = pd.Series([30])
    apparent_azimuth = pd.Series([90])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=90,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    expect = pd.DataFrame({'aoi': 30, 'surface_azimuth': 180,
                           'surface_tilt': 0, 'tracker_theta': 0},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
    apparent_zenith = pd.Series([30])
    apparent_azimuth = pd.Series([180])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=90,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
    expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 180,
                           'surface_tilt': 30, 'tracker_theta': 30},
                          index=[0], dtype=np.float64)
    assert_frame_equal(expect, tracker_data)
@raises(ValueError)
def test_index_mismatch():
    """Zenith/azimuth series with mismatched indexes must raise ValueError."""
    apparent_zenith = pd.Series([30])
    apparent_azimuth = pd.Series([90,180])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=90,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0/7.0)
from MemoryHandler import *
from addresses import *
from struct import *
class carro (object):
    """Telemetry reader for a car in a SNES racing game running under zsnesw.

    Each update* method refreshes one field from emulator process memory via
    MemoryHandler; pack() serializes the current state for transmission.
    """

    def __init__(self):
        self.velocidade = 0   # speed
        self.gasolina = 0     # remaining fuel, percent
        self.pontos = 0       # score
        self.posicao = 0      # race position
        self.rpm = 0          # engine RPM (not implemented yet)
        self.nitro = 0        # nitro charge
        self.gerenciadorMemoria = MemoryHandler("zsnesw.exe")

    def update(self):
        """Refresh every telemetry field from emulator memory."""
        self.updateVel()
        self.updateGas()
        self.updatePontos()
        self.updatePosicao()
        self.updateRpm()
        self.updateNitro()

    def pack(self):
        """Serialize as '1;<vel>;<gas>;<pontos>;<posicao>;<rpm>;<nitro>;'."""
        fields = (1, self.velocidade, self.gasolina, self.pontos,
                  self.posicao, self.rpm, self.nitro)
        return ''.join(str(field) + ';' for field in fields)

    def updatePosicao(self):
        # CARROSAFRENTE counts cars ahead; position is that count + 1
        self.posicao = int(self.gerenciadorMemoria.lerByte(CARROSAFRENTE) + 1)

    def updateVel(self):
        # raw speedometer word is in tenths — TODO confirm unit
        self.velocidade = int(self.gerenciadorMemoria.lerPalavra(SPEEDMETER) / 10)

    def updateGas(self):
        # raw consumption value (0..20) scaled to remaining-fuel percent
        self.gasolina = int(100 - ((self.gerenciadorMemoria.lerByte(FUELCONSUMP) * 100) / 20))

    def updatePontos(self):
        self.pontos = self.gerenciadorMemoria.lerByte(POINTS)

    def updateRpm(self):
        # not implemented yet; kept at 0 so pack() output stays stable
        self.rpm = 0

    def updateNitro(self):
        # NOTE(review): magic address and -53 offset — should move to addresses.py
        self.nitro = int(self.gerenciadorMemoria.lerByte(0x00C64B06) - 53)
| 1,396 | 540 |
# Copyright (c) 2020 Software AG,
# Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA,
# and/or its subsidiaries and/or its affiliates and/or their licensors.
# Use, reproduction, transfer, publication or disclosure is prohibited except
# as specifically provided for in your License Agreement with Software AG.
# pylint: disable=protected-access, redefined-outer-name
import base64
from unittest.mock import patch
import json
import pytest
import requests
import responses
from c8y_api._base_api import CumulocityRestApi # noqa (protected-access)
@pytest.fixture(scope='function')
def mock_c8y() -> CumulocityRestApi:
    """Provide mock CumulocityRestApi instance."""
    # function scope: a fresh instance per test so state never leaks
    return CumulocityRestApi(
        base_url='http://base.com',
        tenant_id='t12345',
        username='username',
        password='password',
        application_key='application_key')
@pytest.fixture(scope='module')
def httpbin_basic() -> CumulocityRestApi:
    """Provide mock CumulocityRestApi instance for httpbin with basic auth.

    Used by the @pytest.mark.online tests; httpbin echoes request headers
    back in its JSON response. No application key is configured here.
    """
    return CumulocityRestApi(
        base_url='https://httpbin.org',
        tenant_id='t12345',
        username='username',
        password='password'
    )
def assert_auth_header(c8y, headers):
    """Assert that the given auth header is correctly formatted.

    Fixed: the previous ``lstrip('Basic ')`` strips any leading characters
    from the set {B, a, s, i, c, ' '} — corrupting base64 tokens that happen
    to start with one of those characters. Slice off the prefix instead.
    """
    auth_header = headers['Authorization'][len('Basic '):]
    expected = f'{c8y.tenant_id}/{c8y.username}:{c8y.password}'
    assert base64.b64decode(auth_header) == expected.encode('utf-8')
def assert_accept_header(headers, accept='application/json'):
    """Assert that the accept header matches the expectation."""
    actual = headers['Accept']
    assert accept == actual
def assert_content_header(headers, content_type='application/json'):
    """Assert that the content-type header matches the expectation."""
    actual = headers['Content-Type']
    assert content_type == actual
def assert_application_key_header(c8y, headers):
    """Assert that the application key header matches the expectation."""
    header_name = c8y.HEADER_APPLICATION_KEY
    assert headers[header_name] == c8y.application_key
@pytest.mark.parametrize('args, expected', [
    ({'accept': 'application/json'}, {'Accept': 'application/json'}),
    ({'content_tYPe': 'content/TYPE'}, {'Content-Type': 'content/TYPE'}),
    ({'some': 'thing', 'mORE_Of_this': 'same'}, {'Some': 'thing', 'More-Of-This': 'same'}),
    ({'empty': None, 'accept': 'accepted'}, {'Accept': 'accepted'}),
    ({'empty1': None, 'empty2': None}, None),
    ({'accept': ''}, {'Accept': None}),
])
def test_prepare_headers(args, expected):
    """Verify header preparation."""
    # per the cases above: key casing is normalized and underscores become
    # dashes; None values are dropped; '' maps to an explicit None
    assert CumulocityRestApi._prepare_headers(**args) == expected
@pytest.mark.parametrize('method', ['get', 'post', 'put'])
def test_remove_accept_header(mock_c8y: CumulocityRestApi, method):
    """Verify that the default accept header can be unset/removed."""
    with responses.RequestsMock() as rsps:
        rsps.add(method=method.upper(),
                 url=mock_c8y.base_url + '/resource',
                 status=200,
                 json={})
        # accept='' requests removal of the default Accept header
        kwargs = {'resource': '/resource', 'accept': ''}
        if method.startswith('p'):
            # POST/PUT require a body
            kwargs['json'] = {}
        func = getattr(mock_c8y, method)
        func(**kwargs)
        assert 'Accept' not in rsps.calls[0].request.headers
@pytest.mark.online
@pytest.mark.parametrize('method', ['get', 'post', 'put'])
def test_remove_accept_header_online(httpbin_basic: CumulocityRestApi, method):
    """Verify that the unset accept header are actually not sent."""
    # httpbin echoes the headers it received, so the response body
    # reflects what actually went over the wire
    kwargs = {'resource': '/anything', 'accept': ''}
    if method.startswith('p'):
        kwargs['json'] = {}
    func = getattr(httpbin_basic, method)
    response = func(**kwargs)
    assert 'Accept' not in response['headers']
@pytest.mark.parametrize('method', ['get', 'post', 'put', 'delete'])
def test_no_application_key_header(mock_c8y: CumulocityRestApi, method):
    """Verify that the application key header is not present by default."""
    # rebuild the connection without an application key
    # (fixed: the 4th positional arg is the password — it previously
    # passed mock_c8y.username twice)
    c8y = CumulocityRestApi(mock_c8y.base_url, mock_c8y.tenant_id, mock_c8y.username, mock_c8y.password)
    with responses.RequestsMock() as rsps:
        rsps.add(method=method.upper(),
                 url=mock_c8y.base_url + '/resource',
                 status=200,
                 json={'result': True})
        kwargs = {'resource': '/resource'}
        if method.startswith('p'):
            # POST/PUT require a body (fixed: was set redundantly twice)
            kwargs['json'] = {}
        func = getattr(c8y, method)
        func(**kwargs)
        request_headers = rsps.calls[0].request.headers
        assert CumulocityRestApi.HEADER_APPLICATION_KEY not in request_headers
@pytest.mark.online
def test_basic_auth_get(httpbin_basic: CumulocityRestApi):
    """Verify that the basic auth headers are added for the REST requests."""
    c8y = httpbin_basic
    # first we verify that the auth is there for GET requests
    # (httpbin echoes the request headers back in the JSON body)
    response = c8y.get('/anything')
    assert_auth_header(c8y, response['headers'])
def test_post_defaults(mock_c8y: CumulocityRestApi):
    """Verify the basic functionality of the POST requests."""
    with responses.RequestsMock() as rsps:
        rsps.add(method=responses.POST,
                 url=mock_c8y.base_url + '/resource',
                 status=201,
                 json={'result': True})
        response = mock_c8y.post('/resource', json={'request': True})
        request_body = rsps.calls[0].request.body
        request_headers = rsps.calls[0].request.headers
        # body forwarded verbatim; default JSON + auth + app-key headers applied
        assert json.loads(request_body)['request']
        assert_auth_header(mock_c8y, request_headers)
        assert_accept_header(request_headers)
        assert_content_header(request_headers)
        assert_application_key_header(mock_c8y, request_headers)
        assert response['result']
def test_post_explicits(mock_c8y: CumulocityRestApi):
    """Verify that explicit accept/content-type arguments override the POST defaults."""
    with responses.RequestsMock() as rsps:
        rsps.add(method=responses.POST,
                 url=mock_c8y.base_url + '/resource',
                 status=201,
                 json={'result': True})
        response = mock_c8y.post('/resource', accept='custom/accept',
                                 content_type='custom/content', json={'request': True})
        request_body = rsps.calls[0].request.body
        request_headers = rsps.calls[0].request.headers
        assert json.loads(request_body)['request']
        assert_auth_header(mock_c8y, request_headers)
        # the custom headers must replace the application/json defaults
        assert_accept_header(request_headers, 'custom/accept')
        assert_content_header(request_headers, 'custom/content')
        assert_application_key_header(mock_c8y, request_headers)
        assert response['result']
@pytest.mark.online
def test_get_default(httpbin_basic: CumulocityRestApi):
    """Verify that the get function with default parameters works as expected."""
    c8y = httpbin_basic
    # (1) with implicit parameters given and all default
    response = c8y.get(resource='/anything/resource?p1=v1&p2=v2')
    # auth header must always be present
    assert response['headers']['Authorization']
    # by default we accept JSON
    assert response['headers']['Accept'] == 'application/json'
    # inline parameters recognized
    assert response['args']['p1']
    assert response['args']['p2']
@pytest.mark.online
def test_get_explicit(httpbin_basic: CumulocityRestApi):
    """Verify that the get function with explicit parameters works as expected."""
    c8y = httpbin_basic
    # params dict should be encoded into the query string; accept overridden
    response = c8y.get(resource='/anything/resource', params={'p1': 'v1', 'p2': 3}, accept='something/custom')
    # auth header must always be present
    assert response['headers']['Authorization']
    # expecting our custom accept header
    assert response['headers']['Accept'] == 'something/custom'
    # explicit parameters recognized
    assert response['args']['p1']
    assert response['args']['p2']
def test_get_ordered_response():
    """Verify that the response JSON can be ordered on request."""
    c8y = CumulocityRestApi(base_url='', tenant_id='', username='', password='')
    with patch('requests.Session.get') as get_mock:
        mock_response = requests.Response()
        mock_response.status_code = 200
        mock_response._content = b'{"list": [1, 2, 3, 4, 5], "x": "xxx", "m": "mmm", "c": "ccc"}'
        get_mock.return_value = mock_response
        response = c8y.get('any', ordered=True)
        elements = list(response.items())
        # first element is a list
        assert elements[0][0] == 'list'
        assert elements[0][1] == [1, 2, 3, 4, 5]
        # 2nd to 4th are some elements in order
        # ('x', 'm', 'c' is document/wire order, not alphabetical)
        assert (elements[1][0], elements[2][0], elements[3][0]) == ('x', 'm', 'c')
def test_get_404():
    """Verify that a 404 results in a KeyError and a message naming the missing resource."""
    c8y = CumulocityRestApi(base_url='', tenant_id='', username='', password='')
    with patch('requests.Session.get') as get_mock:
        mock_response = requests.Response()
        mock_response.status_code = 404
        get_mock.return_value = mock_response
        with pytest.raises(KeyError) as error:
            c8y.get('some/key')
        # the resource path must appear in the error message for diagnosis
        assert 'some/key' in str(error)
def test_delete_defaults(mock_c8y: CumulocityRestApi):
    """Verify the basic functionality of the DELETE requests."""
    with responses.RequestsMock() as rsps:
        rsps.add(method=responses.DELETE,
                 url=mock_c8y.base_url + '/resource',
                 status=204)
        mock_c8y.delete('/resource')
        # DELETE carries no body, but must still authenticate and
        # send the application key header
        request_headers = rsps.calls[0].request.headers
        assert_auth_header(mock_c8y, request_headers)
        assert_application_key_header(mock_c8y, request_headers)
def test_empty_response(mock_c8y: CumulocityRestApi):
    """Verify that empty GET/POST/PUT response bodies don't break the code."""
    # each mocked endpoint returns a success status with no body at all
    with responses.RequestsMock() as rsps:
        rsps.add(method=responses.GET,
                 url=mock_c8y.base_url + '/resource',
                 status=200)
        mock_c8y.get('/resource')
    with responses.RequestsMock() as rsps:
        rsps.add(method=responses.POST,
                 url=mock_c8y.base_url + '/resource',
                 status=201)
        mock_c8y.post('/resource', json={})
    with responses.RequestsMock() as rsps:
        rsps.add(method=responses.PUT,
                 url=mock_c8y.base_url + '/resource',
                 status=200)
        mock_c8y.put('/resource', json={})
| 10,483 | 3,279 |
#
# Copyright (c) 2020, Quantum Espresso Foundation and SISSA (Scuola
# Internazionale Superiore di Studi Avanzati). All rights reserved.
# This file is distributed under the terms of the BSD 3-Clause license.
# See the file 'LICENSE' in the root directory of the present distribution,
# or https://opensource.org/licenses/BSD-3-Clause
#
from .abstract_generator import AbstractGenerator
class JSONSchemaGenerator(AbstractGenerator):
    """
    JSON Schema generic generator for XSD schemas.
    """
    # language label and template search path used by AbstractGenerator
    formal_language = 'JSON Schema'
    default_paths = ['templates/json-schema/']
    # XSD builtin simple types -> JSON Schema primitive types
    # (all integer-like XSD types collapse to plain "integer";
    # range restrictions such as nonNegative are not expressed here)
    builtin_types = {
        'string': 'string',
        'boolean': 'boolean',
        'float': 'number',
        'double': 'number',
        'integer': 'integer',
        'unsignedByte': 'integer',
        'nonNegativeInteger': 'integer',
        'positiveInteger': 'integer',
    }
| 866 | 254 |
"""Bright general tests"""
from unittest import TestCase
import nose
from nose.tools import assert_equal, assert_not_equal, assert_raises, raises, \
assert_almost_equal, assert_true, assert_false, with_setup
import os
import warnings
import tables as tb
import numpy as np
from pyne import nucname
import bright
bright_conf = bright.bright_conf
#
# Fixtures
#
def setup_h5():
    """Create isos.h5 with two identical nuclide arrays (skipped if present)."""
    if 'isos.h5' in os.listdir('.'):
        return
    # NOTE(review): openFile/createArray are the legacy pre-3.0 PyTables API
    f = tb.openFile('isos.h5', 'w')
    f.createArray(f.root, "ToIsos", np.array([92235, 922380, 10010]), "ToIsos")
    f.createArray(f.root, "NotIsos", np.array([92235, 922380, 10010]), "NotIsos")
    f.close()
def teardown_h5():
    """Remove the HDF5 fixture file."""
    os.remove('isos.h5')
def setup_txt():
    """Write the nuclide-list text fixture."""
    with open('isos.txt', 'w') as f:
        # NOTE(review): the trailing '}' looks accidental — the tests expect
        # '10010' to be parsed from it regardless; confirm it is intentional
        f.write('U-235, 922380\n10010}')
def teardown_txt():
    """Remove the text fixture file."""
    os.remove('isos.txt')
#
# Tests
#
def test_bright_start():
    """bright_start() must run with BRIGHT_DATA taken from the environment.

    Fixed: the original saved os.getenv("BRIGHT_DATA") (None when unset)
    and then assigned it back into os.environ, which raises TypeError for
    None; the restore now deletes the variable in that case and runs in a
    finally block so the environment is restored even on failure.
    """
    current = os.getenv("BRIGHT_DATA")
    os.environ["BRIGHT_DATA"] = "/foo/bar"
    try:
        new = os.getenv("BRIGHT_DATA")
        bright.bright_start()
        assert_equal(new, "/foo/bar")
    finally:
        if current is None:
            del os.environ["BRIGHT_DATA"]
        else:
            os.environ["BRIGHT_DATA"] = current
def test_track_nucs():
    """track_nucs round-trips a set through bright_conf."""
    old_isolist = bright_conf.track_nucs
    new_isolist = [922350, 10010]
    bright_conf.track_nucs = set(new_isolist)
    assert_equal(bright_conf.track_nucs, set([10010, 922350]))
    # restore the global config for subsequent tests
    bright_conf.track_nucs = old_isolist
def test_verbosity():
    """Verbosity setting round-trips through bright_conf.

    Fixed: the original restored via ``bright.verbosity = old_verbosity``,
    which merely set a fresh attribute on the module and left
    ``bright_conf.verbosity`` at 100 for all subsequent tests.
    """
    old_verbosity = bright_conf.verbosity
    bright_conf.verbosity = 100
    assert_equal(bright_conf.verbosity, 100)
    bright_conf.verbosity = old_verbosity
def test_write_hdf5():
    """write_hdf5 flag accepts bools and coerces truthy ints."""
    old_write = bright_conf.write_hdf5
    bright_conf.write_hdf5 = False
    assert_false(bright_conf.write_hdf5)
    # integer 1 must read back truthy
    bright_conf.write_hdf5 = 1
    assert_true(bright_conf.write_hdf5)
    bright_conf.write_hdf5 = old_write
def test_write_text():
    """write_text flag accepts bools and coerces truthy ints."""
    old_write = bright_conf.write_text
    bright_conf.write_text = False
    assert_false(bright_conf.write_text)
    # integer 1 must read back truthy
    bright_conf.write_text = 1
    assert_true(bright_conf.write_text)
    bright_conf.write_text = old_write
def test_output_filename():
    """Default output filename is fuel_cycle.h5 and the setting is writable."""
    assert_equal( bright_conf.output_filename, 'fuel_cycle.h5')
    bright_conf.output_filename = 'new_name.h5'
    # NOTE(review): this test never restores the default, so any later test
    # reading output_filename observes 'new_name.h5' — order-dependent
    assert_equal( bright_conf.output_filename, 'new_name.h5')
@with_setup(setup_h5)
def test_load_track_nucs_hdf5_1():
    """Loading the default table merges with the existing track set."""
    old_isos = bright_conf.track_nucs
    bright_conf.track_nucs = set([80160])
    bright.load_track_nucs_hdf5('isos.h5')
    # 80160 preserved, file contents merged in
    assert_equal(bright_conf.track_nucs, set([10010, 80160, 922350, 922380]))
    bright_conf.track_nucs = old_isos
@with_setup(setup_h5)
def test_load_track_nucs_hdf5_2():
    """An explicit table path ('/NotIsos') also merges with the track set."""
    old_isos = bright_conf.track_nucs
    bright_conf.track_nucs = set([80160])
    bright.load_track_nucs_hdf5('isos.h5', '/NotIsos')
    assert_equal(bright_conf.track_nucs, set([10010, 80160, 922350, 922380]))
    bright_conf.track_nucs = old_isos
@with_setup(setup_h5)
def test_load_track_nucs_hdf5_3():
    """clear=True replaces the track set instead of merging (80160 dropped)."""
    old_isos = bright_conf.track_nucs
    bright_conf.track_nucs = set([80160])
    bright.load_track_nucs_hdf5('isos.h5', '', True)
    assert_equal(bright_conf.track_nucs, set([10010, 922350, 922380]))
    bright_conf.track_nucs = old_isos
@with_setup(setup_h5, teardown_h5)
def test_load_track_nucs_hdf5_4():
    """Explicit table with clear=True; last HDF5 test, so it tears the file down."""
    old_isos = bright_conf.track_nucs
    bright_conf.track_nucs = set([80160])
    bright.load_track_nucs_hdf5('isos.h5', '/NotIsos', True)
    assert_equal(bright_conf.track_nucs, set([10010, 922350, 922380]))
    bright_conf.track_nucs = old_isos
@with_setup(setup_txt)
def test_load_track_nucs_text_1():
    """Loading from text merges the file's nuclides with the track set."""
    old_isos = bright_conf.track_nucs
    bright_conf.track_nucs = set([80160])
    bright.load_track_nucs_text('isos.txt')
    assert_equal(bright_conf.track_nucs, set([10010, 80160, 922350, 922380]))
    bright_conf.track_nucs = old_isos
@with_setup(setup_txt, teardown_txt)
def test_load_track_nucs_text_2():
    """clear=True replaces the track set; last text test, so it tears down."""
    old_isos = bright_conf.track_nucs
    bright_conf.track_nucs = set([80160])
    bright.load_track_nucs_text('isos.txt', True)
    assert_equal(bright_conf.track_nucs, set([10010, 922350, 922380]))
    bright_conf.track_nucs = old_isos
# run the whole suite via nose when executed directly
if __name__ == "__main__":
    nose.main()
| 4,135 | 1,854 |
from django.apps import AppConfig
class DogConfig(AppConfig):
    """Django application configuration for the Dog app."""
    name = 'Dog'
| 81 | 27 |
import unittest
from pathlib import Path
import os
import shutil
import time
from src.bt_utils.handle_sqlite import DatabaseHandler
from src.bt_utils.get_content import content_dir
from sqlite3 import IntegrityError
class TestClass(unittest.TestCase):
    """End-to-end smoke test of DatabaseHandler against a fresh sqlite file."""

    def testDB(self):
        """Exercise create/read/update paths of the user/reaction tables."""
        # start from a clean content directory so the db file is recreated
        if os.path.exists(content_dir):
            shutil.rmtree(content_dir, ignore_errors=True)
        if not os.path.exists(content_dir):
            os.makedirs(content_dir)
        else:
            # rmtree failed (e.g. a file in use) — at least drop the old database
            try:
                os.remove(os.path.join(content_dir, "bundestag.db"))
            except OSError:
                pass
        self.db = DatabaseHandler()
        self.roles = ["role1", "role2"]
        # creates basic table structures if not already present
        print("Create database and test if creation was successful")
        self.db.create_structure(self.roles)
        db_path = Path(os.path.join(content_dir, "bundestag.db"))
        self.assertTrue(db_path.is_file())
        print("Check if database is empty")
        users = self.db.get_all_users()
        self.assertEqual(users, [])
        print("Add user to database and check if he exists.")
        self.db.add_user(123, self.roles)
        user = self.db.get_specific_user(123)
        # user rows are (id, <count per role>...) — one column per role
        self.assertEqual(user, (123, 0, 0))
        print("Add reaction to user and check if it exists.")
        self.db.add_reaction(123, "role1")
        user = self.db.get_specific_user(123)
        self.assertEqual(user, (123, 1, 0))
        print("Remove reaction and check if it does not exist anymore.")
        self.db.remove_reaction(123, "role1")
        user = self.db.get_specific_user(123)
        self.assertEqual(user, (123, 0, 0))
        print("Add another user and check if select all users works.")
        self.db.add_user(124, self.roles)
        users = self.db.get_all_users()
        self.assertEqual(users, [(123, 0, 0), (124, 0, 0)])
        print("Add another user with invalid id and check if it still get created.")
        # duplicate primary key must raise and leave the table unchanged
        with self.assertRaises(IntegrityError):
            self.db.add_user(124, self.roles)
        users = self.db.get_all_users()
        self.assertEqual(users, [(123, 0, 0), (124, 0, 0)])
        print("Add another column and check if it gets applied correctly")
        self.roles = ["role1", "role2", "role3"]
        self.db.update_columns(self.roles)
        users = self.db.get_all_users()
        # existing rows gain the new role column, initialised to 0
        self.assertEqual(users, [(123, 0, 0, 0), (124, 0, 0, 0)])
        print("Closing connection")
        # drop our reference so DatabaseHandler can close its connection
        del self.db
# allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| 2,568 | 853 |
# load model and predicate
import mxnet as mx
import numpy as np

# define test data
batch_size = 1
num_batch = 1
filepath = 'frame-1.jpg'
DEFAULT_INPUT_SHAPE = 300

# load model
sym, arg_params, aux_params = mx.model.load_checkpoint("model/deploy_model_algo_1", 0) # load with net name and epoch num
mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=["data"], label_names=["cls_prob"])
print('data_names:', mod.data_names)
print('output_names:', mod.output_names)
#print('data_shapes:', mod.data_shapes)
#print('label_shapes:', mod.label_shapes)
#print('output_shapes:', mod.output_shapes)
# bind for inference only, fixed 1x3x300x300 input
mod.bind(data_shapes=[("data", (1, 3, DEFAULT_INPUT_SHAPE, DEFAULT_INPUT_SHAPE))], for_training=False)
mod.set_params(arg_params, aux_params) # , allow_missing=True

import cv2
# read BGR, convert to RGB, resize to the network input size, move channels
# first (HWC -> CHW), then add the leading batch dimension
img = cv2.cvtColor(cv2.imread(filepath), cv2.COLOR_BGR2RGB)
print(img.shape)
img = cv2.resize(img, (DEFAULT_INPUT_SHAPE, DEFAULT_INPUT_SHAPE))
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2)
img = img[np.newaxis, :]
print(img.shape)

# # predict
# eval_data = np.array([img])
# eval_label = np.zeros(len(eval_data)) # just need to be the same length, empty is ok
# eval_iter = mx.io.NDArrayIter(eval_data, eval_label, batch_size, shuffle=False)
# print('eval_iter.provide_data:', eval_iter.provide_data)
# print('eval_iter.provide_label:', eval_iter.provide_label)
# predict_stress = mod.predict(eval_iter, num_batch)
# print(predict_stress) # you can transfer to numpy array

# forward
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
mod.forward(Batch([mx.nd.array(img)]))
prob = mod.get_outputs()[0].asnumpy()
prob = np.squeeze(prob)
# Grab top result, convert to python list of lists and return
# NOTE(review): keeps only the first 4 rows of the network output — confirm
# that 4 is the intended number of detections to report
results = [prob[i].tolist() for i in range(4)]
print(results)
| 1,777 | 683 |
class dataMapper:
    """Maps tabular data (a header row followed by data rows) to dicts."""

    def __init__(self, data):
        self.__data = data
        self.__structure = self.getDataStructure()

    def getDataStructure(self):
        """Return a template dict keyed by the lower-cased column headings."""
        return {heading.lower(): '' for heading in self.__data[0]}

    def map(self):
        """Return the data rows as a list of dicts keyed by the headings."""
        records = []
        for row in self.__data[1:]:
            # structure preserves heading order, so positions line up with row
            records.append({key: row[position]
                            for position, key in enumerate(self.__structure)})
        return records
| 577 | 157 |
from iconservice import *
class SampleInterface(InterfaceScore):
    """Typed proxy declaring the external entry points of the linked SCORE."""

    @interface
    def set_value(self, value: int) -> None: pass

    @interface
    def get_value(self) -> int: pass

    # returns the other SCORE's database handle (used by the cross-SCORE
    # db access tests below)
    @interface
    def get_db(self) -> IconScoreDatabase: pass

    @interface
    def fallback_via_internal_call(self) -> None: pass

    @interface
    def fallback_via_not_payable_internal_call(self) -> None: pass
class SampleLinkScore(IconScoreBase):
    """SCORE that forwards value reads/writes and ICX transfers to a linked
    SCORE, used to exercise inter-SCORE call behaviour."""

    # VarDB key holding the linked SCORE's address
    _SCORE_ADDR = 'score_addr'

    @eventlog(indexed=1)
    def Changed(self, value: int):
        pass

    def __init__(self, db: IconScoreDatabase) -> None:
        super().__init__(db)
        self._value = VarDB('value', db, value_type=int)
        self._addr_score = VarDB(self._SCORE_ADDR, db, value_type=Address)

    def on_install(self, value: int=0) -> None:
        super().on_install()
        self._value.set(value)

    def on_update(self) -> None:
        super().on_update()

    @external(readonly=False)
    def add_score_func(self, score_addr: Address) -> None:
        """Register the address of the SCORE to forward calls to."""
        self._addr_score.set(score_addr)

    @external(readonly=True)
    def get_value(self) -> int:
        """Read the linked SCORE's value via the interface proxy."""
        test_interface = self.create_interface_score(self._addr_score.get(), SampleInterface)
        return test_interface.get_value()

    @external
    def set_value(self, value: int):
        """Write the linked SCORE's value, then emit Changed."""
        test_interface = self.create_interface_score(self._addr_score.get(), SampleInterface)
        test_interface.set_value(value)
        self.Changed(value)

    def _get_other_score_db(self):
        # fetches the *other* SCORE's db handle — used to test that direct
        # cross-SCORE database access is rejected by the platform
        interface_score = self.create_interface_score(self._addr_score.get(), SampleInterface)
        return interface_score.get_db()

    @external(readonly=True)
    def get_data_from_other_score(self) -> bool:
        db = self._get_other_score_db()
        db.get(b'dummy_key')
        return True

    @external
    def put_data_to_other_score_db(self):
        db = self._get_other_score_db()
        db.put(b'dummy_key', b'dummy_value')

    @external(readonly=False)
    def transfer_icx_to_other_score(self, value: int) -> None:
        """Send ICX to the linked SCORE's payable fallback."""
        test_interface = self.create_interface_score(self._addr_score.get(), SampleInterface)
        test_interface.icx(value).fallback_via_internal_call()

    @external(readonly=False)
    def transfer_icx_to_other_score_fail(self, value: int) -> None:
        """Send ICX to a non-payable entry point (expected to fail)."""
        test_interface = self.create_interface_score(self._addr_score.get(), SampleInterface)
        test_interface.icx(value).fallback_via_not_payable_internal_call()

    @external(readonly=False)
    @payable
    def transfer_all_icx_to_other_score(self) -> None:
        """Forward this SCORE's entire ICX balance via a low-level call."""
        amount: int = self.icx.get_balance(self.address)
        self.call(self._addr_score.get(), 'fallback_via_internal_call', {}, amount)

    @payable
    def fallback(self) -> None:
        # accept plain ICX transfers
        pass
| 2,778 | 916 |
#!/usr/bin/python
from setuptools import setup
# Packaging metadata for the sentry CLI + client library.
setup(
    name = "python-sentry",
    version = "1.0",
    author = "Josip Domsic",
    author_email = "josip.domsic+github@gmail.com",
    description = ("Pure Python CLI for sentry, as well as client library"),
    license = "MIT",
    keywords = "python Sentry CLI",
    url = "https://github.com/ulicar/sentry-cli",
    packages=['sentry'],
    # NOTE(review): installs the CLI by copying into /usr/local/bin via
    # data_files — assumes a POSIX system and install permissions; the
    # modern alternative would be an entry_points console_script
    data_files = [
        ('/usr/local/bin/', [
            'sentry-cli'
        ])
    ],
)
| 489 | 170 |
import json
import os
def load_config():
    """Load database/API configuration for the current PYTHON_ENV.

    DEV (the default) reads crawler/src/config/config-dev.json relative to
    the working directory; PRD reads individual environment variables.

    Returns:
        dict with keys host, name, user, pass, kakao_auth_key.

    Raises:
        ValueError: for any other PYTHON_ENV value (the original code
            crashed with UnboundLocalError in that case).
    """
    python_env = os.getenv("PYTHON_ENV", default="DEV")
    if python_env == "DEV":
        with open("./crawler/src/config/config-dev.json") as f:
            config = json.load(f)
        host = config["database_host"]
        name = config["database_name"]
        user = config["database_user"]
        _pass = config["database_pass"]
        auth_key = config["kakao_auth_key"]
    elif python_env == "PRD":
        # in production every setting comes straight from the environment
        host = os.getenv("database_host")
        name = os.getenv("database_name")
        user = os.getenv("database_user")
        _pass = os.getenv("database_pass")
        auth_key = os.getenv("kakao_auth_key")
    else:
        raise ValueError(f"Unsupported PYTHON_ENV: {python_env!r}")
    return {"host": host, "name": name, "user": user, "pass": _pass, "kakao_auth_key": auth_key}
| 790 | 268 |
# -*- coding: utf-8 -*-
from django.test import SimpleTestCase
from core.models import VisibilityMixin
class VisibilityMixinTest(SimpleTestCase):
    """Checks the is_private / is_public flags exposed by VisibilityMixin."""

    def test_is_private(self):
        """Default instances are private; PUBLIC-level instances are not."""
        default_obj = VisibilityMixin()
        self.assertTrue(default_obj.is_private)
        public_obj = VisibilityMixin(
            visibility_level=VisibilityMixin.Level.PUBLIC)
        self.assertFalse(public_obj.is_private)

    def test_is_public(self):
        """PUBLIC-level instances are public; default instances are not."""
        public_obj = VisibilityMixin(
            visibility_level=VisibilityMixin.Level.PUBLIC)
        self.assertTrue(public_obj.is_public)
        default_obj = VisibilityMixin()
        self.assertFalse(default_obj.is_public)
| 672 | 200 |
import datetime
import glob
import os
import re

import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns
from matplotlib.dates import DateFormatter
# Accumulators: absolute (a*) and relative (r*) error statistics, one value
# per predicted TEC map.
amean_err = []
astddev_err = []
amin_err = []
amax_err = []
rmean_err = []
rstddev_err = []
rmin_err = []
rmax_err = []

# Load the true and predicted TEC maps and accumulate per-map mean/stddev/
# min/max for both the absolute and relative differences.
for i in range(32):
    path = "predicted_tec_files/{}_pred_*.npy".format(i)
    for fnm in glob.glob(path):
        pred = np.array(np.load(fnm).tolist())
    path = "predicted_tec_files/{}_y_*.npy".format(i)
    for fnm in glob.glob(path):
        truth = np.array(np.load(fnm).tolist())
    pred = np.squeeze(pred)
    truth = np.squeeze(truth)
    diff_absolute = abs(pred - truth)
    diff_relative = abs((pred - truth) / truth)
    # Flatten each of the 32 maps in this batch so stats are per-map.
    diff_absolute = np.reshape(diff_absolute, (32, -1))
    diff_relative = np.reshape(diff_relative, (32, -1))
    amean_err += np.mean(diff_absolute, axis=1).tolist()
    astddev_err += np.std(diff_absolute, axis=1).tolist()
    amin_err += np.min(diff_absolute, axis=1).tolist()
    amax_err += np.max(diff_absolute, axis=1).tolist()
    rmean_err += np.mean(diff_relative, axis=1).tolist()
    rstddev_err += np.std(diff_relative, axis=1).tolist()
    rmin_err += np.min(diff_relative, axis=1).tolist()
    rmax_err += np.max(diff_relative, axis=1).tolist()

# Drop the first 168 samples so the plot starts one full day cycle in,
# then convert everything to arrays (1024 - 168 = 856 points).
amean_err = np.array(amean_err[168:])
astddev_err = np.array(astddev_err[168:])
amin_err = np.array(amin_err[168:])
amax_err = np.array(amax_err[168:])
print(amean_err.shape)
print(astddev_err.shape)
print(amin_err.shape)
print(amax_err.shape)
rmean_err = np.array(rmean_err[168:])
rstddev_err = np.array(rstddev_err[168:])
rmin_err = np.array(rmin_err[168:])
rmax_err = np.array(rmax_err[168:])
print(rmean_err.shape)
print(rstddev_err.shape)
print(rmin_err.shape)
print(rmax_err.shape)


def _time_axis(n_points=856):
    """Datetime x-axis starting 2015-01-12 00:05 with 5-minute resolution."""
    stdate = datetime.datetime(2015, 1, 12, 0, 5)
    tec_resolution = datetime.datetime(2015, 1, 12, 0, 10) - stdate
    dates = [stdate]
    for i in range(1, n_points):
        dates.append(dates[i - 1] + tec_resolution)
    return dates


def _plot_error_summary(mean_e, stddev_e, min_e, max_e, out_file):
    """Draw five stacked panels (mean/stddev/min/max/combined) and save.

    Extracted because the absolute and relative sections were identical
    copy-pastes apart from the data and output file name.
    """
    sns.set_style("whitegrid")
    sns.set_context("poster")
    f, axArr = plt.subplots(5, sharex=True, figsize=(20, 20))
    x_val = _time_axis()
    print(len(x_val))
    cl = sns.color_palette('bright', 4)
    axArr[0].plot(x_val, mean_e, color=cl[0])
    axArr[1].plot(x_val, stddev_e, color=cl[1])
    axArr[2].plot(x_val, min_e, color=cl[2])
    axArr[3].plot(x_val, max_e, color=cl[3])
    axArr[4].plot(x_val, mean_e, color=cl[0], label='mean')
    axArr[4].plot(x_val, stddev_e, color=cl[1], label='stddev')
    for ax, ylabel in zip(axArr, ("Mean", "Stddev", "Min", "Max", "Mean/Stddev")):
        ax.set_ylabel(ylabel, fontsize=14)
        ax.get_xaxis().set_major_formatter(DateFormatter('%H:%M'))
    axArr[-1].set_xlabel("TIME", fontsize=14)
    axArr[4].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=1, ncol=2, borderaxespad=0.1)
    f.savefig(out_file, dpi=f.dpi, bbox_inches='tight')


# Absolute then relative error plots.
_plot_error_summary(amean_err, astddev_err, amin_err, amax_err, 'error_plot_absolute.png')
_plot_error_summary(rmean_err, rstddev_err, rmin_err, rmax_err, 'error_plot_relative.png')
| 5,320 | 2,343 |
#!/usr/bin/env python
# coding: utf-8
# This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Sum of Two Integers (Subtraction Variant).
#
# See the [LeetCode](https://leetcode.com/problems/sum-of-two-integers/) problem page.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * Can we assume we're working with 32 bit ints?
# * Yes
# * Can we assume the inputs are valid?
# * No, check for None
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# <pre>
# * None input -> TypeError
# * 7, 5 -> 2
# * -5, -7 -> 2
# * -5, 7 -> -12
# * 5, -7 -> 12
# </pre>
# ## Algorithm
#
# Refer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
# In[ ]:
class Solution(object):
    """Subtraction of two 32-bit integers using only bitwise operations."""

    def sub_two(self, a, b):
        """Return a - b without using the subtraction operator.

        The stub signature `sub_two(self, val)` could not satisfy the
        in-file unit test, which calls it with two operands.

        Raises:
            TypeError: if either operand is None.
        """
        if a is None or b is None:
            raise TypeError('a and b cannot be None')
        mask = 0xFFFFFFFF      # keep intermediates within 32 bits
        max_int = 0x7FFFFFFF   # largest positive 32-bit two's-complement value
        a &= mask
        b &= mask
        # Repeated "difference + borrow" until no borrow remains:
        # a ^ b is the bitwise difference, (~a & b) marks borrow positions.
        while b:
            borrow = (((~a) & b) << 1) & mask
            a = (a ^ b) & mask
            b = borrow
        # Re-interpret the unsigned result as signed 32-bit.
        return a if a <= max_int else ~(a ^ mask)
# ## Unit Test
# **The following unit test is expected to fail until you solve the challenge.**
# In[ ]:
# %load test_sub_two.py
import unittest
class TestSubTwo(unittest.TestCase):
    """Exercises Solution.sub_two against the documented test cases."""

    def test_sub_two(self):
        solution = Solution()
        self.assertRaises(TypeError, solution.sub_two, None)
        cases = ((7, 5, 2), (-5, -7, 2), (-5, 7, -12), (5, -7, 12))
        for a, b, expected in cases:
            self.assertEqual(solution.sub_two(a, b), expected)
        print('Success: test_sub_two')
def main():
    """Run the challenge's unit test directly (no test runner needed)."""
    TestSubTwo().test_sub_two()


if __name__ == '__main__':
    main()
# ## Solution Notebook
#
# Review the [Solution Notebook]() for a discussion on algorithms and code solutions.
| 1,969 | 709 |
# encoding: utf-8
# Copyright 2011 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
'''Curator: interface'''
from zope.interface import Interface
from zope import schema
from ipdasite.services import ProjectMessageFactory as _
class ICurator(Interface):
    '''A person and agency that is responsible for a service.'''
    # NOTE(review): field order below presumably drives form layout --
    # confirm before reordering.
    # Display name of the curator (required).
    title = schema.TextLine(
        title=_(u'Name'),
        description=_(u'Name of this curator.'),
        required=True,
    )
    # Free-text summary used by search indexing.
    description = schema.Text(
        title=_(u'Description'),
        description=_(u'A short summary of this curator, used in free-text searches.'),
        required=False,
    )
    # Optional contact person.
    contactName = schema.TextLine(
        title=_(u'Contact Name'),
        description=_(u'Name of a person who curates one or more services.'),
        required=False,
    )
    # Plain TextLine (no email validation is applied here).
    emailAddress = schema.TextLine(
        title=_(u'Email Address'),
        description=_(u'Contact address for a person or workgroup that curates services.'),
        required=False,
    )
    # Expected in international format per the field description.
    telephone = schema.TextLine(
        title=_(u'Telephone'),
        description=_(u'Public telephone number in international format in order to contact this curator.'),
        required=False,
    )
| 1,262 | 349 |
from .Results import *
| 23 | 7 |
# Generated by Django 3.0.2 on 2020-01-20 10:43
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines User.image so uploads go through
    # main.models.PathAndRename('user_images') with a default placeholder.

    dependencies = [
        ('main', '0005_auto_20200120_1619'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='image',
            field=models.ImageField(default='user_images/default.png', upload_to=main.models.PathAndRename('user_images')),
        ),
    ]
| 472 | 168 |
##############################################
# #
# Ferdinand 0.40, Ian Thompson, LLNL #
# #
# gnd,endf,fresco,azure,hyrma #
# #
##############################################
# Public re-export list: only the f90nml name is exposed via `import *`.
# NOTE(review): the banner mentions gnd/endf/fresco/azure/hyrma but they are
# not exported here -- confirm that is intentional.
__all__ = ["f90nml"]
| 350 | 81 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from ellalgo.oracles.chol_ext import chol_ext
def test_chol1():
    """A symmetric positive-definite matrix factorizes cleanly."""
    spd = np.array([[25.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 11.0]])
    fac = chol_ext(len(spd))
    assert fac.factorize(spd)
def test_chol2():
    """An indefinite matrix fails; witness localizes the offending block."""
    mat = np.array([
        [18.0, 22.0, 54.0, 42.0],
        [22.0, -70.0, 86.0, 62.0],
        [54.0, 86.0, -174.0, 134.0],
        [42.0, 62.0, 134.0, -106.0],
    ])
    fac = chol_ext(len(mat))
    assert not fac.factorize(mat)
    fac.witness()
    assert fac.p == (0, 2)
def test_chol3():
    """A zero leading pivot is rejected; witness vector and gap are exact."""
    mat = np.array([[0.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 11.0]])
    fac = chol_ext(len(mat))
    assert not fac.factorize(mat)
    gap = fac.witness()
    assert fac.p == (0, 1)
    assert fac.v[0] == 1.0
    assert gap == 0.0
def test_chol4():
    """Positive-definite input still factorizes with semidefinite mode on."""
    spd = np.array([[25.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 11.0]])
    fac = chol_ext(len(spd))
    fac.allow_semidefinite = True
    assert fac.factorize(spd)
def test_chol5():
    """Semidefinite mode does not rescue a genuinely indefinite matrix."""
    mat = np.array([
        [18.0, 22.0, 54.0, 42.0],
        [22.0, -70.0, 86.0, 62.0],
        [54.0, 86.0, -174.0, 134.0],
        [42.0, 62.0, 134.0, -106.0],
    ])
    fac = chol_ext(len(mat))
    fac.allow_semidefinite = True
    assert not fac.factorize(mat)
    fac.witness()
    assert fac.p == (0, 2)
def test_chol6():
    """A zero pivot is tolerated once semidefinite mode is enabled."""
    mat = np.array([[0.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 11.0]])
    fac = chol_ext(len(mat))
    fac.allow_semidefinite = True
    assert fac.factorize(mat)
def test_chol7():
    """Semidefinite mode still rejects a negative direction; gap is 20."""
    mat = np.array([[0.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, -20.0]])
    fac = chol_ext(len(mat))
    fac.allow_semidefinite = True
    assert not fac.factorize(mat)
    assert fac.witness() == 20.0
def test_chol8():
    """Strict mode rejects a matrix that is only positive semidefinite."""
    mat = np.array([[0.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 20.0]])
    fac = chol_ext(len(mat))
    fac.allow_semidefinite = False
    assert not fac.factorize(mat)
def test_chol9():
    """The same semidefinite matrix is accepted when the flag is enabled."""
    mat = np.array([[0.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 20.0]])
    fac = chol_ext(len(mat))
    fac.allow_semidefinite = True
    assert fac.factorize(mat)
| 2,707 | 1,513 |
#!/usr/bin/env python3
from TelloSDKPy.djitellopy.tello import Tello
import cv2
import pygame
import numpy as np
import time
def main():
    """Map joystick buttons to Tello commands: 0=takeoff, 1=land, 2=quit."""
    # Controller init
    pygame.init()
    joysticks = []
    for i in range(0, pygame.joystick.get_count()):
        joysticks.append(pygame.joystick.Joystick(i))
        joysticks[-1].init()
        print(joysticks[-1].get_name())
    # Tello init -- BUG FIX: `drone` was referenced below but never created,
    # so every button press raised NameError.
    drone = Tello()
    # NOTE(review): newer djitellopy versions require drone.connect() before
    # issuing commands -- confirm against the vendored TelloSDKPy version.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.JOYBUTTONDOWN:
                b = event.button
                if b == 0:
                    print("takeoff")
                    drone.takeoff()
                elif b == 1:
                    print("land")
                    drone.land()
                elif b == 2:
                    print("quit")
                    return 0


if __name__ == "__main__":
    main()
| 867 | 272 |
import os
from copy import deepcopy
import tqdm
import torch
import torch.nn.functional as F
import colorful
import numpy as np
import networkx as nx
from tensorboardX import SummaryWriter
from .reservoir import reservoir
from components import Net
from utils import BetaMixture1D
class SPR(torch.nn.Module):
    """ Train Continual Model self-supervisedly
    Freeze when required to eval and finetune supervisedly using Purified Buffer.
    """
    def __init__(self, config, writer: SummaryWriter):
        super().__init__()
        self.config = config
        self.device = config['device']
        self.writer = writer
        # Purified buffer keeps samples judged clean; delay buffer stages raw
        # incoming samples until it fills and triggers a filtering round.
        self.purified_buffer = reservoir['purified'](config, config['purified_buffer_size'], config['purified_buffer_q_poa'])
        self.delay_buffer = reservoir['delay'](config, config['delayed_buffer_size'], config['delayed_buffer_q_poa'])
        # Number of stochastic ensemble rounds used in cluster_and_sample.
        self.E_max = config['E_max']
        # Step counters drive the tensorboard x-axes.
        self.expert_step = 0
        self.base_step = 0
        self.base_ft_step = 0
        self.expert_number = 0
        self.base = self.get_init_base(config)
        self.expert = self.get_init_expert(config)
        # Directory of pre-trained expert checkpoints; when present, experts
        # are loaded from disk instead of re-trained (see learn()).
        self.ssl_dir = os.path.join(os.path.dirname(os.path.dirname(self.config['log_dir'])),
                                    'noiserate_{}'.format(config['corruption_percent']),
                                    'expt_{}'.format(config['expert_train_epochs']),
                                    'randomseed_{}'.format(config['random_seed']))
        if os.path.exists(self.ssl_dir):
            with open(os.path.join(self.ssl_dir, 'idx_sets.npy'), 'rb') as f:
                self.debug_idxs = np.load(f, allow_pickle=True)

    def get_init_base(self, config):
        """get initialized base model"""
        base = Net[config['net']](config)
        optim_config = config['optimizer']
        # Deep-copied so the T_max override does not leak into config.
        lr_scheduler_config = deepcopy(config['lr_scheduler'])
        lr_scheduler_config['options'].update({'T_max': config['base_train_epochs']})
        base.setup_optimizer(optim_config)
        base.setup_lr_scheduler(lr_scheduler_config)
        return base

    def get_init_expert(self, config):
        """get initialized expert model"""
        expert = Net[config['net']](config)
        optim_config = config['optimizer']
        lr_scheduler_config = deepcopy(config['lr_scheduler'])
        lr_scheduler_config['options'].update({'T_max': config['expert_train_epochs']})
        expert.setup_optimizer(optim_config)
        expert.setup_lr_scheduler(lr_scheduler_config)
        return expert

    def get_init_base_ft(self, config):
        """get initialized eval model"""
        base_ft = Net[config['net'] + '_ft'](config)
        optim_config = config['optimizer_ft']
        lr_scheduler_config = config['lr_scheduler_ft']
        base_ft.setup_optimizer(optim_config)
        base_ft.setup_lr_scheduler(lr_scheduler_config)
        return base_ft

    def learn(self, x, y, corrupt, idx, step=None):
        """Stage incoming samples; when the delay buffer fills, train (or
        load) an expert, self-train the base, filter, and purify."""
        x, y = x.cuda(), y.cuda()
        for i in range(len(x)):
            self.delay_buffer.update(imgs=x[i: i + 1], cats=y[i: i + 1], corrupts=corrupt[i: i + 1], idxs=idx[i: i + 1])
            if self.delay_buffer.is_full():
                # Train a fresh expert unless a checkpoint for this round
                # already exists on disk.
                if not os.path.exists(os.path.join(self.ssl_dir, 'model{}.ckpt'.format(self.expert_number))):
                    self.expert = self.get_init_expert(self.config)
                    self.train_self_expert()
                else:
                    self.expert.load_state_dict(
                        torch.load(os.path.join(self.ssl_dir, 'model{}.ckpt'.format(self.expert_number)),
                                   map_location=self.device))
                    ################### data consistency check ######################
                    # The loaded checkpoint must correspond to exactly the
                    # sample indices currently staged in the delay buffer.
                    if torch.sum(self.delay_buffer.get('idxs') != torch.Tensor(self.debug_idxs[self.expert_number])) != 0:
                        raise Exception("it seems there is a data consistency problem: exp_num {}".format(self.expert_number))
                    ################### data consistency check ######################
                self.train_self_base()
                clean_idx, clean_p = self.cluster_and_sample()
                self.update_purified_buffer(clean_idx, clean_p, step)
                self.expert_number += 1

    def update_purified_buffer(self, clean_idx, clean_p, step):
        """update purified buffer with the filtered samples"""
        self.purified_buffer.update(
            imgs=self.delay_buffer.get('imgs')[clean_idx],
            cats=self.delay_buffer.get('cats')[clean_idx],
            corrupts=self.delay_buffer.get('corrupts')[clean_idx],
            idxs=self.delay_buffer.get('idxs')[clean_idx],
            clean_ps=clean_p)
        # Delay buffer is emptied for the next staging round.
        self.delay_buffer.reset()
        print(colorful.bold_yellow(self.purified_buffer.state('corrupts')).styled_string)
        self.writer.add_scalar(
            'buffer_corrupts', torch.sum(self.purified_buffer.get('corrupts')), step)

    def cluster_and_sample(self):
        """filter samples in delay buffer

        Per class: build a thresholded cosine-similarity graph, score each
        sample by eigenvector centrality, fit a Beta mixture over the scores,
        and keep samples by sampling their ensemble-averaged clean posterior.
        Returns (kept indices into the delay buffer, their posteriors).
        """
        self.expert.eval()
        with torch.no_grad():
            xs = self.delay_buffer.get('imgs')
            ys = self.delay_buffer.get('cats')
            corrs = self.delay_buffer.get('corrupts')
            features = self.expert(xs)
            features = F.normalize(features, dim=1)
        clean_p = list()
        clean_idx = list()
        print("***********************************************")
        for u_y in torch.unique(ys).tolist():
            y_mask = ys == u_y
            corr = corrs[y_mask]
            feature = features[y_mask]
            # ignore negative similarities
            _similarity_matrix = torch.relu(F.cosine_similarity(feature.unsqueeze(1), feature.unsqueeze(0), dim=-1))
            # stochastic ensemble: E_max independent graph samplings
            _clean_ps = torch.zeros((self.E_max, len(feature)), dtype=torch.double)
            for _i in range(self.E_max):
                # Bernoulli-sample edges with probability = similarity.
                similarity_matrix = (_similarity_matrix > torch.rand_like(_similarity_matrix)).type(torch.float32)
                similarity_matrix[similarity_matrix == 0] = 1e-5  # add small num for ensuring positive matrix
                # NOTE(review): nx.from_numpy_matrix was removed in
                # networkx 3.0 (use from_numpy_array) -- pin networkx < 3.
                g = nx.from_numpy_matrix(similarity_matrix.cpu().numpy())
                info = nx.eigenvector_centrality(g, max_iter=6000, weight='weight')  # index: value
                centrality = [info[i] for i in range(len(info))]
                bmm_model = BetaMixture1D(max_iters=10)
                # fit beta mixture model on outlier-trimmed, normalized scores
                c = np.asarray(centrality)
                c, c_min, c_max = bmm_model.outlier_remove(c)
                c = bmm_model.normalize(c, c_min, c_max)
                bmm_model.fit(c)
                bmm_model.create_lookup(1)  # 0: noisy, 1: clean
                # get posterior for ALL samples (untrimmed)
                c = np.asarray(centrality)
                c = bmm_model.normalize(c, c_min, c_max)
                p = bmm_model.look_lookup(c)
                _clean_ps[_i] = torch.from_numpy(p)
            # Average posteriors over the ensemble, then keep stochastically.
            _clean_ps = torch.mean(_clean_ps, dim=0)
            m = _clean_ps > torch.rand_like(_clean_ps)
            clean_idx.extend(torch.nonzero(y_mask)[:, -1][m].tolist())
            clean_p.extend(_clean_ps[m].tolist())
            print("class: {}".format(u_y))
            print("--- num of selected samples: {}".format(torch.sum(m).item()))
            print("--- num of selected corrupt samples: {}".format(torch.sum(corr[m]).item()))
        print("***********************************************")
        return clean_idx, torch.Tensor(clean_p)

    def train_self_base(self):
        """Self Replay. train base model with samples from delay and purified buffer"""
        bs = self.config['base_batch_size']
        # If purified buffer is full, train using it also
        db_bs = (bs // 2) if self.purified_buffer.is_full() else bs
        db_bs = min(db_bs, len(self.delay_buffer))
        pb_bs = min(bs - db_bs, len(self.purified_buffer))
        self.base.train()
        self.base.init_ntxent(self.config, batch_size=db_bs + pb_bs)
        dataloader = self.delay_buffer.get_dataloader(batch_size=db_bs, shuffle=True, drop_last=True)
        for epoch_i in tqdm.trange(self.config['base_train_epochs'], desc="base training", leave=False):
            for inner_step, data in enumerate(dataloader):
                x = data['imgs']
                self.base.zero_grad()
                # sample data from purified buffer and merge
                if pb_bs > 0:
                    replay_data = self.purified_buffer.sample(num=pb_bs)
                    x = torch.cat([replay_data['imgs'], x], dim=0)
                loss = self.base.get_selfsup_loss(x)
                loss.backward()
                self.base.optimizer.step()
                self.writer.add_scalar(
                    'continual_base_train_loss', loss,
                    self.base_step + inner_step + epoch_i * len(dataloader))
            # warmup for the first 10 epochs
            if epoch_i >= 10:
                self.base.lr_scheduler.step()
        self.writer.flush()
        self.base_step += self.config['base_train_epochs'] * len(dataloader)

    def train_self_expert(self):
        """train expert model with samples from delay"""
        batch_size = min(self.config['expert_batch_size'], len(self.delay_buffer))
        self.expert.train()
        self.expert.init_ntxent(self.config, batch_size=batch_size)
        dataloader = self.delay_buffer.get_dataloader(batch_size=batch_size, shuffle=True, drop_last=True)
        for epoch_i in tqdm.trange(self.config['expert_train_epochs'], desc='expert training', leave=False):
            for inner_step, data in enumerate(dataloader):
                x = data['imgs']
                self.expert.zero_grad()
                loss = self.expert.get_selfsup_loss(x)
                loss.backward()
                self.expert.optimizer.step()
                self.writer.add_scalar(
                    'expert_train_loss', loss,
                    self.expert_step + inner_step + len(dataloader) * epoch_i)
            # warmup for the first 10 epochs
            if epoch_i >= 10:
                self.expert.lr_scheduler.step()
        self.writer.flush()
        self.expert_step += self.config['expert_train_epochs'] * len(dataloader)

    def get_finetuned_model(self):
        """copy the base and fine-tune for evaluation"""
        base_ft = self.get_init_base_ft(self.config)
        # overwrite entries in the state dict (shared backbone weights only)
        ft_dict = base_ft.state_dict()
        ft_dict.update({k: v for k, v in self.base.state_dict().items() if k in ft_dict})
        base_ft.load_state_dict(ft_dict)
        base_ft.train()
        dataloader = self.purified_buffer.get_dataloader(batch_size=self.config['ft_batch_size'], shuffle=True, drop_last=True)
        for epoch_i in tqdm.trange(self.config['ft_epochs'], desc='finetuning', leave=False):
            for inner_step, data in enumerate(dataloader):
                x, y = data['imgs'], data['cats']
                base_ft.zero_grad()
                loss = base_ft.get_sup_loss(x, y).mean()
                loss.backward()
                base_ft.clip_grad()
                base_ft.optimizer.step()
                base_ft.lr_scheduler.step()
                self.writer.add_scalar(
                    'ft_train_loss', loss,
                    self.base_ft_step + inner_step + epoch_i * len(dataloader))
        self.writer.flush()
        self.base_ft_step += self.config['ft_epochs'] * len(dataloader)
        base_ft.eval()
        return base_ft

    def forward(self, x):
        # Intentionally unused; training happens through learn()/helpers.
        pass
| 11,761 | 3,690 |
# Copyright (c) 2020-2022 The PyUnity Team
# This file is licensed under the MIT License.
# See https://docs.pyunity.x10.bz/en/latest/license.html
from pyunity import (
SceneManager, Component, Camera, AudioListener, Light,
GameObject, Tag, Transform, GameObjectException,
ComponentException, Canvas, PyUnityException,
Behaviour, ShowInInspector, RenderTarget, Logger,
Vector3, MeshRenderer, Mesh)
from . import SceneTestCase
class TestScene(SceneTestCase):
    def testInit(self):
        """A new scene contains a wired-up Main Camera and Light."""
        scene = SceneManager.AddScene("Scene")
        assert scene.name == "Scene"
        assert len(scene.gameObjects) == 2
        for gameObject in scene.gameObjects:
            assert gameObject.scene is scene
            # Every component must point back at its owner.
            for component in gameObject.components:
                assert component.gameObject is gameObject
                assert component.transform is gameObject.transform
                assert isinstance(component, Component)
        assert scene.gameObjects[0].name == "Main Camera"
        assert scene.gameObjects[1].name == "Light"
        assert scene.mainCamera is scene.gameObjects[0].components[1]
        assert len(scene.gameObjects[0].components) == 3
        assert len(scene.gameObjects[1].components) == 2
        assert scene.gameObjects[0].GetComponent(Camera) is not None
        assert scene.gameObjects[0].GetComponent(AudioListener) is not None
        assert scene.gameObjects[1].GetComponent(Light) is not None

    def testFind(self):
        """Lookups by name, tag name, tag number and component type."""
        scene = SceneManager.AddScene("Scene")
        a = GameObject("A")
        b = GameObject("B", a)
        c = GameObject("C", a)
        d = GameObject("B", c)
        scene.AddMultiple(a, b, c, d)
        tagnum = Tag.AddTag("Custom Tag")
        a.tag = Tag(tagnum)
        c.tag = Tag("Custom Tag")
        assert len(scene.FindGameObjectsByName("B")) == 2
        assert scene.FindGameObjectsByName("B") == [b, d]
        assert scene.FindGameObjectsByTagName("Custom Tag") == [a, c]
        assert scene.FindGameObjectsByTagNumber(tagnum) == [a, c]
        assert isinstance(scene.FindComponent(Transform), Transform)
        assert scene.FindComponents(Transform) == [
            scene.mainCamera.transform, scene.gameObjects[1].transform,
            a.transform, b.transform, c.transform, d.transform]
        # Error paths: unknown tag name, invalid tag index, missing component.
        with self.assertRaises(GameObjectException) as exc:
            scene.FindGameObjectsByTagName("Invalid")
        assert exc.value == "No tag named Invalid; create a new tag with Tag.AddTag"
        with self.assertRaises(GameObjectException) as exc:
            scene.FindGameObjectsByTagNumber(-1)
        assert exc.value == "No tag at index -1; create a new tag with Tag.AddTag"
        with self.assertRaises(ComponentException) as exc:
            scene.FindComponent(Canvas)
        assert exc.value == "Cannot find component Canvas in scene"

    def testRootGameObjects(self):
        """Only parentless objects count as roots (defaults + `a`)."""
        scene = SceneManager.AddScene("Scene")
        a = GameObject("A")
        b = GameObject("B", a)
        c = GameObject("C", a)
        d = GameObject("B", c)
        scene.AddMultiple(a, b, c, d)
        assert len(scene.rootGameObjects) == 3
        assert scene.rootGameObjects[2] is a

    def testAddError(self):
        """Adding the same GameObject twice raises."""
        scene = SceneManager.AddScene("Scene")
        gameObject = GameObject("GameObject")
        scene.Add(gameObject)
        with self.assertRaises(PyUnityException) as exc:
            scene.Add(gameObject)
        assert exc.value == "GameObject \"GameObject\" is already in Scene \"Scene\""

    def testBare(self):
        """Scene.Bare creates an empty scene with no camera."""
        from pyunity.scenes import Scene
        scene = Scene.Bare("Scene")
        assert scene.name == "Scene"
        assert len(scene.gameObjects) == 0
        assert scene.mainCamera is None

    def testDestroy(self):
        """Destroy removes objects (and children) and nulls references."""
        class Test(Behaviour):
            other = ShowInInspector(GameObject)
        scene = SceneManager.AddScene("Scene")
        # Exception
        fake = GameObject("Not in scene")
        with self.assertRaises(PyUnityException) as exc:
            scene.Destroy(fake)
        assert exc.value == "The provided GameObject is not part of the Scene"
        # Correct
        a = GameObject("A")
        b = GameObject("B", a)
        c = GameObject("C", a)
        scene.AddMultiple(a, b, c)
        assert c.scene is scene
        assert c in scene.gameObjects
        scene.Destroy(c)
        assert c.scene is None
        assert c not in scene.gameObjects
        # Multiple: destroying a parent takes its children with it
        scene.Destroy(a)
        assert b.scene is None
        assert b not in scene.gameObjects
        assert c.scene is None
        assert c not in scene.gameObjects
        # Components: references to destroyed objects/components are cleared
        cam = GameObject("Camera")
        camera = cam.AddComponent(Camera)
        test = GameObject("Test")
        test.AddComponent(Test).other = cam
        target = GameObject("Target")
        target.AddComponent(RenderTarget).source = camera
        scene.AddMultiple(cam, test, target)
        scene.Destroy(cam)
        assert b.scene is None
        assert cam not in scene.gameObjects
        assert test.GetComponent(Test).other is None
        assert target.GetComponent(RenderTarget).source is None
        # Main Camera: removal is allowed but warns
        with Logger.TempRedirect(silent=True) as r:
            scene.Destroy(scene.mainCamera.gameObject)
        assert r.get() == "Warning: Removing Main Camera from scene 'Scene'\n"

    def testHas(self):
        """Has() reflects membership."""
        scene = SceneManager.AddScene("Scene")
        gameObject = GameObject("GameObject")
        gameObject2 = GameObject("GameObject 2")
        scene.Add(gameObject)
        assert scene.Has(gameObject)
        assert not scene.Has(gameObject2)

    def testList(self):
        """List() prints paths depth-first, sorted by name."""
        scene = SceneManager.AddScene("Scene")
        a = GameObject("A")
        b = GameObject("B", a)
        c = GameObject("C", a)
        d = GameObject("B", c)
        scene.AddMultiple(b, d, c, a)
        with Logger.TempRedirect(silent=True) as r:
            scene.List()
        assert r.get() == "\n".join([
            "/A", "/A/B", "/A/C", "/A/C/B", "/Light", "/Main Camera\n"])

    def testInsideFrustrum(self):
        """A renderer with no mesh is never inside the frustum."""
        scene = SceneManager.AddScene("Scene")
        gameObject = GameObject("Cube")
        gameObject.transform.position = Vector3(0, 0, 5)
        renderer = gameObject.AddComponent(MeshRenderer)
        scene.Add(gameObject)
        assert not scene.insideFrustrum(renderer)
        renderer.mesh = Mesh.cube(2)
        # Disabled assertions kept for reference -- presumably flaky;
        # confirm before re-enabling.
        # assert scene.insideFrustrum(renderer))
        gameObject.transform.position = Vector3(0, 0, -5)
        # assert not scene.insideFrustrum(renderer)
| 6,602 | 1,920 |
import logging
import os
import alembic.command
import alembic.config
import cfnresponse
from db.session import get_session, get_session_maker
from retry import retry
from sqlalchemy.exc import OperationalError
def log(log_statement: str):
    """Log `log_statement` at INFO level on the root logger.

    Alembic takes over the logging configuration while it runs, so the
    level is re-asserted on every call to keep our messages visible.

    :param log_statement: str to log
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    root_logger.info(log_statement)
@retry(OperationalError, tries=30, delay=10)
def check_rds_connection():
    """Probe RDS by listing catalog tables; retried while the DB warms up."""
    maker = get_session_maker()
    with get_session(maker) as session:
        session.execute("SELECT * FROM pg_catalog.pg_tables;")
def handler(event, context):
    """CloudFormation custom-resource entry point: run Alembic migrations.

    Deletes are acknowledged without running anything; any failure is
    reported back to CloudFormation and re-raised so the Lambda errors.
    """
    if event["RequestType"] == "Delete":
        log("Received a Delete Request")
        cfnresponse.send(event, context, cfnresponse.SUCCESS,
                         {"Response": "Nothing run on deletes"})
        return
    try:
        log("Checking connection to RDS")
        check_rds_connection()
        log("Connected to RDS")
        log("Running Alembic Migrations")
        cfg = alembic.config.Config(os.path.join(".", "alembic.ini"))
        cfg.set_main_option("script_location", ".")
        alembic.command.upgrade(cfg, "head")
        log("Migrations run successfully")
        cfnresponse.send(event, context, cfnresponse.SUCCESS,
                         {"Response": "Migrations run successfully"})
    except Exception as ex:
        log(str(ex))
        cfnresponse.send(event, context, cfnresponse.FAILED, {"Response": str(ex)})
        raise ex
| 1,806 | 534 |
from __future__ import unicode_literals
import plumber
from lxml import etree
from datetime import datetime
import pipeline
class BadArgumentError(Exception):
    """Raised when a verb receives an unexpected or missing argument."""
class CannotDisseminateFormatError(Exception):
    """Raised when the requested metadata format is not supported."""
class BadVerbError(Exception):
    """Raised when an invalid OAI-PMH verb is used."""
class IDDoesNotExistError(Exception):
    """Raised when the requested identifier does not exist."""
class NoRecordsMatchError(Exception):
    """
    Raised when all parameters combined
    result in an empty list of records.
    """
class BadResumptionTokenError(Exception):
    """Raised when an invalid resumption token is used."""
class IdentifyVerb(object):
    """Render the OAI-PMH ``Identify`` response for the repository."""

    # Template of repository-level metadata; copied per instance in
    # __init__ so request-specific keys never touch this shared dict.
    data = {
        'repositoryName': 'SciELO Books',
        'protocolVersion': '2.0',
        'adminEmail': 'scielo.books@scielo.org',
        'deletedRecord': 'persistent',
        'granularity': 'YYYY-MM-DD'
    }
    allowed_args = set(('verb',))

    def __init__(self, last_book, request_kwargs, base_url):
        if set(request_kwargs) != self.allowed_args:
            raise BadArgumentError()
        # BUG FIX: previously this mutated the shared class-level dict, so
        # request/baseURL/earliestDatestamp from one request leaked into
        # every later instance. Copy the template per instance instead.
        self.data = dict(self.data)
        self.data['request'] = request_kwargs
        self.data['baseURL'] = base_url
        # Fall back to "today" when the newest book has no update date.
        self.data['earliestDatestamp'] = last_book.get('updated', datetime.now().date().isoformat())

    def __str__(self):
        ppl = plumber.Pipeline(
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.IdentifyNodePipe(),
            pipeline.TearDownPipe()
        )
        results = ppl.run([self.data])
        return next(results)
class ListMetadataFormatsVerb(object):
    """Render the OAI-PMH ``ListMetadataFormats`` response."""

    # Template of supported formats; copied per instance in __init__.
    data = {
        'formats': [
            {
                'prefix': 'oai_dc',
                'schema': 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
                'namespace': 'http://www.openarchives.org/OAI/2.0/oai_dc/'
            }
        ]
    }
    allowed_args = set(('identifier', 'verb'))

    def __init__(self, request_kwargs, base_url):
        diff = set(request_kwargs) - self.allowed_args
        if diff:
            raise BadArgumentError()
        # BUG FIX: copy the class-level template instead of mutating it, so
        # per-request keys do not leak across instances.
        self.data = dict(self.data)
        self.data['request'] = request_kwargs
        self.data['baseURL'] = base_url

    def __str__(self):
        ppl = plumber.Pipeline(
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.ListMetadataFormatsPipe(),
            pipeline.MetadataFormatPipe(),
            pipeline.TearDownPipe()
        )
        results = ppl.run([self.data])
        return next(results)
class ListIdentifiersVerb(object):
    """Render the OAI-PMH ``ListIdentifiers`` response."""

    allowed_args = set(('from', 'until', 'set', 'resumptionToken', 'metadataPrefix', 'verb'))

    def __init__(self, books, request_kwargs, base_url):
        supplied = set(request_kwargs)
        # Either a resumption token or a metadata prefix is mandatory.
        if 'resumptionToken' not in supplied and 'metadataPrefix' not in supplied:
            raise BadArgumentError()
        # Any argument outside the allowed set is rejected.
        if supplied - self.allowed_args:
            raise BadArgumentError()
        self.data = {
            'request': request_kwargs,
            'baseURL': base_url,
            'books': books,
        }

    def __str__(self):
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.ListIdentifiersPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class ListSetsVerb(object):
    """Render the OAI-PMH ``ListSets`` response (sets = distinct publishers)."""

    allowed_args = set(('resumptionToken', 'verb'))

    def __init__(self, books, request_kwargs, base_url):
        if set(request_kwargs) - self.allowed_args:
            raise BadArgumentError()
        self.data = {
            'request': request_kwargs,
            'baseURL': base_url,
            'books': books.distinct('publisher'),
        }

    def __str__(self):
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.ListSetsPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class GetRecordVerb(object):
    """Render the OAI-PMH ``GetRecord`` response for one identifier."""

    required_args = set(('identifier', 'metadataPrefix', 'verb'))

    def __init__(self, books, request_kwargs, base_url):
        # All three arguments are mandatory, and nothing else is allowed.
        if set(request_kwargs) != self.required_args:
            raise BadArgumentError()
        self.data = {
            'request': request_kwargs,
            'baseURL': base_url,
            'books': books
        }

    def __str__(self):
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.GetRecordPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class ListRecordsVerb(object):
    """Handle the OAI-PMH ``ListRecords`` verb."""

    allowed_args = set(('from', 'until', 'set', 'resumptionToken', 'metadataPrefix', 'verb'))

    def __init__(self, books, request_kwargs, base_url):
        """Validate arguments and assemble the pipeline input data.

        :raises BadArgumentError: when neither ``resumptionToken`` nor
            ``metadataPrefix`` is supplied, or an unknown argument is present.
        """
        supplied = set(request_kwargs)
        if 'resumptionToken' not in supplied and 'metadataPrefix' not in supplied:
            raise BadArgumentError()
        if supplied - self.allowed_args:
            raise BadArgumentError()
        self.data = {
            'request': request_kwargs,
            'baseURL': base_url,
            'books': books,
        }

    def __str__(self):
        """Run the response pipeline and return the rendered document."""
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.ListRecordsPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class CannotDisseminateFormat(object):
    """Error response for an unsupported metadata format."""

    def __init__(self, request_kwargs, base_url):
        self.data = {'request': request_kwargs, 'baseURL': base_url}

    def __str__(self):
        """Run the error pipeline and return the rendered document."""
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.MetadataFormatErrorPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class BadVerb(object):
    """Error response for an unknown or missing OAI-PMH verb."""

    def __init__(self, request_kwargs, base_url):
        self.data = {'request': request_kwargs, 'baseURL': base_url}

    def __str__(self):
        """Run the error pipeline and return the rendered document."""
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.BadVerbPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class IDDoesNotExist(object):
    """Error response for an identifier that is not in the repository."""

    def __init__(self, request_kwargs, base_url):
        self.data = {'request': request_kwargs, 'baseURL': base_url}

    def __str__(self):
        """Run the error pipeline and return the rendered document."""
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.IdNotExistPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class NoRecordsMatch(object):
    """Error response when a harvest query matches no records."""

    def __init__(self, request_kwargs, base_url):
        self.data = {'request': request_kwargs, 'baseURL': base_url}

    def __str__(self):
        """Run the error pipeline and return the rendered document."""
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.NoRecordsPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class BadArgument(object):
    """Error response for an illegal or missing request argument.

    ``books`` is accepted (and ignored) so the constructor matches the
    signature of the regular verb classes.
    """

    def __init__(self, request_kwargs, base_url, books=None):
        self.data = {'request': request_kwargs, 'baseURL': base_url}

    def __str__(self):
        """Run the error pipeline and return the rendered document."""
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.BadArgumentPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
class BadResumptionToken(object):
    """Error response for an invalid or expired resumption token.

    ``books`` is accepted (and ignored) so the constructor matches the
    signature of the regular verb classes.
    """

    def __init__(self, request_kwargs, base_url, books=None):
        self.data = {'request': request_kwargs, 'baseURL': base_url}

    def __str__(self):
        """Run the error pipeline and return the rendered document."""
        stages = (
            pipeline.SetupPipe(),
            pipeline.ResponseDatePipe(),
            pipeline.RequestPipe(),
            pipeline.BadResumptionTokenPipe(),
            pipeline.TearDownPipe(),
        )
        return next(plumber.Pipeline(*stages).run([self.data]))
| 8,988 | 2,591 |
from LinkedList import LinkedList
def sum_lists(ll_a, ll_b):
    """Add two numbers stored as linked lists of digits, least significant
    digit first, and return the sum as a new LinkedList in the same order.
    """
    total = LinkedList()
    left, right = ll_a.head, ll_b.head
    carry = 0
    while left or right:
        digit_sum = carry
        if left:
            digit_sum += left.value
            left = left.next
        if right:
            digit_sum += right.value
            right = right.next
        carry, digit = divmod(digit_sum, 10)
        total.add(digit)
    if carry:
        total.add(carry)
    return total
def sum_lists_followup(ll_a, ll_b):
    """Add two numbers stored as linked lists of digits, most significant
    digit first, and return the sum as a new LinkedList in the same order.
    """
    # Left-pad the shorter operand with zeros so both lists line up.
    gap = len(ll_a) - len(ll_b)
    if gap > 0:
        for _ in range(gap):
            ll_b.add_to_beginning(0)
    else:
        for _ in range(-gap):
            ll_a.add_to_beginning(0)
    # Fold both digit streams into a single integer; carries are handled
    # implicitly by the arithmetic.
    left, right = ll_a.head, ll_b.head
    total = 0
    while left and right:
        total = total * 10 + left.value + right.value
        left = left.next
        right = right.next
    # Explode the integer back into a digit list.
    summed = LinkedList()
    summed.add_multiple([int(ch) for ch in str(total)])
    return summed
# Demo: build two random digit lists and print them and their sum.
ll_a = LinkedList()
ll_a.generate(4, 0, 9)
ll_b = LinkedList()
ll_b.generate(3, 0, 9)
print(ll_a)
print(ll_b)
# BUG FIX: the original called the undefined name ``sum_lists_recursive``,
# which raised NameError; ``sum_lists`` is the implemented variant.
print(sum_lists(ll_a, ll_b))
#print(sum_lists_followup(ll_a, ll_b))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/15 14:44
# @Author : Fred Yangxiaofei
# @File : server_common.py
# @Role : server公用方法,记录日志,更新资产,推送密钥,主要给手动更新资产使用
from models.server import Server, AssetErrorLog, ServerDetail
from libs.db_context import DBContext
from libs.web_logs import ins_log
from libs.server.sync_public_key import RsyncPublicKey, start_rsync
import sqlalchemy
def write_error_log(error_list):
    """Persist public-key push failures.

    For every entry, mark the server state as 'false' and upsert the
    error message into AssetErrorLog.

    :param error_list: list of dicts carrying ``ip`` and ``msg`` keys.
    """
    with DBContext('w') as session:
        for failure in error_list:
            ip = failure.get('ip')
            msg = failure.get('msg')
            error_log = '推送公钥失败, 错误信息:{}'.format(msg)
            ins_log.read_log('error', error_log)
            # Flag the server itself as failed.
            session.query(Server).filter(Server.ip == ip).update({Server.state: 'false'})
            existing = session.query(AssetErrorLog).filter(AssetErrorLog.ip == ip).first()
            if existing:
                session.query(AssetErrorLog).filter(AssetErrorLog.ip == ip).update(
                    {AssetErrorLog.error_log: error_log})
            else:
                session.add(AssetErrorLog(ip=ip, error_log=error_log))
            session.commit()
def update_asset(asset_data):
    """
    Persist collected asset facts to the database.

    For every host that reported successfully, insert or update its
    ServerDetail row and mark the Server row healthy ('true'); on an
    IntegrityError the host is flagged 'false', its detail row is removed
    and the error is recorded in AssetErrorLog.

    :param asset_data: mapping of host IP -> collected base asset data
        (each value presumably a dict with a ``status`` flag and hardware
        fields — TODO confirm against the collector).
    :return: ``False`` on the first IntegrityError; otherwise ``None``.
    """
    with DBContext('w') as session:
        for k, v in asset_data.items():
            try:
                # Only hosts whose collection succeeded are written.
                if asset_data[k].get('status'):
                    _sn = v.get('sn', None)
                    _hostname = v.get('host_name', None)
                    _cpu = v.get('cpu', None)
                    _cpu_cores = v.get('cpu_cores', None)
                    _memory = v.get('memory', None)
                    _disk = v.get('disk', None)
                    _os_type = v.get('os_type', None)
                    _os_kernel = v.get('os_kernel', None)
                    # _instance_id = v.get('instance_id', None)
                    # _instance_type = v.get('instance_type', None)
                    # _instance_state = v.get('instance_state', None)
                    exist_detail = session.query(ServerDetail).filter(ServerDetail.ip == k).first()
                    if not exist_detail:
                        # No detail row yet: create one.
                        new_server_detail = ServerDetail(ip=k, sn=_sn, cpu=_cpu, cpu_cores=_cpu_cores,
                                                         memory=_memory, disk=_disk,
                                                         os_type=_os_type, os_kernel=_os_kernel)
                        session.add(new_server_detail)
                        session.commit()
                        session.query(Server).filter(Server.ip == k).update(
                            {Server.hostname: _hostname, Server.state: 'true'})
                        session.commit()
                    else:
                        # Detail row exists: update it in place.
                        session.query(ServerDetail).filter(ServerDetail.ip == k).update({
                            ServerDetail.sn: _sn, ServerDetail.ip: k,
                            ServerDetail.cpu: _cpu, ServerDetail.cpu_cores: _cpu_cores,
                            ServerDetail.disk: _disk, ServerDetail.memory: _memory,
                            ServerDetail.os_type: _os_type, ServerDetail.os_kernel: _os_kernel,
                        })
                        session.query(Server).filter(Server.ip == k).update(
                            {Server.hostname: _hostname, Server.state: 'true'})
                        session.commit()
            except sqlalchemy.exc.IntegrityError as e:
                ins_log.read_log('error', e)
                # Mark state 'false' -> delete the host detail -> record the error.
                session.query(Server).filter(Server.ip == k).update({Server.state: 'false'})
                session.query(ServerDetail).filter(ServerDetail.ip == k).delete(
                    synchronize_session=False)
                exist_ip = session.query(AssetErrorLog).filter(AssetErrorLog.ip == k).first()
                error_log = str(e)
                if exist_ip:
                    session.query(AssetErrorLog).filter(AssetErrorLog.ip == k).update(
                        {AssetErrorLog.error_log: error_log})
                else:
                    new_error_log = AssetErrorLog(ip=k, error_log=error_log)
                    session.add(new_error_log)
                session.commit()
                # NOTE(review): returning here aborts processing of the
                # remaining hosts after one failure — confirm intended.
                return False
def rsync_public_key(server_list):
    """
    Push the public key to every server in ``server_list``.

    Only the successful push results are returned; failures are recorded
    via ``write_error_log``.
    """
    ins_log.read_log('info', 'rsync public key to server')
    failures = []
    successes = []
    key_sync = RsyncPublicKey()
    if key_sync.check_rsa():
        res_data = start_rsync(server_list)
        if res_data.get('status'):
            successes.append(res_data)
        else:
            failures.append(res_data)
    if failures:
        write_error_log(failures)
    return successes
# This module is meant to be imported; no standalone behaviour.
if __name__ == '__main__':
    pass
| 5,147 | 1,638 |
import unittest
from src.main.serialization.codec.codec import Codec
from src.main.serialization.codec.object.stringCodec import StringCodec
from src.main.serialization.codec.primitive.shortCodec import ShortCodec
from src.main.serialization.codec.utils.byteIo import ByteIo
from src.main.serialization.codec.utils.bytes import to_byte
from src.test.serialization.codec.test_codec import TestCodec
class TestStringCodec(TestCodec):
    """Round-trip serialization tests for StringCodec."""

    def test_wide_range(self):
        """Round-trip None, ASCII, digits, Japanese text and a large
        multi-line string mixing control characters and CJK."""
        samples = (
            None,
            "abc",
            "123",
            "ほげほげ",
            "漢字漢字",
            """ % Total\t\t\t\t % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 162 0 0 0 0 0 \t\t\t 0 --:--:-- --:--:-- --:--:-- 0
100 6 0 6 0 \r\n\0\t\t\t 0 0 0 --:--:-- 0:00:09 --:--:-- 1 漢字漢字漢字漢字漢字漢字漢字漢字
漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字
漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字
漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字漢字""",
        )
        for sample in samples:
            self.string_seria(sample)

    def string_seria(self, value: "str | None"):
        """Write *value* through a StringCodec and assert it reads back equal."""
        codec: Codec[str] = StringCodec(to_byte(12), 0)
        sink: ByteIo = self.writer()
        codec.write(sink, value)
        sink.close()
        source: ByteIo = self.reader()
        decoded = codec.read(source)
        self.assertEqual(value, decoded)
        source.close()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 1,607 | 696 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 14:58:44 2017
@author: Jonas Lindemann
"""
import numpy as np
import pyvtk as vtk
print("Reading from uvw.dat...")
field = np.loadtxt('uvw.dat', skiprows=2)

print("Converting to points and vectors")
# Columns 0-2 are x/y/z coordinates; the remaining columns are the vector
# components attached to each point.
points = field[:, 0:3].tolist()
vectors = field[:, 3:].tolist()

# The same vector field is exported twice under two attribute names.
pointdata = vtk.PointData(vtk.Vectors(vectors, name="vec1"), vtk.Vectors(vectors, name="vec2"))
data = vtk.VtkData(vtk.StructuredGrid([96, 65, 48], points), pointdata)
data.tofile('uvw', 'ascii')
| 539 | 246 |
# Copyright 2014 Florian Ludwig
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import subprocess
import atexit
import shutil
import argparse
import pkg_resources
def start(root, address='127.0.0.1', port=8000):
    """Render the bundled nginx.conf template into a temp dir and launch nginx.

    Cleanup of the temp dir and the nginx process is registered with atexit.

    :param root: document root to serve (made absolute; quotes escaped).
    :param address: address to bind to.
    :param port: TCP port to bind to.
    :return: the ``subprocess.Popen`` handle of the running nginx.
    """
    conf_template = pkg_resources.resource_string('nginc', 'nginx.conf')
    conf_template = conf_template.decode('utf-8')

    tmp = tempfile.mkdtemp(prefix='nginc')

    @atexit.register
    def cleanup_tmp():
        shutil.rmtree(tmp)

    root = os.path.abspath(root)
    # Escape double quotes so the path can sit inside a quoted nginx directive.
    root = root.replace('"', '\\"')
    config = conf_template.format(tmp=tmp, root=root, port=port, address=address)

    conf_path = os.path.join(tmp, 'nginx.conf')
    # FIX: use a context manager so the file handle is closed even if
    # write() raises (the original left the handle open on failure).
    with open(conf_path, 'w') as conf_file:
        conf_file.write(config)

    proc = subprocess.Popen(['nginx', '-c', conf_path])

    @atexit.register
    def cleanup_proc():
        try:
            proc.kill()
        except OSError:
            # Process already gone.
            pass

    return proc
def main():
    """Parse command-line options and run nginx until interrupted."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--port', type=int, default=8000,
                        help='port to bind to')
    parser.add_argument('-r', '--root', type=str, default='.',
                        help='directory to serve, defaults to current working directory')
    parser.add_argument('-a', '--address', type=str, default='127.0.0.1',
                        help='address to bind to')
    parser.add_argument('-A', action='store_true',
                        help='shortcut for --address 0.0.0.0')
    args = parser.parse_args()

    bind_address = '0.0.0.0' if args.A else args.address
    nginx = start(args.root, bind_address, args.port)
    try:
        nginx.wait()
    except KeyboardInterrupt:
        nginx.kill()
| 2,237 | 716 |
# Import important libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Read the data set
dataset = pd.read_csv('Salary_Data.csv')
# Features: every column except the last; target: the salary column (index 1).
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:, 1].values
# Splitting the dataset into the Training set and Test set (2/3 train, 1/3 test)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)
# There is no need to do feature scaling as the linear regression model takes
# care of that for us
# Fitting Simple linear regression to the training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the test set results
y_pred = regressor.predict(X_test)
""" Now we will visualize the results that we achieved so far """
# Visualising the Training set results
plt.scatter(X_train, y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title("Salary VS Experience (Training Set)")
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.show()
# Visualising the Test set results
plt.scatter(X_test, y_test, color='red')
# This is the same line as that of plt.plot(X_train, regressor.predict(X_train), color='blue')
# because the model is fitted once on the training data.
plt.plot(X_test, y_pred, color='blue')
plt.title("Salary VS Experience (Test Set)")
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.show()
# -*- coding: utf-8 -*-
"""Test for various sources
Supported sources
- Yahoo Finance
- I3Investor - KLSe
"""
import datetime as dt
import string
import unittest
from source import YahooFinanceSource, GoogleFinanceSource
class SourceTest(unittest.TestCase):
    """Smoke tests for the Yahoo / Google Finance data sources.

    NOTE(review): the source objects below are constructed at class-definition
    time, so importing this module may already hit the network — confirm this
    is intended.
    """
    _TEST_YAHOO_FINANCE_SYMBOL = '6742.KL'
    _YAHOO_FINANCE_SOURCE = YahooFinanceSource(_TEST_YAHOO_FINANCE_SYMBOL)
    _TEST_GOOGLE_FINANCE_SYMBOL = "ytlpowr"
    _GOOGLE_FINANCE_SOURCE = GoogleFinanceSource(_TEST_GOOGLE_FINANCE_SYMBOL)
    _TODAY = dt.datetime.today().strftime('%Y-%m-%d')

    # NOTE(review): bare ``@unittest.skip`` (no reason string) only works as a
    # plain decorator on Python 3.11+ — verify the target interpreter.
    @unittest.skip
    def test_yahoo_get_stock_prices(self):
        """Fetch and print Yahoo historical prices (manual smoke test)."""
        print("Getting historical prices")
        # Get historical stock data
        historical_data = self._YAHOO_FINANCE_SOURCE.get_historical_stock_data('2016-05-15', self._TODAY, 'daily')
        print(historical_data)
        # prices = historical_data[self._TEST_SYMBOL]['prices']
        # print(prices)
        # for price in prices:
        # print(price.get('close', None))
        # Get current price
        # current_price = yahoo_finance_source.get_current_price()
        # print(current_price)

    @unittest.skip
    def test_yahoo_get_dividend_history(self):
        """Fetch and print Yahoo dividend history (manual smoke test)."""
        print("Getting historical dividends")
        dividend_data = self._YAHOO_FINANCE_SOURCE.get_historical_stock_dividend_data('2010-05-15', self._TODAY,
                                                                                      'daily')
        print(dividend_data)

    @unittest.skip
    def test_genereate_a_to_z(self):
        """Print A..Z (scratch helper, not a real assertion-based test)."""
        for c in string.ascii_uppercase:
            print(c)

    def test_google_finance_get_stock_prices(self):
        """Fetch and print Google Finance historical prices."""
        print("Getting historical prices")
        historical_prices = self._GOOGLE_FINANCE_SOURCE.get_stock_historical_prices("2010-05-15", self._TODAY)
        print(historical_prices)
| 1,853 | 662 |
#!/usr/bin/env python3
from collections import namedtuple
from itertools import combinations
import knapsack
def solve_it(input_data, language="rust"):
    """Dispatch to the requested solver backend (default: rust)."""
    dispatch = {"python": solve_it_python}
    solver = dispatch.get(language, solve_it_rust)
    return solver(input_data)
def solve_it_rust(input_data):
    """Delegate solving to the compiled ``knapsack`` extension."""
    result = knapsack.solve(input_data)
    return result
Item = namedtuple("Item", ["index", "value", "weight"])
def solve_it_python(input_data):
print("running in python", file=sys.stderr)
# parse the input
lines = input_data.split("\n")
firstLine = lines[0].split()
item_count = int(firstLine[0])
capacity = int(firstLine[1])
items = []
for i in range(1, item_count + 1):
line = lines[i]
parts = line.split()
items.append(Item(i - 1, int(parts[0]), int(parts[1])))
# a trivial algorithm for filling the knapsack
# it takes items in-order until the knapsack is full
value = 0
taken = [0] * len(items)
all_combinations = (
comb
for n in range(1, len(items) + 1)
for comb in combinations(items, n)
)
small_enough = (
comb
for comb in all_combinations
if sum(item.weight for item in comb) <= capacity
)
winner = max(small_enough, key=lambda items: sum(i.value for i in items))
value = sum(i.value for i in winner)
for idx, item in enumerate(items):
if item in winner:
taken[idx] = 1
# prepare the solution in the specified output format
output_data = str(value) + " " + str(1) + "\n"
output_data += " ".join(map(str, taken))
return output_data
if __name__ == "__main__":
    import sys

    if len(sys.argv) <= 1:
        print(
            "This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)"
        )
    else:
        file_location = sys.argv[1].strip()
        with open(file_location, "r") as input_data_file:
            input_data = input_data_file.read()
        if len(sys.argv) > 2:
            # Optional second argument selects the solver backend.
            print(solve_it(input_data, language=sys.argv[2].lower().strip()))
        else:
            print(solve_it(input_data))
from django.conf import settings
from api.task.internal import InternalTask
from api.task.response import mgmt_task_response
from vms.utils import AttrDict
from vms.models import Vm
from que import TG_DC_UNBOUND, TG_DC_BOUND
class MonitoringGraph(AttrDict):
    """Dict-like monitoring graph configuration: stores the graph ``name``
    plus arbitrary keyword ``params``."""

    def __init__(self, name, **params):
        # Initialize the plain-dict machinery directly, as the original did.
        dict.__init__(self)
        self['name'] = name
        self['params'] = params
# noinspection PyAbstractClass
class MonInternalTask(InternalTask):
    """
    Internal zabbix tasks.
    """
    abstract = True

    def call(self, *args, **kwargs):
        """Run the task unless Zabbix monitoring is globally disabled."""
        if not settings.MON_ZABBIX_ENABLED:
            # Monitoring is completely disabled -> no-op.
            return None
        # Drop a parameter the underlying task does not use.
        kwargs.pop('old_json_active', None)
        return super(MonInternalTask, self).call(*args, **kwargs)
def get_mon_vms(sr=('dc',), order_by=('hostname',), **filters):
    """Return iterator of Vm objects which are monitored by an internal Zabbix."""
    filters['slavevm__isnull'] = True
    candidates = (Vm.objects.select_related(*sr)
                  .filter(**filters)
                  .exclude(status=Vm.NOTCREATED)
                  .order_by(*order_by))

    def _monitored(vm):
        # Zabbix sync must be on for the VM's DC, active for the VM itself,
        # and the VM must not be mid-deploy.
        return vm.dc.settings.MON_ZABBIX_ENABLED and vm.is_zabbix_sync_active() and not vm.is_deploying()

    return (vm for vm in candidates if _monitored(vm))
def call_mon_history_task(request, task_function, view_fun_name, obj, dc_bound,
                          serializer, data, graph, graph_settings):
    """Invoke ``task_function`` for a monitoring-history graph and wrap the
    outcome in ``mgmt_task_response()``.

    ``graph_settings`` supplies the graph description, zabbix items, history
    type and optional item-search callback; ``serializer.object`` carries the
    validated request parameters (``since``/``until`` and friends).
    """
    _apiview_ = {
        'view': view_fun_name,
        'method': request.method,
        'hostname': obj.hostname,
        'graph': graph,
        'graph_params': serializer.object.copy(),
    }
    # Response payload: request params enriched with graph metadata.
    result = serializer.object.copy()
    result['desc'] = graph_settings.get('desc', '')
    result['hostname'] = obj.hostname
    result['graph'] = graph
    result['options'] = graph_settings.get('options', {})
    result['update_interval'] = graph_settings.get('update_interval', None)
    result['add_host_name'] = graph_settings.get('add_host_name', False)
    # Task-ID lock key; since/until are rounded to the nearest hundred so
    # near-identical requests share one lock.
    tidlock = '%s obj:%s graph:%s item_id:%s since:%d until:%d' % (task_function.__name__,
                                                                   obj.uuid, graph, serializer.item_id,
                                                                   round(serializer.object['since'], -2),
                                                                   round(serializer.object['until'], -2))
    item_id = serializer.item_id
    if item_id is None:
        items = graph_settings['items']
    else:
        # Items are %-format templates parametrized by the requested item id.
        item_dict = {'id': item_id}
        items = [i % item_dict for i in graph_settings['items']]
    if 'items_search_fun' in graph_settings:
        # noinspection PyCallingNonCallable
        items_search = graph_settings['items_search_fun'](graph_settings, item_id)
    else:
        items_search = None
    history = graph_settings['history']
    # for VM the task_function is called without task group value because it's DC bound
    if dc_bound:
        tg = TG_DC_BOUND
    else:
        tg = TG_DC_UNBOUND
    ter = task_function.call(request, obj.owner.id, (obj.uuid, items, history, result, items_search),
                             tg=tg, meta={'apiview': _apiview_}, tidlock=tidlock)
    # NOTE: cache_result=tidlock, cache_timeout=60)
    # Caching is disabled here, because it makes no real sense.
    # The latest graphs must be fetched from zabbix and the older are requested only seldom.
    return mgmt_task_response(request, *ter, obj=obj, api_view=_apiview_,
                              dc_bound=dc_bound, data=data)
| 3,765 | 1,128 |
"""
A modular, runtime re-loadable database package!
A thin wrapper around the Mongo DB library 'motor' with helper functions to abstract away
some more complex database operations.
"""
import sys as __sys
import importlib as __importlib
import motor.motor_asyncio
import asyncio
# names of the python modules/packages (folder/file name with no extension)
__all__ = ['datatypes', 'character', 'world']

### Runtime Module Reloading support #############################
##################################################################
__importlib.invalidate_caches()
# Re-load any submodule that is already present in this namespace so code
# changes take effect without restarting the interpreter.
for __mod in __all__:
    if __mod in dir():
        __importlib.reload(__sys.modules[f"{__name__}.{__mod}"])
# Drop the loop variable so it does not leak from the package namespace.
# NOTE(review): this assumes ``__all__`` is non-empty; ``del`` would raise
# NameError otherwise.
del __mod
##################################################################

from . import *  # load all modules with filenames defined by '__all__'
class Database:
    """
    Holds references and initialization variables related to the database connection and all
    helper methods.

    The class variables listed below are database and collection names; change
    them to better fit the MUD.
    """
    # NOTE: there is a newline separating logical blocks, that is, collections inside the database
    # are closely under each other, a blank line separates each of them.
    __user_database_name = "test-users"  # the database name where all user data is stored
    __character_collection_name = "test-characters"  # collection where individual characters and login is stored
    __account_collection_name = "test-accounts"  # collection where individual player accounts are stored

    __world_database_name = "test-world"  # the database name where all world data is kept
    __tutorial_collection_name = "tutorial"  # the name of the collection where the tutorial is stored

    # Re-export of the datatypes submodule for convenient access.
    datatypes = datatypes

    def __init__(self, database_uri: str = 'mongodb://localhost:27017') -> None:
        """
        Initialize the asynchronous client for the database inside the running eventloop.

        Due to the import happening before the event loop being established
        this init function must be called AFTER the main event loop is created to ensure it gets
        the correct and running event loop is being passed on.

        I have had "running outside main event loop" errors so please keep this in mind.
        (That is, ensure this is called from inside the asyncio.run() function and not before it runs)
        """
        self.uri = database_uri
        # TODO: If issues arise, bump up the max pool size, each change stream cursor makes 1 connection
        self.client = motor.motor_asyncio.AsyncIOMotorClient(database_uri,
                                                             io_loop=asyncio.get_running_loop(),
                                                             maxPoolSize=10000)
        # add a thin layer on the databases/collections to allow direct manipulation
        self.character = self.client[self.__user_database_name][self.__character_collection_name]
        self.world = self.client[self.__world_database_name]
        # add methods to abstract away complex methods and database operations
        self.character_helper_methods = character.Character(self.character)
        self.world_helper_methods = world.World(self.world)
from helper import unittest, PillowTestCase, hopper
from test_imageqt import PillowQtTestCase, PillowQPixmapTestCase
from PIL import ImageQt
if ImageQt.qt_is_installed:
from PIL.ImageQt import QPixmap
class TestToQPixmap(PillowQPixmapTestCase, PillowTestCase):
    """Conversion tests for ``ImageQt.toqpixmap``."""

    def test_sanity(self):
        """Convert hopper in several modes to QPixmap and save each to PNG."""
        PillowQtTestCase.setUp(self)

        for mode in ('1', 'RGB', 'RGBA', 'L', 'P'):
            pixmap = ImageQt.toqpixmap(hopper(mode))
            self.assertIsInstance(pixmap, QPixmap)
            self.assertFalse(pixmap.isNull())

            # Test saving the file
            out_path = self.tempfile('temp_{}.png'.format(mode))
            pixmap.save(out_path)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 714 | 249 |
from rest_framework import serializers
from hood.models import UserProfile
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing the public fields of a UserProfile."""

    class Meta:
        model = UserProfile
        # BUG FIX: 'picture' was listed twice in the original fields tuple.
        fields = ('bio', 'birth_date', 'picture', 'email')
from .lexer import Lexer
class ListLexer(Lexer):
    """Lexer for list lines: unordered ('* ') and ordered ('1. ') markers."""

    tokens = Lexer.tokens

    # BUG FIX: the ordered-list pattern used an unescaped '.', which matches
    # ANY character (so '1x' would be tokenized as a list marker); the period
    # must be a literal '\.'.
    fingerprints = [
        (r'(?P<UL>^\*( +)?)', 'UL'),
        (r'(?P<OL>^\d+\.( +)?)', 'OL'),
    ]

    def __init__(self):
        super().__init__()

    @_(r'^\*( +)?')
    def UL(self, t):
        return t

    @_(r'^\d+\.( +)?')
    def OL(self, t):
        return t

    @_(r'.')
    def SPAN(self, t):
        return t
# Generated by Django 2.0.2 on 2018-08-20 14:44
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax job field constraints and fix the languages array default."""

    dependencies = [
        ('job', '0002_auto_20180820_0901'),
    ]

    operations = [
        migrations.AlterField(
            model_name='job',
            name='experience',
            field=models.CharField(blank=True, max_length=130, null=True),
        ),
        migrations.AlterField(
            model_name='job',
            name='industries',
            field=models.ManyToManyField(blank=True, to='industry.Industry'),
        ),
        migrations.AlterField(
            model_name='job',
            name='languages',
            # BUG FIX: ``default=[]`` is a mutable default shared by every row;
            # Django requires a callable (``list``) so each instance gets a
            # fresh list.
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=70, null=True), default=list, size=None),
        ),
    ]
| 879 | 286 |
import functools
import math
import operator
class Coordinate:
    """A geographic point with latitude/longitude in degrees."""

    def __init__(self, lat, lng):
        """
        :param lat: latitude in degrees; must lie in [-90, 90].
        :param lng: longitude in degrees; must lie in [-180, 180].
        :raises ValueError: when either value is out of range.
        """
        f_lat = float(lat)
        # BUG FIX: latitude spans [-90, 90]; the original validated against
        # +/-180, accepting geographically impossible latitudes.
        if math.fabs(f_lat) > 90:
            raise ValueError(f'The latitude must be between -90 and 90 degrees, but was {f_lat}!')
        f_lng = float(lng)
        if math.fabs(f_lng) > 180:
            raise ValueError(f'The longitude must be between -180 and 180 degrees, but was {f_lng}!')
        self.lat = f_lat
        self.lng = f_lng

    def __hash__(self) -> int:
        # XOR the component hashes so equal coordinates hash equally.
        hashes = map(hash, (self.lat, self.lng))
        return functools.reduce(operator.xor, hashes)

    def __str__(self) -> str:
        return f"({self.lat}, {self.lng})"

    def __eq__(self, other: object) -> bool:
        if self is other:
            return True
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False
| 900 | 312 |
from os import environ
from requests import post
class OdinLogger:
    """Posts comma-separated stat rows to a local Odin stats endpoint."""

    @classmethod
    def log(cls, type, desc, value, id, timestamp):
        """Send one stat sample; returns the HTTP status code."""
        payload = ",".join((type, desc, str(value), id, str(timestamp)))
        response = post(url="http://localhost:3939/stats/add", data=payload)
        return response.status_code
| 315 | 94 |
# coding=utf-8
import numpy as np
from itertools import combinations_with_replacement
from my_log import logging
def load_transcription(transcription_file_name):
    """
    Read a tab-separated word/phones lexicon file.

    :return: a list of tuples:
        [
            (word: string, phones: list),
            ...
        ]
    """
    transcription_list = []
    with open(transcription_file_name, "r") as transcription_file:
        for raw_line in transcription_file:
            entry = raw_line.strip()
            fields = entry.split("\t")
            word = fields[0]
            phones = fields[1].split(" ")
            transcription_list.append((word, phones))
    logging.debug("transcription_list:")
    logging.debug(transcription_list)
    return transcription_list
def load_grapheme_dict(transcription_list):
    """
    :return: a dictionary of grapheme-id pairs like: {"a": 0, "b": 1, "c": 2, ...}
    """
    graphemes = set()
    for word, _ in transcription_list:
        graphemes |= set(word)
    grapheme_dict = {g: idx for idx, g in enumerate(list(graphemes))}
    logging.debug("grapheme_dict:")
    logging.debug(grapheme_dict)
    return grapheme_dict
def load_phoneme_dict(transcription_list):
    """
    :return: a dictionary mapping each phoneme (plus the epsilon marker "*")
        to an id, e.g. {"ey1": 0, "b": 1, "s": 2, ..., "*": n}
    """
    phonemes = set()
    for _, phones in transcription_list:
        phonemes |= set(phones)
    ordered = list(phonemes)
    ordered.append("*")
    phoneme_dict = {p: idx for idx, p in enumerate(ordered)}
    logging.debug("phoneme_dict:")
    logging.debug(phoneme_dict)
    return phoneme_dict
def introduce_epsilon_phone_seq(word, phones):
    """
    Generate every word/phones pairing obtained by inserting epsilon ("*")
    markers into ``phones`` until it matches the length of ``word``.

    :param word: the grapheme string.
    :param phones: list of phone strings (should not be longer than ``word``).
    :return: a list of (word, padded_phones) tuples, one per insertion choice.
    """
    missing = len(word) - len(phones)
    if missing < 0:
        logging.error("Word length is less than phones'!")
        logging.info(word + "-" + str(phones))
    pair_list = []
    for slots in combinations_with_replacement(range(len(phones) + 1), missing):
        padded = phones.copy()
        # Each earlier insertion shifts later positions right by one.
        for offset, slot in enumerate(slots):
            padded.insert(slot + offset, "*")
        pair_list.append((word, padded))
    return pair_list
def is_prob_matrix_equal(last_prob_matrix, new_prob_matrix, epsilon):
    """
    Compare two probability matrices by mean-square error.

    :param last_prob_matrix: numpy array.
    :param new_prob_matrix: numpy array.
    :param epsilon: tolerance on the mean-square error.
    :return: True if mean-square error <= epsilon, else False.
    """
    # BUG FIX: the original averaged the *signed* differences, so opposite
    # deviations cancelled and unequal matrices could compare as "equal".
    # The docstring promises mean-square error; compute exactly that.
    mse = np.mean(np.square(np.subtract(last_prob_matrix, new_prob_matrix)))
    return bool(mse <= epsilon)
def path_to_string(path_list):
    """
    Render a DTW alignment path as one output line.

    :param path_list: list of (grapheme, phone) steps, e.g.
        [("a", "ey1"), ("b", "b_iy1"), ("c", "s_iy1")]
    :return: a string like "abc\tey1 b_iy1 s_iy1\n".
    """
    graphemes = [step[0] for step in path_list]
    phones = [step[1] for step in path_list]
    return "".join(graphemes) + "\t" + " ".join(phones) + "\n"
class Aligner:
def __init__(self, training_file_name, test_data_file_name):
self.training_data_file_name = training_file_name
self.test_data_file_name = test_data_file_name
self.transcription_list = list()
self.grapheme_dict = dict()
self.phoneme_dict = dict()
self.prob_matrix = np.zeros(shape=(1, 1))
pass
def init_prob_matrix(self):
    """Allocate and return a zeroed (grapheme x phoneme) probability matrix."""
    shape = (len(self.grapheme_dict), len(self.phoneme_dict))
    self.prob_matrix = np.zeros(shape=shape, dtype=np.float32)
    logging.debug("prob_matrix:")
    logging.debug(self.prob_matrix)
    return self.prob_matrix
def reset_prob_matrix(self, align_paths):
    """
    Re-estimate the probability matrix from alignment counts.

    Each (grapheme, phoneme) step of every path increments the matching
    matrix cell, then the matrix is row-normalized.

    :param align_paths: list of alignment paths, each a list of
        (grapheme, phoneme) tuples.
    :return: the updated probability matrix.
    """
    logging.debug("before reset prob matrix:")
    logging.debug(self.prob_matrix)
    for path in align_paths:
        for grapheme, phoneme in path:
            row = self.get_grapheme_id(grapheme)
            col = self.get_phoneme_id(phoneme)
            self.prob_matrix[row][col] += 1
    self.normalize_prob_matrix()
    logging.debug("after reset prob matrix:")
    logging.debug(self.prob_matrix)
    return self.prob_matrix
def normalize_prob_matrix(self):
    """
    Row-normalize the probability matrix so each grapheme's match
    probabilities sum to 1.

    :return: the normalized probability matrix.
    """
    row_sums = np.sum(self.prob_matrix, axis=1)
    # A single broadcast division replaces the original O(rows*cols) Python
    # loop; each element is divided by its row sum, exactly as before.
    self.prob_matrix /= row_sums[:, np.newaxis]
    logging.debug("prob_matrix:")
    logging.debug(self.prob_matrix)
    return self.prob_matrix
def get_grapheme_id(self, grapheme):
g_id = self.grapheme_dict[grapheme]
return g_id
def get_phoneme_id(self, phoneme):
p_id = self.phoneme_dict[phoneme]
return p_id
def distance(self, grapheme, phoneme):
"""
Calculate the distance(match probability) between a grapheme and a phoneme.
:param grapheme: a string like: a
:param phoneme: a string like: ey1
:return: probability of grapheme match phoneme
"""
g_id = self.get_grapheme_id(grapheme)
p_id = self.get_phoneme_id(phoneme)
distance = self.prob_matrix[g_id][p_id]
return distance
def init_prob_of_grapheme_match_phoneme(self):
    """
    Initialize prob_matrix: count every grapheme/phone association over all
    possible epsilon placements in the phone sequences, then normalize.

    :return: the initialized probability matrix.
    """
    # BUG FIX: the original read the bare global ``training_data_file_name``
    # (NameError unless a same-named global happens to exist); the instance
    # attribute set in __init__ is the intended source.
    self.transcription_list = load_transcription(self.training_data_file_name)
    self.grapheme_dict = load_grapheme_dict(self.transcription_list)
    self.phoneme_dict = load_phoneme_dict(self.transcription_list)
    self.init_prob_matrix()
    align_paths = []
    for (word, phones) in self.transcription_list:
        # Introduce epsilon into the phone list at every possible position.
        pair_list = introduce_epsilon_phone_seq(word, phones)
        for (w, p) in pair_list:
            # align_path, _ = self.dynamic_time_wrapping(w, p)
            # Initial pass pairs graphemes and phones positionally.
            align_path = []
            for i in range(len(w)):
                align_path.append((w[i], p[i]))
            align_paths.append(align_path)
    self.reset_prob_matrix(align_paths)
    return self.prob_matrix
def dynamic_time_wrapping(self, word, phones):
"""
Dynamic time wrapping for word-phones pair.
:param word: a string represent a word
:param phones: a list of string represent some phones
:return: a list of tuple represent the best path, like:
[
("a", "ey1"),
("b", "b_iy1"),
...,
("c", "s_iy1"),
]
"""
g_count = len(word)
p_count = len(phones)
frame_dist_matrix = np.zeros(shape=(g_count, p_count), dtype=np.float32) # Frame distance matrix.
for i in range(g_count):
for j in range(p_count):
frame_dist_matrix[i][j] = self.distance(word[i], phones[j])
pass
pass
acc_dist_matrix = np.zeros(shape=(g_count, p_count), dtype=np.float32) # Accumulated distance matrix.
acc_dist_matrix[0][0] = frame_dist_matrix[0][0]
"""Dynamic programming to compute the accumulated probability."""
for i in range(1, g_count):
for j in range(p_count):
d1 = acc_dist_matrix[i-1][j]
if j > 0:
d2 = acc_dist_matrix[i-1][j-1]
else:
d2 = 0
acc_dist_matrix[i][j] = frame_dist_matrix[i][j] + max([d1, d2])
pass
pass
prob_value = acc_dist_matrix[g_count-1][p_count-1]
"""Trace back to find the best path with the max accumulated probability."""
align_path = []
i, j = g_count-1, p_count-1
while 1:
align_path.append((word[i], phones[j]))
if i == 0 & j == 0:
break
if i > 0:
d1 = acc_dist_matrix[i - 1][j]
if j > 0:
d2 = acc_dist_matrix[i - 1][j - 1]
else:
d2 = 0
else:
d1 = 0
d2 = 0
candidate_steps = [(i-1, j), (i-1, j-1)]
candidate_prob = [d1, d2]
i, j = candidate_steps[candidate_prob.index(max(candidate_prob))]
pass
align_path.reverse()
return align_path, prob_value
def e_step(self):
"""
Expectation step that computes a optimized path with maximum probability for each word-phones pair.
:return: a list of align paths, like:
[
[("a", "ey1"), ("b", "b_iy10), ("c", "s_iy0"), ],
[("a", "ey1"), ("b", "b_iy10), ],
[("a", "ey1"), ("b", "b_iy10), ("c", "s_iy0"), ],
[("a", "ey1"), ("b", "b_iy10), ("c", "s_iy0"), ("d", "d_iy0"), ],
]
"""
align_paths = []
for (word, phones) in self.transcription_list:
pair_list = introduce_epsilon_phone_seq(word, phones)
logging.debug("pair list:")
logging.debug(pair_list)
candidate_path_list = [] # Construct a candidate path list for all word-phones
for (w, p) in pair_list:
align_path, prob_value = self.dynamic_time_wrapping(w, p)
candidate_path_list.append((align_path, prob_value))
candidate_path_list.sort(key=lambda x: x[1], reverse=True) # Sort by probability
align_paths.append(candidate_path_list[0][0]) # Pick up the promising path with the biggest probability.
pass
return align_paths
def m_step(self, align_paths):
"""
Maximum likelihood step that resets the frame prob matrix according to align paths generated by e_step.
:param align_paths: a list of align paths generated by e_step function.
"""
self.reset_prob_matrix(align_paths)
pass
def train(self, iter_num, epsilon):
"""
Train prop matrix until iter_num or the difference of adjacent iteration results is no more than epsilon.
:param iter_num:
:param epsilon:
"""
self.init_prob_of_grapheme_match_phoneme()
for i in range(iter_num):
logging.info("Training epoch:" + str(i))
last_prob_matrix = self.prob_matrix.copy()
align_paths = self.e_step() # Expectation step
self.m_step(align_paths) # Maximum step
# if self.is_prob_matrix_equal(last_prob_matrix, self.prob_matrix, epsilon):
# break
pass
pass
    def align(self):
        """
        Align the test data file by current model(frame prob matrix) trained already.

        For each word-phones pair in the test file, scores every epsilon
        placement with DTW and writes the best-scoring alignment to the
        module-level ``output_file_name``.
        NOTE(review): relies on the module globals ``output_file_name``,
        ``load_transcription``, ``introduce_epsilon_phone_seq`` and
        ``path_to_string`` -- an instance attribute for the output path may
        have been intended; confirm.
        :return:
        """
        transcription_list = load_transcription(self.test_data_file_name)
        result_list = []
        for (word, phones) in transcription_list:
            pair_list = introduce_epsilon_phone_seq(word, phones)
            candidate_path_list = [] # Construct a candidate path list for all possible word-phones pairs
            for (w, p) in pair_list:
                align_path, prob_value = self.dynamic_time_wrapping(w, p)
                candidate_path_list.append((align_path, prob_value))
            candidate_path_list.sort(key=lambda x: x[1], reverse=True) # Sort by probability
            result_string = path_to_string(candidate_path_list[0][0])
            result_list.append(result_string) # Pick up the promising path with the biggest probability.
        with open(output_file_name, "w") as output_file:
            # NOTE: writelines does not append newlines; presumably
            # path_to_string terminates each line itself -- confirm.
            output_file.writelines(result_list)
            pass
        pass
    pass
if __name__ == '__main__':
    # NOTE: these module-level names are also read as globals by some Aligner
    # methods (training_data_file_name, output_file_name), so renaming them
    # would break those methods.
    training_data_file_name = "assets/mini_training_data.txt"
    test_data_file_name = "assets/mini_test_data.txt"
    output_file_name = "assets/result.txt"
    iter_num = 5  # maximum EM iterations
    epsilon = 0   # convergence threshold (currently unused by train())
    aligner = Aligner(training_data_file_name, test_data_file_name)
    aligner.train(iter_num, epsilon)
    aligner.align()
| 14,673 | 4,720 |
"""
STAT 656 HW-10
@author:Lee Rainwater
@heavy_lifting_by: Dr. Edward Jones
@date: 2020-07-29
"""
import pandas as pd
# Classes provided from AdvancedAnalytics ver 1.25
from AdvancedAnalytics.Text import text_analysis
from AdvancedAnalytics.Text import sentiment_analysis
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from AdvancedAnalytics.Text import text_plot
def heading(headerstring):
    """
    Print *headerstring* centered inside a 70-character banner of asterisks.

    Parameters
    ----------
    headerstring : string
        String that you wish to center.

    Returns
    -------
    Returns: None.
    """
    width = 70  # text width
    left = width // 2 - len(headerstring) // 2 - 1
    right = width - left - len(headerstring) - 2
    banner = '*' * width
    print('\n' + banner)
    print('{} {} {}'.format('*' * left, headerstring, '*' * right))
    print(banner)
heading("READING DATA SOURCE...")
# Set Pandas Columns Width for Excel Columns
pd.set_option('max_colwidth', 32000)
df = pd.read_excel("hotels.xlsx")
text_col = 'Review' #Identify the Data Frame Text Target Column Name
# Check if any text was truncated: reviews longer than the pandas column
# width would be silently cut off, so report how many exceed it.
pd_width = pd.get_option('max_colwidth')
maxsize = df[text_col].map(len).max() # Maps text_col onto len() and finds max()
n_truncated = (df[text_col].map(len) > pd_width).sum()
print("\nTEXT LENGTH:")
print("{:<17s}{:>6d}".format(" Max. Accepted", pd_width))
print("{:<17s}{:>6d}".format(" Max. Observed", maxsize))
print("{:<17s}{:>6d}".format(" Truncated", n_truncated))
# Initialize TextAnalytics and Sentiment Analysis.
ta = text_analysis(synonyms=None, stop_words=None, pos=False, stem=False)
# n_terms=2 only displays text containing 2 or more sentiment words for
# the list of the highest and lowest sentiment strings
sa = sentiment_analysis(n_terms=2)
heading("CREATING TOKEN COUNT MATRIX...")
# Create Word Frequency by Review Matrix using Custom Sentiment.
# The vocabulary is restricted to the sentiment dictionary, so only
# sentiment-bearing terms are counted here.
cv = CountVectorizer(max_df=1.0, min_df=1, max_features=None, \
                     ngram_range=(1,2), analyzer=sa.analyzer, \
                     vocabulary=sa.sentiment_word_dic)
stf = cv.fit_transform(df[text_col]) # Return document-term matrix
sterms = cv.get_feature_names() # Map feature indices to feature names
heading("CALCULATE AND STORE SENTIMENT SCORES...")
# Calculate and Store Sentiment Scores into DataFrame "s_score"
s_score = sa.scores(stf, sterms)
n_reviews = s_score.shape[0]            # one row per review
n_sterms = s_score['n_words'].sum()     # total sentiment words found
max_length = df['Review'].apply(len).max()
if n_sterms == 0 or n_reviews == 0:
    print("No sentiment terms found.")
# BUG FIX: the original divided by n_reviews unconditionally, raising
# ZeroDivisionError on an empty corpus despite the check above; guard it.
p = s_score['n_words'].sum() / n_reviews if n_reviews > 0 else 0.0
print('{:-<24s}{:>6d}'.format("\nMaximum Text Length", max_length))
print('{:-<23s}{:>6d}'.format("Total Reviews", n_reviews))
print('{:-<23s}{:>6d}'.format("Total Sentiment Terms", n_sterms))
print('{:-<23s}{:>6.2f}'.format("Avg. Sentiment Terms", p))
# s_score['sentiment'] = s_score['sentiment'].map("{:,.2f}".format)
df = df.join(s_score)
print("\n", df[['hotel', 'sentiment', 'n_words']], "\n")
print(df.groupby(['hotel']).mean())
heading("GENERATING TOTAL WORD CLOUD FOR CORPUS...")
# Full-vocabulary token counts (ta.analyzer), not restricted to sentiment terms.
tcv = CountVectorizer(max_df=1.0, min_df=1, max_features=None, \
                      ngram_range=(1,2), analyzer=ta.analyzer)
tf = tcv.fit_transform(df[text_col])
terms = tcv.get_feature_names()
td = text_plot.term_dic(tf, terms)
text_plot.word_cloud_dic(td, max_words=200)
heading("GENERATING SENTIMENT WORD CLOUD FOR CORPUS...")
# Tally per-term sentiment-word counts across the whole corpus.
corpus_sentiment = {}
n_sw = 0  # running total of sentiment-word occurrences
for i in range(n_reviews):
    # Iterate over the terms with nonzero scores."stf" is a sparse matrix
    term_list = stf[i].nonzero()[1]
    if len(term_list)>0:
        for t in np.nditer(term_list):
            score = sa.sentiment_dic.get(sterms[t])
            if score != None:
                n_sw += stf[i,t]
                # Accumulate this term's count into the corpus-wide dictionary.
                current_count = corpus_sentiment.get(sterms[t])
                if current_count == None:
                    corpus_sentiment[sterms[t]] = stf[i,t]
                else:
                    corpus_sentiment[sterms[t]] += stf[i,t]
# Word cloud for the Sentiment Words found in the Corpus
text_plot.word_cloud_dic(corpus_sentiment, max_words=200)
n_usw = len(corpus_sentiment)
print("\nSENTIMENT TERMS")
print("------------------")
print("{:.<10s}{:>8d}".format("Unique",n_usw))
print("{:.<10s}{:>8d}".format("Total", n_sw ))
print("------------------")
heading("GENERATING TOTAL WORD CLOUD FOR BELLAGIO...")
# Same full-vocabulary cloud as above, restricted to Bellagio reviews.
tcv = CountVectorizer(max_df=1.0, min_df=1, max_features=None, \
                      ngram_range=(1,2), analyzer=ta.analyzer)
tf = tcv.fit_transform(df[df['hotel']=='Bellagio'][text_col])
terms = tcv.get_feature_names()
td = text_plot.term_dic(tf, terms)
text_plot.word_cloud_dic(td, max_words=200)
heading("GENERATING SENTIMENT WORD CLOUD FOR BELLAGIO...")
# Sentiment-term counts restricted to Bellagio reviews only.
bcv = CountVectorizer(max_df=1.0, min_df=1, max_features=None, \
                      ngram_range=(1,2), analyzer=sa.analyzer, \
                      vocabulary=sa.sentiment_word_dic)
bstf = bcv.fit_transform(df[df['hotel']=='Bellagio'][text_col]) # Return document-term matrix
bsterms = bcv.get_feature_names() # Map feature indices to feature names
heading("CALCULATE AND STORE SENTIMENT SCORES FOR BELLAGIO...")
# Calculate and Store Sentiment Scores into DataFrame "s_score"
bs_score = sa.scores(bstf, bsterms)
bn_reviews = bs_score.shape[0]
bn_sterms = bs_score['n_words'].sum()
# NOTE(review): recomputed over ALL hotels, not just Bellagio -- confirm intended.
max_length = df['Review'].apply(len).max()
if bn_sterms == 0 or bn_reviews == 0:
    print("No sentiment terms found.")
# Tally per-term sentiment-word counts for Bellagio (same logic as the
# corpus-wide tally above, reusing/overwriting corpus_sentiment and n_sw).
corpus_sentiment = {}
n_sw = 0
for i in range(bn_reviews):
    # Iterate over the terms with nonzero scores."stf" is a sparse matrix
    term_list = bstf[i].nonzero()[1]
    if len(term_list)>0:
        for t in np.nditer(term_list):
            score = sa.sentiment_dic.get(bsterms[t])
            if score != None:
                n_sw += bstf[i,t]
                current_count = corpus_sentiment.get(bsterms[t])
                if current_count == None:
                    corpus_sentiment[bsterms[t]] = bstf[i,t]
                else:
                    corpus_sentiment[bsterms[t]] += bstf[i,t]
# Word cloud for the Sentiment Words found in the Corpus
text_plot.word_cloud_dic(corpus_sentiment, max_words=200)
n_usw = len(corpus_sentiment)
print("\nBELLAGIO SENTIMENT TERMS")
print("------------------")
print("{:.<10s}{:>8d}".format("Unique",n_usw))
print("{:.<10s}{:>8d}".format("Total", n_sw ))
print("------------------")
| 6,483 | 2,419 |
class Timing:
    """Abstract two-way mapping between beat numbers and wall-clock seconds.

    Subclasses must override both conversion methods.
    """

    def beat_to_seconds(self, beat_number: float) -> float:
        """Map a beat number (counted from 0) to a time in seconds.

        :param beat_number: Beat number counted from 0.
        :return: Time in seconds.
        """
        raise NotImplementedError

    def seconds_to_beat(self, time: float) -> float:
        """Map a time in seconds back to a beat number (counted from 0).

        :param time: Time in seconds.
        :return: Beat number counted from 0.
        """
        raise NotImplementedError
| 497 | 136 |
import os

import numpy as np
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchsummary import summary

from meditorch.nn import Trainer
from meditorch.nn.models import UNetResNet
from meditorch.utils.plot import plot_image_truemask_predictedmask

import EDD
from util import resize_images
np.random.seed(42)  # global seed so the random split below is reproducible
def get_edd_loader(path,validation_split=.25,shuffle_dataset=True):
    """Build train/val DataLoaders over the EDD dataset with a random split.

    :param path: dataset root passed to the EDD dataset constructor
    :param validation_split: fraction of samples held out for validation
    :param shuffle_dataset: shuffle indices before splitting
    :return: dict with 'train' and 'val' DataLoaders (batch_size=4)
    """
    # NOTE(review): `EDD` is imported as a module (`import EDD`), so calling
    # EDD(path) raises TypeError; this likely should be EDD.EDD(path) or
    # `from EDD import EDD` -- confirm against the EDD module.
    dataset = EDD(path)#instantiating the data set.
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    if shuffle_dataset :
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    # The samplers draw disjoint index subsets, so the loaders never overlap.
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    loader={
        'train':DataLoader(dataset, batch_size=4, sampler=train_sampler),
        'val':DataLoader(dataset, batch_size = 4,sampler=valid_sampler)
    }
    return loader
def main():
    """Prepare the EDD2020 dataset, train a UNet+ResNet model, and plot a
    batch of validation predictions."""
    np.random.seed(42)
    # Set up the data set.
    # BUG FIX: the original used IPython "!mkdir" shell magics (a syntax
    # error in a plain .py file) and called an undefined `resize_my_images`;
    # replaced with portable os.makedirs and the imported `resize_images`.
    os.makedirs('./EDD2020/resized_masks/', exist_ok=True)
    resize_images('./EDD2020/EDD2020_release-I_2020-01-15/masks/', './EDD2020/resized_masks/', is_masks=True)
    os.makedirs('./EDD2020/resized_images/', exist_ok=True)
    resize_images('./EDD2020/EDD2020_release-I_2020-01-15/originalImages/', './EDD2020/resized_images/', is_masks=False)
    loader = get_edd_loader('./EDD2020/', validation_split=.25, shuffle_dataset=True)
    # UNet decoder with a ResNet encoder, 5 output classes.
    model = UNetResNet(in_channel=3, n_classes=5)
    optimizer_func = optim.Adam(model.parameters(), lr=1e-4)
    scheduler = lr_scheduler.StepLR(optimizer_func, step_size=10, gamma=0.1)
    trainer = Trainer(model, optimizer=optimizer_func, scheduler=scheduler)
    # Training
    trainer.train_model(loader, num_epochs=30)
    # Predict for only a batch of 4 from the val set and visualise.
    images, masks = next(iter(loader['val']))
    preds = trainer.predict(images)
    plot_image_truemask_predictedmask(images, masks, preds)
if __name__ == '__main__':
    main()
| 2,113 | 802 |
"""Query Object for all read-only queries to the Real Property table
"""
import os
import logging
from time import time
from typing import List
import asyncio
import aiohttp
import aiofiles
import databases
from PIL import Image
import sqlalchemy
from sqlalchemy.sql import select, func
import geoapi.common.spatial_utils as spatial_utils
import geoapi.common.decorators as decorators
from geoapi.common.exceptions import ResourceNotFoundError, ResourceMissingDataError
from geoapi.common.json_models import RealPropertyOut, GeometryAndDistanceIn, StatisticsOut
class RealPropertyQueries():
    """Repository for all DB Query Operations.
    Different from repository for all transaction operations."""
    def __init__(self, connection: databases.Database,
                 real_property_table: sqlalchemy.Table):
        # The async connection and the mapped table are injected, keeping
        # this repository storage-configuration-agnostic.
        self._connection = connection
        self._real_property_table = real_property_table
        self.logger = logging.getLogger(__name__)
    async def get_all(self) -> List[RealPropertyOut]:
        """Gets all the records
        TODO: add paging
        Raises:
            ResourceNotFoundError: if the table is empty
        Returns:
            List[RealPropertyOut]: List of outgoing geojson based objects
        """
        select_query = self._real_property_table.select()
        db_rows = await self._connection.fetch_all(select_query)
        if not db_rows:
            msg = "No Properties found!"
            self.logger.error(msg)
            raise ResourceNotFoundError(msg)
        out_list = [RealPropertyOut.from_db(db_row) for db_row in db_rows]
        return out_list
    async def get(self, property_id: str) -> RealPropertyOut:
        """Gets a single record
        Args:
            property_id (str): property id to search for
        Raises:
            ResourceNotFoundError: if property id not found
        Returns:
            RealPropertyOut: Outgoing geojson based object
        """
        select_query = self._real_property_table.select().where(
            self._real_property_table.c.id == property_id)
        db_row = await self._connection.fetch_one(select_query)
        if not db_row:
            msg = "Property not found - id: {}".format(property_id)
            self.logger.error(msg)
            raise ResourceNotFoundError(msg)
        return RealPropertyOut.from_db(db_row)
    async def find(self, geometry_distance: GeometryAndDistanceIn) -> List[str]:
        """Searches for properties within a given distance of a geometry
        Args:
            geometry_distance (GeometryAndDistanceIn): geojson based geometry and distance in object
        Raises:
            ResourceNotFoundError: if no properties found
        Returns:
            List[str]: list of property ids
        """
        # Buffer the search geometry by the requested distance, then match
        # any property whose geocode intersects that buffered zone.
        geoalchemy_element_buffered = spatial_utils.buffer(
            geometry_distance.location_geo, geometry_distance.distance)
        select_query = select([self._real_property_table.c.id]).where(
            self._real_property_table.c.geocode_geo.ST_Intersects(
                geoalchemy_element_buffered))
        db_rows = await self._connection.fetch_all(select_query)
        if not db_rows:
            msg = "No Properties found!"
            self.logger.error(msg)
            raise ResourceNotFoundError(msg)
        out_list = [db_row["id"] for db_row in db_rows]
        return out_list
    # helpers for parallel running of queries
    async def _query_parcels(self, select_query_parcels):
        # Scalar result: summed parcel area within the buffered zone.
        parcel_area = await self._connection.fetch_val(select_query_parcels)
        return parcel_area
    async def _query_buildings(self, select_query_buildings):
        # All building geometries intersecting the buffered zone.
        db_rows = await self._connection.fetch_all(select_query_buildings)
        return db_rows
    async def statistics(self, property_id: str, distance: int) -> StatisticsOut:
        """Gets statistics for data near a property
        TODO: replace the property geocode with a redis geocode cache
        and maintain db sync with postgres with a redis queue. Also, refactor
        to reduce 'too many locals'
        Args:
            property_id (str): property id
            distance (int): search radius in meters
        Raises:
            ResourceNotFoundError: if no property found for the given property id
            ResourceMissingDataError: if given property does not have geometry info to locate itself
        Returns:
            StatisticsOut: A summary statistics outgoing object
        """
        # get property geocode
        select_query = select([
            self._real_property_table.c.geocode_geo
        ]).where(self._real_property_table.c.id == property_id)
        db_row = await self._connection.fetch_one(select_query)
        if db_row is None:
            msg = "Property not found - id: {}".format(property_id)
            self.logger.error(msg)
            raise ResourceNotFoundError(msg)
        if db_row["geocode_geo"] is None:
            msg = "Property missing geocode_geo data - id: {}".format(
                property_id)
            self.logger.error(msg)
            raise ResourceMissingDataError(msg)
        # get zone - buffer around property
        geojson_obj = spatial_utils.to_geo_json(db_row["geocode_geo"])
        geoalchemy_element_buffered = spatial_utils.buffer(
            geojson_obj, distance)
        area_distance = spatial_utils.area_distance(geoalchemy_element_buffered,
                                                    None)
        zone_area = area_distance['area']
        # get parcel area
        select_query_parcels = select(
            [func.sum(self._real_property_table.c.parcel_geo.ST_Area())]).where(
                self._real_property_table.c.parcel_geo.ST_Intersects(
                    geoalchemy_element_buffered))
        # get buildings
        select_query_buildings = select(
            [self._real_property_table.c.building_geo]).where(
                self._real_property_table.c.building_geo.ST_Intersects(
                    geoalchemy_element_buffered))
        # run queries in parallel (independent queries, so gather is safe)
        parcel_area, db_rows = await asyncio.gather(
            self._query_parcels(select_query_parcels),
            self._query_buildings(select_query_buildings),
        )
        # get parcel area result (SUM returns NULL/None when nothing matched)
        if not parcel_area:
            parcel_area = 0
        parcel_area = round(parcel_area)
        # get distance and area for buildings
        if db_rows:
            area_distance_list = [
                spatial_utils.area_distance(db_row["building_geo"], geojson_obj)
                for db_row in db_rows
            ]
            building_area = sum(
                [area_distance['area'] for area_distance in area_distance_list])
        else:
            area_distance_list = []
            building_area = 0
        buildings_area_distance = area_distance_list
        # get final zone density; cap at 100% in case building footprints
        # overlap or extend past the zone boundary
        zone_density_percentage = 100 * building_area / zone_area
        if zone_density_percentage > 100.00:
            zone_density_percentage = 100.00
        zone_density = round(zone_density_percentage, 2)
        statistics_out = StatisticsOut(
            parcel_area=parcel_area,
            buildings_area_distance=buildings_area_distance,
            zone_area=zone_area,
            zone_density=zone_density)
        return statistics_out
    @decorators.logtime_async(1)
    async def get_image(self, property_id) -> str:
        """Gets an image based on url from the database
        Args:
            property_id (str): property id
        Raises:
            ResourceNotFoundError: if property id not found
            ResourceMissingDataError: if property does not have a url for image
        Returns:
            str: image file name/path
        """
        # get property image url
        select_query = select([
            self._real_property_table.c.image_url
        ]).where(self._real_property_table.c.id == property_id)
        db_row = await self._connection.fetch_one(select_query)
        if db_row is None:
            msg = "Property not found - id: {}".format(property_id)
            self.logger.error(msg)
            raise ResourceNotFoundError(msg)
        if db_row["image_url"] is None:
            msg = "Property missing image url - id: {}".format(property_id)
            self.logger.error(msg)
            raise ResourceMissingDataError(msg)
        # get image
        # with temporary placeholder for progress reporting, add logging etc.
        # timeouts on url not found, badly formed urls, etc. not handled
        total_size = 0
        start = time()
        print_size = 0.0
        file_name = os.path.join('geoapi/static/tmp',
                                 os.path.basename(db_row["image_url"]))
        timeout = aiohttp.ClientTimeout(
            total=5 * 60, connect=30)  # could put in config eventually
        try:
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.get(db_row["image_url"]) as r:
                    async with aiofiles.open(file_name, 'wb') as fd:
                        self.logger.info('file download started: %s', db_row["image_url"])
                        # Stream the body in chunks so large images never sit
                        # fully in memory.
                        # NOTE(review): 16144 is an unusual chunk size; 16384
                        # (16 KiB) may have been intended -- confirm.
                        while True:
                            chunk = await r.content.read(16144)
                            if not chunk:
                                break
                            await fd.write(chunk)
                            total_size += len(chunk)
                            print_size += len(chunk)
                            if (print_size / (1024 * 1024)
                                ) > 100:  # print every 100MB download
                                msg = f'{time() - start:0.2f}s, downloaded: {total_size / (1024 * 1024):0.0f}MB'
                                self.logger.info(msg)
                                print_size = (print_size / (1024 * 1024)) - 100
            self.logger.info('file downloaded: %s', file_name)
            log_msg = f'total time: {time() - start:0.2f}s, total size: {total_size / (1024 * 1024):0.0f}MB'
            self.logger.info(log_msg)
            # convert to jpeg
            file_name_jpg = os.path.splitext(file_name)[0] + ".jpg"
            img = Image.open(file_name)
            img.save(file_name_jpg, "JPEG", quality=100)
        except aiohttp.client_exceptions.ServerTimeoutError as ste:
            self.logger.error('Time out: %s', str(ste))
            raise
        return file_name_jpg
| 10,520 | 2,923 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
API for proxy
"""
from core import exceptions
from core.web import WebHandler
from service.proxy.serializers import ProxySerializer
from service.proxy.proxy import proxy_srv
from utils import log as logger
from utils.routes import route
def return_developing():
    """Raise a NotFound error marking an endpoint as still under development."""
    raise exceptions.NotFound(msg=exceptions.ERR_MSG_IS_DEVELOPING)
@route(r'/api/proxy/$')
class GetProxyHandler(WebHandler):
    """
    proxy api

    REST handler over the proxy pool: GET queries proxies; POST and DELETE
    are still stub implementations.
    """
    async def get(self, *args, **kwargs):
        """
        get proxies

        Query parameters: count (default 1), scheme (matched
        case-insensitively) and anonymity.
        """
        scheme = self.get_param('scheme')
        if scheme:
            scheme = scheme.lower()
        spec = dict(
            count=int(self.get_param('count', 1)),
            scheme=scheme,
            anonymity=self.get_param('anonymity'),
        )
        matched = await proxy_srv.query(spec)
        items = [ProxySerializer(proxy).to_representation() for proxy in matched]
        data = {
            "count": len(items),
            "detail": items,
        }
        # sort_by_speed = self.get_param('sort_by_speed', 0)
        self.do_success(data)
    async def post(self, *args, **kwargs):
        """ create proxies
        """
        datas = self.get_body()
        logger.debug('datas:', datas, caller=self)
        self.do_success({'ok': 1}, 'todo')
    async def delete(self, *args, **kwargs):
        """ delete proxies
        """
        self.do_success({'ok': 1}, 'todo')
@route(r'/api/proxy/report/$')
class ReposrProxyHandler(WebHandler):
    # NOTE(review): class name looks like a typo for "ReportProxyHandler".
    # The URL comes from the @route decorator so renaming is safe for routing,
    # but confirm no external code imports the current name first.
    async def post(self, *args, **kwargs):
        """Accept a proxy quality report (stub implementation)."""
        self.do_success({'ok': 1}, 'developing..')
| 1,685 | 550 |
#!/usr/bin/python3
from projects.crawler_for_prodect_category.category_output import output_utils
import codecs
Logger = output_utils.Logger
def output(filename, datas):
    """
    Export the crawled product-category data to an HTML report
    (将爬取的数据导出到html).

    :param filename: base name of the output file (extension is added by
        output_utils.get_filename)
    :param datas: list of dicts with keys 'categories', 'subcategories',
        'url' and 'description'
    :return: None
    """
    import html  # stdlib; local import keeps the module import block untouched

    Logger.info('Output to html file, please wait ...')
    # object_serialize('object.pkl',self.datas)
    # categories , description,url
    with codecs.open(output_utils.get_filename(filename, 'html'), 'w', 'utf-8') as file:
        file.write('<html>\n')
        file.write('<head>\n')
        file.write('<meta charset="utf-8"/>\n')
        file.write('<style>\n')
        file.write('table{font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;'
                   'width:100%;border-collapse:collapse;}\n')
        file.write('table th,table td{font-size:1em;border:1px solid #98bf21;padding:3px 7px 2px 7px;}\n')
        file.write('table th{font-size:1.1em;background-color:#A7C942;color:#ffffff;'
                   'padding:5px 7px 4px 7px;text-align:left;}\n')
        file.write('table tr.alt td{background-color:#EAF2D3;color:#000000;}\n')
        file.write('a:link{text-decoration: none;}\n')
        file.write('a:visited{text-decoration: none;}\n')
        file.write('a:hover{text-decoration: underline;}\n')
        file.write('</style>\n')
        file.write('</head>\n')
        file.write('<body>\n')
        file.write('<table>\n')
        # Header row
        file.write('<tr><th>Sequence</th><th>Product Categories</th>'
                   '<th>Product SubCategories</th><th>Description</th></tr>\n')
        for i, row in enumerate(datas):
            clazz = '' if i % 2 == 0 else ' class="alt" '
            # SECURITY FIX: escape the crawled (untrusted) text before
            # embedding it in HTML so stray markup/script in scraped data
            # cannot break or inject into the report.
            file.write('<tr %s><td>%05d</td><td>%s</td><td>%s</td>'
                       '<td><a target="_blank" href="%s">%s</a></td></tr>\n'
                       % (clazz, i + 1,
                          html.escape(str(row['categories'])),
                          html.escape(str(row['subcategories'])),
                          html.escape(str(row['url']), quote=True),
                          html.escape(str(row['description']))))
        file.write('</table>\n')
        file.write('</body>\n')
        file.write('</html>\n')
    Logger.info(' Save completed !')
| 2,058 | 731 |
#!/usr/bin/env python
# Copyright 2016-2019 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
import numpy as np
class SelectIndividuals(BaseEstimator, SelectorMixin):
    '''
    Feature selector that keeps individual features by label match.

    A feature is selected when its label contains any of the substrings in
    ``parameters``; all other features are dropped by ``transform``.
    '''
    def __init__(self, parameters=['hf_mean', 'sf_compactness']):
        '''
        Parameters
        ----------
        parameters: list of str, mandatory
            Label substrings of the individual features to keep. A feature
            is kept when its label contains any of these substrings.
        '''
        # Note: the mutable default is safe here because it is only read,
        # never mutated.
        self.parameters = parameters

    def fit(self, feature_labels):
        '''
        Select only features specified by parameters per patient.

        Parameters
        ----------
        feature_labels: list, mandatory
            Contains the labels of all features used. The index in this
            list will be used in the transform function to select features.

        Returns
        -------
        self
            The fitted selector (sklearn convention, enables chaining).
        '''
        self.selectrows = [num for num, label in enumerate(feature_labels)
                           if any(x in label for x in self.parameters)]
        # BUG FIX: fit previously returned None; sklearn estimators must
        # return self so fit(...).transform(...) and Pipeline usage work.
        return self

    def transform(self, inputarray):
        '''
        Transform the inputarray to select only the features based on the
        result from the fit function.

        Parameters
        ----------
        inputarray: numpy array, mandatory
            Array containing the items to use selection on. The type of
            item in this list does not matter, e.g. floats, strings etc.
        '''
        return np.asarray([np.asarray(x)[self.selectrows].tolist()
                           for x in inputarray])

    def _get_support_mask(self):
        # NOTE: Method is required for the Selector class, but can be empty
        # because transform is overridden above and never consults the mask.
        pass
| 2,947 | 743 |
"""
Background vs Foreground Image segmentation. The goal is to produce a segmentation map that imitates
videocalls tools like the ones implemented in Google Meet, Zoom without using Deep Learning- or Machine Learning-
based techniques.
This script does the following:
- builds a background model using the first 3s of the video, acting on the HSV colorspace;
- performs frame differencing in the HSV domain;
- runs LP filtering (median-filter) on the Saturation difference;
- uses Otsu's technique to threshold the saturation and the brightness difference;
- concatenates the saturation and the brightness masks to produce the foreground mask;
- runs morphological operators one the mask (closing and dilation) with a 3x5 ellipse (resembles the shape of a human face);
- uses the foreground mask, the current video stream and a pre-defined background picture to produce the final output.
Authors: M. Farina, F. Diprima - University of Trento
Last Update (dd/mm/yyyy): 09/04/2021
"""
import os
import cv2
import time
import numpy as np
from helpers.variables import *
from helpers.utils import build_argparser, codec_from_ext, make_folder, recursive_clean
def run(**kwargs):
    """
    Main loop for background removal.

    Expects via kwargs: 'background' (path to the replacement background
    image) and optionally 'output_video', 'frame_folder', 'refresh' and
    'throttle'. Relies on module-level settings star-imported from
    helpers.variables (cap, fps, width, height, ms, dst_size,
    dst_shape_multi, gauss_kernel, median_ksize, kernel, bg_frame_limit)
    -- confirm their values there.
    """
    time_lst = [0]  # NOTE(review): seeded with a 0 entry, which slightly skews the average printed below
    # setup an image for the background
    bg_pic_path = kwargs['background']
    bg_pic = cv2.imread(bg_pic_path)
    bg_pic = cv2.resize(bg_pic, dst_size)
    # setup the video writer if needed
    writer = None
    if kwargs["output_video"]:
        codec = codec_from_ext(kwargs["output_video"])
        writer = cv2.VideoWriter(kwargs["output_video"], codec, fps, frameSize=(width, height))
    # create the output frame folder if needed
    if kwargs["frame_folder"]:
        if kwargs["refresh"]: recursive_clean(kwargs["frame_folder"])
        make_folder(kwargs["frame_folder"])
    # initialize background
    # uint16 accumulator: uint8 HSV frames are summed here before averaging.
    # NOTE(review): could overflow if bg_frame_limit * 255 > 65535 -- confirm.
    hsv_bg = np.zeros(dst_shape_multi, dtype='uint16')
    # start looping through frames
    frame_count = 0
    if cap.isOpened():
        while cap.isOpened():
            # retrieve the current frame and exit if needed
            ret, frame = cap.read()
            if not ret:
                break
            # otherwise, perform basic operations on the current frame
            frame = cv2.resize(frame, dst_size)
            hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            hsv_frame_blurred = cv2.GaussianBlur(hsv_frame, gauss_kernel, sigmaX=2, sigmaY=2)
            # build a model for the background during the first frames
            if frame_count < bg_frame_limit:
                hsv_bg = hsv_bg.copy() + hsv_frame_blurred
                if frame_count == bg_frame_limit-1:
                    # average the accumulated frames into the final bg model
                    hsv_bg = np.uint8(hsv_bg.copy() / bg_frame_limit)
            # when the bg has been modeled, segment the fg
            else:
                time_in = time.perf_counter()
                diff = cv2.absdiff(hsv_frame_blurred, hsv_bg)
                h_diff, s_diff, v_diff = cv2.split(diff)
                # automatic global thresholding with Otsu's technique
                r1, h_diff_thresh = cv2.threshold(h_diff, 1, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                r2, s_diff_thresh = cv2.threshold(s_diff, 1, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                r3, v_diff_thresh = cv2.threshold(v_diff, 1, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                # take into account contribution of saturation and value (aka 'brightness')
                # clean the saturation mask beforehand, it usually is more unstable
                s_diff_thresh_median = cv2.medianBlur(s_diff_thresh, ksize=median_ksize)
                fg_mask = s_diff_thresh_median + v_diff_thresh
                fg_mask_closed = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel=kernel, iterations=10)
                fg_mask_dilated = cv2.dilate(fg_mask_closed, kernel=kernel)
                # compute the actual foreground and background
                foreground = cv2.bitwise_and(frame, frame, mask=fg_mask_dilated)
                background = bg_pic - cv2.bitwise_and(bg_pic, bg_pic, mask=fg_mask_dilated)
                # ... and add them to generate the output image
                out = cv2.add(foreground, background)
                # display the output and the masks
                cv2.imshow("Output", out)
                # save frames on the fs if the user requested it
                if kwargs["frame_folder"] and frame_count % kwargs["throttle"] == 0:
                    cv2.imwrite(os.path.join(kwargs["frame_folder"], "{}.jpg".format(frame_count - bg_frame_limit + 1)), out)
                # write the video on the fs if the user requested it
                if writer:
                    writer.write(cv2.resize(out, dsize=(width, height)))
                # quit if needed
                if cv2.waitKey(ms) & 0xFF==ord('q'):
                    break
                # keep track of time
                time_out = time.perf_counter()
                time_diff = time_out - time_in
                time_lst.append(time_diff)
            frame_count += 1
    print("Average Time x Frame: ", round(np.sum(np.array(time_lst))/len(time_lst), 2))
    cv2.destroyAllWindows()
    cap.release()
    if writer:
        writer.release()
if __name__ == "__main__":
    # Parse CLI options (defined in helpers.utils.build_argparser) into a
    # plain dict so run() can consume them as keyword arguments.
    parser = build_argparser()
    kwargs = vars(parser.parse_args())
    run(**kwargs)
from entity import Entity
class RelativeEntity(Entity):
    """
    Entity that can be positioned relative to other entities or to the
    edges of a container.

    ``self.margin`` is a 4-element list ordered [left, top, right, bottom].
    """

    def __init__(self, width, height):
        Entity.__init__(self, width, height)
        self.margin = [0, 0, 0, 0]  # [left, top, right, bottom]

    def below(self, entity):
        """Place this entity directly below *entity*, honoring the top margin."""
        self.y = entity.y + entity.height + self.margin[1]

    def above(self, entity):
        """Place this entity directly above *entity*, honoring the bottom margin."""
        self.y = entity.y - self.height - self.margin[3]

    def leftOf(self, entity):
        """Place this entity to the left of *entity*, honoring the right margin."""
        self.x = entity.x - self.width - self.margin[2]

    def rightOf(self, entity):
        """Place this entity to the right of *entity*, honoring the left margin."""
        self.x = entity.x + entity.width + self.margin[0]

    def setMargin(self, margin):
        """Replace all four margins at once with [left, top, right, bottom].

        BUG FIX: this setter was previously named ``margin`` and was
        therefore permanently shadowed by the ``self.margin`` list assigned
        in ``__init__`` -- calling it raised TypeError. Renamed so it is
        reachable; no working caller could have used the old name.
        """
        self.margin = margin

    def marginLeft(self, margin):
        """Set only the left margin."""
        self.margin[0] = margin

    def marginRight(self, margin):
        """Set only the right margin."""
        self.margin[2] = margin

    def marginTop(self, margin):
        """Set only the top margin."""
        self.margin[1] = margin

    def marginBottom(self, margin):
        """Set only the bottom margin."""
        self.margin[3] = margin

    def alignLeft(self):
        """Snap to the container's left edge (plus left margin)."""
        self.x = 0 + self.margin[0]

    def alignRight(self, width):
        """Snap to the right edge of a container of the given width."""
        self.x = width - self.width - self.margin[2]

    def alignTop(self):
        """Snap to the container's top edge (plus top margin)."""
        self.y = 0 + self.margin[1]

    def alignBottom(self, height):
        """Snap to the bottom edge of a container of the given height."""
        self.y = height - self.height - self.margin[3]

    def centerRelativeX(self, entity):
        """Center this entity horizontally over *entity*."""
        self.x = entity.x + (entity.width / 2) - (self.width / 2)

    def centerRelativeY(self, entity):
        """Center this entity vertically over *entity*."""
        self.y = entity.y + (entity.height / 2) - (self.height / 2)
| 1,279 | 469 |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import singledispatch
from typing import List
import cudf
import cupy
import dask
import dask_cudf
import pandas
from cuchemcommon.context import Context
from cuchemcommon.data import ClusterWfDAO
from cuchemcommon.data.cluster_wf import ChemblClusterWfDao
from cuchemcommon.fingerprint import MorganFingerprint
from cuchemcommon.utils.logger import MetricsLogger
from cuchemcommon.utils.singleton import Singleton
from cuml import SparseRandomProjection, KMeans
from cuchem.utils.metrics import batched_silhouette_scores
from cuchem.wf.cluster import BaseClusterWorkflow
logger = logging.getLogger(__name__)
@singledispatch
def _gpu_random_proj_wrapper(embedding, self):
    """Dispatch on the dataframe type of *embedding*.

    Each registered overload converts one step closer to a cudf.DataFrame
    and re-dispatches; the cudf overload finally runs ``self._cluster``.
    Unregistered types fall through to this base and return NotImplemented.
    """
    return NotImplemented


@_gpu_random_proj_wrapper.register(dask.dataframe.core.DataFrame)
def _(embedding, self):
    # Materialize the lazy dask collection, then re-dispatch on the result type.
    logger.info('Converting from dask.dataframe.core.DataFrame...')
    embedding = embedding.compute()
    return _gpu_random_proj_wrapper(embedding, self)


@_gpu_random_proj_wrapper.register(dask_cudf.core.DataFrame)
def _(embedding, self):
    # Materialize the lazy dask_cudf collection into a cudf.DataFrame.
    logger.info('Converting from dask_cudf.core.DataFrame...')
    embedding = embedding.compute()
    return _gpu_random_proj_wrapper(embedding, self)


@_gpu_random_proj_wrapper.register(pandas.DataFrame)
def _(embedding, self):
    # Move host-side pandas data onto the GPU.
    logger.info('Converting from pandas.DataFrame...')
    embedding = cudf.from_pandas(embedding)
    return _gpu_random_proj_wrapper(embedding, self)


@_gpu_random_proj_wrapper.register(cudf.DataFrame)
def _(embedding, self):
    # Terminal case: already a cudf.DataFrame, run the clustering itself.
    return self._cluster(embedding)
class GpuWorkflowRandomProjection(BaseClusterWorkflow, metaclass=Singleton):
    """GPU clustering workflow: 2-D sparse random projection + KMeans.

    Singleton: one instance per process, so ``df_embedding`` and
    ``n_clusters`` persist across cluster()/recluster() calls.
    """

    def __init__(self,
                 n_molecules: int = None,
                 dao: ClusterWfDAO = ChemblClusterWfDao(MorganFingerprint),
                 n_clusters=7,
                 seed=0):
        # NOTE(review): the DAO default is constructed once at import time and
        # shared by every call — harmless under the Singleton metaclass, but
        # worth knowing if that metaclass is ever removed.
        super(GpuWorkflowRandomProjection, self).__init__()
        self.dao = dao
        self.n_molecules = n_molecules
        self.n_clusters = n_clusters
        self.pca = None  # unused here — presumably kept for parity with sibling workflows; confirm
        self.seed = seed
        # Cap on the number of points sampled for silhouette scoring (benchmark mode).
        self.n_silhouette = 500000
        self.context = Context()
        self.srp_embedding = SparseRandomProjection(n_components=2)

    def rand_jitter(self, arr):
        """
        Introduces random displacements to spread the points
        """
        # Jitter magnitude is proportional to the overall value range of arr.
        stdev = .023 * cupy.subtract(cupy.max(arr), cupy.min(arr))
        for i in range(arr.shape[1]):
            rnd = cupy.multiply(cupy.random.randn(len(arr)), stdev)
            arr[:, i] = cupy.add(arr[:, i], rnd)
        return arr

    def _cluster(self, embedding):
        """Project *embedding* to 2-D, KMeans-cluster it, and return the
        dataframe with added 'cluster', 'x' and 'y' columns.
        """
        logger.info('Computing cluster...')
        embedding = embedding.reset_index()
        n_molecules = embedding.shape[0]
        # Before reclustering remove all columns that may interfere
        embedding, prop_series = self._remove_non_numerics(embedding)
        with MetricsLogger('random_proj', n_molecules) as ml:
            srp = self.srp_embedding.fit_transform(embedding.values)
            # Metric is evaluated by MetricsLogger on context exit.
            ml.metric_name = 'spearman_rho'
            ml.metric_func = self._compute_spearman_rho
            ml.metric_func_args = (embedding, embedding, srp)
        with MetricsLogger('kmeans', n_molecules) as ml:
            kmeans_cuml = KMeans(n_clusters=self.n_clusters)
            kmeans_cuml.fit(srp)
            kmeans_labels = kmeans_cuml.predict(srp)
            ml.metric_name = 'silhouette_score'
            ml.metric_func = batched_silhouette_scores
            ml.metric_func_kwargs = {}
            ml.metric_func_args = (None, None)
            if self.context.is_benchmark:
                # Subsample (capped at n_silhouette) to keep scoring tractable.
                (srp_sample, kmeans_labels_sample), _ = self._random_sample_from_arrays(
                    srp, kmeans_labels, n_samples=self.n_silhouette)
                ml.metric_func_args = (srp_sample, kmeans_labels_sample)
        # Add back the column required for plotting and to correlating data
        # between re-clustering
        srp = self.rand_jitter(srp)
        embedding['cluster'] = kmeans_labels
        embedding['x'] = srp[:, 0]
        embedding['y'] = srp[:, 1]
        # Add back the prop columns
        for col in prop_series.keys():
            embedding[col] = prop_series[col]
        return embedding

    def cluster(self, df_mol_embedding=None):
        """Cluster molecular embeddings, fetching them via the DAO when
        *df_mol_embedding* is not supplied. Stores and returns the result.
        """
        logger.info("Executing GPU workflow...")
        if df_mol_embedding is None:
            # NOTE(review): context attribute is 'n_molecule' (singular) while
            # this class uses 'n_molecules' — confirm the spelling is intended.
            self.n_molecules = self.context.n_molecule
            df_mol_embedding = self.dao.fetch_molecular_embedding(
                self.n_molecules,
                cache_directory=self.context.cache_directory)
            df_mol_embedding = df_mol_embedding.persist()
        self.df_embedding = _gpu_random_proj_wrapper(df_mol_embedding, self)
        return self.df_embedding

    def recluster(self,
                  filter_column=None,
                  filter_values=None,
                  n_clusters=None):
        """Optionally filter the stored embedding and re-run clustering."""
        if filter_values is not None:
            # Keep only rows whose filter_column value is in filter_values.
            self.df_embedding['filter_col'] = self.df_embedding[filter_column].isin(filter_values)
            self.df_embedding = self.df_embedding.query('filter_col == True')
        if n_clusters is not None:
            self.n_clusters = n_clusters
        self.df_embedding = _gpu_random_proj_wrapper(self.df_embedding, self)
        return self.df_embedding

    def add_molecules(self, chemblids: List):
        """Ensure fingerprints for *chemblids* are present in the stored
        embedding, fetching any missing ones from the DAO.

        Returns (chembl-id map of the molecules that were missing, all
        molregnos, the updated embedding dataframe).
        """
        chem_mol_map = {row[0]: row[1] for row in self.dao.fetch_id_from_chembl(chemblids)}
        molregnos = list(chem_mol_map.keys())
        self.df_embedding['id_exists'] = self.df_embedding['id'].isin(molregnos)
        ldf = self.df_embedding.query('id_exists == True')
        if hasattr(ldf, 'compute'):
            # Materialize lazy (dask) frames before comparing ids.
            ldf = ldf.compute()
        self.df_embedding = self.df_embedding.drop(['id_exists'], axis=1)
        # NOTE(review): Series.to_array() was deprecated in later cudf releases
        # in favor of to_numpy()/values_host — confirm against the pinned cudf.
        missing_mol = set(molregnos).difference(ldf['id'].to_array())
        chem_mol_map = {id: chem_mol_map[id] for id in missing_mol}
        missing_molregno = chem_mol_map.keys()
        if len(missing_molregno) > 0:
            new_fingerprints = self.dao.fetch_molecular_embedding_by_id(missing_molregno)
            new_fingerprints = new_fingerprints.compute()
            self.df_embedding = self._remove_ui_columns(self.df_embedding)
            self.df_embedding = self.df_embedding.append(new_fingerprints)
        return chem_mol_map, molregnos, self.df_embedding
| 6,967 | 2,320 |
#! /usr/bin/env python
'''
This script calculates fractions of SNPs with iHS values above 2.0 over
genomic windows of specified size.
#Example input:
#CHROM POS iHS
chr1 14548 -3.32086
chr1 14670 -2.52
chr1 19796 0.977669
chr1 19798 3.604374
chr1 29412 -0.308192
chr1 29813 2.231736
chr1 29847 0.6594
chr1 29873 -2.03918
chr1 30050 -0.113216
chr1 30097 2.0193944
chr1 30135 -0.161264
chr1 30259 0.13628
chr1 30365 -0.357767
chr1 30370 0.953858
chr1 30664 2.0124902
chr1 30723 -0.255984
chr1 30856 3.355832
chr1 30903 -3.196446
chr1 31052 2.590459
chr1 31409 -0.497963
chr1 31414 0.611446
chr1 31424 -0.700634
chr1 31758 2.262846
chr1 31841 -0.50899
chr1 31849 5.392066
chr1 31860 -0.383864
chr1 31864 6.39043
chr1 32008 0.00886538
chr1 32158 -3.451976
chr1 32360 0.194424
chr1 32439 -0.995733
#Example output:
#CHROM POS nSNPs iHS
chr1 14609.0 2 1.0
chr1 19797.0 2 0.0
chr1 29642.5 4 0.5
chr1 30476.5 10 0.4
chr1 31458.0 9 0.444444444444
chr1 32223.5 4 0.25
#command:
$ python calculate_iHSproportion.py \
-i iHS.txt \
-o iHS.window.txt \
-w 1000 \
-t 2
#contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
'''
############################# modules #############################
import calls # my custom module
############################# options #############################
# Command-line interface: input/output files, window size, iHS threshold.
parser = calls.CommandLineParser()
parser.add_argument(
    '-i',
    '--input',
    help='name of the input file',
    type=str,
    required=True)
parser.add_argument(
    '-o', '--output',
    help='name of the output file',
    type=str,
    required=True)
parser.add_argument(
    '-w',
    '--window',
    help='sliding window size',
    type=int,
    required=True)
parser.add_argument(
    '-t',
    '--threshold',
    # FIX: typo in user-facing help text ("propotion" -> "proportion").
    # TODO(review): type=int rejects fractional thresholds such as 2.5;
    # consider type=float if non-integer cutoffs should be supported.
    help='iHS threshold to calculate proportion for',
    type=int,
    required=True)
args = parser.parse_args()
############################# functions #############################
def proportionWindow(values, threshold):
    """Return ``[n, p]`` for one window.

    n is the number of SNPs in the window (``len(values)``) and p is the
    fraction of those whose absolute iHS value is >= *threshold*.

    An empty window yields ``[0, 0.0]`` instead of raising
    ZeroDivisionError (robustness fix).
    """
    windowSize = len(values)
    if windowSize == 0:
        return [0, 0.0]
    # Count rather than build a throwaway list of matching values.
    largerThan = sum(1 for v in values if abs(v) >= threshold)
    proportion = largerThan / float(windowSize)
    return [windowSize, proportion]
############################# program #############################
print('Opening the file...')
windSize = args.window
windPosEnd = windSize  # end coordinate of the current window
counter = 0
with open(args.input) as datafile:
    header_line = datafile.readline()
    # make output header from the first two input columns + the value column
    header_words = header_line.split()
    chrPos = header_words[0:2]
    chrPosP = '\t'.join(str(s) for s in chrPos)
    outputFile = open(args.output, 'w')
    outputFile.write("%s\tnSNPs\t%s\n" % (chrPosP, header_words[2]))
    print('Processing the data ...')
    Vwindow = []        # iHS values accumulated for the current window
    ChrPrevious = ''    # chromosome of the previous line
    posS = ''           # first position in the current window
    posE = ''           # last position seen (window end for output)
    for line in datafile:
        words = line.split()
        Chr = words[0]
        pos = int(words[1])
        indVal = float(words[2])
        # initialize trackers on the first data line
        if not ChrPrevious:
            ChrPrevious = Chr
        if not posS:
            posS = pos
        if not posE:
            posE = pos
        # if window size is reached output the results
        if Chr != ChrPrevious:  # if end of a chromosome
            meanValWindow = proportionWindow(Vwindow, args.threshold)
            meanValWindowP = '\t'.join(str(s) for s in meanValWindow)
            calls.processWindow(ChrPrevious, posS, posE,
                                meanValWindowP, outputFile)
            # NOTE: window coordinates restart from windSize on each chromosome
            windPosEnd = windSize
            Vwindow = []
            posS = pos
        elif pos > windPosEnd:  # if end of a window
            if Vwindow:
                meanValWindow = proportionWindow(Vwindow, args.threshold)
                meanValWindowP = '\t'.join(str(s) for s in meanValWindow)
                calls.processWindow(Chr, posS, posE,
                                    meanValWindowP, outputFile)
            windPosEnd = windPosEnd + windSize
            Vwindow = []
            posS = pos
            while pos > windPosEnd:  # gap is larger than window size
                windPosEnd = windPosEnd + windSize
        ChrPrevious = Chr
        posE = pos
        # append values
        Vwindow.append(indVal)
        # track progress
        counter += 1
        if counter % 1000000 == 0:
            # FIX: was a Python-2-only print statement; this form is valid in
            # both Python 2 and 3 (the rest of the file uses print()).
            print(str(counter) + " lines processed")
    # process the last window
    meanValWindow = proportionWindow(Vwindow, args.threshold)
    meanValWindowP = '\t'.join(str(s) for s in meanValWindow)
    calls.processWindow(Chr, posS, pos, meanValWindowP, outputFile)
# datafile is closed automatically by the with-block (explicit close removed)
outputFile.close()
print('Done!')
| 4,688 | 1,890 |
# Import the needed management objects from the libraries. The azure.common library
# is installed automatically with the other libraries.
from azure.common.client_factory import get_client_from_cli_profile
from azure.mgmt.resource import ResourceManagementClient
from utils.dbconn import *
from utils.logger import *
from model.project import Project
import string, random
from azure.common.credentials import ServicePrincipalCredentials
# Provision the resource group.
async def create_rg(project):
    """Provision the Azure resource group recorded on the *project* document.

    Returns True when the group already exists or was created successfully,
    False when provisioning fails, and None when the project document lookup
    fails (same outcomes as before, but the DB connection is now always
    closed).
    """
    con = create_db_con()
    try:
        try:
            # Fetch the document once instead of re-querying per field.
            doc = Project.objects(name=project)[0]
            if doc['resource_group'] and doc['resource_group_created']:
                return True  # already provisioned; nothing to do
        except Exception as e:
            print("Reaching Project document failed: "+repr(e))
            logger("Reaching Project document failed: "+repr(e),"warning")
            return None  # was an implicit None before; made explicit
        rg_location = doc['location']
        rg_name = doc['resource_group']
        try:
            client_id = doc['client_id']
            secret = doc['secret']
            tenant_id = doc['tenant_id']
            subscription_id = doc['subscription_id']
            creds = ServicePrincipalCredentials(client_id=client_id, secret=secret, tenant=tenant_id)
            resource_client = ResourceManagementClient(creds,subscription_id)
            print("Provisioning a resource group...some operations might take a minute or two.")
            rg_result = resource_client.resource_groups.create_or_update(
                rg_name, {"location": rg_location})
            print(
                "Provisioned resource group"+ rg_result.name+" in the "+rg_result.location+" region")
            Project.objects(name=project).update(resource_group=rg_result.name, resource_group_created=True)
            return True
        except Exception as e:
            print("Resource group creation failed "+str(e))
            logger("Resource group creation failed: "+repr(e),"warning")
            return False
    finally:
        # BUG FIX: the connection used to be closed only on the success path,
        # leaking it on every lookup/provisioning failure.
        con.close()
| 2,176 | 555 |
import numpy
from scipy import stats
from modules import controler
# To compile, us Auto Py to Exe:
# Step 1 - install Auto Py to Exe, if not already done
# To install the application run this line in cmd:
# pip install auto-py-to-exe
# To open the application run this line in cmd:
# auto-py-to-exe
# Step 2 - read the rest of the steps here:
# https://dev.to/eshleron/how-to-convert-py-to-exe-step-by-step-guide-3cfi
# Selects which demo below runs: 1 = central tendency, 2 = standard deviation.
switch = 2

# Mean, Median, Mode
if switch == 1:
    speed = [99, 86, 87, 88, 111, 86, 103, 87, 94, 78, 77, 85, 86]
    x = numpy.median(speed)
    print(x)
    x = stats.mode(speed)
    print(x)
# Standard Deviation - distance from Mean
elif switch == 2:
    # Low-spread sample: values cluster tightly around the mean.
    speed = [86, 87, 88, 86, 87, 85, 86]
    print("speed = [86,87,88,86,87,85,86]")
    print("Mean = ", numpy.mean(speed))
    print("Standard Deviation = ", numpy.std(speed))
    print("")
    # High-spread sample for contrast.
    speed = [32, 111, 138, 28, 59, 77, 97]
    print("speed = [32,111,138,28,59,77,97]")
    print("Mean = ", numpy.mean(speed))
    print("Standard Deviation = ", numpy.std(speed))

# Hand control to the application UI after the demo output.
controler.app()
# Micro-benchmark comparing argcomplete's FilesCompleter with pytest's
# FastFilesCompleter. Historical numbers (10000 iterations, relative only):
#                      2.7.5     3.3.2
# FilesCompleter      75.1109   69.2116
# FastFilesCompleter   0.7383    1.0760
if __name__ == '__main__':
    import sys
    import timeit
    from argcomplete.completers import FilesCompleter
    from _pytest._argcomplete import FastFilesCompleter

    count = 1000  # only a few seconds
    setup = 'from __main__ import FastFilesCompleter\nfc = FastFilesCompleter()'
    run = 'fc("/d")'
    # Time the slow completer first: stripping 'Fast' from the setup string
    # rewrites it to instantiate FilesCompleter instead.
    sys.stdout.write('%s\n' % (timeit.timeit(run,
                     setup=setup.replace('Fast', ''), number=count)))
    # Then the fast one, with an otherwise identical workload.
    sys.stdout.write('%s\n' % (timeit.timeit(run, setup=setup, number=count)))
| 694 | 242 |
from dataclasses import dataclass
from bindings.gmd.time_edge_property_type import TimePrimitivePropertyType
# Default XML namespace for the GML bindings in this module.
__NAMESPACE__ = "http://www.opengis.net/gml"


@dataclass
class ValidTime(TimePrimitivePropertyType):
    """
    gml:validTime is a convenience property element.
    """

    class Meta:
        # xsdata binding metadata: XML element name and namespace.
        name = "validTime"
        namespace = "http://www.opengis.net/gml"
import os
from compas_assembly.datastructures import Assembly
from compas_assembly.geometry import Arch
from compas_assembly.rhino import AssemblyArtist
from compas.rpc import Proxy
# RPC proxy: runs numpy/cvx-based code in an external CPython process,
# since Rhino's IronPython cannot import those packages directly.
proxy = Proxy()
proxy.restart_server()

# Resolve the data directory both when run as a script (__file__ defined)
# and when pasted into an interactive editor (NameError -> cwd).
try:
    HERE = os.path.dirname(__file__)
except NameError:
    HERE = os.getcwd()

DATA = os.path.join(HERE, '../../../data')
FILE = os.path.join(DATA, 'arch.json')

# ==============================================================================
# Assembly
# ==============================================================================

# Arch geometry parameters: rise/span in model units, n voussoir blocks.
rise = 5
span = 10
depth = 0.5
thickness = 0.7
n = 40

arch = Arch(rise, span, thickness, depth, n)
assembly = Assembly.from_geometry(arch)

# The first and last blocks are the supports (springers) of the arch.
assembly.node_attribute(0, 'is_support', True)
assembly.node_attribute(n - 1, 'is_support', True)

# ==============================================================================
# Identify the interfaces
# ==============================================================================

proxy.package = 'compas_assembly.datastructures'

# make proxy methods into configurable objects
# with __call__ for execution
# store the method objects in a dict of callables
assembly = proxy.assembly_interfaces_numpy(assembly, tmax=0.02)

# ==============================================================================
# Compute interface forces
# ==============================================================================

proxy.package = 'compas_rbe.equilibrium'

# Rigid-block equilibrium solve; requires the CPLEX solver to be installed.
assembly = proxy.compute_interface_forces_cvx(assembly, solver='CPLEX')

# ==============================================================================
# Visualize
# ==============================================================================

artist = AssemblyArtist(assembly, layer="Arch")
artist.clear_layer()

# Supports drawn in red; all other nodes use the artist default.
artist.draw_nodes(color={key: (255, 0, 0) for key in assembly.nodes_where({'is_support': True})})
artist.draw_edges()
artist.draw_blocks()
artist.draw_interfaces()
artist.draw_resultants(scale=0.1)
# artist.color_interfaces(mode=1)
# -*- coding: utf-8 -*-
# Copyright (C) 2017-2019 by
# David Amos <somacdivad@gmail.com>
# Randy Davila <davilar@uhd.edu>
# BSD license.
#
# Authors: David Amos <somacdivad@gmail.com>
# Randy Davila <davilar@uhd.edu>
"""Assorted degree related graph utilities.
"""
import collections
from grinpy import degree, nodes, number_of_nodes
from grinpy.functions.neighborhoods import closed_neighborhood, neighborhood, set_neighborhood, set_closed_neighborhood
__all__ = [
"degree_sequence",
"min_degree",
"max_degree",
"average_degree",
"number_of_nodes_of_degree_k",
"number_of_degree_one_nodes",
"number_of_min_degree_nodes",
"number_of_max_degree_nodes",
"neighborhood_degree_list",
"closed_neighborhood_degree_list",
"is_regular",
"is_k_regular",
"is_sub_cubic",
"is_cubic",
]
def degree_sequence(G):
    """Return the degree sequence of *G*.

    The degree sequence is the list of node degrees, in node-iteration
    order.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    list
        The degree of each node of *G*.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.degree_sequence(G)
    [1, 2, 1]
    """
    return [degree(G, node) for node in nodes(G)]
def min_degree(G):
    """Return the minimum degree of G.

    The minimum degree of a graph is the smallest degree of any node in the
    graph. The graph must have at least one node.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        The minimum degree of the graph.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.min_degree(G)
    1
    """
    # min() is a single O(n) pass; the previous sort-then-index approach was
    # O(n log n) and sorted a throwaway copy of the degree sequence.
    return min(degree_sequence(G))
def max_degree(G):
    """Return the maximum degree of G.

    The maximum degree of a graph is the largest degree of any node in the
    graph. The graph must have at least one node.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        The maximum degree of the graph.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.max_degree(G)
    2
    """
    # FIX: docstring example previously showed nx.min_degree.
    # max() is O(n); the old reverse-sort-then-index was O(n log n).
    return max(degree_sequence(G))
def average_degree(G):
    """Return the average degree of *G*.

    This is the arithmetic mean of the degrees of all nodes.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    float
        The average degree of the graph.

    Examples
    --------
    >>> G = nx.star_graph(3) # Star on 4 nodes
    >>> nx.average_degree(G)
    1.5
    """
    total = sum(degree_sequence(G))
    return total / number_of_nodes(G)
def number_of_nodes_of_degree_k(G, k):
    """Count the nodes of *G* whose degree equals *k*.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    k : int
        A positive integer.

    Returns
    -------
    int
        How many nodes of *G* have degree exactly *k*.

    See Also
    --------
    number_of_leaves, number_of_min_degree_nodes, number_of_max_degree_nodes

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.number_of_nodes_of_degree_k(G, 1)
    2
    """
    matching = [node for node in nodes(G) if degree(G, node) == k]
    return len(matching)
def number_of_degree_one_nodes(G):
    """Count the *leaves* of *G*, i.e. the nodes of degree exactly 1.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        The number of degree-one nodes of *G*.

    See Also
    --------
    number_of_nodes_of_degree_k, number_of_min_degree_nodes,
    number_of_max_degree_nodes

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.number_of_leaves(G)
    2
    """
    # Thin wrapper: a leaf is simply a node of degree k == 1.
    return number_of_nodes_of_degree_k(G, 1)
def number_of_min_degree_nodes(G):
    """Count the nodes of *G* whose degree equals the graph's minimum degree.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        How many nodes attain the minimum degree of *G*.

    See Also
    --------
    number_of_nodes_of_degree_k, number_of_leaves, number_of_max_degree_nodes,
    min_degree

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.number_of_min_degree_nodes(G)
    2
    """
    smallest = min_degree(G)
    return number_of_nodes_of_degree_k(G, smallest)
def number_of_max_degree_nodes(G):
    """Count the nodes of *G* whose degree equals the graph's maximum degree.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        How many nodes attain the maximum degree of *G*.

    See Also
    --------
    number_of_nodes_of_degree_k, number_of_leaves, number_of_min_degree_nodes,
    max_degree

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.number_of_max_degree_nodes(G)
    1
    """
    largest = max_degree(G)
    return number_of_nodes_of_degree_k(G, largest)
def neighborhood_degree_list(G, nbunch):
    """Return a list of the unique degrees of all neighbors of nodes in
    `nbunch`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    nbunch :
        A single node or iterable container of nodes.

    Returns
    -------
    list
        A list of the degrees of all nodes in the neighborhood of the nodes
        in `nbunch`.

    See Also
    --------
    closed_neighborhood_degree_list, neighborhood

    Examples
    --------
    >>> import grinpy as gp
    >>> G = gp.path_graph(3) # Path on 3 nodes
    >>> gp.neighborhood_degree_list(G, 1)
    [1, 2]
    """
    # BUG FIX: the isinstance check previously tested the imported `nodes`
    # FUNCTION (never Iterable) instead of the `nbunch` argument, so the
    # set_neighborhood branch was unreachable and iterable nbunch values
    # were mishandled.
    if isinstance(nbunch, collections.abc.Iterable):
        return list(set(degree(G, u) for u in set_neighborhood(G, nbunch)))
    else:
        return list(set(degree(G, u) for u in neighborhood(G, nbunch)))
def closed_neighborhood_degree_list(G, nbunch):
    """Return a list of the unique degrees of all nodes in the closed
    neighborhood of the nodes in `nbunch`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    nbunch :
        A single node or iterable container of nodes.

    Returns
    -------
    list
        A list of the degrees of all nodes in the closed neighborhood of the
        nodes in `nbunch`.

    See Also
    --------
    closed_neighborhood, neighborhood_degree_list

    Examples
    --------
    >>> import grinpy as gp
    >>> G = gp.path_graph(3) # Path on 3 nodes
    >>> gp.closed_neighborhood_degree_list(G, 1)
    [1, 2, 2]
    """
    # BUG FIX: same defect as neighborhood_degree_list — the isinstance
    # check tested the imported `nodes` function rather than `nbunch`,
    # making the set_closed_neighborhood branch dead code.
    if isinstance(nbunch, collections.abc.Iterable):
        return list(set(degree(G, u) for u in set_closed_neighborhood(G, nbunch)))
    else:
        return list(set(degree(G, u) for u in closed_neighborhood(G, nbunch)))
def is_regular(G):
    """Determine whether every node of *G* has the same degree.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph

    Returns
    -------
    boolean
        True if *G* is regular, False otherwise.
    """
    # A graph is regular exactly when its extreme degrees coincide.
    return min_degree(G) == max_degree(G)
def is_k_regular(G, k):
    """Determine whether *G* is regular of degree *k*.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph

    k : int
        An integer

    Returns
    -------
    boolean
        True if every node of *G* has degree exactly *k*, False otherwise.

    Raises
    ------
    TypeError
        If *k* is not an integer-valued number.
    """
    # check that k is an integer
    if not float(k).is_integer():
        raise TypeError("Expected k to be an integer.")
    k = int(k)
    # Short-circuits on the first node whose degree differs from k.
    return all(degree(G, node) == k for node in nodes(G))
def is_sub_cubic(G):
    """Determine whether *G* is sub-cubic.

    A graph is *sub-cubic* when no node has degree greater than 3.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    boolean
        True if *G* is sub-cubic, False otherwise.
    """
    return max_degree(G) <= 3
def is_cubic(G):
    """Determine whether *G* is cubic (3-regular).

    A graph is *cubic* when every node has degree exactly 3.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph

    Returns
    -------
    boolean
        True if *G* is cubic, False otherwise.
    """
    return is_k_regular(G, 3)
| 8,840 | 3,014 |
"""Contain the tests for the handlers of each supported GitHub webhook."""
| 76 | 19 |
# Pytests to test the Polygon domain type in the domain.json schema file
import pytest
from jsonschema.exceptions import ValidationError
# Apply the domain JSON schema to every test in this module.
pytestmark = pytest.mark.schema("/schemas/domain")


@pytest.mark.exhaustive
def test_valid_polygon_domain(validator, polygon_domain):
    ''' Tests an example of a Polygon domain '''
    # The unmodified fixture must validate without raising.
    validator.validate(polygon_domain)
def test_missing_composite_axis(validator, polygon_domain):
    ''' Invalid: Polygon domain with missing 'composite' axis '''
    # Removing the mandatory composite axis must fail validation.
    del polygon_domain["axes"]["composite"]
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_empty_composite_axis(validator, polygon_domain):
    ''' Invalid: Polygon domain with empty 'composite' axis '''
    # An axis with an empty values array must fail validation.
    polygon_domain["axes"]["composite"] = {"values": []}
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_wrong_composite_axis_type(validator, polygon_domain):
    ''' Invalid: Polygon domain with primitive instead of polygon axis '''
    # Scalar values are not valid polygon geometries.
    polygon_domain["axes"]["composite"] = {
        "values": [1, 2, 3]
    }
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_wrong_composite_axis_type2(validator, polygon_domain):
    ''' Invalid: Polygon domain with tuple instead of polygon axis (invalid polygons) '''
    # Coordinate pairs lack the ring-nesting a polygon requires.
    polygon_domain["axes"]["composite"]["values"] = [[1, 1], [2, 2], [3, 3]]
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_composite_axis_with_2_values(validator, polygon_domain):
    ''' Invalid: Polygon domain with composite axis with two polygons '''
    # The Polygon domain type allows exactly one polygon on the axis.
    polygon_domain["axes"]["composite"]["values"] = [
        [[[100.0, 1.0], [101.0, 0.0], [101.0, 2.0], [100.0, 2.0], [100.0, 1.0]]],
        [[[101.0, 1.0], [102.0, 0.0], [102.0, 2.0], [101.0, 2.0], [101.0, 1.0]]]
    ]
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_wrong_composite_axis_coordinates(validator, polygon_domain):
    ''' Invalid: Polygon domain with invalid coordinates '''
    # Coordinate identifiers must be in the expected ["x", "y"] order.
    polygon_domain["axes"]["composite"]["coordinates"] = ["y", "x"]
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_wrong_data_type(validator, polygon_domain):
    ''' Invalid: Polygon domain with wrong data type '''
    # The composite axis of a Polygon domain must declare dataType "polygon".
    polygon_domain["axes"]["composite"]["dataType"] = "tuple"
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_extra_axis(validator, polygon_domain):
    ''' Invalid: Polygon domain with unrecognised extra axis '''
    # Duplicate the composite axis under an unknown name; the schema
    # must reject axes it does not recognise.
    polygon_domain["axes"]["composite2"] = \
        polygon_domain["axes"]["composite"]
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_empty_z_axis(validator, polygon_domain):
    ''' Invalid: Polygon domain with empty 'z' axis '''
    polygon_domain["axes"]["z"] = {"values": []}
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_multivalued_z_axis(validator, polygon_domain):
    ''' Invalid: Polygon domain with multi-valued 'z' axis '''
    # The vertical axis of a Polygon domain may hold at most one value.
    polygon_domain["axes"]["z"] = {"values": [1, 2]}
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_empty_t_axis(validator, polygon_domain):
    ''' Invalid: Polygon domain with empty 't' axis '''
    polygon_domain["axes"]["t"] = {"values": []}
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
def test_multivalued_t_axis(validator, polygon_domain):
    ''' Invalid: Polygon domain with multi-valued 't' axis '''
    # The time axis of a Polygon domain may hold at most one value.
    polygon_domain["axes"]["t"] = {"values": ["2008-01-01T04:00:00Z", "2008-01-01T05:00:00Z"]}
    with pytest.raises(ValidationError):
        validator.validate(polygon_domain)
| 3,860 | 1,313 |
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.utils import timezone
from rest_framework.authtoken.models import Token
class BerryManager(BaseUserManager):
    """Manager for the custom Berry user model."""

    def create_user(self, email, nickname, password=None):
        """Create and persist a user with a normalized email and hashed password.

        Raises ValueError when no email is given.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(
            email=self.normalize_email(email),
            nickname=nickname,
        )
        # set_password hashes the raw password (or marks it unusable if None).
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self):
        # NOTE(review): stub — Django's `createsuperuser` command expects this
        # method to accept the USERNAME_FIELD/REQUIRED_FIELDS and create an
        # admin user; as written it takes no arguments and does nothing.
        # TODO: implement before relying on `manage.py createsuperuser`.
        pass
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Signal handler: issue a DRF auth token for every newly created user."""
    # `created` is False on updates, so each user gets exactly one token here.
    if created:
        Token.objects.create(user=instance)
class Berry(AbstractBaseUser):
    """Custom user model identified by email, with a unique nickname."""

    # Core identity fields; email doubles as the login name (USERNAME_FIELD).
    email = models.EmailField(unique=True, max_length=255)
    nickname = models.CharField(unique=True, max_length=50)
    created_at = models.DateTimeField(default=timezone.now)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)

    objects = BerryManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['nickname']

    def get_full_name(self):
        # There is no separate full-name field; the nickname stands in for both.
        return self.nickname

    def get_short_name(self):
        return self.nickname

    @property
    def is_staff(self):
        # NOTE(review): always False — even is_admin users cannot enter the
        # Django admin site. Presumably intentional; confirm, otherwise this
        # likely should return self.is_admin.
        return False
| 1,516 | 453 |
import unittest
from app.models import Post,User
from app import db
class PostModelTest(unittest.TestCase):
    """Unit tests for the Post model."""

    def setUp(self):
        # Fixture: one user and one post authored by that user.
        self.user_Alice = User(username="Alice", password="potato", email="alice@ms.com")
        self.new_post = Post(id=1, category="All", title="Great Things Take Time",
                             blog="User Tests for blog posts", user=self.user_Alice)

    def tearDown(self):
        # Clean the tables so tests stay independent.
        Post.query.delete()
        User.query.delete()

    def test_check_instance_variables(self):
        # BUG FIX: the last three assertions used commas instead of attribute
        # access (`self.new_post,title` -> NameError on `title` at runtime).
        # Also switched from the deprecated assertEquals alias to assertEqual.
        self.assertEqual(self.new_post.category, "All")
        self.assertEqual(self.new_post.title, "Great Things Take Time")
        self.assertEqual(self.new_post.blog, "User Tests for blog posts")
        self.assertEqual(self.new_post.user, self.user_Alice)

    def test_save_post(self):
        self.new_post.save_post()
        self.assertTrue(len(Post.query.all()) > 0)

    def test_get_post_by_id(self):
        self.new_post.save_post()
        got_posts = Post.get_posts("All")
        self.assertTrue(len(got_posts) == 1)
import sys, pygame, math, random
from Wall import *
from Ghost import *
from Manpac import *
from Norb import *
from Score import *
pygame.init()
clock = pygame.time.Clock()  # caps the frame rate in the game loops below

# Window geometry and background colour.
width = 700
height = 700
size = width, height
# Black background; r, g, b are reused each frame to rebuild the colour tuple.
bgColor = r, g, b = 0, 0, 0
screen = pygame.display.set_mode(size)
# Outer loop: (re)builds the level, runs one game, then shows the
# game-over / win screens until the player restarts (R) or quits.
while True:
    # NOTE: a duplicate `ghosts` list used to be built here and was
    # overwritten before first use (dead code) — removed.
    player = Manpac([7, 7], (602, 602))
    # Level layout: normal orbs, two fruits, and four energizer orbs.
    orbs = [Norb([75, 75]),
            Norb([125, 75]),
            Norb([175, 75]),
            Norb([225, 75]),
            Norb([275, 75]),
            Norb([325, 75]),
            Norb([375, 75]),
            Norb([425, 75]),
            Norb([475, 75]),
            Norb([525, 75]),
            Norb([575, 75]),
            Norb([75, 125]),
            Norb([75, 175]),
            Norb([75, 225]),
            Norb([75, 275]),
            Norb([75, 325]),
            Norb([75, 375]),
            Norb([75, 425]),
            Norb([75, 475]),
            Norb([75, 525]),
            Norb([75, 575]),
            Fruit([75, 625]),
            Norb([125, 275]),
            Norb([125, 325]),
            Norb([125, 375]),
            Norb([125, 425]),
            Norb([175, 225]),
            Norb([175, 275]),
            Norb([175, 425]),
            Norb([175, 475]),
            Norb([225, 175]),
            Norb([225, 225]),
            Norb([225, 275]),
            Norb([225, 425]),
            Norb([225, 475]),
            Norb([225, 525]),
            Norb([225, 625]),
            Norb([175, 625]),
            Norb([125, 625]),
            Norb([275, 225]),
            Norb([275, 125]),
            Norb([275, 175]),
            Norb([275, 275]),
            Norb([275, 325]),
            Norb([275, 375]),
            Norb([275, 425]),
            Norb([275, 475]),
            Norb([275, 525]),
            Norb([275, 575]),
            Norb([275, 625]),
            Norb([325, 125]),
            Norb([325, 275]),
            Norb([325, 425]),
            Norb([325, 575]),
            Norb([325, 625]),
            Norb([375, 125]),
            Norb([375, 275]),
            Norb([375, 425]),
            Norb([375, 575]),
            Norb([375, 625]),
            Norb([425, 125]),
            Norb([425, 175]),
            Norb([425, 225]),
            Norb([425, 275]),
            Norb([425, 325]),
            Norb([425, 375]),
            Norb([425, 425]),
            Norb([425, 475]),
            Norb([425, 525]),
            Norb([425, 575]),
            Norb([425, 625]),
            Norb([475, 175]),
            Norb([475, 225]),
            Norb([475, 275]),
            Norb([475, 425]),
            Norb([475, 475]),
            Norb([475, 525]),
            Norb([475, 625]),
            Norb([525, 225]),
            Norb([525, 275]),
            Norb([525, 425]),
            Norb([525, 475]),
            Norb([525, 625]),
            Norb([575, 275]),
            Norb([575, 325]),
            Norb([575, 375]),
            Norb([575, 425]),
            Norb([575, 625]),
            Fruit([625, 75]),
            Norb([625, 125]),
            Norb([625, 175]),
            Norb([625, 225]),
            Norb([625, 275]),
            Norb([625, 325]),
            Norb([625, 375]),
            Norb([625, 425]),
            Norb([625, 475]),
            Norb([625, 525]),
            Norb([625, 575]),
            Norb([625, 625]),
            Eorb([525, 175]),
            Eorb([175, 175]),
            Eorb([175, 525]),
            Eorb([525, 525]),
            ]
    # Maze walls, each defined by its top-left and bottom-right corners.
    walls = [Wall([0, 0], [800, 50]),        # 0
             Wall([0, 50], [50, 300]),
             Wall([0, 400], [50, 650]),
             Wall([0, 650], [700, 700]),
             Wall([650, 400], [700, 650]),
             Wall([650, 50], [700, 300]),    # 5
             Wall([100, 100], [250, 150]),
             Wall([100, 150], [150, 250]),
             Wall([450, 100], [600, 150]),
             Wall([550, 150], [600, 250]),
             Wall([100, 450], [150, 600]),   # 10
             Wall([100, 550], [250, 600]),
             Wall([450, 550], [600, 600]),
             Wall([550, 450], [600, 600]),
             Wall([150, 300], [250, 400]),
             Wall([300, 150], [400, 250]),   # 15
             Wall([450, 300], [550, 400]),
             Wall([300, 450], [400, 550]),   # 17
             ]
    # Ghosts spawn on a random grid cell near the center of the maze.
    ghosts = [Ghost("purple", [random.randint(5, 8) * 50 + 25, random.randint(5, 8) * 50 + 25]),
              Ghost("blue", [random.randint(5, 8) * 50 + 25, random.randint(5, 8) * 50 + 25]),
              Ghost("green", [random.randint(5, 8) * 50 + 25, random.randint(5, 8) * 50 + 25])]
    score = Score("Score: ", (125, 25))
    lives = Score("Lives: ", (125, 675))

    # Main game loop: runs until the player dies or all orbs are eaten.
    while player.living and len(orbs) > 0:
        # --- input ---
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    player.go("up")
                elif event.key == pygame.K_DOWN:
                    player.go("down")
                elif event.key == pygame.K_LEFT:
                    player.go("left")
                elif event.key == pygame.K_RIGHT:
                    player.go("right")
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_UP:
                    player.go("stop up")
                elif event.key == pygame.K_DOWN:
                    player.go("stop down")
                elif event.key == pygame.K_LEFT:
                    player.go("stop left")
                elif event.key == pygame.K_RIGHT:
                    player.go("stop right")
        # --- update ---
        player.update(size)
        score.update(player.score)
        lives.update(player.lives)
        for wall in walls:
            player.collideWall(wall)
        for ghost in ghosts:
            ghost.update(size)
            for wall in walls:
                ghost.collideWall(wall)
            if ghost.living:
                if player.collideObject(ghost):
                    if ghost.energized:
                        # Weakened ghosts die on contact...
                        ghost.die()
                    else:
                        # ...otherwise the player loses a life and respawns.
                        player.die()
                        player.rect.center = (625, 625)
        for orb in orbs:
            orb.update(size)
            if player.collideObject(orb):
                player.score += orb.value
                if orb.kind == "energizer":
                    for ghost in ghosts:
                        ghost.weaken()
                orb.living = False
                # FIX: was a Python-2 print statement; valid in py2 and py3.
                print(player.score)
        # BUG FIX: collected orbs were removed with list.remove() while
        # iterating the same list, which skips elements; rebuild instead.
        orbs = [orb for orb in orbs if orb.living]
        # --- draw ---
        bgColor = r, g, b
        screen.fill(bgColor)
        for orb in orbs:
            screen.blit(orb.image, orb.rect)
        screen.blit(player.image, player.rect)
        for ghost in ghosts:
            if ghost.living:
                screen.blit(ghost.image, ghost.rect)
        for wall in walls:
            screen.blit(wall.image, wall.rect)
        screen.blit(score.image, score.rect)
        screen.blit(lives.image, lives.rect)
        pygame.display.flip()
        clock.tick(60)

    # FIX: Python-2 print statements converted to function-call form.
    print(len(orbs))
    if len(orbs) == 1:
        print(orbs[0].rect.center)

    # Game-over screen: wait for R (restart) or quit.
    while not player.living:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_r:
                    player = Manpac([7, 7], (602, 602))
        bg = pygame.image.load("MenuStuff/GameOver.png")
        bgrect = bg.get_rect()
        bgColor = r, g, b
        screen.fill(bgColor)
        screen.blit(bg, bgrect)
        pygame.display.flip()
        clock.tick(60)

    # Win screen: wait for R (restart; seeding one orb exits this loop) or quit.
    while len(orbs) <= 0:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_r:
                    player = Manpac([7, 7], (602, 602))
                    orbs += [Norb([75, 75])]
        bg = pygame.image.load("MenuStuff/Win screen.png")
        bgrect = bg.get_rect()
        bgColor = r, g, b
        screen.fill(bgColor)
        screen.blit(bg, bgrect)
        pygame.display.flip()
        clock.tick(60)
| 8,439 | 3,230 |
import logging
import multiprocessing
import sys
from Bio import Entrez
from tqdm import tqdm
from SNDG import execute, mkdir
from SNDG.WebServices import download_file
from SNDG.WebServices.NCBI import NCBI
Entrez.email = 'A.N.Other@example.com'
_log = logging.getLogger(__name__)
from collections import defaultdict
from SNDG.Annotation.GenebankUtils import GenebankUtils
gut_microbiote_assemblies = [x.strip() for x in """GCA_000712235.1
GCA_002017855.1
GCA_002215605.1
GCF_000144975.1
GCF_000146835.1
GCF_000148995.1
GCF_000151245.1
GCF_000153885.1
GCF_000153905.1
GCF_000153925.1
GCF_000154065.1
GCF_000154085.1
GCF_000154105.1
GCF_000154205.1
GCF_000154285.1
GCF_000154305.1
GCF_000154345.1
GCF_000154365.1
GCF_000154385.1
GCF_000154405.1
GCF_000154425.1
GCF_000154465.1
GCF_000154485.1
GCF_000154505.1
GCF_000154525.1
GCF_000154565.1
GCF_000154805.1
GCF_000154825.1
GCF_000154845.1
GCF_000154865.1
GCF_000154985.1
GCF_000155085.1
GCF_000155205.1
GCF_000155435.1
GCF_000155495.1
GCF_000155835.1
GCF_000155855.1
GCF_000155875.1
GCF_000155955.1
GCF_000155975.1
GCF_000155995.1
GCF_000156015.1
GCF_000156035.2
GCF_000156055.1
GCF_000156075.1
GCF_000156175.1
GCF_000156195.1
GCF_000156215.1
GCF_000156375.1
GCF_000156395.1
GCF_000156495.1
GCF_000156515.1
GCF_000156535.1
GCF_000156655.1
GCF_000157015.1
GCF_000157055.1
GCF_000157115.2
GCF_000157935.1
GCF_000157955.1
GCF_000157975.1
GCF_000157995.1
GCF_000158035.1
GCF_000158055.1
GCF_000158075.1
GCF_000158195.2
GCF_000158315.2
GCF_000158435.2
GCF_000158455.1
GCF_000158475.2
GCF_000158555.2
GCF_000158655.1
GCF_000158835.2
GCF_000159175.1
GCF_000159195.1
GCF_000159215.1
GCF_000159495.1
GCF_000159715.1
GCF_000159915.2
GCF_000159975.2
GCF_000160095.1
GCF_000160175.1
GCF_000160455.2
GCF_000160575.1
GCF_000160595.1
GCF_000161955.2
GCF_000162075.1
GCF_000162115.1
GCF_000162575.1
GCF_000163095.1
GCF_000163735.1
GCF_000163955.1
GCF_000164175.1
GCF_000169015.1
GCF_000169035.1
GCF_000169255.2
GCF_000169475.1
GCF_000172135.1
GCF_000172175.1
GCF_000173355.1
GCF_000173795.1
GCF_000173815.1
GCF_000173975.1
GCF_000174195.1
GCF_000174215.1
GCF_000177015.3
GCF_000178195.1
GCF_000178215.1
GCF_000179075.1
GCF_000185325.1
GCF_000185345.1
GCF_000185665.1
GCF_000185685.2
GCF_000185705.2
GCF_000185845.1
GCF_000186505.1
GCF_000186545.1
GCF_000187265.1
GCF_000187895.1
GCF_000188175.1
GCF_000188195.1
GCF_000191845.1
GCF_000191865.1
GCF_000195635.1
GCF_000204455.1
GCF_000205025.1
GCF_000205165.1
GCF_000213555.1
GCF_000218325.1
GCF_000218405.2
GCF_000220825.1
GCF_000220865.1
GCF_000224635.1
GCF_000224655.1
GCF_000225685.1
GCF_000225705.1
GCF_000225745.1
GCF_000225845.1
GCF_000227195.1
GCF_000227255.2
GCF_000231275.1
GCF_000233455.1
GCF_000233495.1
GCF_000233955.1
GCF_000234155.1
GCF_000234175.1
GCF_000235885.1
GCF_000238035.1
GCF_000238615.1
GCF_000238635.1
GCF_000238655.1
GCF_000238675.1
GCF_000238695.1
GCF_000238735.1
GCF_000238755.1
GCF_000239255.1
GCF_000239295.1
GCF_000239335.1
GCF_000239735.1
GCF_000241405.1
GCF_000242215.1
GCF_000242435.1
GCF_000243175.1
GCF_000243215.1
GCF_000245775.1
GCF_000250875.1
GCF_000261205.1
GCF_000273465.1
GCF_000273585.1
GCF_000296445.1
GCF_000296465.1
GCF_000297815.1
GCF_000315485.1
GCF_000320405.1
GCF_000332875.2
GCF_000345045.1
GCF_000349975.1
GCF_000376405.1
GCF_000381365.1
GCF_000382085.1
GCF_000398925.1
GCF_000411235.1
GCF_000411275.1
GCF_000411295.1
GCF_000411315.1
GCF_000411335.1
GCF_000411415.1
GCF_000412335.1
GCF_000413335.1
GCF_000413355.1
GCF_000413375.1
GCF_000466385.1
GCF_000466465.2
GCF_000466485.1
GCF_000466565.1
GCF_000468015.1
GCF_000469305.1
GCF_000469345.1
GCF_000469445.2
GCF_000479045.1
GCF_000507845.1
GCF_000507865.1
GCF_000517805.1
GCF_000690925.1
GCF_000760655.1
GCF_000763035.1
GCF_000763055.1
GCF_000771165.1
GCF_000969835.1
GCF_000969845.1
GCF_001025135.1
GCF_001025155.1
GCF_001185345.1
GCF_001311295.1
GCF_001315785.1
GCF_001434655.1
GCF_001434945.1
GCF_001435475.1
GCF_001435665.1
GCF_001436305.1
GCF_001941425.1
GCF_002222595.1
GCF_900129655.1
GCF_900167285.1
GCF_001025195.1
GCF_001025215.1
GCF_001434175.1""".split("\n")]
import gzip
class Offtarget(object):
    """Helpers to build and query off-target databases (DEG essential genes,
    human proteome, gut microbiome) used for drug-target prioritisation."""

    DEFAULT_GUT_FILENAME = "gut_microbiota.fasta.gz"
    DEFAULT_HUMAN_FILENAME = "human_uniprot100.fa.gz"
    # DEG protein fasta downloads: p = prokaryotes, a = archaea, e = eukaryotes.
    DEG_PROT_URL = {"p": "http://tubic.tju.edu.cn/deg_test/public/download/DEG10.aa.gz",
                    "a": "http://tubic.tju.edu.cn/deg_test/public/download/DEG30.aa.gz",
                    "e": "http://tubic.tju.edu.cn/deg_test/public/download/DEG20.aa.gz"
                    }
    DEG_FAA_NAMES = {
        "a": "degaa-a.dat", "p": "degaa-p.dat", "e": "degaa-e.dat"
    }

    @staticmethod
    def download_deg(dst="/data/databases/deg/"):
        """Download and gunzip the three DEG protein sets into *dst*."""
        for x in ["p", "e", "a"]:
            # NOTE: 'ovewrite' is the (misspelled) keyword actually defined by
            # SNDG.WebServices.download_file.
            download_file(Offtarget.DEG_PROT_URL[x], f"{dst}/{Offtarget.DEG_FAA_NAMES[x]}.gz", ovewrite=True)
            execute(f"gunzip -f {dst}/{Offtarget.DEG_FAA_NAMES[x]}.gz")
            # execute(f"makeblastdb -dbtype prot -in {dst}/{Offtarget.DEG_FAA_NAMES[x]}")

    @staticmethod
    def download_human_prots(dst="/data/databases/human/"):
        """Download the UniRef100 human proteome fasta into *dst*.

        :return: path of the downloaded (gzipped) fasta file
        """
        file_path = dst + Offtarget.DEFAULT_HUMAN_FILENAME
        unip_url = "https://www.uniprot.org/uniref/?query=uniprot:(taxonomy:%22Homo%20sapiens%20(Human)%20[9606]%22)%20identity:1.0&format=fasta&force=true&compress=yes"
        download_file(unip_url, file_path, ovewrite=True, timeout=120)
        return file_path

    @staticmethod
    def create_human_microbiome(dst="/data/databases/human/", update=False):
        """Build a gzipped protein fasta covering the gut microbiome assembly list.

        Each assembly's genbank file is downloaded into dst/gut_microbiota_assemblies/
        unless already present (or *update* is True), then its proteins are streamed
        into dst/gut_microbiota.fasta.gz.

        :return: path of the combined fasta file
        """
        # Fix: 'os' was only imported inside the __main__ block, so calling this
        # method from library code raised NameError.  Import locally here.
        import os
        dst_accs = dst + "gut_microbiota_assemblies/"
        mkdir(dst_accs)
        final_file = dst + Offtarget.DEFAULT_GUT_FILENAME
        utils = GenebankUtils()
        with gzip.open(final_file, "wt") as h:
            for accession in tqdm(gut_microbiote_assemblies, file=sys.stderr):
                genome_path = dst_accs + accession + ".genomic.gbff.gz"
                if update or not os.path.exists(genome_path):
                    genome_path = NCBI.download_assembly(accession, dst_accs)
                utils.proteins(genome_path, h)
        return final_file

    @staticmethod
    def count_organism_from_microbiome_blast(tbl_blast_result_path, microbiome_fasta, identity_threshold=0.4,
                                             out_tbl=None, gene_id_column="id"):
        """Count the distinct microbiome organisms hit by each query protein.

        :param tbl_blast_result_path: blast/diamond outfmt-6 style table with a header row
        :param microbiome_fasta: (optionally gzipped) fasta whose headers end in "[Organism name]"
        :param identity_threshold: keep hits whose identity fraction is >= this value
        :param out_tbl: optional path for a TSV summary report
        :param gene_id_column: header label for the query-id column of the report
        :return: dict mapping query id -> set of organism names
        :raises LookupError: if a fasta header lacks the "[...]" organism suffix
        """
        prot_org_map = {}
        organisms = []
        with (gzip.open(microbiome_fasta, "rt") if microbiome_fasta.endswith(".gz") else open(microbiome_fasta)) as h:
            for line in h:
                if line.startswith(">"):
                    seqid = line.split()[0].strip().replace(">", "")
                    try:
                        org = line.replace("[[", "[").split("[")[1].strip()[:-1]
                    except IndexError:
                        err = "fasta does not have the organism name at the fasta header."
                        err += "example: >HMPREF1002_RS00015 alpha/beta hydrolase [Porphyromonas sp. 31_2]"
                        raise LookupError(err)
                    organisms.append(org)
                    prot_org_map[seqid] = org
        organisms_count = len(set(organisms))
        query_orgs = defaultdict(lambda: [])
        with open(tbl_blast_result_path) as h:
            # Stream the table instead of materialising it with list(h)[1:].
            next(h, None)  # skip header row
            for l in h:
                query, hit, identity = l.split()[:3]
                identity = float(identity) / 100.0
                if identity_threshold <= identity:
                    query_orgs[query].append(prot_org_map[hit])
        for query, hits in query_orgs.items():
            query_orgs[query] = set(hits)
        if out_tbl:
            with open(out_tbl, "w") as h:
                h.write("\t".join(
                    [gene_id_column, "gut_microbiote_count", "gut_microbiote_norm", "gut_microbiote_organisms"]) + "\n")
                for query, hits in query_orgs.items():
                    h.write("\t".join(
                        [query, str(len(hits)), str(len(hits) * 1.0 / organisms_count), ";".join(hits)]) + "\n")
        return query_orgs

    @staticmethod
    def offtargets(proteome, dst_resutls, offtarget_db, cpus=multiprocessing.cpu_count(), min_identity=50):
        """Diamond blastp of *proteome* vs *offtarget_db*, keeping hits with
        identity > min_identity percent; results are written to *dst_resutls*.

        (Parameter name 'dst_resutls' is a typo kept for call-site compatibility.)
        """
        cmd = f"diamond blastp --evalue 1e-5 --max-hsps 1 --outfmt 6 --max-target-seqs 10000 --db {offtarget_db} --query {proteome} --threads {cpus}|awk '$3>{min_identity}' > {dst_resutls}"
        execute(cmd)
        return dst_resutls
if __name__ == "__main__":
    # CLI entry point: 'download' fetches/builds the off-target databases,
    # 'gut_microbiote_blast' runs diamond against the gut microbiome fasta
    # and summarises hit organisms per query protein.
    from SNDG import init_log
    import argparse
    import os
    from SNDG.Sequence import smart_parse
    parser = argparse.ArgumentParser(description='Offtarget Utilities')
    subparsers = parser.add_subparsers(help='commands', description='valid subcommands', required=True, dest='command')
    gut_download = subparsers.add_parser('download', help='Download offtarget data')
    gut_download.add_argument('-db', '--databases', choices=["all", "deg", "human", "gut_microbiote"], default="all")
    gut_download.add_argument('-o', '--output', help="output_directory", default="/data/databases/")
    gut_download.add_argument('--force', action="store_true")
    gut_microbiote_blast = subparsers.add_parser('gut_microbiote_blast',
                                                 help='Runs blastp against gut microbiote and counts organisms')
    gut_microbiote_blast.add_argument('input_faa')
    gut_microbiote_blast.add_argument('-o', '--output', help="output_directory", default="./")
    gut_microbiote_blast.add_argument('-db', '--database', help="gut microbiome fasta",
                                      default="/data/databases/human/gut_microbiota.fasta.gz")
    gut_microbiote_blast.add_argument('--cpus', default=multiprocessing.cpu_count())
    gut_microbiote_blast.add_argument('--force', action="store_true")
    args = parser.parse_args()
    init_log()
    if args.command == "download":
        if args.databases in ["all", "gut_microbiote"]:
            path = f'{args.output}/gut_microbiote/{Offtarget.DEFAULT_GUT_FILENAME}'
            if args.force or not os.path.exists(path):
                # NOTE(review): create_human_microbiome treats dst as a *directory*
                # (it appends subdirs/filenames) but receives a fasta file path here
                # -- confirm intended behaviour.
                path = Offtarget.create_human_microbiome(dst=path)
            else:
                sys.stderr.write(f'{path} already exists, overwrite using --force')
            filename = os.path.basename(path)
            # NOTE(review): "(unknown)" below looks like a corrupted placeholder
            # for the makeblastdb -out target -- confirm the intended name.
            execute(
                f"zcat {path} | makeblastdb -title gut_microbiote -out {args.output}/human/(unknown) -dbtype prot -in -")
        if args.databases in ["all", "human"]:
            path = f'{args.output}/human/'
            if args.force or not os.path.exists(path + Offtarget.DEFAULT_HUMAN_FILENAME):
                path = Offtarget.download_human_prots(dst=path)
            else:
                sys.stderr.write(f'{path} already exists, overwrite using --force')
            filename = os.path.basename(path)
            # NOTE(review): -out points at the gzipped fasta itself; makeblastdb
            # normally writes index files next to a database *name* -- verify.
            execute(
                f"zcat {path}{Offtarget.DEFAULT_HUMAN_FILENAME} | makeblastdb -title human -out {path}{Offtarget.DEFAULT_HUMAN_FILENAME} -dbtype prot -in -")
        if args.databases in ["all", "deg"]:
            mkdir(f'{args.output}/deg/')
            Offtarget.download_deg(f'{args.output}/deg/')
    elif args.command == "gut_microbiote_blast":
        blast_gut_path = f'{args.output}/gut_microbiome.blast.tbl'
        gut_result_path = f'{args.output}/gut_microbiome.tbl'
        # if not os.path.exists(args.database + ".phr"):
        #     raise FileNotFoundError(f"{args.database} index files could not be found. Run makeblastdb")
        if args.force or not os.path.exists(blast_gut_path):
            Offtarget.offtargets(args.input_faa, blast_gut_path, offtarget_db=args.database, cpus=args.cpus)
        else:
            sys.stderr.write(f'{blast_gut_path} already exists, overwrite using --force')
        Offtarget.count_organism_from_microbiome_blast(blast_gut_path, args.database, identity_threshold=0.5,
                                                       out_tbl=gut_result_path)
| 11,871 | 6,180 |
"""Cut properly some text."""
import re
END_OF_SENTENCE_CHARACTERS = {".", ";", "!", "?"}
def properly_cut_text(
    text: str, start_idx: int, end_idx: int, nbr_before: int = 30, nbr_after: int = 30
) -> str:
    """Cut *text* around [start_idx, end_idx], snapping the cut to sentence ends.

    The interval is first padded by nbr_before/nbr_after characters; the end is
    then extended to the next sentence terminator (or end of text) and the start
    pulled back to just after the previous one (or 0 when there is none).
    Returns the stripped substring.
    """
    str_length = len(text)
    start_idx = max(0, start_idx - nbr_before)
    end_idx = end_idx + nbr_after
    # Extend the end to the next '.', '?' or '!' after the padded window.
    # A '.' must not be followed by a digit, so decimals like "3.5" don't
    # terminate a sentence.  (IGNORECASE removed: it is a no-op on punctuation.)
    match = re.search(r"\.[^\d]|\?|\!", text[end_idx:])
    if match:
        end_idx = match.end() + end_idx
    else:
        end_idx = str_length
    # Pull the start back to just after the last sentence terminator before the
    # padded window.  Bug fix: when start_idx is 0 the original slice
    # text[:start_idx - 1] became text[:-1], which scanned almost the whole text
    # and wrongly dropped the leading sentence -- skip the search in that case.
    if start_idx > 0:
        match = re.search(r"(\.|\?|\!)(?!.*\1)", text[: start_idx - 1])
        start_idx = match.end() + 1 if match else 0
    return text[start_idx:end_idx].strip()
| 855 | 327 |
# [3차] n진수 게임
import string
tmp = string.digits + string.ascii_uppercase[:6]  # digit alphabet "0123456789ABCDEF"


def convert(n, base):
    """Return non-negative *n* rendered in *base* (2..16) using digits 0-9A-F."""
    if n == 0:
        return tmp[0]
    digits = []
    while n:
        n, remainder = divmod(n, base)
        digits.append(tmp[remainder])
    return "".join(reversed(digits))


def solution(n, t, m, p):
    """Base-n counting game: *m* players take turns saying one digit of the
    concatenated base-*n* number stream; return the first *t* digits spoken
    by player *p* (1-based)."""
    needed = t * m
    stream = []
    value = 0
    # Concatenate 0, 1, 2, ... in base n until at least t*m digits exist.
    while len(stream) < needed:
        stream.extend(convert(value, n))
        value += 1
    # Player p speaks positions p-1, p-1+m, ... ; keep that player's first t digits.
    return "".join(stream[p - 1::m])[:t]
'''
채점을 시작합니다.
정확성 테스트
테스트 1 〉 통과 (0.01ms, 10.3MB)
테스트 2 〉 통과 (0.02ms, 10.3MB)
테스트 3 〉 통과 (0.02ms, 10.3MB)
테스트 4 〉 통과 (0.03ms, 10.4MB)
테스트 5 〉 통과 (0.11ms, 10.3MB)
테스트 6 〉 통과 (0.11ms, 10.4MB)
테스트 7 〉 통과 (0.21ms, 10.3MB)
테스트 8 〉 통과 (0.14ms, 10.3MB)
테스트 9 〉 통과 (0.12ms, 10.2MB)
테스트 10 〉 통과 (0.14ms, 10.3MB)
테스트 11 〉 통과 (0.14ms, 10.3MB)
테스트 12 〉 통과 (0.16ms, 10.3MB)
테스트 13 〉 통과 (0.14ms, 10.3MB)
테스트 14 〉 통과 (24.25ms, 10.4MB)
테스트 15 〉 통과 (24.34ms, 10.4MB)
테스트 16 〉 통과 (22.35ms, 10.4MB)
테스트 17 〉 통과 (1.03ms, 10.2MB)
테스트 18 〉 통과 (1.30ms, 10.3MB)
테스트 19 〉 통과 (0.36ms, 10.3MB)
테스트 20 〉 통과 (1.15ms, 10.4MB)
테스트 21 〉 통과 (6.58ms, 10.3MB)
테스트 22 〉 통과 (2.70ms, 10.3MB)
테스트 23 〉 통과 (8.42ms, 10.3MB)
테스트 24 〉 통과 (11.47ms, 10.4MB)
테스트 25 〉 통과 (10.08ms, 10.3MB)
테스트 26 〉 통과 (3.43ms, 10.3MB)
채점 결과
정확성: 100.0
합계: 100.0 / 100.0
''' | 1,307 | 1,104 |
from abc import abstractmethod
from typing import Any, List
import torch
def interpolate_vectors(v1: torch.Tensor, v2: torch.Tensor, n: int) -> torch.Tensor:
    """Return *n* points linearly interpolated from *v1* to *v2* (both inclusive).

    The result has shape (n, *v1.shape).  Bug fix: for n == 1 the original
    step = (v2 - v1) / (n - 1) divided by zero and 0 * inf produced NaNs;
    a single point is now just v1.
    """
    if n == 1:
        return v1.unsqueeze(0)
    step = (v2 - v1) / (n - 1)
    return torch.stack([v1 + i * step for i in range(n)], dim=0)
def reparameterize(mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
    """
    Reparameterization trick: draw z ~ N(mu, sigma^2) as z = mu + sigma * eps
    with eps ~ N(0, I), keeping the sample differentiable w.r.t. mu and log_var.
    :param mu: (Tensor) Mean of the latent Gaussian [B x D]
    :param log_var: (Tensor) Log-variance of the latent Gaussian [B x D]
    :return: (Tensor) Sample [B x D]
    """
    sigma = torch.exp(0.5 * log_var)
    noise = torch.randn_like(sigma)
    return mu + sigma * noise
class BaseVAE(torch.nn.Module):
    """Abstract base class for variational auto-encoders.

    Concrete subclasses must implement `forward` and `loss_function`;
    `encode`, `decode` and `generate` default to raising NotImplementedError.
    """

    def __init__(self) -> None:
        super().__init__()

    # Fix: the original annotation `(torch.Tensor, torch.Tensor)` evaluates to a
    # tuple *instance*, not a type; the string form is a valid tuple-type hint.
    def encode(self, inp: torch.Tensor) -> "tuple[torch.Tensor, torch.Tensor]":
        """Map an input batch to latent distribution parameters (mu, log_var)."""
        raise NotImplementedError

    def decode(self, inp: torch.Tensor) -> torch.Tensor:
        """Map latent codes back to input space."""
        raise NotImplementedError

    def generate(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
        """Return a reconstruction/sample conditioned on *x*."""
        raise NotImplementedError

    @abstractmethod
    def forward(self, *inputs: torch.Tensor) -> List[torch.Tensor]:
        pass

    @abstractmethod
    def loss_function(self, *inputs: Any, **kwargs) -> dict:
        pass
| 1,292 | 475 |
# -*- coding: utf-8 -*-
import re
import pandas as pd
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
import logging
from .utils import isNull
class Triplifier(object):
    """Generates RDF triples (N-Triples lines) from a pandas DataFrame using the
    entity/relation mappings held by *config*.

    The config object is expected to expose: ``rules``, ``entities``,
    ``relations``, ``ontology``, ``get_list(column)``, ``get_entity(alias)``
    and ``_get_uri_from_label(label)``.
    """

    def __init__(self, config):
        self.config = config
        # Columns that a config rule declares as integers; their values are
        # coerced in _get_value so e.g. "3.0" is emitted as 3.
        self.integer_columns = []
        for rule in self.config.rules:
            if rule['rule'].lower() == 'integer':
                self.integer_columns.extend(rule['columns'])

    def triplify(self, data_frame):
        """
        Generate triples using the given data_frame and the config mappings
        :param data_frame: pandas DataFrame
        :return: list of triples for the given data_frame data
        """
        triples = []
        data_frame = data_frame.fillna('')
        for index, row in data_frame.iterrows():
            triples.extend(self._generate_triples_for_row(row))
        triples.extend(self._generate_triples_for_relation_predicates())
        triples.extend(self._generate_triples_for_entities())
        triples.append(self._generate_ontology_import_triple())
        return triples

    def _generate_triples_for_chunk(self, chunk):
        """Row-triple generation for one DataFrame chunk (parallel-map helper)."""
        triples = []
        for index, row in chunk.iterrows():
            triples.extend(self._generate_triples_for_row(row))
        return triples

    def _generate_triples_for_row(self, row):
        """Return the instance triples (rdf:type, datatype properties and
        relations) describing one DataFrame row."""
        row_triples = []
        for entity in self.config.entities:
            s = "<{}{}>".format(entity['identifier_root'], self._get_value(row, entity['unique_key']))
            if entity['concept_uri'] != 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type':
                o = "<{}>".format(entity['concept_uri'])
                row_triples.append("{} <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> {}".format(s, o))
            for column, uri in entity['columns']:
                val = self._get_value(row, column)
                list_for_column = self.config.get_list(column)
                # if there is a specified list for this column & the field contains a defined_by,
                # substitute the defined_by value for the list field
                literal_val = True
                if list_for_column and "http://www.w3.org/1999/02/22-rdf-syntax-ns#type" in uri:
                    for i in list_for_column:
                        if i['field'] == val and i['defined_by']:
                            val = i['defined_by']
                            literal_val = False
                            break
                # if this is not a list but URI specified is rdf:type for mapping column then we
                # assume this is an object property and attempt to convert the label to a URI
                elif "http://www.w3.org/1999/02/22-rdf-syntax-ns#type" in uri:
                    val = self.config._get_uri_from_label(val)
                    literal_val = False
                # format and print all of the instance data triples
                if (not isNull(val)):
                    p = "<{}>".format(uri)
                    if literal_val:
                        # 'datatype' (was 'type') avoids shadowing the builtin
                        datatype = self._get_type(val)
                        o = "\"{}\"^^<http://www.w3.org/2001/XMLSchema#{}>".format(val, datatype)
                    else:
                        o = "<{}>".format(str(val))
                    row_triples.append("{} {} {}".format(s, p, o))
        # format and print all triples describing relations
        for relation in self.config.relations:
            try:
                subject_entity = self.config.get_entity(relation['subject_entity_alias'])
                object_entity = self.config.get_entity(relation['object_entity_alias'])
                s = "<{}{}>".format(subject_entity['identifier_root'], self._get_value(row, subject_entity['unique_key']))
                p = "<{}>".format(relation['predicate'])
                o = "<{}{}>".format(object_entity['identifier_root'], self._get_value(row, object_entity['unique_key']))
                row_triples.append("{} {} {}".format(s, p, o))
            except Exception as err:
                # chain the original exception so the root cause is preserved
                raise RuntimeError("Error assigning relations between a subject and an object. "
                                   "Check to be sure each relation maps to an entity alias") from err
        return row_triples

    def _generate_triples_for_relation_predicates(self):
        """Declare each relation predicate as an owl:ObjectProperty."""
        predicate_triples = []
        for relation in self.config.relations:
            s = "<{}>".format(relation['predicate'])
            p = "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>"
            o = "<http://www.w3.org/2002/07/owl#ObjectProperty>"
            predicate_triples.append("{} {} {}".format(s, p, o))
        return predicate_triples

    def _generate_triples_for_entities(self):
        """Schema triples: property declarations plus one class triple per entity."""
        entity_triples = []
        for entity in self.config.entities:
            entity_triples.extend(self._generate_property_triples(entity['columns']))
            if entity['concept_uri'] != 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type':
                entity_triples.append(self._generate_class_triple(entity['concept_uri']))
        return entity_triples

    def _generate_ontology_import_triple(self):
        """owl:imports triple pointing at the configured ontology."""
        s = "<urn:importInstance>"
        p = "<http://www.w3.org/2002/07/owl#imports>"
        o = "<{}>".format(self.config.ontology)
        return "{} {} {}".format(s, p, o)

    @staticmethod
    def _generate_class_triple(concept_uri):
        """Declare *concept_uri* as an rdfs:Class."""
        s = "<{}>".format(concept_uri)
        p = "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>"
        o = "<http://www.w3.org/2000/01/rdf-schema#Class>"
        return "{} {} {}".format(s, p, o)

    @staticmethod
    def _generate_property_triples(properties):
        """
        generate triples for the properties of each entity
        """
        property_triples = []
        for column, uri in properties:
            s = "<{}>".format(uri)
            p = "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>"
            o = "<http://www.w3.org/1999/02/22-rdf-syntax-ns#Property>"
            property_triples.append("{} {} {}".format(s, p, o))
            o2 = "<http://www.w3.org/2002/07/owl#DatatypeProperty>"
            property_triples.append("{} {} {}".format(s, p, o2))
            p2 = "<http://www.w3.org/2000/01/rdf-schema#isDefinedBy>"
            property_triples.append("{} {} {}".format(s, p2, s))
        return property_triples

    def _get_value(self, row_data, column):
        """Fetch *column* from the row as a string, coercing integer-rule columns."""
        coerce_integer = False
        if column in self.integer_columns:
            coerce_integer = True
        # TODO: This line breaks in certain situations. Workaround for now: return an empty string on exception
        try:
            val = str(row_data[column])
        except Exception:  # narrowed from bare except so SystemExit/KeyboardInterrupt propagate
            return ''
        # need to perform coercion here as pandas can't store ints along floats and strings. The only way to coerce
        # to ints is to drop all strings and null values. We don't want to do this in the case of a warning.
        if coerce_integer:
            return int(float(val)) if re.fullmatch(r"[+-]?\d+(\.0+)?", str(val)) else val
        return val

    @staticmethod
    def _get_type(val):
        """Infer the XSD datatype name (integer/float/string) for a literal."""
        if re.fullmatch(r"[+-]?\d+", str(val)):
            return 'integer'
        elif re.fullmatch(r"[+-]?\d+\.\d+", str(val)):
            return 'float'
        else:
            return 'string'
| 7,293 | 2,255 |
from django.urls import path, include
from rest_framework import routers
from core import views
# DRF router exposing list/detail routes for the core viewsets.
router = routers.DefaultRouter()
# NOTE(review): DRF renamed `base_name` to `basename` in 3.9 and removed the
# old spelling in later releases -- confirm the pinned djangorestframework version.
router.register('coffe_types', views.CoffeTypeViewSet, base_name='coffe_types')
router.register('harvests', views.HarvestViewSet, base_name='harvests')
router.register(
    'storage_report', views.StorageReportViewSet, base_name='storage_report'
)
# URL namespace used when reversing, e.g. 'core:coffe_types-list'.
app_name = 'core'
urlpatterns = [
    path('', include(router.urls)),
]
| 455 | 146 |
from django.db import models
from core.utils.cnpj_is_valid import cnpj_is_valid
class Customer(models.Model):
    """A customer company, uniquely identified by its CNPJ (the 14-digit
    Brazilian company registry number, stored without punctuation)."""
    name = models.CharField(max_length=50, null=False, blank=False)
    address = models.CharField(max_length=50, null=False, blank=False)
    # NOTE: validators run during full_clean()/form validation, not on raw .save().
    cnpj = models.CharField(max_length=14, unique=True, null=False, blank=False, validators=[cnpj_is_valid])

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.name
| 409 | 140 |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import numpy as np
from std_msgs.msg import Int32
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
'''
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
UPDATE_RATE = 30 #hz
NO_WP = -1
DECEL_RATE = 1.5 # m/s^2
STOPLINE = 3 # waypoints behind stopline to stop
DELAY = 20. # update difference between this node and twist_controller in hz
class WaypointUpdater(object):
    """ROS node that publishes the next LOOKAHEAD_WPS base waypoints ahead of
    the car on /final_waypoints, ramping velocities down to stop at a red
    light's stopline when one is reported on /traffic_waypoint."""

    def __init__(self, rate_hz=UPDATE_RATE):
        rospy.init_node('waypoint_updater')
        self.pose = None  # latest PoseStamped from /current_pose (cleared after each publish)
        self.base_waypoints = None  # full static track Lane from /base_waypoints
        self.waypoints_2d = None  # [[x, y], ...] used to build the KDTree
        self.waypoint_ktree = None  # KDTree for nearest-waypoint queries
        self.freq = rate_hz  # publish rate in hz
        self.nearest_wp_idx = NO_WP
        self.stop_wp = NO_WP  # waypoint index of the next red-light stopline (NO_WP if none)
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
        # NOTE: blocks here until ROS shutdown; __init__ intentionally never returns.
        self.loop()

    def loop(self):
        """Spin at self.freq hz; publish only when a fresh pose has arrived and
        the base waypoints / KDTree are initialised."""
        rate = rospy.Rate(self.freq)
        while not rospy.is_shutdown():
            # NOTE(review): `is not None` would be the idiomatic comparison here.
            if (self.pose != None) and \
                    (self.base_waypoints != None) and \
                    (self.waypoint_ktree != None):
                self.nearest_wp_idx = self.get_nearest_wp_indx()
                self.publish_waypoints()
                # don't update unless we get new positional data
                self.pose = None
            rate.sleep()

    def publish_waypoints(self):
        """Build the lookahead lane and publish it on /final_waypoints."""
        lane = self.generate_lane()
        self.final_waypoints_pub.publish(lane)

    def generate_lane(self):
        """Return a Lane of the next LOOKAHEAD_WPS waypoints.

        If a stopline lies within the lookahead window, copy the waypoints and
        replace their velocities with a sqrt deceleration profile that reaches
        zero STOPLINE waypoints before the stopline."""
        lane = Lane()
        lane.header = self.base_waypoints.header
        look_ahead_wp_max = self.nearest_wp_idx + LOOKAHEAD_WPS
        base_wpts = self.base_waypoints.waypoints[self.nearest_wp_idx:look_ahead_wp_max]
        if self.stop_wp == NO_WP or (self.stop_wp >= look_ahead_wp_max):
            # No red light in range: pass the base waypoints through unchanged.
            lane.waypoints = base_wpts
        else:
            temp_waypoints = []
            # Target index (relative to the window) to be fully stopped at.
            stop_idx = max(self.stop_wp - self.nearest_wp_idx - STOPLINE, 0)
            for i, wp in enumerate(base_wpts):
                temp_wp = Waypoint()
                temp_wp.pose = wp.pose
                if stop_idx >= STOPLINE:
                    dist = self.distance(base_wpts, i, stop_idx)
                    # account for system lag
                    if DELAY > 0:
                        delay_s = 1./DELAY
                    else:
                        delay_s = 0
                    # x = xo + vot + .5at^2, xo = 0
                    dist += self.get_waypoint_velocity(base_wpts[i])*delay_s+.5*DECEL_RATE*delay_s*delay_s
                    # v^2 = vo^2 + 2*a*(x-xo)
                    # v^2 = 0 + 2*a*(dist)
                    # v = sqrt(2*a*dist)
                    vel = math.sqrt(2*DECEL_RATE*dist)
                    if vel < 1.0:
                        vel = 0.0
                else:
                    # Stopline is effectively on top of us: command a full stop.
                    vel = 0.0
                # Never exceed the first base waypoint's commanded velocity.
                temp_wp.twist.twist.linear.x = min(vel, self.get_waypoint_velocity(base_wpts[0]))
                temp_waypoints.append(temp_wp)
            lane.waypoints = temp_waypoints
        return lane

    def get_nearest_wp_indx(self):
        """Index of the closest base waypoint that lies *ahead* of the car."""
        ptx = self.pose.pose.position.x
        pty = self.pose.pose.position.y
        nearest_indx = self.waypoint_ktree.query([ptx,pty],1)[1]
        nearest_coord = self.waypoints_2d[nearest_indx]
        prev_coord = self.waypoints_2d[nearest_indx - 1]
        neareset_vect = np.array(nearest_coord)
        prev_vect = np.array(prev_coord)
        positive_vect = np.array([ptx,pty])
        # check if the nearest_coord is infront or behind the car
        val = np.dot(neareset_vect-prev_vect, positive_vect-neareset_vect)
        if val > 0.0:
            # works for waypoints that are in a loop
            nearest_indx = (nearest_indx + 1) % len(self.waypoints_2d)
        return nearest_indx

    def pose_cb(self, msg):
        # /current_pose callback: keep only the latest pose for the next loop tick.
        self.pose = msg

    def waypoints_cb(self, lane):
        # /base_waypoints callback; the KDTree is built only on the first message.
        self.base_waypoints = lane
        if not self.waypoints_2d:
            self.waypoints_2d = [ [ waypoint.pose.pose.position.x, waypoint.pose.pose.position.y ] for waypoint in lane.waypoints ]
            self.waypoint_ktree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        # Stopline waypoint index for the nearest red light (NO_WP when none).
        self.stop_wp = msg.data

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        """Commanded linear velocity stored on a single waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the commanded linear velocity of waypoints[waypoint]."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Cumulative euclidean path length from waypoint wp1 to wp2 (inclusive),
        summed segment by segment along the waypoint list."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
return dist
if __name__ == '__main__':
    try:
        # The constructor spins the publish loop until ROS shutdown.
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| 5,876 | 1,971 |
from typing import Any
import cv2
import numpy as np
from sigmarsGarden.config import Configuration
from sigmarsGarden.parse import circle_coords
def configure(img: Any) -> Configuration:
    """Interactively calibrate the board grid overlay.

    Shows *img* in an OpenCV window; the bound keys nudge the grid's start
    coordinate one pixel at a time while circles are redrawn at the expected
    marble positions.  Esc exits and the tuned Configuration is returned.
    Blocks on cv2.waitKey, so a GUI session is required.
    """
    cv2.namedWindow("configureDisplay")
    # def click_and_crop(event, x, y, flags, param) -> None:
    #     print(event, x, y, flags, param)
    # cv2.setMouseCallback("configureDisplay", click_and_crop)
    cv2.imshow("configureDisplay", img)
    result = Configuration()
    # Hand-tuned defaults for the reference screenshot resolution.
    result.down_distance = 114
    result.right_distance = 66
    result.start_coord = (1371, 400)
    result.radius = 28
    circle_color = [0, 0, 0]
    while True:
        keycode = cv2.waitKey(0)
        print(keycode)
        # NOTE(review): the first four bindings are dead code -- they are
        # immediately overwritten below (possibly two keyboard layouts; confirm).
        left = 81
        up = 82
        down = 84
        right = 83
        left = 104
        up = 116
        down = 110
        right = 115
        esc = 27
        start_coord = list(result.start_coord)
        if keycode == left:
            start_coord[0] -= 1
        elif keycode == right:
            start_coord[0] += 1
        elif keycode == up:
            start_coord[1] -= 1
        elif keycode == down:
            start_coord[1] += 1
        elif keycode == esc:
            break
        result.start_coord = (start_coord[0], start_coord[1])
        new_img = np.copy(img)
        # Redraw the expected circle grid so the user can line it up visually.
        for coord in circle_coords(result):
            new_img = cv2.circle(new_img, coord, result.radius, circle_color)
        cv2.imshow("configureDisplay", new_img)
        print(start_coord)
    return result
def main() -> None:
    """Manual calibration entry point: load a test board image and run configure()."""
    x = cv2.imread("testboards/1.jpg")
    print(configure(x))


if __name__ == "__main__":
    main()
| 1,633 | 570 |
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from torch.utils.data import TensorDataset, DataLoader
# reference: \transformers\generation_utils.py
def select_greedy(logits):
    """Greedy decoding: return the highest-scoring token id at the last position.

    :param logits: (batch, seq_len, vocab) scores
    :return: (batch,) token ids
    """
    last_step_scores = logits[:, -1, :]
    return torch.argmax(last_step_scores, dim=-1)
def select_topk(logits, k=10):
    """Top-k sampling from the last-position logits.

    Note: masks *logits* in place -- scores outside the top k become -inf.
    :return: (batch,) sampled token ids
    """
    scores = logits[:, -1, :]
    k_eff = min(max(k, 1), scores.size(-1))
    # Everything scoring below the k-th best value is masked out.
    cutoff = torch.topk(scores, k_eff)[0][..., -1, None]
    scores[scores < cutoff] = -float("Inf")
    probs = torch.nn.functional.softmax(scores, dim=-1)
    # multinomial samples an index according to the renormalised top-k weights
    return torch.multinomial(probs, num_samples=1).squeeze(1)
def select_topp(logits, p=0.75):
    """Nucleus (top-p) sampling from the last-position logits.

    Note: masks *logits* in place -- tokens outside the nucleus become -inf.
    :return: (batch,) sampled token ids
    """
    scores = logits[:, -1, :]  # (batch, vocab)
    sorted_scores, order = torch.sort(scores, descending=True)
    cdf = torch.cumsum(torch.nn.functional.softmax(sorted_scores, dim=-1), dim=-1)
    # Drop tokens whose cumulative probability exceeds the threshold...
    drop_sorted = cdf > p
    # ...but shift right so the first token crossing the threshold is kept.
    drop_sorted[..., 1:] = drop_sorted[..., :-1].clone()
    drop_sorted[..., 0] = 0
    # Map the sorted-order mask back onto the original vocabulary indexing.
    drop = drop_sorted.scatter(1, order, drop_sorted)
    scores[drop] = -float("Inf")
    probs = torch.nn.functional.softmax(scores, dim=-1)
    # multinomial samples an index according to the renormalised nucleus weights
    return torch.multinomial(probs, num_samples=1).squeeze(1)
def read_data(path='./romeo_and_juliet.txt'):
    """Read and return the entire UTF-8 text file at *path*."""
    with open(path, encoding='utf-8') as handle:
        return handle.read()
def data_processor(dataset, tokenizer, max_len=32):
    """Tokenize *dataset* and return a DataLoader of contiguous max_len chunks.

    Inputs and labels are the same tensor (causal language-model training).
    :param dataset: raw text
    :param tokenizer: object with an ``encode`` method returning token ids
    :param max_len: chunk length; a trailing partial chunk is dropped
    """
    token_ids = tokenizer.encode(dataset)
    # Slice the id stream into consecutive, non-overlapping max_len windows.
    chunks = [token_ids[start:start + max_len]
              for start in range(0, len(token_ids) - max_len, max_len)]
    chunk_tensor = torch.tensor(chunks)
    paired = TensorDataset(chunk_tensor, chunk_tensor)
    return DataLoader(dataset=paired, batch_size=8, shuffle=False)
def train(train_loader, model, ep=30, device=torch.device('cpu')):
    """Fine-tune a GPT-2-style language model.

    :param train_loader: DataLoader yielding (data, target) id tensors
    :param model: model returning a (loss, logits, ...) tuple when called with labels=
    :param ep: number of epochs
    :param device: torch device to train on
    :return: the trained model
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=2e-5, eps=1e-8)
    print(next(model.parameters()).device)
    model.train()
    model.to(device)
    for i in range(ep):
        total_loss = 0.
        for bi, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            # NOTE(review): tuple unpacking assumes the pre-return_dict
            # transformers API (or return_dict=False) -- confirm library version.
            loss, logits, _ = model(data, labels=target)
            print('loss:', loss.data.item())
            # NOTE(review): accumulates loss tensors; += loss.item() would avoid
            # holding autograd tensors across the epoch.
            total_loss += loss
            loss.backward()
            optimizer.step()
        print('train loss:', total_loss / len(train_loader))
    return model
def inference(model, tokenizer, prefix=None, max_len=100, top_k=20, top_p=0.75, temperature=1.):
    """Autoregressively sample up to *max_len* tokens after *prefix*.

    Sampling uses select_topk; the *top_p* argument is currently unused because
    the select_topp call below is commented out.  Generation stops early when
    the tokenizer's EOS token appears in the decoded text.
    :return: prefix plus the generated continuation, as a string
    """
    print('inference ... ')
    print(next(model.parameters()).device)
    model.eval()
    indexed_tokens = tokenizer.encode(prefix)
    tokens_tensor = torch.tensor([indexed_tokens])
    final_pred_text = prefix
    cur_len = tokens_tensor.size(-1)
    for _ in range(max_len):
        with torch.no_grad():
            output = model(tokens_tensor)
            logits = output[0]  # (batch_size, cur_len, vocab_size)
        # Temperature < 1 sharpens, > 1 flattens the sampling distribution.
        if temperature != 1:
            logits /= temperature
        next_idx = select_topk(logits, k=top_k)
        # next_idx = select_topp(logits, p=0.75)
        final_pred_text += tokenizer.decode(next_idx)
        if tokenizer.eos_token in final_pred_text:
            break
        # indexed_tokens += [next_idx]
        # tokens_tensor = torch.tensor([indexed_tokens])
        # Append the sampled token and feed the whole sequence back in.
        tokens_tensor = torch.cat([tokens_tensor, next_idx.unsqueeze(-1)], dim=-1)
        cur_len += 1
    print(cur_len)
    return final_pred_text
# Load a local GPT-2 checkpoint; 'gpt2/en' is a local directory path, not a
# hub model id.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2/en')
model = GPT2LMHeadModel.from_pretrained('gpt2/en')
# Optional fine-tuning pass (disabled):
# ds = read_data('./romeo_and_juliet.txt')
# train_loader = data_processor(ds, tokenizer)
# model = train(train_loader, model, ep=3, device=torch.device('cuda', 0))
# NOTE(review): top_p is accepted but unused by inference() as written
# (its select_topp call is commented out) -- only top_k takes effect.
pred_text = inference(model.to('cpu'), tokenizer,
                      'Yesterday, Jack said he saw an alien,',
                      top_k=20,
                      top_p=0.8,
                      temperature=0.5)
print(pred_text)
| 4,847 | 1,781 |
from cc3d import CompuCellSetup
from connectivityTestSteppables import connectivityTestSteppable
# Register the connectivity-test steppable (invoked every simulation step)
# and start the CompuCell3D main loop.
CompuCellSetup.register_steppable(steppable=connectivityTestSteppable(frequency=1))
CompuCellSetup.run()
| 205 | 65 |
from models.genetic_algorithms.population import Population | 59 | 14 |
from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('campaigns:campaign', args=[1]).
app_name = 'campaigns'
urlpatterns = [
    # Detail view for a single campaign, keyed by integer id.
    path('<int:campaign_id>/', views.Campaign.as_view(), name='campaign'),
]
| 167 | 61 |
# -*- coding: iso-8859-15 -*-
import spanishconjugator
from spanishconjugator.SpanishConjugator import Conjugator
# ----------------------------------- Imperfect Indicative ----------------------------------- #
def test_imperfect_indicative_yo_ar():
expected = "hablaba"
assert Conjugator().conjugate('hablar','imperfect','indicative','yo') == expected
def test_imperfect_indicative_tu_ar():
expected = "hablabas"
assert Conjugator().conjugate('hablar','imperfect','indicative','tu') == expected
def test_imperfect_indicative_usted_ar():
expected = "hablaba"
assert Conjugator().conjugate('hablar','imperfect','indicative','usted') == expected
def test_imperfect_indicative_nosotros_ar():
expected = 'hablábamos'
assert str(Conjugator().conjugate('hablar','imperfect','indicative','nosotros')) == expected
def test_imperfect_indicative_vosotros_ar():
expected = "hablabais"
assert Conjugator().conjugate('hablar','imperfect','indicative','vosotros') == expected
def test_imperfect_indicative_ustedes_ar():
expected = "hablaban"
assert Conjugator().conjugate('hablar','imperfect','indicative','ustedes') == expected
def test_imperfect_indicative_yo_ar_3():
    # Second regular -ar verb ("charlar") to confirm the pattern generalizes.
    assert Conjugator().conjugate('charlar', 'imperfect', 'indicative', 'yo') == "charlaba"
def test_imperfect_indicative_yo_ar_4():
    # Irregular verb "ser" -> "era" in the imperfect indicative.
    assert Conjugator().conjugate('ser', 'imperfect', 'indicative', 'yo') == "era"
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 17:37:31 2020
@author: robertopitz
"""
import numpy as np
from random import randrange
from math import isnan
import pygame as pg
def get_new_prey_pos(pos, board):
    """Pick a uniformly random free interior cell for the prey.

    Keeps drawing (col, row) pairs strictly inside the border until the
    cell differs from *pos* and is empty (board value 0).  Returns the
    chosen cell as a 2-element numpy array [col, row].
    """
    while True:
        col = randrange(1, len(board) - 1)
        row = randrange(1, len(board[0]) - 1)
        # Reject the bot's own cell, then require an empty board cell.
        if (col, row) == (pos[0], pos[1]):
            continue
        if board[col][row] == 0:
            return np.array([col, row])
def get_next_move(pos, board):
    """Return the direction of the lowest-penalty neighbour of *pos*.

    Ties break toward the first entry in (RIGHT, LEFT, UP, DOWN), matching
    np.argmin's first-minimum rule.
    """
    col, row = pos[0], pos[1]
    neighbours = np.array([
        board[col + 1][row],  # RIGHT
        board[col - 1][row],  # LEFT
        board[col][row - 1],  # UP
        board[col][row + 1],  # DOWN
    ])
    directions = ("RIGHT", "LEFT", "UP", "DOWN")
    return directions[np.argmin(neighbours)]
def move_bot(bot_pos, prey_pos, board, penalty_board):
    """Step the bot one cell downhill on the penalty gradient, in place.

    The move suggested by get_next_move is taken only when the target cell
    is free (board value 0); otherwise the bot stays put.  *prey_pos* is
    unused here but kept for the caller's uniform signature.
    """
    col, row = bot_pos[0], bot_pos[1]
    direction = get_next_move(bot_pos, penalty_board)
    # (column delta, row delta) per direction; step size is one cell.
    dc, dr = {
        "UP": (0, -1),
        "DOWN": (0, 1),
        "LEFT": (-1, 0),
        "RIGHT": (1, 0),
    }[direction]
    # Only the single target neighbour is inspected, never the others.
    if board[col + dc][row + dr] == 0:
        bot_pos[0] += dc
        bot_pos[1] += dr
def convert_board(board):
    """Turn a 0/1 draw board into the seed array for create_gradient.

    Free cells (0) become NaN ("distance unknown"), walls (1) become +inf
    ("impassable").  Any other value is left at 0.0.  Returns a new float
    array; *board* is not modified.

    Fix: dropped the redundant `.astype(float)` — np.zeros already yields
    a float64 array.
    """
    seeded = np.zeros(board.shape)
    seeded[board == 0.] = np.nan
    seeded[board == 1.] = np.inf  # same value as float('inf')
    return seeded
def convert_to_draw_board(board):
    """Convert the character maze into a numeric 0/1 board.

    Walkable glyphs 'o', 'O' and ' ' map to 0.0; every other glyph is a
    wall and maps to 1.0.  Returns a float array of the same shape.

    Fix: replaced the O(rows*cols) Python double loop with a single
    vectorized np.isin/np.where pass — identical output, faster.
    """
    walkable = np.isin(board, ("o", "O", " "))
    return np.where(walkable, 0., 1.)
def create_gradient(board):
    """Fill in a distance gradient over the board by repeated sweeps.

    On entry: walls are +inf, the prey cell is 0.0, and every other free
    interior cell is NaN (see convert_board / get_penalty_board).  Each
    sweep looks only at the right (c+1) and down (r+1) neighbours — first
    pulling a known distance into a NaN cell, then pushing a known cell's
    distance into NaN neighbours — and sweeps repeat until no interior
    NaN remains.  Mutates and returns *board*.
    """
    # border is Inf
    # empty field is NaN
    step_penalty = 1
    nans_present = True
    border = float('inf')
    while nans_present:
        nans_present = False
        # Interior cells only; row/col 0 and the last row/col are border.
        for c in range(1,len(board)-1):
            for r in range(1,len(board[0])-1):
                if isnan(board[c][r]):
                    # Still unknown: try to derive from right/down neighbours.
                    nans_present = True
                    if isnan(board[c+1][r]) and isnan(board[c][r+1]):
                        pass  # both neighbours unknown — try next sweep
                    elif isnan(board[c+1][r]) and not isnan(board[c][r+1]):
                        if board[c][r+1] != border:
                            board[c][r] = board[c][r+1] + step_penalty
                    elif not isnan(board[c+1][r]) and isnan(board[c][r+1]):
                        if board[c+1][r] != border:
                            board[c][r] = board[c+1][r] + step_penalty
                    else:
                        # Both neighbours known: average them (int-truncated)
                        # unless one is a wall, in which case use the other.
                        if board[c+1][r] != border and \
                           board[c][r+1] != border:
                            board[c][r] = int(0.5 * (board[c+1][r] + \
                                                     board[c][r+1]) + step_penalty)
                        elif board[c+1][r] == border and \
                             board[c][r+1] != border:
                            board[c][r] = board[c][r+1] + step_penalty
                        elif board[c+1][r] != border and \
                             board[c][r+1] == border:
                            board[c][r] = board[c+1][r] + step_penalty
                else:
                    # Known cell: push its distance into unknown neighbours.
                    if board[c][r] != border:
                        if isnan(board[c+1][r]):
                            board[c+1][r] = board[c][r] + step_penalty
                        if isnan(board[c][r+1]):
                            board[c][r+1] = board[c][r] + step_penalty
    return board
def nint(f):
    """Nearest integer as a plain Python int (banker's rounding via round)."""
    rounded = round(f)
    return int(rounded)
def get_penalty_board(board, prey_pos):
    """Build a fresh penalty gradient with the prey as the zero point.

    Copies the NaN/inf blueprint, stamps 0.0 at the prey cell, and lets
    create_gradient flood distances outward.  *board* is not modified.
    """
    seeded = board.copy()
    seeded[nint(prey_pos[0]), nint(prey_pos[1])] = 0.0
    return create_gradient(seeded)
def draw_board(screen, board, rs):
    """Draw every wall cell (value 1) as a blue rs×rs square on *screen*.

    np.argwhere yields (c, r) pairs in row-major order, the same order as
    the original nested loops.
    """
    blue = pg.Color("blue")
    for c, r in np.argwhere(board == 1):
        pg.draw.rect(screen, blue, pg.Rect(c * rs, r * rs, rs, rs))
def draw_bot(screen, pos, rs):
    """Draw the bot as a red rs×rs square at grid cell *pos* (col, row)."""
    pg.draw.rect(screen,
                 pg.Color("red"),
                 pg.Rect(pos[0] * rs,
                         pos[1] * rs,
                         rs, rs))
def draw_prey(screen, pos, rs):
    """Draw the prey as a yellow rs×rs square at grid cell *pos* (col, row)."""
    pg.draw.rect(screen,
                 pg.Color("yellow"),
                 pg.Rect(pos[0] * rs,
                         pos[1] * rs,
                         rs, rs))
def play_game(bot_pos_start, board_extern):
    """Run the pygame chase loop: bot walks the penalty gradient to the prey.

    Converts the character maze to a 0/1 board, opens a window sized to
    the board, then loops at 60 FPS: move the bot, respawn the prey (and
    rebuild the gradient) when caught, redraw.  Exits on the window's
    QUIT event.
    """
    board = convert_to_draw_board(board_extern)
    # Blueprint with NaN free cells / inf walls, re-seeded per prey spawn.
    penalty_board_blue_print = convert_board(board)
    rect_size = 15  # pixels per grid cell
    bot_pos = np.copy(bot_pos_start)  # copy: caller's start pos stays intact
    pg.init()
    screen_color = pg.Color("black")
    screen = pg.display.set_mode((np.size(board,0) * rect_size,
                                  np.size(board,1) * rect_size))
    clock = pg.time.Clock()
    pg.display.set_caption("Clean Bot AI")
    running = True
    prey_pos = get_new_prey_pos(bot_pos, board)
    penalty_board = get_penalty_board(penalty_board_blue_print, prey_pos)
    while running:
        move_bot(bot_pos, prey_pos, board, penalty_board)
        # Prey caught: spawn a new one and recompute the gradient.
        if bot_pos[0] == prey_pos[0] and bot_pos[1] == prey_pos[1]:
            prey_pos = get_new_prey_pos(bot_pos, board)
            penalty_board = get_penalty_board(penalty_board_blue_print,
                                              prey_pos)
        screen.fill(screen_color)
        for event in pg.event.get():
            if event.type == pg.QUIT:
                running = False
        draw_board(screen, board, rect_size)
        draw_prey(screen, prey_pos, rect_size)
        draw_bot(screen, bot_pos, rect_size)
        clock.tick(60)  # cap at 60 frames per second
        pg.display.flip()
    pg.quit()
#==MAIN CODE==================================================================
board = [list("x--------x---|-|---x----xx----x"),#1
list("|ooOooooo|---| |---|oooO||oooo|"),#2
list("|ox-xo--o|---| |---|o--o--o--o|"),#3
list("|o|-|o||o|---| |---|o||oooo||o|"),#4
list("|o|-|o||o|---| |---|o|x--|o||o|"),#5
list("|ox-xo--ox---x x---xo----|o||o|"),#6
list("|oooooooooooooooooooooooooo||o|"),#7
list("|ox-xo|------| |---|o--o|--x|o|"),#8
list("|o|-|o|--xx--| |---|o||o|--x|o|"),#9
list("|o|-|oooo|| o||oooo||o|"),#10
list("|o|-|o--o|| x---x --o||o--o||o|"),#11
list("|ox-xo||o-- |x-x| ||o--o||o--o|"),#12
list("|ooooo||o ||-|| ||oooo||oooo|"),#13
list("x---|o|x--| |--|| |x--|o|x--|o|"),#14
list("x---|o|x--| |--|| |x--|o|x--|o|"),#15
list("|ooooo||o ||-|| ||oooo||oooo|"),#16
list("|ox-xo||o-- |x-x| ||o--o||o--o|"),#17
list("|o|-|o--o|| x---x --o||o--o||o|"),#18
list("|o|-|oooo|| o||oooo||o|"),#19
list("|o|-|o|--xx--| |---|o||o|--x|o|"),#20
list("|ox-xo|------| |---|o--o|--x|o|"),#21
list("|oooooooooooooooooooooooooo||o|"),#22
list("|ox-xo--ox---x x---xox---|o||o|"),#23
list("|o|-|o||o|---| |---|o|x--|o||o|"),#24
list("|o|-|o||o|---| |---|o||oooo||o|"),#25
list("|ox-xo--o|---| |---|o--o--o--o|"),#26
list("|ooOooooo|---| |---|oooO||oooo|"),#27
list("x--------x---|-|---x----xx----x")#28
]
# board = [[1,1,1,1,1,1,1,1,1],
# [1,0,0,0,1,0,0,0,1],
# [1,0,0,0,1,0,1,0,1],
# [1,0,1,1,1,0,1,0,1],
# [1,0,1,0,1,1,1,0,1],
# [1,0,0,0,0,0,0,0,1],
# [1,0,0,0,1,1,1,0,1],
# [1,0,1,0,1,0,1,0,1],
# [1,0,1,1,1,0,1,0,1],
# [1,0,0,0,1,0,1,0,1],
# [1,0,0,0,1,0,0,0,1],
# [1,1,1,1,1,1,1,1,1]]
board = np.array(board)
bot_pos_start = np.array([1,1])
play_game(bot_pos_start, board)
| 8,044 | 3,157 |
# coding:utf-8
import ConfigParser
import sys
__author__ = '4ikist'
from core.engine import NotificatonIniter, TalkHandler, VKEventHandler
def load_config(prop_file):
    # Parse an INI-style config (Python 2 ConfigParser) and return the pair
    # (api_credentials, db_credentials) as plain dicts.
    # [main] api_name names the section holding login/pwd for the chosen API.
    cfg = ConfigParser.RawConfigParser()
    cfg.read(prop_file)
    api_name = dict(cfg.items('main'))['api_name']
    api_credentials = {'api_name': api_name,
                       'login': dict(cfg.items(api_name))['login'],
                       'pwd': dict(cfg.items(api_name))['pwd']}
    # NOTE(review): prints credentials (incl. password) to stdout.
    print 'api:', api_credentials
    # [storage] holds the database location.
    db_credentials = {'address': dict(cfg.items('storage'))['address'],
                      'db_name': dict(cfg.items('storage'))['db_name']}
    print 'db:', db_credentials
    return api_credentials, db_credentials
def main():
    # Config path comes from argv[1], defaulting to 'properties.cfg'.
    api_credentials, db_credentials = load_config(sys.argv[1] if len(sys.argv) > 1 else 'properties.cfg')
    # Start the three workers; each .start() presumably spawns its own
    # thread/loop (defined in core.engine — not visible here).
    TalkHandler(api_credentials, db_credentials).start()
    NotificatonIniter(api_credentials, db_credentials).start()
    VKEventHandler(api_credentials, refresh_time=3600*3).start()

# Script entry point.
if __name__ == '__main__':
    main()
from utils.yacs_config import CfgNode as CN
# Default experiment configuration.  Import `cfg` and override via yacs merge.
# NOTE: register_deprecated_key/register_renamed_key calls must run after the
# config node exists and before any merge — do not reorder.
__C = CN()
cfg = __C
# --- model/backbone switches ---
# cfg.canvas_init=0
cfg.use_vit=0
cfg.use_fast_vit=0
cfg.img_mean=-1
cfg.vit_mlp_dim=2048
cfg.vit_depth=8
cfg.vit_dropout=1
cfg.concat_one_hot=0
cfg.mask_out_prevloc_samples=0
#cfg.input_id_canvas=0
cfg.register_deprecated_key('input_id_canvas')
cfg.use_cnn_process=0
cfg.input_id_only=0
cfg.cond_on_loc=0
cfg.gt_file=''
cfg.img_size=28
cfg.pw=10
# Legacy key migrations: 'ps' renamed to 'pw'; the rest are dropped.
cfg.register_renamed_key('ps', 'pw')
cfg.register_deprecated_key('steps')
cfg.register_deprecated_key('canvas_init')
cfg.register_deprecated_key('lw')
cfg.register_deprecated_key('anchor_dependent')
# --- training hyper-parameters ---
cfg.hid=256
cfg.batch_size=128
cfg.num_epochs=50
cfg.lr=3e-4
## cfg.lw=1.0
cfg.k=50
cfg.loc_loss_weight=1.0
cfg.cls_loss_weight=1.0
cfg.stp_loss_weight=1.0
cfg.output_folder='./exp/prior'
cfg.single_sample=0
cfg.dataset='mnist'
cfg.add_empty=0
cfg.add_stop=0
cfg.inputd=2
cfg.model_name='cnn_prior'
cfg.hidden_size_prior=64
cfg.hidden_size_vae=256
cfg.use_scheduler=0
cfg.early_stopping=0
cfg.loc_map=1
cfg.nloc=-1
cfg.num_layers=8 #15
cfg.loc_dist='Gaussian'
cfg.loc_stride=1
# --- bookkeeping / runtime ---
cfg.exp_key=''
cfg.device='cuda'
cfg.exp_dir='./exp/' # root of all experiments
cfg.mhead=0
cfg.kernel_size=7 # for picnn's kernel
cfg.permute_order=0 # for picnn's kernel
cfg.geometric=0
#cfg.anchor_dependent=0
cfg.start_time=''
cfg.pos_encode=0
cfg.use_emb_enc=0
#!/usr/bin/env python3
import sys
from itertools import repeat, product
from operator import mul
from functools import reduce
# Puzzle input path; first CLI argument overrides the default.
inputFile = 'input'
if len(sys.argv) >= 2:
    inputFile = sys.argv[1]
# Row-major grid of single-digit heights, one row per input line.
heightmap : list[list[int]] = []
with open(inputFile) as fin:
    for line in fin:
        heightmap.append([int(c) for c in line.strip()])
width = len(heightmap[0])
height = len(heightmap)
def isLowPoint(i, j):
    """True iff heightmap[i][j] is strictly lower than every in-grid neighbour.

    Uses the module-level heightmap/height/width globals.
    """
    h = heightmap[i][j]
    for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
        if 0 <= ni < height and 0 <= nj < width and heightmap[ni][nj] <= h:
            return False
    return True
# All low points — one per basin, used as flood-fill seeds below.
lowpoints : list[tuple[int, int]] = []
for i, j in product(range(height), range(width)):
    if isLowPoint(i, j):
        lowpoints.append((i, j))
# basinlog marks visitation: 0 = unvisited, -1 = ridge (height 9),
# >0 = basin token assigned by findbasin.
basinlog = [[0 for _ in range(width)] for _ in range(height)]
for i, j in product(range(height), range(width)):
    if heightmap[i][j] == 9:
        basinlog[i][j] = -1
def findbasin(i, j, t) -> int:
    """Recursively flood-fill the basin containing (i, j), tagging cells with t.

    Returns the count of newly claimed cells; 0 if (i, j) was already
    visited or is a ridge (basinlog != 0).  Recurses into the four
    neighbours whose height is not 9 — ridges bound every basin.
    Mutates the module-level basinlog.
    """
    if basinlog[i][j] != 0:
        return 0
    basinlog[i][j] = t
    size = 1
    if i != 0 and heightmap[i - 1][j] != 9:
        size += findbasin(i - 1, j, t)
    if i != height - 1 and heightmap[i + 1][j] != 9:
        size += findbasin(i + 1, j, t)
    if j != 0 and heightmap[i][j - 1] != 9:
        size += findbasin(i, j - 1, t)
    if j != width - 1 and heightmap[i][j + 1] != 9:
        size += findbasin(i, j + 1, t)
    return size
# Flood-fill each low point; annotation corrected from list[int, int] —
# the list simply holds basin sizes.
basinsizes : list[int] = []
basintoken = 1
for i, j in lowpoints:
    if (size := findbasin(i, j, basintoken)) != 0:
        basinsizes.append(size)
    basintoken += 1
# Answer: product of the three largest basin sizes.
print(f'{reduce(mul, sorted(basinsizes, reverse=True)[:3]) = }')
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains custom tray balloon
"""
from __future__ import print_function, division, absolute_import
from Qt.QtWidgets import QWidget, QSystemTrayIcon, QMenu
class TrayMessage(QWidget, object):
    """Widget owning a system-tray icon that can pop balloon notifications.

    Raises OSError at construction when the platform provides no system
    tray.
    """

    def __init__(self, parent=None):
        super(TrayMessage, self).__init__(parent=parent)
        # Icon deliberately left unset; show_message falls back accordingly.
        self._tools_icon = None
        self.tray_icon_menu = QMenu(self)
        self.tray_icon = QSystemTrayIcon(self)
        # self.tray_icon.setIcon(self._tools_icon)
        self.tray_icon.setToolTip('Tray')
        self.tray_icon.setContextMenu(self.tray_icon_menu)
        if not QSystemTrayIcon.isSystemTrayAvailable():
            raise OSError('Tray Icon is not available!')
        self.tray_icon.show()

    def show_message(self, title, msg):
        """Show a tray balloon; retry without an icon argument if the
        icon-taking overload rejects self._tools_icon (None here)."""
        try:
            self.tray_icon.showMessage(title, msg, self._tools_icon)
        except Exception:
            self.tray_icon.showMessage(title, msg)
| 974 | 327 |