text string | size int64 | token_count int64 |
|---|---|---|
import moonleap.resource.props as P
from moonleap import create, extend
from moonleap.utils.case import kebab_to_camel, sn
from moonleap.verbs import has
from titan.django_pkg.djangoapp import DjangoApp
from .resources import AppModule # noqa
@create("app:module")
def create_app_module(term):
    """Build an AppModule resource for the given term.

    The term's data (kebab-case) becomes the module's camel-case name,
    and the output path is the snake_case form of that name.
    """
    app_module = AppModule(name=kebab_to_camel(term.data))
    app_module.output_path = sn(app_module.name)
    return app_module
@extend(DjangoApp)
class ExtendDjangoApp:
    # Declares that every DjangoApp has exactly one required "app:module"
    # child resource (moonleap property DSL).
    app_module = P.child(has, "app:module", required=True)
| 515 | 176 |
# Copyright 2020 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
vSphere/vCenter helper module. This module interacts with vSphere using pyVmomi
'''
from pyVim.connect import SmartConnectNoSSL
from pyVmomi import vim
from workload_auto import logger
LOG = logger.get_logging(__name__)
class VsphereHelper:
    '''
    VSphere Helper class.

    Thin convenience wrapper around a pyVmomi ServiceInstance for looking
    up DVS/host port-groups, their VLANs and the physical-NIC neighbour
    (CDP) information of ESXi hosts.
    '''
    def __init__(self, **kwargs):
        '''
        Init routine that connects with the specified vCenter.

        Expected kwargs: ip, user, pwd.
        '''
        self.ip_addr = kwargs.get('ip')
        self.user = kwargs.get('user')
        self.pwd = kwargs.get('pwd')
        # NOTE: SmartConnectNoSSL disables certificate verification.
        self.si_obj = SmartConnectNoSSL(host=self.ip_addr, user=self.user,
                                        pwd=self.pwd)

    def get_all_objs(self, vimtype):
        '''
        Retrieve all the objects of a specific type.

        :return: dict mapping managed object ref -> object name
        '''
        obj = {}
        container = self.si_obj.content.viewManager.CreateContainerView(
            self.si_obj.content.rootFolder, vimtype, True)
        for managed_object_ref in container.view:
            obj.update({managed_object_ref: managed_object_ref.name})
        return obj

    def get_specific_obj(self, vimtype, name):
        '''
        Retrieve the object of a specific type matching the name.

        :return: the managed object ref, or None when not found
        '''
        container = self.si_obj.content.viewManager.CreateContainerView(
            self.si_obj.content.rootFolder, vimtype, True)
        for managed_object_ref in container.view:
            if managed_object_ref.name == name:
                return managed_object_ref
        return None

    def get_dvs_pg_obj(self, dvs_name, dvs_pg):
        '''
        Returns the DVS and DVS PG objects, or (None, None) when either
        does not exist.
        '''
        dvs_obj = self.get_specific_obj([vim.DistributedVirtualSwitch],
                                        dvs_name)
        if dvs_obj is None or dvs_obj.name != dvs_name:
            LOG.error("DVS %s does not exist", dvs_name)
            return None, None
        for dvpg in dvs_obj.portgroup:
            if dvpg.name == dvs_pg:
                return dvs_obj, dvpg
        LOG.error("DVS PG %s does not exist", dvs_pg)
        return None, None

    def is_dvs_dvspg_exist(self, dvs_name, dvs_pg):
        '''
        Checks if the DVS and DVS PG exist in vSphere.
        '''
        dvs_obj, dvs_pg_obj = self.get_dvs_pg_obj(dvs_name, dvs_pg)
        if dvs_obj is None or dvs_pg_obj is None:
            LOG.error("get_dvs_pg_obj returns false for DVS %s dvs PG %s",
                      dvs_name, dvs_pg)
            return False
        return True

    def get_host_pg_obj(self, host_name, host_pg):
        '''
        Returns the Host and PG object, or (None, None) when either does
        not exist.
        '''
        host_obj = self.get_specific_obj([vim.HostSystem], host_name)
        if host_obj is None or host_obj.name != host_name:
            LOG.error("Host %s does not exist", host_name)
            return None, None
        for pg_obj in host_obj.config.network.portgroup:
            if pg_obj.spec.name == host_pg:
                return host_obj, pg_obj
        LOG.error("Host PG %s does not exist", host_pg)
        return None, None

    def is_host_hostpg_exist(self, host_name, host_pg):
        '''
        Checks if the Host and Host PG exist in vSphere.
        '''
        host_obj, host_pg_obj = self.get_host_pg_obj(host_name, host_pg)
        if host_obj is None or host_pg_obj is None:
            LOG.error("get_host_pg_obj returns false for host %s host PG %s",
                      host_name, host_pg)
            return False
        return True

    def get_vlan_dvs(self, dvs_name, dvs_pg):
        '''
        Get the Vlan associated with the DV-PG in a DVS.

        First get the DVS object and DV-PG object matching the arguments,
        then read the VLAN id off the DV-PG's default port config.
        Returns "" on any failure or for trunk-VLAN port groups.
        '''
        dvs_obj, dvs_pg_obj = self.get_dvs_pg_obj(dvs_name, dvs_pg)
        if dvs_obj is None or dvs_pg_obj is None:
            LOG.error("DVS %s or PG %s does not exist, cannot obtain VLAN",
                      dvs_name, dvs_pg)
            return ""
        vlan_info = dvs_pg_obj.config.defaultPortConfig.vlan
        # Trunk port groups carry a VLAN *range*, not a single id;
        # not supported here (needs testing), so return "".
        cl_obj = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec
        if isinstance(vlan_info, cl_obj):
            return ""
        return str(vlan_info.vlanId)

    def get_vlan_host_pg(self, host_name, pg_name):
        '''
        Get the Vlan associated with the PG in a Host.

        The host is matched too, in case the same PG name is configured in
        multiple hosts. Returns "" when the host/PG cannot be found.
        '''
        host_obj, host_pg_obj = self.get_host_pg_obj(host_name, pg_name)
        if host_obj is None or host_pg_obj is None:
            LOG.error("Host %s or PG %s does not exist, cannot obtain VLAN",
                      host_name, pg_name)
            return ""
        return str(host_pg_obj.spec.vlanId)

    def get_vlan(self, is_dvs, dvs_or_host_name, cmn_pg):
        '''
        Top level get vlan function: dispatch on DVS vs standard vSwitch.
        '''
        if is_dvs:
            return self.get_vlan_dvs(dvs_or_host_name, cmn_pg)
        return self.get_vlan_host_pg(dvs_or_host_name, cmn_pg)

    def get_host_neighbour(self, host_name, pnic_list):
        '''
        Retrieve the neighbour given a host and pnic's.

        :return: list of dicts with keys 'ip', 'snum', 'pnic', 'sw_port',
                 'name'; missing optional CDP attributes yield "".
        '''
        neighbour = []
        host = self.get_specific_obj([vim.HostSystem], host_name)
        if host is None:
            # BUG FIX: previously an absent host caused an AttributeError.
            LOG.error("Host %s does not exist, cannot query neighbours",
                      host_name)
            return neighbour
        query = host.configManager.networkSystem.QueryNetworkHint()
        for nic in query:
            if not hasattr(nic, 'device'):
                LOG.error('device attribute not present')
                continue
            pnic = nic.device
            if pnic not in pnic_list:
                continue
            conn_sw_port = nic.connectedSwitchPort
            if not hasattr(conn_sw_port, 'portId') or (
                    not hasattr(conn_sw_port, 'devId')):
                LOG.error("Port Id or devId attribute not present")
                continue
            sw_port = conn_sw_port.portId
            dev_id = conn_sw_port.devId
            # BUG FIX: snum/sw_ip/sw_name were referenced without being
            # initialised when the optional attributes were absent,
            # raising NameError; default them to "" instead.
            snum = sw_ip = sw_name = ""
            snum_str = dev_id.split('(')
            if len(snum_str) > 1:
                snum = snum_str[1].split(')')[0]
            else:
                LOG.error("snum not present for the switch")
            if hasattr(conn_sw_port, 'mgmtAddr'):
                sw_ip = conn_sw_port.mgmtAddr
            if hasattr(conn_sw_port, 'systemName'):
                sw_name = conn_sw_port.systemName
            neighbour.append({'ip': sw_ip, 'snum': snum, 'pnic': pnic,
                              'sw_port': sw_port, 'name': sw_name})
        return neighbour

    def get_pnic_dvs(self, host_name, key_dvs):
        '''
        Return the list of pnic's of a host that is a part of a DVS.
        The pnic is taken from the host member entry of the DVS object.
        '''
        pnic_list = []
        for host in key_dvs.config.host:
            if host.config.host.name == host_name:
                for pnic_elem in host.config.backing.pnicSpec:
                    pnic_list.append(pnic_elem.pnicDevice)
        return pnic_list

    def get_dvs_pnic_info(self, dvs, dvs_pg):
        '''
        Return the list of neighbours for every host associated with the
        DVS PG, keyed by host name. Returns "" on lookup failure
        (kept for backward compatibility with existing callers).
        '''
        dvs_obj, dvs_pg_obj = self.get_dvs_pg_obj(dvs, dvs_pg)
        if dvs_obj is None or dvs_pg_obj is None:
            LOG.error("DVS %s or PG %s does not exist, cannot obtain pnic",
                      dvs, dvs_pg)
            return ""
        host_dict = {}
        for host_obj in dvs_pg_obj.host:
            pnic_list = self.get_pnic_dvs(host_obj.name, dvs_obj)
            nei_list = self.get_host_neighbour(host_obj.name, pnic_list)
            host_dict.update({host_obj.name: nei_list})
        return host_dict

    def _get_pnic_from_key(self, pnic_obj, pnic_comp_key):
        '''
        Return the device from pnic object for matching object key,
        or None when no element matches.
        '''
        for pnic_elem in pnic_obj:
            if pnic_elem.key == pnic_comp_key:
                return pnic_elem.device
        return None

    def get_host_pnic_info(self, host_name, pg_name):
        '''
        Get the pnic neighbour info for a specific host, keyed by host name.

        Finds the vSwitch the PG belongs to, resolves the vSwitch's pnic
        keys to device names, then queries the CDP neighbours for those
        devices. Returns "" on lookup failure (kept for backward
        compatibility with existing callers).
        '''
        host_dict = {}
        host_obj, host_pg_obj = self.get_host_pg_obj(host_name, pg_name)
        if host_obj is None or host_pg_obj is None:
            LOG.error("Host %s or PG %s does not exist, cannot obtain pnic",
                      host_name, pg_name)
            return ""
        vsw_name = host_pg_obj.spec.vswitchName
        for vsw in host_obj.config.network.vswitch:
            if vsw.name != vsw_name:
                continue
            # vsw.pnic holds keys; resolve each to its device name.
            pnic_dev_list = []
            for pnic_elem in vsw.pnic:
                dev = self._get_pnic_from_key(host_obj.config.network.pnic,
                                              pnic_elem)
                pnic_dev_list.append(dev)
            nei_list = self.get_host_neighbour(host_name, pnic_dev_list)
            host_dict.update({host_name: nei_list})
        return host_dict

    def get_pnic(self, is_dvs, dvs_host_name, cmn_pg):
        '''
        Top level function to retrieve the information associated with the
        pnic: dispatch on DVS vs standard vSwitch.
        '''
        if is_dvs:
            return self.get_dvs_pnic_info(dvs_host_name, cmn_pg)
        return self.get_host_pnic_info(dvs_host_name, cmn_pg)
| 11,437 | 3,577 |
import pytest
from principal import somar
from principal import sub
def test_somar():
    """somar should add its two arguments: 2 + 4 == 6."""
    resultado = somar(2, 4)
    assert resultado == 6
def test_sub():
    """sub should subtract its second argument from its first: 3 - 2 == 1."""
    # BUG FIX: the expected value was 20, which cannot be the result of
    # subtracting 2 from 3; the correct expectation is 1.
    assert sub(3, 2) == 1
| 23,041 | 1,493 |
from PIL import Image
from PIL import ImageChops
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import TextureConversionMain as tcm
def Convert(normPath):
    """Invert the blue channel of the normal map at *normPath* and save it
    as RGBA "NormalMap.png" into the module-level `workingDir`.

    NOTE(review): relies on the global `workingDir` being assigned before
    this function is called (done at the bottom of this script) — confirm
    call order if reused elsewhere.
    """
    # setup: load the image and split it into per-channel images
    ogNorm = Image.open(normPath)
    normTuple = tcm.SplitImg(ogNorm)
    print("Images Loaded")
    # Invert Blue Channel
    invB = ImageChops.invert(normTuple[2])
    normTuple = (normTuple[0],normTuple[1],invB)
    print("Image Inverted")
    # Normal Map: recombine R, G, inverted B with a solid-white alpha channel
    nrmWhite = tcm.CreateBWImg(ogNorm.size,255)
    normal = Image.merge("RGBA",(normTuple[0],normTuple[1],normTuple[2],nrmWhite))
    normal.save(workingDir + "NormalMap.png")
    print("Images Saved!")
# Read the source normal-map path handed over via Temp.txt (written by the
# main conversion tool next to this script).
# NOTE(review): paths are joined with '\\' — this script assumes Windows.
ogNormalPath = ""
with open(str(pathlib.Path(__file__).parent.absolute()) + '\\' + "Temp.txt") as file:
    line = file.readlines()
    ogNormalPath = line[0]
    ogNormalPath = ogNormalPath.rstrip("\n")
# Output directory = the directory containing the source normal map.
workingDir = str(pathlib.Path(ogNormalPath).parent.absolute()) + '\\'
Convert(ogNormalPath)
tcm.RemoveTempFiles()
| 1,042 | 397 |
__author__ = 'Ehsan'
from mininet.node import CPULimitedHost
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.log import setLogLevel, info
from mininet.node import RemoteController
from mininet.cli import CLI
"""
Instructions to run the topo:
    1. Go to the directory where this file is.
    2. run: sudo -E python Simple_Pkt_Topo.py
The topo has 4 switches and 4 hosts. They are connected in a star shape.
"""
class SimplePktSwitch(Topo):
    """Simple topology example.

    Four hosts, each attached to its own switch; s1 is the hub of a star
    of switches.
    """

    def __init__(self, **opts):
        """Create custom topo (delegating option handling to Topo)."""
        super(SimplePktSwitch, self).__init__(**opts)

        # Add hosts h1..h4 and switches s1..s4 (dpid = zero-padded index).
        hosts = [self.addHost('h%d' % i) for i in range(1, 5)]
        switches = [self.addSwitch('s%d' % i, dpid="%016d" % i)
                    for i in range(1, 5)]

        # Attach each host to its own switch.
        for host, switch in zip(hosts, switches):
            self.addLink(host, switch)

        # s1 forms the hub of the star.
        for switch in switches[1:]:
            self.addLink(switches[0], switch)
def run():
    """Start the topology against a remote controller and drop into the CLI.

    NOTE(review): controller address 192.168.56.1:6633 is hard-coded —
    confirm it matches the actual controller host.
    """
    c = RemoteController('c', '192.168.56.1', 6633)
    # controller=None so only the explicitly added remote controller is used.
    net = Mininet(topo=SimplePktSwitch(), host=CPULimitedHost, controller=None)
    net.addController(c)
    net.start()
    CLI(net)
    net.stop()
# if the script is run directly (sudo custom/optical.py):
# enable info-level mininet logging, then build and run the network.
if __name__ == '__main__':
    setLogLevel('info')
    run()
| 1,695 | 651 |
#!/usr/bin/python3
# INTEL CONFIDENTIAL
# Copyright 2018-2020 Intel Corporation
# The source code contained or described herein and all documents related to the
# source code ("Material") are owned by Intel Corporation or its suppliers or
# licensors. Title to the Material remains with Intel Corporation or its
# suppliers and licensors. The Material may contain trade secrets and proprietary
# and confidential information of Intel Corporation and its suppliers and
# licensors, and is protected by worldwide copyright and trade secret laws and
# treaty provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed, or disclosed
# in any way without Intel's prior express written permission.
# No license under any patent, copyright, trade secret or other intellectual
# property right is granted to or conferred upon you by disclosure or delivery of
# the Materials, either expressly, by implication, inducement, estoppel or
# otherwise. Any license under such intellectual property rights must be express
# and approved by Intel in writing.
# Include any supplier copyright notices as supplier requires Intel to use.
# Include supplier trademarks or logos as supplier requires Intel to use,
# preceded by an asterisk. An asterisked footnote can be added as follows:
# *Third Party trademarks are the property of their respective owners.
# Unless otherwise agreed by Intel in writing, you may not remove or alter
# this notice or any other notice embedded in Materials by Intel or Intel's
# suppliers or licensors in any way.
import requests
class MSTeamsCommunicator:
    """Class communicating with MSTeams using Incoming Webhook.

    The purpose of this class is to use MSTeams API to send message.
    Docs for used API, including wrapped methods can be found at:
    https://docs.microsoft.com/en-us/outlook/actionable-messages/send-via-connectors
    """

    def __init__(self, _ci_alerts_channel_url):
        """
        :param _ci_alerts_channel_url: Incoming Webhook URL of the alerts channel
        :type _ci_alerts_channel_url: String
        """
        self._ci_alerts_channel_url = _ci_alerts_channel_url
        # Messages are queued per channel URL and flushed by send_message().
        self._queued_messages = {
            self._ci_alerts_channel_url: [],
        }

    @property
    def messages(self):
        """
        Get list of queued messages.

        :return: List of queued message lists, one entry per channel
        :return type: List[List[String]]
        """
        # BUG FIX: previously returned a live dict view although documented
        # as a list; return a real list to match the documented contract.
        return list(self._queued_messages.values())

    def queue_message(self, message):
        """
        Queue message to be sent later.

        :param message: Message content
        :type message: String
        """
        self._queued_messages[self._ci_alerts_channel_url].append(message)

    def _parse_text(self, message):
        """
        Parse text to display as alert.

        Expects at least 4 newline-separated lines; line 0 is a
        "<info> - <log url>" header, line 2 the title, line 3 a
        "<label>: <detail>" text and the last line the Watchdog log URL.

        :param message: Unparsed message content
        :type message: String
        :return: (title, log_url, formatted body) tuple
        """
        message_split = message.split('\n')
        title = message_split[2]
        log_url = message_split[-1]
        text = message_split[3]
        header = message_split[0].split(' - ')
        header_formatted = '{} - [Watchdog Log]({})'.format(header[0], header[1])
        # Bold the detail part after the first ':'.
        text_formatted = '{}: ***{}***'.format(text.split(':', 1)[0], text.split(':', 1)[1])
        return title, log_url, '{}\n\n{}'.format(header_formatted, text_formatted)

    def _json_request_content(self, title, log_url, text_formatted):
        """
        Create final json request to send message to MS Teams channel.

        :param title: Title of alert
        :param log_url: URL to Watchdog log
        :param text_formatted: General content of alert - finally formatted
        :type title: String
        :type log_url: String
        :type text_formatted: String
        :return: MessageCard payload dict
        """
        data = {
            '@context': 'https://schema.org/extensions',
            '@type': 'MessageCard',
            'themeColor': '0072C6',
            'title': title,
            'text': text_formatted,
            'potentialAction':
                [
                    {
                        '@type': 'OpenUri',
                        'name': 'Open PR',
                        'targets':
                            [
                                {
                                    'os': 'default',
                                    'uri': log_url,
                                },
                            ],
                    },
                ],
        }
        return data

    def _send_to_channel(self, message, channel_url):
        """
        Send MSTeams message to specified channel.

        :param message: Message content
        :type message: String
        :param channel_url: Channel url
        :type channel_url: String
        :raises Exception: when the HTTP POST fails
        """
        title, log_url, text_formatted = self._parse_text(message)
        data = self._json_request_content(title, log_url, text_formatted)
        try:
            # ROBUSTNESS: bounded timeout so a stuck webhook cannot hang CI;
            # a timeout is reported through the same exception path as before.
            requests.post(url=channel_url, json=data, timeout=30)
        except Exception as ex:
            raise Exception('!!CRITICAL!! MSTeamsCommunicator: Could not send message '
                            'due to {}'.format(ex))

    def send_message(self, message, quiet=False):
        """
        Send queued messages as single communication.

        :param message: Final message's content
        :param quiet: Flag for disabling sending report through MS Teams
        :type message: String
        :type quiet: Boolean
        """
        for channel, message_queue in self._queued_messages.items():
            final_message = message + '\n\n' + '\n'.join(message_queue)
            # Skip sending when muted or when nothing was queued.
            if not quiet and message_queue:
                self._send_to_channel(final_message, channel)
| 5,806 | 1,484 |
"""Create wave training data.
Automatically annotate data by identifying waves. The begining,
middle and ending are obtained. Windows around these points are
collected as training data. They are organized in years.
"""
import sys, glob, os
path = os.getcwd()
sys.path.insert(0, ".")
from datetime import datetime
from vnpy.trader.database import database_manager
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import time
from lib import utils
from lib.alg import get_waves
def normalize(x):
    """Turn raw feature windows into one-step relative changes.

    For each position along the last axis, returns value[t+1]/value[t] - 1
    (with a tiny epsilon guarding against division by zero), so the output
    has one fewer element along the last axis than the input.
    """
    prev = x[:, :, :-1]
    curr = x[:, :, 1:]
    return curr / (1e-9 + prev) - 1
os.chdir(path)
# Labeling parameters.
WIN_SIZE = 20 # four weeks
PAD = 0 # the minimum distance between two different labels
NUM_SEGMENTS = 3
FEATURE_KEYS = ['open_price', 'close_price', 'high_price', 'low_price', 'volume']
# All instruments; keep day-line ('d') entries only.
binfos = utils.fast_index().values
binfos = [b for b in binfos if b[3] == 'd'] # day line only
data_keys = ["buy", "sell", "hold", "empty"]
key2color = {
    "buy": "red",
    "sell": "green",
    "hold": "orange",
    "empty": "blue"}
# dic[label][vt_symbol][year] -> normalized feature windows.
dic = {k: {} for k in data_keys}
# NOTE(review): these counters are never incremented/used below.
buy_count = sell_count = hold_count = 0
for idx, binfo in enumerate(tqdm(binfos)):
    _, symbol, exchange, interval, _ = binfo
    vt_symbol = f"{symbol}.{exchange}"
    for key in data_keys:
        dic[key][vt_symbol] = {}
    start = datetime.strptime(f"2000-01-01", "%Y-%m-%d")
    end = datetime.strptime(f"2021-01-01", "%Y-%m-%d")
    bars = database_manager.load_bar_data_s(
        symbol=symbol, exchange=exchange, interval="d",
        start=start, end=end)
    # Skip instruments with too little history.
    if len(bars) < 100:
        continue
    df = utils.bars_to_df(bars)
    N = df.shape[0]
    # get waves
    prices = df['close_price'].values
    waves = get_waves(prices, T1=0.30, T2=0.20)
    # points[label][year] accumulates raw windows before normalization.
    points = {k: {} for k in data_keys}
    for year in range(2000, 2022):
        for key in points.keys():
            points[key][str(year)] = []
    # Visualize only the first few instruments.
    plot_flag = idx < 5
    plot_waves = 10
    if plot_flag:
        st = 0
        ed = waves[plot_waves - 1][2]
        x = np.arange(st, ed)
        y = df['close_price'].values[st:ed]
        fig = plt.figure(figsize=(18, 6))
        plt.plot(x, y)
        lx = np.arange(st, ed)
    # Each wave is (x1, y1, x2, y2, type); type -1/0/1 = down/flat/up.
    for wave_id, (x1, y1, x2, y2, t) in enumerate(waves):
        if t == -1: # decrease wave
            offset = 0.8
            start_key, middle_key, end_key = "sell", "hold", "hold"
        elif t == 0: # null wave
            offset = 1.0
            start_key, middle_key, end_key = "empty", "empty", "empty"
        elif t == 1: # increase wave
            offset = 1.2
            start_key, middle_key, end_key = "buy", "hold", "hold"
        # segment length
        S = (x2 - x1) // NUM_SEGMENTS
        if plot_flag and t != 0:
            ly = (y2 - y1) * offset / (x2 - x1) * (lx - x1) + y1 * offset
        if plot_flag and t == 0:
            ly = np.zeros_like(lx) + y1 * offset
        # Collect WIN_SIZE-long feature windows ending at each index in
        # [win_st, win_ed) under label ckey (closure over df/points/lx/ly).
        def _work(ckey, win_st, win_ed):
            if win_st >= win_ed:
                return None
            if plot_flag and wave_id < plot_waves and win_ed > win_st:
                plt.plot(lx[win_st:win_ed], ly[win_st:win_ed],
                    color=key2color[ckey], linestyle='-')
            for i in range(win_st, win_ed):
                d = np.array([df[key][i - WIN_SIZE : i] \
                    for key in FEATURE_KEYS])
                year = str(df.index[i - WIN_SIZE].year)
                points[ckey][year].append(d)
        # Beginning / middle / ending segments of the wave.
        _work(start_key, max(x1 + PAD + 1, WIN_SIZE), x1 + S + 1)
        _work(middle_key, max(x1 + S + PAD + 1, WIN_SIZE), x2 - S + 1)
        _work(end_key, max(x2 - S + PAD + 1, WIN_SIZE), x2 + 1)
    # Normalize and store, filtering out windows with absurd values.
    for key in points.keys():
        for year in points[key]:
            if len(points[key][year]) == 0:
                continue
            x = normalize(np.array(points[key][year]))
            if np.abs(x).max() < 100: # filter error data
                dic[key][vt_symbol][year] = x
    if plot_flag:
        plt.savefig(f"results/buy_point_viz_{idx}.png")
        plt.close()
    # Flush to disk every 100 instruments to bound memory usage.
    if (idx + 1) % 100 == 0:
        I = (idx + 1) // 100
        np.save(f"data/buy_point/share_{I:02d}.npy", dic)
        del dic
        dic = {k: {} for k in data_keys}
# Save the remaining partial shard.
# NOTE(review): `I` is unbound here if fewer than 100 instruments were
# processed (NameError) — confirm binfos is always large enough.
np.save(f"data/buy_point/share_{I + 1:02d}.npy", dic)
import time
from neopixel import Adafruit_NeoPixel
import argparse
# Pixel class corresponds to a light strip
# It stores the strip, characteristics, and state
class Pix:
    """Wrapper for a NeoPixel light strip: configuration, strip object, state.

    NOTE(review): all attributes below are class-level, so every instance
    shares the same strip and the same state dict; strip.begin() executes
    when the class body runs (i.e. at import time) — confirm this is
    intended before reusing the module.
    """
    # LED strip configuration:
    LED_COUNT = 594 # Number of LED pixels.
    LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
    LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
    LED_DMA = 10 # DMA channel to use for generating signal (try 10)
    LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
    LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
    LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
    # state will store the current state of the lights
    state = {"red": 0, "green": 0, "blue": 0}
    # Create NeoPixel object with appropriate configuration.
    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    # Intialize the library (must be called once before other functions).
    strip.begin()
| 1,087 | 398 |
# Package version string (module-level version attribute).
__version__ = '3.6.0.1'
| 24 | 15 |
import sublime
from ..SublimeCscope import PACKAGE_NAME
# Fallback values used by get() when a setting is defined neither in the
# project data nor in the package's .sublime-settings file.
# NOTE(review): 'tmp_folder' defaults to a list — confirm whether a string
# path was intended.
SETTING_DEFAULTS = {
    'index_file_extensions': [
        ".c",
        ".cc",
        ".cpp",
        ".h",
        ".hpp",
        ".l",
        ".y",
        ".py",
        ".rb",
        ".java"
    ],
    'cscope_path': None,
    'search_std_include_folders': False,
    'extra_include_folders': [],
    'tmp_folder': [],
    'maximum_results': 1000
}
def load_settings():
    """Load this package's .sublime-settings file via the Sublime API."""
    return sublime.load_settings(PACKAGE_NAME + '.sublime-settings')
def get(key, view_or_window):
    """Look up setting *key* for the given view or window.

    Resolution order: project settings (key prefixed with
    'sublimecscope_'), then the package settings file, then
    SETTING_DEFAULTS.

    :param key: setting name without the project prefix
    :param view_or_window: a sublime View or Window object
    """
    default = SETTING_DEFAULTS.get(key, None)
    #first lookup the setting in project if it exists
    #(prefixed by 'sublimecscope_')
    win = view_or_window
    if hasattr(view_or_window, 'window'):
        win = view_or_window.window()
    # BUG FIX: project_data() returns None when no project is open, which
    # previously raised AttributeError on .get(); guard against it.
    proj_data = win.project_data() if win else None
    proj_settings = proj_data.get('settings', None) if proj_data else None
    proj_settings_key = 'sublimecscope_' + key
    if proj_settings and proj_settings_key in proj_settings:
        return proj_settings[proj_settings_key]
    #Otherwise look in our own settings
    return load_settings().get(key, default)
| 1,730 | 417 |
import os
import re
from string import letters
import random
import hashlib
import hmac
import webapp2
import jinja2
from jinja2 import Environment
from google.appengine.ext import db
from utils.Utils import Utils
from models.Comment import Comment
from models.Post import Post
from models.User import User
from handlers.BlogFront import BlogFront
from handlers.DeleteComment import DeleteComment
from handlers.DeletePost import DeletePost
from handlers.DisPost import DisPost
from handlers.EditComment import EditComment
from handlers.EditPost import EditPost
from handlers.LikePost import LikePost
from handlers.Login import Login
from handlers.Logout import Logout
from handlers.MainPage import MainPage
from handlers.NewPost import NewPost
from handlers.PostPage import PostPage
from handlers.Signup import *
from handlers.Welcome import Welcome
# URL routing table for the blog application (webapp2 / App Engine).
# NOTE(review): Register comes in via the star import from handlers.Signup —
# confirm it is defined there. debug=True exposes stack traces; disable in
# production.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/signup', Register),
    ('/login', Login),
    ('/logout', Logout),
    ('/blog/welcome', Welcome),
    ('/blog/?', BlogFront),
    ('/blog/([0-9]+)', PostPage),
    ('/blog/newpost', NewPost),
    ('/blog/editpost/([0-9]+)', EditPost),
    ('/blog/delete/([0-9]+)', DeletePost),
    ('/blog/like/([0-9]+)', LikePost),
    ('/blog/dis/([0-9]+)', DisPost),
    ('/blog/deletecomment/([0-9]+)', DeleteComment),
    ('/blog/editcomment/([0-9]+)', EditComment)
], debug=True)
| 1,393 | 428 |
import ast, _ast, subprocess, os, argparse
def write_files(app_name):
    """Generate serializers.py, views.py, admin.py and urls.py for a Django
    REST app by statically parsing its models.py.

    :param app_name: name of the Django app directory to read/write
    NOTE(review): the AST walk assumes every class-level assignment in
    models.py is a field call like models.CharField(...) (x.value.func.attr)
    — plain assignments would raise AttributeError. Confirm models.py shape.
    """
    models = {}
    # parse models.py: map class name -> {field name: field type name}
    with open('%s/models.py' % app_name) as models_file:
        m = ast.parse(models_file.read())
    for i in m.body:
        if type(i) == _ast.ClassDef:
            models[i.name] = {}
            for x in i.body:
                if type(x) == _ast.Assign:
                    models[i.name][x.targets[0].id] = x.value.func.attr
            # Every model implicitly gets an id primary key.
            models[i.name]['id'] = "Intrinsic"
    serializer_names = [model+'Serializer' for model in models]
    # serializers.py — note: ser_class only *defines* the writer here; the
    # header lines are written first, then ser_class is invoked per model.
    with open('%s/serializers.py' % app_name, 'w') as ser_file:
        def ser_class(model):
            # Emit one HyperlinkedModelSerializer subclass for `model`.
            s = "class %sSerializer(serializers.HyperlinkedModelSerializer):\n" % model
            s += " class Meta:\n"
            s += " model = %s\n" % model
            if len(models[model]) > 0:
                s += " "*8 + "fields = (" + ', '.join(["'%s'" % x for x in models[model]]) + ',)\n'
            ser_file.write('\n')
            ser_file.write(s)
        ser_file.write('\n'.join(["from django.contrib.auth.models import User, Group",
            "from rest_framework import serializers",
            "from %s.models import " % app_name + ', '.join([model for model in models]) + '\n']))
        ser_file.write('\n'.join(["\nclass GroupSerializer(serializers.ModelSerializer):",
            " "*4 + "class Meta:",
            " "*8 + "model = Group",
            " "*8 + "fields = ('id', 'name',)\n"]))
        ser_file.write('\n'.join(["\nclass UserSerializer(serializers.ModelSerializer):",
            " "*4 + "groups = GroupSerializer(many=True)",
            " "*4 + "class Meta:",
            " "*8 + "model = User",
            " "*8 + "fields = ('id', 'username', 'email', 'groups',)\n"]))
        for model in models:
            ser_class(model)
    # views.py — one ModelViewSet per model, plus User and Group.
    with open('%s/views.py' % app_name, 'w') as view_file:
        def viewset_class(model):
            # Emit one ModelViewSet subclass for `model`.
            v = "class %sViewSet(viewsets.ModelViewSet):\n" % model
            v += " queryset = %s.objects.all()\n" % model
            v += " serializer_class = %sSerializer\n" % model
            view_file.write('\n')
            view_file.write(v)
        view_file.write('\n'.join(["from rest_framework import viewsets",
            "from django.contrib.auth.models import User, Group",
            "from %s.serializers import UserSerializer, GroupSerializer, " % app_name + ', '.join([name for name in serializer_names]),
            "from %s.models import " % app_name + ', '.join([model for model in models]) + '\n']))
        viewset_class("User")
        viewset_class("Group")
        for model in models:
            viewset_class(model)
    # admin.py — one ModelAdmin per model; ManyToMany fields are excluded
    # from list_display (the admin cannot display them there).
    with open('%s/admin.py' % app_name, 'w') as admin_file:
        def admin_class(models):
            for model in models:
                a = "class %sAdmin(admin.ModelAdmin):\n" % model
                a += " queryset = %s.objects.all()\n" % model
                z = ["'%s'" % x for x in models[model] if models[model][x] != 'ManyToManyField']
                if len(z) > 0:
                    a += " " + "list_display = (" + ', '.join(z) + ',)\n'
                admin_file.write('\n')
                admin_file.write(a)
        admin_file.write('from django.contrib import admin\n')
        admin_file.write('from .models import ' + ', '.join([model for model in models]) + '\n')
        admin_class(models)
        admin_file.write('\n')
        for model in models:
            admin_file.write("admin.site.register(%(0)s, %(0)sAdmin)\n" % {'0':model})
    # urls.py — DRF router with a route per model (naive pluralization).
    with open('%s/urls.py' % app_name, 'w') as url_file:
        url_file.write('\n'.join(["from django.conf.urls import url, include",
            "from rest_framework import routers",
            "from %s import views\n" % app_name,
            "router = routers.DefaultRouter()",
            "router.register(r'users', views.UserViewSet)",
            "router.register(r'groups', views.GroupViewSet)\n"]))
        for model in models:
            plural = 's'
            if model.endswith('s'):
                plural = 'es'
            url_file.write("router.register(r'%(0)s', views.%(1)sViewSet)\n" % {'0':model.lower() + plural, '1':model})
        url_file.write('\n')
        u = '\n'.join(["urlpatterns = [",
            " "*4 + "url(r'^%s/', include(router.urls))," % app_name,
            "]"])
        url_file.write(u)
# CLI entry point: generate the files, then (optionally) create a
# virtualenv, install requirements and run Django migrations.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Populate serializers, views, urls, and admin based on models.py')
    parser.add_argument('--disable_venv', help='Disable creation of virtual environment', action="store_true")
    parser.add_argument("--app_name", help='App name on which to perform script', default="{{cookiecutter.app_name}}")
    args = parser.parse_args()
    write_files(args.app_name)
    my_env = os.environ.copy()
    if not args.disable_venv:
        subprocess.check_call(["virtualenv", "venv"])
        # Prepend the venv's bin so the pip/python calls below use it.
        my_env["PATH"] = os.getcwd() + '/venv/bin:' + my_env["PATH"]
    subprocess.check_call(["pip", "install", "-r", "requirements.txt"], env=my_env)
    subprocess.check_call(["python", "manage.py", "makemigrations", "%s" % args.app_name], env=my_env)
    subprocess.check_call(["python", "manage.py", "migrate"], env=my_env)
| 5,784 | 1,768 |
from abc import ABC, abstractmethod
from typing import Callable
class PluginBase(ABC):
    """Abstract base for on/off plugins.

    Subclasses implement on() and off() returning True on success; the
    base class records the last successful action (see __getattribute__).
    """

    def __init__(self, *, name: str, port: int) -> None:
        self._name = name
        self._port = port
        # Last successfully executed action; starts as "off".
        self._latest_action = "off"

    def __getattribute__(self, name: str) -> Callable:
        # Intercept access to "on"/"off": execute the underlying method
        # immediately, record the action when it returned True, and hand
        # back a zero-argument callable returning that result — so
        # `plugin.on()` still reads naturally at the call site.
        if name in ("on", "off"):
            success = object.__getattribute__(self, name)()
            if success is True:
                self._latest_action = name
            return lambda: success
        return object.__getattribute__(self, name)

    def __repr__(self) -> str:
        attrs = ", ".join(f"{k}={v!r}" for k, v in self.__dict__.items())
        # BUG FIX: the closing parenthesis was missing from the f-string.
        return f"{self.__class__.__name__}({attrs})"

    @property
    def port(self) -> int:
        return self._port

    @property
    def name(self) -> str:
        return self._name

    @abstractmethod
    def on(self) -> bool:  # pylint:disable=invalid-name
        pass

    @abstractmethod
    def off(self) -> bool:
        pass

    def get_state(self) -> str:
        """Return the name of the latest successful action ("on"/"off")."""
        return self._latest_action

    def close(self) -> None:
        """Release resources; no-op by default, override when needed."""
        pass

    @property
    def latest_action(self) -> str:
        return self._latest_action
| 1,188 | 361 |
import sys
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponse, HttpResponseRedirect
from .forms import SearchForm
from .models import Search
def home(request):
    """Render the search page; on POST, fetch embedded tweets for the query.

    NOTE(review): Python 2 code (print statement). On lookup failure
    getTweets may return an error string rather than a list — confirm the
    template handles both shapes of 'tweets'.
    """
    if request.POST:
        arrTweets = getTweets(request.POST['search'])
        print arrTweets
        context = {
            'tweets': arrTweets,
            'form' : SearchForm()
        }
        return render(request, 'search.html', context)
    else:
        # GET: just show an empty search form.
        context = {
            'form': SearchForm()
        }
        return render(request, 'search.html', context)
#add options for popular, recent, no. of results,
def getTweetsID(strTerm):
arrToReturn=[]
try:
import twitter
api=twitter.Api('49tlKU14YrVhPyQTetupdOG1P', 'PyJ0sZuxNlPftq4h06lob8NWCndIud4Ej2WQEEN8rl55HZZNpM', '367335159-VEx8tRMHEfkMQXA92PWtFCHN6ZqxasYoLfYOZj1u', 'TOM1yK1cAKKpX4MvjpPsDkifMbSz6nLQqrXVqYs0WDZj9')
searchResults=api.GetSearch(term=strTerm, count=5, result_type="popular")
for result in searchResults:
arrToReturn.append(result.id)
return arrToReturn
except:
print str(sys.exc_info())
return "Problem with twitter import."
#todo: get tweets by above function and use id to embed
def getTweets(strTerm):
arrResults = getTweetsID(strTerm)
arrToReturn = []
try:
from embed.utils import Embed
Embed.consumer_key = '49tlKU14YrVhPyQTetupdOG1P'
Embed.consumer_secret = 'PyJ0sZuxNlPftq4h06lob8NWCndIud4Ej2WQEEN8rl55HZZNpM'
Embed.oauth_token = '367335159-VEx8tRMHEfkMQXA92PWtFCHN6ZqxasYoLfYOZj1u'
Embed.oauth_token_secret = 'TOM1yK1cAKKpX4MvjpPsDkifMbSz6nLQqrXVqYs0WDZj9'
Embed.config = {
'height': '300',
'width': '400',
}
for result in arrResults:
embed_code = Embed.get_twitter_embed_by_id(id=result)
arrToReturn.append(embed_code)
return arrToReturn
except:
print str(sys.exc_info())
return "Problem with twitter import."
'''
TODO:
- add a model for facebook posts
- see how we can make a request to facebook
- get and parse the response from said request
- put the content into the search page
- make the search page friendlier
'''
from datetime import datetime
from calendar import timegm
from rest_framework_jwt.compat import get_username, get_username_field
from rest_framework_jwt.settings import api_settings
def jwt_otp_payload(user, device = None):
    """
    Build the JWT payload dict for *user*; optionally includes the OTP
    device in the payload (translated from Portuguese).

    NOTE(review): the ``device`` argument is currently unused, and
    ``username_field`` below is computed but never read — confirm whether
    device/username claims were meant to be added to the payload.
    """
    username_field = get_username_field()
    username = get_username(user)
    # Core claims: user id, username and expiry.
    payload = {
        'user_id': user.pk,
        'username': username,
        'exp': datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA
    }
    # Include original issued at time for a brand new token,
    # to allow token refresh
    if api_settings.JWT_ALLOW_REFRESH:
        payload['orig_iat'] = timegm(
            datetime.utcnow().utctimetuple()
        )
    # Optional audience/issuer claims driven by settings.
    if api_settings.JWT_AUDIENCE is not None:
        payload['aud'] = api_settings.JWT_AUDIENCE
    if api_settings.JWT_ISSUER is not None:
        payload['iss'] = api_settings.JWT_ISSUER
    return payload
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 15 18:36:44 2021
@author: aiswarya
"""
#import sys
#sys.path.append('/home/aiswarya/bayesvp')
from bayesvp.scripts import bvp_write_config as wc
from bayesvp.scripts import bvp_process_model as pm
from bayesvp.scripts import bvpfit as fit
from bayesvp import config as conf
from bayesvp import utilities as util
from bayesvp import mcmc_setup as setup
from bayesvp.config import DefineParams as dp
import matplotlib.pyplot as plt
import glob
#import WriteConfig
# Directory holding the simulated spectrum and its config files.
spec_path = '/home/aiswarya/bvprun/sim1'
"""
config = wc.WriteBayesVPConfig()
config.print_to_file(interactive_write=(True))
"""
config_fname = spec_path+'/config_OVI.dat'
#run fitting
setup.bvp_mcmc(config_fname)
redshift = 0.34758
dv = 300
#plot fitting
compfiles = glob.glob(spec_path+'/config_OVI*.dat',recursive=True)
print(compfiles)
for file in compfiles:
    try:
        config_fname = file
        config_params = dp(config_fname)
        #print('line32')
        output = pm.ProcessModel(config_params)
        output.plot_model_comparison(redshift, dv)
        output.corner_plot()
        output.write_model_summary()
        output.write_model_spectrum()
        output.plot_gr_indicator()
    except Exception as err:
        # Previously a bare `except: continue`, which silently swallowed
        # every failure (including KeyboardInterrupt). Report and move on.
        print('Skipping {0}: {1}'.format(file, err))
        continue
import os
os.system("""spd-say 'yo Aiswarya, lets party'""")
"""
This script load and tally the result of UCF24 dataset in latex format.
"""
import os
import json
def run_exp(cmd):
    """Run *cmd* through the shell and return its exit status."""
    status = os.system(cmd)
    return status
if __name__ == '__main__':
    # Root of the cached detection results for the resnet50 backbone.
    base = '/mnt/mercury-alpha/ucf24/cache/resnet50'
    # (result_mode, label_types): frame-level metrics vs video-level ones.
    modes = [['frames',['frame_actions', 'action_ness', 'action']],
             ['video',['action',]]]
    # NOTE(review): this handle is never closed; consider `with open(...)`.
    logger = open('results_ucf24.tex','w')
    for result_mode, label_types in modes:
        logger.write('\n\nRESULTS FOR '+result_mode+'\n\n')
        # Build one LaTeX table per mode; start with the header row.
        atable = 'Model'
        for l in label_types: #['0.2', '0.5', '075','Avg-mAP']:
            atable += ' & ' + l.replace('_','-').capitalize()
        atable += '\\\\ \n\\midrule\n'
        subsets = ['train']
        # One table row per (network, test sequence length[, trim]) combo.
        for net,d in [('C2D',1), ('I3D',1),('RCN',1), ('RCLSTM',1)]:
            for seq, bs, tseqs in [(8,4,[8,32])]:
                for tseq in tseqs:
                    if result_mode == 'video':
                        trims = ['none','indiv']
                        # IoU thresholds 0.2/0.5/0.75 and the 0.50:0.05:0.90 average.
                        eval_ths_all = [[20], [50], [75], [a for a in range(50,95,5)]]
                    else:
                        trims = ['none']
                        eval_ths_all = [[50]]
                    for trim in trims:
                        if result_mode != 'video':
                            atable += '{:s}-{:02d} '.format(net, tseq).ljust(15)
                        else:
                            atable += '{:s}-{:02d}-{:s} '.format(net, tseq, trim).ljust(20)
                        for train_subset in subsets:
                            splitn = train_subset[-1]
                            for eval_ths in eval_ths_all:
                                # logger.write(eval_ths)
                                # anums accumulates mAP sums per label type so the
                                # average over eval_ths can be taken below.
                                anums = [[0,0] for _ in label_types]
                                for eval_th in eval_ths:
                                    if result_mode == 'frames':
                                        result_file = '{:s}{:s}512-Pkinetics-b{:d}s{:d}x1x1-ucf24t{:s}-h3x3x3/frame-ap-results-10-{:02d}-50.json'.format(base, net, bs, seq, splitn, tseq)
                                    else:
                                        result_file = '{:s}{:s}512-Pkinetics-b{:d}s{:d}x1x1-ucf24t{:s}-h3x3x3/tubes-10-{:02d}-80-20-score-25-4/video-ap-results-{:s}-0-{:d}-stiou.json'.format(base, net, bs, seq, splitn, tseq, trim, int(eval_th))
                                    # Missing result files contribute 0 to the average.
                                    if os.path.isfile(result_file):
                                        with open(result_file, 'r') as f:
                                            results = json.load(f)
                                    else:
                                        results = None
                                    for nlt, label_type in enumerate(label_types):
                                        cc = 0
                                        for subset, pp in [('test','&')]: #,
                                            tag = subset + ' & ' + label_type
                                            if results is not None and tag in results:
                                                num = results[tag]['mAP']
                                                anums[nlt][cc] += num
                                            cc += 1
                                # Emit the averaged mAP cells for this row.
                                for nlt, label_type in enumerate(label_types):
                                    cc = 0
                                    for subset, pp in [('test','&')]: #,
                                        num = anums[nlt][cc]/len(eval_ths)
                                        atable += '{:s} {:0.01f} '.format(pp, num)
                                        cc += 1
                        atable += '\\\\ \n'
        logger.write(atable)
| 3,781 | 1,123 |
# -*- coding: utf-8 -*-
"""
author:wnl
date: 2018-12-7
"""
import os
from sayhello import app
# Development database DSN.
# SECURITY NOTE(review): credentials are hard-coded in source; prefer env config.
dev_db = 'mysql+pymysql://root:root.123@192.168.100.105:3306/sayhello2'
# NOTE(review): the env var name 'SECRRET' looks like a typo for 'SECRET';
# confirm which name deployments actually set before renaming it.
SECRET_KEY = os.getenv('SECRRET','secret string')
# Disable Flask-SQLAlchemy's change-tracking event overhead.
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Allow overriding the database via DATABASE_URI; fall back to dev_db.
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URI',dev_db)
| 322 | 162 |
"""
===============
=== Purpose ===
===============
Downloads wiki access logs and stores unprocessed article counts
See also: wiki.py
Note: for maximum portability, this program is compatible with both Python2 and
Python3 and has no external dependencies (e.g. running on AWS)
=================
=== Changelog ===
=================
2017-02-24 v10
+ compute hmac over returned data
2016-08-14: v9
* use pageviews instead of pagecounts-raw
2015-08-12: v8
* Corrected `Influenzalike_illness` to `Influenza-like_illness`
2015-05-21: v7
* Updated for Python3 and to be directly callable by wiki.py
2015-05-??: v1-v6
* Original versions
"""
# python 2 and 3
from __future__ import print_function
import sys
if sys.version_info.major == 2:
# python 2 libraries
from urllib import urlencode
from urllib2 import urlopen
else:
# python 3 libraries
from urllib.parse import urlencode
from urllib.request import urlopen
# common libraries
import argparse
import datetime
import hashlib
import hmac
import json
import subprocess
import time
import os
from sys import platform
from . import wiki_util
# Data-format version reported to the master (see the changelog header above).
VERSION = 10
# Central job-coordination endpoint that hands out pageviews dump files.
MASTER_URL = 'https://delphi.cmu.edu/~automation/public/wiki/master.php'
def text(data_string):
    """Decode a UTF-8 byte string into a native str."""
    decoded = data_string.decode('utf-8')
    return str(decoded)
def data(text_string):
    """Encode text for urlopen: UTF-8 bytes on Python 3, unchanged on Python 2."""
    if sys.version_info.major != 2:
        return bytes(text_string, 'utf-8')
    return text_string
def get_hmac_sha256(key, msg):
    """Return the hex HMAC-SHA256 digest of *msg* keyed with *key* (both str)."""
    mac = hmac.new(key.encode('utf-8'), msg.encode('utf-8'), hashlib.sha256)
    return mac.hexdigest()
def extract_article_counts(filename, language, articles, debug_mode):
    """Scan a pageviews dump once and tally counts for selected articles.

    Each line of *filename* is "<lang> <title> <count> <bytes>". Returns a
    dict mapping lowercase article title -> count for titles in *articles*
    (matched case-insensitively) under *language*, plus a 'total' key with
    the sum of all counts for that language.

    Supports multiple languages ('en' | 'es' | 'pt'); a single pass, so it
    runs in O(M) for a file of M lines.
    """
    wanted = set(map(lambda a: a.lower(), articles))
    counts = {}
    total = 0
    with open(filename, "r", encoding="utf8") as fh:
        for line in fh:
            fields = line.strip().split()
            if len(fields) != 4:
                print('unexpected article format: {0}'.format(line))
                continue
            title = fields[1].lower()
            hits = int(fields[2])
            if fields[0] == language:
                total += hits
                if title in wanted:
                    if debug_mode:
                        print("Find article {0}: {1}".format(title, line))
                    counts[title] = hits
    if debug_mode:
        print("Total number of counts for language {0} is {1}".format(language, total))
    counts['total'] = total
    return counts
def extract_article_counts_orig(articles, debug_mode):
    """
    The original method which extracts article counts by shell command grep (only support en articles).
    As it is difficult to deal with other languages (utf-8 encoding), we choose to use python read files.
    Another things is that it is slower to go over the whole file once and once again, the time complexity is O(NM),
    where N is the number of articles and M is the lines in the file
    In our new implementation extract_article_counts(), the time complexity is O(M), and it can cope with utf8 encoding
    :param articles:
    :param debug_mode:
    :return:
    """
    counts = {}
    for article in articles:
        if debug_mode:
            print(' %s' % (article))
        # One grep over the hard-coded working file "raw2" per article (O(N*M)).
        # NOTE(review): the article name is interpolated into a shell command;
        # shell metacharacters in titles would break/inject — confirm inputs.
        out = text(
            subprocess.check_output('LC_ALL=C grep -a -i "^en %s " raw2 | cat' % (article.lower()), shell=True)).strip()
        count = 0
        if len(out) > 0:
            for line in out.split('\n'):
                fields = line.split()
                if len(fields) != 4:
                    print('unexpected article format: [%s]' % (line))
                else:
                    count += int(fields[2])
        # print ' %4d %s'%(count, article)
        counts[article.lower()] = count
        if debug_mode:
            print(' %d' % (count))
    print('getting total count...')
    # Sum the count column of every English line via grep/cut/awk.
    out = text(subprocess.check_output(
        'cat raw2 | LC_ALL=C grep -a -i "^en " | cut -d" " -f 3 | awk \'{s+=$1} END {printf "%.0f", s}\'', shell=True))
    total = int(out)
    if debug_mode:
        print(total)
    counts['total'] = total
    return counts
def run(secret, download_limit=None, job_limit=None, sleep_time=1, job_type=0, debug_mode=False):
    """Worker loop: pull pageviews jobs from the master, process, and report.

    For each job: download the dump, optionally verify its md5, decompress,
    tally per-language article counts, and POST the HMAC-signed result back
    to MASTER_URL. Stops when the optional download (bytes) or job limits
    are reached; with both limits None it polls forever.

    NOTE(review): relies on a POSIX toolchain (curl, gunzip, md5sum, mv, rm)
    being on PATH — confirm for the deployment environment.
    """
    worker = text(subprocess.check_output("echo `whoami`@`hostname`", shell=True)).strip()
    print('this is [%s]'%(worker))
    if debug_mode:
        print('*** running in debug mode ***')
    total_download = 0
    passed_jobs = 0
    failed_jobs = 0
    while (download_limit is None or total_download < download_limit) and (job_limit is None or (passed_jobs + failed_jobs) < job_limit):
        try:
            time_start = datetime.datetime.now()
            # Ask the master for a job of the requested type.
            req = urlopen(MASTER_URL + '?get=x&type=%s'%(job_type))
            code = req.getcode()
            if code != 200:
                # 201 means "no work right now"; anything else is an error.
                if code == 201:
                    print('no jobs available')
                    if download_limit is None and job_limit is None:
                        time.sleep(60)
                        continue
                    else:
                        print('nothing to do, exiting')
                        return
                else:
                    raise Exception('server response code (get) was %d'%(code))
            # Make the code compatible with mac os system
            if platform == "darwin":
                job_content = text(req.readlines()[1])
            else:
                job_content = text(req.readlines()[0])
            if job_content == 'no jobs':
                print('no jobs available')
                if download_limit is None and job_limit is None:
                    time.sleep(60)
                    continue
                else:
                    print('nothing to do, exiting')
                    return
            job = json.loads(job_content)
            print('received job [%d|%s]'%(job['id'], job['name']))
            # updated parsing for pageviews - maybe use a regex in the future
            #year, month = int(job['name'][11:15]), int(job['name'][15:17])
            # Slice year/month out of the dump filename (pageviews-YYYYMMDD…).
            year, month = int(job['name'][10:14]), int(job['name'][14:16])
            #print 'year=%d | month=%d'%(year, month)
            url = 'https://dumps.wikimedia.org/other/pageviews/%d/%d-%02d/%s'%(year, year, month, job['name'])
            print('downloading file [%s]...'%(url))
            subprocess.check_call('curl -s %s > raw.gz'%(url), shell=True)
            print('checking file size...')
            # Make the code cross-platfrom, so use python to get the size of the file
            # size = int(text(subprocess.check_output('ls -l raw.gz | cut -d" " -f 5', shell=True)))
            size = os.stat("raw.gz").st_size
            if debug_mode:
                print(size)
            total_download += size
            # All-zero hash is the sentinel for "no checksum available".
            if job['hash'] != '00000000000000000000000000000000':
                print('checking hash...')
                out = text(subprocess.check_output('md5sum raw.gz', shell=True))
                result = out[0:32]
                if result != job['hash']:
                    raise Exception('wrong hash [expected %s, got %s]'%(job['hash'], result))
                if debug_mode:
                    print(result)
            print('decompressing...')
            subprocess.check_call('gunzip -f raw.gz', shell=True)
            #print 'converting case...'
            #subprocess.check_call('cat raw | tr "[:upper:]" "[:lower:]" > raw2', shell=True)
            #subprocess.check_call('rm raw', shell=True)
            subprocess.check_call('mv raw raw2', shell=True)
            print('extracting article counts...')
            # Use python to read the file and extract counts, if you want to use the original shell method, please use
            counts = {}
            for language in wiki_util.Articles.available_languages:
                lang2articles = {'en': wiki_util.Articles.en_articles, 'es': wiki_util.Articles.es_articles, 'pt': wiki_util.Articles.pt_articles}
                articles = lang2articles[language]
                articles = sorted(articles)
                if debug_mode:
                    print("Language is {0} and target articles are {1}".format(language, articles))
                temp_counts = extract_article_counts("raw2", language, articles, debug_mode)
                counts[language] = temp_counts
            if not debug_mode:
                print('deleting files...')
                subprocess.check_call('rm raw2', shell=True)
            print('saving results...')
            time_stop = datetime.datetime.now()
            result = {
                'id': job['id'],
                'size': size,
                'data': json.dumps(counts),
                'worker': worker,
                'elapsed': (time_stop - time_start).total_seconds(),
            }
            # Sign the payload so the master can authenticate this worker.
            payload = json.dumps(result)
            hmac_str = get_hmac_sha256(secret, payload)
            if debug_mode:
                print(' hmac: %s' % hmac_str)
            post_data = urlencode({'put': payload, 'hmac': hmac_str})
            req = urlopen(MASTER_URL, data=data(post_data))
            code = req.getcode()
            if code != 200:
                raise Exception('server response code (put) was %d'%(code))
            print('done! (dl=%d)'%(total_download))
            passed_jobs += 1
        except Exception as ex:
            # Any failure counts the job as failed and backs off before retrying.
            print('***** Caught Exception: %s *****'%(str(ex)))
            failed_jobs += 1
            time.sleep(30)
        print('passed=%d | failed=%d | total=%d'%(passed_jobs, failed_jobs, passed_jobs + failed_jobs))
        time.sleep(sleep_time)
    if download_limit is not None and total_download >= download_limit:
        print('download limit has been reached [%d >= %d]'%(total_download, download_limit))
    if job_limit is not None and (passed_jobs + failed_jobs) >= job_limit:
        print('job limit has been reached [%d >= %d]'%(passed_jobs + failed_jobs, job_limit))
def main():
    """Command-line entry point: parse arguments and start the worker loop."""
    # version info
    print('version', VERSION)
    parser = argparse.ArgumentParser()
    parser.add_argument('secret', type=str, help='hmac secret key')
    parser.add_argument('-b', '--blimit', action='store', type=int, default=None, help='download limit, in bytes')
    parser.add_argument('-j', '--jlimit', action='store', type=int, default=None, help='job limit')
    parser.add_argument('-s', '--sleep', action='store', type=int, default=1, help='seconds to sleep between each job')
    parser.add_argument('-t', '--type', action='store', type=int, default=0, help='type of job')
    parser.add_argument('-d', '--debug', action='store_const', const=True, default=False, help='enable debug mode')
    opts = parser.parse_args()
    # Hand the runtime options straight to the worker loop.
    run(opts.secret, opts.blimit, opts.jlimit, opts.sleep, opts.type, opts.debug)
if __name__ == '__main__':
main()
| 10,325 | 3,421 |
# write_results_to_file
def test_write_results_to_file_interface():
    """Check that the basic file writer wrapper works as intended."""
    from aiida_fleur.tools.io_routines import write_results_to_file
    from os.path import isfile, abspath
    from os import remove
    from numpy import array
    # exercise the writer with simple defaults
    header = 'head\n'
    values = array([[1,2],[3,4]])
    destination = './outputfiletest'
    write_results_to_file(header, values, destination=destination)
    file_written = isfile(abspath('./outputfiletest'))
    with open(destination, 'r') as handle:
        content = handle.read()
    expected = 'head\n1.00000000 3.00000000\n2.00000000 4.00000000\n'
    remove(destination)
    assert file_written
    assert content == expected
# write_xps_spectra_datafile
def test_write_xps_spectra_datafile_interface():
    """Check that the xps data-file writer produces a well-formed file."""
    from aiida_fleur.tools.io_routines import write_xps_spectra_datafile
    # TODO: provide all sample inputs and check the contents of the output file
    pass
| 1,166 | 408 |
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app import db, viz, eligibility, metrics, geocode
# FastAPI application; interactive docs are served at the site root.
app = FastAPI(
    title='DS API - Family Promise',
    docs_url='/',
    version='0.39.6',
)
# Mount the feature routers, grouped by OpenAPI tag.
app.include_router(db.router, tags=['Database'])
app.include_router(viz.router, tags=['Visualizations'])
app.include_router(eligibility.router, tags=['Eligibility'])
app.include_router(metrics.router, tags=['Metrics'])
app.include_router(geocode.router, tags=['Geocode'])
# Wide-open CORS.
# NOTE(review): allow_origins=['*'] combined with allow_credentials=True is
# disallowed by the CORS spec (a wildcard origin cannot be credentialed);
# confirm whether credentialed cross-origin requests are actually needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=['*'],
    allow_headers=['*'],
)
if __name__ == '__main__':
    """ To run this API locally use the following commands
    cd family-promise-service-tracker-ds-a
    python -m app.main
    """
    import uvicorn
    uvicorn.run(app)
| 851 | 296 |
"""
Take a directory of images and their segmentation masks (which only contain two classes - inside and outside)
and split the inside class into black and white. Save the resulting masks.
"""
import argparse
import os
import numpy as np
import cv2
def show(img):
    """Display *img* in a resizable window and block until a key is pressed."""
    cv2.namedWindow("image", cv2.WINDOW_NORMAL)
    cv2.imshow("image", img)
    cv2.waitKey(0)
    # cv2.destroyAllWindows()
def is_vertical(img):
    """Heuristic: return True when the image's bars run vertically.

    Compares the mean responses of a horizontal and a vertical 1-D kernel;
    prints the verdict and the response difference as a side effect.
    """
    kernel_h = np.array([[-1, -1, 4, -1, -1]])
    kernel_v = np.array([[-1], [-1], [4], [-1], [-1]])
    response_h = cv2.filter2D(img, -1, kernel_h)  # Convolve
    response_v = cv2.filter2D(img, -1, kernel_v)  # Convolve
    verdict = np.mean(response_h) > np.mean(response_v)
    print(verdict, np.mean(response_h) - np.mean(response_v))
    return verdict
def main():
    """Binarize the masked barcode region of each image and save only the
    vertically-oriented results to the output directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--images-dir", type=str, help="Path to images")
    parser.add_argument("--masks-dir", type=str, help="Path to masks")
    parser.add_argument("--output-dir", type=str, help="Where to store output")
    args = parser.parse_args()
    # Mask files are .png; each pairs with the .jpg image of the same stem.
    names = [x[:-len(".png")] for x in os.listdir(args.masks_dir)]
    cv2.namedWindow("image", cv2.WINDOW_NORMAL)
    for name in names:
        image = cv2.imread(os.path.join(args.images_dir, name + ".jpg"), cv2.IMREAD_GRAYSCALE)
        mask = cv2.imread(os.path.join(args.masks_dir, name + ".png"), cv2.IMREAD_GRAYSCALE)
        # Otsu threshold turns the two-class mask into a clean binary mask.
        _, mask_bw = cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        # Adaptive threshold splits the inside of the mask into black/white.
        image_binary = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                             cv2.THRESH_BINARY, 21, 1)
        image_rgb = cv2.cvtColor(image_binary, cv2.COLOR_GRAY2RGB)
        image_rgb[:, :, 0] = 255  # Seems to be blue here.
        # Zero out everything outside the mask.
        image_rgb = image_rgb * np.expand_dims(mask_bw != 0, 2)
        if is_vertical(image_rgb):
            # Filter out barcodes with the incorrect orientation
            cv2.imwrite(os.path.join(args.output_dir, name + ".png"), image_rgb)
if __name__ == '__main__':
    main()
| 2,033 | 781 |
import uuid
import unittest
import warnings
import json
from syngenta_digital_dta.elasticsearch.es_connector import ESConnector
from tests.syngenta_digital_dta.elasticsearch.mocks import MockESAdapter
class ESConnectorTest(unittest.TestCase):
    """Unit tests for ESConnector construction against mock adapters."""

    def setUp(self, *args, **kwargs):
        # ResourceWarnings from unclosed sockets are noise in these tests
        warnings.simplefilter('ignore', ResourceWarning)
        self.maxDiff = None

    def test_class_port(self):
        """Connector picks up the port from a default (localhost) adapter."""
        adapter = MockESAdapter()
        connector = ESConnector(adapter)
        self.assertEqual(connector.port, adapter.port)

    def test_class_port_nonlocalhost(self):
        """Connector picks up the port from a non-localhost adapter."""
        adapter = MockESAdapter(endpoint='dev.aws.com')
        connector = ESConnector(adapter)
        self.assertEqual(connector.port, adapter.port)

    def test_class_port_user_pass(self):
        """Connector copies user/password credentials from the adapter."""
        adapter = MockESAdapter(endpoint='dev.aws.com', user='root', password='root', authentication='user-password')
        connector = ESConnector(adapter)
        self.assertEqual(connector.user, adapter.user)
        self.assertEqual(connector.password, adapter.password)
| 1,097 | 326 |
from typing import List, Dict
from collections import defaultdict
from pathlib import Path
from util import save_dataset, save_word_dict, save_embedding
import torch
import argparse
import nltk
import re
import logging
# Root logging config: timestamped "name - level - message" lines at INFO.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class Corpus(object):
    """Load and tokenize the IMDB train/test pos/neg splits under *input_dir*."""

    def __init__(self, input_dir):
        self.train_neg_tokens = self.load_data(f'{input_dir}/train/neg')
        self.train_pos_tokens = self.load_data(f'{input_dir}/train/pos')
        self.test_neg_tokens = self.load_data(f'{input_dir}/test/neg')
        self.test_pos_tokens = self.load_data(f'{input_dir}/test/pos')

    @staticmethod
    def load_data(dir):
        """Tokenize every *.txt file under *dir*; one token list per file."""
        return [
            Corpus.tokenize(txt_path.read_text(encoding='utf-8'))
            for txt_path in Path(dir).glob('*.txt')
        ]

    @staticmethod
    def tokenize(sent):
        """Lowercase, strip, drop '<br />' markup, and word-tokenize *sent*."""
        cleaned = re.sub(r"<br />", r" ", sent.lower().strip())
        return nltk.word_tokenize(cleaned)
def stat_word_freq(c:Corpus):
    """Count the frequency of every word across all four corpus splits."""
    freq = defaultdict(int)
    splits = (c.train_neg_tokens, c.train_pos_tokens, c.test_neg_tokens, c.test_pos_tokens)
    for split in splits:
        for doc in split:
            for word in doc:
                freq[word] += 1
    return freq
def add_to_vocab(word, word_dict_ref):
    """Add *word* to the dict with the next free index, if not already present."""
    word_dict_ref.setdefault(word, len(word_dict_ref))
def build_vocab(freq_dict:Dict[str, int], max_size:int):
    """Build a word->index dict from the *max_size* most frequent words.

    Indices 0 and 1 are reserved for '[PAD]' and '[UNK]'.
    """
    word_dict = {'[PAD]': 0, '[UNK]': 1}
    most_common = sorted(freq_dict.items(), key=lambda kv: kv[1], reverse=True)[:max_size]
    for word, _ in most_common:
        # next free index == current dict size
        if word not in word_dict:
            word_dict[word] = len(word_dict)
    return word_dict
@torch.jit.script
def convert_tokens_to_ids(datas: List[List[str]], word_dict: Dict[str, int], cls: int, max_seq_len: int):
    """Encode token lists into a padded id tensor plus a label tensor.

    Returns (token_ids, labels): token_ids is (len(datas), max_seq_len) long
    tensor padded with [PAD]; labels is filled with the class id `cls`.
    Use @torch.jit.script to speed up.
    """
    total = len(datas)
    # Pre-fill with [PAD]; sequences longer than max_seq_len are truncated.
    token_ids = torch.full((total, max_seq_len),
                           word_dict['[PAD]'], dtype=torch.long)
    labels = torch.full((total,), cls, dtype=torch.long)
    for i in range(total):
        seq_len = len(datas[i])
        for j in range(min(seq_len, max_seq_len)):
            # Out-of-vocabulary tokens map to [UNK].
            token_ids[i, j] = word_dict.get(datas[i][j], word_dict['[UNK]'])
    return token_ids, labels
def create_dataset(neg, pos, word_dict, max_seq_len):
    """Encode neg (label 0) and pos (label 1) documents and stack the results."""
    neg_ids, neg_labels = convert_tokens_to_ids(
        neg, word_dict, 0, max_seq_len)
    pos_ids, pos_labels = convert_tokens_to_ids(
        pos, word_dict, 1, max_seq_len)
    stacked_ids = torch.cat([neg_ids, pos_ids], 0)
    stacked_labels = torch.cat([neg_labels, pos_labels], 0)
    return stacked_ids, stacked_labels
def load_pretrained_glove(path, freq_dict, max_size):
    """Load GloVe vectors for the *max_size* most frequent corpus words.

    Returns (word_dict, embedding): word_dict maps word -> embedding row,
    with '[PAD]' and '[UNK]' reserved at rows 0/1 whose vectors are random;
    the remaining rows follow the order words appear in the GloVe file.
    """
    word_dict = {'[PAD]': 0, '[UNK]': 1}
    rows = []
    top_items = sorted(freq_dict.items(), key=lambda kv: kv[1], reverse=True)[:max_size]
    keep = {word for word, _ in top_items}
    with open(path, 'r', encoding='utf-8') as fh:
        raw_lines = fh.readlines()
        for raw in raw_lines:
            token, *coords = raw.strip().split()
            if token in keep:
                # next free index == current dict size
                if token not in word_dict:
                    word_dict[token] = len(word_dict)
                rows.append([float(v) for v in coords])
    matrix = torch.tensor(rows, dtype=torch.float)
    dim = matrix.size(1)
    pad_row = torch.randn(1, dim)
    unk_row = torch.randn(1, dim)
    return word_dict, torch.cat([pad_row, unk_row, matrix], 0)
if __name__ == "__main__":
    # The 'punkt' model is required by nltk.word_tokenize.
    nltk.download('punkt')
    logger = logging.getLogger(__name__)
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_dir", type=str, default='aclImdb', help='Folder of original dataset.')
    parser.add_argument("-o", "--output_dir", type=str, default='data',
                        help='Folder to save the tensor format of dataset.')
    parser.add_argument("--max_seq_len", type=int, default=256, help='Max sequence length.')
    parser.add_argument("--max_vocab_size", type=int, default=30000, help='Max vocab size.')
    parser.add_argument("--glove_path", type=str, default=None, help='Pre-trained word embedding path.')
    args = parser.parse_args()
    logger.info(
        f"[input]: {args.input_dir} [output]: {args.output_dir} [max seq len]: {args.max_seq_len} [max vocab size]: {args.max_vocab_size}")
    logger.info("Loading and tokenizing...")
    c = Corpus(args.input_dir)
    logger.info("Counting word frequency...")
    freq_dict = stat_word_freq(c)
    logger.info(f"Total number of words: {len(freq_dict)}")
    logger.info("Building vocab...")
    # With a GloVe file the vocab comes from the embedding; otherwise from
    # corpus frequency alone.
    if args.glove_path is not None:
        glove_path = Path(args.glove_path)
        word_dict, embedding = load_pretrained_glove(glove_path, freq_dict, args.max_vocab_size)
        logger.info(f"Embedding dim: {embedding.shape[1]}")
    else:
        word_dict = build_vocab(freq_dict, args.max_vocab_size)
    logger.info(f"Vocab size: {len(word_dict)}")
    logger.info("Creating train dataset...")
    train_tokens, train_labels = create_dataset(
        c.train_neg_tokens, c.train_pos_tokens, word_dict, args.max_seq_len)
    logger.info("Creating test dataset...")
    test_tokens, test_labels = create_dataset(
        c.test_neg_tokens, c.test_pos_tokens, word_dict, args.max_seq_len)
    saved_dir = Path(args.output_dir)
    saved_dir.mkdir(parents=True, exist_ok=True)
    logger.info("Saving dataset and word dict[and embedding]...")
    save_word_dict(word_dict, saved_dir)
    save_dataset(train_tokens, train_labels, saved_dir, 'train')
    save_dataset(test_tokens, test_labels, saved_dir, 'test')
    if args.glove_path is not None:
        save_embedding(embedding, saved_dir)
    logger.info("All done!")
| 6,293 | 2,235 |
#!/usr/bin/env python
# Convert text and standoff annotations into CoNLL format.
import re
import sys
from pathlib import Path
# assume script in brat tools/ directory, extend path to find sentencesplit.py
sys.path.append(str(Path(__file__).parent))
sys.path.append('.')
from sentencesplit import sentencebreaks_to_newlines
# Blank (sentence-delimiter) line.
EMPTY_LINE_RE = re.compile(r'^\s*$')
# A CoNLL line as produced here: "label<TAB>start<TAB>end<TAB>…".
CONLL_LINE_RE = re.compile(r'^\S+\t\d+\t\d+.')
import stokenizer # JT: Dec 6
from map_text_to_char import map_text_to_char # JT: Dec 6
# Defaults for the module "globals" that init_globals() may overwrite.
NO_SPLIT = True
SINGLE_CLASS = None
ANN_SUFFIX = ".ann"
OUT_SUFFIX = "conll"
VERBOSE = False
def argparser():
    """Build the command-line parser for the standoff-to-CoNLL converter."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Convert text and standoff annotations into CoNLL format.'
    )
    parser.add_argument('-a', '--annsuffix', default=ANN_SUFFIX,
                        help='Standoff annotation file suffix (default "ann")')
    parser.add_argument('-c', '--singleclass', default=SINGLE_CLASS,
                        help='Use given single class for annotations')
    parser.add_argument('-n', '--nosplit', default=NO_SPLIT, action='store_true',
                        help='No sentence splitting')
    parser.add_argument('-o', '--outsuffix', default=OUT_SUFFIX,
                        help='Suffix to add to output files (default "conll")')
    parser.add_argument('-v', '--verbose', default=VERBOSE, action='store_true',
                        help='Verbose output')
    # ap.add_argument('text', metavar='TEXT', nargs='+',
    #                 help='Text files ("-" for STDIN)')
    return parser
def init_globals():
    """Parse command-line arguments and publish them as module globals."""
    global NO_SPLIT, SINGLE_CLASS, ANN_SUFFIX, OUT_SUFFIX, VERBOSE
    options = argparser().parse_args(sys.argv[1:])
    NO_SPLIT = options.nosplit
    SINGLE_CLASS = options.singleclass
    ANN_SUFFIX = options.annsuffix
    OUT_SUFFIX = options.outsuffix
    VERBOSE = options.verbose
def read_sentence(f):
    """Return the lines of one sentence from the CoNLL-formatted file *f*.

    Sentences are delimited by empty lines; the terminating empty line is
    included in the result. Raises ValueError on a malformed non-empty line.
    """
    collected = []
    for line in f:
        collected.append(line)
        if EMPTY_LINE_RE.match(line):
            break
        if not CONLL_LINE_RE.search(line):
            raise ValueError(
                'Line not in CoNLL format: "%s"' %
                line.rstrip('\n'))
    return collected
def strip_labels(lines):
    """Given CoNLL-format lines, strip the label (first TAB-separated field)
    from each non-empty line.

    Return list of labels and list of lines without labels. Returned
    list of labels contains None for each empty line in the input.
    """
    # (fixed: the original re-initialized `labels = []` a second time,
    # which was redundant and confusing)
    labels, stripped = [], []
    for l in lines:
        if EMPTY_LINE_RE.match(l):
            # empty line: no label, keep the line as-is
            labels.append(None)
            stripped.append(l)
        else:
            fields = l.split('\t')
            labels.append(fields[0])
            stripped.append('\t'.join(fields[1:]))
    return labels, stripped
def attach_labels(labels, lines):
    """Prefix each non-empty CoNLL line with its TAB-separated label.

    *labels* must align 1:1 with *lines*, with None paired to every empty
    line. Returns the new list of lines with labels attached.
    """
    assert len(labels) == len(
        lines), "Number of labels (%d) does not match number of lines (%d)" % (len(labels), len(lines))
    result = []
    for tag, line in zip(labels, lines):
        blank = EMPTY_LINE_RE.match(line)
        # sanity: a label exists exactly when the line is non-empty
        assert (tag is None and blank) or (tag is not None and not blank)
        result.append(line if blank else '%s\t%s' % (tag, line))
    return result
def conll_from_path(path):
    """Convert plain text into CoNLL format."""
    lines = path.read_text().splitlines()
    if NO_SPLIT:
        sentences = lines
    else:
        # NOTE(review): NEWLINE_TERM_REGEX is not defined anywhere in this
        # module — this branch raises NameError whenever NO_SPLIT is False.
        sentences = []
        for line in lines:
            line = sentencebreaks_to_newlines(line)
            sentences.extend([s for s in NEWLINE_TERM_REGEX.split(line) if s])
    # ANN_SUFFIX defaults to ".ann" (truthy), so annotations are normally read.
    if ANN_SUFFIX:
        annotations = get_annotations(path)
    else:
        annotations = None
    return conll_from_sentences(sentences, annotations)
def conll_from_sentences(sentences, annotations=None):
    """Convert plain text into CoNLL format.

    Tokenizes each sentence, emits ('O', start, end, token) tuples with an
    empty tuple as sentence separator, relabels from *annotations* when
    given, and returns (token, label) pairs (empty tuples preserved).
    NOTE(review): `offset` advances by len(s) per sentence, i.e. it assumes
    the sentences were adjacent in the source text with no separators —
    confirm against the caller.
    """
    lines = []
    offset = 0
    # print(sentences)
    # JT: Feb 19: added it for resolving char encoding issues
    fixed_sentences = []
    for s in sentences:
        # print(s)
        # fixed_s = ftfy.fix_text(s)
        # # print(fixed_s)
        # fixed_sentences.append(fixed_s)
        fixed_sentences.append(s)
    # for s in sentences:
    for s in fixed_sentences:
        tokens = stokenizer.tokenize(s)
        # Possibly apply timeout?
        # try:
        #     tokens = stokenizer.tokenize(s)
        # except stokenizer.TimedOutExc as e:
        #     try:
        #         print("***********using ark tokenizer")
        #         tokens = ark_twokenize.tokenizeRawTweetText(s)
        #     except Exception as e:
        #         print(e)
        token_w_pos = map_text_to_char(s, tokens, offset)
        for t, pos in token_w_pos:
            if not t.isspace():
                lines.append(('O', pos, pos + len(t), t))
        lines.append(tuple())
        offset += len(s)
    # add labels (other than 'O') from standoff annotation if specified
    if annotations:
        lines = relabel(lines, annotations)
    # lines = [[l[0], str(l[1]), str(l[2]), l[3]] if l else l for l in lines] #JT: Dec 6
    return [(line[3], line[0]) if line else line for line in lines]
def relabel(lines, annotations):
    """Overwrite 'O' tags in *lines* with B-/I- labels from *annotations*."""
    # TODO: this could be done more neatly/efficiently
    # Map every character offset covered by an annotation to that annotation.
    offset_label = {}
    for tb in annotations:
        for i in range(tb.start, tb.end):
            if i in offset_label:
                print("Warning: overlapping annotations in ", file=sys.stderr)
            offset_label[i] = tb
    prev_label = None
    for i, l in enumerate(lines):
        if not l:
            # Empty tuple marks a sentence boundary; reset B-/I- continuation.
            prev_label = None
            continue
        tag, start, end, token = l
        # TODO: warn for multiple, detailed info for non-initial
        label = None
        for o in range(start, end):
            if o in offset_label:
                if o != start:
                    print('Warning: annotation-token boundary mismatch: "%s" --- "%s"' % (
                        token, offset_label[o].text), file=sys.stderr)
                label = offset_label[o].type
                break
        # NOTE(review): prev_label is not cleared for unlabeled tokens, so a
        # label resuming after a gap within a sentence is tagged I- rather
        # than B- — confirm this is intended.
        if label is not None:
            if label == prev_label:
                tag = 'I-' + label
            else:
                tag = 'B-' + label
            prev_label = label
        lines[i] = [tag, start, end, token]
    # optional single-classing
    if SINGLE_CLASS:
        for l in lines:
            if l and l[0] != 'O':
                l[0] = l[0][:2] + SINGLE_CLASS
    return lines
def process_files(files, output_directory, phase_name=""):
    """Convert each text file to CoNLL and write <stem>_<suffix>.txt files."""
    suffix = OUT_SUFFIX.replace(".", "") + "_" + phase_name.replace("/", "")
    for path in files:
        try:
            lines = '\n'.join(
                '\t'.join(line) for line in
                conll_from_path(path)
            )
        except Exception as e:
            # best-effort: report and skip files that fail to convert
            print(e)
            continue
        # TODO: better error handling
        # NOTE(review): `lines` cannot be None here (the except/continue above
        # already skips failures), so this guard looks dead — confirm intent.
        if lines is None:
            print(f"file at {path} could not be tokenized")
            continue
        file_name = output_directory / Path(f"{path.stem}_{suffix}.txt")
        file_name.write_text(lines)
# Standoff textbound lines look like "T1<TAB>Type start end<TAB>text".
TEXTBOUND_LINE_RE = re.compile(r'^T\d+\t')
def parse_textbounds(f):
    """Parse textbound ("T…") annotation lines in *f* into Annotation objects.

    Non-textbound lines are skipped.
    NOTE(review): a TAB inside the annotated text itself would break the
    three-way split — confirm inputs never contain tabs in the text field.
    """
    from .format_markdown import Annotation
    parsed = []
    for raw in f:
        raw = raw.rstrip('\n')
        if not TEXTBOUND_LINE_RE.search(raw):
            continue
        id_, type_offsets, covered_text = raw.split('\t')
        kind, begin, finish = type_offsets.split()
        parsed.append(Annotation(None, kind, int(begin), int(finish), covered_text))
    return parsed
def eliminate_overlaps(textbounds):
eliminate = {}
# TODO: avoid O(n^2) overlap check
for t1 in textbounds:
for t2 in textbounds:
if t1 is t2:
continue
if t2.start >= t1.end or t2.end <= t1.start:
continue
# eliminate shorter
if t1.end - t1.start > t2.end - t2.start:
print("Eliminate %s due to overlap with %s" % (
t2, t1), file=sys.stderr)
eliminate[t2] = True
else:
print("Eliminate %s due to overlap with %s" % (
t1, t2), file=sys.stderr)
eliminate[t1] = True
return [t for t in textbounds if t not in eliminate]
def get_annotations(path: Path):
    """Read the standoff (.ann) file next to *path* and return its
    textbound annotations with overlaps removed."""
    ann_path = path.with_suffix(ANN_SUFFIX)
    parsed = parse_textbounds(ann_path.read_text().splitlines())
    return eliminate_overlaps(parsed)
def convert_standoff_to_conll(source_directory_ann, output_directory_conll):
    """Convert every .txt file in the source directory to CoNLL output files."""
    init_globals()
    text_files = [entry for entry in source_directory_ann.iterdir()
                  if entry.suffix == ".txt" and entry.is_file()]
    process_files(text_files, output_directory_conll)
if __name__ == '__main__':
    # Fixed path construction: the original joined onto the script *file*
    # ("…/script.py/../temp_files/…"), which only resolved correctly by
    # accident of ".." handling on POSIX. Build from the script's directory
    # instead; the resolved target (script_dir/temp_files/...) is unchanged.
    own_dir = Path(__file__).parent
    source_directory_ann = own_dir / Path("temp_files/standoff_files/")
    output_directory_conll = own_dir / Path("temp_files/conll_files/")
    convert_standoff_to_conll(source_directory_ann, output_directory_conll)
| 9,422 | 2,997 |
import pandas as pd
import numpy as np
import warnings
import os
from torch.utils.data.dataset import Dataset
from PIL import Image
# Allow arbitrarily large images: disable PIL's decompression-bomb pixel cap
Image.MAX_IMAGE_PIXELS = None
# ...and silence the matching DecompressionBombWarning.
warnings.simplefilter('ignore', Image.DecompressionBombWarning)
class CustomDatasetFromImages(Dataset):
    """Pairs each source image with the fine/coarse eval stats of its patches.

    NOTE(review): assumes fine_data and coarse_data have equal length and
    that, once sorted by patch path, every 4 consecutive entries belong to
    the same source image — confirm against the producer of these lists.
    """
    def __init__(self, fine_data, coarse_data, transform):
        """
        Args:
            fine_data (list): list of fine evaluation results [source img name, patch path, precision, recall,
            average precision, mean of loss, counts of object]
            coarse_data (list): list of coarse evaluation results [source img name, patch path, precision, recall,
            average precision, mean of loss, counts of object]
            transform: pytorch transforms for transforms and tensor conversion
        """
        # Transforms
        self.transforms = transform
        # [source img name, patch path, precision, recall, average precision, mean of loss, counts of object]
        # Sorting by patch path groups the 4 patches of a source image together.
        self.fine_data = fine_data
        self.fine_data.sort(key=lambda element: element[1])
        self.coarse_data = coarse_data
        self.coarse_data.sort(key=lambda element: element[1])
        # Calculate len
        # NOTE(review): data_len is only set when the lengths match; otherwise
        # __len__ raises AttributeError later — consider failing fast here.
        if len(self.fine_data) == len(self.coarse_data):
            self.data_len = len(self.fine_data) / 4
        # Second column is the image paths
        # self.image_arr = np.asarray(data_info.iloc[:, 1])
        # First column is the image IDs
        # self.label_arr = np.asarray(data_info.iloc[:, 0])
    def __getitem__(self, index):
        # Each dataset index covers a group of 4 consecutive patch entries.
        index = index * 4
        # Rebuild the source-image path from the patch path components.
        # Presumably: <root>/<split>/images/<source name>.jpg — TODO confirm.
        source_path = os.sep.join(self.fine_data[index][1].split(os.sep)[:-4])
        source_path = os.path.join(source_path, self.fine_data[index][1].split(os.sep)[-3], 'images', self.fine_data[index][0]) + '.jpg'
        # print('\ncomplete_source_path', source_path)
        img_as_img = Image.open(source_path)
        # Transform the image
        img_as_tensor = self.transforms(img_as_img)
        # Get label(class) of the image based on the cropped pandas column
        # single_image_label = self.label_arr[index]
        # Collect per-patch metrics for both fine (f_) and coarse (c_) results:
        # precision, recall, average precision, loss, object counts, stats.
        f_p, c_p = [], []
        f_r, c_r = [], []
        f_ap, c_ap = [], []
        f_loss, c_loss = [], []
        f_ob, c_ob = [], []
        f_stats, c_stats = [], []
        target_dict = dict()
        for i in range(4):
            f_p.append(self.fine_data[index+i][2])
            c_p.append(self.coarse_data[index + i][2])
            f_r.append(self.fine_data[index + i][3])
            c_r.append(self.coarse_data[index + i][3])
            f_ap.append(self.fine_data[index + i][4])
            c_ap.append(self.coarse_data[index + i][4])
            f_loss.append(self.fine_data[index + i][5])
            c_loss.append(self.coarse_data[index + i][5])
            f_ob.append(self.fine_data[index + i][6])
            c_ob.append(self.coarse_data[index + i][6])
            # NOTE(review): element [7] is read here but the docstring above
            # lists only 7 fields — confirm the actual record layout.
            f_stats.append(self.fine_data[index+i][7])
            c_stats.append(self.coarse_data[index+i][7])
        target_dict['f_p'] = f_p
        target_dict['c_p'] = c_p
        target_dict['f_r'] = f_r
        target_dict['c_r'] = c_r
        target_dict['f_ap'] = f_ap
        target_dict['c_ap'] = c_ap
        target_dict['f_loss'] = f_loss
        target_dict['c_loss'] = c_loss
        target_dict['f_ob'] = f_ob
        target_dict['c_ob'] = c_ob
        target_dict['f_stats'] = f_stats
        target_dict['c_stats'] = c_stats
        return img_as_tensor, target_dict
    def __len__(self):
        # data_len is a float (len/4); cast to int for the Dataset protocol.
        return int(self.data_len)
class CustomDatasetFromImages_test(Dataset):
def __init__(self, img_path, transform):
# Transforms
self.transforms = transform
# img list
self.img_path = img_path
self.img_list = os.listdir(img_path)
def __len__(self):
return len(self.img_list)
def __getitem__(self, index):
img_as_img = Image.open(os.path.join(self.img_path, self.img_list[index]))
# Transform the image
img_as_tensor = self.transforms(img_as_img)
# Get label
label_path = os.path.join(self.img_path.replace('images', 'labels'), self.img_list[index].replace('.jpg', '.txt'))
return img_as_tensor, label_path
class CustomDatasetFromImages_timetest(Dataset):
def __init__(self, csv_path, transform):
"""
Args:
csv_path (string): path to csv file
img_path (string): path to the folder where images are
transform: pytorch transforms for transforms and tensor conversion
"""
# Transforms
self.transforms = transform
# Read the csv file
data_info = pd.read_csv(csv_path, header=None)
# Second column is the image paths
self.image_arr = np.asarray(data_info.iloc[:, 1])
# First column is the image IDs
self.label_arr = np.asarray(data_info.iloc[:, 0])
# Calculate len
self.data_len = len(data_info)
def __getitem__(self, index):
# Get image name from the pandas df
single_image_name = self.image_arr[index] + '.jpg'
# Open image
img_as_img = Image.open(single_image_name.replace('/media/data2/dataset', '/home'))
# Transform the image
img_as_tensor = self.transforms(img_as_img)
# Get label(class) of the image based on the cropped pandas column
single_image_label = self.label_arr[index]
return (img_as_tensor, single_image_name)
def __len__(self):
return self.data_len | 5,509 | 1,791 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
import unittest
import yaml
from datemike import ansible, base, utils
from datemike.providers import rackspace
desired_task_yaml = """name: Create Cloud Server(s)
rax:
exact_count: true
flavor: performance1-1
image: image-ubuntu-1204
name: servername
"""
desired_play_yaml = """name: TestPlay
tasks:
- name: Create Cloud Server(s)
rax:
exact_count: true
flavor: performance1-1
image: image-ubuntu-1204
name: servername
"""
desired_playbook_yaml = """- name: TestPlay
tasks:
- name: Create Cloud Server(s)
rax:
exact_count: true
flavor: performance1-1
image: image-ubuntu-1204
name: servername
"""
desired_task_obj = OrderedDict(
[
('name', 'Create Cloud Server(s)'),
(
'rax', {
'image': 'image-ubuntu-1204',
'name': 'servername',
'flavor': 'performance1-1',
'exact_count': True
}
)
]
)
class TestAnsible(unittest.TestCase):
def setUp(self):
self.server = rackspace.CloudServer(
'servername', 'performance1-1', 'image-ubuntu-1204'
)
self.task = ansible.Task(self.server)
def tearDown(self):
pass
def setup_play(self):
play = ansible.Play('TestPlay')
play.add_task(self.task)
return play
def setup_playbook(self):
play = self.setup_play()
book = ansible.Playbook()
return play, book
def test_task_localaction(self):
task = ansible.Task(self.server, local_action=True)
yamlobj = yaml.load(task.to_yaml())
self.assertIn('local_action', yamlobj.keys(),
'local_action not in the parsed YAML object!')
def test_task_localaction_module(self):
task = ansible.Task(self.server, local_action=True)
yamlobj = yaml.load(task.to_yaml())
module = yamlobj.get('local_action')
self.assertEqual(module.get('module'), 'rax',
'value of module not rax')
self.assertEqual(module.get('image'), 'image-ubuntu-1204',
'value of image not image-ubuntu-1204')
self.assertEqual(module.get('flavor'), 'performance1-1',
'value of flavor not performance1-1')
def test_task(self):
yamlobj = yaml.load(self.task.to_yaml())
self.assertNotIn('local_action', yamlobj.keys())
def test_task_module(self):
module = yaml.load(self.task.to_yaml())
self.assertIn('rax', module.keys())
rax = module.get('rax')
self.assertEqual(rax.get('image'), 'image-ubuntu-1204',
'value of image not image-ubuntu-1204')
self.assertEqual(rax.get('flavor'), 'performance1-1',
'value of flavor not performance1-1')
def test_task_to_yaml(self):
task_yaml = self.task.to_yaml()
self.assertEqual(desired_task_yaml, task_yaml,
'Task YAML and expected YAML are not equal.')
def test_task_as_str(self):
task_yaml = self.task.to_yaml()
self.assertEqual(desired_task_yaml, str(task_yaml))
def test_task_as_obj(self):
task_obj = self.task.as_obj()
self.assertEqual(desired_task_obj, task_obj,
'Task object and expected object are not the equal.')
def test_play(self):
play = ansible.Play('TestPlay')
self.assertEqual(play.play.get('name'), 'TestPlay',
'Play name not equal to TestPlay')
def test_play_add_task(self):
play = self.setup_play()
self.assertEqual(play.play.get('tasks')[0], self.task.as_obj(),
'Task not at expected index in play object')
def test_play_add_tasks(self):
play = self.setup_play()
task = ansible.Task(self.server, local_action=True)
play.add_task([self.task, task])
self.assertEqual(play.play.get('tasks', [])[0], self.task.as_obj(),
'Play task index 0 does not match self.task')
self.assertNotEqual(play.play.get('tasks')[1], task.as_obj(),
'Play task index 1 shouldn\' match local task')
def test_play_add_host(self):
play = ansible.Play('TestPlay')
play.add_host('testhost')
self.assertIn('testhost', play.as_obj().get('hosts'),
'testhosts not in play hosts')
def test_play_add_role(self):
play = ansible.Play('TestPlay')
play.add_role('testrole')
self.assertIn('testrole', play.as_obj().get('roles'),
'testrole not in play roles')
def test_play_yaml(self):
play = self.setup_play()
self.assertEqual(desired_play_yaml, play.to_yaml(),
'Play YAML does not equal expected YAML')
def test_play_as_str(self):
play = self.setup_play()
self.assertEqual(desired_play_yaml, str(play),
'Play YAML does not equal expected YAML')
def test_playbook_add_play(self):
play, book = self.setup_playbook()
book.add_play(play)
self.assertEquals(book.playbook[0], play.as_obj(),
'play does not equal playbook play at index 0')
def test_playbook_add_plays(self):
play, book = self.setup_playbook()
play2 = self.setup_play()
book.add_play([play, play2])
self.assertEqual(len(book.playbook), 2,
'length of plays in playbook is not equal to 2')
def test_playbook_yaml(self):
play, book = self.setup_playbook()
book.add_play(play)
self.assertEqual(desired_playbook_yaml, book.to_yaml(),
'Playbook YAML output does not match intended YAML')
def test_playbook_as_str(self):
play, book = self.setup_playbook()
book.add_play(play)
self.assertEqual(desired_playbook_yaml, str(book),
'Playbook YAML output does not match intended YAML')
def main():
TestAnsible.run()
if __name__ == '__main__':
main()
| 6,236 | 1,974 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import re
import json
import requests
def get_number_of_citations(url):
    """Scrape a "<N> results" count from the page at *url*.

    Returns the count as an int, or None on any HTTP or parse failure
    (errors are printed, never raised).
    """
    response = requests.get(url)
    try:
        response.raise_for_status()
    except Exception as exc:
        print(exc)
        return None
    matches = re.findall("([0-9]+) results", response.text)
    if not matches:
        print("no results found")
        return None
    try:
        return int(matches[0])
    except Exception as exc:
        print(exc)
        return None
def update_others():
    """Refresh citation counts in other_pubs.json from Google Scholar.

    Only entries whose URL points at scholar.google.com are touched, and
    a stored count is only ever increased (a failed fetch or a smaller
    scraped value leaves the entry unchanged).
    """
    with open("other_pubs.json", "r") as f:
        pubs = json.load(f)

    for pub in pubs:
        url = pub["url"]
        if not url.startswith("https://scholar.google.com"):
            continue
        count = get_number_of_citations(url)
        # Keep counts monotonically non-decreasing.
        if count is not None and count >= pub["citations"]:
            pub["citations"] = count

    with open("other_pubs.json", "w") as f:
        json.dump(pubs, f, sort_keys=True, indent=2, separators=(",", ": "))
# Script entry point: refresh the stored citation counts in-place.
if __name__ == "__main__":
    update_others()
| 1,100 | 362 |
import sqlalchemy as sa
import datetime
from haminfo.db.models.modelbase import ModelBase
class Request(ModelBase):
    """A single repeater-lookup request logged for statistics."""

    __tablename__ = 'request'

    # NOTE: column order intentionally preserved from the original schema.
    id = sa.Column(sa.Integer, sa.Sequence('request_id_seq'), primary_key=True)
    created = sa.Column(sa.Date)
    latitude = sa.Column(sa.Float)
    longitude = sa.Column(sa.Float)
    band = sa.Column(sa.String)
    filters = sa.Column(sa.String)
    count = sa.Column(sa.Integer)
    callsign = sa.Column(sa.String)
    stations = sa.Column(sa.String)

    def __repr__(self):
        """Human-readable summary for logs and debugging."""
        return (f"<Request(callsign='{self.callsign}', created='{self.created}'"
                f", latitude='{self.latitude}', longitude='{self.longitude}'), "
                f"count='{self.count}' filters='{self.filters}' "
                f"stations='{self.stations}'>")

    def to_dict(self):
        """Map every mapped column name to its current value."""
        return {col: getattr(self, col) for col in self.__mapper__.c.keys()}

    @staticmethod
    def from_json(r_json):
        """Build a Request from a request payload dict.

        NOTE(review): "Callsign" is capitalized while the other keys are
        lower-case -- presumably matches the producer's payload; confirm.
        """
        return Request(
            latitude=r_json["lat"],
            longitude=r_json["lon"],
            band=r_json["band"],
            callsign=r_json.get("Callsign", "None"),
            count=r_json.get("count", 1),
            filters=r_json.get("filters", "None"),
            stations=r_json.get("stations", "None"),
            created=datetime.datetime.now(),
        )
| 1,451 | 468 |
# Created by Kelvin_Clark on 3/3/2022, 4:58 PM
from src import database as db
class User(db.Model):
    """Account record keyed by a unique username."""

    __tablename__ = "users"

    # SQLite gets plain TEXT columns via with_variant; other backends use VARCHAR.
    username: str = db.Column(db.String(200).with_variant(db.Text, "sqlite"), unique=True, primary_key=True)
    email: str = db.Column(db.String(200).with_variant(db.Text, "sqlite"), unique=True, nullable=False)
    password: str = db.Column(db.String(500).with_variant(db.Text, "sqlite"), nullable=False)

    def __init__(self, username: str, email: str, password: str):
        """Store the supplied credentials on the new instance."""
        self.username = username
        self.email = email
        self.password = password
class UserOut:
    """Serializable view of a User that omits the password field."""

    def __init__(self, user: User):
        """Copy the public fields off *user*."""
        self.email = user.email
        self.username = user.username
| 720 | 246 |
# 2019-04-24
# John Dunne
# Box plot of the data set
# pandas box plot documentation: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html
print("The box plot will appear on the screen momentarily")

import matplotlib.pyplot as pl
import pandas as pd
# Plotting and dataframe libraries imported under their usual short aliases.

# URL of the raw Iris CSV hosted in the project repository.
DATA_URL = "https://raw.githubusercontent.com/johndunne2019/pands-project/master/Fishers_Iris_data_set.csv"

# header=0: the first row of the file holds the column names.
iris = pd.read_csv(DATA_URL, header=0)

# One box per species for each numeric column: petal width/length and
# sepal width/length.  docs: pandas.DataFrame.boxplot
# Adapted from: http://cmdlinetips.com/2018/03/how-to-make-boxplots-in-python-with-pandas-and-seaborn/
iris.boxplot(by='species', grid=True)

# Display the figure on screen.
pl.show()
# Generated by Django 2.1.7 on 2019-06-08 21:37
from django.db import migrations, models
# Auto-generated initial migration: creates the Player table holding
# FIFA-style player attributes (identity, club, and per-skill ratings).
class Migration(migrations.Migration):

    # First migration for this app -- no prior migration state.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Identity / biography columns.
                ('Name', models.CharField(max_length=50)),
                ('Age', models.IntegerField()),
                ('Photo', models.CharField(max_length=100)),
                ('Nationality', models.CharField(max_length=50)),
                ('Flag', models.CharField(max_length=100)),
                ('Overall', models.IntegerField()),
                ('Potential', models.IntegerField()),
                # Club / contract columns (Value and Wage kept as display strings).
                ('Club', models.CharField(max_length=50)),
                ('ClubLogo', models.CharField(max_length=100)),
                ('Value', models.CharField(max_length=10)),
                ('Wage', models.CharField(max_length=10)),
                ('PreferredFoot', models.CharField(max_length=10)),
                ('Position', models.CharField(max_length=10)),
                ('Height', models.CharField(max_length=10)),
                ('Weight', models.CharField(max_length=10)),
                # Individual skill ratings (integers).
                ('Finishing', models.IntegerField()),
                ('HeadingAccuracy', models.IntegerField()),
                ('ShortPassing', models.IntegerField()),
                ('Volleys', models.IntegerField()),
                ('Dribbling', models.IntegerField()),
                ('Curve', models.IntegerField()),
                ('FKAccuracy', models.IntegerField()),
                ('LongPassing', models.IntegerField()),
                ('BallControl', models.IntegerField()),
                ('Acceleration', models.IntegerField()),
                ('SprintSpeed', models.IntegerField()),
                ('Agility', models.IntegerField()),
                ('Reactions', models.IntegerField()),
                ('Balance', models.IntegerField()),
                ('ShotPower', models.IntegerField()),
                ('Jumping', models.IntegerField()),
                ('Stamina', models.IntegerField()),
                ('Strength', models.IntegerField()),
                ('LongShots', models.IntegerField()),
                ('Aggression', models.IntegerField()),
                ('Interceptions', models.IntegerField()),
                ('Positioning', models.IntegerField()),
                ('Vision', models.IntegerField()),
                ('Penalties', models.IntegerField()),
                ('Marking', models.IntegerField()),
            ],
        ),
    ]
| 2,677 | 667 |
import asyncio as _asyncio
import os as _os
from unittest import SkipTest, expectedFailure, skip, skipIf, skipUnless # noqa
from asynctest import TestCase as _TestCase
from asynctest import _fail_on
from tortoise import Tortoise as _Tortoise
from tortoise.backends.base.client import BaseDBAsyncClient as _BaseDBAsyncClient
from tortoise.backends.base.db_url import expand_db_url as _expand_db_url
from tortoise.utils import generate_schema as _generate_schema
# Public re-exports of this test-helper module.
__all__ = ('SimpleTestCase', 'TransactionTestCase', 'TestCase', 'SkipTest', 'expectedFailure',
           'skip', 'skipIf', 'skipUnless')

# Test-database URL template; the '{}' placeholder is expanded per test so
# each test run gets an isolated database.
_TORTOISE_TEST_DB = _os.environ.get('TORTOISE_TEST_DB', 'sqlite:///tmp/test-{}.sqlite')

# Re-document the decorator re-exported from unittest.
# (Typo fix: "failiure" -> "failure".)
expectedFailure.__doc__ = """
Mark test as expecting failure.

On success it will be marked as unexpected success.
"""
class SimpleTestCase(_TestCase):
    """
    An asyncio capable test class that provides some helper functions.

    Will run any ``test_*()`` function either as sync or async, depending
    on the signature of the function.

    If you specify ``async test_*()`` then it will run it in an event loop.

    Based on `asynctest <http://asynctest.readthedocs.io/>`_
    """

    async def getDB(self) -> _BaseDBAsyncClient:
        """
        DB Client factory, for use in testing.

        Creates and connects to a fresh test database described by the
        ``TORTOISE_TEST_DB`` environment variable.

        Please remember to call ``.close()`` and then ``.delete()`` on the returned object.
        """
        # testing=True expands the '{}' placeholder in the URL template so
        # each call yields an isolated database.
        dbconf = _expand_db_url(_TORTOISE_TEST_DB, testing=True)
        db = dbconf['client'](**dbconf['params'])
        await db.db_create()
        await db.create_connection()
        return db

    async def _setUpDB(self):
        # Hook for subclasses: prepare the test database before setUp() runs.
        pass

    async def _tearDownDB(self) -> None:
        # Hook for subclasses: dispose of the test database after tearDown().
        pass

    def _setUp(self) -> None:
        self._init_loop()

        # initialize post-test checks (asynctest's "fail on" machinery;
        # reads per-test overrides attached by decorators)
        test = getattr(self, self._testMethodName)
        checker = getattr(test, _fail_on._FAIL_ON_ATTR, None)
        self._checker = checker or _fail_on._fail_on()
        self._checker.before_test(self)

        # The DB must be ready before the user-visible setUp executes;
        # setUp itself may be sync or async.
        self.loop.run_until_complete(self._setUpDB())
        if _asyncio.iscoroutinefunction(self.setUp):
            self.loop.run_until_complete(self.setUp())
        else:
            self.setUp()

        # don't take into account if the loop ran during setUp
        self.loop._asynctest_ran = False

    def _tearDown(self) -> None:
        # Mirror image of _setUp: user tearDown (sync or async), then DB teardown.
        self.loop.run_until_complete(self._tearDownDB())
        if _asyncio.iscoroutinefunction(self.tearDown):
            self.loop.run_until_complete(self.tearDown())
        else:
            self.tearDown()

        # post-test checks
        self._checker.check_test(self)
class TransactionTestCase(SimpleTestCase):
    """
    An asyncio capable test class that will ensure that an isolated test db
    is available for each test.

    It will create and destroy a new DB instance for every test.
    This is obviously slow, but guarantees a fresh DB.

    It will define a ``self.db`` which is the fully initialised (with DB schema)
    DB Client object.
    """
    # pylint: disable=C0103,W0201

    async def _setUpDB(self):
        # Fresh DB per test; register it with Tortoise (first call inits the
        # ORM, later calls just re-route the client) and build the schema.
        self.db = await self.getDB()
        if not _Tortoise._inited:
            _Tortoise.init(self.db)
        else:
            _Tortoise._client_routing(self.db)
        await _generate_schema(self.db)

    async def _tearDownDB(self) -> None:
        # Close the connection, then drop the database entirely.
        await self.db.close()
        await self.db.db_delete()
class TestCase(TransactionTestCase):
    """
    An asyncio capable test class that will ensure that an partially isolated test db
    is available for each test.

    It will wrap each test in a transaction and roll the DB back.
    This is much faster, but requires that your test does not explicitly use transactions.

    .. note::
        Currently does not run any faster than ``TransactionTestCase``, will be sped up later on.
    """
    # TODO: Make this wrap everything in a rollback-transaction instead
    # (currently inherits TransactionTestCase behaviour unchanged).
    pass
| 3,921 | 1,203 |
"""
Common elements for both client and server
==========================================
"""
import enum
import json
class ProtocolError(RuntimeError):
    """Raised when some low-level error in the network protocol has been
    detected (e.g. a malformed multipart message).
    """
class EndpointType(enum.Enum):
    """
    Enumeration of endpoints exposed by a :py:class:`Server`.

    .. py:attribute:: control

        A *REP* endpoint which accepts JSON-formatted control messages.

    .. py:attribute:: depth

        A *PUB* endpoint which broadcasts compressed depth frames to connected subscribers.
    """

    control = 1
    depth = 2
class MessageType(enum.Enum):
    """Wire-level message tags.

    Each member's value is the literal first frame of a multipart message
    (see ``make_msg`` / ``parse_msg`` below).
    """
    error = b'\x00'
    ping = b'\x01'
    pong = b'\x02'
    who = b'\x03'
    me = b'\x04'
def make_msg(type, payload):
    """Build a multipart message: the type's byte tag plus, when *payload*
    is not None, a UTF-8 encoded JSON frame."""
    parts = [type.value]
    if payload is not None:
        parts.append(json.dumps(payload).encode('utf8'))
    return parts
def parse_msg(msg):
    """Decode a multipart message into ``(MessageType, payload)``.

    A one-frame message has a None payload; a two-frame message carries a
    JSON payload.  Raises ValueError for any other frame count (or for an
    unknown type byte, via the enum lookup).
    """
    if len(msg) not in (1, 2):
        raise ValueError('Multipart message must have length 1 or 2')
    kind = MessageType(msg[0])
    payload = None if len(msg) == 1 else json.loads(msg[1].decode('utf8'))
    return kind, payload
| 1,129 | 358 |
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import os
import sys
def export_sample_images(amount: int, export_dir: str, dataset, shuffle=True):
    """Write *amount* sample images from *dataset* into *export_dir* as PNGs.

    Only the first batch is exported; files are named ``0.png`` ..
    ``amount-1.png``.  The directory is created if missing.
    """
    os.makedirs(export_dir, exist_ok=True)
    loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=amount, shuffle=shuffle)
    for batch, _ in loader:
        for idx, tensor in enumerate(batch):
            pil_image = transforms.ToPILImage()(tensor.squeeze(0))
            pil_image.save(os.path.join(export_dir, str(idx)) + '.png')
        break  # one batch only
def getStat(train_data):
    """Average per-image channel statistics over a 3-channel dataset.

    Iterates the dataset one sample at a time and returns
    ``(mean, std)`` as two 3-element lists, where each entry is the
    average of the per-image channel mean / std (not a pixel-pooled std).
    """
    print('Compute mean and variance for training data.')
    print(len(train_data))
    loader = torch.utils.data.DataLoader(
        train_data, batch_size=1, shuffle=False, num_workers=0,
        pin_memory=True)
    channel_mean = torch.zeros(3)
    channel_std = torch.zeros(3)
    for sample, _ in loader:
        for ch in range(3):
            channel_mean[ch] += sample[:, ch, :, :].mean()
            channel_std[ch] += sample[:, ch, :, :].std()
    n = len(train_data)
    channel_mean.div_(n)
    channel_std.div_(n)
    return list(channel_mean.numpy()), list(channel_std.numpy())
if __name__ == '__main__':
    # Ask for confirmation before the (potentially slow) full-dataset scan.
    # BUG FIX: the original compared against the undefined name `y`
    # (a NameError at runtime); compare against the string 'y'.
    if input('Are you sure to start calculating mean and std? [y/n] ') != 'y':
        exit()
    if len(sys.argv) != 2:
        print('Please specify the path of the dataset')
        exit(-1)
    transform = transforms.Compose([
        transforms.Resize((200, 200)),
        transforms.ToTensor()
    ])
    # NOTE(review): the dataset root is hard-coded even though argv[1] is
    # validated above -- presumably it should be root=sys.argv[1]; confirm.
    train_dataset = datasets.ImageFolder(root=r'/home/user/data/gender/train', transform=transform)
    mean, std = getStat(train_dataset)
    print('mean = ', mean)
    print('std = ', std)
| 1,639 | 555 |
from unittest import TestCase
from bloock.infrastructure.hashing.blake2b import Blake2b
from bloock.infrastructure.hashing.keccak import Keccak
class Blake2bTestCase(TestCase):
    """Known-answer tests for the Blake2b hashing wrapper."""

    def setUp(self):
        self.blake = Blake2b()

    def test_blake_generate_hash_64_zeros_string(self):
        payload = b'0000000000000000000000000000000000000000000000000000000000000000'
        expected = '681df247e1ece8365db91166ed273590019df392004d2ea25543335c71bbe2d2'
        self.assertEqual(self.blake.generateHash(payload), expected,
                         'Hashes do not match')

    def test_blake_generate_hash_string(self):
        payload = b'testing blake'
        expected = 'bbe426afe3fae78c3d3e25502a3e197762ada886da94c1b8104a1984c8c4d886'
        self.assertEqual(self.blake.generateHash(payload), expected,
                         'Hashes do not match')
class KeccakTestCase(TestCase):
    """Known-answer tests for the Keccak hashing wrapper."""

    def setUp(self):
        self.keccak = Keccak()

    def test_keccak_generate_hash_64_zeros_hexa(self):
        # 32 zero *bytes* (hex-decoded), not the ASCII zero characters.
        payload = bytes.fromhex(
            '0000000000000000000000000000000000000000000000000000000000000000')
        expected = '290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563'
        self.assertEqual(self.keccak.generateHash(payload), expected,
                         'Hashes do not match')

    def test_keccak_generate_hash_64_zeros_string(self):
        # Same digits as above, but hashed as their ASCII representation.
        payload = b'0000000000000000000000000000000000000000000000000000000000000000'
        expected = 'd874d9e5ad41e13e8908ab82802618272c3433171cdc3d634f3b1ad0e6742827'
        self.assertEqual(self.keccak.generateHash(payload), expected,
                         'Hashes do not match')

    def test_keccak_generate_hash_string(self):
        payload = b'testing keccak'
        expected = '7e5e383e8e70e55cdccfccf40dfc5d4bed935613dffc806b16b4675b555be139'
        self.assertEqual(self.keccak.generateHash(payload), expected,
                         'Hashes do not match')
| 1,791 | 904 |
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
# Auto-generated OpenAPI model: frame-range settings for a time-step
# animation output (from/to frame indices plus a frame-skip stride).
class TimeStepAnimationOutputSettingsAllOf(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'type': 'str',
        'from_frame_index': 'int',
        'to_frame_index': 'int',
        'skip_frames': 'int'
    }

    # Maps python attribute names to their JSON (camelCase) keys.
    attribute_map = {
        'type': 'type',
        'from_frame_index': 'fromFrameIndex',
        'to_frame_index': 'toFrameIndex',
        'skip_frames': 'skipFrames'
    }

    def __init__(self, type='TIME_STEP', from_frame_index=0, to_frame_index=None, skip_frames=0, local_vars_configuration=None):  # noqa: E501
        """TimeStepAnimationOutputSettingsAllOf - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._type = None
        self._from_frame_index = None
        self._to_frame_index = None
        self._skip_frames = None
        self.discriminator = None

        # Assign through the properties so client-side validation runs.
        self.type = type
        self.from_frame_index = from_frame_index
        self.to_frame_index = to_frame_index
        self.skip_frames = skip_frames

    @property
    def type(self):
        """Gets the type of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501


        :return: The type of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this TimeStepAnimationOutputSettingsAllOf.


        :param type: The type of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    @property
    def from_frame_index(self):
        """Gets the from_frame_index of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501


        :return: The from_frame_index of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501
        :rtype: int
        """
        return self._from_frame_index

    @from_frame_index.setter
    def from_frame_index(self, from_frame_index):
        """Sets the from_frame_index of this TimeStepAnimationOutputSettingsAllOf.


        :param from_frame_index: The from_frame_index of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and from_frame_index is None:  # noqa: E501
            raise ValueError("Invalid value for `from_frame_index`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                from_frame_index is not None and from_frame_index < 0):  # noqa: E501
            raise ValueError("Invalid value for `from_frame_index`, must be a value greater than or equal to `0`")  # noqa: E501

        self._from_frame_index = from_frame_index

    @property
    def to_frame_index(self):
        """Gets the to_frame_index of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501


        :return: The to_frame_index of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501
        :rtype: int
        """
        return self._to_frame_index

    @to_frame_index.setter
    def to_frame_index(self, to_frame_index):
        """Sets the to_frame_index of this TimeStepAnimationOutputSettingsAllOf.


        :param to_frame_index: The to_frame_index of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and to_frame_index is None:  # noqa: E501
            raise ValueError("Invalid value for `to_frame_index`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                to_frame_index is not None and to_frame_index < 0):  # noqa: E501
            raise ValueError("Invalid value for `to_frame_index`, must be a value greater than or equal to `0`")  # noqa: E501

        self._to_frame_index = to_frame_index

    @property
    def skip_frames(self):
        """Gets the skip_frames of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501


        :return: The skip_frames of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501
        :rtype: int
        """
        return self._skip_frames

    @skip_frames.setter
    def skip_frames(self, skip_frames):
        """Sets the skip_frames of this TimeStepAnimationOutputSettingsAllOf.


        :param skip_frames: The skip_frames of this TimeStepAnimationOutputSettingsAllOf.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and skip_frames is None:  # noqa: E501
            raise ValueError("Invalid value for `skip_frames`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                skip_frames is not None and skip_frames < 0):  # noqa: E501
            raise ValueError("Invalid value for `skip_frames`, must be a value greater than or equal to `0`")  # noqa: E501

        self._skip_frames = skip_frames

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TimeStepAnimationOutputSettingsAllOf):
            return False

        # Value equality: compares the full serialized property dicts.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TimeStepAnimationOutputSettingsAllOf):
            return True

        return self.to_dict() != other.to_dict()
| 7,395 | 2,222 |
'''
Repository configuration model.
'''
from typing import Dict, List, Literal, Optional
from .. import types
from .configuration import Configuration
class RepositoryConfiguration(Configuration):
    '''
    Repository configuration model.

    Declarative schema for a feature-repository configuration: project
    metadata plus the store and data-source sections.
    '''

    #: Configuration type discriminator (always 'repository').
    type: Optional[Literal['repository']] = 'repository'

    #: Project name.
    name: str

    #: Project display name.
    display_name: Optional[str] = None

    #: Project description.
    description: Optional[str] = None

    #: Project author name.
    author: Optional[str] = None

    #: Project tags.
    tags: Optional[List[Dict[str, str]]] = None

    #: Project version.
    version: str

    #: Credential store configurations (optional).
    credential_store: Optional[types.CredentialStore] = None

    #: Metadata store configurations (required).
    metadata_store: types.MetadataStore

    #: Offline store configurations (required).
    offline_store: types.OfflineStore

    #: Online store configurations (required).
    online_store: types.OnlineStore

    #: Data source configurations (optional).
    data_sources: Optional[List[types.DataSource]] = None
# Public API of this module.
__all__ = [
    'RepositoryConfiguration'
]
| 1,143 | 312 |
#!/usr/bin/env python
def modpow(a, n, m):
    """Return (a ** n) % m via binary (square-and-multiply) exponentiation.

    Note: returns 1 for n == 0 regardless of m (unlike the builtin
    three-argument pow, which returns 0 when m == 1).
    """
    result = 1
    base = a
    exp = n
    while exp:
        if exp & 1:
            result = (result * base) % m
        exp >>= 1
        base = (base * base) % m
    return result
def check(a1, seq=None):
    """Decode the move string back out of the encoded number *a1*.

    Inverts the encoding built by the loop over ``moves`` below, reading
    one base-3 "digit" per move.  *seq* only determines how many digits
    to decode; it defaults to the module-level ``moves`` string, so
    existing single-argument callers are unaffected.

    Fixed for Python 3: ``range`` instead of ``xrange`` and explicit
    floor division ``//`` instead of ``/`` (results are identical under
    Python 2, where ``/`` on ints already floors).
    """
    if seq is None:
        seq = moves  # module-level move string defined below
    decoded = ''
    for _ in range(len(seq)):
        digit = a1 % 3
        if digit == 0:
            decoded += 'D'
            a1 //= 3
        elif digit == 1:
            decoded += 'U'
            a1 = (4 * a1 + 2) // 3
        else:
            decoded += 'd'
            a1 = (2 * a1 - 1) // 3
    return decoded
# --- Python 2 script body (print statement, integer '/' division) ---
# Encodes the move string into the pair (a, b): after the loop, the state
# reached from a start value s is (s * 2**a - b) / 3**d for the d recorded
# divisions.  The code below then solves for the start value modulo 3**(d+1).
moves='UDDDUdddDDUDDddDdDddDDUDDdUUD'
a=0
b=0
d=0
for m in moves:
    if m=='D': d += 1
    if m=='U':
        a+=2
        b*=4
        b+=2*3**d
        d+=1
    if m=='d':
        a+=1
        b*=2
        b-=3**d
        d+=1
# we may then use phi(3*3^d)==phi(3^30)==3^30-3^29 in order
# to find (2^21)^-1 (mod 3^30)
x = modpow(2**a,3**(d+1)-3**d-1,3**(d+1))*(2*3**d-b)%3**(d+1)
# x + k*3^30 > 10^15 ==> k == ceil((10^15-x)/3^30)
# assumes 10^15 - x is not evenly divisible by 3^30
# (Python 2 print statement and floor division below -- do not run under Py3.)
print x + (10**15-x+3**30-1)/3**30 * 3**30
| 970 | 496 |
"""Represent a Wavelet Coefficient Set.
.. module::wave
:platform: Unix, Windows
.. modelauthor:: Juan C Galan-Hernandez <jcgalanh@gmail.com>
"""
import numpy as np
import waveletcodec.tools as tools
import waveletcodec.lwt as lwt
import cv2
import math
#Constant Section
# Filter-bank identifier stored in WCSet.filter for the CDF 9/7 wavelet.
CDF97 = 1
#End
class WCSet(np.ndarray):
    """
    This object represents a wavelet coefficient set.

    The fundamental element for signal processing using wavelets is an N
    matrix that holds the coefficients of a wavelet decomposition.  This
    object extends numpy.ndarray with the metadata needed to interpret
    (and invert) that decomposition: the decomposition ``level`` and the
    ``filter`` bank identifier used to produce the coefficients.
    """

    # Decomposition level of the stored coefficients.
    level = 0
    # Filter-bank identifier (e.g. CDF97); None when unknown.
    filter = None

    def __new__(cls, array, level, filter_=None):
        """Create a WCSet viewing `array`'s data (no copy for ndarrays).

        Args:
            array: numpy.ndarray (or array-like) holding the coefficients.
            level: level of decomposition of the coefficients.
            filter_: filter bank identifier used for the transform.

        Return:
            A WCSet object sharing its data with `array`.
        """
        # FIX: removed a stray debug `print(cls)` left in the constructor.
        obj = np.asarray(array).view(cls)
        obj.level = level
        obj.filter = filter_
        return obj

    def __array_finalize__(self, obj):
        # Propagate metadata through views, slices and ufunc results.
        if obj is None:
            return
        self.level = getattr(obj, 'level', None)
        self.filter = getattr(obj, 'filter', None)

    def inverse(self):
        """Return the inverse transform of this wavelet's coefficients.

        The appropriate inverse filter is chosen from the `filter`
        property (currently only CDF97 is recognised; other values
        return None).

        Return:
            A numpy.ndarray holding the reconstructed signal.

        Raises:
            AttributeError if the `filter` property is not set.
        """
        if self.filter is None:
            msg = "filter property is not set, unable to determine the inverse"
            raise AttributeError(msg)
        # `is` works here because CDF97 is a small cached int constant.
        if self.filter is CDF97:
            return icdf97(self)

    def as_image(self):
        """Render the coefficient set as an 8-bit image for inspection.

        The approximation (top-left) block is normalised separately from
        the detail coefficients, which are shown as equalised magnitudes.
        """
        dc_rows, dc_cols = self.shape
        dc_rows //= 2 ** self.level
        dc_cols //= 2 ** self.level
        dc = self.copy()
        ac = dc[:dc_rows, :dc_cols].copy()
        dc[:dc_rows, :dc_cols] = 0
        ac = tools.normalize(ac, upper_bound=255, dtype=np.uint8)
        dc = np.abs(dc)
        dc = tools.normalize(dc, upper_bound=255, dtype=np.uint8)
        dc = cv2.equalizeHist(dc)
        dc[:dc_rows, :dc_cols] = ac
        return dc
# Lifting-scheme filter bank for the CDF 9/7 wavelet (scale / update /
# predict coefficients of the standard lifting factorization).
_CDF97 = lwt.FilterBank(
    scale=1 / 1.149604398,
    update=[-0.05298011854, 0.4435068522],
    predict=[-1.586134342, 0.8829110762]
)
def cdf97(signal, level=1):
    """Forward CDF 9/7 lifting wavelet transform.

    Args:
        signal: a 1D or 2D numpy.array instance.
        level: number of decomposition levels.

    Returns:
        A WCSet holding the coefficients, tagged with the level and
        the CDF97 filter identifier.
    """
    return WCSet(_CDF97.forward(signal, level), level, CDF97)
def icdf97(wavelet):
    """Reconstruct a signal from CDF 9/7 wavelet coefficients.

    Args:
        wavelet: a 1D or 2D Wavelet instance (its `level` attribute selects
            how many synthesis passes to run)
    Returns:
        An instance of numpy.ndarray that holds the reconstructed signal
    """
    return _CDF97.inverse(wavelet, wavelet.level)
def get_z_order(dim):
    """Return Morton (Z-order) coordinates for the first `dim` indices.

    Each index is decoded by de-interleaving its bits: even bit positions
    form the column and odd bit positions form the row.

    Args:
        dim: number of indices to decode (a power of two)
    Returns:
        A list of (row, col) tuples, one per index in [0, dim).
    """
    bit_count = int(math.log(dim, 2)) // 2
    coords = []
    for idx in range(dim):
        col = 0
        row = 0
        for b in range(bit_count):
            col |= ((idx >> (2 * b)) & 1) << b
            row |= ((idx >> (2 * b + 1)) & 1) << b
        coords.append((row, col))
    return coords
# def get_morton_order(dim, idx = 0, size = -1):
# if size < 0:
# mtx = deque()
# else:
# mtx = deque([],size)
# if idx <> 0:
# swp = idx
# idx = dim
# dim = swp
# n = int(math.log(dim,2))
# pows = range(int(n/2))
# for i in range(dim):
# x = 0
# y = 0
# for j in pows:
# x |= ((i >> 2*j) & 1) << j
# y |= ((i >> 2*j+1) & 1) << j
# if idx == 0:
# mtx += [vector((y,x))]
# else:
# idx -= 1
# return mtx
| 4,708 | 1,588 |
"""
Paint costs
You are getting ready to paint a piece of art. The canvas and brushes that you want to use will cost 40.00. Each color of paint that you buy is an additional 5.00. Determine how much money you will need based on the number of colors that you want to buy if tax at this store is 10%.
Task
Given the total number of colors of paint that you need, calculate and output the total cost of your project rounded up to the nearest whole number.
Input Format
An integer that represents the number of colors that you want to purchase for your project.
Output Format
A number that represents the cost of your purchase rounded up to the nearest whole number.
Sample Input
10
Sample Output
99
Explanation
You need 50.00 to buy 10 colors of paint + 40.00 for the canvas and brushes + 9.00 for the tax.
"""
from math import ceil

# Number of colors from stdin: 5.00 per color plus 40.00 for canvas/brushes.
cost = int(input()) * 5 + 40
# Add 10% tax, rounded up to the nearest whole number.
cost += ceil((cost / 100) * 10)
print(cost)
from __future__ import division
from .curve import Curve
from numpy import min, max, seterr
seterr(all='raise')
class PRCurve(Curve):
    """A precision-recall curve.

    `x_vals` hold recall and `y_vals` hold precision; both must lie in
    [0, 1], and at every point precision and recall must be either both
    zero or both positive.
    """
    def __init__(self, points, pos_neg_ratio, label=None):
        """Create a PR curve.

        Args:
            points: iterable of (recall, precision) pairs
            pos_neg_ratio: ratio of positives to negatives; must be > 0
            label: optional curve label
        Raises:
            ValueError: if any value lies outside [0, 1], if the ratio is
                not strictly positive, or if precision and recall are
                inconsistently zero.
        """
        Curve.__init__(self, points, label)
        self.pos_neg_ratio = pos_neg_ratio
        if max([self.x_vals, self.y_vals]) > 1:
            raise ValueError('Precision and recall cannot be greater than 1.')
        if min([self.x_vals, self.y_vals]) < 0:
            raise ValueError('Precision and recall cannot be lesser than 0.')
        if self.pos_neg_ratio <= 0:
            # BUG FIX: the message used to say ">= 0" although the check
            # rejects zero — the ratio must be strictly positive.
            raise ValueError('\'pos_neg_ratio\' must be > 0.')
        for x, y in zip(self.x_vals, self.y_vals):
            if x > 0 and y == 0:
                raise ValueError('Precision cannot be 0 if recall is > 0.')
            if x == 0 and y > 0:
                raise ValueError('Precision cannot be > 0 if recall is 0. %s %s' % (self.x_vals, self.y_vals))
    def compute_fpr_vals(self):
        """Map each (recall, precision) pair to a false-positive rate."""
        def compute_fpr_val(rec, prec):
            # FPR = rec * (P/N) * (1/prec - 1); division failures (numpy is
            # configured with seterr(all='raise')) map to an FPR of 1.
            try:
                return rec * self.pos_neg_ratio * (1/prec - 1)
            except (ZeroDivisionError, FloatingPointError):
                return 1
        return [compute_fpr_val(rec, prec) for rec, prec in zip(self.x_vals, self.y_vals)]
    def to_roc(self):
        """Convert this curve to the equivalent ROC curve."""
        from .roc_curve import ROCCurve
        fpr_vals = self.compute_fpr_vals()
        tpr_vals = self.x_vals
        points = zip(fpr_vals, tpr_vals)
        return ROCCurve(points, self.pos_neg_ratio)
    def resample(self, num_points):
        """Resample by round-tripping through ROC space."""
        return self.to_roc().resample(num_points).to_pr()
""" Utilities for homework 2.
Function "log_progress" is adapted from:
https://github.com/kuk/log-progress
"""
import matplotlib.pyplot as plt
import numpy as np
import torch
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
from blg604ehw2.atari_wrapper import LazyFrames
def comparison(*log_name_pairs, texts=[[""]*3], smooth_factor=3):
    """ Plots the given logs. There will be as many plots as
    the length of the texts argument. Logs will be plotted on
    top of each other so that they can be compared. For each
    log, mean value is plotted and the area between the
    +std and -std of the mean will be shaded.

    Args:
        log_name_pairs: (logs, name) pairs; each `logs` is a sequence of
            runs and `log[i]` is the series matching `texts[i]`
        texts: list of (title, xlabel, ylabel) triples, one per plot
        smooth_factor: window size forwarded to `smoother`
    """
    plt.ioff()
    plt.close()
    def plot_texts(title, xlabel, ylabel):
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
    for i, (title, xlabel, ylabel) in enumerate(texts):
        for logs, name in log_name_pairs:
            smoothed_logs = np.stack(
                [smoother(log[i], smooth_factor) for log in logs])
            std_logs = np.std(smoothed_logs, axis=0)
            mean_logs = np.mean(smoothed_logs, axis=0)
            max_logs = np.max(smoothed_logs, axis=0)
            min_logs = np.min(smoothed_logs, axis=0)
            plot_texts(title, xlabel, ylabel)
            plt.plot(mean_logs, label=name)
            plt.legend()
            # BUG FIX: the lower edge used np.minimum, which pushed the
            # shaded band below the observed minimum instead of clamping
            # it. Clamp the +/- std band into [min_logs, max_logs].
            plt.fill_between(np.arange(len(mean_logs)),
                             np.minimum(mean_logs+std_logs, max_logs),
                             np.maximum(mean_logs-std_logs, min_logs),
                             alpha=0.4)
    plt.show()
def smoother(array, ws):
    """Smooth `array` with a sliding-window mean of width `ws`.

    Returns a numpy array of len(array) - ws window means.
    """
    window_means = []
    for start in range(len(array) - ws):
        window = array[start:start + ws]
        window_means.append(sum(window) / ws)
    return np.array(window_means)
# Optional
def normalize(frame):
    """ Return normalized frame """
    # Shift-then-scale pixel values from [0, 255] into roughly [-1, 1].
    # Uses in-place ops, so an array/tensor argument is mutated as well
    # as returned (the caller in process_state relies on the return value).
    frame -= 128.0
    frame /= 128.0
    return frame
# Optional
def process_state(state):
    """ If the state is 4 dimensional image state
    return transposed and normalized state otherwise
    directly return the state. """
    if len(state.shape) != 4:
        # Non-image states pass through untouched.
        return state
    # Move the channel axis forward (NHWC -> NCHW) with two swaps.
    state = torch.transpose(state, 2, 3)
    state = torch.transpose(state, 1, 2)
    return normalize(state)
class LoadingBar:
    """ Loading bar for ipython notebook """
    def __init__(self, size, name):
        self.size = size
        self.name = name
        self._progress = IntProgress(min=0, max=size, value=0)
        self._label = HTML()
        display(VBox(children=[self._label, self._progress]))
    def _describe(self, index, reward):
        # Shared label template for progress and completion updates.
        return "{name}: {size}/{index}, Best reward: {reward}".format(
            name=self.name,
            size=self.size,
            index=index,
            reward=reward
        )
    def success(self, reward):
        """ Turn loading bar into "complete state" """
        self._progress.bar_style = "success"
        self._progress.value = self.size
        self._label.value = self._describe(self.size, reward)
    def progress(self, index, reward):
        """ Update progress with given index and best reward """
        self._progress.value = index
        self._label.value = self._describe(index, reward)
| 3,384 | 1,032 |
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.testing.unit_test import unit_test
from bes.archive.archive_util import archive_util
from bes.archive.archiver import archiver
from bes.archive.temp_archive import temp_archive
class test_archive_util(unit_test):
  """Unit tests for archive_util: removing, matching, checksumming and
  combining archive members across temporary zip archives."""

  def test_remove_members(self):
    # Removing a directory prefix drops every member underneath it.
    items = temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('foo-1.2.3/.import/foo.txt'), 'foo.txt' ),
      ( self.xp_path('foo-1.2.3/.import/bar.txt'), 'bar.txt' ),
    ])
    tmp_archive = temp_archive.make_temp_archive(items, 'zip', delete = not self.DEBUG)
    self.assertEqual( [
      'foo-1.2.3/.import/bar.txt',
      'foo-1.2.3/.import/foo.txt',
      'foo-1.2.3/fruits/apple.txt',
      'foo-1.2.3/fruits/durian.txt',
      'foo-1.2.3/fruits/kiwi.txt',
    ], archiver.members(tmp_archive))
    archive_util.remove_members(tmp_archive, [ 'foo-1.2.3/.import' ], debug = self.DEBUG)
    self.assertEqual( [
      'foo-1.2.3/fruits/apple.txt',
      'foo-1.2.3/fruits/durian.txt',
      'foo-1.2.3/fruits/kiwi.txt',
    ], archiver.members(tmp_archive))

  def test_member_checksums(self):
    # Checksums are computed only for the members explicitly requested.
    a = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/kiwi.txt'), 'kiwi.txt' ),
    ]), 'zip', delete = not self.DEBUG)
    self.assertEqual( {
      'foo-1.2.3/fruits/apple.txt': '7269b27861e2a5ba6947b6279bb5e66b23439d83a65a3c0cf529f5834ed2e7fb',
      'foo-1.2.3/fruits/kiwi.txt': 'a7be44d9dda7e951298316b34ce84a1b2da8b5e0bead26118145bda4fbca9329',
    }, archive_util.member_checksums(a, [ 'foo-1.2.3/fruits/apple.txt', 'foo-1.2.3/fruits/kiwi.txt' ]) )

  def test_duplicate_members(self):
    # Members appearing in more than one archive are reported with the
    # set of archives that contain them.
    a1 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/kiwi.txt'), 'kiwi.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a1-')
    a2 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/lemon.txt'), 'lemon.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/melon.txt'), 'melon.txt' ),
      ( self.xp_path('foo-1.2.3/wine/barolo.txt'), 'barolo.txt' ),
      ( self.xp_path('foo-1.2.3/cheese/brie.txt'), 'brie.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a2-')
    a3 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/strawberry.txt'), 'strawberry.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/blueberry.txt'), 'blueberry.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/banana.txt'), 'banana.txt' ),
      ( self.xp_path('foo-1.2.3/wine/barolo.txt'), 'barolo.txt' ),
      ( self.xp_path('foo-1.2.3/cheese/brie.txt'), 'brie.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a3-')
    self.assertEqual( {
      'foo-1.2.3/cheese/brie.txt': { a2, a3 },
      'foo-1.2.3/fruits/apple.txt': { a1, a2 },
      'foo-1.2.3/wine/barolo.txt': { a2, a3 },
    }, archive_util.duplicate_members([ a1, a2, a3 ]) )

  def test_duplicate_members_with_conflicts(self):
    # With only_content_conficts, duplicates whose content is identical
    # (barolo) are not reported; only real content clashes are.
    a1 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/kiwi.txt'), 'kiwi.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a1-')
    a2 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/apple.txt'), 'apple2.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/melon.txt'), 'melon.txt' ),
      ( self.xp_path('foo-1.2.3/wine/barolo.txt'), 'barolo.txt' ),
      ( self.xp_path('foo-1.2.3/cheese/brie.txt'), 'brie.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a2-')
    a3 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/strawberry.txt'), 'strawberry.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/blueberry.txt'), 'blueberry.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/banana.txt'), 'banana.txt' ),
      ( self.xp_path('foo-1.2.3/wine/barolo.txt'), 'barolo.txt' ),
      ( self.xp_path('foo-1.2.3/cheese/brie.txt'), 'brie2.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a3-')
    self.assertEqual( {
      'foo-1.2.3/cheese/brie.txt': { a2, a3 },
      'foo-1.2.3/fruits/apple.txt': { a1, a2 },
    }, archive_util.duplicate_members([ a1, a2, a3 ], only_content_conficts = True) )

  def test_combine(self):
    # Disjoint archives merge into one sorted member list.
    a1 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a1-')
    a2 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/melon.txt'), 'melon.txt' ),
      ( self.xp_path('fruits/lemon.txt'), 'lemon.txt' ),
      ( self.xp_path('fruits/dragonfruit.txt'), 'dragonfruit.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a2-')
    a3 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/strawberry.txt'), 'strawberry.txt' ),
      ( self.xp_path('fruits/pear.txt'), 'pear.txt' ),
      ( self.xp_path('fruits/plum.txt'), 'plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a3-')
    tmp_archive = self.make_temp_file(suffix = '.zip')
    archive_util.combine([ a1, a2, a3 ], tmp_archive)
    self.assertEqual( [
      self.xp_path('fruits/apple.txt'),
      self.xp_path('fruits/dragonfruit.txt'),
      self.xp_path('fruits/durian.txt'),
      self.xp_path('fruits/kiwi.txt'),
      self.xp_path('fruits/lemon.txt'),
      self.xp_path('fruits/melon.txt'),
      self.xp_path('fruits/pear.txt'),
      self.xp_path('fruits/plum.txt'),
      self.xp_path('fruits/strawberry.txt'),
    ], archiver.members(tmp_archive) )

  def test_combine_conflicts_same_content(self):
    # Overlapping members with identical content are de-duplicated.
    a1 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('fruits/plum.txt'), 'plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a1-')
    a2 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('fruits/melon.txt'), 'melon.txt' ),
      ( self.xp_path('fruits/lemon.txt'), 'lemon.txt' ),
      ( self.xp_path('fruits/dragonfruit.txt'), 'dragonfruit.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a2-')
    a3 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/lemon.txt'), 'lemon.txt' ),
      ( self.xp_path('fruits/strawberry.txt'), 'strawberry.txt' ),
      ( self.xp_path('fruits/pear.txt'), 'pear.txt' ),
      ( self.xp_path('fruits/plum.txt'), 'plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a3-')
    tmp_archive = self.make_temp_file(suffix = '.zip')
    archive_util.combine([ a1, a2, a3 ], tmp_archive)
    self.assertEqual( [
      self.xp_path('fruits/apple.txt'),
      self.xp_path('fruits/dragonfruit.txt'),
      self.xp_path('fruits/durian.txt'),
      self.xp_path('fruits/kiwi.txt'),
      self.xp_path('fruits/lemon.txt'),
      self.xp_path('fruits/melon.txt'),
      self.xp_path('fruits/pear.txt'),
      self.xp_path('fruits/plum.txt'),
      self.xp_path('fruits/strawberry.txt'),
    ], archiver.members(tmp_archive) )

  def test_combine_conflicts_different_content_no_check(self):
    # Without check_content, conflicting content does not raise.
    a1 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('fruits/plum.txt'), '1plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a1-')
    a2 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('fruits/melon.txt'), 'melon.txt' ),
      ( self.xp_path('fruits/lemon.txt'), '1lemon.txt' ),
      ( self.xp_path('fruits/dragonfruit.txt'), 'dragonfruit.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a2-')
    a3 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/lemon.txt'), '2lemon.txt' ),
      ( self.xp_path('fruits/strawberry.txt'), 'strawberry.txt' ),
      ( self.xp_path('fruits/pear.txt'), 'pear.txt' ),
      ( self.xp_path('fruits/plum.txt'), '2plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a3-')
    tmp_archive = self.make_temp_file(suffix = '.zip')
    archive_util.combine([ a1, a2, a3 ], tmp_archive)
    self.assertEqual( [
      self.xp_path('fruits/apple.txt'),
      self.xp_path('fruits/dragonfruit.txt'),
      self.xp_path('fruits/durian.txt'),
      self.xp_path('fruits/kiwi.txt'),
      self.xp_path('fruits/lemon.txt'),
      self.xp_path('fruits/melon.txt'),
      self.xp_path('fruits/pear.txt'),
      self.xp_path('fruits/plum.txt'),
      self.xp_path('fruits/strawberry.txt'),
    ], archiver.members(tmp_archive) )

  def test_combine_conflicts_different_content_with_check(self):
    # With check_content, conflicting content raises RuntimeError.
    a1 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('fruits/plum.txt'), '1plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a1-')
    a2 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('fruits/melon.txt'), 'melon.txt' ),
      ( self.xp_path('fruits/lemon.txt'), '1lemon.txt' ),
      ( self.xp_path('fruits/dragonfruit.txt'), 'dragonfruit.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a2-')
    a3 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/lemon.txt'), '2lemon.txt' ),
      ( self.xp_path('fruits/strawberry.txt'), 'strawberry.txt' ),
      ( self.xp_path('fruits/pear.txt'), 'pear.txt' ),
      ( self.xp_path('fruits/plum.txt'), '2plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a3-')
    tmp_archive = self.make_temp_file(suffix = '.zip')
    with self.assertRaises(RuntimeError) as ctx:
      archive_util.combine([ a1, a2, a3 ], tmp_archive, check_content = True)

  def test_combine_with_base_dir(self):
    # base_dir prefixes every member of the combined archive.
    a1 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a1-')
    a2 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/melon.txt'), 'melon.txt' ),
      ( self.xp_path('fruits/lemon.txt'), 'lemon.txt' ),
      ( self.xp_path('fruits/dragonfruit.txt'), 'dragonfruit.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a2-')
    a3 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/strawberry.txt'), 'strawberry.txt' ),
      ( self.xp_path('fruits/pear.txt'), 'pear.txt' ),
      ( self.xp_path('fruits/plum.txt'), 'plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a3-')
    tmp_archive = self.make_temp_file(suffix = '.zip')
    archive_util.combine([ a1, a2, a3 ], tmp_archive, base_dir = 'foo-1.2.3')
    self.assertEqual( [
      self.xp_path('foo-1.2.3/fruits/apple.txt'),
      self.xp_path('foo-1.2.3/fruits/dragonfruit.txt'),
      self.xp_path('foo-1.2.3/fruits/durian.txt'),
      self.xp_path('foo-1.2.3/fruits/kiwi.txt'),
      self.xp_path('foo-1.2.3/fruits/lemon.txt'),
      self.xp_path('foo-1.2.3/fruits/melon.txt'),
      self.xp_path('foo-1.2.3/fruits/pear.txt'),
      self.xp_path('foo-1.2.3/fruits/plum.txt'),
      self.xp_path('foo-1.2.3/fruits/strawberry.txt'),
    ], archiver.members(tmp_archive) )

  def test_combine_conflicts_different_content_with_check_and_exclude(self):
    # Excluded members are skipped entirely, so the content conflict on
    # plum.txt no longer raises.
    a1 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('fruits/plum.txt'), '1plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a1-')
    a2 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('fruits/melon.txt'), 'melon.txt' ),
      ( self.xp_path('fruits/plum.txt'), '2plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a2-')
    a3 = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/lemon.txt'), 'lemon.txt' ),
      ( self.xp_path('fruits/strawberry.txt'), 'strawberry.txt' ),
      ( self.xp_path('fruits/plum.txt'), '3plum.txt' ),
    ]), 'zip', delete = not self.DEBUG, prefix = 'a3-')
    tmp_archive = self.make_temp_file(suffix = '.zip')
    archive_util.combine([ a1, a2, a3 ], tmp_archive, check_content = True, exclude = [ 'fruits/plum.txt' ])
    self.assertEqual( [
      self.xp_path('fruits/apple.txt'),
      self.xp_path('fruits/durian.txt'),
      self.xp_path('fruits/kiwi.txt'),
      self.xp_path('fruits/lemon.txt'),
      self.xp_path('fruits/melon.txt'),
      self.xp_path('fruits/strawberry.txt'),
    ], archiver.members(tmp_archive) )

  def test_match_members(self):
    # Patterns use fnmatch-style wildcards over full member paths.
    tmp_archive = temp_archive.make_temp_archive(temp_archive.make_temp_item_list([
      ( self.xp_path('fruits/apple.pdf'), 'apple.pdf' ),
      ( self.xp_path('fruits/durian.pdf'), 'durian.pdf' ),
      ( self.xp_path('fruits/plum.pdf'), 'plum.pdf' ),
      ( self.xp_path('cheese/brie.txt'), 'brie.txt' ),
      ( self.xp_path('cheese/cheddar.txt'), 'cheddar.txt' ),
      ( self.xp_path('cheese/fontina.txt'), 'fontina.txt' ),
    ]), 'zip', delete = not self.DEBUG)
    self.assertEqual( [
      'cheese/brie.txt',
      'cheese/cheddar.txt',
      'cheese/fontina.txt',
      'fruits/apple.pdf',
      'fruits/durian.pdf',
      'fruits/plum.pdf',
    ], archive_util.match_members(tmp_archive, [ '*' ]) )
    self.assertEqual( [
      'cheese/brie.txt',
      'cheese/cheddar.txt',
      'cheese/fontina.txt',
    ], archive_util.match_members(tmp_archive, [ 'cheese*' ]) )
    self.assertEqual( [
      'cheese/brie.txt',
      'cheese/cheddar.txt',
      'cheese/fontina.txt',
    ], archive_util.match_members(tmp_archive, [ '*.txt' ]) )
    self.assertEqual( [
      'fruits/apple.pdf',
      'fruits/durian.pdf',
      'fruits/plum.pdf',
    ], archive_util.match_members(tmp_archive, [ '*.pdf' ]) )

  def test_remove_members_matching_patterns(self):
    # Non-matching patterns remove nothing; wildcard patterns remove
    # every matching member.
    items = temp_archive.make_temp_item_list([
      ( self.xp_path('foo-1.2.3/fruits/apple.txt'), 'apple.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/durian.txt'), 'durian.txt' ),
      ( self.xp_path('foo-1.2.3/fruits/kiwi.txt'), 'kiwi.txt' ),
      ( self.xp_path('foo-1.2.3/.import/foo.txt'), 'foo.txt' ),
      ( self.xp_path('foo-1.2.3/.import/bar.txt'), 'bar.txt' ),
      ( self.xp_path('foo-1.2.3/cheese/brie.jpg'), 'brie.jpg' ),
      ( self.xp_path('foo-1.2.3/cheese/halumi.jpg'), 'halumi.jpg' ),
      ( self.xp_path('foo-1.2.3/cheese/feta.jpg'), 'feta.jpg' ),
    ])
    tmp_archive = temp_archive.make_temp_archive(items, 'zip', delete = not self.DEBUG)
    archive_util.remove_members_matching_patterns(tmp_archive, [ 'notfound' ], debug = self.DEBUG)
    self.assertEqual( [
      'foo-1.2.3/.import/bar.txt',
      'foo-1.2.3/.import/foo.txt',
      'foo-1.2.3/cheese/brie.jpg',
      'foo-1.2.3/cheese/feta.jpg',
      'foo-1.2.3/cheese/halumi.jpg',
      'foo-1.2.3/fruits/apple.txt',
      'foo-1.2.3/fruits/durian.txt',
      'foo-1.2.3/fruits/kiwi.txt',
    ], archiver.members(tmp_archive))
    tmp_archive = temp_archive.make_temp_archive(items, 'zip', delete = not self.DEBUG)
    archive_util.remove_members_matching_patterns(tmp_archive, [ '*.txt' ], debug = self.DEBUG)
    self.assertEqual( [
      'foo-1.2.3/cheese/brie.jpg',
      'foo-1.2.3/cheese/feta.jpg',
      'foo-1.2.3/cheese/halumi.jpg',
    ], archiver.members(tmp_archive))
    tmp_archive = temp_archive.make_temp_archive(items, 'zip', delete = not self.DEBUG)
    archive_util.remove_members_matching_patterns(tmp_archive, [ '*cheese*' ], debug = self.DEBUG)
    self.assertEqual( [
      'foo-1.2.3/.import/bar.txt',
      'foo-1.2.3/.import/foo.txt',
      'foo-1.2.3/fruits/apple.txt',
      'foo-1.2.3/fruits/durian.txt',
      'foo-1.2.3/fruits/kiwi.txt',
    ], archiver.members(tmp_archive))

  def test_read_patterns(self):
    # Patterns are read one per line from a text file.
    content = '''\
cheese.txt
foo.jpg
test_orange/foo.txt
test_kiwi/*
'''
    tmp_file = self.make_temp_file(content = content)
    self.assertEqual( [
      'cheese.txt',
      'foo.jpg',
      'test_orange/foo.txt',
      'test_kiwi/*',
    ], archive_util.read_patterns(tmp_file) )
# Allow running this test module directly as a script.
if __name__ == "__main__":
  unit_test.main()
| 17,982 | 7,428 |
# -*- coding: utf-8 -*-
import argparse
import os
from .client import YaleClient
class EnvDefault(argparse.Action):
    """argparse action that sources an option's default from the environment.

    When `envvar` is present in the environment with a non-blank value,
    that value becomes the option's default and the option stops being
    required. The variable name is appended to the help text.
    """
    def __init__(self, envvar, help, required=True, default=None, **kwargs):
        if envvar and envvar in os.environ:
            env_value = os.getenv(key=envvar, default=default)
            # Treat blank/whitespace-only environment values as unset.
            if not len(env_value.strip()):
                env_value = default
            default = env_value
        if required and default:
            required = False
        super(EnvDefault, self).__init__(
            default=default,
            required=required,
            help="{} (env: {})".format(help, envvar),
            **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)
# Base parser shared by every sub-command: selects the API and carries the
# Yale account credentials (which may come from the environment via
# EnvDefault). add_help is off so child parsers can re-add it.
yale_base_cli = \
    argparse.ArgumentParser(description="Yale-cli is designed to operate on yale systems, and doing tasks like opening "
                                        "and closing locks/alarm systems.",
                            add_help=False)
yale_base_cli.add_argument('--api', type=str.upper, required=True,
                           choices=["LOCK", "ALARM"],
                           help='what API to use', default="LOCK")
yale_base_cli.add_argument('--username', type=str, action=EnvDefault, envvar='YALE_USERNAME',
                           help='yale username')
yale_base_cli.add_argument('--password', type=str, action=EnvDefault, envvar='YALE_PASSWORD',
                           help='yale password')
# Lock-specific options layered on top of the base parser.
yale_lock_arguments = argparse.ArgumentParser(parents=[yale_base_cli], add_help=True)
# BUG FIX: help text said "If a this is not given" (typo).
yale_lock_arguments.add_argument('--lock_id', type=str, help='The lock to operate on. If this is not given then '
                                                             'operation will be executed on all')
yale_lock_arguments.add_argument('--operation', type=str.upper, required=True,
                                 choices=["STATUS", "OPEN", "CLOSE"],
                                 help='what operation to do', default="STATUS")
yale_lock_arguments.add_argument('--pin', type=str, action=EnvDefault, envvar='LOCK_PIN_CODE',
                                 help='lock pin code', required=False)
def api_locks():
    """Entry point for the LOCK API: show status, open or close locks."""
    try:
        args = yale_lock_arguments.parse_args()
        client = YaleClient(username=args.username, password=args.password)
        for lock in client.lock_api.locks():
            # Operate on all locks unless a specific lock_id was requested.
            if args.lock_id is not None and args.lock_id != lock.name:
                continue
            if args.operation == 'CLOSE':
                if not lock.is_locked():
                    lock.close()
            elif args.operation == 'OPEN':
                if args.pin is None:
                    raise RuntimeError("To open a lock you must specify a pin!")
                if lock.is_locked():
                    lock.open(pin_code=args.pin)
            # STATUS needs no action; the state is printed below either way.
            print(lock)
    except (argparse.ArgumentTypeError, argparse.ArgumentError):
        yale_lock_arguments.print_help()
def api_not_implemented():
    """Placeholder handler for APIs that have no implementation yet."""
    message = "Not implemented."
    raise RuntimeError(message)
# Dispatch table mapping the --api choice to its handler function.
apis = {
    'LOCK': api_locks,
    'ALARM': api_not_implemented
}
def main():
    """Parse the --api selection and dispatch to the matching handler."""
    try:
        known, _ignore = yale_base_cli.parse_known_args()
        handler = apis.get(known.api)
        handler()
    except argparse.ArgumentError:
        yale_base_cli.print_help()
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 3,625 | 993 |
from .. import models
from django_filters import rest_framework as filters
class MajorFilterSet(filters.FilterSet):
    """Query filters for Major: substring match on name/description,
    exact match on id and is_active."""
    name = filters.CharFilter(field_name="name", lookup_expr="contains")
    description = filters.CharFilter(field_name="description", lookup_expr="contains")
    id = filters.NumberFilter(field_name="id", lookup_expr="exact")
    is_active = filters.BooleanFilter(field_name="is_active", lookup_expr="exact")
    class Meta:
        model = models.Major
        fields = ["name", "id", "description", "is_active"]
class ClassInfoFilterSet(filters.FilterSet):
    """Query filters for ClassInfo: substring match on its own name and
    on the related major's name."""
    name = filters.CharFilter(field_name="name", lookup_expr="contains")
    major_name = filters.CharFilter(field_name="major__name", lookup_expr="contains")
    class Meta:
        model = models.ClassInfo
        fields = ["name", "major_name"]
class TeacherFilterSet(filters.FilterSet):
    """Query filters for Teacher: substring match on the linked user's
    full name."""
    full_name = filters.CharFilter(field_name="user__full_name", lookup_expr="contains")
    class Meta:
        model = models.Teacher
        fields = ["full_name"]
class StudentFilterSet(filters.FilterSet):
    """Query filters for Student: substring match on the linked user's
    full name and on the class name."""
    full_name = filters.CharFilter(field_name="user__full_name", lookup_expr="contains")
    class_name = filters.CharFilter(
        field_name="class_info__name", lookup_expr="contains"
    )
    class Meta:
        model = models.Student
        fields = ["full_name", "class_name"]
| 1,376 | 403 |
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from django.conf import settings
from django.views.i18n import set_language
from proposals.utils.statistics_utils import get_average_turnaround_time, \
get_qs_for_long_route_reviews, get_qs_for_short_route_reviews, \
get_qs_for_year, \
get_qs_for_year_and_committee, get_review_qs_for_proposals, \
get_total_long_route_proposals, \
get_total_short_route_proposals, \
get_total_students, get_total_submitted_proposals
class Command(BaseCommand):
    """Management command that prints proposal statistics for one year,
    overall and per reviewing chamber (AK/LK)."""
    help = 'Calculate statistics for a given year'
    def add_arguments(self, parser):
        # Single required positional argument: the year to report on.
        parser.add_argument('year', type=int)
    def handle(self, *args, **options):
        """Print totals, per-relation counts and turnaround times for the
        whole year and for each chamber."""
        AK = Group.objects.get(name=settings.GROUP_GENERAL_CHAMBER)
        LK = Group.objects.get(name=settings.GROUP_LINGUISTICS_CHAMBER)
        # One queryset per report section: overall, then per chamber.
        datasets = {
            'Total': get_qs_for_year(options['year']),
            'AK': get_qs_for_year_and_committee(options['year'], AK),
            'LK': get_qs_for_year_and_committee(options['year'], LK)
        }
        for name, dataset in datasets.items():
            print(name)
            print('Total submitted:', get_total_submitted_proposals(dataset))
            print(
                'Total short route:',
                get_total_short_route_proposals(dataset)
            )
            print(
                'Total long route:',
                get_total_long_route_proposals(dataset)
            )
            print()
            print('Total per relation:')
            for relation, count in get_total_students(dataset).items():
                print(count, relation)
            print()
            print("Turnaround times:")
            # Average review duration, split by review route.
            print(
                "Short route",
                get_average_turnaround_time(
                    get_qs_for_short_route_reviews(dataset)
                ),
                'days'
            )
            print(
                "Long route",
                get_average_turnaround_time(
                    get_qs_for_long_route_reviews(dataset)
                ),
                'days'
            )
            print()
"""Scraper to get info on the latest Dilbert comic."""
from datetime import timedelta
from typing import Optional
from constants import LATEST_DATE_REFRESH, SRC_PREFIX
from scraper import Scraper, ScrapingException
from utils import curr_date, date_to_str, str_to_date
class LatestDateScraper(Scraper[str, None]):
    """Class to scrape the date of the latest Dilbert comic.
    This scraper returns that date in the format used by "dilbert.com".
    Attributes:
        pool: The database connection pool
        sess: The HTTP client session
        logger: The main app logger
    """
    async def _get_cached_data(self, _: None = None, /) -> Optional[str]:
        """Get the cached latest date from the database.
        If the latest date entry is stale (i.e. it was updated a long time
        back), or it wasn't found in the cache, None is returned.
        """
        async with self.pool.acquire() as conn:
            # The interval for "freshness" of the entry has to be given this
            # way instead of '$1 hours', because of PostgreSQL's syntax.
            # All dates managed by asyncpg are set to UTC.
            date = await conn.fetchval(
                """SELECT latest FROM latest_date
                WHERE last_check >= CURRENT_TIMESTAMP - INTERVAL '1 hour' * $1;
                """,
                LATEST_DATE_REFRESH,
            )
        if date is not None:
            # A "fresh" entry was found
            date = date_to_str(date)
        return date
    async def _cache_data(self, date: str, _: None = None, /) -> None:
        """Cache the latest date into the database."""
        # The WHERE condition is not required as there is always only one row
        # in the `latest_date` table.
        async with self.pool.acquire() as conn:
            result = await conn.execute(
                "UPDATE latest_date SET latest = $1;", str_to_date(date)
            )
        # NOTE(review): assumes an asyncpg-style status tag like "UPDATE 1",
        # whose second token is the affected-row count — confirm against the
        # driver's execute() documentation.
        rows_updated = int(result.split()[1])
        if rows_updated == 1:
            self.logger.info("Successfully updated latest date in cache")
            return
        elif rows_updated > 1:
            raise RuntimeError(
                'The "latest_date" table has more than one row, '
                "i.e. this table is corrupt"
            )
        # No rows were updated, so the "latest_date" table must be empty. This
        # should only happen if this table was cleared manually, or this is the
        # first run of this code on this database.
        self.logger.info(
            "Couldn't update latest date in cache; trying to insert it"
        )
        async with self.pool.acquire() as conn:
            await conn.execute(
                "INSERT INTO latest_date (latest) VALUES ($1);",
                str_to_date(date),
            )
    async def _scrape_data(self, _: None = None, /) -> str:
        """Scrape the date of the latest comic from "dilbert.com"."""
        # If there is no comic for this date yet, "dilbert.com" will
        # auto-redirect to the homepage.
        latest = date_to_str(curr_date())
        url = SRC_PREFIX + latest
        async with self.sess.get(url) as resp:
            self.logger.debug(f"Got response for latest date: {resp.status}")
            # The final URL's last path segment is the comic's date (empty
            # when redirected to the homepage).
            date = resp.url.path.split("/")[-1]
        if date == "":
            # Redirected to homepage, implying that there's no comic for this
            # date. There must be a comic for the previous date, so use that.
            date = date_to_str(curr_date() - timedelta(days=1))
            self.logger.info(
                f"No comic found for today ({latest}); using date: {date}"
            )
        else:
            # Check to see if the scraped date is invalid
            try:
                str_to_date(date)
            except ValueError:
                raise ScrapingException(
                    "Error in scraping the latest date from the URL"
                )
        return date
    async def get_latest_date(self) -> str:
        """Retrieve the date of the latest comic.
        Returns:
            The latest date
        """
        return await super().get_data(None)
    async def update_latest_date(self, date: str) -> None:
        """Update the latest date in the cache."""
        await self._cache_data(date)
| 4,299 | 1,197 |
from montag.domain.entities import Provider
from montag.use_cases.fetch_playlists import FetchPlaylists
from montag.use_cases.support import Failure, Success
from tests import factory
def test_fetch_playlists(repos, spotify_repo):
    """execute() wraps the playlists found by the repo in a Success."""
    playlists = factory.playlists(2)
    spotify_repo.find_playlists.return_value = playlists
    outcome = FetchPlaylists(repos).execute(Provider.SPOTIFY)
    assert outcome == Success(playlists)
def test_error_handling_with_unexpected_errors(repos, spotify_repo):
    """Unexpected repo exceptions become a Failure carrying the original error."""
    boom = ValueError("some message")
    spotify_repo.find_playlists.side_effect = boom
    result = FetchPlaylists(repos).execute(Provider.SPOTIFY)
    assert result == Failure("some message", boom)
| 742 | 228 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
import unittest
from xmrswap.interface_btc import (
testBTCInterface
)
from xmrswap.util import (
make_int,
format_amount,
validate_amount
)
from xmrswap.ecc_util import b2h, h2b
from xmrswap.ed25519_fast_util import (
hashToEd25519,
encodepoint
)
import xmrswap.contrib.ed25519_fast as edf
class Test(unittest.TestCase):
    """Tests for the BTC interface self-check, fixed-point amount helpers
    (make_int / format_amount / validate_amount) and ed25519 utilities."""
    def test_script_checker(self):
        # The interface module ships its own self-test routine.
        testBTCInterface()
    def test_make_int(self):
        # make_int converts a string or float amount to an integer with
        # 8 decimal places (satoshis); format_amount is its inverse.
        def test_case(vs, vf, expect_int):
            i = make_int(vs)
            assert(i == expect_int and isinstance(i, int))
            i = make_int(vf)
            assert(i == expect_int and isinstance(i, int))
            vs_out = format_amount(i, 8)
            # Strip trailing zeros before comparing with the input string.
            for i in range(7):
                if vs_out[-1] == '0':
                    vs_out = vs_out[:-1]
            if '.' in vs:
                assert(vs_out == vs)
            else:
                # Integer inputs round-trip with a trailing ".0" removed.
                assert(vs_out[:-2] == vs)
        test_case('0', 0, 0)
        test_case('1', 1, 100000000)
        test_case('10', 10, 1000000000)
        test_case('0.00899999', 0.00899999, 899999)
        test_case('899999.0', 899999.0, 89999900000000)
        test_case('899999.00899999', 899999.00899999, 89999900899999)
        test_case('0.0', 0.0, 0)
        test_case('1.0', 1.0, 100000000)
        test_case('1.1', 1.1, 110000000)
        test_case('1.2', 1.2, 120000000)
        test_case('0.00899991', 0.00899991, 899991)
        test_case('0.0089999', 0.0089999, 899990)
        test_case('0.0089991', 0.0089991, 899910)
        test_case('0.123', 0.123, 12300000)
        test_case('123000.000123', 123000.000123, 12300000012300)
        # More than 8 decimal places must be rejected by default.
        try:
            make_int('0.123456789')
            assert(False)
        except Exception as e:
            assert(str(e) == 'Mantissa too long')
        validate_amount('0.12345678')
        # floor
        assert(make_int('0.123456789', r=-1) == 12345678)
        # Round up
        assert(make_int('0.123456789', r=1) == 12345679)
    def test_makeInt12(self):
        # Same round-trip checks with 12 decimal places.
        def test_case(vs, vf, expect_int):
            i = make_int(vs, 12)
            assert(i == expect_int and isinstance(i, int))
            i = make_int(vf, 12)
            assert(i == expect_int and isinstance(i, int))
            vs_out = format_amount(i, 12)
            # Strip trailing zeros before comparing with the input string.
            for i in range(7):
                if vs_out[-1] == '0':
                    vs_out = vs_out[:-1]
            if '.' in vs:
                assert(vs_out == vs)
            else:
                assert(vs_out[:-2] == vs)
        test_case('0.123456789', 0.123456789, 123456789000)
        test_case('0.123456789123', 0.123456789123, 123456789123)
        try:
            make_int('0.1234567891234', 12)
            assert(False)
        except Exception as e:
            assert(str(e) == 'Mantissa too long')
        validate_amount('0.123456789123', 12)
        try:
            validate_amount('0.1234567891234', 12)
            assert(False)
        except Exception as e:
            assert('Too many decimal places' in str(e))
        # Float inputs are validated the same way as strings.
        try:
            validate_amount(0.1234567891234, 12)
            assert(False)
        except Exception as e:
            assert('Too many decimal places' in str(e))
    def test_ed25519(self):
        # Known-answer tests for point encoding and hash-to-point mapping.
        assert(encodepoint(edf.B) == b'Xfffffffffffffffffffffffffffffff')
        assert(b2h(encodepoint(hashToEd25519(encodepoint(edf.B))))
               == '13b663e5e06bf5301c77473bb2fc5beb51e4046e9b7efef2f6d1a324cb8b1094')
        test_point_2 = '97ab9932634c2a71ded409c73e84d64487dcc224f9728fde24ef3327782e68c3'
        assert(b2h(encodepoint(hashToEd25519(h2b(test_point_2))))
               == 'ade1232c101e6e42564b97ac2b38387a509df0a31d38e36bf4bdf4ad2f4f5573')
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| 4,012 | 1,844 |
from Panel.master_panel import *
"""欢迎使用V2Ray云彩姬"""
# (Banner above: "Welcome to V2Ray Cloud Picking Maiden" — a no-op string
# statement kept as-is; it is not a module docstring since it follows code.)
if __name__ == '__main__':
    # Launch the interactive master panel.
    V2Rayc = V2RaycSpider_Master_Panel()
    try:
        V2Rayc.home_menu()
    except Exception as e:
        # Best-effort: route any uncaught error to the panel's debug handler.
        V2Rayc.debug(e)
    finally:
        # Always run panel cleanup, even after an error.
        V2Rayc.kill()
from django.urls import path
from .views import index, supporter_index, customer_index
# URL namespace, used for reversing, e.g. reverse('toppage:index').
app_name = 'toppage'
urlpatterns = [
    path('', index, name='index'),
    path('supporter/', supporter_index, name='supporter_index'),
    path('customer/', customer_index, name='customer_index'),
]
from forest_constants import (LEAFY, CONIFEROUS)
def get_forest_dimensions(forest):
    """Return (rows, columns) of a rectangular forest grid.

    An empty forest yields (0, 0); otherwise the column count is taken
    from the first row.
    """
    if not forest:
        return 0, 0
    return len(forest), len(forest[0])
def get_tree_counts(forest):
    """Return (leafy, coniferous) totals summed over every row of the forest."""
    leafy_total = sum(row.count(LEAFY) for row in forest)
    coniferous_total = sum(row.count(CONIFEROUS) for row in forest)
    return leafy_total, coniferous_total
| 443 | 171 |
"""No identified need to test model interface functionality."""
| 64 | 13 |
import sys
import h5py
import yaml
import numpy as np
from fuel.datasets import H5PYDataset
from fuel.streams import DataStream
from fuel.schemes import SequentialScheme, ShuffledScheme
from fuel.transformers import Mapping
from blocks.extensions import saveload, predicates
from blocks.extensions.training import TrackTheBest
from blocks import main_loop
from fuel.utils import do_not_pickle_attributes
#Define this class to skip serialization of extensions
@do_not_pickle_attributes('extensions')
class MainLoop(main_loop.MainLoop):
    """Main loop whose `extensions` attribute is skipped when pickling.

    On deserialization, `load` restores the attribute as an empty list.
    """
    def __init__(self, **kwargs):
        super(MainLoop, self).__init__(**kwargs)
    def load(self):
        # Invoked by do_not_pickle_attributes to recreate the skipped attr.
        self.extensions = []
def transpose_stream(data):
    """Swap the first two axes of both arrays in a (sources, targets) pair.

    Used to reorder batches from [batch, sequence, ...] to
    [sequence, batch, ...] as recurrent bricks expect.
    """
    sources, targets = data
    return np.swapaxes(sources, 0, 1), np.swapaxes(targets, 0, 1)
def track_best(channel, save_path):
    """Return extensions that track `channel`'s best (lowest) value and
    checkpoint the main loop to `save_path` whenever a new best appears.
    """
    # Pickling a large main loop can exceed the default recursion limit,
    # so raise it before any checkpoint happens.
    sys.setrecursionlimit(1500000)
    tracker = TrackTheBest(channel, choose_best=min)
    checkpoint = saveload.Checkpoint(
        save_path, after_training=False, use_cpickle=True)
    # Only write the checkpoint when the log records a new best for `channel`.
    checkpoint.add_condition(["after_epoch"],
        predicate=predicates.OnLogRecord('{0}_best_so_far'.format(channel)))
    return [tracker, checkpoint]
def get_metadata(hdf5_file):
    """Read vocabulary mappings stored as YAML strings on the HDF5 'targets'
    attributes.

    Args:
        hdf5_file: Path to the HDF5 dataset file.

    Returns:
        Tuple of (ix_to_out, out_to_ix, out_size) where out_size is the
        vocabulary size.
    """
    # Open read-only and explicitly: relying on h5py's default mode is
    # version-dependent, and we only read attributes here.
    with h5py.File(hdf5_file, "r") as f:
        # safe_load: these attrs are plain mappings, and yaml.load without an
        # explicit Loader is unsafe and an error in PyYAML >= 6.
        ix_to_out = yaml.safe_load(f['targets'].attrs['ix_to_out'])
        out_to_ix = yaml.safe_load(f['targets'].attrs['out_to_ix'])
    out_size = len(ix_to_out)
    return ix_to_out, out_to_ix, out_size
def get_stream(hdf5_file, which_set, batch_size=None):
    """Build a shuffled, transposed data stream over one split of the dataset.

    Args:
        hdf5_file: Path to the HDF5 dataset file.
        which_set: Name of the split to iterate (e.g. 'train').
        batch_size: Examples per batch; defaults to the whole split.

    Returns:
        A fuel stream yielding [sequence, batch, features]-ordered batches.
    """
    dataset = H5PYDataset(
        hdf5_file, which_sets=(which_set,), load_in_memory=True)
    if batch_size is None:  # `is None`, not `== None`: identity check for the sentinel
        batch_size = dataset.num_examples
    stream = DataStream(dataset=dataset, iteration_scheme=ShuffledScheme(
        examples=dataset.num_examples, batch_size=batch_size))
    # Required because Recurrent bricks receive as input [sequence, batch,
    # features]
    return Mapping(stream, transpose_stream)
| 1,923 | 640 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import os
import sys
from compat import unittest
from distlib.compat import url2pathname, urlparse, urljoin
from distlib.database import DistributionPath, make_graph, make_dist
from distlib.locators import (SimpleScrapingLocator, PyPIRPCLocator,
PyPIJSONLocator, DirectoryLocator,
DistPathLocator, AggregatingLocator,
JSONLocator, DistPathLocator,
DependencyFinder, locate,
get_all_distribution_names, default_locator)
HERE = os.path.abspath(os.path.dirname(__file__))
PYPI_RPC_HOST = 'http://python.org/pypi'
PYPI_WEB_HOST = os.environ.get('PYPI_WEB_HOST', 'https://pypi.python.org/simple/')
class LocatorTestCase(unittest.TestCase):
    """Exercises the distlib locators (XML-RPC, JSON, scraping, directory,
    dist-path, aggregating), dependency finding and locate() helpers.

    Network-dependent tests are skipped when SKIP_SLOW is set or PyPI is
    unreachable.
    """
    @unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
    def test_xmlrpc(self):
        # Requires network access to the PyPI XML-RPC endpoint.
        locator = PyPIRPCLocator(PYPI_RPC_HOST)
        try:
            result = locator.get_project('sarge')
        except Exception: # pragma: no cover
            raise unittest.SkipTest('PyPI XML-RPC not available')
        self.assertIn('0.1', result)
        dist = result['0.1']
        self.assertEqual(dist.name, 'sarge')
        self.assertEqual(dist.version, '0.1')
        self.assertEqual(dist.source_url,
                         'https://pypi.python.org/packages/source/s/sarge/'
                         'sarge-0.1.tar.gz')
        self.assertEqual(dist.digest,
                         ('md5', '961ddd9bc085fdd8b248c6dd96ceb1c8'))
        try:
            names = locator.get_distribution_names()
        except Exception: # pragma: no cover
            raise unittest.SkipTest('PyPI XML-RPC not available')
        self.assertGreater(len(names), 25000)
    @unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
    def test_json(self):
        locator = PyPIJSONLocator(PYPI_RPC_HOST)
        result = locator.get_project('sarge')
        self.assertIn('0.1.1', result)
        dist = result['0.1.1']
        self.assertEqual(dist.name, 'sarge')
        self.assertEqual(dist.version, '0.1.1')
        self.assertEqual(dist.source_url,
                         'https://pypi.python.org/packages/source/s/sarge/'
                         'sarge-0.1.1.tar.gz')
        self.assertEqual(dist.digest,
                         ('md5', '2a9b9d46e4ef6ae51e2a5ff7de93d9dd'))
        # The JSON API cannot enumerate all distributions.
        self.assertRaises(NotImplementedError, locator.get_distribution_names)
    @unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
    def test_scraper(self):
        locator = SimpleScrapingLocator('https://pypi.python.org/simple/')
        # Lookup should be case-insensitive.
        for name in ('sarge', 'Sarge'):
            result = locator.get_project(name)
            self.assertIn('0.1', result)
            dist = result['0.1']
            self.assertEqual(dist.name, 'sarge')
            self.assertEqual(dist.version, '0.1')
            self.assertEqual(dist.source_url,
                             'https://pypi.python.org/packages/source/s/sarge/'
                             'sarge-0.1.tar.gz')
            self.assertEqual(dist.digest,
                             ('md5', '961ddd9bc085fdd8b248c6dd96ceb1c8'))
        return
        # The following is too slow
        names = locator.get_distribution_names()
        self.assertGreater(len(names), 25000)
    @unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
    def test_unicode_project_name(self):
        # Just checking to see that no exceptions are raised.
        NAME = '\u2603'
        locator = SimpleScrapingLocator('https://pypi.python.org/simple/')
        result = locator.get_project(NAME)
        self.assertFalse(result)
        locator = PyPIJSONLocator('https://pypi.python.org/pypi/')
        result = locator.get_project(NAME)
        self.assertFalse(result)
    def test_dir(self):
        # Recursive directory locator finds archives in nested subdirs.
        d = os.path.join(HERE, 'fake_archives')
        locator = DirectoryLocator(d)
        expected = os.path.join(HERE, 'fake_archives', 'subdir',
                                'subsubdir', 'Flask-0.9.tar.gz')
        def get_path(url):
            t = urlparse(url)
            return url2pathname(t.path)
        for name in ('flask', 'Flask'):
            result = locator.get_project(name)
            self.assertIn('0.9', result)
            dist = result['0.9']
            self.assertEqual(dist.name, 'Flask')
            self.assertEqual(dist.version, '0.9')
            self.assertEqual(os.path.normcase(get_path(dist.source_url)),
                             os.path.normcase(expected))
        names = locator.get_distribution_names()
        expected = set(['Flask', 'python-gnupg', 'coverage', 'Django'])
        if sys.version_info[:2] == (2, 7):
            expected.add('config')
        self.assertEqual(names, expected)
    def test_dir_nonrecursive(self):
        # With recursive=False, archives in subdirectories are invisible.
        d = os.path.join(HERE, 'fake_archives')
        locator = DirectoryLocator(d, recursive=False)
        expected = os.path.join(HERE, 'fake_archives', 'subdir',
                                'subsubdir', 'Flask-0.9.tar.gz')
        def get_path(url):
            t = urlparse(url)
            return url2pathname(t.path)
        for name in ('flask', 'Flask'):
            result = locator.get_project(name)
            self.assertEqual(result, {})
        names = locator.get_distribution_names()
        expected = set(['coverage'])
        self.assertEqual(names, expected)
    def test_path(self):
        # DistPathLocator resolves already-installed dists on a path.
        fakes = os.path.join(HERE, 'fake_dists')
        sys.path.insert(0, fakes)
        try:
            edp = DistributionPath(include_egg=True)
            locator = DistPathLocator(edp)
            cases = ('babar', 'choxie', 'strawberry', 'towel-stuff',
                     'coconuts-aster', 'bacon', 'grammar', 'truffles',
                     'banana', 'cheese')
            for name in cases:
                d = locator.locate(name, True)
                r = locator.get_project(name)
                self.assertIsNotNone(d)
                self.assertEqual(r, { d.version: d })
            d = locator.locate('nonexistent')
            r = locator.get_project('nonexistent')
            self.assertIsNone(d)
            self.assertFalse(r)
        finally:
            sys.path.pop(0)
    @unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
    def test_aggregation(self):
        # Without merge, the first locator with a result wins; with merge,
        # results from all locators are combined.
        d = os.path.join(HERE, 'fake_archives')
        loc1 = DirectoryLocator(d)
        loc2 = SimpleScrapingLocator('https://pypi.python.org/simple/',
                                     timeout=5.0)
        locator = AggregatingLocator(loc1, loc2)
        exp1 = os.path.join(HERE, 'fake_archives', 'subdir',
                            'subsubdir', 'Flask-0.9.tar.gz')
        exp2 = 'https://pypi.python.org/packages/source/F/Flask/Flask-0.9.tar.gz'
        result = locator.get_project('flask')
        self.assertEqual(len(result), 1)
        self.assertIn('0.9', result)
        dist = result['0.9']
        self.assertEqual(dist.name, 'Flask')
        self.assertEqual(dist.version, '0.9')
        scheme, _, path, _, _, _ = urlparse(dist.source_url)
        self.assertEqual(scheme, 'file')
        self.assertEqual(os.path.normcase(url2pathname(path)),
                         os.path.normcase(exp1))
        locator.merge = True
        locator._cache.clear()
        result = locator.get_project('flask')
        self.assertGreater(len(result), 1)
        self.assertIn('0.9', result)
        dist = result['0.9']
        self.assertEqual(dist.name, 'Flask')
        self.assertEqual(dist.version, '0.9')
        self.assertEqual(dist.source_url, exp2)
        return
        # The following code is slow because it has
        # to get all the dist names by scraping :-(
        n1 = loc1.get_distribution_names()
        n2 = loc2.get_distribution_names()
        self.assertEqual(locator.get_distribution_names(), n1 | n2)
    def test_dependency_finder(self):
        locator = AggregatingLocator(
            JSONLocator(),
            SimpleScrapingLocator('https://pypi.python.org/simple/',
                                  timeout=3.0),
            scheme='legacy')
        finder = DependencyFinder(locator)
        dists, problems = finder.find('irc (== 5.0.1)')
        self.assertFalse(problems)
        actual = sorted([d.name for d in dists])
        self.assertEqual(actual, ['hgtools', 'irc',
                                  'pytest-runner'])
        dists, problems = finder.find('irc (== 5.0.1)',
                                      meta_extras=[':test:'])
        self.assertFalse(problems)
        actual = sorted([d.name for d in dists])
        self.assertEqual(actual, ['hgtools', 'irc',
                                  'py', 'pytest',
                                  'pytest-runner'])
        # Any topological ordering consistent with the dependencies is valid.
        g = make_graph(dists)
        slist, cycle = g.topological_sort()
        self.assertFalse(cycle)
        names = [d.name for d in slist]
        expected = set([
            ('hgtools', 'py', 'pytest', 'pytest-runner', 'irc'),
            ('py', 'hgtools', 'pytest', 'pytest-runner', 'irc'),
            ('hgtools', 'py', 'pytest-runner', 'pytest', 'irc'),
            ('py', 'hgtools', 'pytest-runner', 'pytest', 'irc')
        ])
        self.assertIn(tuple(names), expected)
        # Test with extras
        dists, problems = finder.find('Jinja2 (== 2.6)')
        self.assertFalse(problems)
        actual = sorted([d.name_and_version for d in dists])
        self.assertEqual(actual, ['Jinja2 (2.6)'])
        dists, problems = finder.find('Jinja2 [i18n] (== 2.6)')
        self.assertFalse(problems)
        actual = sorted([d.name_and_version for d in dists])
        self.assertEqual(actual[-2], 'Jinja2 (2.6)')
        self.assertTrue(actual[-1].startswith('pytz ('))
        self.assertTrue(actual[0].startswith('Babel ('))
        actual = [d.build_time_dependency for d in dists]
        self.assertEqual(actual, [False, False, False])
        # Now test with extra in dependency
        locator.clear_cache()
        dummy = make_dist('dummy', '0.1')
        dummy.metadata.run_requires = [{'requires': ['Jinja2 [i18n]']}]
        dists, problems = finder.find(dummy)
        self.assertFalse(problems)
        actual = sorted([d.name_and_version for d in dists])
        self.assertTrue(actual[0].startswith('Babel ('))
        locator.clear_cache()
        dummy.metadata.run_requires = [{'requires': ['Jinja2']}]
        dists, problems = finder.find(dummy)
        self.assertFalse(problems)
        actual = sorted([d.name_and_version for d in dists])
        self.assertTrue(actual[0].startswith('Jinja2 ('))
    def test_get_all_dist_names(self):
        for url in (None, PYPI_RPC_HOST):
            try:
                all_dists = get_all_distribution_names(url)
            except Exception: # pragma: no cover
                raise unittest.SkipTest('PyPI XML-RPC not available')
            self.assertGreater(len(all_dists), 0)
    def test_url_preference(self):
        # https beats http; non-pypi hosts beat pypi; later paths beat earlier.
        cases = (('http://netloc/path', 'https://netloc/path'),
                 ('http://pypi.python.org/path', 'http://netloc/path'),
                 ('http://netloc/B', 'http://netloc/A'))
        for url1, url2 in cases:
            self.assertEqual(default_locator.prefer_url(url1, url2), url1)
    def test_prereleases(self):
        locator = AggregatingLocator(
            JSONLocator(),
            SimpleScrapingLocator('https://pypi.python.org/simple/',
                                  timeout=3.0),
            scheme='legacy')
        REQT = 'SQLAlchemy (>0.5.8, < 0.6)'
        finder = DependencyFinder(locator)
        # Only pre-releases satisfy this range, so plain locate fails ...
        d = locator.locate(REQT)
        self.assertIsNone(d)
        # ... but succeeds when pre-releases are allowed.
        d = locator.locate(REQT, True)
        self.assertIsNotNone(d)
        self.assertEqual(d.name_and_version, 'SQLAlchemy (0.6beta3)')
        dist = make_dist('dummy', '0.1')
        dist.metadata.run_requires = [{'requires': [REQT]}]
        dists, problems = finder.find(dist, prereleases=True)
        self.assertFalse(problems)
        actual = sorted(dists, key=lambda o: o.name_and_version)
        self.assertEqual(actual[0].name_and_version, 'SQLAlchemy (0.6beta3)')
        dists, problems = finder.find(dist)
        # Test changed since now prereleases as found as a last resort.
        #self.assertEqual(dists, set([dist]))
        #self.assertEqual(len(problems), 1)
        #problem = problems.pop()
        #self.assertEqual(problem, ('unsatisfied', REQT))
        self.assertEqual(dists, set([actual[0], dist]))
        self.assertFalse(problems)
    def test_dist_reqts(self):
        r = 'config (<=0.3.5)'
        dist = default_locator.locate(r)
        self.assertIsNotNone(dist)
        self.assertIsNone(dist.extras)
        self.assertTrue(dist.matches_requirement(r))
        self.assertFalse(dist.matches_requirement('config (0.3.6)'))
    def test_dist_reqts_extras(self):
        r = 'config[doc,test](<=0.3.5)'
        dist = default_locator.locate(r)
        self.assertIsNotNone(dist)
        self.assertTrue(dist.matches_requirement(r))
        self.assertEqual(dist.extras, ['doc', 'test'])
if __name__ == '__main__': # pragma: no cover
    import logging
    # Verbose locator logging to a file when the module is run directly.
    logging.basicConfig(level=logging.DEBUG, filename='test_locators.log',
                        filemode='w', format='%(message)s')
    unittest.main()
| 13,580 | 4,441 |
"""AWS S3 Plugin."""
import os
from typing import List
from tempfile import NamedTemporaryFile
from netskope.integrations.cls.plugin_base import (
PluginBase,
ValidationResult,
PushResult,
)
from .utils.aws_s3_validator import (
AWSS3Validator,
)
from .utils.aws_s3_client import AWSS3Client
class AWSS3Plugin(PluginBase):
    """CLS plugin that stores transformed Netskope alerts/events in AWS S3."""
    def transform(self, raw_data, data_type, subtype) -> List:
        """Transform the raw netskope JSON data into target platform supported data formats.

        Args:
            raw_data (list): The raw data to be transformed.
            data_type (str): The type of data to be ingested (alert/event).
            subtype (str): The subtype of data to be ingested (DLP, anomaly etc. in case of alerts).

        Returns:
            List: list of transformed data.
        """
        # S3 stores the records as-is; no per-record transformation needed.
        return raw_data
    def push(self, transformed_data, data_type, subtype) -> PushResult:
        """Push the transformed_data to the 3rd party platform.

        Spools the records into a temporary file which the S3 client then
        uploads by name; the spool file is always closed and removed.
        """
        try:
            aws_client = AWSS3Client(
                self.configuration, self.logger, self.proxy
            )
            # delete=False because the client reopens the file by its name.
            temp_obj_file = NamedTemporaryFile("wb", delete=False)
            for data in transformed_data:
                temp_obj_file.write(data)
            temp_obj_file.flush()
            try:
                aws_client.push(temp_obj_file.name, data_type, subtype)
            finally:
                # Clean up the spool file whether or not the upload succeeded.
                temp_obj_file.close()
                os.unlink(temp_obj_file.name)
        except Exception as e:
            self.logger.error(f"Error while pushing to AWS S3: {e}")
            raise
    def validate(self, configuration: dict) -> ValidationResult:
        """Validate the configuration parameters dict.

        Checks each required key for presence/type/non-emptiness, validates
        the region, size and duration limits, then verifies the credentials
        and bucket access against AWS itself.
        """
        aws_validator = AWSS3Validator(self.logger, self.proxy)
        if (
            "aws_public_key" not in configuration
            or not isinstance(configuration["aws_public_key"], str)
            or not configuration["aws_public_key"].strip()
        ):
            self.logger.error(
                "AWS S3 Plugin: Validation error occurred. Error: "
                "Invalid AWS Access Key ID (Public Key) found in the configuration parameters."
            )
            return ValidationResult(
                success=False,
                message="Invalid AWS Access Key ID (Public Key) provided.",
            )
        if (
            "aws_private_key" not in configuration
            or not isinstance(configuration["aws_private_key"], str)
            or not configuration["aws_private_key"].strip()
        ):
            self.logger.error(
                "AWS S3 Plugin: Validation error occurred. Error: "
                "Invalid AWS Secret Access Key (Private Key) found in the configuration parameters."
            )
            return ValidationResult(
                success=False,
                message="Invalid AWS Secret Access Key (Private Key) provided.",
            )
        if (
            "region_name" not in configuration
            or not isinstance(configuration["region_name"], str)
            or not aws_validator.validate_region_name(
                configuration["region_name"]
            )
        ):
            self.logger.error(
                "AWS S3 Plugin: Validation error occurred. Error: "
                "Invalid Region Name found in the configuration parameters."
            )
            return ValidationResult(
                success=False,
                message="Invalid Region Name provided.",
            )
        if (
            "bucket_name" not in configuration
            or not isinstance(configuration["bucket_name"], str)
            or not configuration["bucket_name"].strip()
        ):
            self.logger.error(
                "AWS S3 Plugin: Validation error occurred. Error: "
                "Invalid Bucket Name found in the configuration parameters."
            )
            return ValidationResult(
                success=False, message="Invalid Bucket Name provided."
            )
        if (
            "obj_prefix" not in configuration
            or not isinstance(configuration["obj_prefix"], str)
            or not configuration["obj_prefix"].strip()
        ):
            self.logger.error(
                "AWS S3 Plugin: Validation error occurred. Error: "
                "Invalid Object Prefix found in the configuration parameters."
            )
            return ValidationResult(
                success=False, message="Invalid Object Prefix provided."
            )
        if (
            "max_file_size" not in configuration
            or not aws_validator.validate_max_file_size(
                configuration["max_file_size"]
            )
        ):
            self.logger.error(
                "AWS S3 Plugin: Validation error occurred. Error: "
                "Invalid Max File Size found in the configuration parameters."
            )
            return ValidationResult(
                success=False, message="Invalid Max File Size provided."
            )
        if (
            "max_duration" not in configuration
            or not aws_validator.validate_max_duration(
                configuration["max_duration"]
            )
        ):
            # Fixed copy-paste: these messages previously said "Max File Size".
            self.logger.error(
                "AWS S3 Plugin: Validation error occurred. Error: "
                "Invalid Max Duration found in the configuration parameters."
            )
            return ValidationResult(
                success=False, message="Invalid Max Duration provided."
            )
        # Verify the credentials actually work against AWS.
        try:
            aws_validator.validate_credentials(
                configuration["aws_public_key"].strip(),
                configuration["aws_private_key"].strip(),
            )
        except Exception:
            self.logger.error(
                "AWS S3 Plugin: Validation error occurred. Error: "
                "Invalid AWS Access Key ID (Public Key) and AWS Secret Access Key "
                "(Private Key) found in the configuration parameters."
            )
            return ValidationResult(
                success=False,
                message="Invalid AWS Access Key ID (Public Key) or AWS Secret Access "
                "Key (Private Key) found in the configuration parameters.",
            )
        # Verify the bucket exists and is reachable with these settings.
        try:
            aws_client = AWSS3Client(configuration, self.logger, self.proxy)
            aws_client.get_bucket()
        except Exception as err:
            self.logger.error(
                f"AWS S3 Plugin: Validation error occurred. Error: {err}"
            )
            return ValidationResult(
                success=False,
                message="Validation Error. Check logs for more details.",
            )
        return ValidationResult(success=True, message="Validation successful.")
| 6,946 | 1,662 |
"""Database fixtures."""
import pytest
@pytest.fixture(autouse=True)
def _auto_clean_specs_table(_clean_specs_table):
    """Autouses _clean_specs_table.

    Requesting `_clean_specs_table` here makes it run for every test in
    scope without each test declaring it explicitly.
    """
| 160 | 62 |
import logging
import uvicorn
from fastapi_offline import FastAPIOffline as FastAPI
#from cdr_plugin_folder_to_folder.api.users import router
from osbot_utils.utils.Misc import to_int
from cdr_plugin_folder_to_folder.api.routes.Processing import router as router_processing
from cdr_plugin_folder_to_folder.api.routes.Pre_Processor import router as router_pre_processing
from cdr_plugin_folder_to_folder.api.routes.File_Distributor import router as router_file_distribution
from cdr_plugin_folder_to_folder.api.routes.Health import router as router_health
from cdr_plugin_folder_to_folder.api.routes.Configure import router as router_configure
class Server:
    """Wires the API routers into a FastAPI app and runs it with uvicorn."""
    def __init__(self, app, port="8880", reload=True):
        self.host = "0.0.0.0"
        self.log_level = "info"
        self.port = to_int(port) # todo: move to globally configurable value (set via Env variables)
        self.app = app
        self.reload = reload # automatically reloads server on code changes
    def fix_logging_bug(self):
        # there were duplicated entries on logs
        # - https://github.com/encode/uvicorn/issues/614
        # - https://github.com/encode/uvicorn/issues/562
        logging.getLogger().handlers.clear() # todo: see side effects of this
    def setup(self):
        """Register all API routers and apply the logging workaround."""
        self.app.include_router(router_processing )
        self.app.include_router(router_pre_processing )
        self.app.include_router(router_file_distribution)
        self.app.include_router(router_health )
        self.app.include_router(router_configure )
        self.fix_logging_bug()
        return self
    def start(self):
        # Run via the import string so uvicorn's reload mode can re-import
        # the module-level `app`.
        uvicorn.run("cdr_plugin_folder_to_folder.api.Server:app", host=self.host, port=self.port, log_level=self.log_level, reload=self.reload)
# todo: refactor this into a separate class which can also be used by the individual sections (i.e. tags)
# OpenAPI tag descriptions shown in the generated docs UI.
tags_metadata = [
    {"name": "Configuration" , "description": "Step 0"},
    {"name": "Pre Processor" , "description": "Step 1"},
    {"name": "Processing" , "description": "Step 2"},
    {"name": "File Distributor", "description": "Util methods"},
]
# we need to do this here so that when unicorn reload is enabled the "cdr_plugin_folder_to_folder.api.Server:app" has an fully setup instance of the Server object
app = FastAPI(openapi_tags=tags_metadata)
server = Server(app)
server.setup()
def run_if_main():
    # Guarded entry point; start() blocks inside uvicorn.run.
    if __name__ == "__main__":
        server.start()
run_if_main()
# Evaluate using Shuffle Split Cross Validation
from pandas import read_csv
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
filename = 'pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
# First 8 columns are features; the last column ('class') is the label.
X = array[:,0:8]
Y = array[:,8]
# 10 random 67/33 train/test splits; fixed seed for reproducibility.
n_splits = 10
test_size = 0.33
seed = 7
kfold = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)
model = LogisticRegression(solver='liblinear')
results = cross_val_score(model, X, Y, cv=kfold)
# Report mean accuracy and its standard deviation across the splits.
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean()*100.0, results.std()*100.0))
# coding: utf-8
from __future__ import (
unicode_literals, division, print_function, absolute_import
)
import re
import pytest
from ww import s, g, f
def test_lshift():
    """s >> dedents a triple-quoted block and strips surrounding blank lines."""
    res = s >> """
    This is a long text
    And it's not indented
    """
    assert isinstance(res, s)
    # Bug fix: the original line compared and discarded the result
    # (`s == "..."` with no assert); pin the expected dedented value.
    assert res == "This is a long text\nAnd it's not indented"
def test_split():
    """s.split accepts multiple separators, regex patterns, maxsplit and
    re flags, returning a lazy g generator."""
    gen = s('test').split(',')
    assert isinstance(gen, g)
    assert gen.list() == ['test']
    assert s('test,test').split(',').list() == ['test', 'test']
    assert s('a,b,c').split(',', maxsplit=1).list() == ['a', 'b,c']
    assert s('a,b,c').split('b,').list() == ['a,', 'c']
    assert s('a,b;c/d').split(',', ';', '/').list() == ['a', 'b', 'c', 'd']
    assert s(r'a1b33c-d').split(r'\d+').list() == ['a', 'b', 'c-d']
    assert s(r'a1b33c-d').split(r'\d+', '-').list() == ['a', 'b', 'c', 'd']
    # Flags may be given as a string shorthand or as re module constants.
    assert s(r'cAt').split('a', flags='i').list() == ['c', 't']
    assert s(r'cAt').split('a', flags=re.I).list() == ['c', 't']
    chunks = s('a,b;c/d=a,b;c/d').split(',', ';', '/', maxsplit=3)
    assert chunks.list() == ['a', 'b', 'c', 'd=a,b;c/d']
    # Separators must be strings.
    with pytest.raises(TypeError):
        s('foo').split(1)
def test_maxsplit_with_regex():
    """maxsplit is honoured across a mix of literal and regex separators."""
    parts = s('a,b;c/d=a,b;c/d').split(',', ';', '[/=]', maxsplit=4)
    assert parts.list() == ['a', 'b', 'c', 'd', 'a,b;c/d']
def test_replace():
    """s.replace supports multiple patterns, regex, maxreplace and flags."""
    st = s('test').replace(',', '')
    assert isinstance(st, s)
    assert st == 'test'
    assert s('test,test').replace(',', ';') == 'test;test'
    assert s('a,b,c').replace(',', ';', maxreplace=1) == 'a;b,c'
    assert s('a,b,c').replace(',b,', ';') == 'a;c'
    # Pattern and replacement tuples are paired element-wise ...
    assert s('a,b;c/d').replace((',', ';', '/'), (',', ',', ',')) == 'a,b,c,d'
    # ... or a single replacement is applied to every pattern.
    assert s('a,b;c/d').replace((',', ';', '/'), ',') == 'a,b,c,d'
    assert s(r'a1b33c-d').replace(r'\d+', ',') == 'a,b,c-d'
    assert s(r'a1b33c-d').replace((r'\d+', '-'), ',') == 'a,b,c,d'
    assert s(r'cAt').replace('a', 'b', flags='i') == 'cbt'
    assert s(r'cAt').replace('a', 'b', flags=re.I) == 'cbt'
    # Mismatched tuple lengths are rejected.
    with pytest.raises(ValueError):
        s(r'cAt').replace(('a', 'b', 'c'), ('b', 'b'))
def test_replace_with_maxplit():
    """maxreplace caps substitutions for a regex pattern.

    (Name typo "maxplit" kept — renaming would change the test id.)
    """
    string = s(r'a-1,b-3,3c-d')
    assert string.replace(('[,-]'), '', maxreplace=3) == 'a1b3,3c-d'
def test_replace_with_callback():
    """A callable replacement receives the match object, like re.sub."""
    string = s(r'a-1,b-3,3c-d')
    def upper(match):
        return match.group().upper()
    assert string.replace(('[ab]'), upper, maxreplace=3) == 'A-1,B-3,3c-d'
def test_join():
    """s.join accepts any iterable plus optional template/formatter kwargs."""
    assert s(';').join('abc') == "a;b;c"
    assert s(';').join(range(3)) == "0;1;2"
    assert s(';').join(range(3), template="{:.1f}") == "0.0;1.0;2.0"
    assert s(';').join(range(3), formatter=lambda s, t: "a") == "a;a;a"
def test_from_bytes():
    """s.from_bytes decodes bytes with an explicit, mandatory encoding."""
    assert isinstance(s.from_bytes(b'abc', 'ascii'), s)
    assert s.from_bytes(b'abc', 'ascii') == 'abc'
    assert s.from_bytes('é'.encode('utf8'), 'utf8') == 'é'
    # A wrong encoding surfaces the underlying decode error.
    with pytest.raises(UnicodeDecodeError):
        s.from_bytes('é'.encode('cp850'), 'ascii')
    # Omitting the encoding is rejected outright.
    with pytest.raises(ValueError):
        s.from_bytes('é'.encode('cp850'))
def test_format():
    """s.format behaves like str.format; f() formats using the caller's
    local variables."""
    foo = 1
    bar = [1]
    string = s('{foo} {bar[0]:.1f}')
    assert isinstance(string.format(foo=foo, bar=bar), s)
    assert string.format(foo=foo, bar=bar) == "1 1.0"
    assert f(string) == "1 1.0"
    assert isinstance(f(string), s)
    assert f('{foo} {bar[0]:.1f}') == "1 1.0"
def test_add():
    """Concatenation works with str on either side; bytes and ints raise."""
    string = s('foo')
    assert string + 'bar' == 'foobar'
    with pytest.raises(TypeError):
        string + b'bar'
    with pytest.raises(TypeError):
        string + 1
    # Reflected addition behaves the same way.
    assert 'bar' + string == 'barfoo'
    with pytest.raises(TypeError):
        b'bar' + string
    with pytest.raises(TypeError):
        1 + string
def test_tobool():
    """to_bool maps common truthy/falsy spellings; unknown values raise
    unless a default is supplied."""
    conversions = {
        '1': True,
        '0': False,
        'true': True,
        'false': False,
        'on': True,
        'off': False,
        'yes': True,
        'no': False,
        '': False
    }
    for key, val in conversions.items():
        assert s(key).to_bool() == val
    # Unrecognised values fall back to the default when one is given ...
    assert s('foo').to_bool(default=True) is True
    # ... and raise otherwise.
    with pytest.raises(ValueError):
        s('foo').to_bool()
| 4,178 | 1,761 |
import ast
from sherlock.codelib.analyzer.variable import Type
from sherlock.codelib.system_function import system_function
@system_function('range', Type.LIST, Type.NUMBER, Type.NUMBER)
def system_range(g, start, end):
    """Translate `range(start, end)` to bash: `seq start (end - 1)` matches
    Python's exclusive upper bound."""
    return '$(seq %s $(( %s - 1 )))' % (g.dispatch(start), g.dispatch(end))
@system_function('print', Type.VOID, Type.ANY)
def system_print(g, msg):
    """Translate `print(msg)` to a bash `echo` of the dispatched expression."""
    return 'echo %s' % g.dispatch(msg)
@system_function('pipe', Type.VOID, Type.ANY, Type.ANY)
def system_pipe(g, before, after):
    """Translate `pipe(a, b)` to the bash pipeline `a | b`."""
    return '%s | %s' % (g.dispatch(before), g.dispatch(after))
| 566 | 206 |
from rest_framework.exceptions import APIException
from django.utils.translation import ugettext_lazy as _
from rest_framework.status import HTTP_423_LOCKED
# Create your errors here.
class LockedError(APIException):
    """API error returned with HTTP 423 Locked.

    NOTE(review): `default_detail` and `default_code` use DRF's
    PermissionDenied wording rather than describing a locked resource —
    confirm this is intentional before changing.
    """
    status_code = HTTP_423_LOCKED
    default_detail = _('You do not have permission to perform this action.')
    default_code = 'permission_denied'
| 370 | 109 |
from services.volt.models import AccessDevice, VOLTDevice
from xosresource import XOSResource
class XOSAccessDevice(XOSResource):
    """TOSCA resource adapter for AccessDevice, keyed by
    (volt_device, uplink, vlan) since the model has no name field."""
    provides = "tosca.nodes.AccessDevice"
    xos_model = AccessDevice
    copyin_props = ["uplink", "vlan"]
    name_field = None
    def get_xos_args(self, throw_exception=True):
        """Build model kwargs, resolving the VOLTDevice the node is a member of."""
        args = super(XOSAccessDevice, self).get_xos_args()
        volt_device_name = self.get_requirement("tosca.relationships.MemberOfDevice", throw_exception=throw_exception)
        if volt_device_name:
            args["volt_device"] = self.get_xos_object(VOLTDevice, throw_exception=throw_exception, name=volt_device_name)
        return args
    # AccessDevice has no name field, so we rely on matching the keys. We assume
    # the for a given VOLTDevice, there is only one AccessDevice per (uplink, vlan)
    # pair.
    def get_existing_objs(self):
        args = self.get_xos_args(throw_exception=False)
        volt_device = args.get("volt_device", None)
        uplink = args.get("uplink", None)
        vlan = args.get("vlan", None)
        # Only attempt a lookup when the full key is available.
        if (volt_device is not None) and (uplink is not None) and (vlan is not None):
            existing_obj = self.get_xos_object(AccessDevice, volt_device=volt_device, uplink=uplink, vlan=vlan, throw_exception=False)
            if existing_obj:
                return [ existing_obj ]
        return []
| 1,373 | 443 |
import pandas as pd
import math
from matplotlib import pyplot
# Naive (persistence) one-step forecast: predict each month's passengers
# with the previous month's observed value.
ts = pd.read_csv("airline-passengers.csv", header=0, index_col=0)
nOfPassengers = ts["Passengers"]
values = nOfPassengers.values.tolist()
# No prior observation exists for the first point, so it is NaN.
forecast = [math.nan]
for i in range(0, len(values) - 1):
    forecast.append(values[i])
pyplot.plot(values)
pyplot.plot(forecast)
print(values)
print(forecast)
# Mean error, mean absolute error and mean squared error of the forecast.
me = 0
mae = 0
mse = 0
n = len(forecast)
for i in range(1, n):
    diff = values[i] - forecast[i]
    me = me + diff
    mae = mae + abs(diff)
    mse = mse + diff * diff
# NOTE(review): the sums cover n-1 terms (index 0 skipped) but are divided
# by n — confirm whether the intended divisor is (n - 1).
print(me / n, mae / n, mse / n)
pyplot.show()
| 587 | 241 |
"""create exchange table
Revision ID: bfa53193d3bf
Revises: b97a89b20fa2
Create Date: 2019-09-22 01:17:06.735174
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bfa53193d3bf'
down_revision = 'b97a89b20fa2'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `exchanges` table (integer PK `id`, string `name`)."""
    op.create_table(
        "exchanges",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("name", sa.String),
    )
def downgrade():
    """Drop the `exchanges` table, reversing upgrade()."""
    op.drop_table("exchanges")
| 513 | 222 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import time
import logging
import numpy as np
from biosppy.signals import ecg
from biosppy.storage import load_txt
import matplotlib.pyplot as plt
# Configure root logging once for the whole demo; DEBUG so every step below shows up.
logging.basicConfig(level = logging.DEBUG,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def test_rpeaks_simple(data_path):
    """Detect ECG R-peaks with two biosppy segmenters and plot the results.

    Loads the signal from *data_path* (biosppy text format), runs the
    Christov and Hamilton QRS detectors, demonstrates both ways of pulling
    the peak-index array out of biosppy's ReturnTuple, then plots the first
    3600 samples with each detector's peaks overlaid.
    """
    signal, mdata = load_txt(data_path)
    logging.info("--------------------------------------------------")
    logging.info("载入信号-%s, 长度 = %d " % (data_path, len(signal)))
    fs = 360 # signal sampling rate: 360 Hz
    logging.info("调用 christov_segmenter 进行R波检测 ...")
    tic = time.time()
    rpeaks = ecg.christov_segmenter(signal, sampling_rate=fs)
    toc = time.time()
    logging.info("完成. 用时: %f 秒. " % (toc - tic))
    # Called this way, rpeaks is a biosppy.utils.ReturnTuple (biosppy's
    # own named-tuple-like container).
    logging.info("直接调用 christov_segmenter 返回类型为 " + str(type(rpeaks)))
    # Two ways to extract the R-peak index sequence:
    # 1) take the first item of the return value:
    logging.info("使用第1种方式取R波位置序列 ... ")
    rpeaks_indices_1 = rpeaks[0]
    logging.info("完成. 结果类型为 " + str(type(rpeaks_indices_1)))
    # 2) call ReturnTuple.as_dict() to get a Python OrderedDict
    logging.info("使用第2种方式取R波位置序列 ... ")
    rpeaks_indices_2 = rpeaks.as_dict()
    # ... then index it with the parameter name documented by biosppy ("rpeaks").
    rpeaks_indices_2 = rpeaks_indices_2["rpeaks"]
    logging.info("完成. 结果类型为 " + str(type(rpeaks_indices_2)))
    # Verify that both extraction methods give the same result:
    check_sum = np.sum(rpeaks_indices_1 == rpeaks_indices_2)
    if check_sum == len(rpeaks_indices_1):
        logging.info("两种取值方式结果相同 ... ")
    else:
        logging.info("两种取值方式结果不同,退出 ...")
        sys.exit(1)
    # hamilton_segmenter shares the same interface as christov_segmenter.
    logging.info("调用接口一致的 hamilton_segmenter 进行R波检测")
    tic = time.time()
    rpeaks = ecg.hamilton_segmenter(signal, sampling_rate=fs)
    toc = time.time()
    logging.info("完成. 用时: %f 秒. " % (toc - tic))
    rpeaks_indices_3 = rpeaks.as_dict()["rpeaks"]
    # Plot the waveform and detected R-peak positions.
    num_plot_samples = 3600
    logging.info("绘制波形图和检测的R波位置 ...")
    sig_plot = signal[:num_plot_samples]
    rpeaks_plot_1 = rpeaks_indices_1[rpeaks_indices_1 <= num_plot_samples]
    plt.figure()
    plt.plot(sig_plot, "g", label="ECG")
    plt.grid(True)
    plt.plot(rpeaks_plot_1, sig_plot[rpeaks_plot_1], "ro", label="christov_segmenter")
    rpeaks_plot_3 = rpeaks_indices_3[rpeaks_indices_3 <= num_plot_samples]
    plt.plot(rpeaks_plot_3, sig_plot[rpeaks_plot_3], "b^", label="hamilton_segmenter")
    plt.legend()
    plt.title(data_path)
    plt.show()
    logging.info("完成.")
    return
if __name__ == '__main__':
    # Run the demo once per MIT-style record file.
    for record_path in ("./data/ecg_records_117.txt",
                        "./data/ecg_records_103.txt",
                        "./data/ecg_records_119.txt"):
        test_rpeaks_simple(record_path)
| 2,831 | 1,341 |
from django.apps import AppConfig
class BookOutletConfig(AppConfig):
    """Django application configuration for the ``book_outlet`` app."""

    # 64-bit auto-incrementing primary keys by default.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'book_outlet'
| 153 | 49 |
from app import db
from app.models.base_model import BaseModel
class File(BaseModel):
    """A file that belongs to a package, with live ffmpeg progress stats."""

    __tablename__ = "files"

    filename = db.Column(db.String(300))  # includes path!
    status = db.Column(db.Integer())
    size = db.Column(db.Integer())
    position = db.Column(db.Integer())

    # foreign keys
    package_id = db.Column(db.Integer, db.ForeignKey("packages.id"))

    # ffmpeg attributes
    ffmpeg_eta = db.Column(db.Float(), default=0)  # in seconds so the user can format it
    ffmpeg_progress = db.Column(db.Float(), default=0)
    ffmpeg_bitrate = db.Column(db.Float(), default=0)
    ffmpeg_time = db.Column(db.Integer(), default=0)
    ffmpeg_size = db.Column(db.Float(), default=0)
    ffmpeg_fps = db.Column(db.Integer(), default=0)

    def __repr__(self):
        return "<File %r>" % self.id

    def clear(self):
        """Reset every ffmpeg progress field back to zero."""
        for field in ("ffmpeg_progress", "ffmpeg_eta", "ffmpeg_bitrate",
                      "ffmpeg_time", "ffmpeg_size", "ffmpeg_fps"):
            setattr(self, field, 0)
| 1,076 | 370 |
import time
import json
from copy import deepcopy
class Task(object):
    """A unit of work handled by the task manager.

    Tasks serialize to and from plain dicts, JSON strings and UTF-8 byte
    strings, and can spawn follow-up ("child") tasks via create_child().
    """

    def __init__(self, t_id=None, user_id=None, product_id=None,
                 tag=None, data=None,
                 timestamp=None, update_timestamp=None, error=None):
        """Initialize the Task object.

        Parameters
        ----------
        t_id: int
            transaction id this task belongs to
        user_id: int
            user_id who submitted this task, if applicable.
        product_id: int
            product_id of the product
        tag: str
            String specifying the task. Unique for each task.
        data: dict
            Data for specific products
        timestamp: float
            Timestamp of task creation from `time.time()`
        update_timestamp: float
            Timestamp of task update (via `create_child()`) from `time.time()`
        error: str
            a serialized error string in case the task failed while executing
        """
        self.t_id = t_id
        self.user_id = user_id
        self.product_id = product_id
        self.tag = tag
        # Falsy/missing timestamp falls back to "now" (whole seconds).
        self.timestamp = timestamp or int(time.time())
        self.update_timestamp = update_timestamp
        self.data = data
        self.error = error

    def to_dict(self):
        """Return the task as a plain, JSON-serializable dict."""
        return {'tag': self.tag,
                'timestamp': self.timestamp,
                'update_timestamp': self.update_timestamp,
                'data': self.data,
                't_id': self.t_id,
                'user_id': self.user_id,
                'product_id': self.product_id,
                'error': self.error}

    def to_json(self):
        """Serialize the task to a JSON string."""
        return json.dumps(self.to_dict())

    def to_bytes(self):
        """Serialize the task to UTF-8 encoded JSON bytes."""
        return self.to_json().encode('utf-8')

    def read_dict(self, d):
        """Re-initialize this instance from dict *d* (requires 'tag' and
        'timestamp'; every other field defaults to None). Returns self."""
        Task.__init__(
            self,
            t_id=d.get('t_id'),
            user_id=d.get('user_id'),
            product_id=d.get('product_id'),
            tag=d['tag'],
            data=d.get('data'),
            timestamp=d['timestamp'],
            update_timestamp=d.get('update_timestamp'),
            error=d.get('error'))
        return self

    def read_bytes(self, bytestring):
        """Re-initialize from UTF-8 JSON bytes. Returns self."""
        return self.read_dict(json.loads(bytestring.decode('utf-8')))

    def read_json(self, json_path):
        """Re-initialize from the JSON file at *json_path*. Returns self."""
        with open(json_path, 'r') as f:
            payload = json.load(f)
        return self.read_dict(payload)

    def create_child(self, tag=None):
        """Creates and returns a follow up task object."""
        child = deepcopy(self)
        # Default child tag derives from the parent's tag.
        child.tag = tag if tag is not None else self.tag + '__child'
        child.update_timestamp = int(time.time())
        return child

    def __str__(self):
        return str(self.to_dict())

    def __repr__(self):
        return str(self)
| 3,126 | 903 |
import math
import torch
from .Module import Module
from .utils import clear
class Linear(Module):
    """Legacy-nn style fully connected layer: output = input @ weight.T + bias.

    Python port of Lua torch's ``nn.Linear``. Expects 2-D input of shape
    (nframe, inputSize) and produces output of shape (nframe, outputSize).
    NOTE(review): uses the deprecated positional (beta, alpha) forms of
    ``addmm_``/``addmv_`` throughout — confirm against the torch version
    this project pins.
    """

    def __init__(self, inputSize, outputSize, bias=True):
        super(Linear, self).__init__()
        self.weight = torch.Tensor(outputSize, inputSize)
        self.gradWeight = torch.Tensor(outputSize, inputSize)
        # With bias=False, both the bias and its gradient are absent.
        self.bias = torch.Tensor(outputSize) if bias else None
        self.gradBias = torch.Tensor(outputSize) if bias else None
        self.reset()
        # Vector of ones (one per batch row), used to broadcast the bias
        # over the batch via an outer-product update; lazily allocated.
        self.addBuffer = None

    def noBias(self):
        """Remove the bias term (and its gradient) in place; returns self."""
        self.bias = None
        self.gradBias = None
        return self

    def reset(self, stdv=None):
        """Re-initialize weight (and bias) uniformly in [-stdv, stdv].

        If *stdv* is given it is scaled by sqrt(3); otherwise the default
        1/sqrt(fan_in) is used.
        """
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.uniform_(-stdv, stdv)
        return self

    def _updateAddBuffer(self, input):
        # Keep the ones-vector sized to the current batch.
        nframe = input.size(0)
        if self.addBuffer is None:
            self.addBuffer = input.new()
        if self.addBuffer.nelement() != nframe:
            self.addBuffer.resize_(nframe).fill_(1)

    def updateOutput(self, input):
        """Forward pass: output = input @ weight.T (+ bias per row)."""
        assert input.dim() == 2
        nframe = input.size(0)
        nelement = self.output.nelement()
        self.output.resize_(nframe, self.weight.size(0))
        # Zero only when the resize actually changed the element count.
        if self.output.nelement() != nelement:
            self.output.zero_()
        self._updateAddBuffer(input)
        # Legacy (beta=0, alpha=1) form: overwrite output with input @ W.T.
        self.output.addmm_(0, 1, input, self.weight.t())
        if self.bias is not None:
            # Outer product ones ⊗ bias adds the bias to every row.
            self.output.addr_(self.addBuffer, self.bias)
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward pass w.r.t. the input: gradInput = gradOutput @ weight."""
        if self.gradInput is None:
            return

        nelement = self.gradInput.nelement()
        self.gradInput.resize_as_(input)
        if self.gradInput.nelement() != nelement:
            self.gradInput.zero_()

        assert input.dim() == 2
        # Legacy (beta=0, alpha=1) form: overwrite gradInput.
        self.gradInput.addmm_(0, 1, gradOutput, self.weight)
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        """Accumulate parameter gradients, scaled by *scale*."""
        assert input.dim() == 2
        self.gradWeight.addmm_(scale, gradOutput.t(), input)
        if self.bias is not None:
            # update the size of addBuffer if the input is not the same size as the one we had in last updateGradInput
            self._updateAddBuffer(input)
            self.gradBias.addmv_(scale, gradOutput.t(), self.addBuffer)

    def clearState(self):
        """Free the cached ones-buffer before delegating to the base class."""
        clear(self, 'addBuffer')
        return super(Linear, self).clearState()

    def __repr__(self):
        return super(Linear, self).__repr__() + \
                '({} -> {})'.format(self.weight.size(1), self.weight.size(0)) + \
                (' without bias' if self.bias is None else '')
| 2,791 | 864 |
"""
Utilities for testing
"""
import numpy as np
from astropy.table import Table as apTable
from astropy.utils.diff import report_diff_values
def compare_tables(t1, t2):
    """ Compare two `astropy.table.Table` objects column by column.

    Parameters
    ----------
    t1 : `astropy.table.Table`
        One table
    t2 : `astropy.table.Table`
        Another table

    Returns
    -------
    identical : `bool`
        True if the tables are identical, False otherwise

    Notes
    -----
    Each column is flattened before comparison to avoid shape issues.
    """
    # Different column sets means different tables, full stop.
    if sorted(t1.colnames) != sorted(t2.colnames): #pragma: no cover
        return False
    return all(
        np.allclose(np.array(t1[name]).flat, np.array(t2[name]).flat)
        for name in t1.colnames
    )
def compare_table_dicts(d1, d2, strict=False):
    """ Compare all the tables in two `OrderedDict`, (`str`, `astropy.table.Table`)

    Parameters
    ----------
    d1 : `OrderedDict`, (`str`, `astropy.table.Table`)
        One dictionary of tables
    d2 : `OrderedDict`, (`str`, `astropy.table.Table`)
        Another dictionary of tables
    strict : `bool`
        If True use astropy's value-diff report; otherwise the loose
        flattened-column comparison from `compare_tables`.

    Returns
    -------
    identical : `bool`
        True if all the tables are identical, False otherwise
    """
    identical = True
    for key, table in d1.items():
        # Tolerate case differences between the two dicts' keys.
        try:
            other = d2[key]
        except KeyError: #pragma: no cover
            other = d2[key.upper()]
        if strict: #pragma: no cover
            identical &= report_diff_values(table, other)
        else: #pragma: no cover
            identical &= compare_tables(table, other)
    return identical
def make_test_data():
    """ Make and return some test data

    Returns a dict with two random astropy tables: 'data' (scalar, vector
    and matrix columns, 1000 rows) and 'md' (a small two-column table).
    """
    nrow = 1000
    vect_size = 20
    mat_size = 5
    columns = {
        'scalar': np.random.uniform(size=nrow),
        'vect': np.random.uniform(size=nrow * vect_size).reshape(nrow, vect_size),
        'matrix': np.random.uniform(size=nrow * mat_size * mat_size).reshape(nrow, mat_size, mat_size),
    }
    table = apTable(columns)
    # Exercise scalar, None and list metadata values.
    table.meta['a'] = 1
    table.meta['b'] = None
    table.meta['c'] = [3, 4, 5]

    small_table = apTable({'a': np.ones(21), 'b': np.zeros(21)})
    small_table.meta['small'] = True
    return {'data': table, 'md': small_table}
| 2,349 | 793 |
__author__ = '__knh4vu__'
from helper import greeting
from helper3 import greeting3
from helper2 import greeting2
if __name__ == '__main__':
    # Emit the two-part greeting via the helper modules.
    for greet, word in ((greeting, 'hello'), (greeting2, 'There')):
        greet(word)
| 185 | 69 |
from textwrap import dedent
import pytest
from pylox.lox import Lox
# Base cases from https://github.com/munificent/craftinginterpreters/blob/master/test/inheritance/constructor.lox
TEST_SRC = dedent(
"""\
class A {
init(param) {
this.field = param;
}
test() {
print this.field;
}
}
class B < A {}
var b = B("value");
b.test(); // expect: value
"""
)
EXPECTED_STDOUTS = ["value"]
def test_constructor(capsys: pytest.CaptureFixture) -> None:
    """Inherited init: instantiating B must run A's constructor."""
    lox = Lox()
    lox.run(TEST_SRC)
    assert not lox.had_error
    assert not lox.had_runtime_error

    captured = capsys.readouterr().out.splitlines()
    assert captured == EXPECTED_STDOUTS
| 746 | 261 |
#! -*- coding:utf-8 -*-
"""Shortcuts for yuqing trials
.. moduleauthor:: Huan Di <hd@iamhd.top>
"""
from byq_trial.auth import SimpleAuth
from byq_trial.call import APICall
# __all__ must list names as *strings*; listing the objects themselves
# makes ``from byq_trial import *`` fail when Python tries to look the
# entries up by name.
__all__ = ["SimpleAuth", "APICall"]
| 209 | 87 |
#!/usr/bin/env python3
""" Import comunity modules. """
import os
import sys
import jinja2
import re
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, f"{HERE}")
def string_is_latest_or_version(check_string):
    """Return True if *check_string* is "latest" or a dotted version.

    Accepted versions have one to three dot-separated components of
    digits, where the last component may be a literal ``*`` wildcard
    (e.g. ``1``, ``1.2``, ``1.2.3``, ``1.2.*``).
    """
    # Raw string: "\d" / "\*" in a plain literal is an invalid escape
    # sequence (SyntaxWarning on modern Python).
    version_re = re.compile(r"^(\d+\.)?(\d+\.)?(\*|\d+)$")
    if check_string == "latest":
        return True
    return version_re.match(check_string) is not None
def get_template(file_name, searchpath=f"{HERE}/templates"):
    """ Load and return a Jinja template file. """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=searchpath))
    return env.get_template(file_name)
def get_dugaire_image_label(return_format="string"):
    """ Get the default label used when building images.

    return_format: "string" -> 'key=value', "dockerfile" -> 'key="value"',
    anything else -> {key: value} dict.
    """
    key = "builtwith"
    value = "dugaire"

    if return_format == "string":
        return f"{key}={value}"
    if return_format == "dockerfile":
        return f'{key}="{value}"'
    return {key: value}
| 1,196 | 392 |
# Exercise: build four lists, extend them, nest them and mutate them.
lista_filmes = ['vingadores ultimato', 'star wars', 'as branquelas', 'vovó... zona']
print("lista filmes: " ,lista_filmes)
print("\n")
lista_jogos = ['genshin impact', 'the witcher 3', 'dying light', 'destiny']
print("lista jogos: " ,lista_jogos)
print("\n")
lista_livros = ['o ultimo desejo', 'a espada do destino', 'o sangue dos elfos', 'a torre da andorinha']
print("lista livros: " ,lista_livros)
print("\n")
lista_esportes = ['futebol', 'vôlei', 'basquete']
print("lista esportes: " ,lista_esportes)
print("\n")
# Append two items to each list.
lista_filmes.append("carros 2")
lista_filmes.append("o exterminador do futuro")
print("lista filme atualizada: " ,lista_filmes)
print("\n")
lista_jogos.append("minecraft")
lista_jogos.append("terraria")
print("lista jogos atualizada: " ,lista_jogos)
print("\n")
lista_livros.append("tempo do desprezo")
lista_livros.append("a dama do lago")
print("lista livros atualizada: " ,lista_livros)
print("\n")
lista_esportes.append("ginastica")
lista_esportes.append("salto com vara")
print("lista esportes atualizada: " ,lista_esportes)
print("\n")
# A list holding all of the other lists (by reference, so later
# mutations of the inner lists are visible here too).
lista_geral = [lista_filmes, lista_jogos, lista_livros, lista_esportes]
print("lista geral atualizada: " ,lista_geral)
print("\n")
print("lista livros atualizada: " ,lista_livros[1])  # show one item of the books list
print("\n")
del lista_esportes[0]  # remove an item from the sports list
print("livros esportes atualizada: " ,lista_esportes)
print("\n")
# BUGFIX: create a real subjects list and append it to the master list;
# the original appended one big string literal instead of a list.
lista_disciplina = ['matematica', 'portugues', 'ingles', 'programação']
lista_geral.append(lista_disciplina)
print("lista geral atualizada: ",lista_geral)
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from ipaymu.forms import IpaymuForm
from ipaymu.models import IpaymuSessionID
from ipaymu.utils import save_session, verify_session, IpaymuParamsBuilder
class IpaymuTest(TestCase):
    """Tests for the ipaymu payment app: URL endpoints and session helpers."""

    # Pre-loads the session id referenced by self.good_sessid below.
    fixtures = ['ipaymu/fixtures/sessionID.json',]

    def setUp(self):
        # One session id present in the fixture, one that is not.
        self.c = Client()
        self.good_sessid = 'ad05fd717b3bb836519df7c430f0db0801d347b34ea28e4f15bc6213b9f95772ff882808442e1a5275715f2895f3db8adbd95105147e9f0856c4c5ad7de24bab'
        self.junk_sessid = 'this-sesssion-not-exists-in-database'

    def test_forms(self):
        """
        Sanity-check placeholder; form validation itself is not covered yet.
        """
        self.assertEqual(1 + 1, 2)

    def test_urls(self):
        """Smoke-test every ipaymu URL for the expected status and body."""
        # Test canceled page
        resp = self.c.get(reverse('ipaymu_cancel_url'))
        self.assertEqual(resp.status_code, 200)

        # Test return page
        resp = self.c.get(reverse('ipaymu_return_url'))
        self.assertEqual(resp.status_code, 200)

        # Test process url - GET is rejected with a plain-text message.
        resp = self.c.get(reverse('ipaymu_process_url'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, 'Invalid request.')

        # Test process url - POST
        # No data posted, will return invalid field.
        resp = self.c.post(reverse('ipaymu_process_url'))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('valid' in resp.content)

        # Test process url - POST
        # With valid data, will redirected to Ipaymu
        # (disabled: would hit the external Ipaymu gateway)
        # resp = self.c.post(reverse('ipaymu_process_url'), {
        #     'product': 'test product',
        #     'quantity': 1,
        #     'price': 5000,
        #     'comments': 'this is comments',
        # })
        # self.assertEqual(resp.status_code, 302)

        # Test notify url - GET responds with an empty body.
        resp = self.c.get(reverse('ipaymu_notify_url'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, '')

        # Test notify url - POST also responds with an empty body.
        resp = self.c.post(reverse('ipaymu_notify_url'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, '')

    def test_functions(self):
        """Exercise the verify_session/save_session helpers."""
        # Test verify_session: True for a known id, False otherwise.
        verified = verify_session(self.good_sessid)
        self.assertEqual(verified, True)
        verified = verify_session(self.junk_sessid)
        self.assertEqual(verified, False)

        # Test save_session: the id must be retrievable afterwards.
        save_session(self.junk_sessid)
        try:
            sess = IpaymuSessionID.objects.get(sessid=self.junk_sessid)
        except IpaymuSessionID.DoesNotExist:
            raise
        else:
            self.assertEqual(sess.sessid, self.junk_sessid)
| 2,770 | 973 |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.10.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Eppy is a scripting language for EnergyPlus, used to run annual simulations and produce outputs from archetype idf files
# %%
# cd ..
# %%
from eppy import modeleditor
from eppy.modeleditor import IDF
import pandas as pd
# %%
IDF.setiddname('/usr/local/EnergyPlus-8-9-0/Energy+.idd')


def run_case(case_dir, idf_name):
    """Run one archetype EnergyPlus simulation and extract headline results.

    Parameters
    ----------
    case_dir : str
        Sub-directory of ``data/resi_modelling`` holding the IDF and EPW;
        simulation outputs are written back into the same directory.
    idf_name : str
        File name of the archetype IDF inside *case_dir*.

    Returns
    -------
    tuple
        (peak hourly electricity demand in J, annual energy demand kWh,
        annual electricity demand kWh, annual heat demand kWh)
    """
    base = f"data/resi_modelling/{case_dir}"
    idf = IDF(f"{base}/{idf_name}")
    # Weather file used is for Dublin, Ireland (same EPW in every case dir).
    idf.epw = f"{base}/IRL_Dublin.039690_IWEC.epw"
    # BUGFIX: output_directory now always matches the directory results are
    # read from below. The original script wrote some cases (semi_d_*,
    # previously "semid_*") to differently spelled folders and then read
    # from the input folder, picking up stale or missing files.
    idf.run(expandobjects=True, readvars=True, output_directory=base)

    # Hourly meter output: each value is the sum over the entire hour.
    meter = pd.read_csv(f"{base}/eplusmtr.csv")
    peak_j = meter["Electricity:Facility [J](Hourly)"].max()

    tables = pd.read_html(f"{base}/eplustbl.htm")
    ann_energy_kwh = pd.DataFrame(tables[0]).iloc[1, 1]
    enduse = pd.DataFrame(tables[3])
    ann_elec_kwh = enduse.iloc[16, 1]
    ann_heat_kwh = enduse.iloc[16, 5]
    print(peak_j, ann_energy_kwh, ann_elec_kwh, ann_heat_kwh)
    return peak_j, ann_energy_kwh, ann_elec_kwh, ann_heat_kwh


# One call per archetype / retrofit state; named variables are kept so any
# downstream notebook cells that reference them still work.
(peak_demand_joule_det_pre, ann_energy_demand_kwh_det_pre,
 ann_elec_demand_kwh_det_pre, ann_heat_demand_kwh_det_pre) = run_case("det_pre", "detatched_pre.idf")
(peak_demand_joule_det_post, ann_energy_demand_kwh_det_post,
 ann_elec_demand_kwh_det_post, ann_heat_demand_kwh_det_post) = run_case("det_post", "detatched_post.idf")
(peak_demand_joule_semid_pre, ann_energy_demand_kwh_semid_pre,
 ann_elec_demand_kwh_semid_pre, ann_heat_demand_kwh_semid_pre) = run_case("semi_d_pre", "semi_d_pre.idf")
(peak_demand_joule_semid_post, ann_energy_demand_kwh_semid_post,
 ann_elec_demand_kwh_semid_post, ann_heat_demand_kwh_semid_post) = run_case("semi_d_post", "semi_d_post.idf")
(peak_demand_joule_terr_pre, ann_energy_demand_kwh_terr_pre,
 ann_elec_demand_kwh_terr_pre, ann_heat_demand_kwh_terr_pre) = run_case("terr_pre", "terraced_pre.idf")
(peak_demand_joule_terr_post, ann_energy_demand_kwh_terr_post,
 ann_elec_demand_kwh_terr_post, ann_heat_demand_kwh_terr_post) = run_case("terr_post", "terraced_post.idf")
(peak_demand_joule_mid_apt_pre, ann_energy_demand_kwh_mid_apt_pre,
 ann_elec_demand_kwh_mid_apt_pre, ann_heat_demand_kwh_mid_apt_pre) = run_case("mid_apt_pre", "mid_apt_pre.idf")
(peak_demand_joule_mid_apt_post, ann_energy_demand_kwh_mid_apt_post,
 ann_elec_demand_kwh_mid_apt_post, ann_heat_demand_kwh_mid_apt_post) = run_case("mid_apt_post", "mid_apt_post.idf")
(peak_demand_joule_top_apt_pre, ann_energy_demand_kwh_top_apt_pre,
 ann_elec_demand_kwh_top_apt_pre, ann_heat_demand_kwh_top_apt_pre) = run_case("top_apt_pre", "top_apt_pre.idf")
(peak_demand_joule_top_apt_post, ann_energy_demand_kwh_top_apt_post,
 ann_elec_demand_kwh_top_apt_post, ann_heat_demand_kwh_top_apt_post) = run_case("top_apt_post", "top_apt_post.idf")

# One row per archetype: label followed by the four extracted metrics.
peak_data = [
    ['Detatched housepre', peak_demand_joule_det_pre, ann_energy_demand_kwh_det_pre, ann_elec_demand_kwh_det_pre, ann_heat_demand_kwh_det_pre],
    ['Detatched housepost', peak_demand_joule_det_post, ann_energy_demand_kwh_det_post, ann_elec_demand_kwh_det_post, ann_heat_demand_kwh_det_post],
    ['Semi detatched housepre', peak_demand_joule_semid_pre, ann_energy_demand_kwh_semid_pre, ann_elec_demand_kwh_semid_pre, ann_heat_demand_kwh_semid_pre],
    ['Semi detatched housepost', peak_demand_joule_semid_post, ann_energy_demand_kwh_semid_post, ann_elec_demand_kwh_semid_post, ann_heat_demand_kwh_semid_post],
    ['Terraced housepre', peak_demand_joule_terr_pre, ann_energy_demand_kwh_terr_pre, ann_elec_demand_kwh_terr_pre, ann_heat_demand_kwh_terr_pre],
    ['Terraced housepost', peak_demand_joule_terr_post, ann_energy_demand_kwh_terr_post, ann_elec_demand_kwh_terr_post, ann_heat_demand_kwh_terr_post],
    ['Apartmentpre', peak_demand_joule_mid_apt_pre, ann_energy_demand_kwh_mid_apt_pre, ann_elec_demand_kwh_mid_apt_pre, ann_heat_demand_kwh_mid_apt_pre],
    ['Apartmentpost', peak_demand_joule_mid_apt_post, ann_energy_demand_kwh_mid_apt_post, ann_elec_demand_kwh_mid_apt_post, ann_heat_demand_kwh_mid_apt_post],
    ['Top floor apt.pre', peak_demand_joule_top_apt_pre, ann_energy_demand_kwh_top_apt_pre, ann_elec_demand_kwh_top_apt_pre, ann_heat_demand_kwh_top_apt_pre],
    ['Top floor apt.post', peak_demand_joule_top_apt_post, ann_energy_demand_kwh_top_apt_post, ann_elec_demand_kwh_top_apt_post, ann_heat_demand_kwh_top_apt_post],
]

df_peaks = pd.DataFrame(peak_data, columns = ['dwelling_type','peak_hourly_elec_demand(J)', "annual_energy_demand_kwh", "annual_elec_demand_kwh", "annual_heat_demand_kwh"])

# Hourly elec values in J cover a whole hour, hence /3,600,000 to get kW.
df_peaks["peak_elec_demand(kW)"] = df_peaks["peak_hourly_elec_demand(J)"]/3600000

# Assume a power factor of 0.85.
# NOTE(review): kVA is conventionally kW divided by the power factor; this
# multiplies, preserving the original behavior — confirm the intent.
df_peaks["peak_elec_demand(kVA)"] = df_peaks["peak_elec_demand(kW)"]*0.85

df_peaks.to_csv("data/interim/energy_demand_by_building_type_eppy.csv")
# %%
| 10,643 | 5,090 |
# MINUTES
# Each word is a list of [row, col] coordinates into clock_grid below.
IT_IS = [[0,0],[0,1],[0,3],[0,4]]
FIVE = [[3,i] for i in range(7,11)]
TEN = [[1,i] for i in range(6,9)]
QUARTER = [[2,i] for i in range(2,9)]
TWENTY = [[3,i] for i in range(1,7)]
HALF = [[1,i] for i in range(2,6)]
TO = [[4,i] for i in range(1,3)]
PAST = [[4,i] for i in range(3,7)]

# Indexed by minute // 5: entry 0 is "on the hour" (no minute words),
# entries 1-6 read "... past", entries 7-11 read "... to" the next hour.
MINUTES = [
    [],
    FIVE + PAST,
    TEN + PAST,
    QUARTER + PAST,
    TWENTY + PAST,
    TWENTY + FIVE + PAST,
    HALF + PAST,
    TWENTY + FIVE + TO,
    TWENTY + TO,
    QUARTER + TO,
    TEN + TO,
    FIVE + TO,
]

# HOURS
# Index 0 is one o'clock ... index 11 is twelve o'clock.
HOURS = [
    [[4,i] for i in range(8,11)], # 1
    [[8,i] for i in range(0,3)], # 2
    [[6,i] for i in range(7,12)], # 3
    [[5,i] for i in range(0,4)], # 4
    [[5,i] for i in range(4,8)], # 5
    [[5,i] for i in range(8,11)], # 6
    [[7,i] for i in range(1,6)], # 7
    [[7,i] for i in range(6,11)], # 8
    [[9,i] for i in range(0,4)], # 9
    [[8,i] for i in range(9,12)], # 10
    [[6,i] for i in range(1,7)], # 11
    [[8,i] for i in range(3,9)] # 12
]

OCLOCK = [[9,i] for i in range(6,12)]

# 12x12 letter grid; each space-separated group is one row. Row 11 starts
# with the weekday letters MTWTFSS.
clock = "ITNISOFMTEMP WRHALFTENVKC INQUARTERUTP FTWENTYFIVET " + \
        "ATOPASTXONEK FOURFIVESIXJ BELEVENTHREE NSEVENEIGHTK " + \
        "TWOTWELVETEN NINEPJOCLOCK HWYAWOUTSIDE MTWTFSSQNKMI"
clock_grid = [[char for char in row] for row in clock.split(" ")]
import time
from datetime import datetime
import shutil
# Redraw the word clock every 15 seconds, centered in the terminal.
while True:
    now = datetime.now()
    hour = int(now.strftime("%I"))     # 12-hour clock: 1..12
    minute = int(now.strftime("%M"))
    weekday = int(now.strftime("%w"))  # 0 = Sunday ... 6 = Saturday
    # From :35 onwards the clock reads "... to <next hour>", so the hour
    # index advances by one. BUGFIX: take it modulo 12 so 12:35-12:59
    # wraps around to "to ONE" instead of raising IndexError (HOURS has
    # twelve entries and hour can be 12).
    highlight = IT_IS + MINUTES[minute//5] + HOURS[(hour - (minute < 35)) % 12] + OCLOCK
    # Bright cyan for highlighted letters, dim grey for the rest.
    clock_print = [
        [
            ("\033[1;36m" if [i,j] in highlight else "\033[2;90m") + clock_grid[i][j] + "\033[0m"
            for j in range(12)
        ]
        for i in range(11)
    ]
    # Bottom row: highlight today's weekday letter, dim the filler letters.
    clock_print += [
        [
            ("\033[1;36m" if j == weekday else "\033[1;00m") + clock_grid[11][j] + "\033[0m"
            for j in range(7)
        ]
        + [
            "\033[2;90m" + clock_grid[11][j] + "\033[0m"
            for j in range(7,12)
        ]
    ]
    # Center the grid (12 rows, 23 printed columns) in the terminal.
    columns = shutil.get_terminal_size().columns
    lines = shutil.get_terminal_size().lines
    print("\n"*((lines-12)//2))
    for row in clock_print:
        split_row = " ".join(row)
        print(" "*((columns-23)//2) + split_row)
    print("\n"*((lines-12)//2 - 2))
    time.sleep(15)
# Weather
# 8 bit time
# Analog hand
# m i n e s w e e p e r / snake
# Pretty animations | 2,535 | 1,165 |
# TCE CCSD(T) and CCSD[T] calculations
import os
import sys
import qcdb
from ..utils import *
def check_ccsd_t_pr_br(return_value):
    """Assert CCSD, CCSD[T] (bracket) and CCSD(T) (parenthesis) energies.

    Compares qcdb's stored wavefunction variables against hard-coded
    reference values for the water/cc-pVDZ TCE calculation, to 5 decimal
    places. NOTE(review): *return_value* is currently unused — the checks
    read qcdb's global variables instead; confirm whether the energy
    return should also be validated.
    """
    # Reference energies (Hartree) for H2O cc-pVDZ.
    ccsd_tot = -76.240077811301250
    ccsd_corl = -0.213269954065481
    t_br_corr = -0.003139909173705
    t_br_corl = -0.216409863239186
    ccsd_t_br = -76.243217720474960
    t_pr_corr = -0.003054718622142
    t_pr_corl = -0.216324672687623
    ccsd_t_pr = -76.243132529923390

    assert compare_values(ccsd_tot, qcdb.variable("CCSD TOTAL ENERGY"), 5, "ccsd total")
    assert compare_values(ccsd_corl, qcdb.variable("CCSD CORRELATION ENERGY"), 5, "ccsd corl")
    assert compare_values(t_br_corr, qcdb.variable("T(CCSD) CORRECTION ENERGY"), 5, "[t] corr")
    assert compare_values(t_br_corl, qcdb.variable("CCSD+T(CCSD) CORRELATION ENERGY"), 5, "ccsd[t] corl")
    assert compare_values(ccsd_t_br, qcdb.variable("CCSD+T(CCSD) TOTAL ENERGY"), 5, "ccsd[t] total")
    assert compare_values(t_pr_corr, qcdb.variable("(T) CORRECTION ENERGY"), 5, "(t) corr")
    assert compare_values(t_pr_corl, qcdb.variable("CCSD(T) CORRELATION ENERGY"), 5, "ccsd(t) corl")
    assert compare_values(ccsd_t_pr, qcdb.variable("CCSD(T) TOTAL ENERGY"), 5, "ccsd(t) tot")
@using("nwchem")
def test_1_ccsd_t():
    """Run TCE CCSD(T) on water/cc-pVDZ through NWChem and check energies."""
    # Geometry in bohr (note `units au`).
    h2o = qcdb.set_molecule(
        """
        O 0.00000000 0.00000000 0.22138519
        H 0.00000000 -1.43013023 -0.88554075
        H 0.00000000 1.43013023 -0.88554075
        units au"""
    )

    qcdb.set_options(
        {
            "basis": "cc-pvdz",
            "nwchem_scf__rhf": True,
            # Tight SCF convergence so the reference values are reproducible.
            "nwchem_scf__thresh": 1.0e-10,
            "nwchem_scf__tol2e": 1.0e-10,
            "nwchem_scf__singlet": True,
            "nwchem_tce__ccsd(t)": True,
            "qc_module": "TCE",
            "nwchem_tce__io": "ga",
        }
    )
    val = qcdb.energy("nwc-ccsd(t)")
    check_ccsd_t_pr_br(val)
| 1,884 | 967 |
#!/usr/bin/env python3
import time
import multiprocessing
import os
import vim
from async_worker import AsyncWorker
import utils
# Global map for host (source code) window ID -> Squeezer instance
squeezers = {}

# Set of Squeezer instances that are waiting for background-build updates.
polling_squeezers = set()
def create_window(buf_name):
    """Open a read-only scratch window named *buf_name* to the right and return it."""
    vim.command('rightbelow vnew {}'.format(buf_name))
    vim.command('let w:squeeze_args=""')
    # Use vim.command(), not buf.options[], since the options may not exist
    for option in ('nomodifiable', 'buftype=nofile',
                   'syntax=objdump', 'filetype=squeeze'):
        vim.command('setlocal {}'.format(option))
    return vim.current.window
class Squeezer:
    """Pairs a source-code ("host") window with a read-only "guest" window
    that shows the output of an external script, rebuilt asynchronously in a
    separate process whenever the host buffer is written."""

    # Prefix for the scratch buffer displayed in the guest window.
    BUFNAME_PREFIX = '__Squeeze__'

    def __init__(self, win):
        """Create the guest window/buffer for *win*, wire up the autocmds,
        and kick off the first background build."""
        self.host_win = win
        self.host_winid = utils.win_to_winid(win)
        self.host_buf = win.buffer
        self.host_bufnr = win.buffer.number
        guest_buf_name = '{}.{}.{}'.format(self.BUFNAME_PREFIX,
                                           self.host_winid,
                                           self.host_buf.name)
        self.guest_win = create_window(guest_buf_name)
        self.guest_winid = utils.win_to_winid(self.guest_win)
        self.guest_buf = self.guest_win.buffer
        self.guest_bufnr = self.guest_buf.number
        # Rebuild on every write of the host buffer; tear down when either
        # the host quits or the guest buffer goes away.
        self._add_autocmd('BufWritePost', self.host_bufnr,
                          'trigger_build({})'.format(self.host_winid))
        self._add_autocmd('QuitPre', self.host_bufnr,
                          'cleanup_squeezer({})'.format(self.host_winid))
        self._add_autocmd('BufUnload', self.guest_bufnr,
                          'cleanup_squeezer({})'.format(self.host_winid))
        # focus back to the host window
        vim.current.window = self.host_win
        utils.log('object created for {}({})'.format(
            win.buffer.name, win.number))
        self.worker = None
        self.async_build()

    def __del__(self):
        # Defensive de-registration from the module-level bookkeeping.
        if self.host_winid in squeezers:
            squeezers.pop(self.host_winid)
        if self in polling_squeezers:
            polling_squeezers.remove(self)

    def _add_autocmd(self, ev, bufnr, py_stmt):
        """Register a buffer-local autocmd (in this host's own augroup)
        that calls back into Python via the plugin's s:Python wrapper."""
        cmd = 'call s:Python("{}")'.format(py_stmt)
        vim.command('augroup SqueezeAutoCmds{}'.format(self.host_winid))
        vim.command('  autocmd {} <buffer={}> {}'.format(ev, bufnr, cmd))
        vim.command('augroup END')

    def _del_autocmd(self, ev, bufnr):
        """Remove a previously registered buffer-local autocmd."""
        vim.command('augroup SqueezeAutoCmds{}'.format(self.host_winid))
        vim.command('  autocmd! {} <buffer={}>'.format(ev, bufnr))
        vim.command('augroup END')

    # Close the guest window and destroy the outstanding worker
    def cleanup(self):
        if self.worker:
            self.worker.terminate()
            self.worker.join()
            self.worker = None
        if self.host_winid in squeezers:
            squeezers.pop(self.host_winid)
        if self.guest_win.valid:
            vim.command('{}close'.format(self.guest_win.number))
        self._del_autocmd('*', self.host_bufnr)
        self._del_autocmd('*', self.guest_bufnr)
        utils.log('object destroyed for {}({})'.format(self.host_buf.name,
                                                       self.host_winid))

    def async_build(self):
        """Start (or restart) the external build in a worker process and
        make sure the polling timer is running."""
        if self.worker:
            utils.log('killing existing thread')
            self.worker.terminate()
            self.worker.join()
        script = utils.get_var('squeeze_c_script')
        args = utils.get_var('squeeze_c_args')
        if args:
            self.guest_win.vars['squeeze_args'] = args
        else:
            self.guest_win.vars['squeeze_args'] = '<none>'
        path_script = os.path.join(vim.eval('s:plugin_path'), 'scripts/',
                                   script, 'objdump')
        self.out_q = multiprocessing.Queue()
        self.worker = AsyncWorker(self.out_q, self.host_win.buffer.name,
                                  path_script, args)
        self.worker.start()
        # First poller creates the repeating timer; later ones just unpause it.
        if len(polling_squeezers) == 0:
            vim.command('''
            let g:squeeze_timer = timer_start(100, \
            function('s:TimerHandler'), {'repeat': -1})
            ''')
        else:
            vim.command('call timer_pause(g:squeeze_timer, 0)')
        polling_squeezers.add(self)

    def update_result(self):
        """Poll the worker queue; when output is ready, reap the worker and
        write its stdout/stderr into the (temporarily modifiable) guest buffer."""
        if not self.guest_win.valid:
            self.cleanup()
            return
        if self.worker and not self.out_q.empty():
            out, err = self.out_q.get()
            output = out + '\n-------\n' + err
            self.worker.join()
            exit_code = self.worker.exitcode
            self.worker = None
            # temporarily make the buffer modifiable
            self.guest_buf.options['modifiable'] = 1
            self.guest_buf[:] = output.split('\n')
            self.guest_buf.options['modifiable'] = 0
            if self in polling_squeezers:
                polling_squeezers.remove(self)
def _toggle_on(win):
    """Create a Squeezer for *win* and register it in the global map."""
    squeezer = Squeezer(win)
    squeezers[squeezer.host_winid] = squeezer
def _toggle_off(win):
    """Tear down the Squeezer registered for host window *win*."""
    winid = utils.win_to_winid(win)
    squeezers[winid].cleanup()
def toggle():
    """Toggle the squeeze view for the current window (host or guest)."""
    cur_win = vim.current.window
    cur_winid = utils.win_to_winid(cur_win)
    # Current window is a tracked host: just turn it off.
    if cur_winid in squeezers:
        _toggle_off(cur_win)
        return
    # Toggle hit in a guest window? Turn off its host instead.
    for squeezer in list(squeezers.values()):
        if squeezer.guest_winid == cur_winid:
            _toggle_off(squeezer.host_win)
            return
    # Is it a regular file? Special buffers (help, quickfix, ...) are refused.
    opts = cur_win.buffer.options
    if 'buftype' in opts and opts['buftype'] not in ['', b'']:
        vim.command('echohl WarningMsg')
        vim.command('echomsg "Not a regular file"')
        vim.command('echohl None')
    else:
        _toggle_on(cur_win)
def trigger_build(host_winid):
    """Autocmd callback: rebuild the output for *host_winid* if tracked."""
    squeezer = squeezers.get(host_winid)
    if squeezer is not None:
        squeezer.async_build()
def cleanup_squeezer(host_winid):
    """Autocmd callback: tear down the Squeezer for *host_winid* if tracked."""
    squeezer = squeezers.get(host_winid)
    if squeezer is not None:
        squeezer.cleanup()
def poll_result():
    """Timer callback: flush finished worker output into guest buffers and
    pause the timer once nobody is waiting any more."""
    for squeezer in list(polling_squeezers):
        squeezer.update_result()
    if not polling_squeezers:
        vim.command('call timer_pause(g:squeeze_timer, 1)')
| 6,246 | 2,101 |
# Demonstrate enumerate(): yields (index, value) pairs from a sequence.
for idx, word in enumerate(['The', 'Big', 'Bang', 'Theory']):
    print(idx, word)
print('\n')
# Demonstrate enumerate() again, printing only the values space-separated.
for _, word in enumerate(['Geeks', 'for', 'Geeks', 'is', 'the', 'Best', 'Coding', 'Platform']):
    print(word, end=' ')
print('\n')
# Demonstrate zip(): iterate two containers in lockstep.
questions = ['name', 'colour', 'shape']
answers = ['apple', 'red', 'a circle']
print('\n')
for question, answer in zip(questions, answers):
    print('What is your {0}? I am {1}.'.format(question, answer))
print('\n')
# Demonstrate dict.items(): key/value pairs of a mapping.
king = {'Akbar': 'The Great', 'Chandragupta': 'The Maurya', 'Modi': 'The Changer'}
for ruler, title in king.items():
    print(ruler, title)
print('\n')
# Demonstrate sorted(): iterate a list in ascending order.
list1 = [1, 3, 5, 6, 2, 1, 3]
print("The list in sorted order is: ")
for item in sorted(list1):
    print(item, end=" ")
print("\r")
# sorted() over set(): duplicates are removed before sorting.
print("The list in sorted order (without duplicates) is : ")
for item in sorted(set(list1)):
    print(item, end=" ")
print('\n')
# sorted() + set() works on strings too.
basket = ['guava', 'orange', 'apple', 'pear', 'guava', 'banana', 'grape']
for fruit in sorted(set(basket)):
    print(fruit)
print('\n')
# Demonstrate reversed(): iterate in reverse order.
print("The list in reversed order is: ")
for item in reversed(basket):
    print(item, end=" ")
# -*- coding: utf-8 -*-
from sandbox_python_structure import f
def test_parse_no_subarg():
    """Calling f() with no arguments returns the greeting string."""
    assert f() == 'hi'
| 131 | 52 |
"""initial tables
Revision ID: 9e7ee952f6ae
Revises:
Create Date: 2016-10-21 17:20:27.709284
"""
# revision identifiers, used by Alembic.
revision = '9e7ee952f6ae'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy.types import Text
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from lingvodoc.models import SLBigInteger
def upgrade():
    """Create the initial Lingvodoc schema.

    Tables are created in dependency order: standalone tables first
    (basegroup, organization, translationgist), then everything that
    references them via foreign keys.  Most entities use a composite
    (object_id, client_id) primary key.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('basegroup',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('id', SLBigInteger(), nullable=False),
    sa.Column('dictionary_default', sa.Boolean(), nullable=False),
    sa.Column('perspective_default', sa.Boolean(), nullable=False),
    sa.Column('name', sa.UnicodeText(), nullable=False),
    sa.Column('subject', sa.UnicodeText(), nullable=False),
    sa.Column('action', sa.UnicodeText(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('organization',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('id', SLBigInteger(), nullable=False),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('name', sa.UnicodeText(), nullable=True),
    sa.Column('about', sa.UnicodeText(), nullable=True),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('translationgist',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('type', sa.UnicodeText(), nullable=True),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('field',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('translation_gist_client_id', SLBigInteger(), nullable=False),
    sa.Column('translation_gist_object_id', SLBigInteger(), nullable=False),
    sa.Column('data_type_translation_gist_client_id', SLBigInteger(), nullable=False),
    sa.Column('data_type_translation_gist_object_id', SLBigInteger(), nullable=False),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('is_translatable', sa.Boolean(), nullable=False),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['data_type_translation_gist_client_id', 'data_type_translation_gist_object_id'], ['translationgist.client_id', 'translationgist.object_id'], ),
    sa.ForeignKeyConstraint(['translation_gist_object_id', 'translation_gist_client_id'], ['translationgist.object_id', 'translationgist.client_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('group',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('id', postgresql.UUID(), nullable=False),
    sa.Column('old_id', SLBigInteger()),
    sa.Column('base_group_id', SLBigInteger(), nullable=False),
    sa.Column('subject_client_id', SLBigInteger(), nullable=True),
    sa.Column('subject_object_id', SLBigInteger(), nullable=True),
    sa.Column('subject_override', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['base_group_id'], ['basegroup.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # language is self-referential via (parent_object_id, parent_client_id).
    op.create_table('language',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('parent_object_id', SLBigInteger(), nullable=True),
    sa.Column('parent_client_id', SLBigInteger(), nullable=True),
    sa.Column('translation_gist_client_id', SLBigInteger(), nullable=False),
    sa.Column('translation_gist_object_id', SLBigInteger(), nullable=False),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['parent_object_id', 'parent_client_id'], ['language.object_id', 'language.client_id'], ),
    sa.ForeignKeyConstraint(['translation_gist_object_id', 'translation_gist_client_id'], ['translationgist.object_id', 'translationgist.client_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('translationatom',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('parent_object_id', SLBigInteger(), nullable=True),
    sa.Column('parent_client_id', SLBigInteger(), nullable=True),
    sa.Column('locale_id', SLBigInteger(), nullable=False),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('content', sa.UnicodeText(), nullable=False),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['parent_object_id', 'parent_client_id'], ['translationgist.object_id', 'translationgist.client_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('dictionary',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('parent_object_id', SLBigInteger(), nullable=True),
    sa.Column('parent_client_id', SLBigInteger(), nullable=True),
    sa.Column('translation_gist_client_id', SLBigInteger(), nullable=False),
    sa.Column('translation_gist_object_id', SLBigInteger(), nullable=False),
    sa.Column('state_translation_gist_client_id', SLBigInteger(), nullable=False),
    sa.Column('state_translation_gist_object_id', SLBigInteger(), nullable=False),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('category', SLBigInteger(), nullable=True),
    sa.Column('domain', SLBigInteger(), nullable=True),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['parent_object_id', 'parent_client_id'], ['language.object_id', 'language.client_id'], ),
    sa.ForeignKeyConstraint(['state_translation_gist_client_id', 'state_translation_gist_object_id'], ['translationgist.client_id', 'translationgist.object_id'], ),
    sa.ForeignKeyConstraint(['translation_gist_object_id', 'translation_gist_client_id'], ['translationgist.object_id', 'translationgist.client_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('locale',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('id', SLBigInteger(), nullable=False),
    sa.Column('parent_object_id', SLBigInteger(), nullable=True),
    sa.Column('parent_client_id', SLBigInteger(), nullable=True),
    sa.Column('shortcut', sa.UnicodeText(), nullable=False),
    sa.Column('intl_name', sa.UnicodeText(), nullable=False),
    sa.ForeignKeyConstraint(['parent_object_id', 'parent_client_id'], ['language.object_id', 'language.client_id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('organization_to_group_association',
    sa.Column('organization_id', sa.BigInteger(), nullable=True),
    sa.Column('group_id', postgresql.UUID(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
    sa.ForeignKeyConstraint(['organization_id'], ['organization.id'], )
    )
    op.create_table('dictionaryperspective',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('parent_object_id', SLBigInteger(), nullable=True),
    sa.Column('parent_client_id', SLBigInteger(), nullable=True),
    sa.Column('translation_gist_client_id', SLBigInteger(), nullable=False),
    sa.Column('translation_gist_object_id', SLBigInteger(), nullable=False),
    sa.Column('state_translation_gist_client_id', SLBigInteger(), nullable=False),
    sa.Column('state_translation_gist_object_id', SLBigInteger(), nullable=False),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('is_template', sa.Boolean(), nullable=False),
    sa.Column('import_source', sa.UnicodeText(), nullable=True),
    sa.Column('import_hash', sa.UnicodeText(), nullable=True),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['parent_object_id', 'parent_client_id'], ['dictionary.object_id', 'dictionary.client_id'], ),
    sa.ForeignKeyConstraint(['state_translation_gist_client_id', 'state_translation_gist_object_id'], ['translationgist.client_id', 'translationgist.object_id'], ),
    sa.ForeignKeyConstraint(['translation_gist_object_id', 'translation_gist_client_id'], ['translationgist.object_id', 'translationgist.client_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('user',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('id', SLBigInteger(), nullable=False),
    sa.Column('default_locale_id', SLBigInteger(), nullable=False),
    sa.Column('birthday', sa.Date(), nullable=True),
    sa.Column('is_active', sa.Boolean(), nullable=False),
    sa.Column('login', sa.UnicodeText(), nullable=False),
    sa.Column('intl_name', sa.UnicodeText(), nullable=False),
    sa.Column('name', sa.UnicodeText(), nullable=True),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['default_locale_id'], ['locale.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('login')
    )
    op.create_table('client',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('id', SLBigInteger(), nullable=False),
    sa.Column('user_id', SLBigInteger(), nullable=False),
    sa.Column('is_browser_client', sa.Boolean(), nullable=False),
    sa.Column('counter', SLBigInteger(), nullable=False),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # dictionaryperspectivetofield is self-referential via (self_client_id, self_object_id).
    op.create_table('dictionaryperspectivetofield',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('parent_object_id', SLBigInteger(), nullable=True),
    sa.Column('parent_client_id', SLBigInteger(), nullable=True),
    sa.Column('self_client_id', SLBigInteger(), nullable=True),
    sa.Column('self_object_id', SLBigInteger(), nullable=True),
    sa.Column('field_client_id', SLBigInteger(), nullable=False),
    sa.Column('field_object_id', SLBigInteger(), nullable=False),
    sa.Column('link_client_id', SLBigInteger(), nullable=True),
    sa.Column('link_object_id', SLBigInteger(), nullable=True),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('position', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['field_client_id', 'field_object_id'], ['field.client_id', 'field.object_id'], ),
    sa.ForeignKeyConstraint(['link_client_id', 'link_object_id'], ['dictionaryperspective.client_id', 'dictionaryperspective.object_id'], ),
    sa.ForeignKeyConstraint(['parent_object_id', 'parent_client_id'], ['dictionaryperspective.object_id', 'dictionaryperspective.client_id'], ),
    sa.ForeignKeyConstraint(['self_client_id', 'self_object_id'], ['dictionaryperspectivetofield.client_id', 'dictionaryperspectivetofield.object_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('email',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('id', SLBigInteger(), nullable=False),
    sa.Column('user_id', SLBigInteger(), nullable=False),
    sa.Column('email', sa.UnicodeText(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    op.create_table('lexicalentry',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('parent_object_id', SLBigInteger(), nullable=True),
    sa.Column('parent_client_id', SLBigInteger(), nullable=True),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=True),
    sa.Column('moved_to', sa.UnicodeText(), nullable=True),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['parent_object_id', 'parent_client_id'], ['dictionaryperspective.object_id', 'dictionaryperspective.client_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('passhash',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('id', SLBigInteger(), nullable=False),
    sa.Column('user_id', SLBigInteger(), nullable=False),
    sa.Column('hash', sa.UnicodeText(), nullable=False),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user_to_group_association',
    sa.Column('user_id', sa.BigInteger(), nullable=True),
    sa.Column('group_id', postgresql.UUID(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
    )
    op.create_table('user_to_organization_association',
    sa.Column('user_id', sa.BigInteger(), nullable=True),
    sa.Column('organization_id', sa.BigInteger(), nullable=True),
    sa.ForeignKeyConstraint(['organization_id'], ['organization.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
    )
    op.create_table('userblobs',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=True),
    sa.Column('user_id', SLBigInteger(), nullable=True),
    sa.Column('name', sa.UnicodeText(), nullable=False),
    sa.Column('content', sa.UnicodeText(), nullable=False),
    sa.Column('real_storage_path', sa.UnicodeText(), nullable=False),
    sa.Column('data_type', sa.UnicodeText(), nullable=False),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    # entity is self-referential via (self_client_id, self_object_id).
    op.create_table('entity',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('parent_object_id', SLBigInteger(), nullable=True),
    sa.Column('parent_client_id', SLBigInteger(), nullable=True),
    sa.Column('self_client_id', SLBigInteger(), nullable=True),
    sa.Column('self_object_id', SLBigInteger(), nullable=True),
    sa.Column('field_client_id', SLBigInteger(), nullable=False),
    sa.Column('field_object_id', SLBigInteger(), nullable=False),
    sa.Column('link_client_id', SLBigInteger(), nullable=True),
    sa.Column('link_object_id', SLBigInteger(), nullable=True),
    sa.Column('locale_id', SLBigInteger(), nullable=True),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.Column('content', sa.UnicodeText(), nullable=True),
    sa.Column('additional_metadata', postgresql.JSONB(astext_type=Text()), nullable=True),
    sa.ForeignKeyConstraint(['field_client_id', 'field_object_id'], ['field.client_id', 'field.object_id'], ),
    sa.ForeignKeyConstraint(['parent_object_id', 'parent_client_id'], ['lexicalentry.object_id', 'lexicalentry.client_id'], ),
    sa.ForeignKeyConstraint(['self_client_id', 'self_object_id'], ['entity.client_id', 'entity.object_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('publishingentity',
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('published', sa.Boolean(), nullable=False),
    sa.Column('accepted', sa.Boolean(), nullable=False),
    sa.ForeignKeyConstraint(['client_id', 'object_id'], ['entity.client_id', 'entity.object_id'], ),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    op.create_table('objecttoc',
    sa.Column('object_id', SLBigInteger(), nullable=False),
    sa.Column('client_id', SLBigInteger(), nullable=False),
    sa.Column('table_name', sa.UnicodeText(), nullable=True),
    sa.Column('marked_for_deletion', sa.Boolean(), nullable=False),
    sa.PrimaryKeyConstraint('object_id', 'client_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), children before parents so
    that foreign-key constraints never block a drop."""
    ### commands auto generated by Alembic - please adjust! ###
    tables_in_drop_order = (
        'objecttoc',
        'publishingentity',
        'entity',
        'userblobs',
        'user_to_organization_association',
        'user_to_group_association',
        'passhash',
        'lexicalentry',
        'email',
        'dictionaryperspectivetofield',
        'client',
        'user',
        'dictionaryperspective',
        'organization_to_group_association',
        'locale',
        'dictionary',
        'translationatom',
        'language',
        'group',
        'field',
        'translationgist',
        'basegroup',
    )
    for table_name in tables_in_drop_order:
        op.drop_table(table_name)
    ### end Alembic commands ###
| 18,063 | 5,877 |
#!/usr/bin/env python3
try:
import os
import json
import argparse
import shutil
from git import Repo
from jinja2 import Template, Environment, FileSystemLoader
except ImportError as ie:
print("Import failed for " + ie.name)
exit(1)
# JSON tag names used when walking the P4Info program description.
NAME_TAG = 'name'
TABLES_TAG = 'tables'
BITWIDTH_TAG = 'bitwidth'
ACTIONS_TAG = 'actions'
PREAMBLE_TAG = 'preamble'
OTHER_MATCH_TYPE_TAG = 'otherMatchType'
MATCH_TYPE_TAG = 'matchType'
PARAMS_TAG = 'params'
ACTION_REFS_TAG = 'actionRefs'
MATCH_FIELDS_TAG = 'matchFields'
# Name of the P4 no-op action; references to it are skipped during generation.
NOACTION = 'NoAction'
STAGES_TAG = 'stages'
def get_sai_key_type(key_size, key_header, key_field):
    """Map an exact-match key's bit width (plus header/field-name hints) to a
    (SAI attribute type, SAI field accessor) pair.

    The header/field names are only used as heuristics: '_id' suggests an
    object id, 'addr'/'ip'/'mac' suggest address types.  Raises ValueError
    for widths with no mapping.
    """
    looks_like_id = '_id' in key_field
    looks_like_ip = 'addr' in key_field or 'ip' in key_header
    looks_like_mac = 'addr' in key_field or 'mac' in key_header
    # Ordered rules: first predicate that holds wins.
    rules = (
        (key_size == 1, ('bool', "booldata")),
        (key_size <= 8, ('sai_uint8_t', "u8")),
        (key_size == 16 and looks_like_id, ('sai_object_id_t', "u16")),
        (key_size <= 16, ('sai_uint16_t', "u16")),
        (key_size == 32 and looks_like_ip, ('sai_ip_address_t', "ipaddr")),
        (key_size == 32 and looks_like_id, ('sai_object_id_t', "u32")),
        (key_size <= 32, ('sai_uint32_t', "u32")),
        (key_size == 48 and looks_like_mac, ('sai_mac_t', "mac")),
        (key_size <= 64, ('sai_uint64_t', "u64")),
        (key_size == 128, ('sai_ip_address_t', "ipaddr")),
    )
    for matched, mapping in rules:
        if matched:
            return mapping
    raise ValueError(f'key_size={key_size} is not supported')
def get_sai_lpm_type(key_size, key_header, key_field):
    """Map an LPM match key to its (SAI prefix type, field accessor) pair.

    Only 32-bit (IPv4) and 128-bit (IPv6) keys that look like addresses are
    supported; anything else raises ValueError.
    """
    looks_like_ip = 'addr' in key_field or 'ip' in key_header
    if looks_like_ip and key_size in (32, 128):
        return 'sai_ip_prefix_t', 'ipPrefix'
    raise ValueError(f'key_size={key_size}, key_header={key_header}, and key_field={key_field} is not supported')
def get_sai_list_type(key_size, key_header, key_field):
    """Map a 'list' match key to its (SAI list type, field accessor) pair.

    Raises ValueError for widths with no SAI list mapping: 33-64 bits
    (sai_u64_list_t does not exist in SAI) and anything above 64 bits.
    """
    if key_size <= 8:
        return 'sai_u8_list_t', "u8list"
    elif key_size <= 16:
        return 'sai_u16_list_t', "u16list"
    elif key_size == 32 and ('addr' in key_field or 'ip' in key_header):
        return 'sai_ip_address_list_t', "ipaddrlist"
    elif key_size <= 32:
        return 'sai_u32_list_t', "u32list"
    elif key_size <= 64:
        # Bug fix: this ValueError was previously constructed but never
        # raised, so an unusable ('sai_u64_list_t', "no mapping") tuple
        # leaked into the generated SAI output.
        raise ValueError('sai_u64_list_t is not supported')
    raise ValueError(f'key_size={key_size} is not supported')
def get_sai_range_list_type(key_size, key_header, key_field):
    """Map a 'range_list' match key to its (SAI range-list type, field
    accessor) pair; raises ValueError above 64 bits."""
    # 32-bit keys that look like addresses get the IP-specific range list.
    if key_size == 32 and ('addr' in key_field or 'ip' in key_header):
        return 'sai_ipaddr_range_list_t', 'ipaddrrangelist'
    for width_limit, mapping in ((8, ('sai_u8_range_list_t', 'u8rangelist')),
                                 (16, ('sai_u16_range_list_t', 'u16rangelist')),
                                 (32, ('sai_u32_range_list_t', 'u32rangelist')),
                                 (64, ('sai_u64_range_list_t', 'u64rangelist'))):
        if key_size <= width_limit:
            return mapping
    raise ValueError(f'key_size={key_size} is not supported')
def get_sai_key_data(key):
    """Convert one P4Info match-field entry into a dict describing the SAI key.

    The field name has the form "[struct.]header.field:sai_name"; the header
    and field parts feed the type-mapping heuristics, the part after ':' is
    the SAI-facing key name.  Raises ValueError for missing/unknown match
    types.
    """
    full_key_name, sai_key_name = key[NAME_TAG].split(':')
    name_parts = full_key_name.split('.')
    # Qualified names are either struct.header.field or header.field.
    if len(name_parts) == 3:
        _, key_header, key_field = name_parts
    else:
        key_header, key_field = name_parts

    sai_key_data = dict()
    sai_key_data['id'] = key['id']
    sai_key_data['sai_key_name'] = sai_key_name
    key_size = key[BITWIDTH_TAG]

    # An explicit "other" match type overrides the standard one.
    if OTHER_MATCH_TYPE_TAG in key:
        match_type = key[OTHER_MATCH_TYPE_TAG].lower()
    elif MATCH_TYPE_TAG in key:
        match_type = key[MATCH_TYPE_TAG].lower()
    else:
        raise ValueError(f'No valid match tag found')
    sai_key_data['match_type'] = match_type

    # Dispatch to the per-match-type mapping helper.
    if match_type == 'exact':
        sai_key_data['sai_key_type'], sai_key_data['sai_key_field'] = get_sai_key_type(key_size, key_header, key_field)
    elif match_type == 'lpm':
        sai_key_data['sai_lpm_type'], sai_key_data['sai_lpm_field'] = get_sai_lpm_type(key_size, key_header, key_field)
    elif match_type == 'list':
        sai_key_data['sai_list_type'], sai_key_data['sai_list_field'] = get_sai_list_type(key_size, key_header, key_field)
    elif match_type == 'range_list':
        sai_key_data['sai_range_list_type'], sai_key_data['sai_range_list_field'] = get_sai_range_list_type(key_size, key_header, key_field)
    else:
        raise ValueError(f"match_type={match_type} is not supported")

    sai_key_data['bitwidth'] = key_size
    return sai_key_data
def extract_action_data(program):
    """Build a map of action id -> {id, name, params} from P4Info actions.

    Each parameter is augmented with its SAI type/field mapping derived from
    its bit width and name.
    """
    action_data = {}
    for action in program[ACTIONS_TAG]:
        preamble = action[PREAMBLE_TAG]
        action_id = preamble['id']
        # Keep only the last component of the fully qualified action name.
        action_name = preamble[NAME_TAG].split('.')[-1]
        params = []
        for p in action.get(PARAMS_TAG, []):
            sai_type, sai_field = get_sai_key_type(int(p[BITWIDTH_TAG]), p[NAME_TAG], p[NAME_TAG])
            params.append({
                'id': p['id'],
                NAME_TAG: p[NAME_TAG],
                'type': sai_type,
                'field': sai_field,
                'bitwidth': p[BITWIDTH_TAG],
            })
        action_data[action_id] = {'id': action_id, NAME_TAG: action_name, PARAMS_TAG: params}
    return action_data
def table_with_counters(program, table_id):
    """Return 'true'/'false' (string form, for the Jinja templates) depending
    on whether *table_id* has a direct counter attached in *program*."""
    has_counter = any(counter['directTableId'] == table_id
                      for counter in program['directCounters'])
    return 'true' if has_counter else 'false'
def generate_sai_apis(program, ignore_tables):
    """Group the P4 tables of *program* into per-API SAI descriptions.

    Returns a list of dicts, one per API name ("app_name"), each carrying the
    SAI table descriptions derived from the P4Info tables.  Tables listed in
    *ignore_tables* are skipped.  Table names are expected to look like
    "<control>.<table name>|<api name>", optionally with a "<stage>:<group>"
    table-name part for tables that are instantiated once per pipeline stage.
    """
    sai_apis = []
    all_actions = extract_action_data(program)
    # Sort by name so generation order is deterministic across runs.
    tables = sorted(program[TABLES_TAG], key=lambda k: k[PREAMBLE_TAG][NAME_TAG])
    for table in tables:
        sai_table_data = dict()
        sai_table_data['keys'] = []
        sai_table_data[ACTIONS_TAG] = []
        sai_table_data[STAGES_TAG] = []
        # Split "<control>.<rest>" then "<table name>|<api name>".
        table_control, table_name = table[PREAMBLE_TAG][NAME_TAG].split('.', 1)
        if table_name in ignore_tables:
            continue
        table_name, api_name = table_name.split('|')
        sai_table_data[NAME_TAG] = table_name.replace('.' , '_')
        sai_table_data['id'] = table[PREAMBLE_TAG]['id']
        sai_table_data['with_counters'] = table_with_counters(program, sai_table_data['id'])
        # check if table belongs to a group ("<stage>:<group name>")
        is_new_group = True
        if ':' in table_name:
            stage, group_name = table_name.split(':')
            table_name = group_name
            stage = stage.replace('.' , '_')
            # If the group already exists, only record the extra stage and
            # skip the rest of the per-table processing for this entry.
            for sai_api in sai_apis:
                for sai_table in sai_api[TABLES_TAG]:
                    if sai_table['name'] == table_name:
                        sai_table[STAGES_TAG].append(stage)
                        is_new_group = False
                        break
            if is_new_group:
                sai_table_data[NAME_TAG] = table_name
                sai_table_data[STAGES_TAG].append(stage)
            else:
                continue
        for key in table[MATCH_FIELDS_TAG]:
            # skip v4/v6 selector
            if 'v4_or_v6' in key[NAME_TAG]:
                continue
            sai_table_data['keys'].append(get_sai_key_data(key))
        # Collect the table's actions, dropping the P4 NoAction placeholder.
        for action in table[ACTION_REFS_TAG]:
            action_id = action["id"]
            if all_actions[action_id][NAME_TAG] != NOACTION:
                sai_table_data[ACTIONS_TAG].append(all_actions[action_id])
        # Heuristic: a single "<table>_id" key, or more than 5 keys, means the
        # table is modeled as a SAI object rather than an entry struct.
        if len(sai_table_data['keys']) == 1 and sai_table_data['keys'][0]['sai_key_name'].endswith(table_name.split('.')[-1] + '_id'):
            sai_table_data['is_object'] = 'true'
            # Object ID itself is a key
            sai_table_data['keys'] = []
        elif len(sai_table_data['keys']) > 5:
            sai_table_data['is_object'] = 'true'
        else:
            sai_table_data['is_object'] = 'false'
        sai_table_data['name'] = sai_table_data['name'] + '_entry'
        # Append to an existing API group, or start a new one.
        is_new_api = True
        for sai_api in sai_apis:
            if sai_api['app_name'] == api_name:
                sai_api[TABLES_TAG].append(sai_table_data)
                is_new_api = False
                break
        if is_new_api:
            new_api = dict()
            new_api['app_name'] = api_name
            new_api[TABLES_TAG] = [sai_table_data]
            sai_apis.append(new_api)
    return sai_apis
def write_sai_impl_files(sai_api):
    """Render the C++ implementation file for one SAI API group into ./lib."""
    env = Environment(loader=FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
    template = env.get_template('/templates/saiapi.cpp.j2')
    rendered = template.render(tables = sai_api[TABLES_TAG], app_name = sai_api['app_name'])
    out_path = './lib/sai' + sai_api['app_name'].replace('_', '') + '.cpp'
    with open(out_path, 'w') as out_file:
        out_file.write(rendered)
def write_sai_makefile(sai_api_name_list):
    """Render ./lib/Makefile and the shared utils.cpp / utils.h sources."""
    env = Environment(loader=FileSystemLoader('.'))
    makefile_tm = env.get_template('/templates/Makefile.j2')
    makefile_str = makefile_tm.render(api_names = sai_api_name_list)
    with open('./lib/Makefile', 'w') as o:
        o.write(makefile_str)
    # NOTE(review): `sai_api` is undefined in this function (the only
    # parameter is `sai_api_name_list`), so the two renders below raise
    # NameError at runtime.  This looks copy-pasted from
    # write_sai_impl_files -- confirm the intended data source.
    env = Environment(loader=FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
    sai_impl_tm = env.get_template('/templates/utils.cpp.j2')
    sai_impl_str = sai_impl_tm.render(tables = sai_api[TABLES_TAG], app_name = sai_api['app_name'])
    with open('./lib/utils.cpp', 'w') as o:
        o.write(sai_impl_str)
    env = Environment(loader=FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
    sai_impl_tm = env.get_template('/templates/utils.h.j2')
    sai_impl_str = sai_impl_tm.render(tables = sai_api[TABLES_TAG], app_name = sai_api['app_name'])
    with open('./lib/utils.h', 'w') as o:
        o.write(sai_impl_str)
def write_sai_files(sai_api):
    """Write the experimental SAI header for one API and register it in the
    cloned SAI repo's headers.

    Creates ./SAI/experimental/saiexperimental<app>.h from the template, then
    edits saiextensions.h, saitypesextensions.h and saiobject.h in place by
    inserting lines at the marker comments those files provide.
    """
    # The main header, rendered from the Jinja template.
    # (Fix: the previous version also read the raw template file into an
    # unused variable before rendering; that dead read has been removed.)
    env = Environment(loader=FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
    sai_header_tm = env.get_template('templates/saiapi.h.j2')
    sai_header_str = sai_header_tm.render(sai_api = sai_api)
    with open('./SAI/experimental/saiexperimental' + sai_api['app_name'].replace('_', '') + '.h', 'w') as o:
        o.write(sai_header_str)
    # The SAI Extensions: add the API enum value and include the new header.
    with open('./SAI/experimental/saiextensions.h', 'r') as f:
        lines = f.readlines()
    new_lines = []
    for line in lines:
        if 'Add new experimental APIs above this line' in line:
            new_lines.append(' SAI_API_' + sai_api['app_name'].upper() + ',\n\n')
        if 'new experimental object type includes' in line:
            new_lines.append(line)
            new_lines.append('#include "saiexperimental' + sai_api['app_name'].replace('_', '') + '.h"\n')
            continue
        new_lines.append(line)
    with open('./SAI/experimental/saiextensions.h', 'w') as f:
        f.write(''.join(new_lines))
    # The SAI Type Extensions: one object-type enum value per generated table.
    with open('./SAI/experimental/saitypesextensions.h', 'r') as f:
        lines = f.readlines()
    new_lines = []
    for line in lines:
        if 'Add new experimental object types above this line' in line:
            for table in sai_api[TABLES_TAG]:
                new_lines.append(' SAI_OBJECT_TYPE_' + table[NAME_TAG].upper() + ',\n\n')
        new_lines.append(line)
    with open('./SAI/experimental/saitypesextensions.h', 'w') as f:
        f.write(''.join(new_lines))
    # The SAI object struct: add an entry member for every non-object table.
    with open('./SAI/inc/saiobject.h', 'r') as f:
        lines = f.readlines()
    new_lines = []
    for line in lines:
        if 'Add new experimental entries above this line' in line:
            for table in sai_api[TABLES_TAG]:
                if table['is_object'] == 'false':
                    new_lines.append(' /** @validonly object_type == SAI_OBJECT_TYPE_' + table[NAME_TAG].upper() + ' */\n')
                    new_lines.append(' sai_' + table[NAME_TAG] + '_t ' + table[NAME_TAG] + ';\n\n')
        if 'new experimental object type includes' in line:
            new_lines.append(line)
            new_lines.append('#include "../experimental/saiexperimental' + sai_api['app_name'].replace('_', '') + '.h"\n')
            continue
        new_lines.append(line)
    with open('./SAI/inc/saiobject.h', 'w') as f:
        f.write(''.join(new_lines))
# CLI
def _str2bool(value):
    """argparse-friendly bool converter.

    Fix: `type=bool` treats ANY non-empty string as True (so
    `--overwrite False` enabled overwriting); parse the text instead.
    """
    return str(value).lower() in ('true', '1', 'yes')

parser = argparse.ArgumentParser(description='P4 SAI API generator')
parser.add_argument('filepath', type=str, help='Path to P4 program RUNTIME JSON file')
parser.add_argument('apiname', type=str, help='Name of the new SAI API')
parser.add_argument('--print-sai-lib', type=_str2bool)
parser.add_argument('--sai-git-url', type=str, default='https://github.com/Opencomputeproject/SAI')
parser.add_argument('--ignore-tables', type=str, default='', help='Comma separated list of tables to ignore')
parser.add_argument('--sai-git-branch', type=str, default='master')
parser.add_argument('--overwrite', type=_str2bool, default=False, help='Overwrite the existing SAI repo')
args = parser.parse_args()

if not os.path.isfile(args.filepath):
    print('File ' + args.filepath + ' does not exist')
    exit(1)

# Refuse to clobber previous output unless --overwrite was requested.
if os.path.exists('./SAI'):
    if not args.overwrite:
        print('Directory ./SAI already exists. Please remove in order to proceed')
        exit(1)
    else:
        shutil.rmtree('./SAI')
if os.path.exists('./lib'):
    if not args.overwrite:
        print('Directory ./lib already exists. Please remove in order to proceed')
        exit(1)
    else:
        shutil.rmtree('./lib')

# Get SAI dictionary from P4 dictionary
print("Generating SAI API...")
with open(args.filepath) as json_program_file:
    json_program = json.load(json_program_file)
sai_apis = generate_sai_apis(json_program, args.ignore_tables.split(','))

# Clone a clean SAI repo
print("Cloning SAI repository...")
Repo.clone_from(args.sai_git_url, './SAI', branch=args.sai_git_branch)
os.mkdir("lib")

# Write SAI dictionary into SAI API headers
sai_api_name_list = []
for sai_api in sai_apis:
    write_sai_files(sai_api)
    write_sai_impl_files(sai_api)
    sai_api_name_list.append(sai_api['app_name'].replace('_', ''))
write_sai_makefile(sai_api_name_list)

if args.print_sai_lib:
    # NOTE: `sai_api` is the loop variable left over from the loop above, so
    # only the last generated API is dumped here.
    print(json.dumps(sai_api, indent=2))
| 14,233 | 5,168 |
from setuptools import setup
def readme():
    """Return the contents of ./README.md (used as the long description).

    The encoding is pinned to UTF-8: the previous implicit default depends
    on the platform locale and can garble non-ASCII README content.
    """
    with open('./README.md', encoding='utf-8') as f:
        return f.read()
# Distribution metadata; long_description (the README contents) is what the
# package index renders on the project page.
setup(
    name='event_loop',
    version='0.1',
    author='mapogolions',
    author_email='ivanovpavelalex45@gmail.com',
    description='Simple event loop',
    license='LICENSE.txt',
    long_description=readme(),
    packages=['event_loop'],
    install_requires=['mood.event', 'pyuv']
)
| 399 | 141 |
class Person:
    """Representation of a person: name, gender and age.

    (The previous docstring said "date of birth", but the class actually
    stores an age value, not a birth date.)
    """

    def __init__(self, name, gender, age):
        self.name = name      # person's name
        self.gender = gender  # free-form gender string
        self.age = age        # age value (printed as-is by __str__)

    def __str__(self):
        return f'Name: {self.name}, Gender: {self.gender}, Age: {self.age}'
# Demo: create a Person and print its string representation.
harry = Person('Harry', 'Male', 47)
print(harry)
| 382 | 126 |
#a = raw_input()
# Python 2 script: input() eval()s the typed text, so `num` is expected to
# arrive as the integer number of test cases.
num = input()
def addone():
    """Treat the global bit list `t` (length `qq`) as a binary counter:
    add one with carry propagation and return the resulting bits as a
    string, least-significant bit first (i.e. `t` reversed).
    """
    global t, qq
    # Increment the least-significant bit, then ripple the carry backwards.
    t[qq-1] += 1
    carry = 0
    c = 0
    ss = ""
    for j in range(qq):
        t[qq-1-j] += carry
        if (t[qq-1-j] > 1):
            # Bit overflowed: reset it and carry into the next position.
            t[qq-1-j] = 0
            carry = 1
        else:
            carry = 0
        c += 1  # (unused counter)
        ss += str(t[qq-1-j])
    return ss
def makestring():
    """Build the wrapped text implied by the break bits in global `t`:
    bit i == 1 means "newline after word b[i]".

    Returns the assembled text, or "" if any produced line exceeds the
    width limit `l`.
    """
    s = ""
    ends = ""
    global t, b, l, qq
    for i in range(len(t)):
        if (t[i] == 1):
            s += b[i] + "\n"
            if (len(s.rstrip()) > l): #just in case the current line is too long
                return ""
            ends += s
            s = ""
        else:
            s += b[i] + " "
    # The last word (b[qq]) is appended to whatever is left on the line.
    st = s + b[qq]
    if (len(st.rstrip()) > l):
        return ""
    else:
        ends += st
    return ends
def calcscore(s):
    """Sum of squared trailing-space penalties, (l - line_length)^2, over
    every line of *s* except the last (width limit `l` is a global)."""
    global l
    lines = s.split("\n")
    return sum((l - len(line.strip())) ** 2 for line in lines[:-1])
# Per-test-case driver (Python 2: raw_input / print statement).
for jjj in range(num):
    a = raw_input()
    b = a.split(" ")
    l = int(b[0])  # line width limit; the remaining tokens are the words
    b = b[1:]
    c = len(b)-1   # number of candidate break positions (between words)
    pp = 1         # (unused)
    t = []
    # Seed the break-bit counter with an alternating 0/1 pattern.
    for i in range(c):
        t.append(i%2)
    qq = len(t)
    q = {}         # (unused)
    ls = 99999999  # best (lowest) score found so far
    lo = ""        # layout achieving that score (unused afterwards)
    # NOTE(review): `qqq` is never defined anywhere in this script, so this
    # comparison raises NameError on first evaluation - presumably it was
    # meant to be the terminal bit pattern of the counter; confirm the
    # intended sentinel before relying on this code.
    while not (addone() == qqq):
        out = makestring()
        if (out != ""):
            gg = calcscore(out)
            if (gg < ls):
                lo = out
                ls = gg
    print "Case #"+str(jjj+1)+":",ls
| 1,121 | 607 |
#!/usr/bin/env python
import sys,os,re,fileinput,argparse
import csv
import random
# CLI: the single required argument is a tab-separated CYC export with the
# header "Pathway-id  Pathway-name  Gene-id  Gene-name".
parser = argparse.ArgumentParser(description="takes a file of CYC data, and produces pairwise info for cytoscape network viewing" )
parser.add_argument("--fi",help="the file, must be headered as \"Pathway-id Pathway-name Gene-id Gene-name\"",required=True)
args = parser.parse_args()
vcffi = args.fi
# Tab-delimited dict reader keyed by the header row.
# NOTE(review): the file handle passed to DictReader is never closed.
full = csv.DictReader(open(vcffi,'r'),delimiter="\t")
#parse results in a map or dict, or what??
#-------------------------------------here by DEFSgONS!!----------------------------------*
####def anyNone(rets):
def getGenes(pathid, pth):
    """Print one tab-separated row per ordered gene pair within a pathway.

    Each row is: pathway id, pathway name, the later gene's id/name, then
    the earlier ("front") gene's id/name. `pathid` is "id:name"; each entry
    of `pth` is "geneid:genename".
    """
    (pwyid, pwyname) = pathid.split(':')
    for position, frontgene in enumerate(pth):
        (frontid, frontname) = frontgene.split(":")
        # Pair the front gene with every gene that appears after it.
        for partner in pth[position + 1:]:
            (geneid, genename) = partner.split(":")
            print(pwyid + "\t" + pwyname + "\t" + geneid + "\t" + genename + "\t" + frontid + "\t" + frontname )
#---------------------------------main-----------------------------------#
# Group genes (as "id:name") per pathway, skipping "unknown" gene ids.
# BUG FIX: the original created the list only on first sight of a pathway
# and discarded that first row's gene; now every non-"unknown" gene is
# recorded, including the first one seen for each pathway.
pre_dict = {}
for line in full:
    pathkey = line['Pathway-id'] + ":" + line["Pathway-name"]
    if pathkey not in pre_dict:
        pre_dict[pathkey] = []
    if "unknown" not in line['Gene-id']:
        pre_dict[pathkey].append(line['Gene-id'] + ":" + line['Gene-name'])
print('Pathway-id\tPathway-name\tGene-id\tGene-name\tTarget-id\tTarget-name')
for path in pre_dict:
    getGenes(path, pre_dict[path])
| 1,658 | 592 |
import os
def makedirs(path):
    """Create *path* and any missing parent directories; an already-existing
    path is not an error.

    Fix: the original swallowed every OSError, hiding real failures such as
    permission errors; only FileExistsError is ignored now.
    """
    try:
        os.makedirs(path)
    except FileExistsError:
        # Already there - that is fine.
        pass
def readfile(path):
    """Return the full contents of the UTF-8 text file at *path*.

    The encoding is pinned to UTF-8: the implicit default depends on the
    platform locale (PEP 597) and can corrupt non-ASCII content.
    """
    with open(path, 'r', encoding='utf-8') as file:
        return file.read()
def writefile(path, content):
    """Write *content* to *path* as UTF-8 text (overwriting any existing
    file) and return the number of characters written.

    The encoding is pinned to UTF-8 to avoid locale-dependent output.
    """
    with open(path, 'w', encoding='utf-8') as file:
        return file.write(content)
def copyfile(_from, to):
    """Copy the text file *_from* to *to* (full read, then full write)."""
    content = readfile(_from)
    writefile(to, content)
def ordinal(number):
    """Return the English ordinal suffix ("st"/"nd"/"rd"/"th") for a number
    given as a decimal string.

    Fix: handles the 11/12/13 exception - "11th", "112th", ... - which the
    last-digit rule alone gets wrong ("11st", "12nd", "13rd").
    """
    if number[-2:] in ("11", "12", "13"):
        return "th"
    if number[-1] == "1":
        return "st"
    elif number[-1] == "2":
        return "nd"
    elif number[-1] == "3":
        return "rd"
    else:
        return "th"
| 564 | 208 |
from .exceptions import UnexpectedValue
from .constants import *
import socket, struct, threading, time
class Tools:
    """
    SOCKS5 (RFC 1928) wire-protocol helpers for both the server and the
    client side, plus username/password subnegotiation (RFC 1929) and a
    simple bidirectional relay for established connections.

    NOTE(review): the methods behave as implicit static methods - they are
    invoked internally via ``__class__.method(...)`` and take the socket as
    their first positional argument instead of ``self``.
    """
    # SOCKS address-type constant -> socket address family.
    # Domain names are mapped to AF_INET here.
    atype2AF = {
        ATYP_IPV4:socket.AF_INET,
        ATYP_IPV6:socket.AF_INET6,
        ATYP_DOMAINNAME:socket.AF_INET
    }
    def recv2(conn:socket.socket, *args):
        """
        Wrapper around conn.recv that turns an empty read (peer closed) or
        any receive error into a ConnectionResetError.
        """
        try:
            data = conn.recv(*args)
            if not data:
                raise ConnectionResetError("Cannot receive data, socket seems closed")
        except Exception as e:
            raise ConnectionResetError(str(e))
        return data
    def serverReadCmd(conn:socket.socket) -> tuple:
        """
        Read and parse cmd message from client
        return (version:int, cmd:int, atype:int, address:str, port:int)
        """
        # Fixed 4-byte header: VER, CMD, RSV (ignored), ATYP.
        ver, cmd, _, atype = __class__.recv2(conn, 4, socket.MSG_WAITALL)
        if atype == ATYP_DOMAINNAME:
            # Domain names are prefixed by a one-byte length.
            length_name, = __class__.recv2(conn, 1, socket.MSG_WAITALL)
            name = __class__.recv2(conn, length_name).decode("utf-8")
        elif atype == ATYP_IPV4:
            name = socket.inet_ntop(socket.AF_INET, __class__.recv2(conn, 4, socket.MSG_WAITALL))
        elif atype == ATYP_IPV6:
            name = socket.inet_ntop(socket.AF_INET6, __class__.recv2(conn, 16, socket.MSG_WAITALL))
        else:
            raise UnexpectedValue(f"Server sent unknown address type {atype}")
        # Port is a 2-byte big-endian integer.
        port = int.from_bytes(__class__.recv2(conn, 2, socket.MSG_WAITALL), byteorder='big')
        return (ver, cmd, atype, name, port)
    def serverSendCmdResp(conn:socket.socket, version:int, rep:int, atype:int, bnd_addr:str, bnd_port:int):
        """Send server response to cmd message"""
        if atype == ATYP_DOMAINNAME:
            bnd_addr = bnd_addr.encode("utf-8")
            data = struct.pack(f"!BBxBB{len(bnd_addr)}sH", version, rep, atype, len(bnd_addr), bnd_addr, bnd_port)
        elif atype == ATYP_IPV4:
            data = struct.pack("!BBxB4sH", version, rep, atype, socket.inet_pton(socket.AF_INET, bnd_addr), bnd_port)
        elif atype == ATYP_IPV6:
            data = struct.pack("!BBxB16sH", version, rep, atype, socket.inet_pton(socket.AF_INET6, bnd_addr), bnd_port)
        # NOTE(review): an unrecognized atype leaves `data` unbound and makes
        # the next line raise NameError - consider an explicit error instead.
        conn.send(data)
    def serverReadHello(conn:socket.socket) -> tuple:
        """Read and parse "greetings" message from client
        return (version:int, methods:list[int])"""
        # Header: VER plus the number of offered auth methods.
        b = __class__.recv2(conn, 2, socket.MSG_WAITALL)
        ver = b[0]
        nm = b[1]
        # One byte per offered auth method.
        b = __class__.recv2(conn, nm, socket.MSG_WAITALL)
        methods = []
        for mtd in b:
            methods.append(mtd)
        return (ver, methods)
    def serverSendHelloResp(conn:socket.socket, version:int, authtype:int):
        """Send server response to greeings message """
        conn.send(struct.pack("BB", version, authtype))
    def serverReadAuthCreds(conn:socket.socket) ->tuple:
        """
        Get client creds by rfc1929 (socks username/password auth)
        return (version:int, username:str, password:str)
        """
        # Layout: VER, ULEN, UNAME (ULEN bytes), PLEN, PASSWD (PLEN bytes).
        version, ulen = struct.unpack("BB", __class__.recv2(conn, 2, socket.MSG_WAITALL))
        username = __class__.recv2(conn, ulen, socket.MSG_WAITALL)
        plen = ord(__class__.recv2(conn, 1))
        password = __class__.recv2(conn, plen, socket.MSG_WAITALL)
        return (version, username.decode("utf-8"), password.decode("utf-8"))
    def serverSendAuthResp(conn:socket.socket, version:int, status:int):
        """
        Send response auth \n
        status greater than 0 indicates auth failture
        """
        conn.send(struct.pack('BB', version, status))
    #-------------------------------------------------------------------------------
    def clientSendHello(conn:socket.socket, version:int, authtypes:list[int]):
        """
        Sends a client Greetings message to server (version, authtypes)
        """
        conn.send(struct.pack(f"BB{'B'*len(authtypes)}", version, len(authtypes), *authtypes))
    def clientReadHelloResp(conn:socket.socket):
        """
        Reads server Greetings message (version, selected auth type)
        returns (version:int, selectedauth:int)
        """
        version, selected_auth = __class__.recv2(conn, 2)
        return (version, selected_auth)
    def clientSendCmd(conn:socket.socket, version:int, cmd:int, atype:int, adress:str, port:int):
        """
        Sends a command to server
        """
        if atype == ATYP_DOMAINNAME:
            conn.send(struct.pack(f"!BBxBB{len(adress)}sH", version, cmd, atype, len(adress), adress.encode("utf-8"), port))
        elif atype == ATYP_IPV4:
            conn.send(struct.pack("!BBxB4sH", version, cmd, atype, socket.inet_pton(socket.AF_INET, adress), port) )
        elif atype == ATYP_IPV6:
            conn.send(struct.pack("!BBxB16sH", version, cmd, atype, socket.inet_pton(socket.AF_INET6, adress), port))
        else:
            raise UnexpectedValue(f"Cliend sent unknown address type {atype}")
    def clientReadCmdResp(conn:socket.socket):
        """
        Reads server command response\n
        returns (version:int, rep:int, atype:int, address:str, port:int)
        """
        # Fixed 4-byte header: VER, REP, RSV (skipped by "x"), ATYP.
        b = __class__.recv2(conn, 4)
        version, rep, atype = struct.unpack("BBxB", b)
        if atype == ATYP_DOMAINNAME:
            adrsize = __class__.recv2(conn, 1)[0]
            address, port = struct.unpack(f"!{adrsize}sH", __class__.recv2(conn, adrsize+2))
        elif atype == ATYP_IPV4:
            address, port = struct.unpack("!4sH", __class__.recv2(conn, 10))
            address = socket.inet_ntop(socket.AF_INET, address)
        elif atype == ATYP_IPV6:
            address, port = struct.unpack("!16sH", __class__.recv2(conn, 18))
            address = socket.inet_ntop(socket.AF_INET6, address)
        else:
            raise UnexpectedValue(f"Sever sent unknown address type {atype}")
        return (version, rep, atype, address, port)
    def clientSendAuth(conn:socket.socket, username:str, password:str):
        """
        Sends username/pasword auth packet
        """
        # Layout per RFC 1929: VER(=1), ULEN, UNAME, PLEN, PASSWD.
        s = struct.pack(f"BB{len(username)}sB{len(password)}s", 1, len(username), username.encode("utf-8"), len(password), password.encode("utf-8"))
        conn.send(s)
    def clientReadAuthResp(conn:socket.socket):
        """
        Reads server response on username/password auth
        return (ver:int, status:int)
        """
        ver, status = __class__.recv2(conn, 2)
        return (ver, status)
    def proxy(target1:socket.socket, target2:socket.socket):
        """
        sends data from target1 to target2 and back\n
        when at least one socket closed, returns control\n
        sets timeout both sockets to 5
        """
        def resend(from_s:socket.socket, to_s:socket.socket):
            # Pump bytes one way until the source closes or errors out.
            try:
                from_s.settimeout(5)
                while True:
                    try:
                        b = from_s.recv(1024)
                        if len(b) == 0:
                            return
                        to_s.send(b)
                    except socket.timeout as e:
                        # Timeout just means "no data yet"; keep polling.
                        pass
                    except Exception as e:
                        # print(f"c > t {e}")
                        return
            except:
                pass
        # One thread per direction; join by polling until either side stops.
        t1 = threading.Thread(target=resend, args=(target1, target2), name=f"{target1.getpeername()} client > I am > target {target2.getpeername()} ")
        t2 = threading.Thread(target=resend, args=(target2, target1), name=f"{target1.getpeername()} client < I am < target {target2.getpeername()} ")
        t1.start()
        t2.start()
        while t1.is_alive() and t2.is_alive():
            time.sleep(5)
        return
| 7,873 | 2,545 |
def binary_search(search_num, sorted_arr):
    """Recursive binary search over an ascending list; True if found.

    https://runestone.academy/runestone/books/published/pythonds/SortSearch/TheBinarySearch.html
    First Q at https://dev.to/javinpaul/20-basic-algorithms-problems-from-coding-interviews-4o76

    Fixes vs. the original:
    - `abs(arr_len / 2)` produced a float, a TypeError when used as an index
      on Python 3;
    - the recursive calls sliced by the *value* at the midpoint instead of
      the midpoint *index* (wrong for any list whose values differ from
      their positions);
    - a miss on a short list fell off the end and returned None instead of
      False.
    """
    if not sorted_arr:
        return False
    mid = len(sorted_arr) // 2
    mid_value = sorted_arr[mid]
    if mid_value == search_num:
        return True
    if mid_value < search_num:
        # Target can only be in the upper half (midpoint excluded).
        return binary_search(search_num, sorted_arr[mid + 1:])
    # Target can only be in the lower half (midpoint excluded).
    return binary_search(search_num, sorted_arr[:mid])
def binary_search_no_rec(search_num, sorted_arr):
    """Iterative binary search over an ascending list; True if found.

    The per-probe debug print has been removed - it leaked internal state to
    stdout on every call.
    """
    first = 0
    last = len(sorted_arr) - 1
    while first <= last:
        midpoint = (first + last) // 2
        if sorted_arr[midpoint] == search_num:
            return True
        if sorted_arr[midpoint] > search_num:
            last = midpoint - 1
        else:
            first = midpoint + 1
    return False
if __name__ == "__main__":
    # Quick smoke run of the iterative search.
    sample = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    print(binary_search_no_rec(5, sample))
| 1,402 | 501 |
import unittest
import axelrod
class TestResultSet(unittest.TestCase):
    """Unit tests for axelrod.ResultSet score/ranking/payoff generation.

    Fixes vs. the original:
    - the deprecated `assertEquals` alias (removed in Python 3.12) is
      replaced with `assertEqual`;
    - `assertTrue(rs.results, expected_results)` in test_init treated the
      expected value as a mere failure message and so asserted almost
      nothing; it is now a real equality check.
    """
    @classmethod
    def setUpClass(cls):
        # Shared fixtures: three players and hand-computed expected outputs.
        cls.players = ('Player1', 'Player2', 'Player3')
        cls.test_results = [
            [[0, 0], [10, 10], [21, 21]],
            [[10, 8], [0, 0], [16, 20]],
            [[16, 16], [16, 16], [0, 0]],
        ]
        cls.expected_scores = [
            [3.1, 3.1],
            [2.6, 2.8],
            [3.2, 3.2],
        ]
        cls.expected_payoffs = [
            [0.0, 2.0, 4.2],
            [1.8, 0.0, 3.6],
            [3.2, 3.2, 0.0],
        ]
        cls.test_payoffs_list = [
            [[0, 10, 21], [10, 0, 16], [16, 16, 0]],
            [[0, 10, 21], [8, 0, 20], [16, 16, 0]],
        ]
        cls.expected_stddevs = [
            [0.0, 0.0, 0.0],
            [0.20, 0.0, 0.40],
            [0.0, 0.0, 0.0],
        ]
        cls.expected_ranking = [2, 0, 1]
        cls.expected_ranked_names = ['Player3', 'Player1', 'Player2']
        cls.expected_csv = 'Player3,Player1,Player2\n3.2,3.1,2.6\n3.2,3.1,2.8\n'
    def test_init(self):
        rs = axelrod.ResultSet(self.players, 5, 2)
        expected_results = [[[0, 0] for j in range(3)] for i in range(3)]
        self.assertEqual(rs.nplayers, 3)
        self.assertEqual(rs.players, self.players)
        self.assertEqual(rs.turns, 5)
        self.assertEqual(rs.repetitions, 2)
        # Was `assertTrue(rs.results, expected_results)`, which only tested
        # truthiness and used the expected value as the failure message.
        self.assertEqual(rs.results, expected_results)
        self.assertFalse(rs.finalised)
    def test_generate_scores(self):
        rs = axelrod.ResultSet(self.players, 5, 2)
        rs.results = self.test_results
        self.assertEqual(rs.generate_scores(), self.expected_scores)
    def test_generate_ranking(self):
        rs = axelrod.ResultSet(self.players, 5, 2)
        rs.results = self.test_results
        scores = rs.generate_scores()
        self.assertEqual(rs.generate_ranking(scores), self.expected_ranking)
    def test_generate_ranked_names(self):
        rs = axelrod.ResultSet(self.players, 5, 2)
        rs.results = self.test_results
        scores = rs.generate_scores()
        rankings = rs.generate_ranking(scores)
        self.assertEqual(rs.generate_ranked_names(rankings), self.expected_ranked_names)
    def test_generate_payoff_matrix(self):
        rs = axelrod.ResultSet(self.players, 5, 2)
        rs.results = self.test_results
        payoffs, stddevs = rs.generate_payoff_matrix()
        # Round the stddevs so they compare cleanly against the fixtures.
        stddevs = [[round(x, 1) for x in row] for row in stddevs]
        self.assertEqual(payoffs, self.expected_payoffs)
        self.assertEqual(stddevs, self.expected_stddevs)
    def test_finalise(self):
        rs = axelrod.ResultSet(self.players, 5, 2)
        rs.finalise(self.test_payoffs_list)
        self.assertEqual(rs.scores, self.expected_scores)
        self.assertEqual(rs.ranking, self.expected_ranking)
        self.assertEqual(rs.ranked_names, self.expected_ranked_names)
        self.assertTrue(rs.finalised)
        # Finalising twice must raise.
        self.assertRaises(AttributeError, rs.finalise, self.test_payoffs_list)
    def test_csv(self):
        rs = axelrod.ResultSet(self.players, 5, 2)
        # csv() before finalise must raise.
        self.assertRaises(AttributeError, rs.csv)
        rs.finalise(self.test_payoffs_list)
        rs.results = self.test_results
        self.assertEqual(rs.csv(), self.expected_csv)
| 3,292 | 1,269 |
#There is a collection of
#input strings and a collection of query strings. For each query string, determine how many times it occurs in the list of input strings
def f():
    """Demo: for each query string, count how many input strings equal it.

    Prints and returns the list of counts. The hand-rolled counting loop
    that survived only inside a dead triple-quoted string has been removed;
    `list.count` expresses the same tally directly.
    """
    strings = ['aba', 'baba', 'aba', 'xzxb']
    queries = ['aba', 'xzxb', 'ab']
    res = [strings.count(q) for q in queries]
    print(res)
    return res
# Run the demo once at import time.
f()
#2nd solution
def matchingStrings(strings, queries):
    """Return, for each query string, how many input strings equal it."""
    return [strings.count(query) for query in queries]
| 814 | 247 |
# Generated by Django 3.2.4 on 2021-06-30 14:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the personal_list app: SongList
    (a user-owned collection of songs) and SongRating (a rating of a song
    within a particular list)."""
    initial = True
    dependencies = [
        # Needs the `home` app's Song model and the (swappable) user model.
        ('home', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # SongList: owned by a user, with a many-to-many link to songs.
        migrations.CreateModel(
            name='SongList',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('songs', models.ManyToManyField(to='home.Song')),
            ],
        ),
        # SongRating: a labelled 1-10 rating, stored as a string choice.
        migrations.CreateModel(
            name='SongRating',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rating', models.CharField(choices=[('1', 'Appalling'), ('2', 'Horrible'), ('3', 'Very Bad'), ('4', 'Bad'), ('5', 'Average'), ('6', 'Fine'), ('7', 'Good'), ('8', 'Very Good'), ('9', 'Great'), ('10', 'Masterpiece')], max_length=2)),
                ('parent_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personal_list.songlist')),
                ('song', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.song')),
            ],
        ),
    ]
| 1,494 | 483 |
'''Ref: https://arxiv.org/pdf/2003.00295.pdf'''
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
from flwr.common import (
EvaluateIns,
EvaluateRes,
FitIns,
FitRes,
Weights,
parameters_to_weights,
weights_to_parameters,
)
from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg
from flwr.server.client_proxy import ClientProxy
import torch
from .FedStrategy import FedStrategy
import json
DEFAULT_SERVER_ADDRESS = "[::]:8080"  # default bind address for the server
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # prefer GPU when available
DATA_ROOT = "uploads/testset.csv"  # server-side test set used to score client updates
class FedOpt(FedStrategy):
    """Server-side adaptive federated optimization (FedOpt family:
    Adagrad / Yogi / Adam second-moment rules, selected by `mode`).
    Ref: https://arxiv.org/pdf/2003.00295.pdf.

    Each round: FedAvg the client weights, form the pseudo-gradient against
    the current server weights, update the second-moment accumulator `v_t`,
    then take an adaptive server step with learning rate `eta`.
    """
    def __init__(
        self,
        *,
        fraction_fit: float = 0.1,
        fraction_eval: float = 0.1,
        min_fit_clients: int = 2,
        min_eval_clients: int = 2,
        min_available_clients: int = 2,
        eval_fn = None,
        on_fit_config_fn = None,
        on_evaluate_config_fn = None,
        accept_failures = True,
        mode = 'adagrad',
        beta = 0.99,
        initial_parameters = None,
        eta: float = 1e-1,
        eta_l: float = 1e-1,
        tau: float = 1e-9,
    ) -> None:
        """
        :param mode: second-moment rule - 'adagrad', 'yogi' or 'adam'
        :param beta: decay rate used by the yogi/adam rules
        :param initial_parameters: starting server weights
        :param eta: server-side learning rate
        :param eta_l: stored but not referenced in this class's visible code
                      (presumably the client-side learning rate - confirm)
        :param tau: adaptivity constant added to the update denominator
        """
        super().__init__(
            fraction_fit=fraction_fit,
            fraction_eval=fraction_eval,
            min_fit_clients=min_fit_clients,
            min_eval_clients=min_eval_clients,
            min_available_clients=min_available_clients,
            eval_fn=eval_fn,
            on_fit_config_fn=on_fit_config_fn,
            on_evaluate_config_fn=on_evaluate_config_fn,
            accept_failures=accept_failures,
            initial_parameters=initial_parameters,
        )
        self.mode = mode
        # Current server weights; replaced in-place by aggregate_fit each round.
        self.current_weights = initial_parameters
        self.beta = beta
        self.eta = eta
        self.eta_l = eta_l
        self.tau = tau
        # Second-moment accumulator (one array per weight tensor); lazily
        # initialized to zeros on the first aggregation round.
        self.v_t: Optional[Weights] = None
    def __repr__(self) -> str:
        rep = f"FedOpt(accept_failures={self.accept_failures})"
        return rep
    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Optional[Weights]:
        """Aggregate the round's client results and apply the adaptive
        server update; returns the new server weights, or None when there is
        nothing to aggregate (no results, or failures not accepted)."""
        if not results:
            return None
        if not self.accept_failures and failures:
            return None
        # Score each client's returned model on the server-side test set and
        # log the accuracy per client id.
        # NOTE(review): self.model, self.contrib and self.set_weights come
        # from the FedStrategy base class (not visible here) - confirm
        # `contrib` maps cid -> list of accuracies.
        net = self.model.Loader(DATA_ROOT).load_model()
        testset, _ = self.model.Loader(DATA_ROOT).load_data()
        testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)
        for client, fit_res in results:
            self.set_weights(net, parameters_to_weights(fit_res.parameters))
            net.to(DEVICE)
            loss, acc = self.model.test(net, testloader, device=DEVICE)
            self.contrib[fit_res.metrics['cid']].append(acc)
        # Example-count-weighted FedAvg of the client weights.
        weights_results = [
            (parameters_to_weights(fit_res.parameters), fit_res.num_examples)
            for client, fit_res in results
        ]
        fedavg_aggregate = aggregate(weights_results)
        if fedavg_aggregate is None:
            return None
        # Pseudo-gradient: averaged client weights minus current server
        # weights, tensor by tensor.
        aggregated_updates = [
            subset_weights - self.current_weights[idx]
            for idx, subset_weights in enumerate(fedavg_aggregate)
        ]
        delta_t = aggregated_updates
        # Lazily create the second-moment accumulator on the first round.
        if not self.v_t:
            self.v_t = [np.zeros_like(subset_weights) for subset_weights in delta_t]
        # Second-moment update for the selected FedOpt variant.
        if self.mode == 'adagrad':
            self.v_t = [
                self.v_t[idx] + np.multiply(subset_weights, subset_weights)
                for idx, subset_weights in enumerate(delta_t)
            ]
        if self.mode == 'yogi':
            self.v_t = [
                self.v_t[idx] - (1 - self.beta)*np.multiply(subset_weights, subset_weights)*np.sign(self.v_t[idx] - np.multiply(subset_weights, subset_weights))
                for idx, subset_weights in enumerate(delta_t)
            ]
        if self.mode == 'adam':
            self.v_t = [
                self.beta*self.v_t[idx] + (1 - self.beta)*np.multiply(subset_weights, subset_weights)
                for idx, subset_weights in enumerate(delta_t)
            ]
        # Adaptive server step: w <- w + eta * delta / (sqrt(v_t) + tau).
        new_weights = [
            self.current_weights[idx]
            + self.eta * delta_t[idx] / (np.sqrt(self.v_t[idx]) + self.tau)
            for idx in range(len(delta_t))
        ]
        self.current_weights = new_weights
        self.set_weights(net, new_weights)
        if new_weights is not None:
            print(f"Saving round {rnd} model...")
            torch.save(net, f"round-{rnd}-model.pt")
        # Persist the per-client contribution log for later inspection.
        with open('contrib.json', 'w') as outfile:
            json.dump(self.contrib, outfile)
        return self.current_weights
# also update in setup.py
__version__ = '0.6.9'  # package version string - keep in sync with setup.py
| 48 | 21 |
import setuptools
# NOTE(review): the original read README.md into `long_description` and then
# immediately overwrote it with the hard-coded string below, so the file read
# was dead code (and a build-time crash if README.md was missing). The dead
# read has been removed; if the README was meant to be the long description,
# restore the read and drop the literal instead.
long_description = """
Short description...
"""

setuptools.setup(
    name='test_package_kthdesa',
    version='1.0.0',
    author='Alexandros Korkovelos',
    author_email='alekor@desa.kth.se',
    description='This is a test package',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/alekordESA/package-template',
    packages=['test_package_kthdesa'],
    install_requires=[
        'numpy>=1.16',
        'pandas>=0.24'
    ],
    classifiers=[
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: OS Independent',
    ],
)
import torch
from torch.nn import LeakyReLU
from torch.nn.functional import interpolate
from gan.EqualizedLayers import EqualizedConv2d, EqualizedDeconv2d
class PixelwiseNormalization(torch.nn.Module):
    """
    Pixelwise feature-vector normalization, as suggested in section 4.2 of
    https://research.nvidia.com/sites/default/files/pubs/2017-10_Progressive-Growing-of/karras2018iclr-paper.pdf.
    At every spatial location (i, j) the vector across all channels is
    rescaled so that its root-mean-square is 1.
    """

    def __init__(self):
        super(PixelwiseNormalization, self).__init__()

    def forward(self, x, eps=1e-8):
        """
        :param x: input with shape (batch_size x num_channels x img_width x img_height)
        :param eps: small constant to avoid division by zero
        :return: normalized tensor of the same shape
        """
        mean_square = (x * x).mean(dim=1, keepdim=True)
        return x / (mean_square + eps).sqrt()
class GenInitialBlock(torch.nn.Module):
    """
    First generator block: maps the latent vector to a 4x4 feature map.

        input: latent noise vector (latent_size x 1 x 1)
        Deconvolution 4 x 4 -> LeakyReLU -> latent_size x 4 x 4
        Convolution   3 x 3 -> LeakyReLU -> latent_size x 4 x 4
        output: pixelwise-normalized feature map (latent_size x 4 x 4)
    """

    def __init__(self, latent_size):
        """
        :param latent_size: size of noise input for generator
        """
        super(GenInitialBlock, self).__init__()
        self.layer1 = EqualizedDeconv2d(in_channels=latent_size, out_channels=latent_size, kernel_size=(4, 4))
        self.layer2 = EqualizedConv2d(in_channels=latent_size, out_channels=latent_size, kernel_size=(3, 3), padding=1)
        self.pixel_normalization = PixelwiseNormalization()
        self.activation = LeakyReLU(negative_slope=0.2)

    def forward(self, x):
        """
        :param x: input noise (batch_size x latent_size)
        :return: normalized 4x4 feature map
        """
        # Give the flat noise vector singleton spatial dimensions:
        # (batch_size x latent_size) -> (batch_size x latent_size x 1 x 1)
        out = x.unsqueeze(-1).unsqueeze(-1)
        out = self.activation(self.layer1(out))
        out = self.activation(self.layer2(out))
        return self.pixel_normalization(out)
class GenConvolutionalBlock(torch.nn.Module):
    """
    Regular generator block: doubles the spatial resolution.

        input: image (in_channels x img_width x img_height)
        Upsampling (scale 2)                        -> in_channels  x 2w x 2h
        Convolution 3 x 3 -> LeakyReLU -> PixelNorm -> out_channels x 2w x 2h
        Convolution 3 x 3 -> LeakyReLU -> PixelNorm -> out_channels x 2w x 2h
        output: image with out_channels channels and doubled size
    """

    def __init__(self, in_channels, out_channels):
        super(GenConvolutionalBlock, self).__init__()
        self.upsample = lambda x: interpolate(x, scale_factor=2)
        self.layer1 = EqualizedConv2d(in_channels, out_channels, kernel_size=(3, 3), padding=1)
        self.layer2 = EqualizedConv2d(out_channels, out_channels, kernel_size=(3, 3), padding=1)
        self.pixel_normalization = PixelwiseNormalization()
        self.activation = LeakyReLU(negative_slope=0.2)

    def forward(self, x):
        out = self.upsample(x)
        # Both convolutions share the conv -> LeakyReLU -> pixel-norm pattern.
        for conv in (self.layer1, self.layer2):
            out = self.pixel_normalization(self.activation(conv(out)))
        return out
class Generator(torch.nn.Module):
    """Progressive-growing GAN generator: an initial 4x4 block followed by
    `depth - 1` upsampling blocks, with a 1x1 to-RGB converter per
    resolution so every intermediate depth can emit an image."""
    @staticmethod
    def __to_rgb(in_channels):
        # 1x1 convolution mapping a feature map to a 3-channel RGB image.
        return EqualizedConv2d(in_channels, 3, (1, 1))
    def __init__(self, depth, latent_size):
        """
        :param depth: depth of the generator, i.e. number of blocks (initial + convolutional)
        :param latent_size: size of input noise for the generator
        """
        super(Generator, self).__init__()
        self.depth = depth
        self.latent_size = latent_size
        self.initial_block = GenInitialBlock(self.latent_size)
        self.blocks = torch.nn.ModuleList([])
        # hold an rgb converter for every intermediate resolution to visualize intermediate results
        self.rgb_converters = torch.nn.ModuleList([self.__to_rgb(self.latent_size)])
        for i in range(self.depth - 1):
            if i < 3:
                # first three blocks do not reduce the number of channels
                in_channels = self.latent_size
                out_channels = self.latent_size
            else:
                # half number of channels in each block
                in_channels = self.latent_size // pow(2, i - 3)
                out_channels = self.latent_size // pow(2, i - 2)
            block = GenConvolutionalBlock(in_channels, out_channels)
            rgb = self.__to_rgb(out_channels)
            self.blocks.append(block)
            self.rgb_converters.append(rgb)
    def forward(self, x, current_depth, alpha):
        """
        :param x: input noise (batch_size x latent_size)
        :param current_depth: depth at which to evaluate (maximum depth of the forward pass)
        :param alpha: interpolation between current depth output (alpha) and previous depth output (1 - alpha)
        :return: RGB image at the resolution belonging to `current_depth`
        """
        y = self.initial_block(x)
        if current_depth == 0:
            return self.rgb_converters[0](y)
        # Run every fully-faded-in block below the current depth.
        for block in self.blocks[:current_depth - 1]:
            y = block(y)
        # Previous-resolution RGB output, upsampled to the new resolution.
        residual = self.rgb_converters[current_depth - 1](interpolate(y, scale_factor=2))
        # New-resolution RGB output through the newest (fading-in) block.
        straight = self.rgb_converters[current_depth](self.blocks[current_depth - 1](y))
        # fade in new layer
        return alpha * straight + (1 - alpha) * residual
| 5,801 | 1,891 |